diff --git a/.terraform.lock.hcl b/.terraform.lock.hcl index 664b83d5..4d8e919b 100644 --- a/.terraform.lock.hcl +++ b/.terraform.lock.hcl @@ -2,37 +2,37 @@ # Manual edits may be lost in future updates. provider "registry.terraform.io/hashicorp/aws" { - version = "6.37.0" + version = "6.38.0" constraints = ">= 4.56.0" hashes = [ - "h1:2OzkE8hCk+rVC0Ljxw6oCwNHpJfHHiU/i6sBIGg1ZZQ=", - "h1:4sToAMU3GsSC0Orc3JqcgOMRdMxHbAtbVRSav4fbvMk=", - "h1:IvmQvCUpUSL2T1xtxxPhP/MXl9efLD4NmXDBAG1YTNE=", - "h1:KMbPmTWY92+hoTEIYCJvmh5F7eJCESoswyOirL23Tws=", - "h1:OdQC/z+3ReUAPhlXCJIJYUZlSQ3b96TWfFK3lCT5zKU=", - "h1:Q6oMyOOO0SgKhXvDODv2nBWlO7m8/uXaGF7XALj1tuQ=", - "h1:Um4kBHMX/BW9k3wYGoyxcpNmd/5UdxHhSlyNXp6tVCU=", - "h1:VKlndKPfI8jFbdNAHdlzglmOt/XLynNc14wizVQY5z0=", - "h1:Z9l6B2KTCXI81ljNyriWU2vUd4gZlOs6uceBowDcOrQ=", - "h1:l6dLejTvvF+0HnvW7DsjY/jVKjgTo627Pc4C6nYGNHM=", - "h1:mcQsALya7Snbo3yxJ4Kb/ZJzYd6xFw7BX/ejn0qydCU=", - "h1:uUrMuvVNnDE13pIUuwNN/yyZcPB83cQzsc5RRHQQ14E=", - "h1:w3z/TApcKD3b/aMZoZZKSxOld4xw+gEtQ1ka6C1UN+4=", - "h1:z+wSA7CUTunt2Kb+O4TKNDT4pmWOovNoUxijZLn3n9w=", - "zh:0427fadb719ed5a32feb09f047539d2348e659056f3b8a8589d34d8f0a95be7a", - "zh:3891c670674aba2125a7ac6d4348cde43646b1b46ce6f829e6f4724091bc0dcd", - "zh:632cb24b7b5790b730b33bcbe9f1a7b75f2644fb52f9d6aaafb0249c9e7601d2", - "zh:6e96ed1f824c2efa9de5b7c22ab3715624ba34c28564a06e9a15e71bc3d3a30b", - "zh:7b8fd86907b659bc45f4a3f42c3c0ccc66925a74e265b01e9e66242c0b2cafef", - "zh:81f9a587deddef4dfcc2101c54ec28a3a554056837f68ebb920c83fe8327b16f", + "h1:2NhckHRVSF36dyFYd15myz4AjReQbOYgNQ/7Li29wcc=", + "h1:3MP3AOAntDTXnrWry8XlWL+6M8rMRhlTp0ZUxNyrH/A=", + "h1:7EtWzjLeg1qmbf7mSOvy3T/alXehJRCZxFu1Et/IHkw=", + "h1:7F3W4qGLTbr4aploSI8eIqE4AueoNe/Tq5Osuo0IgJ4=", + "h1:7al1E+/zrmxL7qgGRoR7b//8d2oJxpguLEuFnBmRCVY=", + "h1:GOlIkFIhuKpVeBG+m77rXhiT7/r1AKFfhokm9WdZj1o=", + "h1:IMf41BcW9huOeVcrt6XjQqadYR2xD8zkUpGLLERJ4NM=", + "h1:RDoKIzXmt7H1mNFcNIyRT+nA/gTJyO3+iW9QGN5I2eQ=", + "h1:Yx7kDYSjFBAuvq3nbmgy+N9+ilJB8NNsIrBusm0nm5w=", + 
"h1:fJlpWm8M4RGM5TZGeblUiP3WqhUI0zV0hM+NAJ9lVlY=", + "h1:kQD9Ehy9Iy+11jp2JkKE3I62DxshXeIMhA/su43Md9k=", + "h1:t/tF5CzmNbAxPVVVYjojbK2T60b4X/5RINmY8yKbu1E=", + "h1:thWyDCjV9CmSOSWBTCrG/P3bNlYBzRl6QVj0WcSisLM=", + "h1:wBWvJVZJBEQT6ty8LYk/QoS7xM2E+zAEwKTqFlvayGw=", + "zh:143f118ae71059a7a7026c6b950da23fef04a06e2362ffa688bef75e43e869ed", + "zh:29ee220a017306effd877e1280f8b2934dc957e16e0e72ca0222e5514d0db522", + "zh:3a31baabf7aea7aa7669f5a3d76f3445e0e6cce5e9aea0279992765c0df12aee", + "zh:4c1908e62040dbc9901d4426ffb253f53e5dae9e3e1a9125311291ee265c8d8c", + "zh:550f4789f5f5b00e16118d4c17770be3ef4535d6b6928af1cf91ebd30f2c263b", + "zh:6537b7b70bf2c127771b0b84e4b726c834d10666b6104f017edae50c67ebae37", "zh:9b12af85486a96aedd8d7984b0ff811a4b42e3d88dad1a3fb4c0b580d04fa425", - "zh:a9a38a67cb98d690fec951ec3e133b6836279629db2ed3a0ebf97a5bea58674f", - "zh:b18f60d62e4bd4d466077e09c39259d1a85355f0f00b801fe8aedbc50193d357", - "zh:b7a51bc0faf60d17043b4df1d1b7bb55129eaa4bdeb65ff55f5b00b9b8fee9f7", - "zh:c28c42f91ca3a6b65b3fd3ed6e891fc0fc28d0cb5ab65dea65eda8eec5cea5f3", - "zh:d895ddc04280ed26b6ca64ca05b78caaa7b72c8e167af4093545efbc608d5482", - "zh:f4a56f5157009ef160fbd79105078fe675df479cb73c1b7e1fea2741403a0b67", - "zh:f547d6ca371b96fec97b972fc0c93bcfc23d58e34a9da215b94e9d2aa170fb77", - "zh:f7b0a3cd4adadd3f4b9609a54e651ed5eafa22c196ab229042fc1d0aa0ab8f3a", + "zh:af2f9cea0c8bdf5b2a2391f2d179a946c117196f7c829b919673cae3b71d2943", + "zh:c53ffa685381aa4e73158fd9f529239f95938dea330e7aca0b32e7b2a1210432", + "zh:d0995e1d64a7ec8bbc79fc3fbec3749f989e07f211a318705c37cd6a7c7d19e4", + "zh:d2348ffcffc1282983d7a5838dd5d61f372152fe6c0d10868cd6473352318750", + "zh:e449312efb73e4747165e689302a68a1df8ba5755e7f59097069acf82c94f011", + "zh:ec3a538d264ef79380e56fdf107ffb6c0446814f07fc5890c36855fe1e03196b", + "zh:f441e69699b22e32c96a8cdd3bbe694ed302c0dcfe867cd9bd683a16df362714", + "zh:f6f8eaa605ff902234d7e9bdab4fda977185fce14f8576f7b622c914c7d98008", ] } diff --git a/aws-source/module/.cursor/BUGBOT.md 
b/aws-source/module/.cursor/BUGBOT.md deleted file mode 100644 index 08d7fca8..00000000 --- a/aws-source/module/.cursor/BUGBOT.md +++ /dev/null @@ -1,16 +0,0 @@ -# Terraform Module Review Rules - -## HCL: IAM policy must stay read-only - -If any changed `.tf` file modifies an IAM policy statement's `Action` list: - -- Verify every action uses only read-only prefixes: `Get*`, `Describe*`, `List*`, `GetBucket*`, `ListAllMyBuckets`, `ListTagsForResource`, `GetMetricData`. -- Add a blocking Bug titled "IAM policy contains write actions" if any action allows mutation (e.g., `Put*`, `Create*`, `Delete*`, `Update*`, `Attach*`, `Detach*`). -- Body: "The Overmind IAM role must be strictly read-only. Write actions violate customer trust policies and the principle of least privilege. Remove the offending actions." - -## Provider Go: Use diag.Diagnostics for errors - -If any changed `.go` file in `provider/` returns an error from a resource or data source CRUD function using bare `fmt.Errorf` or `errors.New`: - -- Add a warning titled "Use diag.Diagnostics instead of bare errors" -- Body: "Terraform provider resource and data source functions should return errors via `diag.Diagnostics` (e.g., `diag.FromErr(err)`) so that Terraform can display structured error output to users. See the [Terraform Plugin Framework documentation](https://developer.hashicorp.com/terraform/plugin/framework/diagnostics) for guidance." 
diff --git a/cmd/pterm.go b/cmd/pterm.go index e67fc8f9..c64f99a7 100644 --- a/cmd/pterm.go +++ b/cmd/pterm.go @@ -2,8 +2,6 @@ package cmd import ( "context" - "encoding/base64" - "encoding/json" "errors" "fmt" "net/http" @@ -286,64 +284,3 @@ func heartbeatOptions(oi sdp.OvermindInstance, token *oauth2.Token) *discovery.H } } -func HasScopesFlexible(token *oauth2.Token, requiredScopes []string) (bool, string, error) { - if token == nil { - return false, "", errors.New("HasScopesFlexible: token is nil") - } - - claims, err := extractClaims(token.AccessToken) - if err != nil { - return false, "", fmt.Errorf("error extracting claims from token: %w", err) - } - - for _, scope := range requiredScopes { - if !claims.HasScope(scope) { - // If they don't have the *exact* scope, check to see if they have - // write access to the same service - sections := strings.Split(scope, ":") - var hasWriteInstead bool - - if len(sections) == 2 { - service, action := sections[0], sections[1] - - if action == "read" { - hasWriteInstead = claims.HasScope(fmt.Sprintf("%v:write", service)) - } - } - - if !hasWriteInstead { - return false, scope, nil - } - } - } - - return true, "", nil -} - -// extracts custom claims from a JWT token. Note that this does not verify the -// signature of the token, it just extracts the claims from the payload -func extractClaims(token string) (*auth.CustomClaims, error) { - // We aren't interested in checking the signature of the token since - // the server will do that. All we need to do is make sure it - // contains the right scopes. 
Therefore we just parse the payload - // directly - sections := strings.Split(token, ".") - if len(sections) != 3 { - return nil, errors.New("token is not a JWT") - } - - // Decode the payload - decodedPayload, err := base64.RawURLEncoding.DecodeString(sections[1]) - if err != nil { - return nil, fmt.Errorf("error decoding token payload: %w", err) - } - - // Parse the payload - claims := new(auth.CustomClaims) - err = json.Unmarshal(decodedPayload, claims) - if err != nil { - return nil, fmt.Errorf("error parsing token payload: %w", err) - } - - return claims, nil -} diff --git a/cmd/root.go b/cmd/root.go index 0148f684..07eeab2c 100644 --- a/cmd/root.go +++ b/cmd/root.go @@ -3,7 +3,6 @@ package cmd import ( "context" _ "embed" - "encoding/json" "errors" "fmt" "io" @@ -11,7 +10,6 @@ import ( "os" "os/signal" "path" - "path/filepath" "strings" "syscall" "time" @@ -23,9 +21,9 @@ import ( "github.com/google/uuid" "github.com/overmindtech/pterm" "github.com/overmindtech/cli/go/auth" + "github.com/overmindtech/cli/go/cliauth" "github.com/overmindtech/cli/go/sdp-go" "github.com/overmindtech/cli/go/tracing" - "github.com/pkg/browser" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" "github.com/spf13/viper" @@ -186,17 +184,32 @@ func Execute() { } } -const beginAuthMessage string = `# Authenticate with a browser +// ptermLogger adapts pterm output to the cliauth.Logger interface +type ptermLogger struct{} -Attempting to automatically open the SSO authorization page in your default browser. 
-If the browser does not open or you wish to use a different device to authorize this request, open the following URL: - - %v - -Then enter the code: +func (p *ptermLogger) Info(msg string, keysAndValues ...any) { + if len(keysAndValues) > 0 { + kvs := make([]string, 0, len(keysAndValues)/2) + for i := 0; i+1 < len(keysAndValues); i += 2 { + kvs = append(kvs, fmt.Sprintf("%v: %v", keysAndValues[i], keysAndValues[i+1])) + } + pterm.Info.Println(fmt.Sprintf("%s (%s)", msg, strings.Join(kvs, ", "))) + } else { + pterm.Info.Println(msg) + } +} - %v -` +func (p *ptermLogger) Error(msg string, keysAndValues ...any) { + if len(keysAndValues) > 0 { + kvs := make([]string, 0, len(keysAndValues)/2) + for i := 0; i+1 < len(keysAndValues); i += 2 { + kvs = append(kvs, fmt.Sprintf("%v: %v", keysAndValues[i], keysAndValues[i+1])) + } + pterm.Error.Println(fmt.Sprintf("%s (%s)", msg, strings.Join(kvs, ", "))) + } else { + pterm.Error.Println(msg) + } +} // getChangeUUIDAndCheckStatus returns the UUID of a change, as selected by --uuid or --change, or a change with the specified status and having --ticket-link func getChangeUUIDAndCheckStatus(ctx context.Context, oi sdp.OvermindInstance, expectedStatus sdp.ChangeStatus, ticketLink string, errorOnNotFound bool) (uuid.UUID, error) { @@ -495,9 +508,15 @@ func login(ctx context.Context, cmd *cobra.Command, scopes []string, writer io.W multi = pterm.DefaultMultiPrinter.WithWriter(writer) } + app := viper.GetString("app") + if err := cliauth.ConfirmUntrustedHost(app, viper.GetString("api-key") != "", os.Stdin, os.Stderr); err != nil { + _, _ = multi.Stop() + return ctx, sdp.OvermindInstance{}, nil, err + } + connectSpinner, _ := pterm.DefaultSpinner.WithWriter(multi.NewWriter()).Start("Connecting to Overmind") - oi, err := sdp.NewOvermindInstance(ctx, viper.GetString("app")) + oi, err := sdp.NewOvermindInstance(ctx, app) if err != nil { connectSpinner.Fail("Failed to get instance data from app") _, _ = multi.Stop() @@ -528,24 +547,18 @@ 
func login(ctx context.Context, cmd *cobra.Command, scopes []string, writer io.W } func ensureToken(ctx context.Context, oi sdp.OvermindInstance, requiredScopes []string) (context.Context, *oauth2.Token, error) { - var token *oauth2.Token - var err error + apiKey := viper.GetString("api-key") + app := viper.GetString("app") - // get a token from the api key if present - if apiKey := viper.GetString("api-key"); apiKey != "" { - token, err = getAPIKeyToken(ctx, oi, apiKey, requiredScopes) - } else { - token, err = getOauthToken(ctx, oi, requiredScopes) - } + token, err := cliauth.GetToken(ctx, oi, app, apiKey, requiredScopes, &ptermLogger{}) if err != nil { return ctx, nil, fmt.Errorf("error getting token: %w", err) } if token == nil { - // this should never happen, but just in case return ctx, nil, fmt.Errorf("error token: nil") } - // let's add account/auth info to the span for traceability + // Add account/auth info to the span for traceability tok, err := josejwt.ParseSigned(token.AccessToken, []jose.SignatureAlgorithm{jose.RS256}) if err != nil { return ctx, nil, fmt.Errorf("Error running program: received invalid token: %w", err) @@ -560,14 +573,11 @@ func ensureToken(ctx context.Context, oi sdp.OvermindInstance, requiredScopes [] attribute.Bool("ovm.auth.authenticated", true), attribute.String("ovm.auth.accountName", customClaims.AccountName), attribute.String("ovm.auth.scopes", customClaims.Scope), - // subject is the auth0 client id or the user id attribute.String("ovm.auth.subject", out.Subject), attribute.String("ovm.auth.expiry", out.Expiry.Time().String()), ) - // Check that we actually got the claims we asked for. 
If you don't have - // permission auth0 will just not assign those scopes rather than fail - ok, missing, err := HasScopesFlexible(token, requiredScopes) + ok, missing, err := cliauth.HasScopesFlexible(token, requiredScopes) if err != nil { return ctx, nil, fmt.Errorf("error checking token scopes: %w", err) } @@ -575,257 +585,15 @@ func ensureToken(ctx context.Context, oi sdp.OvermindInstance, requiredScopes [] return ctx, nil, fmt.Errorf("authenticated successfully, but you don't have the required permission: '%v'", missing) } - // store the token for later use by sdp-go's auth client. Note that this + // Store the token for later use by sdp-go's auth client. Note that this // loses access to the RefreshToken and could be done better by using an // oauth2.TokenSource, but this would require more work on updating sdp-go - // that is currently not scheduled + // that is currently not scheduled. ctx = context.WithValue(ctx, auth.UserTokenContextKey{}, token.AccessToken) return ctx, token, nil } -// Gets a token from Oauth with the required scopes. This method will also cache -// that token locally for use later, and will use the cached token if possible -func getOauthToken(ctx context.Context, oi sdp.OvermindInstance, requiredScopes []string) (*oauth2.Token, error) { - var localScopes []string - var localToken *oauth2.Token - home, err := os.UserHomeDir() - if err == nil { - // Check for a locally saved token in ~/.overmind - localToken, localScopes, err = readLocalTokenFile(home, viper.GetString("app"), requiredScopes) - if err != nil { - if !os.IsNotExist(err) { - pterm.Info.Println(fmt.Sprintf("Skipping using local token: %v. 
Re-authenticating.", err)) - } - } else { - // If we already have the right scopes, return the token - return localToken, nil - } - } - // If we need to get a new token, request the required scopes on top of - // whatever ones the current local, valid token has so that we don't - // keep replacing it - requestScopes := append(requiredScopes, localScopes...) - - // Authenticate using the oauth device authorization flow - config := oauth2.Config{ - ClientID: oi.CLIClientID, - Endpoint: oauth2.Endpoint{ - AuthURL: fmt.Sprintf("https://%v/authorize", oi.Auth0Domain), - TokenURL: fmt.Sprintf("https://%v/oauth/token", oi.Auth0Domain), - DeviceAuthURL: fmt.Sprintf("https://%v/oauth/device/code", oi.Auth0Domain), - }, - Scopes: requestScopes, - } - - deviceCode, err := config.DeviceAuth(ctx, - oauth2.SetAuthURLParam("audience", oi.Audience), - oauth2.AccessTypeOffline, - ) - if err != nil { - return nil, fmt.Errorf("error getting device code: %w", err) - } - - var token *oauth2.Token - var urlToOpen string - if deviceCode.VerificationURIComplete != "" { - urlToOpen = deviceCode.VerificationURIComplete - } else { - urlToOpen = deviceCode.VerificationURI - } - - _ = browser.OpenURL(urlToOpen) - pterm.Print( - markdownToString(MAX_TERMINAL_WIDTH, fmt.Sprintf( - beginAuthMessage, - deviceCode.VerificationURI, - deviceCode.UserCode, - ))) - - multi := pterm.DefaultMultiPrinter - _, _ = multi.Start() - - authSpinner, _ := pterm.DefaultSpinner.WithWriter(multi.NewWriter()).Start("Waiting for browser authentication") - - token, err = config.DeviceAccessToken(ctx, deviceCode) - if err != nil { - authSpinner.Fail("Unable to authenticate. 
Please try again.") - _, _ = multi.Stop() - return nil, fmt.Errorf("error getting device code: %w", err) - } - if token == nil { - authSpinner.Fail("Error running program: no token received") - _, _ = multi.Stop() - return nil, errors.New("no token received") - } - - authSpinner.Success("Authenticated successfully") - _, _ = multi.Stop() - - // Save the token to the local file, if the home directory is available - if home != "" { - err = saveLocalTokenFile(home, viper.GetString("app"), token) - if err != nil { - // we don't worry if we cannot save the token, it will just be requested again - log.WithContext(ctx).WithError(err).Error("Error saving token") - } - } - - return token, nil -} - -// Gets a token using an API key -func getAPIKeyToken(ctx context.Context, oi sdp.OvermindInstance, apiKey string, requiredScopes []string) (*oauth2.Token, error) { - var token *oauth2.Token - app := viper.GetString("app") - if !strings.HasPrefix(apiKey, "ovm_api_") { - return nil, errors.New("--api-key or OVM_API_KEY or API_KEY does not match pattern 'ovm_api_*'") - } - - // exchange api token for JWT - client := UnauthenticatedApiKeyClient(ctx, oi) - resp, err := client.ExchangeKeyForToken(ctx, &connect.Request[sdp.ExchangeKeyForTokenRequest]{ - Msg: &sdp.ExchangeKeyForTokenRequest{ - ApiKey: apiKey, - }, - }) - if err != nil { - return nil, fmt.Errorf("error authenticating the API token for %s: %w", app, err) - } - - token = &oauth2.Token{ - AccessToken: resp.Msg.GetAccessToken(), - TokenType: "Bearer", - } - - // Check that we actually got the claims we asked for. 
If you don't have - // permission auth0 will just not assign those scopes rather than fail - ok, missing, err := HasScopesFlexible(token, requiredScopes) - if err != nil { - return nil, fmt.Errorf("error checking token scopes for %s: %w", app, err) - } - if !ok { - return nil, fmt.Errorf("authenticated successfully against %s, but your API key is missing this permission: '%v'", app, missing) - } - log.WithField("app", app).Info("Using Overmind API key") - return token, nil -} - -type TokenFile struct { - AuthEntries map[string]*TokenEntry `json:"auth_entries"` -} - -type TokenEntry struct { - Token *oauth2.Token `json:"token"` - AddedDate time.Time `json:"added_date"` -} - -// readLocalTokenFile is also used in the gateway assistant cli tool. It is copied over, so if you change it here, you should also change it there. -func readLocalTokenFile(homeDir, app string, requiredScopes []string) (*oauth2.Token, []string, error) { - // Read in the token JSON file - path := filepath.Join(homeDir, ".overmind", "token.json") - - tokenFile := new(TokenFile) - - // Check that the file exists - if _, err := os.Stat(path); err != nil { - return nil, nil, err - } - - // Read the file - file, err := os.Open(path) - if err != nil { - return nil, nil, fmt.Errorf("error opening token file at %q: %w", path, err) - } - defer file.Close() - - // Decode the file - err = json.NewDecoder(file).Decode(tokenFile) - if err != nil { - return nil, nil, fmt.Errorf("error decoding token file at %q: %w", path, err) - } - - authEntry, ok := tokenFile.AuthEntries[app] - if !ok { - return nil, nil, fmt.Errorf("no token found for app %s in %q", app, path) - } - - // Check to see if the token is still valid - if !authEntry.Token.Valid() { - return nil, nil, errors.New("token is no longer valid") - } - - claims, err := extractClaims(authEntry.Token.AccessToken) - if err != nil { - return nil, nil, fmt.Errorf("error extracting claims from token: %s in %q: %w", app, path, err) - } - if claims.Scope == "" { 
- return nil, nil, errors.New("token does not have any scopes") - } - - currentScopes := strings.Split(claims.Scope, " ") - - // Check that we actually got the claims we asked for. - ok, missing, err := HasScopesFlexible(authEntry.Token, requiredScopes) - if err != nil { - return nil, currentScopes, fmt.Errorf("error checking token scopes: %s in %q: %w", app, path, err) - } - if !ok { - return nil, currentScopes, fmt.Errorf("local token is missing this permission: '%v'. %s in %q", missing, app, path) - } - - pterm.Info.Println(fmt.Sprintf("Using local token for %s in %q", app, path)) - return authEntry.Token, currentScopes, nil -} - -func saveLocalTokenFile(homeDir, app string, token *oauth2.Token) error { - // Read in the existing token file if it exists - path := filepath.Join(homeDir, ".overmind", "token.json") - - tokenFile := &TokenFile{ - AuthEntries: make(map[string]*TokenEntry), - } - - if _, err := os.Stat(path); err == nil { - file, err := os.Open(path) - if err == nil { - // file exists, read it - defer file.Close() - - err = json.NewDecoder(file).Decode(tokenFile) - if err != nil { - return fmt.Errorf("error decoding token file at %q: %w", path, err) - } - } - } else { - err = os.MkdirAll(filepath.Dir(path), 0755) - if err != nil { - return fmt.Errorf("unexpected fail creating directories: %w", err) - } - } - - // Update the token for the given app - tokenFile.AuthEntries[app] = &TokenEntry{ - Token: token, - AddedDate: time.Now(), - } - - // Write the updated token file - file, err := os.Create(path) - if err != nil { - return fmt.Errorf("error creating token file at %q: %w", path, err) - } - defer file.Close() - - err = json.NewEncoder(file).Encode(tokenFile) - if err != nil { - return fmt.Errorf("error encoding token file at %q: %w", path, err) - } - - pterm.Info.Println(fmt.Sprintf("Saving token locally for %s at %q", app, path)) - return nil -} - func getAppUrl(frontend, app string) string { if frontend == "" && app == "" { return 
"https://app.overmind.tech" @@ -838,3 +606,4 @@ func getAppUrl(frontend, app string) string { } return app } + diff --git a/cmd/root_test.go b/cmd/root_test.go index bac3a178..b0a05d6f 100644 --- a/cmd/root_test.go +++ b/cmd/root_test.go @@ -9,9 +9,15 @@ import ( "time" "github.com/overmindtech/cli/go/auth" + "github.com/overmindtech/cli/go/cliauth" "golang.org/x/oauth2" ) +type mockLogger struct{} + +func (m *mockLogger) Info(msg string, keysAndValues ...any) {} +func (m *mockLogger) Error(msg string, keysAndValues ...any) {} + func TestParseChangeUrl(t *testing.T) { tests := []struct { input string @@ -79,7 +85,7 @@ func TestHasScopesFlexible(t *testing.T) { for _, tc := range tests { t.Run(tc.Name, func(t *testing.T) { - if pass, _, _ := HasScopesFlexible(token, tc.RequiredScopes); pass != tc.ShouldPass { + if pass, _, _ := cliauth.HasScopesFlexible(token, tc.RequiredScopes); pass != tc.ShouldPass { t.Fatalf("expected: %v, got: %v", tc.ShouldPass, !tc.ShouldPass) } }) @@ -113,9 +119,9 @@ func Test_getAppUrl(t *testing.T) { } func TestSaveTokenFile(t *testing.T) { - // Setup temporary directory for testing tempDir := t.TempDir() app := "https://localhost.df.overmind-demo.com:3000" + log := &mockLogger{} claims := auth.CustomClaims{ Scope: "scope1 scope2", @@ -132,13 +138,12 @@ func TestSaveTokenFile(t *testing.T) { Expiry: time.Now().Add(1 * time.Hour), } - // Test saving the token file - err = saveLocalTokenFile(tempDir, app, token) + err = cliauth.SaveLocalToken(tempDir, app, token, log) if err != nil { t.Fatalf("unexpected fail saving token file: %v", err) } - // Test reading the token file - readAppToken, readClaims, err := readLocalTokenFile(tempDir, app, nil) + + readAppToken, readClaims, err := cliauth.ReadLocalToken(tempDir, app, nil, log) if err != nil { t.Fatalf("unexpected fail reading token file: %v", err) } @@ -152,8 +157,7 @@ func TestSaveTokenFile(t *testing.T) { t.Fatalf("expected: %v, got: %v", "scope2", readClaims[1]) } - // lets read a token 
from a non existent app - nonExistentToken, _, err := readLocalTokenFile(tempDir, "otherApp", nil) + nonExistentToken, _, err := cliauth.ReadLocalToken(tempDir, "otherApp", nil, log) if err == nil { t.Fatalf("expected error, got nil") } @@ -161,13 +165,12 @@ func TestSaveTokenFile(t *testing.T) { t.Fatalf("expected different tokens, got the same") } - // lets write the token to a different app otherApp := "otherApp" - err = saveLocalTokenFile(tempDir, otherApp, token) + err = cliauth.SaveLocalToken(tempDir, otherApp, token, log) if err != nil { t.Fatalf("unexpected fail saving token file: %v", err) } - readAppToken, _, err = readLocalTokenFile(tempDir, otherApp, nil) + readAppToken, _, err = cliauth.ReadLocalToken(tempDir, otherApp, nil, log) if err != nil { t.Fatalf("unexpected fail reading token file: %v", err) } @@ -175,7 +178,6 @@ func TestSaveTokenFile(t *testing.T) { t.Fatalf("expected: %v, got: %v", token.AccessToken, readAppToken.AccessToken) } - // lets update the first app token claims = auth.CustomClaims{ Scope: "scope3 scope4", AccountName: "test", @@ -190,11 +192,11 @@ func TestSaveTokenFile(t *testing.T) { AccessToken: accessToken, Expiry: time.Now().Add(1 * time.Hour), } - err = saveLocalTokenFile(tempDir, app, newToken) + err = cliauth.SaveLocalToken(tempDir, app, newToken, log) if err != nil { t.Fatalf("unexpected fail saving token file: %v", err) } - _, lastClaims, err := readLocalTokenFile(tempDir, app, nil) + _, lastClaims, err := cliauth.ReadLocalToken(tempDir, app, nil, log) if err != nil { t.Fatalf("unexpected fail reading token file: %v", err) } diff --git a/docs.overmind.tech/docs/sources/_category_.json b/docs.overmind.tech/docs/sources/_category_.json deleted file mode 100644 index 974a8cf6..00000000 --- a/docs.overmind.tech/docs/sources/_category_.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "label": "Infrastructure Sources", - "position": 3, - "collapsed": true, - "link": null -} diff --git 
a/docs.overmind.tech/docs/sources/aws/Types/apigateway-domain-name.md b/docs.overmind.tech/docs/sources/aws/Types/apigateway-domain-name.md deleted file mode 100644 index bb16518e..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/apigateway-domain-name.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -title: API Gateway Domain Name -sidebar_label: apigateway-domain-name ---- - -An AWS API Gateway Domain Name represents a custom DNS name (e.g. `api.example.com`) that you attach to one or more stages of a REST, HTTP or WebSocket API. By creating this resource you can present a branded, user-friendly endpoint instead of the default `*.execute-api..amazonaws.com` host, configure an ACM or imported TLS certificate, choose an edge-optimised or regional endpoint, enable mutual TLS and define API mappings. Further information can be found in the official documentation: https://docs.aws.amazon.com/apigateway/latest/developerguide/apigateway-create-custom-domain-name.html - -**Terrafrom Mappings:** - -- `aws_api_gateway_domain_name.domain_name` - -## Supported Methods - -- `GET`: Get a Domain Name by domain-name -- `LIST`: List Domain Names -- `SEARCH`: Search Domain Names by ARN diff --git a/docs.overmind.tech/docs/sources/aws/Types/apigateway-resource.md b/docs.overmind.tech/docs/sources/aws/Types/apigateway-resource.md deleted file mode 100644 index 2816caa9..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/apigateway-resource.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -title: API Gateway -sidebar_label: apigateway-resource ---- - -An **API Gateway Resource** represents a single path segment within an Amazon API Gateway REST API. Each resource forms part of the hierarchical URL structure of your API and can have HTTP methods (such as GET, POST, DELETE) attached to it, along with integrations, authorisers and request/response models. Correctly mapping these resources is critical because mis-configured paths can expose unintended back-ends or shadow existing routes. 
Overmind pulls every API Gateway Resource into its graph so you can understand how proposed changes will affect downstream services before you deploy them. -For further details, refer to the official AWS documentation: https://docs.aws.amazon.com/apigateway/latest/api/API_Resource.html - -**Terrafrom Mappings:** - -- `aws_api_gateway_resource.id` - -## Supported Methods - -- `GET`: Get a Resource by rest-api-id/resource-id -- ~~`LIST`~~ -- `SEARCH`: Search Resources by REST API ID diff --git a/docs.overmind.tech/docs/sources/aws/Types/apigateway-rest-api.md b/docs.overmind.tech/docs/sources/aws/Types/apigateway-rest-api.md deleted file mode 100644 index 83569578..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/apigateway-rest-api.md +++ /dev/null @@ -1,27 +0,0 @@ ---- -title: REST API -sidebar_label: apigateway-rest-api ---- - -AWS API Gateway REST APIs allow you to build, deploy and manage REST-style interfaces that front your application logic, Lambda functions or other AWS services. A REST API in API Gateway represents the top-level container for all stages, resources, methods, authorisers and deployments that make up your service. Once created, the API can be exposed publicly or kept private behind a VPC endpoint, throttled, monitored and versioned across stages. -For full details, refer to the official AWS documentation: https://docs.aws.amazon.com/apigateway/latest/developerguide/apigateway-rest-api.html - -**Terrafrom Mappings:** - -- `aws_api_gateway_rest_api.id` - -## Supported Methods - -- `GET`: Get a REST API by ID -- `LIST`: List all REST APIs -- `SEARCH`: Search for REST APIs by their name - -## Possible Links - -### [`ec2-vpc-endpoint`](/sources/aws/Types/ec2-vpc-endpoint) - -If the REST API is configured as a private API, it is exposed inside a VPC through an Interface VPC Endpoint. 
Overmind links the `apigateway-rest-api` resource to the corresponding `ec2-vpc-endpoint` to show which endpoint clients inside the VPC must use to reach the API and to surface any network-level risks (such as missing security-group rules). - -### [`apigateway-resource`](/sources/aws/Types/apigateway-resource) - -An API Gateway REST API is composed of one or more resources, each representing a path segment (for example `/users` or `/orders/{orderId}`). Overmind links the parent `apigateway-rest-api` to each individual `apigateway-resource` so you can trace how a request traverses the API hierarchy and identify unprotected or redundant paths. diff --git a/docs.overmind.tech/docs/sources/aws/Types/autoscaling-auto-scaling-group.md b/docs.overmind.tech/docs/sources/aws/Types/autoscaling-auto-scaling-group.md deleted file mode 100644 index 03b3db07..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/autoscaling-auto-scaling-group.md +++ /dev/null @@ -1,39 +0,0 @@ ---- -title: Autoscaling Group -sidebar_label: autoscaling-auto-scaling-group ---- - -An AWS Autoscaling Group (ASG) is a logical collection of Amazon EC2 instances that are treated as a single scalable resource. It automatically adjusts the number of running instances to maintain a desired capacity, respond to demand spikes, enforce health‐based replacement, and support rolling updates. Configuration parameters such as minimum, maximum and desired instance counts, scaling policies, health checks and lifecycle hooks are all defined at the group level. 
-Further information is available in the official AWS documentation: https://docs.aws.amazon.com/autoscaling/ec2/userguide/AutoScalingGroup.html - -**Terrafrom Mappings:** - -- `aws_autoscaling_group.arn` - -## Supported Methods - -- `GET`: Get an Autoscaling Group by name -- `LIST`: List Autoscaling Groups -- `SEARCH`: Search for Autoscaling Groups by ARN - -## Possible Links - -### [`ec2-launch-template`](/sources/aws/Types/ec2-launch-template) - -An ASG normally references a launch template that describes how each EC2 instance should be configured (AMI, instance type, security groups, IAM instance profile, user data, etc.). Therefore the ASG is linked to its associated `ec2-launch-template`. - -### [`elbv2-target-group`](/sources/aws/Types/elbv2-target-group) - -ASGs can be attached to one or more ALB/NLB target groups so that their member instances are automatically registered and deregistered as they scale. The link shows which `elbv2-target-group`(s) an ASG feeds. - -### [`ec2-instance`](/sources/aws/Types/ec2-instance) - -The running EC2 instances that currently belong to an ASG are directly related to it. Overmind surfaces this connection so you can see which `ec2-instance` objects are under the control of a specific ASG. - -### [`iam-role`](/sources/aws/Types/iam-role) - -Autoscaling uses an AWS service-linked role (typically `AWSServiceRoleForAutoScaling`) to perform scaling and health check actions on your behalf. Additionally, the launch template referenced by the ASG may specify an instance profile containing an IAM role for the launched instances. Both relationships are captured via the `iam-role` link. - -### [`ec2-placement-group`](/sources/aws/Types/ec2-placement-group) - -If the ASG’s launch template specifies a placement group, any instances it launches will be placed accordingly for improved networking performance or spread. The link reveals the `ec2-placement-group` associated with the ASG. 
diff --git a/docs.overmind.tech/docs/sources/aws/Types/cloudfront-cache-policy.md b/docs.overmind.tech/docs/sources/aws/Types/cloudfront-cache-policy.md deleted file mode 100644 index 892c7bac..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/cloudfront-cache-policy.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -title: CloudFront Cache Policy -sidebar_label: cloudfront-cache-policy ---- - -An AWS CloudFront Cache Policy specifies the rules that dictate how CloudFront caches HTTP responses at edge locations. It determines which headers, cookies and query-string parameters are included in the cache key, how long objects remain in the cache (TTL values), and whether to compress the response before it is served to viewers. By creating and attaching custom cache policies to distributions or behaviours, you can fine-tune cache efficiency, control origin load, and optimise performance for different types of content. For a full description of the resource and its attributes, refer to the [AWS documentation](https://docs.aws.amazon.com/cloudfront/latest/APIReference/API_CachePolicy.html). 
- -**Terrafrom Mappings:** - -- `aws_cloudfront_cache_policy.id` - -## Supported Methods - -- `GET`: Get a CloudFront Cache Policy -- `LIST`: List CloudFront Cache Policies -- `SEARCH`: Search CloudFront Cache Policies by ARN diff --git a/docs.overmind.tech/docs/sources/aws/Types/cloudfront-continuous-deployment-policy.md b/docs.overmind.tech/docs/sources/aws/Types/cloudfront-continuous-deployment-policy.md deleted file mode 100644 index 49f4679c..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/cloudfront-continuous-deployment-policy.md +++ /dev/null @@ -1,19 +0,0 @@ ---- -title: CloudFront Continuous Deployment Policy -sidebar_label: cloudfront-continuous-deployment-policy ---- - -A CloudFront Continuous Deployment Policy is an Amazon CloudFront configuration object that allows you to shift viewer traffic between two CloudFront distributions (normally a _staging_ and a _production_ distribution) in a controlled, progressive way. By defining percentage-based traffic splits or header-based routing rules, you can carry out blue/green or canary releases, test new versions of your application, and roll back instantly if problems occur. -Official documentation: https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/continuous-deployment.html - -## Supported Methods - -- `GET`: Get a CloudFront Continuous Deployment Policy by ID -- `LIST`: List CloudFront Continuous Deployment Policies -- `SEARCH`: Search CloudFront Continuous Deployment Policies by ARN - -## Possible Links - -### [`dns`](/sources/stdlib/Types/dns) - -DNS records (usually CNAME or ALIAS/ANAME) that point end-user domains to the target CloudFront distributions determine which viewers are subject to a continuous deployment policy. When a policy is enabled, those DNS entries still resolve to the same CloudFront hostnames, but the policy decides how the resulting requests are routed internally between the staging and production distributions. 
Overmind therefore links the policy to related DNS resources so you can trace which public hostnames—and consequently which users—are affected by a particular traffic-splitting setup. diff --git a/docs.overmind.tech/docs/sources/aws/Types/cloudfront-distribution.md b/docs.overmind.tech/docs/sources/aws/Types/cloudfront-distribution.md deleted file mode 100644 index 8222aaa1..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/cloudfront-distribution.md +++ /dev/null @@ -1,58 +0,0 @@ ---- -title: CloudFront Distribution -sidebar_label: cloudfront-distribution ---- - -Amazon CloudFront Distributions are globally-replicated configurations that tell the CloudFront CDN how to cache and deliver your content to end-users. Each distribution defines one or more origins, cache behaviours, security settings and optional edge-compute integrations. See the official AWS documentation for a full description: https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/distribution-working-with.html - -**Terrafrom Mappings:** - -- `aws_cloudfront_distribution.arn` - -## Supported Methods - -- `GET`: Get a distribution by ID -- `LIST`: List all distributions -- `SEARCH`: Search distributions by ARN - -## Possible Links - -### [`cloudfront-key-group`](/sources/aws/Types/cloudfront-key-group) - -A distribution can reference one or more Key Groups in its `TrustedKeyGroups` configuration to validate signed URLs or signed cookies. If a Key Group ID appears in the distribution’s config, Overmind links the two. - -### [`cloudfront-continuous-deployment-policy`](/sources/aws/Types/cloudfront-continuous-deployment-policy) - -Distributions may have an attached Continuous Deployment Policy (`ContinuousDeploymentPolicyId`) that allows blue/green traffic shifting. Overmind links the distribution to that policy. - -### [`cloudfront-cache-policy`](/sources/aws/Types/cloudfront-cache-policy) - -Every cache behaviour in a distribution can specify a `CachePolicyId`. 
Overmind links the distribution to any Cache Policies it relies on. - -### [`cloudfront-function`](/sources/aws/Types/cloudfront-function) - -Viewer request / response CloudFront Functions can be associated with behaviours in the distribution. Those references create links between the distribution and the function resources. - -### [`cloudfront-origin-request-policy`](/sources/aws/Types/cloudfront-origin-request-policy) - -Behaviours can also specify an `OriginRequestPolicyId` that controls which headers, cookies and query strings are sent to the origin. Overmind links distributions to the referenced Origin Request Policies. - -### [`cloudfront-realtime-log-config`](/sources/aws/Types/cloudfront-realtime-log-config) - -If real-time logging is enabled, the distribution contains one or more `RealtimeLogConfigArn` values. Overmind uses those to link the distribution to its real-time log configuration. - -### [`cloudfront-response-headers-policy`](/sources/aws/Types/cloudfront-response-headers-policy) - -Behaviours may include a `ResponseHeadersPolicyId` that injects security or custom headers. Overmind links the distribution to the associated Response Headers Policies. - -### [`dns`](/sources/stdlib/Types/dns) - -Public access to a distribution is normally via the CloudFront domain name or an alias/CNAME such as `www.example.com`. When a DNS record (e.g., Route 53 ALIAS) targets the distribution’s domain, Overmind links the DNS record to the distribution. - -### [`lambda-function`](/sources/aws/Types/lambda-function) - -Lambda@Edge functions (standard Lambda functions replicated to edge locations) can be attached to behaviours for request or response processing. These associations create links between the distribution and the Lambda functions. - -### [`s3-bucket`](/sources/aws/Types/s3-bucket) - -An S3 bucket is commonly used as an origin. When the distribution’s origin points at an S3 bucket domain or ARN, Overmind links the distribution to that bucket. 
diff --git a/docs.overmind.tech/docs/sources/aws/Types/cloudfront-function.md b/docs.overmind.tech/docs/sources/aws/Types/cloudfront-function.md deleted file mode 100644 index dc43f440..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/cloudfront-function.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -title: CloudFront Function -sidebar_label: cloudfront-function ---- - -Amazon CloudFront Functions let you run lightweight JavaScript code at CloudFront edge locations, enabling real-time manipulation of HTTP requests and responses without the latency of invoking AWS Lambda. Typical use-cases include URL rewrites, header manipulation, access control and A/B testing, all executed in under one millisecond at every edge. For more detail see the official AWS documentation: https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/cloudfront-functions.html - -**Terraform Mappings:** - -- `aws_cloudfront_function.name` - -## Supported Methods - -- `GET`: Get a CloudFront Function by name - -- `LIST`: List CloudFront Functions - -- `SEARCH`: Search CloudFront Functions by ARN diff --git a/docs.overmind.tech/docs/sources/aws/Types/cloudfront-key-group.md b/docs.overmind.tech/docs/sources/aws/Types/cloudfront-key-group.md deleted file mode 100644 index f2647ffb..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/cloudfront-key-group.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -title: CloudFront Key Group -sidebar_label: cloudfront-key-group ---- - -A CloudFront Key Group is an Amazon CloudFront configuration object that aggregates several public keys under a single identifier. CloudFront uses the keys in the group to verify the signatures on signed URLs, signed cookies, or JSON Web Tokens that you employ to control access to private content.
By attaching a key group to a distribution or cache behaviour you can centrally manage which public keys are trusted; adding or removing a key from the group immediately changes who can generate valid signatures without the need to touch individual distributions. -For more information, refer to the AWS documentation on Key Groups: https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/PrivateContent.html#PrivateContent-KeyGroups - -**Terrafrom Mappings:** - -- `aws_cloudfront_key_group.id` - -## Supported Methods - -- `GET`: Get a CloudFront Key Group by ID -- `LIST`: List CloudFront Key Groups -- `SEARCH`: Search CloudFront Key Groups by ARN diff --git a/docs.overmind.tech/docs/sources/aws/Types/cloudfront-origin-access-control.md b/docs.overmind.tech/docs/sources/aws/Types/cloudfront-origin-access-control.md deleted file mode 100644 index 42454fe8..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/cloudfront-origin-access-control.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -title: Cloudfront Origin Access Control -sidebar_label: cloudfront-origin-access-control ---- - -Amazon CloudFront Origin Access Control (OAC) is a security feature that allows you to restrict access to the origin of a CloudFront distribution, ensuring that all requests are authenticated and authorised by CloudFront before reaching your S3 bucket, Application Load Balancer, or custom origin. OAC is the modern replacement for Origin Access Identities (OAI) and supports both SigV4‐signed requests and IAM authentication, giving you more granular control over how CloudFront communicates with your back-end resources. By configuring an OAC you prevent direct exposure of your origin on the public internet, helping to mitigate data-exfiltration and origin-based attacks. 
-For further information see the official AWS documentation: https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/private-content-restricting-access-to-origin.html#concept-origin-access-control - -**Terraform Mappings:** - -- `aws_cloudfront_origin_access_control.id` - -## Supported Methods - -- `GET`: Get Origin Access Control by ID - -- `LIST`: List Origin Access Controls - -- `SEARCH`: Search Origin Access Control by ARN diff --git a/docs.overmind.tech/docs/sources/aws/Types/cloudfront-origin-request-policy.md b/docs.overmind.tech/docs/sources/aws/Types/cloudfront-origin-request-policy.md deleted file mode 100644 index 2b59261c..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/cloudfront-origin-request-policy.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -title: CloudFront Origin Request Policy -sidebar_label: cloudfront-origin-request-policy ---- - -A CloudFront Origin Request Policy defines which HTTP headers, cookies and query-string parameters Amazon CloudFront passes from the edge to your origin. By attaching a policy to a cache behaviour you can standardise the information that reaches your origin, independent of any caching decisions. Policies are reusable across multiple distributions, making configuration simpler and less error-prone. -For further details refer to the [AWS documentation](https://docs.aws.amazon.com/cloudfront/latest/APIReference/API_OriginRequestPolicy.html).
- -**Terraform Mappings:** - -- `aws_cloudfront_origin_request_policy.id` - -## Supported Methods - -- `GET`: Get Origin Request Policy by ID - -- `LIST`: List Origin Request Policies - -- `SEARCH`: Search Origin Request Policy by ARN diff --git a/docs.overmind.tech/docs/sources/aws/Types/cloudfront-realtime-log-config.md b/docs.overmind.tech/docs/sources/aws/Types/cloudfront-realtime-log-config.md deleted file mode 100644 index 5202bc2c..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/cloudfront-realtime-log-config.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -title: CloudFront Realtime Log Config -sidebar_label: cloudfront-realtime-log-config ---- - -Amazon CloudFront Realtime Log Configs define the structure of the near-real-time log data that CloudFront can stream to a destination such as Kinesis Data Streams. A Realtime Log Config specifies which data fields are captured, the sampling rate, and the endpoint to which the records are delivered. This enables teams to observe viewer requests, latency, cache behaviour and other metrics with sub-second visibility, allowing faster troubleshooting and performance tuning.
-For a detailed description of the service and its capabilities, refer to the official AWS documentation: https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/real-time-logs.html - -**Terrafrom Mappings:** - -- `aws_cloudfront_realtime_log_config.arn` - -## Supported Methods - -- `GET`: Get Realtime Log Config by Name -- `LIST`: List Realtime Log Configs -- `SEARCH`: Search Realtime Log Configs by ARN diff --git a/docs.overmind.tech/docs/sources/aws/Types/cloudfront-response-headers-policy.md b/docs.overmind.tech/docs/sources/aws/Types/cloudfront-response-headers-policy.md deleted file mode 100644 index 74c25ae6..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/cloudfront-response-headers-policy.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -title: CloudFront Response Headers Policy -sidebar_label: cloudfront-response-headers-policy ---- - -A CloudFront Response Headers Policy is an AWS configuration object that specifies the HTTP response headers that Amazon CloudFront adds to, removes from, or overrides on the responses it returns to viewers. By defining a policy you can, for example, enforce security-related headers (such as `Strict-Transport-Security` or `Content-Security-Policy`), apply custom cache-control directives, or expose additional headers to browsers for client-side logic. Once created, a response headers policy can be associated with one or more CloudFront distributions, allowing consistent header behaviour across multiple delivery configurations. 
-For full details see the AWS documentation: https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/response-headers-policies.html - -**Terrafrom Mappings:** - -- `aws_cloudfront_response_headers_policy.id` - -## Supported Methods - -- `GET`: Get Response Headers Policy by ID -- `LIST`: List Response Headers Policies -- `SEARCH`: Search Response Headers Policy by ARN diff --git a/docs.overmind.tech/docs/sources/aws/Types/cloudfront-streaming-distribution.md b/docs.overmind.tech/docs/sources/aws/Types/cloudfront-streaming-distribution.md deleted file mode 100644 index 91412fd6..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/cloudfront-streaming-distribution.md +++ /dev/null @@ -1,23 +0,0 @@ ---- -title: CloudFront Streaming Distribution -sidebar_label: cloudfront-streaming-distribution ---- - -An Amazon CloudFront Streaming Distribution is a special type of CloudFront distribution optimised for on-demand media streaming (historically using the RTMP protocol) and for serving video content over HTTP/S from an origin such as Amazon S3 or an on-premises media server. It automatically places edge cache nodes close to viewers, reducing latency and bandwidth costs while providing scalability, encryption and access control options. For full details see the official AWS documentation: https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/distribution-streaming.html - -**Terrafrom Mappings:** - -- `aws_cloudfront_distribution.arn` -- `aws_cloudfront_distribution.id` - -## Supported Methods - -- `GET`: Get a Streaming Distribution by ID -- `LIST`: List Streaming Distributions -- `SEARCH`: Search Streaming Distributions by ARN - -## Possible Links - -### [`dns`](/sources/stdlib/Types/dns) - -Each CloudFront Streaming Distribution is reachable via a unique domain name that ends in `cloudfront.net`, and may also be associated with custom CNAMEs. 
These domain names appear in DNS records that overmind can discover and connect to the distribution resource. diff --git a/docs.overmind.tech/docs/sources/aws/Types/cloudwatch-alarm.md b/docs.overmind.tech/docs/sources/aws/Types/cloudwatch-alarm.md deleted file mode 100644 index eb3b85cb..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/cloudwatch-alarm.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -title: CloudWatch Alarm -sidebar_label: cloudwatch-alarm ---- - -An Amazon CloudWatch Alarm watches a single CloudWatch metric (or a maths expression based on one or more metrics) and performs one or more actions when the metric breaches a threshold for a specified number of evaluation periods. Typical actions include sending an SNS notification, invoking an Auto Scaling policy or stopping, terminating, rebooting or recovering an EC2 instance. Alarms are therefore often a critical part of operational resilience and cost-control strategies, and mis-configuration can lead to missed incidents or unwanted automated actions. -Official documentation: https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html - -**Terraform Mappings:** - -- `aws_cloudwatch_metric_alarm.alarm_name` - -## Supported Methods - -- `GET`: Get an alarm by name -- `LIST`: List all alarms -- `SEARCH`: Search for alarms. This accepts JSON in the format of `cloudwatch.DescribeAlarmsForMetricInput` diff --git a/docs.overmind.tech/docs/sources/aws/Types/directconnect-connection.md b/docs.overmind.tech/docs/sources/aws/Types/directconnect-connection.md deleted file mode 100644 index 0d08e744..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/directconnect-connection.md +++ /dev/null @@ -1,30 +0,0 @@ ---- -title: Connection -sidebar_label: directconnect-connection ---- - -An AWS Direct Connect Connection represents a single dedicated network circuit between your on-premises environment (or colocation facility) and an AWS Direct Connect location. 
By provisioning a connection you obtain a physical 1 Gbps, 10 Gbps or 100 Gbps port on an AWS router, through which you can create one or more virtual interfaces to reach AWS services or your VPCs. A connection is the fundamental building-block for achieving consistent, low-latency private connectivity into AWS, bypassing the public Internet and allowing you to commit to specific bandwidth and service-level requirements. See the official AWS documentation for further details: https://docs.aws.amazon.com/directconnect/latest/UserGuide/WorkingWithConnections.html - -**Terrafrom Mappings:** - -- `aws_dx_connection.id` - -## Supported Methods - -- `GET`: Get a connection by ID -- `LIST`: List all connections -- `SEARCH`: Search connection by ARN - -## Possible Links - -### [`directconnect-lag`](/sources/aws/Types/directconnect-lag) - -A Link Aggregation Group (LAG) can aggregate one or more individual connections into a single managed logical interface. A connection may belong to a LAG, and conversely a LAG lists each underlying connection that forms part of the group. - -### [`directconnect-location`](/sources/aws/Types/directconnect-location) - -Every connection is terminated at a specific Direct Connect location (e.g. an Equinix or Digital Realty data centre). The connection resource references its chosen location to indicate where the physical port is installed. - -### [`directconnect-virtual-interface`](/sources/aws/Types/directconnect-virtual-interface) - -Virtual interfaces (public, private or transit) are configured on top of a connection to carry customer traffic. Each virtual interface is associated with exactly one connection (or LAG), while a single connection can host multiple virtual interfaces for different routing purposes. 
diff --git a/docs.overmind.tech/docs/sources/aws/Types/directconnect-customer-metadata.md b/docs.overmind.tech/docs/sources/aws/Types/directconnect-customer-metadata.md deleted file mode 100644 index 1e33f6b8..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/directconnect-customer-metadata.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Customer Metadata -sidebar_label: directconnect-customer-metadata ---- - -Customer Metadata represents the customer agreement that is on file for your AWS account in relation to AWS Direct Connect. The record contains information such as the name and Amazon Resource Name (ARN) of the agreement, its current revision and status, and the Region in which the agreement applies. Being able to inspect this resource lets you confirm that the correct contractual terms have been accepted before you attempt to create or modify Direct Connect connections, helping you avoid deployment failures that stem from missing or outdated agreements. -For further details see the AWS API documentation: https://docs.aws.amazon.com/directconnect/latest/APIReference/API_DescribeCustomerMetadata.html - -## Supported Methods - -- `GET`: Get a customer agreement by name -- `LIST`: List all customer agreements -- `SEARCH`: Search customer agreements by ARN diff --git a/docs.overmind.tech/docs/sources/aws/Types/directconnect-direct-connect-gateway-association-proposal.md b/docs.overmind.tech/docs/sources/aws/Types/directconnect-direct-connect-gateway-association-proposal.md deleted file mode 100644 index 54d9add6..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/directconnect-direct-connect-gateway-association-proposal.md +++ /dev/null @@ -1,24 +0,0 @@ ---- -title: Direct Connect Gateway Association Proposal -sidebar_label: directconnect-direct-connect-gateway-association-proposal ---- - -An AWS Direct Connect Gateway Association Proposal represents a cross-account request to attach a Virtual Private Gateway (VGW) or Transit Gateway (TGW) to an 
existing Direct Connect Gateway (DXGW). -The proposal is created by the owner of the VGW/TGW and must be accepted by the DXGW owner before the association is established. It contains details such as allowed prefixes and the identifiers of the gateways involved, providing both parties with a clear record of what will change once the proposal is accepted. -For more information, see the official AWS API documentation: https://docs.aws.amazon.com/directconnect/latest/APIReference/API_CreateDirectConnectGatewayAssociationProposal.html - -**Terraform Mappings:** - -- `aws_dx_gateway_association_proposal.id` - -## Supported Methods - -- `GET`: Get a Direct Connect Gateway Association Proposal by ID - -- `LIST`: List all Direct Connect Gateway Association Proposals - -- `SEARCH`: Search Direct Connect Gateway Association Proposals by ARN - -## Possible Links - -### [`directconnect-direct-connect-gateway-association`](/sources/aws/Types/directconnect-direct-connect-gateway-association) - -A proposal, once accepted, becomes a Direct Connect Gateway Association. Therefore, every accepted `directconnect-direct-connect-gateway-association-proposal` will have a corresponding `directconnect-direct-connect-gateway-association` resource that represents the live attachment between the DXGW and the VGW/TGW. diff --git a/docs.overmind.tech/docs/sources/aws/Types/directconnect-direct-connect-gateway-association.md b/docs.overmind.tech/docs/sources/aws/Types/directconnect-direct-connect-gateway-association.md deleted file mode 100644 index 2f7f64bd..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/directconnect-direct-connect-gateway-association.md +++ /dev/null @@ -1,23 +0,0 @@ ---- -title: Direct Connect Gateway Association -sidebar_label: directconnect-direct-connect-gateway-association ---- - -A Direct Connect Gateway Association represents the attachment of a virtual private gateway (VGW) or a transit gateway (TGW) to an AWS Direct Connect gateway.
Once associated, the on-premises network that is connected through an AWS Direct Connect dedicated or hosted connection can reach the VPCs behind the VGW/TGW, even if they are in different AWS Regions. -For more detail, see the AWS documentation: https://docs.aws.amazon.com/directconnect/latest/UserGuide/direct-connect-gateways-intro.html#direct-connect-gateway-associations - -**Terraform Mappings:** - -- `aws_dx_gateway_association.id` - -## Supported Methods - -- `GET`: Get a direct connect gateway association by direct connect gateway ID and virtual gateway ID -- ~~`LIST`~~ -- `SEARCH`: Search direct connect gateway associations by direct connect gateway ID - -## Possible Links - -### [`directconnect-direct-connect-gateway`](/sources/aws/Types/directconnect-direct-connect-gateway) - -A Direct Connect Gateway Association is a child resource of a Direct Connect Gateway, so every association is linked to the Direct Connect Gateway to which the VGW/TGW is attached. diff --git a/docs.overmind.tech/docs/sources/aws/Types/directconnect-direct-connect-gateway-attachment.md b/docs.overmind.tech/docs/sources/aws/Types/directconnect-direct-connect-gateway-attachment.md deleted file mode 100644 index 86f6233b..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/directconnect-direct-connect-gateway-attachment.md +++ /dev/null @@ -1,23 +0,0 @@ ---- -title: Direct Connect Gateway Attachment -sidebar_label: directconnect-direct-connect-gateway-attachment ---- - -An AWS Direct Connect **gateway attachment** represents the binding between a Direct Connect Gateway and a Virtual Interface (VIF). When the attachment is in the `attached` state, traffic that reaches the VIF can be routed to any VPCs or on-premises networks that are associated with the gateway, even across accounts or Regions. 
-For a full description of the concept, states, and quotas involved, see the AWS documentation: https://docs.aws.amazon.com/directconnect/latest/UserGuide/direct-connect-gateways.html#dx-gateway-attachments - -## Supported Methods - -- `GET`: Get a direct connect gateway attachment by DirectConnectGatewayId/VirtualInterfaceId -- ~~`LIST`~~ -- `SEARCH`: Search direct connect gateway attachments for given VirtualInterfaceId - -## Possible Links - -### [`directconnect-direct-connect-gateway`](/sources/aws/Types/directconnect-direct-connect-gateway) - -Each gateway attachment belongs to exactly one Direct Connect Gateway. Overmind links the attachment back to its parent gateway so you can see every VIF that is currently associated with that gateway. - -### [`directconnect-virtual-interface`](/sources/aws/Types/directconnect-virtual-interface) - -The attachment is also linked to the Virtual Interface that is being attached. This lets you trace which VIFs are connected to which gateways and, in turn, to the networks that sit behind those gateways. diff --git a/docs.overmind.tech/docs/sources/aws/Types/directconnect-direct-connect-gateway.md b/docs.overmind.tech/docs/sources/aws/Types/directconnect-direct-connect-gateway.md deleted file mode 100644 index 4deb30f5..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/directconnect-direct-connect-gateway.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -title: Direct Connect Gateway -sidebar_label: directconnect-direct-connect-gateway ---- - -An AWS Direct Connect gateway is a global virtual routing resource that allows you to attach one or more Direct Connect private virtual interfaces to one or more Virtual Private Gateways (VGWs) or Transit Gateways (TGWs) across any AWS Region (with the exception of the AWS China Regions). 
By decoupling the physical Direct Connect connection from a specific VPC or Region, it simplifies multi-region and multi-account network architectures, provides centralised route control, and reduces the number of BGP sessions that need to be managed. -For a detailed overview, refer to the official AWS documentation: https://docs.aws.amazon.com/directconnect/latest/UserGuide/direct-connect-gateways.html - -**Terraform Mappings:** - -- `aws_dx_gateway.id` - -## Supported Methods - -- `GET`: Get a direct connect gateway by ID - -- `LIST`: List all direct connect gateways - -- `SEARCH`: Search direct connect gateway by ARN diff --git a/docs.overmind.tech/docs/sources/aws/Types/directconnect-hosted-connection.md b/docs.overmind.tech/docs/sources/aws/Types/directconnect-hosted-connection.md deleted file mode 100644 index 332dadab..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/directconnect-hosted-connection.md +++ /dev/null @@ -1,31 +0,0 @@ ---- -title: Hosted Connection -sidebar_label: directconnect-hosted-connection ---- - -A **Hosted Connection** is an AWS Direct Connect circuit that is provisioned for you by an AWS Direct Connect Delivery Partner on their own network infrastructure and then allocated to your AWS account. It provides a dedicated, layer-2 link that terminates at an AWS Direct Connect location and can be used to create virtual interfaces (VIFs) to access AWS services or your VPCs. Unlike dedicated connections, hosted connections are requested from the partner rather than from AWS directly, and their capacity is limited to 50 Mbps, 100 Mbps, 200 Mbps, 300 Mbps, 400 Mbps or 500 Mbps.
-See the official AWS documentation for full details: https://docs.aws.amazon.com/directconnect/latest/UserGuide/WorkingWithConnections.html#HostedConnections - -**Terrafrom Mappings:** - -- `aws_dx_hosted_connection.id` - -## Supported Methods - -- `GET`: Get a Hosted Connection by connection ID -- ~~`LIST`~~ -- `SEARCH`: Search Hosted Connections by Interconnect or LAG ID - -## Possible Links - -### [`directconnect-lag`](/sources/aws/Types/directconnect-lag) - -A hosted connection can be delivered over a Link Aggregation Group (LAG). In this case the LAG is the parent resource that physically contains the hosted connection, so the hosted connection links **to** its associated LAG. - -### [`directconnect-location`](/sources/aws/Types/directconnect-location) - -Every hosted connection terminates at a specific AWS Direct Connect location (for example, a colocation data centre). The hosted connection therefore links **to** the location where its physical port is situated. - -### [`directconnect-virtual-interface`](/sources/aws/Types/directconnect-virtual-interface) - -After a hosted connection becomes available you create one or more virtual interfaces on top of it. These virtual interfaces depend on the hosted connection, so they link **from** the hosted connection. diff --git a/docs.overmind.tech/docs/sources/aws/Types/directconnect-interconnect.md b/docs.overmind.tech/docs/sources/aws/Types/directconnect-interconnect.md deleted file mode 100644 index aed4e26f..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/directconnect-interconnect.md +++ /dev/null @@ -1,27 +0,0 @@ ---- -title: Interconnect -sidebar_label: directconnect-interconnect ---- - -An AWS Direct Connect **Interconnect** is a high-capacity physical Ethernet link (10 Gbps or 100 Gbps) between an AWS Direct Connect location and the network of an approved network service provider. 
The provider uses the interconnect to carve out and allocate Hosted Connections or Hosted Virtual Interfaces for individual customer accounts, allowing many end-users to share the same physical infrastructure while maintaining logical separation and security. In Overmind, the **directconnect-interconnect** type lets you surface configuration details (such as bandwidth, location, and operational state) and map its relationships to other Direct Connect resources so you can spot mis-configuration or single-point-of-failure risks before deployment. -For authoritative information see the AWS documentation: https://docs.aws.amazon.com/directconnect/latest/UserGuide/WorkingWithInterconnects.html - -## Supported Methods - -- `GET`: Get an Interconnect by InterconnectId - -- `LIST`: List all Interconnects - -- `SEARCH`: Search Interconnects by ARN - -## Possible Links - -### [`directconnect-hosted-connection`](/sources/aws/Types/directconnect-hosted-connection) - -Hosted connections are provisioned on top of an Interconnect. Each hosted connection link points back to the parent Interconnect that physically carries its traffic. - -### [`directconnect-lag`](/sources/aws/Types/directconnect-lag) - -LAGs (Link Aggregation Groups) created on an Interconnect combine multiple physical ports of that Interconnect into a single logical interface, increasing bandwidth and providing redundancy. - -### [`directconnect-location`](/sources/aws/Types/directconnect-location) - -Every Interconnect terminates at a specific Direct Connect location such as an AWS-aligned colocation facility; this link shows where the Interconnect is physically hosted.
diff --git a/docs.overmind.tech/docs/sources/aws/Types/directconnect-lag.md b/docs.overmind.tech/docs/sources/aws/Types/directconnect-lag.md deleted file mode 100644 index e7c62741..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/directconnect-lag.md +++ /dev/null @@ -1,31 +0,0 @@ ---- -title: Link Aggregation Group -sidebar_label: directconnect-lag ---- - -An AWS Direct Connect **Link Aggregation Group (LAG)** allows you to combine multiple physical Direct Connect connections into a single, logical interface. Doing so simplifies management, provides higher aggregate bandwidth and offers built-in resiliency: if one underlying connection goes down, traffic is automatically redistributed across the remaining links. Each LAG behaves as a single port on the AWS side while still exposing the individual connections (with their own light-levels and alarms) for troubleshooting. -Official AWS documentation: https://docs.aws.amazon.com/directconnect/latest/UserGuide/lag.html - -**Terrafrom Mappings:** - -- `aws_dx_lag.id` - -## Supported Methods - -- `GET`: Get a Link Aggregation Group by ID -- `LIST`: List all Link Aggregation Groups -- `SEARCH`: Search Link Aggregation Group by ARN - -## Possible Links - -### [`directconnect-connection`](/sources/aws/Types/directconnect-connection) - -A LAG is essentially a collection of Direct Connect connections. Each linked `directconnect-connection` represents one of the physical ports that has been bundled into the LAG. - -### [`directconnect-hosted-connection`](/sources/aws/Types/directconnect-hosted-connection) - -Hosted connections can also be associated with a LAG. Overmind links these `directconnect-hosted-connection` resources to show which hosted (customer-provisioned) circuits are aggregated under the same LAG. - -### [`directconnect-location`](/sources/aws/Types/directconnect-location) - -Every LAG is created at a specific AWS Direct Connect location (data centre or colocation facility). 
The `directconnect-location` link identifies the physical site where the LAG’s constituent connections terminate. diff --git a/docs.overmind.tech/docs/sources/aws/Types/directconnect-location.md b/docs.overmind.tech/docs/sources/aws/Types/directconnect-location.md deleted file mode 100644 index 5d5f6c1b..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/directconnect-location.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -title: Direct Connect Location -sidebar_label: directconnect-location ---- - -An AWS Direct Connect Location represents one of the globally distributed, carrier-neutral data-centre facilities where you can order and terminate an AWS Direct Connect dedicated circuit. Each location has a unique location code that you reference when requesting a connection, viewing available port speeds, generating LOAs, or validating the physical site of an existing circuit. Understanding which locations are available – and the risks or constraints linked to each – helps you design resilient, low-latency connectivity between your on-premises network and AWS. 
-For full details see the official AWS documentation: https://docs.aws.amazon.com/directconnect/latest/UserGuide/WorkingWithLocations.html - -**Terraform Mappings:** - -- `aws_dx_location.location_code` - -## Supported Methods - -- `GET`: Get a Location by its code -- `LIST`: List all Direct Connect Locations -- `SEARCH`: Search Direct Connect Locations by ARN diff --git a/docs.overmind.tech/docs/sources/aws/Types/directconnect-router-configuration.md b/docs.overmind.tech/docs/sources/aws/Types/directconnect-router-configuration.md deleted file mode 100644 index cf5c91d4..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/directconnect-router-configuration.md +++ /dev/null @@ -1,23 +0,0 @@ ---- -title: Router Configuration -sidebar_label: directconnect-router-configuration ---- - -AWS Direct Connect can automatically generate a sample configuration that you can paste into the customer-side router that terminates a private, public or transit virtual interface. The Router Configuration object represents that text file. Because the template is created by AWS specifically for the selected virtual interface it already contains the correct BGP ASN, VLAN, IP addressing and other parameters for the connection, reducing the chance of a mis-configuration. -Full details of the API can be found in the AWS Direct Connect API Reference: https://docs.aws.amazon.com/directconnect/latest/APIReference/API_DescribeRouterConfiguration.html - -**Terraform Mappings:** - -- `aws_dx_router_configuration.virtual_interface_id` - -## Supported Methods - -- `GET`: Get a Router Configuration by Virtual Interface ID -- ~~`LIST`~~ -- `SEARCH`: Search Router Configuration by ARN - -## Possible Links - -### [`directconnect-virtual-interface`](/sources/aws/Types/directconnect-virtual-interface) - -A Router Configuration is generated for, and therefore has a **1-to-1** relationship with, a Direct Connect Virtual Interface.
The link allows you to navigate from the virtual interface to the exact configuration you should apply to your on-premises router (and vice-versa), making it easier to validate that the interface has been deployed according to the recommended configuration. diff --git a/docs.overmind.tech/docs/sources/aws/Types/directconnect-virtual-gateway.md b/docs.overmind.tech/docs/sources/aws/Types/directconnect-virtual-gateway.md deleted file mode 100644 index 24a64425..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/directconnect-virtual-gateway.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Direct Connect Virtual Gateway -sidebar_label: directconnect-virtual-gateway ---- - -A Direct Connect virtual gateway (sometimes called a virtual private gateway, or **VGW**) is the AWS-managed end-point that terminates a private virtual interface and presents it to your Amazon VPC. It provides the control-plane for routing traffic between your on-premises network and one or more VPCs over an AWS Direct Connect link, removing the need to run VPN hardware or BGP sessions inside the VPC itself. By querying this resource, Overmind can show you which VPCs and Direct Connect virtual interfaces are affected, surface any missing or insecure route advertisements, and highlight configuration drift _before_ changes are deployed. 
- -For more information, refer to the AWS Direct Connect documentation: https://docs.aws.amazon.com/directconnect/latest/UserGuide/virtual-gateway.html - -## Supported Methods - -- `GET`: Get a virtual gateway by ID -- `LIST`: List all virtual gateways -- `SEARCH`: Search virtual gateways by ARN diff --git a/docs.overmind.tech/docs/sources/aws/Types/directconnect-virtual-interface.md b/docs.overmind.tech/docs/sources/aws/Types/directconnect-virtual-interface.md deleted file mode 100644 index 4d7222f1..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/directconnect-virtual-interface.md +++ /dev/null @@ -1,41 +0,0 @@ ---- -title: Virtual Interface -sidebar_label: directconnect-virtual-interface ---- - -A Virtual Interface (VIF) is the logical layer that sits on top of an AWS Direct Connect physical connection and provides Layer 3 access into AWS. Three flavours are available—private, public and transit—each supporting different routing destinations and services. A VIF defines the VLAN, BGP peering IPs, Autonomous System Numbers (ASNs), jumbo-frame settings and, optionally, a Direct Connect Gateway association. -Official AWS documentation: https://docs.aws.amazon.com/directconnect/latest/UserGuide/WorkingWithVirtualInterfaces.html - -**Terrafrom Mappings:** - -- `aws_dx_private_virtual_interface.id` -- `aws_dx_public_virtual_interface.id` -- `aws_dx_transit_virtual_interface.id` - -## Supported Methods - -- `GET`: Get a virtual interface by ID -- `LIST`: List all virtual interfaces -- `SEARCH`: Search virtual interfaces by connection ID - -## Possible Links - -### [`directconnect-connection`](/sources/aws/Types/directconnect-connection) - -Every VIF must be created against a Direct Connect physical connection. The link lets you trace which circuit (location, port speed, AWS account) the virtual interface is riding on. 
- -### [`directconnect-direct-connect-gateway`](/sources/aws/Types/directconnect-direct-connect-gateway) - -Private and transit VIFs can be attached to a Direct Connect Gateway to reach multiple VPCs or on-premises networks. This link shows that association, helping you see the downstream network blast-radius of a VIF change. - -### [`rdap-ip-network`](/sources/stdlib/Types/rdap-ip-network) - -The BGP peer IPs configured on a VIF belong to specific IPv4/IPv6 networks. Linking to RDAP IP network objects allows visibility of route-origin information and public registration data for those peer addresses. - -### [`directconnect-direct-connect-gateway-attachment`](/sources/aws/Types/directconnect-direct-connect-gateway-attachment) - -When a VIF is associated with a Direct Connect Gateway, an attachment resource is created in AWS. This link maps the VIF to its attachment object so you can understand and audit that relationship. - -### [`directconnect-virtual-interface`](/sources/aws/Types/directconnect-virtual-interface) - -Some organisations create multiple VIFs on the same physical connection for isolation (e.g., production vs. test). Overmind links sibling VIFs so you can view parallel logical circuits that share the same underlay. diff --git a/docs.overmind.tech/docs/sources/aws/Types/dynamodb-backup.md b/docs.overmind.tech/docs/sources/aws/Types/dynamodb-backup.md deleted file mode 100644 index 5b2aa507..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/dynamodb-backup.md +++ /dev/null @@ -1,18 +0,0 @@ ---- -title: DynamoDB Backup -sidebar_label: dynamodb-backup ---- - -A DynamoDB Backup represents a point-in-time, fully-managed snapshot of an Amazon DynamoDB table, including all of its data and global secondary indexes. Back-ups can be created on demand or retained automatically through continuous point-in-time recovery (PITR). 
They allow you to restore the table to any state within the retention window, or to clone the data into a new table in the same or another region for testing and disaster-recovery purposes. For further details, see the official AWS documentation: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/BackupRestore.html - -## Supported Methods - -- ~~`GET`~~ -- `LIST`: List all DynamoDB backups -- `SEARCH`: Search for a DynamoDB backup by table name - -## Possible Links - -### [`dynamodb-table`](/sources/aws/Types/dynamodb-table) - -Each backup is intrinsically tied to the table from which it was taken; Overmind therefore links a `dynamodb-backup` item to its source `dynamodb-table` so you can trace data-protection coverage, understand restore scopes, and assess the blast radius of table changes or deletions. diff --git a/docs.overmind.tech/docs/sources/aws/Types/dynamodb-table.md b/docs.overmind.tech/docs/sources/aws/Types/dynamodb-table.md deleted file mode 100644 index 85dfe110..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/dynamodb-table.md +++ /dev/null @@ -1,27 +0,0 @@ ---- -title: DynamoDB Table -sidebar_label: dynamodb-table ---- - -Amazon DynamoDB is AWS’s fully-managed NoSQL database service, providing single-millisecond latency at virtually any scale. A DynamoDB table is the primary container for data, storing items as key–value pairs and supporting features such as on-demand or provisioned capacity, global replication, streams and automatic encryption at rest. 
-For a full description of table capabilities, limits and API operations, see the official AWS documentation: https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_Table.html - -**Terraform Mappings:** - -- `aws_dynamodb_table.arn` - -## Supported Methods - -- `GET`: Get a DynamoDB table by name -- `LIST`: List all DynamoDB tables -- `SEARCH`: Search for DynamoDB tables by ARN - -## Possible Links - -### [`dynamodb-table`](/sources/aws/Types/dynamodb-table) - -When a table participates in a global table configuration, each regional replica is represented as a separate `dynamodb-table` item. Overmind links these peer replicas so that you can see the full set of regions involved in the same globally replicated table. - -### [`kms-key`](/sources/aws/Types/kms-key) - -If server-side encryption is enabled with a customer-managed KMS key, the table is linked to the `kms-key` that protects its data. This allows you to trace encryption dependencies and assess the impact of key rotation or deletion. diff --git a/docs.overmind.tech/docs/sources/aws/Types/ec2-address.md b/docs.overmind.tech/docs/sources/aws/Types/ec2-address.md deleted file mode 100644 index 62cf7c5e..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/ec2-address.md +++ /dev/null @@ -1,31 +0,0 @@ ---- -title: EC2 Address -sidebar_label: ec2-address ---- - -An EC2 Address represents an Elastic IP (EIP) in AWS – a static, public IPv4 address that you can allocate to your account and assign to running resources such as EC2 instances or network interfaces. Elastic IPs let you mask the failure of a single instance by rapidly remapping the address to another resource, ensuring minimal disruption to services that rely on a fixed public endpoint.
See the official AWS documentation for full details: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html - -**Terrafrom Mappings:** - -- `aws_eip.public_ip` -- `aws_eip_association.public_ip` - -## Supported Methods - -- `GET`: Get an EC2 address by Public IP -- `LIST`: List EC2 addresses -- `SEARCH`: Search for EC2 addresses by ARN - -## Possible Links - -### [`ec2-instance`](/sources/aws/Types/ec2-instance) - -An Elastic IP can be attached directly to an EC2 instance; this link shows which instance currently holds (or most recently held) the address, allowing you to trace external reachability back to the compute resource. - -### [`ip`](/sources/aws/Types/networkmanager-network-resource-relationship) - -The Elastic IP is ultimately a routable IPv4 address; this link connects the high-level EIP object to the underlying IP entity so that you can track dependencies and overlap with other networking resources in your estate. - -### [`ec2-network-interface`](/sources/aws/Types/ec2-network-interface) - -When an Elastic IP is associated with an EC2 instance, it is actually bound to one of the instance’s network interfaces (ENIs). This link identifies the specific ENI, enabling deeper analysis of traffic flow, security groups, and subnet placement that pertain to the EIP. diff --git a/docs.overmind.tech/docs/sources/aws/Types/ec2-capacity-reservation-fleet.md b/docs.overmind.tech/docs/sources/aws/Types/ec2-capacity-reservation-fleet.md deleted file mode 100644 index 51b2b13a..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/ec2-capacity-reservation-fleet.md +++ /dev/null @@ -1,19 +0,0 @@ ---- -title: Capacity Reservation Fleet -sidebar_label: ec2-capacity-reservation-fleet ---- - -A Capacity Reservation Fleet is an Amazon EC2 resource that lets you create and manage a group of Capacity Reservations in a single operation. 
By specifying instance attributes such as instance types, platforms and Availability Zones, you can ensure that the compute capacity your workload requires will be held for you ahead of time, even during periods of high demand. This is especially useful when you need to guarantee that a heterogeneous mix of instances will be available at launch, for example during large-scale events or disaster-recovery drills. -For more information, see the official AWS documentation: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateCapacityReservationFleet.html - -## Supported Methods - -- `GET`: Get a capacity reservation fleet by ID -- `LIST`: List capacity reservation fleets -- `SEARCH`: Search capacity reservation fleets by ARN - -## Possible Links - -### [`ec2-capacity-reservation`](/sources/aws/Types/ec2-capacity-reservation) - -A Capacity Reservation Fleet is essentially an umbrella object that owns one or more individual Capacity Reservations. Each linked `ec2-capacity-reservation` represents a single slice of capacity that was created as part of the fleet’s allocation strategy, and tracking these links lets you understand which reservations belong to which fleet and how capacity is distributed across instance types and Availability Zones. diff --git a/docs.overmind.tech/docs/sources/aws/Types/ec2-capacity-reservation.md b/docs.overmind.tech/docs/sources/aws/Types/ec2-capacity-reservation.md deleted file mode 100644 index 1ef498bd..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/ec2-capacity-reservation.md +++ /dev/null @@ -1,27 +0,0 @@ ---- -title: Capacity Reservation -sidebar_label: ec2-capacity-reservation ---- - -An Amazon EC2 Capacity Reservation is an AWS construct that sets aside compute capacity for one or more instance types in a specific Availability Zone, guaranteeing that the reserved capacity is available whenever you need to launch instances. 
Capacity Reservations can be created individually or as members of a Capacity Reservation Fleet, allowing you to reserve capacity across several instance types and Zones in a single request. This is particularly useful for workloads that must start at short notice, seasonal traffic peaks, or disaster-recovery scenarios. -For a detailed explanation, refer to the official AWS documentation: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-capacity-reservations.html - -**Terraform Mappings:** - -- `aws_ec2_capacity_reservation_fleet.id` - -## Supported Methods - -- `GET`: Get a capacity reservation by ID -- `LIST`: List capacity reservations -- `SEARCH`: Search capacity reservations by ARN - -## Possible Links - -### [`ec2-placement-group`](/sources/aws/Types/ec2-placement-group) - -A Capacity Reservation can be scoped to a placement group. When the `placement_group_arn` (or equivalent Terraform argument) is specified, Overmind links the reservation to that placement group so you can see how the reserved capacity aligns with your low-latency or HPC topology. - -### [`ec2-capacity-reservation-fleet`](/sources/aws/Types/ec2-capacity-reservation-fleet) - -If the reservation was created as part of a Capacity Reservation Fleet, Overmind links it to its parent fleet. This lets you trace individual reservations back to the fleet that manages them and understand how they contribute to the overall pool of reserved capacity across instance types and Availability Zones.
diff --git a/docs.overmind.tech/docs/sources/aws/Types/ec2-egress-only-internet-gateway.md b/docs.overmind.tech/docs/sources/aws/Types/ec2-egress-only-internet-gateway.md deleted file mode 100644 index a4b31106..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/ec2-egress-only-internet-gateway.md +++ /dev/null @@ -1,23 +0,0 @@ ---- -title: Egress Only Internet Gateway -sidebar_label: ec2-egress-only-internet-gateway ---- - -An Egress Only Internet Gateway (EOIGW) is a horizontally-scaled, highly available AWS VPC component that allows outbound-only IPv6 traffic from your VPC to the internet while preventing unsolicited inbound connections. Unlike a standard Internet Gateway, an EOIGW supports IPv6 traffic exclusively and enforces one-way egress, making it a useful control when you want resources such as application servers to reach external IPv6 services without being directly reachable from the internet. -For detailed information, see the official AWS documentation: https://docs.aws.amazon.com/vpc/latest/userguide/egress-only-internet-gateway.html - -**Terrafrom Mappings:** - -- `egress_only_internet_gateway.id` - -## Supported Methods - -- `GET`: Get an egress only internet gateway by ID -- `LIST`: List all egress only internet gateways -- `SEARCH`: Search egress only internet gateways by ARN - -## Possible Links - -### [`ec2-vpc`](/sources/aws/Types/ec2-vpc) - -An EOIGW is attached to exactly one VPC. Overmind represents this relationship so that you can navigate from a VPC to its associated egress-only internet gateways and understand which networks can initiate outbound IPv6 traffic to the internet. 
diff --git a/docs.overmind.tech/docs/sources/aws/Types/ec2-iam-instance-profile-association.md b/docs.overmind.tech/docs/sources/aws/Types/ec2-iam-instance-profile-association.md deleted file mode 100644 index 9c55da00..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/ec2-iam-instance-profile-association.md +++ /dev/null @@ -1,23 +0,0 @@ ---- -title: IAM Instance Profile Association -sidebar_label: ec2-iam-instance-profile-association ---- - -An IAM Instance Profile Association represents the live binding between an Amazon EC2 instance and an IAM instance profile (which in turn wraps an IAM role). The association determines which IAM permissions the instance receives via its metadata service. Only one profile can be associated with an instance at a time; changing the association effectively swaps the role that the instance assumes. -For further information see the AWS API reference: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_IamInstanceProfileAssociation.html - -## Supported Methods - -- `GET`: Get an IAM Instance Profile Association by ID -- `LIST`: List all IAM Instance Profile Associations -- `SEARCH`: Search IAM Instance Profile Associations by ARN - -## Possible Links - -### [`iam-instance-profile`](/sources/aws/Types/iam-instance-profile) - -The association points to exactly one IAM instance profile, identifying the set of IAM permissions that will be handed to the EC2 instance. - -### [`ec2-instance`](/sources/aws/Types/ec2-instance) - -Each association belongs to a single EC2 instance, indicating which profile (and hence which role) the instance is currently using. 
diff --git a/docs.overmind.tech/docs/sources/aws/Types/ec2-image.md b/docs.overmind.tech/docs/sources/aws/Types/ec2-image.md deleted file mode 100644 index 1d1ed76d..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/ec2-image.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -title: Amazon Machine Image (AMI) -sidebar_label: ec2-image ---- - -An Amazon Machine Image (AMI) is a pre-configured, read-only template that defines the software stack required to launch an Amazon EC2 instance. It typically contains an operating system, application server, and any additional software or configuration needed for your workload. By selecting or creating an AMI you can reproduce identical instances at scale, roll back to known-good states, or share hardened golden images across accounts and Regions. -For a full explanation of AMIs, see the official AWS documentation: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AMIs.html. - -**Terrafrom Mappings:** - -- `aws_ami.id` - -## Supported Methods - -- `GET`: Get an AMI by ID -- `LIST`: List all AMIs -- `SEARCH`: Search AMIs by ARN diff --git a/docs.overmind.tech/docs/sources/aws/Types/ec2-instance-event-window.md b/docs.overmind.tech/docs/sources/aws/Types/ec2-instance-event-window.md deleted file mode 100644 index 0b47e4f7..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/ec2-instance-event-window.md +++ /dev/null @@ -1,19 +0,0 @@ ---- -title: EC2 Instance Event Window -sidebar_label: ec2-instance-event-window ---- - -An EC2 Instance Event Window is an Amazon EC2 scheduling feature that lets you specify one or more preferred time ranges during which planned AWS maintenance events (for example, a reboot, stop/start or software update) may be applied to your instances. By defining event windows, you retain greater control over when service-initiated interruptions occur, enabling you to align maintenance with your own change-management processes and minimise unplanned impact. 
-For full details, see the official AWS documentation: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/event-windows.html - -## Supported Methods - -- `GET`: Get an event window by ID -- `LIST`: List all event windows -- `SEARCH`: Search for event windows by ARN - -## Possible Links - -### [`ec2-instance`](/sources/aws/Types/ec2-instance) - -An event window can be associated with one or more EC2 instances. When a linkage exists, those instances will only receive scheduled maintenance events during the time ranges defined in the referenced EC2 Instance Event Window. diff --git a/docs.overmind.tech/docs/sources/aws/Types/ec2-instance-status.md b/docs.overmind.tech/docs/sources/aws/Types/ec2-instance-status.md deleted file mode 100644 index 8b065a05..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/ec2-instance-status.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: EC2 Instance Status -sidebar_label: ec2-instance-status ---- - -An EC2 Instance Status record summarises the current health of a running Amazon Elastic Compute Cloud (EC2) instance. AWS performs two types of status checks—system checks (that assess the underlying host and network) and instance checks (that confirm the guest operating system is reachable). Together they indicate whether the instance is able to accept traffic and function as expected. -Overmind ingests these status objects so that you can surface potential availability risks (e.g. persistent instance check failures) before promoting or modifying a deployment. -For a detailed explanation of how AWS generates and interprets these checks, see the [official AWS documentation](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/status-checks.html). 
- -## Supported Methods - -- `GET`: Get an EC2 instance status by Instance ID -- `LIST`: List all EC2 instance statuses -- `SEARCH`: Search EC2 instance statuses by ARN diff --git a/docs.overmind.tech/docs/sources/aws/Types/ec2-instance.md b/docs.overmind.tech/docs/sources/aws/Types/ec2-instance.md deleted file mode 100644 index 2d969a8f..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/ec2-instance.md +++ /dev/null @@ -1,67 +0,0 @@ ---- -title: EC2 Instance -sidebar_label: ec2-instance ---- - -An Amazon EC2 instance is a resizable virtual server that runs in the AWS cloud and provides the compute layer of most workloads. Instances can be started, stopped, terminated, resized and placed into different networking or storage configurations, allowing you to run applications without purchasing physical hardware. For full details see the official AWS documentation: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Instances.html - -**Terrafrom Mappings:** - -- `aws_instance.id` -- `aws_instance.arn` - -## Supported Methods - -- `GET`: Get an EC2 instance by ID -- `LIST`: List all EC2 instances -- `SEARCH`: Search EC2 instances by ARN - -## Possible Links - -### [`ec2-instance-status`](/sources/aws/Types/ec2-instance-status) - -Represents the current state of the instance (pending, running, stopping, stopped, etc.), health checks, and scheduled events. - -### [`iam-instance-profile`](/sources/aws/Types/iam-instance-profile) - -An instance can be launched with an IAM instance profile, enabling the software running on it to assume a role and gain AWS permissions. - -### [`ec2-capacity-reservation`](/sources/aws/Types/ec2-capacity-reservation) - -If the instance is launched into a specific capacity reservation, that reservation object is linked here to show the source of reserved compute capacity. - -### [`ec2-image`](/sources/aws/Types/ec2-image) - -Every EC2 instance is created from an Amazon Machine Image (AMI). This link points to the AMI used at launch time. 
- -### [`ec2-key-pair`](/sources/aws/Types/ec2-key-pair) - -For Linux and some Windows instances a key pair is specified for SSH/RDP access; the referenced key pair is linked here. - -### [`ec2-placement-group`](/sources/aws/Types/ec2-placement-group) - -Instances can be placed in a placement group to influence network performance or availability. This link shows that relationship. - -### [`ip`](/sources/aws/Types/networkmanager-network-resource-relationship) - -Each instance receives one or more private and, optionally, public IP addresses. These addresses are surfaced as separate `ip` resources linked to the instance. - -### [`ec2-subnet`](/sources/aws/Types/ec2-subnet) - -The instance’s primary network interface is attached to a specific subnet; that subnet is linked to reveal networking context. - -### [`ec2-vpc`](/sources/aws/Types/ec2-vpc) - -The subnet (and thus the instance) resides inside a VPC. Linking the VPC shows the broader network boundary and associated routing. - -### [`dns`](/sources/stdlib/Types/dns) - -Public and private DNS names resolve to the instance’s IP addresses; these DNS records are connected through this link. - -### [`ec2-security-group`](/sources/aws/Types/ec2-security-group) - -One or more security groups control inbound and outbound traffic to the instance network interfaces. Those groups are linked here. - -### [`ec2-volume`](/sources/aws/Types/ec2-volume) - -EBS volumes attached to the instance for root and additional block storage are represented and linked by this type. 
diff --git a/docs.overmind.tech/docs/sources/aws/Types/ec2-internet-gateway.md b/docs.overmind.tech/docs/sources/aws/Types/ec2-internet-gateway.md deleted file mode 100644 index 209f7d21..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/ec2-internet-gateway.md +++ /dev/null @@ -1,23 +0,0 @@ ---- -title: Internet Gateway -sidebar_label: ec2-internet-gateway ---- - -An Internet Gateway is a highly-available, horizontally-scaled component that provides a Virtual Private Cloud (VPC) with a route to the public Internet. When attached to a VPC and referenced in the route table, it enables resources with public IP addresses—such as EC2 instances, NAT gateways or load balancers—to send and receive traffic to and from the wider Internet. Because it is a managed AWS service, it does not introduce any single point of failure and requires no administration beyond attachment and routing. -For the official AWS documentation, see https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Internet_Gateway.html. - -**Terrafrom Mappings:** - -- `aws_internet_gateway.id` - -## Supported Methods - -- `GET`: Get an internet gateway by ID -- `LIST`: List all internet gateways -- `SEARCH`: Search internet gateways by ARN - -## Possible Links - -### [`ec2-vpc`](/sources/aws/Types/ec2-vpc) - -An Internet Gateway must be attached to exactly one VPC; this link represents that one-to-one relationship. Through it, Overmind can surface configuration drift (for example, if the gateway is detached) and highlight risks such as missing or overly permissive route-table entries that would expose private resources to the Internet. 
diff --git a/docs.overmind.tech/docs/sources/aws/Types/ec2-key-pair.md b/docs.overmind.tech/docs/sources/aws/Types/ec2-key-pair.md deleted file mode 100644 index dc2d6700..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/ec2-key-pair.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -title: Key Pair -sidebar_label: ec2-key-pair ---- - -An Amazon EC2 Key Pair is a set of cryptographic keys that enables secure, password-less SSH access to your EC2 instances and other compatible services. The public key is stored in AWS, while the private key is downloaded and managed by you. If the private key is compromised or lost, access to the associated instances is at risk, so tracking key pairs is critical for security posture. -For full details, see the official AWS documentation: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html - -**Terrafrom Mappings:** - -- `aws_key_pair.id` - -## Supported Methods - -- `GET`: Get a key pair by name -- `LIST`: List all key pairs -- `SEARCH`: Search for key pairs by ARN diff --git a/docs.overmind.tech/docs/sources/aws/Types/ec2-launch-template-version.md b/docs.overmind.tech/docs/sources/aws/Types/ec2-launch-template-version.md deleted file mode 100644 index 993c306c..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/ec2-launch-template-version.md +++ /dev/null @@ -1,51 +0,0 @@ ---- -title: Launch Template Version -sidebar_label: ec2-launch-template-version ---- - -An AWS EC2 Launch Template Version is an immutable snapshot of all the parameters that make up a particular revision of an EC2 launch template – such as AMI ID, instance type, network interfaces, storage, tags and user-data. Each version can be referenced directly when launching instances or by services like Auto Scaling, Spot Fleets and EC2 Fleet, giving you reproducible, auditable instance configuration. 
-For full details see the AWS documentation: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_LaunchTemplateVersion.html - -## Supported Methods - -- `GET`: Get a launch template version by `{templateId}.{version}` -- `LIST`: List all launch template versions -- `SEARCH`: Search launch template versions by ARN - -## Possible Links - -### [`ec2-network-interface`](/sources/aws/Types/ec2-network-interface) - -The version can embed zero or more network interface specifications, each of which becomes an `ec2-network-interface` when an instance is launched from the template. - -### [`ec2-subnet`](/sources/aws/Types/ec2-subnet) - -Within the network interface or placement settings the version may reference a specific subnet ID, tying the launched instance to that `ec2-subnet`. - -### [`ec2-security-group`](/sources/aws/Types/ec2-security-group) - -Security group IDs listed in the template control inbound and outbound traffic for instances started from this version, linking it to the relevant `ec2-security-group` resources. - -### [`ec2-image`](/sources/aws/Types/ec2-image) - -Every launch template version specifies an AMI ID, creating a dependency on the corresponding `ec2-image`. - -### [`ec2-key-pair`](/sources/aws/Types/ec2-key-pair) - -If a key name is supplied, the version references an `ec2-key-pair` used for SSH access to Linux instances or password encryption for Windows instances. - -### [`ec2-snapshot`](/sources/aws/Types/ec2-snapshot) - -EBS block-device mappings in the template can point to snapshot IDs, establishing a relationship with the relevant `ec2-snapshot` objects. - -### [`ec2-capacity-reservation`](/sources/aws/Types/ec2-capacity-reservation) - -The template may include a capacity reservation target, associating the version with a specific `ec2-capacity-reservation`. 
- -### [`ec2-placement-group`](/sources/aws/Types/ec2-placement-group) - -Placement settings in the version can name a placement group, indicating that instances should launch into the linked `ec2-placement-group`. - -### [`ip`](/sources/aws/Types/networkmanager-network-resource-relationship) - -Static private or public IP addresses specified in the network interface configuration will be materialised as `ip` resources when the template version is used to launch an instance. diff --git a/docs.overmind.tech/docs/sources/aws/Types/ec2-launch-template.md b/docs.overmind.tech/docs/sources/aws/Types/ec2-launch-template.md deleted file mode 100644 index 81457635..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/ec2-launch-template.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -title: Launch Template -sidebar_label: ec2-launch-template ---- - -An EC2 Launch Template is an AWS resource that stores the complete configuration needed to spin up one or more Amazon EC2 instances, including AMI ID, instance type, network settings, user-data scripts, and optional purchasing options such as Spot or On-Demand. By saving these parameters in a versioned template, teams can reproduce environments consistently, roll back to previous configurations, and simplify autoscaling and fleet operations. 
-Official documentation: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-launch-templates.html - -**Terrafrom Mappings:** - -- `aws_launch_template.id` - -## Supported Methods - -- `GET`: Get a launch template by ID -- `LIST`: List all launch templates -- `SEARCH`: Search for launch templates by ARN diff --git a/docs.overmind.tech/docs/sources/aws/Types/ec2-managed-prefix-list.md b/docs.overmind.tech/docs/sources/aws/Types/ec2-managed-prefix-list.md deleted file mode 100644 index 39681af9..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/ec2-managed-prefix-list.md +++ /dev/null @@ -1,18 +0,0 @@ ---- -title: Managed Prefix List -sidebar_label: ec2-managed-prefix-list ---- - -A managed prefix list is a set of one or more CIDR blocks that you can reference in security group rules, route table routes, and other network configuration. Transit gateway routes can use a prefix list as the destination instead of a single CIDR. - -Official API documentation: [DescribeManagedPrefixLists](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeManagedPrefixLists.html) - -**Terraform Mappings:** - -- `aws_ec2_managed_prefix_list.id` - -## Supported Methods - -- `GET`: Get a managed prefix list by ID -- `LIST`: List all managed prefix lists -- `SEARCH`: Search by ARN diff --git a/docs.overmind.tech/docs/sources/aws/Types/ec2-nat-gateway.md b/docs.overmind.tech/docs/sources/aws/Types/ec2-nat-gateway.md deleted file mode 100644 index e3450d15..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/ec2-nat-gateway.md +++ /dev/null @@ -1,35 +0,0 @@ ---- -title: NAT Gateway -sidebar_label: ec2-nat-gateway ---- - -A NAT Gateway is an AWS managed network appliance that enables instances in a private subnet to initiate outbound IPv4 (and, in the case of an **NAT Gateway (v2)**, IPv6) traffic to the internet or other AWS services, while preventing unsolicited inbound connections from the public internet. 
It provides higher bandwidth and easier management compared to NAT instances, and is designed to be highly available within an Availability Zone. -For a full description of its features and limitations, see the official AWS documentation: https://docs.aws.amazon.com/vpc/latest/userguide/vpc-nat-gateway.html - -**Terrafrom Mappings:** - -- `aws_nat_gateway.id` - -## Supported Methods - -- `GET`: Get a NAT Gateway by ID -- `LIST`: List all NAT gateways -- `SEARCH`: Search for NAT gateways by ARN - -## Possible Links - -### [`ec2-vpc`](/sources/aws/Types/ec2-vpc) - -The NAT Gateway is always created inside a specific VPC; this link lets you trace which virtual network the gateway belongs to. - -### [`ec2-subnet`](/sources/aws/Types/ec2-subnet) - -A NAT Gateway is placed in exactly one subnet. This link shows the subnet that hosts the gateway’s elastic network interface. - -### [`ec2-network-interface`](/sources/aws/Types/ec2-network-interface) - -Each NAT Gateway is automatically assigned an elastic network interface (ENI). Following this link reveals the ENI that represents the gateway inside the subnet. - -### [`ip`](/sources/aws/Types/networkmanager-network-resource-relationship) - -When you create a NAT Gateway you must allocate at least one Elastic IP address. This link connects the gateway to the public IP(s) it advertises to the internet. diff --git a/docs.overmind.tech/docs/sources/aws/Types/ec2-network-acl.md b/docs.overmind.tech/docs/sources/aws/Types/ec2-network-acl.md deleted file mode 100644 index 2752cf06..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/ec2-network-acl.md +++ /dev/null @@ -1,27 +0,0 @@ ---- -title: Network ACL -sidebar_label: ec2-network-acl ---- - -A Network Access Control List (ACL) is a stateless, virtual firewall that controls inbound and outbound traffic at the subnet boundary within an Amazon Virtual Private Cloud (VPC). 
Each rule in a Network ACL is evaluated in order, enabling or denying traffic based on protocol, port range and source or destination IP. Unlike security groups, Network ACLs apply to all resources inside the associated subnets, making them a coarse-grained layer of network security. -For full details, see the AWS documentation: https://docs.aws.amazon.com/vpc/latest/userguide/vpc-network-acls.html - -**Terrafrom Mappings:** - -- `aws_network_acl.id` - -## Supported Methods - -- `GET`: Get a network ACL -- `LIST`: List all network ACLs -- `SEARCH`: Search for network ACLs by ARN - -## Possible Links - -### [`ec2-subnet`](/sources/aws/Types/ec2-subnet) - -A Network ACL is attached to one or more subnets; traffic entering or leaving those subnets is evaluated against the ACL’s rule set. Overmind therefore links an `ec2-network-acl` to the `ec2-subnet` resources it governs. - -### [`ec2-vpc`](/sources/aws/Types/ec2-vpc) - -Every Network ACL exists inside a single VPC. Overmind links an `ec2-network-acl` to its parent `ec2-vpc` to show the broader network context in which the ACL operates. diff --git a/docs.overmind.tech/docs/sources/aws/Types/ec2-network-interface-permission.md b/docs.overmind.tech/docs/sources/aws/Types/ec2-network-interface-permission.md deleted file mode 100644 index 7eccd3a2..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/ec2-network-interface-permission.md +++ /dev/null @@ -1,19 +0,0 @@ ---- -title: Network Interface Permission -sidebar_label: ec2-network-interface-permission ---- - -An EC2 **Network Interface Permission** represents the right of an AWS principal (usually another AWS account) to attach a specific Elastic Network Interface (ENI) to an instance in that principal’s account. By creating or revoking these permissions you can share network interfaces across accounts in a controlled manner without transferring ownership. 
-Further information can be found in the AWS official documentation: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_NetworkInterfacePermission.html - -## Supported Methods - -- `GET`: Get a network interface permission by ID -- `LIST`: List all network interface permissions -- `SEARCH`: Search network interface permissions by ARN - -## Possible Links - -### [`ec2-network-interface`](/sources/aws/Types/ec2-network-interface) - -A network interface permission is always associated with a single network interface; the linked `ec2-network-interface` item is the ENI to which this permission applies. diff --git a/docs.overmind.tech/docs/sources/aws/Types/ec2-network-interface.md b/docs.overmind.tech/docs/sources/aws/Types/ec2-network-interface.md deleted file mode 100644 index 3379b135..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/ec2-network-interface.md +++ /dev/null @@ -1,43 +0,0 @@ ---- -title: EC2 Network Interface -sidebar_label: ec2-network-interface ---- - -An Amazon Elastic Compute Cloud (EC2) Network Interface – often referred to as an Elastic Network Interface (ENI) – is a virtual network card that can be attached to an EC2 instance. It provides the instance with connectivity within a Virtual Private Cloud (VPC) and, optionally, to the public Internet. Each ENI contains a primary private IPv4 address, one or more secondary IPv4 addresses, IPv6 addresses if enabled, one or more security groups, a MAC address, and, when required, an Elastic IP address or a public DNS name. ENIs can be moved between instances, created in advance, or used for high-availability network configurations such as dual-homed instances. 
-For complete details see the official AWS documentation: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-eni.html - -**Terrafrom Mappings:** - -- `aws_network_interface.id` - -## Supported Methods - -- `GET`: Get a network interface by ID -- `LIST`: List all network interfaces -- `SEARCH`: Search network interfaces by ARN - -## Possible Links - -### [`ec2-instance`](/sources/aws/Types/ec2-instance) - -An ENI can be attached to an EC2 instance, providing that instance with network connectivity. Overmind links the interface to the instance(s) it is or has been attached to. - -### [`ec2-security-group`](/sources/aws/Types/ec2-security-group) - -Each ENI is associated with one or more security groups. These groups define the inbound and outbound traffic rules applied at the interface level. The link shows which security groups control traffic for the ENI. - -### [`ip`](/sources/aws/Types/networkmanager-network-resource-relationship) - -The ENI owns one or more IP addresses (private IPv4, secondary IPv4, IPv6, and optionally Elastic IP). This relationship exposes the individual IP resources attached to the interface. - -### [`dns`](/sources/stdlib/Types/dns) - -If an ENI has a public IPv4 address, AWS automatically creates a corresponding public DNS name; private DNS names may also be present within the VPC. Overmind links these DNS records to the ENI. - -### [`ec2-subnet`](/sources/aws/Types/ec2-subnet) - -An ENI is created inside a specific subnet. The subnet determines the address range from which the ENI’s private IPs are allocated and the availability zone in which it resides. - -### [`ec2-vpc`](/sources/aws/Types/ec2-vpc) - -Every ENI exists within a single VPC, inheriting that VPC’s routing tables, DHCP options, and network ACLs. This link shows the parent VPC for the interface. 
diff --git a/docs.overmind.tech/docs/sources/aws/Types/ec2-placement-group.md b/docs.overmind.tech/docs/sources/aws/Types/ec2-placement-group.md deleted file mode 100644 index 7364f9bc..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/ec2-placement-group.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -title: Placement Group -sidebar_label: ec2-placement-group ---- - -An EC2 Placement Group is an AWS construct that lets you influence how Elastic Compute Cloud (EC2) instances are positioned on the underlying hardware. By creating a placement group with a strategy of `cluster`, `spread`, or `partition`, you can optimise for high-bandwidth, low-latency networking, reduce the risk of simultaneous hardware failures, or isolate groups of instances from one another. For full details, refer to the official AWS documentation: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/placement-groups.html - -**Terrafrom Mappings:** - -- `aws_placement_group.id` - -## Supported Methods - -- `GET`: Get a placement group by ID -- `LIST`: List all placement groups -- `SEARCH`: Search for placement groups by ARN diff --git a/docs.overmind.tech/docs/sources/aws/Types/ec2-reserved-instance.md b/docs.overmind.tech/docs/sources/aws/Types/ec2-reserved-instance.md deleted file mode 100644 index 26ac06ef..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/ec2-reserved-instance.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Reserved EC2 Instance -sidebar_label: ec2-reserved-instance ---- - -An AWS Reserved EC2 Instance represents a pre-paid or partially pre-paid commitment to run a specific instance type in a given Availability Zone or Region for a fixed term (one or three years). By committing up-front, you can obtain a significant discount compared with on-demand pricing, but you also take on the risk of paying for capacity you might not end up using. 
Overmind treats each Reserved Instance as its own resource so that you can surface any financial or capacity-planning risk associated with your reservation portfolio before a deployment is made. -For detailed information on how Reserved Instances work, see the official AWS documentation: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/reserved-instances.html - -## Supported Methods - -- `GET`: Get a reserved EC2 instance by ID -- `LIST`: List all reserved EC2 instances -- `SEARCH`: Search reserved EC2 instances by ARN diff --git a/docs.overmind.tech/docs/sources/aws/Types/ec2-route-table.md b/docs.overmind.tech/docs/sources/aws/Types/ec2-route-table.md deleted file mode 100644 index 6b29e850..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/ec2-route-table.md +++ /dev/null @@ -1,58 +0,0 @@ ---- -title: Route Table -sidebar_label: ec2-route-table ---- - -A Route Table in Amazon Virtual Private Cloud (VPC) contains a set of rules, called routes, that determine where network traffic is directed. Each route specifies a destination CIDR block and a target (for example, an Internet Gateway, NAT Gateway, network interface or VPC peering connection). AWS evaluates the routes in the table to decide how packets that leave a subnet are forwarded. A VPC can have multiple route tables, allowing you to implement fine-grained traffic segregation and control. 
-For full details, see the official AWS documentation: https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Route_Tables.html - -**Terrafrom Mappings:** - -- `aws_route_table.id` -- `aws_route_table_association.route_table_id` -- `aws_default_route_table.default_route_table_id` -- `aws_route.route_table_id` - -## Supported Methods - -- `GET`: Get a route table by ID -- `LIST`: List all route tables -- `SEARCH`: Search route tables by ARN - -## Possible Links - -### [`ec2-vpc`](/sources/aws/Types/ec2-vpc) - -The Route Table is created inside a specific VPC; every table therefore has a one-to-one parent relationship with the VPC in which it resides. - -### [`ec2-subnet`](/sources/aws/Types/ec2-subnet) - -Subnets are associated with a Route Table. Traffic that originates in a subnet is evaluated against the routes in its associated table. One route table can be linked to many subnets. - -### [`ec2-internet-gateway`](/sources/aws/Types/ec2-internet-gateway) - -A Route Table may contain a route whose target is an Internet Gateway, enabling outbound IPv4 traffic (and inbound responses) for the subnets that use the table. - -### [`ec2-vpc-endpoint`](/sources/aws/Types/ec2-vpc-endpoint) - -Interface and Gateway VPC Endpoints can appear as route targets, directing traffic destined for AWS services or private resources through the endpoint. - -### [`ec2-egress-only-internet-gateway`](/sources/aws/Types/ec2-egress-only-internet-gateway) - -For IPv6 connectivity, a Route Table can include a route to an Egress-only Internet Gateway, allowing outbound-only IPv6 traffic from the associated subnets. - -### [`ec2-instance`](/sources/aws/Types/ec2-instance) - -An individual EC2 instance can be specified as the route target (using its instance ID) when it is acting as a virtual appliance or host-based router. 
- -### [`ec2-nat-gateway`](/sources/aws/Types/ec2-nat-gateway) - -Routes can target a NAT Gateway, providing Internet access for private subnets while keeping the source IP addresses of instances hidden from the public Internet. - -### [`ec2-network-interface`](/sources/aws/Types/ec2-network-interface) - -A specific Elastic Network Interface (ENI) may be used as a route target to forward traffic to appliances such as firewalls or load balancers hosted on that interface. - -### [`ec2-vpc-peering-connection`](/sources/aws/Types/ec2-vpc-peering-connection) - -When traffic needs to flow between two VPCs, a route whose target is a VPC Peering Connection is added to the Route Table, enabling cross-VPC communication. diff --git a/docs.overmind.tech/docs/sources/aws/Types/ec2-security-group-rule.md b/docs.overmind.tech/docs/sources/aws/Types/ec2-security-group-rule.md deleted file mode 100644 index 74c84a62..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/ec2-security-group-rule.md +++ /dev/null @@ -1,25 +0,0 @@ ---- -title: Security Group Rule -sidebar_label: ec2-security-group-rule ---- - -A Security Group Rule represents a single ingress or egress rule that belongs to an Amazon EC2 Security Group. Each rule specifies the protocol, port range, source or destination (IP range, prefix list, security group or prefix), and (optionally) a description that determines whether specific network traffic is allowed to reach, or leave, the resources associated with the parent security group. By analysing these rules, Overmind can surface unintended exposure, overly-permissive access, or conflicts before the configuration is deployed. 
-For full details see the official AWS documentation: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/security-group-rules.html - -**Terrafrom Mappings:** - -- `aws_security_group_rule.security_group_rule_id` -- `aws_vpc_security_group_ingress_rule.security_group_rule_id` -- `aws_vpc_security_group_egress_rule.security_group_rule_id` - -## Supported Methods - -- `GET`: Get a security group rule by ID -- `LIST`: List all security group rules -- `SEARCH`: Search security group rules by ARN - -## Possible Links - -### [`ec2-security-group`](/sources/aws/Types/ec2-security-group) - -Every Security Group Rule belongs to exactly one Security Group; Overmind links the rule back to its parent security group so you can trace how an individual rule contributes to the overall ingress or egress policy applied to your instances and other resources. diff --git a/docs.overmind.tech/docs/sources/aws/Types/ec2-security-group.md b/docs.overmind.tech/docs/sources/aws/Types/ec2-security-group.md deleted file mode 100644 index ad1fc210..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/ec2-security-group.md +++ /dev/null @@ -1,23 +0,0 @@ ---- -title: Security Group -sidebar_label: ec2-security-group ---- - -An Amazon EC2 Security Group acts as a virtual firewall that regulates inbound and outbound traffic for resources such as EC2 instances, load balancers, and network interfaces within a Virtual Private Cloud (VPC). Rules are stateful, meaning that return traffic is automatically allowed, and can be specified by protocol, port range, and source or destination (CIDR block, prefix list, or another security group). 
For further details, refer to the official AWS documentation: https://docs.aws.amazon.com/vpc/latest/userguide/VPC_SecurityGroups.html - -**Terrafrom Mappings:** - -- `aws_security_group.id` -- `aws_security_group_rule.security_group_id` - -## Supported Methods - -- `GET`: Get a security group by ID -- `LIST`: List all security groups -- `SEARCH`: Search for security groups by ARN - -## Possible Links - -### [`ec2-vpc`](/sources/aws/Types/ec2-vpc) - -Each security group is created within a single VPC, inherits its CIDR boundaries, and can only be attached to resources that also reside in that VPC. diff --git a/docs.overmind.tech/docs/sources/aws/Types/ec2-snapshot.md b/docs.overmind.tech/docs/sources/aws/Types/ec2-snapshot.md deleted file mode 100644 index 513ed38b..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/ec2-snapshot.md +++ /dev/null @@ -1,19 +0,0 @@ ---- -title: EC2 Snapshot -sidebar_label: ec2-snapshot ---- - -An Amazon EBS (Elastic Block Store) snapshot is an incremental, point-in-time backup of an EBS volume. Snapshots are stored in Amazon S3 and can be used to restore the original volume, create new volumes in the same or different Availability Zones, and copy data across Regions. They form a key part of disaster-recovery and migration workflows, allowing users to preserve data durability and quickly re-provision storage. -Official documentation: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-creating-snapshot.html - -## Supported Methods - -- `GET`: Get a snapshot by ID -- `LIST`: List all snapshots -- `SEARCH`: Search snapshots by ARN - -## Possible Links - -### [`ec2-volume`](/sources/aws/Types/ec2-volume) - -A snapshot is created from, and can later be used to recreate, an EBS volume. Overmind links each `ec2-snapshot` to the `ec2-volume` it originated from (and, where relevant, the volumes restored from it), enabling you to trace data lineage and understand the blast radius of any change to the underlying storage. 
diff --git a/docs.overmind.tech/docs/sources/aws/Types/ec2-subnet.md b/docs.overmind.tech/docs/sources/aws/Types/ec2-subnet.md deleted file mode 100644 index 4822a8cd..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/ec2-subnet.md +++ /dev/null @@ -1,24 +0,0 @@ ---- -title: EC2 Subnet -sidebar_label: ec2-subnet ---- - -An EC2 subnet is a logically isolated section of an Amazon Virtual Private Cloud that lets you group resources together and control how traffic flows to and from them. Each subnet resides in a single Availability Zone, inherits the VPC’s CIDR range, and can be configured as public or private depending on whether its routing table points traffic to an Internet Gateway or not. Subnets form the basic building blocks for networking in AWS, determining IP addressing, network reachability, and security-group/network-ACL boundaries. -For full details see the official AWS documentation: https://docs.aws.amazon.com/vpc/latest/userguide/configure-subnets.html - -**Terrafrom Mappings:** - -- `aws_route_table_association.subnet_id` -- `aws_subnet.id` - -## Supported Methods - -- `GET`: Get a subnet by ID -- `LIST`: List all subnets -- `SEARCH`: Search for subnets by ARN - -## Possible Links - -### [`ec2-vpc`](/sources/aws/Types/ec2-vpc) - -Every subnet must belong to exactly one VPC. This relationship allows Overmind to trace how traffic is routed from the subnet through VPC-level components such as Internet Gateways, NAT Gateways, route tables, and network ACLs. 
diff --git a/docs.overmind.tech/docs/sources/aws/Types/ec2-transit-gateway-attachment.md b/docs.overmind.tech/docs/sources/aws/Types/ec2-transit-gateway-attachment.md deleted file mode 100644 index 5b706d45..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/ec2-transit-gateway-attachment.md +++ /dev/null @@ -1,20 +0,0 @@ ---- -title: Transit Gateway Attachment -sidebar_label: ec2-transit-gateway-attachment ---- - -A Transit Gateway attachment connects a resource (VPC, VPN connection, Direct Connect gateway, peering connection, or Connect attachment) to a transit gateway. Attachments are associated with route tables and can have routes propagated to them. - -Official API documentation: [DescribeTransitGatewayAttachments](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeTransitGatewayAttachments.html) - -**Terraform Mappings:** - -- `aws_ec2_transit_gateway_vpc_attachment.id` (VPC) -- `aws_ec2_transit_gateway_vpn_attachment.id` (VPN) -- Other attachment types have corresponding Terraform resources. - -## Supported Methods - -- `GET`: Get a transit gateway attachment by ID -- `LIST`: List all transit gateway attachments -- `SEARCH`: Search by ARN diff --git a/docs.overmind.tech/docs/sources/aws/Types/ec2-transit-gateway-route-table-announcement.md b/docs.overmind.tech/docs/sources/aws/Types/ec2-transit-gateway-route-table-announcement.md deleted file mode 100644 index 878175d5..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/ec2-transit-gateway-route-table-announcement.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Transit Gateway Route Table Announcement -sidebar_label: ec2-transit-gateway-route-table-announcement ---- - -A Transit Gateway Route Table Announcement represents the advertisement of a transit gateway route table to a peer—for example, to another transit gateway (peering) or to an AWS Network Manager core network. 
Routes that originate from such an announcement appear in the route table with a `TransitGatewayRouteTableAnnouncementId`, and Overmind links those [ec2-transit-gateway-route](/sources/aws/Types/ec2-transit-gateway-route) items to this type. - -Official API documentation: [DescribeTransitGatewayRouteTableAnnouncements](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeTransitGatewayRouteTableAnnouncements.html) - -## Note - -Overmind does not currently provide a dedicated adapter for `ec2-transit-gateway-route-table-announcement`. This type is documented because [ec2-transit-gateway-route](/sources/aws/Types/ec2-transit-gateway-route) items can link to it when a route originates from a route table announcement (`TransitGatewayRouteTableAnnouncementId`). diff --git a/docs.overmind.tech/docs/sources/aws/Types/ec2-transit-gateway-route-table-association.md b/docs.overmind.tech/docs/sources/aws/Types/ec2-transit-gateway-route-table-association.md deleted file mode 100644 index e5828353..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/ec2-transit-gateway-route-table-association.md +++ /dev/null @@ -1,40 +0,0 @@ ---- -title: Transit Gateway Route Table Association -sidebar_label: ec2-transit-gateway-route-table-association ---- - -An association links a transit gateway attachment (VPC, VPN, Direct Connect gateway, peering, or Connect) to a transit gateway route table. Traffic for that attachment is routed according to the route table. 
- -Official API documentation: [GetTransitGatewayRouteTableAssociations](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetTransitGatewayRouteTableAssociations.html) - -**Terraform Mappings:** - -- `aws_ec2_transit_gateway_route_table_association.id` - -## Supported Methods - -- `GET`: Get by composite ID `TransitGatewayRouteTableId|TransitGatewayAttachmentId` -- `LIST`: List all route table associations (across all route tables in the scope) -- `SEARCH`: Search by `TransitGatewayRouteTableId` to list all associations for that route table (used by the route table’s link to associations) - -## Possible Links - -### [`ec2-transit-gateway-route-table`](/sources/aws/Types/ec2-transit-gateway-route-table) - -The route table that the attachment is associated with. - -### [`ec2-transit-gateway-attachment`](/sources/aws/Types/ec2-transit-gateway-attachment) - -The transit gateway attachment that is associated with the route table. - -### [`ec2-vpc`](/sources/aws/Types/ec2-vpc) - -When the attachment resource type is VPC, the linked VPC. - -### [`ec2-vpn-connection`](/sources/aws/Types/ec2-vpn-connection) - -When the attachment resource type is VPN, the linked VPN connection. - -### [`directconnect-direct-connect-gateway`](/sources/aws/Types/directconnect-direct-connect-gateway) - -When the attachment resource type is Direct Connect gateway, the linked Direct Connect gateway. 
diff --git a/docs.overmind.tech/docs/sources/aws/Types/ec2-transit-gateway-route-table-propagation.md b/docs.overmind.tech/docs/sources/aws/Types/ec2-transit-gateway-route-table-propagation.md deleted file mode 100644 index ed68c34f..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/ec2-transit-gateway-route-table-propagation.md +++ /dev/null @@ -1,44 +0,0 @@ ---- -title: Transit Gateway Route Table Propagation -sidebar_label: ec2-transit-gateway-route-table-propagation ---- - -A propagation enables a transit gateway route table to automatically learn routes from an attachment (VPC, VPN, Direct Connect gateway, peering, or Connect). When propagation is enabled, routes from that attachment appear in the route table. - -Official API documentation: [GetTransitGatewayRouteTablePropagations](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetTransitGatewayRouteTablePropagations.html) - -**Terraform Mappings:** - -- `aws_ec2_transit_gateway_route_table_propagation.id` - -## Supported Methods - -- `GET`: Get by composite ID `TransitGatewayRouteTableId|TransitGatewayAttachmentId` -- `LIST`: List all route table propagations (across all route tables in the scope) -- `SEARCH`: Search by `TransitGatewayRouteTableId` to list all propagations for that route table (used by the route table’s link to propagations) - -## Possible Links - -### [`ec2-transit-gateway-route-table`](/sources/aws/Types/ec2-transit-gateway-route-table) - -The route table that is propagating routes from the attachment. - -### [`ec2-transit-gateway-route-table-association`](/sources/aws/Types/ec2-transit-gateway-route-table-association) - -The route table association for the same route table and attachment (same composite ID). Links propagation and association in the graph. - -### [`ec2-transit-gateway-attachment`](/sources/aws/Types/ec2-transit-gateway-attachment) - -The attachment whose routes are being propagated into the route table. 
- -### [`ec2-vpc`](/sources/aws/Types/ec2-vpc) - -When the attachment resource type is VPC, the linked VPC. - -### [`ec2-vpn-connection`](/sources/aws/Types/ec2-vpn-connection) - -When the attachment resource type is VPN, the linked VPN connection. - -### [`directconnect-direct-connect-gateway`](/sources/aws/Types/directconnect-direct-connect-gateway) - -When the attachment resource type is Direct Connect gateway, the linked Direct Connect gateway. diff --git a/docs.overmind.tech/docs/sources/aws/Types/ec2-transit-gateway-route-table.md b/docs.overmind.tech/docs/sources/aws/Types/ec2-transit-gateway-route-table.md deleted file mode 100644 index f0efd09c..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/ec2-transit-gateway-route-table.md +++ /dev/null @@ -1,36 +0,0 @@ ---- -title: Transit Gateway Route Table -sidebar_label: ec2-transit-gateway-route-table ---- - -A Transit Gateway Route Table determines how traffic is routed for attachments (VPCs, VPNs, Direct Connect gateways, peering connections, or Connect attachments) that are associated with it. Each transit gateway has a default route table; you can create additional route tables to control which attachments can reach which routes. - -Official API documentation: [DescribeTransitGatewayRouteTables](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeTransitGatewayRouteTables.html) - -**Terraform Mappings:** - -- `aws_ec2_transit_gateway_route_table.id` - -## Supported Methods - -- `GET`: Get a transit gateway route table by ID -- `LIST`: List all transit gateway route tables -- `SEARCH`: Search transit gateway route tables by ARN - -## Possible Links - -### [`ec2-transit-gateway`](/sources/aws/Types/ec2-transit-gateway) - -Each transit gateway route table belongs to a single transit gateway. The route table controls routing for attachments that are associated with it. 
- -### [`ec2-transit-gateway-route-table-association`](/sources/aws/Types/ec2-transit-gateway-route-table-association) - -Associations for this route table (Search by route table ID). Each association links an attachment to this route table. - -### [`ec2-transit-gateway-route-table-propagation`](/sources/aws/Types/ec2-transit-gateway-route-table-propagation) - -Propagations for this route table (Search by route table ID). Each propagation enables the route table to learn routes from an attachment. - -### [`ec2-transit-gateway-route`](/sources/aws/Types/ec2-transit-gateway-route) - -Routes in this route table (Search by route table ID). diff --git a/docs.overmind.tech/docs/sources/aws/Types/ec2-transit-gateway-route.md b/docs.overmind.tech/docs/sources/aws/Types/ec2-transit-gateway-route.md deleted file mode 100644 index 361596ce..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/ec2-transit-gateway-route.md +++ /dev/null @@ -1,52 +0,0 @@ ---- -title: Transit Gateway Route -sidebar_label: ec2-transit-gateway-route ---- - -A route in a transit gateway route table. Each route has a destination (CIDR or prefix list) and a target (attachment or resource). Routes can be static or propagated from attachments. - -Official API documentation: [SearchTransitGatewayRoutes](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_SearchTransitGatewayRoutes.html) - -**Terraform Mappings:** - -- `aws_ec2_transit_gateway_route.id` - -## Supported Methods - -- `GET`: Get by composite ID `TransitGatewayRouteTableId|Destination`, where Destination is a CIDR (e.g. `10.0.0.0/16`) or prefix list (e.g. 
`pl:PrefixListId`) -- `LIST`: List all transit gateway routes (across all route tables in the scope) -- `SEARCH`: Search by `TransitGatewayRouteTableId` to list all routes in that route table (used by the route table’s link to routes) - -## Possible Links - -### [`ec2-transit-gateway-route-table`](/sources/aws/Types/ec2-transit-gateway-route-table) - -The route table that contains this route. - -### [`ec2-transit-gateway-route-table-association`](/sources/aws/Types/ec2-transit-gateway-route-table-association) - -For each attachment that this route targets, the corresponding route table association (same route table and attachment). Links routes and associations in the graph. - -### [`ec2-transit-gateway-attachment`](/sources/aws/Types/ec2-transit-gateway-attachment) - -Each attachment that this route targets (from the route’s `TransitGatewayAttachments`). - -### [`ec2-transit-gateway-route-table-announcement`](/sources/aws/Types/ec2-transit-gateway-route-table-announcement) - -When the route originates from a route table announcement, the linked transit gateway route table announcement. - -### [`ec2-vpc`](/sources/aws/Types/ec2-vpc) - -When a route attachment’s resource type is VPC, the linked VPC. - -### [`ec2-vpn-connection`](/sources/aws/Types/ec2-vpn-connection) - -When a route attachment’s resource type is VPN, the linked VPN connection. - -### [`ec2-managed-prefix-list`](/sources/aws/Types/ec2-managed-prefix-list) - -When the route destination is a prefix list (instead of a CIDR), the managed prefix list. - -### [`directconnect-direct-connect-gateway`](/sources/aws/Types/directconnect-direct-connect-gateway) - -When a route attachment’s resource type is Direct Connect gateway, the linked Direct Connect gateway. 
diff --git a/docs.overmind.tech/docs/sources/aws/Types/ec2-transit-gateway.md b/docs.overmind.tech/docs/sources/aws/Types/ec2-transit-gateway.md deleted file mode 100644 index 70ade350..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/ec2-transit-gateway.md +++ /dev/null @@ -1,18 +0,0 @@ ---- -title: Transit Gateway -sidebar_label: ec2-transit-gateway ---- - -An AWS Transit Gateway is a network transit hub that you use to interconnect your VPCs and on-premises networks. Each transit gateway has a default route table and can have additional route tables to control routing for attachments (VPCs, VPNs, Direct Connect gateways, peering, Connect). - -Official API documentation: [DescribeTransitGateways](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeTransitGateways.html) - -**Terraform Mappings:** - -- `aws_ec2_transit_gateway.id` - -## Supported Methods - -- `GET`: Get a transit gateway by ID -- `LIST`: List all transit gateways -- `SEARCH`: Search transit gateways by ARN diff --git a/docs.overmind.tech/docs/sources/aws/Types/ec2-volume-status.md b/docs.overmind.tech/docs/sources/aws/Types/ec2-volume-status.md deleted file mode 100644 index 0a8becbb..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/ec2-volume-status.md +++ /dev/null @@ -1,19 +0,0 @@ ---- -title: EC2 Volume Status -sidebar_label: ec2-volume-status ---- - -The EC2 Volume Status resource represents the health information that AWS exposes for every Amazon Elastic Block Store (EBS) volume. Derived from the `DescribeVolumeStatus` API call, it records the results of automated status checks, any events that might affect I/O, and recommended user actions. Monitoring these objects in Overmind lets you spot degraded or impaired volumes before they compromise a deployment. 
-For a complete description of the data returned by AWS, see the official documentation: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVolumeStatus.html - -## Supported Methods - -- `GET`: Get a volume status by volume ID -- `LIST`: List all volume statuses -- `SEARCH`: Search for volume statuses by ARN - -## Possible Links - -### [`ec2-instance`](/sources/aws/Types/ec2-instance) - -A Volume Status relates to the EC2 instance that the underlying EBS volume is currently attached to, if any. Overmind links the status object to the instance so you can trace how a failing or impaired volume might impact the workloads running on that instance. diff --git a/docs.overmind.tech/docs/sources/aws/Types/ec2-volume.md b/docs.overmind.tech/docs/sources/aws/Types/ec2-volume.md deleted file mode 100644 index ab7b8ee3..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/ec2-volume.md +++ /dev/null @@ -1,22 +0,0 @@ ---- -title: EC2 Volume -sidebar_label: ec2-volume ---- - -An Amazon Elastic Block Store (EBS) volume provides persistent block-level storage for use with Amazon EC2 instances. Volumes can be attached to a single instance at a time (or multiple instances when using Multi-Attach), and retain their data independently of the life-cycle of that instance. Sizes, performance characteristics and encryption settings are configurable, allowing teams to tailor storage to the workload’s needs. Full service behaviour is documented by AWS here: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumes.html - -**Terrafrom Mappings:** - -- `aws_ebs_volume.id` - -## Supported Methods - -- `GET`: Get a volume by ID -- `LIST`: List all volumes -- `SEARCH`: Search volumes by ARN - -## Possible Links - -### [`ec2-instance`](/sources/aws/Types/ec2-instance) - -A volume may be attached to, detached from or created alongside an EC2 instance. 
Overmind links the two resources so you can trace how storage changes could affect, or be affected by, the compute resource that consumes it. diff --git a/docs.overmind.tech/docs/sources/aws/Types/ec2-vpc-endpoint.md b/docs.overmind.tech/docs/sources/aws/Types/ec2-vpc-endpoint.md deleted file mode 100644 index 5a2596b0..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/ec2-vpc-endpoint.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -title: VPC Endpoint -sidebar_label: ec2-vpc-endpoint ---- - -A VPC Endpoint is an elastic network interface or gateway that enables private connectivity between resources inside an Amazon Virtual Private Cloud (VPC) and supported AWS or third-party services, without traversing the public internet. By routing traffic through the AWS network, VPC Endpoints improve security, reduce latency and remove the need for NAT devices, VPNs or Direct Connect links. For full details, see the AWS documentation: https://docs.aws.amazon.com/vpc/latest/userguide/vpc-endpoints.html - -**Terrafrom Mappings:** - -- `aws_vpc_endpoint.id` - -## Supported Methods - -- `GET`: Get a VPC Endpoint by ID -- `LIST`: List all VPC Endpoints -- `SEARCH`: Search VPC Endpoints by ARN diff --git a/docs.overmind.tech/docs/sources/aws/Types/ec2-vpc-peering-connection.md b/docs.overmind.tech/docs/sources/aws/Types/ec2-vpc-peering-connection.md deleted file mode 100644 index a108c04b..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/ec2-vpc-peering-connection.md +++ /dev/null @@ -1,25 +0,0 @@ ---- -title: VPC Peering Connection -sidebar_label: ec2-vpc-peering-connection ---- - -A VPC Peering Connection enables you to route traffic privately between two Virtual Private Clouds (VPCs) without traversing the public internet. Peering can be established between VPCs in the same AWS account or across different AWS accounts, and—subject to region support—across regions. 
It is commonly used for micro-service communication, shared services networks, or multi-account architectures where low-latency, high-bandwidth connectivity with AWS-managed security controls is required. -For full details, refer to the official AWS documentation: https://docs.aws.amazon.com/vpc/latest/peering/what-is-vpc-peering.html - -**Terrafrom Mappings:** - -- `aws_vpc_peering_connection.id` -- `aws_vpc_peering_connection_accepter.id` -- `aws_vpc_peering_connection_options.vpc_peering_connection_id` - -## Supported Methods - -- `GET`: Get a VPC Peering Connection by ID -- `LIST`: List all VPC Peering Connections -- `SEARCH`: Search for VPC Peering Connections by their ARN - -## Possible Links - -### [`ec2-vpc`](/sources/aws/Types/ec2-vpc) - -Each VPC Peering Connection has exactly two endpoints—a requester VPC and an accepter VPC. Linking to the `ec2-vpc` resource allows Overmind to show which VPCs are joined by a given peering connection and, conversely, which peering connections a particular VPC participates in. diff --git a/docs.overmind.tech/docs/sources/aws/Types/ec2-vpc.md b/docs.overmind.tech/docs/sources/aws/Types/ec2-vpc.md deleted file mode 100644 index 54132959..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/ec2-vpc.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -title: VPC -sidebar_label: ec2-vpc ---- - -An Amazon Virtual Private Cloud (VPC) is a logically isolated section of AWS in which you can launch and manage AWS resources within a virtual network that you define. Within a VPC you control IP address ranges, subnets, route tables, network gateways, security groups, and network ACLs, allowing you to shape how traffic flows to and from your workloads while keeping them isolated from, or connected to, the public Internet and other VPCs as required. For a full overview, see the official AWS documentation: https://docs.aws.amazon.com/vpc/latest/userguide/what-is-amazon-vpc.html. 
- -**Terrafrom Mappings:** - -- `aws_vpc.id` - -## Supported Methods - -- `GET`: Get a VPC by ID -- `LIST`: List all VPCs -- ~~`SEARCH`~~ diff --git a/docs.overmind.tech/docs/sources/aws/Types/ec2-vpn-connection.md b/docs.overmind.tech/docs/sources/aws/Types/ec2-vpn-connection.md deleted file mode 100644 index 46bf1cdc..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/ec2-vpn-connection.md +++ /dev/null @@ -1,18 +0,0 @@ ---- -title: VPN Connection -sidebar_label: ec2-vpn-connection ---- - -An AWS Site-to-Site VPN connection links your on-premises network to your VPC (or to a transit gateway) over an encrypted IPsec tunnel. VPN connections can be attached to a transit gateway for use in a hub-and-spoke topology. - -Official API documentation: [DescribeVpnConnections](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVpnConnections.html) - -**Terraform Mappings:** - -- `aws_vpn_connection.id` - -## Supported Methods - -- `GET`: Get a VPN connection by ID -- `LIST`: List all VPN connections -- `SEARCH`: Search by ARN diff --git a/docs.overmind.tech/docs/sources/aws/Types/ecs-capacity-provider.md b/docs.overmind.tech/docs/sources/aws/Types/ecs-capacity-provider.md deleted file mode 100644 index be279e71..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/ecs-capacity-provider.md +++ /dev/null @@ -1,23 +0,0 @@ ---- -title: Capacity Provider -sidebar_label: ecs-capacity-provider ---- - -An Amazon ECS capacity provider tells a cluster where its compute capacity comes from and how that capacity should scale. It can point to an Auto Scaling group of EC2 instances or to the serverless Fargate/Fargate Spot capacity pools, and it contains rules that determine when and how instances are launched or terminated to satisfy task demand. Using capacity providers allows platform teams to separate scaling logic from task scheduling and to adopt multiple capacity sources within a single cluster. 
-For complete details see the official AWS documentation: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/cluster-capacity-providers.html - -**Terrafrom Mappings:** - -- `aws_ecs_capacity_provider.arn` - -## Supported Methods - -- `GET`: Get a capacity provider by its short name or full Amazon Resource Name (ARN). -- `LIST`: List capacity providers. -- `SEARCH`: Search capacity providers by ARN - -## Possible Links - -### [`autoscaling-auto-scaling-group`](/sources/aws/Types/autoscaling-auto-scaling-group) - -A capacity provider that is backed by EC2 instances references exactly one Auto Scaling group. The link lets you trace from the capacity provider to the group that actually supplies instances, making it easy to understand which fleet of instances will scale in response to ECS task demand and to assess risks such as insufficient instance types, mis-configured scaling policies, or conflicting lifecycle hooks. diff --git a/docs.overmind.tech/docs/sources/aws/Types/ecs-cluster.md b/docs.overmind.tech/docs/sources/aws/Types/ecs-cluster.md deleted file mode 100644 index 937d1c8d..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/ecs-cluster.md +++ /dev/null @@ -1,35 +0,0 @@ ---- -title: ECS Cluster -sidebar_label: ecs-cluster ---- - -An Amazon ECS (Elastic Container Service) cluster is a logical grouping of tasks or services. It acts as the fundamental boundary for scheduling, networking and capacity management in ECS: every task or service is launched into exactly one cluster, and the cluster manages the resources on which containers run. 
-For full details see the official AWS documentation: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html - -**Terrafrom Mappings:** - -- `aws_ecs_cluster.arn` - -## Supported Methods - -- `GET`: Get a cluster by name -- `LIST`: List all clusters -- `SEARCH`: Search for a cluster by ARN - -## Possible Links - -### [`ecs-container-instance`](/sources/aws/Types/ecs-container-instance) - -An ECS cluster is composed of zero or more container instances (EC2 hosts or AWS Fargate-managed capacity). Each `ecs-container-instance` record represents a specific compute resource that has registered itself to the cluster and is available for running tasks. - -### [`ecs-service`](/sources/aws/Types/ecs-service) - -Services define long-running workloads that are maintained by ECS within the cluster. Every `ecs-service` is created inside a particular cluster and relies on the cluster’s scheduler to place and maintain tasks according to the service definition. - -### [`ecs-task`](/sources/aws/Types/ecs-task) - -Tasks are the running instantiations of container definitions. When a task is started, it is launched into a specific cluster; therefore every `ecs-task` is linked back to the cluster that provided the capacity and networking for it. - -### [`ecs-capacity-provider`](/sources/aws/Types/ecs-capacity-provider) - -Capacity providers control how ECS acquires compute capacity for a cluster (e.g. Fargate, Auto Scaling groups). A cluster may have one or more `ecs-capacity-provider` resources associated with it, and those associations determine how tasks and services within the cluster obtain the underlying compute resources they require. 
diff --git a/docs.overmind.tech/docs/sources/aws/Types/ecs-container-instance.md b/docs.overmind.tech/docs/sources/aws/Types/ecs-container-instance.md deleted file mode 100644 index 0d0c65d4..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/ecs-container-instance.md +++ /dev/null @@ -1,18 +0,0 @@ ---- -title: Container Instance -sidebar_label: ecs-container-instance ---- - -A container instance represents an Amazon EC2 host that has been registered to an Amazon ECS cluster and is therefore available for running one or more ECS tasks. Each container instance runs the ECS agent and reports its status, resource availability and running tasks back to the cluster’s control plane. For a detailed explanation of container instances, provisioning requirements, and lifecycle behaviour, see the official AWS documentation: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html - -## Supported Methods - -- `GET`: Get a container instance by ID which consists of `{clusterName}/{id}` -- ~~`LIST`~~ -- `SEARCH`: Search for container instances by cluster - -## Possible Links - -### [`ec2-instance`](/sources/aws/Types/ec2-instance) - -Every container instance is physically an Amazon EC2 instance. Linking to the `ec2-instance` type allows Overmind to surface the underlying compute resource, including its security groups, IAM roles and network configuration, all of which can influence the risk profile of the container instance and any tasks scheduled on it. 
diff --git a/docs.overmind.tech/docs/sources/aws/Types/ecs-service.md b/docs.overmind.tech/docs/sources/aws/Types/ecs-service.md deleted file mode 100644 index 4b22455b..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/ecs-service.md +++ /dev/null @@ -1,43 +0,0 @@ ---- -title: ECS Service -sidebar_label: ecs-service ---- - -An Amazon Elastic Container Service (ECS) **service** is the long-running, scalable unit that maintains a specified number of copies of a task definition running on an ECS cluster. The service schedules tasks either on EC2 instances or on Fargate, monitors their health, replaces unhealthy tasks and, when configured, integrates with Elastic Load Balancing and AWS Service Discovery. -For a full description see the AWS documentation: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs_services.html - -**Terrafrom Mappings:** - -- `aws_ecs_service.cluster_name` - -## Supported Methods - -- `GET`: Get an ECS service by full name (`{clusterName}/{id}`) -- ~~`LIST`~~ -- `SEARCH`: Search for ECS services by cluster - -## Possible Links - -### [`ecs-cluster`](/sources/aws/Types/ecs-cluster) - -The service is deployed into exactly one ECS cluster, so each ecs-service will have a **`parent`** relationship to the corresponding `ecs-cluster`. - -### [`elbv2-target-group`](/sources/aws/Types/elbv2-target-group) - -If the service is configured with a load balancer, it registers its tasks as targets in one or more ELBv2 target groups; Overmind creates a **`uses`** link from the service to every target group referenced in its loadBalancer or serviceConnect configuration. - -### [`ecs-task-definition`](/sources/aws/Types/ecs-task-definition) - -A service runs a specific revision of a task definition. There is therefore a **`depends_on`** link from the service to the task definition ARN specified in `taskDefinition`. 
- -### [`ecs-capacity-provider`](/sources/aws/Types/ecs-capacity-provider) - -When a capacity provider strategy is attached, the service relies on one or more capacity providers for scheduling. Overmind shows a **`uses`** link to each referenced `ecs-capacity-provider`. - -### [`ec2-subnet`](/sources/aws/Types/ec2-subnet) - -For services that use the `awsvpc` network mode (Fargate or ENI-aware EC2 launch type), the service’s tasks are launched inside specific subnets defined in the service’s network configuration; those subnets are exposed via **`uses`** links. - -### [`dns`](/sources/stdlib/Types/dns) - -If AWS Cloud Map service discovery is enabled, the ECS service automatically creates DNS records (A, AAAA, or SRV) for its tasks. Overmind surfaces a **`creates`** link to the resultant DNS names. diff --git a/docs.overmind.tech/docs/sources/aws/Types/ecs-task-definition.md b/docs.overmind.tech/docs/sources/aws/Types/ecs-task-definition.md deleted file mode 100644 index 3df84160..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/ecs-task-definition.md +++ /dev/null @@ -1,27 +0,0 @@ ---- -title: Task Definition -sidebar_label: ecs-task-definition ---- - -An Amazon ECS task definition is the blueprint that tells AWS ECS how to run one or more containers. It specifies details such as the container images, CPU and memory requirements, networking mode, logging configuration, IAM roles, and secrets that should be injected into the containers. Each time you register a new version, ECS creates a new immutable revision that can be referenced directly or through the family name. 
-For full details, see the official AWS documentation: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html - -**Terrafrom Mappings:** - -- `aws_ecs_task_definition.family` - -## Supported Methods - -- `GET`: Get a task definition by revision name (`{family}:{revision}`) -- `LIST`: List all task definitions -- `SEARCH`: Search for task definitions by ARN - -## Possible Links - -### [`iam-role`](/sources/aws/Types/iam-role) - -A task definition can reference an IAM role through `taskRoleArn` and/or `executionRoleArn`. These roles grant the running containers the permissions they need to interact with other AWS services or to pull private images and write logs. Overmind links the task definition to the IAM role resources so you can see the exact permissions that will be in effect at runtime. - -### [`ssm-parameter`](/sources/aws/Types/ssm-parameter) - -Environment variables or secrets defined in a task definition can be sourced from AWS Systems Manager Parameter Store. Whenever a task definition lists an SSM parameter (e.g., via the `secrets` block), Overmind surfaces a link to the corresponding `ssm-parameter` item, allowing you to trace where sensitive configuration values originate and assess the impact of changes. diff --git a/docs.overmind.tech/docs/sources/aws/Types/ecs-task.md b/docs.overmind.tech/docs/sources/aws/Types/ecs-task.md deleted file mode 100644 index 1d78ca8d..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/ecs-task.md +++ /dev/null @@ -1,35 +0,0 @@ ---- -title: ECS Task -sidebar_label: ecs-task ---- - -An ECS task is the fundamental unit of work that runs on Amazon Elastic Container Service (ECS). It represents one instantiation of a task definition: a group of one or more Docker containers that are deployed together on the same host. A task lives within an ECS cluster and may run on EC2 instances or on AWS Fargate. 
The task record captures runtime information such as status, start/stop times, allocated network interfaces and resource utilisation. -For full details, see the AWS documentation: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs_tasks.html - -## Supported Methods - -- `GET`: Get an ECS task by ID -- ~~`LIST`~~ -- `SEARCH`: Search for ECS tasks by cluster - -## Possible Links - -### [`ecs-cluster`](/sources/aws/Types/ecs-cluster) - -The task is launched inside exactly one ECS cluster, so Overmind links each task back to the cluster that owns it. - -### [`ecs-container-instance`](/sources/aws/Types/ecs-container-instance) - -For tasks that use the EC2 launch type, the task runs on a specific ECS container instance (an EC2 host registered with the cluster). Overmind links the task to the container instance on which it is currently placed. - -### [`ecs-task-definition`](/sources/aws/Types/ecs-task-definition) - -Every task is an instantiation of a task definition. Overmind records this relationship so you can trace configuration changes in the task definition that may affect a running task. - -### [`ec2-network-interface`](/sources/aws/Types/ec2-network-interface) - -When a task uses the `awsvpc` network mode (or is a Fargate task), AWS allocates one or more elastic network interfaces (ENIs) to the task. These ENIs are linked so you can observe associated security groups, subnets and IP addresses. - -### [`ip`](/sources/aws/Types/networkmanager-network-resource-relationship) - -Each ENI attached to the task is assigned private (and optionally public) IP addresses. Overmind surfaces these IP resources, allowing you to see which IPs are in use by a given task and how they propagate through your network topology. 
diff --git a/docs.overmind.tech/docs/sources/aws/Types/efs-access-point.md b/docs.overmind.tech/docs/sources/aws/Types/efs-access-point.md deleted file mode 100644 index 8a0c8cd6..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/efs-access-point.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -title: EFS Access Point -sidebar_label: efs-access-point ---- - -Amazon Elastic File System (EFS) Access Points are application-specific entry points into an EFS file system. Each access point can enforce a unique POSIX user, group and root directory, allowing multiple workloads or tenants to share the same file system while maintaining separation and least-privilege access. Access points are commonly used to simplify permissions when deploying containers, serverless functions or batch jobs that need shared storage. -Official AWS documentation: https://docs.aws.amazon.com/efs/latest/ug/efs-access-points.html - -**Terrafrom Mappings:** - -- `aws_efs_access_point.id` - -## Supported Methods - -- `GET`: Get an access point by ID -- `LIST`: List all access points -- `SEARCH`: Search for an access point by ARN diff --git a/docs.overmind.tech/docs/sources/aws/Types/efs-backup-policy.md b/docs.overmind.tech/docs/sources/aws/Types/efs-backup-policy.md deleted file mode 100644 index 90c97b0d..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/efs-backup-policy.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -title: EFS Backup Policy -sidebar_label: efs-backup-policy ---- - -An EFS Backup Policy represents the setting on an Amazon Elastic File System (EFS) file system that turns automatic, daily AWS Backup protection on or off. When the policy is enabled, AWS Backup creates incremental backups of the file system and retains them according to the configured backup plan; when it is disabled, the file system is excluded from automated protection. 
Managing this resource helps ensure that critical data stored in EFS is covered by a consistent backup and retention strategy, reducing the risk of accidental data loss. -For full details, see the official AWS documentation: https://docs.aws.amazon.com/efs/latest/ug/awsbackup.html - -**Terrafrom Mappings:** - -- `aws_efs_backup_policy.id` - -## Supported Methods - -- `GET`: Get an Backup Policy by file system ID -- ~~`LIST`~~ -- `SEARCH`: Search for an Backup Policy by ARN diff --git a/docs.overmind.tech/docs/sources/aws/Types/efs-file-system.md b/docs.overmind.tech/docs/sources/aws/Types/efs-file-system.md deleted file mode 100644 index 5798dd98..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/efs-file-system.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -title: EFS File System -sidebar_label: efs-file-system ---- - -Amazon Elastic File System (EFS) provides a scalable, elastic and fully-managed Network File System (NFS) that can be mounted concurrently by multiple AWS compute services, including EC2, Lambda and containers. It automatically grows and shrinks as you add or remove data, removing the need to provision storage up front, and offers high availability across multiple Availability Zones. 
For a full overview, refer to the official AWS documentation: https://docs.aws.amazon.com/efs/latest/ug/whatisefs.html - -**Terrafrom Mappings:** - -- `aws_efs_file_system.id` - -## Supported Methods - -- `GET`: Get a file system by ID -- `LIST`: List file systems -- `SEARCH`: Search file systems by ARN diff --git a/docs.overmind.tech/docs/sources/aws/Types/efs-mount-target.md b/docs.overmind.tech/docs/sources/aws/Types/efs-mount-target.md deleted file mode 100644 index 14900d71..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/efs-mount-target.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -title: EFS Mount Target -sidebar_label: efs-mount-target ---- - -An EFS Mount Target is a network endpoint that resides in a specific subnet inside your VPC and exposes an Amazon Elastic File System (EFS) file system to compute resources such as EC2 instances, ECS tasks, Lambda functions and other AWS services. By creating one mount target in each Availability Zone where the file system will be accessed, you ensure low-latency, highly available access to shared file storage. Each mount target can be associated with one or more security groups, allowing fine-grained control over which clients can connect to the file system. 
-For further details, refer to the official AWS documentation: https://docs.aws.amazon.com/efs/latest/ug/efs-mount-targets.html - -**Terrafrom Mappings:** - -- `aws_efs_mount_target.id` - -## Supported Methods - -- `GET`: Get an mount target by ID -- ~~`LIST`~~ -- `SEARCH`: Search for mount targets by file system ID diff --git a/docs.overmind.tech/docs/sources/aws/Types/efs-replication-configuration.md b/docs.overmind.tech/docs/sources/aws/Types/efs-replication-configuration.md deleted file mode 100644 index 3728b8e9..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/efs-replication-configuration.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -title: EFS Replication Configuration -sidebar_label: efs-replication-configuration ---- - -An Amazon Elastic File System (EFS) Replication Configuration defines how an EFS file system is asynchronously replicated to another AWS Region or Availability Zone, providing disaster-recovery protection and enhanced data durability. By creating a replication configuration you specify the source file system, the destination Region, and the encryption and retention settings for the replica. Replication occurs automatically and continuously, with recovery point objectives (RPO) typically within minutes, allowing you to fail over quickly if the primary file system becomes unavailable. 
-For full details, refer to the AWS documentation: https://docs.aws.amazon.com/efs/latest/ug/efs-replication.html - -**Terrafrom Mappings:** - -- `aws_efs_replication_configuration.source_file_system_id` - -## Supported Methods - -- `GET`: Get a replication configuration by file system ID -- `LIST`: List all replication configurations -- `SEARCH`: Search for a replication configuration by ARN diff --git a/docs.overmind.tech/docs/sources/aws/Types/eks-addon.md b/docs.overmind.tech/docs/sources/aws/Types/eks-addon.md deleted file mode 100644 index 9ddd3e89..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/eks-addon.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -title: EKS Addon -sidebar_label: eks-addon ---- - -An Amazon EKS Addon is an AWS-managed installation of common operational software—such as CoreDNS, kube-proxy, the Amazon VPC CNI plugin or the Amazon EBS CSI driver—onto an Amazon Elastic Kubernetes Service (EKS) cluster. Addons let you declare the component, version and configuration you want, while AWS takes care of deployment, upgrades, security patches and ongoing lifecycle management. Using addons keeps the cluster’s critical services consistent and up to date without manual intervention. 
-For more information, see the official AWS documentation on EKS Add-ons: https://docs.aws.amazon.com/eks/latest/userguide/eks-add-ons.html - -**Terrafrom Mappings:** - -- `aws_eks_addon.id` - -## Supported Methods - -- `GET`: Get an addon by unique name (`{clusterName}:{addonName}`) -- ~~`LIST`~~ -- `SEARCH`: Search addons by cluster name diff --git a/docs.overmind.tech/docs/sources/aws/Types/eks-cluster.md b/docs.overmind.tech/docs/sources/aws/Types/eks-cluster.md deleted file mode 100644 index a8fc0c25..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/eks-cluster.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -title: EKS Cluster -sidebar_label: eks-cluster ---- - -Amazon Elastic Kubernetes Service (EKS) is a managed Kubernetes control plane that allows you to run Kubernetes workloads on AWS without the operational overhead of managing the underlying master nodes. An EKS cluster handles tasks such as control-plane provisioning, scalability, high availability and automatic patching, while letting you attach one or more node groups (either managed or self-managed) to run your containerised applications. See the official AWS documentation for full details: https://docs.aws.amazon.com/eks/latest/userguide/what-is-eks.html - -**Terraform Mappings:** - -- `aws_eks_cluster.arn` - -## Supported Methods - -- `GET`: Get a cluster by name -- `LIST`: List all clusters -- `SEARCH`: Search for clusters by ARN diff --git a/docs.overmind.tech/docs/sources/aws/Types/eks-fargate-profile.md b/docs.overmind.tech/docs/sources/aws/Types/eks-fargate-profile.md deleted file mode 100644 index 3653feb8..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/eks-fargate-profile.md +++ /dev/null @@ -1,26 +0,0 @@ ---- -title: Fargate Profile -sidebar_label: eks-fargate-profile ---- - -An Amazon EKS Fargate profile tells EKS which pods in a cluster should run on AWS Fargate rather than on self-managed or managed EC2 worker nodes. 
It contains a set of selectors (namespace and optional labels) and the networking configuration (subnets and the pod execution IAM role) that EKS will use when it launches Fargate tasks on your behalf. See the official documentation for full details: https://docs.aws.amazon.com/eks/latest/userguide/fargate-profile.html - -**Terrafrom Mappings:** - -- `aws_eks_fargate_profile.id` - -## Supported Methods - -- `GET`: Get a fargate profile by unique name (`{clusterName}:{FargateProfileName}`) -- ~~`LIST`~~ -- `SEARCH`: Search for fargate profiles by cluster name - -## Possible Links - -### [`iam-role`](/sources/aws/Types/iam-role) - -Each Fargate profile references a “pod execution role”, an IAM role that grants EKS permission to pull container images and publish pod logs when it provisions the Fargate tasks. Overmind therefore creates a link from the profile to the IAM role specified in `pod_execution_role_arn`. - -### [`ec2-subnet`](/sources/aws/Types/ec2-subnet) - -The profile’s `subnet_ids` field defines the VPC subnets into which the Fargate pods will be launched. Overmind links the profile to every subnet listed, helping you trace network reachability and security-group inheritance for the pods that will run under this profile. diff --git a/docs.overmind.tech/docs/sources/aws/Types/eks-nodegroup.md b/docs.overmind.tech/docs/sources/aws/Types/eks-nodegroup.md deleted file mode 100644 index 85b58201..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/eks-nodegroup.md +++ /dev/null @@ -1,38 +0,0 @@ ---- -title: EKS Nodegroup -sidebar_label: eks-nodegroup ---- - -Amazon EKS managed node groups are a higher-level abstraction that simplifies the provision and lifecycle management of the worker nodes that run your Kubernetes pods. Instead of creating and operating the underlying Amazon EC2 instances yourself, you declare the desired configuration (instance types, scaling parameters, AMI, etc.) and EKS creates and manages an Auto Scaling group on your behalf. 
See the official AWS documentation for full details: https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html - -**Terrafrom Mappings:** - -- `aws_eks_node_group.id` - -## Supported Methods - -- `GET`: Get a node group by unique name (`{clusterName}:{NodegroupName}`) -- ~~`LIST`~~ -- `SEARCH`: Search for node groups by cluster name - -## Possible Links - -### [`ec2-key-pair`](/sources/aws/Types/ec2-key-pair) - -If “remote access” is enabled, a node group references an EC2 key pair to allow SSH access to the worker nodes. This creates a dependency on the specified key pair. - -### [`ec2-security-group`](/sources/aws/Types/ec2-security-group) - -Each node group attaches one or more security groups to the network interfaces of its nodes. These security groups control inbound and outbound traffic to the worker nodes. - -### [`ec2-subnet`](/sources/aws/Types/ec2-subnet) - -When you create a node group you must provide a list of subnets where the nodes will be launched. The node group therefore depends on, and is constrained by, the networking configuration of those subnets. - -### [`autoscaling-auto-scaling-group`](/sources/aws/Types/autoscaling-auto-scaling-group) - -Behind the scenes, a managed node group is realised as an Auto Scaling group. Changes to the node group propagate directly to its underlying Auto Scaling group. - -### [`ec2-launch-template`](/sources/aws/Types/ec2-launch-template) - -You can optionally supply a custom launch template to define advanced EC2 settings (user data, tags, block-device mappings, etc.) for the nodes. When used, the node group links to that launch template. 
diff --git a/docs.overmind.tech/docs/sources/aws/Types/elb-instance-health.md b/docs.overmind.tech/docs/sources/aws/Types/elb-instance-health.md deleted file mode 100644 index 713545df..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/elb-instance-health.md +++ /dev/null @@ -1,19 +0,0 @@ ---- -title: ELB Instance Health -sidebar_label: elb-instance-health ---- - -An ELB Instance Health resource represents the current health status of an individual Amazon EC2 instance as reported by an Elastic Load Balancer. The data is returned by the `DescribeInstanceHealth` API call and indicates whether the instance is `InService`, `OutOfService`, or in a transitional state (e.g. `Draining`, `Unknown`). By tracking these objects Overmind can warn you when a deployment will place traffic on unhealthy instances or reduce overall service capacity. -For full details see the AWS documentation: https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-healthchecks.html - -## Supported Methods - -- `GET`: Get instance health by ID (`{LoadBalancerName}/{InstanceId}`) -- `LIST`: List all instance healths -- ~~`SEARCH`~~ - -## Possible Links - -### [`ec2-instance`](/sources/aws/Types/ec2-instance) - -Each ELB Instance Health object is intrinsically linked to the EC2 instance whose state it describes. Following this link allows you to inspect configuration details (such as security groups or attached volumes) that may be contributing to an unhealthy status. diff --git a/docs.overmind.tech/docs/sources/aws/Types/elb-load-balancer.md b/docs.overmind.tech/docs/sources/aws/Types/elb-load-balancer.md deleted file mode 100644 index d32502e5..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/elb-load-balancer.md +++ /dev/null @@ -1,47 +0,0 @@ ---- -title: Classic Load Balancer -sidebar_label: elb-load-balancer ---- - -A Classic Load Balancer (CLB) is the original generation of AWS Elastic Load Balancing. 
It automatically distributes incoming application or network traffic across multiple Amazon EC2 instances that are located in one or more Availability Zones, improving fault-tolerance and scalability. A CLB provides either HTTP/HTTPS or TCP load balancing and exposes a single DNS end-point that clients connect to. -Official documentation: https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/introduction.html - -**Terrafrom Mappings:** - -- `aws_elb.arn` - -## Supported Methods - -- `GET`: Get a classic load balancer by name -- `LIST`: List all classic load balancers -- `SEARCH`: Search for classic load balancers by ARN - -## Possible Links - -### [`dns`](/sources/stdlib/Types/dns) - -The load balancer’s endpoint is presented as a DNS A/AAAA/CNAME record (e.g. `my-clb-123456.eu-west-2.elb.amazonaws.com`). Overmind links the CLB to this DNS record so that you can see which hostname is exposed publicly. - -### [`route53-hosted-zone`](/sources/aws/Types/route53-hosted-zone) - -AWS hosts the CLB DNS name inside an Amazon-owned Route 53 hosted zone, and you may also create alias or CNAME records in your own hosted zones that point to the CLB. The link shows every hosted zone that contains records referencing the load balancer. - -### [`ec2-subnet`](/sources/aws/Types/ec2-subnet) - -A Classic Load Balancer must be attached to one or more subnets in each Availability Zone where it is enabled. This link reveals the exact subnets the CLB is deployed into. - -### [`ec2-vpc`](/sources/aws/Types/ec2-vpc) - -Because the selected subnets belong to a specific VPC, the CLB itself resides inside that VPC. The link allows you to trace the load balancer back to its enclosing network boundary. - -### [`ec2-instance`](/sources/aws/Types/ec2-instance) - -Backend EC2 instances are registered with the CLB as targets. Overmind lists every registered instance so you can assess what workloads will receive traffic from the load balancer. 
- -### [`elb-instance-health`](/sources/aws/Types/elb-instance-health) - -For each registered EC2 instance AWS maintains per-target health information (healthy, unhealthy, etc.). This link surfaces those health objects, letting you understand why particular instances may not be receiving traffic. - -### [`ec2-security-group`](/sources/aws/Types/ec2-security-group) - -In a VPC, a Classic Load Balancer is associated with one or more security groups that govern allowed inbound and outbound traffic. Overmind links to these security groups so you can inspect the firewall rules that protect the load balancer. diff --git a/docs.overmind.tech/docs/sources/aws/Types/elbv2-listener.md b/docs.overmind.tech/docs/sources/aws/Types/elbv2-listener.md deleted file mode 100644 index eaf7991d..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/elbv2-listener.md +++ /dev/null @@ -1,36 +0,0 @@ ---- -title: ELB Listener -sidebar_label: elbv2-listener ---- - -An Elastic Load Balancing (ELB) v2 Listener is the component of an Application Load Balancer (ALB) or Network Load Balancer (NLB) that checks for connection requests, using a specified protocol and port, and then routes those requests to one or more target groups according to its rules. Each listener belongs to a single load balancer, can have one default action and multiple conditional rules, and is the entry point for traffic into your load-balancing configuration. -Further details can be found in the AWS documentation: https://docs.aws.amazon.com/elasticloadbalancing/latest/application/load-balancer-listeners.html - -**Terrafrom Mappings:** - -- `aws_alb_listener.arn` -- `aws_lb_listener.arn` - -## Supported Methods - -- `GET`: Get an ELB listener by ARN -- ~~`LIST`~~ -- `SEARCH`: Search for ELB listeners by load balancer ARN - -## Possible Links - -### [`elbv2-load-balancer`](/sources/aws/Types/elbv2-load-balancer) - -The listener is directly attached to exactly one load balancer. 
Overmind uses this link to show which ALB or NLB will be affected if the listener configuration is changed or deleted. - -### [`elbv2-rule`](/sources/aws/Types/elbv2-rule) - -A listener owns a set of rules that determine how incoming requests are evaluated and forwarded. This link exposes those rules so you can trace the impact of modifying conditions, priorities, or actions. - -### [`http`](/sources/stdlib/Types/http) - -If the listener uses the HTTP or HTTPS protocol, Overmind represents the public-facing endpoint as an `http` item. This allows cross-checking of listener ports with accessible URLs and aids in identifying unintended exposure. - -### [`elbv2-target-group`](/sources/aws/Types/elbv2-target-group) - -Listener actions forward traffic to one or more target groups. Overmind links these dependencies so you can see which instances, containers, or IPs will receive traffic, helping you assess downstream blast radius. diff --git a/docs.overmind.tech/docs/sources/aws/Types/elbv2-load-balancer.md b/docs.overmind.tech/docs/sources/aws/Types/elbv2-load-balancer.md deleted file mode 100644 index a60dc818..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/elbv2-load-balancer.md +++ /dev/null @@ -1,55 +0,0 @@ ---- -title: Elastic Load Balancer -sidebar_label: elbv2-load-balancer ---- - -Elastic Load Balancers distribute incoming traffic across multiple targets, improving the availability and scalability of applications. The “v2” API covers Application, Network and Gateway Load Balancers, each of which can automatically scale to meet demand and provide a single DNS endpoint for users. Full service behaviour and limits are documented in the AWS Elastic Load Balancing User Guide (https://docs.aws.amazon.com/elasticloadbalancing/latest/userguide/). 
- -**Terrafrom Mappings:** - -- `aws_lb.arn` -- `aws_lb.id` - -## Supported Methods - -- `GET`: Get an ELB by name -- `LIST`: List all ELBs -- `SEARCH`: Search for ELBs by ARN - -## Possible Links - -### [`elbv2-target-group`](/sources/aws/Types/elbv2-target-group) - -The load balancer forwards requests to one or more target groups; each listener rule references a target group that contains the actual EC2 instances, IPs or Lambda functions receiving traffic. - -### [`elbv2-listener`](/sources/aws/Types/elbv2-listener) - -Listeners define the port and protocol that the load balancer accepts and contain the rules that map traffic to target groups; every load balancer has at least one listener. - -### [`dns`](/sources/stdlib/Types/dns) - -ELBs are accessed via a DNS name (e.g., `my-alb-123456.eu-west-1.elb.amazonaws.com`). External DNS records resolve to the IPs managed by AWS behind this name. - -### [`route53-hosted-zone`](/sources/aws/Types/route53-hosted-zone) - -Route 53 alias or CNAME records are commonly created in a hosted zone to point a friendly domain name to the load balancer’s DNS name. - -### [`ec2-vpc`](/sources/aws/Types/ec2-vpc) - -An ELB is deployed inside a specific VPC, inheriting its network boundaries and able to route traffic only within that VPC (except for internet-facing endpoints). - -### [`ec2-subnet`](/sources/aws/Types/ec2-subnet) - -The load balancer is placed into one or more subnets; for high availability at least two subnets (usually across AZs) are required. - -### [`ec2-address`](/sources/aws/Types/ec2-address) - -Network Load Balancers can be allocated static Elastic IP addresses, one per subnet, providing fixed public IPs for the load balancer. - -### [`ip`](/sources/aws/Types/networkmanager-network-resource-relationship) - -Each Elastic IP (for NLB) or the dynamically allocated addresses (for ALB/Gateway LB) represent the underlying IP resources that the DNS name resolves to. 
- -### [`ec2-security-group`](/sources/aws/Types/ec2-security-group) - -Application and Gateway Load Balancers are associated with security groups which control the allowed inbound and outbound traffic to the load balancer endpoints. diff --git a/docs.overmind.tech/docs/sources/aws/Types/elbv2-rule.md b/docs.overmind.tech/docs/sources/aws/Types/elbv2-rule.md deleted file mode 100644 index d4a0f500..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/elbv2-rule.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -title: ELB Rule -sidebar_label: elbv2-rule ---- - -An ELBv2 listener rule specifies how an Application Load Balancer (ALB) or Network Load Balancer (NLB) should handle requests that arrive on a particular listener. Each rule has a priority, a set of conditions (for example, host-based or path-based matches) and a set of actions (such as forwarding to a target group, redirecting, or returning a fixed response). When traffic reaches the listener, the load balancer evaluates its rules in priority order and executes the actions associated with the first rule whose conditions are met. 
Refer to the official AWS documentation for further information: https://docs.aws.amazon.com/elasticloadbalancing/latest/application/load-balancer-listeners.html#listener-rules - -**Terrafrom Mappings:** - -- `aws_alb_listener_rule.arn` -- `aws_lb_listener_rule.arn` - -## Supported Methods - -- `GET`: Get a rule by ARN -- ~~`LIST`~~ -- `SEARCH`: Search for rules by listener ARN diff --git a/docs.overmind.tech/docs/sources/aws/Types/elbv2-target-group.md b/docs.overmind.tech/docs/sources/aws/Types/elbv2-target-group.md deleted file mode 100644 index 789a5692..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/elbv2-target-group.md +++ /dev/null @@ -1,32 +0,0 @@ ---- -title: Target Group -sidebar_label: elbv2-target-group ---- - -An Amazon Elastic Load Balancing v2 (ELBv2) target group is a logical grouping of targets—such as EC2 instances, IP addresses, Lambda functions or Application Load Balancers—that a load balancer routes traffic to. It contains configuration such as the protocol and port to use, health-check settings, stickiness, deregistration delay and slow-start settings, all within a single VPC. Listeners on an Application Load Balancer (ALB) or Network Load Balancer (NLB) forward requests to one or more target groups based on listener rules. -For full details see the official AWS documentation: https://docs.aws.amazon.com/elasticloadbalancing/latest/application/load-balancer-target-groups.html - -**Terrafrom Mappings:** - -- `aws_alb_target_group.arn` -- `aws_lb_target_group.arn` - -## Supported Methods - -- `GET`: Get a target group by name -- `LIST`: List all target groups -- `SEARCH`: Search for target groups by load balancer ARN or target group ARN - -## Possible Links - -### [`ec2-vpc`](/sources/aws/Types/ec2-vpc) - -A target group is always created within a specific VPC, and all of its registered IP addresses or instance-based targets must reside in that VPC. Therefore the target group is linked to the VPC where its network resources live. 
- -### [`elbv2-load-balancer`](/sources/aws/Types/elbv2-load-balancer) - -Load balancers reference target groups in their listener rules. This link shows which load balancers are configured to forward traffic to the target group, or conversely, which target groups a given load balancer depends upon. - -### [`elbv2-target-health`](/sources/aws/Types/elbv2-target-health) - -Each target group has a corresponding set of target-health descriptions indicating the current health status of every registered target. This link surfaces those health objects so you can see whether the targets in the group are healthy, unhealthy, initialising or unused. diff --git a/docs.overmind.tech/docs/sources/aws/Types/elbv2-target-health.md b/docs.overmind.tech/docs/sources/aws/Types/elbv2-target-health.md deleted file mode 100644 index 45e3ae77..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/elbv2-target-health.md +++ /dev/null @@ -1,33 +0,0 @@ ---- -title: ELB Target Health -sidebar_label: elbv2-target-health ---- - -Elastic Load Balancing (v2) distributes traffic across multiple targets such as EC2 instances, IP addresses, and Lambda functions. -The ELB Target Health resource in Overmind represents the status of a single target as returned by the AWS `DescribeTargetHealth` API. -It shows whether the target is healthy, unhealthy, initialising, or draining, together with any failure reasons, so you can spot issues before a change is deployed. -Official documentation: https://docs.aws.amazon.com/elasticloadbalancing/latest/APIReference/API_DescribeTargetHealth.html - -## Supported Methods - -- `GET`: Get target health by unique ID (`{TargetGroupArn}|{Id}|{AvailabilityZone}|{Port}`) -- ~~`LIST`~~ -- `SEARCH`: Search for target health by target group ARN - -## Possible Links - -### [`ec2-instance`](/sources/aws/Types/ec2-instance) - -When the target group’s type is `instance`, each registered EC2 instance appears as an ELB target. 
The target-health record shows whether that particular EC2 instance is currently considered healthy by the load balancer. - -### [`lambda-function`](/sources/aws/Types/lambda-function) - -For target groups of type `lambda`, the Lambda function itself is the target. The target-health item reports the invocation health of the function as assessed by the load balancer. - -### [`ip`](/sources/aws/Types/networkmanager-network-resource-relationship) - -If the target group is of type `ip`, every registered IP address becomes a target. The target-health entry records the health of that IP address, enabling you to see whether traffic will be routed to it. - -### [`elbv2-load-balancer`](/sources/aws/Types/elbv2-load-balancer) - -The load balancer associated with the target group uses these health results to decide where to send traffic. Linking to the load balancer lets you trace how a target’s health status could affect overall load-balancer behaviour. diff --git a/docs.overmind.tech/docs/sources/aws/Types/iam-group.md b/docs.overmind.tech/docs/sources/aws/Types/iam-group.md deleted file mode 100644 index d3500c2e..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/iam-group.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -title: IAM Group -sidebar_label: iam-group ---- - -An IAM (Identity and Access Management) group is a logical collection of IAM users within an AWS account. Permissions—attached to the group via policies—apply to every user who is a member, making it easier to manage access at scale. Because groups do not have their own security credentials, they cannot be used to log in directly; instead, they serve solely as a mechanism for permission inheritance and simplified administration. 
For full details, refer to the AWS documentation: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_groups.html - -**Terrafrom Mappings:** - -- `aws_iam_group.arn` - -## Supported Methods - -- `GET`: Get a group by name -- `LIST`: List all IAM groups -- `SEARCH`: Search for a group by ARN diff --git a/docs.overmind.tech/docs/sources/aws/Types/iam-instance-profile.md b/docs.overmind.tech/docs/sources/aws/Types/iam-instance-profile.md deleted file mode 100644 index f261d8dc..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/iam-instance-profile.md +++ /dev/null @@ -1,26 +0,0 @@ ---- -title: IAM Instance Profile -sidebar_label: iam-instance-profile ---- - -An IAM Instance Profile is a logical container for an IAM role that you can attach to an Amazon EC2 instance when it is launched. The profile passes the role’s credentials to the instance so that applications running on the instance can securely call other AWS services without embedding long-lived access keys in the code or configuration. For full details see the AWS documentation: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_switch-role-ec2_instance-profiles.html - -**Terrafrom Mappings:** - -- `aws_iam_instance_profile.arn` - -## Supported Methods - -- `GET`: Get an IAM instance profile by name -- `LIST`: List all IAM instance profiles -- `SEARCH`: Search IAM instance profiles by ARN - -## Possible Links - -### [`iam-role`](/sources/aws/Types/iam-role) - -Every instance profile contains exactly one IAM role (though a role can exist without an instance profile). Overmind links the profile to the role it encapsulates so that you can see which permissions will be passed to the EC2 instance. - -### [`iam-policy`](/sources/aws/Types/iam-policy) - -Policies are not attached directly to the instance profile but to the role inside it. 
Overmind surfaces these indirect relationships so that you can trace what policies – and therefore permissions – will ultimately be available on the instance through the profile. diff --git a/docs.overmind.tech/docs/sources/aws/Types/iam-policy.md b/docs.overmind.tech/docs/sources/aws/Types/iam-policy.md deleted file mode 100644 index 9a122311..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/iam-policy.md +++ /dev/null @@ -1,33 +0,0 @@ ---- -title: IAM Policy -sidebar_label: iam-policy ---- - -An IAM policy is a standalone document that defines a set of permissions which determine whether a principal (user, group, or role) is allowed or denied the ability to call specific AWS APIs. Policies are expressed in JSON, may be created and managed by customers or AWS, and are attached to identities or resources to enforce least-privilege access. See the official AWS documentation for full details: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html - -**Terrafrom Mappings:** - -- `aws_iam_policy.arn` -- `aws_iam_user_policy_attachment.policy_arn` - -## Supported Methods - -## Supported Methods - -- `GET`: Get a policy by ARN or path. `{path}` is extracted from the ARN path component. -- `LIST`: List all policies -- `SEARCH`: Search for IAM policies by ARN - -## Possible Links - -### [`iam-group`](/sources/aws/Types/iam-group) - -An IAM policy can be attached to an IAM group to grant all members of the group the permissions described in the policy. Overmind therefore links a policy to any groups to which it is attached. - -### [`iam-user`](/sources/aws/Types/iam-user) - -An IAM policy may be directly attached to an individual IAM user, granting that user the specified permissions. Overmind surfaces this relationship so you can see every user that inherits rights from the policy. - -### [`iam-role`](/sources/aws/Types/iam-role) - -IAM roles often receive permissions through attached policies. 
Overmind links a policy to any roles that reference it, allowing you to trace which compute workloads or federated identities can exercise the policy’s privileges. diff --git a/docs.overmind.tech/docs/sources/aws/Types/iam-role.md b/docs.overmind.tech/docs/sources/aws/Types/iam-role.md deleted file mode 100644 index e4dbad1d..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/iam-role.md +++ /dev/null @@ -1,23 +0,0 @@ ---- -title: IAM Role -sidebar_label: iam-role ---- - -An AWS Identity and Access Management (IAM) role is an identity that you can assume to obtain temporary security credentials so that you can make AWS requests. Unlike users, roles do not have long-term credentials; instead, they rely on trust relationships and attached policies to define who can assume the role and what they can do once they have it. IAM roles are typically used for granting permissions to AWS services, cross-account access, or federated users. -For full details, see the AWS documentation: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles.html - -**Terrafrom Mappings:** - -- `aws_iam_role.arn` - -## Supported Methods - -- `GET`: Get an IAM role by name -- `LIST`: List all IAM roles -- `SEARCH`: Search for IAM roles by ARN - -## Possible Links - -### [`iam-policy`](/sources/aws/Types/iam-policy) - -An IAM role is functionally useless without one or more IAM policies attached to it. Overmind links an `iam-role` to the `iam-policy` resources that 1) are attached as inline or managed policies granting permissions, and 2) define the trust relationship (the role’s assume-role policy). This allows you to trace which permissions the role grants and who or what is allowed to assume it. 
diff --git a/docs.overmind.tech/docs/sources/aws/Types/iam-user.md b/docs.overmind.tech/docs/sources/aws/Types/iam-user.md deleted file mode 100644 index d34c4c60..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/iam-user.md +++ /dev/null @@ -1,23 +0,0 @@ ---- -title: IAM User -sidebar_label: iam-user ---- - -An IAM user is a discrete identity within AWS Identity and Access Management that represents a human, service or application which needs to interact with AWS resources. Each user has its own credentials and permissions that determine what actions it can perform in an AWS account. For full details, refer to the AWS documentation: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_users.html - -**Terrafrom Mappings:** - -- `aws_iam_user.arn` -- `aws_iam_user_group_membership.user` - -## Supported Methods - -- `GET`: Get an IAM user by name -- `LIST`: List all IAM users -- `SEARCH`: Search for IAM users by ARN - -## Possible Links - -### [`iam-group`](/sources/aws/Types/iam-group) - -IAM users can be members of one or more IAM groups, inheriting the group’s managed and inline policies. Overmind therefore links an IAM user to the `iam-group` type whenever the user is listed as a member of that group. diff --git a/docs.overmind.tech/docs/sources/aws/Types/kms-alias.md b/docs.overmind.tech/docs/sources/aws/Types/kms-alias.md deleted file mode 100644 index 51dd4e6b..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/kms-alias.md +++ /dev/null @@ -1,23 +0,0 @@ ---- -title: KMS Alias -sidebar_label: kms-alias ---- - -An AWS Key Management Service (KMS) alias is a human-readable pointer to a specific KMS key, allowing you to reference that key without exposing its full KeyID or ARN. Aliases make it simpler to rotate keys and update applications, because you can move the alias to a new key rather than changing code or configurations that use the key directly. 
They are unique within an account and region, and can reference either customer-managed or AWS-managed keys. -For further details, see the official AWS documentation: https://docs.aws.amazon.com/kms/latest/developerguide/kms-alias.html - -**Terrafrom Mappings:** - -- `aws_kms_alias.arn` - -## Supported Methods - -- `GET`: Get an alias by keyID/aliasName -- `LIST`: List all aliases -- `SEARCH`: Search aliases by keyID - -## Possible Links - -### [`kms-key`](/sources/aws/Types/kms-key) - -Each alias is a shorthand reference that maps to exactly one KMS key; the link shows which underlying `kms-key` the alias currently points to, enabling you to trace risk and usage back to the actual cryptographic key material. diff --git a/docs.overmind.tech/docs/sources/aws/Types/kms-custom-key-store.md b/docs.overmind.tech/docs/sources/aws/Types/kms-custom-key-store.md deleted file mode 100644 index edb3cb89..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/kms-custom-key-store.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -title: Custom Key Store -sidebar_label: kms-custom-key-store ---- - -A custom key store in AWS Key Management Service (KMS) enables you to back your KMS keys with your own AWS CloudHSM cluster rather than with the default, multi-tenant KMS hardware security modules. This gives you exclusive control over the cryptographic hardware that protects your key material while still allowing you to use KMS APIs and integrations. You can create, connect, disconnect, or delete a custom key store, and any KMS keys that reside in it remain under your sole tenancy. 
See the official AWS documentation for full details: https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html - -**Terrafrom Mappings:** - -- `aws_kms_custom_key_store.id` - -## Supported Methods - -- `GET`: Get a custom key store by its ID -- `LIST`: List all custom key stores -- `SEARCH`: Search custom key store by ARN diff --git a/docs.overmind.tech/docs/sources/aws/Types/kms-grant.md b/docs.overmind.tech/docs/sources/aws/Types/kms-grant.md deleted file mode 100644 index 63c2197a..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/kms-grant.md +++ /dev/null @@ -1,30 +0,0 @@ ---- -title: KMS Grant -sidebar_label: kms-grant ---- - -AWS Key Management Service (KMS) grants are lightweight authorisations that give a specified principal permission to use a particular KMS key for a defined set of operations (such as Encrypt, Decrypt, GenerateDataKey or RetireGrant). Unlike key policies and IAM policies, grants can be created and retired programmatically and have an optional time-to-live, making them ideal for short-lived workloads or delegated access. For a full description see the official AWS documentation: https://docs.aws.amazon.com/kms/latest/developerguide/grants.html - -**Terrafrom Mappings:** - -- `aws_kms_grant.grant_id` - -## Supported Methods - -- `GET`: Get a grant by keyID/grantId -- ~~`LIST`~~ -- `SEARCH`: Search grants by keyID - -## Possible Links - -### [`kms-key`](/sources/aws/Types/kms-key) - -Every grant is created against exactly one KMS key. The grant specifies which operations are allowed on that key, so the relationship is “KMS key ­— has → grant”. - -### [`iam-user`](/sources/aws/Types/iam-user) - -An IAM user can appear as the grantee principal or the retiring principal in a grant. If the user is referenced, the link shows which grants give that user access to which keys. - -### [`iam-role`](/sources/aws/Types/iam-role) - -Similar to IAM users, an IAM role may be listed as the grantee or retiring principal. 
The link reveals the grants that permit the role to use or retire access to specific KMS keys. diff --git a/docs.overmind.tech/docs/sources/aws/Types/kms-key-policy.md b/docs.overmind.tech/docs/sources/aws/Types/kms-key-policy.md deleted file mode 100644 index ff75bb7f..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/kms-key-policy.md +++ /dev/null @@ -1,23 +0,0 @@ ---- -title: KMS Key Policy -sidebar_label: kms-key-policy ---- - -AWS Key Management Service (KMS) key policies are the primary access-control mechanism for customer-managed KMS keys. A key policy is a JSON document attached directly to a KMS key that defines which principals can use the key and what cryptographic operations they may perform. Every customer-managed key must have exactly one key policy, and this policy is evaluated in combination with IAM policies to determine effective permissions. -For full details, see the official AWS documentation: https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html - -**Terrafrom Mappings:** - -- `aws_kms_key_policy.key_id` - -## Supported Methods - -- `GET`: Get a KMS key policy by its Key ID -- ~~`LIST`~~ -- `SEARCH`: Search KMS key policies by Key ID - -## Possible Links - -### [`kms-key`](/sources/aws/Types/kms-key) - -A KMS key policy is attached to exactly one KMS key; this link represents that one-to-one relationship. Following the link from a policy to its `kms-key` will show the cryptographic key whose usage and management are governed by the policy. diff --git a/docs.overmind.tech/docs/sources/aws/Types/kms-key.md b/docs.overmind.tech/docs/sources/aws/Types/kms-key.md deleted file mode 100644 index df127d19..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/kms-key.md +++ /dev/null @@ -1,31 +0,0 @@ ---- -title: KMS Key -sidebar_label: kms-key ---- - -An AWS Key Management Service (KMS) Key is a logical representation of a cryptographic key used to encrypt and decrypt data across AWS services and your own applications. 
Each key is uniquely identifiable by its Key ID and Amazon Resource Name (ARN), can be either customer-managed or AWS-managed, and is stored within an AWS-managed hardware security module (HSM) cluster or, when using a custom key store, in an AWS CloudHSM cluster that you control. KMS Keys are central to implementing envelope encryption, controlling access to encrypted resources, and meeting compliance requirements related to data protection. -For full details, see the official AWS documentation: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html - -**Terrafrom Mappings:** - -- `aws_kms_key.key_id` - -## Supported Methods - -- `GET`: Get a KMS Key by its ID -- `LIST`: List all KMS Keys -- `SEARCH`: Search for KMS Keys by ARN - -## Possible Links - -### [`kms-custom-key-store`](/sources/aws/Types/kms-custom-key-store) - -A KMS Key may reside in a custom key store backed by your own AWS CloudHSM cluster. This link is produced when the key’s `KeyStoreId` attribute is set, allowing Overmind to trace the relationship between the key and the custom key store that physically holds its material. - -### [`kms-key-policy`](/sources/aws/Types/kms-key-policy) - -Every KMS Key has exactly one key policy that defines which principals are authorised to use or administer the key. Overmind links a key to its policy so that you can quickly inspect who can access the key and identify potential misconfigurations or excessive permissions. - -### [`kms-grant`](/sources/aws/Types/kms-grant) - -Grants provide time-bound or scoped permissions for principals to use a KMS Key without modifying its key policy. Overmind records links from a key to all active grants, enabling you to see what temporary or delegated access exists and to assess the risk of unintended key usage. 
diff --git a/docs.overmind.tech/docs/sources/aws/Types/lambda-event-source-mapping.md b/docs.overmind.tech/docs/sources/aws/Types/lambda-event-source-mapping.md deleted file mode 100644 index 8696a046..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/lambda-event-source-mapping.md +++ /dev/null @@ -1,35 +0,0 @@ ---- -title: Lambda Event Source Mapping -sidebar_label: lambda-event-source-mapping ---- - -AWS Lambda event source mappings are configuration objects that connect an event-producing resource (for example, an SQS queue, DynamoDB stream, Kinesis data stream or Amazon MQ broker) to a Lambda function. They tell Lambda from which resource to poll, what batch size to use, whether to enable the mapping immediately, and numerous advanced options such as filtering and batching windows. In essence, an event source mapping is the glue that turns an upstream stream or queue into invocations of your function. -Official documentation: https://docs.aws.amazon.com/lambda/latest/dg/intro-core-components.html#event-source-mapping - -**Terrafrom Mappings:** - -- `aws_lambda_event_source_mapping.arn` - -## Supported Methods - -- `GET`: Get a Lambda event source mapping by UUID -- `LIST`: List all Lambda event source mappings -- `SEARCH`: Search for Lambda event source mappings by Event Source ARN (SQS, DynamoDB, Kinesis, etc.) - -## Possible Links - -### [`lambda-function`](/sources/aws/Types/lambda-function) - -Every event source mapping targets exactly one Lambda function. The mapping’s `FunctionName` points to the ARN of that function, so Overmind will create a link from the mapping to the lambda-function resource it invokes. - -### [`dynamodb-table`](/sources/aws/Types/dynamodb-table) - -When the event source ARN refers to a DynamoDB stream, the underlying DynamoDB table is important context. Overmind links the mapping to the dynamodb-table that owns the stream so that you can trace how table updates lead to Lambda executions. 
- -### [`sqs-queue`](/sources/aws/Types/sqs-queue) - -For SQS, the mapping’s `EventSourceArn` is the ARN of an SQS queue. Linking to the sqs-queue resource lets you understand queue configuration (visibility timeout, encryption, redrive policy) and how it might influence Lambda processing. - -### [`rds-db-cluster`](/sources/aws/Types/rds-db-cluster) - -If the event source is an Amazon RDS for PostgreSQL or MySQL DB cluster emitting events through Amazon RDS for PostgreSQL logical replication slots (via the `RDS Data API` or Aurora’s `MysqlBinlog` integration), the mapping may reference the cluster’s ARN. Overmind links to the rds-db-cluster so you can assess the impact of database changes on the Lambda workflow. diff --git a/docs.overmind.tech/docs/sources/aws/Types/lambda-function.md b/docs.overmind.tech/docs/sources/aws/Types/lambda-function.md deleted file mode 100644 index 4352b000..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/lambda-function.md +++ /dev/null @@ -1,44 +0,0 @@ ---- -title: Lambda Function -sidebar_label: lambda-function ---- - -AWS Lambda is a serverless compute service that runs your code in response to events and automatically manages the underlying compute resources for you. A Lambda function is the fundamental execution unit: it contains your application code, runtime settings and configuration such as memory, timeout and environment variables. For a full description see the official AWS documentation: https://docs.aws.amazon.com/lambda/latest/dg/welcome.html - -**Terrafrom Mappings:** - -- `aws_lambda_function.arn` -- `aws_lambda_function_event_invoke_config.id` -- `aws_lambda_function_url.function_arn` - -## Supported Methods - -- `GET`: Get a lambda function by name -- `LIST`: List all lambda functions -- `SEARCH`: Search for lambda functions by ARN - -## Possible Links - -### [`iam-role`](/sources/aws/Types/iam-role) - -Each Lambda function is executed with an IAM role (its “execution role”). 
Overmind links the function to that `iam-role` so you can immediately see what permissions the function has and what downstream resources could be affected by its actions. - -### [`s3-bucket`](/sources/aws/Types/s3-bucket) - -A Lambda function can be triggered by S3 events (e.g. object creation) or load its deployment artefact from an S3 bucket. Overmind links the function to any referenced `s3-bucket` so you can assess event-driven couplings and code-package storage risks. - -### [`sns-topic`](/sources/aws/Types/sns-topic) - -Lambda functions may subscribe to, or publish messages to, Amazon SNS topics. When a function is configured as an SNS subscription target, Overmind links it to the relevant `sns-topic` so that you can trace message flows and understand failure blast-radius. - -### [`sqs-queue`](/sources/aws/Types/sqs-queue) - -Lambda can poll SQS queues as an event source. Overmind establishes a link between the function and the `sqs-queue` it consumes so that queue backlogs, permissions and dead-letter configurations are visible in the dependency graph. - -### [`lambda-function`](/sources/aws/Types/lambda-function) - -A Lambda function can synchronously or asynchronously invoke another Lambda function (for example, in micro-service fan-out patterns). Overmind links calling and called `lambda-function` resources to expose these internal service dependencies. - -### [`elbv2-target-group`](/sources/aws/Types/elbv2-target-group) - -Application Load Balancers (ALB) can forward requests to Lambda targets via an ELBv2 target group. Overmind links the function to any associated `elbv2-target-group`, allowing you to see inbound HTTP pathways and evaluate scaling or security implications. 
diff --git a/docs.overmind.tech/docs/sources/aws/Types/lambda-layer-version.md b/docs.overmind.tech/docs/sources/aws/Types/lambda-layer-version.md deleted file mode 100644 index fdcf79b1..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/lambda-layer-version.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -title: Lambda Layer Version -sidebar_label: lambda-layer-version ---- - -AWS Lambda Layer Version represents an immutable, version-numbered snapshot of a Lambda layer—an archive of shared code, libraries, custom runtimes or other assets that can be attached to multiple Lambda functions. Each time you publish a layer you create a new layer version, referenced in the form `arn:aws:lambda:::layer::`. Using layers helps decouple shared dependencies from individual function packages, streamline updates and encourage code reuse across your serverless estate. -Further details can be found in the official AWS documentation: https://docs.aws.amazon.com/lambda/latest/dg/configuration-layers.html - -**Terrafrom Mappings:** - -- `aws_lambda_layer_version.arn` - -## Supported Methods - -- `GET`: Get a layer version by full name (`{layerName}:{versionNumber}`) -- ~~`LIST`~~ -- `SEARCH`: Search for layer versions by ARN diff --git a/docs.overmind.tech/docs/sources/aws/Types/lambda-layer.md b/docs.overmind.tech/docs/sources/aws/Types/lambda-layer.md deleted file mode 100644 index 1641736c..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/lambda-layer.md +++ /dev/null @@ -1,19 +0,0 @@ ---- -title: Lambda Layer -sidebar_label: lambda-layer ---- - -AWS Lambda Layers are a packaging construct used to share code, data, and runtimes between multiple Lambda functions. A layer is published once and can then be referenced by any function in the same AWS account (or, if shared, by functions in other accounts), keeping deployment packages small and ensuring that common dependencies are managed in a single place. 
Overmind surfaces Lambda Layers so that you can see which functions depend on them and understand the blast radius of any proposed change. -For full details, see the official AWS documentation: https://docs.aws.amazon.com/lambda/latest/dg/configuration-layers.html - -## Supported Methods - -- ~~`GET`~~ -- `LIST`: List all lambda layers -- ~~`SEARCH`~~ - -## Possible Links - -### [`lambda-layer-version`](/sources/aws/Types/lambda-layer-version) - -A Lambda Layer can have multiple immutable versions; this link shows the individual versions that belong to the parent layer. diff --git a/docs.overmind.tech/docs/sources/aws/Types/network-firewall-firewall-policy.md b/docs.overmind.tech/docs/sources/aws/Types/network-firewall-firewall-policy.md deleted file mode 100644 index bea09a33..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/network-firewall-firewall-policy.md +++ /dev/null @@ -1,31 +0,0 @@ ---- -title: Network Firewall Policy -sidebar_label: network-firewall-firewall-policy ---- - -An AWS Network Firewall Policy is the central configuration object that tells the AWS Network Firewall service how to inspect, filter, and log traffic that flows through a firewall. The policy groups together references to stateless and stateful rule groups, sets default actions for traffic that does not match a rule, and can optionally attach TLS inspection configurations. Multiple firewalls can share the same policy, making it easy to apply a consistent security posture across different VPCs or accounts. 
-For full service documentation, see the official AWS docs: https://docs.aws.amazon.com/network-firewall/latest/developerguide/firewall-policies.html - -**Terrafrom Mappings:** - -- `aws_networkfirewall_firewall_policy.name` - -## Supported Methods - -- `GET`: Get a Network Firewall Policy by name -- `LIST`: List Network Firewall Policies -- `SEARCH`: Search for Network Firewall Policies by ARN - -## Possible Links - -### [`network-firewall-rule-group`](/sources/aws/Types/network-firewall-rule-group) - -A firewall policy is essentially a collection of references to stateless and stateful rule groups. Each rule group defined under the policy dictates how specific traffic patterns are handled. Overmind links a policy to its rule groups so that you can quickly understand which inspection rules are being applied. - -### [`network-firewall-tls-inspection-configuration`](/sources/aws/Types/network-firewall-tls-inspection-configuration) - -If the policy includes a TLS inspection configuration, encrypted traffic can be decrypted, inspected, and then re-encrypted. Overmind links the policy to any associated TLS inspection configurations to show whether the firewall is capable of deep packet inspection for TLS flows. - -### [`kms-key`](/sources/aws/Types/kms-key) - -Firewall policies may specify a KMS key for the encryption of log data or stateful rule group data at rest. Overmind surfaces this link so that you can assess the cryptographic controls protecting your firewall’s sensitive data. 
diff --git a/docs.overmind.tech/docs/sources/aws/Types/network-firewall-firewall.md b/docs.overmind.tech/docs/sources/aws/Types/network-firewall-firewall.md deleted file mode 100644 index f185d8a0..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/network-firewall-firewall.md +++ /dev/null @@ -1,42 +0,0 @@ ---- -title: Network Firewall -sidebar_label: network-firewall-firewall ---- - -AWS Network Firewall is a managed, stateful, layer-4 and layer-7 firewall service that you deploy inside your own Amazon Virtual Private Cloud (VPC). It lets you inspect and filter both inbound and outbound traffic by applying rule groups that you author or obtain from third-party providers. Because the service is fully managed, AWS handles availability, scaling and patching, allowing you to focus on writing network-security rules rather than on the underlying infrastructure. For a full overview, see the official documentation: https://docs.aws.amazon.com/network-firewall/latest/developerguide/what-is-aws-network-firewall.html - -**Terrafrom Mappings:** - -- `aws_networkfirewall_firewall.name` - -## Supported Methods - -- `GET`: Get a Network Firewall by name -- `LIST`: List Network Firewalls -- `SEARCH`: Search for Network Firewalls by ARN - -## Possible Links - -### [`network-firewall-firewall-policy`](/sources/aws/Types/network-firewall-firewall-policy) - -Each Network Firewall is associated with exactly one firewall policy, which defines the stateful and stateless rule groups, default actions and logging configuration that the firewall must enforce. - -### [`ec2-subnet`](/sources/aws/Types/ec2-subnet) - -A firewall is deployed into one or more dedicated subnets—known as firewall subnets—within the VPC. These subnets host the firewall endpoints that inspect traffic traversing the Availability Zones. 
- -### [`ec2-vpc`](/sources/aws/Types/ec2-vpc) - -The firewall operates inside a specific VPC, inspecting traffic that enters, leaves or moves within that VPC according to the routing configuration you set up. - -### [`s3-bucket`](/sources/aws/Types/s3-bucket) - -You can configure Network Firewall to export alert and flow logs to an Amazon S3 bucket for long-term storage, auditing or further analysis; the bucket therefore becomes a downstream logging destination for the firewall. - -### [`iam-policy`](/sources/aws/Types/iam-policy) - -Creation, modification and deletion of Network Firewall resources are controlled through IAM policies. These policies grant or deny the required `network-firewall:*` permissions to principals such as users, roles and service accounts. - -### [`kms-key`](/sources/aws/Types/kms-key) - -If you choose to encrypt log data that Network Firewall delivers to Amazon S3 or CloudWatch Logs with a customer-managed key, the firewall references an AWS KMS key. The key is used for server-side encryption of the exported log objects. diff --git a/docs.overmind.tech/docs/sources/aws/Types/network-firewall-rule-group.md b/docs.overmind.tech/docs/sources/aws/Types/network-firewall-rule-group.md deleted file mode 100644 index 3de4cd07..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/network-firewall-rule-group.md +++ /dev/null @@ -1,30 +0,0 @@ ---- -title: Network Firewall Rule Group -sidebar_label: network-firewall-rule-group ---- - -AWS Network Firewall Rule Groups are reusable collections of stateless or stateful inspection rules that you attach to a Network Firewall policy. They let you define, version, and manage traffic-inspection logic independently from the firewalls that enforce it. A rule group may contain Suricata-compatible stateful rules, 5-tuple stateless rules, or a combination of both, and can optionally be encrypted with a customer-managed AWS KMS key. 
See the official AWS documentation for full details: https://docs.aws.amazon.com/network-firewall/latest/developerguide/rule-groups.html - -**Terrafrom Mappings:** - -- `aws_networkfirewall_rule_group.name` - -## Supported Methods - -- `GET`: Get a Network Firewall Rule Group by name -- `LIST`: List Network Firewall Rule Groups -- `SEARCH`: Search for Network Firewall Rule Groups by ARN - -## Possible Links - -### [`kms-key`](/sources/aws/Types/kms-key) - -If the rule group was created with an `EncryptionConfiguration`, the ARN of the customer-managed KMS key used for encryption is stored in the resource metadata. Overmind therefore links the rule group to the corresponding `kms-key` item. - -### [`sns-topic`](/sources/aws/Types/sns-topic) - -Operational teams often configure CloudWatch alarms on Network Firewall metrics that publish to an SNS topic; the alarm definition contains the rule group ARN as a dimension. When such a relationship exists, Overmind links the rule group to the `sns-topic` so that users can trace alerting pathways. - -### [`network-firewall-rule-group`](/sources/aws/Types/network-firewall-rule-group) - -Firewall policies can reference multiple rule groups, and a single rule group can be associated with several policies. Overmind records these associations, allowing one rule group to be linked to other rule groups that are attached to the same policy or that replace it through versioning. 
diff --git a/docs.overmind.tech/docs/sources/aws/Types/network-firewall-tls-inspection-configuration.md b/docs.overmind.tech/docs/sources/aws/Types/network-firewall-tls-inspection-configuration.md deleted file mode 100644 index f447983d..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/network-firewall-tls-inspection-configuration.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Network Firewall TLS Inspection Configuration -sidebar_label: network-firewall-tls-inspection-configuration ---- - -An AWS Network Firewall TLS Inspection Configuration represents the collection of certificates and related settings that AWS Network Firewall uses to decrypt, inspect and, when appropriate, re-encrypt TLS-encrypted traffic flowing through a firewall. The configuration is referenced by a firewall policy and allows the firewall to analyse traffic that would otherwise be opaque, enabling the detection of threats hidden inside encrypted sessions. -For full details, see the AWS documentation: https://docs.aws.amazon.com/network-firewall/latest/developerguide/tls-inspection-configuration.html - -## Supported Methods - -- `GET`: Get a Network Firewall TLS Inspection Configuration by name -- `LIST`: List Network Firewall TLS Inspection Configurations -- `SEARCH`: Search for Network Firewall TLS Inspection Configurations by ARN diff --git a/docs.overmind.tech/docs/sources/aws/Types/networkmanager-connect-attachment.md b/docs.overmind.tech/docs/sources/aws/Types/networkmanager-connect-attachment.md deleted file mode 100644 index e2c5a117..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/networkmanager-connect-attachment.md +++ /dev/null @@ -1,21 +0,0 @@ ---- -title: Networkmanager Connect Attachment -sidebar_label: networkmanager-connect-attachment ---- - -A Network Manager Connect Attachment represents the logical connection used to link a third-party SD-WAN, on-premises router or other non-AWS network appliance to an AWS Cloud WAN core network. 
It enables you to extend a core network beyond AWS, transporting traffic through GRE tunnels that are established and maintained by a subsequently created Connect Peer. -For full details see the AWS documentation: https://docs.aws.amazon.com/network-manager/latest/cloudwan/cloudwan-network-attachments.html#cloudwan-attachment-connect - -**Terrafrom Mappings:** - -- `aws_networkmanager_core_network.id` - -## Supported Methods - -- `GET`: - -## Possible Links - -### [`networkmanager-core-network`](/sources/aws/Types/networkmanager-core-network) - -Every Connect Attachment is created inside a specific Cloud WAN core network, referenced by its `CoreNetworkId`. Consequently, Overmind links a connect attachment back to the corresponding `networkmanager-core-network` so that you can trace how external connectivity feeds into, and potentially affects, the wider Cloud WAN topology. diff --git a/docs.overmind.tech/docs/sources/aws/Types/networkmanager-connect-peer-association.md b/docs.overmind.tech/docs/sources/aws/Types/networkmanager-connect-peer-association.md deleted file mode 100644 index 92a76c89..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/networkmanager-connect-peer-association.md +++ /dev/null @@ -1,31 +0,0 @@ ---- -title: Networkmanager Connect Peer Association -sidebar_label: networkmanager-connect-peer-association ---- - -An AWS Network Manager **Connect Peer Association** records the relationship between a Transit Gateway Connect peer and the on-premises device and link through which that peer reaches the AWS global network. It lets you see which Connect peers are presently attached to which devices and links inside a particular Global Network, and in which state the attachment currently is (for example, _pending_ or _available_). 
-For full API details, refer to the official AWS documentation: https://docs.aws.amazon.com/networkmanager/latest/APIReference/API_ConnectPeerAssociation.html - -## Supported Methods - -- `GET`: Get a Networkmanager Connect Peer Association -- `LIST`: List all Networkmanager Connect Peer Associations -- `SEARCH`: Search for Networkmanager ConnectPeerAssociations by GlobalNetworkId - -## Possible Links - -### [`networkmanager-global-network`](/sources/aws/Types/networkmanager-global-network) - -The association is scoped to a single Global Network; every Connect Peer Association includes the `GlobalNetworkId` that ties it back to this parent resource. - -### [`networkmanager-connect-peer`](/sources/aws/Types/networkmanager-connect-peer) - -The association identifies the specific Connect Peer (`ConnectPeerId`) whose attachment details are being tracked. - -### [`networkmanager-device`](/sources/aws/Types/networkmanager-device) - -If the Connect peer terminates on a particular on-premises or edge device, the association includes the `DeviceId`, linking it to this device resource. - -### [`networkmanager-link`](/sources/aws/Types/networkmanager-link) - -Where applicable, the association also records the `LinkId`, showing which physical or logical link is being used by the Connect peer to reach AWS. diff --git a/docs.overmind.tech/docs/sources/aws/Types/networkmanager-connect-peer.md b/docs.overmind.tech/docs/sources/aws/Types/networkmanager-connect-peer.md deleted file mode 100644 index 8ec2f5b0..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/networkmanager-connect-peer.md +++ /dev/null @@ -1,40 +0,0 @@ ---- -title: Networkmanager Connect Peer -sidebar_label: networkmanager-connect-peer ---- - -An AWS Network Manager **Connect Peer** represents one end of a GRE tunnel that is established over a Network Manager _Connect attachment_ (for example, between an AWS Transit Gateway/Cloud WAN core network and an external router). 
-The peer stores the tunnel’s **inside and outside IP addresses**, BGP configuration (peer ASN, BGP addresses and keys), the subnet in which the tunnel terminates, and the current operational state. Creating the peer is the final step that brings a Connect attachment into service, enabling traffic to flow between AWS and on-premises or third-party networks. -For full details see the official AWS documentation: https://docs.aws.amazon.com/networkmanager/latest/APIReference/API_ConnectPeer.html - -**Terrafrom Mappings:** - -- `aws_networkmanager_connect_peer.id` - -## Supported Methods - -- `GET`: Get a Networkmanager Connect Peer by id -- ~~`LIST`~~ -- ~~`SEARCH`~~ - -## Possible Links - -### [`networkmanager-core-network`](/sources/aws/Types/networkmanager-core-network) - -A Connect peer ultimately belongs to a core network; through its parent Connect attachment it is associated with a specific core network ID, so the peer can be traced back to the Cloud WAN or Transit Gateway core it serves. - -### [`networkmanager-connect-attachment`](/sources/aws/Types/networkmanager-connect-attachment) - -Each Connect peer is created **within** a single Connect attachment. This link identifies the attachment that houses the peer and through which the GRE tunnel is terminated. - -### [`ip`](/sources/aws/Types/networkmanager-network-resource-relationship) - -The peer exposes both _inside_ and _outside_ tunnel IP addresses. These addresses are modelled as IP resources and linked so you can see which IPs are consumed by the peer. - -### [`rdap-asn`](/sources/stdlib/Types/rdap-asn) - -When BGP is enabled the peer records the remote BGP ASN. Overmind links that ASN so you can quickly inspect public registration information for the autonomous system you are peering with. - -### [`ec2-subnet`](/sources/aws/Types/ec2-subnet) - -The peer must be associated with a specific subnet that contains the tunnel’s AWS endpoint. 
Linking to the EC2 subnet shows the precise network segment in which the peer resides, helping to check routing and security settings. diff --git a/docs.overmind.tech/docs/sources/aws/Types/networkmanager-connection.md b/docs.overmind.tech/docs/sources/aws/Types/networkmanager-connection.md deleted file mode 100644 index f24ff668..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/networkmanager-connection.md +++ /dev/null @@ -1,30 +0,0 @@ ---- -title: Networkmanager Connection -sidebar_label: networkmanager-connection ---- - -An AWS Network Manager Connection represents the logical relationship between two network devices (for example, a branch router and a transit gateway) inside an AWS Global Network. It stores metadata about how the two endpoints are linked, enabling Network Manager to map, monitor and troubleshoot your private WAN from a single view. See the official AWS documentation for full details: https://docs.aws.amazon.com/networkmanager/latest/APIReference/API_Connection.html - -**Terrafrom Mappings:** - -- `aws_networkmanager_connection.arn` - -## Supported Methods - -- `GET`: Get a Networkmanager Connection -- ~~`LIST`~~ -- `SEARCH`: Search for Networkmanager Connections by GlobalNetworkId, Device ARN, or Connection ARN - -## Possible Links - -### [`networkmanager-global-network`](/sources/aws/Types/networkmanager-global-network) - -Every connection is created within exactly one Global Network. Overmind follows this link to understand which overarching corporate network the connection belongs to and to enumerate all other resources that share the same scope. - -### [`networkmanager-link`](/sources/aws/Types/networkmanager-link) - -A connection is realised by one or two underlying Links, representing the actual circuits or VPN tunnels that carry traffic. Linking to these allows Overmind to surface characteristics such as bandwidth, provider and health for each side of the connection. 
- -### [`networkmanager-device`](/sources/aws/Types/networkmanager-device) - -Each connection terminates on two Devices (the `SourceDeviceId` and `DestinationDeviceId`). From a connection, Overmind can pivot to the involved devices to reveal their configurations, attached links and any downstream dependencies that could be affected by changes to the connection. diff --git a/docs.overmind.tech/docs/sources/aws/Types/networkmanager-core-network-policy.md b/docs.overmind.tech/docs/sources/aws/Types/networkmanager-core-network-policy.md deleted file mode 100644 index 2dfff562..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/networkmanager-core-network-policy.md +++ /dev/null @@ -1,22 +0,0 @@ ---- -title: Networkmanager Core Network Policy -sidebar_label: networkmanager-core-network-policy ---- - -An AWS Network Manager Core Network Policy represents the set of declarative rules that describe how traffic may flow within and between the segments of an AWS Cloud WAN core network (for example, how on-premises VPNs, VPCs and Transit Gateways are connected, and which segments are allowed to communicate). Each policy is versioned and attached to a single core network, allowing you to stage, validate and apply changes safely. 
For further details see the AWS documentation: https://docs.aws.amazon.com/network-manager/latest/cloudwan/cloudwan-policy-operations.html - -**Terrafrom Mappings:** - -- `aws_networkmanager_core_network_policy.core_network_id` - -## Supported Methods - -- `GET`: Get a Networkmanager Core Network Policy by Core Network id -- ~~`LIST`~~ -- ~~`SEARCH`~~ - -## Possible Links - -### [`networkmanager-core-network`](/sources/aws/Types/networkmanager-core-network) - -Every core network policy is bound to exactly one core network; therefore, Overmind links a `networkmanager-core-network-policy` item back to the corresponding `networkmanager-core-network` to show which core network the policy governs and to make it easier to assess the blast-radius of changes. diff --git a/docs.overmind.tech/docs/sources/aws/Types/networkmanager-core-network.md b/docs.overmind.tech/docs/sources/aws/Types/networkmanager-core-network.md deleted file mode 100644 index bb9905ca..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/networkmanager-core-network.md +++ /dev/null @@ -1,27 +0,0 @@ ---- -title: Networkmanager Core Network -sidebar_label: networkmanager-core-network ---- - -An AWS Network Manager **core network** represents the logical, centrally-managed backbone created by AWS Cloud WAN. It defines the global routing fabric, network segments, and edge locations that connect your AWS Regions and on-premises sites. Once a core network is in place you can attach VPCs, VPNs, Direct Connects and third-party SD-WAN devices, and let Cloud WAN automatically propagate routes between them according to the policy you supply. -For further details see the [official documentation](https://docs.aws.amazon.com/vpc/latest/cloudwan/what-is-cloudwan.html). 
- -**Terrafrom Mappings:** - -- `aws_networkmanager_core_network.id` - -## Supported Methods - -- `GET`: Get a Networkmanager Core Network by id -- ~~`LIST`~~ -- ~~`SEARCH`~~ - -## Possible Links - -### [`networkmanager-core-network-policy`](/sources/aws/Types/networkmanager-core-network-policy) - -Every core network is governed by a **core network policy** that declares its segments, attachment permissions, and routing intent. Overmind links a `networkmanager-core-network` to its current `networkmanager-core-network-policy` so that you can inspect or diff the policy that is actively controlling the network. - -### [`networkmanager-connect-peer`](/sources/aws/Types/networkmanager-connect-peer) - -A **Connect peer** represents a GRE/BGP session that terminates on a Connect attachment belonging to a core network. Overmind exposes this link to show which Connect peers (and therefore which on-premises routers) are logically attached to the given core network. diff --git a/docs.overmind.tech/docs/sources/aws/Types/networkmanager-device.md b/docs.overmind.tech/docs/sources/aws/Types/networkmanager-device.md deleted file mode 100644 index b96cef18..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/networkmanager-device.md +++ /dev/null @@ -1,39 +0,0 @@ ---- -title: Networkmanager Device -sidebar_label: networkmanager-device ---- - -An AWS Network Manager Device represents a physical or virtual network appliance (e.g. router, firewall, SD-WAN box, software VPN endpoint) that you register with a Global Network in AWS Network Manager. Once registered, the device becomes a first-class object that can be linked to Sites, Links and Connections, allowing you to model and monitor your entire hybrid network topology in AWS. 
-For full details see the AWS API reference: https://docs.aws.amazon.com/networkmanager/latest/APIReference/API_Device.html - -**Terrafrom Mappings:** - -- `aws_networkmanager_device.arn` - -## Supported Methods - -- `GET`: Get a Networkmanager Device -- ~~`LIST`~~ -- `SEARCH`: Search for Networkmanager Devices by GlobalNetworkId, `{GlobalNetworkId|SiteId}` or ARN - -## Possible Links - -### [`networkmanager-global-network`](/sources/aws/Types/networkmanager-global-network) - -A device is always created inside a single Global Network. This link shows which Global Network the device belongs to so you can understand its administrative domain. - -### [`networkmanager-site`](/sources/aws/Types/networkmanager-site) - -Each device is associated with one Site (for example, a particular data centre or branch office). The link reveals the physical location context of the device. - -### [`networkmanager-link-association`](/sources/aws/Types/networkmanager-link-association) - -A device can have one or more Link Associations that describe the physical or logical circuits (Links) terminating on that device. Following this link surfaces the underlying connectivity for the device. - -### [`networkmanager-connection`](/sources/aws/Types/networkmanager-connection) - -Connections model the logical relationship between two devices. This link lists all point-to-point or multi-point Connections in which the device participates. - -### [`networkmanager-network-resource-relationship`](/sources/aws/Types/networkmanager-network-resource-relationship) - -This link captures any additional resource relationships (for example, Transit Gateway attachments or VPNs) that reference the device, providing a holistic view of dependencies and potential blast-radius. 
diff --git a/docs.overmind.tech/docs/sources/aws/Types/networkmanager-global-network.md b/docs.overmind.tech/docs/sources/aws/Types/networkmanager-global-network.md deleted file mode 100644 index af31ebf5..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/networkmanager-global-network.md +++ /dev/null @@ -1,51 +0,0 @@ ---- -title: Network Manager Global Network -sidebar_label: networkmanager-global-network ---- - -An AWS Network Manager Global Network is the top-level container that represents your organisation’s private global network within AWS. It groups together sites, on-premises devices, AWS Transit Gateways, and the connections between them, allowing you to view and manage the entire topology from a single place. You must create a global network before you can register any resources with Network Manager. -Official documentation: https://docs.aws.amazon.com/networkmanager/latest/APIReference/API_GlobalNetwork.html - -**Terrafrom Mappings:** - -- `aws_networkmanager_global_network.arn` - -## Supported Methods - -- `GET`: Get a global network by id -- `LIST`: List all global networks -- `SEARCH`: Search for a global network by ARN - -## Possible Links - -### [`networkmanager-site`](/sources/aws/Types/networkmanager-site) - -A Site is created inside a Global Network. Each `networkmanager-site` record therefore links back to the Global Network that owns it. - -### [`networkmanager-transit-gateway-registration`](/sources/aws/Types/networkmanager-transit-gateway-registration) - -Transit Gateways must be registered with a specific Global Network before they can be visualised or managed by Network Manager. These registration objects reference the parent Global Network. - -### [`networkmanager-connect-peer-association`](/sources/aws/Types/networkmanager-connect-peer-association) - -A Connect Peer Association represents the attachment of a Connect peer to a Global Network. The association record points to the Global Network in which the peer is enrolled. 
- -### [`networkmanager-transit-gateway-connect-peer-association`](/sources/aws/Types/networkmanager-transit-gateway-connect-peer-association) - -Similar to the above, but for Transit Gateway Connect peers. The association is made within the scope of a single Global Network. - -### [`networkmanager-network-resource-relationship`](/sources/aws/Types/networkmanager-network-resource-relationship) - -This type models relationships between any two resources (devices, links, TGWs, etc.) that are part of the same Global Network. Each relationship object is tied to the Global Network it belongs to. - -### [`networkmanager-link`](/sources/aws/Types/networkmanager-link) - -Links represent the physical or logical connections at a Site and, by extension, sit within the Global Network that the Site is part of. - -### [`networkmanager-device`](/sources/aws/Types/networkmanager-device) - -Devices (routers, switches, firewalls, etc.) are registered to Sites, and consequently to the parent Global Network. Each device record references its Global Network identifier. - -### [`networkmanager-connection`](/sources/aws/Types/networkmanager-connection) - -Connections join two Devices over one or more Links inside a Global Network. The connection object therefore includes the Global Network ID to denote its scope. 
diff --git a/docs.overmind.tech/docs/sources/aws/Types/networkmanager-link-association.md b/docs.overmind.tech/docs/sources/aws/Types/networkmanager-link-association.md deleted file mode 100644 index 14b54762..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/networkmanager-link-association.md +++ /dev/null @@ -1,29 +0,0 @@ ---- -title: Networkmanager LinkAssociation -sidebar_label: networkmanager-link-association ---- - -A Network Manager **Link Association** represents the attachment of a specific physical or logical network **link** (for example, a DIA, MPLS or broadband circuit) to a **device** (such as a router, firewall, SD-WAN appliance) that resides at a site in an AWS Cloud WAN / Network Manager **global network**. -Each association records which device terminates the link, the site it belongs to, bandwidth details and the operational state of that attachment. -Official AWS documentation: -https://docs.aws.amazon.com/networkmanager/latest/APIReference/API_LinkAssociation.html - -## Supported Methods - -- `GET`: Get a Networkmanager Link Association -- ~~`LIST`~~ -- `SEARCH`: Search for Networkmanager Link Associations by GlobalNetworkId and DeviceId or LinkId - -## Possible Links - -### [`networkmanager-global-network`](/sources/aws/Types/networkmanager-global-network) - -Every Link Association is scoped to exactly one Global Network; the GlobalNetworkId is part of the composite key for the association. Following this link lets you see all other resources (sites, devices, links, transit gateways, etc.) that belong to the same overarching global network. - -### [`networkmanager-link`](/sources/aws/Types/networkmanager-link) - -The association couples a device to a particular LinkId. Traversing this link shows the underlying circuit or connectivity object that is being attached, along with its provider, bandwidth and cost details. 
- -### [`networkmanager-device`](/sources/aws/Types/networkmanager-device) - -The DeviceId in the association identifies the hardware or virtual appliance that terminates the link. Navigating this link reveals the device’s interfaces, status and any other links or connections it participates in. diff --git a/docs.overmind.tech/docs/sources/aws/Types/networkmanager-link.md b/docs.overmind.tech/docs/sources/aws/Types/networkmanager-link.md deleted file mode 100644 index 2c44c83a..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/networkmanager-link.md +++ /dev/null @@ -1,35 +0,0 @@ ---- -title: Networkmanager Link -sidebar_label: networkmanager-link ---- - -An AWS Network Manager **Link** represents a physical or logical connection (for example, an MPLS circuit, Direct Connect connection, broadband, or internet link) that provides connectivity at a specific site within a global network. Links are used by Network Manager to calculate network health, aggregate telemetry and visualise topology. Each link is created inside a Site, and therefore inside a Global Network, and can later be associated with one or more network devices. -Official documentation: https://docs.aws.amazon.com/networkmanager/latest/APIReference/API_Link.html - -**Terrafrom Mappings:** - -- `aws_networkmanager_link.arn` - -## Supported Methods - -- `GET`: Get a Networkmanager Link -- ~~`LIST`~~ -- `SEARCH`: Search for Networkmanager Links by GlobalNetworkId, GlobalNetworkId with SiteId, or ARN - -## Possible Links - -### [`networkmanager-global-network`](/sources/aws/Types/networkmanager-global-network) - -A Link is a component of a single Global Network; this edge points from the Link to the Global Network that owns it. - -### [`networkmanager-link-association`](/sources/aws/Types/networkmanager-link-association) - -A Link can be associated with one or more devices. These associations are represented by Network Manager Link Association resources, which reference the Link as their parent. 
- -### [`networkmanager-site`](/sources/aws/Types/networkmanager-site) - -Every Link resides in exactly one Site; this relationship shows which Site the Link belongs to. - -### [`networkmanager-network-resource-relationship`](/sources/aws/Types/networkmanager-network-resource-relationship) - -Network Manager records discovered relationships between Links and other network resources (for example, AWS Transit Gateway attachments). This edge captures those discovered `network-resource-relationship` objects that involve the Link. diff --git a/docs.overmind.tech/docs/sources/aws/Types/networkmanager-network-resource-relationship.md b/docs.overmind.tech/docs/sources/aws/Types/networkmanager-network-resource-relationship.md deleted file mode 100644 index 3c38c328..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/networkmanager-network-resource-relationship.md +++ /dev/null @@ -1,43 +0,0 @@ ---- -title: Networkmanager Network Resource Relationships -sidebar_label: networkmanager-network-resource-relationship ---- - -Represents an association between two AWS Network Manager resources within a single Global Network. A Network Resource Relationship records how different components—such as devices, links, connections and Direct Connect objects—are connected, enabling topology visualisation and impact analysis. Each relationship object identifies a **source resource**, a **destination resource**, and the **type of relationship** (for example `CONNECTED_TO` or `CHILD_OF`). 
-For full field-level details see the AWS API reference: https://docs.aws.amazon.com/networkmanager/latest/APIReference/API_NetworkResourceRelationship.html - -## Supported Methods - -- ~~`GET`~~ -- ~~`LIST`~~ -- `SEARCH`: Search for Networkmanager NetworkResourceRelationships by GlobalNetworkId - -## Possible Links - -### [`networkmanager-connection`](/sources/aws/Types/networkmanager-connection) - -A Network Manager connection (for example a VPN or Transit Gateway attachment) can appear as either the **source** or **destination** in a relationship, indicating that it is logically connected to another resource—most commonly a site, device or Direct Connect virtual interface. - -### [`networkmanager-device`](/sources/aws/Types/networkmanager-device) - -Devices (routers, firewalls or SD-WAN appliances) are frequently linked to links and connections. When a device participates in a relationship, the record shows which link it uses or which connection terminates on the device. - -### [`networkmanager-link`](/sources/aws/Types/networkmanager-link) - -A link represents physical or logical connectivity (for example an MPLS circuit). Relationships illustrate which device, site or Direct Connect virtual interface is using, or is reached through, a given link. - -### [`networkmanager-site`](/sources/aws/Types/networkmanager-site) - -Site resources group devices and links. Relationships referencing a site capture a **CHILD_OF** type association, showing that a particular device or link belongs to, or is located within, the site. - -### [`directconnect-connection`](/sources/aws/Types/directconnect-connection) - -Direct Connect connections are mapped into the global network; relationships show how a Direct Connect line is attached to a Network Manager link or gateway, providing visibility of dedicated connectivity paths. 
- -### [`directconnect-direct-connect-gateway`](/sources/aws/Types/directconnect-direct-connect-gateway) - -When a Direct Connect gateway is part of a global network, relationships identify which connections or virtual interfaces are routed through the gateway, enabling you to trace traffic flows. - -### [`directconnect-virtual-interface`](/sources/aws/Types/directconnect-virtual-interface) - -Virtual interfaces (private, public or transit) may be related to Direct Connect connections, gateways or Network Manager links. The relationship clarifies which physical connection a VIF is presented on and how it integrates with the wider network. diff --git a/docs.overmind.tech/docs/sources/aws/Types/networkmanager-site-to-site-vpn-attachment.md b/docs.overmind.tech/docs/sources/aws/Types/networkmanager-site-to-site-vpn-attachment.md deleted file mode 100644 index 5a3f8b7c..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/networkmanager-site-to-site-vpn-attachment.md +++ /dev/null @@ -1,23 +0,0 @@ ---- -title: Networkmanager Site To Site Vpn Attachment -sidebar_label: networkmanager-site-to-site-vpn-attachment ---- - -A Network Manager Site-to-Site VPN attachment represents the connection of an AWS Site-to-Site VPN to an AWS Cloud WAN / Network Manager core network. By creating this attachment you allow traffic from a remote on-premises site, carried over an IPsec VPN tunnel, to be routed through the core network alongside other AWS and on-premises connections. -Further information can be found in the [official AWS documentation](https://docs.aws.amazon.com/networkmanager/latest/APIReference/API_SiteToSiteVpnAttachment.html). 
- -**Terrafrom Mappings:** - -- `aws_networkmanager_site_to_site_vpn_attachment.id` - -## Supported Methods - -- `GET`: Get a Networkmanager Site To Site Vpn Attachment by id -- ~~`LIST`~~ -- ~~`SEARCH`~~ - -## Possible Links - -### [`networkmanager-core-network`](/sources/aws/Types/networkmanager-core-network) - -Each Site-to-Site VPN attachment is created inside a single core network, so the attachment item is linked to the `networkmanager-core-network` that owns it. diff --git a/docs.overmind.tech/docs/sources/aws/Types/networkmanager-site.md b/docs.overmind.tech/docs/sources/aws/Types/networkmanager-site.md deleted file mode 100644 index b905c942..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/networkmanager-site.md +++ /dev/null @@ -1,30 +0,0 @@ ---- -title: Networkmanager Site -sidebar_label: networkmanager-site ---- - -An AWS Network Manager **Site** represents a real-world location—such as a corporate office, data centre or colocation facility—that forms part of an organisation’s Global Network. It provides the context in which devices and network links are deployed, enabling AWS Network Manager to map physical geography to logical connectivity. For a full description of the resource and its attributes, see the official AWS documentation: https://docs.aws.amazon.com/networkmanager/latest/APIReference/API_Site.html - -**Terrafrom Mappings:** - -- `aws_networkmanager_site.arn` - -## Supported Methods - -- `GET`: Get a Networkmanager Site -- ~~`LIST`~~ -- `SEARCH`: Search for Networkmanager Sites by GlobalNetworkId or Site ARN - -## Possible Links - -### [`networkmanager-global-network`](/sources/aws/Types/networkmanager-global-network) - -A Site is always created within a single Global Network. The `GlobalNetworkId` on the Site identifies its parent `networkmanager-global-network`, forming a one-to-many relationship (one Global Network, many Sites). 
- -### [`networkmanager-link`](/sources/aws/Types/networkmanager-link) - -Links represent individual network connections (e.g., MPLS, broadband) that terminate at a Site. Each `networkmanager-link` includes the `SiteId` of the Site where the connection is installed, so multiple Links can be related to one Site. - -### [`networkmanager-device`](/sources/aws/Types/networkmanager-device) - -Devices such as routers, firewalls or SD-WAN appliances are housed at a Site. Every `networkmanager-device` records the `SiteId` where it resides, creating a one-to-many relationship between a Site and its Devices. diff --git a/docs.overmind.tech/docs/sources/aws/Types/networkmanager-transit-gateway-connect-peer-association.md b/docs.overmind.tech/docs/sources/aws/Types/networkmanager-transit-gateway-connect-peer-association.md deleted file mode 100644 index f74b6ec8..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/networkmanager-transit-gateway-connect-peer-association.md +++ /dev/null @@ -1,29 +0,0 @@ ---- -title: Networkmanager Transit Gateway Connect Peer Association -sidebar_label: networkmanager-transit-gateway-connect-peer-association ---- - -A Network Manager Transit Gateway Connect Peer Association represents the connection between an AWS Transit Gateway Connect peer (a GRE tunnel endpoint created as part of a Transit Gateway Connect attachment) and a site that you have modelled inside AWS Network Manager. -The object records which Global Network the peer belongs to and, optionally, which on-premises device and physical/virtual link it should be mapped to. Maintaining this mapping allows Network Manager to draw accurate topology diagrams and to include the GRE tunnel in route analytics, performance monitoring, and policy assessments. 
- -Official documentation: https://docs.aws.amazon.com/networkmanager/latest/APIReference/API_TransitGatewayConnectPeerAssociation.html - -## Supported Methods - -- `GET`: Get a Networkmanager Transit Gateway Connect Peer Association by id -- `LIST`: List all Networkmanager Transit Gateway Connect Peer Associations -- `SEARCH`: Search for Networkmanager Transit Gateway Connect Peer Associations by GlobalNetworkId - -## Possible Links - -### [`networkmanager-global-network`](/sources/aws/Types/networkmanager-global-network) - -Every Transit Gateway Connect Peer Association is scoped to a single Global Network. The `GlobalNetworkId` on the association points to the corresponding `networkmanager-global-network` item, indicating which overall corporate network the peer is part of. - -### [`networkmanager-device`](/sources/aws/Types/networkmanager-device) - -The association can specify a `DeviceId` to indicate the on-premises or edge device (for example, a customer router or firewall) that terminates the GRE tunnel. Linking to the `networkmanager-device` item shows where the peer logically lands in your topology. - -### [`networkmanager-link`](/sources/aws/Types/networkmanager-link) - -If the Connect peer is tied to a particular circuit, VLAN, or VPN link at the site, the association includes a `LinkId`. This links the peer to a `networkmanager-link` item, allowing you to trace the physical or logical connectivity that underpins the GRE tunnel. 
diff --git a/docs.overmind.tech/docs/sources/aws/Types/networkmanager-transit-gateway-peering.md b/docs.overmind.tech/docs/sources/aws/Types/networkmanager-transit-gateway-peering.md deleted file mode 100644 index 3ef718ca..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/networkmanager-transit-gateway-peering.md +++ /dev/null @@ -1,23 +0,0 @@ ---- -title: Networkmanager Transit Gateway Peering -sidebar_label: networkmanager-transit-gateway-peering ---- - -An AWS Network Manager Transit Gateway Peering represents a peering attachment between an AWS Cloud WAN _core network_ and an existing AWS Transit Gateway (TGW). Creating this peering allows traffic to flow transparently between VPCs or on-premises networks connected to the Transit Gateway and the segments that make up the Cloud WAN core network, extending the reach of both fabrics without the need for additional VPNs or direct-connect links. -For more information, see the [AWS documentation](https://docs.aws.amazon.com/networkmanager/latest/APIReference/API_TransitGatewayPeering.html). - -**Terrafrom Mappings:** - -- `aws_networkmanager_transit_gateway_peering.id` - -## Supported Methods - -- `GET`: Get a Networkmanager Transit Gateway Peering by id -- ~~`LIST`~~ -- ~~`SEARCH`~~ - -## Possible Links - -### [`networkmanager-core-network`](/sources/aws/Types/networkmanager-core-network) - -Every Transit Gateway Peering is created **within** a specific Cloud WAN core network; the core network is the logical container that owns the peering attachment. Consequently, querying a `networkmanager-core-network` item allows you to enumerate or drill down to its associated Transit Gateway Peerings, and conversely, each Transit Gateway Peering stores the identifier of the core network it belongs to. 
diff --git a/docs.overmind.tech/docs/sources/aws/Types/networkmanager-transit-gateway-registration.md b/docs.overmind.tech/docs/sources/aws/Types/networkmanager-transit-gateway-registration.md deleted file mode 100644 index d1f19264..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/networkmanager-transit-gateway-registration.md +++ /dev/null @@ -1,18 +0,0 @@ ---- -title: Networkmanager Transit Gateway Registrations -sidebar_label: networkmanager-transit-gateway-registration ---- - -A Network Manager Transit Gateway Registration represents the association of an AWS Transit Gateway with an AWS Network Manager Global Network. By registering a Transit Gateway, you enable Network Manager to map its attachments, monitor routing changes and performance, and include the gateway in your overall network topology visualisation. For more information, see the official AWS documentation: https://docs.aws.amazon.com/vpc/latest/tgw/register-transit-gateway.html - -## Supported Methods - -- `GET`: Get a Networkmanager Transit Gateway Registrations -- `LIST`: List all Networkmanager Transit Gateway Registrations -- `SEARCH`: Search for Networkmanager Transit Gateway Registrations by GlobalNetworkId - -## Possible Links - -### [`networkmanager-global-network`](/sources/aws/Types/networkmanager-global-network) - -A Transit Gateway registration is always scoped to, and therefore linked with, a single Network Manager Global Network. This link indicates the parent Global Network that owns the registration, allowing Overmind to traverse from the high-level network to the individual Transit Gateway associations. 
diff --git a/docs.overmind.tech/docs/sources/aws/Types/networkmanager-transit-gateway-route-table-attachment.md b/docs.overmind.tech/docs/sources/aws/Types/networkmanager-transit-gateway-route-table-attachment.md deleted file mode 100644 index 779efbdd..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/networkmanager-transit-gateway-route-table-attachment.md +++ /dev/null @@ -1,27 +0,0 @@ ---- -title: Networkmanager Transit Gateway Route Table Attachment -sidebar_label: networkmanager-transit-gateway-route-table-attachment ---- - -The Network Manager Transit Gateway Route Table Attachment represents the binding between an AWS Transit Gateway (TGW) route table and an AWS Cloud WAN (Network Manager Core Network) segment. Creating this attachment allows routes that exist in the TGW route table to be advertised into the Cloud WAN segment and, conversely, permits segment routes to be propagated to the TGW. In effect, it provides a controlled integration point between an existing TGW-based topology and a Cloud WAN fabric. -Official API documentation: https://docs.aws.amazon.com/networkmanager/latest/APIReference/API_CreateTransitGatewayRouteTableAttachment.html - -**Terrafrom Mappings:** - -- `aws_networkmanager_transit_gateway_route_table_attachment.id` - -## Supported Methods - -- `GET`: Get a Networkmanager Transit Gateway Route Table Attachment by id -- ~~`LIST`~~ -- ~~`SEARCH`~~ - -## Possible Links - -### [`networkmanager-core-network`](/sources/aws/Types/networkmanager-core-network) - -Every Transit Gateway Route Table Attachment is created inside a specific Core Network and targets one of its segments. Therefore, the attachment is a child resource of the Core Network and inherits its administrative domain and policy constraints. - -### [`networkmanager-transit-gateway-peering`](/sources/aws/Types/networkmanager-transit-gateway-peering) - -Before a TGW route table can be attached, a Transit Gateway Peering must exist between the TGW and the Core Network. 
The attachment references that peering to determine the underlay connection over which route exchange will occur. diff --git a/docs.overmind.tech/docs/sources/aws/Types/networkmanager-vpc-attachment.md b/docs.overmind.tech/docs/sources/aws/Types/networkmanager-vpc-attachment.md deleted file mode 100644 index cf4ac041..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/networkmanager-vpc-attachment.md +++ /dev/null @@ -1,23 +0,0 @@ ---- -title: Networkmanager VPC Attachment -sidebar_label: networkmanager-vpc-attachment ---- - -A Network Manager VPC attachment represents the logical link between an Amazon Virtual Private Cloud (VPC) and an AWS Cloud WAN / Network Manager **core network**. By creating an attachment you allow the sub-nets inside the VPC to participate in the global routing domain managed by Network Manager, making it possible for traffic to reach other VPCs, on-premises networks, or SD-WAN devices that are also attached to the same core network. -For a detailed explanation of the resource and its properties, see the [official AWS documentation](https://docs.aws.amazon.com/vpc/latest/cloudwan/what-is-cloudwan.html). - -**Terrafrom Mappings:** - -- `aws_networkmanager_vpc_attachment.id` - -## Supported Methods - -- `GET`: Get a Networkmanager VPC Attachment by id -- ~~`LIST`~~ -- ~~`SEARCH`~~ - -## Possible Links - -### [`networkmanager-core-network`](/sources/aws/Types/networkmanager-core-network) - -Every VPC attachment is created inside a specific core network and inherits its routing policies. The `core_network_id` field on the attachment identifies that parent, so Overmind can follow this link to reveal the wider network fabric that the VPC will join. 
diff --git a/docs.overmind.tech/docs/sources/aws/Types/rds-db-cluster-parameter-group.md b/docs.overmind.tech/docs/sources/aws/Types/rds-db-cluster-parameter-group.md deleted file mode 100644 index 643876bf..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/rds-db-cluster-parameter-group.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -title: RDS Cluster Parameter Group -sidebar_label: rds-db-cluster-parameter-group ---- - -An RDS Cluster Parameter Group is a named collection of engine configuration values that are applied to every instance within an Amazon RDS or Aurora DB cluster. By adjusting the parameters in the group you can fine-tune settings such as memory management, logging, and query optimisation, and have those settings propagated consistently across the cluster. If you do not specify a custom group when you create a cluster, AWS assigns the default engine-specific parameter group. For details, see the AWS documentation: https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_WorkingWithParamGroups.html. - -**Terrafrom Mappings:** - -- `aws_rds_cluster_parameter_group.arn` - -## Supported Methods - -- `GET`: Get a parameter group by name -- `LIST`: List all RDS parameter groups -- `SEARCH`: Search for a parameter group by ARN diff --git a/docs.overmind.tech/docs/sources/aws/Types/rds-db-cluster.md b/docs.overmind.tech/docs/sources/aws/Types/rds-db-cluster.md deleted file mode 100644 index d9e128c8..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/rds-db-cluster.md +++ /dev/null @@ -1,51 +0,0 @@ ---- -title: RDS Cluster -sidebar_label: rds-db-cluster ---- - -Amazon Relational Database Service (RDS) Clusters provide a managed, highly-available relational database running on multiple Availability Zones. An RDS Cluster contains one or more database instances that share storage, backups, and endpoints, and can be configured for automatic fail-over and read-scaling. 
Aurora MySQL and Aurora PostgreSQL engines run exclusively within clusters, while other engines (e.g. MySQL, PostgreSQL) can participate in global database topologies through cluster links. -Official documentation: https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/CHAP_AuroraOverview.html - -**Terrafrom Mappings:** - -- `aws_rds_cluster.cluster_identifier` - -## Supported Methods - -- `GET`: Get a cluster by identifier -- `LIST`: List all RDS clusters -- `SEARCH`: Search for a cluster by ARN - -## Possible Links - -### [`rds-db-subnet-group`](/sources/aws/Types/rds-db-subnet-group) - -Each RDS Cluster is associated with a DB subnet group that defines the set of subnets (and therefore Availability Zones) in which its instances can run. - -### [`dns`](/sources/stdlib/Types/dns) - -The cluster exposes an endpoint such as `mycluster.cluster-123456789012.eu-west-2.rds.amazonaws.com`; this hostname is represented as a DNS record linked to the cluster. - -### [`rds-db-cluster`](/sources/aws/Types/rds-db-cluster) - -Clusters can reference other clusters as replication sources or targets (e.g. in an Aurora global database), creating a dependency link between the participating RDS clusters. - -### [`ec2-security-group`](/sources/aws/Types/ec2-security-group) - -Traffic to and from the cluster’s instances is controlled by one or more EC2 security groups attached to the cluster. - -### [`route53-hosted-zone`](/sources/aws/Types/route53-hosted-zone) - -Organisations often create Route 53 records (A/AAAA or CNAME) in their hosted zones to provide friendly names for the cluster endpoint, linking the hosted zone to the RDS Cluster. - -### [`kms-key`](/sources/aws/Types/kms-key) - -If storage encryption is enabled, the cluster uses a customer-managed or AWS-managed KMS key; compromising or deleting the key will render the data inaccessible. 
- -### [`rds-option-group`](/sources/aws/Types/rds-option-group) - -Certain engines allow additional features to be enabled via option groups (e.g. Oracle options); a cluster may reference an option group to configure those extensions. - -### [`iam-role`](/sources/aws/Types/iam-role) - -An RDS Cluster can assume IAM roles for tasks such as exporting snapshots to S3, publishing logs to CloudWatch, or accessing AWS services like Kinesis; these roles are linked resources. diff --git a/docs.overmind.tech/docs/sources/aws/Types/rds-db-instance.md b/docs.overmind.tech/docs/sources/aws/Types/rds-db-instance.md deleted file mode 100644 index 7f02149b..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/rds-db-instance.md +++ /dev/null @@ -1,55 +0,0 @@ ---- -title: RDS Instance -sidebar_label: rds-db-instance ---- - -Amazon Relational Database Service (RDS) DB instances are the managed compute and storage resources that run your relational database engines in AWS. An instance encapsulates the underlying virtual hardware, disk, network interfaces, and database server software that form a single, addressable database node. Full service description and behaviour are documented in the AWS RDS User Guide: https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Overview.DBInstance.html - -**Terrafrom Mappings:** - -- `aws_db_instance.identifier` -- `aws_db_instance_role_association.db_instance_identifier` - -## Supported Methods - -- `GET`: Get an instance by ID -- `LIST`: List all instances -- `SEARCH`: Search for instances by ARN - -## Possible Links - -### [`dns`](/sources/stdlib/Types/dns) - -Every RDS instance exposes an endpoint such as `mydb.abc123.eu-west-2.rds.amazonaws.com`. Overmind links the instance to the corresponding DNS record so you can trace how applications resolve and reach the database. 
- -### [`route53-hosted-zone`](/sources/aws/Types/route53-hosted-zone) - -The automatically-created DNS record for an RDS endpoint lives inside an AWS-managed Route 53 hosted zone, and private zones in your account may contain CNAMEs pointing to it. Overmind surfaces these zones to show where the endpoint is published and overridden. - -### [`ec2-security-group`](/sources/aws/Types/ec2-security-group) - -In a VPC, an RDS instance is attached to one or more security groups that define its inbound and outbound traffic rules. These links let you audit which networks and EC2 instances are permitted to reach the database. - -### [`rds-db-parameter-group`](/sources/aws/Types/rds-db-parameter-group) - -A DB parameter group controls engine-level configuration such as `max_connections` or `log_min_duration_statement`. Each instance references exactly one parameter group (or the default), so Overmind links them for configuration drift and compliance checks. - -### [`rds-db-subnet-group`](/sources/aws/Types/rds-db-subnet-group) - -The subnet group lists the subnets (and therefore the AZs) where the instance may be placed. Linking highlights the network reachability and resiliency zone choices for the database. - -### [`rds-db-cluster`](/sources/aws/Types/rds-db-cluster) - -For Aurora and other clustered engines, individual DB instances are members of an RDS DB cluster. Overmind links them so you can see the relationship between writer/reader nodes and the cluster-level endpoints. - -### [`kms-key`](/sources/aws/Types/kms-key) - -When storage encryption is enabled, an RDS instance uses an AWS KMS key to encrypt its underlying EBS volumes and snapshots. The link shows which key protects the data and who can decrypt it. - -### [`iam-role`](/sources/aws/Types/iam-role) - -Features such as S3 import/export, AWS Lambda integration, and CloudWatch Logs require the database service to assume an IAM service role. 
Overmind lists these roles so you can review permissions the database can exercise in your account. - -### [`iam-instance-profile`](/sources/aws/Types/iam-instance-profile) - -RDS Custom instances (and certain on-host integrations) run on dedicated EC2 instances within your account and therefore use an IAM instance profile. If present, Overmind links the profile to reveal any additional permissions granted to the underlying host. diff --git a/docs.overmind.tech/docs/sources/aws/Types/rds-db-parameter-group.md b/docs.overmind.tech/docs/sources/aws/Types/rds-db-parameter-group.md deleted file mode 100644 index ad921286..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/rds-db-parameter-group.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -title: RDS Parameter Group -sidebar_label: rds-db-parameter-group ---- - -An Amazon RDS DB parameter group is a container for engine configuration values that determine how a database instance or cluster behaves. By attaching a parameter group to one or more RDS resources you override the engine’s built-in defaults with your own settings, allowing you to tune performance, security and operational behaviour. Changes made to the group are propagated to every associated instance; static parameters take effect after the next reboot, while dynamic parameters may apply immediately. -For a full explanation see the official AWS documentation: [Working with DB parameter groups](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_WorkingWithParamGroups.html). 
- -**Terrafrom Mappings:** - -- `aws_db_parameter_group.arn` - -## Supported Methods - -- `GET`: Get a parameter group by name -- `LIST`: List all parameter groups -- `SEARCH`: Search for a parameter group by ARN diff --git a/docs.overmind.tech/docs/sources/aws/Types/rds-db-subnet-group.md b/docs.overmind.tech/docs/sources/aws/Types/rds-db-subnet-group.md deleted file mode 100644 index 011d6ec6..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/rds-db-subnet-group.md +++ /dev/null @@ -1,27 +0,0 @@ ---- -title: RDS Subnet Group -sidebar_label: rds-db-subnet-group ---- - -An RDS DB subnet group is a named collection of one or more subnets that belong to a single Amazon VPC. When you create an Amazon RDS DB instance in a VPC, the subnet group tells RDS which subnets, and therefore which Availability Zones, it may use to provision and maintain the instance. Subnet groups are essential for ensuring high availability and proper network isolation of database workloads. -For full details, see the AWS documentation: https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_VPC.Subnets.html - -**Terrafrom Mappings:** - -- `aws_db_subnet_group.arn` - -## Supported Methods - -- `GET`: Get a subnet group by name -- `LIST`: List all subnet groups -- `SEARCH`: Search for subnet groups by ARN - -## Possible Links - -### [`ec2-vpc`](/sources/aws/Types/ec2-vpc) - -The DB subnet group is created within exactly one VPC; its subnets must all belong to this VPC, so the group inherits the VPC’s routing and network-security boundaries. - -### [`ec2-subnet`](/sources/aws/Types/ec2-subnet) - -A DB subnet group is a container for multiple EC2 subnets, typically spanning at least two Availability Zones. Each listed subnet in the group contributes one possible placement zone for RDS DB instances. 
diff --git a/docs.overmind.tech/docs/sources/aws/Types/rds-option-group.md b/docs.overmind.tech/docs/sources/aws/Types/rds-option-group.md deleted file mode 100644 index 57217f31..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/rds-option-group.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -title: RDS Option Group -sidebar_label: rds-option-group ---- - -An Amazon Relational Database Service (RDS) Option Group is a logical container that lets you enable and configure additional features—known as “options”—for an RDS DB instance or cluster. Typical options include Oracle Transparent Data Encryption, SQL Server Audit, MariaDB Audit Plugin and many others that are not activated by default with the engine. By assigning an option group to one or more databases you ensure that each instance inherits the same, centrally-managed configuration, simplifying governance and compliance. -For complete details see the official AWS documentation: https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_WorkingWithOptionGroups.html - -**Terrafrom Mappings:** - -- `aws_db_option_group.arn` - -## Supported Methods - -- `GET`: Get an option group by name -- `LIST`: List all RDS option groups -- `SEARCH`: Search for an option group by ARN diff --git a/docs.overmind.tech/docs/sources/aws/Types/route53-health-check.md b/docs.overmind.tech/docs/sources/aws/Types/route53-health-check.md deleted file mode 100644 index a73f438b..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/route53-health-check.md +++ /dev/null @@ -1,23 +0,0 @@ ---- -title: Route53 Health Check -sidebar_label: route53-health-check ---- - -Amazon Route 53 health checks continuously monitor the availability and latency of your application endpoints (such as web servers, API gateways or other resources) and can automatically trigger DNS fail-over when an endpoint becomes unhealthy. 
Each health check can also be configured to integrate with Amazon CloudWatch, enabling alerting and automation based on the current health state. -For full details, refer to the official AWS documentation: [Amazon Route 53 Health Checks](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover.html). - -**Terrafrom Mappings:** - -- `aws_route53_health_check.id` - -## Supported Methods - -- `GET`: Get health check by ID -- `LIST`: List all health checks -- `SEARCH`: Search for health checks by ARN - -## Possible Links - -### [`cloudwatch-alarm`](/sources/aws/Types/cloudwatch-alarm) - -A CloudWatch alarm can be created that uses the `HealthCheckStatus` metric emitted for a specific Route 53 health check. This allows the alarm to publish notifications or trigger automated responses whenever the health check reports an unhealthy or healthy state. Overmind therefore records a link from a Route 53 health check to any CloudWatch alarms that reference its ID so you can immediately see which alarms will fire if the check changes status. diff --git a/docs.overmind.tech/docs/sources/aws/Types/route53-hosted-zone.md b/docs.overmind.tech/docs/sources/aws/Types/route53-hosted-zone.md deleted file mode 100644 index b8afe159..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/route53-hosted-zone.md +++ /dev/null @@ -1,25 +0,0 @@ ---- -title: Hosted Zone -sidebar_label: route53-hosted-zone ---- - -An Amazon Route 53 hosted zone is a container for all of the DNS records that belong to a single domain (for example `example.com`) or a sub-domain. It represents a DNS namespace within Route 53 and is the primary object you create when you want AWS to answer queries for your domain. Hosted zones can be public (resolving queries on the public Internet) or private (resolving only within one or more associated VPCs), and support advanced features such as DNSSEC signing and alias records to AWS resources. 
-For full details see the AWS documentation: https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/hosted-zones-working-with.html - -**Terrafrom Mappings:** - -- `aws_route53_hosted_zone_dnssec.id` -- `aws_route53_zone.zone_id` -- `aws_route53_zone_association.zone_id` - -## Supported Methods - -- `GET`: Get a hosted zone by ID -- `LIST`: List all hosted zones -- `SEARCH`: Search for a hosted zone by ARN - -## Possible Links - -### [`route53-resource-record-set`](/sources/aws/Types/route53-resource-record-set) - -Each hosted zone contains one or more resource record sets. Overmind establishes a link from a Hosted Zone item to the `route53-resource-record-set` items that reside within it, allowing you to explore every DNS record that will be created, modified or deleted as part of a deployment. diff --git a/docs.overmind.tech/docs/sources/aws/Types/route53-resource-record-set.md b/docs.overmind.tech/docs/sources/aws/Types/route53-resource-record-set.md deleted file mode 100644 index 89aca6f3..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/route53-resource-record-set.md +++ /dev/null @@ -1,27 +0,0 @@ ---- -title: Route53 Record Set -sidebar_label: route53-resource-record-set ---- - -A Route 53 Resource Record Set represents a single DNS record (or a group of records with the same name and type) that lives inside a specific hosted zone. It defines how Amazon Route 53 answers DNS queries for the associated domain name, including the record type (A, AAAA, CNAME, MX, TXT, SRV, etc.), routing policy (simple, weighted, latency, geolocation, fail-over, multi-value, or alias), time-to-live (TTL) and, optionally, a linked health check. -For full details see the AWS documentation: https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/RRSet.html - -**Terrafrom Mappings:** - -- `aws_route53_record.arn` -- `aws_route53_record.id` - -## Supported Methods - -- `GET`: Get a resource record set. 
The ID is the concatenation of the hosted zone, name, and record type (`{hostedZone}.{name}.{type}`) -- `LIST`: List all resource record sets - -## Possible Links - -### [`dns`](/sources/stdlib/Types/dns) - -Because a Route 53 record set ultimately becomes a DNS record that can be queried on the public or private internet, each record set naturally maps to an Overmind `dns` item. Following this link lets you see the vendor-agnostic representation of the record (name, type, TTL and value) and how it is consumed by other infrastructure components. - -### [`route53-health-check`](/sources/aws/Types/route53-health-check) - -If the record set is configured with a fail-over, latency, or weighted routing policy that refers to a Route 53 health check, Overmind links the record set to that `route53-health-check` item. This shows the dependency between DNS resolution and the health status of the monitored endpoint, helping you understand how an unhealthy resource could affect name resolution. diff --git a/docs.overmind.tech/docs/sources/aws/Types/s3-bucket.md b/docs.overmind.tech/docs/sources/aws/Types/s3-bucket.md deleted file mode 100644 index afcfb3f3..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/s3-bucket.md +++ /dev/null @@ -1,55 +0,0 @@ ---- -title: S3 Bucket -sidebar_label: s3-bucket ---- - -Amazon S3 (Simple Storage Service) buckets are globally-unique containers used to store and organise objects such as files, logs and backups. Each bucket is created within a specific AWS Region, can be configured with fine-grained access controls, lifecycle rules, encryption, versioning and event notifications, and can serve as the origin for many other AWS services. 
Full service documentation is available in the AWS User Guide: https://docs.aws.amazon.com/AmazonS3/latest/userguide/Welcome.html - -**Terrafrom Mappings:** - -- `aws_s3_bucket_acl.bucket` -- `aws_s3_bucket_analytics_configuration.bucket` -- `aws_s3_bucket_cors_configuration.bucket` -- `aws_s3_bucket_intelligent_tiering_configuration.bucket` -- `aws_s3_bucket_inventory.bucket` -- `aws_s3_bucket_lifecycle_configuration.bucket` -- `aws_s3_bucket_logging.bucket` -- `aws_s3_bucket_metric.bucket` -- `aws_s3_bucket_notification.bucket` -- `aws_s3_bucket_object_lock_configuration.bucket` -- `aws_s3_bucket_object.bucket` -- `aws_s3_bucket_ownership_controls.bucket` -- `aws_s3_bucket_policy.bucket` -- `aws_s3_bucket_public_access_block.bucket` -- `aws_s3_bucket_replication_configuration.bucket` -- `aws_s3_bucket_request_payment_configuration.bucket` -- `aws_s3_bucket_server_side_encryption_configuration.bucket` -- `aws_s3_bucket_versioning.bucket` -- `aws_s3_bucket_website_configuration.bucket` -- `aws_s3_bucket.id` -- `aws_s3_object_copy.bucket` -- `aws_s3_object.bucket` - -## Supported Methods - -- `GET`: Get an S3 bucket by name -- `LIST`: List all S3 buckets -- `SEARCH`: Search for S3 buckets by ARN - -## Possible Links - -### [`lambda-function`](/sources/aws/Types/lambda-function) - -An S3 bucket can invoke Lambda functions through S3 event notifications (e.g. when an object is created, deleted or restored). Overmind surfaces this relationship so that you can identify deployment risks such as circular triggers or permissions gaps between the bucket and the associated Lambda execution role. - -### [`sqs-queue`](/sources/aws/Types/sqs-queue) - -Buckets may be configured to send event notifications to SQS queues. Overmind links the bucket to any target queue, allowing you to assess the impact of queue deletion, encryption settings or IAM policies on the integrity of the event pipeline. 
- -### [`sns-topic`](/sources/aws/Types/sns-topic) - -Similar to SQS, S3 buckets can publish object-level events to SNS topics. Overmind records this connection so you can verify that topic policies permit delivery and that message fan-out will still function after your planned changes. - -### [`s3-bucket`](/sources/aws/Types/s3-bucket) - -Buckets are often paired through cross-Region replication or configured as website redirects to one another. Overmind creates links between the source and destination buckets to highlight dependencies such as replication roles, encryption configuration compatibility and versioning status. diff --git a/docs.overmind.tech/docs/sources/aws/Types/sns-data-protection-policy.md b/docs.overmind.tech/docs/sources/aws/Types/sns-data-protection-policy.md deleted file mode 100644 index 93e4c5db..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/sns-data-protection-policy.md +++ /dev/null @@ -1,22 +0,0 @@ ---- -title: SNS Data Protection Policy -sidebar_label: sns-data-protection-policy ---- - -Amazon Simple Notification Service (SNS) is a fully managed messaging service for both application-to-application (A2A) and application-to-person (A2P) communication. SNS topics allow you to fan out messages to a large number of subscribers, including distributed systems, and serverless applications. The SNS Data Protection Policy provides a mechanism to ensure that the data transmitted through SNS is compliant with your organisational and regulatory requirements. This policy is used to define and enforce encryption, data retention, and access control practices on SNS topics. For more details, you can refer to the [official AWS SNS Data Protection documentation](https://docs.aws.amazon.com/sns/latest/dg/sns-data-encryption.html). 
- -**Terraform Mappings:** - -- `aws_sns_topic_data_protection_policy.arn` - -## Supported Methods - -- `GET`: Get an SNS data protection policy by associated topic ARN -- ~~`LIST`~~ -- `SEARCH`: Search SNS data protection policies by its ARN - -## Possible Links - -### [`sns-topic`](/sources/aws/Types/sns-topic) - -The SNS Data Protection Policy is directly related to SNS topics as it outlines the security measures and data management practices that are applied to messages sent through these topics. By associating a data protection policy with an SNS topic, users can ensure that their SNS workflows adhere to the necessary data protection and compliance standards. diff --git a/docs.overmind.tech/docs/sources/aws/Types/sns-endpoint.md b/docs.overmind.tech/docs/sources/aws/Types/sns-endpoint.md deleted file mode 100644 index 308162a5..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/sns-endpoint.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: SNS Endpoint -sidebar_label: sns-endpoint ---- - -The SNS Endpoint resource represents a single destination—typically a mobile device, browser, or desktop application instance—that can receive push notifications through Amazon Simple Notification Service (SNS). Each endpoint is created under a specific Platform Application and is identified by a unique Amazon Resource Name (ARN). Managing endpoints correctly is crucial, as inactive or mis-configured endpoints can lead to failed deliveries, increased costs, or even unwanted data exposure. 
For full details see the official AWS documentation: https://docs.aws.amazon.com/sns/latest/dg/mobile-push-send-devicetoken.html - -## Supported Methods - -- `GET`: Get an SNS endpoint by its ARN -- ~~`LIST`~~ -- `SEARCH`: Search SNS endpoints by associated Platform Application ARN diff --git a/docs.overmind.tech/docs/sources/aws/Types/sns-platform-application.md b/docs.overmind.tech/docs/sources/aws/Types/sns-platform-application.md deleted file mode 100644 index f04a1f58..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/sns-platform-application.md +++ /dev/null @@ -1,23 +0,0 @@ ---- -title: SNS Platform Application -sidebar_label: sns-platform-application ---- - -An Amazon Simple Notification Service (SNS) **platform application** represents a collection of credentials that allow SNS to send push notifications through a specific mobile push service, such as Apple APNS, Google FCM or Amazon ADM. Once you create a platform application, you can register individual mobile devices (platform endpoints) under it and publish messages that will be delivered to those devices by the relevant push provider. -For a full description see the AWS documentation: https://docs.aws.amazon.com/sns/latest/dg/mobile-push-send.html#mobile-push-sns-platform. - -**Terrafrom Mappings:** - -- `aws_sns_platform_application.id` - -## Supported Methods - -- `GET`: Get an SNS platform application by its ARN -- `LIST`: List all SNS platform applications -- `SEARCH`: Search SNS platform applications by ARN - -## Possible Links - -### [`sns-endpoint`](/sources/aws/Types/sns-endpoint) - -Each platform application can have many child **SNS platform endpoints**—one per registered device. Linking the application to its endpoints lets Overmind surface which devices are affected by configuration changes or credential mis-configurations in the parent application. 
diff --git a/docs.overmind.tech/docs/sources/aws/Types/sns-subscription.md b/docs.overmind.tech/docs/sources/aws/Types/sns-subscription.md deleted file mode 100644 index 82565304..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/sns-subscription.md +++ /dev/null @@ -1,26 +0,0 @@ ---- -title: SNS Subscription -sidebar_label: sns-subscription ---- - -An Amazon Simple Notification Service (SNS) subscription represents the association between an SNS topic and the endpoint that receives the messages published to that topic. Each subscription specifies the delivery protocol (e-mail, SMS, HTTP/S, Lambda, SQS, Firehose, etc.), the endpoint address, and optional delivery policies or filter policies that control how and when messages are delivered. For full details see the official AWS documentation: https://docs.aws.amazon.com/sns/latest/dg/sns-subscription.html - -**Terrafrom Mappings:** - -- `aws_sns_topic_subscription.id` - -## Supported Methods - -- `GET`: Get an SNS subscription by its ARN -- `LIST`: List all SNS subscriptions -- `SEARCH`: Search SNS subscription by ARN - -## Possible Links - -### [`sns-topic`](/sources/aws/Types/sns-topic) - -Every subscription belongs to exactly one SNS topic. The subscription’s ARN embeds the topic ARN, and deleting the topic automatically removes the subscription. Overmind links the subscription to its parent `sns-topic` so you can trace message flow from publisher (topic) to consumer (subscription endpoint). - -### [`iam-role`](/sources/aws/Types/iam-role) - -If the subscription delivers to an AWS resource in another account (e.g., cross-account SQS queue, Lambda function, or Kinesis Data Firehose), SNS must assume an IAM role that grants it permission to publish to that resource. Overmind links the subscription to any `iam-role` referenced in its delivery policy to help you verify that the correct cross-account permissions are in place. 
diff --git a/docs.overmind.tech/docs/sources/aws/Types/sns-topic.md b/docs.overmind.tech/docs/sources/aws/Types/sns-topic.md deleted file mode 100644 index 317b0b13..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/sns-topic.md +++ /dev/null @@ -1,22 +0,0 @@ ---- -title: SNS Topic -sidebar_label: sns-topic ---- - -An Amazon Simple Notification Service (SNS) topic is a logical access point through which publishers send messages that are then fanned-out to subscribed endpoints such as email addresses, HTTP/S webhooks, Lambda functions or SQS queues. Topics can be configured with attributes such as delivery policies, access control policies and optional server-side encryption using AWS Key Management Service (KMS). For further details refer to the official AWS documentation: https://docs.aws.amazon.com/sns/latest/dg/sns-create-topic.html - -**Terrafrom Mappings:** - -- `aws_sns_topic.id` - -## Supported Methods - -- `GET`: Get an SNS topic by its ARN -- `LIST`: List all SNS topics -- `SEARCH`: Search SNS topic by ARN - -## Possible Links - -### [`kms-key`](/sources/aws/Types/kms-key) - -If server-side encryption is enabled for the SNS topic, it references a KMS customer master key (CMK). This link allows Overmind to surface the relationship between the topic and the key that protects its message payloads in transit and at rest. diff --git a/docs.overmind.tech/docs/sources/aws/Types/sqs-queue.md b/docs.overmind.tech/docs/sources/aws/Types/sqs-queue.md deleted file mode 100644 index 14493b73..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/sqs-queue.md +++ /dev/null @@ -1,27 +0,0 @@ ---- -title: SQS Queue -sidebar_label: sqs-queue ---- - -Amazon Simple Queue Service (SQS) provides fully-managed message queues that decouple and scale micro-services, distributed systems and serverless applications. A queue acts as a buffer, reliably storing any amount of messages until they are processed and deleted by consumers. 
Two delivery modes are available – standard (at-least-once, best-effort ordering) and FIFO (exactly-once, ordered). Queues can be encrypted, configured with dead-letter queues, and integrated with other AWS services such as Lambda or SNS. -For a comprehensive description see the official AWS documentation: https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/welcome.html - -**Terrafrom Mappings:** - -- `aws_sqs_queue.id` - -## Supported Methods - -- `GET`: Get an SQS queue attributes by its URL -- `LIST`: List all SQS queue URLs -- `SEARCH`: Search SQS queue by ARN - -## Possible Links - -### [`http`](/sources/stdlib/Types/http) - -Each SQS queue is identified by an HTTPS URL of the form `https://sqs..amazonaws.com//`. Overmind represents this URL as an `http` item, so the queue is linked to the corresponding `http` item that models the endpoint used by the AWS API. - -### [`lambda-event-source-mapping`](/sources/aws/Types/lambda-event-source-mapping) - -When a Lambda function is configured with an event-source mapping that pulls messages from an SQS queue, Overmind creates a `lambda-event-source-mapping` item. The mapping item is linked to the SQS queue it reads from, allowing impact analysis when either the queue or the Lambda configuration changes. diff --git a/docs.overmind.tech/docs/sources/aws/Types/ssm-parameter.md b/docs.overmind.tech/docs/sources/aws/Types/ssm-parameter.md deleted file mode 100644 index 7f776d0f..00000000 --- a/docs.overmind.tech/docs/sources/aws/Types/ssm-parameter.md +++ /dev/null @@ -1,31 +0,0 @@ ---- -title: SSM Parameter -sidebar_label: ssm-parameter ---- - -AWS Systems Manager (SSM) Parameters, stored in the Systems Manager Parameter Store, provide a centralised, version-controlled repository for configuration data such as plain strings, SecureStrings (encrypted secrets), and hierarchical documents. 
They allow you to decouple configuration and secrets from code, share settings across services and accounts, and take advantage of fine-grained IAM access controls. See the official AWS documentation for full details: https://docs.aws.amazon.com/systems-manager/latest/userguide/systems-manager-parameter-store.html - -**Terrafrom Mappings:** - -- `aws_ssm_parameter.name` -- `aws_ssm_parameter.arn` - -## Supported Methods - -- `GET`: Get an SSM parameter by name -- `LIST`: List all SSM parameters -- `SEARCH`: Search for SSM parameters by ARN. This supports ARNs from IAM policies that contain wildcards - -## Possible Links - -### [`ip`](/sources/aws/Types/networkmanager-network-resource-relationship) - -If a parameter’s value represents an IP address or a list of addresses (for example, a whitelist used by a Lambda function or security group rule generator), Overmind will surface a link to the corresponding `ip` entity so that you can trace where the address originates and what else depends on it. - -### [`http`](/sources/stdlib/Types/http) - -Parameters often store URLs for upstream APIs, S3 buckets, or internal services. When the value of a parameter matches an HTTP or HTTPS URL, Overmind creates an `http` link, enabling you to follow the dependency chain from the configuration to the external or internal endpoint. - -### [`dns`](/sources/stdlib/Types/dns) - -Likewise, when a parameter’s value contains a hostname or FQDN, Overmind links it to the relevant `dns` record. This makes it easy to assess the impact of DNS changes on applications that retrieve their endpoint addresses from Parameter Store. 
diff --git a/docs.overmind.tech/docs/sources/aws/_category_.json b/docs.overmind.tech/docs/sources/aws/_category_.json deleted file mode 100644 index 3e7fb1b9..00000000 --- a/docs.overmind.tech/docs/sources/aws/_category_.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "label": "AWS", - "position": 2, - "collapsed": true, - "link": { - "type": "generated-index", - "description": "How to integrate your AWS" - } -} diff --git a/docs.overmind.tech/docs/sources/aws/account_settings.png b/docs.overmind.tech/docs/sources/aws/account_settings.png deleted file mode 100644 index c7e184cb..00000000 Binary files a/docs.overmind.tech/docs/sources/aws/account_settings.png and /dev/null differ diff --git a/docs.overmind.tech/docs/sources/aws/aws_manual.png b/docs.overmind.tech/docs/sources/aws/aws_manual.png deleted file mode 100644 index 9a01d077..00000000 Binary files a/docs.overmind.tech/docs/sources/aws/aws_manual.png and /dev/null differ diff --git a/docs.overmind.tech/docs/sources/aws/aws_source_settings.png b/docs.overmind.tech/docs/sources/aws/aws_source_settings.png deleted file mode 100644 index b6eca5e4..00000000 Binary files a/docs.overmind.tech/docs/sources/aws/aws_source_settings.png and /dev/null differ diff --git a/docs.overmind.tech/docs/sources/aws/cloudformation-update-stack.png b/docs.overmind.tech/docs/sources/aws/cloudformation-update-stack.png deleted file mode 100644 index f5ee8568..00000000 Binary files a/docs.overmind.tech/docs/sources/aws/cloudformation-update-stack.png and /dev/null differ diff --git a/docs.overmind.tech/docs/sources/aws/configuration.md b/docs.overmind.tech/docs/sources/aws/configuration.md deleted file mode 100644 index 329d3763..00000000 --- a/docs.overmind.tech/docs/sources/aws/configuration.md +++ /dev/null @@ -1,118 +0,0 @@ ---- -title: AWS Configuration -sidebar_position: 1 ---- - -To be able to analyse and discover your infrastructure, Overmind requires read-only access to your AWS account. 
There are two ways to configure this: - -- **Temporarily:** When you run the `overmind terraform` commands locally, the CLI uses the same AWS access that Terraform does to create a temporary local source. This gives Overmind access to AWS while the command is running, but not afterwards. -- **Permanently** (Recommended): This is known as a "Managed Source". Managed sources are always running and assume an IAM role that you create in your AWS account that gives them read-only AWS access. - -## Configure a Managed Source - -To create an AWS source, open [Settings](https://app.overmind.tech/settings) by clicking your avatar in the sidebar, then navigating to [Sources](https://app.overmind.tech/settings/sources). - -![User settings menu in the sidebar](./account_settings.png) - -Click **Add source** and select **AWS**. - -![Sources settings page with Add source popover](./aws_source_settings.png) - -Use "Deploy with AWS CloudFormation" to be taken to the AWS console. You may need to sign in and reload the page. With the results from the CloudFormation deployment, choose a name for your source (e.g. "prod") and fill in "Region" and "AWSTargetRoleARN". - -![Screenshot of the "Add AWS Source" dialogue, showing tabs for automatic and manual setup. The automatic setup pane is selected. There is explanation text and input fields for Source name, Region and AWSTargetRoleARN.](./configure-aws.png) - -Press "Create source" to finish the configuration. - -## Manual Setup - -To allow Overmind to access your infrastructure safely, you need to first configure a role and trust relationship that the Overmind AWS account can assume. - -This role will be protected by an [external ID](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html#external-id-purpose). - -To create the role, open the AWS console for the account you wish to link to Overmind, then: - -1. Open IAM -1. Click Roles -1. Click "Create role" -1. 
In "Trusted entity type" select "AWS account" -1. In "An AWS account" select "Another AWS account" and enter `942836531449` -1. (Optional, you can do this later) Tick "Require external ID". **Note:** Each source within Overmind has its own unique external ID. In order to find the external ID for a source go to Settings > Sources > Add Source > AWS > Manual Setup and copy the external ID from Step 3. Do not close this window after you have done this, you'll need it later -1. On the "Add permissions", don't select anything, just click "Next" -1. In "Role name" enter a descriptive name like `overmind-read-only` -1. Click "Create Role" - -The next step is to assign permissions to this role. To do this open your newly created role, then: - -1. Click "Add Permissions" > "Create inline policy" -1. Select JSON -1. Paste the following policy: - - ```json - { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "apigateway:Get*", - "autoscaling:Describe*", - "cloudfront:Get*", - "cloudfront:List*", - "cloudwatch:Describe*", - "cloudwatch:GetMetricData", - "cloudwatch:ListTagsForResource", - "directconnect:Describe*", - "dynamodb:Describe*", - "dynamodb:List*", - "ec2:Describe*", - "ecs:Describe*", - "ecs:List*", - "eks:Describe*", - "eks:List*", - "elasticfilesystem:Describe*", - "elasticloadbalancing:Describe*", - "iam:Get*", - "iam:List*", - "kms:Describe*", - "kms:Get*", - "kms:List*", - "lambda:Get*", - "lambda:List*", - "network-firewall:Describe*", - "network-firewall:List*", - "networkmanager:Describe*", - "networkmanager:Get*", - "networkmanager:List*", - "rds:Describe*", - "rds:ListTagsForResource", - "route53:Get*", - "route53:List*", - "s3:GetBucket*", - "s3:ListAllMyBuckets", - "sns:Get*", - "sns:List*", - "sqs:Get*", - "sqs:List*", - "ssm:Describe*", - "ssm:Get*", - "ssm:ListTagsForResource" - ], - "Resource": "*" - } - ] - } - ``` - -1. Name the policy `overmind-read-only` -1. 
Click "Create policy" - -At this point the permissions are complete, the last step is to copy the ARN of the role from the IAM console, and paste it back into Overmind, and create the source. The source will get a green tick once it's started and connected, which should take less than a minute. - -## Check your sources - -After you have configured a source, it'll show up in [Settings › Sources](https://app.overmind.tech/settings/sources). There you can check that the source is healthy. - -## Explore your new data - -Once your new source is healthy, jump over to the [Explore page](https://app.overmind.tech/explore?type=*&method=LIST&linkDepth=1) to show all your resources. diff --git a/docs.overmind.tech/docs/sources/aws/configure-aws.png b/docs.overmind.tech/docs/sources/aws/configure-aws.png deleted file mode 100644 index c88e146f..00000000 Binary files a/docs.overmind.tech/docs/sources/aws/configure-aws.png and /dev/null differ diff --git a/docs.overmind.tech/docs/sources/aws/terraform.md b/docs.overmind.tech/docs/sources/aws/terraform.md deleted file mode 100644 index 9cbe206a..00000000 --- a/docs.overmind.tech/docs/sources/aws/terraform.md +++ /dev/null @@ -1,191 +0,0 @@ ---- -title: Configure with Terraform / OpenTofu -sidebar_position: 2 ---- - -The Overmind Terraform module configures an AWS account for Overmind infrastructure discovery in a single `terraform apply` (or `tofu apply`). It creates an IAM role with a read-only policy, sets up the trust relationship, and registers the source with Overmind's API. The module and provider are available on both the [Terraform Registry](https://registry.terraform.io/modules/overmindtech/aws-source/overmind) and the [OpenTofu Registry](https://search.opentofu.org/module/overmindtech/aws-source/overmind). - -## Prerequisites - -- **Overmind API key** with `sources:write` scope. Create one in [Settings > API Keys](https://app.overmind.tech/settings/api-keys). 
-- **AWS credentials** with permission to create IAM roles and policies in the target account. -- **Terraform >= 1.5.0** or **OpenTofu >= 1.6.0**. - -## Quick Start - -```hcl -provider "overmind" {} - -provider "aws" { - region = "us-east-1" -} - -module "overmind_aws_source" { - source = "overmindtech/aws-source/overmind" - - name = "production" -} - -output "role_arn" { - value = module.overmind_aws_source.role_arn -} - -output "source_id" { - value = module.overmind_aws_source.source_id -} -``` - -Then run: - -```bash -export OVERMIND_API_KEY="your-api-key" -terraform init -terraform plan -terraform apply -``` - -Or with OpenTofu: - -```bash -export OVERMIND_API_KEY="your-api-key" -tofu init -tofu plan -tofu apply -``` - -## Authentication - -### Overmind Provider - -The Overmind provider reads `OVERMIND_API_KEY` from the environment. The API key must have `sources:write` scope. - -You can also set it in the provider block: - -```hcl -provider "overmind" { - api_key = var.overmind_api_key -} -``` - -### AWS Provider - -The AWS provider must have permissions to create IAM roles and policies in the target account. Any standard AWS authentication method works (environment variables, shared credentials file, SSO, etc.). See the [AWS provider documentation](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#authentication-and-configuration) for details. 
- -## Multi-Account Setup - -Use AWS provider aliases to onboard several accounts at once: - -```hcl -provider "overmind" {} - -provider "aws" { - alias = "production" - region = "us-east-1" - - assume_role { - role_arn = "arn:aws:iam::111111111111:role/terraform" - } -} - -provider "aws" { - alias = "staging" - region = "eu-west-1" - - assume_role { - role_arn = "arn:aws:iam::222222222222:role/terraform" - } -} - -module "overmind_production" { - source = "overmindtech/aws-source/overmind" - name = "production" - - providers = { - aws = aws.production - overmind = overmind - } -} - -module "overmind_staging" { - source = "overmindtech/aws-source/overmind" - name = "staging" - regions = ["eu-west-1"] - - providers = { - aws = aws.staging - overmind = overmind - } -} -``` - -## Inputs - -| Name | Description | Type | Default | Required | -| ----------- | ------------------------------------------------------------ | -------------- | ----------------------- | -------- | -| `name` | Descriptive name for the source in Overmind | `string` | n/a | yes | -| `regions` | AWS regions to discover (defaults to all non-opt-in regions) | `list(string)` | All 17 standard regions | no | -| `role_name` | Name for the IAM role created in this account | `string` | `"overmind-read-only"` | no | -| `tags` | Additional tags to apply to IAM resources | `map(string)` | `{}` | no | - -## Outputs - -| Name | Description | -| ------------- | -------------------------------------------- | -| `role_arn` | ARN of the created IAM role | -| `source_id` | UUID of the Overmind source | -| `external_id` | AWS STS external ID used in the trust policy | - -## Importing Existing Sources - -If you already created an Overmind AWS source through the UI and want to bring it under Terraform management, you can import it using the source UUID. Find the UUID on the source details page in [Settings > Sources](https://app.overmind.tech/settings/sources). 
- -When using the module: - -```shell -terraform import module.overmind_aws_source.overmind_aws_source.this -``` - -When using the provider resource directly: - -```shell -terraform import overmind_aws_source.example -``` - -After importing, run `terraform plan` to verify the state matches your configuration. Terraform will show any drift between the imported resource and your HCL. - -Note that importing brings only the Overmind source under Terraform management. If the IAM role was also created outside of Terraform, you will need to import it separately with `terraform import aws_iam_role.overmind `. - -## Verify Your Source - -After `terraform apply` completes: - -1. Open [Settings > Sources](https://app.overmind.tech/settings/sources) in the Overmind app. -2. Your new source should appear with a green healthy status within about a minute. -3. Navigate to [Explore](https://app.overmind.tech/explore) to browse discovered resources. - -## Registry Links - -- **Terraform Registry**: [overmindtech/overmind provider](https://registry.terraform.io/providers/overmindtech/overmind/latest) | [overmindtech/aws-source module](https://registry.terraform.io/modules/overmindtech/aws-source/overmind/latest) -- **OpenTofu Registry**: [overmindtech/overmind provider](https://search.opentofu.org/provider/overmindtech/overmind) | [overmindtech/aws-source module](https://search.opentofu.org/module/overmindtech/aws-source/overmind) - -## Troubleshooting - -### "Provider not found" during terraform init - -Ensure you are running Terraform >= 1.5.0 or OpenTofu >= 1.6.0, and that you have internet access to reach the registry. Run `terraform init -upgrade` to refresh provider caches. - -### "Unauthorized" or "invalid API key" - -Verify that `OVERMIND_API_KEY` is set and that the key has `sources:write` scope. You can check your API keys in [Settings > API Keys](https://app.overmind.tech/settings/api-keys). 
- -### "Access Denied" creating IAM resources - -The AWS credentials used by Terraform need permission to create IAM roles and policies. Verify your credentials have the `iam:CreateRole`, `iam:PutRolePolicy`, and `iam:CreatePolicy` permissions in the target account. - -### Source shows as unhealthy after apply - -The IAM role may take a few seconds to propagate. Wait one to two minutes and refresh the Sources page. If the source remains unhealthy, verify the role ARN in the AWS console matches the `role_arn` output. - -### Destroying resources - -`terraform destroy` cleanly removes both the IAM resources in AWS and the Overmind source registration. diff --git a/docs.overmind.tech/docs/sources/aws/update-to-pod-identity.md b/docs.overmind.tech/docs/sources/aws/update-to-pod-identity.md deleted file mode 100644 index ae30c276..00000000 --- a/docs.overmind.tech/docs/sources/aws/update-to-pod-identity.md +++ /dev/null @@ -1,179 +0,0 @@ ---- -title: Update IAM Role for Enhanced Security -sidebar_position: 2 ---- - -# Updating Your AWS IAM Role for Enhanced Security - -Starting December 2025, we've enhanced how Overmind connects to your AWS infrastructure using **EKS Pod Identity**. This update improves security by using short-lived, automatically rotated credentials when accessing your AWS resources. - -## Why This Update is Important - -Previously, Overmind used static AWS credentials to assume the IAM role in your account. With EKS Pod Identity, we now use: - -- **Short-lived credentials** that are automatically rotated -- **Session tagging** for better auditability and tracing -- **Reduced attack surface** with no long-lived credentials - -To benefit from these security improvements, you need to update the IAM role trust policy in your AWS account to allow the `sts:TagSession` permission. - -## How to Check if You Need to Update - -You can check if your IAM role needs updating by looking at the version tag: - -1. Open the **AWS IAM Console** -2. 
Navigate to **Roles** and find your Overmind role (usually named "Overmind" or "overmind-read-only") -3. Click on the role and go to the **Tags** tab -4. Look for the `overmind.version` tag - -| Version Tag | Status | -| ----------------------- | ------------------ | -| `2025-12-01` or later | ✅ Up to date | -| `2023-03-14` or earlier | ⚠️ Update required | -| No tag | ⚠️ Update required | - -## Update Instructions - -### Option A: Update via CloudFormation (Recommended) - -If you originally created your IAM role using our CloudFormation template, follow these steps: - -#### Step 1: Open AWS CloudFormation Console - -Go to the [AWS CloudFormation Console](https://console.aws.amazon.com/cloudformation) in the region where you deployed the Overmind stack. - -#### Step 2: Select the Overmind Stack - -Find and select the CloudFormation stack named **"Overmind"** (or "OvermindDevelopment" for development environments). - -:::tip -Look for a stack named "Overmind" or "OvermindDevelopment" in the region where you originally deployed it. -::: - -#### Step 3: Update the Stack - -1. Click the **"Update"** button at the top of the page -2. Under "Prepare template", select **"Replace existing template"** -3. Under "Specify template", select **"Amazon S3 URL"** -4. Enter the template URL provided by Overmind (see below for how to find it) -5. Click **"Next"** - -![Screenshot of AWS CloudFormation Update stack wizard showing "Replace existing template" selected and the Amazon S3 URL input field](./cloudformation-update-stack.png) - -:::info Finding the CloudFormation Template URL -To get the latest CloudFormation template URL: - -1. Go to [Overmind Settings > Sources](https://app.overmind.tech/settings/sources) -2. Click **Add Source > AWS** -3. Right-click the "Deploy" button and copy the link - the URL contains the `templateURL` parameter -::: - -#### Step 4: Review and Apply - -1. Keep the existing **External ID** parameter unchanged -2. 
Click **"Next"** through the configuration pages -3. On the review page, check the box acknowledging that CloudFormation might create IAM resources -4. Click **"Submit"** - -The update typically takes less than a minute to complete. - -### Option B: Manual Update - -If you prefer to update the IAM role manually, or if you created the role without CloudFormation: - -#### Step 1: Open IAM Console - -Go to the [AWS IAM Console](https://console.aws.amazon.com/iam) and navigate to **Roles**. - -#### Step 2: Find Your Overmind Role - -Search for and select your Overmind role (usually named "Overmind" or the name you specified during setup). - -#### Step 3: Edit the Trust Policy - -1. Go to the **Trust relationships** tab -2. Click **"Edit trust policy"** -3. Add the following statement to the `Statement` array: - -```json -{ - "Effect": "Allow", - "Principal": { - "AWS": "arn:aws:iam::944651592624:root" - }, - "Action": "sts:TagSession" -} -``` - -Your complete trust policy should look like this: - -```json -{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Principal": { - "AWS": "arn:aws:iam::944651592624:root" - }, - "Action": "sts:AssumeRole", - "Condition": { - "StringEquals": { - "sts:ExternalId": "YOUR-EXTERNAL-ID-HERE" - } - } - }, - { - "Effect": "Allow", - "Principal": { - "AWS": "arn:aws:iam::944651592624:root" - }, - "Action": "sts:TagSession" - } - ] -} -``` - -1. Click **"Update policy"** - -#### Step 4: Update the Version Tag (Optional) - -To help track the version of your role configuration: - -1. Go to the **Tags** tab -2. Add or update the tag: - - **Key:** `overmind.version` - - **Value:** `2025-12-01` - -## Verification - -After updating, your existing AWS sources will continue to work without interruption. The enhanced security features will be automatically enabled within the next few minutes. - -You can verify the update was successful by: - -1. 
Checking that your source shows a green status in [Overmind Settings > Sources](https://app.overmind.tech/settings/sources) -2. Verifying the role's `overmind.version` tag shows `2025-12-01` or later - -## Frequently Asked Questions - -### Will this cause any downtime? - -No. The update adds a new permission without removing any existing permissions. Your sources will continue to work throughout the update process. - -### What if I have multiple AWS accounts? - -You'll need to update the IAM role in each AWS account where you have an Overmind source configured. - -### What happens if I don't update? - -Your sources will continue to work, but won't benefit from the enhanced security features provided by EKS Pod Identity. We strongly recommend updating for improved security posture. - -### I'm using a different Overmind AWS account ID - -If you're on a dedicated or on-premises deployment, the AWS account ID in the trust policy may be different. Contact your Overmind administrator for the correct account ID. - -## Need Help? - -If you encounter any issues during the update: - -- Contact support at support@overmind.tech diff --git a/docs.overmind.tech/docs/sources/azure/_category_.json b/docs.overmind.tech/docs/sources/azure/_category_.json deleted file mode 100644 index ccab81aa..00000000 --- a/docs.overmind.tech/docs/sources/azure/_category_.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "label": "Azure", - "position": 4, - "collapsed": true, - "link": { - "type": "generated-index", - "description": "How to integrate your Azure subscription." 
- } -} diff --git a/docs.overmind.tech/docs/sources/azure/configuration.md b/docs.overmind.tech/docs/sources/azure/configuration.md deleted file mode 100644 index d0cf0ca9..00000000 --- a/docs.overmind.tech/docs/sources/azure/configuration.md +++ /dev/null @@ -1,59 +0,0 @@ ---- -title: Azure Configuration -sidebar_position: 1 ---- - -# Azure Configuration - -## Overview - -Overmind's Azure infrastructure discovery provides visibility into your Microsoft Azure resources through secure, read-only access. Overmind uses an Azure AD App Registration with federated credentials (workload identity) when running the source for you—no client secrets are stored or entered in the UI. - -To connect an Azure source, you need a **Name** (friendly label in Overmind), **Subscription ID**, **Tenant ID**, and **Client ID**. Overmind only ever requests read-only access (minimum **Reader** role on the subscription). - -## Prerequisites - -- **Azure subscription**: An active subscription you want to discover. -- **Azure AD App Registration**: An app registered in Azure AD with at least **Reader** role on the subscription (used for workload identity; no client secret is required in the Overmind UI). -- **Permissions**: Ability to create an App Registration and assign roles in the subscription (e.g. Owner or User Access Administrator). - -## Where to get the IDs - -You need three values from Azure. All are GUIDs. - -### Subscription ID - -- **Azure Portal:** In the portal, go to **Cost Management + Billing** → **Subscriptions** (or see [View subscriptions in the Azure portal](https://learn.microsoft.com/en-us/azure/cost-management-billing/manage/view-all-accounts)), select your subscription, and copy **Subscription ID**. -- **Azure CLI:** Run `az account show --query id -o tsv` (after `az login` and, if needed, `az account set --subscription "your-subscription-name-or-id"`). 
- -### Tenant ID - -- **Azure Portal:** See [Find your Azure AD tenant ID](https://learn.microsoft.com/en-us/azure/active-directory/fundamentals/active-directory-how-to-find-tenant) — in the portal, go to **Azure Active Directory** → **Overview** and copy **Tenant ID**. -- **Azure CLI:** Run `az account show --query tenantId -o tsv`. - -### Client ID (Application ID) - -- **Azure Portal:** See [Register an application](https://learn.microsoft.com/en-us/azure/active-directory/develop/quickstart-register-app) — in **Azure Active Directory** → **App registrations**, select your app (or create one) and copy **Application (client) ID**. -- **If you create a service principal via CLI:** The **appId** in the command output is your Client ID. - -Your app must have at least **Reader** on the subscription. For Overmind’s managed source we use federated credentials (workload identity), so you do **not** need to create or paste a client secret in Overmind. - -For detailed setup (e.g. App Registration, role assignment, federated credentials), see [Microsoft’s documentation on registering an application](https://learn.microsoft.com/en-us/azure/active-directory/develop/quickstart-register-app) and [Reader role](https://learn.microsoft.com/en-us/azure/role-based-access-control/built-in-roles#reader). - -## Add an Azure source in Overmind - -1. In Overmind, go to **Settings** (profile menu) → **Sources** → **Add source** → **Azure**. -2. Enter a **Name** (e.g. "Production Azure") so you can identify the source in Overmind. -3. Enter **Subscription ID**, **Tenant ID**, and **Client ID** using the values from [Where to get the IDs](#where-to-get-the-ids) above. -4. (Optional) **Regions:** Select specific Azure regions to limit discovery. If you leave this empty, Overmind discovers resources in all regions in the subscription. -5. Click **Create source**. - -The source will appear in your Sources list. 
Once the connection is established, its status will show as healthy and you can use it in Explore and change analysis. - -## Check your sources - -After you have configured a source, it will appear under [Settings → Sources](https://app.overmind.tech/settings/sources). There you can confirm the source is healthy and view its details (Source UUID, Subscription ID, Tenant ID, Client ID, and Regions). - -## Explore your data - -Once your Azure source is healthy, go to the [Explore page](https://app.overmind.tech/explore) to browse your Azure resources and their relationships. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-ai-platform-batch-prediction-job.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-ai-platform-batch-prediction-job.md deleted file mode 100644 index fae56aff..00000000 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-ai-platform-batch-prediction-job.md +++ /dev/null @@ -1,43 +0,0 @@ ---- -title: GCP Ai Platform Batch Prediction Job -sidebar_label: gcp-ai-platform-batch-prediction-job ---- - -A **Batch Prediction Job** in Google Cloud’s AI Platform (now part of Vertex AI) lets you run large-scale, asynchronous inference on a saved Machine Learning model. Instead of serving predictions request-by-request, you supply a dataset stored in Cloud Storage or BigQuery and the service spins up the necessary compute, distributes the workload, writes the predictions to your chosen destination, and then shuts itself down. This is ideal for one-off or periodic scoring of very large datasets. -Official documentation: https://cloud.google.com/vertex-ai/docs/predictions/batch-predictions - -## Supported Methods - -- `GET`: Get a gcp-ai-platform-batch-prediction-job by its "locations|batchPredictionJobs" -- ~~`LIST`~~ -- `SEARCH`: Search Batch Prediction Jobs within a location. 
Use the location name e.g., 'us-central1' - -## Possible Links - -### [`gcp-ai-platform-endpoint`](/sources/gcp/Types/gcp-ai-platform-endpoint) - -A Batch Prediction Job can read from a Model that is already deployed to an Endpoint; when that is the case the job records the Endpoint name it referenced, creating this link. - -### [`gcp-ai-platform-model`](/sources/gcp/Types/gcp-ai-platform-model) - -Every Batch Prediction Job must specify the Model it will use for inference. The job stores the fully-qualified model resource name, creating a direct dependency on this Model. - -### [`gcp-big-query-table`](/sources/gcp/Types/gcp-big-query-table) - -The job may take its input instances from a BigQuery table or write its prediction outputs to one. When either the source or destination is a BigQuery table, that table is linked to the job. - -### [`gcp-cloud-kms-crypto-key`](/sources/gcp/Types/gcp-cloud-kms-crypto-key) - -If customer-managed encryption keys (CMEK) are chosen, the Batch Prediction Job references the CryptoKey that encrypts the job metadata and any intermediate files, producing this link. - -### [`gcp-compute-network`](/sources/gcp/Types/gcp-compute-network) - -When the job is configured for private service access, it is attached to a specific VPC network for egress. That VPC network is therefore related to, and linked from, the job. - -### [`gcp-iam-service-account`](/sources/gcp/Types/gcp-iam-service-account) - -The Batch Prediction Job executes under a user-specified or default service account, which needs permission to read the model and the input data and to write outputs. That execution identity is linked here. - -### [`gcp-storage-bucket`](/sources/gcp/Types/gcp-storage-bucket) - -Cloud Storage buckets are commonly used both for the input artefacts (CSV/JSON/TFRecord files) and for the output prediction files. Any bucket mentioned in the job’s specification is linked to the job. 
diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-ai-platform-custom-job.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-ai-platform-custom-job.md deleted file mode 100644 index 510763fa..00000000 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-ai-platform-custom-job.md +++ /dev/null @@ -1,39 +0,0 @@ ---- -title: GCP Ai Platform Custom Job -sidebar_label: gcp-ai-platform-custom-job ---- - -A Vertex AI / AI Platform Custom Job represents an ad-hoc machine-learning workload that you want Google Cloud to run on managed infrastructure. By pointing the job at a custom container image or a Python package, you can execute training, hyper-parameter tuning or batch-processing logic with fine-grained control over machine types, accelerators, networking and encryption. The job definition is submitted to the `projects.locations.customJobs` API and Google Cloud provisions the required compute, streams logs, stores artefacts and tears the resources down once the job finishes. -Official documentation: https://cloud.google.com/vertex-ai/docs/training/create-custom-job - -## Supported Methods - -- `GET`: Get a gcp-ai-platform-custom-job by its "name" -- `LIST`: List all gcp-ai-platform-custom-job -- ~~`SEARCH`~~ - -## Possible Links - -### [`gcp-ai-platform-model`](/sources/gcp/Types/gcp-ai-platform-model) - -A successful Custom Job can optionally call `model.upload()` or configure `model_to_upload`, causing Vertex AI to register a `Model` resource containing the trained artefacts. Overmind links the job to the resulting `gcp-ai-platform-model` so you can trace how the model was produced. - -### [`gcp-artifact-registry-docker-image`](/sources/gcp/Types/gcp-artifact-registry-docker-image) - -Custom Jobs usually run inside user-supplied container images. When the image is stored in Artifact Registry, Overmind records a link between the job and the specific `gcp-artifact-registry-docker-image` it pulled, making it easy to audit code and dependency provenance. 
- -### [`gcp-cloud-kms-crypto-key`](/sources/gcp/Types/gcp-cloud-kms-crypto-key) - -If you enable customer-managed encryption keys (CMEK) for the job, Google Cloud encrypts logs, checkpoints and model files with the specified KMS key. The job therefore references a `gcp-cloud-kms-crypto-key`, which Overmind surfaces to highlight encryption dependencies and key-rotation risks. - -### [`gcp-compute-network`](/sources/gcp/Types/gcp-compute-network) - -Custom Jobs can be configured to run on a private VPC network (VPC-SC or VPC-hosted training). In that case the job is associated with the chosen `gcp-compute-network`, allowing Overmind to show ingress/egress paths and potential network exposure. - -### [`gcp-iam-service-account`](/sources/gcp/Types/gcp-iam-service-account) - -Vertex AI executes the workload under a user-specified or default service account. The job’s permissions—and hence its ability to read data, write artefacts or call other Google APIs—are determined by this `gcp-iam-service-account`. Overmind links them to flag overly-privileged identities. - -### [`gcp-storage-bucket`](/sources/gcp/Types/gcp-storage-bucket) - -Training data, intermediate checkpoints and exported models are commonly read from or written to Cloud Storage. The Custom Job specifies bucket URIs (e.g., `gs://my-dataset/*`, `gs://my-model-output/`). Overmind connects the job to each referenced `gcp-storage-bucket` so you can assess data residency and access controls. 
diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-ai-platform-endpoint.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-ai-platform-endpoint.md deleted file mode 100644 index 6867c939..00000000 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-ai-platform-endpoint.md +++ /dev/null @@ -1,39 +0,0 @@ ---- -title: GCP Ai Platform Endpoint -sidebar_label: gcp-ai-platform-endpoint ---- - -A **Google Cloud AI Platform Endpoint** (now part of Vertex AI) is a regional, fully-managed HTTPS entry point that receives online prediction requests and routes them to one or more deployed models. Endpoints let you perform low-latency, autoscaled inference, apply access controls, add request/response logging and attach monitoring jobs. -Official documentation: https://cloud.google.com/vertex-ai/docs/predictions/getting-predictions#deploy_model_to_endpoint - -## Supported Methods - -- `GET`: Get a gcp-ai-platform-endpoint by its "name" -- `LIST`: List all gcp-ai-platform-endpoint -- ~~`SEARCH`~~ - -## Possible Links - -### [`gcp-ai-platform-model`](/sources/gcp/Types/gcp-ai-platform-model) - -An Endpoint hosts one or more _DeployedModels_, each of which references a standalone AI Platform/Vertex AI Model resource. The link shows which models are currently deployed to, or have traffic routed through, the endpoint. - -### [`gcp-ai-platform-model-deployment-monitoring-job`](/sources/gcp/Types/gcp-ai-platform-model-deployment-monitoring-job) - -If data-drift or prediction-quality monitoring has been enabled, a Model Deployment Monitoring Job is attached to the endpoint. This relationship identifies the monitoring configuration that observes traffic on the endpoint. - -### [`gcp-big-query-table`](/sources/gcp/Types/gcp-big-query-table) - -Prediction request and response payloads can be logged to a BigQuery table when logging is enabled on the endpoint. The link indicates which table is used as the logging sink for the endpoint’s traffic. 
- -### [`gcp-cloud-kms-crypto-key`](/sources/gcp/Types/gcp-cloud-kms-crypto-key) - -Customer-managed encryption keys (CMEK) from Cloud KMS can be specified to encrypt endpoint resources at rest. This link reveals the KMS key protecting the endpoint and its deployed models. - -### [`gcp-compute-network`](/sources/gcp/Types/gcp-compute-network) - -Endpoints can be configured for private service access, allowing prediction traffic to stay within a specified VPC network. The relationship points to the Compute Network that provides the private connectivity for the endpoint. - -### [`gcp-iam-service-account`](/sources/gcp/Types/gcp-iam-service-account) - -Each deployed model on an endpoint runs under a service account whose permissions govern access to other GCP resources (e.g., storage buckets, KMS keys). The link shows which IAM service account is associated with the endpoint’s runtime. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-ai-platform-model-deployment-monitoring-job.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-ai-platform-model-deployment-monitoring-job.md deleted file mode 100644 index a5178ca8..00000000 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-ai-platform-model-deployment-monitoring-job.md +++ /dev/null @@ -1,39 +0,0 @@ ---- -title: GCP Ai Platform Model Deployment Monitoring Job -sidebar_label: gcp-ai-platform-model-deployment-monitoring-job ---- - -Google Cloud’s Model Deployment Monitoring Job is a managed Vertex AI (formerly AI Platform) service that continuously analyses a deployed model’s predictions to detect data drift, prediction drift and skew between training and online data. A job is attached to one or more deployed models on an Endpoint and periodically samples incoming predictions, calculates statistics, raises alerts and writes monitoring reports to BigQuery or Cloud Storage. 
-Official documentation: https://cloud.google.com/vertex-ai/docs/model-monitoring/overview - -## Supported Methods - -- `GET`: Get a gcp-ai-platform-model-deployment-monitoring-job by its "locations|modelDeploymentMonitoringJobs" -- ~~`LIST`~~ -- `SEARCH`: Search Model Deployment Monitoring Jobs within a location. Use the location name e.g., 'us-central1' - -## Possible Links - -### [`gcp-ai-platform-endpoint`](/sources/gcp/Types/gcp-ai-platform-endpoint) - -The monitoring job is created against a specific Endpoint; it inspects the request/response traffic that the Endpoint receives for the deployed model versions. - -### [`gcp-ai-platform-model`](/sources/gcp/Types/gcp-ai-platform-model) - -Each job’s `modelDeploymentMonitoringObjectiveConfigs` identifies the Model (or model version) whose predictions are being monitored for drift or skew. - -### [`gcp-big-query-table`](/sources/gcp/Types/gcp-big-query-table) - -If BigQuery is chosen as the analysis destination, the job writes sampled prediction data and computed statistics into a BigQuery table referenced by this link. - -### [`gcp-cloud-kms-crypto-key`](/sources/gcp/Types/gcp-cloud-kms-crypto-key) - -The `encryptionSpec.kmsKeyName` field can point to a customer-managed KMS key that encrypts all monitoring artefacts produced by the job. - -### [`gcp-monitoring-notification-channel`](/sources/gcp/Types/gcp-monitoring-notification-channel) - -Alerting rules created by the job use Cloud Monitoring notification channels (e-mail, Pub/Sub, SMS, etc.) to notify operators when drift thresholds are breached. - -### [`gcp-storage-bucket`](/sources/gcp/Types/gcp-storage-bucket) - -When Cloud Storage is selected, the job stores prediction samples, intermediate files and final monitoring reports in a user-provided bucket. 
diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-ai-platform-model.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-ai-platform-model.md deleted file mode 100644 index ad28f59f..00000000 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-ai-platform-model.md +++ /dev/null @@ -1,34 +0,0 @@ ---- -title: GCP Ai Platform Model -sidebar_label: gcp-ai-platform-model ---- - -A GCP AI Platform Model (now part of Vertex AI) is a logical container that holds the metadata and artefacts required to serve machine-learning predictions. A model record points to one or more model versions or container images, the Cloud Storage location of the trained parameters, and optional encryption settings. Models are deployed to Endpoints for online prediction or used directly in batch/streaming inference jobs. Official documentation: https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.models - -## Supported Methods - -- `GET`: Get a gcp-ai-platform-model by its "name" -- `LIST`: List all gcp-ai-platform-model -- ~~`SEARCH`~~ - -## Possible Links - -### [`gcp-ai-platform-endpoint`](/sources/gcp/Types/gcp-ai-platform-endpoint) - -A model is deployed to one or more Endpoints. The link shows where this model is currently serving traffic or could be routed for prediction. - -### [`gcp-ai-platform-pipeline-job`](/sources/gcp/Types/gcp-ai-platform-pipeline-job) - -Training or transformation Pipeline Jobs often create or update Model resources; linking them highlights which automated workflow produced the model and therefore which code/data lineage applies. - -### [`gcp-artifact-registry-docker-image`](/sources/gcp/Types/gcp-artifact-registry-docker-image) - -If the model is served via a custom prediction container, the Model record references a Docker image stored in Artifact Registry. This link surfaces that underlying image and its associated vulnerabilities. 
- -### [`gcp-cloud-kms-crypto-key`](/sources/gcp/Types/gcp-cloud-kms-crypto-key) - -Models can be protected with customer-managed encryption keys (CMEK). Overmind links the model to the specific KMS key to expose encryption scope and key rotation risks. - -### [`gcp-storage-bucket`](/sources/gcp/Types/gcp-storage-bucket) - -The model’s artefacts (e.g., SavedModel, scikit-learn pickle, PyTorch state) reside in a Cloud Storage bucket referenced by `artifactUri`. Linking to the bucket reveals data-at-rest location and its IAM policy. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-ai-platform-pipeline-job.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-ai-platform-pipeline-job.md deleted file mode 100644 index 23aab5ae..00000000 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-ai-platform-pipeline-job.md +++ /dev/null @@ -1,31 +0,0 @@ ---- -title: GCP Ai Platform Pipeline Job -sidebar_label: gcp-ai-platform-pipeline-job ---- - -A GCP AI Platform Pipeline Job (now part of Vertex AI Pipelines) represents a single execution of a machine-learning workflow defined in a Kubeflow/Vertex AI pipeline. The job orchestrates a directed acyclic graph (DAG) of pipeline components such as data preparation, model training and evaluation, and optionally deployment. Each run is stored as a resource that tracks the DAG definition, runtime parameters, execution state, logs and metadata. -Official documentation: https://cloud.google.com/vertex-ai/docs/pipelines/introduction - -## Supported Methods - -- `GET`: Get a gcp-ai-platform-pipeline-job by its "name" -- `LIST`: List all gcp-ai-platform-pipeline-job -- ~~`SEARCH`~~ - -## Possible Links - -### [`gcp-cloud-kms-crypto-key`](/sources/gcp/Types/gcp-cloud-kms-crypto-key) - -If the pipeline job is configured to use customer-managed encryption keys (CMEK), the key referenced here encrypts pipeline artefacts such as metadata, intermediate files and model checkpoints. 
- -### [`gcp-compute-network`](/sources/gcp/Types/gcp-compute-network) - -Pipeline components that run in custom training containers or Dataflow/Dataproc jobs may be attached to a specific VPC network to control egress, ingress and private service access. The pipeline job therefore has an implicit or explicit relationship with the VPC network used at execution time. - -### [`gcp-iam-service-account`](/sources/gcp/Types/gcp-iam-service-account) - -The pipeline job executes under a service account which grants it permissions to create and manage downstream resources (e.g. training jobs, storage objects, BigQuery datasets). Overmind links the job to the service account that appears in its runtime configuration. - -### [`gcp-storage-bucket`](/sources/gcp/Types/gcp-storage-bucket) - -Vertex AI Pipelines store pipeline definitions, intermediate artefacts, and output models in Cloud Storage. A pipeline job will reference one or more buckets for source code, artefacts and logging, so Overmind creates links to each bucket it touches. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-artifact-registry-docker-image.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-artifact-registry-docker-image.md deleted file mode 100644 index b346f80f..00000000 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-artifact-registry-docker-image.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -title: GCP Artifact Registry Docker Image -sidebar_label: gcp-artifact-registry-docker-image ---- - -A GCP Artifact Registry Docker Image represents a single container image stored within Google Cloud Artifact Registry. Artifact Registry is Google Cloud’s fully-managed, secure, and scalable repository service that allows teams to store, manage and secure their build artefacts, including Docker container images. Each Docker image is identified by its path in the form `projects/{project}/locations/{location}/repositories/{repository}/dockerImages/{image}` and can hold multiple tags and versions. 
Managing images through Artifact Registry enables fine-grained IAM permissions, vulnerability scanning, and seamless integration with Cloud Build and Cloud Run. -For more information, see the official documentation: https://cloud.google.com/artifact-registry/docs/docker - -**Terrafrom Mappings:** - -- `google_artifact_registry_docker_image.name` - -## Supported Methods - -- `GET`: Get a gcp-artifact-registry-docker-image by its "locations|repositories|dockerImages" -- ~~`LIST`~~ -- `SEARCH`: Search for Docker images in Artifact Registry. Use the format "location|repository_id" or "projects/[project]/locations/[location]/repository/[repository_id]/dockerImages/[docker_image]" which is supported for terraform mappings. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-big-query-data-transfer-transfer-config.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-big-query-data-transfer-transfer-config.md deleted file mode 100644 index c7dc4bae..00000000 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-big-query-data-transfer-transfer-config.md +++ /dev/null @@ -1,35 +0,0 @@ ---- -title: GCP Big Query Data Transfer Transfer Config -sidebar_label: gcp-big-query-data-transfer-transfer-config ---- - -A BigQuery Data Transfer transfer configuration defines the schedule, destination dataset and credentials that the BigQuery Data Transfer Service will use to load data from a supported SaaS application, Google service or external data source into BigQuery. Each configuration specifies when transfers should run, the parameters required by the source system and, optionally, Pub/Sub notification settings and Cloud KMS encryption keys. 
-For a full description of the resource see the Google Cloud documentation: https://cloud.google.com/bigquery/docs/reference/datatransfer/rest/v1/projects.locations.transferConfigs - -**Terrafrom Mappings:** - -- `google_bigquery_data_transfer_config.id` - -## Supported Methods - -- `GET`: Get a gcp-big-query-data-transfer-transfer-config by its "locations|transferConfigs" -- ~~`LIST`~~ -- `SEARCH`: Search for BigQuery Data Transfer transfer configs in a location. Use the format "location" or "projects/project_id/locations/location/transferConfigs/transfer_config_id" which is supported for terraform mappings. - -## Possible Links - -### [`gcp-big-query-dataset`](/sources/gcp/Types/gcp-big-query-dataset) - -The transfer configuration writes its imported data into a specific BigQuery dataset; the dataset’s identifier is stored in the configuration’s `destinationDatasetId` field. Overmind therefore links the config to the dataset that will receive the transferred data. - -### [`gcp-cloud-kms-crypto-key`](/sources/gcp/Types/gcp-cloud-kms-crypto-key) - -If the destination dataset is protected with customer-managed encryption keys (CMEK), the transfer runs inherit that key. Consequently, the configuration is indirectly associated with the Cloud KMS crypto key that encrypts the loaded tables, allowing Overmind to surface encryption-related risks. - -### [`gcp-iam-service-account`](/sources/gcp/Types/gcp-iam-service-account) - -Transfers execute using a dedicated service account (`project-number@gcp-sa-bigquerydt.iam.gserviceaccount.com`) or, in some cases, a user-provided service account. The configuration stores this principal, and appropriate IAM roles must be granted. Overmind links the transfer config to the service account to assess permission scopes. - -### [`gcp-pub-sub-topic`](/sources/gcp/Types/gcp-pub-sub-topic) - -A transfer configuration can be set to publish run status notifications to a Pub/Sub topic specified in its `notificationPubsubTopic` field. 
Overmind links the configuration to that topic so that message-flow and permissions between the two resources can be evaluated. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-big-query-dataset.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-big-query-dataset.md deleted file mode 100644 index 41435289..00000000 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-big-query-dataset.md +++ /dev/null @@ -1,41 +0,0 @@ ---- -title: GCP Big Query Dataset -sidebar_label: gcp-big-query-dataset ---- - -A Google Cloud BigQuery Dataset is a logical container that holds tables, views, routines (stored procedures and functions) and metadata, and defines the geographic location where the underlying data is stored. Datasets also act as the administrative boundary for access-control policies and encryption configuration. For a full description, see the official documentation: https://cloud.google.com/bigquery/docs/datasets-intro - -**Terrafrom Mappings:** - -- `google_bigquery_dataset.dataset_id` -- `google_bigquery_dataset_iam_binding.dataset_id` -- `google_bigquery_dataset_iam_member.dataset_id` -- `google_bigquery_dataset_iam_policy.dataset_id` - -## Supported Methods - -- `GET`: Get GCP Big Query Dataset by "gcp-big-query-dataset-id" -- `LIST`: List all GCP Big Query Dataset items -- ~~`SEARCH`~~ - -## Possible Links - -### [`gcp-big-query-dataset`](/sources/gcp/Types/gcp-big-query-dataset) - -Datasets can reference, copy from or authorise access to other BigQuery datasets, so Overmind may surface links where cross-dataset operations or shared access exist. - -### [`gcp-big-query-routine`](/sources/gcp/Types/gcp-big-query-routine) - -Every BigQuery routine (stored procedure or user-defined function) resides inside a specific dataset; therefore routines are children of the current dataset. - -### [`gcp-big-query-table`](/sources/gcp/Types/gcp-big-query-table) - -Tables and views are stored within a dataset. 
All tables that belong to this dataset will be linked here. - -### [`gcp-cloud-kms-crypto-key`](/sources/gcp/Types/gcp-cloud-kms-crypto-key) - -If customer-managed encryption is enabled, the dataset (and everything inside it) may be encrypted with a specific Cloud KMS crypto key. This link shows which key is in use. - -### [`gcp-iam-service-account`](/sources/gcp/Types/gcp-iam-service-account) - -Access to a dataset is granted via IAM, often to service accounts. Linked service accounts represent principals that have explicit permissions on the dataset. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-big-query-routine.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-big-query-routine.md deleted file mode 100644 index 62ddb34a..00000000 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-big-query-routine.md +++ /dev/null @@ -1,27 +0,0 @@ ---- -title: GCP Big Query Routine -sidebar_label: gcp-big-query-routine ---- - -A BigQuery Routine is a reusable piece of SQL or JavaScript logic—such as a stored procedure, user-defined function (UDF), or table-valued function—stored inside a BigQuery dataset. Routines let you encapsulate complex transformations, calculations, or business rules and call them from queries just like native BigQuery functions. They can reference other BigQuery objects (tables, views, models, etc.) and may be version-controlled and secured independently of the data they operate on. -Official documentation: https://cloud.google.com/bigquery/docs/reference/rest/v2/routines - -**Terrafrom Mappings:** - -- `google_bigquery_routine.id` - -## Supported Methods - -- `GET`: Get GCP Big Query Routine by "gcp-big-query-dataset-id|gcp-big-query-routine-id" -- ~~`LIST`~~ -- `SEARCH`: Search for GCP Big Query Routine by "gcp-big-query-routine-id" - -## Possible Links - -### [`gcp-big-query-dataset`](/sources/gcp/Types/gcp-big-query-dataset) - -A routine is always contained within exactly one BigQuery dataset. 
The link lets you trace from a routine to its parent dataset to understand data location, access controls, and retention policies that also apply to the routine. - -### [`gcp-storage-bucket`](/sources/gcp/Types/gcp-storage-bucket) - -If a routine’s SQL references an external table backed by Cloud Storage, or if the routine loads/stages data via the `LOAD DATA` or `EXPORT DATA` statements, the routine implicitly depends on the corresponding Cloud Storage bucket. This link surfaces that dependency so you can assess the impact of bucket-level permissions and lifecycle rules on the routine’s execution. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-big-query-table.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-big-query-table.md deleted file mode 100644 index 5b9487fc..00000000 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-big-query-table.md +++ /dev/null @@ -1,38 +0,0 @@ ---- -title: GCP Big Query Table -sidebar_label: gcp-big-query-table ---- - -A BigQuery table is the fundamental storage unit inside Google Cloud BigQuery. It holds the actual rows of structured data that can be queried with SQL, shared, exported or used to build materialised views and machine-learning models. Tables live inside a dataset, can be partitioned or clustered, and may be encrypted either with Google-managed keys or customer-managed keys stored in Cloud KMS. They can also act as logical wrappers around external data held in Cloud Storage. 
-Official documentation: https://cloud.google.com/bigquery/docs/tables - -**Terrafrom Mappings:** - -- `google_bigquery_table.id` -- `google_bigquery_table_iam_binding.dataset_id` -- `google_bigquery_table_iam_member.dataset_id` -- `google_bigquery_table_iam_policy.dataset_id` - -## Supported Methods - -- `GET`: Get GCP Big Query Table by "gcp-big-query-dataset-id|gcp-big-query-table-id" -- ~~`LIST`~~ -- `SEARCH`: Search for GCP Big Query Table by "gcp-big-query-dataset-id" - -## Possible Links - -### [`gcp-big-query-dataset`](/sources/gcp/Types/gcp-big-query-dataset) - -The dataset is the immediate parent container of the table; every table must belong to exactly one dataset and inherits default encryption, location and IAM settings from it. - -### [`gcp-big-query-table`](/sources/gcp/Types/gcp-big-query-table) - -BigQuery tables can reference, copy from, or be copied to other tables (for example when creating snapshots, clones, views with explicit table references or COPY jobs). Such relationships are captured as links between table resources. - -### [`gcp-cloud-kms-crypto-key`](/sources/gcp/Types/gcp-cloud-kms-crypto-key) - -If the table (or its parent dataset) is configured to use customer-managed encryption, it points to the Cloud KMS CryptoKey that protects the data at rest. - -### [`gcp-storage-bucket`](/sources/gcp/Types/gcp-storage-bucket) - -An external BigQuery table may use objects stored in a Cloud Storage bucket as its underlying data source; in that case the table is linked to the bucket holding those objects. 
diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-big-table-admin-app-profile.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-big-table-admin-app-profile.md deleted file mode 100644 index ab874cf5..00000000 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-big-table-admin-app-profile.md +++ /dev/null @@ -1,27 +0,0 @@ ---- -title: GCP Big Table Admin App Profile -sidebar_label: gcp-big-table-admin-app-profile ---- - -A Bigtable **App Profile** is a logical configuration that tells Google Cloud Bigtable how client traffic for a particular application should be routed to one or more clusters within an instance. It lets you choose between single-cluster routing (for the lowest latency within a specific region) or multi-cluster routing (for higher availability across several regions) and also defines the consistency model that the application will see. Because app profiles govern the path that live data takes, mis-configuration can lead to increased latency, unexpected fail-over behaviour, or cross-region egress costs. -Official documentation: https://cloud.google.com/bigtable/docs/app-profiles - -**Terrafrom Mappings:** - -- `google_bigtable_app_profile.id` - -## Supported Methods - -- `GET`: Get a gcp-big-table-admin-app-profile by its "instances|appProfiles" -- ~~`LIST`~~ -- `SEARCH`: Search for BigTable App Profiles in an instance. Use the format "instance" or "projects/[project_id]/instances/[instance_name]/appProfiles/[app_profile_id]" which is supported for terraform mappings. - -## Possible Links - -### [`gcp-big-table-admin-cluster`](/sources/gcp/Types/gcp-big-table-admin-cluster) - -An App Profile points client traffic towards one or more specific clusters. Each routing policy within the profile references the cluster identifiers defined by `gcp-big-table-admin-cluster`. Observing this link lets you see which clusters will receive traffic from the application and assess redundancy or regional placement risks. 
- -### [`gcp-big-table-admin-instance`](/sources/gcp/Types/gcp-big-table-admin-instance) - -Every App Profile exists inside a single Bigtable instance. Linking to `gcp-big-table-admin-instance` shows the broader configuration—such as replication settings and all clusters—that frames the context in which the App Profile operates. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-big-table-admin-backup.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-big-table-admin-backup.md deleted file mode 100644 index b7cdf6a7..00000000 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-big-table-admin-backup.md +++ /dev/null @@ -1,31 +0,0 @@ ---- -title: GCP Big Table Admin Backup -sidebar_label: gcp-big-table-admin-backup ---- - -A Cloud Bigtable Admin Backup represents a point-in-time copy of a single Bigtable table that is stored within the same Bigtable cluster for a user-defined retention period. Back-ups allow you to restore data that has been deleted or corrupted without replaying your entire write history, and they can also be copied to other regions for disaster-recovery purposes. The resource is created, managed and deleted through the Cloud Bigtable Admin API. -Official documentation: https://cloud.google.com/bigtable/docs/backups - -## Supported Methods - -- `GET`: Get a gcp-big-table-admin-backup by its "instances|clusters|backups" -- ~~`LIST`~~ -- `SEARCH`: Search for gcp-big-table-admin-backup by its "instances|clusters" - -## Possible Links - -### [`gcp-big-table-admin-backup`](/sources/gcp/Types/gcp-big-table-admin-backup) - -If the current backup is used as the source for a cross-cluster copy, or if multiple back-ups are chained through copy operations, Overmind links the related `gcp-big-table-admin-backup` resources together so you can trace provenance and inheritance of data. - -### [`gcp-big-table-admin-cluster`](/sources/gcp/Types/gcp-big-table-admin-cluster) - -Every backup is physically stored in the Bigtable cluster where it was created. 
The backup therefore links to its parent `gcp-big-table-admin-cluster`, enabling you to understand locality, storage costs and the failure domain that may affect both the cluster and its back-ups. - -### [`gcp-big-table-admin-table`](/sources/gcp/Types/gcp-big-table-admin-table) - -A backup is a snapshot of a specific Bigtable table at the moment the backup was taken. This link points back to that source `gcp-big-table-admin-table`, allowing you to see which dataset the backup protects and to assess the impact of schema or data changes. - -### [`gcp-cloud-kms-crypto-key-version`](/sources/gcp/Types/gcp-cloud-kms-crypto-key-version) - -When customer-managed encryption (CMEK) is enabled, the backup’s data is encrypted with a particular Cloud KMS key version. Linking to `gcp-cloud-kms-crypto-key-version` lets you audit encryption lineage and verify that the correct key material is being used for protecting the backup. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-big-table-admin-cluster.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-big-table-admin-cluster.md deleted file mode 100644 index 8160b874..00000000 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-big-table-admin-cluster.md +++ /dev/null @@ -1,23 +0,0 @@ ---- -title: GCP Big Table Admin Cluster -sidebar_label: gcp-big-table-admin-cluster ---- - -A GCP Bigtable Admin Cluster resource represents the configuration of a single cluster that belongs to a Cloud Bigtable instance. The cluster defines the geographic location where data is stored, the number and type of serving nodes, the storage type (HDD or SSD), autoscaling settings, and any customer-managed encryption keys (CMEK) that protect the data. It is managed through the Cloud Bigtable Admin API, which allows you to create, update, or delete clusters programmatically. 
-For further details, see Google’s official documentation: https://cloud.google.com/bigtable/docs/instances-clusters-nodes - -## Supported Methods - -- `GET`: Get a gcp-big-table-admin-cluster by its "instances|clusters" -- ~~`LIST`~~ -- `SEARCH`: Search for gcp-big-table-admin-cluster by its "instances" - -## Possible Links - -### [`gcp-big-table-admin-instance`](/sources/gcp/Types/gcp-big-table-admin-instance) - -A cluster is always a child of a Bigtable instance. This link represents the parent–child relationship: the instance contains one or more clusters, and every cluster must reference its parent instance. - -### [`gcp-cloud-kms-crypto-key`](/sources/gcp/Types/gcp-cloud-kms-crypto-key) - -If Customer-Managed Encryption Keys (CMEK) are enabled, the cluster’s encryption configuration points to the Cloud KMS CryptoKey that is used to encrypt data at rest. This link captures that dependency between the cluster and the key. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-big-table-admin-instance.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-big-table-admin-instance.md deleted file mode 100644 index 6b010f4f..00000000 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-big-table-admin-instance.md +++ /dev/null @@ -1,25 +0,0 @@ ---- -title: GCP Big Table Admin Instance -sidebar_label: gcp-big-table-admin-instance ---- - -Cloud Bigtable instances are the top-level administrative containers for all tables and data stored in Bigtable. An instance defines the service tier (production or development), the geographic placement of data through its clusters, and provides the entry point for IAM policy management, encryption settings, labelling and more. 
For a detailed overview of instances, see the official Google Cloud documentation: https://cloud.google.com/bigtable/docs/instances-clusters-nodes - -**Terrafrom Mappings:** - -- `google_bigtable_instance.name` -- `google_bigtable_instance_iam_binding.instance` -- `google_bigtable_instance_iam_member.instance` -- `google_bigtable_instance_iam_policy.instance` - -## Supported Methods - -- `GET`: Get a gcp-big-table-admin-instance by its "name" -- `LIST`: List all gcp-big-table-admin-instance -- ~~`SEARCH`~~ - -## Possible Links - -### [`gcp-big-table-admin-cluster`](/sources/gcp/Types/gcp-big-table-admin-cluster) - -Every Bigtable instance is composed of one or more clusters. A `gcp-big-table-admin-cluster` represents the individual cluster resources that reside within, and are owned by, a given Bigtable instance. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-big-table-admin-table.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-big-table-admin-table.md deleted file mode 100644 index 62f6a263..00000000 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-big-table-admin-table.md +++ /dev/null @@ -1,33 +0,0 @@ ---- -title: GCP Big Table Admin Table -sidebar_label: gcp-big-table-admin-table ---- - -Google Cloud Bigtable is a scalable NoSQL database service for large analytical and operational workloads. A Bigtable **table** is the primary data container within an instance, organised into rows and column families. The Bigtable Admin API allows you to create, configure, list, and delete tables, as well as manage their IAM policies and column–family schemas. 
Full details can be found in the official documentation: https://cloud.google.com/bigtable/docs/reference/admin/rest - -**Terrafrom Mappings:** - -- `google_bigtable_table.id` -- `google_bigtable_table_iam_binding.instance_name` -- `google_bigtable_table_iam_member.instance_name` -- `google_bigtable_table_iam_policy.instance_name` - -## Supported Methods - -- `GET`: Get a gcp-big-table-admin-table by its "instances|tables" -- ~~`LIST`~~ -- `SEARCH`: Search for BigTable tables in an instance. Use the format "instance_name" or "projects/[project_id]/instances/[instance_name]/tables/[table_name]" which is supported for terraform mappings. - -## Possible Links - -### [`gcp-big-table-admin-backup`](/sources/gcp/Types/gcp-big-table-admin-backup) - -A Bigtable table can have one or more backups. Overmind links a table to its related `gcp-big-table-admin-backup` resources, making it easy to assess how backup configurations might be impacted by changes to the table. - -### [`gcp-big-table-admin-instance`](/sources/gcp/Types/gcp-big-table-admin-instance) - -Every table is created inside a single Bigtable instance. This link shows the parent `gcp-big-table-admin-instance` that owns the table so you can understand instance-level settings (such as clusters and IAM) that may affect the table. - -### [`gcp-big-table-admin-table`](/sources/gcp/Types/gcp-big-table-admin-table) - -Tables may reference each other indirectly through IAM policies or schema design. Overmind links tables to other tables when such relationships are detected, allowing you to trace dependencies across multiple Bigtable tables within or across instances. 
diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-certificate-manager-certificate.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-certificate-manager-certificate.md deleted file mode 100644 index 3623be9f..00000000 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-certificate-manager-certificate.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -title: GCP Certificate Manager Certificate -sidebar_label: gcp-certificate-manager-certificate ---- - -A **GCP Certificate Manager Certificate** represents an SSL/TLS certificate that is stored and managed by Google Cloud Certificate Manager. Certificates configured here can be Google-managed (automatically provisioned and renewed by Google) or self-managed (imported by the user) and can be attached to load balancers, Cloud CDN, or other Google Cloud resources to provide encrypted connections. Managing certificates through Certificate Manager centralises lifecycle operations such as issuance, rotation and revocation, reducing operational overhead and the risk of serving expired certificates. 
For full details, see the official documentation: https://cloud.google.com/certificate-manager/docs - -**Terrafrom Mappings:** - -- `google_certificate_manager_certificate.id` - -## Supported Methods - -- `GET`: Get GCP Certificate Manager Certificate by "gcp-certificate-manager-certificate-location|gcp-certificate-manager-certificate-name" -- ~~`LIST`~~ -- `SEARCH`: Search for GCP Certificate Manager Certificate by "gcp-certificate-manager-certificate-location" diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-cloud-billing-billing-info.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-cloud-billing-billing-info.md deleted file mode 100644 index 08b81211..00000000 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-cloud-billing-billing-info.md +++ /dev/null @@ -1,26 +0,0 @@ ---- -title: GCP Cloud Billing Billing Info -sidebar_label: gcp-cloud-billing-billing-info ---- - -The **Cloud Billing – Billing Info** resource represents the billing configuration that is attached to an individual Google Cloud project. -For a given project it records which Cloud Billing Account is linked, whether billing is currently enabled, and other metadata that controls how usage costs are charged. -The resource is surfaced by the Cloud Billing API endpoint -`cloudbilling.googleapis.com/v1/projects/{projectId}/billingInfo`. -Full details are available in the official Google documentation: -https://cloud.google.com/billing/docs/reference/rest/v1/projects/getBillingInfo - -Knowing the contents of this object allows Overmind to determine, for example, whether a project is running with an unexpectedly disabled billing account or whether it is tied to the correct cost centre before a deployment is made. 
- -## Supported Methods - -- `GET`: Get a gcp-cloud-billing-billing-info by its "name" -- ~~`LIST`~~ -- ~~`SEARCH`~~ - -## Possible Links - -### [`gcp-cloud-resource-manager-project`](/sources/gcp/Types/gcp-cloud-resource-manager-project) - -Every Billing Info object belongs to exactly one Cloud Resource Manager Project. -Overmind creates a link from `gcp-cloud-billing-billing-info` → `gcp-cloud-resource-manager-project` so that users can trace the billing configuration back to the workload and other resources that live inside the same project. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-cloud-build-build.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-cloud-build-build.md deleted file mode 100644 index 3ff463e4..00000000 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-cloud-build-build.md +++ /dev/null @@ -1,39 +0,0 @@ ---- -title: GCP Cloud Build Build -sidebar_label: gcp-cloud-build-build ---- - -A **Cloud Build Build** represents a single execution of Google Cloud Build, Google Cloud’s CI/CD service. Each build contains one or more build steps (Docker containers) that run in sequence or in parallel to compile code, run tests, or package and deploy artefacts. Metadata recorded on the build includes its source, substitutions, images, logs, secrets used, time-stamps, and overall status. -See the official documentation for full details: https://cloud.google.com/build/docs/api/reference/rest/v1/projects.builds - -## Supported Methods - -- `GET`: Get a gcp-cloud-build-build by its "name" -- `LIST`: List all gcp-cloud-build-build -- ~~`SEARCH`~~ - -## Possible Links - -### [`gcp-artifact-registry-docker-image`](/sources/gcp/Types/gcp-artifact-registry-docker-image) - -A build often produces container images and pushes them to Artifact Registry. Overmind links the build to every `gcp-artifact-registry-docker-image` whose digest or tag is declared in the build’s `images` field. 
- -### [`gcp-cloud-kms-crypto-key`](/sources/gcp/Types/gcp-cloud-kms-crypto-key) - -Builds can be configured to decrypt secrets with Cloud KMS. If the build specification references a KMS key (for example in `secretEnv`), Overmind records a link to the corresponding `gcp-cloud-kms-crypto-key`. - -### [`gcp-iam-service-account`](/sources/gcp/Types/gcp-iam-service-account) - -Cloud Build runs under a service account (`serviceAccount` field). The build is therefore linked to the `gcp-iam-service-account` that actually executes the build steps and accesses other resources. - -### [`gcp-logging-bucket`](/sources/gcp/Types/gcp-logging-bucket) - -Build logs are written to Cloud Logging and can be routed into a custom logging bucket. If log sink routing points the build’s logs to a specific `gcp-logging-bucket`, Overmind associates the two objects. - -### [`gcp-secret-manager-secret`](/sources/gcp/Types/gcp-secret-manager-secret) - -Secrets injected into build steps via `secretEnv` or `availableSecrets` are stored in Secret Manager. A link is created between the build and every `gcp-secret-manager-secret` it consumes. - -### [`gcp-storage-bucket`](/sources/gcp/Types/gcp-storage-bucket) - -Cloud Build can pull its source from a Cloud Storage bucket and write build logs or artefacts back to buckets (e.g. via the `logsBucket` or `artifacts` fields). These buckets appear as related `gcp-storage-bucket` resources. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-cloud-functions-function.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-cloud-functions-function.md deleted file mode 100644 index 07856620..00000000 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-cloud-functions-function.md +++ /dev/null @@ -1,34 +0,0 @@ ---- -title: GCP Cloud Functions Function -sidebar_label: gcp-cloud-functions-function ---- - -Google Cloud Functions is a server-less execution environment that lets you run event-driven code without provisioning or managing servers. 
A “Function” is the deployed piece of code together with its configuration (runtime, memory/CPU limits, environment variables, ingress/egress settings, triggers and IAM bindings). Documentation: https://cloud.google.com/functions/docs - -## Supported Methods - -- `GET`: Get a gcp-cloud-functions-function by its "locations|functions" -- ~~`LIST`~~ -- `SEARCH`: Search for gcp-cloud-functions-function by its "locations" - -## Possible Links - -### [`gcp-cloud-kms-crypto-key`](/sources/gcp/Types/gcp-cloud-kms-crypto-key) - -A function can reference a Cloud KMS crypto key to decrypt secrets or to use Customer-Managed Encryption Keys (CMEK) for its source code stored in Cloud Storage. Overmind therefore links the function to any KMS keys it is authorised to use. - -### [`gcp-iam-service-account`](/sources/gcp/Types/gcp-iam-service-account) - -Each Cloud Function executes as a service account, and other service accounts may be granted permission to invoke or manage it. Overmind links the function to the runtime service account and to any caller or admin accounts discovered in its IAM policy. - -### [`gcp-pub-sub-topic`](/sources/gcp/Types/gcp-pub-sub-topic) - -Pub/Sub topics are commonly used as event triggers. When a function is configured to fire on messages published to a topic, Overmind records a link between the function and that topic. - -### [`gcp-run-service`](/sources/gcp/Types/gcp-run-service) - -Second-generation Cloud Functions are built and deployed as Cloud Run services under the hood. Overmind links the function to the underlying Cloud Run service so you can trace configuration and runtime dependencies. - -### [`gcp-storage-bucket`](/sources/gcp/Types/gcp-storage-bucket) - -Cloud Storage buckets can be both event sources (object create/delete triggers) and repositories for a function’s source code during deployment. Overmind links the function to any bucket that serves as a trigger or holds its source archive. 
diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-cloud-kms-crypto-key-version.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-cloud-kms-crypto-key-version.md deleted file mode 100644 index ef5e8169..00000000 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-cloud-kms-crypto-key-version.md +++ /dev/null @@ -1,22 +0,0 @@ ---- -title: GCP Cloud Kms Crypto Key Version -sidebar_label: gcp-cloud-kms-crypto-key-version ---- - -A **Cloud KMS CryptoKeyVersion** is an immutable representation of a single piece of key material managed by Google Cloud Key Management Service. Each CryptoKey can have many versions, allowing you to rotate key material without changing the logical key that your workloads use. A version holds state (e.g., `ENABLED`, `DISABLED`, `DESTROYED`), an algorithm specification (RSA, AES-GCM, etc.), and lifecycle metadata such as creation and destruction timestamps. See the official Google documentation for full details: https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings.cryptoKeys.cryptoKeyVersions - -**Terrafrom Mappings:** - -- `google_kms_crypto_key_version.id` - -## Supported Methods - -- `GET`: Get GCP Cloud Kms Crypto Key Version by "gcp-cloud-kms-key-ring-location|gcp-cloud-kms-key-ring-name|gcp-cloud-kms-crypto-key-name|gcp-cloud-kms-crypto-key-version-version" -- ~~`LIST`~~ -- `SEARCH`: Search for GCP Cloud Kms Crypto Key Version by "gcp-cloud-kms-key-ring-location|gcp-cloud-kms-key-ring-name|gcp-cloud-kms-crypto-key-name" - -## Possible Links - -### [`gcp-cloud-kms-crypto-key`](/sources/gcp/Types/gcp-cloud-kms-crypto-key) - -A CryptoKeyVersion is always a child of a CryptoKey. The `gcp-cloud-kms-crypto-key` resource represents the logical key, while the current item represents a particular version of that key’s material. 
diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-cloud-kms-crypto-key.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-cloud-kms-crypto-key.md deleted file mode 100644 index 25af555c..00000000 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-cloud-kms-crypto-key.md +++ /dev/null @@ -1,27 +0,0 @@ ---- -title: GCP Cloud Kms Crypto Key -sidebar_label: gcp-cloud-kms-crypto-key ---- - -A **Cloud KMS CryptoKey** is the logical resource in Google Cloud that represents a single cryptographic key and its primary metadata. It defines the algorithm, purpose (encryption/decryption, signing/verification, MAC, etc.), rotation schedule, and IAM policy for the key. Each CryptoKey lives inside a Key Ring, can have multiple immutable versions, and is used by Google-managed services (or your own applications) to perform cryptographic operations. -Official documentation: https://cloud.google.com/kms/docs/object-hierarchy#key - -**Terrafrom Mappings:** - -- `google_kms_crypto_key.id` - -## Supported Methods - -- `GET`: Get GCP Cloud Kms Crypto Key by "gcp-cloud-kms-key-ring-location|gcp-cloud-kms-key-ring-name|gcp-cloud-kms-crypto-key-name" -- ~~`LIST`~~ -- `SEARCH`: Search for GCP Cloud Kms Crypto Key by "gcp-cloud-kms-key-ring-location|gcp-cloud-kms-key-ring-name" - -## Possible Links - -### [`gcp-cloud-kms-crypto-key-version`](/sources/gcp/Types/gcp-cloud-kms-crypto-key-version) - -A CryptoKey is the parent of one or more CryptoKeyVersions. Each version contains the actual key material and its own state (enabled, disabled, destroyed, etc.). Overmind links to these versions so you can inspect individual key material lifecycles and detect risks such as disabled or scheduled-for-destruction versions. - -### [`gcp-cloud-kms-key-ring`](/sources/gcp/Types/gcp-cloud-kms-key-ring) - -Every CryptoKey resides within a Key Ring, which provides a namespace and location boundary. 
This link shows the Key Ring that owns the CryptoKey, allowing you to trace location-specific compliance requirements or IAM inheritance issues. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-cloud-kms-key-ring.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-cloud-kms-key-ring.md deleted file mode 100644 index 9893a1d2..00000000 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-cloud-kms-key-ring.md +++ /dev/null @@ -1,23 +0,0 @@ ---- -title: GCP Cloud Kms Key Ring -sidebar_label: gcp-cloud-kms-key-ring ---- - -A **Cloud KMS Key Ring** is a top-level container within Google Cloud KMS that groups one or more CryptoKeys in a specific GCP location (region). It acts as both an organisational unit and an IAM boundary: all CryptoKeys inside a Key Ring inherit the same location and share the same access-control policies. Creating a Key Ring is an irreversible, free operation and is a prerequisite for creating any CryptoKeys. -For full details, see the official documentation: https://cloud.google.com/kms/docs/object-hierarchy#key_rings - -**Terrafrom Mappings:** - -- `google_kms_key_ring.id` - -## Supported Methods - -- `GET`: Get GCP Cloud Kms Key Ring by "gcp-cloud-kms-key-ring-location|gcp-cloud-kms-key-ring-name" -- `LIST`: List all GCP Cloud Kms Key Ring items -- `SEARCH`: Search for GCP Cloud Kms Key Ring by "gcp-cloud-kms-key-ring-location" - -## Possible Links - -### [`gcp-cloud-kms-crypto-key`](/sources/gcp/Types/gcp-cloud-kms-crypto-key) - -Each CryptoKey belongs to exactly one Key Ring. Linking a Key Ring to its child `gcp-cloud-kms-crypto-key` items lets Overmind surface all encryption keys that share the same location and IAM policy, making it easier to assess the blast radius of any permission or configuration changes applied to the Key Ring. 
diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-cloud-resource-manager-project.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-cloud-resource-manager-project.md deleted file mode 100644 index 492a056e..00000000 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-cloud-resource-manager-project.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: GCP Cloud Resource Manager Project -sidebar_label: gcp-cloud-resource-manager-project ---- - -A Google Cloud Resource Manager Project represents the fundamental organisational unit within Google Cloud Platform (GCP). Every compute, storage or networking asset you create must live inside a Project, which in turn sits under a Folder or Organisation node. Projects provide isolated boundaries for Identity and Access Management (IAM), quotas, billing, API enablement and lifecycle operations such as creation, update, suspension and deletion. By modelling Projects, Overmind can surface risks linked to mis-scoped IAM roles, neglected billing settings or interactions with other resources _before_ any change is pushed to production. -Official documentation: https://cloud.google.com/resource-manager/docs/creating-managing-projects - -## Supported Methods - -- `GET`: Get a gcp-cloud-resource-manager-project by its "name" -- ~~`LIST`~~ -- ~~`SEARCH`~~ diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-cloud-resource-manager-tag-value.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-cloud-resource-manager-tag-value.md deleted file mode 100644 index 2d62d471..00000000 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-cloud-resource-manager-tag-value.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -title: GCP Cloud Resource Manager Tag Value -sidebar_label: gcp-cloud-resource-manager-tag-value ---- - -A GCP Cloud Resource Manager **Tag Value** is the second layer in Google Cloud’s new tagging hierarchy, sitting beneath a Tag Key and above the individual resources to which it is applied. 
Together, Tag Keys and Tag Values allow administrators to attach fine-grained, organisation-wide metadata to projects, folders and individual cloud resources, enabling consistent policy enforcement, cost allocation, automation and reporting across an estate. Each Tag Value represents a specific, permitted value for a given Tag Key (e.g. Tag Key `environment` may have Tag Values `production`, `staging`, `test`). -For a full description of Tag Values and how they fit into the tagging system, refer to Google’s documentation: https://cloud.google.com/resource-manager/reference/rest/v3/tagValues. - -**Terrafrom Mappings:** - -- `google_tags_tag_value.name` - -## Supported Methods - -- `GET`: Get a gcp-cloud-resource-manager-tag-value by its "name" -- ~~`LIST`~~ -- `SEARCH`: Search for TagValues by TagKey. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-address.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-address.md deleted file mode 100644 index bc91e016..00000000 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-address.md +++ /dev/null @@ -1,50 +0,0 @@ ---- -title: GCP Compute Address -sidebar_label: gcp-compute-address ---- - -A GCP Compute Address is a reserved, static IP address that can be either regional (tied to a specific region and VPC network) or global (usable by global load-balancing resources). Once reserved, the address can be attached to forwarding rules, virtual machine (VM) instances, Cloud NAT configurations and other networking resources, ensuring its IP does not change even if the underlying resource is recreated. See the official documentation for full details: https://cloud.google.com/compute/docs/ip-addresses/reserve-static-external-ip-address. 
- -**Terrafrom Mappings:** - -- `google_compute_address.name` - -## Supported Methods - -- `GET`: Get GCP Compute Address by "gcp-compute-address-name" -- `LIST`: List all GCP Compute Address items -- ~~`SEARCH`~~ - -## Possible Links - -### [`gcp-compute-address`](/sources/gcp/Types/gcp-compute-address) - -Static addresses rarely reference one another directly, but Overmind may surface links where an address is used as a reference target (for example, when one resource releases and another takes ownership of the same address). - -### [`gcp-compute-forwarding-rule`](/sources/gcp/Types/gcp-compute-forwarding-rule) - -Regional forwarding rules for Network Load Balancers or protocol forwarding can be configured with a specific static IP. The forwarding rule’s `IPAddress` field points to the Compute Address. - -### [`gcp-compute-global-forwarding-rule`](/sources/gcp/Types/gcp-compute-global-forwarding-rule) - -Global forwarding rules, used by HTTP(S), SSL, or TCP Proxy load balancers, reference a global static IP address. The global forwarding rule therefore links back to the associated Compute Address. - -### [`gcp-compute-instance`](/sources/gcp/Types/gcp-compute-instance) - -A VM instance’s network interface may be assigned a reserved external or internal IP. If an instance uses a static IP, the instance resource contains a link to the corresponding Compute Address. - -### [`gcp-compute-network`](/sources/gcp/Types/gcp-compute-network) - -Internal (private) static addresses are always allocated within a specific VPC network. The Compute Address resource stores the ID of the network from which the IP is taken, creating a link to the Network. - -### [`gcp-compute-public-delegated-prefix`](/sources/gcp/Types/gcp-compute-public-delegated-prefix) - -When you own a public delegated prefix, you can allocate individual static addresses from that range. Each resulting Compute Address records the delegated prefix it belongs to. 
- -### [`gcp-compute-router`](/sources/gcp/Types/gcp-compute-router) - -Cloud NAT configurations on a Cloud Router can consume one or more reserved external IP addresses. The router’s NAT config lists the Compute Addresses being used, forming a link. - -### [`gcp-compute-subnetwork`](/sources/gcp/Types/gcp-compute-subnetwork) - -For regional internal addresses you must specify the subnetwork (IP range) to allocate from. The Compute Address therefore references, and is linked to, the Subnetwork resource. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-autoscaler.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-autoscaler.md deleted file mode 100644 index 2ee81ac6..00000000 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-autoscaler.md +++ /dev/null @@ -1,22 +0,0 @@ ---- -title: GCP Compute Autoscaler -sidebar_label: gcp-compute-autoscaler ---- - -A GCP Compute Autoscaler is a zonal or regional resource that automatically adds or removes VM instances from a managed instance group to keep your application running at the desired performance level and cost. Scaling decisions can be driven by policies based on average CPU utilisation, HTTP load-balancing capacity, Cloud Monitoring metrics, schedules, or per-instance utilisation. Full details can be found in the official documentation: https://cloud.google.com/compute/docs/autoscaler - -**Terrafrom Mappings:** - -- `google_compute_autoscaler.name` - -## Supported Methods - -- `GET`: Get GCP Compute Autoscaler by "gcp-compute-autoscaler-name" -- `LIST`: List all GCP Compute Autoscaler items -- ~~`SEARCH`~~ - -## Possible Links - -### [`gcp-compute-instance-group-manager`](/sources/gcp/Types/gcp-compute-instance-group-manager) - -Every autoscaler is attached to exactly one managed instance group; in the GCP API this relationship is expressed through the `target` field, which points to the relevant `instanceGroupManager` resource. 
Following this link in Overmind reveals which VM instances the autoscaler is responsible for scaling. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-backend-service.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-backend-service.md deleted file mode 100644 index b29c676f..00000000 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-backend-service.md +++ /dev/null @@ -1,44 +0,0 @@ ---- -title: GCP Compute Backend Service -sidebar_label: gcp-compute-backend-service ---- - -A Compute Backend Service defines how Google Cloud Load Balancers distribute traffic to one or more back-end targets (Instance Groups, Network Endpoint Groups, or serverless workloads). It specifies the load-balancing algorithm, session affinity, capacity controls, health checks, time-outs, protocol and (optionally) a Cloud Armor security policy. Backend services exist as either regional or global resources, depending on the load balancer type. -For full details see the official Google Cloud documentation: https://cloud.google.com/load-balancing/docs/backend-service - -**Terrafrom Mappings:** - -- `google_compute_backend_service.name` -- `google_compute_region_backend_service.name` - -## Supported Methods - -- `GET`: Get GCP Compute Backend Service by "gcp-compute-backend-service-name" -- `LIST`: List all GCP Compute Backend Service items -- ~~`SEARCH`~~ - -## Possible Links - -### [`gcp-compute-health-check`](/sources/gcp/Types/gcp-compute-health-check) - -A backend service is required to reference one or more Health Checks. These determine the health of each backend target and whether traffic should be sent to it. - -### [`gcp-compute-instance`](/sources/gcp/Types/gcp-compute-instance) - -Individual VM instances receive traffic indirectly through a backend service when they belong to an instance group or unmanaged instance list that the backend service uses. 
- -### [`gcp-compute-instance-group`](/sources/gcp/Types/gcp-compute-instance-group) - -Managed or unmanaged Instance Groups are the most common type of backend that a backend service points to. The group’s VMs are the actual targets for load-balanced traffic. - -### [`gcp-compute-network`](/sources/gcp/Types/gcp-compute-network) - -Backends referenced by a backend service must reside in a specific VPC network; therefore the backend service is effectively bound to that network and its associated subnets and firewall rules. - -### [`gcp-compute-network-endpoint-group`](/sources/gcp/Types/gcp-compute-network-endpoint-group) - -Network Endpoint Groups (NEGs) can be configured as backends of a backend service to route traffic to endpoints such as containers, serverless services, or on-premises resources. - -### [`gcp-compute-security-policy`](/sources/gcp/Types/gcp-compute-security-policy) - -A backend service can optionally attach a Cloud Armor Security Policy to enforce L7 firewall rules, rate limiting, and other protective measures on incoming traffic before it reaches the backends. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-disk.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-disk.md deleted file mode 100644 index 4d61d32c..00000000 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-disk.md +++ /dev/null @@ -1,46 +0,0 @@ ---- -title: GCP Compute Disk -sidebar_label: gcp-compute-disk ---- - -A GCP Compute Disk—formally known as a Persistent Disk—is block-level storage that can be attached to Google Compute Engine virtual machine (VM) instances. Disks may be zonal or regional, support features such as snapshots, replication, and Customer-Managed Encryption Keys (CMEK), and can be resized or detached without data loss. 
Official documentation: https://cloud.google.com/compute/docs/disks - -**Terrafrom Mappings:** - -- `google_compute_disk.name` - -## Supported Methods - -- `GET`: Get GCP Compute Disk by "gcp-compute-disk-name" -- `LIST`: List all GCP Compute Disk items -- ~~`SEARCH`~~ - -## Possible Links - -### [`gcp-cloud-kms-crypto-key-version`](/sources/gcp/Types/gcp-cloud-kms-crypto-key-version) - -Indicates the specific Cloud KMS key version used when the disk is encrypted with a customer-managed encryption key. - -### [`gcp-compute-disk`](/sources/gcp/Types/gcp-compute-disk) - -For regional or replicated disks, the resource records the relationship to its source or replica peer disk. - -### [`gcp-compute-image`](/sources/gcp/Types/gcp-compute-image) - -Shows the image from which the disk was created, or images that have been built from this disk. - -### [`gcp-compute-instance`](/sources/gcp/Types/gcp-compute-instance) - -Lists the VM instances to which the disk is currently attached or has been attached historically. - -### [`gcp-compute-instant-snapshot`](/sources/gcp/Types/gcp-compute-instant-snapshot) - -Captures the association between the disk and any instant snapshots taken for rapid backup or restore operations. - -### [`gcp-compute-snapshot`](/sources/gcp/Types/gcp-compute-snapshot) - -Represents traditional snapshots for the disk, enabling point-in-time recovery or disk cloning. - -### [`gcp-storage-bucket`](/sources/gcp/Types/gcp-storage-bucket) - -If disk snapshots or images are exported to Cloud Storage, this link records the destination bucket holding those exports. 
diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-external-vpn-gateway.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-external-vpn-gateway.md deleted file mode 100644 index 353fc407..00000000 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-external-vpn-gateway.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -title: GCP Compute External Vpn Gateway -sidebar_label: gcp-compute-external-vpn-gateway ---- - -A **Compute External VPN Gateway** is a Google Cloud resource that represents a customer-managed VPN appliance that resides outside of Google’s network (for example, in an on-premises data centre or another cloud). By defining one or more external interface IP addresses and an associated redundancy type, it tells Cloud VPN (HA VPN or Classic VPN) where to terminate its tunnels. In other words, the resource is the “remote end” of a Cloud VPN connection, allowing Google Cloud to establish secure IPSec tunnels to external infrastructure. -For further details, see the official documentation: https://cloud.google.com/compute/docs/reference/rest/v1/externalVpnGateways - -**Terrafrom Mappings:** - -- `google_compute_external_vpn_gateway.name` - -## Supported Methods - -- `GET`: Get a gcp-compute-external-vpn-gateway by its "name" -- `LIST`: List all gcp-compute-external-vpn-gateway -- ~~`SEARCH`~~ diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-firewall.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-firewall.md deleted file mode 100644 index e970ca4b..00000000 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-firewall.md +++ /dev/null @@ -1,31 +0,0 @@ ---- -title: GCP Compute Firewall -sidebar_label: gcp-compute-firewall ---- - -A Google Cloud VPC firewall rule controls inbound and outbound traffic to and from the virtual machine (VM) instances that are attached to a particular VPC network. 
Each rule specifies a direction, priority, action (allow or deny), protocol and port list, and a target (network tags or service accounts). Rules are stateful and are evaluated before traffic reaches any instance, allowing you to centrally enforce network security policy across your workloads. -Official documentation: https://cloud.google.com/vpc/docs/firewalls - -**Terrafrom Mappings:** - -- `google_compute_firewall.name` - -## Supported Methods - -- `GET`: Get a gcp-compute-firewall by its "name" -- `LIST`: List all gcp-compute-firewall -- `SEARCH`: Search for firewalls by network tag. The query is a plain network tag name. - -## Possible Links - -### [`gcp-compute-instance`](/sources/gcp/Types/gcp-compute-instance) - -Firewall rules apply to VM instances that match their target criteria (network tags or service accounts). Therefore, an instance is linked to the firewall rules that currently govern the traffic it may send or receive. - -### [`gcp-compute-network`](/sources/gcp/Types/gcp-compute-network) - -Every firewall rule is created within a specific VPC network. The rule only affects resources that are attached to that network, so it is linked to its parent network resource. - -### [`gcp-iam-service-account`](/sources/gcp/Types/gcp-iam-service-account) - -Firewall rules can target VM instances by the service account they are running as. When a rule uses the `target_service_accounts` field, it is related to those IAM service accounts. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-forwarding-rule.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-forwarding-rule.md deleted file mode 100644 index 39d030b3..00000000 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-forwarding-rule.md +++ /dev/null @@ -1,51 +0,0 @@ ---- -title: GCP Compute Forwarding Rule -sidebar_label: gcp-compute-forwarding-rule ---- - -A GCP Compute Forwarding Rule defines how incoming packets are handled within Google Cloud. 
It binds an IP address, protocol and (optionally) port range to a specific target resource such as a backend service, target proxy or target pool. Forwarding rules underpin both external and internal load-balancing solutions and can be either regional or global in scope. -For full details see the official documentation: https://cloud.google.com/load-balancing/docs/forwarding-rule-concepts. - -**Terrafrom Mappings:** - -- `google_compute_forwarding_rule.name` - -## Supported Methods - -- `GET`: Get GCP Compute Forwarding Rule by "gcp-compute-forwarding-rule-name" -- `LIST`: List all GCP Compute Forwarding Rule items -- ~~`SEARCH`~~ - -## Possible Links - -### [`gcp-compute-backend-service`](/sources/gcp/Types/gcp-compute-backend-service) - -The forwarding rule may specify a backend service as its target (for example, when configuring an Internal TCP/UDP Load Balancer or External HTTP(S) Load Balancer). - -### [`gcp-compute-forwarding-rule`](/sources/gcp/Types/gcp-compute-forwarding-rule) - -This represents the same forwarding-rule resource; Overmind links to it so that self-references or associations between global and regional rules can be tracked. - -### [`gcp-compute-network`](/sources/gcp/Types/gcp-compute-network) - -For internal forwarding rules, the rule is created inside a specific VPC network; the link identifies that parent network. - -### [`gcp-compute-public-delegated-prefix`](/sources/gcp/Types/gcp-compute-public-delegated-prefix) - -If the rule’s IP address is allocated from a delegated public prefix, it will be linked to that prefix to show the allocation source. - -### [`gcp-compute-subnetwork`](/sources/gcp/Types/gcp-compute-subnetwork) - -Internal forwarding rules also reference the subnetwork from which their internal IP address is drawn. 
- -### [`gcp-compute-target-http-proxy`](/sources/gcp/Types/gcp-compute-target-http-proxy) - -External HTTP Load Balancer forwarding rules target an HTTP proxy, so the rule links to the relevant `target-http-proxy` resource. - -### [`gcp-compute-target-https-proxy`](/sources/gcp/Types/gcp-compute-target-https-proxy) - -External HTTPS Load Balancer forwarding rules target an HTTPS proxy; this link identifies that proxy. - -### [`gcp-compute-target-pool`](/sources/gcp/Types/gcp-compute-target-pool) - -Legacy Network Load Balancer forwarding rules can point directly to a target pool; the link shows which pool receives the traffic. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-global-address.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-global-address.md deleted file mode 100644 index ffbe9cb8..00000000 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-global-address.md +++ /dev/null @@ -1,31 +0,0 @@ ---- -title: GCP Compute Global Address -sidebar_label: gcp-compute-global-address ---- - -A **Compute Global Address** in Google Cloud Platform is a statically-reserved IP address that is reachable from, or usable across, all regions. It can be external (used, for example, by a global HTTP(S) load balancer) or internal (used by regional resources that require a routable, private global IP). Reserving the address ensures it does not change while it is in use, and allows it to be assigned to resources at creation time or later. 
-Official documentation: https://cloud.google.com/compute/docs/ip-addresses/reserve-static-external-ip-address - -**Terrafrom Mappings:** - -- `google_compute_global_address.name` - -## Supported Methods - -- `GET`: Get a gcp-compute-global-address by its "name" -- `LIST`: List all gcp-compute-global-address -- ~~`SEARCH`~~ - -## Possible Links - -### [`gcp-compute-network`](/sources/gcp/Types/gcp-compute-network) - -A global address may be bound to a specific VPC network when it is reserved as an internal global IP. Overmind links the address to the `gcp-compute-network` so you can see in which network the address is routable and assess overlapping CIDR or routing risks. - -### [`gcp-compute-public-delegated-prefix`](/sources/gcp/Types/gcp-compute-public-delegated-prefix) - -If the address is carved out of a public delegated prefix that your project controls, Overmind links it to that `gcp-compute-public-delegated-prefix` to show the parent block and enable checks for exhaustion or mis-allocation. - -### [`gcp-compute-subnetwork`](/sources/gcp/Types/gcp-compute-subnetwork) - -For internal global addresses that are further scoped to a particular subnetwork, Overmind establishes a link to the `gcp-compute-subnetwork` so you can trace which subnet’s routing table and firewall rules apply to traffic destined for the address. 
diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-global-forwarding-rule.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-global-forwarding-rule.md deleted file mode 100644 index 4f56abf5..00000000 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-global-forwarding-rule.md +++ /dev/null @@ -1,34 +0,0 @@ ---- -title: GCP Compute Global Forwarding Rule -sidebar_label: gcp-compute-global-forwarding-rule ---- - -A Google Cloud Compute Global Forwarding Rule defines a single anycast virtual IP address that routes incoming traffic at the global level to a specified target (such as an HTTP(S) proxy, SSL proxy or TCP proxy) or, for internal load balancing, directly to a backend service. It is the entry-point resource for most external HTTP(S) and proxy load balancers and for internal global load balancers. For full details see the Google Cloud documentation: https://cloud.google.com/load-balancing/docs/forwarding-rule-concepts - -**Terrafrom Mappings:** - -- `google_compute_global_forwarding_rule.name` - -## Supported Methods - -- `GET`: Get a gcp-compute-global-forwarding-rule by its "name" -- `LIST`: List all gcp-compute-global-forwarding-rule -- ~~`SEARCH`~~ - -## Possible Links - -### [`gcp-compute-backend-service`](/sources/gcp/Types/gcp-compute-backend-service) - -When the forwarding rule is created for an internal global load balancer, it references a backend service directly; the rule’s traffic is delivered to the backends listed in that service. Analysing this link lets Overmind trace traffic paths from the VIP to the actual instances or endpoints. - -### [`gcp-compute-network`](/sources/gcp/Types/gcp-compute-network) - -Internal global forwarding rules must be attached to a specific VPC network. Linking to the network resource reveals which project-wide connectivity domain the VIP belongs to and helps surface risks such as unintended exposure to peered networks. 
- -### [`gcp-compute-subnetwork`](/sources/gcp/Types/gcp-compute-subnetwork) - -If the forwarding rule is internal, it is scoped to a particular subnetwork. Understanding this relationship identifies the IP range in which the virtual IP lives and highlights segmentation or overlapping-CIDR issues. - -### [`gcp-compute-target-http-proxy`](/sources/gcp/Types/gcp-compute-target-http-proxy) - -For external HTTP(S), SSL or TCP proxy load balancers, the forwarding rule points to a target proxy resource. The proxy terminates the client connection before forwarding to backend services. Linking these resources enables Overmind to trace configuration chains and detect misconfigurations such as SSL policy mismatches or missing backends. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-health-check.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-health-check.md deleted file mode 100644 index b94f59de..00000000 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-health-check.md +++ /dev/null @@ -1,18 +0,0 @@ ---- -title: GCP Compute Health Check -sidebar_label: gcp-compute-health-check ---- - -A GCP Compute Health Check is a Google Cloud resource that periodically probes virtual machine instances or endpoints to decide whether they are fit to receive production traffic. The check runs from the Google-managed control plane using protocols such as TCP, SSL, HTTP(S), HTTP/2 or gRPC, and compares the response to thresholds you configure (e.g. response code, timeout, healthy/unhealthy counts). Backend services, target pools and managed instance groups use the resulting health status to route requests only to healthy instances and to trigger autoscaling or fail-over behaviour. Health checks come in global and regional flavours, aligning with global and regional load balancers respectively. 
-Official documentation: https://cloud.google.com/load-balancing/docs/health-checks - -**Terrafrom Mappings:** - -- `google_compute_health_check.name` -- `google_compute_region_health_check.name` - -## Supported Methods - -- `GET`: Get GCP Compute Health Check by "gcp-compute-health-check-name" -- `LIST`: List all GCP Compute Health Check items -- ~~`SEARCH`~~ diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-http-health-check.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-http-health-check.md deleted file mode 100644 index 987b860c..00000000 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-http-health-check.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -title: GCP Compute Http Health Check -sidebar_label: gcp-compute-http-health-check ---- - -A GCP Compute HTTP Health Check is a globally scoped resource that periodically sends HTTP requests to a specified port and path on your instances or endpoints to verify that they are responding correctly. Load balancers, managed instance groups and other Google Cloud services use the results of these checks to decide whether traffic should be routed to a given backend. Each check can be customised with parameters such as the request path, host header, check interval, timeout, and healthy/unhealthy thresholds. 
-For further details see the official documentation: https://cloud.google.com/compute/docs/load-balancing/health-checks#http-health-checks - -**Terrafrom Mappings:** - -- `google_compute_http_health_check.name` - -## Supported Methods - -- `GET`: Get a gcp-compute-http-health-check by its "name" -- `LIST`: List all gcp-compute-http-health-check -- ~~`SEARCH`~~ diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-image.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-image.md deleted file mode 100644 index 9ccc9158..00000000 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-image.md +++ /dev/null @@ -1,47 +0,0 @@ ---- -title: GCP Compute Image -sidebar_label: gcp-compute-image ---- - -A Google Cloud Compute Image is a read-only template that contains a boot disk configuration (including the operating system and any installed software) which can be used to create new persistent disks or VM instances. Images may be publicly provided by Google, published by third-party vendors, or built privately within your own project. They support features such as image families, deprecation, and customer-managed encryption keys (CMEK). -For full details see the official documentation: https://cloud.google.com/compute/docs/images - -**Terrafrom Mappings:** - -- `google_compute_image.name` - -## Supported Methods - -- `GET`: Get GCP Compute Image by "gcp-compute-image-name" -- `LIST`: List all GCP Compute Image items -- `SEARCH`: Search for GCP Compute Image by "gcp-compute-image-family" - -## Possible Links - -### [`gcp-cloud-kms-crypto-key`](/sources/gcp/Types/gcp-cloud-kms-crypto-key) - -If the image is protected with a customer-managed encryption key (CMEK), Overmind links the image to the Cloud KMS Crypto Key that encrypts its contents. 
- -### [`gcp-cloud-kms-crypto-key-version`](/sources/gcp/Types/gcp-cloud-kms-crypto-key-version) - -When CMEK protection specifies an explicit key version, the image is linked to that exact Crypto Key Version so you can trace roll-overs or revocations that might affect instance bootability. - -### [`gcp-compute-disk`](/sources/gcp/Types/gcp-compute-disk) - -Images can be created from existing persistent disks, and new disks can be created from an image. Overmind therefore links images to the disks that serve as their source or to the disks that have been instantiated from them. - -### [`gcp-compute-image`](/sources/gcp/Types/gcp-compute-image) - -Images belonging to the same image family or derived from one another (for example, when rolling a new version) are cross-linked so you can understand upgrade paths and deprecations within a family. - -### [`gcp-compute-snapshot`](/sources/gcp/Types/gcp-compute-snapshot) - -An image may be built from one or more snapshots of a disk, and snapshots can be exported from an image. Overmind links images to the snapshots that contributed to, or were generated from, them. - -### [`gcp-iam-service-account`](/sources/gcp/Types/gcp-iam-service-account) - -Access to create, deprecate or use an image is controlled through IAM roles. Overmind shows the service accounts that have permissions on the image, helping you assess who can launch VMs from it. - -### [`gcp-storage-bucket`](/sources/gcp/Types/gcp-storage-bucket) - -During import or export operations, raw disk files are stored in Cloud Storage. Overmind links an image to the Storage Buckets that hosted its source or export objects, enabling you to trace data residency and clean-up unused artefacts. 
diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-instance-group-manager.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-instance-group-manager.md deleted file mode 100644 index c9e3bf24..00000000 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-instance-group-manager.md +++ /dev/null @@ -1,39 +0,0 @@ ---- -title: GCP Compute Instance Group Manager -sidebar_label: gcp-compute-instance-group-manager ---- - -A Compute Instance Group Manager (IGM) is the control plane object for a Managed Instance Group in Google Cloud Platform. It is responsible for creating, deleting, and maintaining a homogeneous fleet of Compute Engine virtual machines according to a declarative configuration such as target size, instance template and update policy. Because the manager continually reconciles the group’s actual state with the desired state, it underpins features like rolling updates, auto-healing and autoscaling. -Official documentation: https://cloud.google.com/compute/docs/instance-groups/creating-groups-of-managed-instances - -**Terrafrom Mappings:** - -- `google_compute_instance_group_manager.name` - -## Supported Methods - -- `GET`: Get GCP Compute Instance Group Manager by "gcp-compute-instance-group-manager-name" -- `LIST`: List all GCP Compute Instance Group Manager items -- ~~`SEARCH`~~ - -## Possible Links - -### [`gcp-compute-autoscaler`](/sources/gcp/Types/gcp-compute-autoscaler) - -An Autoscaler resource can target a Managed Instance Group via its Instance Group Manager, dynamically increasing or decreasing the group’s size based on utilisation metrics or schedules. - -### [`gcp-compute-health-check`](/sources/gcp/Types/gcp-compute-health-check) - -Within an auto-healing policy the Instance Group Manager references one or more Health Check resources to decide when individual instances should be recreated. 
- -### [`gcp-compute-instance-group`](/sources/gcp/Types/gcp-compute-instance-group) - -The Instance Group Manager encapsulates and manages an underlying (managed) Instance Group resource that represents the actual collection of VM instances. - -### [`gcp-compute-instance-template`](/sources/gcp/Types/gcp-compute-instance-template) - -The manager uses an Instance Template to define the configuration (machine type, disks, metadata, etc.) of every VM it creates in the group. - -### [`gcp-compute-target-pool`](/sources/gcp/Types/gcp-compute-target-pool) - -For legacy network load balancing, an Instance Group Manager can be configured to automatically add or remove its instances from a Target Pool, enabling them to receive traffic from a forwarding rule. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-instance-group.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-instance-group.md deleted file mode 100644 index 5b45fd96..00000000 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-instance-group.md +++ /dev/null @@ -1,27 +0,0 @@ ---- -title: GCP Compute Instance Group -sidebar_label: gcp-compute-instance-group ---- - -A Google Cloud Compute Instance Group is a logical collection of Virtual Machine (VM) instances running on Google Compute Engine that are treated as a single entity for deployment, scaling and load-balancing purposes. Instance groups can be managed (all VMs created from a common template and automatically kept in the desired size/state) or unmanaged (a user-assembled set of individual VMs). They are commonly used behind load balancers to provide highly available, horizontally scalable services. 
-For full details see the official Google Cloud documentation: https://cloud.google.com/compute/docs/instance-groups - -**Terrafrom Mappings:** - -- `google_compute_instance_group.name` - -## Supported Methods - -- `GET`: Get GCP Compute Instance Group by "gcp-compute-instance-group-name" -- `LIST`: List all GCP Compute Instance Group items -- ~~`SEARCH`~~ - -## Possible Links - -### [`gcp-compute-network`](/sources/gcp/Types/gcp-compute-network) - -Every VM in an Instance Group must be attached to a VPC network. Overmind therefore links a Compute Instance Group to the Compute Network that provides its underlying connectivity, enabling you to trace how network-level policies or mis-configurations might affect the availability of the workload hosted by the group. - -### [`gcp-compute-subnetwork`](/sources/gcp/Types/gcp-compute-subnetwork) - -Within a given VPC network, all VMs in the Instance Group reside in a specific subnetwork. Overmind links the Instance Group to that Subnetwork so you can understand IP address allocation, regional placement and any subnet-specific firewall rules that could impact the instances. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-instance-template.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-instance-template.md deleted file mode 100644 index a109bd06..00000000 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-instance-template.md +++ /dev/null @@ -1,71 +0,0 @@ ---- -title: GCP Compute Instance Template -sidebar_label: gcp-compute-instance-template ---- - -A Google Cloud Compute Instance Template is a reusable description of the properties required to create a virtual machine (VM) instance. It encapsulates details such as machine type, boot image, disks, network interfaces, metadata, tags, and service-account settings. Once defined, the template can be used by users, managed instance groups, autoscalers, or other automation to create identically configured VMs at scale. 
-Official documentation: https://cloud.google.com/compute/docs/instance-templates - -**Terrafrom Mappings:** - -- `google_compute_instance_template.name` - -## Supported Methods - -- `GET`: Get a gcp-compute-instance-template by its "name" -- `LIST`: List all gcp-compute-instance-template -- `SEARCH`: Search for instance templates by network tag. The query is a plain network tag name. - -## Possible Links - -### [`gcp-cloud-kms-crypto-key`](/sources/gcp/Types/gcp-cloud-kms-crypto-key) - -An instance template can reference a customer-managed encryption key (CMEK) from Cloud KMS to encrypt the persistent disks defined in the template. - -### [`gcp-compute-disk`](/sources/gcp/Types/gcp-compute-disk) - -Boot and additional persistent disks are specified inside the template. Any disk image or snapshot expanded into an actual persistent disk at instance-creation time will appear as a linked compute-disk resource. - -### [`gcp-compute-firewall`](/sources/gcp/Types/gcp-compute-firewall) - -The network tags set in the template are used by VMs launched from it. Firewall rules that target those tags therefore become effective for every instance derived from the template. - -### [`gcp-compute-image`](/sources/gcp/Types/gcp-compute-image) - -The template’s boot disk references a specific compute image (public, custom, or shared). This image is the source from which the VM’s root filesystem is created. - -### [`gcp-compute-instance`](/sources/gcp/Types/gcp-compute-instance) - -When a VM is launched using this template—either manually or by a managed instance group—the resulting resource is a compute-instance that maintains a provenance link back to the template. - -### [`gcp-compute-network`](/sources/gcp/Types/gcp-compute-network) - -Each network interface declared in the template must point to a VPC network, establishing the connectivity context for all future instances based on the template. 
- -### [`gcp-compute-node-group`](/sources/gcp/Types/gcp-compute-node-group) - -If node affinity is configured in the template, instances created from it will attempt to schedule onto the specified sole-tenant node group. - -### [`gcp-compute-reservation`](/sources/gcp/Types/gcp-compute-reservation) - -A template can include reservation affinity, causing newly created VMs to consume capacity from a specific Compute Engine reservation. - -### [`gcp-compute-route`](/sources/gcp/Types/gcp-compute-route) - -Although routes are defined at the network level, all VMs derived from the template inherit those routes through their attached network, so routing behaviour is indirectly influenced by the template. - -### [`gcp-compute-security-policy`](/sources/gcp/Types/gcp-compute-security-policy) - -If instances launched from the template are later attached to backend services that use Cloud Armor security policies, their traffic will be evaluated against those policies; tracing the link helps assess exposure. - -### [`gcp-compute-snapshot`](/sources/gcp/Types/gcp-compute-snapshot) - -The template may specify a source snapshot instead of an image for one or more disks, resulting in disks that are restored from those snapshots at VM creation time. - -### [`gcp-compute-subnetwork`](/sources/gcp/Types/gcp-compute-subnetwork) - -For each network interface, the template can identify a specific subnetwork, dictating the IP range from which the instance will draw its primary internal address. - -### [`gcp-iam-service-account`](/sources/gcp/Types/gcp-iam-service-account) - -A service account can be attached in the template so that every VM started from it runs with the same IAM identity and associated OAuth scopes. 
diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-instance.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-instance.md deleted file mode 100644 index a945f36a..00000000 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-instance.md +++ /dev/null @@ -1,66 +0,0 @@ ---- -title: GCP Compute Instance -sidebar_label: gcp-compute-instance ---- - -A Google Cloud Compute Engine instance is a virtual machine (VM) that runs on Google’s infrastructure. It provides configurable CPU, memory, disk and network resources so you can run workloads in a scalable, on-demand manner. For full details see the official documentation: https://cloud.google.com/compute/docs/instances. - -**Terrafrom Mappings:** - -- `google_compute_instance.name` - -## Supported Methods - -- `GET`: Get GCP Compute Instance by "gcp-compute-instance-name" -- `LIST`: List all GCP Compute Instance items -- `SEARCH`: Search for GCP Compute Instance by "gcp-compute-instance-networkTag" - -## Possible Links - -### [`gcp-cloud-kms-crypto-key`](/sources/gcp/Types/gcp-cloud-kms-crypto-key) - -If the instance’s boot or data disks are encrypted with customer-managed encryption keys (CMEK), it references a Cloud KMS crypto key. - -### [`gcp-cloud-kms-crypto-key-version`](/sources/gcp/Types/gcp-cloud-kms-crypto-key-version) - -A specific version of the KMS key may be recorded when CMEK encryption is enabled on the instance’s disks. - -### [`gcp-compute-disk`](/sources/gcp/Types/gcp-compute-disk) - -Boot and additional persistent disks are attached to the instance; these disks back the VM’s storage. - -### [`gcp-compute-firewall`](/sources/gcp/Types/gcp-compute-firewall) - -Firewall rules that target the instance’s network tags or service account control inbound and outbound traffic for the VM. - -### [`gcp-compute-image`](/sources/gcp/Types/gcp-compute-image) - -The instance’s boot disk is created from a Compute Engine image, capturing the operating system and initial state. 
- -### [`gcp-compute-instance-group-manager`](/sources/gcp/Types/gcp-compute-instance-group-manager) - -When the VM is part of a managed instance group (MIG), the group manager is responsible for creating, deleting and updating the instance. - -### [`gcp-compute-instance-template`](/sources/gcp/Types/gcp-compute-instance-template) - -Instances launched via a template inherit machine type, disks, metadata and network settings defined in that template. - -### [`gcp-compute-network`](/sources/gcp/Types/gcp-compute-network) - -Every network interface on the instance is connected to a VPC network, determining the VM’s reachable address space. - -### [`gcp-compute-route`](/sources/gcp/Types/gcp-compute-route) - -Routes in the attached VPC network dictate how the instance’s traffic is forwarded; some routes may apply only to instances with specific tags. - -### [`gcp-compute-snapshot`](/sources/gcp/Types/gcp-compute-snapshot) - -Snapshots can be taken from the instance’s persistent disks for backup or cloning purposes, creating a link between the VM and its snapshots. - -### [`gcp-compute-subnetwork`](/sources/gcp/Types/gcp-compute-subnetwork) - -Each network interface is placed within a subnetwork, assigning the instance its internal IP range and regional scope. - -### [`gcp-iam-service-account`](/sources/gcp/Types/gcp-iam-service-account) - -An optional service account is attached to the instance, granting it IAM-scoped credentials to access Google APIs. 
diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-instant-snapshot.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-instant-snapshot.md deleted file mode 100644 index 82699609..00000000 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-instant-snapshot.md +++ /dev/null @@ -1,23 +0,0 @@ ---- -title: GCP Compute Instant Snapshot -sidebar_label: gcp-compute-instant-snapshot ---- - -A GCP Compute Instant Snapshot is a point-in-time, crash-consistent copy of a Compute Engine persistent disk that is created almost instantaneously, permitting rapid backup, cloning, and disaster-recovery workflows. Instant snapshots can be used to restore a disk to the exact state it was in when the snapshot was taken or to create new disks that replicate that state. They differ from traditional snapshots primarily in the speed at which they are taken and restored. -Official documentation: https://cloud.google.com/compute/docs/disks/instant-snapshots - -**Terrafrom Mappings:** - -- `google_compute_instant_snapshot.name` - -## Supported Methods - -- `GET`: Get GCP Compute Instant Snapshot by "gcp-compute-instant-snapshot-name" -- `LIST`: List all GCP Compute Instant Snapshot items -- ~~`SEARCH`~~ - -## Possible Links - -### [`gcp-compute-disk`](/sources/gcp/Types/gcp-compute-disk) - -An instant snapshot is always sourced from an existing Compute Engine persistent disk. Therefore, each `gcp-compute-instant-snapshot` has a direct parent–child relationship with the `gcp-compute-disk` it captures, and Overmind links the snapshot back to the originating disk to surface dependency and recovery paths. 
diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-machine-image.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-machine-image.md deleted file mode 100644 index 5cb79bfb..00000000 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-machine-image.md +++ /dev/null @@ -1,50 +0,0 @@ ---- -title: GCP Compute Machine Image -sidebar_label: gcp-compute-machine-image ---- - -A Google Cloud Compute Machine Image is a first-class resource that captures the full state of a virtual machine at a point in time, including all attached disks, metadata, instance properties, service-accounts, and network configuration. It can be used to recreate identical VMs quickly or share a golden template across projects and organisations. See the official documentation for full details: https://cloud.google.com/compute/docs/machine-images - -**Terrafrom Mappings:** - -- `google_compute_machine_image.name` - -## Supported Methods - -- `GET`: Get GCP Compute Machine Image by "gcp-compute-machine-image-name" -- `LIST`: List all GCP Compute Machine Image items -- ~~`SEARCH`~~ - -## Possible Links - -### [`gcp-cloud-kms-crypto-key-version`](/sources/gcp/Types/gcp-cloud-kms-crypto-key-version) - -A machine image may be protected with customer-managed encryption keys (CMEK); when this option is used it references the specific Cloud KMS Crypto Key Version that encrypts the image data. - -### [`gcp-compute-disk`](/sources/gcp/Types/gcp-compute-disk) - -The boot disk and any additional data disks attached to the source instance are incorporated into the machine image. When a new instance is created from the machine image, new persistent disks are instantiated from these definitions. - -### [`gcp-compute-image`](/sources/gcp/Types/gcp-compute-image) - -Within a machine image the boot disk is ultimately based on a Compute Image. Thus the machine image indirectly depends on, and records, the image that was used to build the source VM. 
- -### [`gcp-compute-instance`](/sources/gcp/Types/gcp-compute-instance) - -A machine image is created from a source Compute Instance and can in turn be used to launch new instances that replicate the captured configuration. - -### [`gcp-compute-network`](/sources/gcp/Types/gcp-compute-network) - -Network interface settings, including the VPC network IDs, are stored in the machine image so that any VM instantiated from it can attach to the same or equivalent networks. - -### [`gcp-compute-snapshot`](/sources/gcp/Types/gcp-compute-snapshot) - -Internally, Google Cloud may use snapshots of the instance’s disks when building the machine image. Conversely, users can export disks from a machine image as individual snapshots. - -### [`gcp-compute-subnetwork`](/sources/gcp/Types/gcp-compute-subnetwork) - -The machine image stores the exact subnetwork configuration of each NIC, allowing recreated VMs to provision themselves in the same subnetworks. - -### [`gcp-iam-service-account`](/sources/gcp/Types/gcp-iam-service-account) - -Service accounts attached to the source instance are recorded in the machine image; any VM launched from the image inherits those service account bindings unless overridden. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-network-endpoint-group.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-network-endpoint-group.md deleted file mode 100644 index 818ed55f..00000000 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-network-endpoint-group.md +++ /dev/null @@ -1,34 +0,0 @@ ---- -title: GCP Compute Network Endpoint Group -sidebar_label: gcp-compute-network-endpoint-group ---- - -A Google Cloud Platform Compute Network Endpoint Group (NEG) is a collection of network endpoints—such as VM NICs, container pods, Cloud Run services, or Cloud Functions—that can be treated as a single backend target by Load Balancing and Service Directory. 
NEGs give fine-grained control over which exact endpoints receive traffic and allow serverless or hybrid back-ends to participate in layer-4/7 load balancing. See the official documentation for full details: https://cloud.google.com/load-balancing/docs/negs. - -**Terrafrom Mappings:** - -- `google_compute_network_endpoint_group.name` - -## Supported Methods - -- `GET`: Get a gcp-compute-network-endpoint-group by its "name" -- `LIST`: List all gcp-compute-network-endpoint-group -- ~~`SEARCH`~~ - -## Possible Links - -### [`gcp-cloud-functions-function`](/sources/gcp/Types/gcp-cloud-functions-function) - -A serverless NEG can reference a specific Cloud Function. Overmind therefore links the NEG to the underlying `gcp-cloud-functions-function` it represents, showing which function will receive traffic through the load balancer. - -### [`gcp-compute-network`](/sources/gcp/Types/gcp-compute-network) - -Zonal and regional NEGs are created inside a particular VPC network. The link indicates the network context in which the endpoints exist, helping to surface routing and firewall considerations. - -### [`gcp-compute-subnetwork`](/sources/gcp/Types/gcp-compute-subnetwork) - -When a NEG is scoped to a subnetwork (for example for VM or GKE pod endpoints), Overmind links it to that subnetwork so you can trace how traffic enters specific IP ranges. - -### [`gcp-run-service`](/sources/gcp/Types/gcp-run-service) - -Serverless NEGs can point to Cloud Run services. This link shows which `gcp-run-service` is exposed through the NEG and subsequently through any HTTP(S) load balancer. 
diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-network.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-network.md deleted file mode 100644 index 64c63197..00000000 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-network.md +++ /dev/null @@ -1,26 +0,0 @@ ---- -title: GCP Compute Network -sidebar_label: gcp-compute-network ---- - -A Google Cloud Platform (GCP) Compute Network—commonly called a Virtual Private Cloud (VPC) network—provides the fundamental isolation and IP address space in which all other networking resources (subnetworks, routes, firewall rules, VPNs, etc.) are created. It is a global resource that spans all regions in a project, allowing workloads to communicate securely inside Google’s backbone and to the internet where required. For a full description see the official documentation: https://cloud.google.com/vpc/docs/vpc - -**Terrafrom Mappings:** - -- `google_compute_network.name` - -## Supported Methods - -- `GET`: Get a gcp-compute-network by its "name" -- `LIST`: List all gcp-compute-network -- ~~`SEARCH`~~ - -## Possible Links - -### [`gcp-compute-network`](/sources/gcp/Types/gcp-compute-network) - -A Compute Network can be peered with, or shared to, another Compute Network. Overmind records these peer or shared-VPC relationships by linking one `gcp-compute-network` item to the other(s). - -### [`gcp-compute-subnetwork`](/sources/gcp/Types/gcp-compute-subnetwork) - -Every subnetwork is created inside exactly one VPC network. Overmind therefore links each `gcp-compute-subnetwork` back to its parent `gcp-compute-network`, and conversely shows the network’s collection of subnetworks. 
diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-node-group.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-node-group.md deleted file mode 100644 index 68faa42b..00000000 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-node-group.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -title: GCP Compute Node Group -sidebar_label: gcp-compute-node-group ---- - -A GCP Compute Node Group is a managed collection of sole-tenant nodes that are all created from the same node template. These groups allow you to provision and administer dedicated physical servers for your Compute Engine virtual machines, giving you fine-grained control over workload isolation, hardware affinity, licensing, and maintenance windows. For a detailed explanation, see the official Google Cloud documentation: https://cloud.google.com/compute/docs/nodes. - -**Terrafrom Mappings:** - -- `google_compute_node_group.name` -- `google_compute_node_template.name` - -## Supported Methods - -- `GET`: Get GCP Compute Node Group by "gcp-compute-node-group-name" -- `LIST`: List all GCP Compute Node Group items -- `SEARCH`: Search for GCP Compute Node Group by "gcp-compute-node-group-nodeTemplateName" diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-node-template.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-node-template.md deleted file mode 100644 index b44b560a..00000000 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-node-template.md +++ /dev/null @@ -1,23 +0,0 @@ ---- -title: GCP Compute Node Template -sidebar_label: gcp-compute-node-template ---- - -A GCP Compute Node Template is a reusable description of the hardware configuration and host maintenance policies that will be applied to one or more Sole-Tenant Nodes in Google Cloud. The template specifies attributes such as CPU platform, virtual CPU count, memory, node affinity labels, and automatic restart behaviour. 
When you later create a Node Group, the group references a single Node Template, ensuring that every node in the group is created with an identical shape. -For a full specification of the resource, see the official Google Cloud documentation: https://cloud.google.com/compute/docs/nodes/sole-tenant-nodes - -**Terrafrom Mappings:** - -- `google_compute_node_template.name` - -## Supported Methods - -- `GET`: Get GCP Compute Node Template by "gcp-compute-node-template-name" -- `LIST`: List all GCP Compute Node Template items -- ~~`SEARCH`~~ - -## Possible Links - -### [`gcp-compute-node-group`](/sources/gcp/Types/gcp-compute-node-group) - -A GCP Compute Node Group consumes a single Node Template. Overmind creates a link from a node group back to the template it references so that you can assess how changes to the template (for example, switching CPU platforms) will affect every node that belongs to the group. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-project.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-project.md deleted file mode 100644 index 776fa9dd..00000000 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-project.md +++ /dev/null @@ -1,33 +0,0 @@ ---- -title: GCP Compute Project -sidebar_label: gcp-compute-project ---- - -A Google Cloud Project is the fundamental organisational unit in Google Cloud Platform. It acts as a logical container for all your Google Cloud resources, identity and access management (IAM) policies, APIs, quotas and billing information. Every resource – from virtual machines to service accounts – is created in exactly one project, and project-level settings (such as audit logging, labels and network host project status) govern how those resources operate. 
See the official documentation for full details: https://cloud.google.com/resource-manager/docs/creating-managing-projects - -**Terrafrom Mappings:** - -- `google_project.project_id` -- `google_compute_shared_vpc_host_project.project` -- `google_compute_shared_vpc_service_project.service_project` -- `google_compute_shared_vpc_service_project.host_project` -- `google_project_iam_binding.project` -- `google_project_iam_member.project` -- `google_project_iam_policy.project` -- `google_project_iam_audit_config.project` - -## Supported Methods - -- `GET`: Get a gcp-compute-project by its "name" -- ~~`LIST`~~ -- ~~`SEARCH`~~ - -## Possible Links - -### [`gcp-iam-service-account`](/sources/gcp/Types/gcp-iam-service-account) - -Service accounts are identities that live inside a project. Overmind links a gcp-iam-service-account to its parent gcp-compute-project to show which project owns and governs the credentials and IAM permissions of that service account. - -### [`gcp-storage-bucket`](/sources/gcp/Types/gcp-storage-bucket) - -Every Cloud Storage bucket is created within a specific project. Overmind establishes a link from a gcp-storage-bucket back to its gcp-compute-project so you can trace ownership, billing and IAM inheritance for the bucket. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-public-delegated-prefix.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-public-delegated-prefix.md deleted file mode 100644 index 79c4a254..00000000 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-public-delegated-prefix.md +++ /dev/null @@ -1,27 +0,0 @@ ---- -title: GCP Compute Public Delegated Prefix -sidebar_label: gcp-compute-public-delegated-prefix ---- - -A Public Delegated Prefix is a regional IPv4 or IPv6 address range that you reserve from Google Cloud and can then subdivide and delegate to other projects, VPC networks, or Private Service Connect service attachments. 
It allows you to keep ownership of the parent prefix while giving consumers controlled use of sub-prefixes, simplifying address management and avoiding manual peering or routing configurations. -For full details, see the official documentation: https://cloud.google.com/vpc/docs/create-pdp - -**Terrafrom Mappings:** - -- `google_compute_public_delegated_prefix.id` - -## Supported Methods - -- `GET`: Get a gcp-compute-public-delegated-prefix by its "name" -- `LIST`: List all gcp-compute-public-delegated-prefix -- `SEARCH`: Search with full ID: projects/[project]/regions/[region]/publicDelegatedPrefixes/[name] (used for terraform mapping). - -## Possible Links - -### [`gcp-cloud-resource-manager-project`](/sources/gcp/Types/gcp-cloud-resource-manager-project) - -This prefix belongs to and is created within a specific Google Cloud project; the link points from the Public Delegated Prefix to its parent project. - -### [`gcp-compute-public-delegated-prefix`](/sources/gcp/Types/gcp-compute-public-delegated-prefix) - -A parent Public Delegated Prefix can be linked to child delegated sub-prefixes (or vice-versa) to represent hierarchy and inheritance of the IP space. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-region-commitment.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-region-commitment.md deleted file mode 100644 index 385724cc..00000000 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-region-commitment.md +++ /dev/null @@ -1,22 +0,0 @@ ---- -title: GCP Compute Region Commitment -sidebar_label: gcp-compute-region-commitment ---- - -A Compute Region Commitment in Google Cloud Platform (GCP) represents a contractual agreement to purchase a certain amount of vCPU, memory, GPUs or local SSD capacity within a specific region for one or three years. In exchange for this up-front commitment, you receive a discounted hourly rate for the covered resources, regardless of whether the capacity is actually in use. 
Commitments are created per-project and per-region, and the discount automatically applies to any eligible VM instances running in that region. For full details see the official documentation: https://cloud.google.com/compute/docs/instances/signing-up-committed-use-discounts - -**Terrafrom Mappings:** - -- `google_compute_region_commitment.name` - -## Supported Methods - -- `GET`: Get a gcp-compute-region-commitment by its "name" -- `LIST`: List all gcp-compute-region-commitment -- ~~`SEARCH`~~ - -## Possible Links - -### [`gcp-compute-reservation`](/sources/gcp/Types/gcp-compute-reservation) - -Reservations and commitments often work together: a reservation guarantees that capacity is available, while a commitment provides a discount for that capacity. When Overmind discovers a region commitment it links it to any compute reservations in the same project and region so you can see both the cost commitment and the capacity guarantee in one place. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-regional-instance-group-manager.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-regional-instance-group-manager.md deleted file mode 100644 index f291a1ce..00000000 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-regional-instance-group-manager.md +++ /dev/null @@ -1,39 +0,0 @@ ---- -title: GCP Compute Regional Instance Group Manager -sidebar_label: gcp-compute-regional-instance-group-manager ---- - -A Google Cloud Compute Regional Instance Group Manager (RIGM) is a control plane resource that creates, deletes, updates and monitors a homogeneous set of virtual machine (VM) instances that are distributed across two or more zones within the same region. By using a RIGM you gain automated rolling updates, proactive auto-healing and the ability to spread workload across zones for higher availability. 
-Official documentation: https://cloud.google.com/compute/docs/instance-groups/creating-groups-of-managed-instances#regional - -**Terrafrom Mappings:** - -- `google_compute_region_instance_group_manager.name` - -## Supported Methods - -- `GET`: Get GCP Compute Regional Instance Group Manager by "gcp-compute-regional-instance-group-manager-name" -- `LIST`: List all GCP Compute Regional Instance Group Manager items -- ~~`SEARCH`~~ - -## Possible Links - -### [`gcp-compute-autoscaler`](/sources/gcp/Types/gcp-compute-autoscaler) - -A regional instance group manager can be linked to an Autoscaler resource that dynamically adjusts the number of VM instances in the managed group based on load, schedules or custom metrics. - -### [`gcp-compute-health-check`](/sources/gcp/Types/gcp-compute-health-check) - -Health checks are referenced by the RIGM to perform auto-healing; instances that fail the configured health check are recreated automatically. - -### [`gcp-compute-instance-group`](/sources/gcp/Types/gcp-compute-instance-group) - -The RIGM creates and controls a Regional Managed Instance Group. This underlying instance group is where the actual VM instances live and where traffic is balanced. - -### [`gcp-compute-instance-template`](/sources/gcp/Types/gcp-compute-instance-template) - -Every RIGM points to an Instance Template that defines the machine type, boot disk, metadata and other properties used when new VM instances are instantiated. - -### [`gcp-compute-target-pool`](/sources/gcp/Types/gcp-compute-target-pool) - -For legacy network load balancing, a RIGM can register its instances with a Target Pool so that traffic from a network load balancer is distributed across the managed instances. 
diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-reservation.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-reservation.md deleted file mode 100644 index 1fe5fe3e..00000000 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-reservation.md +++ /dev/null @@ -1,22 +0,0 @@ ---- -title: GCP Compute Reservation -sidebar_label: gcp-compute-reservation ---- - -A GCP Compute Reservation is a zonal capacity-planning resource that lets you pre-allocate Compute Engine virtual machine capacity so that it is always available when your workloads need it. By creating a reservation you can guarantee that the required number and type of vCPUs, memory and accelerators are held for your project in a particular zone, avoiding scheduling failures during peaks or regional outages. For full details, see the official Google Cloud documentation: https://cloud.google.com/compute/docs/instances/reserving-zonal-resources - -**Terrafrom Mappings:** - -- `google_compute_reservation.name` - -## Supported Methods - -- `GET`: Get GCP Compute Reservation by "gcp-compute-reservation-name" -- `LIST`: List all GCP Compute Reservation items -- ~~`SEARCH`~~ - -## Possible Links - -### [`gcp-compute-region-commitment`](/sources/gcp/Types/gcp-compute-region-commitment) - -Reservations guarantee capacity, while regional commitments provide sustained-use discounts for that capacity. A reservation created in a zone may be covered by, or contribute to the utilisation of, a regional commitment in the same region, so analysing the commitment alongside the reservation reveals both availability and cost-optimisation aspects of the deployment. 
diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-route.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-route.md deleted file mode 100644 index e4031983..00000000 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-route.md +++ /dev/null @@ -1,35 +0,0 @@ ---- -title: GCP Compute Route -sidebar_label: gcp-compute-route ---- - -A **GCP Compute Route** is a routing rule attached to a Google Cloud Virtual Private Cloud (VPC) network that determines how packets are forwarded from instances towards their destinations. Each route contains a destination CIDR block and a single next-hop target, such as an instance, VPN tunnel, gateway or internal load-balancer forwarding rule. Routes can be either system-generated (e.g. subnet and peering routes) or user-defined to control custom traffic flows, enforce security boundaries or implement hybrid-connectivity scenarios. -Official documentation: https://cloud.google.com/vpc/docs/routes - -**Terrafrom Mappings:** - -- `google_compute_route.name` - -## Supported Methods - -- `GET`: Get a gcp-compute-route by its "name" -- `LIST`: List all gcp-compute-route -- `SEARCH`: Search for routes by network tag. The query is a plain network tag name. - -## Possible Links - -### [`gcp-compute-forwarding-rule`](/sources/gcp/Types/gcp-compute-forwarding-rule) - -A route may specify an internal TCP/UDP load balancer (ILB) forwarding rule as its `nextHopIlb`, so the route is linked to the forwarding rule that receives the traffic. - -### [`gcp-compute-instance`](/sources/gcp/Types/gcp-compute-instance) - -When `nextHopInstance` is used, the route points to a specific Compute Engine instance that acts as a gateway. Instances are therefore linked as potential next hops for the route. - -### [`gcp-compute-network`](/sources/gcp/Types/gcp-compute-network) - -Every route is created inside exactly one VPC network, referenced by the `network` field. 
The relationship ties the route to the network whose traffic it influences. - -### [`gcp-compute-vpn-tunnel`](/sources/gcp/Types/gcp-compute-vpn-tunnel) - -If `nextHopVpnTunnel` is set, the route forwards matching traffic into a Cloud VPN tunnel. The route is consequently linked to the VPN tunnel resource it targets. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-router.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-router.md deleted file mode 100644 index fcb42c21..00000000 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-router.md +++ /dev/null @@ -1,30 +0,0 @@ ---- -title: GCP Compute Router -sidebar_label: gcp-compute-router ---- - -A Google Cloud Compute Router is a fully distributed and managed Border Gateway Protocol (BGP) routing service that dynamically exchanges routes between your Virtual Private Cloud (VPC) network and on-premises or cloud networks connected via VPN or Cloud Interconnect. By advertising only the necessary prefixes, it enables highly available, scalable, and policy-driven traffic engineering without the need to run or maintain your own routing appliances. See the official documentation for full details: https://cloud.google.com/network-connectivity/docs/router - -**Terrafrom Mappings:** - -- `google_compute_router.id` - -## Supported Methods - -- `GET`: Get a gcp-compute-router by its "name" -- `LIST`: List all gcp-compute-router -- `SEARCH`: Search with full ID: projects/[project]/regions/[region]/routers/[router] (used for terraform mapping). - -## Possible Links - -### [`gcp-compute-network`](/sources/gcp/Types/gcp-compute-network) - -A Compute Router is created inside a specific VPC network and advertises routes for that network; therefore it is directly linked to the gcp-compute-network resource in which it resides. 
- -### [`gcp-compute-subnetwork`](/sources/gcp/Types/gcp-compute-subnetwork) - -Subnets within the parent VPC network can have their routes propagated or learned via the Compute Router, especially when using dynamic routing modes; this establishes an indirect but important relationship with each gcp-compute-subnetwork. - -### [`gcp-compute-vpn-tunnel`](/sources/gcp/Types/gcp-compute-vpn-tunnel) - -When Cloud VPN is configured in dynamic mode, the VPN tunnel relies on a Compute Router to exchange BGP routes with the peer gateway, making the tunnel dependent on, and logically linked to, the corresponding gcp-compute-vpn-tunnel resource. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-security-policy.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-security-policy.md deleted file mode 100644 index 60cb8cec..00000000 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-security-policy.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -title: GCP Compute Security Policy -sidebar_label: gcp-compute-security-policy ---- - -A GCP Compute Security Policy represents a Google Cloud Armor security policy that you configure to protect your applications and services from malicious or unwanted traffic. Each policy is made up of an ordered list of rules that allow, deny, or rate-limit requests based on layer-3/4 characteristics or custom layer-7 expressions. Security policies can be associated with external Application Load Balancers, Cloud CDN, and other HTTP(S)-based backend services, enabling centralised, declarative control over inbound traffic behaviour. 
-For full details, see the official Google documentation: https://cloud.google.com/compute/docs/reference/rest/v1/securityPolicies - -**Terrafrom Mappings:** - -- `google_compute_security_policy.name` - -## Supported Methods - -- `GET`: Get GCP Compute Security Policy by "gcp-compute-security-policy-name" -- `LIST`: List all GCP Compute Security Policy items -- ~~`SEARCH`~~ diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-snapshot.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-snapshot.md deleted file mode 100644 index bc72cc89..00000000 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-snapshot.md +++ /dev/null @@ -1,31 +0,0 @@ ---- -title: GCP Compute Snapshot -sidebar_label: gcp-compute-snapshot ---- - -A **GCP Compute Snapshot** is a point-in-time, incremental backup of a Compute Engine persistent or regional disk. Snapshots can be stored in multiple regions, encrypted with customer-managed keys, and used to create new disks, thereby providing a simple mechanism for backup, disaster recovery and environment cloning. -Official documentation: https://cloud.google.com/compute/docs/disks/create-snapshots - -**Terrafrom Mappings:** - -- `google_compute_snapshot.name` - -## Supported Methods - -- `GET`: Get GCP Compute Snapshot by "gcp-compute-snapshot-name" -- `LIST`: List all GCP Compute Snapshot items -- ~~`SEARCH`~~ - -## Possible Links - -### [`gcp-cloud-kms-crypto-key-version`](/sources/gcp/Types/gcp-cloud-kms-crypto-key-version) - -If the snapshot is encrypted with a customer-managed encryption key (CMEK), it references the specific Cloud KMS CryptoKeyVersion that holds the key material. Overmind links the snapshot to that key version so you can trace encryption dependencies and confirm key rotation policies. - -### [`gcp-compute-disk`](/sources/gcp/Types/gcp-compute-disk) - -Every snapshot originates from a source disk. 
This link shows which Compute Engine disk (zonal or regional) was used to create the snapshot, letting you assess blast radius and recovery workflows. - -### [`gcp-compute-instant-snapshot`](/sources/gcp/Types/gcp-compute-instant-snapshot) - -An instant snapshot is a fast, crash-consistent capture that can later be converted into a regular snapshot. When such a conversion occurs, Overmind links the resulting standard snapshot to its originating instant snapshot, giving visibility into the lineage of your backups. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-ssl-certificate.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-ssl-certificate.md deleted file mode 100644 index bcdd663c..00000000 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-ssl-certificate.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -title: GCP Compute Ssl Certificate -sidebar_label: gcp-compute-ssl-certificate ---- - -A **Google Compute SSL Certificate** represents an SSL certificate resource that can be attached to Google Cloud load-balancers to provide encrypted (HTTPS or SSL proxy) traffic termination. It stores the public certificate and its corresponding private key, enabling Compute Engine and Cloud Load Balancing to serve traffic securely on the specified domains. Certificates can be self-managed (you upload the PEM-encoded certificate and key) or Google-managed (Google provisions and renews them automatically). Full details are available in the official documentation: [Google Compute Engine – SSL certificates](https://cloud.google.com/compute/docs/reference/rest/v1/sslCertificates). 
- -**Terrafrom Mappings:** - -- `google_compute_ssl_certificate.name` - -## Supported Methods - -- `GET`: Get a gcp-compute-ssl-certificate by its "name" -- `LIST`: List all gcp-compute-ssl-certificate -- ~~`SEARCH`~~ diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-ssl-policy.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-ssl-policy.md deleted file mode 100644 index f0b5c098..00000000 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-ssl-policy.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -title: GCP Compute Ssl Policy -sidebar_label: gcp-compute-ssl-policy ---- - -A Google Cloud Compute **SSL Policy** specifies the minimum TLS protocol version and the set of supported cipher suites that HTTPS or SSL-proxy load balancers are allowed to use when negotiating SSL/TLS with clients. By attaching an SSL Policy to a target HTTPS proxy or target SSL proxy, you can enforce stronger security standards, ensure compliance, and gradually deprecate outdated encryption algorithms without disrupting traffic. -For detailed information, refer to the official Google Cloud documentation: https://cloud.google.com/load-balancing/docs/ssl-policies-concepts. - -**Terrafrom Mappings:** - -- `google_compute_ssl_policy.name` - -## Supported Methods - -- `GET`: Get a gcp-compute-ssl-policy by its "name" -- `LIST`: List all gcp-compute-ssl-policy -- ~~`SEARCH`~~ diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-subnetwork.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-subnetwork.md deleted file mode 100644 index 229d73bf..00000000 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-subnetwork.md +++ /dev/null @@ -1,26 +0,0 @@ ---- -title: GCP Compute Subnetwork -sidebar_label: gcp-compute-subnetwork ---- - -A GCP Compute Subnetwork is a regional, layer-3 virtual network segment that belongs to a single Google Cloud VPC network. 
It defines an internal RFC 1918 IP address range (primary and optional secondary ranges) from which VM instances, containers and other resources receive their internal IPs. Within each subnetwork you can enable or disable Private Google Access, set flow-log export settings, IPv6 configurations, and control access through firewall rules inherited from the parent VPC. For a comprehensive overview refer to the official documentation: https://cloud.google.com/vpc/docs/subnets. - -**Terrafrom Mappings:** - -- `google_compute_subnetwork.name` - -## Supported Methods - -- `GET`: Get a gcp-compute-subnetwork by its "name" -- `LIST`: List all gcp-compute-subnetwork -- ~~`SEARCH`~~ - -## Possible Links - -### [`gcp-compute-network`](/sources/gcp/Types/gcp-compute-network) - -Every subnetwork is created inside exactly one VPC network. This link represents that parent–child relationship, allowing Overmind to show which VPC a particular subnetwork belongs to and, conversely, to enumerate all subnetworks within a given VPC. - -### [`gcp-compute-public-delegated-prefix`](/sources/gcp/Types/gcp-compute-public-delegated-prefix) - -A public delegated prefix can be assigned to a subnetwork so that resources inside the subnet can use public IPv4 addresses from that prefix. This link highlights which delegated prefixes are associated with, or routed through, the subnetwork, helping users trace external IP allocations and their exposure. 
diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-target-http-proxy.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-target-http-proxy.md deleted file mode 100644 index 612ebcb2..00000000 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-target-http-proxy.md +++ /dev/null @@ -1,23 +0,0 @@ ---- -title: GCP Compute Target Http Proxy -sidebar_label: gcp-compute-target-http-proxy ---- - -A **GCP Compute Target HTTP Proxy** routes incoming HTTP requests to the appropriate backend service based on rules defined in a URL map. It terminates the client connection, consults the associated `google_compute_url_map`, and then forwards traffic to the selected backend (for example, a backend service or serverless NEG). Target HTTP proxies are a key component of Google Cloud external HTTP(S) Load Balancing. -See the official documentation for full details: https://cloud.google.com/load-balancing/docs/target-proxies#target_http_proxy - -**Terrafrom Mappings:** - -- `google_compute_target_http_proxy.name` - -## Supported Methods - -- `GET`: Get a gcp-compute-target-http-proxy by its "name" -- `LIST`: List all gcp-compute-target-http-proxy -- ~~`SEARCH`~~ - -## Possible Links - -### [`gcp-compute-url-map`](/sources/gcp/Types/gcp-compute-url-map) - -A Target HTTP Proxy must reference exactly one URL map. Overmind uses this link to trace from the proxy to the URL map that defines its routing rules, enabling you to understand and surface any risks associated with misconfigured path matchers or backend services. 
diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-target-https-proxy.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-target-https-proxy.md deleted file mode 100644 index 09ffa4b7..00000000 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-target-https-proxy.md +++ /dev/null @@ -1,31 +0,0 @@ ---- -title: GCP Compute Target Https Proxy -sidebar_label: gcp-compute-target-https-proxy ---- - -A **Target HTTPS Proxy** is a global Google Cloud resource that terminates incoming HTTPS connections at the edge of Google’s network, presents one or more SSL certificates, and then forwards the decrypted requests to the appropriate backend service according to a URL map. In essence, it is the control point that binds SSL certificates, SSL policies, and URL maps together to enable HTTPS traffic on an External HTTP(S) Load Balancer. -For full details see the official documentation: https://cloud.google.com/compute/docs/reference/rest/v1/targetHttpsProxies - -**Terrafrom Mappings:** - -- `google_compute_target_https_proxy.name` - -## Supported Methods - -- `GET`: Get a gcp-compute-target-https-proxy by its "name" -- `LIST`: List all gcp-compute-target-https-proxy -- ~~`SEARCH`~~ - -## Possible Links - -### [`gcp-compute-ssl-certificate`](/sources/gcp/Types/gcp-compute-ssl-certificate) - -A Target HTTPS Proxy references one or more SSL certificates that it presents to clients during the TLS handshake. Overmind links these certificates so you can track which certificate is in use and assess expiry or misconfiguration risks. - -### [`gcp-compute-ssl-policy`](/sources/gcp/Types/gcp-compute-ssl-policy) - -An optional SSL policy can be attached to a Target HTTPS Proxy to enforce minimum TLS versions and cipher suites. Overmind exposes this link to highlight the security posture enforced on the proxy. 
- -### [`gcp-compute-url-map`](/sources/gcp/Types/gcp-compute-url-map) - -Every Target HTTPS Proxy must point to exactly one URL map, which defines how incoming requests are routed to backend services. Overmind links the URL map so you can trace the full request path and evaluate routing risks before deployment. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-target-pool.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-target-pool.md deleted file mode 100644 index 1ee2983a..00000000 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-target-pool.md +++ /dev/null @@ -1,30 +0,0 @@ ---- -title: GCP Compute Target Pool -sidebar_label: gcp-compute-target-pool ---- - -A Google Cloud Compute Target Pool is a regional grouping of VM instances that acts as the backend for the legacy TCP/UDP network load balancer. The pool defines which instances receive traffic, the optional session-affinity policy, the associated health checks that determine instance health, and an optional fail-over target pool for backup. See the official documentation for full details: https://cloud.google.com/compute/docs/reference/rest/v1/targetPools - -**Terrafrom Mappings:** - -- `google_compute_target_pool.id` - -## Supported Methods - -- `GET`: Get a gcp-compute-target-pool by its "name" -- `LIST`: List all gcp-compute-target-pool -- `SEARCH`: Search with full ID: projects/[project]/regions/[region]/targetPools/[name] (used for terraform mapping). - -## Possible Links - -### [`gcp-compute-health-check`](/sources/gcp/Types/gcp-compute-health-check) - -A target pool may reference one or more health checks through its `healthChecks` field. These health checks are used by Google Cloud to probe the instances in the pool and decide whether traffic should be sent to a particular VM. - -### [`gcp-compute-instance`](/sources/gcp/Types/gcp-compute-instance) - -Each target pool contains a list of VM instances (`instances` field) that will receive load-balanced traffic. 
Overmind links the pool to every instance it contains. - -### [`gcp-compute-target-pool`](/sources/gcp/Types/gcp-compute-target-pool) - -A target pool can specify another target pool as its `backupPool` to provide fail-over capacity, and it can itself be referenced as a backup by other pools. Overmind surfaces these peer-to-peer relationships between target pools. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-url-map.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-url-map.md deleted file mode 100644 index 339a1581..00000000 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-url-map.md +++ /dev/null @@ -1,23 +0,0 @@ ---- -title: GCP Compute Url Map -sidebar_label: gcp-compute-url-map ---- - -A Google Cloud Platform (GCP) Compute URL Map is a routing table used by HTTP(S) load balancers to decide where an incoming request should be sent. It matches the request’s host name and URL path to a set of rules and then forwards the traffic to the appropriate backend service or backend bucket. URL Maps make it possible to implement advanced traffic-management patterns such as domain-based and path-based routing, default fall-back targets, and traffic migration between versions of a service. -Official documentation: https://cloud.google.com/load-balancing/docs/url-map-concepts - -**Terrafrom Mappings:** - -- `google_compute_url_map.name` - -## Supported Methods - -- `GET`: Get a gcp-compute-url-map by its "name" -- `LIST`: List all gcp-compute-url-map -- ~~`SEARCH`~~ - -## Possible Links - -### [`gcp-compute-backend-service`](/sources/gcp/Types/gcp-compute-backend-service) - -A URL Map points to one or more backend services as its routing targets. Each rule in the map specifies which `gcp-compute-backend-service` should receive the traffic that matches the rule’s host and path conditions. 
diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-vpn-gateway.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-vpn-gateway.md deleted file mode 100644 index 9e892355..00000000 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-vpn-gateway.md +++ /dev/null @@ -1,23 +0,0 @@ ---- -title: GCP Compute Vpn Gateway -sidebar_label: gcp-compute-vpn-gateway ---- - -A GCP Compute High-Availability (HA) VPN Gateway is a regional resource that provides secure, encrypted IPsec tunnels between a Google Cloud Virtual Private Cloud (VPC) network and peer networks (on-premises data centres, other clouds, or different GCP projects). The gateway offers redundancy by using two external interfaces, each of which can establish a pair of active tunnels, ensuring traffic continues to flow even during maintenance events or failures. Because the gateway is tightly coupled to a specific VPC network and region, it influences routing, firewall behaviour and overall network reachability. -See the official Google Cloud documentation for full details: https://cloud.google.com/network-connectivity/docs/vpn/concepts/overview - -**Terrafrom Mappings:** - -- `google_compute_ha_vpn_gateway.name` - -## Supported Methods - -- `GET`: Get a gcp-compute-vpn-gateway by its "name" -- `LIST`: List all gcp-compute-vpn-gateway -- ~~`SEARCH`~~ - -## Possible Links - -### [`gcp-compute-network`](/sources/gcp/Types/gcp-compute-network) - -Each HA VPN Gateway is created inside a single VPC network. Linking the gateway to its `gcp-compute-network` allows Overmind to trace which IP ranges, routes and firewall rules may be affected by the gateway’s tunnels, and to evaluate the blast radius of any proposed changes to either resource. 
diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-vpn-tunnel.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-vpn-tunnel.md deleted file mode 100644 index af31b75c..00000000 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-vpn-tunnel.md +++ /dev/null @@ -1,31 +0,0 @@ ---- -title: GCP Compute Vpn Tunnel -sidebar_label: gcp-compute-vpn-tunnel ---- - -A Compute VPN Tunnel is the logical link that carries encrypted IP-sec traffic between Google Cloud and another network. It is created on top of a Google Cloud VPN Gateway and points at a peer gateway, defining parameters such as IKE version, shared secrets and traffic selectors. Each tunnel secures packets as they traverse the public Internet, allowing workloads in a VPC network to communicate privately with on-premises resources, other clouds, or additional GCP projects. -Official documentation: https://cloud.google.com/compute/docs/reference/rest/v1/vpnTunnels - -**Terrafrom Mappings:** - -- `google_compute_vpn_tunnel.name` - -## Supported Methods - -- `GET`: Get a gcp-compute-vpn-tunnel by its "name" -- `LIST`: List all gcp-compute-vpn-tunnel -- ~~`SEARCH`~~ - -## Possible Links - -### [`gcp-compute-external-vpn-gateway`](/sources/gcp/Types/gcp-compute-external-vpn-gateway) - -A VPN tunnel targets an External VPN Gateway when its peer endpoint resides outside Google Cloud. The tunnel resource holds the reference that binds the Google side of the connection to the defined external gateway interface. - -### [`gcp-compute-router`](/sources/gcp/Types/gcp-compute-router) - -For dynamic (BGP) routing, a VPN tunnel is attached to a Cloud Router. The router exchanges routes with the peer across the tunnel, advertising VPC prefixes and learning remote prefixes. - -### [`gcp-compute-vpn-gateway`](/sources/gcp/Types/gcp-compute-vpn-gateway) - -Every VPN tunnel is created on a specific VPN Gateway (Classic or HA). 
The gateway provides the Google Cloud termination point, while the tunnel specifies the individual encrypted session parameters. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-container-cluster.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-container-cluster.md deleted file mode 100644 index 155018b9..00000000 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-container-cluster.md +++ /dev/null @@ -1,55 +0,0 @@ ---- -title: GCP Container Cluster -sidebar_label: gcp-container-cluster ---- - -Google Kubernetes Engine (GKE) Container Clusters provide fully-managed Kubernetes control planes running on Google Cloud. A cluster groups the Kubernetes control plane and the worker nodes that run your containerised workloads, and exposes a single API endpoint for deployment and management. Clusters can be regional or zonal, support autoscaling, automatic upgrades and many advanced networking, security and observability features. -Official documentation: https://cloud.google.com/kubernetes-engine/docs/concepts/kubernetes-engine-overview - -**Terrafrom Mappings:** - -- `google_container_cluster.id` - -## Supported Methods - -- `GET`: Get a gcp-container-cluster by its "locations|clusters" -- ~~`LIST`~~ -- `SEARCH`: Search for GKE clusters in a location. Use the format "location" or the full resource name supported for terraform mappings. - -## Possible Links - -### [`gcp-big-query-dataset`](/sources/gcp/Types/gcp-big-query-dataset) - -GKE can export usage metering and cost allocation data, as well as logs via Cloud Logging sinks, to a BigQuery dataset. When a cluster is configured for resource usage metering, it is linked to the destination dataset. - -### [`gcp-cloud-kms-crypto-key`](/sources/gcp/Types/gcp-cloud-kms-crypto-key) - -Clusters may use a customer-managed encryption key (CMEK) from Cloud KMS to encrypt Kubernetes Secrets and other etcd data at rest. The CMEK key configured for a cluster or for its persistent disks is therefore related. 
- -### [`gcp-cloud-kms-crypto-key-version`](/sources/gcp/Types/gcp-cloud-kms-crypto-key-version) - -A specific key version is referenced by the cluster for CMEK encryption. Rotating the key version affects the cluster’s data-at-rest encryption. - -### [`gcp-compute-network`](/sources/gcp/Types/gcp-compute-network) - -Every cluster is deployed into a VPC network; all control-plane and node traffic flows across this network. The network selected during cluster creation is linked here. - -### [`gcp-compute-node-group`](/sources/gcp/Types/gcp-compute-node-group) - -If the cluster uses sole-tenant nodes or node auto-provisioning, the underlying Compute Engine Node Groups that host GKE nodes are related to the cluster. - -### [`gcp-compute-subnetwork`](/sources/gcp/Types/gcp-compute-subnetwork) - -Clusters (and their node pools) are placed in one or more subnets within the VPC for pod and service IP ranges. These subnetworks are therefore linked to the cluster. - -### [`gcp-container-node-pool`](/sources/gcp/Types/gcp-container-node-pool) - -A cluster contains one or more node pools that define the configuration of its worker nodes (machine type, autoscaling settings, etc.). Each node pool resource is directly associated with its parent cluster. - -### [`gcp-iam-service-account`](/sources/gcp/Types/gcp-iam-service-account) - -GKE uses IAM service accounts for the control plane, node VMs and workload identity. Service accounts granted to the cluster (e.g., Google APIs service agent, node service account) are linked. - -### [`gcp-pub-sub-topic`](/sources/gcp/Types/gcp-pub-sub-topic) - -Cluster audit logs, events or notifications can be exported to a Pub/Sub topic (e.g., via Log Sinks or Notification Channels). Any topic configured as a destination for the cluster is related here. 
diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-container-node-pool.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-container-node-pool.md deleted file mode 100644 index a0190d92..00000000 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-container-node-pool.md +++ /dev/null @@ -1,48 +0,0 @@ ---- -title: GCP Container Node Pool -sidebar_label: gcp-container-node-pool ---- - -Google Kubernetes Engine (GKE) runs worker nodes in groups called _node pools_. -Each pool defines the machine type, disk configuration, Kubernetes version and other attributes for the virtual machines that will back your workloads, and can be scaled or upgraded independently from the rest of the cluster. -Official documentation: https://cloud.google.com/kubernetes-engine/docs/concepts/node-pools - -**Terrafrom Mappings:** - -- `google_container_node_pool.id` - -## Supported Methods - -- `GET`: Get a gcp-container-node-pool by its "locations|clusters|nodePools" -- ~~`LIST`~~ -- `SEARCH`: Search GKE Node Pools within a cluster. Use "[location]|[cluster]" or the full resource name supported by Terraform mappings: "[project]/[location]/[cluster]/[node_pool_name]" - -## Possible Links - -### [`gcp-cloud-kms-crypto-key`](/sources/gcp/Types/gcp-cloud-kms-crypto-key) - -When customer-managed encryption keys (CMEK) are enabled for node disks, the node pool stores a reference to the Cloud KMS crypto key that encrypts each node’s boot and attached data volumes. - -### [`gcp-compute-instance-group-manager`](/sources/gcp/Types/gcp-compute-instance-group-manager) - -Every node pool is implemented as a regional or zonal Managed Instance Group (MIG) that GKE creates and controls; the Instance Group Manager handles the lifecycle of the virtual machines that make up the pool. 
- -### [`gcp-compute-network`](/sources/gcp/Types/gcp-compute-network) - -Nodes launched by the pool are attached to a specific VPC network (and its associated routes and firewall rules), so the pool maintains a link to the Compute Network used by the cluster. - -### [`gcp-compute-node-group`](/sources/gcp/Types/gcp-compute-node-group) - -If the node pool is configured to run on sole-tenant nodes, it will reference the Compute Node Group that represents the underlying dedicated hosts reserved for those nodes. - -### [`gcp-compute-subnetwork`](/sources/gcp/Types/gcp-compute-subnetwork) - -The pool records the particular subnetwork into which its nodes are placed, controlling the IP range from which node addresses are allocated. - -### [`gcp-container-cluster`](/sources/gcp/Types/gcp-container-cluster) - -A node pool is a child resource of a GKE cluster; this link identifies the parent `gcp-container-cluster` that owns and orchestrates the pool. - -### [`gcp-iam-service-account`](/sources/gcp/Types/gcp-iam-service-account) - -Each node runs with a Google service account that provides credentials for pulling container images, writing logs, and calling Google APIs. The pool stores a reference to that IAM Service Account. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-dataflow-job.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-dataflow-job.md deleted file mode 100644 index 5f2860eb..00000000 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-dataflow-job.md +++ /dev/null @@ -1,59 +0,0 @@ ---- -title: GCP Dataflow Job -sidebar_label: gcp-dataflow-job ---- - -A **Google Cloud Dataflow Job** is a managed Apache Beam pipeline that processes streaming or batch data at scale. Dataflow handles resource provisioning, autoscaling, and fault tolerance, allowing you to run data processing workloads without managing the underlying infrastructure. Jobs can read from and write to Pub/Sub, BigQuery, Spanner, Bigtable, and other GCP services. 
See the official documentation for full details: https://cloud.google.com/dataflow/docs. - -**Terraform Mappings:** - -- `google_dataflow_job.job_id` -- `google_dataflow_flex_template_job.job_id` - -## Supported Methods - -- `GET`: Get a gcp-dataflow-job by its "locations|jobs" -- ~~`LIST`~~ -- `SEARCH`: Search for gcp-dataflow-job by location - -## Possible Links - -### [`gcp-big-query-dataset`](/sources/gcp/Types/gcp-big-query-dataset) - -Dataflow jobs that read from or write to BigQuery reference the dataset containing the tables they use. If the dataset is deleted or misconfigured, the job may fail to access data. - -### [`gcp-big-query-table`](/sources/gcp/Types/gcp-big-query-table) - -Dataflow jobs can read from or write to specific BigQuery tables. If a table is deleted or its schema changes, the job may fail. - -### [`gcp-big-table-admin-instance`](/sources/gcp/Types/gcp-big-table-admin-instance) - -Dataflow jobs that use Bigtable as a source or sink reference the Bigtable instance. If the instance is deleted or misconfigured, the job may fail. - -### [`gcp-cloud-kms-crypto-key`](/sources/gcp/Types/gcp-cloud-kms-crypto-key) - -When customer-managed encryption keys (CMEK) are enabled for the Dataflow job environment, the job references the Cloud KMS Crypto Key used for encryption. - -### [`gcp-compute-network`](/sources/gcp/Types/gcp-compute-network) - -Dataflow worker VMs are attached to a VPC network. If the network is deleted or misconfigured, workers may lose connectivity or fail to start. - -### [`gcp-compute-subnetwork`](/sources/gcp/Types/gcp-compute-subnetwork) - -Dataflow workers run in a specific subnetwork. If the subnetwork is deleted or misconfigured, workers may lose connectivity or fail to start. - -### [`gcp-iam-service-account`](/sources/gcp/Types/gcp-iam-service-account) - -Dataflow workers run under a service account that grants them permissions to access other GCP services. 
If the service account is deleted or its permissions change, the job may fail. - -### [`gcp-pub-sub-subscription`](/sources/gcp/Types/gcp-pub-sub-subscription) - -Dataflow jobs that consume messages from Pub/Sub reference the subscription. If the subscription is deleted or misconfigured, the job may fail to consume messages. - -### [`gcp-pub-sub-topic`](/sources/gcp/Types/gcp-pub-sub-topic) - -Dataflow jobs that publish to or consume from Pub/Sub reference the topic. If the topic is deleted or misconfigured, the job may fail to read or write messages. - -### [`gcp-spanner-instance`](/sources/gcp/Types/gcp-spanner-instance) - -Dataflow jobs that use Spanner reference the Spanner instance. If the instance is deleted or misconfigured, the job may fail. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-dataform-repository.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-dataform-repository.md deleted file mode 100644 index b4d0c48b..00000000 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-dataform-repository.md +++ /dev/null @@ -1,35 +0,0 @@ ---- -title: GCP Dataform Repository -sidebar_label: gcp-dataform-repository ---- - -A Google Cloud Dataform Repository represents the source-controlled codebase that defines your Dataform workflows. It stores SQLX files, declarations and configuration that Dataform uses to build, test and deploy transformations in BigQuery. A repository can point to an internal workspace or to an external Git repository and may reference service accounts, Secret Manager secrets and customer-managed encryption keys. -Official documentation: https://cloud.google.com/dataform/reference/rest - -**Terrafrom Mappings:** - -- `google_dataform_repository.id` - -## Supported Methods - -- `GET`: Get a gcp-dataform-repository by its "locations|repositories" -- ~~`LIST`~~ -- `SEARCH`: Search for Dataform repositories in a location. 
Use the format "location" or "projects/[project_id]/locations/[location]/repositories/[repository_name]" which is supported for terraform mappings. - -## Possible Links - -### [`gcp-cloud-kms-crypto-key`](/sources/gcp/Types/gcp-cloud-kms-crypto-key) - -A repository can be configured with a customer-managed encryption key (`kms_key_name`) to encrypt its metadata and compiled artefacts, creating a dependency on the corresponding Cloud KMS crypto-key. - -### [`gcp-cloud-kms-crypto-key-version`](/sources/gcp/Types/gcp-cloud-kms-crypto-key-version) - -If CMEK is enabled, the repository points to a specific crypto-key version that is actually used for encryption; rotating or disabling that version will affect the repository. - -### [`gcp-iam-service-account`](/sources/gcp/Types/gcp-iam-service-account) - -Dataform uses a service account to fetch code from remote Git repositories and to execute compilation and workflow tasks; the repository stores the e-mail address of that service account. - -### [`gcp-secret-manager-secret`](/sources/gcp/Types/gcp-secret-manager-secret) - -When a repository is linked to an external Git provider, the authentication token is stored in Secret Manager. The field `authentication_token_secret_version` references the secret (and version) that holds the token. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-dataplex-aspect-type.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-dataplex-aspect-type.md deleted file mode 100644 index 5261705d..00000000 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-dataplex-aspect-type.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -title: GCP Dataplex Aspect Type -sidebar_label: gcp-dataplex-aspect-type ---- - -A Dataplex Aspect Type is a top-level resource within Google Cloud Dataplex’s metadata service that defines the structure of a metadata “aspect” – a reusable schema describing a set of attributes you want to attach to data assets (for example, data quality scores or business classifications). 
Once an aspect type is created, individual assets such as tables, files or columns can be annotated with concrete “aspects” that conform to that schema, ensuring consistent, centrally-governed metadata across your lake. -For further details see the official API reference: https://cloud.google.com/dataplex/docs/reference/rest/v1/projects.locations.aspectTypes - -**Terrafrom Mappings:** - -- `google_dataplex_aspect_type.id` - -## Supported Methods - -- `GET`: Get a gcp-dataplex-aspect-type by its "locations|aspectTypes" -- ~~`LIST`~~ -- `SEARCH`: Search for Dataplex aspect types in a location. Use the format "location" or "projects/[project_id]/locations/[location]/aspectTypes/[aspect_type_id]" which is supported for terraform mappings. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-dataplex-data-scan.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-dataplex-data-scan.md deleted file mode 100644 index dbcd9070..00000000 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-dataplex-data-scan.md +++ /dev/null @@ -1,27 +0,0 @@ ---- -title: GCP Dataplex Data Scan -sidebar_label: gcp-dataplex-data-scan ---- - -A GCP Dataplex Data Scan is a first-class Dataplex resource that encapsulates the configuration and schedule for profiling data or validating data-quality rules against a registered asset such as a BigQuery table or files held in Cloud Storage. Each scan lives in a specific Google Cloud location and records its execution history, metrics and detected issues, allowing teams to understand data health before downstream workloads rely on it. -For full details see the official REST reference: https://cloud.google.com/dataplex/docs/reference/rest/v1/projects.locations.dataScans - -**Terrafrom Mappings:** - -- `google_dataplex_datascan.id` - -## Supported Methods - -- `GET`: Get a gcp-dataplex-data-scan by its "locations|dataScans" -- ~~`LIST`~~ -- `SEARCH`: Search for Dataplex data scans in a location. 
Use the location name e.g., 'us-central1' or the format "projects/[project_id]/locations/[location]/dataScans/[data_scan_id]" which is supported for terraform mappings. - -## Possible Links - -### [`gcp-big-query-table`](/sources/gcp/Types/gcp-big-query-table) - -A Dataplex Data Scan may target a BigQuery table as its data source; linking the scan to the table lets Overmind trace quality findings back to the exact table that will be affected by the deployment. - -### [`gcp-storage-bucket`](/sources/gcp/Types/gcp-storage-bucket) - -When the data asset under review is a set of files stored in Cloud Storage, Dataplex references the underlying bucket. Linking the scan to the bucket reveals how changes to bucket configuration or contents could influence upcoming scan results. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-dataplex-entry-group.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-dataplex-entry-group.md deleted file mode 100644 index ffcc1c6b..00000000 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-dataplex-entry-group.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -title: GCP Dataplex Entry Group -sidebar_label: gcp-dataplex-entry-group ---- - -A Dataplex Entry Group is a logical container that holds one or more metadata entries within Google Cloud’s unified Data Catalog. By grouping related entries together, it helps data stewards organise, secure and search metadata that describe the underlying data assets managed by Dataplex (such as tables, files or streams). Each Entry Group lives in a specific project and location and can be granted IAM permissions independently, allowing fine-grained access control over the metadata it contains. 
-Official documentation: https://cloud.google.com/data-catalog/docs/reference/rest/v1/projects.locations.entryGroups - -**Terrafrom Mappings:** - -- `google_dataplex_entry_group.id` - -## Supported Methods - -- `GET`: Get a gcp-dataplex-entry-group by its "locations|entryGroups" -- ~~`LIST`~~ -- `SEARCH`: Search for Dataplex entry groups in a location. Use the format "location" or "projects/[project_id]/locations/[location]/entryGroups/[entry_group_id]" which is supported for terraform mappings. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-dataproc-autoscaling-policy.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-dataproc-autoscaling-policy.md deleted file mode 100644 index f897865f..00000000 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-dataproc-autoscaling-policy.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -title: GCP Dataproc Autoscaling Policy -sidebar_label: gcp-dataproc-autoscaling-policy ---- - -A GCP Dataproc Autoscaling Policy defines the rules that Google Cloud Dataproc uses to automatically add or remove worker nodes from a Dataproc cluster in response to workload demand. By specifying target utilisation levels, cooldown periods, graceful decommissioning time-outs and per-node billing settings, the policy ensures that clusters expand to meet spikes in processing requirements and shrink when demand falls, optimising both performance and cost. -For a full description of each field and the underlying API, see the official Google Cloud documentation: https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.autoscalingPolicies. 
- -**Terraform Mappings:** - -- `google_dataproc_autoscaling_policy.name` - -## Supported Methods - -- `GET`: Get a gcp-dataproc-autoscaling-policy by its "name" -- `LIST`: List all gcp-dataproc-autoscaling-policy -- ~~`SEARCH`~~ diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-dataproc-cluster.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-dataproc-cluster.md deleted file mode 100644 index 82fba59e..00000000 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-dataproc-cluster.md +++ /dev/null @@ -1,66 +0,0 @@ ---- -title: GCP Dataproc Cluster -sidebar_label: gcp-dataproc-cluster ---- - -A Google Cloud Dataproc Cluster is a managed group of Compute Engine virtual machines configured to run big-data workloads such as Apache Hadoop, Spark, Hive and Presto. Dataproc abstracts away the operational overhead of provisioning, configuring and scaling the underlying infrastructure, allowing you to launch fully-featured clusters in minutes and shut them down just as quickly. See the official documentation for full details: https://cloud.google.com/dataproc/docs/concepts/overview - -**Terrafrom Mappings:** - -- `google_dataproc_cluster.name` - -## Supported Methods - -- `GET`: Get a gcp-dataproc-cluster by its "name" -- `LIST`: List all gcp-dataproc-cluster -- ~~`SEARCH`~~ - -## Possible Links - -### [`gcp-cloud-kms-crypto-key`](/sources/gcp/Types/gcp-cloud-kms-crypto-key) - -If customer-managed encryption keys (CMEK) are enabled, a Dataproc Cluster references a Cloud KMS Crypto Key to encrypt the persistent disks attached to its virtual machines. - -### [`gcp-compute-image`](/sources/gcp/Types/gcp-compute-image) - -Each node in a Dataproc Cluster boots from a specific Compute Engine image (e.g., a Dataproc-prebuilt image or a custom image), so the cluster has a dependency on that image. 
- -### [`gcp-compute-instance-group-manager`](/sources/gcp/Types/gcp-compute-instance-group-manager) - -Dataproc automatically creates Managed Instance Groups (MIGs) for the primary, worker and optional secondary-worker node pools; these MIGs are children of the Dataproc Cluster. - -### [`gcp-compute-network`](/sources/gcp/Types/gcp-compute-network) - -The cluster’s VMs are attached to a particular VPC network, dictating their reachability, firewall rules and routing behaviour. - -### [`gcp-compute-node-group`](/sources/gcp/Types/gcp-compute-node-group) - -If the cluster is deployed on sole-tenant nodes, it is associated with a Compute Node Group that provides dedicated hardware isolation. - -### [`gcp-compute-subnetwork`](/sources/gcp/Types/gcp-compute-subnetwork) - -Within the selected VPC, the Dataproc Cluster attaches its instances to a specific subnetwork where IP addressing, Private Google Access and regional placement are defined. - -### [`gcp-container-cluster`](/sources/gcp/Types/gcp-container-cluster) - -For Dataproc on GKE deployments, the Dataproc Cluster is layered on top of an existing Google Kubernetes Engine cluster, creating a parent–child relationship. - -### [`gcp-container-node-pool`](/sources/gcp/Types/gcp-container-node-pool) - -When running Dataproc on GKE, the workloads execute on one or more GKE node pools; the Dataproc service references these node pools for capacity. - -### [`gcp-dataproc-autoscaling-policy`](/sources/gcp/Types/gcp-dataproc-autoscaling-policy) - -A Dataproc Cluster can be bound to an Autoscaling Policy that dynamically adjusts the number of worker nodes based on workload metrics. - -### [`gcp-dataproc-cluster`](/sources/gcp/Types/gcp-dataproc-cluster) - -Clusters can reference other clusters as templates or in workflows that orchestrate multiple clusters; Overmind represents these peer or predecessor relationships with a self-link. 
- -### [`gcp-iam-service-account`](/sources/gcp/Types/gcp-iam-service-account) - -The VMs within the cluster run under one or more IAM Service Accounts that grant them permissions to access other Google Cloud services. - -### [`gcp-storage-bucket`](/sources/gcp/Types/gcp-storage-bucket) - -During creation, the cluster specifies Cloud Storage buckets for staging, temp and log output, making those buckets upstream dependencies. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-dns-managed-zone.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-dns-managed-zone.md deleted file mode 100644 index f3a4f130..00000000 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-dns-managed-zone.md +++ /dev/null @@ -1,27 +0,0 @@ ---- -title: GCP Dns Managed Zone -sidebar_label: gcp-dns-managed-zone ---- - -A Google Cloud DNS Managed Zone is a logical container for DNS resource records that share the same DNS name suffix. Managed zones can be configured as public (resolvable from the internet) or private (resolvable only from one or more selected VPC networks). They are the fundamental unit that Cloud DNS uses to host, serve and manage authoritative DNS data for your domains. -Official documentation: https://cloud.google.com/dns/docs/zones - -**Terrafrom Mappings:** - -- `google_dns_managed_zone.name` - -## Supported Methods - -- `GET`: Get a gcp-dns-managed-zone by its "name" -- `LIST`: List all gcp-dns-managed-zone -- ~~`SEARCH`~~ - -## Possible Links - -### [`gcp-compute-network`](/sources/gcp/Types/gcp-compute-network) - -Private managed zones are explicitly linked to one or more VPC networks. The association determines which networks can resolve the zone’s records, so an Overmind relationship helps surface reachability and leakage risks between a DNS zone and the networks that consume it. 
- -### [`gcp-container-cluster`](/sources/gcp/Types/gcp-container-cluster) - -GKE clusters frequently create or rely on Cloud DNS managed zones for service discovery and in-cluster load-balancing (e.g., when CloudDNS for Service Directory is enabled). Mapping a cluster to its managed zones reveals dependencies that affect name resolution, cross-cluster communication and potential namespace conflicts. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-essential-contacts-contact.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-essential-contacts-contact.md deleted file mode 100644 index 96d94d99..00000000 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-essential-contacts-contact.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -title: GCP Essential Contacts Contact -sidebar_label: gcp-essential-contacts-contact ---- - -A **Google Cloud Essential Contact** represents an email address or Google Group that Google Cloud will use to send important notifications about incidents, security issues, and other critical updates for a project, folder, or organisation. Each contact is stored under a parent resource (e.g. `projects/123456789`, `folders/987654321`, or `organizations/555555555`) and can be categorised by notification types such as `SECURITY`, `TECHNICAL`, or `LEGAL`. -For further details, refer to the official Google Cloud documentation: https://cloud.google.com/resource-manager/docs/reference/essentialcontacts/rest - -**Terrafrom Mappings:** - -- `google_essential_contacts_contact.id` - -## Supported Methods - -- `GET`: Get a gcp-essential-contacts-contact by its "name" -- `LIST`: List all gcp-essential-contacts-contact -- `SEARCH`: Search for contacts by their ID in the form of "projects/[project_id]/contacts/[contact_id]". 
diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-file-instance.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-file-instance.md deleted file mode 100644 index 4f4693ee..00000000 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-file-instance.md +++ /dev/null @@ -1,27 +0,0 @@ ---- -title: GCP File Instance -sidebar_label: gcp-file-instance ---- - -A GCP File Instance represents a Cloud Filestore instance – a managed network file storage appliance that provides an NFSv3 or NFSv4-compatible file share, typically used by GKE clusters or Compute Engine VMs that require shared, POSIX-compliant storage. Each instance is created in a specific GCP region and zone, connected to a VPC network, and exposes one or more file shares (called “filesets”) over a private RFC-1918 address. Instances can be customised for capacity and performance tiers, and may optionally use customer-managed encryption keys (CMEK) for data-at-rest encryption. -Official documentation: https://cloud.google.com/filestore/docs/overview - -**Terrafrom Mappings:** - -- `google_filestore_instance.id` - -## Supported Methods - -- `GET`: Get a gcp-file-instance by its "locations|instances" -- ~~`LIST`~~ -- `SEARCH`: Search for Filestore instances in a location. Use the location string or the full resource name supported for terraform mappings. - -## Possible Links - -### [`gcp-cloud-kms-crypto-key`](/sources/gcp/Types/gcp-cloud-kms-crypto-key) - -A Filestore instance can be encrypted with a customer-managed Cloud KMS key (CMEK). The link shows which KMS Crypto Key is protecting the data-at-rest of this storage appliance. - -### [`gcp-compute-network`](/sources/gcp/Types/gcp-compute-network) - -Filestore instances are deployed into and reachable through a specific VPC network. This link identifies the Compute Network whose subnet provides the private IP addresses through which clients access the file share. 
diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-iam-role.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-iam-role.md deleted file mode 100644 index f4ffc0ed..00000000 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-iam-role.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: GCP Iam Role -sidebar_label: gcp-iam-role ---- - -A **Google Cloud IAM Role** is a logical grouping of one or more IAM permissions that can be granted to principals (users, service accounts, groups or Google Workspace domains) to control their access to Google Cloud resources. Roles come in three flavours—basic, predefined and custom—allowing organisations to strike a balance between least-privilege access and administrative convenience. For a full explanation see the Google Cloud documentation: https://cloud.google.com/iam/docs/understanding-roles - -## Supported Methods - -- `GET`: Get a gcp-iam-role by its "name" -- `LIST`: List all gcp-iam-role -- ~~`SEARCH`~~ diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-iam-service-account-key.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-iam-service-account-key.md deleted file mode 100644 index a2e0d84e..00000000 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-iam-service-account-key.md +++ /dev/null @@ -1,23 +0,0 @@ ---- -title: GCP Iam Service Account Key -sidebar_label: gcp-iam-service-account-key ---- - -A GCP IAM Service Account Key is a cryptographic key-pair (private and public) that is bound to a specific IAM service account. Possessing the private half of the key allows a workload or user to authenticate to Google Cloud APIs as that service account, making the key one of the most sensitive objects in any Google Cloud environment. Keys can be user-managed or Google-managed, rotated, disabled or deleted, and each service account can hold up to ten user-managed keys at a time. Mis-management of these keys can lead to credential leakage and unauthorised access. 
-Official documentation: https://cloud.google.com/iam/docs/creating-managing-service-account-keys - -**Terrafrom Mappings:** - -- `google_service_account_key.id` - -## Supported Methods - -- `GET`: Get GCP Iam Service Account Key by "gcp-iam-service-account-email or unique_id|gcp-iam-service-account-key-name" -- ~~`LIST`~~ -- `SEARCH`: Search for GCP Iam Service Account Key by "gcp-iam-service-account-email or unique_id" - -## Possible Links - -### [`gcp-iam-service-account`](/sources/gcp/Types/gcp-iam-service-account) - -A Service Account Key is always subordinate to, and uniquely associated with, a single IAM service account. Overmind links the key back to its parent service account so you can trace which workload the key belongs to, understand the permissions it inherits, and assess the blast radius should the key be compromised. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-iam-service-account.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-iam-service-account.md deleted file mode 100644 index 2fa7bf68..00000000 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-iam-service-account.md +++ /dev/null @@ -1,28 +0,0 @@ ---- -title: GCP Iam Service Account -sidebar_label: gcp-iam-service-account ---- - -A GCP IAM Service Account is a special kind of Google identity that an application or VM instance uses to make authorised calls to Google Cloud APIs, rather than an end-user. Each service account is identified by an email‐style string (e.g. `my-sa@project-id.iam.gserviceaccount.com`) and a stable numeric `unique_id`. Service accounts can be granted IAM roles, can own resources, and may have one or more cryptographic keys used for authentication. 
-For full details see the official documentation: https://cloud.google.com/iam/docs/service-accounts - -**Terrafrom Mappings:** - -- `google_service_account.email` -- `google_service_account.unique_id` - -## Supported Methods - -- `GET`: Get GCP Iam Service Account by "gcp-iam-service-account-email or unique_id" -- `LIST`: List all GCP Iam Service Account items -- ~~`SEARCH`~~ - -## Possible Links - -### [`gcp-cloud-resource-manager-project`](/sources/gcp/Types/gcp-cloud-resource-manager-project) - -Every service account is created inside a single Cloud Resource Manager project. This link lets you navigate from the service account to the project that owns it, revealing project-level policies and context. - -### [`gcp-iam-service-account-key`](/sources/gcp/Types/gcp-iam-service-account-key) - -Service account keys are cryptographic credentials associated with a service account. This link lists all keys (active, disabled or expired) that belong to the current service account, allowing you to audit key rotation and exposure risks. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-logging-bucket.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-logging-bucket.md deleted file mode 100644 index 8b2a3d9b..00000000 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-logging-bucket.md +++ /dev/null @@ -1,27 +0,0 @@ ---- -title: GCP Logging Bucket -sidebar_label: gcp-logging-bucket ---- - -A GCP Logging Bucket is a regional or multi-regional storage container managed by Cloud Logging that stores log entries routed from one or more Google Cloud projects, folders or organisations. Buckets provide fine-grained control over where logs are kept, how long they are retained, and which encryption keys protect them. Log buckets behave similarly to Cloud Storage buckets, but are optimised for log data and are accessed through the Cloud Logging API rather than through Cloud Storage. 
-See the official documentation for full details: https://cloud.google.com/logging/docs/storage - -## Supported Methods - -- `GET`: Get a gcp-logging-bucket by its "locations|buckets" -- ~~`LIST`~~ -- `SEARCH`: Search for gcp-logging-bucket by its "locations" - -## Possible Links - -### [`gcp-cloud-kms-crypto-key`](/sources/gcp/Types/gcp-cloud-kms-crypto-key) - -A logging bucket can be configured to use customer-managed encryption keys (CMEK). When CMEK is enabled, the bucket references a Cloud KMS Crypto Key that holds the symmetric key material used to encrypt and decrypt the stored log entries. - -### [`gcp-cloud-kms-crypto-key-version`](/sources/gcp/Types/gcp-cloud-kms-crypto-key-version) - -If CMEK is active, the bucket also keeps track of the specific key version that is currently in use. This link represents the exact Crypto Key Version providing encryption for the bucket at a given point in time. - -### [`gcp-iam-service-account`](/sources/gcp/Types/gcp-iam-service-account) - -Cloud Logging uses service accounts to write, read or route logs into a bucket. The bucket’s IAM policy may grant `roles/logging.bucketWriter` or `roles/logging.viewer` to particular service accounts, and the Log Router’s reserved service account must have permission to encrypt data when CMEK is enabled. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-logging-link.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-logging-link.md deleted file mode 100644 index 9ecd6a83..00000000 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-logging-link.md +++ /dev/null @@ -1,27 +0,0 @@ ---- -title: GCP Logging Link -sidebar_label: gcp-logging-link ---- - -A GCP Logging Link is a Cloud Logging resource that continuously streams the log entries stored in a specific Log Bucket into an external BigQuery dataset. By configuring a link you enable near-real-time analytics of your logs with BigQuery without the need for manual exports or scheduled jobs. 
Links are created under the path - -`projects|folders|organizations|billingAccounts / locations / buckets / links` - -and each link specifies the destination BigQuery dataset, IAM writer identity, and lifecycle state. -For further details see Google’s official documentation: https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.locations.buckets.links - -## Supported Methods - -- `GET`: Get a gcp-logging-link by its "locations|buckets|links" -- ~~`LIST`~~ -- `SEARCH`: Search for gcp-logging-link by its "locations|buckets" - -## Possible Links - -### [`gcp-big-query-dataset`](/sources/gcp/Types/gcp-big-query-dataset) - -A logging link targets exactly one BigQuery dataset; Overmind establishes this edge so you can trace which dataset is receiving log entries from the bucket. - -### [`gcp-logging-bucket`](/sources/gcp/Types/gcp-logging-bucket) - -The logging link is defined inside a specific Log Bucket; this relationship lets you see which buckets are sending their logs onwards and to which destinations. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-logging-saved-query.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-logging-saved-query.md deleted file mode 100644 index 7802591d..00000000 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-logging-saved-query.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: GCP Logging Saved Query -sidebar_label: gcp-logging-saved-query ---- - -A GCP Logging Saved Query is a reusable, named log query that is stored in Google Cloud Logging’s Logs Explorer. It contains the filter expression (or Log Query Language statement), any configured time-range presets and display options, allowing teams to quickly rerun common searches, share queries across projects, and use them as the basis for dashboards, log-based metrics or alerting policies. 
Because Saved Queries are resources in their own right, they can be created, read, updated and deleted through the Cloud Logging API, and are uniquely identified by the combination of the Google Cloud location and the query name. -Official documentation: https://cloud.google.com/logging/docs/view/building-queries - -## Supported Methods - -- `GET`: Get a gcp-logging-saved-query by its "locations|savedQueries" -- ~~`LIST`~~ -- `SEARCH`: Search for gcp-logging-saved-query by its "locations" diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-logging-sink.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-logging-sink.md deleted file mode 100644 index e6bf2157..00000000 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-logging-sink.md +++ /dev/null @@ -1,35 +0,0 @@ ---- -title: GCP Logging Sink -sidebar_label: gcp-logging-sink ---- - -A Logging Sink in Google Cloud Platform (GCP) is a routing rule that selects log entries with a user-defined filter and exports them to a chosen destination such as BigQuery, Cloud Storage, Pub/Sub, or another Cloud Logging bucket. Sinks are the building blocks of GCP’s Log Router and are used to retain, analyse or stream logs outside of the originating project, folder or organisation. -Official documentation: https://cloud.google.com/logging/docs/export - -## Supported Methods - -- `GET`: Get GCP Logging Sink by "gcp-logging-sink-name" -- `LIST`: List all GCP Logging Sink items -- ~~`SEARCH`~~ - -## Possible Links - -### [`gcp-big-query-dataset`](/sources/gcp/Types/gcp-big-query-dataset) - -If the sink’s destination is a BigQuery table, it must reference a BigQuery dataset where the tables will be created and written to. The dataset therefore appears as a child dependency of the logging sink. - -### [`gcp-iam-service-account`](/sources/gcp/Types/gcp-iam-service-account) - -Every sink is assigned a writer_identity, which is an IAM service account that needs permission to write into the chosen destination. 
The sink’s correct operation depends on this service account having the required roles on the target resource. - -### [`gcp-logging-bucket`](/sources/gcp/Types/gcp-logging-bucket) - -A sink can route logs to another Cloud Logging bucket (including aggregated buckets at the folder or organisation level). In this case the sink targets, and must have write access to, the specified logging bucket. - -### [`gcp-pub-sub-topic`](/sources/gcp/Types/gcp-pub-sub-topic) - -When the destination is Pub/Sub, the sink exports each matching log entry as a message on a particular topic. The topic therefore represents an external linkage for onward streaming or event-driven processing. - -### [`gcp-storage-bucket`](/sources/gcp/Types/gcp-storage-bucket) - -For archival purposes a sink may export logs to a Cloud Storage bucket. The bucket must exist and grant the sink’s writer service account permission to create objects, making the storage bucket a direct dependency of the sink. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-monitoring-alert-policy.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-monitoring-alert-policy.md deleted file mode 100644 index f5ca12a8..00000000 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-monitoring-alert-policy.md +++ /dev/null @@ -1,22 +0,0 @@ ---- -title: GCP Monitoring Alert Policy -sidebar_label: gcp-monitoring-alert-policy ---- - -A Google Cloud Monitoring Alert Policy is a configuration object that defines the conditions under which Cloud Monitoring should create an incident, how incidents are grouped, and which notification channels should be used to inform operators. Alert policies enable proactive observation of metrics, logs and uptime checks across Google Cloud services so that you can respond quickly to anomalies. For more detail see the official Google Cloud documentation: [Create and manage alerting policies](https://cloud.google.com/monitoring/alerts). 
- -**Terrafrom Mappings:** - -- `google_monitoring_alert_policy.id` - -## Supported Methods - -- `GET`: Get a gcp-monitoring-alert-policy by its "name" -- `LIST`: List all gcp-monitoring-alert-policy -- `SEARCH`: Search by full resource name: projects/[project]/alertPolicies/[alert_policy_id] (used for terraform mapping). - -## Possible Links - -### [`gcp-monitoring-notification-channel`](/sources/gcp/Types/gcp-monitoring-notification-channel) - -An alert policy can reference one or more notification channels so that, when its conditions are met, Cloud Monitoring sends notifications (e-mails, webhooks, SMS, etc.) through the linked gcp-monitoring-notification-channels. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-monitoring-custom-dashboard.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-monitoring-custom-dashboard.md deleted file mode 100644 index 98225cf8..00000000 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-monitoring-custom-dashboard.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -title: GCP Monitoring Custom Dashboard -sidebar_label: gcp-monitoring-custom-dashboard ---- - -A GCP Monitoring Custom Dashboard is a user-defined collection of charts and widgets that presents metrics, logs, and alerts for resources running in Google Cloud or on-premises. It allows platform teams to visualise performance, capacity, and health in a single view that can be shared across projects. Custom dashboards are managed through Cloud Monitoring and can be created or modified via the Google Cloud Console, the Cloud Monitoring API, or infrastructure-as-code tools such as Terraform. 
-For full details, see the official documentation: https://cloud.google.com/monitoring/charts/dashboards - -**Terrafrom Mappings:** - -- `google_monitoring_dashboard.id` - -## Supported Methods - -- `GET`: Get a gcp-monitoring-custom-dashboard by its "name" -- `LIST`: List all gcp-monitoring-custom-dashboard -- `SEARCH`: Search for custom dashboards by their ID in the form of "projects/[project_id]/dashboards/[dashboard_id]". This is supported for terraform mappings. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-monitoring-notification-channel.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-monitoring-notification-channel.md deleted file mode 100644 index fab21d69..00000000 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-monitoring-notification-channel.md +++ /dev/null @@ -1,22 +0,0 @@ ---- -title: GCP Monitoring Notification Channel -sidebar_label: gcp-monitoring-notification-channel ---- - -A **Google Cloud Monitoring Notification Channel** specifies where and how Cloud Monitoring delivers alert notifications—for example via email, SMS, Cloud Pub/Sub, Slack or PagerDuty. Each channel stores the configuration necessary for a particular medium (address, webhook URL, Pub/Sub topic name, etc.) and can be referenced by one or more alerting policies. For full details, see the official Google documentation: https://cloud.google.com/monitoring/support/notification-options - -**Terrafrom Mappings:** - -- `google_monitoring_notification_channel.name` - -## Supported Methods - -- `GET`: Get a gcp-monitoring-notification-channel by its "name" -- `LIST`: List all gcp-monitoring-notification-channel -- `SEARCH`: Search by full resource name: projects/[project]/notificationChannels/[notificationChannel] (used for terraform mapping). 
- -## Possible Links - -### [`gcp-pub-sub-topic`](/sources/gcp/Types/gcp-pub-sub-topic) - -If the notification channel’s `type` is `pubsub`, the channel references a specific Cloud Pub/Sub topic where alert messages are published. Overmind therefore links the notification channel to the corresponding `gcp-pub-sub-topic` resource so that you can trace how alerts propagate into event-driven workflows or downstream systems. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-orgpolicy-policy.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-orgpolicy-policy.md deleted file mode 100644 index 9639146b..00000000 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-orgpolicy-policy.md +++ /dev/null @@ -1,23 +0,0 @@ ---- -title: GCP Orgpolicy Policy -sidebar_label: gcp-orgpolicy-policy ---- - -An Organisation Policy (orgpolicy) in Google Cloud is a resource that applies a constraint to part of the resource hierarchy (organisation, folder, or project). It allows administrators to enforce governance rules—such as restricting the regions in which resources may be created, blocking the use of certain services, or mandating specific network configurations—before workloads are deployed. -For full details see Google’s official documentation: https://cloud.google.com/resource-manager/docs/organization-policy/overview - -**Terrafrom Mappings:** - -- `google_org_policy_policy.name` - -## Supported Methods - -- `GET`: Get a gcp-orgpolicy-policy by its "name" -- `LIST`: List all gcp-orgpolicy-policy -- `SEARCH`: Search with the full policy name: projects/[project]/policies/[constraint] (used for terraform mapping). - -## Possible Links - -### [`gcp-cloud-resource-manager-project`](/sources/gcp/Types/gcp-cloud-resource-manager-project) - -A project is one of the resource hierarchy levels to which an Organisation Policy can be attached. Each gcp-orgpolicy-policy documented here is therefore linked to the gcp-cloud-resource-manager-project that the policy governs. 
diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-pub-sub-subscription.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-pub-sub-subscription.md deleted file mode 100644 index 4b358d98..00000000 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-pub-sub-subscription.md +++ /dev/null @@ -1,41 +0,0 @@ ---- -title: GCP Pub Sub Subscription -sidebar_label: gcp-pub-sub-subscription ---- - -A Google Cloud Pub/Sub subscription represents a stream of messages delivered from a single Pub/Sub topic to a consumer application. Each subscription defines how, where and for how long messages are retained, whether the delivery is push or pull, any filters or dead-letter policies, and the IAM principals that are allowed to read from it. Official documentation can be found at Google Cloud – Pub/Sub Subscriptions: https://cloud.google.com/pubsub/docs/subscription-overview - -**Terrafrom Mappings:** - -- `google_pubsub_subscription.name` -- `google_pubsub_subscription_iam_binding.subscription` -- `google_pubsub_subscription_iam_member.subscription` -- `google_pubsub_subscription_iam_policy.subscription` - -## Supported Methods - -- `GET`: Get a gcp-pub-sub-subscription by its "name" -- `LIST`: List all gcp-pub-sub-subscription -- ~~`SEARCH`~~ - -## Possible Links - -### [`gcp-big-query-table`](/sources/gcp/Types/gcp-big-query-table) - -Pub/Sub can deliver messages directly into BigQuery by means of a BigQuery subscription. When such an integration is configured, the subscription is linked to the destination BigQuery table. - -### [`gcp-iam-service-account`](/sources/gcp/Types/gcp-iam-service-account) - -Service accounts are granted roles such as `roles/pubsub.subscriber` on the subscription so that applications can pull or acknowledge messages, or so that Pub/Sub can impersonate them for push deliveries. These IAM bindings create a relationship between the subscription and the service accounts. 
- -### [`gcp-pub-sub-subscription`](/sources/gcp/Types/gcp-pub-sub-subscription) - -Multiple subscriptions can point at the same topic, or one subscription may forward undelivered messages to another subscription via a dead-letter topic. Overmind shows these peer or chained subscriptions as related items. - -### [`gcp-pub-sub-topic`](/sources/gcp/Types/gcp-pub-sub-topic) - -Every subscription is attached to exactly one topic. All messages published to that topic are made available to the subscription, making the topic the primary upstream dependency. - -### [`gcp-storage-bucket`](/sources/gcp/Types/gcp-storage-bucket) - -Cloud Storage buckets can emit object-change notifications to a Pub/Sub topic. If the subscription listens to such a topic, it is indirectly linked to the bucket that generated the events, allowing you to trace the flow from storage changes to message consumption. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-pub-sub-topic.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-pub-sub-topic.md deleted file mode 100644 index 9434dd26..00000000 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-pub-sub-topic.md +++ /dev/null @@ -1,34 +0,0 @@ ---- -title: GCP Pub Sub Topic -sidebar_label: gcp-pub-sub-topic ---- - -A **Cloud Pub/Sub Topic** is a named message channel in Google Cloud Platform that receives messages from publishers and delivers them to subscribers. Topics decouple senders and receivers, allowing highly-scalable, asynchronous communication between services. Every message published to a topic is retained for the duration of its acknowledgement window and can be encrypted with a customer-managed key. -For comprehensive information, see the official documentation: https://cloud.google.com/pubsub/docs/create-topic. 
- -**Terrafrom Mappings:** - -- `google_pubsub_topic.name` -- `google_pubsub_topic_iam_binding.topic` -- `google_pubsub_topic_iam_member.topic` -- `google_pubsub_topic_iam_policy.topic` - -## Supported Methods - -- `GET`: Get a gcp-pub-sub-topic by its "name" -- `LIST`: List all gcp-pub-sub-topic -- ~~`SEARCH`~~ - -## Possible Links - -### [`gcp-cloud-kms-crypto-key`](/sources/gcp/Types/gcp-cloud-kms-crypto-key) - -A Pub/Sub topic may be encrypted using a customer-managed encryption key (CMEK). When CMEK is enabled, the topic resource holds a reference to the Cloud KMS Crypto Key that protects message data at rest. - -### [`gcp-iam-service-account`](/sources/gcp/Types/gcp-iam-service-account) - -Access to publish or subscribe is controlled through IAM roles that are granted to service accounts on the topic. The topic’s IAM policy therefore links it to any service account that has roles such as `roles/pubsub.publisher` or `roles/pubsub.subscriber` on the resource. - -### [`gcp-storage-bucket`](/sources/gcp/Types/gcp-storage-bucket) - -Cloud Storage buckets can be configured to send change notifications to a Pub/Sub topic (for example, object create or delete events). In such configurations, the bucket acts as a publisher, and the topic appears as a dependent destination for bucket event notifications. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-redis-instance.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-redis-instance.md deleted file mode 100644 index ef6259bf..00000000 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-redis-instance.md +++ /dev/null @@ -1,30 +0,0 @@ ---- -title: GCP Redis Instance -sidebar_label: gcp-redis-instance ---- - -A GCP Redis Instance is a fully managed, in-memory data store provided by Cloud Memorystore for Redis. 
It offers a drop-in, highly available Redis service that handles provisioning, patching, scaling, monitoring and automatic fail-over, allowing you to use Redis as a cache or primary database without managing the underlying infrastructure yourself. See the official documentation for details: https://cloud.google.com/memorystore/docs/redis - -**Terrafrom Mappings:** - -- `google_redis_instance.id` - -## Supported Methods - -- `GET`: Get a gcp-redis-instance by its "locations|instances" -- ~~`LIST`~~ -- `SEARCH`: Search Redis instances in a location. Use the format "location" or "projects/[project_id]/locations/[location]/instances/[instance_name]" which is supported for terraform mappings. - -## Possible Links - -### [`gcp-cloud-kms-crypto-key`](/sources/gcp/Types/gcp-cloud-kms-crypto-key) - -If Customer-Managed Encryption Keys (CMEK) are enabled for the Redis instance, the data at rest is encrypted with a Cloud KMS Crypto Key. The Redis instance therefore depends on — and is cryptographically linked to — the specified `gcp-cloud-kms-crypto-key`. - -### [`gcp-compute-network`](/sources/gcp/Types/gcp-compute-network) - -A Redis instance is deployed inside a specific VPC network and is reachable only via an internal IP address in that network. Consequently, each instance is associated with a `gcp-compute-network`, which determines its connectivity and firewall boundaries. - -### [`gcp-compute-ssl-certificate`](/sources/gcp/Types/gcp-compute-ssl-certificate) - -When TLS is enabled for a Redis instance, it can reference a Compute Engine SSL certificate resource to present during encrypted client connections. The `gcp-compute-ssl-certificate` therefore represents the server certificate used to secure traffic to the Redis instance. 
diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-run-revision.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-run-revision.md deleted file mode 100644 index 1e7b424b..00000000 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-run-revision.md +++ /dev/null @@ -1,51 +0,0 @@ ---- -title: GCP Run Revision -sidebar_label: gcp-run-revision ---- - -A Cloud Run **Revision** is an immutable snapshot of a Cloud Run Service configuration at a particular point in time. Each time you deploy new code or change configuration, Cloud Run automatically creates a new revision and routes traffic according to your settings. A revision defines the container image to run, environment variables, resource limits, networking options, service account, secret mounts and more. Once created, a revision can never be modified – you can only create a new one. -Official documentation: https://cloud.google.com/run/docs/reference/rest/v1/namespaces.revisions - -## Supported Methods - -- `GET`: Get a gcp-run-revision by its "locations|services|revisions" -- ~~`LIST`~~ -- `SEARCH`: Search for gcp-run-revision by its "locations|services" - -## Possible Links - -### [`gcp-artifact-registry-docker-image`](/sources/gcp/Types/gcp-artifact-registry-docker-image) - -The container image specified in the revision is often stored in Artifact Registry. The revision therefore has a **uses-image** relationship with the referenced Docker image. - -### [`gcp-cloud-kms-crypto-key`](/sources/gcp/Types/gcp-cloud-kms-crypto-key) - -If the revision is configured with a customer-managed encryption key (CMEK) for encrypted secrets or volumes, it will reference the corresponding Cloud KMS Crypto Key. - -### [`gcp-compute-network`](/sources/gcp/Types/gcp-compute-network) - -When a revision is set up to use Serverless VPC Access, it connects to a specific VPC network, creating a **connects-to-network** relationship. 
- -### [`gcp-compute-subnetwork`](/sources/gcp/Types/gcp-compute-subnetwork) - -The Serverless VPC Access connector used by the revision is attached to a particular subnetwork, so the revision is indirectly linked to that subnetwork. - -### [`gcp-iam-service-account`](/sources/gcp/Types/gcp-iam-service-account) - -Each revision runs with an IAM service account whose permissions govern outbound calls and resource access. The revision therefore **runs-as** the referenced service account. - -### [`gcp-run-service`](/sources/gcp/Types/gcp-run-service) - -The revision is a child resource of a Cloud Run Service. All traffic routing and lifecycle events are managed at the service level. - -### [`gcp-secret-manager-secret`](/sources/gcp/Types/gcp-secret-manager-secret) - -Environment variables or mounted volumes in the revision can pull values from Secret Manager. This establishes a **consumes-secret** relationship. - -### [`gcp-sql-admin-instance`](/sources/gcp/Types/gcp-sql-admin-instance) - -If the revision defines Cloud SQL connections, it will list one or more Cloud SQL instances it can connect to through the Cloud SQL proxy. - -### [`gcp-storage-bucket`](/sources/gcp/Types/gcp-storage-bucket) - -A revision may read from or write to Cloud Storage buckets (for example for static assets or generated files) when granted the appropriate IAM permissions, creating a potential dependency on those buckets. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-run-service.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-run-service.md deleted file mode 100644 index 6620ad19..00000000 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-run-service.md +++ /dev/null @@ -1,55 +0,0 @@ ---- -title: GCP Run Service -sidebar_label: gcp-run-service ---- - -Google Cloud Run Service is a fully-managed compute platform that automatically scales stateless containers on demand. 
A Service represents the user-facing abstraction of your application, managing one or more immutable Revisions of a container image and routing traffic to them. It provides configuration for networking, environment variables, secrets, concurrency, autoscaling and identity. -Official documentation: https://cloud.google.com/run/docs - -**Terrafrom Mappings:** - -- `google_cloud_run_v2_service.id` - -## Supported Methods - -- `GET`: Get a gcp-run-service by its "locations|services" -- ~~`LIST`~~ -- `SEARCH`: Search for gcp-run-service by its "locations" - -## Possible Links - -### [`gcp-artifact-registry-docker-image`](/sources/gcp/Types/gcp-artifact-registry-docker-image) - -A Cloud Run Service deploys one specific container image; most commonly this image is stored in Artifact Registry. The link shows which image version the Service’s active Revision is based on. - -### [`gcp-cloud-kms-crypto-key`](/sources/gcp/Types/gcp-cloud-kms-crypto-key) - -If the Service uses customer-managed encryption keys (CMEK) for at-rest encryption of logs, volumes or secrets, it will reference a Cloud KMS Crypto Key. - -### [`gcp-compute-network`](/sources/gcp/Types/gcp-compute-network) - -When the Service is configured with a VPC connector for egress or to reach private resources, it ultimately attaches to a specific Compute Network. - -### [`gcp-compute-subnetwork`](/sources/gcp/Types/gcp-compute-subnetwork) - -The VPC connector also targets a concrete Subnetwork; this link identifies the precise subnet through which the Service’s traffic is routed. - -### [`gcp-iam-service-account`](/sources/gcp/Types/gcp-iam-service-account) - -A Cloud Run Service runs with a dedicated runtime identity. This Service Account is used for accessing other Google Cloud resources and defines the permissions available to the container. - -### [`gcp-run-revision`](/sources/gcp/Types/gcp-run-revision) - -Each update to configuration or container image creates a new Revision. 
The Service points traffic to one or more of these Revisions; the link maps the parent-child relationship. - -### [`gcp-secret-manager-secret`](/sources/gcp/Types/gcp-secret-manager-secret) - -Environment variables or mounted volumes in the Service can be sourced from Secret Manager. Linked secrets indicate which sensitive values are injected at runtime. - -### [`gcp-sql-admin-instance`](/sources/gcp/Types/gcp-sql-admin-instance) - -If Cloud SQL connections are configured via the Cloud SQL Auth Proxy side-car or built-in integration, the Service will reference one or more Cloud SQL instances. - -### [`gcp-storage-bucket`](/sources/gcp/Types/gcp-storage-bucket) - -The Service may access files in Cloud Storage for static assets or as mounted volumes (Cloud Run volumes). Buckets listed here are those explicitly referenced by environment variables, IAM permissions or volume mounts. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-secret-manager-secret.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-secret-manager-secret.md deleted file mode 100644 index 6c8018f7..00000000 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-secret-manager-secret.md +++ /dev/null @@ -1,27 +0,0 @@ ---- -title: GCP Secret Manager Secret -sidebar_label: gcp-secret-manager-secret ---- - -A Secret in Google Cloud Secret Manager is a secure, version-controlled container for sensitive data such as passwords, API keys, certificates, or any arbitrary text or binary payload. Each Secret holds one or more Secret Versions, allowing you to rotate or roll back the underlying data without changing the resource identifier that your applications refer to. Secrets are encrypted at rest with Google-managed keys by default, or you can supply a customer-managed Cloud KMS key. You can also configure Pub/Sub notifications to be emitted whenever a new version is added or other lifecycle events occur. 
-For full details see the official documentation: https://cloud.google.com/secret-manager/docs - -**Terrafrom Mappings:** - -- `google_secret_manager_secret.secret_id` - -## Supported Methods - -- `GET`: Get a gcp-secret-manager-secret by its "name" -- `LIST`: List all gcp-secret-manager-secret -- ~~`SEARCH`~~ - -## Possible Links - -### [`gcp-cloud-kms-crypto-key`](/sources/gcp/Types/gcp-cloud-kms-crypto-key) - -If a Secret is configured to use customer-managed encryption (CMEK), it references a Cloud KMS Crypto Key that performs the envelope encryption of all Secret Versions. Compromise or mis-configuration of the referenced KMS key directly affects the confidentiality and availability of the Secret’s payloads. - -### [`gcp-pub-sub-topic`](/sources/gcp/Types/gcp-pub-sub-topic) - -Secret Manager can publish events—such as the creation of a new Secret Version—to a Pub/Sub topic. This enables automated workflows like triggering Cloud Functions for secret rotation or auditing. The Secret therefore holds an optional link to any Pub/Sub topic configured for such notifications. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-security-center-management-security-center-service.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-security-center-management-security-center-service.md deleted file mode 100644 index 25501a0c..00000000 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-security-center-management-security-center-service.md +++ /dev/null @@ -1,20 +0,0 @@ ---- -title: GCP Security Center Management Security Center Service -sidebar_label: gcp-security-center-management-security-center-service ---- - -The **Security Center Service** resource represents the configuration of Security Command Center (SCC) for a particular Google Cloud location. -Each instance of this resource indicates that SCC is running in the specified region and records the service‐wide settings that govern how findings are ingested, stored and surfaced. 
-Official documentation: https://cloud.google.com/security-command-center/docs/reference/security-center-management/rest/v1/projects.locations.securityCenterServices/list - -## Supported Methods - -- `GET`: Get a gcp-security-center-management-security-center-service by its "locations|securityCenterServices" -- ~~`LIST`~~ -- `SEARCH`: Search Security Center services in a location. Use the format "location". - -## Possible Links - -### [`gcp-cloud-resource-manager-project`](/sources/gcp/Types/gcp-cloud-resource-manager-project) - -A Security Center Service exists **inside** a specific Google Cloud project – the project determines billing, IAM policies and the scope of resources that SCC monitors. The Overmind link lets you pivot from the project to every Security Center Service it has enabled (and vice-versa), helping you see which projects have security monitoring active in each region. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-service-directory-endpoint.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-service-directory-endpoint.md deleted file mode 100644 index 76976f02..00000000 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-service-directory-endpoint.md +++ /dev/null @@ -1,23 +0,0 @@ ---- -title: GCP Service Directory Endpoint -sidebar_label: gcp-service-directory-endpoint ---- - -A **Service Directory Endpoint** represents a concrete network endpoint (host/IP address and port) that implements a Service Directory service within a namespace and location. Clients resolve a service and obtain one or more endpoints in order to make network calls. Endpoints can carry arbitrary key-value metadata and may point at instances running inside a VPC, on-premises, or in another cloud. 
-Official documentation: https://cloud.google.com/service-directory/docs/reference/rest/v1/projects.locations.namespaces.services.endpoints - -**Terrafrom Mappings:** - -- `google_service_directory_endpoint.id` - -## Supported Methods - -- `GET`: Get a gcp-service-directory-endpoint by its "locations|namespaces|services|endpoints" -- ~~`LIST`~~ -- `SEARCH`: Search for endpoints by "location|namespace_id|service_id" or "projects/[project_id]/locations/[location]/namespaces/[namespace_id]/services/[service_id]/endpoints/[endpoint_id]" which is supported for terraform mappings. - -## Possible Links - -### [`gcp-compute-network`](/sources/gcp/Types/gcp-compute-network) - -A Service Directory endpoint’s address usually resides within a VPC network. Linking an endpoint to its `gcp-compute-network` resource lets you trace which network the IP belongs to, ensuring that connectivity policies (firewalls, routes, private service access, etc.) permit clients to reach the service before deployment. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-service-usage-service.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-service-usage-service.md deleted file mode 100644 index 64ceb440..00000000 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-service-usage-service.md +++ /dev/null @@ -1,23 +0,0 @@ ---- -title: GCP Service Usage Service -sidebar_label: gcp-service-usage-service ---- - -A **Service Usage Service** represents an individual Google-managed API or service (e.g. `compute.googleapis.com`, `pubsub.googleapis.com`) and its enablement state inside a single GCP project. By querying this resource you can determine whether a particular service is currently enabled, disabled, or in another transitional state for that project, which is critical for understanding if downstream resources can be created successfully. 
-Official documentation: https://cloud.google.com/service-usage/docs/reference/rest/v1/services - -## Supported Methods - -- `GET`: Get a gcp-service-usage-service by its "name" -- `LIST`: List all gcp-service-usage-service -- ~~`SEARCH`~~ - -## Possible Links - -### [`gcp-cloud-resource-manager-project`](/sources/gcp/Types/gcp-cloud-resource-manager-project) - -Every Service Usage Service exists **within** a single Cloud Resource Manager project. The project acts as the parent container and dictates billing, IAM policies and quota that apply to the service. - -### [`gcp-pub-sub-topic`](/sources/gcp/Types/gcp-pub-sub-topic) - -A Pub/Sub topic can only be created or used if the **`pubsub.googleapis.com`** Service Usage Service is enabled in the same project. Overmind links the topic back to its enabling service so you can quickly spot configuration drift or missing API enablement that would prevent deployment. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-spanner-database.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-spanner-database.md deleted file mode 100644 index 76d315a0..00000000 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-spanner-database.md +++ /dev/null @@ -1,34 +0,0 @@ ---- -title: GCP Spanner Database -sidebar_label: gcp-spanner-database ---- - -A GCP Spanner Database is a logically isolated collection of relational data that lives inside a Cloud Spanner instance. It contains the schema (tables, indexes, views) and the data itself, and it inherits the instance’s compute and storage resources. Cloud Spanner provides global consistency, horizontal scalability and automatic replication, making the database suitable for mission-critical, globally distributed workloads. 
Official documentation: https://cloud.google.com/spanner/docs - -**Terrafrom Mappings:** - -- `google_spanner_database.name` - -## Supported Methods - -- `GET`: Get a gcp-spanner-database by its "instances|databases" -- ~~`LIST`~~ -- `SEARCH`: Search for gcp-spanner-database by its "instances" - -## Possible Links - -### [`gcp-cloud-kms-crypto-key`](/sources/gcp/Types/gcp-cloud-kms-crypto-key) - -A Spanner database can be encrypted with a customer-managed encryption key (CMEK) stored in Cloud KMS. Overmind links the database to the KMS Crypto Key that protects its data at rest. - -### [`gcp-cloud-kms-crypto-key-version`](/sources/gcp/Types/gcp-cloud-kms-crypto-key-version) - -When CMEK is enabled, Spanner actually uses a specific version of the KMS key. This link shows the exact key version currently in use so you can track key rotation and ensure compliance. - -### [`gcp-spanner-database`](/sources/gcp/Types/gcp-spanner-database) - -Spanner databases may reference one another through backups, clones or restores. Overmind records these relationships (e.g., a database restored from another) to expose any dependency chain between databases. - -### [`gcp-spanner-instance`](/sources/gcp/Types/gcp-spanner-instance) - -Every Spanner database belongs to a single Spanner instance. This link lets you traverse from the database to the parent instance to understand the compute resources, regional configuration and IAM policies that ultimately govern the database. 
diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-spanner-instance.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-spanner-instance.md deleted file mode 100644 index 67c03c71..00000000 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-spanner-instance.md +++ /dev/null @@ -1,23 +0,0 @@ ---- -title: GCP Spanner Instance -sidebar_label: gcp-spanner-instance ---- - -A **Cloud Spanner instance** is the top-level container that defines the geographical placement, compute capacity and billing context for one or more Cloud Spanner databases. When you create an instance you choose an instance configuration (regional or multi-regional) and allocate compute in the form of nodes or processing units; all databases created within the instance inherit this configuration and capacity. Google manages replication, automatic fail-over and online scaling transparently within the boundaries of the instance. -For full details see the official documentation: https://cloud.google.com/spanner/docs/instances - -**Terrafrom Mappings:** - -- `google_spanner_instance.name` - -## Supported Methods - -- `GET`: Get a gcp-spanner-instance by its "name" -- `LIST`: List all gcp-spanner-instance -- ~~`SEARCH`~~ - -## Possible Links - -### [`gcp-spanner-database`](/sources/gcp/Types/gcp-spanner-database) - -Each Cloud Spanner instance can contain multiple Cloud Spanner databases. The `gcp-spanner-database` resource is therefore a child of the `gcp-spanner-instance`; enumerating databases or assessing their risks starts with traversing from the parent instance to its associated databases. 
diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-sql-admin-backup-run.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-sql-admin-backup-run.md deleted file mode 100644 index b5ce83cf..00000000 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-sql-admin-backup-run.md +++ /dev/null @@ -1,27 +0,0 @@ ---- -title: GCP Sql Admin Backup Run -sidebar_label: gcp-sql-admin-backup-run ---- - -A **Cloud SQL Backup Run** represents a single on-demand or automated backup operation for a Cloud SQL instance. It records when the backup was initiated, its status, size, location, encryption information and other metadata. Backup runs allow administrators to restore an instance to a previous state or to clone data into a new instance. -Official documentation: https://cloud.google.com/sql/docs/mysql/admin-api/rest/v1/backupRuns - -## Supported Methods - -- `GET`: Get a gcp-sql-admin-backup-run by its "instances|backupRuns" -- ~~`LIST`~~ -- `SEARCH`: Search for gcp-sql-admin-backup-run by its "instances" - -## Possible Links - -### [`gcp-cloud-kms-crypto-key`](/sources/gcp/Types/gcp-cloud-kms-crypto-key) - -If Customer-Managed Encryption Keys (CMEK) are enabled for the instance, the backup run is encrypted with a Cloud KMS Crypto Key. This link points to the parent key that protects the specific key version used for the backup. - -### [`gcp-cloud-kms-crypto-key-version`](/sources/gcp/Types/gcp-cloud-kms-crypto-key-version) - -The `encryptionInfo` block inside the backup run references the exact Cloud KMS Crypto Key Version that encrypted the backup file. This relationship lets you trace which key version must be available to decrypt or restore the backup. - -### [`gcp-sql-admin-instance`](/sources/gcp/Types/gcp-sql-admin-instance) - -Every backup run belongs to a single Cloud SQL instance. This link connects the backup run to its parent instance so you can see which database the backup protects and assess the impact of restoring or deleting it. 
diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-sql-admin-backup.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-sql-admin-backup.md deleted file mode 100644 index ab2ee094..00000000 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-sql-admin-backup.md +++ /dev/null @@ -1,31 +0,0 @@ ---- -title: GCP Sql Admin Backup -sidebar_label: gcp-sql-admin-backup ---- - -A **Cloud SQL backup** represents a point-in-time copy of the data stored in a Cloud SQL instance. Backups are created automatically on a schedule you define or manually on demand, and are retained in Google-managed Cloud Storage where they can later be used to restore the originating instance or clone a new one. Backups may be encrypted either with Google-managed keys or with a customer-managed encryption key (CMEK) from Cloud KMS. -See the official documentation for details: https://cloud.google.com/sql/docs/mysql/backup-recovery/backups - -## Supported Methods - -- `GET`: Get a gcp-sql-admin-backup by its "name" -- `LIST`: List all gcp-sql-admin-backup -- ~~`SEARCH`~~ - -## Possible Links - -### [`gcp-cloud-kms-crypto-key`](/sources/gcp/Types/gcp-cloud-kms-crypto-key) - -If CMEK encryption is enabled for the Cloud SQL instance, the backup is encrypted with a specific Cloud KMS CryptoKey. This link shows which key secures the backup data at rest. - -### [`gcp-cloud-kms-crypto-key-version`](/sources/gcp/Types/gcp-cloud-kms-crypto-key-version) - -The actual ciphertext is tied to a particular CryptoKey **version**. Linking to the key version lets you see exactly which rotation of the key was used when the backup was taken. - -### [`gcp-compute-network`](/sources/gcp/Types/gcp-compute-network) - -Although backups are stored out-of-band, they are associated with the same VPC network(s) as the Cloud SQL instance that produced them. This link helps trace network-level access policies that apply when a backup is restored to an instance using private IP. 
- -### [`gcp-sql-admin-instance`](/sources/gcp/Types/gcp-sql-admin-instance) - -Every backup is generated from, and can be restored to, a specific Cloud SQL instance. This link identifies the parent instance, allowing you to evaluate how instance configuration (e.g. region, database version) affects backup usability and risk. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-sql-admin-instance.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-sql-admin-instance.md deleted file mode 100644 index c4167965..00000000 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-sql-admin-instance.md +++ /dev/null @@ -1,46 +0,0 @@ ---- -title: GCP Sql Admin Instance -sidebar_label: gcp-sql-admin-instance ---- - -A Google Cloud SQL Admin Instance represents a fully-managed relational database instance running on Google Cloud. It encapsulates the configuration for engines such as MySQL, PostgreSQL, or SQL Server, including CPU and memory sizing, version, storage, networking and encryption settings. For full details see the official documentation: https://cloud.google.com/sql/docs/introduction. - -**Terrafrom Mappings:** - -- `google_sql_database_instance.name` - -## Supported Methods - -- `GET`: Get a gcp-sql-admin-instance by its "name" -- `LIST`: List all gcp-sql-admin-instance -- ~~`SEARCH`~~ - -## Possible Links - -### [`gcp-cloud-kms-crypto-key`](/sources/gcp/Types/gcp-cloud-kms-crypto-key) - -Linked when the instance is encrypted with a Customer-Managed Encryption Key (CMEK); the instance stores the resource ID of the Cloud KMS crypto key it uses for data-at-rest encryption. - -### [`gcp-compute-network`](/sources/gcp/Types/gcp-compute-network) - -Appears when the instance is configured with a private IP address. The instance is reachable through a Private Service Connection residing inside a specific VPC network. 
- -### [`gcp-compute-subnetwork`](/sources/gcp/Types/gcp-compute-subnetwork) - -If private IP is enabled, the instance is bound to a particular subnetwork from which it obtains its internal IP and through which it exposes its endpoints. - -### [`gcp-iam-service-account`](/sources/gcp/Types/gcp-iam-service-account) - -Cloud SQL creates or uses a service account to perform administrative actions such as backup, replication and interaction with other Google Cloud services; this link surfaces that service account. - -### [`gcp-sql-admin-backup-run`](/sources/gcp/Types/gcp-sql-admin-backup-run) - -Each successful or scheduled backup run is a child of an instance. The link shows all backup-run resources that belong to the current database instance. - -### [`gcp-sql-admin-instance`](/sources/gcp/Types/gcp-sql-admin-instance) - -An instance can reference another instance as its read replica or as the source for cloning. This self-link captures those primary/replica relationships. - -### [`gcp-storage-bucket`](/sources/gcp/Types/gcp-storage-bucket) - -Imports, exports and point-in-time backups can read from or write to Cloud Storage. The instance therefore maintains references to buckets used for these operations. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-storage-bucket-iam-policy.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-storage-bucket-iam-policy.md deleted file mode 100644 index c919de54..00000000 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-storage-bucket-iam-policy.md +++ /dev/null @@ -1,36 +0,0 @@ ---- -title: GCP Storage Bucket Iam Policy -sidebar_label: gcp-storage-bucket-iam-policy ---- - -A **Storage Bucket IAM policy** defines who (principals) can perform which actions (roles/permissions) on a specific Cloud Storage bucket. It is the fine-grained access-control object that sits on top of a bucket and overrides or complements broader project-level IAM settings. 
For full details, see the Google Cloud documentation: https://cloud.google.com/storage/docs/access-control/iam - -**Terrafrom Mappings:** - -- `google_storage_bucket_iam_binding.bucket` -- `google_storage_bucket_iam_member.bucket` -- `google_storage_bucket_iam_policy.bucket` - -## Supported Methods - -- `GET`: Get GCP Storage Bucket Iam Policy by "gcp-storage-bucket-iam-policy-bucket" -- ~~`LIST`~~ -- `SEARCH`: Search for GCP Storage Bucket Iam Policy by "gcp-storage-bucket-iam-policy-bucket" - -## Possible Links - -### [`gcp-compute-project`](/sources/gcp/Types/gcp-compute-project) - -The bucket IAM policy is scoped within a single GCP project; therefore every policy item is linked back to the project that owns the bucket. - -### [`gcp-iam-role`](/sources/gcp/Types/gcp-iam-role) - -Each binding inside the policy references one or more IAM roles that grant permissions; this link shows which predefined or custom roles are in use. - -### [`gcp-iam-service-account`](/sources/gcp/Types/gcp-iam-service-account) - -Service accounts are common principals in bucket policies. Linking reveals which service accounts have been granted access and with what privileges. - -### [`gcp-storage-bucket`](/sources/gcp/Types/gcp-storage-bucket) - -The IAM policy is attached to and governs a specific Cloud Storage bucket; this link connects the policy object to the underlying bucket resource. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-storage-bucket.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-storage-bucket.md deleted file mode 100644 index 2efbb013..00000000 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-storage-bucket.md +++ /dev/null @@ -1,38 +0,0 @@ ---- -title: GCP Storage Bucket -sidebar_label: gcp-storage-bucket ---- - -A Google Cloud Storage Bucket is a globally-unique container used to store, organise and serve objects (files) in Google Cloud Storage. 
Buckets provide configuration points for data location, access control, lifecycle management, encryption and logging. They are the fundamental resource for object storage workloads such as static website hosting, backup, or data lakes. -For full details see the official documentation: https://cloud.google.com/storage/docs/buckets - -**Terrafrom Mappings:** - -- `google_storage_bucket.name` -- `google_storage_bucket_iam_binding.bucket` -- `google_storage_bucket_iam_member.bucket` -- `google_storage_bucket_iam_policy.bucket` - -## Supported Methods - -- `GET`: Get a gcp-storage-bucket by its "name" -- `LIST`: List all gcp-storage-bucket -- ~~`SEARCH`~~ - -## Possible Links - -### [`gcp-cloud-kms-crypto-key`](/sources/gcp/Types/gcp-cloud-kms-crypto-key) - -A bucket may be encrypted with a customer-managed encryption key (CMEK) that resides in Cloud KMS. The bucket’s encryption configuration therefore references the corresponding `gcp-cloud-kms-crypto-key`. - -### [`gcp-compute-network`](/sources/gcp/Types/gcp-compute-network) - -When VPC Service Controls or Private Google Access are used, access between a Compute Network and a Storage Bucket is constrained or allowed based on network settings. Log sinks from VPC flow logs can also target a Storage Bucket, creating a relationship between the bucket and the originating `gcp-compute-network`. - -### [`gcp-logging-bucket`](/sources/gcp/Types/gcp-logging-bucket) - -Cloud Logging can route logs from a Logging Bucket to Cloud Storage for long-term retention or auditing. If such a sink targets this Storage Bucket, the bucket becomes linked to the source `gcp-logging-bucket`. - -### [`gcp-storage-bucket-iam-policy`](/sources/gcp/Types/gcp-storage-bucket-iam-policy) - -Every Storage Bucket has an IAM policy that defines who can read, write or administer it. That policy is exposed as a separate `gcp-storage-bucket-iam-policy` object, which is directly attached to this bucket. 
diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-storage-transfer-transfer-job.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-storage-transfer-transfer-job.md deleted file mode 100644 index 6a2b63ff..00000000 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-storage-transfer-transfer-job.md +++ /dev/null @@ -1,39 +0,0 @@ ---- -title: GCP Storage Transfer Transfer Job -sidebar_label: gcp-storage-transfer-transfer-job ---- - -Google Cloud Storage Transfer Service enables you to copy or synchronise data between Cloud Storage buckets, on-premises file systems and external cloud providers. A Storage Transfer **transfer job** is the top-level resource that defines where data should be copied from, where it should be copied to, the schedule on which the copy should run, and options such as delete or overwrite rules. -Official documentation: https://cloud.google.com/storage-transfer/docs/create-transfers - -**Terrafrom Mappings:** - -- `google_storage_transfer_job.name` - -## Supported Methods - -- `GET`: Get a gcp-storage-transfer-transfer-job by its "name" -- `LIST`: List all gcp-storage-transfer-transfer-job -- ~~`SEARCH`~~ - -## Possible Links - -### [`gcp-iam-service-account`](/sources/gcp/Types/gcp-iam-service-account) - -The transfer job runs under a Google-managed or user-specified IAM service account, which needs roles such as `Storage Object Admin` on the destination bucket and, when applicable, permissions to access the source. - -### [`gcp-pub-sub-subscription`](/sources/gcp/Types/gcp-pub-sub-subscription) - -If event notifications are enabled, a Pub/Sub subscription can pull the messages that the transfer job publishes when it starts, completes, or encounters errors. - -### [`gcp-pub-sub-topic`](/sources/gcp/Types/gcp-pub-sub-topic) - -A transfer job can be configured with a Pub/Sub topic as its notification destination so that operational events are published for downstream processing or alerting. 
- -### [`gcp-secret-manager-secret`](/sources/gcp/Types/gcp-secret-manager-secret) - -When transferring from external providers such as AWS S3 or Azure Blob Storage, the access keys and credentials are often stored in Secret Manager secrets, which the transfer job references to authenticate to the source. - -### [`gcp-storage-bucket`](/sources/gcp/Types/gcp-storage-bucket) - -Every transfer job specifies at least one Cloud Storage bucket as a source and/or destination; therefore it has direct relationships to the buckets involved in the data copy. diff --git a/docs.overmind.tech/docs/sources/gcp/_category_.json b/docs.overmind.tech/docs/sources/gcp/_category_.json deleted file mode 100644 index 80514572..00000000 --- a/docs.overmind.tech/docs/sources/gcp/_category_.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "label": "GCP 🆕", - "position": 3, - "collapsed": true, - "link": { - "type": "generated-index", - "description": "How to integrate your Google Cloud Platform" - } -} diff --git a/docs.overmind.tech/docs/sources/gcp/configuration.md b/docs.overmind.tech/docs/sources/gcp/configuration.md deleted file mode 100644 index f89dfb9c..00000000 --- a/docs.overmind.tech/docs/sources/gcp/configuration.md +++ /dev/null @@ -1,442 +0,0 @@ ---- -title: GCP Configuration -sidebar_position: 1 ---- - -# GCP Configuration - -## Overview - -Overmind's GCP infrastructure discovery provides comprehensive visibility into your Google Cloud Platform resources through secure, read-only access using Google Cloud's native IAM system. - -Overmind supports two authentication methods: - -1. **Direct Access** (Default) - Grant permissions directly to the Overmind service account -2. **Service Account Impersonation** (Optional) - Create your own service account with permissions, then allow Overmind to impersonate it - -Both methods provide the same functionality and security. Choose the method that fits your organization's security policies. 
- -### Authentication Methods Comparison - -**Direct Access:** - -- Simplest setup - grant roles directly to Overmind's service account -- Best for quick setup and straightforward security requirements - -**Service Account Impersonation:** - -- Enhanced control - you create and manage your own service account -- Better for organizations requiring all service accounts to be internally managed -- Provides dual identity in audit logs (both Overmind's SA and your SA) -- Learn more: [GCP Service Account Impersonation](https://cloud.google.com/iam/docs/service-account-impersonation) - -### Why Service Account-Based Access? - -Each customer receives a unique Overmind service account with minimal, read-only permissions. All access is logged through Google Cloud's audit system, giving you complete control with no shared credentials. This aligns with [Google Cloud's security best practices](https://cloud.google.com/security/best-practices). - -## Prerequisites - -Before beginning setup, ensure you have: - -- **GCP Resource Access**: Appropriate IAM admin permissions at the organization, folder, or project level to grant IAM roles (and create service accounts for impersonation) -- **Required Tools**: One of the following: - - [Google Cloud CLI (`gcloud`)](https://cloud.google.com/sdk/docs/install) installed and authenticated - - Terraform with the Google Cloud Provider configured -- **Parent Resource**: The parent resource ID where Overmind will discover resources. 
This can be: - - An organization: `organizations/123456789` - - A folder: `folders/987654321` - - A project: `projects/my-project-id` -- **Regional Scope**: List of GCP regions where your resources are located (mandatory for source configuration) - -### Authentication Setup - -Ensure your local environment is authenticated with Google Cloud: - -```bash -# Authenticate with Google Cloud -gcloud auth login - -# Set your default project (if using a project as parent) -gcloud config set project YOUR_PROJECT_ID - -# Verify authentication -gcloud auth list -``` - -For Terraform users, configure [Application Default Credentials (ADC)](https://cloud.google.com/docs/authentication/application-default-credentials): - -```bash -gcloud auth application-default login -``` - -## Quick Start - -### Step 1: Create Your Overmind GCP Source - -1. Navigate to **Settings** > **Sources** > **Add Source** > **GCP** in the Overmind application -2. Configure your source: - - **Parent ID**: The parent resource to discover from. Format: - - Organization: `organizations/123456789` - - Folder: `folders/987654321` - - Project: `projects/my-project-id` - - **Name**: A descriptive name for this source (optional) - - **Regions**: Select the regions where your resources are located (mandatory) - - **Impersonation** (optional): Toggle on to use service account impersonation - - Enter the email of the service account you'll create (e.g., `overmind-reader@your-project.iam.gserviceaccount.com`) - - Use any unique name for your service account -3. Click **Create Source** - -You'll be redirected to the source details page showing: - -- The Overmind service account email (e.g., `C-xxxxx@ovm-production.iam.gserviceaccount.com`) -- Configuration instructions customized for your setup -- Whether impersonation is enabled - -### Step 2: Grant Permissions - -The source details page provides customized scripts for your setup. 
These scripts automatically apply IAM permissions at the level you specified (organization, folder, or project). Permissions granted at a parent level are inherited by all child resources. - -Choose your preferred method: - -#### Option A: Cloud Shell (Easiest) - -Click the **"Open in Google Cloud Shell"** button shown on the source details page. This provides you with the scripts and guidance needed to complete the setup. Follow the instructions in Cloud Shell to run the appropriate setup script for your configuration. - -#### Option B: Manual Script - -Copy and run the bash script shown on the source details page. The script automatically detects whether you're using an organization, folder, or project parent and applies the correct `gcloud` commands. The script varies based on whether impersonation is enabled: - -**For Direct Access:** - -- Grants read-only roles directly to the Overmind service account at your specified parent level -- For project-level parents, also creates a custom role for additional permissions - -**For Impersonation:** - -- Grants read-only roles to your service account at your specified parent level (you must create the service account manually first) -- For project-level parents, also creates a custom role for additional permissions -- Grants Overmind's service account permission to impersonate yours (`roles/iam.serviceAccountTokenCreator`) - -#### Option C: Terraform - -Copy the Terraform configuration shown on the source details page and apply it: - -```bash -terraform init -terraform plan -terraform apply -``` - -### Step 3: Verify Source Status - -1. Navigate to **Settings** > **Sources** in the Overmind application -2. Verify your GCP source shows as **Healthy** - -## Required Permissions - -Overmind requires read-only IAM roles for infrastructure discovery. See the [Required GCP Roles Reference](#required-gcp-roles-reference) for the complete list. 
- -### Permission Flow - -Permissions can be applied at any level of the GCP resource hierarchy and are inherited by child resources: - -**Direct Access:** - -```text -Your GCP Organization/Folder/Project - └─ Overmind Service Account - └─ Granted: Viewer roles (+ custom role for project-level) - └─ Inherited by all child folders and projects -``` - -**Service Account Impersonation:** - -```text -Your GCP Organization/Folder/Project - ├─ Your Service Account - │ └─ Granted: Viewer roles (+ custom role for project-level) - │ └─ Inherited by all child folders and projects - └─ Overmind Service Account - └─ Granted: roles/iam.serviceAccountTokenCreator on Your Service Account -``` - -## Switching Between Authentication Methods - -### Enable Impersonation - -1. Create a service account in your GCP project (if you haven't already) -2. Grant it the required read-only roles and impersonation permission (use the scripts from the source details page - they handle both) -3. Edit your source in Overmind: enable **Impersonation** and enter your service account email -4. (Optional) Remove direct permissions from Overmind's service account - -### Disable Impersonation - -1. Edit your source in Overmind: disable **Impersonation** (this updates the scripts on the source details page) -2. Grant the required read-only roles directly to Overmind's service account (use the updated scripts from the source details page) -3. (Optional) Remove the impersonation permission and delete your service account - -## Validation - -### Verify IAM Permissions - -**Using Google Cloud Console:** - -1. Navigate to [IAM & Admin > IAM](https://console.cloud.google.com/iam-admin/iam) -2. Select your organization, folder, or project -3. Search for the service account (Overmind's or yours, depending on setup) -4. 
Verify all required roles are listed - -**Using Google Cloud CLI:** - -For direct access at organization level: - -```bash -gcloud organizations get-iam-policy YOUR_ORG_ID \ - --flatten="bindings[].members" \ - --format="table(bindings.role)" \ - --filter="bindings.members:serviceAccount:OVERMIND_SA_EMAIL" -``` - -For direct access at folder level: - -```bash -gcloud resource-manager folders get-iam-policy YOUR_FOLDER_ID \ - --flatten="bindings[].members" \ - --format="table(bindings.role)" \ - --filter="bindings.members:serviceAccount:OVERMIND_SA_EMAIL" -``` - -For direct access at project level: - -```bash -gcloud projects get-iam-policy YOUR_PROJECT_ID \ - --flatten="bindings[].members" \ - --format="table(bindings.role)" \ - --filter="bindings.members:serviceAccount:OVERMIND_SA_EMAIL" -``` - -For impersonation (verify Overmind can impersonate your SA): - -```bash -gcloud iam service-accounts get-iam-policy YOUR_SA_EMAIL \ - --project=YOUR_PROJECT_ID \ - --flatten="bindings[].members" \ - --format="table(bindings.role,bindings.members)" \ - --filter="bindings.members:serviceAccount:OVERMIND_SA_EMAIL" -``` - -### Test Source Discovery - -1. Navigate to **Explore** in the Overmind application -2. Run a query: GCP sources are prefixed with `gcp-` - - To list all VMs: `gcp-compute-instance` > `LIST` -3. Verify resources are being discovered - -### Validate Regional Coverage - -Review the **Regions** configuration in your source settings and verify discovered resources match your expected regional distribution. 
- -## Troubleshooting - -### Common Issues - -**"Insufficient Permissions" Error** - -Verify all required roles are assigned at the appropriate level: - -```bash -# For organization-level access -gcloud organizations get-iam-policy YOUR_ORG_ID \ - --flatten="bindings[].members" \ - --filter="bindings.members:serviceAccount:SA_EMAIL" - -# For folder-level access -gcloud resource-manager folders get-iam-policy YOUR_FOLDER_ID \ - --flatten="bindings[].members" \ - --filter="bindings.members:serviceAccount:SA_EMAIL" - -# For project-level access -gcloud projects get-iam-policy YOUR_PROJECT_ID \ - --flatten="bindings[].members" \ - --filter="bindings.members:serviceAccount:SA_EMAIL" -``` - -Re-run the setup script or check for organization-level policies restricting service account access. - -**No Resources Discovered** - -1. Verify regional configuration matches where your resources exist -2. For project-level parents, check that required GCP APIs are enabled: - - ```bash - gcloud services list --enabled --project=YOUR_PROJECT_ID - ``` - -3. For organization or folder-level parents, verify that you have the necessary permissions to list projects and that child projects have the required APIs enabled -4. Some resources may require additional permissions at different levels of the hierarchy - -**Service Account Impersonation Fails** - -1. Verify the impersonation permission is granted: - - ```bash - gcloud iam service-accounts get-iam-policy YOUR_SA_EMAIL --project=YOUR_PROJECT_ID - ``` - - You should see Overmind's service account with `roles/iam.serviceAccountTokenCreator`. - -2. Verify your service account exists and isn't disabled: - - ```bash - gcloud iam service-accounts describe YOUR_SA_EMAIL --project=YOUR_PROJECT_ID - ``` - -3. Ensure the service account email in Overmind matches exactly - -4. Wait for propagation: IAM policy changes can take a few minutes to propagate. Wait 2-5 minutes after granting permissions before testing. - -5. 
Check organization policies: Some organization policies may restrict service account impersonation. - -**Service Account Not Found** - -1. Verify you copied the correct email from the Overmind application -2. Ensure the email format is correct (ends with `.iam.gserviceaccount.com`) -3. For impersonation: verify your service account was created successfully -4. Contact [Overmind support](https://docs.overmind.tech/misc/support) if issues persist - -**Terraform Apply Failures** - -1. Verify authentication: `gcloud auth application-default print-access-token` -2. Ensure your credentials have necessary IAM permissions -3. For impersonation: ensure you have `iam.serviceAccounts.create` permission - -### Getting Help - -If you continue to experience issues, contact [Overmind support](https://docs.overmind.tech/misc/support) with: - -- Your GCP parent resource (organization/folder/project ID) -- The Overmind service account email -- Your service account email (if using impersonation) -- Whether you're using direct access or impersonation -- The parent level you're configuring (organization, folder, or project) -- Specific error messages and screenshots - -## Security Considerations - -### Principle of Least Privilege - -All roles are read-only and do not allow: - -- Resource modification or deletion -- Data access (beyond metadata) -- Configuration changes -- Administrative operations - -### Monitoring and Auditing - -1. Enable [Cloud Audit Logs](https://cloud.google.com/logging/docs/audit) for your project -2. Monitor service account activity in audit logs -3. Configure alerts for unusual behavior - -**Impersonation Audit Benefits:** -With impersonation, audit logs show both Overmind's identity and your service account identity, providing enhanced traceability. 
- -### Permission Management - -- **Regular Review**: Periodically review granted permissions -- **Revocation**: Remove access anytime: - - **Direct access**: Remove IAM bindings - - **Impersonation**: Remove `serviceAccountTokenCreator` role or disable/delete your service account - -## Required Permissions - -Overmind requires read-only access to discover and map your GCP infrastructure. The setup scripts provided in the Overmind application automatically grant all necessary permissions. - -### What Gets Configured - -**Essential role for resource discovery:** - -- `roles/browser` - Required for listing projects and navigating the resource hierarchy - -**Read-only viewer roles** for GCP services including: - -- Compute Engine, GKE, Cloud Run, Cloud Functions, Dataflow -- Cloud SQL, BigQuery, Spanner, Cloud Storage -- IAM, networking, monitoring, and logging resources -- And other GCP services - -**A custom role** with additional permissions for: - -- BigQuery data transfer configurations -- Spanner database details - -**Project-level-only roles** (only applied when using `projects/` parent): - -- `roles/iam.roleViewer` - View IAM roles -- `roles/iam.serviceAccountViewer` - View service accounts - -> **Note:** Some GCP IAM roles can only be granted at the project level, not at the organization or folder level. When configuring at the organization or folder level, these project-specific roles are automatically excluded. The custom role and project-level IAM roles are only created and assigned when using a project-level parent (e.g., `projects/my-project`). - -**For impersonation** (if enabled): - -- `roles/iam.serviceAccountTokenCreator` - Allows Overmind to impersonate your service account - -All permissions are read-only and do not allow resource modification, deletion, or access to data beyond metadata. - -The complete list of roles is included in the setup scripts shown in your source details page. 
These scripts are automatically updated as Overmind adds support for new GCP services and adapt based on whether you're configuring at the organization, folder, or project level. - -## Required GCP Roles Reference - -Here are all the predefined GCP roles that Overmind requires, plus the custom role for additional permissions: - -### Predefined Roles - -| Role | Purpose | -| --------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `roles/browser` | **Required:** List projects and navigate resource hierarchy [GCP Docs](https://cloud.google.com/iam/docs/understanding-roles#browser) | -| `roles/aiplatform.viewer` | AI Platform resource discovery [GCP Docs](https://cloud.google.com/iam/docs/roles-permissions/aiplatform#aiplatform.viewer) | -| `roles/artifactregistry.reader` | Artifact Registry repository discovery [GCP Docs](https://cloud.google.com/iam/docs/roles-permissions/artifactregistry#artifactregistry.reader) | -| `roles/bigquery.metadataViewer` | BigQuery metadata discovery [GCP Docs](https://cloud.google.com/iam/docs/roles-permissions/bigquery#bigquery.metadataViewer) | -| `roles/bigquery.user` | BigQuery data transfer discovery [GCP Docs](https://cloud.google.com/iam/docs/roles-permissions/bigquery#bigquery.user) | -| `roles/bigtable.viewer` | Cloud Bigtable resource discovery [GCP Docs](https://cloud.google.com/iam/docs/roles-permissions/bigtable#bigtable.viewer) | -| `roles/cloudbuild.builds.viewer` | Cloud Build resource discovery [GCP Docs](https://cloud.google.com/iam/docs/roles-permissions/cloudbuild#cloudbuild.builds.viewer) | -| `roles/cloudfunctions.viewer` | Cloud Functions discovery [GCP Docs](https://cloud.google.com/iam/docs/roles-permissions/cloudfunctions#cloudfunctions.viewer) | -| `roles/cloudkms.viewer` | Cloud KMS resource discovery [GCP 
Docs](https://cloud.google.com/iam/docs/roles-permissions/cloudkms#cloudkms.viewer) | -| `roles/cloudsql.viewer` | Cloud SQL instance discovery [GCP Docs](https://cloud.google.com/iam/docs/roles-permissions/cloudsql#cloudsql.viewer) | -| `roles/compute.viewer` | Compute Engine resource discovery [GCP Docs](https://cloud.google.com/iam/docs/roles-permissions/compute#compute.viewer) | -| `roles/container.viewer` | GKE cluster and resource discovery [GCP Docs](https://cloud.google.com/iam/docs/roles-permissions/container#container.viewer) | -| `roles/dataform.viewer` | Dataform resource discovery [GCP Docs](https://cloud.google.com/iam/docs/roles-permissions/dataform#dataform.viewer) | -| `roles/dataplex.catalogViewer` | Dataplex catalog resource discovery [GCP Docs](https://cloud.google.com/iam/docs/roles-permissions/dataplex#dataplex.catalogViewer) | -| `roles/dataplex.viewer` | Dataplex resource discovery [GCP Docs](https://cloud.google.com/iam/docs/roles-permissions/dataplex#dataplex.viewer) | -| `roles/dataflow.viewer` | Dataflow job discovery [GCP Docs](https://cloud.google.com/iam/docs/roles-permissions/dataflow#dataflow.viewer) | -| `roles/dataproc.viewer` | Dataproc cluster discovery [GCP Docs](https://cloud.google.com/iam/docs/roles-permissions/dataproc#dataproc.viewer) | -| `roles/dns.reader` | Cloud DNS resource discovery [GCP Docs](https://cloud.google.com/iam/docs/roles-permissions/dns#dns.reader) | -| `roles/essentialcontacts.viewer` | Essential Contacts discovery [GCP Docs](https://cloud.google.com/iam/docs/roles-permissions/essentialcontacts#essentialcontacts.viewer) | -| `roles/eventarc.viewer` | Eventarc trigger discovery [GCP Docs](https://cloud.google.com/iam/docs/roles-permissions/eventarc#eventarc.viewer) | -| `roles/file.viewer` | Cloud Filestore discovery [GCP Docs](https://cloud.google.com/iam/docs/roles-permissions/file#file.viewer) | -| `roles/iam.roleViewer` | **Project-level only:** IAM role discovery [GCP 
Docs](https://cloud.google.com/iam/docs/roles-permissions/iam#iam.roleViewer) | -| `roles/iam.serviceAccountViewer` | **Project-level only:** IAM service account discovery [GCP Docs](https://cloud.google.com/iam/docs/roles-permissions/iam#iam.serviceAccountViewer) | -| `roles/logging.viewer` | Cloud Logging resource discovery [GCP Docs](https://cloud.google.com/iam/docs/roles-permissions/logging#logging.viewer) | -| `roles/monitoring.viewer` | Cloud Monitoring resource discovery [GCP Docs](https://cloud.google.com/iam/docs/roles-permissions/monitoring#monitoring.viewer) | -| `roles/orgpolicy.policyViewer` | Organization Policy discovery [GCP Docs](https://cloud.google.com/iam/docs/roles-permissions/orgpolicy#orgpolicy.policyViewer) | -| `roles/pubsub.viewer` | Pub/Sub resource discovery [GCP Docs](https://cloud.google.com/iam/docs/roles-permissions/pubsub#pubsub.viewer) | -| `roles/redis.viewer` | Cloud Memorystore Redis discovery [GCP Docs](https://cloud.google.com/iam/docs/roles-permissions/redis#redis.viewer) | -| `roles/resourcemanager.tagViewer` | Resource Manager tag discovery [GCP Docs](https://cloud.google.com/iam/docs/roles-permissions/resourcemanager#resourcemanager.tagViewer) | -| `roles/run.viewer` | Cloud Run resource discovery [GCP Docs](https://cloud.google.com/iam/docs/roles-permissions/run#run.viewer) | -| `roles/secretmanager.viewer` | Secret Manager secret discovery (metadata only) [GCP Docs](https://cloud.google.com/iam/docs/roles-permissions/secretmanager#secretmanager.viewer) | -| `roles/securitycentermanagement.viewer` | Security Center management discovery [GCP Docs](https://cloud.google.com/iam/docs/roles-permissions/securitycentermanagement#securitycentermanagement.viewer) | -| `roles/servicedirectory.viewer` | Service Directory resource discovery [GCP Docs](https://cloud.google.com/iam/docs/roles-permissions/servicedirectory#servicedirectory.viewer) | -| `roles/serviceusage.serviceUsageViewer` | Service Usage discovery [GCP 
Docs](https://cloud.google.com/iam/docs/roles-permissions/serviceusage#serviceusage.serviceUsageViewer) | -| `roles/spanner.viewer` | Cloud Spanner resource discovery [GCP Docs](https://cloud.google.com/iam/docs/roles-permissions/spanner#spanner.viewer) | -| `roles/storage.bucketViewer` | Cloud Storage bucket discovery [GCP Docs](https://cloud.google.com/iam/docs/roles-permissions/storage#storage.bucketViewer) | -| `roles/storagetransfer.viewer` | Storage Transfer Service discovery [GCP Docs](https://cloud.google.com/iam/docs/roles-permissions/storagetransfer#storagetransfer.viewer) | - -### Custom Role - -| Role | Purpose | -| ------------------------------------------------ | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `projects/{PROJECT_ID}/roles/overmindCustomRole` | Custom role for additional BigQuery and Spanner permissions **Permissions:** `bigquery.transfers.get` - BigQuery transfer configuration discovery, `spanner.databases.get` - Spanner database detail discovery, `spanner.databases.list` - Spanner database enumeration | - -All predefined roles provide read-only access and are sourced from Google Cloud's [predefined roles documentation](https://cloud.google.com/iam/docs/understanding-roles#predefined). - -**Project-Level Restrictions:** Some roles (`roles/iam.roleViewer` and `roles/iam.serviceAccountViewer`) can only be granted at the project level in GCP. When configuring at the organization or folder level, these roles are automatically excluded. The custom role is also only created and assigned when using a project-level parent (e.g., `projects/my-project`). 
diff --git a/docs.overmind.tech/docs/sources/k8s/Types/ClusterRole.md b/docs.overmind.tech/docs/sources/k8s/Types/ClusterRole.md deleted file mode 100644 index cccc0bbc..00000000 --- a/docs.overmind.tech/docs/sources/k8s/Types/ClusterRole.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -title: Cluster Role -sidebar_label: ClusterRole ---- - -A ClusterRole is a non-namespaced Kubernetes RBAC resource that groups together one or more policy rules, defining which verbs (such as `get`, `list`, `create`, `delete`) are allowed on which resources across the entire cluster. Because it is cluster-scoped, the permissions it grants apply to all namespaces. It can be referenced by a `RoleBinding` (to limit its scope to a single namespace) or by a `ClusterRoleBinding` (to apply it cluster-wide) and is commonly used to grant system-level or cross-namespace permissions to users, service accounts or other principals. -For full details, see the official Kubernetes documentation: https://kubernetes.io/docs/reference/access-authn-authz/rbac/#clusterrole - -**Terrafrom Mappings:** - -- `kubernetes_cluster_role_v1.metadata[0].name` - -## Supported Methods - -- `GET`: Get a Cluster Role by name -- `LIST`: List all Cluster Roles -- `SEARCH`: Search for a Cluster Role using the ListOptions JSON format e.g. `{"labelSelector": "app=wordpress"}` diff --git a/docs.overmind.tech/docs/sources/k8s/Types/ClusterRoleBinding.md b/docs.overmind.tech/docs/sources/k8s/Types/ClusterRoleBinding.md deleted file mode 100644 index 946b9239..00000000 --- a/docs.overmind.tech/docs/sources/k8s/Types/ClusterRoleBinding.md +++ /dev/null @@ -1,28 +0,0 @@ ---- -title: Cluster Role Binding -sidebar_label: ClusterRoleBinding ---- - -A ClusterRoleBinding grants the permissions defined in a `ClusterRole` to one or more subjects (users, groups, or ServiceAccounts) across the entire Kubernetes cluster. 
Whereas a `RoleBinding` is namespace-scoped, a ClusterRoleBinding has cluster-wide effect, making it a critical component of RBAC configuration. -For further details, see the Kubernetes documentation: https://kubernetes.io/docs/reference/access-authn-authz/rbac/#rolebinding-and-clusterrolebinding - -**Terrafrom Mappings:** - -- `kubernetes_cluster_role_binding_v1.metadata[0].name` -- `kubernetes_cluster_role_binding.metadata[0].name` - -## Supported Methods - -- `GET`: Get a Cluster Role Binding by name -- `LIST`: List all Cluster Role Bindings -- `SEARCH`: Search for a Cluster Role Binding using the ListOptions JSON format e.g. `{"labelSelector": "app=wordpress"}` - -## Possible Links - -### [`ClusterRole`](/sources/k8s/Types/ClusterRole) - -The ClusterRoleBinding’s `roleRef` field points to the name of a `ClusterRole`. Overmind represents this relationship so you can trace which set of permissions (rules) is being granted cluster-wide. - -### [`ServiceAccount`](/sources/k8s/Types/ServiceAccount) - -If a ClusterRoleBinding contains one or more ServiceAccounts in its `subjects` array, Overmind links the binding to those ServiceAccounts, allowing you to see exactly which workload identities receive the referenced cluster-level permissions. diff --git a/docs.overmind.tech/docs/sources/k8s/Types/ConfigMap.md b/docs.overmind.tech/docs/sources/k8s/Types/ConfigMap.md deleted file mode 100644 index 5dd4e332..00000000 --- a/docs.overmind.tech/docs/sources/k8s/Types/ConfigMap.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -title: Config Map -sidebar_label: ConfigMap ---- - -A ConfigMap is a Kubernetes API object used to store non-confidential configuration data in key-value pairs. It allows you to decouple environment-specific configuration from your container images so that the same image can be reused in different environments with different settings. 
Pods and other Kubernetes workloads can consume the data held in a ConfigMap as environment variables, command-line arguments or configuration files mounted into a volume. For an in-depth overview, see the official documentation: https://kubernetes.io/docs/concepts/configuration/configmap/ - -**Terrafrom Mappings:** - -- `kubernetes_config_map_v1.metadata[0].name` -- `kubernetes_config_map.metadata[0].name` - -## Supported Methods - -- `GET`: Get a Config Map by name -- `LIST`: List all Config Maps -- `SEARCH`: Search for a Config Map using the ListOptions JSON format e.g. `{"labelSelector": "app=wordpress"}` diff --git a/docs.overmind.tech/docs/sources/k8s/Types/CronJob.md b/docs.overmind.tech/docs/sources/k8s/Types/CronJob.md deleted file mode 100644 index 7dc0e2d2..00000000 --- a/docs.overmind.tech/docs/sources/k8s/Types/CronJob.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -title: Cron Job -sidebar_label: CronJob ---- - -A Kubernetes **CronJob** is a higher-level controller responsible for running a Job object on a repeating schedule, expressed in standard _cron_ syntax. It is typically used for routine, time-based tasks such as database backups, report generation, and regular housekeeping activities inside a cluster. The controller automatically creates the underlying Job at the scheduled time, monitors its execution and, depending on the configuration, retains or cleans up finished Jobs and their Pods. For a full description of the resource’s behaviour and available fields, see the official Kubernetes documentation: https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/ - -**Terrafrom Mappings:** - -- `kubernetes_cron_job_v1.metadata[0].name` -- `kubernetes_cron_job.metadata[0].name` - -## Supported Methods - -- `GET`: Get a Cron Job by name -- `LIST`: List all Cron Jobs -- `SEARCH`: Search for a Cron Job using the ListOptions JSON format e.g. 
`{"labelSelector": "app=wordpress"}` diff --git a/docs.overmind.tech/docs/sources/k8s/Types/DaemonSet.md b/docs.overmind.tech/docs/sources/k8s/Types/DaemonSet.md deleted file mode 100644 index b04661a4..00000000 --- a/docs.overmind.tech/docs/sources/k8s/Types/DaemonSet.md +++ /dev/null @@ -1,18 +0,0 @@ ---- -title: Daemon Set -sidebar_label: DaemonSet ---- - -A Kubernetes **DaemonSet** ensures that a copy of a specified Pod is running on every (or a selected subset of) node(s) in the cluster. It is commonly used for cluster-wide services such as log collectors, monitoring agents, or network proxies that must be present on each node. When nodes are added to the cluster, the DaemonSet automatically schedules the Pod on the new nodes; when nodes are removed, the Pods are garbage-collected. -For a full description, see the official Kubernetes documentation: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/ - -**Terrafrom Mappings:** - -- `kubernetes_daemon_set_v1.metadata[0].name` -- `kubernetes_daemonset.metadata[0].name` - -## Supported Methods - -- `GET`: Get a Daemon Set by name -- `LIST`: List all Daemon Sets -- `SEARCH`: Search for a Daemon Set using the ListOptions JSON format e.g. `{"labelSelector": "app=wordpress"}` diff --git a/docs.overmind.tech/docs/sources/k8s/Types/Deployment.md b/docs.overmind.tech/docs/sources/k8s/Types/Deployment.md deleted file mode 100644 index a11e2ab6..00000000 --- a/docs.overmind.tech/docs/sources/k8s/Types/Deployment.md +++ /dev/null @@ -1,24 +0,0 @@ ---- -title: Deployment -sidebar_label: Deployment ---- - -A Deployment is a higher-level Kubernetes workload resource that declaratively manages a set of identical Pods by creating and maintaining the appropriate number of ReplicaSets. With a Deployment you describe the desired state—such as how many replicas should be running or which Pod template to use—and the Kubernetes control plane continually works to bring the actual state in line with that specification. 
Deployments support rolling updates, rollbacks, and pausing/resuming of updates, making them the most common mechanism for managing stateless applications on Kubernetes clusters. -For the complete specification see the official Kubernetes documentation: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/ - -**Terrafrom Mappings:** - -- `kubernetes_deployment_v1.metadata[0].name` -- `kubernetes_deployment.metadata[0].name` - -## Supported Methods - -- `GET`: Get a Deployment by name -- `LIST`: List all Deployments -- `SEARCH`: Search for a Deployment using the ListOptions JSON format e.g. `{"labelSelector": "app=wordpress"}` - -## Possible Links - -### [`ReplicaSet`](/sources/k8s/Types/ReplicaSet) - -Each Deployment automatically creates and owns one or more ReplicaSets. The ReplicaSet is responsible for keeping the specified number of Pod replicas running, while the Deployment supervises the ReplicaSets, deciding when to create new ones or scale them to facilitate updates or rollbacks. diff --git a/docs.overmind.tech/docs/sources/k8s/Types/EndpointSlice.md b/docs.overmind.tech/docs/sources/k8s/Types/EndpointSlice.md deleted file mode 100644 index 9c9338aa..00000000 --- a/docs.overmind.tech/docs/sources/k8s/Types/EndpointSlice.md +++ /dev/null @@ -1,40 +0,0 @@ ---- -title: Endpoint Slice -sidebar_label: EndpointSlice ---- - -EndpointSlices provide a scalable and extensible way of tracking network endpoints that back a Kubernetes Service. Each slice contains a list of IP addresses and ports together with optional topology information such as the Node on which each endpoint is running. EndpointSlices replace the legacy Endpoints object for large clusters and are automatically created and managed by the control plane when a Service is defined. 
-For full details see the official Kubernetes documentation: https://kubernetes.io/docs/concepts/services-networking/endpoint-slices/ - -**Terrafrom Mappings:** - -- `kubernetes_endpoints_slice_v1.metadata[0].name` -- `kubernetes_endpoints_slice.metadata[0].name` - -## Supported Methods - -- `GET`: Get a EndpointSlice by name -- `LIST`: List all EndpointSlices -- `SEARCH`: Search for a EndpointSlice using the ListOptions JSON format e.g. `{"labelSelector": "app=wordpress"}` - -## Possible Links - -### [`Node`](/sources/k8s/Types/Node) - -Each endpoint within an EndpointSlice may include a `nodeName` or topology label indicating the Node that hosts the backing Pod. Overmind links the slice to those Nodes so you can see which machines will receive traffic for the Service. - -### [`Pod`](/sources/k8s/Types/Pod) - -Endpoints usually correspond to Pod IPs. By linking EndpointSlices to the underlying Pods, Overmind allows you to trace from a Service to the exact workloads that will handle requests. - -### [`dns`](/sources/stdlib/Types/dns) - -When Kubernetes populates cluster DNS (e.g. `my-service.my-namespace.svc.cluster.local`) it ultimately resolves to the addresses listed in the Service’s EndpointSlices. Linking to DNS records shows how a name queried by applications maps to concrete endpoints. - -### [`ip`](/sources/aws/Types/networkmanager-network-resource-relationship) - -EndpointSlices store one or more IPv4/IPv6 addresses for each endpoint. These addresses are linked so that you can follow a path from a Service to the raw IPs that will be contacted, helping to assess network-level reachability and risk. - -### [`Service`](/sources/k8s/Types/Service) - -Every EndpointSlice carries a `kubernetes.io/service-name` label identifying the Service it belongs to. Overmind reads this label and links the EndpointSlice back to its parent Service, completing the bidirectional relationship in the infrastructure graph. 
diff --git a/docs.overmind.tech/docs/sources/k8s/Types/Endpoints.md b/docs.overmind.tech/docs/sources/k8s/Types/Endpoints.md deleted file mode 100644 index 34e90580..00000000 --- a/docs.overmind.tech/docs/sources/k8s/Types/Endpoints.md +++ /dev/null @@ -1,31 +0,0 @@ ---- -title: Endpoints -sidebar_label: Endpoints ---- - -An Endpoint in Kubernetes represents the network locations (IP address + port) that actually implement a Service. While a Service is an abstract front-end, the corresponding Endpoints object keeps the ever-changing list of Pods that are ready to receive traffic. See the official Kubernetes documentation for full details: https://kubernetes.io/docs/concepts/services-networking/service/#endpoints - -**Terrafrom Mappings:** - -- `kubernetes_endpoints.metadata[0].name` -- `kubernetes_endpoints_v1.metadata[0].name` - -## Supported Methods - -- `GET`: Get a Endpoints by name -- `LIST`: List all Endpointss -- `SEARCH`: Search for a Endpoints using the ListOptions JSON format e.g. `{"labelSelector": "app=wordpress"}` - -## Possible Links - -### [`Node`](/sources/k8s/Types/Node) - -Each endpoint address can include a `nodeName` field indicating the Node on which the backing Pod is running. Overmind therefore links the Endpoints object to the Node(s) that currently host its backing Pods, helping you understand on which worker machines traffic will land. - -### [`ip`](/sources/aws/Types/networkmanager-network-resource-relationship) - -Every endpoint entry exposes an IP address. Overmind extracts these IPs and links them, allowing you to trace the path from the abstract Service through the Endpoint to the concrete network address that will receive traffic. - -### [`Pod`](/sources/k8s/Types/Pod) - -Endpoint addresses typically contain a `targetRef` that points to the Pod providing the Service. 
Overmind links the Endpoints object to these Pods so you can quickly inspect the health, labels, and configuration of the workloads currently registered behind the Service. diff --git a/docs.overmind.tech/docs/sources/k8s/Types/HorizontalPodAutoscaler.md b/docs.overmind.tech/docs/sources/k8s/Types/HorizontalPodAutoscaler.md deleted file mode 100644 index 393d4521..00000000 --- a/docs.overmind.tech/docs/sources/k8s/Types/HorizontalPodAutoscaler.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -title: Horizontal Pod Autoscaler -sidebar_label: HorizontalPodAutoscaler ---- - -The Horizontal Pod Autoscaler (HPA) is a native Kubernetes controller that automatically increases or decreases the number of running Pods in a Deployment, ReplicaSet, StatefulSet, or other scalable resource so that observed resource consumption stays close to a user-defined target. It polls the Kubernetes Metrics Server (or a custom/external metrics API) at a regular interval, compares CPU, memory, or arbitrary custom metrics against the specified thresholds, and then adjusts the `spec.replicas` field of the target workload accordingly. This enables applications to meet fluctuating demand without manual intervention or unnecessary over-provisioning, while still preventing sudden traffic spikes from overwhelming the cluster. You can read the full upstream specification in the official Kubernetes documentation: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/. - -**Terrafrom Mappings:** - -- `kubernetes_horizontal_pod_autoscaler_v2.metadata[0].name` - -## Supported Methods - -- `GET`: Get a Horizontal Pod Autoscaler by name -- `LIST`: List all Horizontal Pod Autoscalers -- `SEARCH`: Search for a Horizontal Pod Autoscaler using the ListOptions JSON format e.g. 
`{"labelSelector": "app=wordpress"}` diff --git a/docs.overmind.tech/docs/sources/k8s/Types/Ingress.md b/docs.overmind.tech/docs/sources/k8s/Types/Ingress.md deleted file mode 100644 index 892c1746..00000000 --- a/docs.overmind.tech/docs/sources/k8s/Types/Ingress.md +++ /dev/null @@ -1,27 +0,0 @@ ---- -title: Ingress -sidebar_label: Ingress ---- - -An Ingress is a Kubernetes resource that manages external access to services within a cluster, typically HTTP and HTTPS traffic. It defines a set of routing rules that map incoming requests (based on hostnames and URL paths) to backend `Service` resources. By centralising traffic management, it allows fine-grained control over features such as virtual hosting, TLS termination and path-based routing without requiring each service to expose its own `Service` of type `LoadBalancer` or `NodePort`. -Official documentation: https://kubernetes.io/docs/concepts/services-networking/ingress/ - -**Terrafrom Mappings:** - -- `kubernetes_ingress_v1.metadata[0].name` - -## Supported Methods - -- `GET`: Get an Ingress by name -- `LIST`: List all Ingresses -- `SEARCH`: Search for an Ingress using the `ListOptions` JSON format, e.g. `{"labelSelector": "app=wordpress"}` - -## Possible Links - -### [`Service`](/sources/k8s/Types/Service) - -An Ingress routes external traffic to one or more backend `Service` objects. Each rule in the Ingress specification references a service name and port; therefore, Overmind links an Ingress to the `Service`(s) it targets so that you can trace how requests reach your application. - -### [`dns`](/sources/stdlib/Types/dns) - -The hostnames declared in an Ingress must be resolvable via DNS so that clients can reach the cluster’s ingress point. Overmind links these hostnames to their corresponding DNS records (A, AAAA or CNAME) to show whether the necessary records exist and to surface any misconfigurations. 
diff --git a/docs.overmind.tech/docs/sources/k8s/Types/Job.md b/docs.overmind.tech/docs/sources/k8s/Types/Job.md deleted file mode 100644 index 6917f44b..00000000 --- a/docs.overmind.tech/docs/sources/k8s/Types/Job.md +++ /dev/null @@ -1,23 +0,0 @@ ---- -title: Job -sidebar_label: Job ---- - -A Kubernetes Job is a controller that runs one-off or batch tasks to completion. It creates one or more Pods and tracks their execution until the specified number have finished successfully. Jobs are ideal for database migrations, data processing, or any workload that needs to run to completion rather than persist indefinitely. A Job retries failed Pods according to its back-off policy and is marked as complete once all Pods exit successfully. For more details, see the official Kubernetes documentation: https://kubernetes.io/docs/concepts/workloads/controllers/job/ - -**Terrafrom Mappings:** - -- `kubernetes_job.metadata[0].name` -- `kubernetes_job_v1.metadata[0].name` - -## Supported Methods - -- `GET`: Get a Job by name -- `LIST`: List all Jobs -- `SEARCH`: Search for a Job using the ListOptions JSON format e.g. `{"labelSelector": "app=wordpress"}` - -## Possible Links - -### [`Pod`](/sources/k8s/Types/Pod) - -A Job spawns one or more Pods to run its workload; each Pod created by the Job is linked back to it via the Job’s `ownerReferences` metadata. diff --git a/docs.overmind.tech/docs/sources/k8s/Types/LimitRange.md b/docs.overmind.tech/docs/sources/k8s/Types/LimitRange.md deleted file mode 100644 index 0569cf33..00000000 --- a/docs.overmind.tech/docs/sources/k8s/Types/LimitRange.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -title: Limit Range -sidebar_label: LimitRange ---- - -A Kubernetes LimitRange is a namespace-level policy object that defines default, minimum, and maximum compute-resource constraints (such as CPU, memory, and ephemeral storage) that apply to Pods or individual Containers created in that namespace. 
By enforcing these boundaries, a LimitRange prevents a single workload from monopolising cluster resources and ensures that every workload has sensible defaults if the user omits explicit resource requests or limits. See the official Kubernetes documentation for full details: https://kubernetes.io/docs/concepts/policy/limit-range/ - -**Terrafrom Mappings:** - -- `kubernetes_limit_range_v1.metadata[0].name` -- `kubernetes_limit_range.metadata[0].name` - -## Supported Methods - -- `GET`: Get a Limit Range by name -- `LIST`: List all Limit Ranges -- `SEARCH`: Search for a Limit Range using the ListOptions JSON format e.g. `{"labelSelector": "app=wordpress"}` diff --git a/docs.overmind.tech/docs/sources/k8s/Types/NetworkPolicy.md b/docs.overmind.tech/docs/sources/k8s/Types/NetworkPolicy.md deleted file mode 100644 index d8270b02..00000000 --- a/docs.overmind.tech/docs/sources/k8s/Types/NetworkPolicy.md +++ /dev/null @@ -1,24 +0,0 @@ ---- -title: Network Policy -sidebar_label: NetworkPolicy ---- - -A Kubernetes **NetworkPolicy** is a namespaced resource that controls how groups of Pods are allowed to communicate with each other and with other network endpoints. By defining ingress and/or egress rules that match Pods via label selectors, it provides fine-grained, declarative network segmentation inside the cluster, helping operators restrict unintended traffic and harden workloads. If no NetworkPolicy targets a Pod, that Pod is non-isolated and can both send and receive traffic to and from any source. 
-Official documentation: https://kubernetes.io/docs/concepts/services-networking/network-policies/ - -**Terrafrom Mappings:** - -- `kubernetes_network_policy.metadata[0].name` -- `kubernetes_network_policy_v1.metadata[0].name` - -## Supported Methods - -- ~~`GET`~~ -- ~~`LIST`~~ -- ~~`SEARCH`~~ - -## Possible Links - -### [`Pod`](/sources/k8s/Types/Pod) - -A NetworkPolicy selects one or more Pods (in the same namespace) through `podSelector` rules; therefore, each referenced Pod can be linked to the NetworkPolicy that governs its allowed ingress and egress traffic. diff --git a/docs.overmind.tech/docs/sources/k8s/Types/Node.md b/docs.overmind.tech/docs/sources/k8s/Types/Node.md deleted file mode 100644 index 0e85a370..00000000 --- a/docs.overmind.tech/docs/sources/k8s/Types/Node.md +++ /dev/null @@ -1,30 +0,0 @@ ---- -title: Node -sidebar_label: Node ---- - -A Kubernetes **Node** is a worker machine (virtual or physical) that runs the Pods making up a cluster’s workloads. Each Node contains the services necessary to run containers, including the container runtime, kubelet and kube-proxy, and is managed by the Kubernetes control plane. For more details see the official Kubernetes documentation: https://kubernetes.io/docs/concepts/architecture/nodes/ - -**Terrafrom Mappings:** - -- `kubernetes_node_taint.metadata[0].name` - -## Supported Methods - -- `GET`: Get a Node by name -- `LIST`: List all Nodes -- `SEARCH`: Search for a Node using the ListOptions JSON format e.g. `{"labelSelector": "app=wordpress"}` - -## Possible Links - -### [`dns`](/sources/stdlib/Types/dns) - -A Node is discoverable in the cluster via its DNS entry. Overmind links the Node to its corresponding DNS record(s) so you can trace how applications or services resolve to this worker machine. - -### [`ip`](/sources/aws/Types/networkmanager-network-resource-relationship) - -Every Node advertises one or more internal and external IP addresses. 
Overmind establishes a link between the Node resource and these IP objects to surface network reachability or exposure risks. - -### [`ec2-volume`](/sources/aws/Types/ec2-volume) - -When Kubernetes is running on AWS, Nodes (EC2 instances) may have EBS volumes attached to provide persistent storage for Pods. Overmind links the Node to the `ec2-volume` resources it mounts, allowing you to evaluate storage-related blast radius or compliance concerns. diff --git a/docs.overmind.tech/docs/sources/k8s/Types/PersistentVolume.md b/docs.overmind.tech/docs/sources/k8s/Types/PersistentVolume.md deleted file mode 100644 index e79d6569..00000000 --- a/docs.overmind.tech/docs/sources/k8s/Types/PersistentVolume.md +++ /dev/null @@ -1,32 +0,0 @@ ---- -title: Persistent Volume -sidebar_label: PersistentVolume ---- - -A Kubernetes PersistentVolume (PV) is a cluster-wide object that represents a piece of storage that has been provisioned either statically by an administrator or dynamically via a StorageClass. Unlike ephemeral volumes that are tied to the lifetime of a Pod, a PV exists independently and can outlive any consumer Pods, enabling stateful workloads to retain data across rescheduling or restarts. Each PV encapsulates details such as capacity, access modes, reclaim policy and the specifics of the underlying storage medium (for example, AWS EBS, NFS, or a CSI-provisioned backend). -Official documentation: https://kubernetes.io/docs/concepts/storage/persistent-volumes/ - -**Terrafrom Mappings:** - -- `kubernetes_persistent_volume.metadata[0].name` -- `kubernetes_persistent_volume_v1.metadata[0].name` - -## Supported Methods - -- `GET`: Get a PersistentVolume by name -- `LIST`: List all PersistentVolumes -- `SEARCH`: Search for a PersistentVolume using the ListOptions JSON format e.g. 
`{"labelSelector": "app=wordpress"}` - -## Possible Links - -### [`ec2-volume`](/sources/aws/Types/ec2-volume) - -A PersistentVolume whose `spec.awsElasticBlockStore` (or CSI driver) references an AWS EBS disk ultimately maps to an EC2 volume. Overmind links the PV to the underlying `ec2-volume` so you can assess risks such as deletion protection, encryption status or capacity limits of the actual block device. - -### [`efs-access-point`](/sources/aws/Types/efs-access-point) - -When a PV is backed by Amazon EFS via the EFS CSI driver, it mounts the file system through a specific EFS Access Point. Linking the PV to the corresponding `efs-access-point` lets you trace permissions, throughput and network configurations that could affect the workload’s storage availability. - -### [`StorageClass`](/sources/k8s/Types/StorageClass) - -Most dynamically provisioned PVs include a `storageClassName` field that references the StorageClass used to create them. By linking to the `StorageClass`, Overmind shows the provisioning parameters, reclaim policy and allowed topologies that govern how this PV was created and how it behaves when released. diff --git a/docs.overmind.tech/docs/sources/k8s/Types/PersistentVolumeClaim.md b/docs.overmind.tech/docs/sources/k8s/Types/PersistentVolumeClaim.md deleted file mode 100644 index c155e143..00000000 --- a/docs.overmind.tech/docs/sources/k8s/Types/PersistentVolumeClaim.md +++ /dev/null @@ -1,24 +0,0 @@ ---- -title: Persistent Volume Claim -sidebar_label: PersistentVolumeClaim ---- - -A PersistentVolumeClaim (PVC) in Kubernetes is a user-defined request for storage. Applications declare the amount of space, access mode and other requirements they need through a PVC, and Kubernetes finds (or waits for) a matching PersistentVolume (PV) to satisfy that request. Once bound, the PVC provides a stable, pod-agnostic handle for the underlying storage, meaning workloads can be rescheduled across nodes without losing data. 
-For a full explanation see the Kubernetes documentation: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims - -**Terrafrom Mappings:** - -- `kubernetes_persistent_volume_claim.metadata[0].name` -- `kubernetes_persistent_volume_claim_v1.metadata[0].name` - -## Supported Methods - -- `GET`: Get a PersistentVolumeClaim by name -- `LIST`: List all PersistentVolumeClaims -- `SEARCH`: Search for a PersistentVolumeClaim using the ListOptions JSON format e.g. `{"labelSelector": "app=wordpress"}` - -## Possible Links - -### [`PersistentVolume`](/sources/k8s/Types/PersistentVolume) - -A PVC is bound to a PersistentVolume that satisfies its storage class, capacity and access-mode requirements. Overmind records this binding so that from a PVC you can quickly navigate to the backing PV and assess its characteristics and any associated risks. diff --git a/docs.overmind.tech/docs/sources/k8s/Types/Pod.md b/docs.overmind.tech/docs/sources/k8s/Types/Pod.md deleted file mode 100644 index f0945bf7..00000000 --- a/docs.overmind.tech/docs/sources/k8s/Types/Pod.md +++ /dev/null @@ -1,51 +0,0 @@ ---- -title: Pod -sidebar_label: Pod ---- - -A Kubernetes Pod is the smallest deployable unit in the Kubernetes object model. It represents one or more containers that share storage, network and a specification for how to run the containers. Pods are ephemeral and are usually created and managed by higher-level controllers such as Deployments or StatefulSets. See the official Kubernetes documentation for full details: https://kubernetes.io/docs/concepts/workloads/pods/ - -**Terrafrom Mappings:** - -- `kubernetes_pod.metadata[0].name` -- `kubernetes_pod_v1.metadata[0].name` - -## Supported Methods - -- `GET`: Get a Pod by name -- `LIST`: List all Pods -- `SEARCH`: Search for a Pod using the ListOptions JSON format e.g. 
`{"labelSelector": "app=wordpress"}` - -## Possible Links - -### [`ConfigMap`](/sources/k8s/Types/ConfigMap) - -Pods can consume ConfigMaps as environment variables or mount them as files, allowing configuration data to be injected without rebuilding container images. - -### [`ec2-volume`](/sources/aws/Types/ec2-volume) - -When a Pod mounts a PersistentVolume backed by an AWS Elastic Block Store (EBS) volume, that underlying storage appears here as an `ec2-volume` link, connecting the workload to the physical disk resource in AWS. - -### [`dns`](/sources/stdlib/Types/dns) - -Each Pod receives an internal DNS entry (`..pod.cluster.local`) and may resolve or be resolved by other services; Overmind records this relationship so you can trace DNS dependencies. - -### [`ip`](/sources/aws/Types/networkmanager-network-resource-relationship) - -At runtime every Pod is assigned an IP address. This link surfaces the relationship between the Kubernetes object and the network IP resource managed by the underlying cloud networking layer. - -### [`PersistentVolumeClaim`](/sources/k8s/Types/PersistentVolumeClaim) - -Pods declare one or more PersistentVolumeClaims in their `volumes` section to obtain persistent storage. The link shows which claims are attached to the Pod. - -### [`PriorityClass`](/sources/k8s/Types/PriorityClass) - -A Pod may specify a `priorityClassName`; the associated PriorityClass influences scheduling order and pre-emption behaviour. This link ties the Pod to its scheduling priority. - -### [`Secret`](/sources/k8s/Types/Secret) - -Secrets can be mounted as files or injected as environment variables into a Pod, for example to provide credentials or TLS keys. This link identifies every Secret the Pod references. - -### [`ServiceAccount`](/sources/k8s/Types/ServiceAccount) - -Each Pod runs under a ServiceAccount that defines its Kubernetes API permissions and, in many cases, its cloud IAM identity. The link shows the ServiceAccount used by the Pod. 
diff --git a/docs.overmind.tech/docs/sources/k8s/Types/PodDisruptionBudget.md b/docs.overmind.tech/docs/sources/k8s/Types/PodDisruptionBudget.md deleted file mode 100644 index 350f3102..00000000 --- a/docs.overmind.tech/docs/sources/k8s/Types/PodDisruptionBudget.md +++ /dev/null @@ -1,23 +0,0 @@ ---- -title: Pod Disruption Budget -sidebar_label: PodDisruptionBudget ---- - -A PodDisruptionBudget (PDB) is a Kubernetes policy object that limits the number of pods of a replicated application that can be unavailable during voluntary disruptions such as a node drain, cluster upgrade, or a user-initiated rolling update. By defining either a `minAvailable` or `maxUnavailable` threshold, it helps you maintain a desired level of service availability while still allowing the platform to carry out maintenance tasks. -See the official documentation for full details: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ - -**Terrafrom Mappings:** - -- `kubernetes_pod_disruption_budget_v1.metadata[0].name` - -## Supported Methods - -- `GET`: Get a PodDisruptionBudget by name -- `LIST`: List all PodDisruptionBudgets -- `SEARCH`: Search for a PodDisruptionBudget using the ListOptions JSON format e.g. `{"labelSelector": "app=wordpress"}` - -## Possible Links - -### [`Pod`](/sources/k8s/Types/Pod) - -A PodDisruptionBudget references pods via a label selector defined in `spec.selector`. Any pod whose labels match this selector is governed by the PDB, meaning it counts towards the availability calculations and is protected from eviction when the defined disruption limits would be exceeded. 
diff --git a/docs.overmind.tech/docs/sources/k8s/Types/PriorityClass.md b/docs.overmind.tech/docs/sources/k8s/Types/PriorityClass.md deleted file mode 100644 index 51c473e4..00000000 --- a/docs.overmind.tech/docs/sources/k8s/Types/PriorityClass.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -title: Priority Class -sidebar_label: PriorityClass ---- - -A Kubernetes `PriorityClass` is a cluster-wide, non-namespaced resource that defines the relative importance of Pods. Each PriorityClass carries an integer value; the higher the value, the earlier the scheduler will try to place Pods that reference it. PriorityClasses are also used during pre-emption: when the cluster is under resource pressure, Pods with lower priority may be evicted in favour of higher-priority Pods. For full details, refer to the official Kubernetes documentation: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass - -**Terrafrom Mappings:** - -- `kubernetes_priority_class_v1.metadata[0].name` -- `kubernetes_priority_class.metadata[0].name` - -## Supported Methods - -- `GET`: Get a Priority Class by name -- `LIST`: List all Priority Classs -- `SEARCH`: Search for a Priority Class using the ListOptions JSON format e.g. `{"labelSelector": "app=wordpress"}` diff --git a/docs.overmind.tech/docs/sources/k8s/Types/ReplicaSet.md b/docs.overmind.tech/docs/sources/k8s/Types/ReplicaSet.md deleted file mode 100644 index d5723a62..00000000 --- a/docs.overmind.tech/docs/sources/k8s/Types/ReplicaSet.md +++ /dev/null @@ -1,19 +0,0 @@ ---- -title: Replica Set -sidebar_label: ReplicaSet ---- - -A ReplicaSet is a Kubernetes controller whose purpose is to maintain a stable set of identical Pods running at any given time. By continuously watching the cluster state, it ensures that the desired number of Pod replicas are present: if one is deleted or becomes unhealthy, the ReplicaSet will automatically create a replacement. 
ReplicaSets are most commonly created implicitly by Deployments, but they can also be defined directly. -For full details, see the official Kubernetes documentation: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset/ - -## Supported Methods - -- `GET`: Get a ReplicaSet by name -- `LIST`: List all ReplicaSets -- `SEARCH`: Search for a ReplicaSet using the ListOptions JSON format e.g. `{"labelSelector": "app=wordpress"}` - -## Possible Links - -### [`Pod`](/sources/k8s/Types/Pod) - -A ReplicaSet owns and manages a collection of Pods that match its selector. Each linked Pod represents one replica maintained by the ReplicaSet; scaling or health-checking operations performed by the ReplicaSet directly affect these Pods. diff --git a/docs.overmind.tech/docs/sources/k8s/Types/ReplicationController.md b/docs.overmind.tech/docs/sources/k8s/Types/ReplicationController.md deleted file mode 100644 index 3780e9de..00000000 --- a/docs.overmind.tech/docs/sources/k8s/Types/ReplicationController.md +++ /dev/null @@ -1,23 +0,0 @@ ---- -title: Replication Controller -sidebar_label: ReplicationController ---- - -A ReplicationController is a legacy Kubernetes workload controller whose job is to ensure that a specified number of pod replicas are running at any one time. If a pod crashes or is deleted, the ReplicationController creates a replacement; if too many exist, it deletes the excess. Although superseded by ReplicaSets and Deployments, ReplicationControllers are still respected by the Kubernetes API and may be encountered in older manifests. 
Further information can be found in the official Kubernetes documentation: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/ - -**Terrafrom Mappings:** - -- `kubernetes_replication_controller.metadata[0].name` -- `kubernetes_replication_controller_v1.metadata[0].name` - -## Supported Methods - -- `GET`: Get a ReplicationController by name -- `LIST`: List all ReplicationControllers -- `SEARCH`: Search for a ReplicationController using the ListOptions JSON format e.g. `{"labelSelector": "app=wordpress"}` - -## Possible Links - -### [`Pod`](/sources/k8s/Types/Pod) - -A ReplicationController manages the lifecycle of a homogeneous set of Pods defined by its `spec.template`. Overmind links a ReplicationController to each Pod it owns via the `ownerReference`, enabling you to trace from controller to running workload (and vice-versa) when assessing deployment risk. diff --git a/docs.overmind.tech/docs/sources/k8s/Types/ResourceQuota.md b/docs.overmind.tech/docs/sources/k8s/Types/ResourceQuota.md deleted file mode 100644 index 5388ec70..00000000 --- a/docs.overmind.tech/docs/sources/k8s/Types/ResourceQuota.md +++ /dev/null @@ -1,18 +0,0 @@ ---- -title: Resource Quota -sidebar_label: ResourceQuota ---- - -A Kubernetes **ResourceQuota** object allows cluster administrators to limit the aggregate consumption of compute resources (such as CPU and memory), storage, and object counts (Pods, Services, PersistentVolumeClaims, etc.) within a namespace. By defining upper bounds, a ResourceQuota helps prevent any single team or workload from exhausting shared cluster capacity, and encourages fair usage across tenants. When a namespace has one or more quotas in place, resources are checked at creation or update time; if the requested amount would exceed the quota the operation is rejected. 
-Official documentation: https://kubernetes.io/docs/concepts/policy/resource-quotas/ - -**Terrafrom Mappings:** - -- `kubernetes_resource_quota_v1.metadata[0].name` -- `kubernetes_resource_quota.metadata[0].name` - -## Supported Methods - -- `GET`: Get a Resource Quota by name -- `LIST`: List all Resource Quotas -- `SEARCH`: Search for a Resource Quota using the ListOptions JSON format e.g. `{"labelSelector": "app=wordpress"}` diff --git a/docs.overmind.tech/docs/sources/k8s/Types/Role.md b/docs.overmind.tech/docs/sources/k8s/Types/Role.md deleted file mode 100644 index 38f16e56..00000000 --- a/docs.overmind.tech/docs/sources/k8s/Types/Role.md +++ /dev/null @@ -1,18 +0,0 @@ ---- -title: Role -sidebar_label: Role ---- - -A Kubernetes Role is an RBAC (Role-Based Access Control) resource that defines a set of permissions, expressed as rules, that apply within a single namespace. By binding a Role to a Subject (user, group, or service account) you control which verbs (get, list, create, delete, etc.) can be performed on which API resources inside that namespace. Roles are therefore central to enforcing the principle of least privilege in cluster security. -See the official Kubernetes documentation for full details: https://kubernetes.io/docs/reference/access-authn-authz/rbac/#role-and-clusterrole - -**Terrafrom Mappings:** - -- `kubernetes_role_v1.metadata[0].name` -- `kubernetes_role.metadata[0].name` - -## Supported Methods - -- `GET`: Get a Role by name -- `LIST`: List all Roles -- `SEARCH`: Search for a Role using the ListOptions JSON format e.g. 
`{"labelSelector": "app=wordpress"}` diff --git a/docs.overmind.tech/docs/sources/k8s/Types/RoleBinding.md b/docs.overmind.tech/docs/sources/k8s/Types/RoleBinding.md deleted file mode 100644 index 428b1dcf..00000000 --- a/docs.overmind.tech/docs/sources/k8s/Types/RoleBinding.md +++ /dev/null @@ -1,31 +0,0 @@ ---- -title: Role Binding -sidebar_label: RoleBinding ---- - -A Kubernetes **RoleBinding** grants the permissions defined in a Role (or ClusterRole) to a set of subjects—users, groups or service accounts—within a single namespace. It is a cornerstone object in Kubernetes RBAC, controlling who can perform which actions on namespaced resources. See the official Kubernetes documentation for full details: https://kubernetes.io/docs/reference/access-authn-authz/rbac/#rolebinding-and-clusterrolebinding - -**Terrafrom Mappings:** - -- `kubernetes_role_binding.metadata[0].name` -- `kubernetes_role_binding_v1.metadata[0].name` - -## Supported Methods - -- `GET`: Get a RoleBinding by name -- `LIST`: List all RoleBindings -- `SEARCH`: Search for a RoleBinding using the ListOptions JSON format e.g. `{"labelSelector": "app=wordpress"}` - -## Possible Links - -### [`Role`](/sources/k8s/Types/Role) - -The RoleBinding points to a Role via the `roleRef` field. This link lets Overmind trace which set of rules (verbs, resources, API groups) will be granted when the RoleBinding is applied. - -### [`ClusterRole`](/sources/k8s/Types/ClusterRole) - -Although scoped to a namespace, a RoleBinding can reference a ClusterRole instead of a Role. Overmind links the two so you can see when cluster-wide permission sets are being delegated into a namespace. - -### [`ServiceAccount`](/sources/k8s/Types/ServiceAccount) - -Service accounts commonly appear in the `subjects` list of a RoleBinding. Linking these enables Overmind to reveal which workloads (pods using the service account) will inherit the referenced permissions. 
diff --git a/docs.overmind.tech/docs/sources/k8s/Types/Secret.md b/docs.overmind.tech/docs/sources/k8s/Types/Secret.md deleted file mode 100644 index 1c504e2f..00000000 --- a/docs.overmind.tech/docs/sources/k8s/Types/Secret.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -title: Secret -sidebar_label: Secret ---- - -A Kubernetes Secret is an object that holds a small amount of sensitive data—such as passwords, tokens, or keys—so that it can be used by Pods without being written to image or configuration files. Storing confidential information in a Secret allows you to keep it separate from application code and to control how and when it is exposed to the running workload. For a detailed overview, see the official Kubernetes documentation: https://kubernetes.io/docs/concepts/configuration/secret/ - -**Terrafrom Mappings:** - -- `kubernetes_secret_v1.metadata[0].name` -- `kubernetes_secret.metadata[0].name` - -## Supported Methods - -- `GET`: Get a Secret by name -- `LIST`: List all Secrets -- `SEARCH`: Search for a Secret using the ListOptions JSON format e.g. `{"labelSelector": "app=wordpress"}` diff --git a/docs.overmind.tech/docs/sources/k8s/Types/Service.md b/docs.overmind.tech/docs/sources/k8s/Types/Service.md deleted file mode 100644 index 48ad1297..00000000 --- a/docs.overmind.tech/docs/sources/k8s/Types/Service.md +++ /dev/null @@ -1,40 +0,0 @@ ---- -title: Service -sidebar_label: Service ---- - -A Kubernetes Service is an abstract resource that defines a logical set of Pods and the policy by which they can be accessed. It provides a stable virtual IP (ClusterIP), DNS entry and, depending on the type, can expose workloads internally within the cluster or externally to the Internet through NodePorts or cloud load-balancers. Services decouple network identity and discovery from the underlying Pods, allowing them to scale up, down, or be replaced without changing the connection endpoint. 
-For full details see the official Kubernetes documentation: https://kubernetes.io/docs/concepts/services-networking/service/ - -**Terrafrom Mappings:** - -- `kubernetes_service.metadata[0].name` -- `kubernetes_service_v1.metadata[0].name` - -## Supported Methods - -- `GET`: Get a Service by name -- `LIST`: List all Services -- `SEARCH`: Search for a Service using the ListOptions JSON format e.g. `{"labelSelector": "app=wordpress"}` - -## Possible Links - -### [`Pod`](/sources/k8s/Types/Pod) - -A Service selects one or more Pods via label selectors and forwards traffic to them. Overmind links Services to the Pods that currently match their selector so you can see which workloads will receive traffic. - -### [`ip`](/sources/aws/Types/networkmanager-network-resource-relationship) - -Each Service is assigned one or more IP addresses (ClusterIP, ExternalIP, LoadBalancer IP). Overmind creates links to these IP resources to show the concrete network endpoints associated with the Service. - -### [`dns`](/sources/stdlib/Types/dns) - -Kubernetes automatically registers DNS records for every Service (e.g., `my-service.my-namespace.svc.cluster.local`). Overmind links Services to their corresponding DNS entries so you can trace name resolution to the backing workloads. - -### [`Endpoints`](/sources/k8s/Types/Endpoints) - -Each Service creates a corresponding Endpoints object with the same name that lists the IP addresses of the backing Pods. Overmind links Services to their Endpoints so you can see which addresses are currently active. This uses the legacy `core/v1` API and works on all Kubernetes versions. - -### [`EndpointSlice`](/sources/k8s/Types/EndpointSlice) - -Modern Kubernetes clusters create EndpointSlices (labelled with `kubernetes.io/service-name`) as the scalable replacement for Endpoints. Overmind searches for EndpointSlices matching the Service name so you can trace from a Service to the network endpoints that back it on newer clusters. 
diff --git a/docs.overmind.tech/docs/sources/k8s/Types/ServiceAccount.md b/docs.overmind.tech/docs/sources/k8s/Types/ServiceAccount.md deleted file mode 100644 index 5e2ed01b..00000000 --- a/docs.overmind.tech/docs/sources/k8s/Types/ServiceAccount.md +++ /dev/null @@ -1,23 +0,0 @@ ---- -title: Service Account -sidebar_label: ServiceAccount ---- - -A ServiceAccount is a Kubernetes resource that provides an identity to processes running inside Pods, allowing them to authenticate to the Kubernetes API and other services with the minimum privileges required. Each ServiceAccount can be linked to one or more Secrets that store its bearer token or image-pull credentials, and these Secrets are automatically mounted into Pods that specify the ServiceAccount. Further information can be found in the official Kubernetes documentation: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/. - -**Terrafrom Mappings:** - -- `kubernetes_service_account.metadata[0].name` -- `kubernetes_service_account_v1.metadata[0].name` - -## Supported Methods - -- `GET`: Get a ServiceAccount by name -- `LIST`: List all ServiceAccounts -- `SEARCH`: Search for a ServiceAccount using the ListOptions JSON format e.g. `{"labelSelector": "app=wordpress"}` - -## Possible Links - -### [`Secret`](/sources/k8s/Types/Secret) - -A ServiceAccount is associated with Secrets that hold its authentication token or are referenced in `imagePullSecrets`. These Secrets determine how Pods using the ServiceAccount authenticate to the cluster or to external registries, making them critical for understanding access scopes and potential risk. 
diff --git a/docs.overmind.tech/docs/sources/k8s/Types/StatefulSet.md b/docs.overmind.tech/docs/sources/k8s/Types/StatefulSet.md deleted file mode 100644 index e7dd27b9..00000000 --- a/docs.overmind.tech/docs/sources/k8s/Types/StatefulSet.md +++ /dev/null @@ -1,18 +0,0 @@ ---- -title: Stateful Set -sidebar_label: StatefulSet ---- - -A StatefulSet is a Kubernetes workload controller that manages the deployment and scaling of a set of Pods, while guaranteeing the ordering and uniqueness of those Pods. Unlike Deployments, which are optimised for stateless services, StatefulSets are designed for applications that require stable network identities, stable persistent storage and ordered, graceful deployment and scaling. Typical use-cases include databases, distributed filesystems and clustered applications where each replica must be uniquely addressable. -For full details, see the official Kubernetes documentation: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/ - -**Terrafrom Mappings:** - -- `kubernetes_stateful_set_v1.metadata[0].name` -- `kubernetes_stateful_set.metadata[0].name` - -## Supported Methods - -- `GET`: Get a Stateful Set by name -- `LIST`: List all Stateful Sets -- `SEARCH`: Search for a Stateful Set using the ListOptions JSON format e.g. `{"labelSelector": "app=wordpress"}` diff --git a/docs.overmind.tech/docs/sources/k8s/Types/StorageClass.md b/docs.overmind.tech/docs/sources/k8s/Types/StorageClass.md deleted file mode 100644 index 82202e32..00000000 --- a/docs.overmind.tech/docs/sources/k8s/Types/StorageClass.md +++ /dev/null @@ -1,18 +0,0 @@ ---- -title: Storage Class -sidebar_label: StorageClass ---- - -A StorageClass is a cluster-wide Kubernetes resource that defines a “class” or tier of persistent storage that can be requested by workloads. 
Each StorageClass couples a provisioner (for example an AWS EBS driver, a CSI plug-in, or a Ceph back-end) with a set of parameters such as performance characteristics, encryption settings, reclaim policy, and mount options. When a user creates a PersistentVolumeClaim that references a particular `storageClassName`, Kubernetes dynamically provisions a matching PersistentVolume according to the rules in the StorageClass and binds it to the claim. This abstraction lets platform teams expose multiple quality-of-service levels while shielding application teams from underlying infrastructure details. -Official documentation: https://kubernetes.io/docs/concepts/storage/storage-classes/ - -**Terrafrom Mappings:** - -- `kubernetes_storage_class.metadata[0].name` -- `kubernetes_storage_class_v1.metadata[0].name` - -## Supported Methods - -- `GET`: Get a Storage Class by name -- `LIST`: List all Storage Classs -- `SEARCH`: Search for a Storage Class using the ListOptions JSON format e.g. `{"labelSelector": "app=wordpress"}` diff --git a/docs.overmind.tech/docs/sources/k8s/Types/VolumeAttachment.md b/docs.overmind.tech/docs/sources/k8s/Types/VolumeAttachment.md deleted file mode 100644 index 62d7d4c3..00000000 --- a/docs.overmind.tech/docs/sources/k8s/Types/VolumeAttachment.md +++ /dev/null @@ -1,23 +0,0 @@ ---- -title: Volume Attachment -sidebar_label: VolumeAttachment ---- - -A Kubernetes `VolumeAttachment` represents the intent to attach (or detach) a PersistentVolume to a specific Node. It is created and managed automatically by the external CSI attacher or the in-tree volume controller whenever a Pod that uses a PersistentVolume is scheduled. Kubernetes will not make the volume available to the Pod until the corresponding `VolumeAttachment` reports that the attach operation has completed successfully. 
-For full details see the official Kubernetes documentation: https://kubernetes.io/docs/reference/kubernetes-api/config-and-storage-resources/volume-attachment-v1/ - -## Supported Methods - -- `GET`: Get a VolumeAttachment by name -- `LIST`: List all VolumeAttachments -- `SEARCH`: Search for a VolumeAttachment using the ListOptions JSON format e.g. `{"labelSelector": "app=wordpress"}` - -## Possible Links - -### [`PersistentVolume`](/sources/k8s/Types/PersistentVolume) - -`VolumeAttachment.spec.source.persistentVolumeName` holds the name of the PersistentVolume to be attached. Overmind links the `VolumeAttachment` to this `PersistentVolume` so you can trace which physical storage device is being mounted on which node. - -### [`Node`](/sources/k8s/Types/Node) - -`VolumeAttachment.spec.nodeName` identifies the Node where the volume should be attached. Linking `VolumeAttachment` to the `Node` lets you understand which worker machine will host the volume and helps assess the impact of node-specific storage operations. diff --git a/docs.overmind.tech/docs/sources/k8s/_category_.json b/docs.overmind.tech/docs/sources/k8s/_category_.json deleted file mode 100644 index dd873fc2..00000000 --- a/docs.overmind.tech/docs/sources/k8s/_category_.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "label": "Kubernetes", - "position": 2, - "collapsed": true, - "link": { - "type": "generated-index", - "description": "How to integrate your k8s cluster." 
- } -} diff --git a/docs.overmind.tech/docs/sources/k8s/account_settings.png b/docs.overmind.tech/docs/sources/k8s/account_settings.png deleted file mode 100644 index c7e184cb..00000000 Binary files a/docs.overmind.tech/docs/sources/k8s/account_settings.png and /dev/null differ diff --git a/docs.overmind.tech/docs/sources/k8s/api_key.png b/docs.overmind.tech/docs/sources/k8s/api_key.png deleted file mode 100644 index 9fa0623c..00000000 Binary files a/docs.overmind.tech/docs/sources/k8s/api_key.png and /dev/null differ diff --git a/docs.overmind.tech/docs/sources/k8s/configuration.md b/docs.overmind.tech/docs/sources/k8s/configuration.md deleted file mode 100644 index cb30ca63..00000000 --- a/docs.overmind.tech/docs/sources/k8s/configuration.md +++ /dev/null @@ -1,152 +0,0 @@ ---- -title: Kubernetes Setup -sidebar_position: 1 ---- - -## Prerequisites - -- Kubernetes 1.16+ -- Helm 3.x -- An Overmind API key with `request:receive` scope - -## Installation - -Create an API Key with `request:receive` scope in Overmind under **Settings › API Keys**. - -![User settings menu in the sidebar](account_settings.png) -![API Keys settings page](api_key.png) - -Install the source into your Kubernetes cluster using Helm: - -```sh -helm repo add overmind https://dl.cloudsmith.io/public/overmind/tools/helm/charts -helm install overmind-kube-source overmind/overmind-kube-source \ - --set source.apiKey.value=YOUR_API_KEY \ - --set source.clusterName=my-cluster-name -``` - -## Uninstalling - -```sh -helm uninstall overmind-kube-source -``` - -## Upgrading - -```shell -helm upgrade overmind-kube-source overmind/overmind-kube-source -``` - -## Configuration - -The following table lists the configurable parameters and their default values. 
- -### Image Configuration - -| Parameter | Description | Default | -| ------------------ | ---------------------------------- | ------------------------------------------- | -| `image.repository` | Image repository | `ghcr.io/overmindtech/workspace/k8s-source` | -| `image.pullPolicy` | Image pull policy | `Always` | -| `image.tag` | Image tag (defaults to appVersion) | `""` | -| `imagePullSecrets` | Image pull secrets | `[]` | - -### Deployment Configuration - -| Parameter | Description | Default | -| -------------------- | -------------------------- | ------- | -| `replicaCount` | Number of replicas | `1` | -| `nameOverride` | Override chart name | `""` | -| `fullnameOverride` | Override full name | `""` | -| `podAnnotations` | Pod annotations | `{}` | -| `podSecurityContext` | Pod security context | `{}` | -| `securityContext` | Container security context | `{}` | -| `nodeSelector` | Node selector | `{}` | -| `tolerations` | Pod tolerations | `[]` | -| `affinity` | Pod affinity rules | `{}` | - -### Source Configuration - -| Parameter | Description | Default | -| ---------------------------------- | ----------------------------------------------------- | --------------------------- | -| `source.log` | Log level (info, debug, trace) | `info` | -| `source.apiKey.value` | Direct API key value (not recommended for production) | `""` | -| `source.apiKey.existingSecretName` | Name of existing secret containing API key | `""` | -| `source.app` | Overmind instance URL | `https://app.overmind.tech` | -| `source.maxParallel` | Max parallel requests | `20` | -| `source.rateLimitQPS` | K8s API rate limit QPS | `10` | -| `source.rateLimitBurst` | K8s API rate limit burst | `30` | -| `source.clusterName` | Cluster name | `""` | -| `source.honeycombApiKey` | Honeycomb API key | `""` | - -### Pod Disruption Budget Configuration - -| Parameter | Description | Default | -| ----------------------------- | ---------------------------- | ------- | -| `podDisruptionBudget.enabled` | 
Enable Pod Disruption Budget | `true` | - -### Example values.yaml - -```yaml -source: - apiKey: 'your-api-key' - clusterName: 'production-cluster' - log: 'debug' - maxParallel: 30 - rateLimitQPS: 20 - rateLimitBurst: 40 - -# Pod Disruption Budget is enabled by default for production protection -podDisruptionBudget: - enabled: true - -resources: - limits: - cpu: 200m - memory: 256Mi - requests: - cpu: 100m - memory: 128Mi -``` - -## API Key Management - -The chart provides two methods for managing the required Overmind API key: - -### Using an Existing Secret - -1. Create a Kubernetes secret containing your API key: - - ```sh - kubectl create secret generic overmind-api-key \ - --from-literal=API_KEY=your-api-key-here - ``` - -2. Install the chart: - - ```sh - helm install overmind-kube-source overmind/overmind-kube-source \ - --set source.apiKey.existingSecretName=overmind-api-key - ``` - -**Important Notes:** - -- The secret MUST contain a key named `API_KEY` -- The secret must exist in the same namespace as the chart -- Installation will fail if: - - The secret doesn't exist - - The secret exists but doesn't contain an `API_KEY` key - - Neither `source.apiKey.existingSecretName` nor `source.apiKey.value` is provided - -### Using Direct Value - -```sh -helm install overmind-kube-source overmind/overmind-kube-source \ - --set source.apiKey.value=YOUR_API_KEY - --set source.clusterName=my-cluster-name -``` - -**Warning:** This method stores the API key in clear text in your values file. Only use for development/testing. - -## Support - -This source will support all Kubernetes versions that are currently maintained in the kubernetes project. 
The list can be found [here](https://kubernetes.io/releases/) diff --git a/docs.overmind.tech/docs/sources/stdlib/Types/certificate.md b/docs.overmind.tech/docs/sources/stdlib/Types/certificate.md deleted file mode 100644 index 626f922a..00000000 --- a/docs.overmind.tech/docs/sources/stdlib/Types/certificate.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Certificate -sidebar_label: certificate ---- - -A Certificate resource represents an X.509 public-key certificate (typically served during a TLS/SSL handshake) together with any intermediate certificates that form its trust chain. Overmind analyses these certificates to surface risks such as imminent expiry, weak signature algorithms, incorrect key usage flags, or hostnames that do not match the Subject Alternative Names (SANs). -For the formal specification of X.509 certificates, see RFC 5280 – Internet X.509 Public Key Infrastructure Certificate and Certificate Revocation List (CRL) Profile: https://datatracker.ietf.org/doc/html/rfc5280 - -## Supported Methods - -- ~~`GET`~~ -- ~~`LIST`~~ -- `SEARCH`: Takes a full certificate, or certificate bundle as input in PEM encoded format diff --git a/docs.overmind.tech/docs/sources/stdlib/Types/dns.md b/docs.overmind.tech/docs/sources/stdlib/Types/dns.md deleted file mode 100644 index e4193d4c..00000000 --- a/docs.overmind.tech/docs/sources/stdlib/Types/dns.md +++ /dev/null @@ -1,27 +0,0 @@ ---- -title: DNS Entry -sidebar_label: dns ---- - -The Domain Name System (DNS) translates human-readable names into machine-usable information. A DNS _A_ record maps a hostname to an IPv4 address, while an _AAAA_ record maps it to an IPv6 address. By querying these records, Overmind can reveal the infrastructure that a name ultimately points to, allowing you to spot configuration mistakes, dangling records, or unexpected dependencies before you deploy. 
-Reference documentation: RFC 1034 & RFC 1035 – Domain Names – Concepts and Facilities / Implementation and Specification (https://www.rfc-editor.org/rfc/rfc1034 and https://www.rfc-editor.org/rfc/rfc1035) - -## Supported Methods - -- `GET`: A DNS A or AAAA entry to look up -- ~~`LIST`~~ -- `SEARCH`: A DNS name (or IP for reverse DNS), this will perform a recursive search and return all results. It is recommended that you always use the SEARCH method - -## Possible Links - -### [`dns`](/sources/stdlib/Types/dns) - -If the queried record is a CNAME, MX, NS or contains additional glue, Overmind follows those pointers and links the resulting records back as further `dns` items for deeper traversal. - -### [`ip`](/sources/aws/Types/networkmanager-network-resource-relationship) - -An A or AAAA record resolves to one or more IP addresses; each discovered address is linked as an `ip` item so their ownership, location and associated services can be examined. - -### [`rdap-domain`](/sources/stdlib/Types/rdap-domain) - -The second-level or higher-level domain extracted from the DNS name is linked to its corresponding `rdap-domain` item, giving visibility into registrar, registrant and name-server information that may present additional risk factors. diff --git a/docs.overmind.tech/docs/sources/stdlib/Types/http.md b/docs.overmind.tech/docs/sources/stdlib/Types/http.md deleted file mode 100644 index 5ae0276c..00000000 --- a/docs.overmind.tech/docs/sources/stdlib/Types/http.md +++ /dev/null @@ -1,31 +0,0 @@ ---- -title: HTTP Endpoint -sidebar_label: http ---- - -An HTTP Endpoint represents a reachable URL that Overmind can interrogate in order to discover configuration or security issues before deployment. By performing lightweight `HEAD` or `GET` requests, Overmind determines the availability, response headers, redirects, and TLS configuration (if the endpoint is served over HTTPS). 
This allows you to spot problems such as broken links, unexpected redirections, missing security headers, or invalid certificates early in the pipeline. -For more background on how HTTP endpoints are conventionally exposed and managed on the internet, refer to the W3C documentation on HTTP semantics: https://www.w3.org/Protocols/ (external). - -## Supported Methods - -- `GET`: A HTTP endpoint to run a `HEAD` request against -- ~~`LIST`~~ -- `SEARCH`: A HTTP URL to search for. Query parameters and fragments will be stripped from the URL before processing. - -## Possible Links - -### [`ip`](/sources/aws/Types/networkmanager-network-resource-relationship) - -The hostname or FQDN of the HTTP endpoint ultimately resolves to one or more IP addresses. Overmind records these addresses to understand network-level reachability and to cross-reference them with firewall, VPC or load-balancer configurations. - -### [`dns`](/sources/stdlib/Types/dns) - -Before an HTTP request can be made, the client performs a DNS lookup. Overmind connects the endpoint to its corresponding DNS records (A, AAAA, CNAME, etc.) so you can see how changes in DNS zone files might affect the endpoint’s availability. - -### [`certificate`](/sources/gcp/Types/gcp-compute-ssl-certificate) - -If the endpoint is accessed over HTTPS, the server presents an X.509 certificate. Overmind links the endpoint to the certificate resource it observes during the TLS handshake, enabling validation of expiry dates, issuer trust chains, and key strengths. - -### [`http`](/sources/stdlib/Types/http) - -HTTP endpoints often redirect to, embed, or call other HTTP endpoints (for example via 3xx redirects or links in HTML/JSON responses). Overmind establishes links between them so you can trace dependencies, spot redirect loops, and ensure downstream endpoints meet your security standards. 
diff --git a/docs.overmind.tech/docs/sources/stdlib/Types/ip.md b/docs.overmind.tech/docs/sources/stdlib/Types/ip.md deleted file mode 100644 index 693acc9a..00000000 --- a/docs.overmind.tech/docs/sources/stdlib/Types/ip.md +++ /dev/null @@ -1,23 +0,0 @@ ---- -title: IP Address -sidebar_label: ip ---- - -An IP address is a numerical label assigned to every device connected to an Internet Protocol network. It uniquely identifies the source and destination of traffic and is used for routing packets across interconnected networks. Overmind treats each IPv4 or IPv6 address that appears in your configuration as a discrete resource, allowing you to map how code, infrastructure and third-party services depend on it and to identify security or availability risks before deployment. -Official specification documents can be found in the relevant IETF RFCs: IPv4 is defined in RFC 791 and IPv6 in RFC 8200 (see https://www.rfc-editor.org/rfc/rfc791 and https://www.rfc-editor.org/rfc/rfc8200). - -## Supported Methods - -- `GET`: An ipv4 or ipv6 address -- ~~`LIST`~~ -- ~~`SEARCH`~~ - -## Possible Links - -### [`dns`](/sources/stdlib/Types/dns) - -DNS records (such as A, AAAA or PTR) map human-readable hostnames to this IP address or resolve the address back to a hostname. Overmind links the `ip` resource to `dns` items whenever the address appears in one of these records so that you can trace how name resolution affects your deployment. - -### [`rdap-ip-network`](/sources/stdlib/Types/rdap-ip-network) - -Querying the Registration Data Access Protocol (RDAP) for an IP returns information about the allocation block, the organisation that owns it, contact details and abuse mailboxes. Overmind links an `ip` to the corresponding `rdap-ip-network` resource to surface ownership, geolocation and abuse-handling context that may influence compliance or threat-modelling decisions. 
diff --git a/docs.overmind.tech/docs/sources/stdlib/Types/rdap-asn.md b/docs.overmind.tech/docs/sources/stdlib/Types/rdap-asn.md deleted file mode 100644 index 2ad5fc1a..00000000 --- a/docs.overmind.tech/docs/sources/stdlib/Types/rdap-asn.md +++ /dev/null @@ -1,18 +0,0 @@ ---- -title: Autonomous System Number (ASN) -sidebar_label: rdap-asn ---- - -An Autonomous System Number (ASN) is a unique 16- or 32-bit identifier assigned to an Autonomous System so that it can participate in Border Gateway Protocol (BGP) routing on the public Internet. Using the Registration Data Access Protocol (RDAP), you can query an ASN to obtain registration details such as the holder, allocation status, and associated contacts. For the formal specification of RDAP responses for ASNs, see [RFC 9083: Registration Data Access Protocol (RDAP)](https://datatracker.ietf.org/doc/html/rfc9083). - -## Supported Methods - -- `GET`: Get an ASN by handle i.e. "AS15169" -- ~~`LIST`~~ -- ~~`SEARCH`~~ - -## Possible Links - -### [`rdap-entity`](/sources/stdlib/Types/rdap-entity) - -An ASN RDAP record frequently contains an `entities` array. Each item in that array is an RDAP Entity object representing the organisation or individual responsible for the ASN (registrant, administrative contact, technical contact, etc.). Overmind therefore links an `rdap-asn` resource to one or more `rdap-entity` resources so that you can inspect the people or organisations behind a particular network. diff --git a/docs.overmind.tech/docs/sources/stdlib/Types/rdap-domain.md b/docs.overmind.tech/docs/sources/stdlib/Types/rdap-domain.md deleted file mode 100644 index f9f21abd..00000000 --- a/docs.overmind.tech/docs/sources/stdlib/Types/rdap-domain.md +++ /dev/null @@ -1,31 +0,0 @@ ---- -title: RDAP Domain -sidebar_label: rdap-domain ---- - -An RDAP Domain record represents the authoritative registration data for a domain name as returned by the Registration Data Access Protocol (RDAP). 
The record contains information such as the registrar, registrant and administrative contacts, name-servers, status flags (e.g. `clientTransferProhibited`), and important lifecycle dates (creation, expiry, last update). In Overmind the resource lets you inspect this registration data and understand how a domain fits into the rest of your deployment before any changes are made. -Official RDAP specification: https://www.rfc-editor.org/rfc/rfc9082 - -## Supported Methods - -- ~~`GET`~~ -- ~~`LIST`~~ -- `SEARCH`: Search for a domain record by the domain name e.g. "www.google.com" - -## Possible Links - -### [`dns`](/sources/stdlib/Types/dns) - -The name portion of the RDAP domain (e.g. `example.com`) will typically have authoritative DNS records such as `A`, `AAAA`, `MX`, etc. Overmind links the RDAP Domain to those `dns` items so that you can trace from the registration layer straight through to the operational zone file that will actually be served. - -### [`rdap-nameserver`](/sources/stdlib/Types/rdap-nameserver) - -An RDAP Domain record contains a list of host objects (name-servers) delegated for the zone. Each of those host objects is represented as an `rdap-nameserver` item. The link allows you to drill into the registration data for each individual name-server. - -### [`rdap-entity`](/sources/stdlib/Types/rdap-entity) - -Entities in RDAP describe people or organisations such as the registrant, administrative contact, or registrar. Overmind links the RDAP Domain to every referenced `rdap-entity` so that you can view contact details, roles and other domains controlled by the same party. - -### [`rdap-ip-network`](/sources/stdlib/Types/rdap-ip-network) - -If the RDAP Domain record (or any of its linked name-servers) includes embedded references to address space—commonly via `v4network` or `v6network` objects—Overmind exposes those as `rdap-ip-network` items. 
This lets you see which blocks of IP addresses are directly associated with the domain and whether they overlap with other infrastructure you manage. diff --git a/docs.overmind.tech/docs/sources/stdlib/Types/rdap-entity.md b/docs.overmind.tech/docs/sources/stdlib/Types/rdap-entity.md deleted file mode 100644 index 1989ba3e..00000000 --- a/docs.overmind.tech/docs/sources/stdlib/Types/rdap-entity.md +++ /dev/null @@ -1,20 +0,0 @@ ---- -title: RDAP Entity -sidebar_label: rdap-entity ---- - -An RDAP (Registration Data Access Protocol) Entity resource represents a single contact object – either a person, organisation or role – that appears in the registration data held by Regional Internet Registries (RIRs) and other RDAP servers. It typically contains identifying information such as names, postal addresses, e-mail addresses, telephone numbers and public identifiers, and is referenced by other RDAP objects (e.g. ASNs, IPv4/IPv6 prefix ranges and domain names) as their administrative, technical or abuse contact. - -The formal structure and semantics of an RDAP Entity are defined in RFC 9083, section 5.1 (https://www.rfc-editor.org/rfc/rfc9083#section-5.1). - -## Supported Methods - -- `GET`: Get an entity by its handle. This method is discouraged as it's not reliable since entity bootstrapping isn't comprehensive -- ~~`LIST`~~ -- `SEARCH`: Search for an entity by its URL e.g. https://rdap.apnic.net/entity/AIC3-AP - -## Possible Links - -### [`rdap-asn`](/sources/stdlib/Types/rdap-asn) - -An ASN record can reference one or more RDAP Entities as its registrant, administrative or technical contacts. Overmind links the rdap-asn resource to the corresponding rdap-entity resources so that you can see who is responsible for a particular Autonomous System and assess any associated risk or exposure stemming from those contacts. 
diff --git a/docs.overmind.tech/docs/sources/stdlib/Types/rdap-ip-network.md b/docs.overmind.tech/docs/sources/stdlib/Types/rdap-ip-network.md deleted file mode 100644 index efe67c03..00000000 --- a/docs.overmind.tech/docs/sources/stdlib/Types/rdap-ip-network.md +++ /dev/null @@ -1,19 +0,0 @@ ---- -title: RDAP IP Network -sidebar_label: rdap-ip-network ---- - -An **RDAP IP Network** represents a block of IPv4 or IPv6 address space as returned by the Registration Data Access Protocol (RDAP). Overmind queries the authoritative RDAP service for a supplied address or prefix and surfaces the resulting network object, revealing who owns the range, the exact start- and end-addresses, its allocation status (allocated, assigned, reserved, etc.), and any policy or abuse information attached to it. Seeing this data in advance helps you verify that the addresses your deployment will use are valid and not bogon, reserved, or owned by an unexpected party. -The RDAP specification for IP networks is defined in [RFC 9083 – Registration Data Access Protocol (RDAP): Query Format](https://datatracker.ietf.org/doc/html/rfc9083). - -## Supported Methods - -- ~~`GET`~~ -- ~~`LIST`~~ -- `SEARCH`: Search for the most specific network that contains the specified IP or CIDR - -## Possible Links - -### [`rdap-entity`](/sources/stdlib/Types/rdap-entity) - -An RDAP network record contains an `entities` array referencing the people, organisations, and roles (registrant, technical, abuse, etc.) responsible for the address space. Overmind links each of these references to its corresponding `rdap-entity` item, letting you inspect contact details and responsibility assignments related to the network. 
diff --git a/docs.overmind.tech/docs/sources/stdlib/Types/rdap-nameserver.md b/docs.overmind.tech/docs/sources/stdlib/Types/rdap-nameserver.md deleted file mode 100644 index 663aa01a..00000000 --- a/docs.overmind.tech/docs/sources/stdlib/Types/rdap-nameserver.md +++ /dev/null @@ -1,28 +0,0 @@ ---- -title: RDAP Nameserver -sidebar_label: rdap-nameserver ---- - -The Registration Data Access Protocol (RDAP) is the modern, machine-readable replacement for the old WHOIS service. -An **RDAP ­nameserver resource** represents the authoritative information that a Top-Level Domain (TLD) registry publishes about a particular DNS nameserver. By querying this endpoint you can discover, for example, the registrar that manages the server, its associated IP addresses, its status with the registry and any abuse or support contacts. -For details of the protocol and the structure of a nameserver response, see the IETF specification: https://datatracker.ietf.org/doc/html/rfc7483#section-5.5. - -## Supported Methods - -- ~~`GET`~~ -- ~~`LIST`~~ -- `SEARCH`: Search for the RDAP entry for a nameserver by its full URL e.g. "https://rdap.verisign.com/com/v1/nameserver/NS4.GOOGLE.COM" - -## Possible Links - -### [`dns`](/sources/stdlib/Types/dns) - -A nameserver appears in DNS NS records. Overmind links the RDAP nameserver object to the corresponding `dns` item so that you can see which zones delegate to this server and whether those zones are also in your inventory. - -### [`ip`](/sources/aws/Types/networkmanager-network-resource-relationship) - -The RDAP response normally includes the A and/or AAAA records for the nameserver. These addresses are represented as `ip` items, allowing you to trace from the logical nameserver to the concrete IP resources that sit behind it. - -### [`rdap-entity`](/sources/stdlib/Types/rdap-entity) - -Each nameserver RDAP document references one or more entities (registrar, registrant, technical contact, abuse contact, etc.). 
These are captured as separate `rdap-entity` items and linked so you can quickly identify who is responsible for the server and how to contact them. diff --git a/docs.overmind.tech/docs/sources/stdlib/_category_json b/docs.overmind.tech/docs/sources/stdlib/_category_json deleted file mode 100644 index 35c138aa..00000000 --- a/docs.overmind.tech/docs/sources/stdlib/_category_json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "label": "Public Resources (stdlib)", - "position": 3, - "collapsed": true, - "link": { - "type": "generated-index", - "description": "How to explore with Overminds built-in Public Resoures." - } -} diff --git a/go.mod b/go.mod index c2d13176..14ad431b 100644 --- a/go.mod +++ b/go.mod @@ -13,8 +13,8 @@ require ( buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go v1.36.11-20260209202127-80ab13bee0bf.1 buf.build/go/protovalidate v1.1.3 charm.land/lipgloss/v2 v2.0.2 - cloud.google.com/go/aiplatform v1.120.0 - cloud.google.com/go/auth v0.18.2 + cloud.google.com/go/aiplatform v1.121.0 + cloud.google.com/go/auth v0.19.0 cloud.google.com/go/auth/oauth2adapt v0.2.8 cloud.google.com/go/bigquery v1.74.0 cloud.google.com/go/bigtable v1.43.0 @@ -22,12 +22,12 @@ require ( cloud.google.com/go/compute v1.57.0 cloud.google.com/go/compute/metadata v0.9.0 // indirect cloud.google.com/go/container v1.46.0 - cloud.google.com/go/dataplex v1.28.0 + cloud.google.com/go/dataplex v1.29.0 cloud.google.com/go/dataproc/v2 v2.16.0 cloud.google.com/go/eventarc v1.18.0 cloud.google.com/go/filestore v1.10.3 cloud.google.com/go/functions v1.19.7 - cloud.google.com/go/iam v1.5.3 + cloud.google.com/go/iam v1.6.0 cloud.google.com/go/kms v1.26.0 cloud.google.com/go/logging v1.13.2 cloud.google.com/go/monitoring v1.24.3 @@ -35,10 +35,10 @@ require ( cloud.google.com/go/orgpolicy v1.15.1 cloud.google.com/go/redis v1.18.3 cloud.google.com/go/resourcemanager v1.10.7 - cloud.google.com/go/run v1.15.0 + cloud.google.com/go/run v1.16.0 cloud.google.com/go/secretmanager v1.16.0 
cloud.google.com/go/securitycentermanagement v1.1.6 - cloud.google.com/go/spanner v1.88.0 + cloud.google.com/go/spanner v1.89.0 cloud.google.com/go/storage v1.61.3 cloud.google.com/go/storagetransfer v1.13.1 connectrpc.com/connect v1.18.1 // v1.19.0 was faulty, wait until it is above this version @@ -50,7 +50,7 @@ require ( github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/cosmos/armcosmos/v3 v3.4.0 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/dns/armdns v1.2.0 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/elasticsan/armelasticsan v1.2.0 - github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/keyvault/armkeyvault/v2 v2.0.1 + github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/keyvault/armkeyvault/v2 v2.0.2 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/msi/armmsi v1.3.0 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v9 v9.0.0 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/postgresql/armpostgresqlflexibleservers/v5 v5.0.0 @@ -61,44 +61,44 @@ require ( github.com/Masterminds/semver/v3 v3.4.0 github.com/MrAlias/otel-schema-utils v0.4.0-alpha github.com/auth0/go-jwt-middleware/v3 v3.0.0 - github.com/aws/aws-sdk-go-v2 v1.41.4 - github.com/aws/aws-sdk-go-v2/config v1.32.12 - github.com/aws/aws-sdk-go-v2/credentials v1.19.12 - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.20 - github.com/aws/aws-sdk-go-v2/service/apigateway v1.39.0 - github.com/aws/aws-sdk-go-v2/service/autoscaling v1.64.3 - github.com/aws/aws-sdk-go-v2/service/cloudfront v1.60.3 - github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.55.2 - github.com/aws/aws-sdk-go-v2/service/directconnect v1.38.14 - github.com/aws/aws-sdk-go-v2/service/dynamodb v1.56.2 - github.com/aws/aws-sdk-go-v2/service/ec2 v1.296.0 - github.com/aws/aws-sdk-go-v2/service/ecs v1.74.0 - github.com/aws/aws-sdk-go-v2/service/efs v1.41.13 - github.com/aws/aws-sdk-go-v2/service/eks v1.81.1 - github.com/aws/aws-sdk-go-v2/service/elasticloadbalancing v1.33.22 - 
github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2 v1.54.9 - github.com/aws/aws-sdk-go-v2/service/iam v1.53.6 - github.com/aws/aws-sdk-go-v2/service/kms v1.50.3 - github.com/aws/aws-sdk-go-v2/service/lambda v1.88.3 - github.com/aws/aws-sdk-go-v2/service/networkfirewall v1.59.6 - github.com/aws/aws-sdk-go-v2/service/networkmanager v1.41.7 - github.com/aws/aws-sdk-go-v2/service/rds v1.116.3 - github.com/aws/aws-sdk-go-v2/service/route53 v1.62.4 - github.com/aws/aws-sdk-go-v2/service/s3 v1.97.1 - github.com/aws/aws-sdk-go-v2/service/sns v1.39.14 - github.com/aws/aws-sdk-go-v2/service/sqs v1.42.24 - github.com/aws/aws-sdk-go-v2/service/ssm v1.68.3 - github.com/aws/aws-sdk-go-v2/service/sts v1.41.9 + github.com/aws/aws-sdk-go-v2 v1.41.5 + github.com/aws/aws-sdk-go-v2/config v1.32.13 + github.com/aws/aws-sdk-go-v2/credentials v1.19.13 + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.21 + github.com/aws/aws-sdk-go-v2/service/apigateway v1.39.1 + github.com/aws/aws-sdk-go-v2/service/autoscaling v1.64.4 + github.com/aws/aws-sdk-go-v2/service/cloudfront v1.60.4 + github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.55.3 + github.com/aws/aws-sdk-go-v2/service/directconnect v1.38.15 + github.com/aws/aws-sdk-go-v2/service/dynamodb v1.57.1 + github.com/aws/aws-sdk-go-v2/service/ec2 v1.296.1 + github.com/aws/aws-sdk-go-v2/service/ecs v1.74.1 + github.com/aws/aws-sdk-go-v2/service/efs v1.41.14 + github.com/aws/aws-sdk-go-v2/service/eks v1.81.2 + github.com/aws/aws-sdk-go-v2/service/elasticloadbalancing v1.33.23 + github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2 v1.54.10 + github.com/aws/aws-sdk-go-v2/service/iam v1.53.7 + github.com/aws/aws-sdk-go-v2/service/kms v1.50.4 + github.com/aws/aws-sdk-go-v2/service/lambda v1.88.5 + github.com/aws/aws-sdk-go-v2/service/networkfirewall v1.59.7 + github.com/aws/aws-sdk-go-v2/service/networkmanager v1.41.8 + github.com/aws/aws-sdk-go-v2/service/rds v1.117.1 + github.com/aws/aws-sdk-go-v2/service/route53 v1.62.5 + 
github.com/aws/aws-sdk-go-v2/service/s3 v1.97.3 + github.com/aws/aws-sdk-go-v2/service/sns v1.39.15 + github.com/aws/aws-sdk-go-v2/service/sqs v1.42.25 + github.com/aws/aws-sdk-go-v2/service/ssm v1.68.4 + github.com/aws/aws-sdk-go-v2/service/sts v1.41.10 github.com/aws/smithy-go v1.24.2 github.com/cenkalti/backoff/v5 v5.0.3 github.com/charmbracelet/glamour v0.10.0 github.com/coder/websocket v1.8.14 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect - github.com/getsentry/sentry-go v0.43.0 + github.com/getsentry/sentry-go v0.44.1 github.com/go-jose/go-jose/v4 v4.1.3 github.com/google/btree v1.1.3 github.com/google/uuid v1.6.0 - github.com/googleapis/gax-go/v2 v2.19.0 + github.com/googleapis/gax-go/v2 v2.20.0 github.com/goombaio/namegenerator v0.0.0-20181006234301-989e774b106e github.com/hashicorp/go-retryablehttp v0.7.8 github.com/hashicorp/hcl/v2 v2.24.0 @@ -113,8 +113,8 @@ require ( github.com/mitchellh/go-homedir v1.1.0 github.com/muesli/reflow v0.3.0 github.com/nats-io/jwt/v2 v2.8.1 - github.com/nats-io/nats-server/v2 v2.12.5 - github.com/nats-io/nats.go v1.49.0 + github.com/nats-io/nats-server/v2 v2.12.6 + github.com/nats-io/nats.go v1.50.0 github.com/nats-io/nkeys v0.4.15 github.com/onsi/ginkgo/v2 v2.28.1 // indirect github.com/onsi/gomega v1.39.1 // indirect @@ -143,17 +143,17 @@ require ( go.uber.org/automaxprocs v1.6.0 go.uber.org/goleak v1.3.0 go.uber.org/mock v0.6.0 + go.yaml.in/yaml/v3 v3.0.4 golang.org/x/net v0.52.0 golang.org/x/oauth2 v0.36.0 golang.org/x/sync v0.20.0 golang.org/x/text v0.35.0 gonum.org/v1/gonum v0.17.0 - google.golang.org/api v0.272.0 + google.golang.org/api v0.273.0 google.golang.org/genproto/googleapis/rpc v0.0.0-20260319201613-d00831a3d3e7 google.golang.org/grpc v1.79.3 google.golang.org/protobuf v1.36.11 gopkg.in/ini.v1 v1.67.1 - gopkg.in/yaml.v3 v3.0.1 k8s.io/api v0.35.3 k8s.io/apimachinery v0.35.3 k8s.io/client-go v0.35.3 @@ -185,19 +185,19 @@ require ( github.com/antlr4-go/antlr/v4 v4.13.1 // indirect 
github.com/apache/arrow/go/v15 v15.0.2 // indirect github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect - github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.7 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.20 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.20 // indirect + github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.8 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.21 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.21 // indirect github.com/aws/aws-sdk-go-v2/internal/ini v1.8.6 // indirect - github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.21 // indirect + github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.22 // indirect github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.7 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.12 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.11.20 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.20 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.20 // indirect - github.com/aws/aws-sdk-go-v2/service/signin v1.0.8 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.30.13 // indirect - github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.17 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.13 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.11.21 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.21 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.21 // indirect + github.com/aws/aws-sdk-go-v2/service/signin v1.0.9 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.30.14 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.18 // indirect github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect github.com/aymerick/douceur v0.2.0 // indirect 
github.com/cespare/xxhash/v2 v2.3.0 // indirect @@ -266,7 +266,7 @@ require ( github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/klauspost/compress v1.18.4 // indirect + github.com/klauspost/compress v1.18.5 // indirect github.com/klauspost/cpuid/v2 v2.2.8 // indirect github.com/kylelemons/godebug v1.1.0 // indirect github.com/lestrrat-go/blackmagic v1.0.4 // indirect @@ -330,10 +330,9 @@ require ( go.opentelemetry.io/otel/sdk/metric v1.42.0 // indirect go.opentelemetry.io/proto/otlp v1.9.0 // indirect go.yaml.in/yaml/v2 v2.4.3 // indirect - go.yaml.in/yaml/v3 v3.0.4 // indirect golang.org/x/crypto v0.49.0 // indirect golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546 // indirect - golang.org/x/mod v0.33.0 // indirect + golang.org/x/mod v0.34.0 // indirect golang.org/x/sys v0.42.0 // indirect golang.org/x/telemetry v0.0.0-20260209163413-e7419c687ee4 // indirect golang.org/x/term v0.41.0 // indirect @@ -341,10 +340,11 @@ require ( golang.org/x/tools v0.42.0 // indirect golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect google.golang.org/appengine v1.6.8 // indirect - google.golang.org/genproto v0.0.0-20260316180232-0b37fe3546d5 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20260316180232-0b37fe3546d5 // indirect + google.golang.org/genproto v0.0.0-20260319201613-d00831a3d3e7 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20260319201613-d00831a3d3e7 // indirect gopkg.in/evanphx/json-patch.v4 v4.13.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/klog/v2 v2.130.1 // indirect k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 // indirect k8s.io/utils v0.0.0-20260210185600-b8788abfbbc2 // indirect diff --git a/go.sum b/go.sum index 2a972151..3933647b 100644 --- a/go.sum +++ b/go.sum @@ -18,10 +18,10 @@ charm.land/lipgloss/v2 v2.0.2 
h1:xFolbF8JdpNkM2cEPTfXEcW1p6NRzOWTSamRfYEw8cs= charm.land/lipgloss/v2 v2.0.2/go.mod h1:KjPle2Qd3YmvP1KL5OMHiHysGcNwq6u83MUjYkFvEkM= cloud.google.com/go v0.123.0 h1:2NAUJwPR47q+E35uaJeYoNhuNEM9kM8SjgRgdeOJUSE= cloud.google.com/go v0.123.0/go.mod h1:xBoMV08QcqUGuPW65Qfm1o9Y4zKZBpGS+7bImXLTAZU= -cloud.google.com/go/aiplatform v1.120.0 h1:jKWTpEs+xoUhDa1FMdSuhMcEQYyUiMdufGyX3zvtLVQ= -cloud.google.com/go/aiplatform v1.120.0/go.mod h1:6mDthfmy0oS1EQhVFdijoxkVdI2+HIZkpuGTBpedeCg= -cloud.google.com/go/auth v0.18.2 h1:+Nbt5Ev0xEqxlNjd6c+yYUeosQ5TtEUaNcN/3FozlaM= -cloud.google.com/go/auth v0.18.2/go.mod h1:xD+oY7gcahcu7G2SG2DsBerfFxgPAJz17zz2joOFF3M= +cloud.google.com/go/aiplatform v1.121.0 h1:8y8sNfVAW1DVhFbSbI7d8rrqBGGJFk6EoV6atidlyQc= +cloud.google.com/go/aiplatform v1.121.0/go.mod h1:juMdDWeNphHV40KhWdN+563zNCOKNmLJjk5D2TA43ls= +cloud.google.com/go/auth v0.19.0 h1:DGYwtbcsGsT1ywuxsIoWi1u/vlks0moIblQHgSDgQkQ= +cloud.google.com/go/auth v0.19.0/go.mod h1:2Aph7BT2KnaSFOM0JDPyiYgNh6PL9vGMiP8CUIXZ+IY= cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc= cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c= cloud.google.com/go/bigquery v1.74.0 h1:Q6bAMv+eyvufOpIrfrYxhM46qq1D3ZQTdgUDQqKS+n8= @@ -38,8 +38,8 @@ cloud.google.com/go/container v1.46.0 h1:xX94Lo3xrS5OkdMWKvpEVAbBwjN9uleVv6vOi02 cloud.google.com/go/container v1.46.0/go.mod h1:A7gMqdQduTk46+zssWDTKbGS2z46UsJNXfKqvMI1ZO4= cloud.google.com/go/datacatalog v1.26.1 h1:bCRKA8uSQN8wGW3Tw0gwko4E9a64GRmbW1nCblhgC2k= cloud.google.com/go/datacatalog v1.26.1/go.mod h1:2Qcq8vsHNxMDgjgadRFmFG47Y+uuIVsyEGUrlrKEdrg= -cloud.google.com/go/dataplex v1.28.0 h1:rROI3iqMVI9nXT701ULoFRETQVAOAPC3mPSWFDxXFl0= -cloud.google.com/go/dataplex v1.28.0/go.mod h1:VB+xlYJiJ5kreonXsa2cHPj0A3CfPh/mgiHG4JFhbUA= +cloud.google.com/go/dataplex v1.29.0 h1:g1RsvpxELtGdVwmuOiktBM6BPfFy8TyNzmWvf+6yDgc= +cloud.google.com/go/dataplex v1.29.0/go.mod 
h1:32rAjJhxo1tY5KivJ33872X5ZqR6ZjlE5ng5Uz7+hH0= cloud.google.com/go/dataproc/v2 v2.16.0 h1:0g2hnjlQ8SQTnNeu+Bqqa61QPssfSZF3t+9ldRmx+VQ= cloud.google.com/go/dataproc/v2 v2.16.0/go.mod h1:HlzFg8k1SK+bJN3Zsy2z5g6OZS1D4DYiDUgJtF0gJnE= cloud.google.com/go/eventarc v1.18.0 h1:8WWG1/ogInYur1NQjML6EMHQ0ZBzAdMDGlUVpLD56cI= @@ -48,8 +48,8 @@ cloud.google.com/go/filestore v1.10.3 h1:3KZifUVTqGhNNv6MLeONYth1HjlVM4vDhaH+xrd cloud.google.com/go/filestore v1.10.3/go.mod h1:94ZGyLTx9j+aWKozPQ6Wbq1DuImie/L/HIdGMshtwac= cloud.google.com/go/functions v1.19.7 h1:7LcOD18euIVGRUPaeCmgO6vfWSLNIsi6STWRQcdANG8= cloud.google.com/go/functions v1.19.7/go.mod h1:xbcKfS7GoIcaXr2FSwmtn9NXal1JR4TV6iYZlgXffwA= -cloud.google.com/go/iam v1.5.3 h1:+vMINPiDF2ognBJ97ABAYYwRgsaqxPbQDlMnbHMjolc= -cloud.google.com/go/iam v1.5.3/go.mod h1:MR3v9oLkZCTlaqljW6Eb2d3HGDGK5/bDv93jhfISFvU= +cloud.google.com/go/iam v1.6.0 h1:JiSIcEi38dWBKhB3BtfKCW+dMvCZJEhBA2BsaGJgoxs= +cloud.google.com/go/iam v1.6.0/go.mod h1:ZS6zEy7QHmcNO18mjO2viYv/n+wOUkhJqGNkPPGueGU= cloud.google.com/go/kms v1.26.0 h1:cK9mN2cf+9V63D3H1f6koxTatWy39aTI/hCjz1I+adU= cloud.google.com/go/kms v1.26.0/go.mod h1:pHKOdFJm63hxBsiPkYtowZPltu9dW0MWvBa6IA4HM58= cloud.google.com/go/logging v1.13.2 h1:qqlHCBvieJT9Cdq4QqYx1KPadCQ2noD4FK02eNqHAjA= @@ -66,14 +66,14 @@ cloud.google.com/go/redis v1.18.3 h1:6LI8zSt+vmE3WQ7hE5GsJ13CbJBLV1qUw6B7CY31Wcw cloud.google.com/go/redis v1.18.3/go.mod h1:x8HtXZbvMBDNT6hMHaQ022Pos5d7SP7YsUH8fCJ2Wm4= cloud.google.com/go/resourcemanager v1.10.7 h1:oPZKIdjyVTuag+D4HF7HO0mnSqcqgjcuA18xblwA0V0= cloud.google.com/go/resourcemanager v1.10.7/go.mod h1:rScGkr6j2eFwxAjctvOP/8sqnEpDbQ9r5CKwKfomqjs= -cloud.google.com/go/run v1.15.0 h1:4cwyNv9SUQEsQOf5/DfPKyMWYSA52p38/o119BgMhO4= -cloud.google.com/go/run v1.15.0/go.mod h1:rgFHMdAopLl++57vzeqA+a1o2x0/ILZnEacRD6nC0EA= +cloud.google.com/go/run v1.16.0 h1:dPkx5oS81AC/ly4TSpRr3AYcMushvFrl8lR7jnQjzdk= +cloud.google.com/go/run v1.16.0/go.mod h1:ydUU2MjfZ64kWfzy8+GKVqXmCxMS+Ik61VQx8/FwUyY= 
cloud.google.com/go/secretmanager v1.16.0 h1:19QT7ZsLJ8FSP1k+4esQvuCD7npMJml6hYzilxVyT+k= cloud.google.com/go/secretmanager v1.16.0/go.mod h1://C/e4I8D26SDTz1f3TQcddhcmiC3rMEl0S1Cakvs3Q= cloud.google.com/go/securitycentermanagement v1.1.6 h1:XFqjKq4ZpKTj8xCXWs/mTmh/UMWDiV25iCOUd9xaGWI= cloud.google.com/go/securitycentermanagement v1.1.6/go.mod h1:nt5Z6rU4s2/j8R/EQxG5K7OfVAfAfwo89j0Nx2Srzaw= -cloud.google.com/go/spanner v1.88.0 h1:HS+5TuEYZOVOXj9K+0EtrbTw7bKBLrMe3vgGsbnehmU= -cloud.google.com/go/spanner v1.88.0/go.mod h1:MzulBwuuYwQUVdkZXBBFapmXee3N+sQrj2T/yup6uEE= +cloud.google.com/go/spanner v1.89.0 h1:r3h5Z5RA8JRPf3HCvA6ujNhREIMhPY+MrDL9mkY8jS0= +cloud.google.com/go/spanner v1.89.0/go.mod h1:okNuxnp1wdPaVoM5M28Al2irKZLkHhZ2Z+DW6/ZJWGw= cloud.google.com/go/storage v1.61.3 h1:VS//ZfBuPGDvakfD9xyPW1RGF1Vy3BWUoVZXgW1KMOg= cloud.google.com/go/storage v1.61.3/go.mod h1:JtqK8BBB7TWv0HVGHubtUdzYYrakOQIsMLffZ2Z/HWk= cloud.google.com/go/storagetransfer v1.13.1 h1:Sjukr1LtUt7vLTHNvGc2gaAqlXNFeDFRIRmWGrFaJlY= @@ -106,8 +106,8 @@ github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/elasticsan/armelasticsan v github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/elasticsan/armelasticsan v1.2.0/go.mod h1:bXxc3uCnIUCh68pl4njcH45qUgRuR0kZfR6v06k18/A= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/v3 v3.1.1 h1:1kpY4qe+BGAH2ykv4baVSqyx+AY5VjXeJ15SldlU6hs= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/v3 v3.1.1/go.mod h1:nT6cWpWdUt+g81yuKmjeYPUtI73Ak3yQIT4PVVsCEEQ= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/keyvault/armkeyvault/v2 v2.0.1 h1:nFZ7AvJqTpWobmnZlprsK6GucrByFsXWB+DwkhRxM9I= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/keyvault/armkeyvault/v2 v2.0.1/go.mod h1:ZNiswYTEPuQ/D+mHxONII+FeHHNNVQlJ5IUG88opjS0= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/keyvault/armkeyvault/v2 v2.0.2 h1:O2iuZYGa1nIMDk2uAFR0F7hDALVXMvz8Zwarz6itQ3E= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/keyvault/armkeyvault/v2 
v2.0.2/go.mod h1:7t88hsh6P4xqFM9uzaMX2qYfVsqDFkgFR4qdIX/OP+U= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/managementgroups/armmanagementgroups v1.2.0 h1:akP6VpxJGgQRpDR1P462piz/8OhYLRCreDj48AyNabc= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/managementgroups/armmanagementgroups v1.2.0/go.mod h1:8wzvopPfyZYPaQUoKW87Zfdul7jmJMDfp/k7YY3oJyA= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/msi/armmsi v1.3.0 h1:L7G3dExHBgUxsO3qpTGhk/P2dgnYyW48yn7AO33Tbek= @@ -185,88 +185,88 @@ github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmms github.com/atomicgo/cursor v0.0.1/go.mod h1:cBON2QmmrysudxNBFthvMtN32r3jxVRIvzkUiF/RuIk= github.com/auth0/go-jwt-middleware/v3 v3.0.0 h1:+rvUPCT+VbAuK4tpS13fWfZrMyqTwLopt3VoY0Y7kvA= github.com/auth0/go-jwt-middleware/v3 v3.0.0/go.mod h1:iU42jqjRyeKbf9YYSnRnolr836gk6Ty/jnUNuVq2b0o= -github.com/aws/aws-sdk-go-v2 v1.41.4 h1:10f50G7WyU02T56ox1wWXq+zTX9I1zxG46HYuG1hH/k= -github.com/aws/aws-sdk-go-v2 v1.41.4/go.mod h1:mwsPRE8ceUUpiTgF7QmQIJ7lgsKUPQOUl3o72QBrE1o= -github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.7 h1:3kGOqnh1pPeddVa/E37XNTaWJ8W6vrbYV9lJEkCnhuY= -github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.7/go.mod h1:lyw7GFp3qENLh7kwzf7iMzAxDn+NzjXEAGjKS2UOKqI= -github.com/aws/aws-sdk-go-v2/config v1.32.12 h1:O3csC7HUGn2895eNrLytOJQdoL2xyJy0iYXhoZ1OmP0= -github.com/aws/aws-sdk-go-v2/config v1.32.12/go.mod h1:96zTvoOFR4FURjI+/5wY1vc1ABceROO4lWgWJuxgy0g= -github.com/aws/aws-sdk-go-v2/credentials v1.19.12 h1:oqtA6v+y5fZg//tcTWahyN9PEn5eDU/Wpvc2+kJ4aY8= -github.com/aws/aws-sdk-go-v2/credentials v1.19.12/go.mod h1:U3R1RtSHx6NB0DvEQFGyf/0sbrpJrluENHdPy1j/3TE= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.20 h1:zOgq3uezl5nznfoK3ODuqbhVg1JzAGDUhXOsU0IDCAo= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.20/go.mod h1:z/MVwUARehy6GAg/yQ1GO2IMl0k++cu1ohP9zo887wE= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.20 h1:CNXO7mvgThFGqOFgbNAP2nol2qAWBOGfqR/7tQlvLmc= 
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.20/go.mod h1:oydPDJKcfMhgfcgBUZaG+toBbwy8yPWubJXBVERtI4o= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.20 h1:tN6W/hg+pkM+tf9XDkWUbDEjGLb+raoBMFsTodcoYKw= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.20/go.mod h1:YJ898MhD067hSHA6xYCx5ts/jEd8BSOLtQDL3iZsvbc= +github.com/aws/aws-sdk-go-v2 v1.41.5 h1:dj5kopbwUsVUVFgO4Fi5BIT3t4WyqIDjGKCangnV/yY= +github.com/aws/aws-sdk-go-v2 v1.41.5/go.mod h1:mwsPRE8ceUUpiTgF7QmQIJ7lgsKUPQOUl3o72QBrE1o= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.8 h1:eBMB84YGghSocM7PsjmmPffTa+1FBUeNvGvFou6V/4o= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.8/go.mod h1:lyw7GFp3qENLh7kwzf7iMzAxDn+NzjXEAGjKS2UOKqI= +github.com/aws/aws-sdk-go-v2/config v1.32.13 h1:5KgbxMaS2coSWRrx9TX/QtWbqzgQkOdEa3sZPhBhCSg= +github.com/aws/aws-sdk-go-v2/config v1.32.13/go.mod h1:8zz7wedqtCbw5e9Mi2doEwDyEgHcEE9YOJp6a8jdSMY= +github.com/aws/aws-sdk-go-v2/credentials v1.19.13 h1:mA59E3fokBvyEGHKFdnpNNrvaR351cqiHgRg+JzOSRI= +github.com/aws/aws-sdk-go-v2/credentials v1.19.13/go.mod h1:yoTXOQKea18nrM69wGF9jBdG4WocSZA1h38A+t/MAsk= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.21 h1:NUS3K4BTDArQqNu2ih7yeDLaS3bmHD0YndtA6UP884g= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.21/go.mod h1:YWNWJQNjKigKY1RHVJCuupeWDrrHjRqHm0N9rdrWzYI= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.21 h1:Rgg6wvjjtX8bNHcvi9OnXWwcE0a2vGpbwmtICOsvcf4= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.21/go.mod h1:A/kJFst/nm//cyqonihbdpQZwiUhhzpqTsdbhDdRF9c= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.21 h1:PEgGVtPoB6NTpPrBgqSE5hE/o47Ij9qk/SEZFbUOe9A= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.21/go.mod h1:p+hz+PRAYlY3zcpJhPwXlLC4C+kqn70WIHwnzAfs6ps= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.6 h1:qYQ4pzQ2Oz6WpQ8T3HvGHnZydA72MnLuFK9tJwmrbHw= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.6/go.mod 
h1:O3h0IK87yXci+kg6flUKzJnWeziQUKciKrLjcatSNcY= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.21 h1:SwGMTMLIlvDNyhMteQ6r8IJSBPlRdXX5d4idhIGbkXA= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.21/go.mod h1:UUxgWxofmOdAMuqEsSppbDtGKLfR04HGsD0HXzvhI1k= -github.com/aws/aws-sdk-go-v2/service/apigateway v1.39.0 h1:FQ0FLNsNkhwHcpv6rkAZaR+Royay19A+M88mtOOSg7w= -github.com/aws/aws-sdk-go-v2/service/apigateway v1.39.0/go.mod h1:xnkbxhrdrHvrz8qrNVvMAlARU/6suQoKtIjlaciWr3I= -github.com/aws/aws-sdk-go-v2/service/autoscaling v1.64.3 h1:YBBu7ZhnMkHjBCFVa50NO+AQo1I/8SK8yFIXxQSR/Do= -github.com/aws/aws-sdk-go-v2/service/autoscaling v1.64.3/go.mod h1:wRRzL5slhAzg/F8SwmTgIl3XXFB4V+X1t5qmuJf+O6k= -github.com/aws/aws-sdk-go-v2/service/cloudfront v1.60.3 h1:RB1bsGTqfbLymRdVDqHoomyZ6XfPcXtBP041qHLtlv4= -github.com/aws/aws-sdk-go-v2/service/cloudfront v1.60.3/go.mod h1:hF/cipBvnkoCnN0v+lw1ZnKTj6LyDfSX7yZW2d9buJQ= -github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.55.2 h1:mleWBVIxwceEzyItUVoqMFiv6TmOP6ECPoN6WB/VWXc= -github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.55.2/go.mod h1:cMApt548kNgu87UsBTNWVv+fpzjbUTFRSFjD1688SBs= -github.com/aws/aws-sdk-go-v2/service/directconnect v1.38.14 h1:ikGt5kTzRE0+ehjewNLHjEO9pgAjdLv2IiwUuIdWagw= -github.com/aws/aws-sdk-go-v2/service/directconnect v1.38.14/go.mod h1:WnsYjhq0txJK9bw4UeVijF1zG0Iuz6FdzwxNihVGcxI= -github.com/aws/aws-sdk-go-v2/service/dynamodb v1.56.2 h1:xi/ECwajy2mixviBD7bKAlGGSwzEaFKX2wIhrZt9NGw= -github.com/aws/aws-sdk-go-v2/service/dynamodb v1.56.2/go.mod h1:dLREOeW66eVaaGIOi2ZlLHDgkR3nuJ02rd00j0YSlBE= -github.com/aws/aws-sdk-go-v2/service/ec2 v1.296.0 h1:98Miqj16un1WLNyM1RjVDhXYumhqZrQfAeG8i4jPG6o= -github.com/aws/aws-sdk-go-v2/service/ec2 v1.296.0/go.mod h1:T6ndRfdhnXLIY5oKBHjYZDVj706los2zGdpThppquvA= -github.com/aws/aws-sdk-go-v2/service/ecs v1.74.0 h1:YS5TXaEvzDb+sV+wdQFUtuCAk0GeFR9Ai6HFdxpz6q8= -github.com/aws/aws-sdk-go-v2/service/ecs v1.74.0/go.mod h1:10kBgdaNJz0FO/+JWDUH+0rtSjkn5yafgavDDmmhFzs= 
-github.com/aws/aws-sdk-go-v2/service/efs v1.41.13 h1:C11r13KfnzxlLuILWOjBNdSJDRsJ2HDqc8kTNGc6VcM= -github.com/aws/aws-sdk-go-v2/service/efs v1.41.13/go.mod h1:YP65UYTCBf/NQKrZH+jfX/EHD5zFWLwioLpNoioIscU= -github.com/aws/aws-sdk-go-v2/service/eks v1.81.1 h1:wMMZ6vc0xljHGxZB4Hz3kVX9wSLTUau8RiaZAZtszCA= -github.com/aws/aws-sdk-go-v2/service/eks v1.81.1/go.mod h1:F8fvMS/6YtJPi40rwXWnuWPn6SYIGXPmLY5k87S3Td4= -github.com/aws/aws-sdk-go-v2/service/elasticloadbalancing v1.33.22 h1:YyV5ec8Hl6zezAzEQdetqYORGXHtNexaHWLw4TDfuBw= -github.com/aws/aws-sdk-go-v2/service/elasticloadbalancing v1.33.22/go.mod h1:480LHdOg5a74xgOBvXwI/yQuaK7SCHUVuujGyOkCw3c= -github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2 v1.54.9 h1:F7t1rvo++Bv9mTsFbd/0gThSx8vZqdHmIAURQ4dc8Jc= -github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2 v1.54.9/go.mod h1:1ethHYerpOsRYxSkV8mFNNDmDWPqCdLcrUmdd7aUYN4= -github.com/aws/aws-sdk-go-v2/service/iam v1.53.6 h1:GPQvvxy8+FDnD9xKYzGKJMjIm5xkVM5pd3bFgRldNSo= -github.com/aws/aws-sdk-go-v2/service/iam v1.53.6/go.mod h1:RJNVc52A0K41fCDJOnsCLeWJf8mwa0q30fM3CfE9U18= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.22 h1:rWyie/PxDRIdhNf4DzRk0lvjVOqFJuNnO8WwaIRVxzQ= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.22/go.mod h1:zd/JsJ4P7oGfUhXn1VyLqaRZwPmZwg44Jf2dS84Dm3Y= +github.com/aws/aws-sdk-go-v2/service/apigateway v1.39.1 h1:r3dXvi6tMfv4D48pyantOgDL48ifV6Ibj1eU1ca0C3k= +github.com/aws/aws-sdk-go-v2/service/apigateway v1.39.1/go.mod h1:nhYOLBwQu7P3ckR+L4gZkY0DT0nAhrQuZkI51jR1vTE= +github.com/aws/aws-sdk-go-v2/service/autoscaling v1.64.4 h1:9ytLDWrppFYTtWVVx80nefvaf/v02yG5pT+8HGk0vv8= +github.com/aws/aws-sdk-go-v2/service/autoscaling v1.64.4/go.mod h1:Lg8BJb1TOzVTJ6RFfkJ9zyI/XFcjcfZem+Iu4PeQxPE= +github.com/aws/aws-sdk-go-v2/service/cloudfront v1.60.4 h1:IvmTOyh1CZB0Gq6fUqVwmGqy8L9GApUr+cAK/Wq4oPs= +github.com/aws/aws-sdk-go-v2/service/cloudfront v1.60.4/go.mod h1:4/Vk7LHrr16Zkvy71Th2BJPNmCMPJFP91TaGcEqywWs= +github.com/aws/aws-sdk-go-v2/service/cloudwatch 
v1.55.3 h1:mymqCkKEbqQIFkhh2xPAJ8jS0rmZqegQOF7bw48b0iw= +github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.55.3/go.mod h1:+bNfizG/fpRGctZuVeH8uWht/0BLD9wUyXOKM4VaCVA= +github.com/aws/aws-sdk-go-v2/service/directconnect v1.38.15 h1:UtMubbp/0sQ+mM8fLpsarNlAvzYOYP7BTAMaGPfaV0I= +github.com/aws/aws-sdk-go-v2/service/directconnect v1.38.15/go.mod h1:NSqhUsoeEhxJxyhtfG65YZs8WJ208MDslRU+lWfTSJc= +github.com/aws/aws-sdk-go-v2/service/dynamodb v1.57.1 h1:Vk+a1j2pXZHkkYqHmEdpwe8eX6NDtFSBGfzuauMEWYQ= +github.com/aws/aws-sdk-go-v2/service/dynamodb v1.57.1/go.mod h1:wHrWCwhXZrl2PuCP5t36UTacy9fCHDJ+vw1r3qxTL5M= +github.com/aws/aws-sdk-go-v2/service/ec2 v1.296.1 h1:AsKDVqIbQox9NykcAm14xUiuzAKbarnC5+PZkrB2010= +github.com/aws/aws-sdk-go-v2/service/ec2 v1.296.1/go.mod h1:R+2BNtUfTfhPY0RH18oL02q116bakeBWjanrbnVBqkM= +github.com/aws/aws-sdk-go-v2/service/ecs v1.74.1 h1:O0hhTSsxp24mIjaNUaZ0zST98SZojDluj/Zh4RkYss4= +github.com/aws/aws-sdk-go-v2/service/ecs v1.74.1/go.mod h1:QkWmubOYmjj3cHn7A4CoUU7BKJhVeo39Gp6NH7IyhZw= +github.com/aws/aws-sdk-go-v2/service/efs v1.41.14 h1:Ql2FayQK0PspATQ7DETibPMutuLn16xecUqRkT09kyM= +github.com/aws/aws-sdk-go-v2/service/efs v1.41.14/go.mod h1:4qKY0MLGqCjoOY3Wvb/J/soeJN5Tlc6uo85UuoKXqlI= +github.com/aws/aws-sdk-go-v2/service/eks v1.81.2 h1:6c/Jkyx1gYLiZGl6VPjApViaoPiYo7TDWXCMk/ZBq6c= +github.com/aws/aws-sdk-go-v2/service/eks v1.81.2/go.mod h1:xdUh6tdF9A8hc+PE84kmHbF/zsVPNiKnc6oLgulq1Eo= +github.com/aws/aws-sdk-go-v2/service/elasticloadbalancing v1.33.23 h1:sLj6B5YHp6RqyAf5lF5BxzyGxZIxXG1NW3mo0jtpDqo= +github.com/aws/aws-sdk-go-v2/service/elasticloadbalancing v1.33.23/go.mod h1:64UwjvWmvtc6HN/hbnZmLQbh+sFsqRUo8693mOu9LB4= +github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2 v1.54.10 h1:RRItb+JMcIGZSiNKLViw9fCYxQaahR+BaswSF3LkHEk= +github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2 v1.54.10/go.mod h1:qogon7Vx0cwiCEkU3x9/42gzUZbHoAr+ADGdLVHRVks= +github.com/aws/aws-sdk-go-v2/service/iam v1.53.7 
h1:n9YLiWtX3+6pTLZWvRJmtq5JIB9NA/KFelyCg5fOlTU= +github.com/aws/aws-sdk-go-v2/service/iam v1.53.7/go.mod h1:sP46Vo6MeJcM4s0ZXcG2PFmfiSyixhIuC/74W52yKuk= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.7 h1:5EniKhLZe4xzL7a+fU3C2tfUN4nWIqlLesfrjkuPFTY= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.7/go.mod h1:x0nZssQ3qZSnIcePWLvcoFisRXJzcTVvYpAAdYX8+GI= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.12 h1:qtJZ70afD3ISKWnoX3xB0J2otEqu3LqicRcDBqsj0hQ= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.12/go.mod h1:v2pNpJbRNl4vEUWEh5ytQok0zACAKfdmKS51Hotc3pQ= -github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.11.20 h1:ru+seMuylHiNZlvgZei83eD8h37hRjm1XIMOEmcV0BU= -github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.11.20/go.mod h1:ihZMtPTKoX/ugQRHbui6zNdSgVYN1KY2Dgwb2d3hXlc= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.20 h1:2HvVAIq+YqgGotK6EkMf+KIEqTISmTYh5zLpYyeTo1Y= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.20/go.mod h1:V4X406Y666khGa8ghKmphma/7C0DAtEQYhkq9z4vpbk= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.20 h1:siU1A6xjUZ2N8zjTHSXFhB9L/2OY8Dqs0xXiLjF30jA= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.20/go.mod h1:4TLZCmVJDM3FOu5P5TJP0zOlu9zWgDWU7aUxWbr+rcw= -github.com/aws/aws-sdk-go-v2/service/kms v1.50.3 h1:s/zDSG/a/Su9aX+v0Ld9cimUCdkr5FWPmBV8owaEbZY= -github.com/aws/aws-sdk-go-v2/service/kms v1.50.3/go.mod h1:/iSgiUor15ZuxFGQSTf3lA2FmKxFsQoc2tADOarQBSw= -github.com/aws/aws-sdk-go-v2/service/lambda v1.88.3 h1:VlSZQKfbHSjeKJaTpBfp3WVxPH7qa2SbneFtjT9vft8= -github.com/aws/aws-sdk-go-v2/service/lambda v1.88.3/go.mod h1:/C3/ZU9bR0pjMwIYivZVpdcj4HjvOfk+OTPiiXKoTSE= -github.com/aws/aws-sdk-go-v2/service/networkfirewall v1.59.6 h1:++XPZP+nXaKtZ755Yt8sfqu6lzyyIOu66CaUKA7o8eE= -github.com/aws/aws-sdk-go-v2/service/networkfirewall v1.59.6/go.mod h1:geeH6hXRfXvEXn5tnVljRTl7PyDA3N6fadTnp4z5s8c= 
-github.com/aws/aws-sdk-go-v2/service/networkmanager v1.41.7 h1:NKDyxMTFdm1C/+a2mt4QqmAk2GEfC1iETCsyw9qCEow= -github.com/aws/aws-sdk-go-v2/service/networkmanager v1.41.7/go.mod h1:SGskKh/tt+FOs3//n2K6rNvdsfHQ91hPe7XRtRejOEg= -github.com/aws/aws-sdk-go-v2/service/rds v1.116.3 h1:H/ZYZ6QR4EXJAYElI5xkIM/yCz+A4uHIvWpzl+IfJks= -github.com/aws/aws-sdk-go-v2/service/rds v1.116.3/go.mod h1:QbXW4coAMakHQhf1qhE0eVVCen9gwB/Kvn+HHHKhpGY= -github.com/aws/aws-sdk-go-v2/service/route53 v1.62.4 h1:64aYPyHg3RjLvnMMSYQSg7aP+r1WRCPIS9SP9KfHjWg= -github.com/aws/aws-sdk-go-v2/service/route53 v1.62.4/go.mod h1:bPSPzWTn9LSX6e0KPp4LlPoaspouZdKAlIdSMdhBBrs= -github.com/aws/aws-sdk-go-v2/service/s3 v1.97.1 h1:csi9NLpFZXb9fxY7rS1xVzgPRGMt7MSNWeQ6eo247kE= -github.com/aws/aws-sdk-go-v2/service/s3 v1.97.1/go.mod h1:qXVal5H0ChqXP63t6jze5LmFalc7+ZE7wOdLtZ0LCP0= -github.com/aws/aws-sdk-go-v2/service/signin v1.0.8 h1:0GFOLzEbOyZABS3PhYfBIx2rNBACYcKty+XGkTgw1ow= -github.com/aws/aws-sdk-go-v2/service/signin v1.0.8/go.mod h1:LXypKvk85AROkKhOG6/YEcHFPoX+prKTowKnVdcaIxE= -github.com/aws/aws-sdk-go-v2/service/sns v1.39.14 h1:p8WdWDh5AwSZdp19Haa3XMyPCICi9Z375a/Nu3IIEZY= -github.com/aws/aws-sdk-go-v2/service/sns v1.39.14/go.mod h1:NKVY7DER6VXHkt2I/ycmHakALNboi3Rqwt4eEf/1Cnk= -github.com/aws/aws-sdk-go-v2/service/sqs v1.42.24 h1:JP2wjWGmUp8lTCZb13Dv0Eciyc1jbO8pd0HZVMHFlrc= -github.com/aws/aws-sdk-go-v2/service/sqs v1.42.24/go.mod h1:Ql9ziDutk8ERAN9HMaYANCW3lop451ppebkxEJMLCTM= -github.com/aws/aws-sdk-go-v2/service/ssm v1.68.3 h1:bBoWhx8lsFLTXintRX64ZBXcmFZbGqUmaPUrjXECqIc= -github.com/aws/aws-sdk-go-v2/service/ssm v1.68.3/go.mod h1:rcRkKbUJ2437WuXdq9fbj+MjTudYWzY9Ct8kiBbN8a8= -github.com/aws/aws-sdk-go-v2/service/sso v1.30.13 h1:kiIDLZ005EcKomYYITtfsjn7dtOwHDOFy7IbPXKek2o= -github.com/aws/aws-sdk-go-v2/service/sso v1.30.13/go.mod h1:2h/xGEowcW/g38g06g3KpRWDlT+OTfxxI0o1KqayAB8= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.17 h1:jzKAXIlhZhJbnYwHbvUQZEB8KfgAEuG0dc08Bkda7NU= 
-github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.17/go.mod h1:Al9fFsXjv4KfbzQHGe6V4NZSZQXecFcvaIF4e70FoRA= -github.com/aws/aws-sdk-go-v2/service/sts v1.41.9 h1:Cng+OOwCHmFljXIxpEVXAGMnBia8MSU6Ch5i9PgBkcU= -github.com/aws/aws-sdk-go-v2/service/sts v1.41.9/go.mod h1:LrlIndBDdjA/EeXeyNBle+gyCwTlizzW5ycgWnvIxkk= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.13 h1:JRaIgADQS/U6uXDqlPiefP32yXTda7Kqfx+LgspooZM= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.13/go.mod h1:CEuVn5WqOMilYl+tbccq8+N2ieCy0gVn3OtRb0vBNNM= +github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.11.21 h1:FTg+rVAPx1W21jsO57pxDS1ESy9a/JLFoaHeFubflJA= +github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.11.21/go.mod h1:92xP4VIS1yO3eF2NPBaHGF4cmyZow8TmFzSaz1nNgzo= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.21 h1:c31//R3xgIJMSC8S6hEVq+38DcvUlgFY0FM6mSI5oto= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.21/go.mod h1:r6+pf23ouCB718FUxaqzZdbpYFyDtehyZcmP5KL9FkA= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.21 h1:ZlvrNcHSFFWURB8avufQq9gFsheUgjVD9536obIknfM= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.21/go.mod h1:cv3TNhVrssKR0O/xxLJVRfd2oazSnZnkUeTf6ctUwfQ= +github.com/aws/aws-sdk-go-v2/service/kms v1.50.4 h1:PgD1y0ZagPokGIZPmejCBUySBzOFDN+leZxCOfb1OEQ= +github.com/aws/aws-sdk-go-v2/service/kms v1.50.4/go.mod h1:FfXDb5nXrsoGgxsBFxwxr3vdHXheC2tV+6lmuLghhjQ= +github.com/aws/aws-sdk-go-v2/service/lambda v1.88.5 h1:HWN7xwaV7Zwrn3Jlauio4u4aTMFgRzG2fblHWQeir/k= +github.com/aws/aws-sdk-go-v2/service/lambda v1.88.5/go.mod h1:6HBXRyFFqOw+ALkJ6YGHfrr20/YXYv6X9pcZErXRvCA= +github.com/aws/aws-sdk-go-v2/service/networkfirewall v1.59.7 h1:kkxrST717WHMqmCe6myIQjNTdiUXv6hGGW8CLLFOl2Q= +github.com/aws/aws-sdk-go-v2/service/networkfirewall v1.59.7/go.mod h1:mpeX4/K1mJnYC1oDVGuO4iXkymiExTSEVXaYirfcTXE= +github.com/aws/aws-sdk-go-v2/service/networkmanager v1.41.8 
h1:in8vksq09Mua8K3TQPKP7M5kU0K9AFm+L7KiI5HlnJU= +github.com/aws/aws-sdk-go-v2/service/networkmanager v1.41.8/go.mod h1:BiwqPwD9B+QSZ26gyrODEJYFsUu3dx3ymODLMt4SiyI= +github.com/aws/aws-sdk-go-v2/service/rds v1.117.1 h1:LwcVYTKHBsQPhD0evNWtHIH8+xQG62kQaXmWJbLd7jg= +github.com/aws/aws-sdk-go-v2/service/rds v1.117.1/go.mod h1:EbQarE9odk5+EEhP2Yr6NjDEhms3PU3k9/qZ2GRpOuc= +github.com/aws/aws-sdk-go-v2/service/route53 v1.62.5 h1:Z+/OLsb85Kpq7TVLCspskqePaf68Tdv6GfmJP4kH6i0= +github.com/aws/aws-sdk-go-v2/service/route53 v1.62.5/go.mod h1:TmxGowuBYwjmHFOsEDxaZdsQE62JJzOmtiWafTi/czg= +github.com/aws/aws-sdk-go-v2/service/s3 v1.97.3 h1:HwxWTbTrIHm5qY+CAEur0s/figc3qwvLWsNkF4RPToo= +github.com/aws/aws-sdk-go-v2/service/s3 v1.97.3/go.mod h1:uoA43SdFwacedBfSgfFSjjCvYe8aYBS7EnU5GZ/YKMM= +github.com/aws/aws-sdk-go-v2/service/signin v1.0.9 h1:QKZH0S178gCmFEgst8hN0mCX1KxLgHBKKY/CLqwP8lg= +github.com/aws/aws-sdk-go-v2/service/signin v1.0.9/go.mod h1:7yuQJoT+OoH8aqIxw9vwF+8KpvLZ8AWmvmUWHsGQZvI= +github.com/aws/aws-sdk-go-v2/service/sns v1.39.15 h1:rOWMUrXJPcTXnk75ja6Bxv1P+j83dPhIWjfJ2cujj34= +github.com/aws/aws-sdk-go-v2/service/sns v1.39.15/go.mod h1:4exx1wZR0pe+WcMbas8OZ2krRrBbW7IUUvLXCCQbjkg= +github.com/aws/aws-sdk-go-v2/service/sqs v1.42.25 h1:8Bv3TQ1Cob6HLlpUbAnWxeHhAkYScJO9RIHh2WPXaxw= +github.com/aws/aws-sdk-go-v2/service/sqs v1.42.25/go.mod h1:eDstEbM0OEnBUnNQxIA7j74Jy61cCU1S4EMlCtdMwzs= +github.com/aws/aws-sdk-go-v2/service/ssm v1.68.4 h1:5Wg8AAAnIWM2LE/0KFGqllZff96bm4dBs+uerYFfReE= +github.com/aws/aws-sdk-go-v2/service/ssm v1.68.4/go.mod h1:nph0ypDLWm9D9iA9zOX39W/N+A4GqwzlxA13jzXVD4k= +github.com/aws/aws-sdk-go-v2/service/sso v1.30.14 h1:GcLE9ba5ehAQma6wlopUesYg/hbcOhFNWTjELkiWkh4= +github.com/aws/aws-sdk-go-v2/service/sso v1.30.14/go.mod h1:WSvS1NLr7JaPunCXqpJnWk1Bjo7IxzZXrZi1QQCkuqM= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.18 h1:mP49nTpfKtpXLt5SLn8Uv8z6W+03jYVoOSAl/c02nog= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.18/go.mod 
h1:YO8TrYtFdl5w/4vmjL8zaBSsiNp3w0L1FfKVKenZT7w= +github.com/aws/aws-sdk-go-v2/service/sts v1.41.10 h1:p8ogvvLugcR/zLBXTXrTkj0RYBUdErbMnAFFp12Lm/U= +github.com/aws/aws-sdk-go-v2/service/sts v1.41.10/go.mod h1:60dv0eZJfeVXfbT1tFJinbHrDfSJ2GZl4Q//OSSNAVw= github.com/aws/smithy-go v1.24.2 h1:FzA3bu/nt/vDvmnkg+R8Xl46gmzEDam6mZ1hzmwXFng= github.com/aws/smithy-go v1.24.2/go.mod h1:YE2RhdIuDbA5E5bTdciG9KrW3+TiEONeUWCqxX9i1Fc= github.com/aymanbagabas/go-osc52/v2 v2.0.1 h1:HwpRHbFMcZLEVr42D4p7XBqjyuxQH5SMiErDT4WkJ2k= @@ -354,8 +354,8 @@ github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM= github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ= -github.com/getsentry/sentry-go v0.43.0 h1:XbXLpFicpo8HmBDaInk7dum18G9KSLcjZiyUKS+hLW4= -github.com/getsentry/sentry-go v0.43.0/go.mod h1:XDotiNZbgf5U8bPDUAfvcFmOnMQQceESxyKaObSssW0= +github.com/getsentry/sentry-go v0.44.1 h1:/cPtrA5qB7uMRrhgSn9TYtcEF36auGP3Y6+ThvD/yaI= +github.com/getsentry/sentry-go v0.44.1/go.mod h1:XDotiNZbgf5U8bPDUAfvcFmOnMQQceESxyKaObSssW0= github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI= @@ -421,8 +421,8 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/enterprise-certificate-proxy v0.3.14 h1:yh8ncqsbUY4shRD5dA6RlzjJaT4hi3kII+zYw8wmLb8= github.com/googleapis/enterprise-certificate-proxy v0.3.14/go.mod h1:vqVt9yG9480NtzREnTlmGSBmFrA+bzb0yl0TxoBQXOg= -github.com/googleapis/gax-go/v2 v2.19.0 
h1:fYQaUOiGwll0cGj7jmHT/0nPlcrZDFPrZRhTsoCr8hE= -github.com/googleapis/gax-go/v2 v2.19.0/go.mod h1:w2ROXVdfGEVFXzmlciUU4EdjHgWvB5h2n6x/8XSTTJA= +github.com/googleapis/gax-go/v2 v2.20.0 h1:NIKVuLhDlIV74muWlsMM4CcQZqN6JJ20Qcxd9YMuYcs= +github.com/googleapis/gax-go/v2 v2.20.0/go.mod h1:But/NJU6TnZsrLai/xBAQLLz+Hc7fHZJt/hsCz3Fih4= github.com/gookit/color v1.4.2/go.mod h1:fqRyamkC1W8uxl+lxCQxOT09l/vYfZ+QeiX3rKQHCoQ= github.com/gookit/color v1.5.0/go.mod h1:43aQb+Zerm/BWh2GnrgOQm7ffz7tvQXEKV6BFMl7wAo= github.com/gookit/color v1.5.4 h1:FZmqs7XOyGgCAxmWyPslpiok1k05wmY3SJTytgvYFs0= @@ -506,8 +506,8 @@ github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4 github.com/kevinburke/ssh_config v1.2.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= github.com/keybase/go-keychain v0.0.1 h1:way+bWYa6lDppZoZcgMbYsvC7GxljxrskdNInRtuthU= github.com/keybase/go-keychain v0.0.1/go.mod h1:PdEILRW3i9D8JcdM+FmY6RwkHGnhHxXwkPPMeUgOK1k= -github.com/klauspost/compress v1.18.4 h1:RPhnKRAQ4Fh8zU2FY/6ZFDwTVTxgJ/EMydqSTzE9a2c= -github.com/klauspost/compress v1.18.4/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4= +github.com/klauspost/compress v1.18.5 h1:/h1gH5Ce+VWNLSWqPzOVn6XBO+vJbCNGvjoaGBFW2IE= +github.com/klauspost/compress v1.18.5/go.mod h1:cwPg85FWrGar70rWktvGQj8/hthj3wpl0PGDogxkrSQ= github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.0.10/go.mod h1:g2LTdtYhdyuGPqyWyv7qRAmj1WBqxuObKfj5c0PQa7c= github.com/klauspost/cpuid/v2 v2.0.12/go.mod h1:g2LTdtYhdyuGPqyWyv7qRAmj1WBqxuObKfj5c0PQa7c= @@ -590,10 +590,10 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/nats-io/jwt/v2 v2.8.1 h1:V0xpGuD/N8Mi+fQNDynXohVvp7ZztevW5io8CUWlPmU= github.com/nats-io/jwt/v2 v2.8.1/go.mod h1:nWnOEEiVMiKHQpnAy4eXlizVEtSfzacZ1Q43LIRavZg= 
-github.com/nats-io/nats-server/v2 v2.12.5 h1:EOHLbsLJgUHUwzkj9gBTOlubkX+dmSs0EYWMdBiHivU= -github.com/nats-io/nats-server/v2 v2.12.5/go.mod h1:JQDAKcwdXs0NRhvYO31dzsXkzCyDkOBS7SKU3Nozu14= -github.com/nats-io/nats.go v1.49.0 h1:yh/WvY59gXqYpgl33ZI+XoVPKyut/IcEaqtsiuTJpoE= -github.com/nats-io/nats.go v1.49.0/go.mod h1:fDCn3mN5cY8HooHwE2ukiLb4p4G4ImmzvXyJt+tGwdw= +github.com/nats-io/nats-server/v2 v2.12.6 h1:Egbx9Vl7Ch8wTtpXPGqbehkZ+IncKqShUxvrt1+Enc8= +github.com/nats-io/nats-server/v2 v2.12.6/go.mod h1:4HPlrvtmSO3yd7KcElDNMx9kv5EBJBnJJzQPptXlheo= +github.com/nats-io/nats.go v1.50.0 h1:5zAeQrTvyrKrWLJ0fu02W3br8ym57qf7csDzgLOpcds= +github.com/nats-io/nats.go v1.50.0/go.mod h1:26HypzazeOkyO3/mqd1zZd53STJN0EjCYF9Uy2ZOBno= github.com/nats-io/nkeys v0.4.15 h1:JACV5jRVO9V856KOapQ7x+EY8Jo3qw1vJt/9Jpwzkk4= github.com/nats-io/nkeys v0.4.15/go.mod h1:CpMchTXC9fxA5zrMo4KpySxNjiDVvr8ANOSZdiNfUrs= github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw= @@ -790,8 +790,8 @@ golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546 h1:mgKeJMpvi0yx/sU5GsxQ7p6s2 golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546/go.mod h1:j/pmGrbnkbPtQfxEe5D0VQhZC6qKbfKifgD0oM7sR70= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.33.0 h1:tHFzIWbBifEmbwtGz65eaWyGiGZatSrT9prnU8DbVL8= -golang.org/x/mod v0.33.0/go.mod h1:swjeQEj+6r7fODbD2cqrnje9PnziFuw4bmLbBZFrQ5w= +golang.org/x/mod v0.34.0 h1:xIHgNUUnW6sYkcM5Jleh05DvLOtwc6RitGHbDk4akRI= +golang.org/x/mod v0.34.0/go.mod h1:ykgH52iCZe79kzLLMhyCUzhMci+nQj+0XkbXpNYtVjY= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod 
h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= @@ -858,15 +858,15 @@ golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da h1:noIWHXmPHxILtqtCOPIhS golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= gonum.org/v1/gonum v0.17.0 h1:VbpOemQlsSMrYmn7T2OUvQ4dqxQXU+ouZFQsZOx50z4= gonum.org/v1/gonum v0.17.0/go.mod h1:El3tOrEuMpv2UdMrbNlKEh9vd86bmQ6vqIcDwxEOc1E= -google.golang.org/api v0.272.0 h1:eLUQZGnAS3OHn31URRf9sAmRk3w2JjMx37d2k8AjJmA= -google.golang.org/api v0.272.0/go.mod h1:wKjowi5LNJc5qarNvDCvNQBn3rVK8nSy6jg2SwRwzIA= +google.golang.org/api v0.273.0 h1:r/Bcv36Xa/te1ugaN1kdJ5LoA5Wj/cL+a4gj6FiPBjQ= +google.golang.org/api v0.273.0/go.mod h1:JbAt7mF+XVmWu6xNP8/+CTiGH30ofmCmk9nM8d8fHew= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= -google.golang.org/genproto v0.0.0-20260316180232-0b37fe3546d5 h1:JNfk58HZ8lfmXbYK2vx/UvsqIL59TzByCxPIX4TDmsE= -google.golang.org/genproto v0.0.0-20260316180232-0b37fe3546d5/go.mod h1:x5julN69+ED4PcFk/XWayw35O0lf/nGa4aNgODCmNmw= -google.golang.org/genproto/googleapis/api v0.0.0-20260316180232-0b37fe3546d5 h1:CogIeEXn4qWYzzQU0QqvYBM8yDF9cFYzDq9ojSpv0Js= -google.golang.org/genproto/googleapis/api v0.0.0-20260316180232-0b37fe3546d5/go.mod h1:EIQZ5bFCfRQDV4MhRle7+OgjNtZ6P1PiZBgAKuxXu/Y= +google.golang.org/genproto v0.0.0-20260319201613-d00831a3d3e7 h1:XzmzkmB14QhVhgnawEVsOn6OFsnpyxNPRY9QV01dNB0= +google.golang.org/genproto v0.0.0-20260319201613-d00831a3d3e7/go.mod h1:L43LFes82YgSonw6iTXTxXUX1OlULt4AQtkik4ULL/I= +google.golang.org/genproto/googleapis/api v0.0.0-20260319201613-d00831a3d3e7 h1:41r6JMbpzBMen0R/4TZeeAmGXSJC7DftGINUodzTkPI= +google.golang.org/genproto/googleapis/api v0.0.0-20260319201613-d00831a3d3e7/go.mod h1:EIQZ5bFCfRQDV4MhRle7+OgjNtZ6P1PiZBgAKuxXu/Y= 
google.golang.org/genproto/googleapis/rpc v0.0.0-20260319201613-d00831a3d3e7 h1:ndE4FoJqsIceKP2oYSnUZqhTdYufCYYkqwtFzfrhI7w= google.golang.org/genproto/googleapis/rpc v0.0.0-20260319201613-d00831a3d3e7/go.mod h1:4Hqkh8ycfw05ld/3BWL7rJOSfebL2Q+DVDeRgYgxUU8= google.golang.org/grpc v1.79.3 h1:sybAEdRIEtvcD68Gx7dmnwjZKlyfuc61Dyo9pGXXkKE= diff --git a/go/cliauth/cliauth.go b/go/cliauth/cliauth.go new file mode 100644 index 00000000..970d5274 --- /dev/null +++ b/go/cliauth/cliauth.go @@ -0,0 +1,386 @@ +// Package cliauth provides shared CLI authentication logic for OAuth device flow, +// API key exchange, and token caching. +// +// This package is used by both the public overmind CLI and the area51-cli to avoid +// code duplication and ensure consistent authentication behavior. +package cliauth + +import ( + "bufio" + "context" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io" + "net/url" + "os" + "path/filepath" + "strings" + "time" + + "connectrpc.com/connect" + "github.com/overmindtech/cli/go/auth" + sdp "github.com/overmindtech/cli/go/sdp-go" + "github.com/overmindtech/cli/go/sdp-go/sdpconnect" + "github.com/overmindtech/cli/go/tracing" + "github.com/pkg/browser" + "golang.org/x/oauth2" +) + +// Logger is an interface for outputting authentication messages. +// Implementations can use pterm, slog, or any other logging framework. +type Logger interface { + Info(msg string, keysAndValues ...any) + Error(msg string, keysAndValues ...any) +} + +// ConfirmUntrustedHost checks whether appURL points to a trusted Overmind host +// (see [sdp.IsTrustedHost]). If not, it writes a warning to w and reads a +// [y/N] confirmation from stdin. Returns nil when the host is trusted or the +// user confirms; returns an error otherwise. +// +// Set hasAPIKey to true when an API key is configured so the warning can +// mention that the key will be sent to the untrusted host. 
+func ConfirmUntrustedHost(appURL string, hasAPIKey bool, stdin io.Reader, w io.Writer) error { + parsed, err := url.Parse(appURL) + if err != nil { + return fmt.Errorf("invalid app URL %q: %w", appURL, err) + } + + if sdp.IsTrustedHost(parsed.Hostname()) { + return nil + } + + credentialDetail := "OAuth tokens" //nolint:gosec // G101 false positive: this is a user-facing label, not a credential + if hasAPIKey { + credentialDetail = "your API key and OAuth tokens" + } + + fmt.Fprintf(w, "\n WARNING: The target host %q is not a known Overmind domain.\n", parsed.Hostname()) + fmt.Fprintf(w, " Credentials (%s) will be sent to this host.\n", credentialDetail) + fmt.Fprintf(w, "\n Only continue if you trust this host.\n\n") + fmt.Fprintf(w, " Continue? [y/N]: ") + + reader := bufio.NewReader(stdin) + line, err := reader.ReadString('\n') + if err != nil && (!errors.Is(err, io.EOF) || len(line) == 0) { + return fmt.Errorf("failed to read confirmation: %w", err) + } + + answer := strings.TrimSpace(strings.ToLower(line)) + if answer != "y" && answer != "yes" { + return errors.New("aborted: untrusted host not confirmed") + } + + return nil +} + +// TokenFile represents the ~/.overmind/token.json file structure. +// This format is shared between all Overmind CLI tools. +type TokenFile struct { + AuthEntries map[string]*TokenEntry `json:"auth_entries"` +} + +// TokenEntry represents a single auth entry in the token file +type TokenEntry struct { + Token *oauth2.Token `json:"token"` + AddedDate time.Time `json:"added_date"` +} + +// ReadLocalToken reads a cached token from ~/.overmind/token.json for the given +// app URL. Returns the token and its current scopes if valid and sufficient. 
+func ReadLocalToken(homeDir, app string, requiredScopes []string, log Logger) (*oauth2.Token, []string, error) { + path := filepath.Join(homeDir, ".overmind", "token.json") + + tokenFile := new(TokenFile) + + if _, err := os.Stat(path); err != nil { + return nil, nil, err + } + + file, err := os.Open(path) + if err != nil { + return nil, nil, fmt.Errorf("error opening token file at %q: %w", path, err) + } + defer file.Close() + + err = json.NewDecoder(file).Decode(tokenFile) + if err != nil { + return nil, nil, fmt.Errorf("error decoding token file at %q: %w", path, err) + } + + authEntry, ok := tokenFile.AuthEntries[app] + if !ok { + return nil, nil, fmt.Errorf("no token found for app %s in %q", app, path) + } + + if authEntry == nil { + return nil, nil, fmt.Errorf("token entry for app %s is null in %q", app, path) + } + + if authEntry.Token == nil { + return nil, nil, fmt.Errorf("token for app %s is null in %q", app, path) + } + if !authEntry.Token.Valid() { + return nil, nil, errors.New("token is no longer valid") + } + + claims, err := ExtractClaims(authEntry.Token.AccessToken) + if err != nil { + return nil, nil, fmt.Errorf("error extracting claims from token: %s in %q: %w", app, path, err) + } + if claims.Scope == "" { + return nil, nil, errors.New("token does not have any scopes") + } + + currentScopes := strings.Split(claims.Scope, " ") + + ok, missing, err := HasScopesFlexible(authEntry.Token, requiredScopes) + if err != nil { + return nil, currentScopes, fmt.Errorf("error checking token scopes: %s in %q: %w", app, path, err) + } + if !ok { + return nil, currentScopes, fmt.Errorf("local token is missing this permission: '%v'. %s in %q", missing, app, path) + } + + log.Info("Using local token", "app", app, "path", path) + return authEntry.Token, currentScopes, nil +} + +// SaveLocalToken saves a token to ~/.overmind/token.json with secure permissions +// (directory 0700, file 0600). The token is keyed by app URL so multiple +// environments can coexist. 
+func SaveLocalToken(homeDir, app string, token *oauth2.Token, log Logger) error { + path := filepath.Join(homeDir, ".overmind", "token.json") + dir := filepath.Dir(path) + + tokenFile := &TokenFile{ + AuthEntries: make(map[string]*TokenEntry), + } + + if _, err := os.Stat(path); err == nil { + file, err := os.Open(path) + if err == nil { + defer file.Close() + + err = json.NewDecoder(file).Decode(tokenFile) + if err != nil { + return fmt.Errorf("error decoding token file at %q: %w", path, err) + } + + if tokenFile.AuthEntries == nil { + tokenFile.AuthEntries = make(map[string]*TokenEntry) + } + } + } else { + err = os.MkdirAll(dir, 0700) + if err != nil { + return fmt.Errorf("unexpected fail creating directories: %w", err) + } + } + + if err := os.Chmod(dir, 0700); err != nil { + return fmt.Errorf("failed to set directory permissions: %w", err) + } + + tokenFile.AuthEntries[app] = &TokenEntry{ + Token: token, + AddedDate: time.Now(), + } + + file, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) + if err != nil { + return fmt.Errorf("error creating token file at %q: %w", path, err) + } + defer file.Close() + + err = json.NewEncoder(file).Encode(tokenFile) + if err != nil { + return fmt.Errorf("error encoding token file at %q: %w", path, err) + } + + if err := os.Chmod(path, 0600); err != nil { + return fmt.Errorf("failed to set file permissions: %w", err) + } + + log.Info("Saved token locally", "app", app, "path", path) + return nil +} + +// HasScopesFlexible checks if a token has the required scopes. A service:write +// scope is treated as satisfying service:read. 
+func HasScopesFlexible(token *oauth2.Token, requiredScopes []string) (bool, string, error) { + if token == nil { + return false, "", errors.New("HasScopesFlexible: token is nil") + } + + claims, err := ExtractClaims(token.AccessToken) + if err != nil { + return false, "", fmt.Errorf("error extracting claims from token: %w", err) + } + + for _, scope := range requiredScopes { + if !claims.HasScope(scope) { + sections := strings.Split(scope, ":") + var hasWriteInstead bool + + if len(sections) == 2 { + service, action := sections[0], sections[1] + if action == "read" { + hasWriteInstead = claims.HasScope(fmt.Sprintf("%v:write", service)) + } + } + + if !hasWriteInstead { + return false, scope, nil + } + } + } + + return true, "", nil +} + +// ExtractClaims extracts custom claims from a JWT token without verifying the +// signature. Signature verification is the server's responsibility; we only +// need the claims for scope checking. +func ExtractClaims(token string) (*auth.CustomClaims, error) { + sections := strings.Split(token, ".") + if len(sections) != 3 { + return nil, errors.New("token is not a JWT") + } + + decodedPayload, err := base64.RawURLEncoding.DecodeString(sections[1]) + if err != nil { + return nil, fmt.Errorf("error decoding token payload: %w", err) + } + + claims := new(auth.CustomClaims) + err = json.Unmarshal(decodedPayload, claims) + if err != nil { + return nil, fmt.Errorf("error parsing token payload: %w", err) + } + + return claims, nil +} + +// GetOauthToken authenticates using the OAuth2 device authorization flow. +// It first checks for a cached token in ~/.overmind/token.json and falls back +// to the interactive device flow if needed. New tokens are cached for reuse. 
+func GetOauthToken(ctx context.Context, oi sdp.OvermindInstance, app string, requiredScopes []string, log Logger) (*oauth2.Token, error) { + var localScopes []string + var localToken *oauth2.Token + home, err := os.UserHomeDir() + if err == nil { + localToken, localScopes, err = ReadLocalToken(home, app, requiredScopes, log) + if err != nil { + if !os.IsNotExist(err) { + log.Info("Skipping local token, re-authenticating", "error", err.Error()) + } + } else { + return localToken, nil + } + } + + // Request the required scopes on top of whatever the current local token + // has so that we don't keep replacing it with one that has fewer scopes. + // Use a new slice to avoid mutating the caller's requiredScopes. + requestScopes := make([]string, 0, len(requiredScopes)+len(localScopes)) + requestScopes = append(requestScopes, requiredScopes...) + requestScopes = append(requestScopes, localScopes...) + + config := oauth2.Config{ + ClientID: oi.CLIClientID, + Endpoint: oauth2.Endpoint{ + AuthURL: fmt.Sprintf("https://%v/authorize", oi.Auth0Domain), + TokenURL: fmt.Sprintf("https://%v/oauth/token", oi.Auth0Domain), + DeviceAuthURL: fmt.Sprintf("https://%v/oauth/device/code", oi.Auth0Domain), + }, + Scopes: requestScopes, + } + + deviceCode, err := config.DeviceAuth(ctx, + oauth2.SetAuthURLParam("audience", oi.Audience), + oauth2.AccessTypeOffline, + ) + if err != nil { + return nil, fmt.Errorf("error getting device code: %w", err) + } + + var urlToOpen string + if deviceCode.VerificationURIComplete != "" { + urlToOpen = deviceCode.VerificationURIComplete + } else { + urlToOpen = deviceCode.VerificationURI + } + + _ = browser.OpenURL(urlToOpen) + log.Info("Open this URL in your browser to authenticate", + "url", deviceCode.VerificationURI, + "code", deviceCode.UserCode) + + token, err := config.DeviceAccessToken(ctx, deviceCode) + if err != nil { + log.Error("Unable to authenticate. 
Please try again.", "error", err.Error()) + return nil, fmt.Errorf("error getting device access token: %w", err) + } + if token == nil { + log.Error("No token received") + return nil, errors.New("no token received") + } + + log.Info("Authenticated successfully") + + if home != "" { + err = SaveLocalToken(home, app, token, log) + if err != nil { + log.Error("Error saving token", "error", err.Error()) + } + } + + return token, nil +} + +// GetAPIKeyToken exchanges an Overmind API key (ovm_api_*) for a JWT token +// via the ApiKeyService, then verifies the token has the required scopes. +func GetAPIKeyToken(ctx context.Context, oi sdp.OvermindInstance, app, apiKey string, requiredScopes []string, log Logger) (*oauth2.Token, error) { + if !strings.HasPrefix(apiKey, "ovm_api_") { + return nil, errors.New("API key does not match pattern 'ovm_api_*'") + } + + httpClient := tracing.HTTPClient() + client := sdpconnect.NewApiKeyServiceClient(httpClient, oi.ApiUrl.String()) + + resp, err := client.ExchangeKeyForToken(ctx, &connect.Request[sdp.ExchangeKeyForTokenRequest]{ + Msg: &sdp.ExchangeKeyForTokenRequest{ + ApiKey: apiKey, + }, + }) + if err != nil { + return nil, fmt.Errorf("error authenticating the API token for %s: %w", app, err) + } + + token := &oauth2.Token{ + AccessToken: resp.Msg.GetAccessToken(), + TokenType: "Bearer", + } + + ok, missing, err := HasScopesFlexible(token, requiredScopes) + if err != nil { + return nil, fmt.Errorf("error checking token scopes for %s: %w", app, err) + } + if !ok { + return nil, fmt.Errorf("authenticated successfully against %s, but your API key is missing this permission: '%v'", app, missing) + } + log.Info("Using Overmind API key", "app", app) + return token, nil +} + +// GetToken gets a token using either API key exchange (if apiKey is non-empty) +// or the OAuth device flow. 
+func GetToken(ctx context.Context, oi sdp.OvermindInstance, app, apiKey string, requiredScopes []string, log Logger) (*oauth2.Token, error) { + if apiKey != "" { + return GetAPIKeyToken(ctx, oi, app, apiKey, requiredScopes, log) + } + return GetOauthToken(ctx, oi, app, requiredScopes, log) +} diff --git a/go/cliauth/cliauth_test.go b/go/cliauth/cliauth_test.go new file mode 100644 index 00000000..b09f3a6c --- /dev/null +++ b/go/cliauth/cliauth_test.go @@ -0,0 +1,586 @@ +package cliauth + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "io" + "os" + "path/filepath" + "strings" + "testing" + "time" + + "github.com/overmindtech/cli/go/auth" + "golang.org/x/oauth2" +) + +type mockLogger struct { + infoMsgs []string + errorMsgs []string +} + +func (m *mockLogger) Info(msg string, keysAndValues ...any) { + m.infoMsgs = append(m.infoMsgs, msg) +} + +func (m *mockLogger) Error(msg string, keysAndValues ...any) { + m.errorMsgs = append(m.errorMsgs, msg) +} + +func TestExtractClaims(t *testing.T) { + testToken := "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzY29wZSI6ImFkbWluOnJlYWQgYWRtaW46d3JpdGUiLCJzdWIiOiJ0ZXN0LXVzZXIiLCJpYXQiOjEyMzQ1Njc4OTAsImV4cCI6OTk5OTk5OTk5OX0.placeholder" + + claims, err := ExtractClaims(testToken) + if err != nil { + t.Fatalf("ExtractClaims failed: %v", err) + } + + if claims.Scope != "admin:read admin:write" { + t.Errorf("Expected scope 'admin:read admin:write', got '%s'", claims.Scope) + } +} + +func TestExtractClaimsInvalidJWT(t *testing.T) { + _, err := ExtractClaims("not-a-jwt") + if err == nil { + t.Fatal("Expected error for non-JWT token, got nil") + } +} + +func TestExtractClaimsInvalidBase64(t *testing.T) { + _, err := ExtractClaims("header.!!!invalid-base64!!!.sig") + if err == nil { + t.Fatal("Expected error for invalid base64, got nil") + } +} + +func TestHasScopesFlexible(t *testing.T) { + tests := []struct { + name string + tokenScopes string + requiredScopes []string + expectOK bool + expectMissing string + }{ + { + name: 
"exact match", + tokenScopes: "admin:read", + requiredScopes: []string{"admin:read"}, + expectOK: true, + }, + { + name: "write satisfies read", + tokenScopes: "admin:write", + requiredScopes: []string{"admin:read"}, + expectOK: true, + }, + { + name: "missing scope", + tokenScopes: "changes:read", + requiredScopes: []string{"admin:read"}, + expectOK: false, + expectMissing: "admin:read", + }, + { + name: "multiple scopes all present", + tokenScopes: "admin:read changes:write", + requiredScopes: []string{"admin:read", "changes:read"}, + expectOK: true, + }, + { + name: "read does not satisfy write", + tokenScopes: "admin:read", + requiredScopes: []string{"admin:write"}, + expectOK: false, + expectMissing: "admin:write", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + testToken := &oauth2.Token{ + AccessToken: createTestJWT(tt.tokenScopes), + TokenType: "Bearer", + } + + ok, missing, err := HasScopesFlexible(testToken, tt.requiredScopes) + if err != nil { + t.Fatalf("HasScopesFlexible failed: %v", err) + } + + if ok != tt.expectOK { + t.Errorf("Expected ok=%v, got %v", tt.expectOK, ok) + } + + if !tt.expectOK && missing != tt.expectMissing { + t.Errorf("Expected missing='%s', got '%s'", tt.expectMissing, missing) + } + }) + } +} + +func TestHasScopesFlexibleNilToken(t *testing.T) { + _, _, err := HasScopesFlexible(nil, []string{"admin:read"}) + if err == nil { + t.Fatal("Expected error for nil token, got nil") + } +} + +func TestReadWriteLocalToken(t *testing.T) { + tmpDir, err := os.MkdirTemp("", "cliauth-test") + if err != nil { + t.Fatalf("Failed to create temp dir: %v", err) + } + defer os.RemoveAll(tmpDir) + + log := &mockLogger{} + app := "https://test.overmind.tech" + token := &oauth2.Token{ + AccessToken: createTestJWT("admin:read admin:write"), + TokenType: "Bearer", + Expiry: time.Now().Add(1 * time.Hour), + } + + err = SaveLocalToken(tmpDir, app, token, log) + if err != nil { + t.Fatalf("SaveLocalToken failed: %v", err) + } 
+ + tokenPath := filepath.Join(tmpDir, ".overmind", "token.json") + if _, err := os.Stat(tokenPath); os.IsNotExist(err) { + t.Fatalf("Token file was not created") + } + + readToken, scopes, err := ReadLocalToken(tmpDir, app, []string{"admin:read"}, log) + if err != nil { + t.Fatalf("ReadLocalToken failed: %v", err) + } + + if readToken.AccessToken != token.AccessToken { + t.Errorf("Token mismatch") + } + + if len(scopes) != 2 { + t.Errorf("Expected 2 scopes, got %d", len(scopes)) + } +} + +func TestReadLocalTokenWrongApp(t *testing.T) { + tmpDir, err := os.MkdirTemp("", "cliauth-test") + if err != nil { + t.Fatalf("Failed to create temp dir: %v", err) + } + defer os.RemoveAll(tmpDir) + + log := &mockLogger{} + app := "https://test.overmind.tech" + token := &oauth2.Token{ + AccessToken: createTestJWT("admin:read"), + TokenType: "Bearer", + Expiry: time.Now().Add(1 * time.Hour), + } + + if err := SaveLocalToken(tmpDir, app, token, log); err != nil { + t.Fatalf("SaveLocalToken failed: %v", err) + } + + _, _, err = ReadLocalToken(tmpDir, "https://wrong.overmind.tech", []string{"admin:read"}, log) + if err == nil { + t.Errorf("Expected error for wrong app, got nil") + } +} + +func TestReadLocalTokenInsufficientScopes(t *testing.T) { + tmpDir, err := os.MkdirTemp("", "cliauth-test") + if err != nil { + t.Fatalf("Failed to create temp dir: %v", err) + } + defer os.RemoveAll(tmpDir) + + log := &mockLogger{} + app := "https://test.overmind.tech" + token := &oauth2.Token{ + AccessToken: createTestJWT("changes:read"), + TokenType: "Bearer", + Expiry: time.Now().Add(1 * time.Hour), + } + + if err := SaveLocalToken(tmpDir, app, token, log); err != nil { + t.Fatalf("SaveLocalToken failed: %v", err) + } + + _, _, err = ReadLocalToken(tmpDir, app, []string{"admin:read"}, log) + if err == nil { + t.Errorf("Expected error for insufficient scopes, got nil") + } +} + +func TestReadLocalTokenFileNotFound(t *testing.T) { + tmpDir, err := os.MkdirTemp("", "cliauth-test") + if err != nil 
{ + t.Fatalf("Failed to create temp dir: %v", err) + } + defer os.RemoveAll(tmpDir) + + log := &mockLogger{} + _, _, err = ReadLocalToken(tmpDir, "https://test.overmind.tech", []string{"admin:read"}, log) + if err == nil { + t.Fatal("Expected error for missing file, got nil") + } +} + +func TestSaveLocalTokenSecurePermissions(t *testing.T) { + tmpDir, err := os.MkdirTemp("", "cliauth-test") + if err != nil { + t.Fatalf("Failed to create temp dir: %v", err) + } + defer os.RemoveAll(tmpDir) + + log := &mockLogger{} + token := &oauth2.Token{ + AccessToken: createTestJWT("admin:read"), + TokenType: "Bearer", + Expiry: time.Now().Add(1 * time.Hour), + } + + if err := SaveLocalToken(tmpDir, "https://test.overmind.tech", token, log); err != nil { + t.Fatalf("SaveLocalToken failed: %v", err) + } + + dirInfo, err := os.Stat(filepath.Join(tmpDir, ".overmind")) + if err != nil { + t.Fatalf("Failed to stat directory: %v", err) + } + if dirInfo.Mode().Perm() != 0700 { + t.Errorf("Expected directory permissions 0700, got %o", dirInfo.Mode().Perm()) + } + + fileInfo, err := os.Stat(filepath.Join(tmpDir, ".overmind", "token.json")) + if err != nil { + t.Fatalf("Failed to stat token file: %v", err) + } + if fileInfo.Mode().Perm() != 0600 { + t.Errorf("Expected file permissions 0600, got %o", fileInfo.Mode().Perm()) + } +} + +func TestSaveLocalTokenNilMap(t *testing.T) { + tmpDir, err := os.MkdirTemp("", "cliauth-test") + if err != nil { + t.Fatalf("Failed to create temp dir: %v", err) + } + defer os.RemoveAll(tmpDir) + + tokenPath := filepath.Join(tmpDir, ".overmind", "token.json") + if err := os.MkdirAll(filepath.Dir(tokenPath), 0700); err != nil { + t.Fatalf("Failed to create directory: %v", err) + } + + // Simulate a corrupt token file with null auth_entries + if err := os.WriteFile(tokenPath, []byte(`{"auth_entries": null}`), 0600); err != nil { + t.Fatalf("Failed to write token file: %v", err) + } + + log := &mockLogger{} + token := &oauth2.Token{ + AccessToken: 
createTestJWT("admin:read"), + TokenType: "Bearer", + Expiry: time.Now().Add(1 * time.Hour), + } + + err = SaveLocalToken(tmpDir, "https://test.overmind.tech", token, log) + if err != nil { + t.Fatalf("SaveLocalToken failed with nil map: %v", err) + } + + readToken, _, err := ReadLocalToken(tmpDir, "https://test.overmind.tech", []string{"admin:read"}, log) + if err != nil { + t.Fatalf("ReadLocalToken failed: %v", err) + } + if readToken.AccessToken != token.AccessToken { + t.Errorf("Token mismatch after nil map save") + } +} + +func TestReadLocalTokenNilEntry(t *testing.T) { + tmpDir, err := os.MkdirTemp("", "cliauth-test") + if err != nil { + t.Fatalf("Failed to create temp dir: %v", err) + } + defer os.RemoveAll(tmpDir) + + tokenPath := filepath.Join(tmpDir, ".overmind", "token.json") + if err := os.MkdirAll(filepath.Dir(tokenPath), 0700); err != nil { + t.Fatalf("Failed to create directory: %v", err) + } + + if err := os.WriteFile(tokenPath, []byte(`{"auth_entries": {"https://test.overmind.tech": null}}`), 0600); err != nil { + t.Fatalf("Failed to write token file: %v", err) + } + + log := &mockLogger{} + _, _, err = ReadLocalToken(tmpDir, "https://test.overmind.tech", []string{"admin:read"}, log) + if err == nil { + t.Fatal("Expected error for null token entry, got nil") + } + if !strings.Contains(err.Error(), "null") { + t.Errorf("Expected error to mention 'null', got: %v", err) + } +} + +func TestReadLocalTokenNilToken(t *testing.T) { + tmpDir, err := os.MkdirTemp("", "cliauth-test") + if err != nil { + t.Fatalf("Failed to create temp dir: %v", err) + } + defer os.RemoveAll(tmpDir) + + tokenPath := filepath.Join(tmpDir, ".overmind", "token.json") + if err := os.MkdirAll(filepath.Dir(tokenPath), 0700); err != nil { + t.Fatalf("Failed to create directory: %v", err) + } + + if err := os.WriteFile(tokenPath, []byte(`{"auth_entries": {"https://test.overmind.tech": {"token": null, "added_date": "2024-01-01T00:00:00Z"}}}`), 0600); err != nil { + t.Fatalf("Failed to 
write token file: %v", err) + } + + log := &mockLogger{} + _, _, err = ReadLocalToken(tmpDir, "https://test.overmind.tech", []string{"admin:read"}, log) + if err == nil { + t.Fatal("Expected error for null token, got nil") + } + if !strings.Contains(err.Error(), "null") { + t.Errorf("Expected error to mention 'null', got: %v", err) + } +} + +func TestSaveLocalTokenOverwriteExisting(t *testing.T) { + tmpDir, err := os.MkdirTemp("", "cliauth-test") + if err != nil { + t.Fatalf("Failed to create temp dir: %v", err) + } + defer os.RemoveAll(tmpDir) + + log := &mockLogger{} + app := "https://test.overmind.tech" + + token1 := &oauth2.Token{ + AccessToken: createTestJWT("admin:read"), + TokenType: "Bearer", + Expiry: time.Now().Add(1 * time.Hour), + } + token2 := &oauth2.Token{ + AccessToken: createTestJWT("admin:write"), + TokenType: "Bearer", + Expiry: time.Now().Add(1 * time.Hour), + } + + if err := SaveLocalToken(tmpDir, app, token1, log); err != nil { + t.Fatalf("SaveLocalToken (first) failed: %v", err) + } + if err := SaveLocalToken(tmpDir, app, token2, log); err != nil { + t.Fatalf("SaveLocalToken (second) failed: %v", err) + } + + readToken, _, err := ReadLocalToken(tmpDir, app, []string{"admin:write"}, log) + if err != nil { + t.Fatalf("ReadLocalToken failed: %v", err) + } + if readToken.AccessToken != token2.AccessToken { + t.Errorf("Expected second token, got first") + } +} + +func TestSaveLocalTokenMultipleApps(t *testing.T) { + tmpDir, err := os.MkdirTemp("", "cliauth-test") + if err != nil { + t.Fatalf("Failed to create temp dir: %v", err) + } + defer os.RemoveAll(tmpDir) + + log := &mockLogger{} + app1 := "https://app.overmind.tech" + app2 := "https://app.staging.overmind.tech" + + token1 := &oauth2.Token{ + AccessToken: createTestJWT("admin:read"), + TokenType: "Bearer", + Expiry: time.Now().Add(1 * time.Hour), + } + token2 := &oauth2.Token{ + AccessToken: createTestJWT("admin:write"), + TokenType: "Bearer", + Expiry: time.Now().Add(1 * time.Hour), + } + + 
if err := SaveLocalToken(tmpDir, app1, token1, log); err != nil { + t.Fatalf("SaveLocalToken (app1) failed: %v", err) + } + if err := SaveLocalToken(tmpDir, app2, token2, log); err != nil { + t.Fatalf("SaveLocalToken (app2) failed: %v", err) + } + + read1, _, err := ReadLocalToken(tmpDir, app1, []string{"admin:read"}, log) + if err != nil { + t.Fatalf("ReadLocalToken (app1) failed: %v", err) + } + if read1.AccessToken != token1.AccessToken { + t.Errorf("App1 token mismatch") + } + + read2, _, err := ReadLocalToken(tmpDir, app2, []string{"admin:write"}, log) + if err != nil { + t.Fatalf("ReadLocalToken (app2) failed: %v", err) + } + if read2.AccessToken != token2.AccessToken { + t.Errorf("App2 token mismatch") + } +} + +func TestNoSliceMutationInScopeMerge(t *testing.T) { + // Verify the pattern used in GetOauthToken doesn't mutate caller slices + requiredScopes := make([]string, 1, 10) // extra capacity — the mutation scenario + requiredScopes[0] = "admin:read" + + originalLen := len(requiredScopes) + localScopes := []string{"changes:read", "config:read"} + + // This is the safe pattern used in GetOauthToken + requestScopes := make([]string, 0, len(requiredScopes)+len(localScopes)) + requestScopes = append(requestScopes, requiredScopes...) + requestScopes = append(requestScopes, localScopes...) 
+ + if len(requiredScopes) != originalLen { + t.Errorf("Original slice length changed from %d to %d", originalLen, len(requiredScopes)) + } + if len(requestScopes) != 3 { + t.Errorf("Expected 3 scopes in combined slice, got %d", len(requestScopes)) + } +} + +func TestConfirmUntrustedHost_TrustedSkipsPrompt(t *testing.T) { + trustedURLs := []string{ + "https://app.overmind.tech", + "https://df.overmind-demo.com", + "http://localhost:3000", + "http://127.0.0.1:8080", + } + + for _, u := range trustedURLs { + t.Run(u, func(t *testing.T) { + err := ConfirmUntrustedHost(u, false, strings.NewReader(""), io.Discard) + if err != nil { + t.Errorf("Expected no prompt for trusted URL %q, got error: %v", u, err) + } + }) + } +} + +func TestConfirmUntrustedHost_UntrustedPrompts(t *testing.T) { + tests := []struct { + name string + url string + input string + wantError bool + errMsg string + }{ + { + name: "user confirms with y", + url: "https://custom.example.com", + input: "y\n", + }, + { + name: "user confirms with yes", + url: "https://custom.example.com", + input: "yes\n", + }, + { + name: "user confirms with YES (case insensitive)", + url: "https://custom.example.com", + input: "YES\n", + }, + { + name: "user declines with n", + url: "https://custom.example.com", + input: "n\n", + wantError: true, + errMsg: "aborted", + }, + { + name: "user declines with empty (default N)", + url: "https://custom.example.com", + input: "\n", + wantError: true, + errMsg: "aborted", + }, + { + name: "user types something else", + url: "https://custom.example.com", + input: "maybe\n", + wantError: true, + errMsg: "aborted", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := ConfirmUntrustedHost(tt.url, false, strings.NewReader(tt.input), io.Discard) + if tt.wantError { + if err == nil { + t.Fatal("Expected error, got nil") + } + if tt.errMsg != "" && !strings.Contains(err.Error(), tt.errMsg) { + t.Errorf("Expected error containing %q, got: %v", tt.errMsg, 
err) + } + } else { + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + } + }) + } +} + +func TestConfirmUntrustedHost_PipedInputWithoutNewline(t *testing.T) { + // Simulates: echo -n y | area51 export-archive --change https://custom.example.com/changes/UUID + err := ConfirmUntrustedHost("https://custom.example.com", false, strings.NewReader("y"), io.Discard) + if err != nil { + t.Fatalf("Expected piped 'y' without newline to be accepted, got error: %v", err) + } + + err = ConfirmUntrustedHost("https://custom.example.com", false, strings.NewReader("n"), io.Discard) + if err == nil { + t.Fatal("Expected piped 'n' without newline to be rejected") + } + + err = ConfirmUntrustedHost("https://custom.example.com", false, strings.NewReader(""), io.Discard) + if err == nil { + t.Fatal("Expected empty piped input to be rejected") + } +} + +func TestConfirmUntrustedHost_WarningMentionsAPIKey(t *testing.T) { + var buf strings.Builder + _ = ConfirmUntrustedHost("https://custom.example.com", true, strings.NewReader("n\n"), &buf) + output := buf.String() + if !strings.Contains(output, "API key") { + t.Errorf("Expected warning to mention API key when hasAPIKey=true, got: %s", output) + } +} + +// createTestJWT creates a minimal JWT token for testing (no signature verification) +func createTestJWT(scopes string) string { + header := "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9" + + payload := auth.CustomClaims{ + Scope: scopes, + } + payloadJSON, err := json.Marshal(payload) + if err != nil { + panic(fmt.Sprintf("failed to marshal test payload: %v", err)) + } + + payloadB64 := base64.RawURLEncoding.EncodeToString(payloadJSON) + return header + "." 
+ payloadB64 + ".test-signature" +} diff --git a/go/sdp-go/changes.go b/go/sdp-go/changes.go index b55d29da..b6553901 100644 --- a/go/sdp-go/changes.go +++ b/go/sdp-go/changes.go @@ -5,7 +5,7 @@ import ( "fmt" "github.com/google/uuid" - "gopkg.in/yaml.v3" + "go.yaml.in/yaml/v3" ) // GetUUIDParsed returns the parsed UUID from the ChangeMetadata, or nil if invalid. diff --git a/go/sdp-go/config.pb.go b/go/sdp-go/config.pb.go index 412962bd..79669c5a 100644 --- a/go/sdp-go/config.pb.go +++ b/go/sdp-go/config.pb.go @@ -24,6 +24,59 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) +// Controls when a GitHub Check Run concludes as failure vs success. +type CheckRunMode int32 + +const ( + // Always conclude as success regardless of risks found (default). + CheckRunMode_CHECK_RUN_MODE_REPORT_ONLY CheckRunMode = 0 + // Conclude as failure only when high-severity risks exist. + CheckRunMode_CHECK_RUN_MODE_FAIL_HIGH_SEVERITY CheckRunMode = 1 + // Conclude as failure when any risks exist. + CheckRunMode_CHECK_RUN_MODE_FAIL_ANY_RISK CheckRunMode = 2 +) + +// Enum value maps for CheckRunMode. 
+var ( + CheckRunMode_name = map[int32]string{ + 0: "CHECK_RUN_MODE_REPORT_ONLY", + 1: "CHECK_RUN_MODE_FAIL_HIGH_SEVERITY", + 2: "CHECK_RUN_MODE_FAIL_ANY_RISK", + } + CheckRunMode_value = map[string]int32{ + "CHECK_RUN_MODE_REPORT_ONLY": 0, + "CHECK_RUN_MODE_FAIL_HIGH_SEVERITY": 1, + "CHECK_RUN_MODE_FAIL_ANY_RISK": 2, + } +) + +func (x CheckRunMode) Enum() *CheckRunMode { + p := new(CheckRunMode) + *p = x + return p +} + +func (x CheckRunMode) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (CheckRunMode) Descriptor() protoreflect.EnumDescriptor { + return file_config_proto_enumTypes[0].Descriptor() +} + +func (CheckRunMode) Type() protoreflect.EnumType { + return &file_config_proto_enumTypes[0] +} + +func (x CheckRunMode) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use CheckRunMode.Descriptor instead. +func (CheckRunMode) EnumDescriptor() ([]byte, []int) { + return file_config_proto_rawDescGZIP(), []int{0} +} + type AccountConfig_BlastRadiusPreset int32 const ( @@ -66,11 +119,11 @@ func (x AccountConfig_BlastRadiusPreset) String() string { } func (AccountConfig_BlastRadiusPreset) Descriptor() protoreflect.EnumDescriptor { - return file_config_proto_enumTypes[0].Descriptor() + return file_config_proto_enumTypes[1].Descriptor() } func (AccountConfig_BlastRadiusPreset) Type() protoreflect.EnumType { - return &file_config_proto_enumTypes[0] + return &file_config_proto_enumTypes[1] } func (x AccountConfig_BlastRadiusPreset) Number() protoreflect.EnumNumber { @@ -116,11 +169,11 @@ func (x GetHcpConfigResponse_Status) String() string { } func (GetHcpConfigResponse_Status) Descriptor() protoreflect.EnumDescriptor { - return file_config_proto_enumTypes[1].Descriptor() + return file_config_proto_enumTypes[2].Descriptor() } func (GetHcpConfigResponse_Status) Type() protoreflect.EnumType { - return &file_config_proto_enumTypes[1] + return &file_config_proto_enumTypes[2] 
} func (x GetHcpConfigResponse_Status) Number() protoreflect.EnumNumber { @@ -168,11 +221,11 @@ func (x RoutineChangesConfig_DurationUnit) String() string { } func (RoutineChangesConfig_DurationUnit) Descriptor() protoreflect.EnumDescriptor { - return file_config_proto_enumTypes[2].Descriptor() + return file_config_proto_enumTypes[3].Descriptor() } func (RoutineChangesConfig_DurationUnit) Type() protoreflect.EnumType { - return &file_config_proto_enumTypes[2] + return &file_config_proto_enumTypes[3] } func (x RoutineChangesConfig_DurationUnit) Number() protoreflect.EnumNumber { @@ -1100,8 +1153,10 @@ type SignalConfig struct { RoutineChangesConfig *RoutineChangesConfig `protobuf:"bytes,2,opt,name=routineChangesConfig,proto3" json:"routineChangesConfig,omitempty"` // Config for Github app profile, such as primary branch name GithubOrganisationProfile *GithubOrganisationProfile `protobuf:"bytes,3,opt,name=githubOrganisationProfile,proto3,oneof" json:"githubOrganisationProfile,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + // Controls the GitHub Check Run pass/fail conclusion criteria + CheckRunMode CheckRunMode `protobuf:"varint,4,opt,name=check_run_mode,json=checkRunMode,proto3,enum=config.CheckRunMode" json:"check_run_mode,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *SignalConfig) Reset() { @@ -1155,6 +1210,13 @@ func (x *SignalConfig) GetGithubOrganisationProfile() *GithubOrganisationProfile return nil } +func (x *SignalConfig) GetCheckRunMode() CheckRunMode { + if x != nil { + return x.CheckRunMode + } + return CheckRunMode_CHECK_RUN_MODE_REPORT_ONLY +} + type AggregationConfig struct { state protoimpl.MessageState `protogen:"open.v1"` // Alpha parameter for aggregation: controls the weighting of recent data versus older data @@ -1339,8 +1401,10 @@ type GithubAppInformation struct { RequestedOrgName *string `protobuf:"bytes,10,opt,name=requestedOrgName,proto3,oneof" 
json:"requestedOrgName,omitempty"` RequestedAt *timestamppb.Timestamp `protobuf:"bytes,11,opt,name=requestedAt,proto3,oneof" json:"requestedAt,omitempty"` RequestedBy *string `protobuf:"bytes,12,opt,name=requestedBy,proto3,oneof" json:"requestedBy,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + // Suspended status (true when GitHub org admin has suspended the installation) + Suspended *bool `protobuf:"varint,13,opt,name=suspended,proto3,oneof" json:"suspended,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *GithubAppInformation) Reset() { @@ -1457,6 +1521,13 @@ func (x *GithubAppInformation) GetRequestedBy() string { return "" } +func (x *GithubAppInformation) GetSuspended() bool { + if x != nil && x.Suspended != nil { + return *x.Suspended + } + return false +} + // this is all the information required to display the github app information type GetGithubAppInformationResponse struct { state protoimpl.MessageState `protogen:"open.v1"` @@ -1853,11 +1924,12 @@ const file_config_proto_rawDesc = "" + "\x19UpdateSignalConfigRequest\x12,\n" + "\x06config\x18\x01 \x01(\v2\x14.config.SignalConfigR\x06config\"J\n" + "\x1aUpdateSignalConfigResponse\x12,\n" + - "\x06config\x18\x01 \x01(\v2\x14.config.SignalConfigR\x06config\"\xad\x02\n" + + "\x06config\x18\x01 \x01(\v2\x14.config.SignalConfigR\x06config\"\xe9\x02\n" + "\fSignalConfig\x12G\n" + "\x11aggregationConfig\x18\x01 \x01(\v2\x19.config.AggregationConfigR\x11aggregationConfig\x12P\n" + "\x14routineChangesConfig\x18\x02 \x01(\v2\x1c.config.RoutineChangesConfigR\x14routineChangesConfig\x12d\n" + - "\x19githubOrganisationProfile\x18\x03 \x01(\v2!.config.GithubOrganisationProfileH\x00R\x19githubOrganisationProfile\x88\x01\x01B\x1c\n" + + "\x19githubOrganisationProfile\x18\x03 \x01(\v2!.config.GithubOrganisationProfileH\x00R\x19githubOrganisationProfile\x88\x01\x01\x12:\n" + + "\x0echeck_run_mode\x18\x04 
\x01(\x0e2\x14.config.CheckRunModeR\fcheckRunModeB\x1c\n" + "\x1a_githubOrganisationProfile\"5\n" + "\x11AggregationConfig\x12 \n" + "\x05alpha\x18\x01 \x01(\x02B\n" + @@ -1874,7 +1946,7 @@ const file_config_proto_rawDesc = "" + "\x05WEEKS\x10\x01\x12\n" + "\n" + "\x06MONTHS\x10\x02\" \n" + - "\x1eGetGithubAppInformationRequest\"\x9a\x05\n" + + "\x1eGetGithubAppInformationRequest\"\xcb\x05\n" + "\x14GithubAppInformation\x12&\n" + "\x0einstallationID\x18\x01 \x01(\x03R\x0einstallationID\x12 \n" + "\vinstalledBy\x18\x02 \x01(\tR\vinstalledBy\x12<\n" + @@ -1888,10 +1960,13 @@ const file_config_proto_rawDesc = "" + "\x10requestedOrgName\x18\n" + " \x01(\tH\x00R\x10requestedOrgName\x88\x01\x01\x12A\n" + "\vrequestedAt\x18\v \x01(\v2\x1a.google.protobuf.TimestampH\x01R\vrequestedAt\x88\x01\x01\x12%\n" + - "\vrequestedBy\x18\f \x01(\tH\x02R\vrequestedBy\x88\x01\x01B\x13\n" + + "\vrequestedBy\x18\f \x01(\tH\x02R\vrequestedBy\x88\x01\x01\x12!\n" + + "\tsuspended\x18\r \x01(\bH\x03R\tsuspended\x88\x01\x01B\x13\n" + "\x11_requestedOrgNameB\x0e\n" + "\f_requestedAtB\x0e\n" + - "\f_requestedBy\"s\n" + + "\f_requestedByB\f\n" + + "\n" + + "_suspended\"s\n" + "\x1fGetGithubAppInformationResponse\x12P\n" + "\x14githubAppInformation\x18\x01 \x01(\v2\x1c.config.GithubAppInformationR\x14githubAppInformation\"#\n" + "!RegenerateGithubAppProfileRequest\"\x8f\x01\n" + @@ -1905,7 +1980,11 @@ const file_config_proto_rawDesc = "" + "\x1dCreateGithubInstallURLRequest\"A\n" + "\x1eCreateGithubInstallURLResponse\x12\x1f\n" + "\vinstall_url\x18\x01 \x01(\tR\n" + - "installUrl2\xc1\t\n" + + "installUrl*w\n" + + "\fCheckRunMode\x12\x1e\n" + + "\x1aCHECK_RUN_MODE_REPORT_ONLY\x10\x00\x12%\n" + + "!CHECK_RUN_MODE_FAIL_HIGH_SEVERITY\x10\x01\x12 \n" + + "\x1cCHECK_RUN_MODE_FAIL_ANY_RISK\x10\x022\xc1\t\n" + "\x14ConfigurationService\x12U\n" + "\x10GetAccountConfig\x12\x1f.config.GetAccountConfigRequest\x1a .config.GetAccountConfigResponse\x12^\n" + 
"\x13UpdateAccountConfig\x12\".config.UpdateAccountConfigRequest\x1a#.config.UpdateAccountConfigResponse\x12R\n" + @@ -1932,102 +2011,104 @@ func file_config_proto_rawDescGZIP() []byte { return file_config_proto_rawDescData } -var file_config_proto_enumTypes = make([]protoimpl.EnumInfo, 3) +var file_config_proto_enumTypes = make([]protoimpl.EnumInfo, 4) var file_config_proto_msgTypes = make([]protoimpl.MessageInfo, 32) var file_config_proto_goTypes = []any{ - (AccountConfig_BlastRadiusPreset)(0), // 0: config.AccountConfig.BlastRadiusPreset - (GetHcpConfigResponse_Status)(0), // 1: config.GetHcpConfigResponse.Status - (RoutineChangesConfig_DurationUnit)(0), // 2: config.RoutineChangesConfig.DurationUnit - (*BlastRadiusConfig)(nil), // 3: config.BlastRadiusConfig - (*AccountConfig)(nil), // 4: config.AccountConfig - (*GetAccountConfigRequest)(nil), // 5: config.GetAccountConfigRequest - (*GetAccountConfigResponse)(nil), // 6: config.GetAccountConfigResponse - (*UpdateAccountConfigRequest)(nil), // 7: config.UpdateAccountConfigRequest - (*UpdateAccountConfigResponse)(nil), // 8: config.UpdateAccountConfigResponse - (*CreateHcpConfigRequest)(nil), // 9: config.CreateHcpConfigRequest - (*CreateHcpConfigResponse)(nil), // 10: config.CreateHcpConfigResponse - (*HcpConfig)(nil), // 11: config.HcpConfig - (*GetHcpConfigRequest)(nil), // 12: config.GetHcpConfigRequest - (*GetHcpConfigResponse)(nil), // 13: config.GetHcpConfigResponse - (*DeleteHcpConfigRequest)(nil), // 14: config.DeleteHcpConfigRequest - (*DeleteHcpConfigResponse)(nil), // 15: config.DeleteHcpConfigResponse - (*ReplaceHcpApiKeyRequest)(nil), // 16: config.ReplaceHcpApiKeyRequest - (*ReplaceHcpApiKeyResponse)(nil), // 17: config.ReplaceHcpApiKeyResponse - (*GetSignalConfigRequest)(nil), // 18: config.GetSignalConfigRequest - (*GetSignalConfigResponse)(nil), // 19: config.GetSignalConfigResponse - (*UpdateSignalConfigRequest)(nil), // 20: config.UpdateSignalConfigRequest - (*UpdateSignalConfigResponse)(nil), 
// 21: config.UpdateSignalConfigResponse - (*SignalConfig)(nil), // 22: config.SignalConfig - (*AggregationConfig)(nil), // 23: config.AggregationConfig - (*RoutineChangesConfig)(nil), // 24: config.RoutineChangesConfig - (*GetGithubAppInformationRequest)(nil), // 25: config.GetGithubAppInformationRequest - (*GithubAppInformation)(nil), // 26: config.GithubAppInformation - (*GetGithubAppInformationResponse)(nil), // 27: config.GetGithubAppInformationResponse - (*RegenerateGithubAppProfileRequest)(nil), // 28: config.RegenerateGithubAppProfileRequest - (*GithubOrganisationProfile)(nil), // 29: config.GithubOrganisationProfile - (*RegenerateGithubAppProfileResponse)(nil), // 30: config.RegenerateGithubAppProfileResponse - (*DeleteGithubAppProfileAndGithubInstallationIDRequest)(nil), // 31: config.DeleteGithubAppProfileAndGithubInstallationIDRequest - (*DeleteGithubAppProfileAndGithubInstallationIDResponse)(nil), // 32: config.DeleteGithubAppProfileAndGithubInstallationIDResponse - (*CreateGithubInstallURLRequest)(nil), // 33: config.CreateGithubInstallURLRequest - (*CreateGithubInstallURLResponse)(nil), // 34: config.CreateGithubInstallURLResponse - (*durationpb.Duration)(nil), // 35: google.protobuf.Duration - (*CreateAPIKeyResponse)(nil), // 36: apikeys.CreateAPIKeyResponse - (*timestamppb.Timestamp)(nil), // 37: google.protobuf.Timestamp + (CheckRunMode)(0), // 0: config.CheckRunMode + (AccountConfig_BlastRadiusPreset)(0), // 1: config.AccountConfig.BlastRadiusPreset + (GetHcpConfigResponse_Status)(0), // 2: config.GetHcpConfigResponse.Status + (RoutineChangesConfig_DurationUnit)(0), // 3: config.RoutineChangesConfig.DurationUnit + (*BlastRadiusConfig)(nil), // 4: config.BlastRadiusConfig + (*AccountConfig)(nil), // 5: config.AccountConfig + (*GetAccountConfigRequest)(nil), // 6: config.GetAccountConfigRequest + (*GetAccountConfigResponse)(nil), // 7: config.GetAccountConfigResponse + (*UpdateAccountConfigRequest)(nil), // 8: config.UpdateAccountConfigRequest + 
(*UpdateAccountConfigResponse)(nil), // 9: config.UpdateAccountConfigResponse + (*CreateHcpConfigRequest)(nil), // 10: config.CreateHcpConfigRequest + (*CreateHcpConfigResponse)(nil), // 11: config.CreateHcpConfigResponse + (*HcpConfig)(nil), // 12: config.HcpConfig + (*GetHcpConfigRequest)(nil), // 13: config.GetHcpConfigRequest + (*GetHcpConfigResponse)(nil), // 14: config.GetHcpConfigResponse + (*DeleteHcpConfigRequest)(nil), // 15: config.DeleteHcpConfigRequest + (*DeleteHcpConfigResponse)(nil), // 16: config.DeleteHcpConfigResponse + (*ReplaceHcpApiKeyRequest)(nil), // 17: config.ReplaceHcpApiKeyRequest + (*ReplaceHcpApiKeyResponse)(nil), // 18: config.ReplaceHcpApiKeyResponse + (*GetSignalConfigRequest)(nil), // 19: config.GetSignalConfigRequest + (*GetSignalConfigResponse)(nil), // 20: config.GetSignalConfigResponse + (*UpdateSignalConfigRequest)(nil), // 21: config.UpdateSignalConfigRequest + (*UpdateSignalConfigResponse)(nil), // 22: config.UpdateSignalConfigResponse + (*SignalConfig)(nil), // 23: config.SignalConfig + (*AggregationConfig)(nil), // 24: config.AggregationConfig + (*RoutineChangesConfig)(nil), // 25: config.RoutineChangesConfig + (*GetGithubAppInformationRequest)(nil), // 26: config.GetGithubAppInformationRequest + (*GithubAppInformation)(nil), // 27: config.GithubAppInformation + (*GetGithubAppInformationResponse)(nil), // 28: config.GetGithubAppInformationResponse + (*RegenerateGithubAppProfileRequest)(nil), // 29: config.RegenerateGithubAppProfileRequest + (*GithubOrganisationProfile)(nil), // 30: config.GithubOrganisationProfile + (*RegenerateGithubAppProfileResponse)(nil), // 31: config.RegenerateGithubAppProfileResponse + (*DeleteGithubAppProfileAndGithubInstallationIDRequest)(nil), // 32: config.DeleteGithubAppProfileAndGithubInstallationIDRequest + (*DeleteGithubAppProfileAndGithubInstallationIDResponse)(nil), // 33: config.DeleteGithubAppProfileAndGithubInstallationIDResponse + (*CreateGithubInstallURLRequest)(nil), // 34: 
config.CreateGithubInstallURLRequest + (*CreateGithubInstallURLResponse)(nil), // 35: config.CreateGithubInstallURLResponse + (*durationpb.Duration)(nil), // 36: google.protobuf.Duration + (*CreateAPIKeyResponse)(nil), // 37: apikeys.CreateAPIKeyResponse + (*timestamppb.Timestamp)(nil), // 38: google.protobuf.Timestamp } var file_config_proto_depIdxs = []int32{ - 35, // 0: config.BlastRadiusConfig.changeAnalysisTargetDuration:type_name -> google.protobuf.Duration - 0, // 1: config.AccountConfig.blastRadiusPreset:type_name -> config.AccountConfig.BlastRadiusPreset - 3, // 2: config.AccountConfig.blastRadius:type_name -> config.BlastRadiusConfig - 4, // 3: config.GetAccountConfigResponse.config:type_name -> config.AccountConfig - 4, // 4: config.UpdateAccountConfigRequest.config:type_name -> config.AccountConfig - 4, // 5: config.UpdateAccountConfigResponse.config:type_name -> config.AccountConfig - 11, // 6: config.CreateHcpConfigResponse.config:type_name -> config.HcpConfig - 36, // 7: config.CreateHcpConfigResponse.apiKey:type_name -> apikeys.CreateAPIKeyResponse - 11, // 8: config.GetHcpConfigResponse.config:type_name -> config.HcpConfig - 1, // 9: config.GetHcpConfigResponse.status:type_name -> config.GetHcpConfigResponse.Status - 11, // 10: config.ReplaceHcpApiKeyResponse.config:type_name -> config.HcpConfig - 36, // 11: config.ReplaceHcpApiKeyResponse.apiKey:type_name -> apikeys.CreateAPIKeyResponse - 22, // 12: config.GetSignalConfigResponse.config:type_name -> config.SignalConfig - 22, // 13: config.UpdateSignalConfigRequest.config:type_name -> config.SignalConfig - 22, // 14: config.UpdateSignalConfigResponse.config:type_name -> config.SignalConfig - 23, // 15: config.SignalConfig.aggregationConfig:type_name -> config.AggregationConfig - 24, // 16: config.SignalConfig.routineChangesConfig:type_name -> config.RoutineChangesConfig - 29, // 17: config.SignalConfig.githubOrganisationProfile:type_name -> config.GithubOrganisationProfile - 2, // 18: 
config.RoutineChangesConfig.eventsPerUnit:type_name -> config.RoutineChangesConfig.DurationUnit - 2, // 19: config.RoutineChangesConfig.durationUnit:type_name -> config.RoutineChangesConfig.DurationUnit - 37, // 20: config.GithubAppInformation.installedAt:type_name -> google.protobuf.Timestamp - 37, // 21: config.GithubAppInformation.requestedAt:type_name -> google.protobuf.Timestamp - 26, // 22: config.GetGithubAppInformationResponse.githubAppInformation:type_name -> config.GithubAppInformation - 29, // 23: config.RegenerateGithubAppProfileResponse.githubOrganisationProfile:type_name -> config.GithubOrganisationProfile - 5, // 24: config.ConfigurationService.GetAccountConfig:input_type -> config.GetAccountConfigRequest - 7, // 25: config.ConfigurationService.UpdateAccountConfig:input_type -> config.UpdateAccountConfigRequest - 9, // 26: config.ConfigurationService.CreateHcpConfig:input_type -> config.CreateHcpConfigRequest - 12, // 27: config.ConfigurationService.GetHcpConfig:input_type -> config.GetHcpConfigRequest - 14, // 28: config.ConfigurationService.DeleteHcpConfig:input_type -> config.DeleteHcpConfigRequest - 16, // 29: config.ConfigurationService.ReplaceHcpApiKey:input_type -> config.ReplaceHcpApiKeyRequest - 18, // 30: config.ConfigurationService.GetSignalConfig:input_type -> config.GetSignalConfigRequest - 20, // 31: config.ConfigurationService.UpdateSignalConfig:input_type -> config.UpdateSignalConfigRequest - 25, // 32: config.ConfigurationService.GetGithubAppInformation:input_type -> config.GetGithubAppInformationRequest - 28, // 33: config.ConfigurationService.RegenerateGithubAppProfile:input_type -> config.RegenerateGithubAppProfileRequest - 31, // 34: config.ConfigurationService.DeleteGithubAppProfileAndGithubInstallationID:input_type -> config.DeleteGithubAppProfileAndGithubInstallationIDRequest - 33, // 35: config.ConfigurationService.CreateGithubInstallURL:input_type -> config.CreateGithubInstallURLRequest - 6, // 36: 
config.ConfigurationService.GetAccountConfig:output_type -> config.GetAccountConfigResponse - 8, // 37: config.ConfigurationService.UpdateAccountConfig:output_type -> config.UpdateAccountConfigResponse - 10, // 38: config.ConfigurationService.CreateHcpConfig:output_type -> config.CreateHcpConfigResponse - 13, // 39: config.ConfigurationService.GetHcpConfig:output_type -> config.GetHcpConfigResponse - 15, // 40: config.ConfigurationService.DeleteHcpConfig:output_type -> config.DeleteHcpConfigResponse - 17, // 41: config.ConfigurationService.ReplaceHcpApiKey:output_type -> config.ReplaceHcpApiKeyResponse - 19, // 42: config.ConfigurationService.GetSignalConfig:output_type -> config.GetSignalConfigResponse - 21, // 43: config.ConfigurationService.UpdateSignalConfig:output_type -> config.UpdateSignalConfigResponse - 27, // 44: config.ConfigurationService.GetGithubAppInformation:output_type -> config.GetGithubAppInformationResponse - 30, // 45: config.ConfigurationService.RegenerateGithubAppProfile:output_type -> config.RegenerateGithubAppProfileResponse - 32, // 46: config.ConfigurationService.DeleteGithubAppProfileAndGithubInstallationID:output_type -> config.DeleteGithubAppProfileAndGithubInstallationIDResponse - 34, // 47: config.ConfigurationService.CreateGithubInstallURL:output_type -> config.CreateGithubInstallURLResponse - 36, // [36:48] is the sub-list for method output_type - 24, // [24:36] is the sub-list for method input_type - 24, // [24:24] is the sub-list for extension type_name - 24, // [24:24] is the sub-list for extension extendee - 0, // [0:24] is the sub-list for field type_name + 36, // 0: config.BlastRadiusConfig.changeAnalysisTargetDuration:type_name -> google.protobuf.Duration + 1, // 1: config.AccountConfig.blastRadiusPreset:type_name -> config.AccountConfig.BlastRadiusPreset + 4, // 2: config.AccountConfig.blastRadius:type_name -> config.BlastRadiusConfig + 5, // 3: config.GetAccountConfigResponse.config:type_name -> config.AccountConfig + 5, 
// 4: config.UpdateAccountConfigRequest.config:type_name -> config.AccountConfig + 5, // 5: config.UpdateAccountConfigResponse.config:type_name -> config.AccountConfig + 12, // 6: config.CreateHcpConfigResponse.config:type_name -> config.HcpConfig + 37, // 7: config.CreateHcpConfigResponse.apiKey:type_name -> apikeys.CreateAPIKeyResponse + 12, // 8: config.GetHcpConfigResponse.config:type_name -> config.HcpConfig + 2, // 9: config.GetHcpConfigResponse.status:type_name -> config.GetHcpConfigResponse.Status + 12, // 10: config.ReplaceHcpApiKeyResponse.config:type_name -> config.HcpConfig + 37, // 11: config.ReplaceHcpApiKeyResponse.apiKey:type_name -> apikeys.CreateAPIKeyResponse + 23, // 12: config.GetSignalConfigResponse.config:type_name -> config.SignalConfig + 23, // 13: config.UpdateSignalConfigRequest.config:type_name -> config.SignalConfig + 23, // 14: config.UpdateSignalConfigResponse.config:type_name -> config.SignalConfig + 24, // 15: config.SignalConfig.aggregationConfig:type_name -> config.AggregationConfig + 25, // 16: config.SignalConfig.routineChangesConfig:type_name -> config.RoutineChangesConfig + 30, // 17: config.SignalConfig.githubOrganisationProfile:type_name -> config.GithubOrganisationProfile + 0, // 18: config.SignalConfig.check_run_mode:type_name -> config.CheckRunMode + 3, // 19: config.RoutineChangesConfig.eventsPerUnit:type_name -> config.RoutineChangesConfig.DurationUnit + 3, // 20: config.RoutineChangesConfig.durationUnit:type_name -> config.RoutineChangesConfig.DurationUnit + 38, // 21: config.GithubAppInformation.installedAt:type_name -> google.protobuf.Timestamp + 38, // 22: config.GithubAppInformation.requestedAt:type_name -> google.protobuf.Timestamp + 27, // 23: config.GetGithubAppInformationResponse.githubAppInformation:type_name -> config.GithubAppInformation + 30, // 24: config.RegenerateGithubAppProfileResponse.githubOrganisationProfile:type_name -> config.GithubOrganisationProfile + 6, // 25: 
config.ConfigurationService.GetAccountConfig:input_type -> config.GetAccountConfigRequest + 8, // 26: config.ConfigurationService.UpdateAccountConfig:input_type -> config.UpdateAccountConfigRequest + 10, // 27: config.ConfigurationService.CreateHcpConfig:input_type -> config.CreateHcpConfigRequest + 13, // 28: config.ConfigurationService.GetHcpConfig:input_type -> config.GetHcpConfigRequest + 15, // 29: config.ConfigurationService.DeleteHcpConfig:input_type -> config.DeleteHcpConfigRequest + 17, // 30: config.ConfigurationService.ReplaceHcpApiKey:input_type -> config.ReplaceHcpApiKeyRequest + 19, // 31: config.ConfigurationService.GetSignalConfig:input_type -> config.GetSignalConfigRequest + 21, // 32: config.ConfigurationService.UpdateSignalConfig:input_type -> config.UpdateSignalConfigRequest + 26, // 33: config.ConfigurationService.GetGithubAppInformation:input_type -> config.GetGithubAppInformationRequest + 29, // 34: config.ConfigurationService.RegenerateGithubAppProfile:input_type -> config.RegenerateGithubAppProfileRequest + 32, // 35: config.ConfigurationService.DeleteGithubAppProfileAndGithubInstallationID:input_type -> config.DeleteGithubAppProfileAndGithubInstallationIDRequest + 34, // 36: config.ConfigurationService.CreateGithubInstallURL:input_type -> config.CreateGithubInstallURLRequest + 7, // 37: config.ConfigurationService.GetAccountConfig:output_type -> config.GetAccountConfigResponse + 9, // 38: config.ConfigurationService.UpdateAccountConfig:output_type -> config.UpdateAccountConfigResponse + 11, // 39: config.ConfigurationService.CreateHcpConfig:output_type -> config.CreateHcpConfigResponse + 14, // 40: config.ConfigurationService.GetHcpConfig:output_type -> config.GetHcpConfigResponse + 16, // 41: config.ConfigurationService.DeleteHcpConfig:output_type -> config.DeleteHcpConfigResponse + 18, // 42: config.ConfigurationService.ReplaceHcpApiKey:output_type -> config.ReplaceHcpApiKeyResponse + 20, // 43: 
config.ConfigurationService.GetSignalConfig:output_type -> config.GetSignalConfigResponse + 22, // 44: config.ConfigurationService.UpdateSignalConfig:output_type -> config.UpdateSignalConfigResponse + 28, // 45: config.ConfigurationService.GetGithubAppInformation:output_type -> config.GetGithubAppInformationResponse + 31, // 46: config.ConfigurationService.RegenerateGithubAppProfile:output_type -> config.RegenerateGithubAppProfileResponse + 33, // 47: config.ConfigurationService.DeleteGithubAppProfileAndGithubInstallationID:output_type -> config.DeleteGithubAppProfileAndGithubInstallationIDResponse + 35, // 48: config.ConfigurationService.CreateGithubInstallURL:output_type -> config.CreateGithubInstallURLResponse + 37, // [37:49] is the sub-list for method output_type + 25, // [25:37] is the sub-list for method input_type + 25, // [25:25] is the sub-list for extension type_name + 25, // [25:25] is the sub-list for extension extendee + 0, // [0:25] is the sub-list for field type_name } func init() { file_config_proto_init() } @@ -2045,7 +2126,7 @@ func file_config_proto_init() { File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: unsafe.Slice(unsafe.StringData(file_config_proto_rawDesc), len(file_config_proto_rawDesc)), - NumEnums: 3, + NumEnums: 4, NumMessages: 32, NumExtensions: 0, NumServices: 1, diff --git a/go/sdp-go/host_trust.go b/go/sdp-go/host_trust.go new file mode 100644 index 00000000..3fa309b6 --- /dev/null +++ b/go/sdp-go/host_trust.go @@ -0,0 +1,72 @@ +package sdp + +import ( + "fmt" + "net" + "net/url" + "slices" + "strings" +) + +var trustedDomainSuffixes = []string{ + ".overmind.tech", + ".overmind-demo.com", +} + +var trustedExactDomains = []string{ + "overmind.tech", + "overmind-demo.com", +} + +// IsTrustedHost reports whether the given host (without port) belongs +// to a known Overmind domain (*.overmind.tech, *.overmind-demo.com) or is a +// local address. 
Callers should prompt for explicit user confirmation before +// sending credentials to untrusted hosts. +func IsTrustedHost(hostname string) bool { + hostname = strings.ToLower(hostname) + + if IsLocalHost(hostname) { + return true + } + + if slices.Contains(trustedExactDomains, hostname) { + return true + } + + for _, suffix := range trustedDomainSuffixes { + if strings.HasSuffix(hostname, suffix) { + return true + } + } + + return false +} + +// IsLocalHost reports whether the given host (without port) resolves +// to a loopback address. HTTP (non-TLS) is only acceptable for local hosts. +func IsLocalHost(hostname string) bool { + if hostname == "localhost" { + return true + } + ip := net.ParseIP(hostname) + return ip != nil && ip.IsLoopback() +} + +// ValidateAppURL parses appURLString and enforces that non-local hosts use +// HTTPS. It returns the parsed URL or an error. +func ValidateAppURL(appURLString string) (*url.URL, error) { + appURL, err := url.Parse(appURLString) + if err != nil { + return nil, fmt.Errorf("invalid app URL %q: %w", appURLString, err) + } + + if !IsLocalHost(appURL.Hostname()) && appURL.Scheme != "https" { + return nil, fmt.Errorf( + "HTTPS is required for non-local hosts (got %s://%s); "+ + "use https:// or target localhost for development", + appURL.Scheme, appURL.Host, + ) + } + + return appURL, nil +} diff --git a/go/sdp-go/host_trust_test.go b/go/sdp-go/host_trust_test.go new file mode 100644 index 00000000..260b54ef --- /dev/null +++ b/go/sdp-go/host_trust_test.go @@ -0,0 +1,105 @@ +package sdp + +import "testing" + +func TestIsTrustedHost(t *testing.T) { + tests := []struct { + host string + trusted bool + }{ + // Trusted Overmind domains (callers must pass hostname without port) + {"app.overmind.tech", true}, + {"api.overmind.tech", true}, + {"overmind.tech", true}, + {"df.overmind-demo.com", true}, + {"staging.overmind-demo.com", true}, + {"overmind-demo.com", true}, + + // Case insensitive + {"APP.OVERMIND.TECH", true}, + 
{"DF.Overmind-Demo.Com", true}, + + // Localhost variants + {"localhost", true}, + {"127.0.0.1", true}, + {"127.0.0.2", true}, + {"127.255.255.254", true}, + {"::1", true}, + + // Untrusted domains + {"evil.com", false}, + {"attacker.io", false}, + {"overmind.tech.evil.com", false}, + {"notovermind.tech", false}, + {"fakeovermind-demo.com", false}, + {"overmind-demo.com.evil.com", false}, + + // Sneaky substrings that should not match + {"xovermind.tech", false}, + {"xovermind-demo.com", false}, + } + + for _, tt := range tests { + t.Run(tt.host, func(t *testing.T) { + got := IsTrustedHost(tt.host) + if got != tt.trusted { + t.Errorf("IsTrustedHost(%q) = %v, want %v", tt.host, got, tt.trusted) + } + }) + } +} + +func TestIsLocalHost(t *testing.T) { + tests := []struct { + host string + local bool + }{ + {"localhost", true}, + {"127.0.0.1", true}, + {"127.0.0.2", true}, + {"127.255.255.254", true}, + {"::1", true}, + {"app.overmind.tech", false}, + {"evil.com", false}, + } + + for _, tt := range tests { + t.Run(tt.host, func(t *testing.T) { + got := IsLocalHost(tt.host) + if got != tt.local { + t.Errorf("IsLocalHost(%q) = %v, want %v", tt.host, got, tt.local) + } + }) + } +} + +func TestValidateAppURL(t *testing.T) { + tests := []struct { + name string + url string + wantErr bool + }{ + {"https production", "https://app.overmind.tech", false}, + {"https dogfood", "https://df.overmind-demo.com", false}, + {"http localhost", "http://localhost:3000", false}, + {"http 127.0.0.1", "http://127.0.0.1:8080", false}, + {"http ipv6 loopback", "http://[::1]", false}, + {"http ipv6 loopback with port", "http://[::1]:8080", false}, + + // HTTP to non-local is rejected + {"http remote", "http://app.overmind.tech", true}, + {"http evil", "http://evil.com", true}, + + // Invalid URL + {"invalid", "://bad", true}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + _, err := ValidateAppURL(tt.url) + if (err != nil) != tt.wantErr { + 
t.Errorf("ValidateAppURL(%q) error = %v, wantErr %v", tt.url, err, tt.wantErr) + } + }) + } +} diff --git a/go/sdp-go/instance_detect.go b/go/sdp-go/instance_detect.go index 2da3ac9e..480c1f3c 100644 --- a/go/sdp-go/instance_detect.go +++ b/go/sdp-go/instance_detect.go @@ -41,13 +41,17 @@ type instanceData struct { // NewOvermindInstance creates a new OvermindInstance from the given app URL // with all URLs filled in, or an error. The app URL should be the URL of the // frontend of the Overmind instance. e.g. https://app.overmind.tech +// +// HTTPS is enforced for all non-localhost hosts. Callers should use +// [IsTrustedHost] before calling this function and prompt for user +// confirmation when the host is not a known Overmind domain. func NewOvermindInstance(ctx context.Context, app string) (OvermindInstance, error) { var instance OvermindInstance var err error - instance.FrontendUrl, err = url.Parse(app) + instance.FrontendUrl, err = ValidateAppURL(app) if err != nil { - return instance, fmt.Errorf("invalid app value '%v', error: %w", app, err) + return instance, err } // Get the instance data diff --git a/go/sdp-go/link_extract_test.go b/go/sdp-go/link_extract_test.go index 4b2af566..6c07a625 100644 --- a/go/sdp-go/link_extract_test.go +++ b/go/sdp-go/link_extract_test.go @@ -3,7 +3,7 @@ package sdp import ( "testing" - "gopkg.in/yaml.v3" + "go.yaml.in/yaml/v3" ) // Create a very large set of attributes for the benchmark diff --git a/knowledge/discover.go b/knowledge/discover.go index d0468d65..23402078 100644 --- a/knowledge/discover.go +++ b/knowledge/discover.go @@ -12,7 +12,7 @@ import ( "github.com/overmindtech/cli/go/sdp-go" log "github.com/sirupsen/logrus" - "gopkg.in/yaml.v3" + "go.yaml.in/yaml/v3" ) // KnowledgeFile represents a discovered and validated knowledge file @@ -122,7 +122,6 @@ func Discover(knowledgeDir string) ([]KnowledgeFile, []Warning) { return nil }) - if err != nil { warnings = append(warnings, Warning{ Path: "", diff --git 
a/sources/.cursor/BUGBOT.md b/sources/.cursor/BUGBOT.md deleted file mode 100644 index a3f4363a..00000000 --- a/sources/.cursor/BUGBOT.md +++ /dev/null @@ -1,27 +0,0 @@ -# Source-Specific Rules - -## New Adapters - -When reviewing newly created adapters, it is extremely important to ensure that all of the `LinkedItemQueries` that could be added have been added. The way this is done is by looking at the method in which we translate from the cloud provider's data type to an `sdp.Item`. You should look at the definition of the cloud provider's type. This will almost always be a struct with fields, which are quite often other nested structs. What you should do is go through every field in that struct and its children, and see whether it is likely that those fields reference other cloud resources that we could potentially create a link to. Doesn't matter whether or not we have created the adapter for that type of cloud resource yet. We should always create as many links as possible. If it is another cloud resource that we are likely to also create an adapter for at some point. - -There are also a couple of generic types that we should always create links for if the attributes are there. These are: - -* `ip`: Any attribute that would contain an IP address should create a LinkedItemQueries for an `ip` type. This should always use the scope of global, the method of GET and a query of the IP address itself -* `dns`: any attribute that contains a DNS name should create a LinkedItemQueries for a DNS type. The type should be `dns`, scope `global`, method SEARCH with the query being the DNS name itself - -## IAMPermissions and PredefinedRole - -Every adapter must implement both `IAMPermissions()` and `PredefinedRole()`: - -* `IAMPermissions()` must return at least one permission string following the pattern `Microsoft.{Provider}/{resourcePath}/read`. The resource path must match the ARM resource type for the resource being adapted. 
For child resources, include the full path (e.g., `Microsoft.Batch/batchAccounts/applications/versions/read`, not just `Microsoft.Batch/batchAccounts/read`). The method should have a comment linking to the relevant Azure RBAC resource provider operations page. -* `PredefinedRole()` must return a non-empty string naming a valid Azure built-in role. If the service area has a specific reader role (e.g., `"Azure Batch Account Reader"` for Batch, `"Storage Blob Data Reader"` for Storage), use that. Otherwise, `"Reader"` is acceptable as the most restrictive general role. - -Flag any adapter missing either method, returning empty values, or using an incorrect resource provider path. - -## Azure ARM Get/List options - -For Azure adapters, only pass `*Options` fields (for example `$expand`) that the REST API for that resource and API version documents. Unsupported or mistyped query parameters can surface as `400 Bad Request` from malformed URLs. When in doubt, prefer `nil` options or cross-check the official REST reference for the operation. - -## PotentialLinks Completeness - -`PotentialLinks()` must include every resource type that appears in any `LinkedItemQuery` returned by the adapter's conversion function. If the adapter creates linked item queries for IP addresses, `PotentialLinks()` must include `stdlib.NetworkIP`. If it creates queries for DNS names, `PotentialLinks()` must include `stdlib.NetworkDNS`. Missing entries in `PotentialLinks()` break the Overmind dependency graph — linked items won't be discovered even though the queries exist in the adapter's output. 
diff --git a/sources/azure/clients/dbforpostgresql-configurations-client.go b/sources/azure/clients/dbforpostgresql-configurations-client.go new file mode 100644 index 00000000..a64faf33 --- /dev/null +++ b/sources/azure/clients/dbforpostgresql-configurations-client.go @@ -0,0 +1,35 @@ +package clients + +import ( + "context" + + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/postgresql/armpostgresqlflexibleservers/v5" +) + +//go:generate mockgen -destination=../shared/mocks/mock_dbforpostgresql_configurations_client.go -package=mocks -source=dbforpostgresql-configurations-client.go + +// PostgreSQLConfigurationsPager is a type alias for the generic Pager interface. +type PostgreSQLConfigurationsPager = Pager[armpostgresqlflexibleservers.ConfigurationsClientListByServerResponse] + +// PostgreSQLConfigurationsClient is an interface for interacting with Azure PostgreSQL Flexible Server configurations. +type PostgreSQLConfigurationsClient interface { + Get(ctx context.Context, resourceGroupName string, serverName string, configurationName string, options *armpostgresqlflexibleservers.ConfigurationsClientGetOptions) (armpostgresqlflexibleservers.ConfigurationsClientGetResponse, error) + NewListByServerPager(resourceGroupName string, serverName string, options *armpostgresqlflexibleservers.ConfigurationsClientListByServerOptions) PostgreSQLConfigurationsPager +} + +type postgreSQLConfigurationsClient struct { + client *armpostgresqlflexibleservers.ConfigurationsClient +} + +func (c *postgreSQLConfigurationsClient) Get(ctx context.Context, resourceGroupName string, serverName string, configurationName string, options *armpostgresqlflexibleservers.ConfigurationsClientGetOptions) (armpostgresqlflexibleservers.ConfigurationsClientGetResponse, error) { + return c.client.Get(ctx, resourceGroupName, serverName, configurationName, options) +} + +func (c *postgreSQLConfigurationsClient) NewListByServerPager(resourceGroupName string, serverName string, options 
*armpostgresqlflexibleservers.ConfigurationsClientListByServerOptions) PostgreSQLConfigurationsPager { + return c.client.NewListByServerPager(resourceGroupName, serverName, options) +} + +// NewPostgreSQLConfigurationsClient creates a new PostgreSQLConfigurationsClient from the Azure SDK client. +func NewPostgreSQLConfigurationsClient(client *armpostgresqlflexibleservers.ConfigurationsClient) PostgreSQLConfigurationsClient { + return &postgreSQLConfigurationsClient{client: client} +} diff --git a/sources/azure/clients/dbforpostgresql-flexible-server-replica-client.go b/sources/azure/clients/dbforpostgresql-flexible-server-replica-client.go new file mode 100644 index 00000000..8921c7ee --- /dev/null +++ b/sources/azure/clients/dbforpostgresql-flexible-server-replica-client.go @@ -0,0 +1,36 @@ +package clients + +import ( + "context" + + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/postgresql/armpostgresqlflexibleservers/v5" +) + +//go:generate mockgen -destination=../shared/mocks/mock_dbforpostgresql_flexible_server_replica_client.go -package=mocks -source=dbforpostgresql-flexible-server-replica-client.go + +type DBforPostgreSQLFlexibleServerReplicaPager = Pager[armpostgresqlflexibleservers.ReplicasClientListByServerResponse] + +type DBforPostgreSQLFlexibleServerReplicaClient interface { + ListByServer(ctx context.Context, resourceGroupName string, serverName string) DBforPostgreSQLFlexibleServerReplicaPager + Get(ctx context.Context, resourceGroupName string, replicaName string) (armpostgresqlflexibleservers.ServersClientGetResponse, error) +} + +type dbforPostgreSQLFlexibleServerReplicaClient struct { + replicasClient *armpostgresqlflexibleservers.ReplicasClient + serversClient *armpostgresqlflexibleservers.ServersClient +} + +func (a *dbforPostgreSQLFlexibleServerReplicaClient) ListByServer(ctx context.Context, resourceGroupName string, serverName string) DBforPostgreSQLFlexibleServerReplicaPager { + return 
a.replicasClient.NewListByServerPager(resourceGroupName, serverName, nil) +} + +func (a *dbforPostgreSQLFlexibleServerReplicaClient) Get(ctx context.Context, resourceGroupName string, replicaName string) (armpostgresqlflexibleservers.ServersClientGetResponse, error) { + return a.serversClient.Get(ctx, resourceGroupName, replicaName, nil) +} + +func NewDBforPostgreSQLFlexibleServerReplicaClient(replicasClient *armpostgresqlflexibleservers.ReplicasClient, serversClient *armpostgresqlflexibleservers.ServersClient) DBforPostgreSQLFlexibleServerReplicaClient { + return &dbforPostgreSQLFlexibleServerReplicaClient{ + replicasClient: replicasClient, + serversClient: serversClient, + } +} diff --git a/sources/azure/clients/load-balancer-backend-address-pools-client.go b/sources/azure/clients/load-balancer-backend-address-pools-client.go new file mode 100644 index 00000000..e06999a8 --- /dev/null +++ b/sources/azure/clients/load-balancer-backend-address-pools-client.go @@ -0,0 +1,35 @@ +package clients + +import ( + "context" + + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v9" +) + +//go:generate mockgen -destination=../shared/mocks/mock_load_balancer_backend_address_pools_client.go -package=mocks -source=load-balancer-backend-address-pools-client.go + +// LoadBalancerBackendAddressPoolsPager is a type alias for the generic Pager interface. +type LoadBalancerBackendAddressPoolsPager = Pager[armnetwork.LoadBalancerBackendAddressPoolsClientListResponse] + +// LoadBalancerBackendAddressPoolsClient is an interface for interacting with Azure load balancer backend address pools. 
+type LoadBalancerBackendAddressPoolsClient interface { + Get(ctx context.Context, resourceGroupName string, loadBalancerName string, backendAddressPoolName string) (armnetwork.LoadBalancerBackendAddressPoolsClientGetResponse, error) + NewListPager(resourceGroupName string, loadBalancerName string) LoadBalancerBackendAddressPoolsPager +} + +type loadBalancerBackendAddressPoolsClient struct { + client *armnetwork.LoadBalancerBackendAddressPoolsClient +} + +func (a *loadBalancerBackendAddressPoolsClient) Get(ctx context.Context, resourceGroupName string, loadBalancerName string, backendAddressPoolName string) (armnetwork.LoadBalancerBackendAddressPoolsClientGetResponse, error) { + return a.client.Get(ctx, resourceGroupName, loadBalancerName, backendAddressPoolName, nil) +} + +func (a *loadBalancerBackendAddressPoolsClient) NewListPager(resourceGroupName string, loadBalancerName string) LoadBalancerBackendAddressPoolsPager { + return a.client.NewListPager(resourceGroupName, loadBalancerName, nil) +} + +// NewLoadBalancerBackendAddressPoolsClient creates a new LoadBalancerBackendAddressPoolsClient from the Azure SDK client. 
+func NewLoadBalancerBackendAddressPoolsClient(client *armnetwork.LoadBalancerBackendAddressPoolsClient) LoadBalancerBackendAddressPoolsClient { + return &loadBalancerBackendAddressPoolsClient{client: client} +} diff --git a/sources/azure/clients/load-balancer-probes-client.go b/sources/azure/clients/load-balancer-probes-client.go new file mode 100644 index 00000000..7f16414d --- /dev/null +++ b/sources/azure/clients/load-balancer-probes-client.go @@ -0,0 +1,32 @@ +package clients + +import ( + "context" + + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v9" +) + +//go:generate mockgen -destination=../shared/mocks/mock_load_balancer_probes_client.go -package=mocks -source=load-balancer-probes-client.go + +type LoadBalancerProbesPager = Pager[armnetwork.LoadBalancerProbesClientListResponse] + +type LoadBalancerProbesClient interface { + Get(ctx context.Context, resourceGroupName string, loadBalancerName string, probeName string) (armnetwork.LoadBalancerProbesClientGetResponse, error) + NewListPager(resourceGroupName string, loadBalancerName string) LoadBalancerProbesPager +} + +type loadBalancerProbesClient struct { + client *armnetwork.LoadBalancerProbesClient +} + +func (a *loadBalancerProbesClient) Get(ctx context.Context, resourceGroupName string, loadBalancerName string, probeName string) (armnetwork.LoadBalancerProbesClientGetResponse, error) { + return a.client.Get(ctx, resourceGroupName, loadBalancerName, probeName, nil) +} + +func (a *loadBalancerProbesClient) NewListPager(resourceGroupName string, loadBalancerName string) LoadBalancerProbesPager { + return a.client.NewListPager(resourceGroupName, loadBalancerName, nil) +} + +func NewLoadBalancerProbesClient(client *armnetwork.LoadBalancerProbesClient) LoadBalancerProbesClient { + return &loadBalancerProbesClient{client: client} +} diff --git a/sources/azure/clients/sql-database-schemas-client.go b/sources/azure/clients/sql-database-schemas-client.go new file mode 100644 index 
00000000..7397021e --- /dev/null +++ b/sources/azure/clients/sql-database-schemas-client.go @@ -0,0 +1,35 @@ +package clients + +import ( + "context" + + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/sql/armsql/v2" +) + +//go:generate mockgen -destination=../shared/mocks/mock_sql_database_schemas_client.go -package=mocks -source=sql-database-schemas-client.go + +// SqlDatabaseSchemasPager is a type alias for the generic Pager interface with database schema response type. +type SqlDatabaseSchemasPager = Pager[armsql.DatabaseSchemasClientListByDatabaseResponse] + +// SqlDatabaseSchemasClient is an interface for interacting with Azure SQL database schemas +type SqlDatabaseSchemasClient interface { + Get(ctx context.Context, resourceGroupName, serverName, databaseName, schemaName string) (armsql.DatabaseSchemasClientGetResponse, error) + ListByDatabase(ctx context.Context, resourceGroupName, serverName, databaseName string) SqlDatabaseSchemasPager +} + +type sqlDatabaseSchemasClient struct { + client *armsql.DatabaseSchemasClient +} + +func (c *sqlDatabaseSchemasClient) Get(ctx context.Context, resourceGroupName, serverName, databaseName, schemaName string) (armsql.DatabaseSchemasClientGetResponse, error) { + return c.client.Get(ctx, resourceGroupName, serverName, databaseName, schemaName, nil) +} + +func (c *sqlDatabaseSchemasClient) ListByDatabase(ctx context.Context, resourceGroupName, serverName, databaseName string) SqlDatabaseSchemasPager { + return c.client.NewListByDatabasePager(resourceGroupName, serverName, databaseName, nil) +} + +// NewSqlDatabaseSchemasClient creates a new SqlDatabaseSchemasClient from the Azure SDK client +func NewSqlDatabaseSchemasClient(client *armsql.DatabaseSchemasClient) SqlDatabaseSchemasClient { + return &sqlDatabaseSchemasClient{client: client} +} diff --git a/sources/azure/integration-tests/README.md b/sources/azure/integration-tests/README.md index adb86869..a916790c 100644 --- 
a/sources/azure/integration-tests/README.md +++ b/sources/azure/integration-tests/README.md @@ -20,7 +20,8 @@ For example, `TestComputeVirtualMachineIntegration` tests the Compute API's Virt "RUN_AZURE_INTEGRATION_TESTS": "true", "AZURE_SUBSCRIPTION_ID": "your-subscription-id", "AZURE_TENANT_ID": "your-tenant-id", - "AZURE_CLIENT_ID": "your-client-id" + "AZURE_CLIENT_ID": "your-client-id", + "AZURE_INTEGRATION_TEST_RUN_ID": "local-dev-1" } } ``` @@ -35,6 +36,7 @@ For example, `TestComputeVirtualMachineIntegration` tests the Compute API's Virt export AZURE_TENANT_ID="your-tenant-id" # your Azure AD tenant ID export AZURE_CLIENT_ID="your-client-id" # your Azure application/client ID export AZURE_REGIONS="eastus,westus2" # optional: comma-separated list of regions + export AZURE_INTEGRATION_TEST_RUN_ID="local-dev-1" # optional: isolate this run's resource group # For SQL Database integration tests export AZURE_SQL_SERVER_ADMIN_LOGIN="sqladmin" # SQL server administrator login export AZURE_SQL_SERVER_ADMIN_PASSWORD="your-secure-password" # SQL server administrator password @@ -70,11 +72,10 @@ We can easily run all `Setup` tests to create resources, then run all `Run` test Some tests intentionally call `t.Skip` for Azure conditions that are external to adapter correctness, for example: - Batch account quota exhaustion (`SubscriptionQuotaExceeded`) -- Transient VM/VMSS control-plane conflicts where create returns `409` but `Get` still cannot retrieve the resource - **Gallery application version** (`compute-gallery-application-version_test.go`): requires env vars `AZURE_TEST_GALLERY_NAME`, `AZURE_TEST_GALLERY_APPLICATION_NAME`, and `AZURE_TEST_GALLERY_APPLICATION_VERSION` pointing at an existing gallery application version; if the version is missing (`404`), the test skips after preflight - **Role assignments** (`authorization-role-assignment_test.go`): may wait for RBAC eventual consistency before asserting adapter behaviour -This keeps integration runs stable without 
hiding adapter bugs. +VM/VMSS/role-assignment ghost `409 Conflict` states are now handled with "auto-remediate then fail": tests attempt cleanup and a retry, and fail loudly if the resource is still unrecoverable. Also note that PostgreSQL Flexible Server creation and Key Vault purge/recreate can take many minutes. If a run times out, increase `go test -timeout` (for example `-timeout 30m`) before assuming the test is stuck. @@ -138,5 +139,5 @@ set +a - The service principal has **read-write access** scoped to the integration test subscription only - Cloud Agent dashboard stores only the bootstrap token (`OP_SERVICE_ACCOUNT_TOKEN`) - Azure credentials remain in 1Password and are resolved only at runtime via `inject-secrets` -- All test resources are created in the `overmind-integration-tests` resource group +- By default test resources are created in `overmind-integration-tests`; set `AZURE_INTEGRATION_TEST_RUN_ID` to isolate parallel runs into per-run resource groups (for example `overmind-integration-tests-agent-42`) - Teardown steps clean up created resources after each test run diff --git a/sources/azure/integration-tests/authorization-role-assignment_test.go b/sources/azure/integration-tests/authorization-role-assignment_test.go index 6e3b5ea5..6eb1f58a 100644 --- a/sources/azure/integration-tests/authorization-role-assignment_test.go +++ b/sources/azure/integration-tests/authorization-role-assignment_test.go @@ -27,8 +27,6 @@ import ( "github.com/overmindtech/cli/sources/shared" ) -var errRoleAssignmentConflictWithoutResource = errors.New("role assignment create returned conflict but role assignment could not be retrieved") - func TestAuthorizationRoleAssignmentIntegration(t *testing.T) { subscriptionID := os.Getenv("AZURE_SUBSCRIPTION_ID") if subscriptionID == "" { @@ -70,8 +68,9 @@ func TestAuthorizationRoleAssignmentIntegration(t *testing.T) { t.Fatalf("Failed to get Reader role definition ID: %v", err) } - // Generate unique role assignment name (GUID) - 
roleAssignmentName := uuid.New().String() + // Deterministic role assignment name so re-runs reuse the same assignment ID + // instead of conflicting with a prior run's different UUID for the same principal+role combo + roleAssignmentName := uuid.NewSHA1(uuid.NameSpaceURL, []byte(principalID+readerRoleDefinitionID+integrationTestResourceGroup)).String() azureScope := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s", subscriptionID, integrationTestResourceGroup) setupCompleted := false @@ -85,13 +84,11 @@ func TestAuthorizationRoleAssignmentIntegration(t *testing.T) { } // Create role assignment at resource group scope - err = createRoleAssignment(ctx, roleAssignmentsClient, azureScope, roleAssignmentName, principalID, readerRoleDefinitionID) - if err != nil { - if errors.Is(err, errRoleAssignmentConflictWithoutResource) { - t.Skipf("Skipping due to transient Azure role-assignment control-plane conflict: %v", err) - } - t.Fatalf("Failed to create role assignment: %v", err) + actualName, createErr := createRoleAssignment(ctx, roleAssignmentsClient, azureScope, roleAssignmentName, principalID, readerRoleDefinitionID) + if createErr != nil { + t.Fatalf("Failed to create role assignment: %v", createErr) } + roleAssignmentName = actualName err = waitForRoleAssignmentAvailable(ctx, roleAssignmentsClient, azureScope, roleAssignmentName) if err != nil { t.Fatalf("Failed waiting for role assignment to be available: %v", err) @@ -370,25 +367,22 @@ func getReaderRoleDefinitionID(ctx context.Context, client *armauthorization.Rol return "", fmt.Errorf("Reader role definition not found") } -// createRoleAssignment creates an Azure role assignment (idempotent) -func createRoleAssignment(ctx context.Context, client *armauthorization.RoleAssignmentsClient, scope, roleAssignmentName, principalID, roleDefinitionID string) error { - // Check if role assignment already exists +// createRoleAssignment creates an Azure role assignment (idempotent). 
+// Returns the actual assignment name used (may differ from input if a prior run +// created the same principal+role combo under a different UUID). +func createRoleAssignment(ctx context.Context, client *armauthorization.RoleAssignmentsClient, scope, roleAssignmentName, principalID, roleDefinitionID string) (string, error) { + return createRoleAssignmentWithRemediation(ctx, client, scope, roleAssignmentName, principalID, roleDefinitionID, 0) +} + +func createRoleAssignmentWithRemediation(ctx context.Context, client *armauthorization.RoleAssignmentsClient, scope, roleAssignmentName, principalID, roleDefinitionID string, remediationAttempt int) (string, error) { _, err := client.Get(ctx, scope, roleAssignmentName, nil) if err == nil { log.Printf("Role assignment %s already exists, skipping creation", roleAssignmentName) - return nil + return roleAssignmentName, nil } - // Create the role assignment - // Note: We need to get the principal ID from the current user or a service principal - // For integration tests, we'll use Azure CLI to get the current user's object ID - // This requires running: az ad signed-in-user show --query id -o tsv - // Or using Graph API - - // For now, let's try to create it and handle the error if principal ID is needed - // Actually, we should get the principal ID before calling this function if principalID == "" { - return fmt.Errorf("principal ID is required to create role assignment") + return "", fmt.Errorf("principal ID is required to create role assignment") } parameters := armauthorization.RoleAssignmentCreateParameters{ @@ -403,31 +397,68 @@ func createRoleAssignment(ctx context.Context, client *armauthorization.RoleAssi var respErr *azcore.ResponseError if errors.As(err, &respErr) { if respErr.StatusCode == http.StatusConflict { + if strings.Contains(respErr.Error(), "RoleAssignmentExists") { + existingID := extractExistingRoleAssignmentID(respErr.Error()) + if existingID != "" { + log.Printf("Role assignment for this 
principal+role already exists at scope %s with ID %s, reusing", scope, existingID) + return existingID, nil + } + log.Printf("Role assignment for this principal+role already exists at scope %s, treating as success", scope) + return roleAssignmentName, nil + } existing, getErr := client.Get(ctx, scope, roleAssignmentName, nil) if getErr == nil && existing.RoleAssignment.ID != nil && *existing.RoleAssignment.ID != "" { log.Printf("Role assignment %s already exists (conflict), verified readable, skipping creation", roleAssignmentName) - return nil + return roleAssignmentName, nil } var getRespErr *azcore.ResponseError if errors.As(getErr, &getRespErr) && getRespErr.StatusCode == http.StatusNotFound { - return fmt.Errorf("%w: scope=%s roleAssignmentName=%s", errRoleAssignmentConflictWithoutResource, scope, roleAssignmentName) + if remediationAttempt >= 1 { + return "", fmt.Errorf("role assignment %s still in ghost conflict state after remediation (scope=%s): %w", roleAssignmentName, scope, err) + } + log.Printf("Detected ghost role-assignment conflict for %s at %s, attempting automatic remediation", roleAssignmentName, scope) + if deleteErr := deleteRoleAssignment(ctx, client, scope, roleAssignmentName); deleteErr != nil { + return "", fmt.Errorf("failed to remediate ghost role assignment %s before retry: %w", roleAssignmentName, deleteErr) + } + time.Sleep(5 * time.Second) + return createRoleAssignmentWithRemediation(ctx, client, scope, roleAssignmentName, principalID, roleDefinitionID, remediationAttempt+1) } - return fmt.Errorf("role assignment conflict for %s and failed to verify existing role assignment: %w", roleAssignmentName, getErr) + return "", fmt.Errorf("role assignment conflict for %s and failed to verify existing role assignment: %w", roleAssignmentName, getErr) } if respErr.StatusCode == http.StatusForbidden { - return fmt.Errorf("insufficient permissions to create role assignment: %w", err) + return "", fmt.Errorf("insufficient permissions to create 
role assignment: %w", err) } } - return fmt.Errorf("failed to create role assignment: %w", err) + return "", fmt.Errorf("failed to create role assignment: %w", err) } - // Verify the role assignment was created successfully if resp.RoleAssignment.ID == nil { - return fmt.Errorf("role assignment created but ID is unknown") + return "", fmt.Errorf("role assignment created but ID is unknown") } log.Printf("Role assignment %s created successfully at scope %s", roleAssignmentName, scope) - return nil + return roleAssignmentName, nil +} + +// extractExistingRoleAssignmentID parses the existing assignment ID from the +// RoleAssignmentExists error message (format: "...The ID of the existing role +// assignment is ."). +func extractExistingRoleAssignmentID(errMsg string) string { + const marker = "The ID of the existing role assignment is " + _, after, ok := strings.Cut(errMsg, marker) + if !ok { + return "" + } + rest := after + if dotIdx := strings.Index(rest, "."); dotIdx > 0 { + rest = rest[:dotIdx] + } + rest = strings.TrimSpace(rest) + if len(rest) != 32 { + return rest + } + // Convert 32-char hex to UUID format (8-4-4-4-12) + return rest[:8] + "-" + rest[8:12] + "-" + rest[12:16] + "-" + rest[16:20] + "-" + rest[20:] } // deleteRoleAssignment deletes an Azure role assignment diff --git a/sources/azure/integration-tests/compute-availability-set_test.go b/sources/azure/integration-tests/compute-availability-set_test.go index 0b567e18..76638014 100644 --- a/sources/azure/integration-tests/compute-availability-set_test.go +++ b/sources/azure/integration-tests/compute-availability-set_test.go @@ -129,9 +129,6 @@ func TestComputeAvailabilitySetIntegration(t *testing.T) { // Create virtual machine with availability set err = createVirtualMachineWithAvailabilitySet(ctx, vmClient, integrationTestResourceGroup, integrationTestVMForAVSetName, integrationTestLocation, *nicResp.ID, *avSetResp.ID) if err != nil { - if errors.Is(err, errVMConflictWithoutResource) { - 
t.Skipf("Skipping due to transient Azure VM control-plane conflict: %v", err) - } t.Fatalf("Failed to create virtual machine: %v", err) } @@ -388,11 +385,12 @@ func createAvailabilitySet(ctx context.Context, client *armcompute.AvailabilityS // Create the availability set resp, err := client.CreateOrUpdate(ctx, resourceGroupName, avSetName, armcompute.AvailabilitySet{ Location: new(location), + SKU: &armcompute.SKU{ + Name: new("Aligned"), + }, Properties: &armcompute.AvailabilitySetProperties{ PlatformFaultDomainCount: new(int32(2)), PlatformUpdateDomainCount: new(int32(2)), - ProximityPlacementGroup: nil, // Optional - not setting for this test - VirtualMachines: nil, // Will be populated when VMs are added }, Tags: map[string]*string{ "purpose": new("overmind-integration-tests"), @@ -556,6 +554,10 @@ func createNetworkInterfaceForAVSet(ctx context.Context, client *armnetwork.Inte // createVirtualMachineWithAvailabilitySet creates an Azure virtual machine with an availability set (idempotent) func createVirtualMachineWithAvailabilitySet(ctx context.Context, client *armcompute.VirtualMachinesClient, resourceGroupName, vmName, location, nicID, availabilitySetID string) error { + return createVirtualMachineWithAvailabilitySetWithRemediation(ctx, client, resourceGroupName, vmName, location, nicID, availabilitySetID, 0) +} + +func createVirtualMachineWithAvailabilitySetWithRemediation(ctx context.Context, client *armcompute.VirtualMachinesClient, resourceGroupName, vmName, location, nicID, availabilitySetID string, remediationAttempt int) error { // Check if VM already exists existingVM, err := client.Get(ctx, resourceGroupName, vmName, nil) if err == nil { @@ -578,14 +580,13 @@ func createVirtualMachineWithAvailabilitySet(ctx context.Context, client *armcom Location: new(location), Properties: &armcompute.VirtualMachineProperties{ HardwareProfile: &armcompute.HardwareProfile{ - // Use Standard_D2ps_v5 - ARM-based VM with good availability in westus2 - VMSize: 
new(armcompute.VirtualMachineSizeTypes("Standard_D2ps_v5")), + VMSize: new(armcompute.VirtualMachineSizeTypes("Standard_D2s_v3")), }, StorageProfile: &armcompute.StorageProfile{ ImageReference: &armcompute.ImageReference{ Publisher: new("Canonical"), Offer: new("0001-com-ubuntu-server-jammy"), - SKU: new("22_04-lts-arm64"), // ARM64 image for ARM-based VM + SKU: new("22_04-lts"), Version: new("latest"), }, OSDisk: &armcompute.OSDisk{ @@ -640,7 +641,15 @@ func createVirtualMachineWithAvailabilitySet(ctx context.Context, client *armcom } var getRespErr *azcore.ResponseError if errors.As(getErr, &getRespErr) && getRespErr.StatusCode == http.StatusNotFound { - return fmt.Errorf("%w: vm=%s resourceGroup=%s", errVMConflictWithoutResource, vmName, resourceGroupName) + if remediationAttempt >= 1 { + return fmt.Errorf("vm %s still in ghost conflict state after remediation (resourceGroup=%s): %w", vmName, resourceGroupName, err) + } + log.Printf("Detected ghost VM conflict for availability-set test VM %s in %s, attempting automatic remediation", vmName, resourceGroupName) + if deleteErr := deleteVirtualMachine(ctx, client, resourceGroupName, vmName); deleteErr != nil { + return fmt.Errorf("failed to remediate ghost VM %s before retry: %w", vmName, deleteErr) + } + time.Sleep(20 * time.Second) + return createVirtualMachineWithAvailabilitySetWithRemediation(ctx, client, resourceGroupName, vmName, location, nicID, availabilitySetID, remediationAttempt+1) } return fmt.Errorf("vm creation conflict for %s and failed to verify existing VM: %w", vmName, getErr) } diff --git a/sources/azure/integration-tests/compute-virtual-machine-extension_test.go b/sources/azure/integration-tests/compute-virtual-machine-extension_test.go index b58ef546..f21fb483 100644 --- a/sources/azure/integration-tests/compute-virtual-machine-extension_test.go +++ b/sources/azure/integration-tests/compute-virtual-machine-extension_test.go @@ -114,9 +114,6 @@ func TestComputeVirtualMachineExtensionIntegration(t 
*testing.T) { // Create virtual machine err = createVirtualMachineForExtension(ctx, vmClient, integrationTestResourceGroup, integrationTestExtensionVMName, integrationTestLocation, *nicResp.ID) if err != nil { - if errors.Is(err, errVMConflictWithoutResource) { - t.Skipf("Skipping due to transient Azure VM control-plane conflict: %v", err) - } t.Fatalf("Failed to create virtual machine: %v", err) } @@ -421,6 +418,10 @@ func createNetworkInterfaceForExtension(ctx context.Context, client *armnetwork. // createVirtualMachineForExtension creates an Azure virtual machine (idempotent) func createVirtualMachineForExtension(ctx context.Context, client *armcompute.VirtualMachinesClient, resourceGroupName, vmName, location, nicID string) error { + return createVirtualMachineForExtensionWithRemediation(ctx, client, resourceGroupName, vmName, location, nicID, 0) +} + +func createVirtualMachineForExtensionWithRemediation(ctx context.Context, client *armcompute.VirtualMachinesClient, resourceGroupName, vmName, location, nicID string, remediationAttempt int) error { // Check if VM already exists existingVM, err := client.Get(ctx, resourceGroupName, vmName, nil) if err == nil { @@ -443,14 +444,13 @@ func createVirtualMachineForExtension(ctx context.Context, client *armcompute.Vi Location: new(location), Properties: &armcompute.VirtualMachineProperties{ HardwareProfile: &armcompute.HardwareProfile{ - // Use Standard_D2ps_v5 - ARM-based VM with good availability in westus2 - VMSize: new(armcompute.VirtualMachineSizeTypes("Standard_D2ps_v5")), + VMSize: new(armcompute.VirtualMachineSizeTypes("Standard_D2s_v3")), }, StorageProfile: &armcompute.StorageProfile{ ImageReference: &armcompute.ImageReference{ Publisher: new("Canonical"), Offer: new("0001-com-ubuntu-server-jammy"), - SKU: new("22_04-lts-arm64"), // ARM64 image for ARM-based VM + SKU: new("22_04-lts"), Version: new("latest"), }, OSDisk: &armcompute.OSDisk{ @@ -502,7 +502,15 @@ func createVirtualMachineForExtension(ctx 
context.Context, client *armcompute.Vi } var getRespErr *azcore.ResponseError if errors.As(getErr, &getRespErr) && getRespErr.StatusCode == http.StatusNotFound { - return fmt.Errorf("%w: vm=%s resourceGroup=%s", errVMConflictWithoutResource, vmName, resourceGroupName) + if remediationAttempt >= 1 { + return fmt.Errorf("vm %s still in ghost conflict state after remediation (resourceGroup=%s): %w", vmName, resourceGroupName, err) + } + log.Printf("Detected ghost VM conflict for extension test VM %s in %s, attempting automatic remediation", vmName, resourceGroupName) + if deleteErr := deleteVirtualMachineForExtension(ctx, client, resourceGroupName, vmName); deleteErr != nil { + return fmt.Errorf("failed to remediate ghost VM %s before retry: %w", vmName, deleteErr) + } + time.Sleep(20 * time.Second) + return createVirtualMachineForExtensionWithRemediation(ctx, client, resourceGroupName, vmName, location, nicID, remediationAttempt+1) } return fmt.Errorf("vm creation conflict for %s and failed to verify existing VM: %w", vmName, getErr) } diff --git a/sources/azure/integration-tests/compute-virtual-machine-run-command_test.go b/sources/azure/integration-tests/compute-virtual-machine-run-command_test.go index 9d3367d4..c487a61f 100644 --- a/sources/azure/integration-tests/compute-virtual-machine-run-command_test.go +++ b/sources/azure/integration-tests/compute-virtual-machine-run-command_test.go @@ -114,9 +114,6 @@ func TestComputeVirtualMachineRunCommandIntegration(t *testing.T) { // Create virtual machine err = createVirtualMachineForRunCommand(ctx, vmClient, integrationTestResourceGroup, integrationTestRunCommandVMName, integrationTestLocation, *nicResp.ID) if err != nil { - if errors.Is(err, errVMConflictWithoutResource) { - t.Skipf("Skipping due to transient Azure VM control-plane conflict: %v", err) - } t.Fatalf("Failed to create virtual machine: %v", err) } @@ -421,6 +418,10 @@ func createNetworkInterfaceForRunCommand(ctx context.Context, client *armnetwork // 
createVirtualMachineForRunCommand creates an Azure virtual machine (idempotent) func createVirtualMachineForRunCommand(ctx context.Context, client *armcompute.VirtualMachinesClient, resourceGroupName, vmName, location, nicID string) error { + return createVirtualMachineForRunCommandWithRemediation(ctx, client, resourceGroupName, vmName, location, nicID, 0) +} + +func createVirtualMachineForRunCommandWithRemediation(ctx context.Context, client *armcompute.VirtualMachinesClient, resourceGroupName, vmName, location, nicID string, remediationAttempt int) error { // Check if VM already exists existingVM, err := client.Get(ctx, resourceGroupName, vmName, nil) if err == nil { @@ -443,14 +444,13 @@ func createVirtualMachineForRunCommand(ctx context.Context, client *armcompute.V Location: new(location), Properties: &armcompute.VirtualMachineProperties{ HardwareProfile: &armcompute.HardwareProfile{ - // Use Standard_D2ps_v5 - ARM-based VM with good availability in westus2 - VMSize: new(armcompute.VirtualMachineSizeTypes("Standard_D2ps_v5")), + VMSize: new(armcompute.VirtualMachineSizeTypes("Standard_D2s_v3")), }, StorageProfile: &armcompute.StorageProfile{ ImageReference: &armcompute.ImageReference{ Publisher: new("Canonical"), Offer: new("0001-com-ubuntu-server-jammy"), - SKU: new("22_04-lts-arm64"), // ARM64 image for ARM-based VM + SKU: new("22_04-lts"), Version: new("latest"), }, OSDisk: &armcompute.OSDisk{ @@ -502,7 +502,15 @@ func createVirtualMachineForRunCommand(ctx context.Context, client *armcompute.V } var getRespErr *azcore.ResponseError if errors.As(getErr, &getRespErr) && getRespErr.StatusCode == http.StatusNotFound { - return fmt.Errorf("%w: vm=%s resourceGroup=%s", errVMConflictWithoutResource, vmName, resourceGroupName) + if remediationAttempt >= 1 { + return fmt.Errorf("vm %s still in ghost conflict state after remediation (resourceGroup=%s): %w", vmName, resourceGroupName, err) + } + log.Printf("Detected ghost VM conflict for run-command test VM %s in %s, 
attempting automatic remediation", vmName, resourceGroupName) + if deleteErr := deleteVirtualMachineForRunCommand(ctx, client, resourceGroupName, vmName); deleteErr != nil { + return fmt.Errorf("failed to remediate ghost VM %s before retry: %w", vmName, deleteErr) + } + time.Sleep(20 * time.Second) + return createVirtualMachineForRunCommandWithRemediation(ctx, client, resourceGroupName, vmName, location, nicID, remediationAttempt+1) } return fmt.Errorf("vm creation conflict for %s and failed to verify existing VM: %w", vmName, getErr) } diff --git a/sources/azure/integration-tests/compute-virtual-machine-scale-set_test.go b/sources/azure/integration-tests/compute-virtual-machine-scale-set_test.go index 186f57d2..d43d207f 100644 --- a/sources/azure/integration-tests/compute-virtual-machine-scale-set_test.go +++ b/sources/azure/integration-tests/compute-virtual-machine-scale-set_test.go @@ -31,8 +31,6 @@ const ( integrationTestVMSSSubnetName = "default" ) -var errVMSSConflictWithoutResource = errors.New("vmss conflict persisted without readable vmss resource") - func TestComputeVirtualMachineScaleSetIntegration(t *testing.T) { subscriptionID := os.Getenv("AZURE_SUBSCRIPTION_ID") if subscriptionID == "" { @@ -90,9 +88,6 @@ func TestComputeVirtualMachineScaleSetIntegration(t *testing.T) { // Create virtual machine scale set err = createVirtualMachineScaleSet(ctx, vmssClient, integrationTestResourceGroup, integrationTestVMSSName, integrationTestLocation, *subnetResp.ID) if err != nil { - if errors.Is(err, errVMSSConflictWithoutResource) { - t.Skipf("Skipping due to transient Azure VMSS control-plane conflict: %v", err) - } t.Fatalf("Failed to create virtual machine scale set: %v", err) } @@ -414,7 +409,7 @@ func createVirtualMachineScaleSet(ctx context.Context, client *armcompute.Virtua poller, err := client.BeginCreateOrUpdate(ctx, resourceGroupName, vmssName, armcompute.VirtualMachineScaleSet{ Location: new(location), SKU: &armcompute.SKU{ - Name: new("Standard_B1s"), 
// Burstable B-series VM - cheaper and more widely available + Name: new("Standard_D2s_v3"), Tier: new("Standard"), Capacity: new(int64(1)), // Start with 1 instance for testing }, @@ -482,16 +477,19 @@ func createVirtualMachineScaleSet(ctx context.Context, client *armcompute.Virtua // Verify the VMSS actually exists _, getErr := client.Get(ctx, resourceGroupName, vmssName, nil) if getErr != nil { - // If we get a conflict but VMSS doesn't exist, it might be in a transient state (deleting) - // Wait longer and retry creation once - log.Printf("VMSS %s not found after conflict, waiting 30s and retrying creation", vmssName) + // If we get a conflict but VMSS doesn't exist, treat it as a ghost/stale control-plane record. + // Try to remediate once by forcing a delete, then retry creation. + log.Printf("VMSS %s not found after conflict, attempting remediation delete before retry", vmssName) + if deleteErr := deleteVirtualMachineScaleSet(ctx, client, resourceGroupName, vmssName); deleteErr != nil { + return fmt.Errorf("failed to remediate VMSS ghost state for %s: %w", vmssName, deleteErr) + } time.Sleep(30 * time.Second) // Retry creation retryPoller, retryErr := client.BeginCreateOrUpdate(ctx, resourceGroupName, vmssName, armcompute.VirtualMachineScaleSet{ Location: new(location), SKU: &armcompute.SKU{ - Name: new("Standard_B1s"), + Name: new("Standard_D2s_v3"), Tier: new("Standard"), Capacity: new(int64(1)), }, @@ -556,7 +554,7 @@ func createVirtualMachineScaleSet(ctx context.Context, client *armcompute.Virtua // Still conflict - check if it exists now _, finalCheckErr := client.Get(ctx, resourceGroupName, vmssName, nil) if finalCheckErr != nil { - return fmt.Errorf("%w: vmss=%s resourceGroup=%s", errVMSSConflictWithoutResource, vmssName, resourceGroupName) + return fmt.Errorf("vmss %s still in ghost conflict state after remediation retry (resourceGroup=%s): %w", vmssName, resourceGroupName, retryErr) } log.Printf("VMSS %s exists after retry conflict", vmssName) 
return nil diff --git a/sources/azure/integration-tests/compute-virtual-machine_test.go b/sources/azure/integration-tests/compute-virtual-machine_test.go index b4e47a77..091ad3f3 100644 --- a/sources/azure/integration-tests/compute-virtual-machine_test.go +++ b/sources/azure/integration-tests/compute-virtual-machine_test.go @@ -34,8 +34,6 @@ const ( defaultPollInterval = 15 * time.Second ) -var errVMConflictWithoutResource = errors.New("vm create returned conflict but vm could not be retrieved") - func TestComputeVirtualMachineIntegration(t *testing.T) { subscriptionID := os.Getenv("AZURE_SUBSCRIPTION_ID") if subscriptionID == "" { @@ -111,9 +109,6 @@ func TestComputeVirtualMachineIntegration(t *testing.T) { // Create virtual machine err = createVirtualMachine(ctx, vmClient, integrationTestResourceGroup, integrationTestVMName, integrationTestLocation, *nicResp.ID) if err != nil { - if errors.Is(err, errVMConflictWithoutResource) { - t.Skipf("Skipping due to transient Azure VM control-plane conflict: %v", err) - } t.Fatalf("Failed to create virtual machine: %v", err) } @@ -392,6 +387,10 @@ func createNetworkInterface(ctx context.Context, client *armnetwork.InterfacesCl // createVirtualMachine creates an Azure virtual machine (idempotent) func createVirtualMachine(ctx context.Context, client *armcompute.VirtualMachinesClient, resourceGroupName, vmName, location, nicID string) error { + return createVirtualMachineWithRemediation(ctx, client, resourceGroupName, vmName, location, nicID, 0) +} + +func createVirtualMachineWithRemediation(ctx context.Context, client *armcompute.VirtualMachinesClient, resourceGroupName, vmName, location, nicID string, remediationAttempt int) error { // Check if VM already exists existingVM, err := client.Get(ctx, resourceGroupName, vmName, nil) if err == nil { @@ -414,14 +413,13 @@ func createVirtualMachine(ctx context.Context, client *armcompute.VirtualMachine Location: new(location), Properties: &armcompute.VirtualMachineProperties{ 
HardwareProfile: &armcompute.HardwareProfile{ - // Use Standard_D2ps_v5 - ARM-based VM with good availability in westus2 - VMSize: new(armcompute.VirtualMachineSizeTypes("Standard_D2ps_v5")), + VMSize: new(armcompute.VirtualMachineSizeTypes("Standard_D2s_v3")), }, StorageProfile: &armcompute.StorageProfile{ ImageReference: &armcompute.ImageReference{ Publisher: new("Canonical"), Offer: new("0001-com-ubuntu-server-jammy"), - SKU: new("22_04-lts-arm64"), // ARM64 image for ARM-based VM + SKU: new("22_04-lts"), Version: new("latest"), }, OSDisk: &armcompute.OSDisk{ @@ -475,7 +473,15 @@ func createVirtualMachine(ctx context.Context, client *armcompute.VirtualMachine } var getRespErr *azcore.ResponseError if errors.As(getErr, &getRespErr) && getRespErr.StatusCode == http.StatusNotFound { - return fmt.Errorf("%w: vm=%s resourceGroup=%s", errVMConflictWithoutResource, vmName, resourceGroupName) + if remediationAttempt >= 1 { + return fmt.Errorf("vm %s still in ghost conflict state after remediation (resourceGroup=%s): %w", vmName, resourceGroupName, err) + } + log.Printf("Detected ghost VM conflict for %s in %s, attempting automatic remediation", vmName, resourceGroupName) + if deleteErr := deleteVirtualMachine(ctx, client, resourceGroupName, vmName); deleteErr != nil { + return fmt.Errorf("failed to remediate ghost VM %s before retry: %w", vmName, deleteErr) + } + time.Sleep(20 * time.Second) + return createVirtualMachineWithRemediation(ctx, client, resourceGroupName, vmName, location, nicID, remediationAttempt+1) } return fmt.Errorf("vm creation conflict for %s and failed to verify existing VM: %w", vmName, getErr) } diff --git a/sources/azure/integration-tests/dbforpostgresql-flexible-server-configuration_test.go b/sources/azure/integration-tests/dbforpostgresql-flexible-server-configuration_test.go new file mode 100644 index 00000000..af863334 --- /dev/null +++ b/sources/azure/integration-tests/dbforpostgresql-flexible-server-configuration_test.go @@ -0,0 +1,313 @@ 
+package integrationtests + +import ( + "fmt" + "os" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/postgresql/armpostgresqlflexibleservers/v5" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources/v2" + log "github.com/sirupsen/logrus" + + "github.com/overmindtech/cli/go/discovery" + "github.com/overmindtech/cli/go/sdp-go" + "github.com/overmindtech/cli/go/sdpcache" + "github.com/overmindtech/cli/sources" + "github.com/overmindtech/cli/sources/azure/clients" + "github.com/overmindtech/cli/sources/azure/manual" + azureshared "github.com/overmindtech/cli/sources/azure/shared" + "github.com/overmindtech/cli/sources/shared" +) + +const ( + integrationTestPGConfigServerName = "ovm-integ-test-pg-config" +) + +func TestDBforPostgreSQLFlexibleServerConfigurationIntegration(t *testing.T) { + subscriptionID := os.Getenv("AZURE_SUBSCRIPTION_ID") + if subscriptionID == "" { + t.Skip("AZURE_SUBSCRIPTION_ID environment variable not set") + } + + adminLogin := os.Getenv("AZURE_POSTGRESQL_SERVER_ADMIN_LOGIN") + adminPassword := os.Getenv("AZURE_POSTGRESQL_SERVER_ADMIN_PASSWORD") + if adminLogin == "" || adminPassword == "" { + t.Skip("AZURE_POSTGRESQL_SERVER_ADMIN_LOGIN and AZURE_POSTGRESQL_SERVER_ADMIN_PASSWORD must be set for PostgreSQL tests") + } + + cred, err := azureshared.NewAzureCredential(t.Context()) + if err != nil { + t.Fatalf("Failed to create Azure credential: %v", err) + } + + postgreSQLServerClient, err := armpostgresqlflexibleservers.NewServersClient(subscriptionID, cred, nil) + if err != nil { + t.Fatalf("Failed to create PostgreSQL Flexible Servers client: %v", err) + } + + configurationsClient, err := armpostgresqlflexibleservers.NewConfigurationsClient(subscriptionID, cred, nil) + if err != nil { + t.Fatalf("Failed to create PostgreSQL Configurations client: %v", err) + } + + rgClient, err := armresources.NewResourceGroupsClient(subscriptionID, cred, nil) + if err != nil { + t.Fatalf("Failed to create Resource 
Groups client: %v", err) + } + + pgServerName := generatePostgreSQLServerName(integrationTestPGConfigServerName) + var setupCompleted bool + + t.Run("Setup", func(t *testing.T) { + ctx := t.Context() + + err := createResourceGroup(ctx, rgClient, integrationTestResourceGroup, integrationTestLocation) + if err != nil { + t.Fatalf("Failed to create resource group: %v", err) + } + + err = createPostgreSQLFlexibleServer(ctx, postgreSQLServerClient, integrationTestResourceGroup, pgServerName, integrationTestLocation) + if err != nil { + t.Fatalf("Failed to create PostgreSQL Flexible Server: %v", err) + } + + err = waitForPostgreSQLServerAvailable(ctx, postgreSQLServerClient, integrationTestResourceGroup, pgServerName) + if err != nil { + t.Fatalf("Failed waiting for PostgreSQL server to be available: %v", err) + } + + setupCompleted = true + }) + + t.Run("Run", func(t *testing.T) { + if !setupCompleted { + t.Skip("Skipping Run: Setup did not complete successfully") + } + + t.Run("GetPostgreSQLFlexibleServerConfiguration", func(t *testing.T) { + ctx := t.Context() + + pager := configurationsClient.NewListByServerPager(integrationTestResourceGroup, pgServerName, nil) + var configName string + if pager.More() { + page, err := pager.NextPage(ctx) + if err != nil { + t.Fatalf("Failed to list configurations: %v", err) + } + if len(page.Value) > 0 && page.Value[0].Name != nil { + configName = *page.Value[0].Name + } + } + + if configName == "" { + t.Skip("No configurations found on server") + } + + log.Printf("Testing with configuration: %s", configName) + + wrapper := manual.NewDBforPostgreSQLFlexibleServerConfiguration( + clients.NewPostgreSQLConfigurationsClient(configurationsClient), + []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, integrationTestResourceGroup)}, + ) + scope := wrapper.Scopes()[0] + + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + query := shared.CompositeLookupKey(pgServerName, configName) + 
sdpItem, qErr := adapter.Get(ctx, scope, query, true) + if qErr != nil { + t.Fatalf("Expected no error, got: %v", qErr) + } + + if sdpItem == nil { + t.Fatalf("Expected sdpItem to be non-nil") + } + + if sdpItem.GetType() != azureshared.DBforPostgreSQLFlexibleServerConfiguration.String() { + t.Errorf("Expected type %s, got %s", azureshared.DBforPostgreSQLFlexibleServerConfiguration, sdpItem.GetType()) + } + + uniqueAttrKey := sdpItem.GetUniqueAttribute() + if uniqueAttrKey != "uniqueAttr" { + t.Errorf("Expected unique attribute 'uniqueAttr', got %s", uniqueAttrKey) + } + + uniqueAttrValue, err := sdpItem.GetAttributes().Get(uniqueAttrKey) + if err != nil { + t.Fatalf("Failed to get unique attribute: %v", err) + } + + expectedUniqueAttrValue := shared.CompositeLookupKey(pgServerName, configName) + if uniqueAttrValue != expectedUniqueAttrValue { + t.Errorf("Expected unique attribute value %s, got %s", expectedUniqueAttrValue, uniqueAttrValue) + } + + if sdpItem.GetScope() != fmt.Sprintf("%s.%s", subscriptionID, integrationTestResourceGroup) { + t.Errorf("Expected scope %s.%s, got %s", subscriptionID, integrationTestResourceGroup, sdpItem.GetScope()) + } + + if err := sdpItem.Validate(); err != nil { + t.Fatalf("Item validation failed: %v", err) + } + + log.Printf("Successfully retrieved configuration %s", configName) + }) + + t.Run("SearchPostgreSQLFlexibleServerConfigurations", func(t *testing.T) { + ctx := t.Context() + + wrapper := manual.NewDBforPostgreSQLFlexibleServerConfiguration( + clients.NewPostgreSQLConfigurationsClient(configurationsClient), + []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, integrationTestResourceGroup)}, + ) + scope := wrapper.Scopes()[0] + + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + searchable, ok := adapter.(discovery.SearchableAdapter) + if !ok { + t.Fatalf("Adapter does not support Search operation") + } + + sdpItems, err := searchable.Search(ctx, scope, pgServerName, 
true) + if err != nil { + t.Fatalf("Failed to search configurations: %v", err) + } + + if len(sdpItems) < 1 { + t.Fatalf("Expected at least one configuration, got %d", len(sdpItems)) + } + + for _, item := range sdpItems { + if err := item.Validate(); err != nil { + t.Fatalf("Item validation failed: %v", err) + } + + if item.GetType() != azureshared.DBforPostgreSQLFlexibleServerConfiguration.String() { + t.Errorf("Expected type %s, got %s", azureshared.DBforPostgreSQLFlexibleServerConfiguration, item.GetType()) + } + } + + log.Printf("Found %d configurations in search results", len(sdpItems)) + }) + + t.Run("VerifyLinkedItems", func(t *testing.T) { + ctx := t.Context() + + pager := configurationsClient.NewListByServerPager(integrationTestResourceGroup, pgServerName, nil) + var configName string + if pager.More() { + page, err := pager.NextPage(ctx) + if err != nil { + t.Fatalf("Failed to list configurations: %v", err) + } + if len(page.Value) > 0 && page.Value[0].Name != nil { + configName = *page.Value[0].Name + } + } + + if configName == "" { + t.Skip("No configurations found on server") + } + + wrapper := manual.NewDBforPostgreSQLFlexibleServerConfiguration( + clients.NewPostgreSQLConfigurationsClient(configurationsClient), + []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, integrationTestResourceGroup)}, + ) + scope := wrapper.Scopes()[0] + + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + query := shared.CompositeLookupKey(pgServerName, configName) + sdpItem, qErr := adapter.Get(ctx, scope, query, true) + if qErr != nil { + t.Fatalf("Expected no error, got: %v", qErr) + } + + linkedQueries := sdpItem.GetLinkedItemQueries() + if len(linkedQueries) == 0 { + t.Fatalf("Expected linked item queries, but got none") + } + + var hasServerLink bool + for _, liq := range linkedQueries { + if liq.GetQuery().GetType() == azureshared.DBforPostgreSQLFlexibleServer.String() { + hasServerLink = true + if 
liq.GetQuery().GetQuery() != pgServerName { + t.Errorf("Expected linked query to server %s, got %s", pgServerName, liq.GetQuery().GetQuery()) + } + if liq.GetQuery().GetMethod() != sdp.QueryMethod_GET { + t.Errorf("Expected linked query method GET, got %s", liq.GetQuery().GetMethod()) + } + if liq.GetQuery().GetScope() != scope { + t.Errorf("Expected linked query scope %s, got %s", scope, liq.GetQuery().GetScope()) + } + break + } + } + + if !hasServerLink { + t.Error("Expected linked query to PostgreSQL Flexible Server, but didn't find one") + } + + log.Printf("Verified %d linked item queries for configuration %s", len(linkedQueries), configName) + }) + + t.Run("VerifyItemAttributes", func(t *testing.T) { + ctx := t.Context() + + pager := configurationsClient.NewListByServerPager(integrationTestResourceGroup, pgServerName, nil) + var configName string + if pager.More() { + page, err := pager.NextPage(ctx) + if err != nil { + t.Fatalf("Failed to list configurations: %v", err) + } + if len(page.Value) > 0 && page.Value[0].Name != nil { + configName = *page.Value[0].Name + } + } + + if configName == "" { + t.Skip("No configurations found on server") + } + + wrapper := manual.NewDBforPostgreSQLFlexibleServerConfiguration( + clients.NewPostgreSQLConfigurationsClient(configurationsClient), + []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, integrationTestResourceGroup)}, + ) + scope := wrapper.Scopes()[0] + + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + query := shared.CompositeLookupKey(pgServerName, configName) + sdpItem, qErr := adapter.Get(ctx, scope, query, true) + if qErr != nil { + t.Fatalf("Expected no error, got: %v", qErr) + } + + if sdpItem.GetType() != azureshared.DBforPostgreSQLFlexibleServerConfiguration.String() { + t.Errorf("Expected type %s, got %s", azureshared.DBforPostgreSQLFlexibleServerConfiguration, sdpItem.GetType()) + } + + expectedScope := fmt.Sprintf("%s.%s", subscriptionID, 
integrationTestResourceGroup) + if sdpItem.GetScope() != expectedScope { + t.Errorf("Expected scope %s, got %s", expectedScope, sdpItem.GetScope()) + } + + if sdpItem.GetUniqueAttribute() != "uniqueAttr" { + t.Errorf("Expected unique attribute 'uniqueAttr', got %s", sdpItem.GetUniqueAttribute()) + } + + if err := sdpItem.Validate(); err != nil { + t.Fatalf("Item validation failed: %v", err) + } + }) + }) + + t.Run("Teardown", func(t *testing.T) { + ctx := t.Context() + + err := deletePostgreSQLFlexibleServer(ctx, postgreSQLServerClient, integrationTestResourceGroup, pgServerName) + if err != nil { + t.Fatalf("Failed to delete PostgreSQL Flexible Server: %v", err) + } + }) +} diff --git a/sources/azure/integration-tests/dbforpostgresql-flexible-server-replica_test.go b/sources/azure/integration-tests/dbforpostgresql-flexible-server-replica_test.go new file mode 100644 index 00000000..90906d72 --- /dev/null +++ b/sources/azure/integration-tests/dbforpostgresql-flexible-server-replica_test.go @@ -0,0 +1,414 @@ +package integrationtests + +import ( + "context" + "errors" + "fmt" + "net/http" + "os" + "testing" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/postgresql/armpostgresqlflexibleservers/v5" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources/v2" + log "github.com/sirupsen/logrus" + + "github.com/overmindtech/cli/go/discovery" + "github.com/overmindtech/cli/go/sdpcache" + "github.com/overmindtech/cli/sources" + "github.com/overmindtech/cli/sources/azure/clients" + "github.com/overmindtech/cli/sources/azure/manual" + azureshared "github.com/overmindtech/cli/sources/azure/shared" + "github.com/overmindtech/cli/sources/shared" +) + +const ( + integrationTestPgServerName = "ovm-integ-test-pg-server" + integrationTestPgReplicaName = "ovm-integ-test-pg-replica" +) + +func TestDBforPostgreSQLFlexibleServerReplicaIntegration(t *testing.T) { + subscriptionID := 
os.Getenv("AZURE_SUBSCRIPTION_ID") + if subscriptionID == "" { + t.Skip("AZURE_SUBSCRIPTION_ID environment variable not set") + } + + cred, err := azureshared.NewAzureCredential(t.Context()) + if err != nil { + t.Fatalf("Failed to create Azure credential: %v", err) + } + + serversClient, err := armpostgresqlflexibleservers.NewServersClient(subscriptionID, cred, nil) + if err != nil { + t.Fatalf("Failed to create PostgreSQL Flexible Servers client: %v", err) + } + + replicasClient, err := armpostgresqlflexibleservers.NewReplicasClient(subscriptionID, cred, nil) + if err != nil { + t.Fatalf("Failed to create PostgreSQL Replicas client: %v", err) + } + + rgClient, err := armresources.NewResourceGroupsClient(subscriptionID, cred, nil) + if err != nil { + t.Fatalf("Failed to create Resource Groups client: %v", err) + } + + var setupCompleted bool + + t.Run("Setup", func(t *testing.T) { + ctx, cancel := context.WithTimeout(t.Context(), 30*time.Minute) + defer cancel() + + err := createResourceGroup(ctx, rgClient, integrationTestResourceGroup, integrationTestLocation) + if err != nil { + t.Fatalf("Failed to create resource group: %v", err) + } + + err = createPostgreSQLServerForReplica(ctx, serversClient, subscriptionID, integrationTestResourceGroup, integrationTestPgServerName, integrationTestLocation) + if err != nil { + t.Fatalf("Failed to create PostgreSQL flexible server: %v", err) + } + + err = waitForPostgreSQLServerReady(ctx, serversClient, integrationTestResourceGroup, integrationTestPgServerName) + if err != nil { + t.Fatalf("Failed waiting for PostgreSQL server to be ready: %v", err) + } + + err = createPostgreSQLReplica(ctx, serversClient, subscriptionID, integrationTestResourceGroup, integrationTestPgServerName, integrationTestPgReplicaName, integrationTestLocation) + if err != nil { + t.Fatalf("Failed to create PostgreSQL replica: %v", err) + } + + err = waitForPostgreSQLServerReady(ctx, serversClient, integrationTestResourceGroup, 
integrationTestPgReplicaName) + if err != nil { + t.Fatalf("Failed waiting for PostgreSQL replica to be ready: %v", err) + } + + setupCompleted = true + }) + + t.Run("Run", func(t *testing.T) { + if !setupCompleted { + t.Skip("Skipping Run: Setup did not complete successfully") + } + + t.Run("GetReplica", func(t *testing.T) { + ctx := t.Context() + + log.Printf("Retrieving replica %s under server %s", integrationTestPgReplicaName, integrationTestPgServerName) + + wrapper := manual.NewDBforPostgreSQLFlexibleServerReplica( + clients.NewDBforPostgreSQLFlexibleServerReplicaClient(replicasClient, serversClient), + []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, integrationTestResourceGroup)}, + ) + scope := wrapper.Scopes()[0] + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + query := shared.CompositeLookupKey(integrationTestPgServerName, integrationTestPgReplicaName) + sdpItem, qErr := adapter.Get(ctx, scope, query, true) + if qErr != nil { + t.Fatalf("Expected no error, got: %v", qErr) + } + + if sdpItem == nil { + t.Fatalf("Expected sdpItem to be non-nil") + } + + uniqueAttrKey := sdpItem.GetUniqueAttribute() + uniqueAttrValue, err := sdpItem.GetAttributes().Get(uniqueAttrKey) + if err != nil { + t.Fatalf("Failed to get unique attribute: %v", err) + } + + expectedUniqueAttr := shared.CompositeLookupKey(integrationTestPgServerName, integrationTestPgReplicaName) + if uniqueAttrValue != expectedUniqueAttr { + t.Errorf("Expected unique attribute value %s, got %s", expectedUniqueAttr, uniqueAttrValue) + } + + log.Printf("Successfully retrieved replica %s", integrationTestPgReplicaName) + }) + + t.Run("SearchReplicas", func(t *testing.T) { + ctx := t.Context() + + log.Printf("Searching replicas under server %s", integrationTestPgServerName) + + wrapper := manual.NewDBforPostgreSQLFlexibleServerReplica( + clients.NewDBforPostgreSQLFlexibleServerReplicaClient(replicasClient, serversClient), + 
[]azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, integrationTestResourceGroup)}, + ) + scope := wrapper.Scopes()[0] + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + searchable, ok := adapter.(discovery.SearchableAdapter) + if !ok { + t.Fatalf("Adapter does not support Search operation") + } + + sdpItems, err := searchable.Search(ctx, scope, integrationTestPgServerName, true) + if err != nil { + t.Fatalf("Failed to search replicas: %v", err) + } + + if len(sdpItems) < 1 { + t.Fatalf("Expected at least one replica, got %d", len(sdpItems)) + } + + var found bool + expectedUniqueAttr := shared.CompositeLookupKey(integrationTestPgServerName, integrationTestPgReplicaName) + for _, item := range sdpItems { + uniqueAttrKey := item.GetUniqueAttribute() + if v, err := item.GetAttributes().Get(uniqueAttrKey); err == nil && v == expectedUniqueAttr { + found = true + break + } + } + + if !found { + t.Fatalf("Expected to find replica %s in search results", integrationTestPgReplicaName) + } + + log.Printf("Found %d replicas in search results", len(sdpItems)) + }) + + t.Run("VerifyLinkedItems", func(t *testing.T) { + ctx := t.Context() + + log.Printf("Verifying linked items for replica %s", integrationTestPgReplicaName) + + wrapper := manual.NewDBforPostgreSQLFlexibleServerReplica( + clients.NewDBforPostgreSQLFlexibleServerReplicaClient(replicasClient, serversClient), + []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, integrationTestResourceGroup)}, + ) + scope := wrapper.Scopes()[0] + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + query := shared.CompositeLookupKey(integrationTestPgServerName, integrationTestPgReplicaName) + sdpItem, qErr := adapter.Get(ctx, scope, query, true) + if qErr != nil { + t.Fatalf("Expected no error, got: %v", qErr) + } + + linkedQueries := sdpItem.GetLinkedItemQueries() + if len(linkedQueries) == 0 { + t.Fatalf("Expected linked item 
queries, but got none") + } + + var hasSourceServerLink bool + for _, liq := range linkedQueries { + if liq.GetQuery().GetType() == azureshared.DBforPostgreSQLFlexibleServer.String() { + hasSourceServerLink = true + if liq.GetQuery().GetQuery() != integrationTestPgServerName { + t.Errorf("Expected linked query to source server %s, got %s", integrationTestPgServerName, liq.GetQuery().GetQuery()) + } + break + } + } + + if !hasSourceServerLink { + t.Error("Expected linked query to source server, but didn't find one") + } + + log.Printf("Verified %d linked item queries for replica %s", len(linkedQueries), integrationTestPgReplicaName) + }) + + t.Run("VerifyItemAttributes", func(t *testing.T) { + ctx := t.Context() + + wrapper := manual.NewDBforPostgreSQLFlexibleServerReplica( + clients.NewDBforPostgreSQLFlexibleServerReplicaClient(replicasClient, serversClient), + []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, integrationTestResourceGroup)}, + ) + scope := wrapper.Scopes()[0] + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + query := shared.CompositeLookupKey(integrationTestPgServerName, integrationTestPgReplicaName) + sdpItem, qErr := adapter.Get(ctx, scope, query, true) + if qErr != nil { + t.Fatalf("Expected no error, got: %v", qErr) + } + + if sdpItem.GetType() != azureshared.DBforPostgreSQLFlexibleServerReplica.String() { + t.Errorf("Expected type %s, got %s", azureshared.DBforPostgreSQLFlexibleServerReplica.String(), sdpItem.GetType()) + } + + expectedScope := fmt.Sprintf("%s.%s", subscriptionID, integrationTestResourceGroup) + if sdpItem.GetScope() != expectedScope { + t.Errorf("Expected scope %s, got %s", expectedScope, sdpItem.GetScope()) + } + + if sdpItem.GetUniqueAttribute() != "uniqueAttr" { + t.Errorf("Expected unique attribute 'uniqueAttr', got %s", sdpItem.GetUniqueAttribute()) + } + + if err := sdpItem.Validate(); err != nil { + t.Errorf("Item validation failed: %v", err) + } + }) + }) + + 
t.Run("Teardown", func(t *testing.T) { + ctx, cancel := context.WithTimeout(t.Context(), 20*time.Minute) + defer cancel() + + err := deletePostgreSQLServer(ctx, serversClient, integrationTestResourceGroup, integrationTestPgReplicaName) + if err != nil { + t.Logf("Warning: Failed to delete replica %s: %v", integrationTestPgReplicaName, err) + } + + err = deletePostgreSQLServer(ctx, serversClient, integrationTestResourceGroup, integrationTestPgServerName) + if err != nil { + t.Logf("Warning: Failed to delete server %s: %v", integrationTestPgServerName, err) + } + }) +} + +func createPostgreSQLServerForReplica(ctx context.Context, client *armpostgresqlflexibleservers.ServersClient, subscriptionID, resourceGroupName, serverName, location string) error { + _, err := client.Get(ctx, resourceGroupName, serverName, nil) + if err == nil { + log.Printf("PostgreSQL server %s already exists, skipping creation", serverName) + return nil + } + + version := armpostgresqlflexibleservers.PostgresMajorVersionSixteen + createMode := armpostgresqlflexibleservers.CreateModeDefault + adminLogin := "ovmadmin" + adminPassword := "TestPassword123!" 
+ skuName := "Standard_D2ds_v5" + skuTier := armpostgresqlflexibleservers.SKUTierGeneralPurpose + storageSizeGB := int32(32) + + poller, err := client.BeginCreateOrUpdate(ctx, resourceGroupName, serverName, armpostgresqlflexibleservers.Server{ + Location: &location, + SKU: &armpostgresqlflexibleservers.SKU{ + Name: &skuName, + Tier: &skuTier, + }, + Properties: &armpostgresqlflexibleservers.ServerProperties{ + Version: &version, + CreateMode: &createMode, + AdministratorLogin: &adminLogin, + AdministratorLoginPassword: &adminPassword, + Storage: &armpostgresqlflexibleservers.Storage{ + StorageSizeGB: &storageSizeGB, + }, + }, + }, nil) + if err != nil { + var respErr *azcore.ResponseError + if errors.As(err, &respErr) && respErr.StatusCode == http.StatusConflict { + if _, getErr := client.Get(ctx, resourceGroupName, serverName, nil); getErr == nil { + log.Printf("PostgreSQL server %s already exists (conflict), skipping creation", serverName) + return nil + } + return fmt.Errorf("server %s conflict but not retrievable: %w", serverName, err) + } + return fmt.Errorf("failed to create PostgreSQL server: %w", err) + } + + _, err = poller.PollUntilDone(ctx, nil) + if err != nil { + return fmt.Errorf("failed to create PostgreSQL server: %w", err) + } + + log.Printf("PostgreSQL server %s created successfully", serverName) + return nil +} + +func createPostgreSQLReplica(ctx context.Context, client *armpostgresqlflexibleservers.ServersClient, subscriptionID, resourceGroupName, primaryServerName, replicaName, location string) error { + _, err := client.Get(ctx, resourceGroupName, replicaName, nil) + if err == nil { + log.Printf("PostgreSQL replica %s already exists, skipping creation", replicaName) + return nil + } + + sourceServerID := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.DBforPostgreSQL/flexibleServers/%s", + subscriptionID, resourceGroupName, primaryServerName) + + createMode := armpostgresqlflexibleservers.CreateModeReplica + + poller, err 
:= client.BeginCreateOrUpdate(ctx, resourceGroupName, replicaName, armpostgresqlflexibleservers.Server{ + Location: &location, + Properties: &armpostgresqlflexibleservers.ServerProperties{ + CreateMode: &createMode, + SourceServerResourceID: &sourceServerID, + }, + }, nil) + if err != nil { + var respErr *azcore.ResponseError + if errors.As(err, &respErr) && respErr.StatusCode == http.StatusConflict { + if _, getErr := client.Get(ctx, resourceGroupName, replicaName, nil); getErr == nil { + log.Printf("PostgreSQL replica %s already exists (conflict), skipping creation", replicaName) + return nil + } + return fmt.Errorf("replica %s conflict but not retrievable: %w", replicaName, err) + } + return fmt.Errorf("failed to create PostgreSQL replica: %w", err) + } + + _, err = poller.PollUntilDone(ctx, nil) + if err != nil { + return fmt.Errorf("failed to create PostgreSQL replica: %w", err) + } + + log.Printf("PostgreSQL replica %s created successfully", replicaName) + return nil +} + +func waitForPostgreSQLServerReady(ctx context.Context, client *armpostgresqlflexibleservers.ServersClient, resourceGroupName, serverName string) error { + maxAttempts := 60 + pollInterval := 30 * time.Second + maxNotFoundAttempts := 5 + notFoundCount := 0 + + for attempt := 1; attempt <= maxAttempts; attempt++ { + resp, err := client.Get(ctx, resourceGroupName, serverName, nil) + if err != nil { + var respErr *azcore.ResponseError + if errors.As(err, &respErr) && respErr.StatusCode == http.StatusNotFound { + notFoundCount++ + if notFoundCount >= maxNotFoundAttempts { + return fmt.Errorf("server %s not found after %d attempts", serverName, notFoundCount) + } + log.Printf("Server %s not found yet (attempt %d/%d), waiting...", serverName, attempt, maxAttempts) + time.Sleep(pollInterval) + continue + } + return fmt.Errorf("error checking server: %w", err) + } + notFoundCount = 0 + + if resp.Properties != nil && resp.Properties.State != nil { + state := *resp.Properties.State + 
log.Printf("Server %s state: %s (attempt %d/%d)", serverName, state, attempt, maxAttempts) + if state == armpostgresqlflexibleservers.ServerStateReady { + return nil + } + } + + time.Sleep(pollInterval) + } + + return fmt.Errorf("timeout waiting for server %s to be ready", serverName) +} + +func deletePostgreSQLServer(ctx context.Context, client *armpostgresqlflexibleservers.ServersClient, resourceGroupName, serverName string) error { + poller, err := client.BeginDelete(ctx, resourceGroupName, serverName, nil) + if err != nil { + var respErr *azcore.ResponseError + if errors.As(err, &respErr) && respErr.StatusCode == http.StatusNotFound { + log.Printf("PostgreSQL server %s not found, skipping deletion", serverName) + return nil + } + return fmt.Errorf("failed to delete PostgreSQL server: %w", err) + } + + _, err = poller.PollUntilDone(ctx, nil) + if err != nil { + return fmt.Errorf("failed to delete PostgreSQL server: %w", err) + } + + log.Printf("PostgreSQL server %s deleted successfully", serverName) + return nil +} diff --git a/sources/azure/integration-tests/helpers_test.go b/sources/azure/integration-tests/helpers_test.go index 7690f315..557b66b7 100644 --- a/sources/azure/integration-tests/helpers_test.go +++ b/sources/azure/integration-tests/helpers_test.go @@ -3,6 +3,9 @@ package integrationtests import ( "context" "fmt" + "os" + "regexp" + "strings" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources/v2" log "github.com/sirupsen/logrus" @@ -10,10 +13,48 @@ import ( // Shared constants for integration tests const ( - integrationTestResourceGroup = "overmind-integration-tests" - integrationTestLocation = "westus2" + integrationTestResourceGroupBase = "overmind-integration-tests" + integrationTestLocation = "westus2" ) +var integrationTestResourceGroup = resolveIntegrationTestResourceGroup() + +var invalidRunIDSanitizer = regexp.MustCompile(`[^a-z0-9-]+`) + +// resolveIntegrationTestResourceGroup returns the default integration test 
resource group, +// optionally scoped by AZURE_INTEGRATION_TEST_RUN_ID for parallel runs. +// +// Example: +// +// AZURE_INTEGRATION_TEST_RUN_ID=agent-42 +// => overmind-integration-tests-agent-42 +func resolveIntegrationTestResourceGroup() string { + runID := normalizeIntegrationTestRunID(os.Getenv("AZURE_INTEGRATION_TEST_RUN_ID")) + if runID == "" { + return integrationTestResourceGroupBase + } + + // Azure resource group names can be up to 90 characters. + name := integrationTestResourceGroupBase + "-" + runID + if len(name) > 90 { + return name[:90] + } + return name +} + +func normalizeIntegrationTestRunID(runID string) string { + normalized := strings.ToLower(strings.TrimSpace(runID)) + if normalized == "" { + return "" + } + normalized = invalidRunIDSanitizer.ReplaceAllString(normalized, "-") + normalized = strings.Trim(normalized, "-") + if len(normalized) > 30 { + normalized = normalized[:30] + } + return normalized +} + // createResourceGroup creates an Azure resource group if it doesn't already exist (idempotent) func createResourceGroup(ctx context.Context, client *armresources.ResourceGroupsClient, resourceGroupName, location string) error { // Check if resource group already exists diff --git a/sources/azure/integration-tests/network-flow-log_test.go b/sources/azure/integration-tests/network-flow-log_test.go index 1e2d7171..e8c78381 100644 --- a/sources/azure/integration-tests/network-flow-log_test.go +++ b/sources/azure/integration-tests/network-flow-log_test.go @@ -6,11 +6,10 @@ import ( "fmt" "net/http" "os" + "strings" "testing" "time" - "strings" - "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v9" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources/v2" diff --git a/sources/azure/integration-tests/network-load-balancer-backend-address-pool_test.go b/sources/azure/integration-tests/network-load-balancer-backend-address-pool_test.go new file mode 
100644 index 00000000..8c0b4305 --- /dev/null +++ b/sources/azure/integration-tests/network-load-balancer-backend-address-pool_test.go @@ -0,0 +1,467 @@ +package integrationtests + +import ( + "context" + "fmt" + "os" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v9" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources/v2" + log "github.com/sirupsen/logrus" + + "github.com/overmindtech/cli/go/discovery" + "github.com/overmindtech/cli/go/sdpcache" + "github.com/overmindtech/cli/sources" + "github.com/overmindtech/cli/sources/azure/clients" + "github.com/overmindtech/cli/sources/azure/manual" + azureshared "github.com/overmindtech/cli/sources/azure/shared" + "github.com/overmindtech/cli/sources/shared" +) + +const ( + integrationTestBackendPoolLBName = "ovm-integ-test-lb-for-pool" + integrationTestBackendPoolName = "test-backend-pool" + integrationTestVNetNameForBackendPool = "ovm-integ-test-vnet-for-pool" + integrationTestSubnetNameForBackendPool = "default" + integrationTestPublicIPNameForBackendPool = "ovm-integ-test-pip-for-pool" +) + +func TestNetworkLoadBalancerBackendAddressPoolIntegration(t *testing.T) { + subscriptionID := os.Getenv("AZURE_SUBSCRIPTION_ID") + if subscriptionID == "" { + t.Skip("AZURE_SUBSCRIPTION_ID environment variable not set") + } + + cred, err := azureshared.NewAzureCredential(t.Context()) + if err != nil { + t.Fatalf("Failed to create Azure credential: %v", err) + } + + rgClient, err := armresources.NewResourceGroupsClient(subscriptionID, cred, nil) + if err != nil { + t.Fatalf("Failed to create Resource Groups client: %v", err) + } + + vnetClient, err := armnetwork.NewVirtualNetworksClient(subscriptionID, cred, nil) + if err != nil { + t.Fatalf("Failed to create Virtual Networks client: %v", err) + } + + publicIPClient, err := armnetwork.NewPublicIPAddressesClient(subscriptionID, cred, nil) + if err != nil { + t.Fatalf("Failed to create Public IP Addresses client: 
%v", err) + } + + lbClient, err := armnetwork.NewLoadBalancersClient(subscriptionID, cred, nil) + if err != nil { + t.Fatalf("Failed to create Load Balancers client: %v", err) + } + + backendPoolClient, err := armnetwork.NewLoadBalancerBackendAddressPoolsClient(subscriptionID, cred, nil) + if err != nil { + t.Fatalf("Failed to create Load Balancer Backend Address Pools client: %v", err) + } + + var setupCompleted bool + + t.Run("Setup", func(t *testing.T) { + ctx := t.Context() + + err := createResourceGroup(ctx, rgClient, integrationTestResourceGroup, integrationTestLocation) + if err != nil { + t.Fatalf("Failed to create resource group: %v", err) + } + + err = createVirtualNetworkForBackendPool(ctx, vnetClient, integrationTestResourceGroup, integrationTestVNetNameForBackendPool, integrationTestLocation) + if err != nil { + t.Fatalf("Failed to create virtual network: %v", err) + } + + err = createPublicIPForBackendPool(ctx, publicIPClient, integrationTestResourceGroup, integrationTestPublicIPNameForBackendPool, integrationTestLocation) + if err != nil { + t.Fatalf("Failed to create public IP address: %v", err) + } + + publicIPResp, err := publicIPClient.Get(ctx, integrationTestResourceGroup, integrationTestPublicIPNameForBackendPool, nil) + if err != nil { + t.Fatalf("Failed to get public IP address: %v", err) + } + + err = createLoadBalancerWithBackendPool(ctx, lbClient, subscriptionID, integrationTestResourceGroup, integrationTestBackendPoolLBName, integrationTestLocation, *publicIPResp.ID, integrationTestBackendPoolName) + if err != nil { + t.Fatalf("Failed to create load balancer: %v", err) + } + + log.Printf("Setup completed: Load balancer %s with backend pool %s created", integrationTestBackendPoolLBName, integrationTestBackendPoolName) + setupCompleted = true + }) + + t.Run("Run", func(t *testing.T) { + if !setupCompleted { + t.Skip("Skipping Run: Setup did not complete successfully") + } + + t.Run("GetBackendAddressPool", func(t *testing.T) { + ctx := 
t.Context() + + log.Printf("Retrieving backend address pool %s from load balancer %s", integrationTestBackendPoolName, integrationTestBackendPoolLBName) + + wrapper := manual.NewNetworkLoadBalancerBackendAddressPool( + clients.NewLoadBalancerBackendAddressPoolsClient(backendPoolClient), + []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, integrationTestResourceGroup)}, + ) + scope := wrapper.Scopes()[0] + + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + query := shared.CompositeLookupKey(integrationTestBackendPoolLBName, integrationTestBackendPoolName) + sdpItem, qErr := adapter.Get(ctx, scope, query, true) + if qErr != nil { + t.Fatalf("Expected no error, got: %v", qErr) + } + + if sdpItem == nil { + t.Fatalf("Expected sdpItem to be non-nil") + } + + uniqueAttrKey := sdpItem.GetUniqueAttribute() + uniqueAttrValue, err := sdpItem.GetAttributes().Get(uniqueAttrKey) + if err != nil { + t.Fatalf("Failed to get unique attribute: %v", err) + } + + expectedUniqueValue := shared.CompositeLookupKey(integrationTestBackendPoolLBName, integrationTestBackendPoolName) + if uniqueAttrValue != expectedUniqueValue { + t.Errorf("Expected unique attribute value %s, got %s", expectedUniqueValue, uniqueAttrValue) + } + + if sdpItem.GetType() != azureshared.NetworkLoadBalancerBackendAddressPool.String() { + t.Errorf("Expected type %s, got %s", azureshared.NetworkLoadBalancerBackendAddressPool, sdpItem.GetType()) + } + + log.Printf("Successfully retrieved backend address pool %s", integrationTestBackendPoolName) + }) + + t.Run("SearchBackendAddressPools", func(t *testing.T) { + ctx := t.Context() + + log.Printf("Searching backend address pools in load balancer %s", integrationTestBackendPoolLBName) + + wrapper := manual.NewNetworkLoadBalancerBackendAddressPool( + clients.NewLoadBalancerBackendAddressPoolsClient(backendPoolClient), + []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, 
integrationTestResourceGroup)}, + ) + scope := wrapper.Scopes()[0] + + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + searchable, ok := adapter.(discovery.SearchableAdapter) + if !ok { + t.Fatalf("Adapter does not support Search operation") + } + + sdpItems, err := searchable.Search(ctx, scope, integrationTestBackendPoolLBName, true) + if err != nil { + t.Fatalf("Failed to search backend address pools: %v", err) + } + + if len(sdpItems) < 1 { + t.Fatalf("Expected at least one backend address pool, got %d", len(sdpItems)) + } + + var found bool + for _, item := range sdpItems { + uniqueAttrKey := item.GetUniqueAttribute() + expectedValue := shared.CompositeLookupKey(integrationTestBackendPoolLBName, integrationTestBackendPoolName) + if v, err := item.GetAttributes().Get(uniqueAttrKey); err == nil && v == expectedValue { + found = true + break + } + } + + if !found { + t.Fatalf("Expected to find backend pool %s in the search results", integrationTestBackendPoolName) + } + + log.Printf("Found %d backend address pools in search results", len(sdpItems)) + }) + + t.Run("VerifyLinkedItems", func(t *testing.T) { + ctx := t.Context() + + log.Printf("Verifying linked items for backend address pool %s", integrationTestBackendPoolName) + + wrapper := manual.NewNetworkLoadBalancerBackendAddressPool( + clients.NewLoadBalancerBackendAddressPoolsClient(backendPoolClient), + []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, integrationTestResourceGroup)}, + ) + scope := wrapper.Scopes()[0] + + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + query := shared.CompositeLookupKey(integrationTestBackendPoolLBName, integrationTestBackendPoolName) + sdpItem, qErr := adapter.Get(ctx, scope, query, true) + if qErr != nil { + t.Fatalf("Expected no error, got: %v", qErr) + } + + linkedQueries := sdpItem.GetLinkedItemQueries() + if len(linkedQueries) == 0 { + t.Fatalf("Expected linked item queries, but got 
none") + } + + for _, liq := range linkedQueries { + query := liq.GetQuery() + if query.GetType() == "" { + t.Error("Expected linked query to have a non-empty Type") + } + if query.GetQuery() == "" { + t.Error("Expected linked query to have a non-empty Query") + } + if query.GetScope() == "" { + t.Error("Expected linked query to have a non-empty Scope") + } + } + + // Verify parent load balancer link exists + var hasLoadBalancerLink bool + for _, liq := range linkedQueries { + if liq.GetQuery().GetType() == azureshared.NetworkLoadBalancer.String() { + hasLoadBalancerLink = true + if liq.GetQuery().GetQuery() != integrationTestBackendPoolLBName { + t.Errorf("Expected linked query to load balancer %s, got %s", integrationTestBackendPoolLBName, liq.GetQuery().GetQuery()) + } + break + } + } + + if !hasLoadBalancerLink { + t.Error("Expected linked query to parent load balancer, but didn't find one") + } + + log.Printf("Verified %d linked item queries for backend address pool %s", len(linkedQueries), integrationTestBackendPoolName) + }) + + t.Run("VerifyItemAttributes", func(t *testing.T) { + ctx := t.Context() + + wrapper := manual.NewNetworkLoadBalancerBackendAddressPool( + clients.NewLoadBalancerBackendAddressPoolsClient(backendPoolClient), + []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, integrationTestResourceGroup)}, + ) + scope := wrapper.Scopes()[0] + + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + query := shared.CompositeLookupKey(integrationTestBackendPoolLBName, integrationTestBackendPoolName) + sdpItem, qErr := adapter.Get(ctx, scope, query, true) + if qErr != nil { + t.Fatalf("Expected no error, got: %v", qErr) + } + + if sdpItem.GetType() != azureshared.NetworkLoadBalancerBackendAddressPool.String() { + t.Errorf("Expected type %s, got %s", azureshared.NetworkLoadBalancerBackendAddressPool, sdpItem.GetType()) + } + + expectedScope := fmt.Sprintf("%s.%s", subscriptionID, 
integrationTestResourceGroup)
+		if sdpItem.GetScope() != expectedScope {
+			t.Errorf("Expected scope %s, got %s", expectedScope, sdpItem.GetScope())
+		}
+
+		if sdpItem.GetUniqueAttribute() != "uniqueAttr" {
+			t.Errorf("Expected unique attribute 'uniqueAttr', got %s", sdpItem.GetUniqueAttribute())
+		}
+
+		if err := sdpItem.Validate(); err != nil {
+			t.Errorf("Item validation failed: %v", err)
+		}
+
+		log.Printf("Verified item attributes for backend address pool %s", integrationTestBackendPoolName)
+		})
+	})
+
+	t.Run("Teardown", func(t *testing.T) {
+		ctx := t.Context()
+
+		err := deleteLoadBalancer(ctx, lbClient, integrationTestResourceGroup, integrationTestBackendPoolLBName)
+		if err != nil {
+			t.Fatalf("Failed to delete load balancer: %v", err)
+		}
+
+		err = deletePublicIPForBackendPool(ctx, publicIPClient, integrationTestResourceGroup, integrationTestPublicIPNameForBackendPool)
+		if err != nil {
+			t.Fatalf("Failed to delete public IP address: %v", err)
+		}
+
+		err = deleteVirtualNetworkForBackendPool(ctx, vnetClient, integrationTestResourceGroup, integrationTestVNetNameForBackendPool)
+		if err != nil {
+			t.Fatalf("Failed to delete virtual network: %v", err)
+		}
+	})
+}
+
+// ptr returns a pointer to v. The Azure SDK models take pointers for nearly
+// every field; the builtin new() only accepts a type (new(T)), not a value,
+// so value-style new(x) calls do not compile — this helper replaces them.
+func ptr[T any](v T) *T {
+	return &v
+}
+
+// createVirtualNetworkForBackendPool creates the test VNet with a single
+// "default" subnet. Idempotent: returns nil if the VNet already exists.
+func createVirtualNetworkForBackendPool(ctx context.Context, client *armnetwork.VirtualNetworksClient, resourceGroupName, vnetName, location string) error {
+	_, err := client.Get(ctx, resourceGroupName, vnetName, nil)
+	if err == nil {
+		log.Printf("Virtual network %s already exists, skipping creation", vnetName)
+		return nil
+	}
+
+	poller, err := client.BeginCreateOrUpdate(ctx, resourceGroupName, vnetName, armnetwork.VirtualNetwork{
+		Location: ptr(location),
+		Properties: &armnetwork.VirtualNetworkPropertiesFormat{
+			AddressSpace: &armnetwork.AddressSpace{
+				AddressPrefixes: []*string{ptr("10.3.0.0/16")},
+			},
+			Subnets: []*armnetwork.Subnet{
+				{
+					Name: ptr(integrationTestSubnetNameForBackendPool),
+					Properties: &armnetwork.SubnetPropertiesFormat{
+						AddressPrefix: ptr("10.3.0.0/24"),
+					},
+				},
+			},
+		},
+		Tags: map[string]*string{
+			"purpose": ptr("overmind-integration-tests"),
+		},
+	}, nil)
+	if err != nil {
+		return fmt.Errorf("failed to begin creating virtual network: %w", err)
+	}
+
+	_, err = poller.PollUntilDone(ctx, nil)
+	if err != nil {
+		return fmt.Errorf("failed to create virtual network: %w", err)
+	}
+
+	log.Printf("Virtual network %s created successfully", vnetName)
+	return nil
+}
+
+// deleteVirtualNetworkForBackendPool deletes the test VNet. A failure to
+// begin deletion is logged and ignored (the VNet may already be gone).
+func deleteVirtualNetworkForBackendPool(ctx context.Context, client *armnetwork.VirtualNetworksClient, resourceGroupName, vnetName string) error {
+	poller, err := client.BeginDelete(ctx, resourceGroupName, vnetName, nil)
+	if err != nil {
+		log.Printf("Virtual network %s delete failed (may already be deleted): %v", vnetName, err)
+		return nil
+	}
+
+	_, err = poller.PollUntilDone(ctx, nil)
+	if err != nil {
+		return fmt.Errorf("failed to delete virtual network: %w", err)
+	}
+
+	log.Printf("Virtual network %s deleted successfully", vnetName)
+	return nil
+}
+
+// createPublicIPForBackendPool creates a Standard-SKU static IPv4 public IP
+// for the load balancer frontend. Idempotent: returns nil if it exists.
+func createPublicIPForBackendPool(ctx context.Context, client *armnetwork.PublicIPAddressesClient, resourceGroupName, publicIPName, location string) error {
+	_, err := client.Get(ctx, resourceGroupName, publicIPName, nil)
+	if err == nil {
+		log.Printf("Public IP address %s already exists, skipping creation", publicIPName)
+		return nil
+	}
+
+	poller, err := client.BeginCreateOrUpdate(ctx, resourceGroupName, publicIPName, armnetwork.PublicIPAddress{
+		Location: ptr(location),
+		Properties: &armnetwork.PublicIPAddressPropertiesFormat{
+			PublicIPAllocationMethod: ptr(armnetwork.IPAllocationMethodStatic),
+			PublicIPAddressVersion:   ptr(armnetwork.IPVersionIPv4),
+		},
+		SKU: &armnetwork.PublicIPAddressSKU{
+			Name: ptr(armnetwork.PublicIPAddressSKUNameStandard),
+		},
+		Tags: map[string]*string{
+			"purpose": ptr("overmind-integration-tests"),
+		},
+	}, nil)
+	if err != nil {
+		return fmt.Errorf("failed to begin creating public IP address: %w", err)
+	}
+
+	_, err = poller.PollUntilDone(ctx, nil)
+	if err != nil {
+		return fmt.Errorf("failed to create public IP address: %w", err)
+	}
+
+	log.Printf("Public IP address %s created successfully", publicIPName)
+	return nil
+}
+
+// deletePublicIPForBackendPool deletes the test public IP. A failure to
+// begin deletion is logged and ignored (the IP may already be gone).
+func deletePublicIPForBackendPool(ctx context.Context, client *armnetwork.PublicIPAddressesClient, resourceGroupName, publicIPName string) error {
+	poller, err := client.BeginDelete(ctx, resourceGroupName, publicIPName, nil)
+	if err != nil {
+		log.Printf("Public IP address %s delete failed (may already be deleted): %v", publicIPName, err)
+		return nil
+	}
+
+	_, err = poller.PollUntilDone(ctx, nil)
+	if err != nil {
+		return fmt.Errorf("failed to delete public IP address: %w", err)
+	}
+
+	log.Printf("Public IP address %s deleted successfully", publicIPName)
+	return nil
+}
+
+// createLoadBalancerWithBackendPool creates a Standard-SKU load balancer with
+// one frontend IP configuration (bound to publicIPID), one backend address
+// pool and one TCP/80 load-balancing rule tying the two together.
+// Idempotent: returns nil if the load balancer already exists.
+func createLoadBalancerWithBackendPool(ctx context.Context, client *armnetwork.LoadBalancersClient, subscriptionID, resourceGroupName, lbName, location, publicIPID, backendPoolName string) error {
+	_, err := client.Get(ctx, resourceGroupName, lbName, nil)
+	if err == nil {
+		log.Printf("Load balancer %s already exists, skipping creation", lbName)
+		return nil
+	}
+
+	poller, err := client.BeginCreateOrUpdate(ctx, resourceGroupName, lbName, armnetwork.LoadBalancer{
+		Location: ptr(location),
+		Properties: &armnetwork.LoadBalancerPropertiesFormat{
+			FrontendIPConfigurations: []*armnetwork.FrontendIPConfiguration{
+				{
+					Name: ptr("frontend-config"),
+					Properties: &armnetwork.FrontendIPConfigurationPropertiesFormat{
+						PublicIPAddress: &armnetwork.PublicIPAddress{
+							ID: ptr(publicIPID),
+						},
+					},
+				},
+			},
+			BackendAddressPools: []*armnetwork.BackendAddressPool{
+				{
+					Name: ptr(backendPoolName),
+				},
+			},
+			LoadBalancingRules: []*armnetwork.LoadBalancingRule{
+				{
+					Name: ptr("lb-rule"),
+					Properties: &armnetwork.LoadBalancingRulePropertiesFormat{
+						// Rules reference frontend/backend sub-resources by ARM ID,
+						// so the IDs are built to match the names declared above.
+						FrontendIPConfiguration: &armnetwork.SubResource{
+							ID: ptr(fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/loadBalancers/%s/frontendIPConfigurations/frontend-config", subscriptionID, resourceGroupName, lbName)),
+						},
+						BackendAddressPool: &armnetwork.SubResource{
+							ID: ptr(fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/loadBalancers/%s/backendAddressPools/%s", subscriptionID, resourceGroupName, lbName, backendPoolName)),
+						},
+						Protocol:             ptr(armnetwork.TransportProtocolTCP),
+						FrontendPort:         ptr(int32(80)),
+						BackendPort:          ptr(int32(80)),
+						EnableFloatingIP:     ptr(false),
+						IdleTimeoutInMinutes: ptr(int32(4)),
+					},
+				},
+			},
+		},
+		SKU: &armnetwork.LoadBalancerSKU{
+			Name: ptr(armnetwork.LoadBalancerSKUNameStandard),
+		},
+		Tags: map[string]*string{
+			"purpose": ptr("overmind-integration-tests"),
+		},
+	}, nil)
+	if err != nil {
+		return fmt.Errorf("failed to begin creating load balancer: %w", err)
+	}
+
+	_, err = poller.PollUntilDone(ctx, nil)
+	if err != nil {
+		return fmt.Errorf("failed to create load balancer: %w", err)
+	}
+
+	log.Printf("Load balancer %s with backend pool %s created successfully", lbName, backendPoolName)
+	return nil
+}
diff --git a/sources/azure/integration-tests/network-load-balancer-probe_test.go b/sources/azure/integration-tests/network-load-balancer-probe_test.go
new file mode 100644
index 00000000..b42390a9
--- /dev/null
+++ b/sources/azure/integration-tests/network-load-balancer-probe_test.go
@@ -0,0 +1,523 @@
+package integrationtests
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"net/http"
+	"os"
+	"testing"
+
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
+	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v9"
+	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources/v2"
+	log "github.com/sirupsen/logrus"
+
+	"github.com/overmindtech/cli/go/discovery"
+	"github.com/overmindtech/cli/go/sdp-go"
+	"github.com/overmindtech/cli/go/sdpcache"
+	"github.com/overmindtech/cli/sources",
+	
"github.com/overmindtech/cli/sources/azure/clients" + "github.com/overmindtech/cli/sources/azure/manual" + azureshared "github.com/overmindtech/cli/sources/azure/shared" + "github.com/overmindtech/cli/sources/shared" +) + +const ( + integrationTestLBForProbeName = "ovm-integ-test-lb-probe" + integrationTestVNetForProbeName = "ovm-integ-test-vnet-for-probe" + integrationTestSubnetForProbeName = "default" + integrationTestPublicIPForProbeLB = "ovm-integ-test-pip-for-probe-lb" + integrationTestProbeName = "ovm-integ-test-health-probe" + integrationTestProbeHTTPName = "ovm-integ-test-http-probe" +) + +func TestNetworkLoadBalancerProbeIntegration(t *testing.T) { + subscriptionID := os.Getenv("AZURE_SUBSCRIPTION_ID") + if subscriptionID == "" { + t.Skip("AZURE_SUBSCRIPTION_ID environment variable not set") + } + + cred, err := azureshared.NewAzureCredential(t.Context()) + if err != nil { + t.Fatalf("Failed to create Azure credential: %v", err) + } + + rgClient, err := armresources.NewResourceGroupsClient(subscriptionID, cred, nil) + if err != nil { + t.Fatalf("Failed to create Resource Groups client: %v", err) + } + + vnetClient, err := armnetwork.NewVirtualNetworksClient(subscriptionID, cred, nil) + if err != nil { + t.Fatalf("Failed to create Virtual Networks client: %v", err) + } + + publicIPClient, err := armnetwork.NewPublicIPAddressesClient(subscriptionID, cred, nil) + if err != nil { + t.Fatalf("Failed to create Public IP Addresses client: %v", err) + } + + lbClient, err := armnetwork.NewLoadBalancersClient(subscriptionID, cred, nil) + if err != nil { + t.Fatalf("Failed to create Load Balancers client: %v", err) + } + + probesClient, err := armnetwork.NewLoadBalancerProbesClient(subscriptionID, cred, nil) + if err != nil { + t.Fatalf("Failed to create Load Balancer Probes client: %v", err) + } + + setupCompleted := false + + t.Run("Setup", func(t *testing.T) { + ctx := t.Context() + + err := createResourceGroup(ctx, rgClient, integrationTestResourceGroup, 
integrationTestLocation) + if err != nil { + t.Fatalf("Failed to create resource group: %v", err) + } + + err = createVNetForProbeTest(ctx, vnetClient, integrationTestResourceGroup, integrationTestVNetForProbeName, integrationTestLocation) + if err != nil { + t.Fatalf("Failed to create virtual network: %v", err) + } + + err = createPublicIPForProbeTest(ctx, publicIPClient, integrationTestResourceGroup, integrationTestPublicIPForProbeLB, integrationTestLocation) + if err != nil { + t.Fatalf("Failed to create public IP address: %v", err) + } + + publicIPResp, err := publicIPClient.Get(ctx, integrationTestResourceGroup, integrationTestPublicIPForProbeLB, nil) + if err != nil { + t.Fatalf("Failed to get public IP address: %v", err) + } + + err = createLBWithProbes(ctx, lbClient, subscriptionID, integrationTestResourceGroup, integrationTestLBForProbeName, integrationTestLocation, *publicIPResp.ID) + if err != nil { + t.Fatalf("Failed to create load balancer with probes: %v", err) + } + + setupCompleted = true + log.Printf("Setup completed: Load balancer %s with probes created", integrationTestLBForProbeName) + }) + + t.Run("Run", func(t *testing.T) { + if !setupCompleted { + t.Skip("Skipping Run: Setup did not complete successfully") + } + + t.Run("GetProbe", func(t *testing.T) { + ctx := t.Context() + + probeWrapper := manual.NewNetworkLoadBalancerProbe( + clients.NewLoadBalancerProbesClient(probesClient), + []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, integrationTestResourceGroup)}, + ) + scope := probeWrapper.Scopes()[0] + adapter := sources.WrapperToAdapter(probeWrapper, sdpcache.NewNoOpCache()) + + query := shared.CompositeLookupKey(integrationTestLBForProbeName, integrationTestProbeName) + sdpItem, qErr := adapter.Get(ctx, scope, query, true) + if qErr != nil { + t.Fatalf("Expected no error, got: %v", qErr) + } + + if sdpItem.GetType() != azureshared.NetworkLoadBalancerProbe.String() { + t.Errorf("Expected type %s, got %s", 
azureshared.NetworkLoadBalancerProbe, sdpItem.GetType()) + } + + expectedUniqueValue := shared.CompositeLookupKey(integrationTestLBForProbeName, integrationTestProbeName) + if sdpItem.UniqueAttributeValue() != expectedUniqueValue { + t.Errorf("Expected unique attribute value %s, got %s", expectedUniqueValue, sdpItem.UniqueAttributeValue()) + } + + log.Printf("Successfully retrieved probe %s from load balancer %s", integrationTestProbeName, integrationTestLBForProbeName) + }) + + t.Run("SearchProbes", func(t *testing.T) { + ctx := t.Context() + + probeWrapper := manual.NewNetworkLoadBalancerProbe( + clients.NewLoadBalancerProbesClient(probesClient), + []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, integrationTestResourceGroup)}, + ) + scope := probeWrapper.Scopes()[0] + adapter := sources.WrapperToAdapter(probeWrapper, sdpcache.NewNoOpCache()) + + searchable, ok := adapter.(discovery.SearchableAdapter) + if !ok { + t.Fatalf("Adapter does not support Search operation") + } + + sdpItems, err := searchable.Search(ctx, scope, integrationTestLBForProbeName, true) + if err != nil { + t.Fatalf("Expected no error, got: %v", err) + } + + if len(sdpItems) < 2 { + t.Fatalf("Expected at least 2 probes, got: %d", len(sdpItems)) + } + + foundTCP := false + foundHTTP := false + for _, item := range sdpItems { + val := item.UniqueAttributeValue() + if val == shared.CompositeLookupKey(integrationTestLBForProbeName, integrationTestProbeName) { + foundTCP = true + } + if val == shared.CompositeLookupKey(integrationTestLBForProbeName, integrationTestProbeHTTPName) { + foundHTTP = true + } + } + + if !foundTCP { + t.Errorf("Expected to find TCP probe %s in search results", integrationTestProbeName) + } + if !foundHTTP { + t.Errorf("Expected to find HTTP probe %s in search results", integrationTestProbeHTTPName) + } + + log.Printf("Successfully searched %d probes for load balancer %s", len(sdpItems), integrationTestLBForProbeName) + }) + + 
t.Run("VerifyLinkedItems", func(t *testing.T) { + ctx := t.Context() + + probeWrapper := manual.NewNetworkLoadBalancerProbe( + clients.NewLoadBalancerProbesClient(probesClient), + []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, integrationTestResourceGroup)}, + ) + scope := probeWrapper.Scopes()[0] + adapter := sources.WrapperToAdapter(probeWrapper, sdpcache.NewNoOpCache()) + + query := shared.CompositeLookupKey(integrationTestLBForProbeName, integrationTestProbeName) + sdpItem, qErr := adapter.Get(ctx, scope, query, true) + if qErr != nil { + t.Fatalf("Expected no error, got: %v", qErr) + } + + linkedQueries := sdpItem.GetLinkedItemQueries() + if len(linkedQueries) == 0 { + t.Fatalf("Expected linked item queries, but got none") + } + + for _, liq := range linkedQueries { + q := liq.GetQuery() + if q.GetType() == "" { + t.Error("Linked item query has empty Type") + } + if q.GetQuery() == "" { + t.Error("Linked item query has empty Query") + } + if q.GetScope() == "" { + t.Error("Linked item query has empty Scope") + } + if q.GetMethod() != sdp.QueryMethod_GET && q.GetMethod() != sdp.QueryMethod_SEARCH { + t.Errorf("Linked item query has invalid Method: %v", q.GetMethod()) + } + } + + foundParentLB := false + for _, liq := range linkedQueries { + if liq.GetQuery().GetType() == azureshared.NetworkLoadBalancer.String() { + foundParentLB = true + if liq.GetQuery().GetQuery() != integrationTestLBForProbeName { + t.Errorf("Expected parent LB query %s, got %s", integrationTestLBForProbeName, liq.GetQuery().GetQuery()) + } + break + } + } + if !foundParentLB { + t.Error("Expected to find parent Load Balancer linked query") + } + + log.Printf("Verified %d linked item queries for probe %s", len(linkedQueries), integrationTestProbeName) + }) + + t.Run("VerifyItemAttributes", func(t *testing.T) { + ctx := t.Context() + + probeWrapper := manual.NewNetworkLoadBalancerProbe( + clients.NewLoadBalancerProbesClient(probesClient), + 
[]azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, integrationTestResourceGroup)}, + ) + scope := probeWrapper.Scopes()[0] + adapter := sources.WrapperToAdapter(probeWrapper, sdpcache.NewNoOpCache()) + + query := shared.CompositeLookupKey(integrationTestLBForProbeName, integrationTestProbeName) + sdpItem, qErr := adapter.Get(ctx, scope, query, true) + if qErr != nil { + t.Fatalf("Expected no error, got: %v", qErr) + } + + if sdpItem.GetType() != azureshared.NetworkLoadBalancerProbe.String() { + t.Errorf("Expected type %s, got %s", azureshared.NetworkLoadBalancerProbe, sdpItem.GetType()) + } + + expectedScope := fmt.Sprintf("%s.%s", subscriptionID, integrationTestResourceGroup) + if sdpItem.GetScope() != expectedScope { + t.Errorf("Expected scope %s, got %s", expectedScope, sdpItem.GetScope()) + } + + if sdpItem.GetUniqueAttribute() != "uniqueAttr" { + t.Errorf("Expected unique attribute 'uniqueAttr', got %s", sdpItem.GetUniqueAttribute()) + } + + if err := sdpItem.Validate(); err != nil { + t.Errorf("Validation failed: %v", err) + } + + log.Printf("Verified item attributes for probe %s", integrationTestProbeName) + }) + }) + + t.Run("Teardown", func(t *testing.T) { + ctx := t.Context() + + err := deleteLBForProbeTest(ctx, lbClient, integrationTestResourceGroup, integrationTestLBForProbeName) + if err != nil { + t.Fatalf("Failed to delete load balancer: %v", err) + } + + err = deletePublicIPForProbeTest(ctx, publicIPClient, integrationTestResourceGroup, integrationTestPublicIPForProbeLB) + if err != nil { + t.Fatalf("Failed to delete public IP address: %v", err) + } + + err = deleteVNetForProbeTest(ctx, vnetClient, integrationTestResourceGroup, integrationTestVNetForProbeName) + if err != nil { + t.Fatalf("Failed to delete virtual network: %v", err) + } + }) +} + +func createVNetForProbeTest(ctx context.Context, client *armnetwork.VirtualNetworksClient, rg, name, location string) error { + _, err := client.Get(ctx, rg, name, nil) + if 
err == nil { + log.Printf("Virtual network %s already exists, skipping creation", name) + return nil + } + + poller, err := client.BeginCreateOrUpdate(ctx, rg, name, armnetwork.VirtualNetwork{ + Location: new(location), + Properties: &armnetwork.VirtualNetworkPropertiesFormat{ + AddressSpace: &armnetwork.AddressSpace{ + AddressPrefixes: []*string{new("10.3.0.0/16")}, + }, + Subnets: []*armnetwork.Subnet{ + { + Name: new(integrationTestSubnetForProbeName), + Properties: &armnetwork.SubnetPropertiesFormat{ + AddressPrefix: new("10.3.0.0/24"), + }, + }, + }, + }, + Tags: map[string]*string{"purpose": new("overmind-integration-tests")}, + }, nil) + if err != nil { + var respErr *azcore.ResponseError + if errors.As(err, &respErr) && respErr.StatusCode == http.StatusConflict { + if _, getErr := client.Get(ctx, rg, name, nil); getErr == nil { + log.Printf("Virtual network %s already exists (conflict), skipping", name) + return nil + } + return fmt.Errorf("virtual network %s conflict but not retrievable: %w", name, err) + } + return fmt.Errorf("failed to create virtual network: %w", err) + } + + _, err = poller.PollUntilDone(ctx, nil) + if err != nil { + return fmt.Errorf("failed to create virtual network: %w", err) + } + log.Printf("Virtual network %s created successfully", name) + return nil +} + +func deleteVNetForProbeTest(ctx context.Context, client *armnetwork.VirtualNetworksClient, rg, name string) error { + poller, err := client.BeginDelete(ctx, rg, name, nil) + if err != nil { + var respErr *azcore.ResponseError + if errors.As(err, &respErr) && respErr.StatusCode == http.StatusNotFound { + log.Printf("Virtual network %s not found, skipping deletion", name) + return nil + } + return fmt.Errorf("failed to delete virtual network: %w", err) + } + _, err = poller.PollUntilDone(ctx, nil) + if err != nil { + return fmt.Errorf("failed to delete virtual network: %w", err) + } + log.Printf("Virtual network %s deleted successfully", name) + return nil +} + +func 
createPublicIPForProbeTest(ctx context.Context, client *armnetwork.PublicIPAddressesClient, rg, name, location string) error { + _, err := client.Get(ctx, rg, name, nil) + if err == nil { + log.Printf("Public IP address %s already exists, skipping creation", name) + return nil + } + + poller, err := client.BeginCreateOrUpdate(ctx, rg, name, armnetwork.PublicIPAddress{ + Location: new(location), + Properties: &armnetwork.PublicIPAddressPropertiesFormat{ + PublicIPAllocationMethod: new(armnetwork.IPAllocationMethodStatic), + PublicIPAddressVersion: new(armnetwork.IPVersionIPv4), + }, + SKU: &armnetwork.PublicIPAddressSKU{ + Name: new(armnetwork.PublicIPAddressSKUNameStandard), + }, + Tags: map[string]*string{"purpose": new("overmind-integration-tests")}, + }, nil) + if err != nil { + var respErr *azcore.ResponseError + if errors.As(err, &respErr) && respErr.StatusCode == http.StatusConflict { + if _, getErr := client.Get(ctx, rg, name, nil); getErr == nil { + log.Printf("Public IP address %s already exists (conflict), skipping", name) + return nil + } + return fmt.Errorf("public IP %s conflict but not retrievable: %w", name, err) + } + return fmt.Errorf("failed to create public IP address: %w", err) + } + + _, err = poller.PollUntilDone(ctx, nil) + if err != nil { + return fmt.Errorf("failed to create public IP address: %w", err) + } + log.Printf("Public IP address %s created successfully", name) + return nil +} + +func deletePublicIPForProbeTest(ctx context.Context, client *armnetwork.PublicIPAddressesClient, rg, name string) error { + poller, err := client.BeginDelete(ctx, rg, name, nil) + if err != nil { + var respErr *azcore.ResponseError + if errors.As(err, &respErr) && respErr.StatusCode == http.StatusNotFound { + log.Printf("Public IP address %s not found, skipping deletion", name) + return nil + } + return fmt.Errorf("failed to delete public IP address: %w", err) + } + _, err = poller.PollUntilDone(ctx, nil) + if err != nil { + return fmt.Errorf("failed to 
delete public IP address: %w", err) + } + log.Printf("Public IP address %s deleted successfully", name) + return nil +} + +func createLBWithProbes(ctx context.Context, client *armnetwork.LoadBalancersClient, subscriptionID, rg, name, location, publicIPID string) error { + _, err := client.Get(ctx, rg, name, nil) + if err == nil { + log.Printf("Load balancer %s already exists, skipping creation", name) + return nil + } + + frontendIPConfigID := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/loadBalancers/%s/frontendIPConfigurations/frontend-config", subscriptionID, rg, name) + backendPoolID := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/loadBalancers/%s/backendAddressPools/backend-pool", subscriptionID, rg, name) + tcpProbeID := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/loadBalancers/%s/probes/%s", subscriptionID, rg, name, integrationTestProbeName) + + port80 := int32(80) + port443 := int32(443) + intervalInSeconds := int32(15) + numberOfProbes := int32(2) + + poller, err := client.BeginCreateOrUpdate(ctx, rg, name, armnetwork.LoadBalancer{ + Location: new(location), + SKU: &armnetwork.LoadBalancerSKU{ + Name: new(armnetwork.LoadBalancerSKUNameStandard), + }, + Properties: &armnetwork.LoadBalancerPropertiesFormat{ + FrontendIPConfigurations: []*armnetwork.FrontendIPConfiguration{ + { + Name: new("frontend-config"), + Properties: &armnetwork.FrontendIPConfigurationPropertiesFormat{ + PublicIPAddress: &armnetwork.PublicIPAddress{ + ID: new(publicIPID), + }, + }, + }, + }, + BackendAddressPools: []*armnetwork.BackendAddressPool{ + {Name: new("backend-pool")}, + }, + Probes: []*armnetwork.Probe{ + { + Name: new(integrationTestProbeName), + Properties: &armnetwork.ProbePropertiesFormat{ + Protocol: new(armnetwork.ProbeProtocolTCP), + Port: &port80, + IntervalInSeconds: &intervalInSeconds, + NumberOfProbes: &numberOfProbes, + }, + }, + { + Name: 
new(integrationTestProbeHTTPName), + Properties: &armnetwork.ProbePropertiesFormat{ + Protocol: new(armnetwork.ProbeProtocolHTTP), + Port: &port443, + IntervalInSeconds: &intervalInSeconds, + NumberOfProbes: &numberOfProbes, + RequestPath: new("/health"), + }, + }, + }, + LoadBalancingRules: []*armnetwork.LoadBalancingRule{ + { + Name: new("lb-rule-with-probe"), + Properties: &armnetwork.LoadBalancingRulePropertiesFormat{ + FrontendIPConfiguration: &armnetwork.SubResource{ID: new(frontendIPConfigID)}, + BackendAddressPool: &armnetwork.SubResource{ID: new(backendPoolID)}, + Probe: &armnetwork.SubResource{ID: new(tcpProbeID)}, + Protocol: new(armnetwork.TransportProtocolTCP), + FrontendPort: &port80, + BackendPort: &port80, + EnableFloatingIP: new(false), + IdleTimeoutInMinutes: new(int32(4)), + }, + }, + }, + }, + Tags: map[string]*string{"purpose": new("overmind-integration-tests")}, + }, nil) + if err != nil { + var respErr *azcore.ResponseError + if errors.As(err, &respErr) && respErr.StatusCode == http.StatusConflict { + if _, getErr := client.Get(ctx, rg, name, nil); getErr == nil { + log.Printf("Load balancer %s already exists (conflict), skipping", name) + return nil + } + return fmt.Errorf("load balancer %s conflict but not retrievable: %w", name, err) + } + return fmt.Errorf("failed to create load balancer: %w", err) + } + + _, err = poller.PollUntilDone(ctx, nil) + if err != nil { + return fmt.Errorf("failed to create load balancer: %w", err) + } + log.Printf("Load balancer %s with probes created successfully", name) + return nil +} + +func deleteLBForProbeTest(ctx context.Context, client *armnetwork.LoadBalancersClient, rg, name string) error { + poller, err := client.BeginDelete(ctx, rg, name, nil) + if err != nil { + var respErr *azcore.ResponseError + if errors.As(err, &respErr) && respErr.StatusCode == http.StatusNotFound { + log.Printf("Load balancer %s not found, skipping deletion", name) + return nil + } + return fmt.Errorf("failed to delete load 
balancer: %w", err) + } + _, err = poller.PollUntilDone(ctx, nil) + if err != nil { + return fmt.Errorf("failed to delete load balancer: %w", err) + } + log.Printf("Load balancer %s deleted successfully", name) + return nil +} diff --git a/sources/azure/integration-tests/sql-database-schema_test.go b/sources/azure/integration-tests/sql-database-schema_test.go new file mode 100644 index 00000000..fcc92648 --- /dev/null +++ b/sources/azure/integration-tests/sql-database-schema_test.go @@ -0,0 +1,544 @@ +package integrationtests + +import ( + "context" + "errors" + "fmt" + "math/rand" + "net/http" + "os" + "strings" + "testing" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources/v2" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/sql/armsql/v2" + log "github.com/sirupsen/logrus" + + "github.com/overmindtech/cli/go/discovery" + "github.com/overmindtech/cli/go/sdp-go" + "github.com/overmindtech/cli/go/sdpcache" + "github.com/overmindtech/cli/sources" + "github.com/overmindtech/cli/sources/azure/clients" + "github.com/overmindtech/cli/sources/azure/manual" + azureshared "github.com/overmindtech/cli/sources/azure/shared" + "github.com/overmindtech/cli/sources/shared" +) + +const ( + integrationTestSQLSchemaServerName = "ovm-integ-test-schema-svr" + integrationTestSQLSchemaDatabaseName = "ovm-integ-test-schema-db" +) + +func TestSQLDatabaseSchemaIntegration(t *testing.T) { + subscriptionID := os.Getenv("AZURE_SUBSCRIPTION_ID") + if subscriptionID == "" { + t.Skip("AZURE_SUBSCRIPTION_ID environment variable not set") + } + + // Initialize Azure credentials using DefaultAzureCredential + cred, err := azureshared.NewAzureCredential(t.Context()) + if err != nil { + t.Fatalf("Failed to create Azure credential: %v", err) + } + + // Create Azure SDK clients + sqlServerClient, err := armsql.NewServersClient(subscriptionID, cred, nil) + if err != nil { + t.Fatalf("Failed to create SQL 
Servers client: %v", err) + } + + sqlDatabaseClient, err := armsql.NewDatabasesClient(subscriptionID, cred, nil) + if err != nil { + t.Fatalf("Failed to create SQL Databases client: %v", err) + } + + sqlDatabaseSchemasClient, err := armsql.NewDatabaseSchemasClient(subscriptionID, cred, nil) + if err != nil { + t.Fatalf("Failed to create SQL Database Schemas client: %v", err) + } + + rgClient, err := armresources.NewResourceGroupsClient(subscriptionID, cred, nil) + if err != nil { + t.Fatalf("Failed to create Resource Groups client: %v", err) + } + + // Generate unique SQL server name (must be globally unique, lowercase, no special chars) + sqlServerName := generateSQLServerNameForSchemaTest(integrationTestSQLSchemaServerName) + + // Track if setup completed successfully + setupCompleted := false + + t.Run("Setup", func(t *testing.T) { + ctx := t.Context() + + // Create resource group if it doesn't exist + err := createResourceGroup(ctx, rgClient, integrationTestResourceGroup, integrationTestLocation) + if err != nil { + t.Fatalf("Failed to create resource group: %v", err) + } + + // Create SQL server + err = createSQLServerForSchemaTest(ctx, sqlServerClient, integrationTestResourceGroup, sqlServerName, integrationTestLocation) + if err != nil { + if errors.Is(err, errMissingSQLCredentials) { + t.Skip("Skipping: SQL server admin credentials not configured") + } + t.Fatalf("Failed to create SQL server: %v", err) + } + + // Wait for SQL server to be available + err = waitForSQLServerAvailableForSchemaTest(ctx, sqlServerClient, integrationTestResourceGroup, sqlServerName) + if err != nil { + t.Fatalf("Failed waiting for SQL server to be available: %v", err) + } + + // Create SQL database + err = createSQLDatabaseForSchemaTest(ctx, sqlDatabaseClient, integrationTestResourceGroup, sqlServerName, integrationTestSQLSchemaDatabaseName, integrationTestLocation) + if err != nil { + t.Fatalf("Failed to create SQL database: %v", err) + } + + // Wait for SQL database to be 
available + err = waitForSQLDatabaseAvailableForSchemaTest(ctx, sqlDatabaseClient, integrationTestResourceGroup, sqlServerName, integrationTestSQLSchemaDatabaseName) + if err != nil { + t.Fatalf("Failed waiting for SQL database to be available: %v", err) + } + + setupCompleted = true + }) + + t.Run("Run", func(t *testing.T) { + if !setupCompleted { + t.Skip("Skipping Run: Setup did not complete successfully") + } + + // First discover available schemas from the database (schemas are auto-created like dbo, sys, etc.) + var testSchemaName string + + t.Run("DiscoverSchemas", func(t *testing.T) { + ctx := t.Context() + + // List schemas to find an available one (dbo is standard in SQL Server databases) + pager := sqlDatabaseSchemasClient.NewListByDatabasePager(integrationTestResourceGroup, sqlServerName, integrationTestSQLSchemaDatabaseName, nil) + for pager.More() { + page, err := pager.NextPage(ctx) + if err != nil { + t.Fatalf("Failed to list schemas: %v", err) + } + if len(page.Value) > 0 && page.Value[0].Name != nil { + testSchemaName = *page.Value[0].Name + log.Printf("Discovered schema: %s", testSchemaName) + break + } + } + + if testSchemaName == "" { + t.Fatalf("No schemas found in database %s", integrationTestSQLSchemaDatabaseName) + } + }) + + t.Run("GetSQLDatabaseSchema", func(t *testing.T) { + ctx := t.Context() + + log.Printf("Retrieving SQL database schema %s in database %s, server %s", + testSchemaName, integrationTestSQLSchemaDatabaseName, sqlServerName) + + schemaWrapper := manual.NewSqlDatabaseSchema( + clients.NewSqlDatabaseSchemasClient(sqlDatabaseSchemasClient), + []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, integrationTestResourceGroup)}, + ) + scope := schemaWrapper.Scopes()[0] + + schemaAdapter := sources.WrapperToAdapter(schemaWrapper, sdpcache.NewNoOpCache()) + // Get requires serverName, databaseName, and schemaName as query parts + query := shared.CompositeLookupKey(sqlServerName, 
integrationTestSQLSchemaDatabaseName, testSchemaName) + sdpItem, qErr := schemaAdapter.Get(ctx, scope, query, true) + if qErr != nil { + t.Fatalf("Expected no error, got: %v", qErr) + } + + if sdpItem == nil { + t.Fatalf("Expected sdpItem to be non-nil") + } + + if sdpItem.GetType() != azureshared.SQLDatabaseSchema.String() { + t.Errorf("Expected type %s, got %s", azureshared.SQLDatabaseSchema, sdpItem.GetType()) + } + + uniqueAttrKey := sdpItem.GetUniqueAttribute() + if uniqueAttrKey != "uniqueAttr" { + t.Errorf("Expected unique attribute 'uniqueAttr', got %s", uniqueAttrKey) + } + + uniqueAttrValue, err := sdpItem.GetAttributes().Get(uniqueAttrKey) + if err != nil { + t.Fatalf("Failed to get unique attribute: %v", err) + } + + expectedUniqueAttrValue := shared.CompositeLookupKey(sqlServerName, integrationTestSQLSchemaDatabaseName, testSchemaName) + if uniqueAttrValue != expectedUniqueAttrValue { + t.Errorf("Expected unique attribute value %s, got %s", expectedUniqueAttrValue, uniqueAttrValue) + } + + if sdpItem.GetScope() != fmt.Sprintf("%s.%s", subscriptionID, integrationTestResourceGroup) { + t.Errorf("Expected scope %s.%s, got %s", subscriptionID, integrationTestResourceGroup, sdpItem.GetScope()) + } + + if err := sdpItem.Validate(); err != nil { + t.Fatalf("Item validation failed: %v", err) + } + + log.Printf("Successfully retrieved SQL database schema %s", testSchemaName) + }) + + t.Run("SearchSQLDatabaseSchemas", func(t *testing.T) { + ctx := t.Context() + + log.Printf("Searching SQL database schemas in database %s", integrationTestSQLSchemaDatabaseName) + + schemaWrapper := manual.NewSqlDatabaseSchema( + clients.NewSqlDatabaseSchemasClient(sqlDatabaseSchemasClient), + []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, integrationTestResourceGroup)}, + ) + scope := schemaWrapper.Scopes()[0] + + schemaAdapter := sources.WrapperToAdapter(schemaWrapper, sdpcache.NewNoOpCache()) + + // Check if adapter supports search + 
searchable, ok := schemaAdapter.(discovery.SearchableAdapter) + if !ok { + t.Fatalf("Adapter does not support Search operation") + } + + sdpItems, err := searchable.Search(ctx, scope, shared.CompositeLookupKey(sqlServerName, integrationTestSQLSchemaDatabaseName), true) + if err != nil { + t.Fatalf("Failed to search SQL database schemas: %v", err) + } + + if len(sdpItems) < 1 { + t.Fatalf("Expected at least one SQL database schema, got %d", len(sdpItems)) + } + + var found bool + for _, item := range sdpItems { + uniqueAttrKey := item.GetUniqueAttribute() + if v, err := item.GetAttributes().Get(uniqueAttrKey); err == nil { + expectedValue := shared.CompositeLookupKey(sqlServerName, integrationTestSQLSchemaDatabaseName, testSchemaName) + if v == expectedValue { + found = true + break + } + } + } + + if !found { + t.Fatalf("Expected to find schema %s in the search results", testSchemaName) + } + + log.Printf("Found %d SQL database schemas in search results", len(sdpItems)) + }) + + t.Run("VerifyLinkedItems", func(t *testing.T) { + ctx := t.Context() + + log.Printf("Verifying linked items for SQL database schema %s", testSchemaName) + + schemaWrapper := manual.NewSqlDatabaseSchema( + clients.NewSqlDatabaseSchemasClient(sqlDatabaseSchemasClient), + []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, integrationTestResourceGroup)}, + ) + scope := schemaWrapper.Scopes()[0] + + schemaAdapter := sources.WrapperToAdapter(schemaWrapper, sdpcache.NewNoOpCache()) + query := shared.CompositeLookupKey(sqlServerName, integrationTestSQLSchemaDatabaseName, testSchemaName) + sdpItem, qErr := schemaAdapter.Get(ctx, scope, query, true) + if qErr != nil { + t.Fatalf("Expected no error, got: %v", qErr) + } + + // Verify that linked items exist (SQL database should be linked) + linkedQueries := sdpItem.GetLinkedItemQueries() + if len(linkedQueries) == 0 { + t.Fatalf("Expected linked item queries, but got none") + } + + var hasSQLDatabaseLink bool + for _, 
liq := range linkedQueries { + if liq.GetQuery().GetType() != "" { + // Verify query structure + if liq.GetQuery().GetQuery() == "" { + t.Errorf("LinkedItemQuery has empty query") + } + if liq.GetQuery().GetScope() == "" { + t.Errorf("LinkedItemQuery has empty scope") + } + } + + if liq.GetQuery().GetType() == azureshared.SQLDatabase.String() { + hasSQLDatabaseLink = true + expectedQuery := shared.CompositeLookupKey(sqlServerName, integrationTestSQLSchemaDatabaseName) + if liq.GetQuery().GetQuery() != expectedQuery { + t.Errorf("Expected linked query to SQL database %s, got %s", expectedQuery, liq.GetQuery().GetQuery()) + } + if liq.GetQuery().GetMethod() != sdp.QueryMethod_GET { + t.Errorf("Expected linked query method GET, got %s", liq.GetQuery().GetMethod()) + } + if liq.GetQuery().GetScope() != scope { + t.Errorf("Expected linked query scope %s, got %s", scope, liq.GetQuery().GetScope()) + } + } + } + + if !hasSQLDatabaseLink { + t.Error("Expected linked query to SQL database, but didn't find one") + } + + log.Printf("Verified %d linked item queries for SQL database schema %s", len(linkedQueries), testSchemaName) + }) + + t.Run("VerifyItemAttributes", func(t *testing.T) { + ctx := t.Context() + + schemaWrapper := manual.NewSqlDatabaseSchema( + clients.NewSqlDatabaseSchemasClient(sqlDatabaseSchemasClient), + []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, integrationTestResourceGroup)}, + ) + scope := schemaWrapper.Scopes()[0] + + schemaAdapter := sources.WrapperToAdapter(schemaWrapper, sdpcache.NewNoOpCache()) + query := shared.CompositeLookupKey(sqlServerName, integrationTestSQLSchemaDatabaseName, testSchemaName) + sdpItem, qErr := schemaAdapter.Get(ctx, scope, query, true) + if qErr != nil { + t.Fatalf("Expected no error, got: %v", qErr) + } + + // Verify item type + if sdpItem.GetType() != azureshared.SQLDatabaseSchema.String() { + t.Errorf("Expected type %s, got %s", azureshared.SQLDatabaseSchema.String(), 
sdpItem.GetType()) + } + + // Verify scope + expectedScope := fmt.Sprintf("%s.%s", subscriptionID, integrationTestResourceGroup) + if sdpItem.GetScope() != expectedScope { + t.Errorf("Expected scope %s, got %s", expectedScope, sdpItem.GetScope()) + } + + // Verify unique attribute + if sdpItem.GetUniqueAttribute() != "uniqueAttr" { + t.Errorf("Expected unique attribute 'uniqueAttr', got %s", sdpItem.GetUniqueAttribute()) + } + + // Validate the item + if err := sdpItem.Validate(); err != nil { + t.Errorf("Item validation failed: %v", err) + } + + log.Printf("Verified item attributes for SQL database schema %s", testSchemaName) + }) + }) + + t.Run("Teardown", func(t *testing.T) { + ctx := t.Context() + + // Delete SQL database + err := deleteSQLDatabaseForSchemaTest(ctx, sqlDatabaseClient, integrationTestResourceGroup, sqlServerName, integrationTestSQLSchemaDatabaseName) + if err != nil { + t.Logf("Warning: Failed to delete SQL database: %v", err) + } + + // Delete SQL server + err = deleteSQLServerForSchemaTest(ctx, sqlServerClient, integrationTestResourceGroup, sqlServerName) + if err != nil { + t.Logf("Warning: Failed to delete SQL server: %v", err) + } + }) +} + +// errMissingSQLCredentials is a sentinel error for missing SQL credentials +var errMissingSQLCredentials = errors.New("AZURE_SQL_SERVER_ADMIN_LOGIN and AZURE_SQL_SERVER_ADMIN_PASSWORD environment variables must be set for integration tests") + +// createSQLServerForSchemaTest creates an Azure SQL server for schema tests +func createSQLServerForSchemaTest(ctx context.Context, client *armsql.ServersClient, resourceGroup, serverName, location string) error { + // Check if SQL server already exists + _, err := client.Get(ctx, resourceGroup, serverName, nil) + if err == nil { + log.Printf("SQL server %s already exists, skipping creation", serverName) + return nil + } + + var respErr *azcore.ResponseError + if !errors.As(err, &respErr) { + return fmt.Errorf("failed to check if SQL server exists: %w", err) + 
} + if respErr != nil && respErr.StatusCode != http.StatusNotFound { + return fmt.Errorf("failed to check if SQL server exists: %w", err) + } + + // Get credentials from environment + adminLogin := os.Getenv("AZURE_SQL_SERVER_ADMIN_LOGIN") + adminPassword := os.Getenv("AZURE_SQL_SERVER_ADMIN_PASSWORD") + + if adminLogin == "" || adminPassword == "" { + return errMissingSQLCredentials + } + + poller, err := client.BeginCreateOrUpdate(ctx, resourceGroup, serverName, armsql.Server{ + Location: new(location), + Properties: &armsql.ServerProperties{ + AdministratorLogin: new(adminLogin), + AdministratorLoginPassword: new(adminPassword), + Version: new("12.0"), + }, + Tags: map[string]*string{ + "purpose": new("overmind-integration-tests"), + "managed": new("true"), + }, + }, nil) + if err != nil { + return fmt.Errorf("failed to start SQL server creation: %w", err) + } + + _, err = poller.PollUntilDone(ctx, nil) + if err != nil { + return fmt.Errorf("failed to create SQL server: %w", err) + } + + log.Printf("SQL server %s created successfully in location %s", serverName, location) + return nil +} + +// waitForSQLServerAvailableForSchemaTest waits for a SQL server to be available +func waitForSQLServerAvailableForSchemaTest(ctx context.Context, client *armsql.ServersClient, resourceGroup, serverName string) error { + maxAttempts := 30 + for range maxAttempts { + server, err := client.Get(ctx, resourceGroup, serverName, nil) + if err == nil { + if server.Properties != nil && server.Properties.State != nil && *server.Properties.State == "Ready" { + return nil + } + } + time.Sleep(5 * time.Second) + } + return fmt.Errorf("SQL server %s did not become available within expected time", serverName) +} + +// createSQLDatabaseForSchemaTest creates an Azure SQL database for schema tests +func createSQLDatabaseForSchemaTest(ctx context.Context, client *armsql.DatabasesClient, resourceGroup, serverName, databaseName, location string) error { + // Check if SQL database already exists 
+ _, err := client.Get(ctx, resourceGroup, serverName, databaseName, nil) + if err == nil { + log.Printf("SQL database %s already exists, skipping creation", databaseName) + return nil + } + + var respErr *azcore.ResponseError + if !errors.As(err, &respErr) { + return fmt.Errorf("failed to check if SQL database exists: %w", err) + } + if respErr != nil && respErr.StatusCode != http.StatusNotFound { + return fmt.Errorf("failed to check if SQL database exists: %w", err) + } + + poller, err := client.BeginCreateOrUpdate(ctx, resourceGroup, serverName, databaseName, armsql.Database{ + Location: new(location), + Properties: &armsql.DatabaseProperties{ + RequestedServiceObjectiveName: new("Basic"), + }, + Tags: map[string]*string{ + "purpose": new("overmind-integration-tests"), + "managed": new("true"), + }, + }, nil) + if err != nil { + return fmt.Errorf("failed to start SQL database creation: %w", err) + } + + _, err = poller.PollUntilDone(ctx, nil) + if err != nil { + return fmt.Errorf("failed to create SQL database: %w", err) + } + + log.Printf("SQL database %s created successfully in server %s", databaseName, serverName) + return nil +} + +// waitForSQLDatabaseAvailableForSchemaTest waits for a SQL database to be available +func waitForSQLDatabaseAvailableForSchemaTest(ctx context.Context, client *armsql.DatabasesClient, resourceGroup, serverName, databaseName string) error { + maxAttempts := 30 + for range maxAttempts { + database, err := client.Get(ctx, resourceGroup, serverName, databaseName, nil) + if err == nil { + if database.Properties != nil && database.Properties.Status != nil && *database.Properties.Status == armsql.DatabaseStatusOnline { + return nil + } + } + time.Sleep(5 * time.Second) + } + return fmt.Errorf("SQL database %s did not become available within expected time", databaseName) +} + +// deleteSQLDatabaseForSchemaTest deletes an Azure SQL database +func deleteSQLDatabaseForSchemaTest(ctx context.Context, client *armsql.DatabasesClient, 
resourceGroup, serverName, databaseName string) error { + _, err := client.Get(ctx, resourceGroup, serverName, databaseName, nil) + if err != nil { + var respErr *azcore.ResponseError + if errors.As(err, &respErr) && respErr.StatusCode == http.StatusNotFound { + log.Printf("SQL database %s does not exist, skipping deletion", databaseName) + return nil + } + return fmt.Errorf("failed to check if SQL database exists: %w", err) + } + + poller, err := client.BeginDelete(ctx, resourceGroup, serverName, databaseName, nil) + if err != nil { + return fmt.Errorf("failed to start SQL database deletion: %w", err) + } + + _, err = poller.PollUntilDone(ctx, nil) + if err != nil { + return fmt.Errorf("failed to delete SQL database: %w", err) + } + + log.Printf("SQL database %s deleted successfully", databaseName) + return nil +} + +// deleteSQLServerForSchemaTest deletes an Azure SQL server +func deleteSQLServerForSchemaTest(ctx context.Context, client *armsql.ServersClient, resourceGroup, serverName string) error { + _, err := client.Get(ctx, resourceGroup, serverName, nil) + if err != nil { + var respErr *azcore.ResponseError + if errors.As(err, &respErr) && respErr.StatusCode == http.StatusNotFound { + log.Printf("SQL server %s does not exist, skipping deletion", serverName) + return nil + } + return fmt.Errorf("failed to check if SQL server exists: %w", err) + } + + poller, err := client.BeginDelete(ctx, resourceGroup, serverName, nil) + if err != nil { + return fmt.Errorf("failed to start SQL server deletion: %w", err) + } + + _, err = poller.PollUntilDone(ctx, nil) + if err != nil { + return fmt.Errorf("failed to delete SQL server: %w", err) + } + + log.Printf("SQL server %s deleted successfully", serverName) + return nil +} + +// generateSQLServerNameForSchemaTest generates a unique SQL server name +// SQL server names must be globally unique, 1-63 characters, lowercase letters, numbers, and hyphens +func generateSQLServerNameForSchemaTest(baseName string) string { + 
baseName = strings.ToLower(baseName) + baseName = strings.ReplaceAll(baseName, "_", "-") + baseName = strings.ReplaceAll(baseName, " ", "-") + + rng := rand.New(rand.NewSource(time.Now().UnixNano() + int64(os.Getpid()))) + suffix := rng.Intn(10000) + return fmt.Sprintf("%s-%04d", baseName, suffix) +} diff --git a/sources/azure/manual/adapters.go b/sources/azure/manual/adapters.go index 77b70da6..d3f8ff7e 100644 --- a/sources/azure/manual/adapters.go +++ b/sources/azure/manual/adapters.go @@ -143,6 +143,11 @@ func Adapters(ctx context.Context, subscriptionID string, regions []string, cred return nil, fmt.Errorf("failed to create sql databases client: %w", err) } + sqlDatabaseSchemasClient, err := armsql.NewDatabaseSchemasClient(subscriptionID, cred, nil) + if err != nil { + return nil, fmt.Errorf("failed to create sql database schemas client: %w", err) + } + documentDBDatabaseAccountsClient, err := armcosmos.NewDatabaseAccountsClient(subscriptionID, cred, nil) if err != nil { return nil, fmt.Errorf("failed to create document db database accounts client: %w", err) @@ -188,6 +193,16 @@ func Adapters(ctx context.Context, subscriptionID string, regions []string, cred return nil, fmt.Errorf("failed to create load balancer frontend IP configurations client: %w", err) } + loadBalancerBackendAddressPoolsClient, err := armnetwork.NewLoadBalancerBackendAddressPoolsClient(subscriptionID, cred, nil) + if err != nil { + return nil, fmt.Errorf("failed to create load balancer backend address pools client: %w", err) + } + + loadBalancerProbesClient, err := armnetwork.NewLoadBalancerProbesClient(subscriptionID, cred, nil) + if err != nil { + return nil, fmt.Errorf("failed to create load balancer probes client: %w", err) + } + privateEndpointsClient, err := armnetwork.NewPrivateEndpointsClient(subscriptionID, cred, nil) if err != nil { return nil, fmt.Errorf("failed to create private endpoints client: %w", err) @@ -332,6 +347,16 @@ func Adapters(ctx context.Context, subscriptionID 
string, regions []string, cred return nil, fmt.Errorf("failed to create postgresql flexible server backups client: %w", err) } + postgresqlReplicasClient, err := armpostgresqlflexibleservers.NewReplicasClient(subscriptionID, cred, nil) + if err != nil { + return nil, fmt.Errorf("failed to create postgresql flexible server replicas client: %w", err) + } + + postgresqlConfigurationsClient, err := armpostgresqlflexibleservers.NewConfigurationsClient(subscriptionID, cred, nil) + if err != nil { + return nil, fmt.Errorf("failed to create postgresql flexible server configurations client: %w", err) + } + secretsClient, err := armkeyvault.NewSecretsClient(subscriptionID, cred, nil) if err != nil { return nil, fmt.Errorf("failed to create secrets client: %w", err) @@ -517,6 +542,10 @@ func Adapters(ctx context.Context, subscriptionID string, regions []string, cred clients.NewSqlDatabasesClient(sqlDatabasesClient), resourceGroupScopes, ), cache), + sources.WrapperToAdapter(NewSqlDatabaseSchema( + clients.NewSqlDatabaseSchemasClient(sqlDatabaseSchemasClient), + resourceGroupScopes, + ), cache), sources.WrapperToAdapter(NewSqlElasticPool( clients.NewSqlElasticPoolClient(sqlElasticPoolsClient), resourceGroupScopes, @@ -577,6 +606,14 @@ func Adapters(ctx context.Context, subscriptionID string, regions []string, cred clients.NewLoadBalancerFrontendIPConfigurationsClient(loadBalancerFrontendIPConfigurationsClient), resourceGroupScopes, ), cache), + sources.WrapperToAdapter(NewNetworkLoadBalancerBackendAddressPool( + clients.NewLoadBalancerBackendAddressPoolsClient(loadBalancerBackendAddressPoolsClient), + resourceGroupScopes, + ), cache), + sources.WrapperToAdapter(NewNetworkLoadBalancerProbe( + clients.NewLoadBalancerProbesClient(loadBalancerProbesClient), + resourceGroupScopes, + ), cache), sources.WrapperToAdapter(NewNetworkPrivateEndpoint( clients.NewPrivateEndpointsClient(privateEndpointsClient), resourceGroupScopes, @@ -685,6 +722,14 @@ func Adapters(ctx context.Context, 
subscriptionID string, regions []string, cred clients.NewDBforPostgreSQLFlexibleServerBackupClient(postgresqlBackupsClient), resourceGroupScopes, ), cache), + sources.WrapperToAdapter(NewDBforPostgreSQLFlexibleServerReplica( + clients.NewDBforPostgreSQLFlexibleServerReplicaClient(postgresqlReplicasClient, postgresqlFlexibleServersClient), + resourceGroupScopes, + ), cache), + sources.WrapperToAdapter(NewDBforPostgreSQLFlexibleServerConfiguration( + clients.NewPostgreSQLConfigurationsClient(postgresqlConfigurationsClient), + resourceGroupScopes, + ), cache), sources.WrapperToAdapter(NewKeyVaultSecret( clients.NewSecretsClient(secretsClient), resourceGroupScopes, @@ -813,6 +858,7 @@ func Adapters(ctx context.Context, subscriptionID string, regions []string, cred sources.WrapperToAdapter(NewNetworkVirtualNetworkPeering(nil, placeholderResourceGroupScopes), noOpCache), sources.WrapperToAdapter(NewNetworkNetworkInterface(nil, placeholderResourceGroupScopes), noOpCache), sources.WrapperToAdapter(NewSqlDatabase(nil, placeholderResourceGroupScopes), noOpCache), + sources.WrapperToAdapter(NewSqlDatabaseSchema(nil, placeholderResourceGroupScopes), noOpCache), sources.WrapperToAdapter(NewSqlServerFirewallRule(nil, placeholderResourceGroupScopes), noOpCache), sources.WrapperToAdapter(NewSqlServerVirtualNetworkRule(nil, placeholderResourceGroupScopes), noOpCache), sources.WrapperToAdapter(NewSQLServerPrivateEndpointConnection(nil, placeholderResourceGroupScopes), noOpCache), @@ -827,6 +873,8 @@ func Adapters(ctx context.Context, subscriptionID string, regions []string, cred sources.WrapperToAdapter(NewNetworkDdosProtectionPlan(nil, placeholderResourceGroupScopes), noOpCache), sources.WrapperToAdapter(NewNetworkLoadBalancer(nil, placeholderResourceGroupScopes), noOpCache), sources.WrapperToAdapter(NewNetworkLoadBalancerFrontendIPConfiguration(nil, placeholderResourceGroupScopes), noOpCache), + sources.WrapperToAdapter(NewNetworkLoadBalancerBackendAddressPool(nil, 
placeholderResourceGroupScopes), noOpCache), + sources.WrapperToAdapter(NewNetworkLoadBalancerProbe(nil, placeholderResourceGroupScopes), noOpCache), sources.WrapperToAdapter(NewNetworkZone(nil, placeholderResourceGroupScopes), noOpCache), sources.WrapperToAdapter(NewNetworkPrivateDNSZone(nil, placeholderResourceGroupScopes), noOpCache), sources.WrapperToAdapter(NewNetworkDNSRecordSet(nil, placeholderResourceGroupScopes), noOpCache), @@ -852,6 +900,8 @@ func Adapters(ctx context.Context, subscriptionID string, regions []string, cred sources.WrapperToAdapter(NewDBforPostgreSQLFlexibleServerFirewallRule(nil, placeholderResourceGroupScopes), noOpCache), sources.WrapperToAdapter(NewDBforPostgreSQLFlexibleServerPrivateEndpointConnection(nil, placeholderResourceGroupScopes), noOpCache), sources.WrapperToAdapter(NewDBforPostgreSQLFlexibleServerBackup(nil, placeholderResourceGroupScopes), noOpCache), + sources.WrapperToAdapter(NewDBforPostgreSQLFlexibleServerReplica(nil, placeholderResourceGroupScopes), noOpCache), + sources.WrapperToAdapter(NewDBforPostgreSQLFlexibleServerConfiguration(nil, placeholderResourceGroupScopes), noOpCache), sources.WrapperToAdapter(NewKeyVaultSecret(nil, placeholderResourceGroupScopes), noOpCache), sources.WrapperToAdapter(NewKeyVaultKey(nil, placeholderResourceGroupScopes), noOpCache), sources.WrapperToAdapter(NewManagedIdentityUserAssignedIdentity(nil, placeholderResourceGroupScopes), noOpCache), diff --git a/sources/azure/manual/dbforpostgresql-flexible-server-configuration.go b/sources/azure/manual/dbforpostgresql-flexible-server-configuration.go new file mode 100644 index 00000000..9da4b01a --- /dev/null +++ b/sources/azure/manual/dbforpostgresql-flexible-server-configuration.go @@ -0,0 +1,237 @@ +package manual + +import ( + "context" + "errors" + + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/postgresql/armpostgresqlflexibleservers/v5" + "github.com/overmindtech/cli/go/discovery" + "github.com/overmindtech/cli/go/sdp-go" + 
"github.com/overmindtech/cli/go/sdpcache" + "github.com/overmindtech/cli/sources" + "github.com/overmindtech/cli/sources/azure/clients" + azureshared "github.com/overmindtech/cli/sources/azure/shared" + "github.com/overmindtech/cli/sources/shared" +) + +var DBforPostgreSQLFlexibleServerConfigurationLookupByName = shared.NewItemTypeLookup("name", azureshared.DBforPostgreSQLFlexibleServerConfiguration) + +type dbforPostgreSQLFlexibleServerConfigurationWrapper struct { + client clients.PostgreSQLConfigurationsClient + + *azureshared.MultiResourceGroupBase +} + +func NewDBforPostgreSQLFlexibleServerConfiguration(client clients.PostgreSQLConfigurationsClient, resourceGroupScopes []azureshared.ResourceGroupScope) sources.SearchableWrapper { + return &dbforPostgreSQLFlexibleServerConfigurationWrapper{ + client: client, + MultiResourceGroupBase: azureshared.NewMultiResourceGroupBase( + resourceGroupScopes, + sdp.AdapterCategory_ADAPTER_CATEGORY_DATABASE, + azureshared.DBforPostgreSQLFlexibleServerConfiguration, + ), + } +} + +// Get retrieves a single configuration by server name and configuration name. 
+// ref: https://learn.microsoft.com/en-us/rest/api/postgresql/configurations/get?view=rest-postgresql-2025-08-01 +func (c dbforPostgreSQLFlexibleServerConfigurationWrapper) Get(ctx context.Context, scope string, queryParts ...string) (*sdp.Item, *sdp.QueryError) { + if len(queryParts) < 2 { + return nil, &sdp.QueryError{ + ErrorType: sdp.QueryError_OTHER, + ErrorString: "Get requires 2 query parts: serverName and configurationName", + Scope: scope, + ItemType: c.Type(), + } + } + serverName := queryParts[0] + configurationName := queryParts[1] + + if serverName == "" { + return nil, &sdp.QueryError{ + ErrorType: sdp.QueryError_OTHER, + ErrorString: "serverName cannot be empty", + Scope: scope, + ItemType: c.Type(), + } + } + if configurationName == "" { + return nil, &sdp.QueryError{ + ErrorType: sdp.QueryError_OTHER, + ErrorString: "configurationName cannot be empty", + Scope: scope, + ItemType: c.Type(), + } + } + + rgScope, err := c.ResourceGroupScopeFromScope(scope) + if err != nil { + return nil, azureshared.QueryError(err, scope, c.Type()) + } + + resp, err := c.client.Get(ctx, rgScope.ResourceGroup, serverName, configurationName, nil) + if err != nil { + return nil, azureshared.QueryError(err, scope, c.Type()) + } + + return c.azureConfigurationToSDPItem(&resp.Configuration, serverName, scope) +} + +// Search lists all configurations for a given server. 
+// ref: https://learn.microsoft.com/en-us/rest/api/postgresql/configurations/list-by-server?view=rest-postgresql-2025-08-01 +func (c dbforPostgreSQLFlexibleServerConfigurationWrapper) Search(ctx context.Context, scope string, queryParts ...string) ([]*sdp.Item, *sdp.QueryError) { + if len(queryParts) < 1 { + return nil, &sdp.QueryError{ + ErrorType: sdp.QueryError_OTHER, + ErrorString: "Search requires 1 query part: serverName", + Scope: scope, + ItemType: c.Type(), + } + } + serverName := queryParts[0] + + if serverName == "" { + return nil, azureshared.QueryError(errors.New("serverName cannot be empty"), scope, c.Type()) + } + + rgScope, err := c.ResourceGroupScopeFromScope(scope) + if err != nil { + return nil, azureshared.QueryError(err, scope, c.Type()) + } + + pager := c.client.NewListByServerPager(rgScope.ResourceGroup, serverName, nil) + + var items []*sdp.Item + for pager.More() { + page, err := pager.NextPage(ctx) + if err != nil { + return nil, azureshared.QueryError(err, scope, c.Type()) + } + + for _, configuration := range page.Value { + if configuration.Name == nil { + continue + } + + item, sdpErr := c.azureConfigurationToSDPItem(configuration, serverName, scope) + if sdpErr != nil { + return nil, sdpErr + } + items = append(items, item) + } + } + + return items, nil +} + +// SearchStream streams configurations for a given server. 
+func (c dbforPostgreSQLFlexibleServerConfigurationWrapper) SearchStream(ctx context.Context, stream discovery.QueryResultStream, cache sdpcache.Cache, cacheKey sdpcache.CacheKey, scope string, queryParts ...string) { + if len(queryParts) < 1 { + stream.SendError(azureshared.QueryError(errors.New("Search requires 1 query part: serverName"), scope, c.Type())) + return + } + serverName := queryParts[0] + + if serverName == "" { + stream.SendError(azureshared.QueryError(errors.New("serverName cannot be empty"), scope, c.Type())) + return + } + + rgScope, err := c.ResourceGroupScopeFromScope(scope) + if err != nil { + stream.SendError(azureshared.QueryError(err, scope, c.Type())) + return + } + + pager := c.client.NewListByServerPager(rgScope.ResourceGroup, serverName, nil) + for pager.More() { + page, err := pager.NextPage(ctx) + if err != nil { + stream.SendError(azureshared.QueryError(err, scope, c.Type())) + return + } + + for _, configuration := range page.Value { + if configuration.Name == nil { + continue + } + + item, sdpErr := c.azureConfigurationToSDPItem(configuration, serverName, scope) + if sdpErr != nil { + stream.SendError(sdpErr) + continue + } + cache.StoreItem(ctx, item, shared.DefaultCacheDuration, cacheKey) + stream.SendItem(item) + } + } +} + +func (c dbforPostgreSQLFlexibleServerConfigurationWrapper) azureConfigurationToSDPItem(configuration *armpostgresqlflexibleservers.Configuration, serverName, scope string) (*sdp.Item, *sdp.QueryError) { + if configuration.Name == nil { + return nil, azureshared.QueryError(errors.New("configuration name is nil"), scope, c.Type()) + } + + attributes, err := shared.ToAttributesWithExclude(configuration) + if err != nil { + return nil, azureshared.QueryError(err, scope, c.Type()) + } + + configurationName := *configuration.Name + + err = attributes.Set("uniqueAttr", shared.CompositeLookupKey(serverName, configurationName)) + if err != nil { + return nil, azureshared.QueryError(err, scope, c.Type()) + } + + 
sdpItem := &sdp.Item{ + Type: c.Type(), + UniqueAttribute: "uniqueAttr", + Attributes: attributes, + Scope: scope, + } + + // Link back to parent Flexible Server + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.DBforPostgreSQLFlexibleServer.String(), + Method: sdp.QueryMethod_GET, + Query: serverName, + Scope: scope, + }, + }) + + return sdpItem, nil +} + +func (c dbforPostgreSQLFlexibleServerConfigurationWrapper) GetLookups() sources.ItemTypeLookups { + return sources.ItemTypeLookups{ + DBforPostgreSQLFlexibleServerLookupByName, + DBforPostgreSQLFlexibleServerConfigurationLookupByName, + } +} + +func (c dbforPostgreSQLFlexibleServerConfigurationWrapper) SearchLookups() []sources.ItemTypeLookups { + return []sources.ItemTypeLookups{ + { + DBforPostgreSQLFlexibleServerLookupByName, + }, + } +} + +func (c dbforPostgreSQLFlexibleServerConfigurationWrapper) PotentialLinks() map[shared.ItemType]bool { + return shared.NewItemTypesSet( + azureshared.DBforPostgreSQLFlexibleServer, + ) +} + +// ref: https://learn.microsoft.com/en-us/azure/role-based-access-control/permissions/databases#microsoftdbforpostgresql +func (c dbforPostgreSQLFlexibleServerConfigurationWrapper) IAMPermissions() []string { + return []string{ + "Microsoft.DBforPostgreSQL/flexibleServers/configurations/read", + } +} + +func (c dbforPostgreSQLFlexibleServerConfigurationWrapper) PredefinedRole() string { + return "Reader" +} diff --git a/sources/azure/manual/dbforpostgresql-flexible-server-configuration_test.go b/sources/azure/manual/dbforpostgresql-flexible-server-configuration_test.go new file mode 100644 index 00000000..d8b5664d --- /dev/null +++ b/sources/azure/manual/dbforpostgresql-flexible-server-configuration_test.go @@ -0,0 +1,403 @@ +package manual_test + +import ( + "context" + "errors" + "sync" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/postgresql/armpostgresqlflexibleservers/v5" + 
"go.uber.org/mock/gomock" + + "github.com/overmindtech/cli/go/discovery" + "github.com/overmindtech/cli/go/sdp-go" + "github.com/overmindtech/cli/go/sdpcache" + "github.com/overmindtech/cli/sources" + "github.com/overmindtech/cli/sources/azure/clients" + "github.com/overmindtech/cli/sources/azure/manual" + azureshared "github.com/overmindtech/cli/sources/azure/shared" + "github.com/overmindtech/cli/sources/azure/shared/mocks" + "github.com/overmindtech/cli/sources/shared" +) + +// mockConfigurationsPager is a mock implementation of PostgreSQLConfigurationsPager +type mockConfigurationsPager struct { + pages []armpostgresqlflexibleservers.ConfigurationsClientListByServerResponse + index int +} + +func (m *mockConfigurationsPager) More() bool { + return m.index < len(m.pages) +} + +func (m *mockConfigurationsPager) NextPage(ctx context.Context) (armpostgresqlflexibleservers.ConfigurationsClientListByServerResponse, error) { + if m.index >= len(m.pages) { + return armpostgresqlflexibleservers.ConfigurationsClientListByServerResponse{}, errors.New("no more pages") + } + page := m.pages[m.index] + m.index++ + return page, nil +} + +// errorConfigurationsPager is a mock pager that always returns an error +type errorConfigurationsPager struct{} + +func (e *errorConfigurationsPager) More() bool { + return true +} + +func (e *errorConfigurationsPager) NextPage(ctx context.Context) (armpostgresqlflexibleservers.ConfigurationsClientListByServerResponse, error) { + return armpostgresqlflexibleservers.ConfigurationsClientListByServerResponse{}, errors.New("pager error") +} + +// testConfigurationsClient wraps the mock to implement the correct interface +type testConfigurationsClient struct { + *mocks.MockPostgreSQLConfigurationsClient + pager clients.PostgreSQLConfigurationsPager +} + +func (t *testConfigurationsClient) NewListByServerPager(resourceGroupName string, serverName string, options *armpostgresqlflexibleservers.ConfigurationsClientListByServerOptions) 
clients.PostgreSQLConfigurationsPager { + return t.pager +} + +func TestDBforPostgreSQLFlexibleServerConfiguration(t *testing.T) { + ctx := context.Background() + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + subscriptionID := "test-subscription" + resourceGroup := "test-rg" + serverName := "test-server" + configurationName := "shared_buffers" + + t.Run("Get", func(t *testing.T) { + configuration := createAzureConfiguration(configurationName) + + mockClient := mocks.NewMockPostgreSQLConfigurationsClient(ctrl) + mockClient.EXPECT().Get(ctx, resourceGroup, serverName, configurationName, nil).Return( + armpostgresqlflexibleservers.ConfigurationsClientGetResponse{ + Configuration: *configuration, + }, nil) + + testClient := &testConfigurationsClient{MockPostgreSQLConfigurationsClient: mockClient} + wrapper := manual.NewDBforPostgreSQLFlexibleServerConfiguration(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + query := shared.CompositeLookupKey(serverName, configurationName) + sdpItem, qErr := adapter.Get(ctx, wrapper.Scopes()[0], query, true) + if qErr != nil { + t.Fatalf("Expected no error, got: %v", qErr) + } + + if sdpItem.GetType() != azureshared.DBforPostgreSQLFlexibleServerConfiguration.String() { + t.Errorf("Expected type %s, got %s", azureshared.DBforPostgreSQLFlexibleServerConfiguration, sdpItem.GetType()) + } + + if sdpItem.GetUniqueAttribute() != "uniqueAttr" { + t.Errorf("Expected unique attribute 'uniqueAttr', got %s", sdpItem.GetUniqueAttribute()) + } + + if sdpItem.UniqueAttributeValue() != shared.CompositeLookupKey(serverName, configurationName) { + t.Errorf("Expected unique attribute value %s, got %s", shared.CompositeLookupKey(serverName, configurationName), sdpItem.UniqueAttributeValue()) + } + + if sdpItem.GetScope() != subscriptionID+"."+resourceGroup { + t.Errorf("Expected scope %s, got %s", 
subscriptionID+"."+resourceGroup, sdpItem.GetScope()) + } + + if err := sdpItem.Validate(); err != nil { + t.Fatalf("Expected no validation error, got: %v", err) + } + + t.Run("StaticTests", func(t *testing.T) { + queryTests := shared.QueryTests{ + { + ExpectedType: azureshared.DBforPostgreSQLFlexibleServer.String(), + ExpectedMethod: sdp.QueryMethod_GET, + ExpectedQuery: serverName, + ExpectedScope: subscriptionID + "." + resourceGroup, + }, + } + + shared.RunStaticTests(t, adapter, sdpItem, queryTests) + }) + }) + + t.Run("Search", func(t *testing.T) { + config1 := createAzureConfiguration("shared_buffers") + config2 := createAzureConfiguration("work_mem") + + mockClient := mocks.NewMockPostgreSQLConfigurationsClient(ctrl) + mockPager := &mockConfigurationsPager{ + pages: []armpostgresqlflexibleservers.ConfigurationsClientListByServerResponse{ + { + ConfigurationList: armpostgresqlflexibleservers.ConfigurationList{ + Value: []*armpostgresqlflexibleservers.Configuration{config1, config2}, + }, + }, + }, + } + + testClient := &testConfigurationsClient{ + MockPostgreSQLConfigurationsClient: mockClient, + pager: mockPager, + } + + wrapper := manual.NewDBforPostgreSQLFlexibleServerConfiguration(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + searchable, ok := adapter.(discovery.SearchableAdapter) + if !ok { + t.Fatalf("Adapter does not support Search operation") + } + + sdpItems, err := searchable.Search(ctx, wrapper.Scopes()[0], serverName, true) + if err != nil { + t.Fatalf("Expected no error, got: %v", err) + } + + if len(sdpItems) != 2 { + t.Fatalf("Expected 2 items, got: %d", len(sdpItems)) + } + + for _, item := range sdpItems { + if err := item.Validate(); err != nil { + t.Fatalf("Expected no validation error, got: %v", err) + } + + if item.GetType() != azureshared.DBforPostgreSQLFlexibleServerConfiguration.String() { + 
t.Errorf("Expected type %s, got %s", azureshared.DBforPostgreSQLFlexibleServerConfiguration, item.GetType()) + } + } + }) + + t.Run("SearchStream", func(t *testing.T) { + config1 := createAzureConfiguration("shared_buffers") + config2 := createAzureConfiguration("work_mem") + + mockClient := mocks.NewMockPostgreSQLConfigurationsClient(ctrl) + mockPager := &mockConfigurationsPager{ + pages: []armpostgresqlflexibleservers.ConfigurationsClientListByServerResponse{ + { + ConfigurationList: armpostgresqlflexibleservers.ConfigurationList{ + Value: []*armpostgresqlflexibleservers.Configuration{config1, config2}, + }, + }, + }, + } + + testClient := &testConfigurationsClient{ + MockPostgreSQLConfigurationsClient: mockClient, + pager: mockPager, + } + + wrapper := manual.NewDBforPostgreSQLFlexibleServerConfiguration(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + searchStreamable, ok := adapter.(discovery.SearchStreamableAdapter) + if !ok { + t.Fatalf("Adapter does not support SearchStream operation") + } + + wg := &sync.WaitGroup{} + wg.Add(2) + + var items []*sdp.Item + mockItemHandler := func(item *sdp.Item) { + items = append(items, item) + wg.Done() + } + + var errs []error + mockErrorHandler := func(err error) { + errs = append(errs, err) + } + + stream := discovery.NewQueryResultStream(mockItemHandler, mockErrorHandler) + + searchStreamable.SearchStream(ctx, wrapper.Scopes()[0], serverName, true, stream) + wg.Wait() + + if len(errs) != 0 { + t.Fatalf("Expected no errors, got: %v", errs) + } + + if len(items) != 2 { + t.Fatalf("Expected 2 items, got: %d", len(items)) + } + }) + + t.Run("GetWithInsufficientQueryParts", func(t *testing.T) { + mockClient := mocks.NewMockPostgreSQLConfigurationsClient(ctrl) + testClient := &testConfigurationsClient{MockPostgreSQLConfigurationsClient: mockClient} + + wrapper := 
manual.NewDBforPostgreSQLFlexibleServerConfiguration(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + _, qErr := adapter.Get(ctx, wrapper.Scopes()[0], serverName, true) + if qErr == nil { + t.Error("Expected error when providing insufficient query parts, but got nil") + } + }) + + t.Run("GetWithEmptyServerName", func(t *testing.T) { + mockClient := mocks.NewMockPostgreSQLConfigurationsClient(ctrl) + testClient := &testConfigurationsClient{MockPostgreSQLConfigurationsClient: mockClient} + + wrapper := manual.NewDBforPostgreSQLFlexibleServerConfiguration(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + query := shared.CompositeLookupKey("", configurationName) + _, qErr := adapter.Get(ctx, wrapper.Scopes()[0], query, true) + if qErr == nil { + t.Error("Expected error when providing empty server name, but got nil") + } + }) + + t.Run("GetWithEmptyConfigurationName", func(t *testing.T) { + mockClient := mocks.NewMockPostgreSQLConfigurationsClient(ctrl) + testClient := &testConfigurationsClient{MockPostgreSQLConfigurationsClient: mockClient} + + wrapper := manual.NewDBforPostgreSQLFlexibleServerConfiguration(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + query := shared.CompositeLookupKey(serverName, "") + _, qErr := adapter.Get(ctx, wrapper.Scopes()[0], query, true) + if qErr == nil { + t.Error("Expected error when providing empty configuration name, but got nil") + } + }) + + t.Run("SearchWithEmptyServerName", func(t *testing.T) { + mockClient := mocks.NewMockPostgreSQLConfigurationsClient(ctrl) + testClient := 
&testConfigurationsClient{MockPostgreSQLConfigurationsClient: mockClient} + + wrapper := manual.NewDBforPostgreSQLFlexibleServerConfiguration(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + + _, qErr := wrapper.Search(ctx, wrapper.Scopes()[0], "") + if qErr == nil { + t.Error("Expected error when providing empty server name, but got nil") + } + }) + + t.Run("SearchWithNoQueryParts", func(t *testing.T) { + mockClient := mocks.NewMockPostgreSQLConfigurationsClient(ctrl) + testClient := &testConfigurationsClient{MockPostgreSQLConfigurationsClient: mockClient} + + wrapper := manual.NewDBforPostgreSQLFlexibleServerConfiguration(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + searchable, ok := adapter.(discovery.SearchableAdapter) + if !ok { + t.Fatalf("Adapter does not support Search operation") + } + + _, err := searchable.Search(ctx, wrapper.Scopes()[0], "", true) + if err == nil { + t.Error("Expected error when providing no query parts, but got nil") + } + }) + + t.Run("Search_ConfigurationWithNilName", func(t *testing.T) { + configWithName := createAzureConfiguration("shared_buffers") + + mockClient := mocks.NewMockPostgreSQLConfigurationsClient(ctrl) + mockPager := &mockConfigurationsPager{ + pages: []armpostgresqlflexibleservers.ConfigurationsClientListByServerResponse{ + { + ConfigurationList: armpostgresqlflexibleservers.ConfigurationList{ + Value: []*armpostgresqlflexibleservers.Configuration{ + {Name: nil}, // Configuration with nil name should be skipped + configWithName, + }, + }, + }, + }, + } + + testClient := &testConfigurationsClient{ + MockPostgreSQLConfigurationsClient: mockClient, + pager: mockPager, + } + + wrapper := manual.NewDBforPostgreSQLFlexibleServerConfiguration(testClient, 
[]azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + searchable, ok := adapter.(discovery.SearchableAdapter) + if !ok { + t.Fatalf("Adapter does not support Search operation") + } + + sdpItems, err := searchable.Search(ctx, wrapper.Scopes()[0], serverName, true) + if err != nil { + t.Fatalf("Expected no error, got: %v", err) + } + + if len(sdpItems) != 1 { + t.Fatalf("Expected 1 item, got: %d", len(sdpItems)) + } + + if sdpItems[0].UniqueAttributeValue() != shared.CompositeLookupKey(serverName, "shared_buffers") { + t.Errorf("Expected configuration name '%s', got %s", shared.CompositeLookupKey(serverName, "shared_buffers"), sdpItems[0].UniqueAttributeValue()) + } + }) + + t.Run("ErrorHandling_Get", func(t *testing.T) { + expectedErr := errors.New("configuration not found") + + mockClient := mocks.NewMockPostgreSQLConfigurationsClient(ctrl) + mockClient.EXPECT().Get(ctx, resourceGroup, serverName, "nonexistent", nil).Return( + armpostgresqlflexibleservers.ConfigurationsClientGetResponse{}, expectedErr) + + testClient := &testConfigurationsClient{MockPostgreSQLConfigurationsClient: mockClient} + wrapper := manual.NewDBforPostgreSQLFlexibleServerConfiguration(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + query := shared.CompositeLookupKey(serverName, "nonexistent") + _, qErr := adapter.Get(ctx, wrapper.Scopes()[0], query, true) + if qErr == nil { + t.Error("Expected error when getting non-existent configuration, but got nil") + } + }) + + t.Run("ErrorHandling_Search", func(t *testing.T) { + mockClient := mocks.NewMockPostgreSQLConfigurationsClient(ctrl) + errorPager := &errorConfigurationsPager{} + + testClient := &testConfigurationsClient{ + MockPostgreSQLConfigurationsClient: mockClient, + pager: 
errorPager, + } + + wrapper := manual.NewDBforPostgreSQLFlexibleServerConfiguration(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + searchable, ok := adapter.(discovery.SearchableAdapter) + if !ok { + t.Fatalf("Adapter does not support Search operation") + } + + _, err := searchable.Search(ctx, wrapper.Scopes()[0], serverName, true) + if err == nil { + t.Error("Expected error from pager when NextPage returns an error, but got nil") + } + }) +} + +// createAzureConfiguration creates a mock Azure configuration for testing +func createAzureConfiguration(name string) *armpostgresqlflexibleservers.Configuration { + dataType := armpostgresqlflexibleservers.ConfigurationDataTypeInteger + return &armpostgresqlflexibleservers.Configuration{ + ID: new("/subscriptions/test-subscription/resourceGroups/test-rg/providers/Microsoft.DBforPostgreSQL/flexibleServers/test-server/configurations/" + name), + Name: new(name), + Type: new("Microsoft.DBforPostgreSQL/flexibleServers/configurations"), + Properties: &armpostgresqlflexibleservers.ConfigurationProperties{ + Value: new("128MB"), + DefaultValue: new("128MB"), + DataType: &dataType, + AllowedValues: new("16384-2097152"), + Source: new("system-default"), + Description: new("Sets the amount of memory the database server uses for shared memory buffers."), + }, + } +} diff --git a/sources/azure/manual/dbforpostgresql-flexible-server-replica.go b/sources/azure/manual/dbforpostgresql-flexible-server-replica.go new file mode 100644 index 00000000..d47cad8a --- /dev/null +++ b/sources/azure/manual/dbforpostgresql-flexible-server-replica.go @@ -0,0 +1,483 @@ +package manual + +import ( + "context" + "errors" + + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/postgresql/armpostgresqlflexibleservers/v5" + "github.com/overmindtech/cli/go/discovery" + "github.com/overmindtech/cli/go/sdp-go" + 
"github.com/overmindtech/cli/go/sdpcache" + "github.com/overmindtech/cli/sources" + "github.com/overmindtech/cli/sources/azure/clients" + azureshared "github.com/overmindtech/cli/sources/azure/shared" + "github.com/overmindtech/cli/sources/shared" + "github.com/overmindtech/cli/sources/stdlib" +) + +var DBforPostgreSQLFlexibleServerReplicaLookupByName = shared.NewItemTypeLookup("name", azureshared.DBforPostgreSQLFlexibleServerReplica) + +type dbforPostgreSQLFlexibleServerReplicaWrapper struct { + client clients.DBforPostgreSQLFlexibleServerReplicaClient + + *azureshared.MultiResourceGroupBase +} + +func NewDBforPostgreSQLFlexibleServerReplica(client clients.DBforPostgreSQLFlexibleServerReplicaClient, resourceGroupScopes []azureshared.ResourceGroupScope) sources.SearchableWrapper { + return &dbforPostgreSQLFlexibleServerReplicaWrapper{ + client: client, + MultiResourceGroupBase: azureshared.NewMultiResourceGroupBase( + resourceGroupScopes, + sdp.AdapterCategory_ADAPTER_CATEGORY_DATABASE, + azureshared.DBforPostgreSQLFlexibleServerReplica, + ), + } +} + +// ref: https://learn.microsoft.com/en-us/rest/api/postgresql/flexibleserver/servers/get +func (s dbforPostgreSQLFlexibleServerReplicaWrapper) Get(ctx context.Context, scope string, queryParts ...string) (*sdp.Item, *sdp.QueryError) { + if len(queryParts) < 2 { + return nil, &sdp.QueryError{ + ErrorType: sdp.QueryError_OTHER, + ErrorString: "Get requires 2 query parts: serverName and replicaName", + Scope: scope, + ItemType: s.Type(), + } + } + serverName := queryParts[0] + replicaName := queryParts[1] + if serverName == "" { + return nil, &sdp.QueryError{ + ErrorType: sdp.QueryError_OTHER, + ErrorString: "serverName cannot be empty", + Scope: scope, + ItemType: s.Type(), + } + } + if replicaName == "" { + return nil, &sdp.QueryError{ + ErrorType: sdp.QueryError_OTHER, + ErrorString: "replicaName cannot be empty", + Scope: scope, + ItemType: s.Type(), + } + } + + rgScope, err := s.ResourceGroupScopeFromScope(scope) + 
if err != nil { + return nil, azureshared.QueryError(err, scope, s.Type()) + } + resp, err := s.client.Get(ctx, rgScope.ResourceGroup, replicaName) + if err != nil { + return nil, azureshared.QueryError(err, scope, s.Type()) + } + + return s.azureReplicaToSDPItem(&resp.Server, serverName, replicaName, scope) +} + +func (s dbforPostgreSQLFlexibleServerReplicaWrapper) azureReplicaToSDPItem(server *armpostgresqlflexibleservers.Server, serverName, replicaName, scope string) (*sdp.Item, *sdp.QueryError) { + if server.Name == nil { + return nil, azureshared.QueryError(errors.New("replica name is nil"), scope, s.Type()) + } + + attributes, err := shared.ToAttributesWithExclude(server, "tags") + if err != nil { + return nil, azureshared.QueryError(err, scope, s.Type()) + } + + err = attributes.Set("uniqueAttr", shared.CompositeLookupKey(serverName, replicaName)) + if err != nil { + return nil, azureshared.QueryError(err, scope, s.Type()) + } + + sdpItem := &sdp.Item{ + Type: azureshared.DBforPostgreSQLFlexibleServerReplica.String(), + UniqueAttribute: "uniqueAttr", + Attributes: attributes, + Scope: scope, + Tags: azureshared.ConvertAzureTags(server.Tags), + } + + // Map provisioning state to health + if server.Properties != nil && server.Properties.State != nil { + switch *server.Properties.State { + case armpostgresqlflexibleservers.ServerStateReady: + sdpItem.Health = sdp.Health_HEALTH_OK.Enum() + case armpostgresqlflexibleservers.ServerStateStarting, armpostgresqlflexibleservers.ServerStateStopping, armpostgresqlflexibleservers.ServerStateUpdating, armpostgresqlflexibleservers.ServerStateProvisioning, armpostgresqlflexibleservers.ServerStateRestarting: + sdpItem.Health = sdp.Health_HEALTH_PENDING.Enum() + case armpostgresqlflexibleservers.ServerStateDisabled, armpostgresqlflexibleservers.ServerStateStopped, armpostgresqlflexibleservers.ServerStateInaccessible: + sdpItem.Health = sdp.Health_HEALTH_WARNING.Enum() + case armpostgresqlflexibleservers.ServerStateDropping: + 
sdpItem.Health = sdp.Health_HEALTH_ERROR.Enum() + default: + sdpItem.Health = sdp.Health_HEALTH_UNKNOWN.Enum() + } + } else { + sdpItem.Health = sdp.Health_HEALTH_UNKNOWN.Enum() + } + + // Link to parent PostgreSQL Flexible Server (source server for replica) + if server.Properties != nil && server.Properties.SourceServerResourceID != nil { + sourceServerID := *server.Properties.SourceServerResourceID + sourceServerName := azureshared.ExtractResourceName(sourceServerID) + if sourceServerName != "" { + linkedScope := scope + if extractedScope := azureshared.ExtractScopeFromResourceID(sourceServerID); extractedScope != "" { + linkedScope = extractedScope + } + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.DBforPostgreSQLFlexibleServer.String(), + Method: sdp.QueryMethod_GET, + Query: sourceServerName, + Scope: linkedScope, + }, + }) + } + } else { + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.DBforPostgreSQLFlexibleServer.String(), + Method: sdp.QueryMethod_GET, + Query: serverName, + Scope: scope, + }, + }) + } + + // Link to Fully Qualified Domain Name (DNS) + if server.Properties != nil && server.Properties.FullyQualifiedDomainName != nil && *server.Properties.FullyQualifiedDomainName != "" { + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: stdlib.NetworkDNS.String(), + Method: sdp.QueryMethod_SEARCH, + Query: *server.Properties.FullyQualifiedDomainName, + Scope: "global", + }, + }) + } + + // Link to Subnet (external resource) + if server.Properties != nil && server.Properties.Network != nil && server.Properties.Network.DelegatedSubnetResourceID != nil { + subnetID := *server.Properties.Network.DelegatedSubnetResourceID + scopeParams := azureshared.ExtractPathParamsFromResourceID(subnetID, []string{"subscriptions", "resourceGroups"}) + 
subnetParams := azureshared.ExtractPathParamsFromResourceID(subnetID, []string{"virtualNetworks", "subnets"}) + if len(scopeParams) >= 2 && len(subnetParams) >= 2 { + subscriptionID := scopeParams[0] + resourceGroupName := scopeParams[1] + vnetName := subnetParams[0] + subnetName := subnetParams[1] + query := shared.CompositeLookupKey(vnetName, subnetName) + linkedScope := subscriptionID + "." + resourceGroupName + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.NetworkSubnet.String(), + Method: sdp.QueryMethod_GET, + Query: query, + Scope: linkedScope, + }, + }) + + // Link to Virtual Network (parent of subnet) + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.NetworkVirtualNetwork.String(), + Method: sdp.QueryMethod_GET, + Query: vnetName, + Scope: linkedScope, + }, + }) + } + } + + // Link to Private DNS Zone (external resource) + if server.Properties != nil && server.Properties.Network != nil && server.Properties.Network.PrivateDNSZoneArmResourceID != nil { + privateDNSZoneID := *server.Properties.Network.PrivateDNSZoneArmResourceID + privateDNSZoneName := azureshared.ExtractResourceName(privateDNSZoneID) + if privateDNSZoneName != "" { + linkedScope := scope + if extractedScope := azureshared.ExtractScopeFromResourceID(privateDNSZoneID); extractedScope != "" { + linkedScope = extractedScope + } + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.NetworkPrivateDNSZone.String(), + Method: sdp.QueryMethod_GET, + Query: privateDNSZoneName, + Scope: linkedScope, + }, + }) + } + } + + // Link to User Assigned Managed Identities + if server.Identity != nil && server.Identity.UserAssignedIdentities != nil { + for identityResourceID := range server.Identity.UserAssignedIdentities { + identityName := 
azureshared.ExtractResourceName(identityResourceID) + if identityName != "" { + linkedScope := scope + if extractedScope := azureshared.ExtractScopeFromResourceID(identityResourceID); extractedScope != "" { + linkedScope = extractedScope + } + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.ManagedIdentityUserAssignedIdentity.String(), + Method: sdp.QueryMethod_GET, + Query: identityName, + Scope: linkedScope, + }, + }) + } + } + } + + // Link to Network Private Endpoints from PrivateEndpointConnections + if server.Properties != nil && server.Properties.PrivateEndpointConnections != nil { + for _, peConnection := range server.Properties.PrivateEndpointConnections { + if peConnection.Properties != nil && peConnection.Properties.PrivateEndpoint != nil && peConnection.Properties.PrivateEndpoint.ID != nil { + privateEndpointID := *peConnection.Properties.PrivateEndpoint.ID + privateEndpointName := azureshared.ExtractResourceName(privateEndpointID) + if privateEndpointName != "" { + linkedScope := scope + if extractedScope := azureshared.ExtractScopeFromResourceID(privateEndpointID); extractedScope != "" { + linkedScope = extractedScope + } + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.NetworkPrivateEndpoint.String(), + Method: sdp.QueryMethod_GET, + Query: privateEndpointName, + Scope: linkedScope, + }, + }) + } + } + } + } + + // Link to Key Vault Vault from Data Encryption (Primary Key) + if server.Properties != nil && server.Properties.DataEncryption != nil && server.Properties.DataEncryption.PrimaryKeyURI != nil { + keyURI := *server.Properties.DataEncryption.PrimaryKeyURI + if vaultName := azureshared.ExtractVaultNameFromURI(keyURI); vaultName != "" { + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.KeyVaultVault.String(), + Method: 
sdp.QueryMethod_GET, + Query: vaultName, + Scope: scope, + }, + }) + + // Link to Key Vault Key + keyName := azureshared.ExtractKeyNameFromURI(keyURI) + if keyName != "" { + query := shared.CompositeLookupKey(vaultName, keyName) + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.KeyVaultKey.String(), + Method: sdp.QueryMethod_GET, + Query: query, + Scope: scope, + }, + }) + } + } + } + + // Link to Primary User Assigned Managed Identity from Data Encryption + if server.Properties != nil && server.Properties.DataEncryption != nil && server.Properties.DataEncryption.PrimaryUserAssignedIdentityID != nil { + identityID := *server.Properties.DataEncryption.PrimaryUserAssignedIdentityID + identityName := azureshared.ExtractResourceName(identityID) + if identityName != "" { + linkedScope := scope + if extractedScope := azureshared.ExtractScopeFromResourceID(identityID); extractedScope != "" { + linkedScope = extractedScope + } + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.ManagedIdentityUserAssignedIdentity.String(), + Method: sdp.QueryMethod_GET, + Query: identityName, + Scope: linkedScope, + }, + }) + } + } + + // Link to Geo Backup Key Vault Vault from Data Encryption + if server.Properties != nil && server.Properties.DataEncryption != nil && server.Properties.DataEncryption.GeoBackupKeyURI != nil { + keyURI := *server.Properties.DataEncryption.GeoBackupKeyURI + if vaultName := azureshared.ExtractVaultNameFromURI(keyURI); vaultName != "" { + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.KeyVaultVault.String(), + Method: sdp.QueryMethod_GET, + Query: vaultName, + Scope: scope, + }, + }) + + // Link to Geo Backup Key Vault Key + keyName := azureshared.ExtractKeyNameFromURI(keyURI) + if keyName != "" { + query := 
shared.CompositeLookupKey(vaultName, keyName) + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.KeyVaultKey.String(), + Method: sdp.QueryMethod_GET, + Query: query, + Scope: scope, + }, + }) + } + } + } + + // Link to Geo Backup User Assigned Managed Identity from Data Encryption + if server.Properties != nil && server.Properties.DataEncryption != nil && server.Properties.DataEncryption.GeoBackupUserAssignedIdentityID != nil { + identityID := *server.Properties.DataEncryption.GeoBackupUserAssignedIdentityID + identityName := azureshared.ExtractResourceName(identityID) + if identityName != "" { + linkedScope := scope + if extractedScope := azureshared.ExtractScopeFromResourceID(identityID); extractedScope != "" { + linkedScope = extractedScope + } + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.ManagedIdentityUserAssignedIdentity.String(), + Method: sdp.QueryMethod_GET, + Query: identityName, + Scope: linkedScope, + }, + }) + } + } + + return sdpItem, nil +} + +func (s dbforPostgreSQLFlexibleServerReplicaWrapper) GetLookups() sources.ItemTypeLookups { + return sources.ItemTypeLookups{ + DBforPostgreSQLFlexibleServerLookupByName, + DBforPostgreSQLFlexibleServerReplicaLookupByName, + } +} + +// ref: https://learn.microsoft.com/en-us/rest/api/postgresql/flexibleserver/replicas/list-by-server +func (s dbforPostgreSQLFlexibleServerReplicaWrapper) Search(ctx context.Context, scope string, queryParts ...string) ([]*sdp.Item, *sdp.QueryError) { + if len(queryParts) < 1 { + return nil, &sdp.QueryError{ + ErrorType: sdp.QueryError_OTHER, + ErrorString: "Search requires 1 query part: serverName", + Scope: scope, + ItemType: s.Type(), + } + } + serverName := queryParts[0] + if serverName == "" { + return nil, &sdp.QueryError{ + ErrorType: sdp.QueryError_OTHER, + ErrorString: "serverName cannot be empty", + Scope: scope, + 
ItemType: s.Type(), + } + } + + rgScope, err := s.ResourceGroupScopeFromScope(scope) + if err != nil { + return nil, azureshared.QueryError(err, scope, s.Type()) + } + pager := s.client.ListByServer(ctx, rgScope.ResourceGroup, serverName) + + var items []*sdp.Item + for pager.More() { + page, err := pager.NextPage(ctx) + if err != nil { + return nil, azureshared.QueryError(err, scope, s.Type()) + } + for _, server := range page.Value { + if server.Name == nil { + continue + } + item, sdpErr := s.azureReplicaToSDPItem(server, serverName, *server.Name, scope) + if sdpErr != nil { + return nil, sdpErr + } + items = append(items, item) + } + } + + return items, nil +} + +func (s dbforPostgreSQLFlexibleServerReplicaWrapper) SearchStream(ctx context.Context, stream discovery.QueryResultStream, cache sdpcache.Cache, cacheKey sdpcache.CacheKey, scope string, queryParts ...string) { + if len(queryParts) < 1 { + stream.SendError(azureshared.QueryError(errors.New("Search requires 1 query part: serverName"), scope, s.Type())) + return + } + serverName := queryParts[0] + if serverName == "" { + stream.SendError(azureshared.QueryError(errors.New("serverName cannot be empty"), scope, s.Type())) + return + } + + rgScope, err := s.ResourceGroupScopeFromScope(scope) + if err != nil { + stream.SendError(azureshared.QueryError(err, scope, s.Type())) + return + } + pager := s.client.ListByServer(ctx, rgScope.ResourceGroup, serverName) + for pager.More() { + page, err := pager.NextPage(ctx) + if err != nil { + stream.SendError(azureshared.QueryError(err, scope, s.Type())) + return + } + for _, server := range page.Value { + if server.Name == nil { + continue + } + item, sdpErr := s.azureReplicaToSDPItem(server, serverName, *server.Name, scope) + if sdpErr != nil { + stream.SendError(sdpErr) + continue + } + cache.StoreItem(ctx, item, shared.DefaultCacheDuration, cacheKey) + stream.SendItem(item) + } + } +} + +func (s dbforPostgreSQLFlexibleServerReplicaWrapper) SearchLookups() 
[]sources.ItemTypeLookups { + return []sources.ItemTypeLookups{ + { + DBforPostgreSQLFlexibleServerLookupByName, + }, + } +} + +func (s dbforPostgreSQLFlexibleServerReplicaWrapper) PotentialLinks() map[shared.ItemType]bool { + return shared.NewItemTypesSet( + azureshared.DBforPostgreSQLFlexibleServer, + azureshared.NetworkSubnet, + azureshared.NetworkVirtualNetwork, + azureshared.NetworkPrivateDNSZone, + azureshared.NetworkPrivateEndpoint, + azureshared.ManagedIdentityUserAssignedIdentity, + azureshared.KeyVaultVault, + azureshared.KeyVaultKey, + stdlib.NetworkDNS, + ) +} + +// ref: https://learn.microsoft.com/en-us/azure/role-based-access-control/resource-provider-operations#microsoftdbforpostgresql +func (s dbforPostgreSQLFlexibleServerReplicaWrapper) IAMPermissions() []string { + return []string{ + "Microsoft.DBforPostgreSQL/flexibleServers/read", + "Microsoft.DBforPostgreSQL/flexibleServers/replicas/read", + } +} + +func (s dbforPostgreSQLFlexibleServerReplicaWrapper) PredefinedRole() string { + return "Reader" +} diff --git a/sources/azure/manual/dbforpostgresql-flexible-server-replica_test.go b/sources/azure/manual/dbforpostgresql-flexible-server-replica_test.go new file mode 100644 index 00000000..4b5c111a --- /dev/null +++ b/sources/azure/manual/dbforpostgresql-flexible-server-replica_test.go @@ -0,0 +1,392 @@ +package manual_test + +import ( + "context" + "errors" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/postgresql/armpostgresqlflexibleservers/v5" + "go.uber.org/mock/gomock" + + "github.com/overmindtech/cli/go/discovery" + "github.com/overmindtech/cli/go/sdp-go" + "github.com/overmindtech/cli/go/sdpcache" + "github.com/overmindtech/cli/sources" + "github.com/overmindtech/cli/sources/azure/clients" + "github.com/overmindtech/cli/sources/azure/manual" + azureshared "github.com/overmindtech/cli/sources/azure/shared" + "github.com/overmindtech/cli/sources/azure/shared/mocks" + "github.com/overmindtech/cli/sources/shared" + 
"github.com/overmindtech/cli/sources/stdlib" +) + +type mockDBforPostgreSQLFlexibleServerReplicaPager struct { + pages []armpostgresqlflexibleservers.ReplicasClientListByServerResponse + index int +} + +func (m *mockDBforPostgreSQLFlexibleServerReplicaPager) More() bool { + return m.index < len(m.pages) +} + +func (m *mockDBforPostgreSQLFlexibleServerReplicaPager) NextPage(ctx context.Context) (armpostgresqlflexibleservers.ReplicasClientListByServerResponse, error) { + if m.index >= len(m.pages) { + return armpostgresqlflexibleservers.ReplicasClientListByServerResponse{}, errors.New("no more pages") + } + page := m.pages[m.index] + m.index++ + return page, nil +} + +type errorDBforPostgreSQLFlexibleServerReplicaPager struct{} + +func (e *errorDBforPostgreSQLFlexibleServerReplicaPager) More() bool { + return true +} + +func (e *errorDBforPostgreSQLFlexibleServerReplicaPager) NextPage(ctx context.Context) (armpostgresqlflexibleservers.ReplicasClientListByServerResponse, error) { + return armpostgresqlflexibleservers.ReplicasClientListByServerResponse{}, errors.New("pager error") +} + +type testDBforPostgreSQLFlexibleServerReplicaClient struct { + *mocks.MockDBforPostgreSQLFlexibleServerReplicaClient + pager clients.DBforPostgreSQLFlexibleServerReplicaPager +} + +func (t *testDBforPostgreSQLFlexibleServerReplicaClient) ListByServer(ctx context.Context, resourceGroupName, serverName string) clients.DBforPostgreSQLFlexibleServerReplicaPager { + return t.pager +} + +func TestDBforPostgreSQLFlexibleServerReplica(t *testing.T) { + ctx := context.Background() + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + subscriptionID := "test-subscription" + resourceGroup := "test-rg" + serverName := "test-server" + replicaName := "test-replica" + + t.Run("Get", func(t *testing.T) { + replica := createAzurePostgreSQLFlexibleServerReplica(serverName, replicaName) + + mockClient := mocks.NewMockDBforPostgreSQLFlexibleServerReplicaClient(ctrl) + mockClient.EXPECT().Get(ctx, 
resourceGroup, replicaName).Return( + armpostgresqlflexibleservers.ServersClientGetResponse{ + Server: *replica, + }, nil) + + wrapper := manual.NewDBforPostgreSQLFlexibleServerReplica(&testDBforPostgreSQLFlexibleServerReplicaClient{MockDBforPostgreSQLFlexibleServerReplicaClient: mockClient}, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + query := shared.CompositeLookupKey(serverName, replicaName) + sdpItem, qErr := adapter.Get(ctx, wrapper.Scopes()[0], query, true) + if qErr != nil { + t.Fatalf("Expected no error, got: %v", qErr) + } + + if sdpItem.GetType() != azureshared.DBforPostgreSQLFlexibleServerReplica.String() { + t.Errorf("Expected type %s, got %s", azureshared.DBforPostgreSQLFlexibleServerReplica, sdpItem.GetType()) + } + + if sdpItem.GetUniqueAttribute() != "uniqueAttr" { + t.Errorf("Expected unique attribute 'uniqueAttr', got %s", sdpItem.GetUniqueAttribute()) + } + + expectedUniqueAttrValue := shared.CompositeLookupKey(serverName, replicaName) + if sdpItem.UniqueAttributeValue() != expectedUniqueAttrValue { + t.Errorf("Expected unique attribute value %s, got %s", expectedUniqueAttrValue, sdpItem.UniqueAttributeValue()) + } + + if sdpItem.GetScope() != subscriptionID+"."+resourceGroup { + t.Errorf("Expected scope %s, got %s", subscriptionID+"."+resourceGroup, sdpItem.GetScope()) + } + + if sdpItem.GetHealth() != sdp.Health_HEALTH_OK { + t.Errorf("Expected health OK, got %v", sdpItem.GetHealth()) + } + + if err := sdpItem.Validate(); err != nil { + t.Fatalf("Expected no validation error, got: %v", err) + } + + t.Run("StaticTests", func(t *testing.T) { + queryTests := shared.QueryTests{ + { + ExpectedType: azureshared.DBforPostgreSQLFlexibleServer.String(), + ExpectedMethod: sdp.QueryMethod_GET, + ExpectedQuery: serverName, + ExpectedScope: subscriptionID + "." 
+ resourceGroup, + }, + { + ExpectedType: stdlib.NetworkDNS.String(), + ExpectedMethod: sdp.QueryMethod_SEARCH, + ExpectedQuery: "test-replica.postgres.database.azure.com", + ExpectedScope: "global", + }, + } + shared.RunStaticTests(t, adapter, sdpItem, queryTests) + }) + }) + + t.Run("GetWithInsufficientQueryParts", func(t *testing.T) { + mockClient := mocks.NewMockDBforPostgreSQLFlexibleServerReplicaClient(ctrl) + wrapper := manual.NewDBforPostgreSQLFlexibleServerReplica(&testDBforPostgreSQLFlexibleServerReplicaClient{MockDBforPostgreSQLFlexibleServerReplicaClient: mockClient}, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + _, qErr := adapter.Get(ctx, wrapper.Scopes()[0], serverName, true) + if qErr == nil { + t.Error("Expected error when providing only serverName (1 query part), but got nil") + } + }) + + t.Run("GetWithEmptyServerName", func(t *testing.T) { + mockClient := mocks.NewMockDBforPostgreSQLFlexibleServerReplicaClient(ctrl) + wrapper := manual.NewDBforPostgreSQLFlexibleServerReplica(&testDBforPostgreSQLFlexibleServerReplicaClient{MockDBforPostgreSQLFlexibleServerReplicaClient: mockClient}, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + query := shared.CompositeLookupKey("", replicaName) + _, qErr := adapter.Get(ctx, wrapper.Scopes()[0], query, true) + if qErr == nil { + t.Error("Expected error when serverName is empty, but got nil") + } + }) + + t.Run("GetWithEmptyReplicaName", func(t *testing.T) { + mockClient := mocks.NewMockDBforPostgreSQLFlexibleServerReplicaClient(ctrl) + wrapper := manual.NewDBforPostgreSQLFlexibleServerReplica(&testDBforPostgreSQLFlexibleServerReplicaClient{MockDBforPostgreSQLFlexibleServerReplicaClient: mockClient}, 
[]azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + query := shared.CompositeLookupKey(serverName, "") + _, qErr := adapter.Get(ctx, wrapper.Scopes()[0], query, true) + if qErr == nil { + t.Error("Expected error when replicaName is empty, but got nil") + } + }) + + t.Run("Search", func(t *testing.T) { + replica1 := createAzurePostgreSQLFlexibleServerReplica(serverName, "replica1") + replica2 := createAzurePostgreSQLFlexibleServerReplica(serverName, "replica2") + + mockClient := mocks.NewMockDBforPostgreSQLFlexibleServerReplicaClient(ctrl) + pager := &mockDBforPostgreSQLFlexibleServerReplicaPager{ + pages: []armpostgresqlflexibleservers.ReplicasClientListByServerResponse{ + { + ServerList: armpostgresqlflexibleservers.ServerList{ + Value: []*armpostgresqlflexibleservers.Server{replica1, replica2}, + }, + }, + }, + } + + testClient := &testDBforPostgreSQLFlexibleServerReplicaClient{ + MockDBforPostgreSQLFlexibleServerReplicaClient: mockClient, + pager: pager, + } + wrapper := manual.NewDBforPostgreSQLFlexibleServerReplica(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + searchable, ok := adapter.(discovery.SearchableAdapter) + if !ok { + t.Fatalf("Adapter does not support Search operation") + } + + items, qErr := searchable.Search(ctx, wrapper.Scopes()[0], serverName, true) + if qErr != nil { + t.Fatalf("Expected no error from Search, got: %v", qErr) + } + if len(items) != 2 { + t.Errorf("Expected 2 items from Search, got %d", len(items)) + } + }) + + t.Run("SearchStream", func(t *testing.T) { + replica1 := createAzurePostgreSQLFlexibleServerReplica(serverName, "replica1") + + mockClient := mocks.NewMockDBforPostgreSQLFlexibleServerReplicaClient(ctrl) + pager := &mockDBforPostgreSQLFlexibleServerReplicaPager{ 
+ pages: []armpostgresqlflexibleservers.ReplicasClientListByServerResponse{ + { + ServerList: armpostgresqlflexibleservers.ServerList{ + Value: []*armpostgresqlflexibleservers.Server{replica1}, + }, + }, + }, + } + + testClient := &testDBforPostgreSQLFlexibleServerReplicaClient{ + MockDBforPostgreSQLFlexibleServerReplicaClient: mockClient, + pager: pager, + } + wrapper := manual.NewDBforPostgreSQLFlexibleServerReplica(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + searchStreamable, ok := adapter.(discovery.SearchStreamableAdapter) + if !ok { + t.Fatalf("Adapter does not support SearchStream operation") + } + + stream := discovery.NewRecordingQueryResultStream() + searchStreamable.SearchStream(ctx, wrapper.Scopes()[0], serverName, true, stream) + items := stream.GetItems() + errs := stream.GetErrors() + if len(errs) > 0 { + t.Fatalf("Expected no errors from SearchStream, got: %v", errs) + } + if len(items) != 1 { + t.Errorf("Expected 1 item from SearchStream, got %d", len(items)) + } + }) + + t.Run("SearchWithInsufficientQueryParts", func(t *testing.T) { + mockClient := mocks.NewMockDBforPostgreSQLFlexibleServerReplicaClient(ctrl) + wrapper := manual.NewDBforPostgreSQLFlexibleServerReplica(&testDBforPostgreSQLFlexibleServerReplicaClient{MockDBforPostgreSQLFlexibleServerReplicaClient: mockClient}, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + + _, qErr := wrapper.Search(ctx, wrapper.Scopes()[0]) + if qErr == nil { + t.Error("Expected error when providing no query parts, but got nil") + } + }) + + t.Run("SearchWithEmptyServerName", func(t *testing.T) { + mockClient := mocks.NewMockDBforPostgreSQLFlexibleServerReplicaClient(ctrl) + wrapper := 
manual.NewDBforPostgreSQLFlexibleServerReplica(&testDBforPostgreSQLFlexibleServerReplicaClient{MockDBforPostgreSQLFlexibleServerReplicaClient: mockClient}, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + + _, qErr := wrapper.Search(ctx, wrapper.Scopes()[0], "") + if qErr == nil { + t.Error("Expected error when serverName is empty, but got nil") + } + }) + + t.Run("SearchStreamWithEmptyServerName", func(t *testing.T) { + mockClient := mocks.NewMockDBforPostgreSQLFlexibleServerReplicaClient(ctrl) + wrapper := manual.NewDBforPostgreSQLFlexibleServerReplica(&testDBforPostgreSQLFlexibleServerReplicaClient{MockDBforPostgreSQLFlexibleServerReplicaClient: mockClient}, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + searchStreamable := wrapper.(sources.SearchStreamableWrapper) + + stream := discovery.NewRecordingQueryResultStream() + searchStreamable.SearchStream(ctx, stream, sdpcache.NewNoOpCache(), sdpcache.CacheKey{}, wrapper.Scopes()[0], "") + errs := stream.GetErrors() + if len(errs) == 0 { + t.Error("Expected error when serverName is empty, but got none") + } + }) + + t.Run("ErrorHandling_Get", func(t *testing.T) { + expectedErr := errors.New("replica not found") + + mockClient := mocks.NewMockDBforPostgreSQLFlexibleServerReplicaClient(ctrl) + mockClient.EXPECT().Get(ctx, resourceGroup, "nonexistent-replica").Return( + armpostgresqlflexibleservers.ServersClientGetResponse{}, expectedErr) + + wrapper := manual.NewDBforPostgreSQLFlexibleServerReplica(&testDBforPostgreSQLFlexibleServerReplicaClient{MockDBforPostgreSQLFlexibleServerReplicaClient: mockClient}, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + query := shared.CompositeLookupKey(serverName, "nonexistent-replica") + _, qErr := adapter.Get(ctx, wrapper.Scopes()[0], query, true) + 
if qErr == nil { + t.Error("Expected error when getting non-existent replica, but got nil") + } + }) + + t.Run("ErrorHandling_Search", func(t *testing.T) { + mockClient := mocks.NewMockDBforPostgreSQLFlexibleServerReplicaClient(ctrl) + errorPager := &errorDBforPostgreSQLFlexibleServerReplicaPager{} + testClient := &testDBforPostgreSQLFlexibleServerReplicaClient{ + MockDBforPostgreSQLFlexibleServerReplicaClient: mockClient, + pager: errorPager, + } + + wrapper := manual.NewDBforPostgreSQLFlexibleServerReplica(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + _, qErr := wrapper.Search(ctx, wrapper.Scopes()[0], serverName) + if qErr == nil { + t.Error("Expected error from Search when pager returns error, but got nil") + } + }) + + t.Run("PotentialLinks", func(t *testing.T) { + mockClient := mocks.NewMockDBforPostgreSQLFlexibleServerReplicaClient(ctrl) + wrapper := manual.NewDBforPostgreSQLFlexibleServerReplica(&testDBforPostgreSQLFlexibleServerReplicaClient{MockDBforPostgreSQLFlexibleServerReplicaClient: mockClient}, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + potentialLinks := wrapper.PotentialLinks() + + expectedLinks := []shared.ItemType{ + azureshared.DBforPostgreSQLFlexibleServer, + azureshared.NetworkSubnet, + azureshared.NetworkVirtualNetwork, + azureshared.NetworkPrivateDNSZone, + azureshared.NetworkPrivateEndpoint, + azureshared.ManagedIdentityUserAssignedIdentity, + azureshared.KeyVaultVault, + azureshared.KeyVaultKey, + stdlib.NetworkDNS, + } + + for _, expected := range expectedLinks { + if !potentialLinks[expected] { + t.Errorf("Expected PotentialLinks to include %s", expected) + } + } + }) + + t.Run("HealthMapping", func(t *testing.T) { + testCases := []struct { + state armpostgresqlflexibleservers.ServerState + expectedHealth sdp.Health + }{ + {armpostgresqlflexibleservers.ServerStateReady, sdp.Health_HEALTH_OK}, + 
{armpostgresqlflexibleservers.ServerStateStarting, sdp.Health_HEALTH_PENDING}, + {armpostgresqlflexibleservers.ServerStateStopping, sdp.Health_HEALTH_PENDING}, + {armpostgresqlflexibleservers.ServerStateUpdating, sdp.Health_HEALTH_PENDING}, + {armpostgresqlflexibleservers.ServerStateDisabled, sdp.Health_HEALTH_WARNING}, + {armpostgresqlflexibleservers.ServerStateStopped, sdp.Health_HEALTH_WARNING}, + {armpostgresqlflexibleservers.ServerStateDropping, sdp.Health_HEALTH_ERROR}, + } + + for _, tc := range testCases { + t.Run(string(tc.state), func(t *testing.T) { + replica := createAzurePostgreSQLFlexibleServerReplicaWithState(serverName, replicaName, tc.state) + + mockClient := mocks.NewMockDBforPostgreSQLFlexibleServerReplicaClient(ctrl) + mockClient.EXPECT().Get(ctx, resourceGroup, replicaName).Return( + armpostgresqlflexibleservers.ServersClientGetResponse{ + Server: *replica, + }, nil) + + wrapper := manual.NewDBforPostgreSQLFlexibleServerReplica(&testDBforPostgreSQLFlexibleServerReplicaClient{MockDBforPostgreSQLFlexibleServerReplicaClient: mockClient}, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + query := shared.CompositeLookupKey(serverName, replicaName) + sdpItem, qErr := adapter.Get(ctx, wrapper.Scopes()[0], query, true) + if qErr != nil { + t.Fatalf("Expected no error, got: %v", qErr) + } + + if sdpItem.GetHealth() != tc.expectedHealth { + t.Errorf("Expected health %v for state %s, got %v", tc.expectedHealth, tc.state, sdpItem.GetHealth()) + } + }) + } + }) +} + +func createAzurePostgreSQLFlexibleServerReplica(serverName, replicaName string) *armpostgresqlflexibleservers.Server { + return createAzurePostgreSQLFlexibleServerReplicaWithState(serverName, replicaName, armpostgresqlflexibleservers.ServerStateReady) +} + +func createAzurePostgreSQLFlexibleServerReplicaWithState(serverName, replicaName string, state 
armpostgresqlflexibleservers.ServerState) *armpostgresqlflexibleservers.Server { + replicaID := "/subscriptions/test-subscription/resourceGroups/test-rg/providers/Microsoft.DBforPostgreSQL/flexibleServers/" + replicaName + sourceServerID := "/subscriptions/test-subscription/resourceGroups/test-rg/providers/Microsoft.DBforPostgreSQL/flexibleServers/" + serverName + replicationRole := armpostgresqlflexibleservers.ReplicationRoleAsyncReplica + fqdn := replicaName + ".postgres.database.azure.com" + return &armpostgresqlflexibleservers.Server{ + Name: &replicaName, + ID: &replicaID, + Type: new(string), + Location: new(string), + Properties: &armpostgresqlflexibleservers.ServerProperties{ + State: &state, + ReplicationRole: &replicationRole, + SourceServerResourceID: &sourceServerID, + FullyQualifiedDomainName: &fqdn, + }, + } +} diff --git a/sources/azure/manual/network-load-balancer-backend-address-pool.go b/sources/azure/manual/network-load-balancer-backend-address-pool.go new file mode 100644 index 00000000..2123f296 --- /dev/null +++ b/sources/azure/manual/network-load-balancer-backend-address-pool.go @@ -0,0 +1,468 @@ +package manual + +import ( + "context" + "errors" + + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v9" + "github.com/overmindtech/cli/go/discovery" + "github.com/overmindtech/cli/go/sdp-go" + "github.com/overmindtech/cli/go/sdpcache" + "github.com/overmindtech/cli/sources" + "github.com/overmindtech/cli/sources/azure/clients" + azureshared "github.com/overmindtech/cli/sources/azure/shared" + "github.com/overmindtech/cli/sources/shared" + "github.com/overmindtech/cli/sources/stdlib" +) + +var NetworkLoadBalancerBackendAddressPoolLookupByUniqueAttr = shared.NewItemTypeLookup("uniqueAttr", azureshared.NetworkLoadBalancerBackendAddressPool) + +type networkLoadBalancerBackendAddressPoolWrapper struct { + client clients.LoadBalancerBackendAddressPoolsClient + + *azureshared.MultiResourceGroupBase +} + +func 
NewNetworkLoadBalancerBackendAddressPool(client clients.LoadBalancerBackendAddressPoolsClient, resourceGroupScopes []azureshared.ResourceGroupScope) sources.SearchableWrapper { + return &networkLoadBalancerBackendAddressPoolWrapper{ + client: client, + MultiResourceGroupBase: azureshared.NewMultiResourceGroupBase( + resourceGroupScopes, + sdp.AdapterCategory_ADAPTER_CATEGORY_NETWORK, + azureshared.NetworkLoadBalancerBackendAddressPool, + ), + } +} + +func (c networkLoadBalancerBackendAddressPoolWrapper) Get(ctx context.Context, scope string, queryParts ...string) (*sdp.Item, *sdp.QueryError) { + if len(queryParts) < 2 { + return nil, &sdp.QueryError{ + ErrorType: sdp.QueryError_OTHER, + ErrorString: "Get requires 2 query parts: loadBalancerName and backendAddressPoolName", + Scope: scope, + ItemType: c.Type(), + } + } + loadBalancerName := queryParts[0] + backendAddressPoolName := queryParts[1] + + if loadBalancerName == "" { + return nil, &sdp.QueryError{ + ErrorType: sdp.QueryError_OTHER, + ErrorString: "loadBalancerName cannot be empty", + Scope: scope, + ItemType: c.Type(), + } + } + if backendAddressPoolName == "" { + return nil, &sdp.QueryError{ + ErrorType: sdp.QueryError_OTHER, + ErrorString: "backendAddressPoolName cannot be empty", + Scope: scope, + ItemType: c.Type(), + } + } + + rgScope, err := c.ResourceGroupScopeFromScope(scope) + if err != nil { + return nil, azureshared.QueryError(err, scope, c.Type()) + } + resp, err := c.client.Get(ctx, rgScope.ResourceGroup, loadBalancerName, backendAddressPoolName) + if err != nil { + return nil, azureshared.QueryError(err, scope, c.Type()) + } + + return c.azureBackendAddressPoolToSDPItem(&resp.BackendAddressPool, loadBalancerName, scope) +} + +func (c networkLoadBalancerBackendAddressPoolWrapper) Search(ctx context.Context, scope string, queryParts ...string) ([]*sdp.Item, *sdp.QueryError) { + if len(queryParts) < 1 { + return nil, &sdp.QueryError{ + ErrorType: sdp.QueryError_OTHER, + ErrorString: "Search 
requires 1 query part: loadBalancerName", + Scope: scope, + ItemType: c.Type(), + } + } + loadBalancerName := queryParts[0] + + if loadBalancerName == "" { + return nil, azureshared.QueryError(errors.New("loadBalancerName cannot be empty"), scope, c.Type()) + } + + rgScope, err := c.ResourceGroupScopeFromScope(scope) + if err != nil { + return nil, azureshared.QueryError(err, scope, c.Type()) + } + pager := c.client.NewListPager(rgScope.ResourceGroup, loadBalancerName) + + var items []*sdp.Item + for pager.More() { + page, err := pager.NextPage(ctx) + if err != nil { + return nil, azureshared.QueryError(err, scope, c.Type()) + } + + for _, backendPool := range page.Value { + if backendPool == nil || backendPool.Name == nil { + continue + } + item, sdpErr := c.azureBackendAddressPoolToSDPItem(backendPool, loadBalancerName, scope) + if sdpErr != nil { + return nil, sdpErr + } + items = append(items, item) + } + } + + return items, nil +} + +func (c networkLoadBalancerBackendAddressPoolWrapper) SearchStream(ctx context.Context, stream discovery.QueryResultStream, cache sdpcache.Cache, cacheKey sdpcache.CacheKey, scope string, queryParts ...string) { + if len(queryParts) < 1 { + stream.SendError(azureshared.QueryError(errors.New("Search requires 1 query part: loadBalancerName"), scope, c.Type())) + return + } + loadBalancerName := queryParts[0] + + if loadBalancerName == "" { + stream.SendError(azureshared.QueryError(errors.New("loadBalancerName cannot be empty"), scope, c.Type())) + return + } + + rgScope, err := c.ResourceGroupScopeFromScope(scope) + if err != nil { + stream.SendError(azureshared.QueryError(err, scope, c.Type())) + return + } + pager := c.client.NewListPager(rgScope.ResourceGroup, loadBalancerName) + for pager.More() { + page, err := pager.NextPage(ctx) + if err != nil { + stream.SendError(azureshared.QueryError(err, scope, c.Type())) + return + } + for _, backendPool := range page.Value { + if backendPool == nil || backendPool.Name == nil { + 
continue + } + item, sdpErr := c.azureBackendAddressPoolToSDPItem(backendPool, loadBalancerName, scope) + if sdpErr != nil { + stream.SendError(sdpErr) + continue + } + cache.StoreItem(ctx, item, shared.DefaultCacheDuration, cacheKey) + stream.SendItem(item) + } + } +} + +func (c networkLoadBalancerBackendAddressPoolWrapper) GetLookups() sources.ItemTypeLookups { + return sources.ItemTypeLookups{ + NetworkLoadBalancerLookupByName, + NetworkLoadBalancerBackendAddressPoolLookupByUniqueAttr, + } +} + +func (c networkLoadBalancerBackendAddressPoolWrapper) SearchLookups() []sources.ItemTypeLookups { + return []sources.ItemTypeLookups{ + { + NetworkLoadBalancerLookupByName, + }, + } +} + +func (c networkLoadBalancerBackendAddressPoolWrapper) azureBackendAddressPoolToSDPItem(backendPool *armnetwork.BackendAddressPool, loadBalancerName string, scope string) (*sdp.Item, *sdp.QueryError) { + if backendPool.Name == nil { + return nil, azureshared.QueryError(errors.New("backend address pool name is nil"), scope, c.Type()) + } + + backendPoolName := *backendPool.Name + + attributes, err := shared.ToAttributesWithExclude(backendPool, "tags") + if err != nil { + return nil, azureshared.QueryError(err, scope, c.Type()) + } + + err = attributes.Set("uniqueAttr", shared.CompositeLookupKey(loadBalancerName, backendPoolName)) + if err != nil { + return nil, azureshared.QueryError(err, scope, c.Type()) + } + + sdpItem := &sdp.Item{ + Type: azureshared.NetworkLoadBalancerBackendAddressPool.String(), + UniqueAttribute: "uniqueAttr", + Attributes: attributes, + Scope: scope, + } + + // Health status from provisioning state + if backendPool.Properties != nil && backendPool.Properties.ProvisioningState != nil { + switch *backendPool.Properties.ProvisioningState { + case armnetwork.ProvisioningStateSucceeded: + sdpItem.Health = sdp.Health_HEALTH_OK.Enum() + case armnetwork.ProvisioningStateCreating, armnetwork.ProvisioningStateUpdating, armnetwork.ProvisioningStateDeleting: + sdpItem.Health 
= sdp.Health_HEALTH_PENDING.Enum() + case armnetwork.ProvisioningStateFailed, armnetwork.ProvisioningStateCanceled: + sdpItem.Health = sdp.Health_HEALTH_ERROR.Enum() + default: + sdpItem.Health = sdp.Health_HEALTH_UNKNOWN.Enum() + } + } + + // Link to parent Load Balancer + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.NetworkLoadBalancer.String(), + Method: sdp.QueryMethod_GET, + Query: loadBalancerName, + Scope: scope, + }, + }) + + if backendPool.Properties != nil { + // Link to Virtual Network (pool level) + if backendPool.Properties.VirtualNetwork != nil && backendPool.Properties.VirtualNetwork.ID != nil { + vnetName := azureshared.ExtractResourceName(*backendPool.Properties.VirtualNetwork.ID) + if vnetName != "" { + linkedScope := scope + if extractedScope := azureshared.ExtractScopeFromResourceID(*backendPool.Properties.VirtualNetwork.ID); extractedScope != "" { + linkedScope = extractedScope + } + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.NetworkVirtualNetwork.String(), + Method: sdp.QueryMethod_GET, + Query: vnetName, + Scope: linkedScope, + }, + }) + } + } + + // Link to Inbound NAT Rules (read-only references) + for _, natRule := range backendPool.Properties.InboundNatRules { + if natRule != nil && natRule.ID != nil { + params := azureshared.ExtractPathParamsFromResourceID(*natRule.ID, []string{"loadBalancers", "inboundNatRules"}) + if len(params) >= 2 { + linkedScope := scope + if extractedScope := azureshared.ExtractScopeFromResourceID(*natRule.ID); extractedScope != "" { + linkedScope = extractedScope + } + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.NetworkLoadBalancerInboundNatRule.String(), + Method: sdp.QueryMethod_GET, + Query: shared.CompositeLookupKey(params[0], params[1]), + Scope: linkedScope, + }, + }) 
+ } + } + } + + // Link to Load Balancing Rules (read-only references) + for _, lbRule := range backendPool.Properties.LoadBalancingRules { + if lbRule != nil && lbRule.ID != nil { + params := azureshared.ExtractPathParamsFromResourceID(*lbRule.ID, []string{"loadBalancers", "loadBalancingRules"}) + if len(params) >= 2 { + linkedScope := scope + if extractedScope := azureshared.ExtractScopeFromResourceID(*lbRule.ID); extractedScope != "" { + linkedScope = extractedScope + } + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.NetworkLoadBalancerLoadBalancingRule.String(), + Method: sdp.QueryMethod_GET, + Query: shared.CompositeLookupKey(params[0], params[1]), + Scope: linkedScope, + }, + }) + } + } + } + + // Link to Outbound Rule (single read-only reference) + if backendPool.Properties.OutboundRule != nil && backendPool.Properties.OutboundRule.ID != nil { + params := azureshared.ExtractPathParamsFromResourceID(*backendPool.Properties.OutboundRule.ID, []string{"loadBalancers", "outboundRules"}) + if len(params) >= 2 { + linkedScope := scope + if extractedScope := azureshared.ExtractScopeFromResourceID(*backendPool.Properties.OutboundRule.ID); extractedScope != "" { + linkedScope = extractedScope + } + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.NetworkLoadBalancerOutboundRule.String(), + Method: sdp.QueryMethod_GET, + Query: shared.CompositeLookupKey(params[0], params[1]), + Scope: linkedScope, + }, + }) + } + } + + // Link to Outbound Rules (read-only references array) + for _, outboundRule := range backendPool.Properties.OutboundRules { + if outboundRule != nil && outboundRule.ID != nil { + params := azureshared.ExtractPathParamsFromResourceID(*outboundRule.ID, []string{"loadBalancers", "outboundRules"}) + if len(params) >= 2 { + linkedScope := scope + if extractedScope := 
azureshared.ExtractScopeFromResourceID(*outboundRule.ID); extractedScope != "" { + linkedScope = extractedScope + } + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.NetworkLoadBalancerOutboundRule.String(), + Method: sdp.QueryMethod_GET, + Query: shared.CompositeLookupKey(params[0], params[1]), + Scope: linkedScope, + }, + }) + } + } + } + + // Link to Backend IP Configurations (Network Interface IP Configurations) + for _, backendIPConfig := range backendPool.Properties.BackendIPConfigurations { + if backendIPConfig != nil && backendIPConfig.ID != nil { + params := azureshared.ExtractPathParamsFromResourceID(*backendIPConfig.ID, []string{"networkInterfaces", "ipConfigurations"}) + if len(params) >= 2 { + linkedScope := scope + if extractedScope := azureshared.ExtractScopeFromResourceID(*backendIPConfig.ID); extractedScope != "" { + linkedScope = extractedScope + } + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.NetworkNetworkInterfaceIPConfiguration.String(), + Method: sdp.QueryMethod_GET, + Query: shared.CompositeLookupKey(params[0], params[1]), + Scope: linkedScope, + }, + }) + } + } + } + + // Link to Backend Addresses (IP addresses, VNets, Subnets, Frontend IP Configs) + for _, addr := range backendPool.Properties.LoadBalancerBackendAddresses { + if addr == nil || addr.Properties == nil { + continue + } + + // Link to Virtual Network + if addr.Properties.VirtualNetwork != nil && addr.Properties.VirtualNetwork.ID != nil { + vnetName := azureshared.ExtractResourceName(*addr.Properties.VirtualNetwork.ID) + if vnetName != "" { + linkedScope := scope + if extractedScope := azureshared.ExtractScopeFromResourceID(*addr.Properties.VirtualNetwork.ID); extractedScope != "" { + linkedScope = extractedScope + } + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + 
Type: azureshared.NetworkVirtualNetwork.String(), + Method: sdp.QueryMethod_GET, + Query: vnetName, + Scope: linkedScope, + }, + }) + } + } + + // Link to Subnet + if addr.Properties.Subnet != nil && addr.Properties.Subnet.ID != nil { + params := azureshared.ExtractPathParamsFromResourceID(*addr.Properties.Subnet.ID, []string{"virtualNetworks", "subnets"}) + if len(params) >= 2 { + linkedScope := scope + if extractedScope := azureshared.ExtractScopeFromResourceID(*addr.Properties.Subnet.ID); extractedScope != "" { + linkedScope = extractedScope + } + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.NetworkSubnet.String(), + Method: sdp.QueryMethod_GET, + Query: shared.CompositeLookupKey(params[0], params[1]), + Scope: linkedScope, + }, + }) + } + } + + // Link to Frontend IP Configuration (regional LB) + if addr.Properties.LoadBalancerFrontendIPConfiguration != nil && addr.Properties.LoadBalancerFrontendIPConfiguration.ID != nil { + params := azureshared.ExtractPathParamsFromResourceID(*addr.Properties.LoadBalancerFrontendIPConfiguration.ID, []string{"loadBalancers", "frontendIPConfigurations"}) + if len(params) >= 2 { + linkedScope := scope + if extractedScope := azureshared.ExtractScopeFromResourceID(*addr.Properties.LoadBalancerFrontendIPConfiguration.ID); extractedScope != "" { + linkedScope = extractedScope + } + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.NetworkLoadBalancerFrontendIPConfiguration.String(), + Method: sdp.QueryMethod_GET, + Query: shared.CompositeLookupKey(params[0], params[1]), + Scope: linkedScope, + }, + }) + } + } + + // Link to Network Interface IP Configuration + if addr.Properties.NetworkInterfaceIPConfiguration != nil && addr.Properties.NetworkInterfaceIPConfiguration.ID != nil { + params := 
azureshared.ExtractPathParamsFromResourceID(*addr.Properties.NetworkInterfaceIPConfiguration.ID, []string{"networkInterfaces", "ipConfigurations"}) + if len(params) >= 2 { + linkedScope := scope + if extractedScope := azureshared.ExtractScopeFromResourceID(*addr.Properties.NetworkInterfaceIPConfiguration.ID); extractedScope != "" { + linkedScope = extractedScope + } + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.NetworkNetworkInterfaceIPConfiguration.String(), + Method: sdp.QueryMethod_GET, + Query: shared.CompositeLookupKey(params[0], params[1]), + Scope: linkedScope, + }, + }) + } + } + + // Link to IP Address (stdlib) + if addr.Properties.IPAddress != nil && *addr.Properties.IPAddress != "" { + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: stdlib.NetworkIP.String(), + Method: sdp.QueryMethod_GET, + Query: *addr.Properties.IPAddress, + Scope: "global", + }, + }) + } + } + } + + return sdpItem, nil +} + +func (c networkLoadBalancerBackendAddressPoolWrapper) PotentialLinks() map[shared.ItemType]bool { + return map[shared.ItemType]bool{ + azureshared.NetworkLoadBalancer: true, + azureshared.NetworkVirtualNetwork: true, + azureshared.NetworkSubnet: true, + azureshared.NetworkNetworkInterfaceIPConfiguration: true, + azureshared.NetworkLoadBalancerInboundNatRule: true, + azureshared.NetworkLoadBalancerLoadBalancingRule: true, + azureshared.NetworkLoadBalancerOutboundRule: true, + azureshared.NetworkLoadBalancerFrontendIPConfiguration: true, + stdlib.NetworkIP: true, + } +} + +// ref: https://learn.microsoft.com/en-us/azure/role-based-access-control/permissions/networking +func (c networkLoadBalancerBackendAddressPoolWrapper) IAMPermissions() []string { + return []string{ + "Microsoft.Network/loadBalancers/backendAddressPools/read", + } +} + +func (c networkLoadBalancerBackendAddressPoolWrapper) PredefinedRole() string { + 
return "Reader" +} diff --git a/sources/azure/manual/network-load-balancer-backend-address-pool_test.go b/sources/azure/manual/network-load-balancer-backend-address-pool_test.go new file mode 100644 index 00000000..cb6a1251 --- /dev/null +++ b/sources/azure/manual/network-load-balancer-backend-address-pool_test.go @@ -0,0 +1,517 @@ +package manual_test + +import ( + "context" + "errors" + "fmt" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v9" + "go.uber.org/mock/gomock" + + "github.com/overmindtech/cli/go/discovery" + "github.com/overmindtech/cli/go/sdp-go" + "github.com/overmindtech/cli/go/sdpcache" + "github.com/overmindtech/cli/sources" + "github.com/overmindtech/cli/sources/azure/clients" + "github.com/overmindtech/cli/sources/azure/manual" + azureshared "github.com/overmindtech/cli/sources/azure/shared" + "github.com/overmindtech/cli/sources/azure/shared/mocks" + "github.com/overmindtech/cli/sources/shared" +) + +type mockBackendAddressPoolPager struct { + pages []armnetwork.LoadBalancerBackendAddressPoolsClientListResponse + index int +} + +func (m *mockBackendAddressPoolPager) More() bool { + return m.index < len(m.pages) +} + +func (m *mockBackendAddressPoolPager) NextPage(ctx context.Context) (armnetwork.LoadBalancerBackendAddressPoolsClientListResponse, error) { + if m.index >= len(m.pages) { + return armnetwork.LoadBalancerBackendAddressPoolsClientListResponse{}, errors.New("no more pages") + } + page := m.pages[m.index] + m.index++ + return page, nil +} + +type errorBackendAddressPoolPager struct{} + +func (e *errorBackendAddressPoolPager) More() bool { + return true +} + +func (e *errorBackendAddressPoolPager) NextPage(ctx context.Context) (armnetwork.LoadBalancerBackendAddressPoolsClientListResponse, error) { + return armnetwork.LoadBalancerBackendAddressPoolsClientListResponse{}, errors.New("pager error") +} + +type testBackendAddressPoolClient struct { + *mocks.MockLoadBalancerBackendAddressPoolsClient + 
pager clients.LoadBalancerBackendAddressPoolsPager +} + +func (t *testBackendAddressPoolClient) NewListPager(resourceGroupName, loadBalancerName string) clients.LoadBalancerBackendAddressPoolsPager { + return t.pager +} + +func TestNetworkLoadBalancerBackendAddressPool(t *testing.T) { + ctx := context.Background() + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + subscriptionID := "test-subscription" + resourceGroup := "test-rg" + loadBalancerName := "test-lb" + backendPoolName := "test-backend-pool" + + t.Run("Get", func(t *testing.T) { + backendPool := createAzureBackendAddressPool(backendPoolName, loadBalancerName, subscriptionID, resourceGroup) + + mockClient := mocks.NewMockLoadBalancerBackendAddressPoolsClient(ctrl) + mockClient.EXPECT().Get(ctx, resourceGroup, loadBalancerName, backendPoolName).Return( + armnetwork.LoadBalancerBackendAddressPoolsClientGetResponse{ + BackendAddressPool: *backendPool, + }, nil) + + testClient := &testBackendAddressPoolClient{MockLoadBalancerBackendAddressPoolsClient: mockClient} + wrapper := manual.NewNetworkLoadBalancerBackendAddressPool(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + query := shared.CompositeLookupKey(loadBalancerName, backendPoolName) + sdpItem, qErr := adapter.Get(ctx, wrapper.Scopes()[0], query, true) + if qErr != nil { + t.Fatalf("Expected no error, got: %v", qErr) + } + + if sdpItem.GetType() != azureshared.NetworkLoadBalancerBackendAddressPool.String() { + t.Errorf("Expected type %s, got %s", azureshared.NetworkLoadBalancerBackendAddressPool, sdpItem.GetType()) + } + + if sdpItem.GetUniqueAttribute() != "uniqueAttr" { + t.Errorf("Expected unique attribute 'uniqueAttr', got %s", sdpItem.GetUniqueAttribute()) + } + + expectedUniqueValue := shared.CompositeLookupKey(loadBalancerName, backendPoolName) + if sdpItem.UniqueAttributeValue() != expectedUniqueValue 
{ + t.Errorf("Expected unique attribute value %s, got %s", expectedUniqueValue, sdpItem.UniqueAttributeValue()) + } + + if sdpItem.GetScope() != subscriptionID+"."+resourceGroup { + t.Errorf("Expected scope %s, got %s", subscriptionID+"."+resourceGroup, sdpItem.GetScope()) + } + + if err := sdpItem.Validate(); err != nil { + t.Fatalf("Expected no validation error, got: %v", err) + } + + if sdpItem.GetHealth() != sdp.Health_HEALTH_OK { + t.Errorf("Expected health OK, got %s", sdpItem.GetHealth()) + } + + t.Run("StaticTests", func(t *testing.T) { + queryTests := shared.QueryTests{ + { + ExpectedType: azureshared.NetworkLoadBalancer.String(), + ExpectedMethod: sdp.QueryMethod_GET, + ExpectedQuery: loadBalancerName, + ExpectedScope: fmt.Sprintf("%s.%s", subscriptionID, resourceGroup), + }, + { + ExpectedType: azureshared.NetworkVirtualNetwork.String(), + ExpectedMethod: sdp.QueryMethod_GET, + ExpectedQuery: "test-vnet", + ExpectedScope: fmt.Sprintf("%s.%s", subscriptionID, resourceGroup), + }, + { + ExpectedType: azureshared.NetworkLoadBalancerInboundNatRule.String(), + ExpectedMethod: sdp.QueryMethod_GET, + ExpectedQuery: shared.CompositeLookupKey(loadBalancerName, "nat-rule-1"), + ExpectedScope: fmt.Sprintf("%s.%s", subscriptionID, resourceGroup), + }, + { + ExpectedType: azureshared.NetworkLoadBalancerLoadBalancingRule.String(), + ExpectedMethod: sdp.QueryMethod_GET, + ExpectedQuery: shared.CompositeLookupKey(loadBalancerName, "lb-rule-1"), + ExpectedScope: fmt.Sprintf("%s.%s", subscriptionID, resourceGroup), + }, + { + ExpectedType: azureshared.NetworkLoadBalancerOutboundRule.String(), + ExpectedMethod: sdp.QueryMethod_GET, + ExpectedQuery: shared.CompositeLookupKey(loadBalancerName, "outbound-rule-1"), + ExpectedScope: fmt.Sprintf("%s.%s", subscriptionID, resourceGroup), + }, + { + ExpectedType: azureshared.NetworkNetworkInterfaceIPConfiguration.String(), + ExpectedMethod: sdp.QueryMethod_GET, + ExpectedQuery: shared.CompositeLookupKey("test-nic", 
"test-ip-config"), + ExpectedScope: fmt.Sprintf("%s.%s", subscriptionID, resourceGroup), + }, + { + ExpectedType: azureshared.NetworkVirtualNetwork.String(), + ExpectedMethod: sdp.QueryMethod_GET, + ExpectedQuery: "addr-vnet", + ExpectedScope: fmt.Sprintf("%s.%s", subscriptionID, resourceGroup), + }, + { + ExpectedType: azureshared.NetworkSubnet.String(), + ExpectedMethod: sdp.QueryMethod_GET, + ExpectedQuery: shared.CompositeLookupKey("addr-vnet", "addr-subnet"), + ExpectedScope: fmt.Sprintf("%s.%s", subscriptionID, resourceGroup), + }, + { + ExpectedType: azureshared.NetworkLoadBalancerFrontendIPConfiguration.String(), + ExpectedMethod: sdp.QueryMethod_GET, + ExpectedQuery: shared.CompositeLookupKey("regional-lb", "frontend-1"), + ExpectedScope: fmt.Sprintf("%s.%s", subscriptionID, resourceGroup), + }, + { + ExpectedType: "ip", + ExpectedMethod: sdp.QueryMethod_GET, + ExpectedQuery: "10.0.0.10", + ExpectedScope: "global", + }, + } + shared.RunStaticTests(t, adapter, sdpItem, queryTests) + }) + }) + + t.Run("Get_WithInsufficientQueryParts", func(t *testing.T) { + mockClient := mocks.NewMockLoadBalancerBackendAddressPoolsClient(ctrl) + testClient := &testBackendAddressPoolClient{MockLoadBalancerBackendAddressPoolsClient: mockClient} + + wrapper := manual.NewNetworkLoadBalancerBackendAddressPool(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + _, qErr := adapter.Get(ctx, wrapper.Scopes()[0], loadBalancerName, true) + if qErr == nil { + t.Error("Expected error when providing insufficient query parts, but got nil") + } + }) + + t.Run("Get_WithEmptyLoadBalancerName", func(t *testing.T) { + mockClient := mocks.NewMockLoadBalancerBackendAddressPoolsClient(ctrl) + testClient := &testBackendAddressPoolClient{MockLoadBalancerBackendAddressPoolsClient: mockClient} + + wrapper := manual.NewNetworkLoadBalancerBackendAddressPool(testClient, 
[]azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + query := shared.CompositeLookupKey("", backendPoolName) + _, qErr := adapter.Get(ctx, wrapper.Scopes()[0], query, true) + if qErr == nil { + t.Error("Expected error when loadBalancerName is empty, but got nil") + } + }) + + t.Run("Get_WithEmptyBackendPoolName", func(t *testing.T) { + mockClient := mocks.NewMockLoadBalancerBackendAddressPoolsClient(ctrl) + testClient := &testBackendAddressPoolClient{MockLoadBalancerBackendAddressPoolsClient: mockClient} + + wrapper := manual.NewNetworkLoadBalancerBackendAddressPool(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + query := shared.CompositeLookupKey(loadBalancerName, "") + _, qErr := adapter.Get(ctx, wrapper.Scopes()[0], query, true) + if qErr == nil { + t.Error("Expected error when backendAddressPoolName is empty, but got nil") + } + }) + + t.Run("Search", func(t *testing.T) { + pool1 := createAzureBackendAddressPoolMinimal("pool-1", loadBalancerName, subscriptionID, resourceGroup) + pool2 := createAzureBackendAddressPoolMinimal("pool-2", loadBalancerName, subscriptionID, resourceGroup) + + mockClient := mocks.NewMockLoadBalancerBackendAddressPoolsClient(ctrl) + mockPager := &mockBackendAddressPoolPager{ + pages: []armnetwork.LoadBalancerBackendAddressPoolsClientListResponse{ + { + LoadBalancerBackendAddressPoolListResult: armnetwork.LoadBalancerBackendAddressPoolListResult{ + Value: []*armnetwork.BackendAddressPool{pool1, pool2}, + }, + }, + }, + } + + testClient := &testBackendAddressPoolClient{ + MockLoadBalancerBackendAddressPoolsClient: mockClient, + pager: mockPager, + } + + wrapper := manual.NewNetworkLoadBalancerBackendAddressPool(testClient, 
[]azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + searchable, ok := adapter.(discovery.SearchableAdapter) + if !ok { + t.Fatalf("Adapter does not support Search operation") + } + + sdpItems, err := searchable.Search(ctx, wrapper.Scopes()[0], loadBalancerName, true) + if err != nil { + t.Fatalf("Expected no error, got: %v", err) + } + + if len(sdpItems) != 2 { + t.Fatalf("Expected 2 items, got: %d", len(sdpItems)) + } + + for _, item := range sdpItems { + if err := item.Validate(); err != nil { + t.Fatalf("Expected no validation error, got: %v", err) + } + if item.GetType() != azureshared.NetworkLoadBalancerBackendAddressPool.String() { + t.Errorf("Expected type %s, got %s", azureshared.NetworkLoadBalancerBackendAddressPool, item.GetType()) + } + } + }) + + t.Run("Search_WithNilName", func(t *testing.T) { + validPool := createAzureBackendAddressPoolMinimal("valid-pool", loadBalancerName, subscriptionID, resourceGroup) + + mockClient := mocks.NewMockLoadBalancerBackendAddressPoolsClient(ctrl) + mockPager := &mockBackendAddressPoolPager{ + pages: []armnetwork.LoadBalancerBackendAddressPoolsClientListResponse{ + { + LoadBalancerBackendAddressPoolListResult: armnetwork.LoadBalancerBackendAddressPoolListResult{ + Value: []*armnetwork.BackendAddressPool{ + {Name: nil, ID: new("/some/id")}, + validPool, + }, + }, + }, + }, + } + + testClient := &testBackendAddressPoolClient{ + MockLoadBalancerBackendAddressPoolsClient: mockClient, + pager: mockPager, + } + + wrapper := manual.NewNetworkLoadBalancerBackendAddressPool(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + searchable := adapter.(discovery.SearchableAdapter) + sdpItems, err := searchable.Search(ctx, wrapper.Scopes()[0], loadBalancerName, true) + if err != 
nil { + t.Fatalf("Expected no error, got: %v", err) + } + + if len(sdpItems) != 1 { + t.Fatalf("Expected 1 item (nil name skipped), got: %d", len(sdpItems)) + } + expectedValue := shared.CompositeLookupKey(loadBalancerName, "valid-pool") + if sdpItems[0].UniqueAttributeValue() != expectedValue { + t.Errorf("Expected unique value %s, got %s", expectedValue, sdpItems[0].UniqueAttributeValue()) + } + }) + + t.Run("Search_InvalidQueryParts", func(t *testing.T) { + mockClient := mocks.NewMockLoadBalancerBackendAddressPoolsClient(ctrl) + testClient := &testBackendAddressPoolClient{MockLoadBalancerBackendAddressPoolsClient: mockClient} + + wrapper := manual.NewNetworkLoadBalancerBackendAddressPool(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + + _, qErr := wrapper.Search(ctx, wrapper.Scopes()[0]) + if qErr == nil { + t.Error("Expected error when providing no query parts, but got nil") + } + }) + + t.Run("Search_WithEmptyLoadBalancerName", func(t *testing.T) { + mockClient := mocks.NewMockLoadBalancerBackendAddressPoolsClient(ctrl) + testClient := &testBackendAddressPoolClient{MockLoadBalancerBackendAddressPoolsClient: mockClient} + + wrapper := manual.NewNetworkLoadBalancerBackendAddressPool(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + + _, qErr := wrapper.Search(ctx, wrapper.Scopes()[0], "") + if qErr == nil { + t.Error("Expected error when loadBalancerName is empty, but got nil") + } + }) + + t.Run("ErrorHandling_Get", func(t *testing.T) { + expectedErr := errors.New("backend pool not found") + + mockClient := mocks.NewMockLoadBalancerBackendAddressPoolsClient(ctrl) + mockClient.EXPECT().Get(ctx, resourceGroup, loadBalancerName, "nonexistent-pool").Return( + armnetwork.LoadBalancerBackendAddressPoolsClientGetResponse{}, expectedErr) + + testClient := &testBackendAddressPoolClient{MockLoadBalancerBackendAddressPoolsClient: mockClient} + 
wrapper := manual.NewNetworkLoadBalancerBackendAddressPool(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + query := shared.CompositeLookupKey(loadBalancerName, "nonexistent-pool") + _, qErr := adapter.Get(ctx, wrapper.Scopes()[0], query, true) + if qErr == nil { + t.Error("Expected error when getting non-existent backend pool, but got nil") + } + }) + + t.Run("ErrorHandling_Search", func(t *testing.T) { + mockClient := mocks.NewMockLoadBalancerBackendAddressPoolsClient(ctrl) + testClient := &testBackendAddressPoolClient{ + MockLoadBalancerBackendAddressPoolsClient: mockClient, + pager: &errorBackendAddressPoolPager{}, + } + + wrapper := manual.NewNetworkLoadBalancerBackendAddressPool(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + searchable := adapter.(discovery.SearchableAdapter) + _, err := searchable.Search(ctx, wrapper.Scopes()[0], loadBalancerName, true) + if err == nil { + t.Error("Expected error from pager when NextPage returns an error, but got nil") + } + }) + + t.Run("Get_CrossResourceGroupLinks", func(t *testing.T) { + backendPool := createAzureBackendAddressPoolCrossRG(backendPoolName, loadBalancerName, "other-sub", "other-rg") + + mockClient := mocks.NewMockLoadBalancerBackendAddressPoolsClient(ctrl) + mockClient.EXPECT().Get(ctx, resourceGroup, loadBalancerName, backendPoolName).Return( + armnetwork.LoadBalancerBackendAddressPoolsClientGetResponse{ + BackendAddressPool: *backendPool, + }, nil) + + testClient := &testBackendAddressPoolClient{MockLoadBalancerBackendAddressPoolsClient: mockClient} + wrapper := manual.NewNetworkLoadBalancerBackendAddressPool(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter 
:= sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + query := shared.CompositeLookupKey(loadBalancerName, backendPoolName) + sdpItem, qErr := adapter.Get(ctx, wrapper.Scopes()[0], query, true) + if qErr != nil { + t.Fatalf("Expected no error, got: %v", qErr) + } + + found := false + for _, linkedQuery := range sdpItem.GetLinkedItemQueries() { + if linkedQuery.GetQuery().GetType() == azureshared.NetworkVirtualNetwork.String() { + found = true + expectedScope := "other-sub.other-rg" + if linkedQuery.GetQuery().GetScope() != expectedScope { + t.Errorf("Expected VirtualNetwork scope to be %s, got: %s", expectedScope, linkedQuery.GetQuery().GetScope()) + } + break + } + } + if !found { + t.Error("Expected to find VirtualNetwork linked query") + } + }) + + t.Run("Get_NoProperties", func(t *testing.T) { + backendPool := &armnetwork.BackendAddressPool{ + ID: new(fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/loadBalancers/%s/backendAddressPools/%s", subscriptionID, resourceGroup, loadBalancerName, backendPoolName)), + Name: new(backendPoolName), + Type: new("Microsoft.Network/loadBalancers/backendAddressPools"), + } + + mockClient := mocks.NewMockLoadBalancerBackendAddressPoolsClient(ctrl) + mockClient.EXPECT().Get(ctx, resourceGroup, loadBalancerName, backendPoolName).Return( + armnetwork.LoadBalancerBackendAddressPoolsClientGetResponse{ + BackendAddressPool: *backendPool, + }, nil) + + testClient := &testBackendAddressPoolClient{MockLoadBalancerBackendAddressPoolsClient: mockClient} + wrapper := manual.NewNetworkLoadBalancerBackendAddressPool(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + query := shared.CompositeLookupKey(loadBalancerName, backendPoolName) + sdpItem, qErr := adapter.Get(ctx, wrapper.Scopes()[0], query, true) + if qErr != nil { + t.Fatalf("Expected no error, got: %v", qErr) 
+ } + + // Should only have the parent load balancer link + linkedQueries := sdpItem.GetLinkedItemQueries() + if len(linkedQueries) != 1 { + t.Errorf("Expected 1 linked query (parent LB only), got %d", len(linkedQueries)) + } + if linkedQueries[0].GetQuery().GetType() != azureshared.NetworkLoadBalancer.String() { + t.Errorf("Expected parent LB link, got type %s", linkedQueries[0].GetQuery().GetType()) + } + }) +} + +func createAzureBackendAddressPool(name, lbName, subscriptionID, resourceGroup string) *armnetwork.BackendAddressPool { + provisioningState := armnetwork.ProvisioningStateSucceeded + vnetID := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/virtualNetworks/test-vnet", subscriptionID, resourceGroup) + natRuleID := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/loadBalancers/%s/inboundNatRules/nat-rule-1", subscriptionID, resourceGroup, lbName) + lbRuleID := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/loadBalancers/%s/loadBalancingRules/lb-rule-1", subscriptionID, resourceGroup, lbName) + outboundRuleID := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/loadBalancers/%s/outboundRules/outbound-rule-1", subscriptionID, resourceGroup, lbName) + nicIPConfigID := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/networkInterfaces/test-nic/ipConfigurations/test-ip-config", subscriptionID, resourceGroup) + addrVnetID := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/virtualNetworks/addr-vnet", subscriptionID, resourceGroup) + addrSubnetID := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/virtualNetworks/addr-vnet/subnets/addr-subnet", subscriptionID, resourceGroup) + frontendIPConfigID := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/loadBalancers/regional-lb/frontendIPConfigurations/frontend-1", subscriptionID, resourceGroup) 
+ + return &armnetwork.BackendAddressPool{ + ID: new(fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/loadBalancers/%s/backendAddressPools/%s", subscriptionID, resourceGroup, lbName, name)), + Name: new(name), + Type: new("Microsoft.Network/loadBalancers/backendAddressPools"), + Properties: &armnetwork.BackendAddressPoolPropertiesFormat{ + ProvisioningState: &provisioningState, + VirtualNetwork: &armnetwork.SubResource{ + ID: new(vnetID), + }, + InboundNatRules: []*armnetwork.SubResource{ + {ID: new(natRuleID)}, + }, + LoadBalancingRules: []*armnetwork.SubResource{ + {ID: new(lbRuleID)}, + }, + OutboundRules: []*armnetwork.SubResource{ + {ID: new(outboundRuleID)}, + }, + BackendIPConfigurations: []*armnetwork.InterfaceIPConfiguration{ + {ID: new(nicIPConfigID)}, + }, + LoadBalancerBackendAddresses: []*armnetwork.LoadBalancerBackendAddress{ + { + Name: new("backend-addr-1"), + Properties: &armnetwork.LoadBalancerBackendAddressPropertiesFormat{ + IPAddress: new("10.0.0.10"), + VirtualNetwork: &armnetwork.SubResource{ + ID: new(addrVnetID), + }, + Subnet: &armnetwork.SubResource{ + ID: new(addrSubnetID), + }, + LoadBalancerFrontendIPConfiguration: &armnetwork.SubResource{ + ID: new(frontendIPConfigID), + }, + }, + }, + }, + }, + } +} + +func createAzureBackendAddressPoolMinimal(name, lbName, subscriptionID, resourceGroup string) *armnetwork.BackendAddressPool { + provisioningState := armnetwork.ProvisioningStateSucceeded + return &armnetwork.BackendAddressPool{ + ID: new(fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/loadBalancers/%s/backendAddressPools/%s", subscriptionID, resourceGroup, lbName, name)), + Name: new(name), + Type: new("Microsoft.Network/loadBalancers/backendAddressPools"), + Properties: &armnetwork.BackendAddressPoolPropertiesFormat{ + ProvisioningState: &provisioningState, + }, + } +} + +func createAzureBackendAddressPoolCrossRG(name, lbName, otherSub, otherRG string) 
*armnetwork.BackendAddressPool { + provisioningState := armnetwork.ProvisioningStateSucceeded + vnetID := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/virtualNetworks/cross-rg-vnet", otherSub, otherRG) + + return &armnetwork.BackendAddressPool{ + ID: new(fmt.Sprintf("/subscriptions/test-subscription/resourceGroups/test-rg/providers/Microsoft.Network/loadBalancers/%s/backendAddressPools/%s", lbName, name)), + Name: new(name), + Type: new("Microsoft.Network/loadBalancers/backendAddressPools"), + Properties: &armnetwork.BackendAddressPoolPropertiesFormat{ + ProvisioningState: &provisioningState, + VirtualNetwork: &armnetwork.SubResource{ + ID: new(vnetID), + }, + }, + } +} diff --git a/sources/azure/manual/network-load-balancer-probe.go b/sources/azure/manual/network-load-balancer-probe.go new file mode 100644 index 00000000..bfabe1aa --- /dev/null +++ b/sources/azure/manual/network-load-balancer-probe.go @@ -0,0 +1,263 @@ +package manual + +import ( + "context" + "errors" + + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v9" + "github.com/overmindtech/cli/go/discovery" + "github.com/overmindtech/cli/go/sdp-go" + "github.com/overmindtech/cli/go/sdpcache" + "github.com/overmindtech/cli/sources" + "github.com/overmindtech/cli/sources/azure/clients" + azureshared "github.com/overmindtech/cli/sources/azure/shared" + "github.com/overmindtech/cli/sources/shared" +) + +var NetworkLoadBalancerProbeLookupByUniqueAttr = shared.NewItemTypeLookup("uniqueAttr", azureshared.NetworkLoadBalancerProbe) + +type networkLoadBalancerProbeWrapper struct { + client clients.LoadBalancerProbesClient + + *azureshared.MultiResourceGroupBase +} + +func NewNetworkLoadBalancerProbe(client clients.LoadBalancerProbesClient, resourceGroupScopes []azureshared.ResourceGroupScope) sources.SearchableWrapper { + return &networkLoadBalancerProbeWrapper{ + client: client, + MultiResourceGroupBase: azureshared.NewMultiResourceGroupBase( + 
resourceGroupScopes, + sdp.AdapterCategory_ADAPTER_CATEGORY_NETWORK, + azureshared.NetworkLoadBalancerProbe, + ), + } +} + +func (c networkLoadBalancerProbeWrapper) Get(ctx context.Context, scope string, queryParts ...string) (*sdp.Item, *sdp.QueryError) { + if len(queryParts) < 2 { + return nil, &sdp.QueryError{ + ErrorType: sdp.QueryError_OTHER, + ErrorString: "Get requires 2 query parts: loadBalancerName and probeName", + Scope: scope, + ItemType: c.Type(), + } + } + loadBalancerName := queryParts[0] + probeName := queryParts[1] + + if loadBalancerName == "" { + return nil, &sdp.QueryError{ + ErrorType: sdp.QueryError_OTHER, + ErrorString: "loadBalancerName cannot be empty", + Scope: scope, + ItemType: c.Type(), + } + } + if probeName == "" { + return nil, &sdp.QueryError{ + ErrorType: sdp.QueryError_OTHER, + ErrorString: "probeName cannot be empty", + Scope: scope, + ItemType: c.Type(), + } + } + + rgScope, err := c.ResourceGroupScopeFromScope(scope) + if err != nil { + return nil, azureshared.QueryError(err, scope, c.Type()) + } + resp, err := c.client.Get(ctx, rgScope.ResourceGroup, loadBalancerName, probeName) + if err != nil { + return nil, azureshared.QueryError(err, scope, c.Type()) + } + + return c.azureProbeToSDPItem(&resp.Probe, loadBalancerName, scope) +} + +func (c networkLoadBalancerProbeWrapper) Search(ctx context.Context, scope string, queryParts ...string) ([]*sdp.Item, *sdp.QueryError) { + if len(queryParts) < 1 { + return nil, &sdp.QueryError{ + ErrorType: sdp.QueryError_OTHER, + ErrorString: "Search requires 1 query part: loadBalancerName", + Scope: scope, + ItemType: c.Type(), + } + } + loadBalancerName := queryParts[0] + + if loadBalancerName == "" { + return nil, azureshared.QueryError(errors.New("loadBalancerName cannot be empty"), scope, c.Type()) + } + + rgScope, err := c.ResourceGroupScopeFromScope(scope) + if err != nil { + return nil, azureshared.QueryError(err, scope, c.Type()) + } + pager := 
c.client.NewListPager(rgScope.ResourceGroup, loadBalancerName) + + var items []*sdp.Item + for pager.More() { + page, err := pager.NextPage(ctx) + if err != nil { + return nil, azureshared.QueryError(err, scope, c.Type()) + } + + for _, probe := range page.Value { + if probe == nil || probe.Name == nil { + continue + } + item, sdpErr := c.azureProbeToSDPItem(probe, loadBalancerName, scope) + if sdpErr != nil { + return nil, sdpErr + } + items = append(items, item) + } + } + + return items, nil +} + +func (c networkLoadBalancerProbeWrapper) SearchStream(ctx context.Context, stream discovery.QueryResultStream, cache sdpcache.Cache, cacheKey sdpcache.CacheKey, scope string, queryParts ...string) { + if len(queryParts) < 1 { + stream.SendError(azureshared.QueryError(errors.New("Search requires 1 query part: loadBalancerName"), scope, c.Type())) + return + } + loadBalancerName := queryParts[0] + + if loadBalancerName == "" { + stream.SendError(azureshared.QueryError(errors.New("loadBalancerName cannot be empty"), scope, c.Type())) + return + } + + rgScope, err := c.ResourceGroupScopeFromScope(scope) + if err != nil { + stream.SendError(azureshared.QueryError(err, scope, c.Type())) + return + } + pager := c.client.NewListPager(rgScope.ResourceGroup, loadBalancerName) + for pager.More() { + page, err := pager.NextPage(ctx) + if err != nil { + stream.SendError(azureshared.QueryError(err, scope, c.Type())) + return + } + for _, probe := range page.Value { + if probe == nil || probe.Name == nil { + continue + } + item, sdpErr := c.azureProbeToSDPItem(probe, loadBalancerName, scope) + if sdpErr != nil { + stream.SendError(sdpErr) + continue + } + cache.StoreItem(ctx, item, shared.DefaultCacheDuration, cacheKey) + stream.SendItem(item) + } + } +} + +func (c networkLoadBalancerProbeWrapper) GetLookups() sources.ItemTypeLookups { + return sources.ItemTypeLookups{ + NetworkLoadBalancerLookupByName, + NetworkLoadBalancerProbeLookupByUniqueAttr, + } +} + +func (c 
networkLoadBalancerProbeWrapper) SearchLookups() []sources.ItemTypeLookups { + return []sources.ItemTypeLookups{ + { + NetworkLoadBalancerLookupByName, + }, + } +} + +func (c networkLoadBalancerProbeWrapper) azureProbeToSDPItem(probe *armnetwork.Probe, loadBalancerName string, scope string) (*sdp.Item, *sdp.QueryError) { + if probe.Name == nil { + return nil, azureshared.QueryError(errors.New("probe name is nil"), scope, c.Type()) + } + + probeName := *probe.Name + + attributes, err := shared.ToAttributesWithExclude(probe, "tags") + if err != nil { + return nil, azureshared.QueryError(err, scope, c.Type()) + } + + err = attributes.Set("uniqueAttr", shared.CompositeLookupKey(loadBalancerName, probeName)) + if err != nil { + return nil, azureshared.QueryError(err, scope, c.Type()) + } + + sdpItem := &sdp.Item{ + Type: azureshared.NetworkLoadBalancerProbe.String(), + UniqueAttribute: "uniqueAttr", + Attributes: attributes, + Scope: scope, + } + + if probe.Properties != nil && probe.Properties.ProvisioningState != nil { + switch *probe.Properties.ProvisioningState { + case armnetwork.ProvisioningStateSucceeded: + sdpItem.Health = sdp.Health_HEALTH_OK.Enum() + case armnetwork.ProvisioningStateCreating, armnetwork.ProvisioningStateUpdating, armnetwork.ProvisioningStateDeleting: + sdpItem.Health = sdp.Health_HEALTH_PENDING.Enum() + case armnetwork.ProvisioningStateFailed, armnetwork.ProvisioningStateCanceled: + sdpItem.Health = sdp.Health_HEALTH_ERROR.Enum() + default: + sdpItem.Health = sdp.Health_HEALTH_UNKNOWN.Enum() + } + } + + // Link to parent Load Balancer + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.NetworkLoadBalancer.String(), + Method: sdp.QueryMethod_GET, + Query: loadBalancerName, + Scope: scope, + }, + }) + + if probe.Properties != nil { + // Link to Load Balancing Rules that reference this probe + for _, lbRule := range probe.Properties.LoadBalancingRules { + if lbRule != nil 
&& lbRule.ID != nil { + params := azureshared.ExtractPathParamsFromResourceID(*lbRule.ID, []string{"loadBalancers", "loadBalancingRules"}) + if len(params) >= 2 { + linkedScope := scope + if extractedScope := azureshared.ExtractScopeFromResourceID(*lbRule.ID); extractedScope != "" { + linkedScope = extractedScope + } + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.NetworkLoadBalancerLoadBalancingRule.String(), + Method: sdp.QueryMethod_GET, + Query: shared.CompositeLookupKey(params[0], params[1]), + Scope: linkedScope, + }, + }) + } + } + } + } + + return sdpItem, nil +} + +func (c networkLoadBalancerProbeWrapper) PotentialLinks() map[shared.ItemType]bool { + return map[shared.ItemType]bool{ + azureshared.NetworkLoadBalancer: true, + azureshared.NetworkLoadBalancerLoadBalancingRule: true, + } +} + +// ref: https://learn.microsoft.com/en-us/azure/role-based-access-control/permissions/networking +func (c networkLoadBalancerProbeWrapper) IAMPermissions() []string { + return []string{ + "Microsoft.Network/loadBalancers/probes/read", + } +} + +func (c networkLoadBalancerProbeWrapper) PredefinedRole() string { + return "Reader" +} diff --git a/sources/azure/manual/network-load-balancer-probe_test.go b/sources/azure/manual/network-load-balancer-probe_test.go new file mode 100644 index 00000000..4d423376 --- /dev/null +++ b/sources/azure/manual/network-load-balancer-probe_test.go @@ -0,0 +1,393 @@ +package manual_test + +import ( + "context" + "errors" + "fmt" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v9" + "go.uber.org/mock/gomock" + + "github.com/overmindtech/cli/go/discovery" + "github.com/overmindtech/cli/go/sdp-go" + "github.com/overmindtech/cli/go/sdpcache" + "github.com/overmindtech/cli/sources" + "github.com/overmindtech/cli/sources/azure/clients" + "github.com/overmindtech/cli/sources/azure/manual" + azureshared 
"github.com/overmindtech/cli/sources/azure/shared" + "github.com/overmindtech/cli/sources/azure/shared/mocks" + "github.com/overmindtech/cli/sources/shared" +) + +type mockLoadBalancerProbePager struct { + pages []armnetwork.LoadBalancerProbesClientListResponse + index int +} + +func (m *mockLoadBalancerProbePager) More() bool { + return m.index < len(m.pages) +} + +func (m *mockLoadBalancerProbePager) NextPage(ctx context.Context) (armnetwork.LoadBalancerProbesClientListResponse, error) { + if m.index >= len(m.pages) { + return armnetwork.LoadBalancerProbesClientListResponse{}, errors.New("no more pages") + } + page := m.pages[m.index] + m.index++ + return page, nil +} + +type errorLoadBalancerProbePager struct{} + +func (e *errorLoadBalancerProbePager) More() bool { + return true +} + +func (e *errorLoadBalancerProbePager) NextPage(ctx context.Context) (armnetwork.LoadBalancerProbesClientListResponse, error) { + return armnetwork.LoadBalancerProbesClientListResponse{}, errors.New("pager error") +} + +type testLoadBalancerProbeClient struct { + *mocks.MockLoadBalancerProbesClient + pager clients.LoadBalancerProbesPager +} + +func (t *testLoadBalancerProbeClient) NewListPager(resourceGroupName, loadBalancerName string) clients.LoadBalancerProbesPager { + return t.pager +} + +func TestNetworkLoadBalancerProbe(t *testing.T) { + ctx := context.Background() + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + subscriptionID := "test-subscription" + resourceGroup := "test-rg" + loadBalancerName := "test-lb" + probeName := "test-probe" + + t.Run("Get", func(t *testing.T) { + probe := createAzureLoadBalancerProbe(probeName, loadBalancerName, subscriptionID, resourceGroup) + + mockClient := mocks.NewMockLoadBalancerProbesClient(ctrl) + mockClient.EXPECT().Get(ctx, resourceGroup, loadBalancerName, probeName).Return( + armnetwork.LoadBalancerProbesClientGetResponse{ + Probe: *probe, + }, nil) + + testClient := &testLoadBalancerProbeClient{MockLoadBalancerProbesClient: 
mockClient} + wrapper := manual.NewNetworkLoadBalancerProbe(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + query := shared.CompositeLookupKey(loadBalancerName, probeName) + sdpItem, qErr := adapter.Get(ctx, wrapper.Scopes()[0], query, true) + if qErr != nil { + t.Fatalf("Expected no error, got: %v", qErr) + } + + if sdpItem.GetType() != azureshared.NetworkLoadBalancerProbe.String() { + t.Errorf("Expected type %s, got %s", azureshared.NetworkLoadBalancerProbe, sdpItem.GetType()) + } + + if sdpItem.GetUniqueAttribute() != "uniqueAttr" { + t.Errorf("Expected unique attribute 'uniqueAttr', got %s", sdpItem.GetUniqueAttribute()) + } + + expectedUniqueValue := shared.CompositeLookupKey(loadBalancerName, probeName) + if sdpItem.UniqueAttributeValue() != expectedUniqueValue { + t.Errorf("Expected unique attribute value %s, got %s", expectedUniqueValue, sdpItem.UniqueAttributeValue()) + } + + if sdpItem.GetScope() != subscriptionID+"."+resourceGroup { + t.Errorf("Expected scope %s, got %s", subscriptionID+"."+resourceGroup, sdpItem.GetScope()) + } + + if err := sdpItem.Validate(); err != nil { + t.Fatalf("Expected no validation error, got: %v", err) + } + + if sdpItem.GetHealth() != sdp.Health_HEALTH_OK { + t.Errorf("Expected health OK, got %s", sdpItem.GetHealth()) + } + + t.Run("StaticTests", func(t *testing.T) { + queryTests := shared.QueryTests{ + { + ExpectedType: azureshared.NetworkLoadBalancer.String(), + ExpectedMethod: sdp.QueryMethod_GET, + ExpectedQuery: loadBalancerName, + ExpectedScope: fmt.Sprintf("%s.%s", subscriptionID, resourceGroup), + }, + { + ExpectedType: azureshared.NetworkLoadBalancerLoadBalancingRule.String(), + ExpectedMethod: sdp.QueryMethod_GET, + ExpectedQuery: shared.CompositeLookupKey(loadBalancerName, "lb-rule-1"), + ExpectedScope: fmt.Sprintf("%s.%s", subscriptionID, resourceGroup), + }, + } + 
shared.RunStaticTests(t, adapter, sdpItem, queryTests) + }) + }) + + t.Run("Get_WithInsufficientQueryParts", func(t *testing.T) { + mockClient := mocks.NewMockLoadBalancerProbesClient(ctrl) + testClient := &testLoadBalancerProbeClient{MockLoadBalancerProbesClient: mockClient} + + wrapper := manual.NewNetworkLoadBalancerProbe(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + _, qErr := adapter.Get(ctx, wrapper.Scopes()[0], loadBalancerName, true) + if qErr == nil { + t.Error("Expected error when providing insufficient query parts, but got nil") + } + }) + + t.Run("Get_WithEmptyLoadBalancerName", func(t *testing.T) { + mockClient := mocks.NewMockLoadBalancerProbesClient(ctrl) + testClient := &testLoadBalancerProbeClient{MockLoadBalancerProbesClient: mockClient} + + wrapper := manual.NewNetworkLoadBalancerProbe(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + query := shared.CompositeLookupKey("", probeName) + _, qErr := adapter.Get(ctx, wrapper.Scopes()[0], query, true) + if qErr == nil { + t.Error("Expected error when loadBalancerName is empty, but got nil") + } + }) + + t.Run("Get_WithEmptyProbeName", func(t *testing.T) { + mockClient := mocks.NewMockLoadBalancerProbesClient(ctrl) + testClient := &testLoadBalancerProbeClient{MockLoadBalancerProbesClient: mockClient} + + wrapper := manual.NewNetworkLoadBalancerProbe(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + query := shared.CompositeLookupKey(loadBalancerName, "") + _, qErr := adapter.Get(ctx, wrapper.Scopes()[0], query, true) + if qErr == nil { + t.Error("Expected error when probeName is empty, but 
got nil") + } + }) + + t.Run("Search", func(t *testing.T) { + probe1 := createAzureLoadBalancerProbeMinimal("probe-1", loadBalancerName, subscriptionID, resourceGroup) + probe2 := createAzureLoadBalancerProbeMinimal("probe-2", loadBalancerName, subscriptionID, resourceGroup) + + mockClient := mocks.NewMockLoadBalancerProbesClient(ctrl) + mockPager := &mockLoadBalancerProbePager{ + pages: []armnetwork.LoadBalancerProbesClientListResponse{ + { + LoadBalancerProbeListResult: armnetwork.LoadBalancerProbeListResult{ + Value: []*armnetwork.Probe{probe1, probe2}, + }, + }, + }, + } + + testClient := &testLoadBalancerProbeClient{ + MockLoadBalancerProbesClient: mockClient, + pager: mockPager, + } + + wrapper := manual.NewNetworkLoadBalancerProbe(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + searchable, ok := adapter.(discovery.SearchableAdapter) + if !ok { + t.Fatalf("Adapter does not support Search operation") + } + + sdpItems, err := searchable.Search(ctx, wrapper.Scopes()[0], loadBalancerName, true) + if err != nil { + t.Fatalf("Expected no error, got: %v", err) + } + + if len(sdpItems) != 2 { + t.Fatalf("Expected 2 items, got: %d", len(sdpItems)) + } + + for _, item := range sdpItems { + if err := item.Validate(); err != nil { + t.Fatalf("Expected no validation error, got: %v", err) + } + if item.GetType() != azureshared.NetworkLoadBalancerProbe.String() { + t.Errorf("Expected type %s, got %s", azureshared.NetworkLoadBalancerProbe, item.GetType()) + } + } + }) + + t.Run("Search_WithNilName", func(t *testing.T) { + validProbe := createAzureLoadBalancerProbeMinimal("valid-probe", loadBalancerName, subscriptionID, resourceGroup) + + mockClient := mocks.NewMockLoadBalancerProbesClient(ctrl) + mockPager := &mockLoadBalancerProbePager{ + pages: []armnetwork.LoadBalancerProbesClientListResponse{ + { + LoadBalancerProbeListResult: 
armnetwork.LoadBalancerProbeListResult{ + Value: []*armnetwork.Probe{ + {Name: nil, ID: new("/some/id")}, + validProbe, + }, + }, + }, + }, + } + + testClient := &testLoadBalancerProbeClient{ + MockLoadBalancerProbesClient: mockClient, + pager: mockPager, + } + + wrapper := manual.NewNetworkLoadBalancerProbe(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + searchable := adapter.(discovery.SearchableAdapter) + sdpItems, err := searchable.Search(ctx, wrapper.Scopes()[0], loadBalancerName, true) + if err != nil { + t.Fatalf("Expected no error, got: %v", err) + } + + if len(sdpItems) != 1 { + t.Fatalf("Expected 1 item (nil name skipped), got: %d", len(sdpItems)) + } + expectedValue := shared.CompositeLookupKey(loadBalancerName, "valid-probe") + if sdpItems[0].UniqueAttributeValue() != expectedValue { + t.Errorf("Expected unique value %s, got %s", expectedValue, sdpItems[0].UniqueAttributeValue()) + } + }) + + t.Run("Search_InvalidQueryParts", func(t *testing.T) { + mockClient := mocks.NewMockLoadBalancerProbesClient(ctrl) + testClient := &testLoadBalancerProbeClient{MockLoadBalancerProbesClient: mockClient} + + wrapper := manual.NewNetworkLoadBalancerProbe(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + + _, qErr := wrapper.Search(ctx, wrapper.Scopes()[0]) + if qErr == nil { + t.Error("Expected error when providing no query parts, but got nil") + } + }) + + t.Run("Search_WithEmptyLoadBalancerName", func(t *testing.T) { + mockClient := mocks.NewMockLoadBalancerProbesClient(ctrl) + testClient := &testLoadBalancerProbeClient{MockLoadBalancerProbesClient: mockClient} + + wrapper := manual.NewNetworkLoadBalancerProbe(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + + _, qErr := wrapper.Search(ctx, 
wrapper.Scopes()[0], "") + if qErr == nil { + t.Error("Expected error when loadBalancerName is empty, but got nil") + } + }) + + t.Run("ErrorHandling_Get", func(t *testing.T) { + expectedErr := errors.New("probe not found") + + mockClient := mocks.NewMockLoadBalancerProbesClient(ctrl) + mockClient.EXPECT().Get(ctx, resourceGroup, loadBalancerName, "nonexistent-probe").Return( + armnetwork.LoadBalancerProbesClientGetResponse{}, expectedErr) + + testClient := &testLoadBalancerProbeClient{MockLoadBalancerProbesClient: mockClient} + wrapper := manual.NewNetworkLoadBalancerProbe(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + query := shared.CompositeLookupKey(loadBalancerName, "nonexistent-probe") + _, qErr := adapter.Get(ctx, wrapper.Scopes()[0], query, true) + if qErr == nil { + t.Error("Expected error when getting non-existent probe, but got nil") + } + }) + + t.Run("ErrorHandling_Search", func(t *testing.T) { + mockClient := mocks.NewMockLoadBalancerProbesClient(ctrl) + testClient := &testLoadBalancerProbeClient{ + MockLoadBalancerProbesClient: mockClient, + pager: &errorLoadBalancerProbePager{}, + } + + wrapper := manual.NewNetworkLoadBalancerProbe(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + searchable := adapter.(discovery.SearchableAdapter) + _, err := searchable.Search(ctx, wrapper.Scopes()[0], loadBalancerName, true) + if err == nil { + t.Error("Expected error from pager when NextPage returns an error, but got nil") + } + }) + + t.Run("Get_NoProperties", func(t *testing.T) { + probe := &armnetwork.Probe{ + ID: new(fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/loadBalancers/%s/probes/%s", subscriptionID, resourceGroup, loadBalancerName, probeName)), + 
Name: new(probeName), + Type: new("Microsoft.Network/loadBalancers/probes"), + } + + mockClient := mocks.NewMockLoadBalancerProbesClient(ctrl) + mockClient.EXPECT().Get(ctx, resourceGroup, loadBalancerName, probeName).Return( + armnetwork.LoadBalancerProbesClientGetResponse{ + Probe: *probe, + }, nil) + + testClient := &testLoadBalancerProbeClient{MockLoadBalancerProbesClient: mockClient} + wrapper := manual.NewNetworkLoadBalancerProbe(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + query := shared.CompositeLookupKey(loadBalancerName, probeName) + sdpItem, qErr := adapter.Get(ctx, wrapper.Scopes()[0], query, true) + if qErr != nil { + t.Fatalf("Expected no error, got: %v", qErr) + } + + linkedQueries := sdpItem.GetLinkedItemQueries() + if len(linkedQueries) != 1 { + t.Errorf("Expected 1 linked query (parent LB only), got %d", len(linkedQueries)) + } + if linkedQueries[0].GetQuery().GetType() != azureshared.NetworkLoadBalancer.String() { + t.Errorf("Expected parent LB link, got type %s", linkedQueries[0].GetQuery().GetType()) + } + }) +} + +func createAzureLoadBalancerProbe(name, lbName, subscriptionID, resourceGroup string) *armnetwork.Probe { + provisioningState := armnetwork.ProvisioningStateSucceeded + port := int32(80) + protocol := armnetwork.ProbeProtocolHTTP + intervalInSeconds := int32(15) + numberOfProbes := int32(2) + lbRuleID := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/loadBalancers/%s/loadBalancingRules/lb-rule-1", subscriptionID, resourceGroup, lbName) + + return &armnetwork.Probe{ + ID: new(fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/loadBalancers/%s/probes/%s", subscriptionID, resourceGroup, lbName, name)), + Name: new(name), + Type: new("Microsoft.Network/loadBalancers/probes"), + Properties: &armnetwork.ProbePropertiesFormat{ + 
ProvisioningState: &provisioningState, + Port: &port, + Protocol: &protocol, + IntervalInSeconds: &intervalInSeconds, + NumberOfProbes: &numberOfProbes, + RequestPath: new("/health"), + LoadBalancingRules: []*armnetwork.SubResource{ + {ID: new(lbRuleID)}, + }, + }, + } +} + +func createAzureLoadBalancerProbeMinimal(name, lbName, subscriptionID, resourceGroup string) *armnetwork.Probe { + provisioningState := armnetwork.ProvisioningStateSucceeded + port := int32(80) + protocol := armnetwork.ProbeProtocolTCP + return &armnetwork.Probe{ + ID: new(fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/loadBalancers/%s/probes/%s", subscriptionID, resourceGroup, lbName, name)), + Name: new(name), + Type: new("Microsoft.Network/loadBalancers/probes"), + Properties: &armnetwork.ProbePropertiesFormat{ + ProvisioningState: &provisioningState, + Port: &port, + Protocol: &protocol, + }, + } +} diff --git a/sources/azure/manual/sql-database-schema.go b/sources/azure/manual/sql-database-schema.go new file mode 100644 index 00000000..bfa3dc93 --- /dev/null +++ b/sources/azure/manual/sql-database-schema.go @@ -0,0 +1,243 @@ +package manual + +import ( + "context" + "errors" + + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/sql/armsql/v2" + "github.com/overmindtech/cli/go/discovery" + "github.com/overmindtech/cli/go/sdp-go" + "github.com/overmindtech/cli/go/sdpcache" + "github.com/overmindtech/cli/sources" + "github.com/overmindtech/cli/sources/azure/clients" + azureshared "github.com/overmindtech/cli/sources/azure/shared" + "github.com/overmindtech/cli/sources/shared" +) + +var SQLDatabaseSchemaLookupByName = shared.NewItemTypeLookup("name", azureshared.SQLDatabaseSchema) + +type sqlDatabaseSchemaWrapper struct { + client clients.SqlDatabaseSchemasClient + + *azureshared.MultiResourceGroupBase +} + +func NewSqlDatabaseSchema(client clients.SqlDatabaseSchemasClient, resourceGroupScopes []azureshared.ResourceGroupScope) sources.SearchableWrapper { + 
return &sqlDatabaseSchemaWrapper{ + client: client, + MultiResourceGroupBase: azureshared.NewMultiResourceGroupBase( + resourceGroupScopes, + sdp.AdapterCategory_ADAPTER_CATEGORY_DATABASE, + azureshared.SQLDatabaseSchema, + ), + } +} + +// Get retrieves a specific database schema by serverName, databaseName, and schemaName +// ref: https://learn.microsoft.com/en-us/rest/api/sql/database-schemas/get +func (s sqlDatabaseSchemaWrapper) Get(ctx context.Context, scope string, queryParts ...string) (*sdp.Item, *sdp.QueryError) { + if len(queryParts) < 3 { + return nil, &sdp.QueryError{ + ErrorType: sdp.QueryError_OTHER, + ErrorString: "Get requires 3 query parts: serverName, databaseName, and schemaName", + Scope: scope, + ItemType: s.Type(), + } + } + serverName := queryParts[0] + databaseName := queryParts[1] + schemaName := queryParts[2] + + if serverName == "" { + return nil, &sdp.QueryError{ + ErrorType: sdp.QueryError_OTHER, + ErrorString: "serverName cannot be empty", + Scope: scope, + ItemType: s.Type(), + } + } + if databaseName == "" { + return nil, &sdp.QueryError{ + ErrorType: sdp.QueryError_OTHER, + ErrorString: "databaseName cannot be empty", + Scope: scope, + ItemType: s.Type(), + } + } + if schemaName == "" { + return nil, &sdp.QueryError{ + ErrorType: sdp.QueryError_OTHER, + ErrorString: "schemaName cannot be empty", + Scope: scope, + ItemType: s.Type(), + } + } + + rgScope, err := s.ResourceGroupScopeFromScope(scope) + if err != nil { + return nil, azureshared.QueryError(err, scope, s.Type()) + } + resp, err := s.client.Get(ctx, rgScope.ResourceGroup, serverName, databaseName, schemaName) + if err != nil { + return nil, azureshared.QueryError(err, scope, s.Type()) + } + + return s.azureDatabaseSchemaToSDPItem(&resp.DatabaseSchema, serverName, databaseName, schemaName, scope) +} + +func (s sqlDatabaseSchemaWrapper) azureDatabaseSchemaToSDPItem(schema *armsql.DatabaseSchema, serverName, databaseName, schemaName, scope string) (*sdp.Item, *sdp.QueryError) 
{
	attributes, err := shared.ToAttributesWithExclude(schema)
	if err != nil {
		return nil, azureshared.QueryError(err, scope, s.Type())
	}

	// Schemas are not uniquely named within a scope, so the unique attribute is
	// the composite of server, database and schema name.
	err = attributes.Set("uniqueAttr", shared.CompositeLookupKey(serverName, databaseName, schemaName))
	if err != nil {
		return nil, azureshared.QueryError(err, scope, s.Type())
	}

	sdpItem := &sdp.Item{
		Type:            azureshared.SQLDatabaseSchema.String(),
		UniqueAttribute: "uniqueAttr",
		Attributes:      attributes,
		Scope:           scope,
	}

	// Link to parent SQL Database
	sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{
		Query: &sdp.Query{
			Type:   azureshared.SQLDatabase.String(),
			Method: sdp.QueryMethod_GET,
			Query:  shared.CompositeLookupKey(serverName, databaseName),
			Scope:  scope,
		},
	})

	return sdpItem, nil
}

func (s sqlDatabaseSchemaWrapper) GetLookups() sources.ItemTypeLookups {
	return sources.ItemTypeLookups{
		SQLServerLookupByName,
		SQLDatabaseLookupByName,
		SQLDatabaseSchemaLookupByName,
	}
}

// validateSearchArgs extracts and validates the serverName and databaseName
// query parts shared by Search and SearchStream. It returns the two names, or
// a *sdp.QueryError describing the first violated precondition. Centralising
// this removes the previously duplicated (and slightly inconsistent)
// validation in the two callers.
func (s sqlDatabaseSchemaWrapper) validateSearchArgs(scope string, queryParts []string) (string, string, *sdp.QueryError) {
	if len(queryParts) < 2 {
		return "", "", &sdp.QueryError{
			ErrorType:   sdp.QueryError_OTHER,
			ErrorString: "Search requires 2 query parts: serverName and databaseName",
			Scope:       scope,
			ItemType:    s.Type(),
		}
	}
	serverName := queryParts[0]
	databaseName := queryParts[1]
	if serverName == "" {
		return "", "", azureshared.QueryError(errors.New("serverName cannot be empty"), scope, s.Type())
	}
	if databaseName == "" {
		return "", "", azureshared.QueryError(errors.New("databaseName cannot be empty"), scope, s.Type())
	}
	return serverName, databaseName, nil
}

// Search lists all database schemas for a given serverName and databaseName
// ref: https://learn.microsoft.com/en-us/rest/api/sql/database-schemas/list-by-database
func (s sqlDatabaseSchemaWrapper) Search(ctx context.Context, scope string, queryParts ...string) ([]*sdp.Item, *sdp.QueryError) {
	serverName, databaseName, qErr := s.validateSearchArgs(scope, queryParts)
	if qErr != nil {
		return nil, qErr
	}

	rgScope, err := s.ResourceGroupScopeFromScope(scope)
	if err != nil {
		return nil, azureshared.QueryError(err, scope, s.Type())
	}
	pager := s.client.ListByDatabase(ctx, rgScope.ResourceGroup, serverName, databaseName)

	var items []*sdp.Item
	for pager.More() {
		page, err := pager.NextPage(ctx)
		if err != nil {
			return nil, azureshared.QueryError(err, scope, s.Type())
		}
		for _, schema := range page.Value {
			// A schema without a name cannot be addressed by Get; skip it.
			if schema.Name == nil {
				continue
			}
			item, sdpErr := s.azureDatabaseSchemaToSDPItem(schema, serverName, databaseName, *schema.Name, scope)
			if sdpErr != nil {
				return nil, sdpErr
			}
			items = append(items, item)
		}
	}

	return items, nil
}

// SearchStream streams all database schemas for a given serverName and
// databaseName, caching each item as it is sent. Unlike Search, per-item
// conversion errors are reported on the stream and iteration continues.
func (s sqlDatabaseSchemaWrapper) SearchStream(ctx context.Context, stream discovery.QueryResultStream, cache sdpcache.Cache, cacheKey sdpcache.CacheKey, scope string, queryParts ...string) {
	serverName, databaseName, qErr := s.validateSearchArgs(scope, queryParts)
	if qErr != nil {
		stream.SendError(qErr)
		return
	}

	rgScope, err := s.ResourceGroupScopeFromScope(scope)
	if err != nil {
		stream.SendError(azureshared.QueryError(err, scope, s.Type()))
		return
	}
	pager := s.client.ListByDatabase(ctx, rgScope.ResourceGroup, serverName, databaseName)
	for pager.More() {
		page, err := pager.NextPage(ctx)
		if err != nil {
			stream.SendError(azureshared.QueryError(err, scope, s.Type()))
			return
		}
		for _, schema := range page.Value {
			if schema.Name == nil {
				continue
			}
			item, sdpErr := s.azureDatabaseSchemaToSDPItem(schema, serverName, databaseName, *schema.Name, scope)
			if sdpErr != nil {
				stream.SendError(sdpErr)
				continue
			}
			cache.StoreItem(ctx, item, shared.DefaultCacheDuration, cacheKey)
			stream.SendItem(item)
		}
	}
}

func
(s sqlDatabaseSchemaWrapper) SearchLookups() []sources.ItemTypeLookups {
	return []sources.ItemTypeLookups{
		{
			SQLServerLookupByName,
			SQLDatabaseLookupByName,
		},
	}
}

// PotentialLinks declares the item types this adapter may link to; the only
// linked item emitted by azureDatabaseSchemaToSDPItem is the parent SQL Database.
func (s sqlDatabaseSchemaWrapper) PotentialLinks() map[shared.ItemType]bool {
	return map[shared.ItemType]bool{
		azureshared.SQLDatabase: true,
	}
}

// ref: https://learn.microsoft.com/en-us/azure/role-based-access-control/resource-provider-operations#microsoftsql
func (s sqlDatabaseSchemaWrapper) IAMPermissions() []string {
	return []string{
		"Microsoft.Sql/servers/databases/schemas/read",
	}
}

// PredefinedRole names the built-in Azure role that grants the permissions above.
func (s sqlDatabaseSchemaWrapper) PredefinedRole() string {
	return "Reader"
}
diff --git a/sources/azure/manual/sql-database-schema_test.go b/sources/azure/manual/sql-database-schema_test.go
new file mode 100644
index 00000000..da6fe7be
--- /dev/null
+++ b/sources/azure/manual/sql-database-schema_test.go
@@ -0,0 +1,401 @@
package manual_test

import (
	"context"
	"errors"
	"slices"
	"testing"

	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/sql/armsql/v2"
	"go.uber.org/mock/gomock"

	"github.com/overmindtech/cli/go/discovery"
	"github.com/overmindtech/cli/go/sdp-go"
	"github.com/overmindtech/cli/go/sdpcache"
	"github.com/overmindtech/cli/sources"
	"github.com/overmindtech/cli/sources/azure/clients"
	"github.com/overmindtech/cli/sources/azure/manual"
	azureshared "github.com/overmindtech/cli/sources/azure/shared"
	"github.com/overmindtech/cli/sources/azure/shared/mocks"
	"github.com/overmindtech/cli/sources/shared"
)

// mockSqlDatabaseSchemasPager is a simple mock implementation of SqlDatabaseSchemasPager
type mockSqlDatabaseSchemasPager struct {
	pages []armsql.DatabaseSchemasClientListByDatabaseResponse // canned responses returned in order
	index int                                                  // next page to serve
}

// More reports whether any canned pages remain to be served.
func (m *mockSqlDatabaseSchemasPager) More() bool {
	return m.index < len(m.pages)
}

// NextPage serves the next canned page, erroring once the pages are exhausted.
func (m *mockSqlDatabaseSchemasPager) NextPage(ctx context.Context) (armsql.DatabaseSchemasClientListByDatabaseResponse, error) {
	if
m.index >= len(m.pages) { + return armsql.DatabaseSchemasClientListByDatabaseResponse{}, errors.New("no more pages") + } + page := m.pages[m.index] + m.index++ + return page, nil +} + +// errorSqlDatabaseSchemasPager is a mock pager that always returns an error +type errorSqlDatabaseSchemasPager struct{} + +func (e *errorSqlDatabaseSchemasPager) More() bool { + return true // Always return true so NextPage will be called +} + +func (e *errorSqlDatabaseSchemasPager) NextPage(ctx context.Context) (armsql.DatabaseSchemasClientListByDatabaseResponse, error) { + return armsql.DatabaseSchemasClientListByDatabaseResponse{}, errors.New("pager error") +} + +// testSqlDatabaseSchemasClient wraps the mock to implement the correct interface +type testSqlDatabaseSchemasClient struct { + *mocks.MockSqlDatabaseSchemasClient + pager clients.SqlDatabaseSchemasPager +} + +func (t *testSqlDatabaseSchemasClient) ListByDatabase(ctx context.Context, resourceGroupName, serverName, databaseName string) clients.SqlDatabaseSchemasPager { + return t.pager +} + +func TestSqlDatabaseSchema(t *testing.T) { + ctx := context.Background() + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + subscriptionID := "test-subscription" + resourceGroup := "test-rg" + serverName := "test-server" + databaseName := "test-database" + schemaName := "dbo" + + t.Run("Get", func(t *testing.T) { + schema := createAzureDatabaseSchema(serverName, databaseName, schemaName) + + mockClient := mocks.NewMockSqlDatabaseSchemasClient(ctrl) + mockClient.EXPECT().Get(ctx, resourceGroup, serverName, databaseName, schemaName).Return( + armsql.DatabaseSchemasClientGetResponse{ + DatabaseSchema: *schema, + }, nil) + + testClient := &testSqlDatabaseSchemasClient{MockSqlDatabaseSchemasClient: mockClient} + wrapper := manual.NewSqlDatabaseSchema(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) 
+ + // Get requires serverName, databaseName, and schemaName as query parts + query := shared.CompositeLookupKey(serverName, databaseName, schemaName) + sdpItem, qErr := adapter.Get(ctx, wrapper.Scopes()[0], query, true) + if qErr != nil { + t.Fatalf("Expected no error, got: %v", qErr) + } + + if sdpItem.GetType() != azureshared.SQLDatabaseSchema.String() { + t.Errorf("Expected type %s, got %s", azureshared.SQLDatabaseSchema, sdpItem.GetType()) + } + + if sdpItem.GetUniqueAttribute() != "uniqueAttr" { + t.Errorf("Expected unique attribute 'uniqueAttr', got %s", sdpItem.GetUniqueAttribute()) + } + + expectedUniqueAttrValue := shared.CompositeLookupKey(serverName, databaseName, schemaName) + if sdpItem.UniqueAttributeValue() != expectedUniqueAttrValue { + t.Errorf("Expected unique attribute value %s, got %s", expectedUniqueAttrValue, sdpItem.UniqueAttributeValue()) + } + + if sdpItem.GetScope() != subscriptionID+"."+resourceGroup { + t.Errorf("Expected scope %s, got %s", subscriptionID+"."+resourceGroup, sdpItem.GetScope()) + } + + // Validate the item + if err := sdpItem.Validate(); err != nil { + t.Fatalf("Expected no validation error, got: %v", err) + } + + t.Run("StaticTests", func(t *testing.T) { + queryTests := shared.QueryTests{ + { + // SQLDatabase parent link + ExpectedType: azureshared.SQLDatabase.String(), + ExpectedMethod: sdp.QueryMethod_GET, + ExpectedQuery: shared.CompositeLookupKey(serverName, databaseName), + ExpectedScope: subscriptionID + "." 
+ resourceGroup, + }, + } + + shared.RunStaticTests(t, adapter, sdpItem, queryTests) + }) + }) + + t.Run("Get_InvalidQueryParts", func(t *testing.T) { + mockClient := mocks.NewMockSqlDatabaseSchemasClient(ctrl) + testClient := &testSqlDatabaseSchemasClient{MockSqlDatabaseSchemasClient: mockClient} + + wrapper := manual.NewSqlDatabaseSchema(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + // Test with insufficient query parts (only server and database name) + _, qErr := adapter.Get(ctx, wrapper.Scopes()[0], shared.CompositeLookupKey(serverName, databaseName), true) + if qErr == nil { + t.Error("Expected error when providing insufficient query parts, but got nil") + } + }) + + t.Run("GetWithEmptyServerName", func(t *testing.T) { + mockClient := mocks.NewMockSqlDatabaseSchemasClient(ctrl) + testClient := &testSqlDatabaseSchemasClient{MockSqlDatabaseSchemasClient: mockClient} + + wrapper := manual.NewSqlDatabaseSchema(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + // Test with empty server name + _, qErr := adapter.Get(ctx, wrapper.Scopes()[0], shared.CompositeLookupKey("", databaseName, schemaName), true) + if qErr == nil { + t.Error("Expected error when providing empty server name, but got nil") + } + }) + + t.Run("GetWithEmptyDatabaseName", func(t *testing.T) { + mockClient := mocks.NewMockSqlDatabaseSchemasClient(ctrl) + testClient := &testSqlDatabaseSchemasClient{MockSqlDatabaseSchemasClient: mockClient} + + wrapper := manual.NewSqlDatabaseSchema(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + // Test with empty database name + _, qErr := 
adapter.Get(ctx, wrapper.Scopes()[0], shared.CompositeLookupKey(serverName, "", schemaName), true) + if qErr == nil { + t.Error("Expected error when providing empty database name, but got nil") + } + }) + + t.Run("GetWithEmptySchemaName", func(t *testing.T) { + mockClient := mocks.NewMockSqlDatabaseSchemasClient(ctrl) + testClient := &testSqlDatabaseSchemasClient{MockSqlDatabaseSchemasClient: mockClient} + + wrapper := manual.NewSqlDatabaseSchema(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + // Test with empty schema name + _, qErr := adapter.Get(ctx, wrapper.Scopes()[0], shared.CompositeLookupKey(serverName, databaseName, ""), true) + if qErr == nil { + t.Error("Expected error when providing empty schema name, but got nil") + } + }) + + t.Run("Search", func(t *testing.T) { + schema1 := createAzureDatabaseSchema(serverName, databaseName, "dbo") + schema2 := createAzureDatabaseSchema(serverName, databaseName, "sys") + + mockClient := mocks.NewMockSqlDatabaseSchemasClient(ctrl) + mockPager := &mockSqlDatabaseSchemasPager{ + pages: []armsql.DatabaseSchemasClientListByDatabaseResponse{ + { + DatabaseSchemaListResult: armsql.DatabaseSchemaListResult{ + Value: []*armsql.DatabaseSchema{schema1, schema2}, + }, + }, + }, + } + + testClient := &testSqlDatabaseSchemasClient{ + MockSqlDatabaseSchemasClient: mockClient, + pager: mockPager, + } + + wrapper := manual.NewSqlDatabaseSchema(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + searchable, ok := adapter.(discovery.SearchableAdapter) + if !ok { + t.Fatalf("Adapter does not support Search operation") + } + + sdpItems, err := searchable.Search(ctx, wrapper.Scopes()[0], shared.CompositeLookupKey(serverName, databaseName), true) + if err != nil { + 
t.Fatalf("Expected no error, got: %v", err) + } + + if len(sdpItems) != 2 { + t.Fatalf("Expected 2 items, got: %d", len(sdpItems)) + } + + for _, item := range sdpItems { + if err := item.Validate(); err != nil { + t.Fatalf("Expected no validation error, got: %v", err) + } + + if item.GetType() != azureshared.SQLDatabaseSchema.String() { + t.Errorf("Expected type %s, got %s", azureshared.SQLDatabaseSchema, item.GetType()) + } + } + }) + + t.Run("Search_WithNilName", func(t *testing.T) { + schema1 := createAzureDatabaseSchema(serverName, databaseName, "dbo") + schema2 := &armsql.DatabaseSchema{ + Name: nil, // Schema with nil name should be skipped + ID: new("/subscriptions/test-subscription/resourceGroups/test-rg/providers/Microsoft.Sql/servers/test-server/databases/test-database/schemas/nil-schema"), + } + + mockClient := mocks.NewMockSqlDatabaseSchemasClient(ctrl) + mockPager := &mockSqlDatabaseSchemasPager{ + pages: []armsql.DatabaseSchemasClientListByDatabaseResponse{ + { + DatabaseSchemaListResult: armsql.DatabaseSchemaListResult{ + Value: []*armsql.DatabaseSchema{schema1, schema2}, + }, + }, + }, + } + + testClient := &testSqlDatabaseSchemasClient{ + MockSqlDatabaseSchemasClient: mockClient, + pager: mockPager, + } + + wrapper := manual.NewSqlDatabaseSchema(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + searchable, ok := adapter.(discovery.SearchableAdapter) + if !ok { + t.Fatalf("Adapter does not support Search operation") + } + + sdpItems, err := searchable.Search(ctx, wrapper.Scopes()[0], shared.CompositeLookupKey(serverName, databaseName), true) + if err != nil { + t.Fatalf("Expected no error, got: %v", err) + } + + // Should only return 1 item (schema with nil name is skipped) + if len(sdpItems) != 1 { + t.Fatalf("Expected 1 item (nil name filtered out), got: %d", len(sdpItems)) + } + + if 
sdpItems[0].UniqueAttributeValue() != shared.CompositeLookupKey(serverName, databaseName, "dbo") { + t.Fatalf("Expected schema name 'dbo', got: %s", sdpItems[0].UniqueAttributeValue()) + } + }) + + t.Run("Search_InvalidQueryParts", func(t *testing.T) { + mockClient := mocks.NewMockSqlDatabaseSchemasClient(ctrl) + testClient := &testSqlDatabaseSchemasClient{MockSqlDatabaseSchemasClient: mockClient} + + wrapper := manual.NewSqlDatabaseSchema(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + + // Test Search directly with insufficient query parts - should return error before calling ListByDatabase + _, qErr := wrapper.Search(ctx, wrapper.Scopes()[0], serverName) + if qErr == nil { + t.Error("Expected error when providing insufficient query parts, but got nil") + } + }) + + t.Run("SearchWithEmptyServerName", func(t *testing.T) { + mockClient := mocks.NewMockSqlDatabaseSchemasClient(ctrl) + testClient := &testSqlDatabaseSchemasClient{MockSqlDatabaseSchemasClient: mockClient} + + wrapper := manual.NewSqlDatabaseSchema(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + + // Test Search with empty server name + _, qErr := wrapper.Search(ctx, wrapper.Scopes()[0], "", databaseName) + if qErr == nil { + t.Error("Expected error when providing empty server name, but got nil") + } + }) + + t.Run("SearchWithEmptyDatabaseName", func(t *testing.T) { + mockClient := mocks.NewMockSqlDatabaseSchemasClient(ctrl) + testClient := &testSqlDatabaseSchemasClient{MockSqlDatabaseSchemasClient: mockClient} + + wrapper := manual.NewSqlDatabaseSchema(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + + // Test Search with empty database name + _, qErr := wrapper.Search(ctx, wrapper.Scopes()[0], serverName, "") + if qErr == nil { + t.Error("Expected error when providing empty database name, but got nil") + } + 
}) + + t.Run("ErrorHandling_Get", func(t *testing.T) { + expectedErr := errors.New("schema not found") + + mockClient := mocks.NewMockSqlDatabaseSchemasClient(ctrl) + mockClient.EXPECT().Get(ctx, resourceGroup, serverName, databaseName, "nonexistent-schema").Return( + armsql.DatabaseSchemasClientGetResponse{}, expectedErr) + + testClient := &testSqlDatabaseSchemasClient{MockSqlDatabaseSchemasClient: mockClient} + wrapper := manual.NewSqlDatabaseSchema(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + query := shared.CompositeLookupKey(serverName, databaseName, "nonexistent-schema") + _, qErr := adapter.Get(ctx, wrapper.Scopes()[0], query, true) + if qErr == nil { + t.Error("Expected error when getting non-existent schema, but got nil") + } + }) + + t.Run("ErrorHandling_Search", func(t *testing.T) { + mockClient := mocks.NewMockSqlDatabaseSchemasClient(ctrl) + // Create a pager that returns an error when NextPage is called + errorPager := &errorSqlDatabaseSchemasPager{} + + testClient := &testSqlDatabaseSchemasClient{ + MockSqlDatabaseSchemasClient: mockClient, + pager: errorPager, + } + + wrapper := manual.NewSqlDatabaseSchema(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + searchable, ok := adapter.(discovery.SearchableAdapter) + if !ok { + t.Fatalf("Adapter does not support Search operation") + } + + _, err := searchable.Search(ctx, wrapper.Scopes()[0], shared.CompositeLookupKey(serverName, databaseName), true) + if err == nil { + t.Error("Expected error from pager when NextPage returns an error, but got nil") + } + }) + + t.Run("InterfaceCompliance", func(t *testing.T) { + mockClient := mocks.NewMockSqlDatabaseSchemasClient(ctrl) + testClient := 
&testSqlDatabaseSchemasClient{MockSqlDatabaseSchemasClient: mockClient} + wrapper := manual.NewSqlDatabaseSchema(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + + // Cast to sources.Wrapper to access interface methods + w := wrapper.(sources.Wrapper) + + // Verify IAMPermissions + permissions := w.IAMPermissions() + if len(permissions) == 0 { + t.Error("Expected IAMPermissions to return at least one permission") + } + expectedPermission := "Microsoft.Sql/servers/databases/schemas/read" + found := slices.Contains(permissions, expectedPermission) + if !found { + t.Errorf("Expected IAMPermissions to include %s", expectedPermission) + } + + // Verify PotentialLinks + potentialLinks := w.PotentialLinks() + if len(potentialLinks) == 0 { + t.Error("Expected PotentialLinks to return at least one link") + } + if !potentialLinks[azureshared.SQLDatabase] { + t.Error("Expected PotentialLinks to include SQLDatabase") + } + }) +} + +// createAzureDatabaseSchema creates a mock Azure database schema for testing +func createAzureDatabaseSchema(serverName, databaseName, schemaName string) *armsql.DatabaseSchema { + schemaID := "/subscriptions/test-subscription/resourceGroups/test-rg/providers/Microsoft.Sql/servers/" + serverName + "/databases/" + databaseName + "/schemas/" + schemaName + + return &armsql.DatabaseSchema{ + Name: new(schemaName), + ID: new(schemaID), + Type: new("Microsoft.Sql/servers/databases/schemas"), + } +} diff --git a/sources/azure/shared/mocks/mock_dbforpostgresql_configurations_client.go b/sources/azure/shared/mocks/mock_dbforpostgresql_configurations_client.go new file mode 100644 index 00000000..002fc78e --- /dev/null +++ b/sources/azure/shared/mocks/mock_dbforpostgresql_configurations_client.go @@ -0,0 +1,72 @@ +// Code generated by MockGen. DO NOT EDIT. 
+// Source: dbforpostgresql-configurations-client.go +// +// Generated by this command: +// +// mockgen -destination=../shared/mocks/mock_dbforpostgresql_configurations_client.go -package=mocks -source=dbforpostgresql-configurations-client.go +// + +// Package mocks is a generated GoMock package. +package mocks + +import ( + context "context" + reflect "reflect" + + armpostgresqlflexibleservers "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/postgresql/armpostgresqlflexibleservers/v5" + clients "github.com/overmindtech/cli/sources/azure/clients" + gomock "go.uber.org/mock/gomock" +) + +// MockPostgreSQLConfigurationsClient is a mock of PostgreSQLConfigurationsClient interface. +type MockPostgreSQLConfigurationsClient struct { + ctrl *gomock.Controller + recorder *MockPostgreSQLConfigurationsClientMockRecorder + isgomock struct{} +} + +// MockPostgreSQLConfigurationsClientMockRecorder is the mock recorder for MockPostgreSQLConfigurationsClient. +type MockPostgreSQLConfigurationsClientMockRecorder struct { + mock *MockPostgreSQLConfigurationsClient +} + +// NewMockPostgreSQLConfigurationsClient creates a new mock instance. +func NewMockPostgreSQLConfigurationsClient(ctrl *gomock.Controller) *MockPostgreSQLConfigurationsClient { + mock := &MockPostgreSQLConfigurationsClient{ctrl: ctrl} + mock.recorder = &MockPostgreSQLConfigurationsClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockPostgreSQLConfigurationsClient) EXPECT() *MockPostgreSQLConfigurationsClientMockRecorder { + return m.recorder +} + +// Get mocks base method. 
+func (m *MockPostgreSQLConfigurationsClient) Get(ctx context.Context, resourceGroupName, serverName, configurationName string, options *armpostgresqlflexibleservers.ConfigurationsClientGetOptions) (armpostgresqlflexibleservers.ConfigurationsClientGetResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Get", ctx, resourceGroupName, serverName, configurationName, options) + ret0, _ := ret[0].(armpostgresqlflexibleservers.ConfigurationsClientGetResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Get indicates an expected call of Get. +func (mr *MockPostgreSQLConfigurationsClientMockRecorder) Get(ctx, resourceGroupName, serverName, configurationName, options any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockPostgreSQLConfigurationsClient)(nil).Get), ctx, resourceGroupName, serverName, configurationName, options) +} + +// NewListByServerPager mocks base method. +func (m *MockPostgreSQLConfigurationsClient) NewListByServerPager(resourceGroupName, serverName string, options *armpostgresqlflexibleservers.ConfigurationsClientListByServerOptions) clients.PostgreSQLConfigurationsPager { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NewListByServerPager", resourceGroupName, serverName, options) + ret0, _ := ret[0].(clients.PostgreSQLConfigurationsPager) + return ret0 +} + +// NewListByServerPager indicates an expected call of NewListByServerPager. 
+func (mr *MockPostgreSQLConfigurationsClientMockRecorder) NewListByServerPager(resourceGroupName, serverName, options any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewListByServerPager", reflect.TypeOf((*MockPostgreSQLConfigurationsClient)(nil).NewListByServerPager), resourceGroupName, serverName, options) +} diff --git a/sources/azure/shared/mocks/mock_dbforpostgresql_flexible_server_replica_client.go b/sources/azure/shared/mocks/mock_dbforpostgresql_flexible_server_replica_client.go new file mode 100644 index 00000000..a4b5ac65 --- /dev/null +++ b/sources/azure/shared/mocks/mock_dbforpostgresql_flexible_server_replica_client.go @@ -0,0 +1,72 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: dbforpostgresql-flexible-server-replica-client.go +// +// Generated by this command: +// +// mockgen -destination=../shared/mocks/mock_dbforpostgresql_flexible_server_replica_client.go -package=mocks -source=dbforpostgresql-flexible-server-replica-client.go +// + +// Package mocks is a generated GoMock package. +package mocks + +import ( + context "context" + reflect "reflect" + + armpostgresqlflexibleservers "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/postgresql/armpostgresqlflexibleservers/v5" + clients "github.com/overmindtech/cli/sources/azure/clients" + gomock "go.uber.org/mock/gomock" +) + +// MockDBforPostgreSQLFlexibleServerReplicaClient is a mock of DBforPostgreSQLFlexibleServerReplicaClient interface. +type MockDBforPostgreSQLFlexibleServerReplicaClient struct { + ctrl *gomock.Controller + recorder *MockDBforPostgreSQLFlexibleServerReplicaClientMockRecorder + isgomock struct{} +} + +// MockDBforPostgreSQLFlexibleServerReplicaClientMockRecorder is the mock recorder for MockDBforPostgreSQLFlexibleServerReplicaClient. 
+type MockDBforPostgreSQLFlexibleServerReplicaClientMockRecorder struct { + mock *MockDBforPostgreSQLFlexibleServerReplicaClient +} + +// NewMockDBforPostgreSQLFlexibleServerReplicaClient creates a new mock instance. +func NewMockDBforPostgreSQLFlexibleServerReplicaClient(ctrl *gomock.Controller) *MockDBforPostgreSQLFlexibleServerReplicaClient { + mock := &MockDBforPostgreSQLFlexibleServerReplicaClient{ctrl: ctrl} + mock.recorder = &MockDBforPostgreSQLFlexibleServerReplicaClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockDBforPostgreSQLFlexibleServerReplicaClient) EXPECT() *MockDBforPostgreSQLFlexibleServerReplicaClientMockRecorder { + return m.recorder +} + +// Get mocks base method. +func (m *MockDBforPostgreSQLFlexibleServerReplicaClient) Get(ctx context.Context, resourceGroupName, replicaName string) (armpostgresqlflexibleservers.ServersClientGetResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Get", ctx, resourceGroupName, replicaName) + ret0, _ := ret[0].(armpostgresqlflexibleservers.ServersClientGetResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Get indicates an expected call of Get. +func (mr *MockDBforPostgreSQLFlexibleServerReplicaClientMockRecorder) Get(ctx, resourceGroupName, replicaName any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockDBforPostgreSQLFlexibleServerReplicaClient)(nil).Get), ctx, resourceGroupName, replicaName) +} + +// ListByServer mocks base method. 
+func (m *MockDBforPostgreSQLFlexibleServerReplicaClient) ListByServer(ctx context.Context, resourceGroupName, serverName string) clients.DBforPostgreSQLFlexibleServerReplicaPager { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListByServer", ctx, resourceGroupName, serverName) + ret0, _ := ret[0].(clients.DBforPostgreSQLFlexibleServerReplicaPager) + return ret0 +} + +// ListByServer indicates an expected call of ListByServer. +func (mr *MockDBforPostgreSQLFlexibleServerReplicaClientMockRecorder) ListByServer(ctx, resourceGroupName, serverName any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListByServer", reflect.TypeOf((*MockDBforPostgreSQLFlexibleServerReplicaClient)(nil).ListByServer), ctx, resourceGroupName, serverName) +} diff --git a/sources/azure/shared/mocks/mock_load_balancer_backend_address_pools_client.go b/sources/azure/shared/mocks/mock_load_balancer_backend_address_pools_client.go new file mode 100644 index 00000000..9fe8e3a9 --- /dev/null +++ b/sources/azure/shared/mocks/mock_load_balancer_backend_address_pools_client.go @@ -0,0 +1,72 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: load-balancer-backend-address-pools-client.go +// +// Generated by this command: +// +// mockgen -destination=../shared/mocks/mock_load_balancer_backend_address_pools_client.go -package=mocks -source=load-balancer-backend-address-pools-client.go +// + +// Package mocks is a generated GoMock package. +package mocks + +import ( + context "context" + reflect "reflect" + + armnetwork "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v9" + clients "github.com/overmindtech/cli/sources/azure/clients" + gomock "go.uber.org/mock/gomock" +) + +// MockLoadBalancerBackendAddressPoolsClient is a mock of LoadBalancerBackendAddressPoolsClient interface. 
+type MockLoadBalancerBackendAddressPoolsClient struct { + ctrl *gomock.Controller + recorder *MockLoadBalancerBackendAddressPoolsClientMockRecorder + isgomock struct{} +} + +// MockLoadBalancerBackendAddressPoolsClientMockRecorder is the mock recorder for MockLoadBalancerBackendAddressPoolsClient. +type MockLoadBalancerBackendAddressPoolsClientMockRecorder struct { + mock *MockLoadBalancerBackendAddressPoolsClient +} + +// NewMockLoadBalancerBackendAddressPoolsClient creates a new mock instance. +func NewMockLoadBalancerBackendAddressPoolsClient(ctrl *gomock.Controller) *MockLoadBalancerBackendAddressPoolsClient { + mock := &MockLoadBalancerBackendAddressPoolsClient{ctrl: ctrl} + mock.recorder = &MockLoadBalancerBackendAddressPoolsClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockLoadBalancerBackendAddressPoolsClient) EXPECT() *MockLoadBalancerBackendAddressPoolsClientMockRecorder { + return m.recorder +} + +// Get mocks base method. +func (m *MockLoadBalancerBackendAddressPoolsClient) Get(ctx context.Context, resourceGroupName, loadBalancerName, backendAddressPoolName string) (armnetwork.LoadBalancerBackendAddressPoolsClientGetResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Get", ctx, resourceGroupName, loadBalancerName, backendAddressPoolName) + ret0, _ := ret[0].(armnetwork.LoadBalancerBackendAddressPoolsClientGetResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Get indicates an expected call of Get. +func (mr *MockLoadBalancerBackendAddressPoolsClientMockRecorder) Get(ctx, resourceGroupName, loadBalancerName, backendAddressPoolName any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockLoadBalancerBackendAddressPoolsClient)(nil).Get), ctx, resourceGroupName, loadBalancerName, backendAddressPoolName) +} + +// NewListPager mocks base method. 
+func (m *MockLoadBalancerBackendAddressPoolsClient) NewListPager(resourceGroupName, loadBalancerName string) clients.LoadBalancerBackendAddressPoolsPager { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NewListPager", resourceGroupName, loadBalancerName) + ret0, _ := ret[0].(clients.LoadBalancerBackendAddressPoolsPager) + return ret0 +} + +// NewListPager indicates an expected call of NewListPager. +func (mr *MockLoadBalancerBackendAddressPoolsClientMockRecorder) NewListPager(resourceGroupName, loadBalancerName any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewListPager", reflect.TypeOf((*MockLoadBalancerBackendAddressPoolsClient)(nil).NewListPager), resourceGroupName, loadBalancerName) +} diff --git a/sources/azure/shared/mocks/mock_load_balancer_probes_client.go b/sources/azure/shared/mocks/mock_load_balancer_probes_client.go new file mode 100644 index 00000000..1690a359 --- /dev/null +++ b/sources/azure/shared/mocks/mock_load_balancer_probes_client.go @@ -0,0 +1,72 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: load-balancer-probes-client.go +// +// Generated by this command: +// +// mockgen -destination=../shared/mocks/mock_load_balancer_probes_client.go -package=mocks -source=load-balancer-probes-client.go +// + +// Package mocks is a generated GoMock package. +package mocks + +import ( + context "context" + reflect "reflect" + + armnetwork "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v9" + clients "github.com/overmindtech/cli/sources/azure/clients" + gomock "go.uber.org/mock/gomock" +) + +// MockLoadBalancerProbesClient is a mock of LoadBalancerProbesClient interface. +type MockLoadBalancerProbesClient struct { + ctrl *gomock.Controller + recorder *MockLoadBalancerProbesClientMockRecorder + isgomock struct{} +} + +// MockLoadBalancerProbesClientMockRecorder is the mock recorder for MockLoadBalancerProbesClient. 
+type MockLoadBalancerProbesClientMockRecorder struct { + mock *MockLoadBalancerProbesClient +} + +// NewMockLoadBalancerProbesClient creates a new mock instance. +func NewMockLoadBalancerProbesClient(ctrl *gomock.Controller) *MockLoadBalancerProbesClient { + mock := &MockLoadBalancerProbesClient{ctrl: ctrl} + mock.recorder = &MockLoadBalancerProbesClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockLoadBalancerProbesClient) EXPECT() *MockLoadBalancerProbesClientMockRecorder { + return m.recorder +} + +// Get mocks base method. +func (m *MockLoadBalancerProbesClient) Get(ctx context.Context, resourceGroupName, loadBalancerName, probeName string) (armnetwork.LoadBalancerProbesClientGetResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Get", ctx, resourceGroupName, loadBalancerName, probeName) + ret0, _ := ret[0].(armnetwork.LoadBalancerProbesClientGetResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Get indicates an expected call of Get. +func (mr *MockLoadBalancerProbesClientMockRecorder) Get(ctx, resourceGroupName, loadBalancerName, probeName any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockLoadBalancerProbesClient)(nil).Get), ctx, resourceGroupName, loadBalancerName, probeName) +} + +// NewListPager mocks base method. +func (m *MockLoadBalancerProbesClient) NewListPager(resourceGroupName, loadBalancerName string) clients.LoadBalancerProbesPager { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NewListPager", resourceGroupName, loadBalancerName) + ret0, _ := ret[0].(clients.LoadBalancerProbesPager) + return ret0 +} + +// NewListPager indicates an expected call of NewListPager. 
+func (mr *MockLoadBalancerProbesClientMockRecorder) NewListPager(resourceGroupName, loadBalancerName any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewListPager", reflect.TypeOf((*MockLoadBalancerProbesClient)(nil).NewListPager), resourceGroupName, loadBalancerName) +} diff --git a/sources/azure/shared/mocks/mock_sql_database_schemas_client.go b/sources/azure/shared/mocks/mock_sql_database_schemas_client.go new file mode 100644 index 00000000..efb34d8d --- /dev/null +++ b/sources/azure/shared/mocks/mock_sql_database_schemas_client.go @@ -0,0 +1,72 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: sql-database-schemas-client.go +// +// Generated by this command: +// +// mockgen -destination=../shared/mocks/mock_sql_database_schemas_client.go -package=mocks -source=sql-database-schemas-client.go +// + +// Package mocks is a generated GoMock package. +package mocks + +import ( + context "context" + reflect "reflect" + + armsql "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/sql/armsql/v2" + clients "github.com/overmindtech/cli/sources/azure/clients" + gomock "go.uber.org/mock/gomock" +) + +// MockSqlDatabaseSchemasClient is a mock of SqlDatabaseSchemasClient interface. +type MockSqlDatabaseSchemasClient struct { + ctrl *gomock.Controller + recorder *MockSqlDatabaseSchemasClientMockRecorder + isgomock struct{} +} + +// MockSqlDatabaseSchemasClientMockRecorder is the mock recorder for MockSqlDatabaseSchemasClient. +type MockSqlDatabaseSchemasClientMockRecorder struct { + mock *MockSqlDatabaseSchemasClient +} + +// NewMockSqlDatabaseSchemasClient creates a new mock instance. +func NewMockSqlDatabaseSchemasClient(ctrl *gomock.Controller) *MockSqlDatabaseSchemasClient { + mock := &MockSqlDatabaseSchemasClient{ctrl: ctrl} + mock.recorder = &MockSqlDatabaseSchemasClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. 
+func (m *MockSqlDatabaseSchemasClient) EXPECT() *MockSqlDatabaseSchemasClientMockRecorder { + return m.recorder +} + +// Get mocks base method. +func (m *MockSqlDatabaseSchemasClient) Get(ctx context.Context, resourceGroupName, serverName, databaseName, schemaName string) (armsql.DatabaseSchemasClientGetResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Get", ctx, resourceGroupName, serverName, databaseName, schemaName) + ret0, _ := ret[0].(armsql.DatabaseSchemasClientGetResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Get indicates an expected call of Get. +func (mr *MockSqlDatabaseSchemasClientMockRecorder) Get(ctx, resourceGroupName, serverName, databaseName, schemaName any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockSqlDatabaseSchemasClient)(nil).Get), ctx, resourceGroupName, serverName, databaseName, schemaName) +} + +// ListByDatabase mocks base method. +func (m *MockSqlDatabaseSchemasClient) ListByDatabase(ctx context.Context, resourceGroupName, serverName, databaseName string) clients.SqlDatabaseSchemasPager { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListByDatabase", ctx, resourceGroupName, serverName, databaseName) + ret0, _ := ret[0].(clients.SqlDatabaseSchemasPager) + return ret0 +} + +// ListByDatabase indicates an expected call of ListByDatabase. +func (mr *MockSqlDatabaseSchemasClientMockRecorder) ListByDatabase(ctx, resourceGroupName, serverName, databaseName any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListByDatabase", reflect.TypeOf((*MockSqlDatabaseSchemasClient)(nil).ListByDatabase), ctx, resourceGroupName, serverName, databaseName) +}