diff --git a/cli/cmd/digger/default.go b/cli/cmd/digger/default.go index 72f9908ec..3366b102f 100644 --- a/cli/cmd/digger/default.go +++ b/cli/cmd/digger/default.go @@ -13,15 +13,31 @@ import ( "github.com/diggerhq/digger/libs/policy" lib_spec "github.com/diggerhq/digger/libs/spec" "github.com/spf13/cobra" - "log" + "log/slog" "os" "runtime/debug" ) +func initLogger() { + logLevel := os.Getenv("DIGGER_LOG_LEVEL") + var level slog.Leveler + if logLevel == "DEBUG" { + level = slog.LevelDebug + } else { + level = slog.LevelInfo + } + logger := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{ + Level: level, + })) + + slog.SetDefault(logger) + +} + var defaultCmd = &cobra.Command{ Use: "default", Run: func(cmd *cobra.Command, args []string) { - + initLogger() specStr := os.Getenv("DIGGER_RUN_SPEC") if specStr != "" { var spec lib_spec.Spec @@ -31,7 +47,7 @@ var defaultCmd = &cobra.Command{ } var spec_err error - + spec_err = spec2.RunSpec( spec, lib_spec.VCSProviderBasic{}, @@ -65,10 +81,10 @@ var defaultCmd = &cobra.Command{ defer func() { if r := recover(); r != nil { - log.Println(fmt.Sprintf("stacktrace from panic: \n" + string(debug.Stack()))) + slog.Error(fmt.Sprintf("stacktrace from panic: " + string(debug.Stack()))) err := usage.SendLogRecord(logLeader, fmt.Sprintf("Panic occurred. %s", r)) if err != nil { - log.Printf("Failed to send log record. %s\n", err) + slog.Error("Failed to send log record", "error", err) } os.Exit(1) } diff --git a/cli/cmd/digger/main.go b/cli/cmd/digger/main.go index bae66b0fc..3eeed4bb3 100644 --- a/cli/cmd/digger/main.go +++ b/cli/cmd/digger/main.go @@ -3,7 +3,6 @@ package main import ( "fmt" "github.com/diggerhq/digger/cli/pkg/usage" - "log" "os" ) @@ -30,11 +29,3 @@ func main() { } } - -func init() { - log.SetOutput(os.Stdout) - - if os.Getenv("DEBUG") == "true" { - log.SetFlags(log.Ltime | log.Lshortfile) - } -} diff --git a/cli/cmd/digger/root.go b/cli/cmd/digger/root.go index fdc6626d6..0936f7e3c 100644 --- a/cli/cmd/digger/root.go +++ b/cli/cmd/digger/root.go @@ -11,7 +11,7 @@ import ( locking2 "github.com/diggerhq/digger/libs/locking" core_policy "github.com/diggerhq/digger/libs/policy" "github.com/spf13/cobra" - "log" + "log/slog" "net/http" "os" "time" @@ -108,15 +108,15 @@ func PreRun(cmd *cobra.Command, args []string) { if os.Getenv("NO_BACKEND") == "true" { lock, err = locking2.GetLock() } else { - log.Printf("Warning: not performing locking in cli since digger is invoked with orchestrator mode, any arguments to LOCKING_PROVIDER will be ignored") + slog.Warn("Not performing locking in cli since digger is invoked with orchestrator mode, any arguments to LOCKING_PROVIDER will be ignored") lock = locking2.NoOpLock{} err = nil } if err != nil { - log.Printf("Failed to create lock provider. 
%s\n", err) + slog.Error("Failed to create lock provider", "error", err) os.Exit(2) } - log.Println("Lock provider has been created successfully") + slog.Info("Lock provider has been created successfully") } var rootCmd = &cobra.Command{ diff --git a/cli/pkg/digger/digger.go b/cli/pkg/digger/digger.go index a2714357c..d1845d82a 100644 --- a/cli/pkg/digger/digger.go +++ b/cli/pkg/digger/digger.go @@ -4,6 +4,7 @@ import ( "errors" "fmt" "log" + "log/slog" "os" "path" "strings" @@ -68,9 +69,9 @@ func DetectCI() CIName { func RunJobs(jobs []orchestrator.Job, prService ci.PullRequestService, orgService ci.OrgService, lock locking2.Lock, reporter reporting.Reporter, planStorage storage.PlanStorage, policyChecker policy.Checker, commentUpdater comment_updater.CommentUpdater, backendApi backendapi.Api, jobId string, reportFinalStatusToBackend bool, reportTerraformOutput bool, prCommentId string, workingDir string) (bool, bool, error) { defer reporter.Flush() - log.Printf("Info: [TF_PLUGIN_CACHE_DIR=%v] ", os.Getenv("TF_PLUGIN_CACHE_DIR")) - log.Printf("Info: [TG_PROVIDER_CACHE_DIR=%v] ", os.Getenv("TG_PROVIDER_CACHE_DIR")) - log.Printf("Info: [TERRAGRUNT_PROVIDER_CACHE_DIR=%v] ", os.Getenv("TERRAGRUNT_PROVIDER_CACHE_DIR")) + slog.Debug("Variable info", "TF_PLUGIN_CACHE_DIR", os.Getenv("TF_PLUGIN_CACHE_DIR")) + slog.Debug("Variable info", "TG_PROVIDER_CACHE_DIR", os.Getenv("TG_PROVIDER_CACHE_DIR")) + slog.Debug("Variable info", "TERRAGRUNT_PROVIDER_CACHE_DIR", os.Getenv("TERRAGRUNT_PROVIDER_CACHE_DIR")) runStartedAt := time.Now() @@ -91,31 +92,31 @@ func RunJobs(jobs []orchestrator.Job, prService ci.PullRequestService, orgServic if !allowedToPerformCommand { msg := reportPolicyError(job.ProjectName, command, job.RequestedBy, reporter) - log.Printf("Skipping command ... %v for project %v", command, job.ProjectName) - log.Println(msg) + slog.Warn("Skipping command ... 
%v for project %v", command, job.ProjectName) + slog.Warn("Received policy error", "message", msg) appliesPerProject[job.ProjectName] = false continue } executorResult, output, err := run(command, job, policyChecker, orgService, SCMOrganisation, SCMrepository, job.PullRequestNumber, job.RequestedBy, reporter, lock, prService, job.Namespace, workingDir, planStorage, appliesPerProject) if err != nil { - log.Printf("error while running command %v for project %v: %v", command, job.ProjectName, err) + slog.Error("error while running command for project", "command", command, "projectname", job.ProjectName, "error", err) reportErr := backendApi.ReportProjectRun(SCMOrganisation+"-"+SCMrepository, job.ProjectName, runStartedAt, time.Now(), "FAILED", command, output) if reportErr != nil { - log.Printf("error reporting project Run err: %v.\n", reportErr) + slog.Error("error reporting project Run err.", "error", reportErr) } appliesPerProject[job.ProjectName] = false if executorResult != nil { exectorResults[i] = *executorResult } - log.Printf("Project %v command %v failed, skipping job", job.ProjectName, command) + slog.Error("Project command failed, skipping job", "project name", job.ProjectName, "command", command) break } exectorResults[i] = *executorResult err = backendApi.ReportProjectRun(SCMOrganisation+"-"+SCMrepository, job.ProjectName, runStartedAt, time.Now(), "SUCCESS", command, output) if err != nil { - log.Printf("Error reporting project Run: %v", err) + slog.Error("Error reporting project Run", "error", err) } } } @@ -132,10 +133,10 @@ func RunJobs(jobs []orchestrator.Job, prService ci.PullRequestService, orgServic _, jobPrCommentUrl, err := reporter.Flush() if err != nil { - log.Printf("error while sending job comments %v", err) + slog.Error("error while sending job comments", "error", err) cmt, cmt_err := prService.PublishComment(*currentJob.PullRequestNumber, fmt.Sprintf(":yellow_circle: Warning: failed to post report for project %v, received error: %v.\n\n you may review details in the job logs", currentJob.ProjectName, err)) if cmt_err != nil { - log.Printf("Error while posting error comment: %v", err) + slog.Error("Error while posting error comment", "error", err) return false, false, fmt.Errorf("failed to post reporter error comment, aborting. 
Error: %v", err) } jobPrCommentUrl = cmt.Url @@ -154,18 +155,18 @@ func RunJobs(jobs []orchestrator.Job, prService ci.PullRequestService, orgServic iacUtils := iac_utils.GetIacUtilsIacType(currentJob.IacType()) batchResult, err := backendApi.ReportProjectJobStatus(currentJob.Namespace, projectNameForBackendReporting, jobId, "succeeded", time.Now(), &summary, "", jobPrCommentUrl, terraformOutput, iacUtils) if err != nil { - log.Printf("error reporting Job status: %v.\n", err) + slog.Error("error reporting Job status", "error", err) return false, false, fmt.Errorf("error while running command: %v", err) } err = commentUpdater.UpdateComment(batchResult.Jobs, prNumber, prService, prCommentId) if err != nil { - log.Printf("error Updating status comment: %v.\n", err) + slog.Error("error Updating status comment", "error", err) return false, false, err } err = UpdateAggregateStatus(batchResult, prService) if err != nil { - log.Printf("error updating aggregate status check: %v.\n", err) + slog.Error("error updating aggregate status check", "error", err) return false, false, err } @@ -181,19 +182,19 @@ func reportPolicyError(projectName string, command string, requestedBy string, r if reporter.SupportsMarkdown() { _, _, err := reporter.Report(msg, coreutils.AsCollapsibleComment(fmt.Sprintf("Policy violation for %v - %v", projectName, command), false)) if err != nil { - log.Printf("Error publishing comment: %v", err) + slog.Error("Error publishing comment", "error", err) } } else { _, _, err := reporter.Report(msg, coreutils.AsComment(fmt.Sprintf("Policy violation for %v - %v", projectName, command))) if err != nil { - log.Printf("Error publishing comment: %v", err) + slog.Error("Error publishing comment", "error", err) } } return msg } func run(command string, job orchestrator.Job, policyChecker policy.Checker, orgService ci.OrgService, SCMOrganisation string, SCMrepository string, PRNumber *int, requestedBy string, reporter reporting.Reporter, lock locking2.Lock, prService ci.PullRequestService, projectNamespace string, workingDir string, planStorage storage.PlanStorage, appliesPerProject map[string]bool) (*execution.DiggerExecutorResult, string, error) { - log.Printf("Running '%s' for project '%s' (workflow: %s)\n", command, job.ProjectName, job.ProjectWorkflow) + slog.Info("Running command for project", "command", command, "project name", job.ProjectName, "project workflow", job.ProjectWorkflow) allowedToPerformCommand, err := policyChecker.CheckAccessPolicy(orgService, &prService, SCMOrganisation, SCMrepository, job.ProjectName, job.ProjectDir, command, job.PullRequestNumber, requestedBy, []string{}) @@ -203,13 +204,14 @@ func run(command string, job orchestrator.Job, policyChecker policy.Checker, org if !allowedToPerformCommand { msg := reportPolicyError(job.ProjectName, command, requestedBy, reporter) - log.Println(msg) + slog.Error(msg) return nil, msg, errors.New(msg) } err = job.PopulateAwsCredentialsEnvVarsForJob() if err != nil { - log.Fatalf("failed to fetch AWS keys, %v", err) + slog.Error("failed to fetch AWS keys", "error", err) + os.Exit(1) } projectLock := &locking2.PullRequestLock{ @@ -272,7 +274,7 @@ func run(command string, job orchestrator.Job, policyChecker policy.Checker, org case "digger plan": err := usage.SendUsageRecord(requestedBy, job.EventName, "plan") if err != nil { - log.Printf("failed to send usage report. 
%v", err) + slog.Error("failed to send usage report", "error", err) } err = prService.SetStatus(*job.PullRequestNumber, "pending", job.ProjectName+"/plan") if err != nil { @@ -283,7 +285,7 @@ func run(command string, job orchestrator.Job, policyChecker policy.Checker, org if err != nil { msg := fmt.Sprintf("Failed to Run digger plan command. %v", err) - log.Printf(msg) + slog.Error("Failed to Run digger plan command", "error", err) err := prService.SetStatus(*job.PullRequestNumber, "failure", job.ProjectName+"/plan") if err != nil { msg := fmt.Sprintf("Failed to set PR status. %v", err) @@ -297,7 +299,7 @@ func run(command string, job orchestrator.Job, policyChecker policy.Checker, org planIsAllowed, messages, err := policyChecker.CheckPlanPolicy(SCMrepository, SCMOrganisation, job.ProjectName, job.ProjectDir, planJsonOutput) if err != nil { msg := fmt.Sprintf("Failed to validate plan. %v", err) - log.Printf(msg) + slog.Error("Failed to validate plan.", "error", err) return nil, msg, fmt.Errorf(msg) } var planPolicyFormatter func(report string) string @@ -310,7 +312,7 @@ func run(command string, job orchestrator.Job, policyChecker policy.Checker, org planSummary, err := iacUtils.GetSummarizePlan(planJsonOutput) if err != nil { - log.Printf("Failed to summarize plan. %v", err) + slog.Error("Failed to summarize plan", "error", err) } if !planIsAllowed { @@ -323,15 +325,15 @@ func run(command string, job orchestrator.Job, policyChecker policy.Checker, org _, _, err = reporter.Report(planReportMessage, planPolicyFormatter) if err != nil { - log.Printf("Failed to report plan. %v", err) + slog.Error("Failed to report plan.", "error", err) } msg := fmt.Sprintf("Plan is not allowed") - log.Printf(msg) + slog.Error(msg) return nil, msg, fmt.Errorf(msg) } else { _, _, err := reporter.Report("Terraform plan validation checks succeeded :white_check_mark:", planPolicyFormatter) if err != nil { - log.Printf("Failed to report plan. %v", err) + slog.Error("Failed to report plan.", "error", err) } reportPlanSummary(reporter, planSummary) } @@ -357,7 +359,7 @@ func run(command string, job orchestrator.Job, policyChecker policy.Checker, org appliesPerProject[job.ProjectName] = false err := usage.SendUsageRecord(requestedBy, job.EventName, "apply") if err != nil { - log.Printf("failed to send usage report. %v", err) + slog.Error("failed to send usage report.", "error", err) } err = prService.SetStatus(*job.PullRequestNumber, "pending", job.ProjectName+"/apply") if err != nil { @@ -377,7 +379,7 @@ func run(command string, job orchestrator.Job, policyChecker policy.Checker, org msg := fmt.Sprintf("Failed to check if PR is mergeable. %v", err) return nil, msg, fmt.Errorf(msg) } - log.Printf("PR status, mergeable: %v, merged: %v and skipMergeCheck %v\n", isMergeable, isMerged, job.SkipMergeCheck) + slog.Info("PR status Information", "mergeable", isMergeable, "merged", isMerged, "skipMergeCheck", job.SkipMergeCheck) if !isMergeable && !isMerged && !job.SkipMergeCheck { comment := reportApplyMergeabilityError(reporter) prService.SetStatus(*job.PullRequestNumber, "failure", job.ProjectName+"/apply") @@ -392,31 +394,31 @@ func run(command string, job orchestrator.Job, policyChecker policy.Checker, org terraformPlanJsonStr, err := executor.RetrievePlanJson() if err != nil { msg := fmt.Sprintf("Failed to retrieve stored plan. 
%v", err) - log.Printf(msg) + slog.Error("Failed to retrieve stored plan.", "error", err) return nil, msg, fmt.Errorf(msg) } _, violations, err := policyChecker.CheckPlanPolicy(SCMrepository, SCMOrganisation, job.ProjectName, job.ProjectDir, terraformPlanJsonStr) if err != nil { msg := fmt.Sprintf("Failed to check plan policy. %v", err) - log.Printf(msg) + slog.Error("Failed to check plan policy.", "error", err) return nil, msg, fmt.Errorf(msg) } planPolicyViolations = violations } else { - log.Printf("Skipping plan policy checks because plan storage is not configured.") + slog.Info("Skipping plan policy checks because plan storage is not configured.") planPolicyViolations = []string{} } allowedToApply, err := policyChecker.CheckAccessPolicy(orgService, &prService, SCMOrganisation, SCMrepository, job.ProjectName, job.ProjectDir, command, job.PullRequestNumber, requestedBy, planPolicyViolations) if err != nil { msg := fmt.Sprintf("Failed to run plan policy check before apply. %v", err) - log.Printf(msg) + slog.Error("Failed to run plan policy check before apply", "error", err) return nil, msg, fmt.Errorf(msg) } if !allowedToApply { msg := reportPolicyError(job.ProjectName, command, requestedBy, reporter) - log.Println(msg) + slog.Error(msg) return nil, msg, errors.New(msg) } @@ -425,7 +427,7 @@ func run(command string, job orchestrator.Job, policyChecker policy.Checker, org applySummary, applyPerformed, output, err := diggerExecutor.Apply() if err != nil { //TODO reuse executor error handling - log.Printf("Failed to Run digger apply command. %v", err) + slog.Error("Failed to Run digger apply command.", "error", err) err := prService.SetStatus(*job.PullRequestNumber, "failure", job.ProjectName+"/apply") if err != nil { msg := fmt.Sprintf("Failed to set PR status. %v", err) @@ -453,12 +455,12 @@ func run(command string, job orchestrator.Job, policyChecker policy.Checker, org case "digger destroy": err := usage.SendUsageRecord(requestedBy, job.EventName, "destroy") if err != nil { - log.Printf("Failed to send usage report. %v", err) + slog.Error("Failed to send usage report.", "error", err) } _, err = diggerExecutor.Destroy() if err != nil { - log.Printf("Failed to Run digger destroy command. %v", err) + slog.Error("Failed to Run digger destroy command.", "error", err) msg := fmt.Sprintf("failed to run digger destroy command: %v", err) return nil, msg, fmt.Errorf("failed to Run digger apply command. %v", err) } @@ -468,7 +470,7 @@ func run(command string, job orchestrator.Job, policyChecker policy.Checker, org case "digger unlock": err := usage.SendUsageRecord(requestedBy, job.EventName, "unlock") if err != nil { - log.Printf("failed to send usage report. %v", err) + slog.Error("failed to send usage report.", "error", err) } err = diggerExecutor.Unlock() if err != nil { @@ -479,13 +481,13 @@ func run(command string, job orchestrator.Job, policyChecker policy.Checker, org if planStorage != nil { err = planStorage.DeleteStoredPlan(planPathProvider.ArtifactName(), planPathProvider.StoredPlanFilePath()) if err != nil { - log.Printf("failed to delete stored plan file '%v': %v", planPathProvider.StoredPlanFilePath(), err) + slog.Error("failed to delete stored plan file", "stored plan file path", planPathProvider.StoredPlanFilePath(), "error", err) } } case "digger lock": err := usage.SendUsageRecord(requestedBy, job.EventName, "lock") if err != nil { - log.Printf("failed to send usage report. 
%v", err) + slog.Error("failed to send usage report.", "error", err) } err = diggerExecutor.Lock() if err != nil { @@ -502,17 +504,17 @@ func run(command string, job orchestrator.Job, policyChecker policy.Checker, org func reportApplyMergeabilityError(reporter reporting.Reporter) string { comment := "cannot perform Apply since the PR is not currently mergeable" - log.Println(comment) + slog.Error(comment) if reporter.SupportsMarkdown() { _, _, err := reporter.Report(comment, coreutils.AsCollapsibleComment("Apply error", false)) if err != nil { - log.Printf("error publishing comment: %v\n", err) + slog.Error("error publishing comment", "error", err) } } else { _, _, err := reporter.Report(comment, coreutils.AsComment("Apply error")) if err != nil { - log.Printf("error publishing comment: %v\n", err) + slog.Error("error publishing comment", "error", err) } } return comment @@ -529,7 +531,7 @@ func reportTerraformPlanOutput(reporter reporting.Reporter, projectId string, pl _, _, err := reporter.Report(plan, formatter) if err != nil { - log.Printf("Failed to report plan. %v", err) + slog.Error("Failed to report plan.", "error", err) } } @@ -544,7 +546,7 @@ func reportPlanSummary(reporter reporting.Reporter, summary string) { _, _, err := reporter.Report("\n"+summary, formatter) if err != nil { - log.Printf("Failed to report plan summary. %v", err) + slog.Error("Failed to report plan summary.", "error", err) } } @@ -556,7 +558,7 @@ func reportEmptyPlanOutput(reporter reporting.Reporter, projectId string) { // suppress the comment (if reporter is suppressible) reporter.Suppress() if err != nil { - log.Printf("Failed to report plan. %v", err) + slog.Error("Failed to report plan.", "error", err) } } @@ -573,7 +575,7 @@ func RunJob( ) error { runStartedAt := time.Now() SCMOrganisation, SCMrepository := utils.ParseRepoNamespace(repo) - log.Printf("Running '%s' for project '%s'\n", job.Commands, job.ProjectName) + slog.Info("Running commands for project", "commands", job.Commands, "project name", job.ProjectName) for _, command := range job.Commands { @@ -586,12 +588,12 @@ func RunJob( if !allowedToPerformCommand { msg := fmt.Sprintf("User %s is not allowed to perform action: %s. Check your policies", requestedBy, command) if err != nil { - log.Printf("Error publishing comment: %v", err) + slog.Error("Error publishing comment.", "error", err) } - log.Println(msg) + slog.Error(msg) err = backendApi.ReportProjectRun(repo, job.ProjectName, runStartedAt, time.Now(), "FORBIDDEN", command, msg) if err != nil { - log.Printf("Error reporting Run: %v", err) + slog.Error("Error reporting Run.", "error", err) } return errors.New(msg) } @@ -648,71 +650,71 @@ func RunJob( case "digger plan": err := usage.SendUsageRecord(requestedBy, job.EventName, "plan") if err != nil { - log.Printf("Failed to send usage report. %v", err) + slog.Error("Failed to send usage report.", "error", err) } _, _, _, plan, planJsonOutput, err := diggerExecutor.Plan() if err != nil { msg := fmt.Sprintf("Failed to Run digger plan command. 
%v", err) - log.Printf(msg) + slog.Error(msg) err = backendApi.ReportProjectRun(repo, job.ProjectName, runStartedAt, time.Now(), "FAILED", command, msg) if err != nil { - log.Printf("Error reporting Run: %v", err) + slog.Error("Error reporting Run.", "error", err) } return fmt.Errorf(msg) } planIsAllowed, messages, err := policyChecker.CheckPlanPolicy(SCMrepository, SCMOrganisation, job.ProjectName, job.ProjectDir, planJsonOutput) - log.Print(strings.Join(messages, "\n")) + slog.Info(strings.Join(messages, "\n")) if err != nil { msg := fmt.Sprintf("Failed to validate plan %v", err) - log.Printf(msg) + slog.Error(msg) err = backendApi.ReportProjectRun(repo, job.ProjectName, runStartedAt, time.Now(), "FAILED", command, msg) if err != nil { - log.Printf("Error reporting Run: %v", err) + slog.Error("Error reporting Run.", "error", err) } return fmt.Errorf(msg) } if !planIsAllowed { msg := fmt.Sprintf("Plan is not allowed") - log.Printf(msg) + slog.Error(msg) err = backendApi.ReportProjectRun(repo, job.ProjectName, runStartedAt, time.Now(), "FAILED", command, msg) if err != nil { - log.Printf("Error reporting Run: %v", err) + slog.Error("Error reporting Run.", "error", err) } return fmt.Errorf(msg) } else { err = backendApi.ReportProjectRun(repo, job.ProjectName, runStartedAt, time.Now(), "SUCCESS", command, plan) if err != nil { - log.Printf("Error reporting Run: %v", err) + slog.Error("Error reporting Run.", "error", err) } } case "digger apply": err := usage.SendUsageRecord(requestedBy, job.EventName, "apply") if err != nil { - log.Printf("Failed to send usage report. %v", err) + slog.Error("Failed to send usage report.", "error", err) } _, _, output, err := diggerExecutor.Apply() if err != nil { msg := fmt.Sprintf("Failed to Run digger apply command. %v", err) - log.Printf(msg) + slog.Error(msg) err = backendApi.ReportProjectRun(repo, job.ProjectName, runStartedAt, time.Now(), "FAILED", command, msg) if err != nil { - log.Printf("Error reporting Run: %v", err) + slog.Error("Error reporting Run.", "error", err) } return fmt.Errorf(msg) } err = backendApi.ReportProjectRun(repo, job.ProjectName, runStartedAt, time.Now(), "SUCCESS", command, output) if err != nil { - log.Printf("Error reporting Run: %v", err) + slog.Error("Error reporting Run.", "error", err) } case "digger destroy": err := usage.SendUsageRecord(requestedBy, job.EventName, "destroy") if err != nil { - log.Printf("Failed to send usage report. %v", err) + slog.Error("Failed to send usage report.", "error", err) } _, err = diggerExecutor.Destroy() if err != nil { - log.Printf("Failed to Run digger destroy command. %v", err) + slog.Error("Failed to Run digger destroy command.", "error", err) return fmt.Errorf("failed to Run digger apply command. %v", err) } @@ -723,7 +725,7 @@ func RunJob( } err = backendApi.ReportProjectRun(repo, job.ProjectName, runStartedAt, time.Now(), "SUCCESS", command, output) if err != nil { - log.Printf("Error reporting Run: %v", err) + slog.Error("Error reporting Run.", "error", err) } } @@ -734,40 +736,40 @@ func RunJob( func runDriftDetection(policyChecker policy.Checker, SCMOrganisation string, SCMrepository string, projectName string, requestedBy string, eventName string, diggerExecutor execution.Executor, notification *core_drift.Notification) (string, error) { err := usage.SendUsageRecord(requestedBy, eventName, "drift-detect") if err != nil { - log.Printf("Failed to send usage report. 
%v", err) + slog.Error("Failed to send usage report.", "error", err) } policyEnabled, err := policyChecker.CheckDriftPolicy(SCMOrganisation, SCMrepository, projectName) if err != nil { msg := fmt.Sprintf("failed to check drift policy. %v", err) - log.Printf(msg) + slog.Error(msg) return msg, fmt.Errorf(msg) } if !policyEnabled { msg := "skipping this drift application since it is not enabled for this project" - log.Printf(msg) + slog.Info(msg) return msg, nil } _, planPerformed, nonEmptyPlan, plan, _, err := diggerExecutor.Plan() if err != nil { msg := fmt.Sprintf("failed to Run digger plan command. %v", err) - log.Printf(msg) + slog.Error(msg) return msg, fmt.Errorf(msg) } if planPerformed && nonEmptyPlan { if notification == nil { - log.Print("Warning: no notification configured, not sending any notifications") + slog.Warn("Warning: no notification configured, not sending any notifications") return plan, nil } err := (*notification).Send(projectName, plan) if err != nil { - log.Printf("Error sending drift drift: %v", err) + slog.Error("Error sending drift drift.", "error", err) } } else if planPerformed && !nonEmptyPlan { - log.Printf("No drift detected") + slog.Info("No drift detected") } else { - log.Printf("No plan performed") + slog.Info("No plan performed") } return plan, nil } @@ -778,8 +780,9 @@ func SortedCommandsByDependency(project []orchestrator.Job, dependencyGraph *gra return s < s2 }) if err != nil { - log.Printf("dependencyGraph: %v", dependencyGraph) - log.Fatalf("failed to sort commands by dependency, %v", err) + slog.Error("failed to sort commands by dependency", "error", err) + slog.Debug("Dependency Graph Debug", "DependencyGraph", dependencyGraph) + os.Exit(1) } for _, node := range sortedGraph { for _, command := range project { @@ -826,6 +829,6 @@ func MergePullRequest(ciService ci.PullRequestService, prNumber int, mergeStrate log.Fatalf("failed to merge PR, %v", err) } } else { - log.Printf("PR is already merged, skipping merge step") + slog.Info("PR is already merged, skipping merge step") } } diff --git a/cli/pkg/drift/slack.go b/cli/pkg/drift/slack.go index a8b8a22b4..68f065535 100644 --- a/cli/pkg/drift/slack.go +++ b/cli/pkg/drift/slack.go @@ -5,7 +5,7 @@ import ( "encoding/json" "fmt" "io" - "log" + "log/slog" "net/http" "regexp" "strings" @@ -54,34 +54,30 @@ func (slack SlackNotification) Send(projectName string, plan string) error { jsonData, err := json.Marshal(slackMessage) if err != nil { - msg := fmt.Sprintf("failed to marshal slack message. %v", err) - log.Printf(msg) - return fmt.Errorf(msg) + slog.Error("failed to marshal slack message", "error", err) + return err } request, err := http.NewRequest("POST", slack.Url, bytes.NewBuffer(jsonData)) if err != nil { - msg := fmt.Sprintf("failed to create slack drift request. %v", err) - log.Printf(msg) - return fmt.Errorf(msg) + slog.Error("failed to create slack drift request", "error", err) + return err } request.Header.Set("Content-Type", "application/json") resp, err := httpClient.Do(request) if err != nil { - msg := fmt.Sprintf("failed to send slack drift request. %v", err) - log.Printf(msg) - return fmt.Errorf(msg) + slog.Error("failed to send slack drift request", "error", err) + return err } if resp.StatusCode != 200 { body, err := io.ReadAll(resp.Body) if err != nil { - msg := fmt.Sprintf("failed to read response body. 
%v", err) - log.Printf(msg) - return fmt.Errorf(msg) + slog.Error("failed to read response body", "error", err) + return err } + slog.Error("failed to send slack drift request", "status code", resp.Status, "body", body) msg := fmt.Sprintf("failed to send slack drift request. %v. Message: %v", resp.Status, body) - log.Printf(msg) return fmt.Errorf(msg) } resp.Body.Close() diff --git a/cli/pkg/github/github.go b/cli/pkg/github/github.go index ff21fa0af..567f3a645 100644 --- a/cli/pkg/github/github.go +++ b/cli/pkg/github/github.go @@ -20,13 +20,29 @@ import ( "github.com/diggerhq/digger/libs/storage" "github.com/google/go-github/v61/github" "gopkg.in/yaml.v3" - "log" + "log/slog" "os" "strings" ) +func initLogger() { + logLevel := os.Getenv("DIGGER_LOG_LEVEL") + var level slog.Leveler + if logLevel == "DEBUG" { + level = slog.LevelDebug + } else { + level = slog.LevelInfo + } + logger := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{ + Level: level, + })) + + slog.SetDefault(logger) +} + func GitHubCI(lock core_locking.Lock, policyCheckerProvider core_policy.PolicyCheckerProvider, backendApi core_backend.Api, reportingStrategy reporting.ReportStrategy, githubServiceProvider dg_github.GithubServiceProvider, commentUpdaterProvider comment_updater.CommentUpdaterProvider, driftNotificationProvider drift.DriftNotificationProvider) { - log.Printf("Using GitHub.\n") + initLogger() + slog.Info("Using GitHub (backendless)") githubActor := os.Getenv("GITHUB_ACTOR") if githubActor != "" { usage.SendUsageRecord(githubActor, "log", "initialize") @@ -47,7 +63,7 @@ func GitHubCI(lock core_locking.Lock, policyCheckerProvider core_policy.PolicyCh diggerGitHubToken := os.Getenv("DIGGER_GITHUB_TOKEN") if diggerGitHubToken != "" { - log.Println("GITHUB_TOKEN has been overridden with DIGGER_GITHUB_TOKEN") + slog.Info("GITHUB_TOKEN has been overridden with DIGGER_GITHUB_TOKEN") ghToken = diggerGitHubToken } @@ -69,7 +85,7 @@ func GitHubCI(lock core_locking.Lock, policyCheckerProvider core_policy.PolicyCh if err != nil { usage.ReportErrorAndExit(githubActor, fmt.Sprintf("Failed to parse GitHub context. %s", err), 3) } - log.Printf("GitHub context parsed successfully\n") + slog.Info("GitHub context parsed successfully") ghEvent := parsedGhContext.Event @@ -94,16 +110,17 @@ func GitHubCI(lock core_locking.Lock, policyCheckerProvider core_policy.PolicyCh if err != nil { usage.ReportErrorAndExit(githubActor, fmt.Sprintf("Failed to read Digger digger_config. %s", err), 4) } - log.Printf("Digger digger_config read successfully\n") + slog.Info("Digger digger_config read successfully") if diggerConfig.PrLocks == false { - log.Printf("info: Using noop lock as configured in digger.yml") + slog.Info("Using noop lock as configured in digger.yml") lock = core_locking.NoOpLock{} } yamlData, err := yaml.Marshal(diggerConfigYaml) if err != nil { - log.Fatalf("error: %v", err) + slog.Error("error while marshalling yaml", "error", err) + os.Exit(1) } // Convert to string @@ -113,7 +130,7 @@ func GitHubCI(lock core_locking.Lock, policyCheckerProvider core_policy.PolicyCh for _, p := range diggerConfig.Projects { err = backendApi.ReportProject(repo, p.Name, yamlStr) if err != nil { - log.Printf("Failed to report project %s. %s\n", p.Name, err) + slog.Error("Failed to report project", "projectName", p.Name, "error", err) } } @@ -227,11 +244,11 @@ func GitHubCI(lock core_locking.Lock, policyCheckerProvider core_policy.PolicyCh usage.ReportErrorAndExit(githubActor, fmt.Sprintf("Failed to process GitHub event. 
%s", err), 6) } } - log.Printf("Following projects are impacted by pull request #%d\n", prNumber) + slog.Info("Following projects are impacted by pull request", "prNumber", prNumber) for _, p := range impactedProjects { - log.Printf("- %s\n", p.Name) + slog.Info(fmt.Sprintf("- %s\n", p.Name)) } - log.Println("GitHub event processed successfully") + slog.Info("GitHub event processed successfully") if dg_github.CheckIfHelpComment(ghEvent) { reply := utils.GetCommands() @@ -268,7 +285,7 @@ func GitHubCI(lock core_locking.Lock, policyCheckerProvider core_policy.PolicyCh if err != nil { usage.ReportErrorAndExit(githubActor, fmt.Sprintf("Failed to convert GitHub event to commands. %s", err), 7) } - log.Println("GitHub event converted to commands successfully") + slog.Info("GitHub event converted to commands successfully") logCommands(jobs) err = githubPrService.SetOutput(prNumber, "DIGGER_PR_NUMBER", fmt.Sprintf("%v", prNumber)) @@ -303,7 +320,7 @@ func GitHubCI(lock core_locking.Lock, policyCheckerProvider core_policy.PolicyCh if diggerConfig.AutoMerge && allAppliesSuccessful && atLeastOneApply && coversAllImpactedProjects { digger.MergePullRequest(&githubPrService, prNumber, diggerConfig.AutoMergeStrategy) - log.Println("PR merged successfully") + slog.Info("PR merged successfully") } if allAppliesSuccessful { @@ -315,7 +332,7 @@ func GitHubCI(lock core_locking.Lock, policyCheckerProvider core_policy.PolicyCh } } - log.Println("Commands executed successfully") + slog.Info("Commands executed successfully") } usage.ReportErrorAndExit(githubActor, "Digger finished successfully", 0) @@ -330,5 +347,6 @@ func logCommands(projectCommands []scheduler.Job) { } logMessage += "\n" } - log.Print(logMessage) + // TODO: improve the error message + slog.Info(logMessage) } diff --git a/cli/pkg/spec/manual.go b/cli/pkg/spec/manual.go index 7f04656d0..eea5d7818 100644 --- a/cli/pkg/spec/manual.go +++ b/cli/pkg/spec/manual.go @@ -12,7 +12,7 @@ import ( "github.com/diggerhq/digger/libs/scheduler" "github.com/diggerhq/digger/libs/spec" "github.com/diggerhq/digger/libs/storage" - "log" + "log/slog" "os" "os/exec" ) @@ -66,7 +66,7 @@ func RunSpecManualCommand( // download zip artefact, git init and prepare for job execution tempDir, err := os.MkdirTemp("", "downloaded-zip-") if err != nil { - log.Printf("failed to create temp dir: %v", err) + slog.Error("failed to create temp dir", "error", err) os.Exit(1) } @@ -78,7 +78,7 @@ func RunSpecManualCommand( zipPath := *absoluteFileName err = utils.ExtractZip(zipPath, tempDir) if err != nil { - log.Printf("ExtractZip err: %v", err) + slog.Error("ExtractZip err", "error", err) usage.ReportErrorAndExit(spec.VCS.Actor, fmt.Sprintf("artefact zip extraction err: %v", err), 1) } @@ -86,14 +86,14 @@ func RunSpecManualCommand( // remove the zipPath err = os.Remove(zipPath) if err != nil { - log.Printf("os.Remove err: %v", err) + slog.Error("os.Remove error", "error", err) usage.ReportErrorAndExit(spec.VCS.Actor, fmt.Sprintf("zip path removal err: %v", err), 1) } // Navigating to our diractory err = os.Chdir(tempDir) if err != nil { - log.Printf("Chdir err: %v", err) + slog.Error("Chdir err", "error", err) usage.ReportErrorAndExit(spec.VCS.Actor, fmt.Sprintf("Chdir err: %v", err), 1) } @@ -135,9 +135,9 @@ func RunSpecManualCommand( commentUpdater := comment_summary.NoopCommentUpdater{} // do not change these placeholders as they are parsed by dgctl to stream logs - log.Printf("<========= DIGGER RUNNING IN MANUAL MODE =========>") + slog.Info("<========= DIGGER RUNNING IN MANUAL 
MODE =========>") allAppliesSuccess, _, err := digger.RunJobs(jobs, prService, orgService, lock, reporter, planStorage, policyChecker, commentUpdater, noopBackendApi, spec.JobId, false, false, commentId, currentDir) - log.Printf("<========= DIGGER COMPLETED =========>") + slog.Info("<========= DIGGER COMPLETED =========>") if err != nil || allAppliesSuccess == false { usage.ReportErrorAndExit(spec.VCS.RepoOwner, "Terraform execution failed", 1) } diff --git a/cli/pkg/spec/spec.go b/cli/pkg/spec/spec.go index 361fc6a5d..a2b271e96 100644 --- a/cli/pkg/spec/spec.go +++ b/cli/pkg/spec/spec.go @@ -9,14 +9,14 @@ import ( "github.com/diggerhq/digger/libs/scheduler" "github.com/diggerhq/digger/libs/spec" "github.com/samber/lo" - "log" + "log/slog" "os" "os/exec" "time" ) func reportError(spec spec.Spec, backendApi backend2.Api, message string, err error) { - log.Printf(message) + slog.Error(message) _, reportingError := backendApi.ReportProjectJobStatus(spec.VCS.RepoName, spec.Job.ProjectName, spec.JobId, "failed", time.Now(), nil, "", "", "", nil) if reportingError != nil { usage.ReportErrorAndExit(spec.VCS.RepoOwner, fmt.Sprintf("Failed to run commands. %v", err), 5) @@ -39,7 +39,7 @@ func RunSpec( backendApi, err := backedProvider.GetBackendApi(spec.Backend) if err != nil { - log.Printf("could not get backend api: %v", err) + slog.Error("could not get backend api", "error", err) usage.ReportErrorAndExit(spec.VCS.Actor, fmt.Sprintf("could not get backend api: %v", err), 1) } @@ -52,7 +52,7 @@ func RunSpec( if spec.Job.Commit != "" { // checking out to the commit ID - log.Printf("fetching commit ID %v", spec.Job.Commit) + slog.Info("fetching commit ID", "commitId", spec.Job.Commit) fetchCmd := exec.Command("git", "fetch", "origin", spec.Job.Commit) fetchCmd.Stdout = os.Stdout fetchCmd.Stderr = os.Stderr @@ -63,7 +63,7 @@ func RunSpec( usage.ReportErrorAndExit(spec.VCS.Actor, fmt.Sprintf("error while checking out to commit sha: %v", err), 1) } - log.Printf("checking out to commit ID %v", spec.Job.Commit) + slog.Info("checking out to commit ID", "commitID", spec.Job.Commit) cmd := exec.Command("git", "checkout", spec.Job.Commit) cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr diff --git a/cli/pkg/usage/usage.go b/cli/pkg/usage/usage.go index e59f80eb0..779a0cd84 100644 --- a/cli/pkg/usage/usage.go +++ b/cli/pkg/usage/usage.go @@ -6,7 +6,7 @@ import ( "encoding/hex" "encoding/json" configuration "github.com/diggerhq/digger/libs/digger_config" - "log" + "log/slog" "net/http" "os" ) @@ -55,18 +55,22 @@ func sendPayload(payload interface{}) error { } jsonData, err := json.Marshal(payload) if err != nil { - log.Printf("Error marshalling usage record: %v", err) + slog.Error("Error marshalling usage record", "error", err) + return err + } + req, err := http.NewRequest("POST", "https://analytics.digger.dev", bytes.NewBuffer(jsonData)) + if err != nil { + slog.Error("Error creating request", "error", err) return err } - req, _ := http.NewRequest("POST", "https://analytics.digger.dev", bytes.NewBuffer(jsonData)) req.Header.Set("Content-Type", "application/json") resp, err := http.DefaultClient.Do(req) if err != nil { - log.Printf("Error sending telemetry: %v. If you are using digger in a firewalled environment, "+ + slog.Warn("Error received while sending telemetry: If you are using digger in a firewalled environment, "+ "please consider whitelisting analytics.digger.dev. 
You can also disable this message by setting "+
-			"telemetry: false in digger.yml", err)
+			"telemetry: false in digger.yml", "error", err)
 		return err
 	}
 	defer resp.Body.Close()
@@ -76,7 +80,7 @@
 func init() {
 	currentDir, err := os.Getwd()
 	if err != nil {
-		log.Printf("Failed to get current dir. %s", err)
+		slog.Error("Failed to get current dir", "error", err)
 	}
 	notEmpty := func(key string) bool {
 		return os.Getenv(key) != ""
@@ -109,10 +113,10 @@
 }
 
 func ReportErrorAndExit(repoOwner string, message string, exitCode int) {
-	log.Println(message)
+	slog.Error(message)
 	err := SendLogRecord(repoOwner, message)
 	if err != nil {
-		log.Printf("Failed to send log record. %s\n", err)
+		slog.Error("Failed to send log record.", "error", err)
 	}
 	os.Exit(exitCode)
 }
diff --git a/libs/scheduler/aws.go b/libs/scheduler/aws.go
index 5c3bb4f78..2ea99bc37 100644
--- a/libs/scheduler/aws.go
+++ b/libs/scheduler/aws.go
@@ -297,6 +297,7 @@ type GithubAwsTokenFetcher struct {
 }
 
 func (fetcher *GithubAwsTokenFetcher) SetAudience(audience string) {
+	slog.Debug("GithubAwsTokenFetcher setting audience", "audience", audience)
 	fetcher.audience = audience
 }
 
@@ -317,6 +318,9 @@ func (fetcher *GithubAwsTokenFetcher) GetIdentityToken() ([]byte, error) {
 	url := fmt.Sprintf("%v&audience=%v", tokenIdUrl, audience)
 
 	slog.Debug("Fetching GitHub identity token", "audience", audienceDomain)
+	slog.Debug("len(ACTIONS_ID_TOKEN_REQUEST_URL)", "length of ACTIONS_ID_TOKEN_REQUEST_URL", len(tokenIdUrl))
+	slog.Debug("len(ACTIONS_ID_TOKEN_REQUEST_TOKEN)", "length of ACTIONS_ID_TOKEN_REQUEST_TOKEN", len(bearerToken))
+	slog.Debug("audience (escaped)", "audience", audience)
 
 	req, err := http.NewRequest("GET", url, nil)
 	if err != nil {
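
Note: the snippet below is not part of the patch. It is a minimal, runnable sketch of the logging pattern this change set standardises on: a slog text handler whose level is driven by DIGGER_LOG_LEVEL, as in the initLogger() functions added in cli/cmd/digger/default.go and cli/pkg/github/github.go, with values passed as key/value attributes instead of printf verbs. The example main(), its sample command/project values, and the digger.yml read are illustrative assumptions only.

// Sketch of the slog setup and call style adopted by this diff.
// Only initLogger() mirrors code added above; main() is illustrative.
package main

import (
	"log/slog"
	"os"
)

// initLogger selects the level from DIGGER_LOG_LEVEL (DEBUG enables debug output,
// anything else defaults to Info) and installs a text handler on stdout as the
// process-wide default logger.
func initLogger() {
	level := slog.LevelInfo
	if os.Getenv("DIGGER_LOG_LEVEL") == "DEBUG" {
		level = slog.LevelDebug
	}
	logger := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: level}))
	slog.SetDefault(logger)
}

func main() {
	initLogger()

	// slog does not interpolate printf verbs such as %v; extra arguments are
	// treated as key/value attributes, so values go after the message.
	command, project := "digger plan", "staging"
	slog.Info("running command for project", "command", command, "project", project)

	// Errors travel as a structured attribute rather than being formatted into the message.
	if _, err := os.Open("digger.yml"); err != nil {
		slog.Error("failed to read digger config", "error", err)
	}
}

Keeping the handler on stdout and defaulting to Info preserves the behaviour of the log.SetOutput(os.Stdout) init removed from cli/cmd/digger/main.go, while making debug output an explicit opt-in via DIGGER_LOG_LEVEL.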