Remote environment: Support infrastructure files for Terraform. #4973

Open · wants to merge 3 commits into main
21 changes: 21 additions & 0 deletions cli/azd/pkg/environment/azdcontext/azdcontext.go
@@ -19,6 +19,7 @@ const EnvironmentDirectoryName = ".azure"
const DotEnvFileName = ".env"
const ConfigFileName = "config.json"
const ConfigFileVersion = 1
const EnvironmentInfraDirectoryName = "infra"

type AzdContext struct {
projectDirectory string
@@ -53,6 +54,26 @@ func (c *AzdContext) GetEnvironmentWorkDirectory(name string) string {
return filepath.Join(c.EnvironmentRoot(name), "wd")
}

func (c *AzdContext) GetEnvironmentInfraDirectory(name string) string {
return filepath.Join(c.EnvironmentRoot(name), EnvironmentInfraDirectoryName)
}

func (c *AzdContext) GetEnvironmentInfraFiles(name string) []string {
var files []string
infraDir := c.GetEnvironmentInfraDirectory(name)

entries, err := os.ReadDir(infraDir)
if err == nil {
for _, entry := range entries {
if !entry.IsDir() {
files = append(files, filepath.Join(infraDir, entry.Name()))
}
}
}

return files
}

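For context, a small usage sketch of the two new helpers (the project directory and environment name below are hypothetical):

```go
package main

import (
	"fmt"

	"github.com/azure/azure-dev/cli/azd/pkg/environment/azdcontext"
)

func main() {
	// Hypothetical azd project root.
	azdCtx := azdcontext.NewAzdContextWithDirectory("/work/my-app")

	// Resolves to something like <project>/.azure/dev/infra for the "dev" environment.
	fmt.Println(azdCtx.GetEnvironmentInfraDirectory("dev"))

	// Files (not subdirectories) currently in that directory; nil if the
	// directory does not exist or cannot be read.
	for _, f := range azdCtx.GetEnvironmentInfraFiles("dev") {
		fmt.Println(f)
	}
}
```
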
// GetDefaultEnvironmentName returns the name of the default environment. Returns
// an empty string if a default environment has not been set.
func (c *AzdContext) GetDefaultEnvironmentName() (string, error) {
1 change: 1 addition & 0 deletions cli/azd/pkg/environment/manager.go
@@ -43,6 +43,7 @@ type Spec struct {

const DotEnvFileName = ".env"
const ConfigFileName = "config.json"
const InfraFilesDir = "infra"

var (
// Error returned when an environment with the specified name already exists
63 changes: 62 additions & 1 deletion cli/azd/pkg/environment/storage_blob_data_store.go
@@ -8,6 +8,7 @@ import (
"context"
"errors"
"fmt"
"io"
"os"
"path/filepath"
"slices"
@@ -19,6 +20,7 @@ import (
"github.com/azure/azure-dev/cli/azd/pkg/azsdk/storage"
"github.com/azure/azure-dev/cli/azd/pkg/config"
"github.com/azure/azure-dev/cli/azd/pkg/contracts"
"github.com/azure/azure-dev/cli/azd/pkg/environment/azdcontext"
"github.com/google/uuid"
"github.com/joho/godotenv"
)
@@ -31,12 +33,15 @@
type StorageBlobDataStore struct {
configManager config.Manager
blobClient storage.BlobClient
azdContext *azdcontext.AzdContext
}

func NewStorageBlobDataStore(configManager config.Manager, blobClient storage.BlobClient) RemoteDataStore {
func NewStorageBlobDataStore(configManager config.Manager, blobClient storage.BlobClient,
azdContext *azdcontext.AzdContext) RemoteDataStore {
return &StorageBlobDataStore{
configManager: configManager,
blobClient: blobClient,
azdContext: azdContext,
}
}

@@ -50,6 +55,11 @@ func (fs *StorageBlobDataStore) ConfigPath(env *Environment) string {
return fmt.Sprintf("%s/%s", env.name, ConfigFileName)
}

// InfraPath returns the path to the infra files for the given environment
func (fs *StorageBlobDataStore) InfraPath(env *Environment) string {
return fmt.Sprintf("%s/%s", env.name, InfraFilesDir)
}

func (sbd *StorageBlobDataStore) List(ctx context.Context) ([]*contracts.EnvListEnvironment, error) {
blobs, err := sbd.blobClient.Items(ctx)
if err != nil {
@@ -143,6 +153,22 @@ func (sbd *StorageBlobDataStore) Save(ctx context.Context, env *Environment, opt
return fmt.Errorf("uploading .env: %w", describeError(err))
}

// Upload Infra Files if any
filesToUpload := sbd.azdContext.GetEnvironmentInfraFiles(env.name)
Reviewer comment: Can files be deleted from the remote environment? How would I remove an individual file and have it removed from storage and from other machines using this environment?


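The PR does not appear to answer this. As a standalone illustration of what deletion propagation could look like, the sketch below computes which remote infra blobs no longer have a local counterpart; the helper name and sample paths are hypothetical, and actually deleting those blobs would also require a delete operation on the blob client, which this change does not add:

```go
package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// staleRemoteInfraPaths returns the remote blob paths under "<envName>/infra/"
// whose base names no longer exist among the local infra files.
func staleRemoteInfraPaths(envName string, remotePaths, localFiles []string) []string {
	local := map[string]bool{}
	for _, f := range localFiles {
		local[filepath.Base(f)] = true
	}

	prefix := envName + "/infra/"
	var stale []string
	for _, p := range remotePaths {
		if strings.HasPrefix(p, prefix) && !local[filepath.Base(p)] {
			stale = append(stale, p)
		}
	}
	return stale
}

func main() {
	remote := []string{"dev/infra/main.tf", "dev/infra/old.tf", "dev/.env"}
	local := []string{"/work/app/.azure/dev/infra/main.tf"}
	fmt.Println(staleRemoteInfraPaths("dev", remote, local)) // [dev/infra/old.tf]
}
```
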
for _, file := range filesToUpload {
Reviewer comment: What happens if some uploads succeed and some fail? This would leave the environment in an inconsistent state. Is there a way to make the upload more atomic? Maybe upload a zip of all the files instead of uploading them individually?

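Not part of this change, but a minimal sketch of the zip idea: bundle the infra files into a single in-memory archive and upload that one blob (for example as "<env>/infra.zip"), so a partial failure cannot leave the remote environment half-updated. The helper name and the file names in main are placeholders:

```go
package main

import (
	"archive/zip"
	"bytes"
	"fmt"
	"io"
	"os"
	"path/filepath"
)

// zipInfraFiles packs the given files into a single in-memory zip archive.
func zipInfraFiles(files []string) (*bytes.Buffer, error) {
	buf := &bytes.Buffer{}
	zw := zip.NewWriter(buf)

	for _, file := range files {
		f, err := os.Open(file)
		if err != nil {
			return nil, fmt.Errorf("opening %s: %w", file, err)
		}

		w, err := zw.Create(filepath.Base(file))
		if err != nil {
			f.Close()
			return nil, err
		}
		if _, err := io.Copy(w, f); err != nil {
			f.Close()
			return nil, err
		}
		f.Close()
	}

	if err := zw.Close(); err != nil {
		return nil, err
	}
	return buf, nil
}

func main() {
	// Placeholder file names; in Save these would come from GetEnvironmentInfraFiles.
	archive, err := zipInfraFiles([]string{"main.tf", "variables.tf"})
	if err != nil {
		fmt.Println("zip failed:", err)
		return
	}
	fmt.Printf("archive is %d bytes; upload it as a single blob\n", archive.Len())
}
```
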
fileName := filepath.Base(file)
fileBuffer, err := os.Open(file)
if err != nil {
return fmt.Errorf("failed to open file for upload: %w", err)
}
defer fileBuffer.Close()
err = sbd.blobClient.Upload(ctx, fmt.Sprintf("%s/%s/%s", env.name, InfraFilesDir, fileName), fileBuffer)
Reviewer comment: How are secrets protected in this approach? It seems like a user could upload a lot of sensitive information to a storage account, and I don't see any documentation on how to secure it correctly. There's no mention of turning on blob encryption, turning off public access, restricting network access, or any other best practices for putting tfstate in a storage account: https://learn.microsoft.com/en-us/azure/developer/azure-developer-cli/remote-environments-support

If I'm working across two computers, how is the other computer notified of the change so I have the latest state? Do I have to manually re-run azd env refresh?

if err != nil {
return fmt.Errorf("uploading infra file: %w", err)
}
}
Comment on lines +157 to +170

wbreza (Contributor) commented on Mar 26, 2025:
@HadwaAbdelhalem Instead of being selective here, what if we just upload any files that are in the environment folder? That would solve this specific use case as well as any other similar issues.

HadwaAbdelhalem (Contributor, Author) replied on Apr 1, 2025:
@wbreza I wanted to be more specific in case users have local tmp or test files in the environment folder that were not meant to be persisted, and to follow the pattern of environment items being well structured and defined (config file, env file) by adding infra files.

Reviewer comment: What would happen with terraform.tfstate.lock.info?

Synchronizing a local tfstate file doesn't seem to account for concurrency, locking, and conflict resolution, which might leave customers in a much worse state than just using Terraform remote state. It exposes customers to inconsistencies and potential data loss when a conflict occurs. I have not seen manually synchronizing a local state file recommended over just using Terraform remote state. I believe we should be recommending best practices to our customers using azd.

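Not something this PR implements, but a rough sketch of a middle ground between the two positions above: sync whatever sits in the environment folder while skipping files that clearly should not be persisted, such as Terraform lock files and scratch files. The exclusion list below is an assumption, not anything azd defines:

```go
package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// shouldSync reports whether a file in the local environment folder should be
// persisted to the remote store. The exclusions are illustrative only.
func shouldSync(path string) bool {
	base := filepath.Base(path)
	switch {
	case strings.HasSuffix(base, ".lock.info"): // e.g. terraform.tfstate.lock.info
		return false
	case strings.HasSuffix(base, ".tmp"), strings.HasSuffix(base, ".bak"):
		return false
	default:
		return true
	}
}

func main() {
	for _, f := range []string{"terraform.tfstate", "terraform.tfstate.lock.info", "scratch.tmp"} {
		fmt.Printf("%-30s sync=%v\n", f, shouldSync(f))
	}
}
```

Filtering only decides which files travel; it does not address the locking and conflict-resolution concerns raised above, which a Terraform remote state backend handles natively.
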

tracing.SetUsageAttributes(fields.StringHashed(fields.EnvNameKey, env.Name()))
return nil
}
@@ -191,6 +217,41 @@ func (sbd *StorageBlobDataStore) Reload(ctx context.Context, env *Environment) e
tracing.SetGlobalAttributes(fields.StringHashed(fields.SubscriptionIdKey, env.GetSubscriptionId()))
}

// Reload infra config file if any
items, err := sbd.blobClient.Items(ctx)
if err != nil {
return describeError(err)
}

for _, item := range items {
if strings.Contains(item.Path, sbd.InfraPath(env)) {
Reviewer comment: Using Contains is fragile – if one environment's name is a suffix of another, this strings.Contains check can incorrectly match blobs from the other environment. For example, an environment named "prod" would match blobs for "myprod".

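A sketch of one possible tightening, anchoring the comparison to the start of the blob path and requiring the separator (not a change this PR makes):

```go
// Match only blobs directly under "<env>/infra/", rather than any path that
// happens to contain the infra path as a substring.
if strings.HasPrefix(item.Path, sbd.InfraPath(env)+"/") {
	// A blob under "myprod/infra/" no longer matches an environment named "prod".
	// ... download and write the file as below ...
}
```
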

blobStream, err := sbd.blobClient.Download(ctx, item.Path)
if err != nil {
return fmt.Errorf("failed to download blob: %w", err)
}
defer blobStream.Close()

localInfraDir := sbd.azdContext.GetEnvironmentInfraDirectory(env.name)
err = os.MkdirAll(localInfraDir, os.ModePerm)
if err != nil {
return fmt.Errorf("failed to create directory: %w", err)
}

localFilePath := fmt.Sprintf("%s/%s", localInfraDir, filepath.Base(item.Path))
file, err := os.Create(localFilePath)
if err != nil {
return fmt.Errorf("failed to create local file: %w", err)
}
defer file.Close()

_, err = io.Copy(file, blobStream)
if err != nil {
return fmt.Errorf("failed to write blob to local file: %w", err)
}
}
}

return nil
}

15 changes: 10 additions & 5 deletions cli/azd/pkg/environment/storage_blob_data_store_test.go
@@ -12,6 +12,7 @@ import (

"github.com/azure/azure-dev/cli/azd/pkg/azsdk/storage"
"github.com/azure/azure-dev/cli/azd/pkg/config"
"github.com/azure/azure-dev/cli/azd/pkg/environment/azdcontext"
"github.com/azure/azure-dev/cli/azd/test/mocks"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
@@ -39,11 +40,12 @@ var validBlobItems []*storage.Blob = []*storage.Blob{
func Test_StorageBlobDataStore_List(t *testing.T) {
mockContext := mocks.NewMockContext(context.Background())
configManager := config.NewManager()
azdContext := azdcontext.NewAzdContextWithDirectory(t.TempDir())

t.Run("List", func(t *testing.T) {
blobClient := &MockBlobClient{}
blobClient.On("Items", *mockContext.Context).Return(validBlobItems, nil)
dataStore := NewStorageBlobDataStore(configManager, blobClient)
dataStore := NewStorageBlobDataStore(configManager, blobClient, azdContext)

envList, err := dataStore.List(*mockContext.Context)
require.NoError(t, err)
@@ -56,7 +58,7 @@
t.Run("Empty", func(t *testing.T) {
blobClient := &MockBlobClient{}
blobClient.On("Items", *mockContext.Context).Return(nil, storage.ErrContainerNotFound)
dataStore := NewStorageBlobDataStore(configManager, blobClient)
dataStore := NewStorageBlobDataStore(configManager, blobClient, azdContext)

envList, err := dataStore.List(*mockContext.Context)
require.NoError(t, err)
@@ -69,7 +71,8 @@ func Test_StorageBlobDataStore_SaveAndGet(t *testing.T) {
mockContext := mocks.NewMockContext(context.Background())
configManager := config.NewManager()
blobClient := &MockBlobClient{}
dataStore := NewStorageBlobDataStore(configManager, blobClient)
azdContext := azdcontext.NewAzdContextWithDirectory(t.TempDir())
dataStore := NewStorageBlobDataStore(configManager, blobClient, azdContext)

t.Run("Success", func(t *testing.T) {
envReader := io.NopCloser(bytes.NewReader([]byte("key1=value1")))
@@ -96,7 +99,8 @@ func Test_StorageBlobDataStore_Path(t *testing.T) {
func Test_StorageBlobDataStore_Path(t *testing.T) {
configManager := config.NewManager()
blobClient := &MockBlobClient{}
dataStore := NewStorageBlobDataStore(configManager, blobClient)
azdContext := azdcontext.NewAzdContextWithDirectory(t.TempDir())
dataStore := NewStorageBlobDataStore(configManager, blobClient, azdContext)

env := New("env1")
expected := fmt.Sprintf("%s/%s", env.name, DotEnvFileName)
@@ -108,7 +112,8 @@ func Test_StorageBlobDataStore_ConfigPath(t *testing.T) {
func Test_StorageBlobDataStore_ConfigPath(t *testing.T) {
configManager := config.NewManager()
blobClient := &MockBlobClient{}
dataStore := NewStorageBlobDataStore(configManager, blobClient)
azdContext := azdcontext.NewAzdContextWithDirectory(t.TempDir())
dataStore := NewStorageBlobDataStore(configManager, blobClient, azdContext)

env := New("env1")
expected := fmt.Sprintf("%s/%s", env.name, ConfigFileName)