From 25b47cb6f62e54ea11dd9692a5992631aef23838 Mon Sep 17 00:00:00 2001 From: Salman Toor Date: Mon, 10 Jun 2024 13:42:52 +0200 Subject: [PATCH] Feature/SK-855 | Monai Example (#620) --- examples/monai-2D-mednist/.dockerignore | 4 + examples/monai-2D-mednist/.gitignore | 6 + examples/monai-2D-mednist/README.rst | 169 ++++++++++++++++++ examples/monai-2D-mednist/client/data.py | 153 ++++++++++++++++ examples/monai-2D-mednist/client/fedn.yaml | 10 ++ examples/monai-2D-mednist/client/model.py | 64 +++++++ .../monai-2D-mednist/client/python_env.yaml | 12 ++ .../monai-2D-mednist/client/requirements.txt | 8 + examples/monai-2D-mednist/client/train.py | 133 ++++++++++++++ examples/monai-2D-mednist/client/validate.py | 97 ++++++++++ .../monai-2D-mednist/client_settings.yaml | 6 + .../docker-compose.override.yaml | 36 ++++ 12 files changed, 698 insertions(+) create mode 100644 examples/monai-2D-mednist/.dockerignore create mode 100644 examples/monai-2D-mednist/.gitignore create mode 100644 examples/monai-2D-mednist/README.rst create mode 100644 examples/monai-2D-mednist/client/data.py create mode 100644 examples/monai-2D-mednist/client/fedn.yaml create mode 100644 examples/monai-2D-mednist/client/model.py create mode 100644 examples/monai-2D-mednist/client/python_env.yaml create mode 100644 examples/monai-2D-mednist/client/requirements.txt create mode 100644 examples/monai-2D-mednist/client/train.py create mode 100644 examples/monai-2D-mednist/client/validate.py create mode 100644 examples/monai-2D-mednist/client_settings.yaml create mode 100644 examples/monai-2D-mednist/docker-compose.override.yaml diff --git a/examples/monai-2D-mednist/.dockerignore b/examples/monai-2D-mednist/.dockerignore new file mode 100644 index 000000000..8ba9024ad --- /dev/null +++ b/examples/monai-2D-mednist/.dockerignore @@ -0,0 +1,4 @@ +data +seed.npz +*.tgz +*.tar.gz \ No newline at end of file diff --git a/examples/monai-2D-mednist/.gitignore b/examples/monai-2D-mednist/.gitignore new file mode 100644 index 000000000..a9f01054b --- /dev/null +++ b/examples/monai-2D-mednist/.gitignore @@ -0,0 +1,6 @@ +data +*.npz +*.tgz +*.tar.gz +.mnist-pytorch +client.yaml \ No newline at end of file diff --git a/examples/monai-2D-mednist/README.rst b/examples/monai-2D-mednist/README.rst new file mode 100644 index 000000000..c2c536f27 --- /dev/null +++ b/examples/monai-2D-mednist/README.rst @@ -0,0 +1,169 @@ +FEDn Project: MonAI 2D Classification with the MedNIST Dataset (PyTorch) +------------------------------------------------------------------------ + +This is an example FEDn Project based on the MonAI 2D Classification with the MedNIST Dataset. +The example is intented as a minimalistic quickstart and automates the handling of training data +by letting the client download and create its partition of the dataset as it starts up. + +Links: + +- MonAI: https://monai.io/ +- Base example notebook: https://github.com/Project-MONAI/tutorials/blob/main/2d_classification/mednist_tutorial.ipynb +- MedNIST dataset: https://github.com/Project-MONAI/MONAI-extra-test-data/releases/download/0.8.1/MedNIST.tar.gz + +Prerequisites +------------- + +Using FEDn Studio: + +- `Python 3.8, 3.9, 3.10 or 3.11 `__ +- `A FEDn Studio account `__ + +If using pseudo-distributed mode with docker-compose: + +- `Docker `__ +- `Docker Compose `__ + +Creating the compute package and seed model +------------------------------------------- + +Install fedn: + +.. code-block:: + + pip install fedn + +Clone this repository, then locate into this directory: + +.. 
code-block::
+
+   git clone https://github.com/scaleoutsystems/fedn.git
+   cd fedn/examples/monai-2D-mednist
+
+Create the compute package:
+
+.. code-block::
+
+   fedn package create --path client
+
+This should create a file 'package.tgz' in the project folder.
+
+Next, generate a seed model (the first model in a global model trial):
+
+.. code-block::
+
+   fedn run build --path client
+
+This will create a seed model called 'seed.npz' in the root of the project. This step will take a few minutes, depending on hardware and internet connection (it builds a virtualenv).
+
+Using FEDn Studio
+-----------------
+
+Follow the guide here to set up your FEDn Studio project and learn how to connect clients (using token authentication): `Studio guide `__.
+On the step "Upload Files", upload 'package.tgz' and 'seed.npz' created above.
+
+Connecting clients:
+===================
+
+**NOTE: In case a different data path needs to be set, use the env variable FEDN_DATA_PATH.**
+
+.. code-block::
+
+   export FEDN_PACKAGE_EXTRACT_DIR=package
+   export FEDN_DATA_PATH=./data/
+   export FEDN_CLIENT_SETTINGS_PATH=/client_settings.yaml
+   fedn client start -in client.yaml --secure=True --force-ssl
+
+Connecting clients using Docker:
+================================
+
+For convenience, there is a Docker image hosted on ghcr.io with FEDn preinstalled. To start a client using Docker:
+
+.. code-block::
+
+   docker run \
+     -v $PWD/client.yaml:/app/client.yaml \
+     -v $PWD/client_settings.yaml:/app/client_settings.yaml \
+     -e FEDN_PACKAGE_EXTRACT_DIR=package \
+     -e FEDN_DATA_PATH=./data/ \
+     -e FEDN_CLIENT_SETTINGS_PATH=/app/client_settings.yaml \
+     ghcr.io/scaleoutsystems/fedn/fedn:0.9.0 run client -in client.yaml --force-ssl --secure=True
+
+
+**NOTE: The following instructions are only for SDK-based client communication and for local development environments using Docker.**
+
+
+Local development mode using Docker/docker compose
+--------------------------------------------------
+
+Follow the steps above to install FEDn and generate 'package.tgz' and 'seed.npz'.
+
+Start a pseudo-distributed FEDn network using docker-compose:
+
+.. code-block::
+
+   docker compose \
+     -f ../../docker-compose.yaml \
+     -f docker-compose.override.yaml \
+     up
+
+This starts up local services for MongoDB, Minio, the API Server, one Combiner and two clients.
+You can verify the deployment using these URLs:
+
+- API Server: http://localhost:8092/get_controller_status
+- Minio: http://localhost:9000
+- Mongo Express: http://localhost:8081
+
+Upload the package and seed model to the FEDn controller using the APIClient. In Python:
+
+.. code-block::
+
+   from fedn import APIClient
+   client = APIClient(host="localhost", port=8092)
+   client.set_active_package("package.tgz", helper="numpyhelper")
+   client.set_active_model("seed.npz")
+
+You can now start a training session with 5 rounds (default):
+
+.. code-block::
+
+   client.start_session()
+
+Automate experimentation with several clients
+=============================================
+
+If you want to scale the number of clients, you can do so by modifying ``docker-compose.override.yaml``.
For example, +in order to run with 3 clients, change the environment variable ``FEDN_NUM_DATA_SPLITS`` to 3, and add one more client +by copying ``client1`` and setting ``FEDN_DATA_PATH`` to ``/app/package/data3/`` + + +Access message logs and validation data from MongoDB +==================================================== + +You can access and download event logs and validation data via the API, and you can also as a developer obtain +the MongoDB backend data using pymongo or via the MongoExpress interface: + +- http://localhost:8081/db/fedn-network/ + +The credentials are as set in docker-compose.yaml in the root of the repository. + +Access global models +==================== + +You can obtain global model updates from the 'fedn-models' bucket in Minio: + +- http://localhost:9000 + +Reset the FEDn deployment +========================= + +To purge all data from a deployment incuding all session and round data, access the MongoExpress UI interface and +delete the entire ``fedn-network`` collection. Then restart all services. + +Clean up +======== +You can clean up by running + +.. code-block:: + + docker-compose -f ../../docker-compose.yaml -f docker-compose.override.yaml down -v diff --git a/examples/monai-2D-mednist/client/data.py b/examples/monai-2D-mednist/client/data.py new file mode 100644 index 000000000..0a8b5c306 --- /dev/null +++ b/examples/monai-2D-mednist/client/data.py @@ -0,0 +1,153 @@ +import os +import random + +import numpy as np +import PIL +import torch +import yaml +from monai.apps import download_and_extract + +dir_path = os.path.dirname(os.path.realpath(__file__)) +abs_path = os.path.abspath(dir_path) + +DATA_CLASSES = {"AbdomenCT": 0, "BreastMRI": 1, "CXR": 2, "ChestCT": 3, "Hand": 4, "HeadCT": 5} + + +def split_data(data_path="data/MedNIST", splits=100, validation_split=0.9): + # create clients + clients = {"client " + str(i): {"train": [], "validation": []} for i in range(splits)} + + for class_ in os.listdir(data_path): + if os.path.isdir(os.path.join(data_path, class_)): + patients_in_class = [os.path.join(class_, patient) for patient in os.listdir(os.path.join(data_path, class_))] + np.random.shuffle(patients_in_class) + chops = np.int32(np.linspace(0, len(patients_in_class), splits + 1)) + for split in range(splits): + p = patients_in_class[chops[split] : chops[split + 1]] + valsplit = np.int32(len(p) * validation_split) + + clients["client " + str(split)]["train"] += p[:valsplit] + clients["client " + str(split)]["validation"] += p[valsplit:] + + with open(os.path.join(os.path.dirname(data_path), "data_splits.yaml"), "w") as file: + yaml.dump(clients, file, default_flow_style=False) + + +def get_data(out_dir="data"): + """Get data from the external repository. + + :param out_dir: Path to data directory. 
If it does not exist, it is created.
+    :type out_dir: str
+    """
+    # Make dir if necessary
+    if not os.path.exists(out_dir):
+        os.mkdir(out_dir)
+
+    resource = "https://github.com/Project-MONAI/MONAI-extra-test-data/releases/download/0.8.1/MedNIST.tar.gz"
+    md5 = "0bc7306e7427e00ad1c5526a6677552d"
+
+    compressed_file = os.path.join(out_dir, "MedNIST.tar.gz")
+
+    data_dir = os.path.abspath(out_dir)
+    print("data_dir:", data_dir)
+    if os.path.exists(data_dir):
+        print("Path exists.")
+        if not os.path.exists(compressed_file):
+            print("Compressed file does not exist, downloading and extracting data.")
+            download_and_extract(resource, compressed_file, data_dir, md5)
+        else:
+            print("Files already exist.")
+
+    split_data()
+
+
+def get_classes(data_path):
+    """Get a list of classes from the dataset.
+
+    :param data_path: Path to data directory.
+    :type data_path: str
+    """
+    if data_path is None:
+        data_path = os.environ.get("FEDN_DATA_PATH", abs_path + "/data/MedNIST")
+
+    class_names = sorted(x for x in os.listdir(data_path) if os.path.isdir(os.path.join(data_path, x)))
+    return class_names
+
+
+def load_data(data_path, sample_size=None, is_train=True):
+    """Load data from disk.
+
+    :param data_path: Path to data directory.
+    :type data_path: str
+    :param sample_size: Number of images to draw from each class (None loads the full dataset).
+    :type sample_size: int
+    :param is_train: Whether to load training or test data.
+    :type is_train: bool
+    :return: Tuple of data and labels.
+    :rtype: tuple
+    """
+    if data_path is None:
+        data_path = os.environ.get("FEDN_DATA_PATH", abs_path + "/data/MedNIST")
+
+    class_names = get_classes(data_path)
+    num_class = len(class_names)
+
+    image_files_all = [[os.path.join(data_path, class_names[i], x) for x in os.listdir(os.path.join(data_path, class_names[i]))] for i in range(num_class)]
+
+    # To keep the dataset small, draw sample_size images from each class (None keeps everything).
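+    # random.sample picks the per-class subset at random, without replacement.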
+ if sample_size is None: + image_files = image_files_all + + else: + image_files = [random.sample(inner_list, sample_size) for inner_list in image_files_all] + + num_each = [len(image_files[i]) for i in range(num_class)] + image_files_list = [] + image_class = [] + for i in range(num_class): + image_files_list.extend(image_files[i]) + image_class.extend([i] * num_each[i]) + num_total = len(image_class) + image_width, image_height = PIL.Image.open(image_files_list[0]).size + + print(f"Total image count: {num_total}") + print(f"Image dimensions: {image_width} x {image_height}") + print(f"Label names: {class_names}") + print(f"Label counts: {num_each}") + + val_frac = 0.1 + length = len(image_files_list) + indices = np.arange(length) + np.random.shuffle(indices) + + val_split = int(val_frac * length) + val_indices = indices[:val_split] + train_indices = indices[val_split:] + + train_x = [image_files_list[i] for i in train_indices] + train_y = [image_class[i] for i in train_indices] + val_x = [image_files_list[i] for i in val_indices] + val_y = [image_class[i] for i in val_indices] + + print(f"Training count: {len(train_x)}, Validation count: " f"{len(val_x)}") + + if is_train: + return train_x, train_y + else: + return val_x, val_y, class_names + + +class MedNISTDataset(torch.utils.data.Dataset): + def __init__(self, data_path, image_files, transforms): + self.data_path = data_path + self.image_files = image_files + self.transforms = transforms + + def __len__(self): + return len(self.image_files) + + def __getitem__(self, index): + return (self.transforms(os.path.join(self.data_path, self.image_files[index])), DATA_CLASSES[os.path.dirname(self.image_files[index])]) + + +if __name__ == "__main__": + # Prepare data if not already done + get_data() diff --git a/examples/monai-2D-mednist/client/fedn.yaml b/examples/monai-2D-mednist/client/fedn.yaml new file mode 100644 index 000000000..b05504102 --- /dev/null +++ b/examples/monai-2D-mednist/client/fedn.yaml @@ -0,0 +1,10 @@ +python_env: python_env.yaml +entry_points: + build: + command: python model.py + startup: + command: python data.py + train: + command: python train.py + validate: + command: python validate.py \ No newline at end of file diff --git a/examples/monai-2D-mednist/client/model.py b/examples/monai-2D-mednist/client/model.py new file mode 100644 index 000000000..4f8596b85 --- /dev/null +++ b/examples/monai-2D-mednist/client/model.py @@ -0,0 +1,64 @@ +import collections + +import torch +from monai.networks.nets import DenseNet121 + +from fedn.utils.helpers.helpers import get_helper + +HELPER_MODULE = "numpyhelper" +helper = get_helper(HELPER_MODULE) + + +def compile_model(): + """Compile the MonAI model. + + :return: The compiled model. + :rtype: torch.nn.Module + """ + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + model = DenseNet121(spatial_dims=2, in_channels=1, out_channels=6).to(device) + return model + + +def save_parameters(model, out_path): + """Save model paramters to file. + + :param model: The model to serialize. + :type model: torch.nn.Module + :param out_path: The path to save to. + :type out_path: str + """ + parameters_np = [val.cpu().numpy() for _, val in model.state_dict().items()] + helper.save(parameters_np, out_path) + + +def load_parameters(model_path): + """Load model parameters from file and populate model. + + param model_path: The path to load from. + :type model_path: str + :return: The loaded model. 
+ :rtype: torch.nn.Module + """ + model = compile_model() + parameters_np = helper.load(model_path) + + params_dict = zip(model.state_dict().keys(), parameters_np) + state_dict = collections.OrderedDict({key: torch.tensor(x) for key, x in params_dict}) + model.load_state_dict(state_dict, strict=True) + return model + + +def init_seed(out_path="seed.npz"): + """Initialize seed model and save it to file. + + :param out_path: The path to save the seed model to. + :type out_path: str + """ + # Init and save + model = compile_model() + save_parameters(model, out_path) + + +if __name__ == "__main__": + init_seed("../seed.npz") diff --git a/examples/monai-2D-mednist/client/python_env.yaml b/examples/monai-2D-mednist/client/python_env.yaml new file mode 100644 index 000000000..7580ffb76 --- /dev/null +++ b/examples/monai-2D-mednist/client/python_env.yaml @@ -0,0 +1,12 @@ +name: monai-2d-mdnist +build_dependencies: + - pip + - setuptools + - wheel==0.37.1 +dependencies: + - torch==2.2.1 + - torchvision==0.17.1 + - fedn==0.9.0 + - monai-weekly[pillow, tqdm] + - scikit-learn + - tensorboard diff --git a/examples/monai-2D-mednist/client/requirements.txt b/examples/monai-2D-mednist/client/requirements.txt new file mode 100644 index 000000000..a37218f00 --- /dev/null +++ b/examples/monai-2D-mednist/client/requirements.txt @@ -0,0 +1,8 @@ +setuptools +wheel==0.37.1 +torch==2.2.1 +torchvision==0.17.1 +fedn==0.9.0 +monai-weekly[pillow, tqdm] +scikit-learn +tensorboard diff --git a/examples/monai-2D-mednist/client/train.py b/examples/monai-2D-mednist/client/train.py new file mode 100644 index 000000000..e3cb235c0 --- /dev/null +++ b/examples/monai-2D-mednist/client/train.py @@ -0,0 +1,133 @@ +import os +import sys + +import numpy as np +import torch +import yaml +from data import MedNISTDataset +from model import load_parameters, save_parameters +from monai.data import DataLoader +from monai.transforms import ( + Compose, + EnsureChannelFirst, + LoadImage, + RandFlip, + RandRotate, + RandZoom, + ScaleIntensity, +) + +from fedn.utils.helpers.helpers import save_metadata + +dir_path = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(os.path.abspath(dir_path)) + + +train_transforms = Compose( + [ + LoadImage(image_only=True), + EnsureChannelFirst(), + ScaleIntensity(), + RandRotate(range_x=np.pi / 12, prob=0.5, keep_size=True), + RandFlip(spatial_axis=0, prob=0.5), + RandZoom(min_zoom=0.9, max_zoom=1.1, prob=0.5), + ] +) + + +def train(in_model_path, out_model_path, data_path=None, client_settings_path=None): + """Complete a model update. + + Load model paramters from in_model_path (managed by the FEDn client), + perform a model update, and write updated paramters + to out_model_path (picked up by the FEDn client). + + :param in_model_path: The path to the input model. + :type in_model_path: str + :param out_model_path: The path to save the output model to. + :type out_model_path: str + :param data_path: The path to the data directory. + :type data_path: str + :param client_settings_path: path to a local client settings file. 
+    :type client_settings_path: str
+    """
+    if client_settings_path is None:
+        client_settings_path = os.environ.get("FEDN_CLIENT_SETTINGS_PATH", dir_path + "/client_settings.yaml")
+
+    print("client_settings_path: ", client_settings_path)
+    # Load local client settings (batch size, epochs, learning rate, data split index, ...).
+    with open(client_settings_path, "r") as fh:
+        try:
+            client_settings = dict(yaml.safe_load(fh))
+        except yaml.YAMLError:
+            raise
+
+    print("client settings: ", client_settings)
+    batch_size = client_settings["batch_size"]
+    max_epochs = client_settings["local_epochs"]
+    num_workers = client_settings["num_workers"]
+    split_index = client_settings["split_index"]
+    lr = client_settings["lr"]
+
+    if data_path is None:
+        data_path = os.environ.get("FEDN_DATA_PATH")
+
+    with open(os.path.join(os.path.dirname(data_path), "data_splits.yaml"), "r") as file:
+        clients = yaml.safe_load(file)
+
+    image_list = clients["client " + str(split_index)]["train"]
+
+    train_ds = MedNISTDataset(data_path=data_path, transforms=train_transforms, image_files=image_list)
+
+    train_loader = DataLoader(train_ds, batch_size=batch_size, shuffle=True, num_workers=num_workers)
+
+    # Load parameters and initialize model
+    model = load_parameters(in_model_path)
+    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+    optimizer = torch.optim.Adam(model.parameters(), lr)  # learning rate from client_settings.yaml
+    loss_function = torch.nn.CrossEntropyLoss()
+
+    # Train
+    epoch_loss_values = []
+    # writer = SummaryWriter()
+
+    for epoch in range(max_epochs):
+        print("-" * 10)
+        print(f"epoch {epoch + 1}/{max_epochs}")
+        model.train()
+        epoch_loss = 0
+        step = 0
+        for batch_data in train_loader:
+            step += 1
+            inputs, labels = batch_data[0].to(device), batch_data[1].to(device)
+            optimizer.zero_grad()
+            outputs = model(inputs)
+            loss = loss_function(outputs, labels)
+            loss.backward()
+            optimizer.step()
+            epoch_loss += loss.item()
+            print(f"{step}/{len(train_loader)}, train_loss: {loss.item():.4f}")
+
+        epoch_loss /= step
+        epoch_loss_values.append(epoch_loss)
+        print(f"epoch {epoch + 1} average loss: {epoch_loss:.4f}")
+
+    print("training completed!")
+
+    # Metadata needed for aggregation server side
+    metadata = {
+        # num_examples are mandatory
+        "num_examples": len(train_ds),
+        "batch_size": batch_size,
+        "epochs": max_epochs,
+        "lr": lr,
+    }
+
+    # Save JSON metadata file (mandatory)
+    save_metadata(metadata, out_model_path)
+
+    # Save model update (mandatory)
+    save_parameters(model, out_model_path)
+
+
+if __name__ == "__main__":
+    train(sys.argv[1], sys.argv[2])
diff --git a/examples/monai-2D-mednist/client/validate.py b/examples/monai-2D-mednist/client/validate.py
new file mode 100644
index 000000000..74292c34f
--- /dev/null
+++ b/examples/monai-2D-mednist/client/validate.py
@@ -0,0 +1,97 @@
+import os
+import sys
+
+import torch
+import yaml
+from data import DATA_CLASSES, MedNISTDataset
+from model import load_parameters
+from monai.data import DataLoader
+from monai.transforms import (
+    Compose,
+    EnsureChannelFirst,
+    LoadImage,
+    ScaleIntensity,
+)
+from sklearn.metrics import accuracy_score, classification_report, f1_score
+
+from fedn.utils.helpers.helpers import save_metrics
+
+dir_path = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.abspath(dir_path))
+
+val_transforms = Compose([LoadImage(image_only=True), EnsureChannelFirst(), ScaleIntensity()])
+
+
+def validate(in_model_path, out_json_path, data_path=None, client_settings_path=None):
+    """Validate model.
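+
+    Computes accuracy, macro F1-score and a per-class classification report
+    on the client's local validation split, and saves them with save_metrics.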
+ + :param in_model_path: The path to the input model. + :type in_model_path: str + :param out_json_path: The path to save the output JSON to. + :type out_json_path: str + :param data_path: The path to the data file. + :type data_path: str + :param client_settings_path: The path to the local client settings file. + :type client_settings_path: str + """ + if client_settings_path is None: + client_settings_path = os.environ.get("FEDN_CLIENT_SETTINGS_PATH", dir_path + "/client_settings.yaml") + + with open(client_settings_path, "r") as fh: # Used by CJG for local training + try: + client_settings = dict(yaml.safe_load(fh)) + except yaml.YAMLError: + raise + + num_workers = client_settings["num_workers"] + batch_size = client_settings["batch_size"] + split_index = client_settings["split_index"] + + if data_path is None: + data_path = os.environ.get("FEDN_DATA_PATH") + + with open(os.path.join(os.path.dirname(data_path), "data_splits.yaml"), "r") as file: + clients = yaml.safe_load(file) + + image_list = clients["client " + str(split_index)]["validation"] + + val_ds = MedNISTDataset(data_path="data/MedNIST", transforms=val_transforms, image_files=image_list) + + val_loader = DataLoader(val_ds, batch_size=batch_size, shuffle=True, num_workers=num_workers) + + # Load model + model = load_parameters(in_model_path) + model.eval() + + y_true = [] + y_pred = [] + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + + with torch.no_grad(): + for val_data in val_loader: + val_images, val_labels = ( + val_data[0].to(device), + val_data[1].to(device), + ) + pred = model(val_images).argmax(dim=1) + for i in range(len(pred)): + y_true.append(val_labels[i].item()) + y_pred.append(pred[i].item()) + + class_names = list(DATA_CLASSES.keys()) + print("class names: ", class_names) + cr = classification_report(y_true, y_pred, digits=4, output_dict=True, target_names=class_names) + report = {class_name + "_" + metric: cr[class_name][metric] for class_name in cr if isinstance(cr[class_name], dict) for metric in cr[class_name]} + report.update({class_name: cr[class_name] for class_name in cr if isinstance(cr[class_name], str)}) + + # JSON schema + report.update({"test_accuracy": accuracy_score(y_true, y_pred), "test_f1_score": f1_score(y_true, y_pred, average="macro")}) + for r in report: + print(r, ": ", report[r]) + + # Save JSON + save_metrics(report, out_json_path) + + +if __name__ == "__main__": + validate(sys.argv[1], sys.argv[2]) diff --git a/examples/monai-2D-mednist/client_settings.yaml b/examples/monai-2D-mednist/client_settings.yaml new file mode 100644 index 000000000..f7bccb303 --- /dev/null +++ b/examples/monai-2D-mednist/client_settings.yaml @@ -0,0 +1,6 @@ +lr: 0.01 +batch_size: 32 +local_epochs: 10 +num_workers: 1 +sample_size: 30 +split_index: 4 diff --git a/examples/monai-2D-mednist/docker-compose.override.yaml b/examples/monai-2D-mednist/docker-compose.override.yaml new file mode 100644 index 000000000..afeaf1437 --- /dev/null +++ b/examples/monai-2D-mednist/docker-compose.override.yaml @@ -0,0 +1,36 @@ +# Compose schema version +version: '3.4' + +# Overriding requirements + +x-env: &defaults + GET_HOSTS_FROM: dns + FEDN_PACKAGE_EXTRACT_DIR: package + +services: + + client1: + extends: + file: ${HOST_REPO_DIR:-.}/docker-compose.yaml + service: client + environment: + <<: *defaults + FEDN_DATA_PATH: /app/package/client/data/MedNIST + FEDN_CLIENT_SETTINGS_PATH: /app/client_settings.yaml + deploy: + replicas: 1 + volumes: + - ${HOST_REPO_DIR:-.}/fedn:/app/fedn + - 
${HOST_REPO_DIR:-.}/examples/monai-2D-mednist/client_settings.yaml:/app/client_settings.yaml
+
+  client2:
+    extends:
+      file: ${HOST_REPO_DIR:-.}/docker-compose.yaml
+      service: client
+    environment:
+      <<: *defaults
+      FEDN_DATA_PATH: /app/package/client/data/MedNIST
+      FEDN_CLIENT_SETTINGS_PATH: /app/client_settings.yaml
+    deploy:
+      replicas: 1
+    volumes:
+      - ${HOST_REPO_DIR:-.}/fedn:/app/fedn
+      - ${HOST_REPO_DIR:-.}/examples/monai-2D-mednist/client_settings.yaml:/app/client_settings.yaml
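
For reference, a minimal sketch of how ``split_data()`` in ``client/data.py`` partitions one class folder across clients: the class is shuffled, chopped into ``splits`` contiguous slices with ``np.linspace``, and the first 90% of each slice goes to that client's training set. The file names below are hypothetical stand-ins; the chopping and the train/validation split mirror the function in the patch.

.. code-block:: python

    import numpy as np

    # Hypothetical file names standing in for one MedNIST class folder.
    patients_in_class = [f"HeadCT/{i:06d}.jpeg" for i in range(10)]
    splits, validation_split = 4, 0.9

    np.random.shuffle(patients_in_class)
    # np.linspace gives the chop points; each client gets one contiguous slice.
    chops = np.int32(np.linspace(0, len(patients_in_class), splits + 1))  # e.g. [0 2 5 7 10]
    for split in range(splits):
        p = patients_in_class[chops[split] : chops[split + 1]]
        valsplit = np.int32(len(p) * validation_split)  # first ~90% train, rest validation
        print(f"client {split}: {len(p[:valsplit])} train / {len(p[valsplit:])} validation")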