
feat: added new 'qos' resource #241


Merged
34 commits from feat/add-qos-flag merged into main on Mar 31, 2025
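
This PR adds `qos` as a job resource (next to the existing `constraint`), so a quality-of-service can be requested per rule or via default resources. As a sketch of the intended usage — the rule and its values below are illustrative placeholders, since valid QoS names and node features are cluster-specific (see e.g. `sacctmgr show qos`):

```python
# Hypothetical Snakefile rule exercising the new 'qos' resource.
# "long" and "avx512" are placeholders; check your cluster's QoS list
# and node features before use.
rule align:
    input:
        "reads.fastq",
    output:
        "aligned.bam",
    resources:
        qos="long",           # rendered as --qos='long'
        constraint="avx512",  # rendered as -C 'avx512'
        runtime=120,          # minutes, rendered as -t 120
        mem_mb=4000,          # rendered as --mem 4000
    shell:
        "my_aligner {input} > {output}"
```

The flag rendering noted in the comments follows the new `submit_string.py` shown further down.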
Changes from 27 of 34 commits

Commits
95304e5
feat: added new 'qos' resource
cmeesters Mar 15, 2025
be90c19
Merge branch 'main' into feat/add-qos-flag
cmeesters Mar 15, 2025
64aa03e
fix: typo
cmeesters Mar 15, 2025
8743a3a
Merge branch 'feat/add-qos-flag' of github.com:snakemake/snakemake-ex…
cmeesters Mar 15, 2025
55716c6
feat: new tests for the 'constraint' and 'qos' resources
cmeesters Mar 15, 2025
7522792
fix: linting of tests.py
cmeesters Mar 15, 2025
0c74d04
fix: trying workflow fix with pytest upgrade
cmeesters Mar 17, 2025
4ec357f
fix: revoking last commit - did not help
cmeesters Mar 17, 2025
9f2a8a6
fix: attempt by reordering tests
cmeesters Mar 17, 2025
1aca875
fix: syntax fix
cmeesters Mar 17, 2025
404f38b
fix: trying fix fixture
cmeesters Mar 18, 2025
9abdadd
fix: trying fix fixture - now with io streams - another try
cmeesters Mar 18, 2025
076eb7b
fix: revoked last two commits
cmeesters Mar 18, 2025
22d70de
fix: unused import
cmeesters Mar 18, 2025
34dff5d
fix: trying updated pytest
cmeesters Mar 18, 2025
b5e951a
fix: forcing different stream
cmeesters Mar 18, 2025
7257642
fix: syntax
cmeesters Mar 18, 2025
34990d9
trial: outcommenting offending function
cmeesters Mar 18, 2025
226f3c6
fix: trying different mock setup
cmeesters Mar 18, 2025
24d2e2d
fix: added missing fixture
cmeesters Mar 18, 2025
cb1d178
fix: trying process mock
cmeesters Mar 18, 2025
08e702b
fix: everywhere - a mock popen
cmeesters Mar 18, 2025
06a27c9
refactor: moved submit string creation into own file to facilitate te…
cmeesters Mar 28, 2025
e481380
Merge branch 'main' into feat/add-qos-flag
cmeesters Mar 28, 2025
b3cc9f8
fix: blacked
cmeesters Mar 28, 2025
ecba9c4
fix: blacked
cmeesters Mar 28, 2025
d69631c
Merge branch 'feat/add-qos-flag' of github.com:snakemake/snakemake-ex…
cmeesters Mar 28, 2025
da5a727
fix: removed doubled check for clusters
cmeesters Mar 28, 2025
553eaf7
Update snakemake_executor_plugin_slurm/submit_string.py
cmeesters Mar 28, 2025
ead02f7
Merge branch 'feat/add-qos-flag' of github.com:snakemake/snakemake-ex…
cmeesters Mar 28, 2025
ac1c562
Update snakemake_executor_plugin_slurm/submit_string.py
cmeesters Mar 28, 2025
f198593
Update snakemake_executor_plugin_slurm/submit_string.py
cmeesters Mar 28, 2025
fab9390
Merge branch 'feat/add-qos-flag' of github.com:snakemake/snakemake-ex…
cmeesters Mar 28, 2025
b0a6661
refactor: now, using SimpleNamespace to enable attribute-style access…
cmeesters Mar 28, 2025
2 changes: 1 addition & 1 deletion .github/workflows/announce-release.yml
@@ -12,7 +12,7 @@ permissions:

 jobs:
   post_to_mastodon:
-    if: ${{ contains(github.event.head_commit.message, 'chore(main): release') }}
+    if: "${{ contains(github.event.head_commit.message, 'chore(main): release') }}"
     runs-on: ubuntu-latest
     steps:
       - name: Post to Mastodon
2 changes: 1 addition & 1 deletion .github/workflows/ci.yml
@@ -98,7 +98,7 @@ jobs:
           poetry install

       - name: Run pytest
-        run: poetry run coverage run -m pytest tests/tests.py -sv
+        run: poetry run coverage run -m pytest tests/tests.py -sv --tb=short --disable-warnings

       - name: Run Coverage
         run: poetry run coverage report -m
2 changes: 1 addition & 1 deletion pyproject.toml
@@ -24,7 +24,7 @@ throttler = "^1.2.2"
 black = "^23.7.0"
 flake8 = "^6.1.0"
 coverage = "^7.3.1"
-pytest = "^7.4.2"
+pytest = "^8.3.5"
 snakemake = "^8.20.0"

 [tool.coverage.run]
65 changes: 18 additions & 47 deletions snakemake_executor_plugin_slurm/__init__.py
@@ -26,9 +26,9 @@
     JobExecutorInterface,
 )
 from snakemake_interface_common.exceptions import WorkflowError
-from snakemake_executor_plugin_slurm_jobstep import get_cpu_setting

 from .utils import delete_slurm_environment, delete_empty_dirs, set_gres_string
+from .submit_string import get_submit_command


 @dataclass
@@ -135,9 +135,10 @@ class ExecutorSettings(ExecutorSettingsBase):
 # Required:
 # Implementation of your executor
 class Executor(RemoteExecutor):
-    def __post_init__(self):
+    def __post_init__(self, test_mode: bool = False):
         # run check whether we are running in a SLURM job context
         self.warn_on_jobcontext()
+        self.test_mode = test_mode
         self.run_uuid = str(uuid.uuid4())
         self.logger.info(f"SLURM run ID: {self.run_uuid}")
         self._fallback_account_arg = None
@@ -225,60 +226,41 @@ def run_job(self, job: JobExecutorInterface):
             comment_str = f"rule_{job.name}"
         else:
             comment_str = f"rule_{job.name}_wildcards_{wildcard_str}"
-        call = (
-            f"sbatch "
-            f"--parsable "
-            f"--job-name {self.run_uuid} "
-            f"--output '{slurm_logfile}' "
-            f"--export=ALL "
-            f"--comment '{comment_str}'"
-        )
+        # check whether the 'slurm_extra' parameter is used correctly
+        # prior to putatively setting in the sbatch call
+        if job.resources.get("slurm_extra"):
+            self.check_slurm_extra(job)

-        if not self.workflow.executor_settings.no_account:
-            call += self.get_account_arg(job)
+        job_params = {
+            "run_uuid": self.run_uuid,
+            "slurm_logfile": slurm_logfile,
+            "comment_str": comment_str,
+            "account": self.get_account_arg(job),
+            "partition": self.get_partition_arg(job),
+            "workdir": self.workflow.workdir_init,
+        }

-        call += self.get_partition_arg(job)
+        call = get_submit_command(job, job_params)

         if self.workflow.executor_settings.requeue:
             call += " --requeue"

         call += set_gres_string(job)

-        if job.resources.get("clusters"):
-            call += f" --clusters {job.resources.clusters}"

-        if job.resources.get("runtime"):
-            call += f" -t {job.resources.runtime}"
-        else:
+        if not job.resources.get("runtime"):
             self.logger.warning(
                 "No wall time information given. This might or might not "
                 "work on your cluster. "
                 "If not, specify the resource runtime in your rule or as a reasonable "
                 "default via --default-resources."
             )

-        if job.resources.get("constraint"):
-            call += f" -C '{job.resources.constraint}'"
-        if job.resources.get("mem_mb_per_cpu"):
-            call += f" --mem-per-cpu {job.resources.mem_mb_per_cpu}"
-        elif job.resources.get("mem_mb"):
-            call += f" --mem {job.resources.mem_mb}"
-        else:
+        if not job.resources.get("mem_mb_per_cpu") and not job.resources.get("mem_mb"):
             self.logger.warning(
                 "No job memory information ('mem_mb' or 'mem_mb_per_cpu') is given "
                 "- submitting without. This might or might not work on your cluster."
             )

-        if job.resources.get("nodes", False):
-            call += f" --nodes={job.resources.get('nodes', 1)}"

-        # fixes #40 - set ntasks regardless of mpi, because
-        # SLURM v22.05 will require it for all jobs
-        gpu_job = job.resources.get("gpu") or "gpu" in job.resources.get("gres", "")
-        if gpu_job:
-            call += f" --ntasks-per-gpu={job.resources.get('tasks', 1)}"
-        else:
-            call += f" --ntasks={job.resources.get('tasks', 1)}"
         # MPI job
         if job.resources.get("mpi", False):
             if not job.resources.get("tasks_per_node") and not job.resources.get(
@@ -290,19 +272,8 @@ def run_job(self, job: JobExecutorInterface):
                     "Probably not what you want."
                 )

-        # we need to set cpus-per-task OR cpus-per-gpu, the function
-        # will return a string with the corresponding value
-        call += f" {get_cpu_setting(job, gpu_job)}"
-        if job.resources.get("slurm_extra"):
-            self.check_slurm_extra(job)
-            call += f" {job.resources.slurm_extra}"

         exec_job = self.format_job_exec(job)

-        # ensure that workdir is set correctly
-        # use short argument as this is the same in all slurm versions
-        # (see https://github.com/snakemake/snakemake/issues/2014)
-        call += f" -D {self.workflow.workdir_init}"
         # and finally the job to execute with all the snakemake parameters
         call += f' --wrap="{exec_job}"'

67 changes: 67 additions & 0 deletions snakemake_executor_plugin_slurm/submit_string.py
@@ -0,0 +1,67 @@
from snakemake_executor_plugin_slurm_jobstep import get_cpu_setting


def get_submit_command(job, params):
    """
    Return the submit command for the job.
    """
    call = (
        f"sbatch "
        f"--parsable "
        f"--job-name {params['run_uuid']} "
        f"--output \"{params['slurm_logfile']}\" "
        f"--export=ALL "
        f"--comment \"{params['comment_str']}\""
    )

    # check whether an account is set
    if params.get("account"):
        call += f" {params['account']}"
    # check whether a partition is set
    if params.get("partition"):
        call += f" {params['partition']}"

    if job.resources.get("clusters"):
        call += f" --clusters {job.resources.clusters}"

    if job.resources.get("runtime"):
        call += f" -t {job.resources.runtime}"

    if job.resources.get("constraint") or isinstance(
        job.resources.get("constraint"), str
    ):
        call += f" -C '{job.resources.get('constraint')}'"

    if job.resources.get("qos") or isinstance(job.resources.get("qos"), str):
        call += f" --qos='{job.resources.get('qos')}'"

    if job.resources.get("mem_mb_per_cpu"):
        call += f" --mem-per-cpu {job.resources.mem_mb_per_cpu}"
    elif job.resources.get("mem_mb"):
        call += f" --mem {job.resources.mem_mb}"

    if job.resources.get("nodes", False):
        call += f" --nodes={job.resources.get('nodes', 1)}"

    # fixes #40 - set ntasks regardless of mpi, because
    # SLURM v22.05 will require it for all jobs
    gpu_job = job.resources.get("gpu") or "gpu" in job.resources.get("gres", "")
    if gpu_job:
        call += f" --ntasks-per-gpu={job.resources.get('tasks', 1)}"
    else:
        call += f" --ntasks={job.resources.get('tasks', 1)}"

    # we need to set cpus-per-task OR cpus-per-gpu, the function
    # will return a string with the corresponding value
    call += f" {get_cpu_setting(job, gpu_job)}"
    if job.resources.get("slurm_extra"):
        call += f" {job.resources.slurm_extra}"

    # ensure that workdir is set correctly
    # use short argument as this is the same in all slurm versions
    # (see https://github.com/snakemake/snakemake/issues/2014)
    call += f" -D '{params['workdir']}'"

    return call
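
Since `get_submit_command` only needs a `job` offering attribute- and `get`-style access to `resources`, it can be exercised without a full Snakemake workflow — which is what the `SimpleNamespace` refactor (commit b0a6661) enables. A minimal sketch of such a test, not the PR's actual test code: the `make_job` helper and all values are illustrative, and it assumes `get_cpu_setting` falls back gracefully when no CPU resources are given:

```python
from types import SimpleNamespace

from snakemake_executor_plugin_slurm.submit_string import get_submit_command


def make_job(**resources):
    """Build a minimal job stub; get_submit_command only touches job.resources
    (plus, via get_cpu_setting, possibly job.threads)."""
    res = SimpleNamespace(**resources)
    # get_submit_command mixes dict-style job.resources.get(...) with
    # attribute access (job.resources.runtime), so add a get() shim.
    res.get = lambda key, default=None: getattr(res, key, default)
    return SimpleNamespace(resources=res, threads=1)


def test_qos_in_submit_string():
    job = make_job(qos="long", runtime=60, mem_mb=4000, tasks=1, gres="")
    params = {
        "run_uuid": "d3b0c7a2-test",
        "slurm_logfile": "/tmp/rule_test.log",
        "comment_str": "rule_test",
        "account": "",    # empty: no account argument is appended
        "partition": "",  # empty: no partition argument is appended
        "workdir": ".",
    }
    call = get_submit_command(job, params)
    assert "--qos='long'" in call
    assert "-t 60" in call
```

Asserting on the assembled string keeps the test independent of `sbatch` and of any live SLURM environment, matching the refactor in commit 06a27c9 that moved submit-string creation into its own file.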