diff --git a/CHANGELOG.md b/CHANGELOG.md
index 514c50a..687c48b 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,9 +1,34 @@
# Changelog
+## Next release (dev branch)
+
+## Ananke 0.6
+
+* New features:
+ * install script for [jupyter_scheduler](https://github.com/jupyter-server/jupyter-scheduler)
+ * [jupyter-resource-usage](https://github.com/jupyter-server/jupyter-resource-usage)
+ * [Jupyter RTC](https://github.com/jupyterlab/jupyter-collaboration) supports [server-side execution](https://github.com/datalayer/jupyter-server-nbmodel) (see [Update from Ananke 0.5 to Ananke 0.6](https://gauss.whz.de/ananke/doc/container-admins.html#update-to-0_6) for important hints before updating your Ananke containers)
+* Documentation improvements:
+ * hints on batch autograding with Nbgrader
+ * alternative method for enlarging container admins' home directories
+ * more realistic test environment for development
+ * connect to LMS via HTTPS with enterprise root CA
+* Bugfixes:
+ * increase PID limit in container templates
+ * avoid `:` in nbgrader feedback paths (timestamps!) to be compatible with jupyter-fs
+ * downgrade jupyter-collaboration to stable release (instead of beta)
+* Update all packages:
+ * JupyterHub 5.2.1
+ * JupyterLab 4.3.5
+ * Notebook 7.3.2
+ * Nbgrader 9.5
+ * Python 3.13.1
+
## Ananke 0.5
+
* New features:
* **BREAKING CHANGE** - LTI/Nbgrader course and grades management is now accessible through the 'Kore' menu item in the instructor's JupyterLab (see [kore-extension](https://github.com/marcwit/kore-extension)). This replaces the former Kore service GUI. See [Update form Ananke 0.4 to Ananke 0.5](https://gauss.whz.de/ananke/doc/container-admins.html#update-to-0_5) for hints on updating your installation.
- * **BREAKING CHANGE** - Images and Containers now are managed by one Python script instead of several bash scripts. Directory structure changed, too. See [Update form Ananke 0.4 to Ananke 0.5](https://gauss.whz.de/ananke/doc/container-admins.html#update-to-0_5) for hints on updating your installation.
+ * **BREAKING CHANGE** - Images and Containers now are managed by one Python script instead of several bash scripts. Directory structure changed, too. See [Update from Ananke 0.4 to Ananke 0.5](https://gauss.whz.de/ananke/doc/container-admins.html#update-to-0_5) for hints on updating your installation.
* [jupyterlab-execute-time extension](https://github.com/deshaw/jupyterlab-execute-time) is activated in all JupyterLabs by default.
* Minor additions and corrections to doc.
* Bugfixes:
diff --git a/ananke b/ananke
index 885f2d5..c3cc03d 100755
--- a/ananke
+++ b/ananke
@@ -227,6 +227,7 @@ def ask_int(low: int, high: int, default: int) -> int:
int
Chosen integer.
"""
+
range_text = f'{low if low is not None else "-∞"}...{high if high is not None else "+∞"}'
while True:
@@ -424,12 +425,18 @@ def subcmd_create(args):
if not choice:
break
else:
+ choice_valid = True
+
for gpu in choice.split(','):
gpu = gpu.strip()
if gpu in gpus:
chosen_gpus.append(gpu)
else:
print(f'GPU name "{gpu}" invalid!')
+ choice_valid = False
+
+ if choice_valid:
+ break
for gpu in chosen_gpus:
gpu_devices.append(f'nvidia.com/gpu={gpu}')
@@ -483,7 +490,7 @@ def subcmd_create(args):
return
# create systemd unit
- cmd = f'podman generate systemd --restart-policy=always --files --start-timeout=60 --stop-timeout=60'
+ cmd = f'podman generate systemd --restart-policy=always --files --start-timeout=120 --stop-timeout=120 --restart-sec=10'
if 'requires' in config:
for unit in config['requires']:
cmd += f' --after={unit} --requires={unit}'
diff --git a/containers/template-base-forget/config.py b/containers/template-base-forget/config.py
index c8a2e4c..f4b62fe 100644
--- a/containers/template-base-forget/config.py
+++ b/containers/template-base-forget/config.py
@@ -16,7 +16,9 @@
config['gpus'] = 'interactive'
# additional arguments to Podman (list of strings)
-config['podman_args'] = []
+config['podman_args'] = [
+ '--pids-limit -1' # no PID limit (Podman default is 2048, which is too small)
+]
# host directories accessible inside the container (list of (host_path, container_path) tuples)
# Paths have to be absolute or relative to config file path.
diff --git a/containers/template-base/config.py b/containers/template-base/config.py
index c8a2e4c..f4b62fe 100644
--- a/containers/template-base/config.py
+++ b/containers/template-base/config.py
@@ -16,7 +16,9 @@
config['gpus'] = 'interactive'
# additional arguments to Podman (list of strings)
-config['podman_args'] = []
+config['podman_args'] = [
+ '--pids-limit -1' # no PID limit (Podman default is 2048, which is too small)
+]
# host directories accessible inside the container (list of (host_path, container_path) tuples)
# Paths have to be absolute or relative to config file path.
diff --git a/containers/template-nbgrader/config.py b/containers/template-nbgrader/config.py
index f73b55e..b61a4ea 100644
--- a/containers/template-nbgrader/config.py
+++ b/containers/template-nbgrader/config.py
@@ -16,7 +16,9 @@
config['gpus'] = 'interactive'
# additional arguments to Podman (list of strings)
-config['podman_args'] = []
+config['podman_args'] = [
+ '--pids-limit -1' # no PID limit (Podman default is 2048, which is too small)
+]
# host directories accessible inside the container (list of (host_path, container_path) tuples)
# Paths have to be absolute or relative to config file path.
diff --git a/doc/src/conf.py b/doc/src/conf.py
index 73a9276..81fd1ed 100644
--- a/doc/src/conf.py
+++ b/doc/src/conf.py
@@ -4,10 +4,10 @@
# https://www.sphinx-doc.org/en/master/usage/configuration.html
project = 'Ananke'
-copyright = '2024, Ananke Team'
+copyright = '2025, Ananke Team'
author = 'Ananke Team'
version = ''
-release = '0.5'
+release = '0.6'
extensions = ['myst_parser']
myst_heading_anchors = 4
diff --git a/doc/src/container-admins.md b/doc/src/container-admins.md
index b7c8bab..45f56ee 100644
--- a/doc/src/container-admins.md
+++ b/doc/src/container-admins.md
@@ -266,7 +266,7 @@ Note that the value for `c.LTI13Authenticator.client_id` has to be a list of str
Don't abuse `30_lms.py` for other configuration purposes than the described LTI configuration.
This may lead to unexpected behavior.
-(lit-lms)=
+(lti-lms)=
#### LMS
For your LMS you need the following configuration information (field names are taken from Moodle here and may be slightly different in other LMS):
@@ -283,6 +283,22 @@ Thus, in Moodle only `Existing window` works.
Even `new window` is not possible due to it's implementation in Moodle via embedding techniques.
```
+(container-admins-enterprise-ca)=
+#### HTTPS with enterprise root CA or self-signed cert
+
+If JupyterHub shall connect to your LMS via HTTPS with a cert issued by an enterprise root CA, you have to install the CA's root cert in the Ananke container:
+1. On the host machine copy the cert file to your container's `jupyterhub_config.d` directory.
+2. In the container's root shell run
+ ```
+ mv /opt/conda/envs/jhub/etc/jupyterhub/jupyterhub_config.d/YOUR_CERT_FILE /usr/local/share/ca-certificates/
+ update-ca-certificates
+ ```
+3. Check that the output contains `1 added`.
+
+```{important}
+JupyterHub refuses to connect to servers via HTTPS if the cert is self-signed. Thus, if you want or have to use a self-signed cert for your LMS, you have to create a custom root CA and issue your own certs with that CA. See [documentation for developers](developers.md), where the process of creating a custom CA and issuing certs is described for setting up the development environment.
+```
+
### Hub admins
To give a hub user admin privileges inside the hub (see [For hub admins](hub-admins.md)), get the user's username (from URL `.../user/USERNAME/...` when user visits the hub) and write it to `ananke/containers/my-hub/jupyterhub_config.d/20_users.py`:
@@ -413,6 +429,20 @@ Alternatively to in-container updates yoyu may replace your container by a new o
Remember to back up your user's home directories and modifications you made to the container (Python environments, ...).
+(update-to-0_6)=
+### Update from Ananke 0.5 to Ananke 0.6
+
+If you use Jupyter RTC, rename your container's RTC config file (usually `80_rtc.py`) to something like `80_rtc.py.disabled` before creating the new container. After running the RTC install script in the new container, reset the file to the original name and restart the hub. Without this procedure some new RTC features (server-side execution) will not work, because corresponding lab extensions won't get enabled.
+
+If your hub has many users (or at least many notebooks opened in parallel) the container may hit its default PID limit. Ananke 0.6 adds
+```
+# additional arguments to Podman (list of strings)
+config['podman_args'] = [
+ '--pids-limit -1' # no PID limit (Podman default is 2048, which is too small)
+]
+```
+to all container definition templates (in `config.py`) to allow for an unlimited number of processes (might be a security issue depending on your environment). If you reuse your Ananke 0.5 container definition, you have to add this option manually.
+
(update-to-0_5)=
### Update from Ananke 0.4 to Ananke 0.5
@@ -583,9 +613,17 @@ To install TensorFlow run `/opt/install/tensorflow.sh` in the container's root s
The install script also runs some TensorFlow commands to test the installation. Carefully check the output for errors.
```{important}
-TensorFlow 2.17 does not have NumPy 2 support. The install script will downgrade NumPy to 1.26.4!
+TensorFlow 2.18 does not have Python 3.13 support. The install script will downgrade Python to 3.11.9!
+```
+
+```{important}
+TensorFlow 2.18 does not have NumPy 2.2.3 support. The install script will downgrade NumPy to 2.0.2!
```
#### Assign GPUs to users
Every hub user has access to all GPUs. How to confine a user's TensorFlow commands to a subset of GPUs is described in [TensorFlow and GPUs](hub-users.md#tensorflow-and-gpus).
+
+### Job scheduling in JupyterLab
+
+To allow users to schedule jobs based on notebooks in JupyterLab, install [jupyter_scheduler](https://github.com/jupyter-server/jupyter-scheduler) by running `/opt/install/scheduler.sh` in the container's root shell and restart all user servers.
diff --git a/doc/src/developers.md b/doc/src/developers.md
index 37fe2f5..92ade86 100644
--- a/doc/src/developers.md
+++ b/doc/src/developers.md
@@ -31,83 +31,31 @@ cd doc/html
firefox index.html
```
-## Deployment models
-
-If providing Ananke-based JupyterHubs, one has to decide what kind of deployment model to use:
-* *fully managed*: host admin is identical to container admin (no installation or config work for instructors, instructor cannot modify global Python environment)
-* *managed host*: host admin is different from container admin (instructor is container admin, a good model for experienced JupyterHub admins with some need for individual/direct configuration including modifications to the global Python environment)
-* *fully self-managed*: user sets up its own host machine along the lines of the Ananke project (good for people with special performance needs like GPU servers)
-
-## User management
-
-There is no root or sudo user inside an Ananke container.
-Modifications to containers have to be implemented by the container admin, which is a regular user on the host machine.
-
-Inside a container, users are generated dynamically via systemd's [dynamic users](https://0pointer.net/blog/dynamic-users-with-systemd.html) feature.
-Home directories of dynamic users are persistent.
-Thus, hub users shouldn't recognize that their accounts are created dynamically.
-
-Container admins are unprivileged users on the host system.
-Thus, they should not have access to another container admin's containers.
-
-## Conda environments
-
-Starting with 0.3 Ananke uses [`nb_conda_kernels`](https://github.com/Anaconda-Platform/nb_conda_kernels) to make IPython kernels from different conda environments available in Jupyter.
-The advantage compared to usual kernel management via `ipykernel install` is that kernels installed by `nb_conda_kernels` automatically run `conda activate` at start-up.
-This is necessary for some packages (TensorFlow, Plotly) to have access to relevant environment variables.
-With standard kernel management there is no `conda activate` at start-up.
-
-## Kore for LTI related features
-
-Ananke adds LTI capabilities to JupyterHub and Nbgrader. Two components are involved:
-* The *Kore service* running as a JupyterHub service provides a REST API for LTI functions. Before Ananke 0.5 this serivce also provided a GUI for course and grades management.
-* The *Kore lab extension* provides a GUI in JupyterLab for course and grades management starting with Ananke 0.5.
-
-Note that there is no Jupyter Server extension involved, because LTI functions heavily interact with the authentication process. Thus, they have to be implemented globally for the whole JupyterHub.
-
-## Container structure
-
-### `systemd`
-Ananke's Podman containers run `systemd` as the main process.
-JupyterHub then is a `systemd` service and JupyterHub uses `systemdspawner` for running hub users' JupyterLabs.
-This setup requires Linux with 'cgroups v2' and `systemd` on the host system (Debian, Ubuntu and most others).
-Remember that containers are not virtual machines! All containers and the host machine share one and the same Linux kernel.
-
-### Boot script
-
-Each Ananke image comes with a script `assets/boot.sh` which is run at container boot time.
-
-### Nbgrader exchange directory
-
-The exchange directory for nbgrader inside a container is `/opt/nbgrader_exchange`.
-It cannot be in `/home` because dynamic users have no access to `/home` (the home path is mapped to dynamic user's home path in `/var`).
-
## Local testing with LMS
-The Ananke project ships with a Podman image for Moodle.
+Ananke cannot be used without an LMS, because JupyterHub's login process solely relies on LTI. You may use whatever LTI-capable LMS is available to you for testing. But to facilitate local development and testing the Ananke project ships with a Podman image for [Moodle](https://moodle.org/). In this section we describe how to set up your network and a local Moodle instance for Ananke development.
```{warning}
The Moodle Podman image is for local testing only.
-Never (!!!) use it on an internet facing server.
+Never (!!!) use it on a public facing server.
It's by no means secure!
```
-### Workflow
+### Overview
The following list shall guide you through the proper setup.
See the corresponding sections of individual steps.
-* Build both images with the corresponding `build.sh` scripts as needed.
-* Adjust host configuration, see [Networking configuration](#networking-configuration).
+* Adjust your development machine's networking configuration and (optionally) install a reverse proxy, see [Networking configuration](#networking-configuration).
* Start and configure Moodle, see [First start of Moodle](#first-start-of-moodle).
-* Prepare `External Tool`, see [LTI tool configuration (Moodle)](#lti-tool-configuration-moodle).
-* Change LTI tool visibility (tick `Show in activity chooser` at course view > `More` > `LTI External tools`).
-* Follow [Example Ananke configuration for local testing with Moodle](#example-ananke-configuration-for-local-testing-with-moodle).
-* Start Ananke container with `run.sh`.
-
+* Build Ananke images, configure LTI for JupyterHub, and run a container, see [Start JupyterHub](#start-jupyterhub).
(networking-configuration)=
### Networking configuration
+You may choose between a simple networking setup (without reverse proxy and without HTTPS) or the full setup (with reverse proxy and HTTPS). The simple setup is sufficient for most development tasks. The full setup allows to test under more realistic conditions. In particular, several security features (and related troubles) will only be active with the full setup. Some JupyterLab extensions won't work in the simple setup. An example is [jupyter-fs](https://github.com/jpmorganchase/jupyter-fs).
+
+#### Simple setup
+
For local testing, you have to run both Podman containers, Moodle and Ananke.
No reverse proxy is required.
To get network communication between containers running don't use `localhost` or `127.0.0.1`, because `localhost` inside a container refers to the container's `localhost`.
@@ -125,56 +73,195 @@ Rootless (that is, run by an unprivileged user) Podman containers do not have an
Communication with the container has to use Podman's port forwarding.
```
Moodle then is at `192.168.178.28:9090` and JupyterHub is at `192.168.178.28:8000`, for instance.
-Ports are specified in `config.sh` and `run.sh` scripts for Ananke and Moodle respectively.
+Ports are specified in Moodle's `run.sh` and `config.php` as well as in an Ananke container's `config.py` script. If you are comfortable with `9090` and `8000` you do not have to modify those files (for the moment).
+
+If the host machine is using SELinux run `sudo setenforce Permissive`. Otherwise there will be permission errors.
+
+Note that in Chromium webbrowser some of the limitations of the simple networking setup can be circumvented as follows:
+1. Type `chrome://flags/#unsafely-treat-insecure-origin-as-secure` in Chromium's address bar.
+2. Fill the text area with your local IP and the JupyterHub port (`http://192.168.178.28:8000`).
+
+#### Full setup
+
+The simple networking setup has two major drawbacks:
+* There's no HTTPS and no reverse proxy, which makes things much simpler and less error-prone than usual production setups.
+* The LMS (Moodle) and Ananke use the same domain, which only in very rare cases is what will happen in production.
+
+Both points disable security features of modern browsers, like [CORS](https://developer.mozilla.org/en-US/docs/Web/HTTP/CORS). The full networking setup described in this section should yield a development environment very similar to typical production environments.
+
+##### IP addresses
+
+Create two additional IP addresses for `localhost` (cf. simple setup):
+```
+sudo ip addr add 192.168.178.28 dev lo
+sudo ip addr add 192.168.178.29 dev lo
+```
+Ananke will use `192.168.178.28`. Moodle will be on `192.168.178.29`. So LMS and JupyterHub run on different domains.
+
+##### Reverse proxy
+
+Install [nginx](https://nginx.org/) as reverse proxy and HTTPS endpoint. See [Reverse proxy](#host-admins-reverse-proxy) in the host admins documentation for details.
+
+If you have a certificate issued by a commonly trusted CA, install the cert to your system's cert store (see below for Debian). If you do not have a cert issued by a commonly trusted CA, you have to set up your own root CA, because JupyterHub (or Python packages it builds upon) does not accept self-signed certs from servers it requests data from.
+
+##### Redirects
+
+Add following `location` blocks to the `server` block of nginx's site configuration:
+```
+ location /moodle/ {
+ proxy_pass http://127.0.0.1:9090;
+ proxy_redirect off;
+ proxy_set_header X-Real-IP $remote_addr;
+ #proxy_set_header Host $host; # Moodle fails with this line
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+ proxy_set_header X-Forwarded-Proto $scheme;
+ proxy_set_header Upgrade $http_upgrade;
+ proxy_set_header Connection $connection_upgrade;
+ }
+
+ location /dev-hub/ {
+ proxy_pass http://127.0.0.1:8000;
+ proxy_redirect off;
+ proxy_set_header X-Real-IP $remote_addr;
+ proxy_set_header Host $host;
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+ proxy_set_header X-Forwarded-Proto $scheme;
+ proxy_set_header Upgrade $http_upgrade;
+ proxy_set_header Connection $connection_upgrade;
+ }
+```
+Then Moodle will be on `https://192.168.178.29/moodle` and JupyterHub will be on `https://192.168.178.28/dev-hub`. Note that both Moodle and JupyterHub are accessible via both IP addresses. But we will use `...29` for Moodle and `...28` for JupyterHub to have them on different domains.
-If the host machine is using SELinux run `sudo setenforce Permissive` otherwise there will be permission errors.
+##### Set up a root CA
+
+Create a private key and protect it by a password:
+```
+openssl genrsa -des3 -out myCA.key 2048
+```
+Then create the CA's cert valid for 5 years and signed with your private key:
+```
+openssl req -x509 -key myCA.key -sha256 -days 1825 -out myCA.pem
+```
+Answer all questions somehow (empty or default values). Answers do not matter as long as you use your CA for local testing only. It's a good idea to choose a sensible common name. Else, you may have difficulties to find your cert in a list of many certs.
+
+Now make your CA's cert known to your system's cert store. On Debian:
+```
+sudo cp myCA.pem /usr/local/share/ca-certificates/myCA.crt
+sudo update-ca-certificates
+```
+You also have to add the cert to your browser's cert store, which for most browsers (at least Firefox and Chromium) is separate from your system's cert store. Firefox: Settings > Privacy & Security > Security > Certificates > View Certificates > Authorities > Import. Chromium: Settings > Privacy and security > Security > Advanced > Manage certificates > Authorities > Import.
+
+##### Issue a certificate
+
+Create a private key:
+```
+openssl genrsa -out localhost.key 2048
+```
+Create a certificate signing request (CSR):
+```
+openssl req -new -key localhost.key -out localhost.csr
+```
+Answer all questions somehow (choose a sensible common name).
+
+Create a file `localhost.ext` holding additional configuration for the certificate signing process:
+```
+authorityKeyIdentifier = keyid, issuer
+basicConstraints = CA:FALSE
+keyUsage = digitalSignature, nonRepudiation, keyEncipherment, dataEncipherment
+subjectAltName = @alt_names
+
+[alt_names]
+DNS.1 = localhost
+IP.1 = 192.168.178.28
+IP.2 = 192.168.178.29
+IP.3 = 127.0.0.1
+```
+Create the cert:
+```
+openssl x509 -req -in localhost.csr -CA myCA.pem -CAkey myCA.key -CAcreateserial -out localhost.crt -days 1825 -sha256 -extfile localhost.ext
+```
+
+##### Install the certificate
+
+Move the private key and the cert to your system's corresponding locations. For Debian:
+```
+sudo cp localhost.crt /etc/ssl/certs/
+sudo cp localhost.key /etc/ssl/private/
+```
+Set paths for both files in nginx site config file (`ssl_certificate` and `ssl_certificate_key` in `server` block).
(first-start-of-moodle)=
### First start of Moodle
-Start the Moodle container with `run.sh`.
-Then enter the container's shell with `shell.sh` und run `/opt/init_moodle.sh`.
+#### Image and container
+
+Create Moodle's Podman image:
+```
+cd images/test-moodle
+./build.sh
+```
+Then `cd ../../test-moodle` and start the Moodle container with `run.sh` (after properly setting the `PORT` variable, if you do not want to have Moodle on `9090`).
+
+Now the Moodle container is up and running, but requires some initialization.
+
+#### Initialization of Moodle
+
+Enter the container's shell with `shell.sh` and run `/opt/init_moodle.sh`.
This creates the Moodle database and basic Moodle configuration.
The script will ask you for your Moodle container's URL.
-With the above network configuration use `http://192.168.178.28:9090`.
+With the above network configuration use `http://192.168.178.28:9090` (simple networking) or `https://192.168.178.29/moodle` (full networking).
+
+If you are using the full networking setup, in the container's shell edit both files `/var/www/html/moodle/config.php` and `/opt/moodledata/config.php` to contain the config option `$CFG->sslproxy = true;`. The file then might look like:
+```
+...
+$CFG->dataroot = '/opt/moodledata';
+$CFG->admin = 'admin';
+$CFG->reverseproxy = true;
+$CFG->sslproxy = true;
+
+$CFG->directorypermissions = 02777;
-In your webbrowser open `http://192.168.178.28:9090/moodle`.
+require_once(__DIR__ . '/lib/setup.php');
+```
+
+In your webbrowser open `http://192.168.178.28:9090/moodle` (simple networking) or `https://192.168.178.29/moodle` (full networking).
Log in as user `admin` with password `Admin123.`.
Answer all questions asked.
-Even though the email addresses have to be entered, they serve no purpose because mail is not configured and so the addresses may be selected at will.
+Even though the email addresses have to be entered, they serve no purpose because mail is not configured and so the addresses may be chosen at will.
The `admin` user is the only exiting user.
You may add other users for testing.
```{important}
-Although the container is running at `http://192.168.178.28:9090` Moodle's URL is `http://192.168.178.28:9090/moodle`.
+Although the container is running at `http://192.168.178.28:9090` in the simple networking setting Moodle's URL is `http://192.168.178.28:9090/moodle`. In contrast, in the full networking setting Moodle's URL is `.../moodle`, not `.../moodle/moodle` as one might expect from the reverse proxy's config.
```
-### Moodle security settings
+#### Moodle security settings
Moodle has a black list for hosts and a white list for hosts.
Moodle only sends requests to URLs matching both lists.
This is especially important for LTI communication in test environments with non-standard ports and requests to `localhost` and friends.
-For instance, hosts `192.168.*.*` are black listed by default.
+For instance, hosts `192.168.*.*` are black-listed by default.
Log in to Moodle as `admin`.
Go to 'Site Administration', 'General', 'Security', 'HTTP Security'.
-Remove your IP pattern from the hosts black list and add your JupyterHub port to the ports white list.
+Remove your IP pattern from the hosts black list (for simple and full networking) and add your JupyterHub port to the ports white list (for simple networking only).
-(lti-tool-configuration-moodle)=
-### LTI tool configuration (Moodle)
+#### Moodle LTI configuration
-To access the JupyterHub in the Moodle course context, it must be configured as an external tool.
+To access JupyterHub in the Moodle course context, it must be configured as an external tool.
This may be done at `Site administration` > `Plugins` > `Manage tools` > `configure a tool manually`.
-Tool settings:
-* `Tool URL` - URL of the `JupyterHub` as described above.
+Tool settings (simple networking):
+* `Tool URL` - URL of `JupyterHub`, that is `http://192.168.178.28:8000`
* `LTI version` - LTI 1.3
* `Public key type` - Keyset URL
-* `Public keyset` - `http://192.168.178.28:8000/services/kore/jwks` (alter base URL as needed)
-* `Initiate login URL` - `http://192.168.178.28:8000/hub/lti13/oauth_login` (alter base URL as needed)
-* `Redirection URI(s)` - `http://192.168.178.28:8000/hub/lti13/oauth_callback` (alter base URL as needed)
+* `Public keyset` - `http://192.168.178.28:8000/services/kore/jwks`
+* `Initiate login URL` - `http://192.168.178.28:8000/hub/lti13/oauth_login`
+* `Redirection URI(s)` - `http://192.168.178.28:8000/hub/lti13/oauth_callback`
* `Default launch container` - New window
-### Persistent data
+If you use the full networking setting, replace `http://192.168.178.28:8000` by `https://192.168.178.29/dev-hub`.
+
+#### Persistent data
All data created by Moodle at runtime (users, courses, grades, ...) will be stored in `test-moodle/runtime` on the host machine.
If you destroy the container und start a fresh one, everything will still be available to the new container.
@@ -182,16 +269,17 @@ Running `init_moodle.sh` again is NOT required and may result in errors and corr
To get rid of Moodle's data and start with a fresh Moodle installation, delete the contents of `test-moodle/runtime/moodle_data` and `test-moodle/runtime/mariadb_data` before you create the container.
-(example-ananke-configuration-for-local-testing-with-moodle)=
-### Example Ananke configuration for local testing with Moodle
+(start-jupyterhub)=
+### Start JupyterHub
-Before starting the container of Ananke make sure to add / alter the `20_users.py` and `30_lms.py` files.
-Exemplary configurations may look like:
+Proceed as described in [Install and run a container](#install).
+Remember to move your localhost cert to the container, see [HTTPS with enterprise root CA or self-signed cert](#container-admins-enterprise-ca).
+Configuration files `20_users.py` and `30_lms.py` may look like
```python
# 20_users.py
-c = get_config() # noqa
+c = get_config()
# username(s) of hub admin(s)
# (login to the hub and look at the URL to get your username)
@@ -203,7 +291,7 @@ and
```python
# 30_lms.py
-c = get_config() # noqa
+c = get_config()
# configuration data provided by your LMS
base_url = 'http://192.168.178.28:9090/moodle'
@@ -214,19 +302,81 @@ c.LTI13Authenticator.jwks_endpoint = f'{base_url}/mod/lti/certs.php'
c.LTI13Authenticator.access_token_url = f'{base_url}/mod/lti/token.php'
```
-Here are some notes to fill in the correct values:
-* `admin_users` - The ID may be extracted from Moodle's URL. The URL might look something like this: yoursite.com/user/profile.php?id=123, where "123" is your user ID. Remember to prefix the ID with the letter 'u'.
-* All values for `30_lms.py` may be seen within the `Tool configuration details`. These are available from the list symbol (left to the cog symbol) of the tool (`Site administration` > `Plugins` > `Manage tools`).
+The user ID for `admin_users` may be extracted from Moodle's URL. The URL looks something like `http://192.168.178.28:9090/moodle/user/profile.php?id=123`, where "123" is your user ID. Remember to prefix the ID with the letter 'u'.
-## Testing jupyter-fs locally
+All values for `30_lms.py` may be seen within Moodle's `Tool configuration details`. These are available from the list symbol of the tool (`Site administration` > `Plugins` > `Manage tools`).
-Jupyter-fs uses browser security features preventing local testing if the local host's IP address is not `127.0.0.1`. For Chromium this security feature can be disabled:
-1. Type `chrome://flags/#unsafely-treat-insecure-origin-as-secure` in Chromium's address bar.
-2. Fill the text area with your local IP and the JHub port (`http://192.168.178.128:8000`).
+## Deployment models
+
+If providing Ananke-based JupyterHubs, one has to decide what kind of deployment model to use:
+* *fully managed*: host admin is identical to container admin (no installation or config work for instructors, instructor cannot modify global Python environment)
+* *managed host*: host admin is different from container admin (instructor is container admin, a good model for experienced JupyterHub admins with some need for individual/direct configuration including modifications to the global Python environment)
+* *fully self-managed*: user sets up their own host machine along the lines of the Ananke project (good for people with special performance needs like GPU servers)
+
+## User management
+
+There is no root or sudo user inside an Ananke container.
+Modifications to containers have to be implemented by the container admin, which is a regular user on the host machine.
+
+Inside a container, users are generated dynamically via systemd's [dynamic users](https://0pointer.net/blog/dynamic-users-with-systemd.html) feature.
+Home directories of dynamic users are persistent.
+Thus, hub users shouldn't recognize that their accounts are created dynamically.
+
+Container admins are unprivileged users on the host system.
+Thus, they should not have access to another container admin's containers.
+
+## Conda environments
+
+Starting with 0.3 Ananke uses [`nb_conda_kernels`](https://github.com/Anaconda-Platform/nb_conda_kernels) to make IPython kernels from different conda environments available in Jupyter.
+The advantage compared to usual kernel management via `ipykernel install` is that kernels installed by `nb_conda_kernels` automatically run `conda activate` at start-up.
+This is necessary for some packages (TensorFlow, Plotly) to have access to relevant environment variables.
+With standard kernel management there is no `conda activate` at start-up.
+
+## Kore for LTI related features
+
+Ananke adds LTI capabilities to JupyterHub and Nbgrader. Two components are involved:
+* The *Kore service* running as a JupyterHub service provides a REST API for LTI functions. Before Ananke 0.5 this service also provided a GUI for course and grades management.
+* The *Kore lab extension* provides a GUI in JupyterLab for course and grades management starting with Ananke 0.5.
+
+Note that there is no Jupyter Server extension involved, because LTI functions heavily interact with the authentication process.
+Thus, they have to be implemented globally for the whole JupyterHub.
+
+**Explanation**
+Kore is a Flask-based web application that manages courses, assignments, grades, and related functionalities through a set of routes and integrations with JupyterHub.
+It handles configuration loading, session management, and various HTTP requests to process user actions like managing courses, sending grades, and handling course data.
+
+The `grades_route.py` file implements a `/grades` endpoint, which processes grades and sends them securely to an external Learning Management System (LMS).
+It handles JWT-based authentication, reads data from a local Gradebook, and ensures error handling across several steps, including reading configuration files and posting scores.
+
+The `courses_route.py` file introduces routes for managing courses, including listing active and current courses, copying, backing up, resetting, and deleting courses.
+It interacts with course directories, handles file operations, and modifies JupyterHub configurations for course management.
+The file includes detailed error handling, particularly around system calls and configuration management, and ensures smooth integration with nbgrader for course grading.
+
+The `assignments_route.py` and `problems_route.py` files define the `/assignments` and `/problems` endpoints for listing and copying assignments and problems respectively.
+They retrieve data and manage file operations to copy assignments/problems between directories, ensuring proper permissions and error handling.
+
+The `home_route.py` file in combination with the jinja template files is used to render the frontend if the service is accessed via the Hub Control Panel.
+
+## Container structure
+
+### `systemd`
+Ananke's Podman containers run `systemd` as the main process.
+JupyterHub then is a `systemd` service and JupyterHub uses `systemdspawner` for running hub users' JupyterLabs.
+This setup requires Linux with 'cgroups v2' and `systemd` on the host system (Debian, Ubuntu and most others).
+Remember that containers are not virtual machines! All containers and the host machine share one and the same Linux kernel.
+
+### Boot script
+
+Each Ananke image comes with a script `assets/boot.sh` which is run at container boot time.
+
+### Nbgrader exchange directory
+
+The exchange directory for nbgrader inside a container is `/opt/nbgrader_exchange`.
+It cannot be in `/home` because dynamic users have no access to `/home` (the home path is mapped to the dynamic user's home path in `/var`).
-## Arguments to `podman run`
+## Arguments to `podman create`
-Some special arguments used in `run.sh`:
+Some special arguments are used in the Ananke Manager script `ananke` for creating containers:
* `-p 8000:8000` makes port 8000 (right one) inside the container available as port 8000 (left one) outside.
* `--cap-add SYS_ADMIN` allows the container to create dynamic systemd users.
* `--mount=type=bind,source=runtime/dyn_home,destination=/var/lib/private` mounts the host machines `runtime/dyn_home` to the container's `/var/lib/private` making dynamic users' home directories persist container rebuilds and restarts.
diff --git a/doc/src/host-admins.md b/doc/src/host-admins.md
index 04d6999..82c8801 100644
--- a/doc/src/host-admins.md
+++ b/doc/src/host-admins.md
@@ -1090,6 +1090,7 @@ sudo sysctl vm.overcommit_ratio
Consider this solution experimental. The authors of this document do not fully understand the details here, but tested suggested commands on a production system. See [How to Adjust Linux Out-Of-Memory Killer Settings for PostgreSQL](https://www.percona.com/blog/out-of-memory-killer-or-savior/) for some background information.
```
+(host-admins-reverse-proxy)=
### Reverse proxy
The host machine will provide (next to SSH) only one service to the outside world: an HTTP server.
@@ -1098,7 +1099,7 @@ Which container to forward to is determined by the request's URL.
Here we use [`nginx`](https://www.nginx.com/) as HTTP server.
Connection between the world and HTTP server will be encrypted (HTTPS).
-Connection to containers won't be encrypted because traffic does leave the machine.
+Connection to containers won't be encrypted because traffic does not leave the machine.
Install `nginx`:
```
@@ -1229,7 +1230,7 @@ To prevent a container from filling the host machine's disk, we use separate vir
The container admin's file system's image file will be a sparse file.
Thus, it requires much less disk space than its size suggests.
-See [Disk quota checks and extension](#disk-quota-checks-and-extension) for commands to check container admins' true disk usage.
+See [Disk quota checks and extension](#disk-quota-check) for commands to check container admins' true disk usage.
```
```{warning}
@@ -1321,39 +1322,104 @@ For each container on the machine use different location and different port.
### Enlarge container admin's home dir
-If the initial maximum size of a container admin's home directory turns out to be too small, you may increase the size limit by, say, 5 GB. Proceed as follows:
-1. Tell the containter admin to stop the container by running
+If the initial maximum size of a container admin's home directory turns out to be too small, you may increase the size limit.
+This requires three steps detailed below:
+1. Stop the container and unmount the file system.
+2. Enlarge the file system.
+3. Mount the file system and restart the container.
+
+For step 2 we provide two fundamentally different variants: a simple one that should work but fails in at least some cases, and a more complex one that always works.
+
+#### Stop and unmount
+
+Tell the container admin to stop the container by running
+```
+systemctl --user stop ananke-base-hub.service
+```
+in the **container admin's shell** (SSH). Here `ananke-base-hub.service` is the systemd service for the Ananke container. Its structure is `ananke-CONTAINER_DEFINITION.service`, where `CONTAINER_DEFINITION` is the name of the directory containing the container's `config.py`.
+
+Identify processes accessing the container admin's home directory:
+```
+sudo fuser -mv /home/testhub_user
+```
+Then kill all processes listed there:
+```
+sudo kill PID_OF_PROCESS
+```
+
+Unmount the container admin's home directory:
+```
+sudo umount /home/testhub_user
+```
+
+#### Enlarge the file system (simple method)
+
+In principle, the following commands should suffice to increase the file system size by 5 GB. But in some settings Ananke's development team observed file system corruption and only partial enlargement (not all tools recognize the larger file system size) for unknown reasons. To avoid data loss you should resort to the more complex (and more reliable) procedure described in the next section.
+```
+sudo truncate -s +5G /home/testhub_user.img
+sudo e2fsck -f /home/testhub_user.img
+sudo resize2fs /home/testhub_user.img
+```
+
+#### Enlarge the file system (complex method)
+
+The following steps create a new, larger file system and copy all files to this new file system. If something goes wrong, you may restore the old, small file system.
+
+1. Rename the old image file:
```
- systemctl --user stop container-ananke-base-hub.service
+ sudo mv /home/testhub_user.img /home/testhub_user_old.img
```
- in the **container admin's shell** (SSH). Here `container-ananke-base-hub.service` is the systemd service for the container defined in the containers start-up script `run.sh`.
-2. Identify processes accessing the container admin's home directory:
+2. Create a new, empty image file:
```
- sudo fuser -mv /home/testhub_user
+ sudo truncate -s 25G /home/testhub_user.img
+ sudo mkfs.ext4 /home/testhub_user.img
```
- Then kill all processes listed there:
+3. Create a directory for mounting the old file system:
```
- sudo kill PID_OF_PROCESS
+ sudo mkdir /home/testhub_user_old
```
-3. Unmount the container admin's home directory:
+4. Add the line
```
- sudo umount /home/testhub_user
+ /home/testhub_user_old.img /home/testhub_user_old ext4 loop,rw,nosuid,nodev 0 3
```
-4. Enlarge image file and file system:
+ to `/etc/fstab` and run
```
- sudo truncate -s +5G /home/testhub_user.img
- sudo e2fsck -f /home/testhub_user.img
- sudo resize2fs /home/testhub_user.img
+ sudo systemctl daemon-reload
+ sudo mount /home/testhub_user_old
```
-5. Mount the home directory:
+ to mount the old file system.
+5. Mount the new file system:
```
sudo mount /home/testhub_user
```
-6. Tell the container admin to start its container by running
+6. Transfer all files from old to new file system:
+ ```
+ sudo rsync -av /home/testhub_user_old/ /home/testhub_user
+ ```
+7. Unmount both file systems:
+ ```
+ sudo umount /home/testhub_user_old
+ sudo umount /home/testhub_user
+ ```
+8. (optional) Remove the `testhub_user_old` line from `/etc/fstab` and run
```
- systemctl --user start container-ananke-base-hub.service
+ sudo rm /home/testhub_user_old.img
+ rmdir /home/testhub_user_old
```
- in the **container admin's shell** (SSH).
+ to remove the old file system.
+
+#### Mount and restart
+
+Mount the home directory:
+```
+sudo mount /home/testhub_user
+```
+
+Tell the container admin to start its container by running
+```
+systemctl --user start ananke-base-hub.service
+```
+in the **container admin's shell** (SSH).
## Regular maintenance work
@@ -1383,14 +1449,6 @@ All vulnerable packages (including ones not repairable by update at the moment):
debsecan --suite bookworm --format report
```
-````{important}
-If you have to reboot the machine after an update (new kernel version, for instance), all Podman container's on the machine will be stopped.
-Thus, container admins have to restart their containers with
-```
-podman restart container_name_or_id
-```
-````
-
### Malware
Run
@@ -1467,39 +1525,14 @@ iostat -N --human --pretty
```
This prints CPU and disk usage statistics.
-(disk-quota-checks-and-extension)=
-### Disk quota checks and extension
+(disk-quota-check)=
+### Disk quota check
List disk usage with
```
du -h /home/*.img
```
-Extend a container admin's maximum home directory size by
-```
-CONT_ADMIN=testhub_user
-
-sudo truncate -s +5G /home/$CONT_ADMIN.img
-sudo umount /home/$CONT_ADMIN.img
-sudo e2fsck -f /home/$CONT_ADMIN.img
-sudo resize2fs /home/$CONT_ADMIN.img
-sudo mount /home/$CONT_ADMIN.img
-```
-If `umount` fails with `device is busy` run
-```
-sudo lsof /home/$CONT_ADMIN
-```
-and kill corresponding processes.
-Alternatively (or additionally), run
-```
-sudo fuser -mv /home/$CONT_ADMIN.img
-```
-to see processes using the file system and run
-```
-sudo fuser -mkv /home/$CONT_ADMIN.img
-```
-to kill them.
-
## Optional features
Features described below may be relevant to some users only.
diff --git a/doc/src/hub-users.md b/doc/src/hub-users.md
index a6c876a..ac813ca 100644
--- a/doc/src/hub-users.md
+++ b/doc/src/hub-users.md
@@ -198,3 +198,16 @@ cd ~
ln -s / .lsp_symlink
```
once. This makes more detailed package information available to Jupyter LSP.
+
+## Scheduling jobs
+
+If [jupyter_scheduler](https://github.com/jupyter-server/jupyter-scheduler) is available on your JupyterHub, you may schedule jobs based on notebook files.
+
+```{important}
+Do not use the "Run job with input folder" option for notebooks in your home directory. If you need that option, create a subdirectory and move the notebook there. Otherwise, job creation will fail.
+```
+
+```{important}
+Don't forget to remove jobs you do not need anymore. Each job execution creates several files, filling up disk space over time.
+```
+
diff --git a/doc/src/instructors.md b/doc/src/instructors.md
index c920dba..c482056 100644
--- a/doc/src/instructors.md
+++ b/doc/src/instructors.md
@@ -62,6 +62,14 @@ Usage of the formgrader GUI is described in [nbgrader's documentation](https://n
When collecting submissions with the formgrader, nbgrader will complain about possible cheating attempts in the log output due to unexpected file ownerships. This warning can be savely ignored as long as no student tries cheating ;-) The warning is caused by Ananke's management of user accounts. Developers plan to tackle this problem in a future release.
```
+### Batch autograding
+
+The "Manage Submissions" page of Nbgraders Formgrader GUI shows commands for batch autgrading in a terminal. Before running those commands you have to activate the Conda environment, where Nbgrader is installed in:
+```
+conda activate jhub
+```
+The environment name is always `jhub`, even if you have created additional local or global environments.
+
### Feedback configuration
Nbgrader provides two configuration options for feedback generation. In the course's Lab open a terminal and run `nano ~/.jupyter/nbgrader_config.py`. A console based text editor will open showing a few lines of Python code. Do not change anything here except for (un)commenting following two lines:
@@ -94,10 +102,16 @@ to `~/.jupyter/nbgrader_config.py`.
The Kore service shipping with the Ananke Jupyter Distribution on the one hand sends nbgrader's grades to the LMS.
On the other hand, it provides course management functionality for nbgrader.
+The service can be operated via the JupyterLab extension or via the 'Services' tab in the Hub Control Panel.
+
+The following functionality is provided:
+**Grading Scope:**
+ - If the `grading_scopes` parameter is set to `current`, the instructor can only send grades for the current course—specifically, the course they accessed JupyterHub from.
+ - If it is set to `all`, the instructor can send grades for all courses in which they have access rights, providing broader control for instructors managing multiple courses.
+
+**Importing Courses, Assignments, and Problems:**
+ - Instructors can import data from both running and backed-up courses. This includes importing entire courses, specific assignments, or individual problems.
+ - Note: When copying data (sending "to"), the extension only allows importing to running/active courses, and not to backed up courses.
-Access Kore from your JupyterLab session by clicking the 'Kore' menu item.
-* **Sending grades** to the LMS only works for the LMS course you started your Jupyter session from. All other functionality works for all your courses.
-* **Backup** copies the a whole course (including submissions and grades) to your home directory (in your personal JupyterLab).
-* **Import** searches your home directory for backup directories and shows a selection dialog. Imported course files and directories can be renamed in the course's file browser (to rename assignments, for instance).
-* **Reset a course** to remove all students and their data from the course. So you can start with a fresh course without re-creating it.
-* **Delete a course** if you do not need it anymore. All files and the course's user account will be removed. Don't forget to remove corresponding LMS activity, too. Else coming to JupyterHub through this activity will create a new nbgrader course.
+**Course Management:**
+ - The extension allows instructors to back up, reset, or delete any running course where they have the necessary access rights.
diff --git a/images/ananke-base/Containerfile b/images/ananke-base/Containerfile
index 1d3e9e8..dbd5014 100644
--- a/images/ananke-base/Containerfile
+++ b/images/ananke-base/Containerfile
@@ -1,14 +1,14 @@
-FROM docker.io/library/debian@sha256:45f2e735295654f13e3be10da2a6892c708f71a71be845818f6058982761a6d3
+FROM docker.io/library/debian@sha256:4abf773f2a570e6873259c4e3ba16de6c6268fb571fd46ec80be7c67822823b3
ARG DEBIAN_FRONTEND=noninteractive
ARG TERM=linux
-ARG JUPYTERHUB_VERSION=5.1.0
-ARG JUPYTERLAB_VERSION=4.2.4
-ARG NOTEBOOK_VERSION=7.2.1
+ARG JUPYTERHUB_VERSION=5.2.1
+ARG JUPYTERLAB_VERSION=4.3.5
+ARG NOTEBOOK_VERSION=7.3.2
ARG LTIAUTHENTICATOR_VERSION=1.6.2
-ARG SYSTEMDSPAWNER_VERSION=1.0.1
-ARG PYTHON_VERSION=3.12.5
+ARG SYSTEMDSPAWNER_VERSION=1.0.2
+ARG PYTHON_VERSION=3.13.1
RUN apt-get update && \
apt-get upgrade -y && \
@@ -66,6 +66,7 @@ RUN bash -c "source /opt/conda/etc/profile.d/conda.sh; \
jupyterhub=$JUPYTERHUB_VERSION \
jupyterlab=$JUPYTERLAB_VERSION \
jupyterlab_execute_time \
+ jupyter-resource-usage \
nb_conda_kernels \
notebook=$NOTEBOOK_VERSION \
pycurl \
diff --git a/images/ananke-base/assets/install/all.sh b/images/ananke-base/assets/install/all.sh
new file mode 100644
index 0000000..2f63296
--- /dev/null
+++ b/images/ananke-base/assets/install/all.sh
@@ -0,0 +1,9 @@
+#!/bin/bash
+
+./base.sh
+./myst.sh
+./lsp.sh
+./scheduler.sh
+./jupyterfs.sh
+./rtc.sh
+./tensorflow.sh
diff --git a/images/ananke-base/assets/install/base.sh b/images/ananke-base/assets/install/base.sh
index 3b743bf..001b00d 100644
--- a/images/ananke-base/assets/install/base.sh
+++ b/images/ananke-base/assets/install/base.sh
@@ -1,14 +1,14 @@
#!/bin/bash
-NUMPY_VERSION=2.0.1
-PANDAS_VERSION=2.2.2
+NUMPY_VERSION=2.2.3
+PANDAS_VERSION=2.2.3
-MATPLOTLIB_VERSION=3.9.1
-IPYMPL_VERSION=0.9.4
+MATPLOTLIB_VERSION=3.10.0
+IPYMPL_VERSION=0.9.6
# ipympl is required in both jhub and python3 env for interactive matplotlib output
SEABORN_VERSION=0.13.2
-PLOTLY_VERSION=5.23.0
+PLOTLY_VERSION=6.0.0
# plotly is required in both jhub and python3 env for proper rendering in JLab
source /opt/conda/etc/profile.d/conda.sh
diff --git a/images/ananke-base/assets/install/jupyterfs.sh b/images/ananke-base/assets/install/jupyterfs.sh
index 02b9506..0c6de15 100644
--- a/images/ananke-base/assets/install/jupyterfs.sh
+++ b/images/ananke-base/assets/install/jupyterfs.sh
@@ -14,7 +14,7 @@ pip install git+https://github.com/ezhov-evgeny/webdav-client-python-3.git@98c23
# https://github.com/jpmorganchase/jupyter-fs/issues/211
# https://github.com/jpmorganchase/jupyter-fs/issues/210
# https://github.com/jpmorganchase/jupyter-fs/pull/212
-pip install git+https://github.com/jeflem/jupyter-fs.git@ananke_0.5
+pip install git+https://github.com/jeflem/jupyter-fs.git@ananke_0.6
# WebDAV support for jupyterfs
pip install fs.webdavfs
diff --git a/images/ananke-base/assets/install/rtc.sh b/images/ananke-base/assets/install/rtc.sh
index a27020e..24ce882 100644
--- a/images/ananke-base/assets/install/rtc.sh
+++ b/images/ananke-base/assets/install/rtc.sh
@@ -3,10 +3,13 @@
source /opt/conda/etc/profile.d/conda.sh
conda activate jhub
-pip install jupyter-collaboration==3.0.0b2
+pip install jupyter-collaboration==3.1.0
+pip install jupyter_server_nbmodel[lab]
# disabel RTC for all users
jupyter labextension disable @jupyter/collaboration-extension
jupyter labextension unlock @jupyter/collaboration-extension
jupyter labextension disable @jupyter/docprovider-extension
jupyter labextension unlock @jupyter/docprovider-extension
+jupyter labextension disable jupyter-server-nbmodel
+jupyter labextension unlock jupyter-server-nbmodel
diff --git a/images/ananke-base/assets/install/rtc_config.py b/images/ananke-base/assets/install/rtc_config.py
index 988d87f..f857b44 100644
--- a/images/ananke-base/assets/install/rtc_config.py
+++ b/images/ananke-base/assets/install/rtc_config.py
@@ -25,6 +25,8 @@
jupyter labextension enable --level=user @jupyter/collaboration-extension; \
jupyter labextension disable --level=user @jupyter/docprovider-extension; \
jupyter labextension enable --level=user @jupyter/docprovider-extension; \
+ jupyter labextension disable --level=user jupyter-server-nbmodel; \
+ jupyter labextension enable --level=user jupyter-server-nbmodel; \
jupyter labextension disable --level=user @jupyter/nbgrader:assignment-list; \
jupyter labextension disable --level=user @jupyter/nbgrader:validate-assignment; \
jupyter labextension disable --level=user @jupyter/nbgrader:menu"')
@@ -38,6 +40,7 @@
'name': f'{room}',
'url': f'http://127.0.0.1:{port}',
'command': ['jupyterhub-singleuser', '--KernelSpecManager.ensure_native_kernel=False'],
+ 'environment': {'JUPYTER_PREFER_ENV_PATH': '0'},
'user': f'{username}',
'cwd': f'/home/{username}',
'oauth_no_confirm': True
@@ -69,6 +72,10 @@
conda activate jhub; \
jupyter labextension disable --level=user @jupyter/collaboration-extension; \
jupyter labextension enable --level=user @jupyter/collaboration-extension; \
+ jupyter labextension disable --level=user @jupyter/docprovider-extension; \
+ jupyter labextension enable --level=user @jupyter/docprovider-extension; \
+ jupyter labextension disable --level=user jupyter-server-nbmodel; \
+ jupyter labextension enable --level=user jupyter-server-nbmodel; \
jupyter labextension disable --level=user @jupyter/nbgrader:assignment-list; \
jupyter labextension disable --level=user @jupyter/nbgrader:validate-assignment; \
jupyter labextension disable --level=user @jupyter/nbgrader:menu"')
@@ -82,6 +89,7 @@
'name': f'{room["name"]}',
'url': f'http://127.0.0.1:{port}',
'command': ['jupyterhub-singleuser', '--KernelSpecManager.ensure_native_kernel=False'],
+ 'environment': {'JUPYTER_PREFER_ENV_PATH': '0'},
'user': f'{username}',
'cwd': f'/home/{username}',
'oauth_no_confirm': True
diff --git a/images/ananke-base/assets/install/scheduler.sh b/images/ananke-base/assets/install/scheduler.sh
new file mode 100644
index 0000000..2e55ac4
--- /dev/null
+++ b/images/ananke-base/assets/install/scheduler.sh
@@ -0,0 +1,7 @@
+#!/bin/bash
+
+source /opt/conda/etc/profile.d/conda.sh
+
+conda activate jhub
+
+pip install jupyter_scheduler
diff --git a/images/ananke-base/assets/install/tensorflow.sh b/images/ananke-base/assets/install/tensorflow.sh
index 0981f23..916e15e 100644
--- a/images/ananke-base/assets/install/tensorflow.sh
+++ b/images/ananke-base/assets/install/tensorflow.sh
@@ -5,8 +5,8 @@ source /opt/conda/etc/profile.d/conda.sh
conda activate python3
# install
-conda install -y numpy==1.26.4
-pip install tensorflow[and-cuda]==2.17.0
+conda install -y python==3.11.9 numpy==2.0.2
+pip install tensorflow[and-cuda]==2.18.0
pip install keras_tuner
# test
diff --git a/images/ananke-base/assets/jupyterhub.service b/images/ananke-base/assets/jupyterhub.service
index 4487e6c..633e25f 100644
--- a/images/ananke-base/assets/jupyterhub.service
+++ b/images/ananke-base/assets/jupyterhub.service
@@ -5,6 +5,7 @@ After=syslog.target network.target
[Service]
User=root
Environment="PATH=/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/opt/conda/envs/jhub/bin"
+Environment="SSL_CERT_DIR=/etc/ssl/certs"
ExecStart=/opt/conda/envs/jhub/bin/jupyterhub -f /opt/conda/envs/jhub/etc/jupyterhub/jupyterhub_config.py
[Install]
diff --git a/images/ananke-base/assets/jupyterhub_config.py b/images/ananke-base/assets/jupyterhub_config.py
index 7154b12..8580fbc 100644
--- a/images/ananke-base/assets/jupyterhub_config.py
+++ b/images/ananke-base/assets/jupyterhub_config.py
@@ -203,6 +203,9 @@ async def update_user_data(authenticator: LTI13Authenticator, handler: LTI13Call
# https://jupyterhub.readthedocs.io/en/stable/howto/configuration/config-user-env.html#jupyter-environment-configuration-priority
c.Spawner.environment.update({'JUPYTER_PREFER_ENV_PATH': '0'})
+# use system certs path in Conda envs' OpenSSL (required for Nbgrader (and maybe other packages) if server uses enterprise CA)
+c.Spawner.environment.update({'SSL_CERT_DIR': '/etc/ssl/certs'})
+
c.Spawner.args = ['--KernelSpecManager.ensure_native_kernel=False']
c.JupyterHub.spawner_class = 'systemdspawner.SystemdSpawner'
diff --git a/images/ananke-nbgrader/Containerfile b/images/ananke-nbgrader/Containerfile
index 028e6ea..390a02f 100644
--- a/images/ananke-nbgrader/Containerfile
+++ b/images/ananke-nbgrader/Containerfile
@@ -6,7 +6,7 @@ FROM ananke-base
# install nbgrader and kore
RUN bash -c "source /opt/conda/etc/profile.d/conda.sh; \
conda activate jhub; \
- pip install git+https://github.com/jeflem/nbgrader.git@ananke_0.5; \
+ pip install git+https://github.com/jeflem/nbgrader.git@ananke_0.6; \
pip install git+https://github.com/marcwit/kore-extension.git@ananke_0.5; \
pip cache purge"
# pip install git+https://github.com/jupyter/nbgrader.git@v$NBGRADER_VERSION"
diff --git a/images/ananke-nbgrader/assets/kore/kore_jhub_config.py b/images/ananke-nbgrader/assets/kore/kore_jhub_config.py
index 89f97f0..881de03 100644
--- a/images/ananke-nbgrader/assets/kore/kore_jhub_config.py
+++ b/images/ananke-nbgrader/assets/kore/kore_jhub_config.py
@@ -393,8 +393,8 @@ async def run_as_user(username: str, cmd: str, args: list[str]) -> None:
})
groups.update({
- f'formgrade-{course_id}': [grader_user],
- f'nbgrader-{course_id}': [grader_user]
+ f'formgrade-{course_id}': {'users': [grader_user]},
+ f'nbgrader-{course_id}': {'users': [grader_user]}
})
needs_restart = True
@@ -402,7 +402,7 @@ async def run_as_user(username: str, cmd: str, args: list[str]) -> None:
# Add instructor to course.
group_name = f'formgrade-{course_id}'
if username not in groups[group_name]:
- groups[group_name].append(username)
+ groups[group_name]['users'].append(username)
needs_restart = True
# Write new services, roles, groups to config file.
@@ -478,11 +478,11 @@ async def run_as_user(username: str, cmd: str, args: list[str]) -> None:
c.JupyterHub.services.append({
'name': 'kore',
'url': 'http://127.0.0.1:10001',
- 'display': False, # Will be changed once refactored.
+ 'display': True,
'api_token': kore_token,
'oauth_no_confirm': True,
'cwd': '/opt/kore',
- 'command': ['gunicorn', '--workers=2', '--bind=localhost:10001', 'kore:app']
+ 'command': ['gunicorn', '--workers=1', '--bind=localhost:10001', 'kore:app']
})
c.JupyterHub.load_roles.append({
'name': 'kore_role',
diff --git a/images/ananke-nbgrader/assets/kore/models/lti_file_reader.py b/images/ananke-nbgrader/assets/kore/models/lti_file_reader.py
deleted file mode 100755
index f4bc337..0000000
--- a/images/ananke-nbgrader/assets/kore/models/lti_file_reader.py
+++ /dev/null
@@ -1,81 +0,0 @@
-import hashlib
-import json
-import logging
-from typing import Optional
-
-from flask import Response
-
-
-class LTIFileReader:
- def __init__(self, user_name: str, file_path: str) -> None:
- self.user_name: str = user_name
- self.file_path: str = file_path
- self.lti_state: Optional[dict] = None
- self.read_success: bool = False
- self.course_id: Optional[str] = None
- self.course_title: Optional[str] = None
- self.grader_user: Optional[str] = None
- self.parse_success: bool = False
- self.error_response: Optional[Response] = None
- self.preflight_error: Optional[str] = None
-
- def read_file(self) -> None:
- try:
- with open(self.file_path, 'r') as file:
- # Check if the file has a JSON extension otherwise raise an error.
- if not self.file_path.lower().endswith('.json'):
- raise ValueError('File is not a JSON file.')
-
- # Read content of file and set boolean value to True if reading is successful.
- self.lti_state = json.load(file)
- self.read_success = True
- except FileNotFoundError:
- logging.error(f'LTI state file for user {self.user_name} not found!')
- self.error_response = Response(response=json.dumps({'message': 'FileNotFoundError'}), status=404)
- self.preflight_error = 'LTI file for current user could not be found. Contact administrator or see logs for more details.'
- except PermissionError:
- logging.error(f'LTI state file for user {self.user_name} not readable!')
- self.error_response = Response(response=json.dumps({'message': 'PermissionError'}), status=400)
- self.preflight_error = 'LTI file for current user could not be read. Contact administrator or see logs for more details.'
- except ValueError:
- logging.error(f'LTI state file is not a JSON!')
- self.error_response = Response(response=json.dumps({'message': 'ValueError'}), status=400)
- self.preflight_error = 'LTI file for current user is not a JSON file. Contact administrator or see logs for more details.'
- except OSError:
- logging.error(f'LTI state file for user {self.user_name} can not be opened!')
- self.error_response = Response(response=json.dumps({'message': 'OSError'}), status=500)
- self.preflight_error = 'LTI file for current user could not be opened. Contact administrator or see logs for more details.'
-
- def extract_values(self) -> None:
- if not self.read_success:
- return
-
- if not isinstance(self.lti_state, dict):
- logging.error('LTI state content is not a dict!')
- self.error_response = Response(response=json.dumps({'message': 'ContentError'}), status=500)
- return
-
- # Extract course id, course title and grader username from lti state.
- deployment_id = self.lti_state.get('https://purl.imsglobal.org/spec/lti/claim/deployment_id', '0')
- resource_link_id = self.lti_state.get('https://purl.imsglobal.org/spec/lti/claim/resource_link').get('id')
- resource_link_title = self.lti_state.get('https://purl.imsglobal.org/spec/lti/claim/resource_link').get('title')
- context_title = self.lti_state.get('https://purl.imsglobal.org/spec/lti/claim/context', {}).get('title')
-
- h = hashlib.shake_256(f'{deployment_id}-{resource_link_id}'.encode())
- self.course_id = 'c-' + h.hexdigest(8)
- self.grader_user = self.course_id[0:32]
-
- if resource_link_title and context_title:
- course_title = f'{context_title} - {resource_link_title}'
- elif resource_link_title:
- course_title = resource_link_title
- elif context_title:
- course_title = context_title
- else:
- course_title = 'No title available'
- self.course_title = f'{course_title} ({self.course_id})'.replace('\'', '')
-
- logging.debug(f'Course ID: {self.course_id}')
- logging.debug(f'Course title: {self.course_title}')
- logging.debug(f'Grader user: {self.grader_user}')
- self.parse_success = True
diff --git a/images/ananke-nbgrader/assets/kore/routes/home_route.py b/images/ananke-nbgrader/assets/kore/routes/home_route.py
index 3f88a44..2302ab0 100644
--- a/images/ananke-nbgrader/assets/kore/routes/home_route.py
+++ b/images/ananke-nbgrader/assets/kore/routes/home_route.py
@@ -1,3 +1,4 @@
+import json
import logging
from flask import make_response
@@ -7,8 +8,6 @@
from flask import session as flask_session
from jupyterhub.services.auth import HubOAuth
-from models.lti_file_reader import LTIFileReader
-
home_bp = Blueprint('home', __name__)
@@ -41,24 +40,27 @@ def wrapper(*args, **kwargs):
@home_bp.route('', methods=['GET'])
def home(user):
# Defining object containing relevant information for the HTML template.
- tmpl_data = {}
+ data = {}
config_loader = current_app.config['CONFIG_LOADER']
if config_loader.preflight_error:
- tmpl_data['preflight_error'] = config_loader.preflight_error
+ data['preflight_error'] = config_loader.preflight_error
user_name = user.get('name')
- tmpl_data['user'] = user_name
-
- logging.info(f'User {user_name} is accessing home page of kore service.')
+ data['user'] = user_name
- lti_file_reader: LTIFileReader = LTIFileReader(user_name=user_name, file_path=f'runtime/lti_{user_name}.json')
- lti_file_reader.read_file()
+ try:
+ with open(f'runtime/lti_{user_name}.json', 'r') as file:
+ lti_state = json.load(file)
+ data['url'] = f"{lti_state['https://purl.imsglobal.org/spec/lti/claim/target_link_uri']}/services/kore"
+ except (FileNotFoundError, PermissionError, ValueError, OSError):
+ logging.error(f'LTI state file for user {user_name} not found!')
- if not lti_file_reader.read_success:
- if 'preflight_error' in tmpl_data:
- tmpl_data['preflight_error'] += f'\n{lti_file_reader.preflight_error}'
+ if 'preflight_error' in data:
+ data['preflight_error'] += f'\nError while reading lti state file.'
else:
- tmpl_data['preflight_error'] = lti_file_reader.preflight_error
+ data['preflight_error'] = 'Error while reading lti state file.'
+
+ logging.info(f'User {user_name} is accessing home page of kore service.')
- return render_template('home.html.jinja', data=tmpl_data)
+ return render_template('home.html.jinja', data=data)
diff --git a/images/ananke-nbgrader/assets/kore/templates/base.html.jinja b/images/ananke-nbgrader/assets/kore/templates/base.html.jinja
index dd0b7ff..3537259 100644
--- a/images/ananke-nbgrader/assets/kore/templates/base.html.jinja
+++ b/images/ananke-nbgrader/assets/kore/templates/base.html.jinja
@@ -64,7 +64,7 @@
diff --git a/images/ananke-nbgrader/assets/kore/templates/home.html.jinja b/images/ananke-nbgrader/assets/kore/templates/home.html.jinja
index 9db1b1d..75e62bc 100644
--- a/images/ananke-nbgrader/assets/kore/templates/home.html.jinja
+++ b/images/ananke-nbgrader/assets/kore/templates/home.html.jinja
@@ -2,97 +2,179 @@
{% block head %}
{% endblock %}
{% block body %}
-
+
There was an error during the loading of the page. Please see the logs or contact your administrator for further information.
@@ -102,9 +184,31 @@ window.onload = async function() {
+
+ Select active course:
+
+
+
+ Please choose the course that you wish to set as your active course.
+ The active course serves as the reference point for all operations you perform within this platform.
+
+
+
Import Operations: When importing assignments, problems, or other content, the active course will automatically be selected as the destination where these items will be copied.
+
Management Operations: For tasks such as backup, reset, or delete, the active course is the one that will be affected. This means the selected course will be backed up, reset to its initial state, or removed entirely, depending on the operation you choose.
+
+
+ Make sure to select the correct course as your active course to ensure that all operations are performed on the intended course.
+
+
+
For importing a single problem statement from a running course or from the backup folder.
- The problem statement will be available within the 'imported_problems' assignment.
This assignment will be created as well as the folder within your home directory.
    The name of the imported problem statement is identical to that of the original one, except that the time of the import is added.
    This is because problem statements must have unique names.