add CODER_INNER_USR_LIB_DIR override
johnstcn committed Mar 6, 2025
commit 5d58aeb1e76c983fd960e6305c1d4427fe234a3c
6 changes: 4 additions & 2 deletions README.md
@@ -21,6 +21,7 @@ The environment variables can be used to configure various aspects of the inner
| `CODER_DOCKER_BRIDGE_CIDR` | The bridge CIDR to start the Docker daemon with. | false |
| `CODER_MOUNTS` | A list of mounts to mount into the inner container. Mounts default to `rw`. Ex: `CODER_MOUNTS=/home/coder:/home/coder,/var/run/mysecret:/var/run/mysecret:ro` | false |
| `CODER_USR_LIB_DIR` | The mountpoint of the host `/usr/lib` directory. Only required when using GPUs. | false |
| `CODER_INNER_USR_LIB_DIR` | The inner `/usr/lib` mountpoint. This is automatically detected based on `/etc/os-release` in the inner image, but may optionally be overridden. | false |
| `CODER_ADD_TUN` | If `CODER_ADD_TUN=true` add a TUN device to the inner container. | false |
| `CODER_ADD_FUSE` | If `CODER_ADD_FUSE=true` add a FUSE device to the inner container. | false |
| `CODER_ADD_GPU` | If `CODER_ADD_GPU=true` add detected GPUs and related files to the inner container. Requires setting `CODER_USR_LIB_DIR` and mounting in the host's `/usr/lib/` directory. | false |
@@ -43,7 +44,7 @@ It is not possible to develop `envbox` effectively using a containerized environ

If a login is required to pull images from a private repository, create a secret following the instructions from the [Kubernetes Documentation](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/#create-a-secret-by-providing-credentials-on-the-command-line) as follows:

```
```shell
kubectl -n <coder namespace> create secret docker-registry regcred \
--docker-server=<your-registry-server> \
--docker-username=<your-name> \
@@ -53,7 +54,7 @@ kubectl -n <coder namespace> create secret docker-registry regcred \

Then reference the secret in your template as follows:

```
```hcl
env {
name = "CODER_IMAGE_PULL_SECRET"
value_from {
@@ -98,6 +99,7 @@ Here's an example Docker command to run a GPU-enabled workload in Envbox. Note t
1) The NVidia container runtime must be installed on the host (`--runtime=nvidia`).
2) `CODER_ADD_GPU=true` must be set to enable GPU-specific functionality.
3) When `CODER_ADD_GPU` is set, you must also set `CODER_USR_LIB_DIR` to a location where the relevant host directory has been mounted inside the outer container. In the example below, this is `/usr/lib/x86_64-linux-gnu` on the underlying host. It is mounted into the container under the path `/var/coder/usr/lib`. We then set `CODER_USR_LIB_DIR=/var/coder/usr/lib`. The actual location inside the container is not important **as long as it does not overwrite any pre-existing directories containing system libraries**.
4) The location where the libraries are mounted in the inner container is determined by the distribution ID in the inner container's `/etc/os-release`. If the automatically detected location is incorrect, you can override it via `CODER_INNER_USR_LIB_DIR` (see the sketch after the note below).

> Note: this step is required when user workloads need libraries from the underlying host that are not added by the container runtime.
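
As a rough sketch (the host library path and environment variables below are borrowed from the integration test in this change; the envbox image and remaining options are placeholders for your own setup, and `CODER_INNER_USR_LIB_DIR` only needs to be set when the auto-detected inner location is wrong):

```shell
# Illustrative only: GPU-related flags mirror the integration test in this change.
# Replace the placeholders with your usual envbox image and options
# (inner image, agent settings, etc.).
docker run -it --rm --privileged \
  --runtime=nvidia \
  --gpus=all \
  -v /usr/lib/x86_64-linux-gnu:/var/coder/usr/lib \
  --env CODER_ADD_GPU=true \
  --env CODER_USR_LIB_DIR=/var/coder/usr/lib \
  --env CODER_INNER_USR_LIB_DIR=/usr/lib/coder \
  <your usual envbox options> <envbox image>
```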

12 changes: 11 additions & 1 deletion cli/docker.go
@@ -98,6 +98,7 @@ var (
EnvMemory = "CODER_MEMORY"
EnvAddGPU = "CODER_ADD_GPU"
EnvUsrLibDir = "CODER_USR_LIB_DIR"
EnvInnerUsrLibDir = "CODER_INNER_USR_LIB_DIR"
EnvDockerConfig = "CODER_DOCKER_CONFIG"
EnvDebug = "CODER_DEBUG"
EnvDisableIDMappedMount = "CODER_DISABLE_IDMAPPED_MOUNT"
@@ -135,6 +136,7 @@ type flags struct {
boostrapScript string
containerMounts string
hostUsrLibDir string
innerUsrLibDir string
dockerConfig string
cpus int
memory int
@@ -370,6 +372,7 @@ func dockerCmd() *cobra.Command {
cliflag.StringVarP(cmd.Flags(), &flags.boostrapScript, "boostrap-script", "", EnvBootstrap, "", "The script to use to bootstrap the container. This should typically install and start the agent.")
cliflag.StringVarP(cmd.Flags(), &flags.containerMounts, "mounts", "", EnvMounts, "", "Comma separated list of mounts in the form of '<source>:<target>[:options]' (e.g. /var/lib/docker:/var/lib/docker:ro,/usr/src:/usr/src).")
cliflag.StringVarP(cmd.Flags(), &flags.hostUsrLibDir, "usr-lib-dir", "", EnvUsrLibDir, "", "The host /usr/lib mountpoint. Used to detect GPU drivers to mount into inner container.")
cliflag.StringVarP(cmd.Flags(), &flags.innerUsrLibDir, "inner-usr-lib-dir", "", EnvInnerUsrLibDir, "", "The inner /usr/lib mountpoint. This is automatically detected based on /etc/os-release in the inner image, but may optionally be overridden.")
cliflag.StringVarP(cmd.Flags(), &flags.dockerConfig, "docker-config", "", EnvDockerConfig, "/root/.docker/config.json", "The path to the docker config to consult when pulling an image.")
cliflag.BoolVarP(cmd.Flags(), &flags.addTUN, "add-tun", "", EnvAddTun, false, "Add a TUN device to the inner container.")
cliflag.BoolVarP(cmd.Flags(), &flags.addFUSE, "add-fuse", "", EnvAddFuse, false, "Add a FUSE device to the inner container.")
@@ -616,6 +619,13 @@ func runDockerCVM(ctx context.Context, log slog.Logger, client dockerutil.Client
})
}

innerUsrLibDir := imgMeta.UsrLibDir()
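// Allow the user to override the auto-detected inner /usr/lib mountpoint
// via --inner-usr-lib-dir / CODER_INNER_USR_LIB_DIR.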
if flags.innerUsrLibDir != "" {
log.Info(ctx, "overriding auto-detected inner usr lib dir",
slog.F("before", innerUsrLibDir),
slog.F("after", flags.innerUsrLibDir))
innerUsrLibDir = flags.innerUsrLibDir
}
for _, bind := range binds {
// If the bind has a path that points to the host-mounted /usr/lib
// directory we need to remap it to the inner /usr/lib mountpoint in the container.
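// For example (illustrative values from the integration test): with
// CODER_USR_LIB_DIR=/var/coder/usr/lib and CODER_INNER_USR_LIB_DIR=/usr/lib/coder,
// a bind source of /var/coder/usr/lib/libnvidia-ml.so.1 is remapped to
// /usr/lib/coder/libnvidia-ml.so.1 below.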
@@ -624,7 +634,7 @@
mountpoint = filepath.Join(
// Note: we used to mount into /usr/lib, but this can change
// based on the distro inside the container.
imgMeta.UsrLibDir(),
innerUsrLibDir,
strings.TrimPrefix(mountpoint, strings.TrimSuffix(flags.hostUsrLibDir, "/")),
)
}
21 changes: 21 additions & 0 deletions integration/gpu_test.go
@@ -63,6 +63,27 @@ func TestDocker_Nvidia(t *testing.T) {
_, err := execContainerCmd(ctx, t, ctID, "docker", "exec", "workspace_cvm", "nvidia-smi")
require.NoError(t, err, "failed to run nvidia-smi in the inner container")
})

t.Run("InnerUsrLibDirOverride", func(t *testing.T) {
t.Parallel()
ctx, cancel := context.WithCancel(context.Background())
t.Cleanup(cancel)

// Start the envbox container.
ctID := startEnvboxCmd(ctx, t, integrationtest.UbuntuImage, "root",
"-v", "/usr/lib/x86_64-linux-gnu:/var/coder/usr/lib",
"--env", "CODER_ADD_GPU=true",
"--env", "CODER_USR_LIB_DIR=/var/coder/usr/lib",
"--env", "CODER_INNER_USR_LIB_DIR=/usr/lib/coder",
"--runtime=nvidia",
"--gpus=all",
)

// Assert that the host libraries are visible at the overridden path inside the inner container.
out, err := execContainerCmd(ctx, t, ctID, "docker", "exec", "workspace_cvm", "ls", "-l", "/usr/lib/coder")
require.NoError(t, err, "inner usr lib dir override failed")
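// The overridden directory should now contain GPU-related libraries from the host.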
require.Regexp(t, `(?i)(libgl|nvidia|vulkan|cuda)`, out)
})
}

// dockerRuntimes returns the list of container runtimes available on the host.