From 416f4f5bdd40e726594347e0f4b7f407e05669b6 Mon Sep 17 00:00:00 2001
From: ale
Date: Sat, 20 May 2023 19:44:09 +0200
Subject: [PATCH] update

---
 Dockerfile         | 18 +++++++++++-------
 docker-compose.yml | 23 +++++++++++------------
 2 files changed, 22 insertions(+), 19 deletions(-)

diff --git a/Dockerfile b/Dockerfile
index 5221261..aebf092 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -3,8 +3,8 @@
 
 # xxx move from cudagl to cuda, for runtime?
 
-ARG CUDA_DEVEL="11.0.3-devel-ubuntu20.04"
-ARG CUDA_RUNTIME="11.0.3-runtime-ubuntu20.04"
+ARG CUDA_DEVEL="12.1.1-devel-ubuntu22.04"
+ARG CUDA_RUNTIME="12.1.1-runtime-ubuntu22.04"
 
 # runtime starts as 2.3GB image
 # devel is runtime+ and is 4.2GB image
@@ -16,7 +16,7 @@ FROM nvidia/cuda:$CUDA_DEVEL AS builder
 ARG CUDA_ARCH_75=75
 ARG CUDA_ARCH_61=61
 
-ARG FFMPEG_TGZ=https://ffmpeg.org/releases/ffmpeg-5.0.tar.gz
+ARG FFMPEG_TGZ=https://ffmpeg.org/releases/ffmpeg-5.1.3.tar.gz
 
 # create an ffmpeg (w/ shared libs) that can utilize nvidia GPU
 
@@ -41,7 +41,7 @@ RUN apt-get -yqq update && apt-get -yqq install \
 
 # install nvidia headers (got moved out of ffmpeg)
 RUN git clone https://git.videolan.org/git/ffmpeg/nv-codec-headers && \
-    cd nv-codec-headers && git checkout origin/sdk/11.0 && \
+    cd nv-codec-headers && git checkout n12.0.16.0 && \
     sudo make install && \
     cd ..
 
@@ -69,7 +69,8 @@ RUN sed -i -e "s/gencode arch=compute_..,code=sm_../gencode arch=compute_${CUDA_
     --enable-cuda --enable-cuda-sdk --enable-cuda-nvcc --enable-nvenc --enable-cuvid --enable-libnpp \
     --extra-cflags=-I/usr/local/cuda/include --extra-ldflags=-L/usr/local/cuda/lib64 && \
     make -j4 && \
-    cp ffmpeg /tmp/ffmpeg-turing
+    cp ffmpeg /tmp/ffmpeg-turing && \
+    cp ffplay /tmp/ffplay-turing
 
 RUN sed -i -e "s/gencode arch=compute_..,code=sm_../gencode arch=compute_${CUDA_ARCH_61},code=sm_${CUDA_ARCH_61}/" ./configure\
     && make distclean || echo && \
@@ -85,7 +86,8 @@ RUN sed -i -e "s/gencode arch=compute_..,code=sm_../gencode arch=compute_${CUDA_
     --enable-cuda --enable-cuda-sdk --enable-cuda-nvcc --enable-nvenc --enable-cuvid --enable-libnpp \
     --extra-cflags=-I/usr/local/cuda/include --extra-ldflags=-L/usr/local/cuda/lib64 && \
     make -j4 && \
-    cp ffmpeg /tmp/ffmpeg-pascal
+    cp ffmpeg /tmp/ffmpeg-pascal && \
+    cp ffplay /tmp/ffplay-pascal
 
 # now collect up all the .so files we'll need for the runtime, into new lib/ subdir
 RUN BIN=ffmpeg && mkdir lib && ( \
@@ -96,9 +98,11 @@ RUN BIN=ffmpeg && mkdir lib && ( \
 
 # switch to the smaller "runtime" baseline.
 # now we just keep the executable(s) and .so files they need and chuck everything else above.
-FROM nvidia/cudagl:$CUDA_RUNTIME
+FROM nvidia/cuda:$CUDA_RUNTIME
 COPY --from=builder /tmp/ffmpeg-pascal /ffmpeg-pascal
 COPY --from=builder /tmp/ffmpeg-turing /ffmpeg
+COPY --from=builder /tmp/ffplay-pascal /ffplay-pascal
+COPY --from=builder /tmp/ffplay-turing /ffplay
 COPY --from=builder /tmp/ffmpeg/lib/ /fflib
 
 # @see cuda-runtime.sh for where this small image of three cuda runtime .so files came from
diff --git a/docker-compose.yml b/docker-compose.yml
index 92940c7..9bd7e74 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -1,4 +1,4 @@
-version: '2'
+version: '3'
 
 services:
   nvidia-ffmpeg:
@@ -6,19 +6,18 @@ services:
     image: nvidia-ffmpeg
     container_name: nvidia-ffmpeg
     restart: "no"
+    deploy:
+      resources:
+        reservations:
+          devices:
+            - driver: nvidia
+              capabilities: [gpu]
+    environment:
+      - NVIDIA_VISIBLE_DEVICES=all
     entrypoint:
       - /bin/sleep
       - infinity
     volumes:
-      - /usr/lib/x86_64-linux-gnu/nvidia/current:/cuda:ro
       - ./folder:/folder
-    environment:
-      - NVIDIA_VISIBLE_DEVICES=all
-    devices:
-      - /dev/nvidia0
-      - /dev/nvidiactl
-      - /dev/nvidia-uvm
-      - /dev/nvidia-uvm-tools
-    cap_add:
-      - IPC_LOCK
-    network_mode: host
\ No newline at end of file
+      - /usr/lib/x86_64-linux-gnu/nvidia/current:/cuda:ro
+    network_mode: host
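
A minimal sketch of how the rebuilt image could be exercised after this patch, assuming only what the diff shows: the nvidia-ffmpeg service, the Turing binary at /ffmpeg, and the ./folder bind mount. input.mp4 and output.mp4 are placeholder file names, and LD_LIBRARY_PATH may need to point at /fflib if the image does not already set it.

    # build and start the container; the entrypoint just idles on `sleep infinity`
    docker compose up -d --build nvidia-ffmpeg

    # GPU-accelerated transcode with the Turing build copied to /ffmpeg
    # (requires a working NVIDIA driver on the host and the compose GPU reservation above)
    docker compose exec nvidia-ffmpeg /ffmpeg -hwaccel cuda -hwaccel_output_format cuda \
        -i /folder/input.mp4 -c:v h264_nvenc /folder/output.mp4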