Thanks
Then, to install CUDA and cuDNN, I made a single Dockerfile that combines all three of the original Dockerfiles (L4T, CUDA, cuDNN), as follows:
FROM balenalib/jetson-nano-ubuntu:bionic
ARG DRIVER_PACK="Jetson-210_Linux_R32.2.1_aarch64.tbz2"
ARG POWER_MODE=0000
COPY packages/$DRIVER_PACK .
RUN apt-get update && \
    apt-get install -y --no-install-recommends bzip2 ca-certificates curl lbzip2 sudo htop && \
    apt-get install -y zip git python3 python3-pip python3-numpy cmake systemd && \
    tar -xpj --overwrite -f ./${DRIVER_PACK} && \
    sed -i '/.*tar -I lbzip2 -xpmf ${LDK_NV_TEGRA_DIR}\/config.tbz2.*/c\tar -I lbzip2 -xpm --overwrite -f ${LDK_NV_TEGRA_DIR}\/config.tbz2' ./Linux_for_Tegra/apply_binaries.sh && \
    ./Linux_for_Tegra/apply_binaries.sh -r / && \
    rm -rf ./Linux_for_Tegra && \
    rm ./${DRIVER_PACK} && \
    apt-get clean && \
    rm -rf /var/lib/apt/lists/* && \
    pip3 install jetson-stats
ENV LD_LIBRARY_PATH=/usr/lib/aarch64-linux-gnu/tegra:/usr/lib/aarch64-linux-gnu/tegra-egl:${LD_LIBRARY_PATH}
RUN ln -s /usr/lib/aarch64-linux-gnu/tegra/libnvidia-ptxjitcompiler.so.32.1.0 /usr/lib/aarch64-linux-gnu/tegra/libnvidia-ptxjitcompiler.so && \
    ln -s /usr/lib/aarch64-linux-gnu/tegra/libnvidia-ptxjitcompiler.so.32.1.0 /usr/lib/aarch64-linux-gnu/tegra/libnvidia-ptxjitcompiler.so.1 && \
    ln -sf /usr/lib/aarch64-linux-gnu/tegra/libGL.so /usr/lib/aarch64-linux-gnu/libGL.so && \
    ln -s /usr/lib/aarch64-linux-gnu/libcuda.so /usr/lib/aarch64-linux-gnu/libcuda.so.1 && \
    ln -sf /usr/lib/aarch64-linux-gnu/tegra-egl/libEGL.so /usr/lib/aarch64-linux-gnu/libEGL.so
RUN ln -s /etc/nvpmodel/nvpmodel_t210_jetson-nano.conf /etc/nvpmodel.conf && \
    ln -s /etc/systemd/system/nvpmodel.service /etc/systemd/system/multi-user.target.wants/nvpmodel.service && \
    mkdir /var/lib/nvpmodel && \
    echo "/etc/nvpmodel.conf" > /var/lib/nvpmodel/conf_file_path && \
    echo "pmode:${POWER_MODE} fmode:fanNull" > /var/lib/nvpmodel/status
ARG CUDA_TOOLKIT="cuda-repo-l4t-10-0-local-10.0.326"
ARG CUDA_TOOLKIT_PKG="${CUDA_TOOLKIT}_1.0-1_arm64.deb"
COPY packages/$CUDA_TOOLKIT_PKG .
RUN apt-get update && \
    apt-get install -y --no-install-recommends curl && \
    dpkg --force-all -i ${CUDA_TOOLKIT_PKG} && \
    rm ${CUDA_TOOLKIT_PKG} && \
    apt-key add var/cuda-repo-*-local*/*.pub && \
    apt-get update && \
    apt-get install -y --allow-downgrades cuda-toolkit-10-0 libgomp1 libfreeimage-dev libopenmpi-dev openmpi-bin && \
    dpkg --purge ${CUDA_TOOLKIT} && \
    apt-get clean && \
    rm -rf /var/lib/apt/lists/*
ENV CUDA_HOME=/usr/local/cuda
ENV LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/cuda/lib64:/usr/local/cuda/extras/CUPTI/lib64
ENV PATH=$PATH:$CUDA_HOME/bin
ENV CUDNN_VERSION 7.5.0.56
ENV CUDNN_PKG_VERSION=${CUDNN_VERSION}-1
LABEL com.nvidia.cudnn.version="${CUDNN_VERSION}"
COPY packages/libcudnn7_$CUDNN_VERSION-1+cuda10.0_arm64.deb .
COPY packages/libcudnn7-dev_$CUDNN_VERSION-1+cuda10.0_arm64.deb .
COPY packages/libcudnn7-doc_$CUDNN_VERSION-1+cuda10.0_arm64.deb .
RUN dpkg -i libcudnn7_$CUDNN_VERSION-1+cuda10.0_arm64.deb && \
    dpkg -i libcudnn7-dev_$CUDNN_VERSION-1+cuda10.0_arm64.deb && \
    dpkg -i libcudnn7-doc_$CUDNN_VERSION-1+cuda10.0_arm64.deb && \
    rm libcudnn7_$CUDNN_VERSION-1+cuda10.0_arm64.deb && \
    rm libcudnn7-dev_$CUDNN_VERSION-1+cuda10.0_arm64.deb && \
    rm libcudnn7-doc_$CUDNN_VERSION-1+cuda10.0_arm64.deb
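For reference, I build this with the driver pack, the CUDA repo .deb, and the three cuDNN .debs downloaded into a packages/ directory next to the Dockerfile (the image tag below is just the name I use locally):

docker build -t jetson-nano-l4t-cuda-cudnn .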
In the Dockerfile above, I omitted the header and footer lines of the three original Dockerfiles, that is to say:
FROM bouwe/jetson-nano-l4t:latest
ENTRYPOINT ["/bin/bash"]
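After building, I check the result on the Nano roughly like this (again, the image tag is just my local name):

docker run --rm -it jetson-nano-l4t-cuda-cudnn /bin/bash -c "nvcc --version && dpkg -l | grep libcudnn7"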
Is this right?
Best regards
Artem