GPU Example job — example Slurm batch script requesting GPU resources

#!/bin/bash
#
#SBATCH --job-name=test_job 
#SBATCH --account=gpu_gres               # to access gpu resources
#SBATCH --partition=gpu
#SBATCH --nodes=1                        # request to run job on single node
#SBATCH --ntasks=10                      # request 10 CPUs (t3gpu01/02: balance between CPU and GPU : 5CPU/1GPU)
#SBATCH --gres=gpu:2                     # request two GPUs
#SBATCH --mem=4000M                      # memory (for the total job)
#SBATCH --time=0-00:30                   # time  in format DD-HH:MM

# Abort on errors, unset variables, and failures anywhere in a pipeline,
# so a broken setup step cannot silently fall through to the job body.
set -euo pipefail

# You should use a node's local /scratch space for creating files and
# intensive I/O. Never use /tmp, and make sure that you also
# set TMPDIR, so that your programs create temporary files there.
JOB_SCRATCH="/scratch/${USER}/${SLURM_JOB_ID}"
mkdir -p -- "$JOB_SCRATCH"
export TMPDIR="$JOB_SCRATCH"

# Clean the scratch directory on ANY exit path (normal completion, an
# error under 'set -e', or job script termination) — not only when the
# job body finishes successfully. The ':?' guard makes 'rm -rf' abort
# instead of expanding to a dangerous path if the variable is empty.
cleanup() { rm -rf -- "${JOB_SCRATCH:?}"; }
trap cleanup EXIT

# The GPUs that are allocated to you can be listed in the environment
# variable CUDA_VISIBLE_DEVICES. Your program should use this to target
# the correct GPUs! (Quoted + defaulted so 'set -u' does not abort if
# Slurm did not export it.)
echo "CUDA_VISIBLE_DEVICES : ${CUDA_VISIBLE_DEVICES:-}"

# your program
python script.py

# NOTE: no explicit 'rm' here — the EXIT trap above removes "$JOB_SCRATCH".