# Slurm test jobs
Minimal `sbatch` test jobs that just `sleep`, used to compare baseline vs. funded account/QoS scheduling behavior.
Some clusters require an explicit account for every job. If you see `Invalid account or account/partition combination specified`, add a valid `--account` (and possibly a compatible `--partition`).

For a funded project, add `#SBATCH --account=proj_a` to the script or submit with `sbatch --account=proj_a script.sh`.
To check queue wait after submission:
```bash
sacct -j <jobid> --format=JobID,JobName,Partition,Account,QOS,Submit,Start,Elapsed,State
```
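To turn those timestamps into a single number, a sketch like the following works (assumes GNU `date`, as on typical Linux login nodes; `12345` is a placeholder job ID):

```bash
# Queue wait = Start - Submit, in seconds
jobid=12345   # replace with your job ID
read -r submit start < <(sacct -j "$jobid" -X -nP --format=Submit,Start | tr '|' ' ')
if [[ -n "$start" && "$start" != "Unknown" ]]; then
  echo "Queue wait: $(( $(date -d "$start" +%s) - $(date -d "$submit" +%s) )) s"
else
  echo "Job $jobid has not started yet"
fi
```

Pending jobs report `Start=Unknown`, hence the guard.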
To troubleshoot account/partition validation:

```bash
# See your associations (account + allowed partition + qos)
sacctmgr -nP show assoc where user=$USER format=Account,Partition,QOS
# See cluster partitions
scontrol show partition | grep -E 'PartitionName=|AllowAccounts='
# Submit with explicit account (and partition when required)
sbatch --account=<valid_account> --partition=<valid_partition> script.sh
```
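To run the actual comparison, submit the same script twice, once under the default association and once under the funded one, then inspect both with `sacct`. A sketch, assuming the "Tiny and fast" script below is saved as `tiny.sh` (the filename is arbitrary) and `proj_a` is your funded account:

```bash
# Submit identical jobs under the default and funded associations
base=$(sbatch --parsable tiny.sh)                     # default account/QoS
funded=$(sbatch --parsable --account=proj_a tiny.sh)  # funded account
echo "baseline=$base funded=$funded"
# Once both have started, compare queue waits side by side
sacct -j "$base,$funded" -X --format=JobID,Account,QOS,Submit,Start,Elapsed,State
```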
## Tiny and fast

```bash
#!/bin/bash
#SBATCH --job-name=tiny-2m
#SBATCH --time=00:02:00
#SBATCH --cpus-per-task=1
#SBATCH --mem=256M
date
sleep 120
date
```

## Short small job

```bash
#!/bin/bash
#SBATCH --job-name=small-10m
#SBATCH --time=00:10:00
#SBATCH --cpus-per-task=1
#SBATCH --mem=1G
date
sleep 600
date
```

## Medium job

```bash
#!/bin/bash
#SBATCH --job-name=medium-1h
#SBATCH --time=01:00:00
#SBATCH --cpus-per-task=2
#SBATCH --mem=4G
date
sleep 3600
date
```

## Long single-node job

```bash
#!/bin/bash
#SBATCH --job-name=long-8h
#SBATCH --time=08:00:00
#SBATCH --cpus-per-task=4
#SBATCH --mem=8G
date
sleep 28800
date
```

## 8-hour full-node exclusive job

```bash
#!/bin/bash
#SBATCH --job-name=fullnode-8h
#SBATCH --time=08:00:00
#SBATCH --nodes=1
#SBATCH --exclusive
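# --exclusive reserves the whole node, even though the lines below request only 1 task, 1 CPU, and 1G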
#SBATCH --ntasks-per-node=1
#SBATCH --cpus-per-task=1
#SBATCH --mem=1G
echo "Job ID: $SLURM_JOB_ID"
echo "Node:"
scontrol show hostnames "$SLURM_JOB_NODELIST"
date
sleep 28800
date
```

## Large CPU job

```bash
#!/bin/bash
#SBATCH --job-name=large-cpu-2h
#SBATCH --time=02:00:00
#SBATCH --cpus-per-task=32
#SBATCH --mem=32G
#SBATCH --account=proj_a
date
sleep 7200
date
```

## 4-node job

Default account:

```bash
#!/bin/bash
#SBATCH --job-name=def-4
#SBATCH --time=08:00:00
#SBATCH --nodes=4
#SBATCH --ntasks-per-node=1
#SBATCH --cpus-per-task=1
#SBATCH --mem=1G
echo "Job ID: $SLURM_JOB_ID"
echo "Allocated nodes:"
scontrol show hostnames "$SLURM_JOB_NODELIST"
srun hostname | sort
date
sleep 28800
date
```

Same shape with the funded account:

```bash
#!/bin/bash
#SBATCH --job-name=funded-4
#SBATCH --time=08:00:00
#SBATCH --nodes=4
#SBATCH --ntasks-per-node=1
#SBATCH --cpus-per-task=1
#SBATCH --mem=1G
#SBATCH --account=proj_a
echo "Job ID: $SLURM_JOB_ID"
echo "Allocated nodes:"
scontrol show hostnames "$SLURM_JOB_NODELIST"
srun hostname | sort
date
sleep 28800
date
```

## Large memory job

```bash
#!/bin/bash
#SBATCH --job-name=large-mem-2h
#SBATCH --time=02:00:00
#SBATCH --cpus-per-task=8
#SBATCH --mem=128G
date
sleep 7200
date
```

## Same shape, explicit partition test

```bash
#!/bin/bash
#SBATCH --job-name=nodes-check-30m
#SBATCH --time=00:30:00
#SBATCH --cpus-per-task=4
#SBATCH --mem=8G
#SBATCH --partition=nodes
date
sleep 1800
date
```