#!/bin/bash
# Slurm job template. Submit with: sbatch <filename>
#
# NOTE: each #SBATCH directive must be on its own line, directly after the
# shebang and before any executable command, or sbatch silently ignores it.
#SBATCH -n 4                           # number of processor cores (tasks)
#SBATCH --ntasks-per-node=1            # tasks per node
#SBATCH --mem-per-cpu=1G               # memory per CPU core
#SBATCH --time=0-00:05:00              # wall time (d-hh:mm:ss)
###SBATCH --exclusive                  # want the node exclusively
#SBATCH -J template                    # job name
#SBATCH --output=%jtemplate.out
#SBATCH --error=%j.err
#SBATCH --mail-user=eladbi@gmail.com   # email address
#SBATCH --mail-type=ALL
#SBATCH --no-requeue                   # prevent Slurm restart after node fail !!!

# Job banner: report where and under what allocation we are running.
echo "Starting at $(date)"
echo "Job name: $SLURM_JOB_NAME JobID: $SLURM_JOB_ID"
echo "Running on hosts: $SLURM_NODELIST"
echo "Running on $SLURM_NNODES nodes."
echo "Running on $SLURM_NPROCS processors."
# FIX: the original echoed "$time", which neither Slurm nor this script
# defines (it printed empty). Report the requested wall time instead;
# keep this in sync with the --time directive above.
echo "Running for 0-00:05:00 d-h:m:s"
echo "Current working directory is $(pwd)"
# Change star name to get number of particles.
# NOTE(review): plain `wc` prints lines/words/bytes; if only the particle
# count is wanted, `wc -l` is likely intended — TODO confirm, and note a
# .star file also contains header lines that inflate the line count.
echo "# of particles" "$(wc particles_autopick_class2d2.star)"

# Load the MPI stack and RELION (site-specific module tool).
setpkgs -a openmpi_1.8.4
setpkgs -a relion_1.4

### change the number of -np and insert command ###
set -v
# Placeholder command: replace `echo relion` with the real relion invocation.
mpirun -np 4 echo relion
set +v

echo "Program finished with exit code $? at: $(date)"