#!/bin/bash
#
# SLURM batch script: run RELION's MPI CTF estimation (CTFFIND3 wrapper)
# over micrographs 1-515. Submit with: sbatch <this-file>
#
# NOTE: all #SBATCH directives must appear before the first executable
# command, one per line, or sbatch silently ignores them.
#SBATCH -n 100                        # total number of tasks (MPI ranks)
#SBATCH --ntasks-per-node=2           # tasks per node
#SBATCH --mem-per-cpu=2G              # memory per CPU core
#SBATCH --time=0-03:00:00             # wall time (allow +/- 30 min per micrograph/cpu)
###SBATCH --exclusive                 # uncomment to request the node exclusively
#SBATCH -J ctf0001-0515               # job name
#SBATCH --output=%j.out               # stdout -> <jobid>.out
#SBATCH --error=%j.err                # stderr -> <jobid>.err
#SBATCH --mail-user=eladbi@gmail.com  # email address for notifications
#SBATCH --mail-type=ALL               # mail on begin/end/fail

# Log job context for post-mortem debugging.
echo "Starting at $(date)"
echo "Job name: $SLURM_JOB_NAME JobID: $SLURM_JOB_ID"
echo "Running on hosts: $SLURM_NODELIST"
echo "Running on $SLURM_NNODES nodes."
echo "Running on $SLURM_NPROCS processors."
echo "Current working directory is $(pwd)"

# Load the MPI stack and RELION (site-specific module tool).
setpkgs -a openmpi_1.8.4
setpkgs -a relion_1.4

# Echo commands while the main work runs, for a readable log.
set -v
# CTF estimation: CS/HT/AmpCnst/XMAG/DStep are microscope parameters;
# ResMin/ResMax and dFMin/dFMax bound the resolution and defocus search.
mpirun -np 100 "$(which relion_run_ctffind_mpi)" \
  --i "micrographs_1_515.star" \
  --o "micrographs_1_515_ctf.star" \
  --ctfWin -1 \
  --CS 2.2 \
  --HT 300 \
  --AmpCnst 0.1 \
  --XMAG 39683 \
  --DStep 5 \
  --Box 512 \
  --ResMin 100 \
  --ResMax 4 \
  --dFMin 5000 \
  --dFMax 50000 \
  --FStep 500 \
  --dAst 0 \
  --ctffind_exe /programs/i386-linux/ctf/20130307/bin/ctffind3.exe
# Capture mpirun's status BEFORE any other command (set +v would clobber $?).
rc=$?
set +v

echo "Program finished with exit code $rc at: $(date)"