*** qm_ewald.f	2006-04-03 16:35:55.000000000 -0700
--- qm_ewald.f	2006-11-02 08:35:38.000000000 -0800
***************
*** 58,69 ****
  !Local
    integer :: kx,ky,kz,ksy,ksz,ksq
!   integer :: mpi_division, mpi_totkq_count
    integer :: ier
  #ifdef MPI
- #include "parallel.h"
  #include "mpif.h"
!   integer :: i, istatus, istartend(2)
  #endif
  
  !Calculate the total number of kspace vectors
--- 58,71 ----
  !Local
    integer :: kx,ky,kz,ksy,ksz,ksq
!   integer :: mpi_division
    integer :: ier
  #ifdef MPI
  #include "mpif.h"
!   integer :: i, istartend(2)
!
! !PART OF WORKAROUND FOR BUGGY INTEL COMPILER
!   integer, allocatable, dimension(:) :: gather_array
!
  #endif
  
  !Calculate the total number of kspace vectors
***************
*** 89,137 ****
    end do
  
  !Now we need to allocate enough memory for the kvectors and the kvector exponential tables.
! if (totkq > 0) then
!    mpi_division = (totkq + (qmmm_mpi%numthreads-1))/qmmm_mpi%numthreads
!    qmmm_mpi%kvec_end = min(mpi_division*(qmmm_mpi%mytaskid+1),totkq)
!    qmmm_mpi%kvec_start = min(mpi_division*qmmm_mpi%mytaskid+1,totkq+1)
!    qmmm_mpi%totkq_count = qmmm_mpi%kvec_end-qmmm_mpi%kvec_start+1
  #ifdef MPI
!    if (qmmm_mpi%master) then
!       write (6,'(/a)') '|QMMM: KVector division among threads:'
!       write (6,'(a)')  '|QMMM:                    Start       End      Count'
!       !Already know my own.
!       write(6,'(a,i8,a,i8,a,i8,a)') &
!             '|QMMM: Thread(   0): ',qmmm_mpi%kvec_start,'->',qmmm_mpi%kvec_end, &
!             ' (',qmmm_mpi%kvec_end-qmmm_mpi%kvec_start+1,')'
!       do i = 1, sandersize-1
!          call mpi_recv(istartend,2,mpi_integer,i,0,commsander,istatus,ier)
!          write(6,'(a,i4,a,i8,a,i8,a,i8,a)') &
!                '|QMMM: Thread(',i,'): ',istartend(1),'->',istartend(2), &
!                ' (',istartend(2)-istartend(1)+1,')'
!       end do
!    else
!       !Send a message to the master with our counts in.
!       istartend(1) = qmmm_mpi%kvec_start
!       istartend(2) = qmmm_mpi%kvec_end
!       call mpi_send(istartend,2,mpi_integer,0,0,commsander,ier)
!    end if
! #endif
!    !Ultimately we only need to allocate these as the number of kvectors this cpu does.
!    allocate ( qmewald%kvec(qmmm_mpi%totkq_count),stat=ier )
!    REQUIRE(ier == 0)
!    allocate ( qmewald%dkvec(3,qmmm_mpi%totkq_count),stat=ier )
     REQUIRE(ier == 0)
!    allocate ( qmewald%dmkv(3,qmmm_mpi%totkq_count),stat=ier )
     REQUIRE(ier == 0)
!    if (.not. qmmm_nml%qm_pme) then
!       allocate ( qmewald%ktable(6,natom,qmmm_mpi%totkq_count), stat=ier )
!       REQUIRE(ier == 0)
!    end if
!    allocate ( qmewald%qmktable(6,nquant+nlink,qmmm_mpi%totkq_count), stat=ier )
     REQUIRE(ier == 0)
- else
-    call sander_bomb('qm_ewald_setup','INVALID NUMBER OF K VECTORS','Need totkq > 0')
  end if
  
  return
--- 91,164 ----
    end do
  
  !Now we need to allocate enough memory for the kvectors and the kvector exponential tables.
! if (totkq == 0) then
!    call sander_bomb('qm_ewald_setup','INVALID NUMBER OF K VECTORS','Need totkq > 0')
! end if
!
! mpi_division = (totkq + (qmmm_mpi%numthreads-1))/qmmm_mpi%numthreads
! qmmm_mpi%kvec_end = min(mpi_division*(qmmm_mpi%mytaskid+1),totkq)
! qmmm_mpi%kvec_start = min(mpi_division*qmmm_mpi%mytaskid+1,totkq+1)
! qmmm_mpi%totkq_count = qmmm_mpi%kvec_end-qmmm_mpi%kvec_start+1
!
  #ifdef MPI
! if (qmmm_mpi%master) then
!    write (6,'(/a)') '|QMMM: KVector division among threads:'
!    write (6,'(a)')  '|QMMM:                    Start       End      Count'
!
! !The FOLLOWING CODE SHOULD WORK FINE BUT THE INTEL COMPILER SEEMS TO MISCOMPILE
! !THE i=1,iminus loop DUE TO A COMPILER BUG. SO I HAVE WRITTEN A WORK AROUND BELOW.
! !    !Already know my own.
! !    write(6,'(a,i8,a,i8,a,i8,a)') &
! !          '|QMMM: Thread(   0): ',qmmm_mpi%kvec_start,'->',qmmm_mpi%kvec_end, &
! !          ' (',qmmm_mpi%kvec_end-qmmm_mpi%kvec_start+1,')'
! !    iminus = qmmm_mpi%numthreads-1
! !    do i = 1, iminus
! !       call mpi_recv(istartend,2,mpi_integer,i,i,qmmm_mpi%commqmmm,istatus,ier)
! !       write(6,'(a,i4,a,i8,a,i8,a,i8,a)') &
! !             '|QMMM: Thread(',i,'): ',istartend(1),'->',istartend(2), &
! !             ' (',istartend(2)-istartend(1)+1,')'
! !    end do
! ! else
! !    !Send a message to the master with our counts in.
! !    istartend(1) = qmmm_mpi%kvec_start
! !    istartend(2) = qmmm_mpi%kvec_end
! !    call mpi_ssend(istartend,2,mpi_integer,0,qmmm_mpi%mytaskid,qmmm_mpi%commqmmm,ier)
! ! end if
! !WORKAROUND FOR BUGGY INTEL COMPILER
!    allocate( gather_array(2*qmmm_mpi%numthreads), stat=ier)
     REQUIRE(ier == 0)
! end if
! istartend(1) = qmmm_mpi%kvec_start
! istartend(2) = qmmm_mpi%kvec_end
!
! call mpi_gather(istartend, 2, MPI_INTEGER, gather_array, 2, MPI_INTEGER, 0, qmmm_mpi%commqmmm, ier)
!
! if (qmmm_mpi%master) then
!    do i = 1, qmmm_mpi%numthreads
!       write(6,'(a,i4,a,i8,a,i8,a,i8,a)') &
!             '|QMMM: Thread(',i-1,'): ',gather_array(2*i-1),'->',gather_array(2*i), &
!             ' (',gather_array(2*i)-gather_array(2*i-1)+1,')'
!    end do
!    deallocate( gather_array, stat=ier)
     REQUIRE(ier == 0)
! end if
!
! #endif
! !We only allocate these as the number of kvectors this cpu does since totkq_count is
! !qmmm_mpi%kvec_end-qmmm_mpi%kvec_start+1
! allocate ( qmewald%kvec(qmmm_mpi%totkq_count),stat=ier )
! REQUIRE(ier == 0)
! allocate ( qmewald%dkvec(3,qmmm_mpi%totkq_count),stat=ier )
! REQUIRE(ier == 0)
! allocate ( qmewald%dmkv(3,qmmm_mpi%totkq_count),stat=ier )
! REQUIRE(ier == 0)
! if (.not. qmmm_nml%qm_pme) then
!    allocate ( qmewald%ktable(6,natom,qmmm_mpi%totkq_count), stat=ier )
     REQUIRE(ier == 0)
  end if
+ allocate ( qmewald%qmktable(6,nquant+nlink,qmmm_mpi%totkq_count), stat=ier )
+ REQUIRE(ier == 0)
  
  return
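
Note on the change: the patch retires the master/worker mpi_send/mpi_recv
reporting loop (which a buggy Intel compiler miscompiled) in favor of a single
collective call. Every rank contributes its (kvec_start, kvec_end) pair and
the master prints all of them from gather_array. The standalone program below
is a minimal sketch of that gather pattern only, not part of the patch; the
program name, the dummy slice bounds, and MPI_COMM_WORLD standing in for
qmmm_mpi%commqmmm are illustrative assumptions.

! kvec_report.f90 -- illustrative sketch only; not part of the patch.
! Each rank computes a slice [istartend(1), istartend(2)]; rank 0 gathers
! every slice with one mpi_gather call and prints them, the same pattern
! the patch uses to avoid the miscompiled send/recv loop.
program kvec_report
   implicit none
   include 'mpif.h'
   integer :: ier, myrank, nranks, i
   integer :: istartend(2)
   integer, allocatable, dimension(:) :: gather_array

   call mpi_init(ier)
   call mpi_comm_rank(MPI_COMM_WORLD, myrank, ier)
   call mpi_comm_size(MPI_COMM_WORLD, nranks, ier)

   !Dummy slice bounds standing in for kvec_start/kvec_end.
   istartend(1) = myrank*10 + 1
   istartend(2) = (myrank+1)*10

   !Only the root reads gather_array, but allocate a 1-element dummy on
   !the other ranks so every rank passes a valid buffer.
   if (myrank == 0) then
      allocate( gather_array(2*nranks) )
   else
      allocate( gather_array(1) )
   end if

   !One collective replaces nranks-1 matched send/recv pairs.
   call mpi_gather(istartend, 2, MPI_INTEGER, gather_array, 2, MPI_INTEGER, &
                   0, MPI_COMM_WORLD, ier)

   if (myrank == 0) then
      do i = 1, nranks
         write(6,'(a,i4,a,i8,a,i8,a,i8,a)') &
               '|QMMM: Thread(',i-1,'): ',gather_array(2*i-1),'->',gather_array(2*i), &
               ' (',gather_array(2*i)-gather_array(2*i-1)+1,')'
      end do
   end if

   deallocate( gather_array )
   call mpi_finalize(ier)
end program kvec_report

Built with something like "mpif90 kvec_report.f90" and run on N ranks, rank 0
prints one '|QMMM: Thread(n)' line per rank. mpi_gather stores the pairs in
rank order, which is why element 2*i-1 holds rank i-1's start and element 2*i
its end, exactly as the patched reporting loop indexes gather_array.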