Working on GPI2

Anthony Scemama 2017-07-22 13:33:28 +02:00
parent b4b1d23f64
commit f3ceac1fd9
8 changed files with 13 additions and 119 deletions

View File

@@ -0,0 +1 @@
+Full_CI_ZMQ GPI2

View File

@@ -1,8 +1,9 @@
-===
-MPI
-===
+================
+Full_CI_ZMQ_GPI2
+================
-Providers for MPI programs.
+GPI2 Slave for Full_CI with ZMQ. There should be one instance of the slave
+per compute node.
 Needed Modules
 ==============
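With one slave instance per compute node, a slave's GASPI rank identifies its node. A minimal sketch of how an instance could report its place in that layout, assuming the GASPI Fortran module shipped with GPI-2 (gaspi_proc_init, gaspi_proc_rank, gaspi_proc_num, gaspi_proc_term); the program name is illustrative:

  program gpi2_slave_layout
    ! Sketch only: initialize GPI-2 and report which slave instance this is.
    ! Assumes the GASPI Fortran interface module distributed with GPI-2.
    use GASPI
    implicit none
    integer(gaspi_return_t) :: ret
    integer(gaspi_rank_t)   :: rank, nranks
    ret = gaspi_proc_init(GASPI_BLOCK)      ! blocking initialization
    if (ret /= GASPI_SUCCESS) stop 'gaspi_proc_init failed'
    ret = gaspi_proc_rank(rank)             ! one slave per node: rank indexes the node
    ret = gaspi_proc_num(nranks)            ! total number of slave instances
    print *, 'GPI2 slave', rank, 'of', nranks
    ret = gaspi_proc_term(GASPI_BLOCK)      ! clean shutdown
  end program gpi2_slave_layout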

View File

@@ -14,7 +14,7 @@ end
 subroutine provide_everything
   PROVIDE H_apply_buffer_allocated mo_bielec_integrals_in_map psi_det_generators psi_coef_generators psi_det_sorted_bit psi_selectors n_det_generators n_states generators_bitmask zmq_context
-  PROVIDE pt2_e0_denominator mo_tot_num N_int fragment_count MPI_Initialized
+  PROVIDE pt2_e0_denominator mo_tot_num N_int fragment_count GASPI_is_Initialized
 end

 subroutine run_wf
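GASPI_is_Initialized is referenced here but defined elsewhere. A minimal IRPF90 sketch of such a provider, modeled on the MPI_Initialized provider removed further down and assuming gaspi_proc_init from the GPI-2 Fortran interface (the actual provider body in the GPI2 module may differ):

  BEGIN_PROVIDER [ logical, GASPI_is_Initialized ]
    use GASPI                              ! GPI-2 Fortran interface (assumed available)
    implicit none
    BEGIN_DOC
    ! True once gaspi_proc_init has been called by this process
    END_DOC
    integer(gaspi_return_t) :: ret
    ret = gaspi_proc_init(GASPI_BLOCK)     ! blocking initialization of the GPI-2 runtime
    if (ret /= GASPI_SUCCESS) then
      print *, 'GPI-2 failed to initialize'
      stop -1
    endif
    GASPI_is_Initialized = .True.
  END_PROVIDER

With such a provider, the PROVIDE statement in provide_everything guarantees the GPI-2 runtime is up before any slave work starts.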
@@ -51,10 +51,10 @@ subroutine run_wf
   ! ---------
   print *, 'Selection'
-  if (is_mpi_master) then
+  if (is_gaspi_master) then
     call zmq_get_psi(zmq_to_qp_run_socket,1,energy,N_states)
   endif
-  call mpi_bcast_psi()
+  call broadcast_wf(energy)
   !$OMP PARALLEL PRIVATE(i)
   i = omp_get_thread_num()
@@ -68,10 +68,10 @@ subroutine run_wf
   ! --------
   print *, 'Davidson'
-  if (is_mpi_master) then
+  if (is_gaspi_master) then
     call zmq_get_psi(zmq_to_qp_run_socket,1,energy,N_states)
   endif
-  call mpi_bcast_psi()
+  call broadcast_wf(energy)
   call omp_set_nested(.True.)
   call davidson_slave_tcp(0)
   call omp_set_nested(.False.)
@@ -83,10 +83,10 @@ subroutine run_wf
   ! ---
   print *, 'PT2'
-  if (is_mpi_master) then
+  if (is_gaspi_master) then
     call zmq_get_psi(zmq_to_qp_run_socket,1,energy,N_states)
   endif
-  call mpi_bcast_psi()
+  call broadcast_wf(energy)
   logical :: lstop
   lstop = .False.

View File

@@ -1 +0,0 @@
-Full_CI_ZMQ MPI

View File

@@ -1,14 +0,0 @@
-===============
-Full_CI_ZMQ_MPI
-===============
-MPI Slave for Full_CI with ZMQ
-Needed Modules
-==============
-.. Do not edit this section. It was auto-generated
-.. by the `update_README.py` script.
-Documentation
-=============
-.. Do not edit this section. It was auto-generated
-.. by the `update_README.py` script.

View File

@@ -1 +0,0 @@
-Bitmask

View File

@@ -1,22 +0,0 @@
-subroutine mpi_bcast_psi()
-  use f77_zmq
-  implicit none
-  BEGIN_DOC
-  ! Broadcast the wave function coming from the qp_run scheduler
-  END_DOC
-  integer :: ierr
-  IRP_IF MPI
-    include 'mpif.h'
-    call MPI_BCast(N_states, 1, MPI_INTEGER, 0, MPI_COMM_WORLD, ierr)
-    call MPI_BCast(N_det, 1, MPI_INTEGER, 0, MPI_COMM_WORLD, ierr)
-    call MPI_BCast(psi_det_size, 1, MPI_INTEGER, 0, MPI_COMM_WORLD, ierr)
-    TOUCH psi_det_size N_det N_states
-    call MPI_BCast(psi_det, N_det, MPI_INTEGER8, 0, MPI_COMM_WORLD, ierr)
-    call MPI_BCast(psi_coef, psi_det_size*N_states, MPI_DOUBLE_PRECISION, 0, MPI_COMM_WORLD, ierr)
-  IRP_ENDIF
-end

View File

@@ -1,70 +0,0 @@
- BEGIN_PROVIDER [ logical, MPI_Initialized ]
-&BEGIN_PROVIDER [ logical, has_mpi ]
-  implicit none
-  BEGIN_DOC
-  ! This is true when MPI_Init has been called
-  END_DOC
-  IRP_IF MPI
-    integer :: ierr
-    call MPI_Init(ierr)
-    if (ierr /= 0) then
-      print *, ierr
-      print *, 'MPI failed to initialize'
-      stop -1
-    endif
-  IRP_ENDIF
-  MPI_Initialized = .True.
-END_PROVIDER
-
- BEGIN_PROVIDER [ integer, MPI_rank ]
-&BEGIN_PROVIDER [ integer, MPI_size ]
-&BEGIN_PROVIDER [ logical, is_MPI_master ]
-  implicit none
-  BEGIN_DOC
-  ! Usual MPI variables
-  END_DOC
-  PROVIDE MPI_Initialized
-  IRP_IF MPI
-    include 'mpif.h'
-    integer :: ierr
-    call mpi_comm_size(MPI_COMM_WORLD, MPI_size, ierr)
-    if (ierr /= 0) then
-      print *, ierr
-      print *, 'Unable to get MPI_size'
-      stop -1
-    endif
-    call mpi_comm_rank(MPI_COMM_WORLD, MPI_rank, ierr)
-    if (ierr /= 0) then
-      print *, ierr
-      print *, 'Unable to get MPI_rank'
-      stop -1
-    endif
-    is_MPI_master = (MPI_rank == 0)
-  IRP_ELSE
-    MPI_rank = 0
-    MPI_size = 1
-    is_MPI_master = .True.
-  IRP_ENDIF
-END_PROVIDER
-
-subroutine qp_mpi_finalize()
-  implicit none
-  PROVIDE MPI_Initialized
-  IRP_IF MPI
-    include 'mpif.h'
-    integer :: ierr
-    call MPI_Finalize(ierr)
-    if (ierr /= 0) then
-      print *, ierr
-      print *, 'Unable to finalize MPI'
-      stop -1
-    endif
-  IRP_ENDIF
-end subroutine
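The run_wf hunks above use is_gaspi_master, whose provider is also outside this diff. A hedged sketch of GASPI counterparts to the providers deleted here, assuming gaspi_proc_rank, gaspi_proc_num and gaspi_proc_term from the GPI-2 Fortran interface; apart from is_gaspi_master, all names are illustrative:

   BEGIN_PROVIDER [ integer, GASPI_rank ]
  &BEGIN_PROVIDER [ integer, GASPI_size ]
  &BEGIN_PROVIDER [ logical, is_gaspi_master ]
    use GASPI                            ! GPI-2 Fortran interface (assumed available)
    implicit none
    BEGIN_DOC
    ! Rank and size of the GASPI job ; the master is rank 0
    END_DOC
    integer(gaspi_return_t) :: ret
    integer(gaspi_rank_t)   :: rank, nranks
    PROVIDE GASPI_is_Initialized
    ret = gaspi_proc_rank(rank)          ! rank of this slave (one per compute node)
    ret = gaspi_proc_num(nranks)         ! number of slaves in the job
    GASPI_rank = rank
    GASPI_size = nranks
    is_gaspi_master = (GASPI_rank == 0)
  END_PROVIDER

  subroutine qp_gaspi_finalize()
    use GASPI
    implicit none
    integer(gaspi_return_t) :: ret
    PROVIDE GASPI_is_Initialized
    ret = gaspi_proc_term(GASPI_BLOCK)   ! shut down the GPI-2 runtime
    if (ret /= GASPI_SUCCESS) then
      print *, 'Unable to finalize GPI-2'
      stop -1
    endif
  end subroutine

In this sketch is_gaspi_master plays the role is_MPI_master played before: only rank 0 talks to the qp_run scheduler through zmq_get_psi, and the wave function is then propagated to the other nodes by broadcast_wf.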