
First MPI tests seem OK

Anthony Scemama 2017-11-22 17:07:16 +01:00
parent 6f68bc69e9
commit 334f165ff1
10 changed files with 171 additions and 14 deletions

config/gfortran_mpi.cfg Normal file

@@ -0,0 +1,62 @@
# Common flags
##############
#
# -ffree-line-length-none : Needed for IRPF90 which produces long lines
# -lblas -llapack : Link with libblas and liblapack libraries provided by the system
# -I . : Include the current directory (Mandatory)
#
# --ninja : Allow the use of ninja (Mandatory)
# --align=32 : Align all provided arrays on a 32-byte boundary
#
#
[COMMON]
FC : mpif90 -ffree-line-length-none -I .
LAPACK_LIB : -lblas -llapack
IRPF90 : irpf90
IRPF90_FLAGS : --ninja --align=32 -DMPI

# Global options
################
#
# 1 : Activate
# 0 : Deactivate
#
[OPTION]
MODE : OPT ; [ OPT | PROFILE | DEBUG ] : Chooses the section below
CACHE : 1 ; Enable cache_compile.py
OPENMP : 1 ; Append OpenMP flags

# Optimization flags
####################
#
# -Ofast : Disregard strict standards compliance. Enables all -O3 optimizations.
# It also enables optimizations that are not valid
# for all standard-compliant programs. It turns on
# -ffast-math and the Fortran-specific
# -fno-protect-parens and -fstack-arrays.
[OPT]
FCFLAGS : -Ofast -msse4.2

# Profiling flags
#################
#
[PROFILE]
FC : -p -g
FCFLAGS : -Ofast -msse4.2

# Debugging flags
#################
#
# -fcheck=all : Checks uninitialized variables, array subscripts, etc...
# -g : Extra debugging information
#
[DEBUG]
FCFLAGS : -fcheck=all -g

# OpenMP flags
#################
#
[OPENMP]
FC : -fopenmp
IRPF90_FLAGS : --openmp
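For reference, this is how a config in this format can be read with Python's standard configparser (a minimal sketch; the actual quantum_package build scripts may consume these files differently, and the path is assumed):

from configparser import ConfigParser

# ';' introduces the inline comments on the [OPTION] lines,
# '#' the comment blocks between sections.
cfg = ConfigParser(inline_comment_prefixes=(';', '#'))
cfg.read('config/gfortran_mpi.cfg')

mode = cfg.get('OPTION', 'MODE')        # 'OPT', 'PROFILE' or 'DEBUG'
fc = cfg.get('COMMON', 'FC')            # 'mpif90 -ffree-line-length-none -I .'
fcflags = cfg.get(mode, 'FCFLAGS')      # flags of the selected section
if cfg.get('OPTION', 'OPENMP') == '1':  # append the OpenMP flags when enabled
    fc += ' ' + cfg.get('OPENMP', 'FC')
print(fc, fcflags)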


@@ -16,7 +16,7 @@ IRPF90_FLAGS : --ninja --align=32 -DMPI
 #
 # 1 : Activate
 # 0 : Deactivate
-#
+#
 [OPTION]
 MODE : OPT ; [ OPT | PROFILE | DEBUG ] : Chooses the section below
 CACHE : 1 ; Enable cache_compile.py

configure vendored

@@ -66,7 +66,6 @@ d_dependency = {
 "python": [],
 "ninja": ["g++", "python"],
 "make": [],
-"gpi2": ["g++", "make"],
 "p_graphviz": ["python"],
 "bats": []
 }
@@ -142,11 +141,6 @@ f77zmq = Info(
 description=' F77-ZeroMQ',
 default_path=join(QP_ROOT_LIB, "libf77zmq.a") )
-gpi2 = Info(
-url='https://github.com/cc-hpc-itwm/GPI-2/archive/v1.3.0.tar.gz',
-description=' GPI-2',
-default_path=join(QP_ROOT_LIB64, "libGPI2.a") )
 p_graphviz = Info(
 url='https://github.com/xflr6/graphviz/archive/master.tar.gz',
 description=' Python library for graphviz',
@@ -161,7 +155,7 @@ d_info = dict()
 for m in ["ocaml", "m4", "curl", "zlib", "patch", "irpf90", "docopt",
 "resultsFile", "ninja", "emsl", "ezfio", "p_graphviz",
-"zeromq", "f77zmq", "bats", "gpi2"]:
+"zeromq", "f77zmq", "bats"]:
 exec ("d_info['{0}']={0}".format(m))
@@ -497,6 +491,9 @@ def create_ninja_and_rc(l_installed):
 '# Choose the correct network interface',
 '# export QP_NIC=ib0',
 '# export QP_NIC=eth0',
+'',
+'# Choose how to start MPI processes',
+'# export QP_MPIRUN="mpirun"',
 ''
 ]


@@ -4,6 +4,7 @@ open Qputils
 (* Environment variables :
 QP_PREFIX=gdb : to run gdb (or valgrind, or whatever)
+QP_MPIRUN=mpirun : to run MPI slaves
 QP_TASK_DEBUG=1 : debug task server
 *)
@@ -15,7 +16,7 @@ let print_list () =
 let () =
 Random.self_init ()
-let run slave exe ezfio_file =
+let run slave mpi exe ezfio_file =
 (** Check availability of the ports *)
@@ -115,13 +116,18 @@ let run slave exe ezfio_file =
 match Sys.getenv "QP_PREFIX" with
 | Some x -> x^" "
 | None -> ""
+and mpirun =
+match (mpi, Sys.getenv "QP_MPIRUN") with
+| (true, None) -> "mpirun "
+| (true, Some x) -> x^" "
+| _ -> ""
 and exe =
 match (List.find ~f:(fun (x,_) -> x = exe) executables) with
 | Some (_,x) -> x^" "
 | None -> assert false
 in
 let exit_code =
-match (Sys.command (prefix^exe^ezfio_file)) with
+match (Sys.command (mpirun^prefix^exe^ezfio_file)) with
 | 0 -> 0
 | i -> (Printf.printf "Program exited with code %d.\n%!" i; i)
 in
@@ -141,7 +147,9 @@ let spec =
 let open Command.Spec in
 empty
 +> flag "slave" no_arg
-~doc:(" Needed for slave tasks")
+~doc:(" Required for slave tasks")
++> flag "mpi" no_arg
+~doc:(" Required for MPI slaves")
 +> anon ("executable" %: string)
 +> anon ("ezfio_file" %: string)
 ;;
@@ -159,8 +167,8 @@ Executes a Quantum Package binary file among these:\n\n"
 )
 )
 spec
-(fun slave exe ezfio_file () ->
-run slave exe ezfio_file
+(fun slave mpi exe ezfio_file () ->
+run slave mpi exe ezfio_file
 )
 |> Command.run ~version: Git.sha1 ~build_info: Git.message
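Taken together, these changes make qp_run compose the final command as "<QP_MPIRUN> <QP_PREFIX> <executable> <ezfio_file>", with QP_MPIRUN falling back to plain "mpirun" when --mpi is given. A standalone Python sketch of the same dispatch (the function name and defaults are illustrative, not part of quantum_package):

import os
import subprocess

def launch(exe, ezfio_file, mpi=False):
    # Optional debugger/profiler prefix, e.g. QP_PREFIX=gdb
    prefix = os.environ.get('QP_PREFIX', '')
    # With --mpi, prepend QP_MPIRUN, defaulting to plain 'mpirun'
    mpirun = os.environ.get('QP_MPIRUN', 'mpirun') if mpi else ''
    parts = [p for p in (mpirun, prefix, exe, ezfio_file) if p]
    code = subprocess.call(' '.join(parts), shell=True)
    if code != 0:
        print('Program exited with code %d.' % code)
    return code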

plugins/MPI/.gitignore vendored Normal file

@@ -0,0 +1,5 @@
IRPF90_temp/
IRPF90_man/
irpf90.make
irpf90_entities
tags


@@ -0,0 +1 @@
Utils

plugins/MPI/README.rst Normal file

@@ -0,0 +1,12 @@
===
MPI
===

Needed Modules
==============
.. Do not edit this section. It was auto-generated
.. by the `update_README.py` script.
Documentation
=============
.. Do not edit this section. It was auto-generated
.. by the `update_README.py` script.

plugins/MPI/mpi.irp.f Normal file

@@ -0,0 +1,60 @@
BEGIN_PROVIDER [ logical, mpi_initialized ]
  implicit none
  BEGIN_DOC
  ! Always true. Initializes MPI on first use.
  END_DOC
  IRP_IF MPI
    include 'mpif.h'
    integer :: ierr
    call mpi_init(ierr)
    if (ierr /= 0) then
      print *, 'ierr = ', ierr
      stop 'Unable to initialize MPI'
    endif
  IRP_ENDIF
  mpi_initialized = .True.
END_PROVIDER

BEGIN_PROVIDER [ integer, mpi_rank ]
&BEGIN_PROVIDER [ integer, mpi_size ]
  implicit none
  BEGIN_DOC
  ! Rank of the MPI process, and total number of MPI processes
  END_DOC
  IRP_IF MPI
    include 'mpif.h'
    PROVIDE mpi_initialized
    integer :: ierr
    call MPI_COMM_RANK (MPI_COMM_WORLD, mpi_rank, ierr)
    if (ierr /= 0) then
      print *, 'ierr = ', ierr
      stop 'Unable to get MPI rank'
    endif
    call MPI_COMM_SIZE (MPI_COMM_WORLD, mpi_size, ierr)
    if (ierr /= 0) then
      print *, 'ierr = ', ierr
      stop 'Unable to get MPI size'
    endif
  IRP_ELSE
    mpi_rank = 0
    mpi_size = 1
  IRP_ENDIF
  ASSERT (mpi_rank >= 0)
  ASSERT (mpi_rank < mpi_size)
END_PROVIDER

BEGIN_PROVIDER [ logical, mpi_master ]
  implicit none
  BEGIN_DOC
  ! True if the MPI rank is zero
  END_DOC
  mpi_master = (mpi_rank == 0)
END_PROVIDER
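For readers more at home in Python than IRPF90, the same rank/size/master logic with mpi4py (a third-party MPI binding, not used by quantum_package); as with the provider above, initialization happens implicitly on first use, here at import time:

from mpi4py import MPI  # MPI_Init is called on import

comm = MPI.COMM_WORLD
mpi_rank = comm.Get_rank()    # 0 <= mpi_rank < mpi_size
mpi_size = comm.Get_size()
mpi_master = (mpi_rank == 0)  # rank zero acts as the master
print('rank, size, master =', mpi_rank, mpi_size, mpi_master)

Run with, e.g., "mpirun -np 4 python mpi_test.py" (script name hypothetical).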


@@ -0,0 +1,12 @@
program MPI
  implicit none
  BEGIN_DOC
  ! MPI test program
  END_DOC
  integer :: ierr
  print *, 'hello world'
  print *, 'rank, size, master = ', mpi_rank, mpi_size, mpi_master
  call MPI_FINALIZE(ierr)
  print *, ierr
end


@@ -280,7 +280,7 @@ subroutine get_inverse(A,LDA,m,C,LDC)
 integer :: info,lwork
 integer, allocatable :: ipiv(:)
 double precision,allocatable :: work(:)
-allocate (ipiv(ao_num), work(ao_num*ao_num))
+allocate (ipiv(m), work(m*m))
 lwork = size(work)
 C(1:m,1:m) = A(1:m,1:m)
 call dgetrf(m,m,C,size(C,1),ipiv,info)
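The one-line change above fixes the workspace allocation in get_inverse: ipiv and work were sized with the global ao_num instead of the actual matrix dimension m. For illustration, the same factorize-then-invert sequence through SciPy's LAPACK wrappers (a sketch, not quantum_package code; SciPy sizes the work array internally, whereas the Fortran code must allocate ipiv(m) and work(m*m) itself):

import numpy as np
from scipy.linalg import lapack

def get_inverse(a):
    # LU factorization (dgetrf); ipiv has length m, matching the fix above
    lu, ipiv, info = lapack.dgetrf(a)
    assert info == 0, 'dgetrf failed: info=%d' % info
    # Back-substitution to the inverse (dgetri)
    inv_a, info = lapack.dgetri(lu, ipiv)
    assert info == 0, 'dgetri failed: info=%d' % info
    return inv_a

m = 4
a = np.random.rand(m, m) + m * np.eye(m)  # well-conditioned test matrix
assert np.allclose(get_inverse(a) @ a, np.eye(m))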