TABLE OF CONTENTS
- ABINIT/m_xmpi
- m_wffile/xmpio_read_frm
- m_wffile/xmpio_write_frm
- m_xmpi/largetype_land_log
- m_xmpi/largetype_lor_log
- m_xmpi/largetype_sum_cplx
- m_xmpi/largetype_sum_dble
- m_xmpi/largetype_sum_dcplx
- m_xmpi/largetype_sum_int
- m_xmpi/largetype_sum_real
- m_xmpi/pool2d_free
- m_xmpi/pool2d_from_dims
- m_xmpi/sys_exit
- m_xmpi/xcomm_t
- m_xmpi/xmpi_abort
- m_xmpi/xmpi_barrier
- m_xmpi/xmpi_comm_create
- m_xmpi/xmpi_comm_free_0D
- m_xmpi/xmpi_comm_free_1D
- m_xmpi/xmpi_comm_free_2D
- m_xmpi/xmpi_comm_free_3D
- m_xmpi/xmpi_comm_group
- m_xmpi/xmpi_comm_multiple_of
- m_xmpi/xmpi_comm_rank
- m_xmpi/xmpi_comm_set_errhandler
- m_xmpi/xmpi_comm_size
- m_xmpi/xmpi_comm_split
- m_xmpi/xmpi_comm_translate_rank
- m_xmpi/xmpi_comm_translate_ranks
- m_xmpi/xmpi_distab_4D
- m_xmpi/xmpi_distrib_2d
- m_xmpi/xmpi_distrib_with_replicas
- m_xmpi/xmpi_end
- m_xmpi/xmpi_error_string
- m_xmpi/xmpi_get_unit
- m_xmpi/xmpi_group_free
- m_xmpi/xmpi_group_incl
- m_xmpi/xmpi_group_translate_ranks
- m_xmpi/xmpi_init
- m_xmpi/xmpi_iprobe
- m_xmpi/xmpi_largetype_create
- m_xmpi/xmpi_largetype_free
- m_xmpi/xmpi_name
- m_xmpi/xmpi_pool2d_t
- m_xmpi/xmpi_request_free
- m_xmpi/xmpi_requests_add
- m_xmpi/xmpi_set_inplace_operations
- m_xmpi/xmpi_show_info
- m_xmpi/xmpi_split_block
- m_xmpi/xmpi_split_cyclic
- m_xmpi/xmpi_split_list
- m_xmpi/xmpi_split_work2_i4b
- m_xmpi/xmpi_split_work2_i8b
- m_xmpi/xmpi_split_work_i4b
- m_xmpi/xmpi_subcomm
- m_xmpi/xmpi_wait
- m_xmpi/xmpi_waitall_1d
- m_xmpi/xmpi_waitall_2d
- m_xmpi/xmpio_check_frmarkers
- m_xmpi/xmpio_create_coldistr_from_fp3blocks
- m_xmpi/xmpio_create_coldistr_from_fpacked
- m_xmpi/xmpio_create_fherm_packed
- m_xmpi/xmpio_create_fstripes
- m_xmpi/xmpio_create_fsubarray_2D
- m_xmpi/xmpio_create_fsubarray_3D
- m_xmpi/xmpio_create_fsubarray_4D
- m_xmpi/xmpio_get_info_frm
- m_xmpi/xmpio_max_address
- m_xmpi/xmpio_read_dp
- m_xmpi/xmpio_read_int
- m_xmpi/xmpio_type_struct
- m_xmpi/xmpio_write_frmarkers
ABINIT/m_xmpi [ Modules ]
NAME
m_xmpi
FUNCTION
This module provides MPI named constants, tools for inquiring the MPI environment and a set of generic interfaces wrapping the most commonly used MPI primitives.
COPYRIGHT
Copyright (C) 2009-2024 ABINIT group (MG, MB, XG, YP, MT) This file is distributed under the terms of the GNU General Public License, see ~abinit/COPYING or http://www.gnu.org/copyleft/gpl.txt .
TODO
Get rid of xmpi_paral. Sequential code is the **exception**. Developers should write parallel code, or code that works with both MPI and sequential builds (thanks to the wrappers provided by this module).
SOURCE
#if defined HAVE_CONFIG_H
#include "config.h"
#endif

#include "abi_common.h"

module m_xmpi

 use defs_basis
 use m_profiling_abi
 use, intrinsic :: iso_c_binding
#ifdef HAVE_FC_ISO_FORTRAN_2008
 use ISO_FORTRAN_ENV, only : int16, int32, int64
#endif
#ifdef HAVE_MPI2
 use mpi
#endif
#ifdef FC_NAG
 use f90_unix_proc
#endif
 use m_clib, only : clib_ulimit_stack !, clib_getpid !, clib_usleep

 implicit none

 private
m_wffile/xmpio_read_frm [ Functions ]
[ Top ] [ m_wffile ] [ Functions ]
NAME
xmpio_read_frm
FUNCTION
Read the content of a single record marker in a FORTRAN file at a given offset using MPI-IO. The file pointer is modified according to the value of advance.
INPUTS
fh=MPI-IO file handler.
sc_mode= xmpio_single ==> read by the current proc.
         xmpio_collective ==> collective read.
offset=MPI-IO file pointer
[advance]=By default the routine moves the file pointer to the next record.
          advance=.FALSE. can be used so that the next read will continue picking information off of the current record.
OUTPUT
fmarker=Content of the Fortran record marker. mpierr= MPI error code
SIDE EFFECTS
offset= input: file pointer used to access the Fortran marker. output: new offset updated after the reading, depending on advance.
SOURCE
#ifdef HAVE_MPI_IO

subroutine xmpio_read_frm(fh, offset, sc_mode, fmarker, mpierr, advance)

!Arguments ------------------------------------
!scalars
 integer,intent(in) :: fh,sc_mode
 integer(XMPI_OFFSET_KIND),intent(inout) :: offset
 integer(XMPI_OFFSET_KIND),intent(out) :: fmarker
 integer,intent(out) :: mpierr
 logical,optional,intent(in) :: advance

!Local variables-------------------------------
!scalars
 integer :: bsize_frm,mpi_type_frm,myfh
 integer(kind=int16) :: delim_record2(1)
 integer(kind=int32) :: delim_record4(1)
 integer(kind=int64) :: delim_record8(1)
#if defined HAVE_FC_INT_QUAD
 integer*16 :: delim_record16(1)
#endif
 character(len=500) :: msg
!arrays
 integer :: statux(MPI_STATUS_SIZE)

!************************************************************************

!Workaround for XLF.
 myfh = fh

 bsize_frm    = xmpio_bsize_frm     ! Byte size of the Fortran record marker.
 mpi_type_frm = xmpio_mpi_type_frm  ! MPI type of the record marker.

 SELECT CASE (sc_mode)

 CASE (xmpio_single)

   if (bsize_frm==4) then
     call MPI_FILE_READ_AT(myfh,offset,delim_record4,1,mpi_type_frm,statux,mpierr)
     fmarker = delim_record4(1)
   else if (bsize_frm==8) then
     call MPI_FILE_READ_AT(myfh,offset,delim_record8,1,mpi_type_frm,statux,mpierr)
     fmarker = delim_record8(1)
#if defined HAVE_FC_INT_QUAD
   else if (bsize_frm==16) then
     call MPI_FILE_READ_AT(myfh,offset,delim_record16,1,mpi_type_frm,statux,mpierr)
     fmarker = delim_record16(1)
#endif
   else if (bsize_frm==2) then
     call MPI_FILE_READ_AT(myfh,offset,delim_record2,1,mpi_type_frm,statux,mpierr)
     fmarker = delim_record2(1)
   else
     call xmpi_abort(msg='Wrong record marker length!')
   end if

 CASE (xmpio_collective)

   if (bsize_frm==4) then
     call MPI_FILE_READ_AT_ALL(myfh,offset,delim_record4,1,mpi_type_frm,statux,mpierr)
     fmarker = delim_record4(1)
   else if (bsize_frm==8) then
     call MPI_FILE_READ_AT_ALL(myfh,offset,delim_record8,1,mpi_type_frm,statux,mpierr)
     fmarker = delim_record8(1)
#if defined HAVE_FC_INT_QUAD
   else if (bsize_frm==16) then
     call MPI_FILE_READ_AT_ALL(myfh,offset,delim_record16,1,mpi_type_frm,statux,mpierr)
     fmarker = delim_record16(1)
#endif
   else if (bsize_frm==2) then
     call MPI_FILE_READ_AT_ALL(myfh,offset,delim_record2,1,mpi_type_frm,statux,mpierr)
     fmarker = delim_record2(1)
   else
     call xmpi_abort(msg='Wrong record marker length!')
   end if

 CASE DEFAULT
   write(msg,"(a,i0)")" Wrong value for sc_mode: ",sc_mode
   call xmpi_abort(msg=msg)
 END SELECT

 if (PRESENT(advance)) then
   if (advance) then
     offset = offset + fmarker + 2*bsize_frm  ! Move the file pointer to the next record.
   else
     offset = offset + bsize_frm              ! Move the pointer after the marker.
   end if
 else
   offset = offset + fmarker + 2*bsize_frm
 end if

end subroutine xmpio_read_frm
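A minimal usage sketch (not part of the module): reading the sizes of the first nrec records of a Fortran binary file. The file handle comes from MPI_FILE_OPEN; the names print_record_sizes, path and nrec are hypothetical and introduced only for illustration.

 ! Hypothetical sketch built on xmpio_read_frm; assumes the MPI environment is up.
 subroutine print_record_sizes(path, nrec, comm)
  use mpi
  use m_xmpi
  implicit none
  character(len=*),intent(in) :: path
  integer,intent(in) :: nrec, comm
  integer :: fh, irec, mpierr
  integer(XMPI_OFFSET_KIND) :: offset, fmarker

  call MPI_FILE_OPEN(comm, path, MPI_MODE_RDONLY, MPI_INFO_NULL, fh, mpierr)
  offset = 0
  do irec=1,nrec
    ! Each call returns the record size in fmarker and, by default,
    ! advances offset to the beginning of the next record.
    call xmpio_read_frm(fh, offset, xmpio_collective, fmarker, mpierr)
    write(*,*) "record ", irec, " contains ", fmarker, " bytes"
  end do
  call MPI_FILE_CLOSE(fh, mpierr)
 end subroutine print_record_sizes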
m_wffile/xmpio_write_frm [ Functions ]
[ Top ] [ m_wffile ] [ Functions ]
NAME
xmpio_write_frm
FUNCTION
Write a single record marker in a FORTRAN file at a given offset using MPI-IO. The file pointer is modified according to the value of advance.
INPUTS
fh=MPI-IO file handler.
sc_mode= xmpio_single ==> write by the current proc.
         xmpio_collective ==> collective write.
fmarker=The content of the Fortran marker, i.e. the size of the record in bytes.
[advance]=By default the routine moves the file pointer to the next record.
          advance=.FALSE. can be used so that the next write will continue writing data on the current record.
OUTPUT
mpierr= error code
SIDE EFFECTS
offset= input: offset of the Fortran marker. output: new offset updated after the writing, depending on advance.
SOURCE
#ifdef HAVE_MPI_IO

subroutine xmpio_write_frm(fh, offset, sc_mode, fmarker, mpierr, advance)

!Arguments ------------------------------------
!scalars
 integer,intent(in) :: fh,sc_mode
 integer(XMPI_OFFSET_KIND),intent(in) :: fmarker
 integer(XMPI_OFFSET_KIND),intent(inout) :: offset
 integer,intent(out) :: mpierr
 logical,optional,intent(in) :: advance

!Local variables-------------------------------
!scalars
 integer :: myfh,bsize_frm,mpi_type_frm
 integer(XMPI_OFFSET_KIND) :: last
 integer(kind=int16) :: delim_record2
 integer(kind=int32) :: delim_record4
 integer(kind=int64) :: delim_record8
#if defined HAVE_FC_INT_QUAD
 integer*16 :: delim_record16
#endif
 character(len=500) :: msg
!arrays
 integer :: statux(MPI_STATUS_SIZE)

!************************************************************************

!Workaround for XLF
 myfh = fh

 bsize_frm    = xmpio_bsize_frm       ! Byte size of the Fortran record marker.
 mpi_type_frm = xmpio_mpi_type_frm    ! MPI type of the record marker.
 last = offset + bsize_frm + fmarker  ! Position of the end marker.

 SELECT CASE (sc_mode)

 CASE (xmpio_single)
   if (bsize_frm==4) then
     delim_record4 = fmarker
     call MPI_FILE_WRITE_AT(myfh,offset,[delim_record4],1,mpi_type_frm,statux,mpierr)
     call MPI_FILE_WRITE_AT(myfh,last,[delim_record4],1,mpi_type_frm,statux,mpierr)

   else if (bsize_frm==8) then
     delim_record8 = fmarker
     call MPI_FILE_WRITE_AT(myfh,offset,[delim_record8],1,mpi_type_frm,statux,mpierr)
     call MPI_FILE_WRITE_AT(myfh,last,[delim_record8],1,mpi_type_frm,statux,mpierr)
#if defined HAVE_FC_INT_QUAD
   else if (bsize_frm==16) then
     delim_record16 = fmarker
     call MPI_FILE_WRITE_AT(myfh,offset,[delim_record16],1,mpi_type_frm,statux,mpierr)
     call MPI_FILE_WRITE_AT(myfh,last,[delim_record16],1,mpi_type_frm,statux,mpierr)
#endif
   else if (bsize_frm==2) then
     delim_record2 = fmarker
     call MPI_FILE_WRITE_AT(myfh,offset,[delim_record2],1,mpi_type_frm,statux,mpierr)
     call MPI_FILE_WRITE_AT(myfh,last,[delim_record2],1,mpi_type_frm,statux,mpierr)
   else
     call xmpi_abort(msg='Wrong record marker length!')
   end if

 CASE (xmpio_collective)
   if (bsize_frm==4) then
     delim_record4 = fmarker
     call MPI_FILE_WRITE_AT_ALL(myfh,offset,[delim_record4],1,mpi_type_frm,statux,mpierr)
     call MPI_FILE_WRITE_AT_ALL(myfh,last,[delim_record4],1,mpi_type_frm,statux,mpierr)
   else if (bsize_frm==8) then
     delim_record8 = fmarker
     call MPI_FILE_WRITE_AT_ALL(myfh,offset,[delim_record8],1,mpi_type_frm,statux,mpierr)
     call MPI_FILE_WRITE_AT_ALL(myfh,last,[delim_record8],1,mpi_type_frm,statux,mpierr)
#if defined HAVE_FC_INT_QUAD
   else if (bsize_frm==16) then
     delim_record16 = fmarker
     call MPI_FILE_WRITE_AT_ALL(myfh,offset,[delim_record16],1,mpi_type_frm,statux,mpierr)
     call MPI_FILE_WRITE_AT_ALL(myfh,last,[delim_record16],1,mpi_type_frm,statux,mpierr)
#endif
   else if (bsize_frm==2) then
     delim_record2 = fmarker
     call MPI_FILE_WRITE_AT_ALL(myfh,offset,[delim_record2],1,mpi_type_frm,statux,mpierr)
     call MPI_FILE_WRITE_AT_ALL(myfh,last,[delim_record2],1,mpi_type_frm,statux,mpierr)
   else
     call xmpi_abort(msg='Wrong record marker length!')
   end if

 CASE DEFAULT
   write(msg,"(a,i0)")" Wrong value for sc_mode: ",sc_mode
   call xmpi_abort(msg=msg)
 END SELECT

 if (PRESENT(advance)) then
   if (advance) then
     offset = offset + fmarker + 2*bsize_frm  ! Move the file pointer to the next record.
   else
     offset = offset + bsize_frm              ! Move the pointer after the marker.
   end if
 else
   offset = offset + fmarker + 2*bsize_frm
 end if

end subroutine xmpio_write_frm
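A hedged sketch of how a writer might combine xmpio_write_frm with a raw MPI-IO write to emit one record of ndp double precision numbers that plain Fortran sequential I/O can re-read. write_dp_record and its arguments are illustrative names; xmpi_bsize_dp and xmpio_bsize_frm are the module variables used in the source above.

 ! Hypothetical sketch: one Fortran record = leading marker + payload + trailing marker.
 subroutine write_dp_record(fh, offset, buffer, ndp)
  use mpi
  use m_xmpi
  use defs_basis, only : dp
  implicit none
  integer,intent(in) :: fh, ndp
  integer(XMPI_OFFSET_KIND),intent(inout) :: offset
  real(dp),intent(in) :: buffer(ndp)
  integer :: mpierr, statux(MPI_STATUS_SIZE)
  integer(XMPI_OFFSET_KIND) :: fmarker

  fmarker = ndp * xmpi_bsize_dp   ! Record size in bytes.
  ! Write both markers; advance=.FALSE. leaves offset right after the leading marker.
  call xmpio_write_frm(fh, offset, xmpio_single, fmarker, mpierr, advance=.FALSE.)
  call MPI_FILE_WRITE_AT(fh, offset, buffer, ndp, MPI_DOUBLE_PRECISION, statux, mpierr)
  offset = offset + fmarker + xmpio_bsize_frm  ! Skip payload + trailing marker.
 end subroutine write_dp_record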
m_xmpi/largetype_land_log [ Functions ]
[ Top ] [ m_xmpi ] [ Functions ]
NAME
largetype_land_log
FUNCTION
Routine used to overload MPI_LAND for logicals
m_xmpi/largetype_lor_log [ Functions ]
[ Top ] [ m_xmpi ] [ Functions ]
NAME
largetype_lor_log
FUNCTION
Routine used to overload MPI_LOR for logicals
m_xmpi/largetype_sum_cplx [ Functions ]
[ Top ] [ m_xmpi ] [ Functions ]
NAME
largetype_sum_cplx
FUNCTION
Routine used to overload MPI_SUM for complex
m_xmpi/largetype_sum_dble [ Functions ]
[ Top ] [ m_xmpi ] [ Functions ]
NAME
largetype_sum_dble
FUNCTION
Routine used to overload MPI_SUM for double precision reals
m_xmpi/largetype_sum_dcplx [ Functions ]
[ Top ] [ m_xmpi ] [ Functions ]
NAME
largetype_sum_dcplx
FUNCTION
Routine used to overload MPI_SUM for double complex
m_xmpi/largetype_sum_int [ Functions ]
[ Top ] [ m_xmpi ] [ Functions ]
NAME
largetype_sum_int
FUNCTION
Routine used to overload MPI_SUM for integers
m_xmpi/largetype_sum_real [ Functions ]
[ Top ] [ m_xmpi ] [ Functions ]
NAME
largetype_sum_real
FUNCTION
Routine used to overload MPI_SUM for reals
m_xmpi/pool2d_free [ Functions ]
[ Top ] [ m_xmpi ] [ Functions ]
NAME
pool2d_free
FUNCTION
Free memory
m_xmpi/pool2d_from_dims [ Functions ]
[ Top ] [ m_xmpi ] [ Functions ]
NAME
pool2d_from_dims
FUNCTION
Build pool of MPI procs to distribute (n1 x n2) tasks.
INPUTS
n1, n2: dimensions of the problem
input_comm: Initial MPI communicator
[rectangular]: If True, change the number of procs in each pool so that it is possible to create a rectangular grid.
               Useful for Scalapack algorithms in which 1D grids are not efficient. Default: False.
SOURCE
subroutine pool2d_from_dims(pool, n1, n2, input_comm, rectangular)

!Arguments-------------------------
 class(xmpi_pool2d_t),intent(out) :: pool
 integer,intent(in) :: n1, n2, input_comm
 logical,optional,intent(in) :: rectangular

!Local variables-------------------
 integer :: itask, ntasks, my_rank, nprocs, color, mpierr, jj, i1, i2, my_ntasks, new_comm
 integer :: grid_dims(2) ! , check(n1, n2)
 integer,allocatable :: my_inds(:)
!----------------------------------------------------------------------

 my_rank = xmpi_comm_rank(input_comm); nprocs = xmpi_comm_size(input_comm)

 pool%n1 = n1; pool%n2 = n2
 ABI_MALLOC(pool%treats, (n1, n2))
 pool%treats = .False.

 ntasks = n1 * n2; color = ntasks + 1

 if (nprocs <= ntasks) then
   color = my_rank
   call xmpi_split_block(ntasks, input_comm, my_ntasks, my_inds)
   do jj=1,size(my_inds)
     itask = my_inds(jj) ! = i1 + (i2 - 1) * n1
     i1 = mod(itask - 1, n1) + 1
     i2 = 1 + (itask - i1) / n1
     pool%treats(i1, i2) = .True.
   end do
   ABI_FREE(my_inds)
 else
   i2_loop: do i2=1,n2
     do i1=1,n1
       itask = i1 + (i2 - 1) * n1
       if (xmpi_distrib_with_replicas(itask, ntasks, my_rank, nprocs)) then
         pool%treats(i1, i2) = .True.; color = itask; exit i2_loop
       end if
     end do
   end do i2_loop
 end if

!DEBUG
! where (pool%treats)
!   check = 1
! else where
!   check = 0
! end where
! call xmpi_sum(check, input_comm, mpierr)
! if (any(check == 0)) then
!   write(std_out, *) check
!   call xmpi_abort(msg="Wrong distribution in pool2d_from_dims")
! end if
!END_DEBUG

 call xmpi_comm_split(input_comm, color, my_rank, new_comm, mpierr)
 pool%comm = xcomm_from_mpi_int(new_comm)
 call xmpi_comm_free(new_comm)

 if (present(rectangular)) then
   if (rectangular) then
     if (pool%comm%nproc == 1 .or. is_rectangular_grid(pool%comm%nproc, grid_dims)) return

     do jj=pool%comm%nproc-1,1,-1
       if (is_rectangular_grid(jj, grid_dims)) then
         color = merge(1, 0, pool%comm%me < jj)
         call xmpi_comm_split(pool%comm%value, color, pool%comm%me, new_comm, mpierr)
         call pool%comm%free()
         pool%comm = xcomm_from_mpi_int(new_comm)
         call xmpi_comm_free(new_comm)
         if (color == 0) pool%treats = .False.
         exit
       end if
     end do
   end if
 end if

contains

 logical function is_rectangular_grid(nproc, grid_dims) result (ans)
   integer,intent(in) :: nproc
   integer,intent(out) :: grid_dims(2)
!----------------------------------------------------------------------
   integer :: i

   ! Search for a rectangular grid of processors
   i = INT(SQRT(float(nproc)))
   do while (MOD(nproc,i) /= 0)
     i = i - 1
   end do
   i = max(i, 1)

   grid_dims(1) = i
   grid_dims(2) = int(nproc / i)
   ans = grid_dims(1) > 1 .and. grid_dims(2) > 1

 end function is_rectangular_grid

end subroutine pool2d_from_dims
m_xmpi/sys_exit [ Functions ]
[ Top ] [ m_xmpi ] [ Functions ]
NAME
sys_exit
FUNCTION
Routine for clean exit of f90 code by one processor
INPUTS
exit_status: return code.
NOTES
By default, it relies on "call exit(exit_status)", which is not completely portable.
SOURCE
subroutine sys_exit(exit_status)

!Arguments ------------------------------------
!scalars
 integer,intent(in) :: exit_status

! **********************************************************************

#if defined FC_NAG
 call exit(exit_status)
#elif defined HAVE_FC_EXIT
 call exit(exit_status)
#else
 ! stop with exit_status
 ! MT 06-2013: the stop function only accepts constant arguments!
 if (exit_status== 0) stop "0"
 if (exit_status== 1) stop "1"
 if (exit_status==-1) stop "-1"
#endif
 stop 1

end subroutine sys_exit
m_xmpi/xcomm_t [ Types ]
NAME
xcomm_t
FUNCTION
A small object storing the MPI communicator, the rank of the process and the size of the communicator. Provides helper functions to perform typical operations and parallelize loops. The datatype is initialized with xmpi_comm_self
SOURCE
 type, public :: xcomm_t

   integer :: value = xmpi_comm_self
   integer :: nproc = 1
   integer :: me = 0
   integer,private :: can_use_shmem__ = -1
   ! -1 --> uninitialized, 0 if ranks do not belong to a shared memory region else 1

 contains
   procedure :: skip => xcomm_skip                    ! Skip iteration according to rank.
   procedure :: set_to_null => xcomm_set_to_null
   procedure :: set_to_self => xcomm_set_to_self
   procedure :: free => xcomm_free
   procedure :: from_cart_sub => xcomm_from_cart_sub  ! Build sub-communicators in a Cartesian grid.
   procedure :: prep_gatherv => xcomm_prep_gatherv    ! Prepare a typical gatherv operation.
   procedure :: print_names => xcomm_print_names
   procedure :: can_use_shmem => xcomm_can_use_shmem
   procedure :: allocate_shared_master => xcomm_allocate_shared_master
 end type xcomm_t

 public :: xcomm_from_mpi_int
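A hedged usage sketch: parallelizing a loop with the skip method, assuming skip(ii) returns .TRUE. when iteration ii is assigned to another rank. contribution is a placeholder for the real per-iteration work; dp and zero come from defs_basis and xmpi_sum is the module's generic allreduce wrapper.

 type(xcomm_t) :: comm
 integer :: ii, ierr
 real(dp) :: my_sum

 comm = xcomm_from_mpi_int(xmpi_world)      ! Wrap an MPI integer handle in a xcomm_t.
 my_sum = zero
 do ii=1,100
   if (comm%skip(ii)) cycle                 ! Iteration ii is treated by another rank.
   my_sum = my_sum + contribution(ii)       ! Placeholder for the real work.
 end do
 call xmpi_sum(my_sum, comm%value, ierr)    ! Combine the partial sums.
 call comm%free()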
m_xmpi/xmpi_abort [ Functions ]
[ Top ] [ m_xmpi ] [ Functions ]
NAME
xmpi_abort
FUNCTION
Hides MPI_ABORT from MPI library.
INPUTS
[comm]=communicator of tasks to abort.
[mpierr]=Error code to return to the invoking environment.
[msg]=User message
[exit_status]=optional, shell return code, default 1
SOURCE
subroutine xmpi_abort(comm, mpierr, msg, exit_status)

!Arguments-------------------------
 integer,optional,intent(in) :: comm,mpierr,exit_status
 character(len=*),optional,intent(in) :: msg

!Local variables-------------------
 integer :: ierr,my_comm,my_errorcode,ilen,ierr2
 logical :: testopen
 character(len=xmpi_msg_len) :: mpi_msg_error

! *************************************************************************

 ierr=0
 my_comm = xmpi_world; if (PRESENT(comm)) my_comm = comm

 if (PRESENT(msg)) then
   write(std_out,'(2a)')"User message: ",TRIM(msg)
 end if

 ! Close std_out and ab_out and flush units.
 ! Note that flush does not guarantee that the data is committed to disk.
 ! This is rather annoying because we may end up with incomplete log files
 ! that cannot be parsed by Abinit.
 ! For a possible approach based on fsync, see
 ! https://gcc.gnu.org/onlinedocs/gcc-4.7.4/gfortran/FLUSH.html

 inquire(std_out, opened=testopen)
 if (testopen) then
#if defined HAVE_FC_FLUSH
   call flush(std_out)
#endif
   close(std_out)
 end if

 inquire(ab_out,opened=testopen)
 if (testopen) then
#if defined HAVE_FC_FLUSH
   call flush(ab_out)
#endif
   close(ab_out)
 end if

#ifdef HAVE_MPI
 my_errorcode=MPI_ERR_UNKNOWN; if (PRESENT(mpierr)) my_errorcode=mpierr

 call MPI_ERROR_STRING(my_errorcode, mpi_msg_error, ilen, ierr2)

 !if (ilen>xmpi_msg_len) write(std_out,*)" WARNING: MPI message has been truncated!"
 !if (ierr2/=MPI_SUCCESS) then
 !  write(std_out,'(a,i0)')" WARNING: MPI_ERROR_STRING returned ierr2= ",ierr2
 !else
 !  write(std_out,'(2a)')" MPI_ERROR_STRING: ",TRIM(mpi_msg_error)
 !end if

 !ierr = clib_usleep(300000_c_int32_t)
 call MPI_ABORT(my_comm, my_errorcode, ierr)
#endif

 if (present(exit_status)) then
   call sys_exit(exit_status)
 else
   call sys_exit(1)
 end if

end subroutine xmpi_abort
m_xmpi/xmpi_barrier [ Functions ]
[ Top ] [ m_xmpi ] [ Functions ]
NAME
xmpi_barrier
FUNCTION
Hides MPI_BARRIER from MPI library.
INPUTS
comm=MPI communicator
SOURCE
subroutine xmpi_barrier(comm)

!Arguments-------------------------
 integer,intent(in) :: comm

!Local variables-------------------
 integer :: ier
#ifdef HAVE_MPI
 integer :: nprocs
#endif

! *************************************************************************

 ier = 0
#ifdef HAVE_MPI
 if (comm/=xmpi_comm_null) then
   call MPI_COMM_SIZE(comm,nprocs,ier)
   if (nprocs>1) call MPI_BARRIER(comm,ier)
 end if
#endif

end subroutine xmpi_barrier
m_xmpi/xmpi_comm_create [ Functions ]
[ Top ] [ m_xmpi ] [ Functions ]
NAME
xmpi_comm_create
FUNCTION
Hides MPI_COMM_CREATE from MPI library.
INPUTS
comm=communicator
group=group, which is a subset of the group of comm
OUTPUT
newcomm=new communicator
SOURCE
subroutine xmpi_comm_create(comm,group,newcomm,mpierr)

!Arguments-------------------------
!scalars
 integer,intent(in) :: comm,group
 integer,intent(out) :: mpierr
 integer,intent(inout) :: newcomm

! *************************************************************************

 mpierr=0
#ifdef HAVE_MPI
 if (group/=xmpi_group_null) then
   call MPI_comm_create(comm,group,newcomm,mpierr)
 else
   newcomm=xmpi_comm_null
 end if
#else
 newcomm=xmpi_comm_self
#endif

end subroutine xmpi_comm_create
m_xmpi/xmpi_comm_free_0D [ Functions ]
[ Top ] [ m_xmpi ] [ Functions ]
NAME
xmpi_comm_free_0D
FUNCTION
Hides MPI_COMM_FREE from MPI library. Does not abort MPI in case of an invalid communicator
INPUTS
comm=MPI communicator.
SOURCE
subroutine xmpi_comm_free_0D(comm)

!Arguments-------------------------
 integer,intent(inout) :: comm

!Local variables-------------------------------
!scalars
#ifdef HAVE_MPI
 integer :: comm_world,err_handler_dum,err_handler_sav,ierr,mpierr,mpierr_class

! *************************************************************************

 if (comm/=xmpi_comm_null.and.comm/=xmpi_world.and.comm/=xmpi_comm_self) then

   comm_world=xmpi_world ! Needed to bypass a bug in some OMPI implementations (intent(inout))
   call xmpi_comm_set_errhandler(comm_world,MPI_ERRORS_RETURN,err_handler_sav,ierr)
   call MPI_COMM_FREE(comm,mpierr)
   call xmpi_comm_set_errhandler(comm_world,err_handler_sav,err_handler_dum,ierr)

   if (mpierr/=MPI_SUCCESS) then
     call MPI_ERROR_CLASS(mpierr,mpierr_class,ierr)
     if (mpierr_class/=MPI_ERR_COMM) then
       write(std_out,*)" WARNING: MPI_COMM_FREE returned ierr= ",mpierr
     end if
   end if

 end if

#else
 if (.false.) write(std_out,*) comm
#endif

end subroutine xmpi_comm_free_0D
m_xmpi/xmpi_comm_free_1D [ Functions ]
[ Top ] [ m_xmpi ] [ Functions ]
NAME
xmpi_comm_free_1D
FUNCTION
Hides MPI_COMM_FREE from MPI library. Targets 1D arrays. Does not abort MPI in case of an invalid communicator.
INPUTS
comms(:)=MPI communicators
SOURCE
subroutine xmpi_comm_free_1D(comms)

!Arguments-------------------------
 integer,intent(inout) :: comms(:)

!Local variables-------------------------------
!scalars
#ifdef HAVE_MPI
 integer :: comm_world,err_handler_dum,err_handler_sav,ii,mpierr

! *************************************************************************

 comm_world=xmpi_world ! Needed to bypass a bug in some OMPI implementations (intent(inout))
 call xmpi_comm_set_errhandler(comm_world,MPI_ERRORS_RETURN,err_handler_sav,mpierr)

 do ii=LBOUND(comms,DIM=1),UBOUND(comms,DIM=1)
   if (comms(ii)/=xmpi_comm_null.and.comms(ii)/=xmpi_world.and.comms(ii)/=xmpi_comm_self) then
     call MPI_COMM_FREE(comms(ii),mpierr)
   end if
 end do

 call xmpi_comm_set_errhandler(comm_world,err_handler_sav,err_handler_dum,mpierr)

#else
 if (.false.) write(std_out,*) comms(1)
#endif

end subroutine xmpi_comm_free_1D
m_xmpi/xmpi_comm_free_2D [ Functions ]
[ Top ] [ m_xmpi ] [ Functions ]
NAME
xmpi_comm_free_2D
FUNCTION
Hides MPI_COMM_FREE from MPI library. Targets 2D arrays. Does not abort MPI in case of an invalid communicator.
INPUTS
comms=MPI communicators.
SOURCE
subroutine xmpi_comm_free_2D(comms)

!Arguments-------------------------
 integer,intent(inout) :: comms(:,:)

!Local variables-------------------------------
!scalars
#ifdef HAVE_MPI
 integer :: comm_world,err_handler_dum,err_handler_sav,ii,jj,mpierr

! *************************************************************************

 comm_world=xmpi_world ! Needed to bypass a bug in some OMPI implementations (intent(inout))
 call xmpi_comm_set_errhandler(comm_world,MPI_ERRORS_RETURN,err_handler_sav,mpierr)

 do jj=LBOUND(comms,DIM=2),UBOUND(comms,DIM=2)
   do ii=LBOUND(comms,DIM=1),UBOUND(comms,DIM=1)
     if (comms(ii,jj)/=xmpi_comm_null.and.comms(ii,jj)/=xmpi_world.and. &
&        comms(ii,jj)/=xmpi_comm_self) then
       call MPI_COMM_FREE(comms(ii,jj),mpierr)
     end if
   end do
 end do

 call xmpi_comm_set_errhandler(comm_world,err_handler_sav,err_handler_dum,mpierr)

#else
 if (.false.) write(std_out,*) comms(1,1)
#endif

end subroutine xmpi_comm_free_2D
m_xmpi/xmpi_comm_free_3D [ Functions ]
[ Top ] [ m_xmpi ] [ Functions ]
NAME
xmpi_comm_free_3D
FUNCTION
Hides MPI_COMM_FREE from MPI library. Targets 3D arrays. Does not abort MPI in case of an invalid communicator.
INPUTS
comms=MPI communicators.
SOURCE
subroutine xmpi_comm_free_3D(comms)

!Arguments-------------------------
 integer,intent(inout) :: comms(:,:,:)

!Local variables-------------------------------
!scalars
#ifdef HAVE_MPI
 integer :: comm_world,err_handler_dum,err_handler_sav,ii,jj,kk,mpierr

! *************************************************************************

 comm_world=xmpi_world ! Needed to bypass a bug in some OMPI implementations (intent(inout))
 call xmpi_comm_set_errhandler(comm_world,MPI_ERRORS_RETURN,err_handler_sav,mpierr)

 do kk=LBOUND(comms,DIM=3),UBOUND(comms,DIM=3)
   do jj=LBOUND(comms,DIM=2),UBOUND(comms,DIM=2)
     do ii=LBOUND(comms,DIM=1),UBOUND(comms,DIM=1)
       if (comms(ii,jj,kk)/=xmpi_comm_null.and.comms(ii,jj,kk)/=xmpi_world.and. &
&          comms(ii,jj,kk)/=xmpi_comm_self) then
         call MPI_COMM_FREE(comms(ii,jj,kk),mpierr)
       end if
     end do
   end do
 end do

 call xmpi_comm_set_errhandler(comm_world,err_handler_sav,err_handler_dum,mpierr)

#else
 if (.false.) write(std_out,*) comms(1,1,1)
#endif

end subroutine xmpi_comm_free_3D
m_xmpi/xmpi_comm_group [ Functions ]
[ Top ] [ m_xmpi ] [ Functions ]
NAME
xmpi_comm_group
FUNCTION
Hides MPI_COMM_GROUP from MPI library.
INPUTS
comm=MPI communicator.
OUTPUT
spaceGroup=The group associated with comm.
mpierr=error code returned
SOURCE
subroutine xmpi_comm_group(comm,spaceGroup,mpierr)

!Arguments-------------------------
 integer,intent(in) :: comm
 integer,intent(out) :: mpierr,spaceGroup

! *************************************************************************

 mpierr=0; spaceGroup=xmpi_group_null
#ifdef HAVE_MPI
 if (comm/=xmpi_comm_null) then
   call MPI_COMM_GROUP(comm,spaceGroup,mpierr)
 end if
#endif

end subroutine xmpi_comm_group
m_xmpi/xmpi_comm_multiple_of [ Functions ]
[ Top ] [ m_xmpi ] [ Functions ]
NAME
xmpi_comm_multiple_of
FUNCTION
Given an input communicator `input_comm`, create a new communicator whose number of procs is a multiple of `ntasks`. Use all procs if ntasks >= input_nproc.
INPUTS
ntasks=Number of tasks.
input_comm=input communicator
OUTPUT
idle_proc=True if this proc is idle. In this case, output_comm gathers all the idle procs.
output_comm=Output communicator
SOURCE
subroutine xmpi_comm_multiple_of(ntasks, input_comm, idle_proc, output_comm)

!Arguments-------------------------
!scalars
 integer,intent(in) :: ntasks, input_comm
 integer,intent(out) :: output_comm
 logical,intent(out) :: idle_proc

!Local variables-------------------------------
 integer :: color, my_rank, ierr, input_nproc

! *************************************************************************

 my_rank = xmpi_comm_rank(input_comm)
 input_nproc = xmpi_comm_size(input_comm)

 if (input_nproc <= ntasks) then
   ! Use all procs in input comm.
   idle_proc = .False.; output_comm = input_comm
#ifdef HAVE_MPI
   call MPI_Comm_dup(input_comm, output_comm, ierr)
#endif
 else
   ! Keep the largest multiple of ntasks not exceeding input_nproc; the remaining procs are idle.
   color = merge(0, 1, my_rank + 1 <= (input_nproc / ntasks) * ntasks)
   idle_proc = color == 1
   call xmpi_comm_split(input_comm, color, my_rank, output_comm, ierr)
 end if

end subroutine xmpi_comm_multiple_of
m_xmpi/xmpi_comm_rank [ Functions ]
[ Top ] [ m_xmpi ] [ Functions ]
NAME
xmpi_comm_rank
FUNCTION
Hides MPI_COMM_RANK from MPI library.
INPUTS
comm=MPI communicator.
OUTPUT
xmpi_comm_rank=The rank of the node inside comm
SOURCE
function xmpi_comm_rank(comm)

!Arguments-------------------------
 integer,intent(in) :: comm
 integer :: xmpi_comm_rank

!Local variables-------------------
 integer :: mpierr

! *************************************************************************

 mpierr=0
#ifdef HAVE_MPI
 xmpi_comm_rank=-1 ! Return non-sense value if the proc does not belong to the comm
 if (comm/=xmpi_comm_null) then
   call MPI_COMM_RANK(comm,xmpi_comm_rank,mpierr)
 end if
#else
 xmpi_comm_rank=0
#endif

end function xmpi_comm_rank
m_xmpi/xmpi_comm_set_errhandler [ Functions ]
[ Top ] [ m_xmpi ] [ Functions ]
NAME
xmpi_comm_set_errhandler
FUNCTION
Hides MPI_COMM_SET_ERRHANDLER from MPI library.
INPUTS
new_err_handler= new error handler
OUTPUT
ierror=error code
old_err_handler=old error handler
SIDE EFFECTS
comm=communicator (should be intent(in) but is intent(inout) in some OMPI implementations; known as a bug)
SOURCE
subroutine xmpi_comm_set_errhandler(comm,new_err_handler,old_err_handler,ierror)

!Arguments-------------------------
 integer,intent(in) :: new_err_handler
 integer,intent(in) :: comm
 integer,intent(out) :: ierror,old_err_handler

!Local variables-------------------------
 integer :: mpierr1,mpierr2,my_comm

! *************************************************************************

 ierror=0
 my_comm = comm ! should be intent(in) but is intent(inout) in some OMPI implementations; known as a bug

#if defined HAVE_MPI

 mpierr1=MPI_SUCCESS; mpierr2=MPI_SUCCESS

#if defined HAVE_MPI1
 call MPI_Errhandler_get(my_comm,old_err_handler,mpierr1)
 call MPI_Errhandler_set(my_comm,new_err_handler,mpierr2)
#endif
#if defined HAVE_MPI2
 call MPI_comm_get_Errhandler(my_comm,old_err_handler,mpierr1)
 call MPI_comm_set_Errhandler(my_comm,new_err_handler,mpierr2)
#endif

 ierror=MPI_SUCCESS
 if (mpierr1/=MPI_SUCCESS) then
   ierror=mpierr1
 else if (mpierr2/=MPI_SUCCESS) then
   ierror=mpierr2
 end if
#endif

end subroutine xmpi_comm_set_errhandler
m_xmpi/xmpi_comm_size [ Functions ]
[ Top ] [ m_xmpi ] [ Functions ]
NAME
xmpi_comm_size
FUNCTION
Hides MPI_COMM_SIZE from MPI library.
INPUTS
comm=MPI communicator.
OUTPUT
xmpi_comm_size=The number of processors inside comm. Return 0 if comm = xmpi_comm_null
SOURCE
function xmpi_comm_size(comm)

!Arguments-------------------------
 integer,intent(in) :: comm
 integer :: xmpi_comm_size

!Local variables-------------------------------
!scalars
 integer :: mpierr

! *************************************************************************

 mpierr=0; xmpi_comm_size=1
#ifdef HAVE_MPI
 xmpi_comm_size = 0
 if (comm /= xmpi_comm_null) call MPI_COMM_SIZE(comm,xmpi_comm_size,mpierr)
#endif

end function xmpi_comm_size
m_xmpi/xmpi_comm_split [ Functions ]
[ Top ] [ m_xmpi ] [ Functions ]
NAME
xmpi_comm_split
FUNCTION
Hides MPI_COMM_SPLIT from MPI library.
INPUTS
input_comm=Input MPI communicator (to be split)
color=Control of subset assignment (nonnegative integer). Processes with the same color are in the same new communicator.
key=Control of rank assignment (integer)
OUTPUT
mpierr=error code returned
output_comm=new communicator obtained by splitting input_comm
SOURCE
subroutine xmpi_comm_split(input_comm, color, key, output_comm, mpierr)

!Arguments-------------------------
 integer,intent(in) :: color,input_comm,key
 integer,intent(out) :: mpierr,output_comm

! *************************************************************************

 mpierr=0; output_comm=input_comm
#ifdef HAVE_MPI
 if (input_comm/=xmpi_comm_null.and.input_comm/=xmpi_comm_self) then
   call MPI_COMM_SPLIT(input_comm,color,key,output_comm,mpierr)
 end if
#endif

end subroutine xmpi_comm_split
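A short sketch of the typical split/free pattern, e.g. grouping the even and odd ranks of xmpi_world into two sub-communicators (std_out comes from defs_basis):

 integer :: my_rank, color, sub_comm, mpierr

 my_rank = xmpi_comm_rank(xmpi_world)
 color = mod(my_rank, 2)  ! Ranks with the same color end up in the same sub-communicator.
 call xmpi_comm_split(xmpi_world, color, my_rank, sub_comm, mpierr)
 write(std_out,*) "local rank ", xmpi_comm_rank(sub_comm), " of ", xmpi_comm_size(sub_comm)
 call xmpi_comm_free(sub_comm)  ! The wrapper tolerates null/world/self communicators.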
m_xmpi/xmpi_comm_translate_rank [ Functions ]
[ Top ] [ m_xmpi ] [ Functions ]
NAME
xmpi_comm_translate_rank
FUNCTION
Helper function to translate a single rank `from_rank` in communicator `from_comm` to the corresponding rank in communicator `to_comm`.
m_xmpi/xmpi_comm_translate_ranks [ Functions ]
[ Top ] [ m_xmpi ] [ Functions ]
NAME
xmpi_comm_translate_ranks
FUNCTION
Helper function that translates ranks from one communicator to another one. Wraps xmpi_group_translate_ranks but provides a more user-friendly interface.
INPUTS
from_comm=MPI communicator where from_ranks are defined.
to_comm=MPI communicator of the target ranks.
nrank=number of ranks in from_ranks and to_ranks arrays
from_ranks(nrank)=array of zero or more valid ranks in from_comm
OUTPUT
to_ranks(nrank)=array of corresponding ranks in to_comm; xmpi_undefined when no correspondence exists
SOURCE
subroutine xmpi_comm_translate_ranks(from_comm, nrank, from_ranks, to_comm, to_ranks)

!Arguments-------------------------
!scalars
 integer,intent(in) :: nrank,from_comm,to_comm
!arrays
 integer,intent(in) :: from_ranks(nrank)
 integer,intent(out) :: to_ranks(nrank)

!Local variables-------------------------------
!scalars
 integer :: ierr,from_group,to_group

! *************************************************************************

 ! Get the groups
 call xmpi_comm_group(from_comm,from_group,ierr)
 call xmpi_comm_group(to_comm,to_group,ierr)

 call xmpi_group_translate_ranks(from_group,nrank,from_ranks,to_group,to_ranks,ierr)

 ! Release the groups
 call xmpi_group_free(from_group)
 call xmpi_group_free(to_group)

end subroutine xmpi_comm_translate_ranks
m_xmpi/xmpi_distab_4D [ Functions ]
[ Top ] [ m_xmpi ] [ Functions ]
NAME
xmpi_distab_4D
FUNCTION
Fill table defining the distribution of the tasks according to the number of processors involved in the calculation. For each set of indices, the table contains the rank of the node in the MPI communicator.
INPUTS
nprocs=The number of processors performing the calculation in parallel.
OUTPUT
task_distrib(:,:,:,:) = Contains the rank of the node that is taking care of this particular set of loop indices. Tasks are distributed across the nodes in column-major order.
SOURCE
subroutine xmpi_distab_4D(nprocs, task_distrib)

!Arguments ------------------------------------
 integer,intent(in) :: nprocs
!arrays
 integer,intent(inout) :: task_distrib(:,:,:,:)

!Local variables ------------------------------
!scalars
 integer :: ii,jj,n1,n2,n3,n4,ntasks,irank,remainder,ntpblock
 integer,allocatable :: list(:)

!************************************************************************

 n1= SIZE(task_distrib,DIM=1)
 n2= SIZE(task_distrib,DIM=2)
 n3= SIZE(task_distrib,DIM=3)
 n4= SIZE(task_distrib,DIM=4)
 ntasks = n1*n2*n3*n4

 ABI_MALLOC(list, (ntasks))
 list=-999

 ntpblock  = ntasks/nprocs
 remainder = MOD(ntasks,nprocs)

 if (ntpblock==0) then ! nprocs > ntasks
   do ii=1,ntasks
     list(ii) = ii-1
   end do
 else
   ii=1
   do irank=nprocs-1,0,-1 ! If remainder/=0, master will get less tasks.
     jj = ii+ntpblock-1
     if (remainder>0) then
       jj=jj+1
       remainder = remainder-1
     end if
     list(ii:jj)=irank
     ii=jj+1
   end do
 end if

 task_distrib = RESHAPE(list, [n1,n2,n3,n4])

 if (ANY(task_distrib==-999)) call xmpi_abort(msg="task_distrib == -999")

 ABI_FREE(list)

end subroutine xmpi_distab_4D
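A usage sketch: each rank fills the same table and then executes only the entries carrying its own rank. The (2, 3, 2, 1) shape is an arbitrary example.

 integer :: task_distrib(2,3,2,1)
 integer :: i1, i2, i3, i4, my_rank

 my_rank = xmpi_comm_rank(xmpi_world)
 call xmpi_distab_4D(xmpi_comm_size(xmpi_world), task_distrib)

 do i4=1,size(task_distrib,dim=4)
   do i3=1,size(task_distrib,dim=3)
     do i2=1,size(task_distrib,dim=2)
       do i1=1,size(task_distrib,dim=1)
         if (task_distrib(i1,i2,i3,i4) /= my_rank) cycle
         ! ... work on this set of loop indices ...
       end do
     end do
   end do
 end do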
m_xmpi/xmpi_distrib_2d [ Functions ]
[ Top ] [ m_xmpi ] [ Functions ]
NAME
xmpi_distrib_2d
FUNCTION
Try to optimally distribute nprocs in a 2D grid of shape (n1, n2) given a problem of dimension (size1, size2).
Use the order string to define priorities:
  "12" or "21" if both dimensions should be optimized (if not possible, the first one gets optimized).
  "1" or "2" to optimize only one dimension.
Return: exit status in ierr.
SOURCE
subroutine xmpi_distrib_2d(nprocs, order, size1, size2, n1, n2, ierr)

!Arguments ------------------------------------
 integer,intent(in) :: nprocs, size1, size2
 character(len=*),intent(in) :: order
 integer,intent(out) :: n1, n2, ierr

!Local variables-------------------------------
 integer :: ii

!----------------------------------------------------------------------

 ierr = 1; n1 = -1; n2 = -1

 select case (order)
 case ("12")
   call balance_12()
   if (ierr /= 0) call balance_1()
 case ("21")
   call balance_21()
   if (ierr /= 0) call balance_2()
 case ("1")
   call balance_1()
 case ("2")
   call balance_2()
 case default
   ! Wrong order
   ierr = -1
 end select

contains

 subroutine balance_12()
   ! Try to find n1 x n2 = nprocs so that (size1, size2) are multiple of (n1, n2)
   do ii=nprocs,1,-1
     if (mod(size1, ii) == 0 .and. mod(nprocs, ii) == 0 .and. mod(size2, nprocs / ii) == 0) then
       n1 = ii; n2 = nprocs / ii; ierr = 0; exit
     end if
   end do
 end subroutine balance_12

 subroutine balance_21()
   ! Try to find n1 x n2 = nprocs so that (size1, size2) are multiple of (n1, n2)
   do ii=nprocs,1,-1
     if (mod(size2, ii) == 0 .and. mod(nprocs, ii) == 0 .and. mod(size1, nprocs / ii) == 0) then
       n2 = ii; n1 = nprocs / ii; ierr = 0; exit
     end if
   end do
 end subroutine balance_21

 subroutine balance_1()
   integer :: imod1
   ! Try to find n1 x n2 = nprocs so that only size1 is multiple of n1. Allow for some load imbalance.
   do ii=nprocs,1,-1
     imod1 = mod(size1, ii)
     if ((imod1 == 0 .or. imod1 >= nprocs / 2) .and. mod(nprocs, ii) == 0) then
       n1 = ii; n2 = nprocs / ii; ierr = 0; exit
     end if
   end do

   if (ierr /= 0 .and. nprocs <= size1) then
     n1 = nprocs; n2 = 1; ierr = 0; return
   end if
 end subroutine balance_1

 subroutine balance_2()
   integer :: imod2
   ! Try to find n1 x n2 = nprocs so that only size2 is multiple of n2. Allow for some load imbalance.
   do ii=nprocs,1,-1
     imod2 = mod(size2, ii)
     if ((imod2 == 0 .or. imod2 >= nprocs / 2) .and. mod(nprocs, ii) == 0) then
       n2 = ii; n1 = nprocs / ii; ierr = 0; exit
     end if
   end do

   if (ierr /= 0 .and. nprocs <= size2) then
     n2 = nprocs; n1 = 1; ierr = 0; return
   end if
 end subroutine balance_2

end subroutine xmpi_distrib_2d
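A worked example: with nprocs=12 and a problem of shape (8, 6), order "12" yields the grid (n1, n2) = (4, 3), since 4 x 3 = 12 while 8 is a multiple of 4 and 6 is a multiple of 3.

 integer :: n1, n2, ierr

 call xmpi_distrib_2d(12, "12", 8, 6, n1, n2, ierr)
 if (ierr /= 0) call xmpi_abort(msg="xmpi_distrib_2d could not find a 2D grid")
 ! Here n1 == 4 and n2 == 3.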
m_xmpi/xmpi_distrib_with_replicas [ Functions ]
[ Top ] [ m_xmpi ] [ Functions ]
NAME
xmpi_distrib_with_replicas
FUNCTION
This function distributes the i-th task `itask` among `nprocs` inside a MPI communicator. If nprocs > ntasks, multiple MPI ranks will be assigned to a given task.
INPUTS
itask=Index of the task (must be <= ntasks)
ntasks=number of tasks
rank=MPI rank of this processor in the MPI communicator.
nprocs=Number of processors in the MPI communicator.
OUTPUT
True if this node will treat itask (replicas are possible if nprocs > ntasks)
SOURCE
pure logical function xmpi_distrib_with_replicas(itask, ntasks, rank, nprocs) result(bool)

!Arguments ------------------------------------
 integer,intent(in) :: itask,rank,nprocs,ntasks

!Local variables-------------------------------
 integer :: ii,mnp_pool,rk_base

! *************************************************************************

 ! If the number of processors is less than ntasks, we have max one processor per task
 ! else we replicate the tasks inside a pool of max size mnp_pool
 if (nprocs <= ntasks) then
   bool = modulo(itask - 1, nprocs) == rank
 else
   mnp_pool = (nprocs / ntasks)
   !write(std_out,*)"Will duplicate itask, mnp_pool", mnp_pool, "nprocs, ntasks", nprocs, ntasks

   rk_base = modulo(itask - 1, nprocs)
   bool = .False.
   do ii=1,mnp_pool+1
     if (rank == rk_base + (ii - 1) * ntasks) then
       bool = .True.; exit
     end if
   end do
 end if

end function xmpi_distrib_with_replicas
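A usage sketch: each rank tests which tasks it treats; with nprocs > ntasks several ranks may return .TRUE. for the same task (replicas). The value ntasks=4 is an arbitrary example.

 integer,parameter :: ntasks = 4
 integer :: itask, my_rank, nprocs

 my_rank = xmpi_comm_rank(xmpi_world); nprocs = xmpi_comm_size(xmpi_world)
 do itask=1,ntasks
   if (.not. xmpi_distrib_with_replicas(itask, ntasks, my_rank, nprocs)) cycle
   ! ... this rank contributes to itask ...
 end do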
m_xmpi/xmpi_end [ Functions ]
[ Top ] [ m_xmpi ] [ Functions ]
NAME
xmpi_end
FUNCTION
Hides MPI_FINALIZE from MPI library.
INPUTS
None
SOURCE
subroutine xmpi_end()

!Local variables-------------------
 integer :: mpierr

! *************************************************************************

 mpierr=0
#ifdef HAVE_MPI
 call MPI_BARRIER(MPI_COMM_WORLD,mpierr) ! Needed by some HPC architectures (MT, 20110315)
 call MPI_FINALIZE(mpierr)
#endif

#ifndef FC_IBM
 ! IBM8 returns 260. 320 ...
 call sys_exit(0)
#endif

end subroutine xmpi_end
m_xmpi/xmpi_error_string [ Functions ]
[ Top ] [ m_xmpi ] [ Functions ]
NAME
xmpi_error_string
FUNCTION
Hides MPI_ERROR_STRING from MPI library.
INPUTS
OUTPUT
SOURCE
subroutine xmpi_error_string(mpierr,err_string,ilen,ierror)

!Arguments-------------------------
 integer,intent(in) :: mpierr
 integer,intent(out) :: ilen,ierror
 character(len=*),intent(out) :: err_string

! *************************************************************************

 ilen=0
#ifdef HAVE_MPI
 call MPI_Error_string(mpierr,err_string,ilen,ierror)
#else
 ierror=1
 err_string="Sorry, no MPI_Error_string routine is available to interpret the error message"
#endif

end subroutine xmpi_error_string
m_xmpi/xmpi_get_unit [ Functions ]
[ Top ] [ m_xmpi ] [ Functions ]
NAME
xmpi_get_unit
FUNCTION
Get free unit (emulate F2008 newunit for portability reasons) Return -1 if no unit is found.
SOURCE
integer function xmpi_get_unit() result(unt)

!Local variables-------------------
 logical :: isopen

! *************************************************************************

 do unt=1024,-1,-1
   inquire(unit=unt, opened=isopen)
   if (.not.isopen) exit
 end do

end function xmpi_get_unit
m_xmpi/xmpi_group_free [ Functions ]
[ Top ] [ m_xmpi ] [ Functions ]
NAME
xmpi_group_free
FUNCTION
Hides MPI_GROUP_FREE from MPI library. Does not abort MPI in case of an invalid group
INPUTS
spaceGroup=MPI group
SOURCE
subroutine xmpi_group_free(spaceGroup)

!Arguments-------------------------
 integer,intent(inout) :: spaceGroup

!Local variables-------------------------------
!scalars
#ifdef HAVE_MPI
 integer :: comm_world,err_handler_dum,err_handler_sav,ierr,mpierr,mpierr_class

! *************************************************************************

 if (spaceGroup/=xmpi_group_null) then

   comm_world=xmpi_world ! Needed to bypass a bug in some OMPI implementations (intent(inout))
   call xmpi_comm_set_errhandler(comm_world,MPI_ERRORS_RETURN,err_handler_sav,ierr)
   call MPI_GROUP_FREE(spaceGroup,mpierr)
   call xmpi_comm_set_errhandler(comm_world,err_handler_sav,err_handler_dum,ierr)

   if (mpierr/=MPI_SUCCESS) then
     call MPI_ERROR_CLASS(mpierr,mpierr_class,ierr)
     if (mpierr_class/=MPI_ERR_GROUP) write(std_out,*)" WARNING: MPI_GROUP_FREE returned ierr= ",mpierr
   end if

 end if

#else
 if (.false.) write(std_out,*) spaceGroup
#endif

end subroutine xmpi_group_free
m_xmpi/xmpi_group_incl [ Functions ]
[ Top ] [ m_xmpi ] [ Functions ]
NAME
xmpi_group_incl
FUNCTION
Hides MPI_GROUP_INCL from MPI library.
INPUTS
group=input group
nranks=number of elements in array ranks (size of newgroup)
ranks=ranks of processes in group to appear in newgroup
OUTPUT
newgroup= new group derived from above, in the order defined by ranks
SOURCE
subroutine xmpi_group_incl(group,nranks,ranks,newgroup,mpierr)

!Arguments-------------------------
!scalars
 integer,intent(in) :: group,nranks
 integer,intent(out) :: mpierr
 integer,intent(inout) :: newgroup
!arrays
 integer,intent(in) :: ranks(nranks)

! *************************************************************************

 mpierr=0; newgroup=xmpi_group_null
#ifdef HAVE_MPI
 if (group/=xmpi_group_null) then
   call MPI_GROUP_INCL(group,nranks,ranks,newgroup,mpierr)
 end if
#endif

end subroutine xmpi_group_incl
m_xmpi/xmpi_group_translate_ranks [ Functions ]
[ Top ] [ m_xmpi ] [ Functions ]
NAME
xmpi_group_translate_ranks
FUNCTION
Hides MPI_GROUP_TRANSLATE_RANKS from MPI library.
INPUTS
nrank=number of ranks in ranks1 and ranks2 arrays
ranks1(nrank)=array of zero or more valid ranks in group1
spaceGroup1=group1
spaceGroup2=group2
OUTPUT
mpierr=error code returned
ranks2(nrank)=array of corresponding ranks in group2; xmpi_undefined when no correspondence exists
SOURCE
subroutine xmpi_group_translate_ranks(spaceGroup1,nrank,ranks1,&
&                                     spaceGroup2,ranks2,mpierr)

!Arguments-------------------------
!scalars
 integer,intent(in) :: nrank,spaceGroup1,spaceGroup2
 integer,intent(out) :: mpierr
!arrays
 integer,intent(in) :: ranks1(nrank)
 integer,intent(out) :: ranks2(nrank)

! *************************************************************************

 mpierr=0; ranks2(:)=xmpi_undefined
#ifdef HAVE_MPI
 if (spaceGroup1/=xmpi_group_null.and.spaceGroup2/=xmpi_group_null) then
   call MPI_GROUP_TRANSLATE_RANKS(spaceGroup1,nrank,ranks1,spaceGroup2,ranks2,mpierr)
 end if
#else
 ranks2(1)=0
#endif

end subroutine xmpi_group_translate_ranks
m_xmpi/xmpi_init [ Functions ]
[ Top ] [ m_xmpi ] [ Functions ]
NAME
xmpi_init
FUNCTION
Hides MPI_INIT from MPI library. Performs the initialization of some basic variables used by the MPI routines employed in abinit.
INPUTS
None
SOURCE
subroutine xmpi_init()

!Local variables-------------------
 integer :: mpierr, ierr, unt
 integer(c_long) :: rlim_cur, rlim_max
 logical :: exists
#ifdef HAVE_MPI
 integer :: attribute_val
 logical :: lflag
#ifdef HAVE_OPENMP
 integer :: required,provided
#endif
#endif

! *************************************************************************

 mpierr=0
#ifdef HAVE_MPI

#ifndef HAVE_OPENMP
 call MPI_INIT(mpierr)
#else
 required = MPI_THREAD_SINGLE
 !required = MPI_THREAD_FUNNELED
 !required = MPI_THREAD_SERIALIZED
 !required = MPI_THREAD_MULTIPLE
 call MPI_INIT_THREAD(required,provided,mpierr)
 if (provided /= required) call xmpi_abort(msg="MPI_INIT_THREADS: provided /= required")
#endif

 !%comm_world = xmpi_world ! Needed to bypass a bug in some OMPI implementations (intent(inout))
 !%call xmpi_comm_set_errhandler(comm_world, MPI_ERRORS_RETURN, err_handler_sav, mpierr)

 ! Deprecated in MPI2 but not all MPI2 implementations provide MPI_Comm_get_attr !
 call MPI_ATTR_GET(xmpi_world, MPI_TAG_UB, attribute_val, lflag, mpierr)
 !call MPI_Comm_get_attr(xmpi_world, MPI_TAG_UB, attribute_val, lflag, mpierr)

 if (lflag) xmpi_tag_ub = attribute_val

 ! Define type values.
 call MPI_TYPE_SIZE(MPI_CHARACTER, xmpi_bsize_ch, mpierr)
 call MPI_TYPE_SIZE(MPI_INTEGER, xmpi_bsize_int, mpierr)
 call MPI_TYPE_SIZE(MPI_REAL, xmpi_bsize_sp, mpierr)
 call MPI_TYPE_SIZE(MPI_DOUBLE_PRECISION, xmpi_bsize_dp, mpierr)
 call MPI_TYPE_SIZE(MPI_COMPLEX, xmpi_bsize_spc, mpierr)
 call MPI_TYPE_SIZE(MPI_DOUBLE_COMPLEX, xmpi_bsize_dpc, mpierr)

 ! Find the byte size of Fortran record marker used in MPI-IO routines.
 if (xmpio_bsize_frm == 0) then
   call xmpio_get_info_frm(xmpio_bsize_frm, xmpio_mpi_type_frm, xmpi_world)
 end if
#endif

 ! Try to increase stack size.
 call clib_ulimit_stack(rlim_cur, rlim_max, ierr)

 if (xmpi_comm_rank(xmpi_world) == 0) then

   if (ierr /= 0) then
     write(std_out, "(a)")" WARNING: cannot increase stack size limit. "
     !write(std_out, *)"rlim_cur, rlim_max, ierr", rlim_cur, rlim_max, ierr
   end if

   ! Master removes the ABI_MPIABORTFILE if present so that we start with a clean environment
   inquire(file=ABI_MPIABORTFILE, exist=exists)
   if (exists) then
     ! Get free unit (emulate F2008 newunit for portability reasons)
     unt = xmpi_get_unit()
     if (unt == -1) call xmpi_abort(msg="Cannot find free unit!!")
     open(unit=unt, file=trim(ABI_MPIABORTFILE), status="old", iostat=ierr)
     if (ierr == 0) close(unit=unt, status="delete", iostat=ierr)
     if (ierr /= 0) call xmpi_abort(msg="Cannot remove ABI_MPIABORTFILE")
   end if

   ! If MPI interfaces are buggy, MPI_IN_PLACE is not allowed
#if defined HAVE_MPI2_INPLACE && defined HAVE_MPI_BUGGY_INTERFACES
   write(std_out, "(a)")"ERROR: Cannot use MPI_IN_PLACE with this buggy MPI version!"
   write(ab_out , "(a)")"ERROR: Cannot use MPI_IN_PLACE with this buggy MPI version!"
   call xmpi_abort(msg="Stopping here!")
#endif

 end if

end subroutine xmpi_init
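A minimal program skeleton showing the expected call sequence (a sketch; note that xmpi_end never returns since it calls sys_exit):

 program xmpi_skeleton
  use m_xmpi
  implicit none

  call xmpi_init()  ! Wraps MPI_INIT / MPI_INIT_THREAD and sets up the module variables.
  if (xmpi_comm_rank(xmpi_world) == 0) then
    write(*,*) "running on ", xmpi_comm_size(xmpi_world), " MPI ranks"
  end if
  call xmpi_end()   ! Wraps MPI_FINALIZE and exits the program.
 end program xmpi_skeleton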
m_xmpi/xmpi_iprobe [ Functions ]
[ Top ] [ m_xmpi ] [ Functions ]
NAME
xmpi_iprobe
FUNCTION
Hides MPI_IPROBE from MPI library. Nonblocking test for a message.
INPUTS
source=rank of the source process
tag=tag value
mpicomm=communicator
OUTPUT
flag=True if a message with the specified source, tag, and communicator is available
mpierr=status error
SOURCE
subroutine xmpi_iprobe(source,tag,mpicomm,flag,mpierr)

!Arguments-------------------------
 integer,intent(in) :: mpicomm,source,tag
 integer,intent(out) :: mpierr
 logical,intent(out) :: flag

!Local variables-------------------
#ifdef HAVE_MPI
 integer :: ier,status(MPI_STATUS_SIZE)
#endif

! *************************************************************************

 mpierr = 0
#ifdef HAVE_MPI
 call MPI_IPROBE(source,tag,mpicomm,flag,status,ier)
 mpierr=ier
#endif

end subroutine xmpi_iprobe
m_xmpi/xmpi_largetype_create [ Functions ]
[ Top ] [ m_xmpi ] [ Functions ]
NAME
xmpi_largetype_create
FUNCTION
This function builds a large-count contiguous datatype made of "small" adjacent chunks (of the same original type). The new type can then be used in MPI routines when the number of elements to communicate exceeds the range of a 32-bit integer.
INPUTS
largecount=total number of elements expressed as a 64-bit integer
inputtype=(INTEGER) input type (typically INTEGER, REAL(dp), ...)
op_type=type of operation that will be applied during collective comms.
        At present, MPI_SUM, MPI_LOR, MPI_LAND are implemented.
OUTPUT
largetype= (INTEGER) new MPI type made of a series of adjacent chunks
largetype_op= (INTEGER) MPI user-defined operation associated to the largetype type
NOTES
This routine is partially inspired by https://github.com/jeffhammond/BigMPI
See: J.R. Hammond, A. Schafer, R. Latham, "To INT_MAX... and beyond: exploring large-count support in MPI",
2014 Workshop on Exascale MPI at Supercomputing Conference.
MIT License (MIT): Permission is hereby granted, free of charge, to any person obtaining a copy of this software
and associated documentation files (the "Software"), to deal in the Software without restriction, including without
limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so.
Starting with the MPI-4 specification, this routine is no longer needed: large-count MPI communications can be
performed directly by using the MPI_Count datatype (instead of INTEGER).
SOURCE
subroutine xmpi_largetype_create(largecount,inputtype,largetype,largetype_op,op_type)

!Arguments ------------------------------------
!scalars
 integer(KIND=int64),intent(in) :: largecount
 integer,intent(in) :: inputtype,op_type
 integer,intent(out) :: largetype,largetype_op

!Local variables-------------------------------
#ifdef HAVE_MPI
!scalars
 integer,parameter :: INT_MAX=max(1,xmpi_maxint32/2)
 integer(KIND=int32) :: cc,rr,ierr
 integer(KIND=XMPI_ADDRESS_KIND) :: extent,lb,remdisp
 integer :: chunks,remainder
!arrays
 integer(KIND=int32) :: blklens(2)
 integer(KIND=XMPI_ADDRESS_KIND) :: disps(2)
 integer :: types(2)
#endif

! *************************************************************************

#ifdef HAVE_MPI
 if (XMPI_ADDRESS_KIND<int64) call xmpi_abort(msg="Too much data to communicate for this architecture!")

!Divide data in chunks (do the division in 64-bit arithmetic: largecount may exceed the int32 range)
 cc=int(largecount/INT_MAX,kind=int32)
 rr=int(largecount-int(cc,kind=int64)*INT_MAX,kind=int32)

!Create user-defined datatype
 if (rr==0) then
   call MPI_TYPE_VECTOR(cc,INT_MAX,INT_MAX,inputtype,largetype,ierr)
   if (ierr==0) call MPI_TYPE_COMMIT(largetype,ierr)
 else
   call MPI_TYPE_VECTOR(cc,INT_MAX,INT_MAX,inputtype,chunks,ierr)
   call MPI_TYPE_CONTIGUOUS(rr,inputtype,remainder,ierr)
   if (ierr==0) then
     call MPI_TYPE_GET_EXTENT(inputtype,lb,extent,ierr)
     remdisp=cc*INT_MAX*extent
     blklens(1:2)=1
     disps(1)=0; disps(2)=remdisp
     types(1)=chunks; types(2)=remainder
#ifdef HAVE_MPI_TYPE_CREATE_STRUCT
     call MPI_TYPE_CREATE_STRUCT(2,blklens,disps,types,largetype,ierr)
#else
     call MPI_TYPE_STRUCT(2,blklens,disps,types,largetype,ierr)
#endif
     if (ierr==0) then
       call MPI_TYPE_COMMIT(largetype,ierr)
       call MPI_TYPE_FREE(chunks,ierr)
       call MPI_TYPE_FREE(remainder,ierr)
     end if
   end if
 end if
 if (ierr/=0) call xmpi_abort(msg="Cannot create the large-count MPI datatype!")

!Associate user-defined MPI operation
 xmpi_largetype_size=largecount ; largetype_op=-1111
 if (op_type==MPI_SUM) then
   select case(inputtype)
   case(MPI_INTEGER)
     call MPI_OP_CREATE(largetype_sum_int  ,.true.,largetype_op,ierr)
   case(MPI_REAL)
     call MPI_OP_CREATE(largetype_sum_real ,.true.,largetype_op,ierr)
   case(MPI_DOUBLE_PRECISION)
     call MPI_OP_CREATE(largetype_sum_dble ,.true.,largetype_op,ierr)
   case(MPI_COMPLEX)
     call MPI_OP_CREATE(largetype_sum_cplx ,.true.,largetype_op,ierr)
   case(MPI_DOUBLE_COMPLEX)
     call MPI_OP_CREATE(largetype_sum_dcplx,.true.,largetype_op,ierr)
   end select
 else if (op_type==MPI_LOR) then
   select case(inputtype)
   case(MPI_LOGICAL)
     call MPI_OP_CREATE(largetype_lor_log,.true.,largetype_op,ierr)
   end select
 else if (op_type==MPI_LAND) then
   select case(inputtype)
   case(MPI_LOGICAL)
     call MPI_OP_CREATE(largetype_land_log,.true.,largetype_op,ierr)
   end select
 else if (op_type==MPI_OP_NULL) then
   largetype_op=-1111
 end if
#else
 ABI_UNUSED(largecount)
 ABI_UNUSED(inputtype)
 ABI_UNUSED(largetype)
 ABI_UNUSED(largetype_op)
 ABI_UNUSED(op_type)
#endif

end subroutine xmpi_largetype_create
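A hedged sketch of the intended use: reducing in place a buffer whose length exceeds the int32 range by communicating a single element of the large type. It assumes an MPI build with MPI_IN_PLACE support; large_allreduce and nelem are illustrative names.

 program large_allreduce
  use mpi
  use m_xmpi
  use defs_basis, only : dp
  use, intrinsic :: iso_fortran_env, only : int64
  implicit none
  integer(int64),parameter :: nelem = 3000000000_int64  ! More elements than huge(1_int32).
  integer :: largetype, largetype_op, ierr
  real(dp),allocatable :: buf(:)

  call xmpi_init()
  allocate(buf(nelem)); buf = 1.0_dp
  call xmpi_largetype_create(nelem, MPI_DOUBLE_PRECISION, largetype, largetype_op, MPI_SUM)
  ! One element of largetype covers the whole nelem-long buffer.
  call MPI_ALLREDUCE(MPI_IN_PLACE, buf, 1, largetype, largetype_op, xmpi_world, ierr)
  call xmpi_largetype_free(largetype, largetype_op)
  call xmpi_end()
 end program large_allreduce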
m_xmpi/xmpi_largetype_free [ Functions ]
[ Top ] [ m_xmpi ] [ Functions ]
NAME
xmpi_largetype_free
FUNCTION
This routine releases a large-count contiguous datatype and the user-defined MPI operation associated to it.
SIDE EFFECTS
largetype= (INTEGER) MPI type to release
largetype_op= (INTEGER) MPI user-defined operation associated to the largetype type
SOURCE
2984 subroutine xmpi_largetype_free(largetype,largetype_op) 2985 2986 !Arguments ------------------------------------ 2987 !scalars 2988 integer,intent(inout) :: largetype,largetype_op 2989 !Local variables------------------------------- 2990 #ifdef HAVE_MPI 2991 integer :: ierr 2992 #endif 2993 2994 ! ************************************************************************* 2995 2996 #ifdef HAVE_MPI 2997 xmpi_largetype_size=0 2998 if (largetype_op/=-1111) call MPI_OP_FREE(largetype_op,ierr) 2999 call MPI_TYPE_FREE(largetype,ierr) 3000 #else 3001 ABI_UNUSED(largetype) 3002 ABI_UNUSED(largetype_op) 3003 #endif 3004 3005 end subroutine xmpi_largetype_free
m_xmpi/xmpi_name [ Functions ]
[ Top ] [ m_xmpi ] [ Functions ]
NAME
xmpi_name
FUNCTION
Returns the name of the processor. Hides MPI_GET_PROCESSOR_NAME from the MPI library. From the MPI standard: the name returned should identify a particular piece of hardware; the exact format is implementation defined. This name may or may not be the same as might be returned by gethostname, uname, or sysinfo.
SOURCE
1891 subroutine xmpi_name(name_ch, ierr) 1892 1893 !Arguments------------------------- 1894 character(20),intent(out) :: name_ch 1895 integer,intent(out) :: ierr 1896 1897 !Local variables------------------- 1898 integer :: len 1899 ! character(len=MPI_MAX_PROCESSOR_NAME) :: name_ch 1900 1901 ! ************************************************************************* 1902 !Get the name of this processor (usually the hostname) 1903 1904 ierr = 0 1905 1906 #ifdef HAVE_MPI 1907 call MPI_GET_PROCESSOR_NAME(name_ch, len, ierr) 1908 name_ch = trim(name_ch) 1909 1910 #else 1911 name_ch ='0' 1912 #endif 1913 1914 end subroutine xmpi_name
m_xmpi/xmpi_pool2d_t [ Types ]
NAME
xmpi_pool2d_t
FUNCTION
Pool of MPI processors operating on a 2D problem of shape (n1, n2). Each item in the (n1, n2) matrix is assigned to a single pool. Note that different pools do not necessarily have the same number of procs, thus a pool is more flexible than a Cartesian grid, although inter-pool communication becomes more complex.
SOURCE
202 type, public :: xmpi_pool2d_t
203
204   integer :: n1 = -1, n2 = -1
205   ! Dimensions of the 2d problem
206
207   type(xcomm_t) :: comm
208   ! MPI communicator
209
210   logical,allocatable :: treats(:,:)
211   ! (n1, n2)
212   ! True if this pool treats (i1, i2)
213
214 contains
215   procedure :: from_dims => pool2d_from_dims   ! Init pool from problem dims.
216   procedure :: free => pool2d_free             ! Free memory.
217 end type xmpi_pool2d_t
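A minimal, hypothetical usage sketch (only the bindings are shown above, so the exact from_dims signature is an assumption): initialize the pool from the problem dimensions, loop over the 2D matrix skipping the items assigned to other pools, then free the object.

  type(xmpi_pool2d_t) :: pool
  integer :: i1, i2

  call pool%from_dims(n1, n2, comm)   ! assumed interface: problem dims + parent communicator.
  do i2=1,n2
    do i1=1,n1
      if (.not. pool%treats(i1, i2)) cycle
      ! ... work on item (i1, i2), communicating inside pool%comm ...
    end do
  end do
  call pool%free()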
m_xmpi/xmpi_request_free [ Functions ]
[ Top ] [ m_xmpi ] [ Functions ]
NAME
xmpi_request_free
FUNCTION
Hides MPI_REQUEST_FREE from the MPI library. Frees an array of communication request objects.
INPUTS
requests(:)= communication request array (array of handles)
OUTPUT
mpierr= status error
SOURCE
2099 subroutine xmpi_request_free(requests,mpierr) 2100 2101 !Arguments------------------------- 2102 integer,intent(inout) :: requests(:) 2103 integer,intent(out) :: mpierr 2104 2105 !Local variables------------------- 2106 #ifdef HAVE_MPI 2107 integer :: ier,ii 2108 #endif 2109 2110 ! ************************************************************************* 2111 2112 mpierr = 0 2113 #ifdef HAVE_MPI 2114 do ii=1,size(requests) 2115 if (requests(ii) /= xmpi_request_null) xmpi_count_requests = xmpi_count_requests - 1 2116 call MPI_REQUEST_FREE(requests(ii),ier) 2117 end do 2118 mpierr=ier 2119 #endif 2120 2121 end subroutine xmpi_request_free
m_xmpi/xmpi_requests_add [ Functions ]
[ Top ] [ m_xmpi ] [ Functions ]
NAME
xmpi_requests_add
FUNCTION
Increment/decrement the internal counter xmpi_count_requests by count (count may be negative).
SOURCE
2133 subroutine xmpi_requests_add(count) 2134 2135 !Arguments------------------------- 2136 integer,intent(in) :: count 2137 ! ************************************************************************* 2138 2139 xmpi_count_requests = xmpi_count_requests + count 2140 2141 end subroutine xmpi_requests_add
m_xmpi/xmpi_set_inplace_operations [ Functions ]
[ Top ] [ m_xmpi ] [ Functions ]
NAME
xmpi_set_inplace_operations
FUNCTION
Set internal flag to use MPI_IN_PLACE whenever possible.
SOURCE
862 subroutine xmpi_set_inplace_operations(bool)
863
864 !Arguments-------------------------
865  logical,intent(in) :: bool
866
867 ! *************************************************************************
868
869  xmpi_use_inplace_operations = bool
870
871 end subroutine xmpi_set_inplace_operations
m_xmpi/xmpi_show_info [ Functions ]
[ Top ] [ m_xmpi ] [ Functions ]
NAME
xmpi_show_info
FUNCTION
Printout of the most important variables stored in this module (useful for debugging).
INPUTS
unit=Unit number for formatted output (defaults to std_out).
SOURCE
1077 subroutine xmpi_show_info(unit) 1078 1079 !Arguments------------------------- 1080 integer,optional,intent(in) :: unit 1081 1082 !Local variables------------------- 1083 integer :: my_unt 1084 1085 ! ************************************************************************* 1086 1087 !@m_xmpi 1088 my_unt = std_out; if (PRESENT(unit)) my_unt=unit 1089 1090 #ifdef HAVE_MPI1 1091 write(my_unt,*)" ==== Using MPI-1 specifications ==== " 1092 #endif 1093 #ifdef HAVE_MPI2 1094 write(my_unt,*)" ==== Using MPI-2 specifications ==== " 1095 #endif 1096 1097 #ifdef HAVE_MPI_IO 1098 write(my_unt,*)" MPI-IO support is ON" 1099 #else 1100 write(my_unt,*)" MPI-IO support is OFF" 1101 #endif 1102 1103 #ifdef HAVE_MPI 1104 write(my_unt,*)" xmpi_tag_ub ................ ",xmpi_tag_ub 1105 write(my_unt,*)" xmpi_bsize_ch .............. ",xmpi_bsize_ch 1106 write(my_unt,*)" xmpi_bsize_int ............. ",xmpi_bsize_int 1107 write(my_unt,*)" xmpi_bsize_sp .............. ",xmpi_bsize_sp 1108 write(my_unt,*)" xmpi_bsize_dp .............. ",xmpi_bsize_dp 1109 write(my_unt,*)" xmpi_bsize_spc ............. ",xmpi_bsize_spc 1110 write(my_unt,*)" xmpi_bsize_dpc ............. ",xmpi_bsize_dpc 1111 write(my_unt,*)" xmpio_bsize_frm ............ ",xmpio_bsize_frm 1112 write(my_unt,*)" xmpi_address_kind .......... ",xmpi_address_kind 1113 write(my_unt,*)" xmpi_offset_kind ........... ",xmpi_offset_kind 1114 write(my_unt,*)" MPI_WTICK .................. ",MPI_WTICK() 1115 #endif 1116 1117 end subroutine xmpi_show_info
m_xmpi/xmpi_split_block [ Functions ]
[ Top ] [ m_xmpi ] [ Functions ]
NAME
xmpi_split_block
FUNCTION
Splits tasks inside communicator using block distribution. Used for the MPI parallelization of simple loops.
INPUTS
ntasks: number of tasks.
comm: MPI communicator.
OUTPUT
my_ntasks: Number of tasks assigned to this rank. May be zero if nprocs > ntasks.
my_inds(my_ntasks): List of tasks treated by this rank. Allocated by the routine. May be zero-sized.
SOURCE
2321 subroutine xmpi_split_block(ntasks, comm, my_ntasks, my_inds) 2322 2323 !Arguments ------------------------------------ 2324 integer,intent(in) :: ntasks, comm 2325 integer,intent(out) :: my_ntasks 2326 integer,allocatable,intent(out) :: my_inds(:) 2327 2328 !Local variables------------------------------- 2329 integer :: ii, istart, istop 2330 2331 ! ************************************************************************* 2332 2333 call xmpi_split_work(ntasks, comm, istart, istop) 2334 my_ntasks = istop - istart + 1 2335 ABI_MALLOC(my_inds, (my_ntasks)) 2336 if (my_ntasks > 0) my_inds = [(istart + (ii - 1), ii=1, my_ntasks)] 2337 2338 end subroutine xmpi_split_block
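For instance, a simple loop over ntasks items can be MPI-parallelized as follows (hedged sketch; ABI_FREE is the deallocation counterpart of the ABI_MALLOC macro used throughout this module):

  integer :: ii, my_ntasks
  integer,allocatable :: my_inds(:)

  call xmpi_split_block(ntasks, comm, my_ntasks, my_inds)
  do ii=1,my_ntasks
    ! ... process item my_inds(ii); idle ranks get my_ntasks = 0 and skip the loop ...
  end do
  ABI_FREE(my_inds)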
m_xmpi/xmpi_split_cyclic [ Functions ]
[ Top ] [ m_xmpi ] [ Functions ]
NAME
xmpi_split_cyclic
FUNCTION
Splits tasks inside communicator using cyclic distribution. Used for the MPI parallelization of simple loops.
INPUTS
ntasks: number of tasks.
comm: MPI communicator.
OUTPUT
my_ntasks: Number of tasks assigned to this rank. May be zero if nprocs > ntasks.
my_inds(my_ntasks): List of tasks treated by this rank. Allocated by the routine. May be zero-sized.
SOURCE
2361 subroutine xmpi_split_cyclic(ntasks, comm, my_ntasks, my_inds) 2362 2363 !Arguments ------------------------------------ 2364 integer,intent(in) :: ntasks, comm 2365 integer,intent(out) :: my_ntasks 2366 integer,allocatable,intent(out) :: my_inds(:) 2367 2368 !Local variables------------------------------- 2369 integer :: ii, cnt, itask, my_rank, nprocs 2370 2371 ! ************************************************************************* 2372 2373 nprocs = xmpi_comm_size(comm); my_rank = xmpi_comm_rank(comm) 2374 2375 do ii=1,2 2376 if (ii == 2) then 2377 ABI_MALLOC(my_inds, (my_ntasks)) 2378 end if 2379 cnt = 0 2380 do itask=1,ntasks 2381 if (mod(itask, nprocs) == my_rank) then 2382 cnt = cnt + 1 2383 if (ii == 2) my_inds(cnt) = itask 2384 end if 2385 end do 2386 if (ii == 1) my_ntasks = cnt 2387 end do 2388 2389 end subroutine xmpi_split_cyclic
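As a concrete example, with ntasks = 10 and nprocs = 3 the cyclic distribution assigns rank 0 the tasks {3, 6, 9}, rank 1 the tasks {1, 4, 7, 10} and rank 2 the tasks {2, 5, 8} (since mod(itask, nprocs) selects the rank), whereas xmpi_split_block would give the contiguous slices 1-4, 5-7 and 8-10.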
m_xmpi/xmpi_split_list [ Functions ]
[ Top ] [ m_xmpi ] [ Functions ]
NAME
xmpi_split_list
FUNCTION
Splits list of items inside communicator using block distribution. Used for the MPI parallelization of simple loops.
INPUTS
ntasks: Number of items in the list (global).
list(ntasks): List of indices.
comm: MPI communicator.
OUTPUT
my_ntasks: Number of tasks assigned to this rank. May be zero if nprocs > ntasks.
my_inds(my_ntasks): List of tasks treated by this rank. Allocated by the routine. May be zero-sized.
SOURCE
2413 subroutine xmpi_split_list(ntasks, list, comm, my_ntasks, my_inds) 2414 2415 !Arguments ------------------------------------ 2416 integer,intent(in) :: ntasks, comm 2417 integer,intent(out) :: my_ntasks 2418 integer,intent(in) :: list(ntasks) 2419 integer,allocatable,intent(out) :: my_inds(:) 2420 2421 !Local variables------------------------------- 2422 integer :: my_start, my_stop 2423 2424 ! ************************************************************************* 2425 2426 call xmpi_split_work(ntasks, comm, my_start, my_stop) 2427 2428 my_ntasks = my_stop - my_start + 1 2429 2430 if (my_stop >= my_start) then 2431 ABI_MALLOC(my_inds, (my_ntasks)) 2432 my_inds = list(my_start:my_stop) 2433 else 2434 my_ntasks = 0 2435 ABI_MALLOC(my_inds, (0)) 2436 end if 2437 2438 end subroutine xmpi_split_list
m_xmpi/xmpi_split_work2_i4b [ Functions ]
[ Top ] [ m_xmpi ] [ Functions ]
NAME
xmpi_split_work2_i4b
FUNCTION
Splits a number of tasks, ntasks, among nprocs processors. The output arrays istart(1:nprocs) and istop(1:nprocs) report the starting and final task index for each CPU: the CPU with rank ii performs all the tasks between istart(ii+1) and istop(ii+1). Note the Fortran convention of using 1 as the first index of the array. Note, moreover, that if a proc has rank > ntasks then istart(rank+1) = ntasks+1 and istop(rank+1) = ntasks. In this particular case, loops of the form

  do ii=istart(rank+1),istop(rank+1)
  ...
  end do

are not executed. Moreover, allocations such as foo(istart(rank+1):istop(rank+1)) will generate a zero-sized array.
INPUTS
ntasks=Number of tasks.
nprocs=Number of processors.
OUTPUT
istart(nprocs),istop(nprocs)= indices defining the initial and final task for each processor
SOURCE
2476 subroutine xmpi_split_work2_i4b(ntasks, nprocs, istart, istop) 2477 2478 !Arguments ------------------------------------ 2479 integer,intent(in) :: ntasks,nprocs 2480 integer,intent(inout) :: istart(nprocs), istop(nprocs) 2481 2482 !Local variables------------------------------- 2483 integer :: res,irank,block,block_tmp 2484 2485 ! ************************************************************************* 2486 2487 block_tmp = ntasks/nprocs 2488 res = MOD(ntasks,nprocs) 2489 block = block_tmp+1 2490 2491 do irank=0,nprocs-1 2492 if (irank<res) then 2493 istart(irank+1) = irank *block+1 2494 istop (irank+1) = (irank+1)*block 2495 else 2496 istart(irank+1) = res*block + (irank-res )*block_tmp+1 2497 istop (irank+1) = res*block + (irank-res+1)*block_tmp 2498 end if 2499 end do 2500 2501 end subroutine xmpi_split_work2_i4b
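For example, with ntasks = 10 and nprocs = 3 one gets block_tmp = 3 and res = 1, hence istart = (/1, 5, 8/) and istop = (/4, 7, 10/): the first res ranks receive block_tmp+1 tasks and the remaining ones block_tmp tasks.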
m_xmpi/xmpi_split_work2_i8b [ Functions ]
[ Top ] [ m_xmpi ] [ Functions ]
NAME
xmpi_split_work2_i8b
FUNCTION
Same as xmpi_split_work2_i4b but accepts 8-byte integers for ntasks.
INPUTS
ntasks=Number of tasks.
nprocs=Number of processors.
OUTPUT
istart(nprocs),istop(nprocs)= indices defining the initial and final task for each processor
SOURCE
2522 subroutine xmpi_split_work2_i8b(ntasks,nprocs,istart,istop) 2523 2524 !Arguments ------------------------------------ 2525 integer,intent(in) :: nprocs 2526 integer(i8b),intent(in) :: ntasks 2527 integer(i8b),intent(inout) :: istart(nprocs),istop(nprocs) 2528 2529 !Local variables------------------------------- 2530 integer(i8b) :: res,irank,block,block_tmp 2531 2532 ! ************************************************************************* 2533 2534 block_tmp = ntasks/nprocs 2535 res = MOD(ntasks,INT(nprocs,KIND=i8b)) 2536 block = block_tmp+1 2537 2538 do irank=0,nprocs-1 2539 if (irank<res) then 2540 istart(irank+1)= irank *block+1 2541 istop (irank+1)=(irank+1)*block 2542 else 2543 istart(irank+1)=res*block+(irank-res )*block_tmp+1 2544 istop (irank+1)=res*block+(irank-res+1)*block_tmp 2545 end if 2546 end do 2547 2548 end subroutine xmpi_split_work2_i8b
m_xmpi/xmpi_split_work_i4b [ Functions ]
[ Top ] [ m_xmpi ] [ Functions ]
NAME
xmpi_split_work_i4b
FUNCTION
Splits the number of tasks, ntasks, among nprocs processors. Used for the MPI parallelization of simple loops.
INPUTS
ntasks=Number of tasks.
comm=MPI communicator.
OUTPUT
my_start,my_stop= indices defining the initial and final task for this processor
NOTES
If nprocs > ntasks then my_start = ntasks+1 and my_stop = ntasks. In this particular case, loops of the form

  do ii=my_start,my_stop
  ...
  end do

are not executed. Moreover, allocations such as foo(my_start:my_stop) will generate a zero-sized array.
SOURCE
2273 subroutine xmpi_split_work_i4b(ntasks, comm, my_start, my_stop) 2274 2275 !Arguments ------------------------------------ 2276 integer,intent(in) :: ntasks,comm 2277 integer,intent(out) :: my_start, my_stop 2278 2279 !Local variables------------------------------- 2280 integer :: res,nprocs,my_rank,block_p1,block 2281 2282 ! ************************************************************************* 2283 2284 nprocs = xmpi_comm_size(comm); my_rank = xmpi_comm_rank(comm) 2285 2286 block = ntasks / nprocs 2287 res = MOD(ntasks, nprocs) 2288 block_p1= block + 1 2289 2290 if (my_rank < res) then 2291 my_start = my_rank *block_p1+1 2292 my_stop = (my_rank+1)*block_p1 2293 else 2294 my_start = res*block_p1 + (my_rank-res )*block + 1 2295 my_stop = res*block_p1 + (my_rank-res+1)*block 2296 end if 2297 2298 end subroutine xmpi_split_work_i4b
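Typical usage (hedged sketch; xmpi_split_work is the generic interface wrapping this routine, as called in xmpi_split_block above):

  integer :: ii, my_start, my_stop

  call xmpi_split_work(ntasks, comm, my_start, my_stop)
  do ii=my_start,my_stop
    ! ... each rank treats its contiguous slice; ranks without work skip the loop ...
  end do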
m_xmpi/xmpi_subcomm [ Functions ]
[ Top ] [ m_xmpi ] [ Functions ]
NAME
xmpi_subcomm
FUNCTION
Return a sub-communicator from an input communicator and a given set of process ranks (hides subgroup creation/destruction).
INPUTS
comm=input communicator.
nranks=number of elements in array ranks (size of subcomm).
ranks=ranks of the processes in the group of comm to appear in subcomm.
OUTPUT
[my_rank_in_group]=optional: my rank in the group of the new sub-communicator.
xmpi_subcomm=new (sub-)communicator.
SOURCE
1544 function xmpi_subcomm(comm,nranks,ranks,my_rank_in_group) 1545 1546 !Arguments------------------------- 1547 !scalars 1548 integer,intent(in) :: comm,nranks 1549 integer,intent(out),optional :: my_rank_in_group 1550 integer :: xmpi_subcomm 1551 !arrays 1552 integer,intent(in) :: ranks(nranks) 1553 1554 !Local variables------------------------------- 1555 #ifdef HAVE_MPI 1556 integer :: group,ierr,subgroup 1557 #endif 1558 1559 ! ************************************************************************* 1560 1561 xmpi_subcomm=xmpi_comm_null 1562 if (present(my_rank_in_group)) my_rank_in_group=xmpi_undefined 1563 1564 #ifdef HAVE_MPI 1565 if (comm/=xmpi_comm_null.and.nranks>=0) then 1566 call MPI_COMM_GROUP(comm,group,ierr) 1567 call MPI_GROUP_INCL(group,nranks,ranks,subgroup,ierr) 1568 call MPI_COMM_CREATE(comm,subgroup,xmpi_subcomm,ierr) 1569 if ( nranks == 0 )xmpi_subcomm=xmpi_comm_self 1570 if (present(my_rank_in_group)) then 1571 call MPI_Group_rank(subgroup,my_rank_in_group,ierr) 1572 end if 1573 call MPI_GROUP_FREE(subgroup,ierr) 1574 call MPI_GROUP_FREE(group,ierr) 1575 end if 1576 #else 1577 if (nranks>0) then 1578 if (ranks(1)==0) then 1579 xmpi_subcomm=xmpi_comm_self 1580 if (present(my_rank_in_group)) my_rank_in_group=0 1581 end if 1582 end if 1583 #endif 1584 1585 end function xmpi_subcomm
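A hypothetical sketch building a sub-communicator from the even ranks of comm (variable names are illustrative):

  integer :: ii, nranks, even_comm, me_in_sub
  integer,allocatable :: ranks(:)

  nranks = (xmpi_comm_size(comm) + 1) / 2
  ABI_MALLOC(ranks, (nranks))
  ranks = [((ii - 1) * 2, ii = 1, nranks)]   ! ranks 0, 2, 4, ...
  even_comm = xmpi_subcomm(comm, nranks, ranks, my_rank_in_group=me_in_sub)
  ABI_FREE(ranks)
  ! even_comm is xmpi_comm_null on the ranks not included in ranks(:).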
m_xmpi/xmpi_wait [ Functions ]
[ Top ] [ m_xmpi ] [ Functions ]
NAME
xmpi_wait
FUNCTION
Hides MPI_WAIT from the MPI library. Waits for an MPI request to complete.
INPUTS
request= MPI request handle to wait for
OUTPUT
mpierr= status error
SOURCE
1979 subroutine xmpi_wait(request, mpierr) 1980 1981 !Arguments------------------------- 1982 integer,intent(inout) :: request 1983 integer,intent(out) :: mpierr 1984 1985 !Local variables------------------- 1986 #ifdef HAVE_MPI 1987 integer :: ier,status(MPI_STATUS_SIZE) 1988 #endif 1989 1990 ! ************************************************************************* 1991 1992 mpierr = 0 1993 #ifdef HAVE_MPI 1994 if (request /= xmpi_request_null) xmpi_count_requests = xmpi_count_requests - 1 1995 call MPI_WAIT(request,status,ier) 1996 mpierr=ier 1997 #endif 1998 1999 end subroutine xmpi_wait
m_xmpi/xmpi_waitall_1d [ Functions ]
[ Top ] [ m_xmpi ] [ Functions ]
NAME
xmpi_waitall_1d
FUNCTION
Hides MPI_WAITALL from the MPI library. Waits for all given MPI requests to complete.
INPUTS
array_of_requests= array of request handles
OUTPUT
mpierr= status error
SOURCE
2020 subroutine xmpi_waitall_1d(array_of_requests, mpierr) 2021 2022 !Arguments------------------------- 2023 integer,intent(inout) :: array_of_requests(:) 2024 integer,intent(out) :: mpierr 2025 2026 !Local variables------------------- 2027 #ifdef HAVE_MPI 2028 integer :: ier,status(MPI_STATUS_SIZE,size(array_of_requests)) 2029 #endif 2030 2031 ! ************************************************************************* 2032 2033 mpierr = 0 2034 #ifdef HAVE_MPI 2035 xmpi_count_requests = xmpi_count_requests - count(array_of_requests /= xmpi_request_null) 2036 call MPI_WAITALL(size(array_of_requests), array_of_requests, status, ier) 2037 mpierr=ier 2038 #endif 2039 2040 end subroutine xmpi_waitall_1d
m_xmpi/xmpi_waitall_2d [ Functions ]
[ Top ] [ m_xmpi ] [ Functions ]
NAME
xmpi_waitall_2d
FUNCTION
Hides MPI_WAITALL from the MPI library. Waits for all given MPI requests to complete.
INPUTS
array_of_requests= array of request handles
OUTPUT
mpierr= status error
SOURCE
2061 subroutine xmpi_waitall_2d(array_of_requests, mpierr) 2062 2063 !Arguments------------------------- 2064 integer,intent(inout) :: array_of_requests(:,:) 2065 integer,intent(out) :: mpierr 2066 2067 !Local variables------------------- 2068 integer :: flat_requests(product(shape(array_of_requests))) 2069 2070 ! ************************************************************************* 2071 2072 ! MPI_WAITALL is a Fortran interface so cannot pass count and base address a la C 2073 ! so flat 2d array and copy in-out. See https://github.com/open-mpi/ompi/issues/587 2074 flat_requests = pack(array_of_requests, mask=.True.) 2075 call xmpi_waitall_1d(flat_requests, mpierr) 2076 array_of_requests = reshape(flat_requests, shape(array_of_requests)) 2077 2078 end subroutine xmpi_waitall_2d
m_xmpi/xmpio_check_frmarkers [ Functions ]
[ Top ] [ m_xmpi ] [ Functions ]
NAME
xmpio_check_frmarkers
FUNCTION
Check a set of Fortran record markers starting at a given offset using MPI-IO.
INPUTS
fh=MPI-IO file handler.
offset=MPI-IO file pointer.
sc_mode=Option for individual or collective reading.
nfrec=Number of Fortran records to be checked.
bsize_frecord(nfrec)=Byte size of the Fortran records (markers are NOT included). These values will be compared with the markers reported in the file.
OUTPUT
ierr=A non-zero error code signals failure.
SOURCE
3919 #ifdef HAVE_MPI_IO 3920 3921 subroutine xmpio_check_frmarkers(fh, offset, sc_mode, nfrec, bsize_frecord, ierr) 3922 3923 !Arguments ------------------------------------ 3924 !scalars 3925 integer,intent(in) :: fh,nfrec,sc_mode 3926 integer(XMPI_OFFSET_KIND),intent(in) :: offset 3927 integer,intent(out) :: ierr 3928 !arrays 3929 integer(XMPI_OFFSET_KIND),intent(in) :: bsize_frecord(nfrec) 3930 3931 !Local variables------------------------------- 3932 !scalars 3933 integer :: nb,irec,frmarkers_type,jj,bsize_frm,mpi_type_frm,mpierr,myfh 3934 integer(XMPI_OFFSET_KIND) :: displ 3935 !arrays 3936 integer(kind=int16),allocatable :: bufdelim2(:) 3937 integer(kind=int32),allocatable :: bufdelim4(:) 3938 integer(kind=int64),allocatable :: bufdelim8(:) 3939 #ifdef HAVE_FC_INT_QUAD 3940 integer*16,allocatable :: bufdelim16(:) 3941 #endif 3942 !integer :: statux(MPI_STATUS_SIZE) 3943 integer,allocatable :: block_length(:),block_type(:) 3944 integer(XMPI_ADDRESS_KIND),allocatable :: block_displ(:) 3945 integer(XMPI_OFFSET_KIND),allocatable :: delim_record(:) 3946 3947 !************************************************************************ 3948 3949 ! Workaround for XLF 3950 myfh = fh 3951 ierr=0 3952 3953 bsize_frm = xmpio_bsize_frm ! Byte size of the Fortran record marker. 3954 mpi_type_frm = xmpio_mpi_type_frm ! MPI type of the record marker. 3955 3956 ! Define the view for the file. 3957 nb=2*nfrec 3958 ABI_MALLOC(block_length,(nb+2)) 3959 ABI_MALLOC(block_displ,(nb+2)) 3960 ABI_MALLOC(block_type,(nb+2)) 3961 block_length(1)=1 3962 block_displ (1)=0 3963 block_type (1)=MPI_LB 3964 3965 jj=2; displ=0 3966 do irec=1,nfrec 3967 block_type (jj:jj+1) =mpi_type_frm 3968 block_length(jj:jj+1)=1 3969 block_displ(jj ) = displ 3970 block_displ(jj+1) = bsize_frm + displ + bsize_frecord(irec) 3971 jj=jj+2 3972 displ = displ + bsize_frecord(irec) + 2*bsize_frm ! Move to the beginning of the next column. 3973 if (xmpio_max_address(displ)) ierr=-1 ! Check for wraparound. 3974 end do 3975 3976 block_length(nb+2)=1 3977 block_displ (nb+2)=displ 3978 block_type (nb+2)=MPI_UB 3979 3980 call xmpio_type_struct(nb+2,block_length,block_displ,block_type,frmarkers_type,mpierr) 3981 ABI_FREE(block_length) 3982 ABI_FREE(block_displ) 3983 ABI_FREE(block_type) 3984 3985 call MPI_TYPE_COMMIT(frmarkers_type,mpierr) 3986 call MPI_FILE_SET_VIEW(myfh,offset,MPI_BYTE,frmarkers_type,"native",MPI_INFO_NULL,mpierr) 3987 3988 jj=1 3989 ABI_MALLOC(delim_record,(nb)) 3990 do irec=1,nfrec 3991 delim_record(jj:jj+1)=bsize_frecord(irec) 3992 jj=jj+2 3993 end do 3994 3995 ! Read markers according to the MPI type of the Fortran marker. 
3996 SELECT CASE (bsize_frm) 3997 3998 CASE (4) 3999 ABI_MALLOC(bufdelim4,(nb)) 4000 if (sc_mode==xmpio_single) then 4001 call MPI_FILE_READ (myfh,bufdelim4,2*nfrec,mpi_type_frm,MPI_STATUS_IGNORE,mpierr) 4002 else if (sc_mode==xmpio_collective) then 4003 call MPI_FILE_READ_ALL(myfh,bufdelim4,2*nfrec,mpi_type_frm,MPI_STATUS_IGNORE,mpierr) 4004 else 4005 ierr=2 4006 end if 4007 if (ANY(bufdelim4/=delim_record)) ierr=1 4008 if (ierr==1) then 4009 do irec=1,2*nfrec 4010 write(std_out,*)"irec, bufdelim4, delim_record: ",irec,bufdelim4(irec),delim_record(irec) 4011 end do 4012 end if 4013 ABI_FREE(bufdelim4) 4014 4015 CASE (8) 4016 ABI_MALLOC(bufdelim8,(nb)) 4017 if (sc_mode==xmpio_single) then 4018 call MPI_FILE_READ (myfh,bufdelim8,2*nfrec,mpi_type_frm,MPI_STATUS_IGNORE,mpierr) 4019 else if (sc_mode==xmpio_collective) then 4020 call MPI_FILE_READ_ALL(myfh,bufdelim8,2*nfrec,mpi_type_frm,MPI_STATUS_IGNORE,mpierr) 4021 else 4022 ierr=2 4023 end if 4024 if (ANY(bufdelim8/=delim_record)) ierr=1 4025 ABI_FREE(bufdelim8) 4026 4027 #ifdef HAVE_FC_INT_QUAD 4028 CASE (16) 4029 ABI_MALLOC(bufdelim16,(nb)) 4030 if (sc_mode==xmpio_single) then 4031 call MPI_FILE_READ (myfh,bufdelim16,2*nfrec,mpi_type_frm,MPI_STATUS_IGNORE,mpierr) 4032 else if (sc_mode==xmpio_collective) then 4033 call MPI_FILE_READ_ALL(myfh,bufdelim16,2*nfrec,mpi_type_frm,MPI_STATUS_IGNORE,mpierr) 4034 else 4035 ierr=2 4036 end if 4037 if (ANY(bufdelim16/=delim_record)) ierr=1 4038 ABI_FREE(bufdelim16) 4039 #endif 4040 4041 CASE (2) 4042 ABI_MALLOC(bufdelim2,(nb)) 4043 if (sc_mode==xmpio_single) then 4044 call MPI_FILE_READ (myfh,bufdelim2,2*nfrec,mpi_type_frm,MPI_STATUS_IGNORE,mpierr) 4045 else if (sc_mode==xmpio_collective) then 4046 call MPI_FILE_READ_ALL(myfh,bufdelim2,2*nfrec,mpi_type_frm,MPI_STATUS_IGNORE,mpierr) 4047 else 4048 ierr=2 4049 end if 4050 if (ANY(bufdelim2/=delim_record)) ierr=1 4051 ABI_FREE(bufdelim2) 4052 4053 CASE DEFAULT 4054 ierr=-2 4055 END SELECT 4056 4057 ! Free memory 4058 call MPI_TYPE_FREE(frmarkers_type,mpierr) 4059 ABI_FREE(delim_record) 4060 4061 end subroutine xmpio_check_frmarkers
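A hedged usage sketch (fh, base, nfrec and ncols are illustrative names): to verify that the file contains nfrec consecutive records, each storing ncols double-precision numbers, one fills bsize_frecord with the payload sizes and lets all nodes check collectively:

  integer(XMPI_OFFSET_KIND),allocatable :: bsize_frecord(:)

  ABI_MALLOC(bsize_frecord, (nfrec))
  bsize_frecord = ncols * xmpi_bsize_dp   ! payload size only: the markers are excluded.
  call xmpio_check_frmarkers(fh, base, xmpio_collective, nfrec, bsize_frecord, ierr)
  if (ierr /= 0) call xmpi_abort(msg="Fortran record markers do not match the expected record sizes!")
  ABI_FREE(bsize_frecord)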
m_xmpi/xmpio_create_coldistr_from_fp3blocks [ Functions ]
[ Top ] [ m_xmpi ] [ Functions ]
NAME
xmpio_create_coldistr_from_fp3blocks
FUNCTION
Returns an MPI datatype that can be used to MPI-IO (read|write) the columns of a matrix of the form

   M = | S1    F3 |
       | F3^H  S2 |

where S1 and S2 are square (symmetric|Hermitian) matrices whose upper triangle is stored on file, while F3 is a generic matrix (not necessarily square) stored in full mode. The Fortran file contains the blocks in the following order: upper(S1), upper(S2), F3.
INPUTS
sizes(2)=Number of elements of type old_type in each dimension of the full array M (array of positive integers).
my_cols(2)=Initial and final column to (read|write). Array of positive integers, Fortran convention.
block_sizes(2,3)=The sizes of S1, S2, F3.
old_type=MPI datatype of the elements of the matrix.
OUTPUT
new_type=New MPI type that can be used to instantiate the MPI-IO view for the Fortran file.
my_offpad=Offset to be added to the file pointer giving the position of the first Fortran record marker (let's call it "base"). Each node should (read|write) using my_offset = base + my_offpad. my_offpad is used so that one can safely change the way the fileview is generated (for example to make it more efficient) without having to change the client code.
offset_err=Error code. A non-zero returned value signals that the global matrix is too large for a single MPI-IO access (see notes below).
NOTES
1) block_displ is given in bytes due to the presence of the marker. If the displacement of an element is too large, the routine returns offset_err=1 so that the caller knows that several MPI-IO reads are required to (read|write) the file.
SOURCE
4772 #ifdef HAVE_MPI_IO 4773 4774 subroutine xmpio_create_coldistr_from_fp3blocks(sizes,block_sizes,my_cols,old_type,new_type,my_offpad,offset_err) 4775 4776 !Arguments ------------------------------------ 4777 !scalars 4778 integer,intent(in) :: old_type 4779 integer,intent(out) :: new_type,offset_err 4780 integer(XMPI_OFFSET_KIND),intent(out) :: my_offpad 4781 !arrays 4782 integer,intent(in) :: sizes(2),my_cols(2),block_sizes(2,3) 4783 4784 !Local variables------------------------------- 4785 !scalars 4786 integer :: my_ncol,bsize_old,my_col,which_block,uplo,swap 4787 integer :: my_nels,my_el,row_glob,ii_hpk,jj_hpk,ii,jj 4788 integer :: col_glob,bsize_frm,mpierr,row_shift,col_shift,n1,n2 4789 integer(XMPI_OFFSET_KIND) :: my_offset,ijp,bsize_tot,max_displ,min_displ 4790 integer(XMPI_ADDRESS_KIND) :: address 4791 !arrays 4792 integer,allocatable :: block_length(:),block_type(:) 4793 integer(XMPI_ADDRESS_KIND),allocatable :: block_displ(:) 4794 integer(XMPI_OFFSET_KIND) :: bsize_mat(2) 4795 4796 !************************************************************************ 4797 4798 if (sizes(1) /= SUM(block_sizes(1,1:2)) .or. & 4799 sizes(2) /= SUM(block_sizes(2,1:2)) ) then 4800 write(std_out,*)" xmpio_create_coldistr_from_fp3blocks: Inconsistency between block_sizes ans sizes " 4801 call xmpi_abort() 4802 end if 4803 4804 if (block_sizes(1,1) /= block_sizes(2,1) .or.& 4805 block_sizes(1,2) /= block_sizes(2,2) ) then 4806 write(std_out,*)" xmpio_create_coldistr_from_fp3blocks: first two blocks must be square" 4807 call xmpi_abort() 4808 end if 4809 4810 if (block_sizes(2,3) /= block_sizes(2,2) .or.& 4811 block_sizes(1,3) /= block_sizes(1,1) ) then 4812 write(std_out,*)" xmpio_create_coldistr_from_fp3blocks: Full matrix must be square" 4813 call xmpi_abort() 4814 end if 4815 4816 write(std_out,*)" xmpio_create_coldistr_from_fp3blocks is still under testing" 4817 !call xmpi_abort() 4818 4819 ! Byte size of the Fortran record marker. 4820 bsize_frm = xmpio_bsize_frm 4821 4822 ! Byte size of old_type. 4823 call MPI_TYPE_SIZE(old_type,bsize_old,mpierr) 4824 4825 ! my number of columns and total numer of elements to be read. 4826 my_ncol = my_cols(2) - my_cols(1) + 1 4827 my_nels = sizes(1)*my_ncol 4828 ! 4829 ! block_displ(el+1) stores the displacement of the local element el taking into account the Fortran marker. 4830 ABI_MALLOC(block_displ,(my_nels+2)) 4831 ABI_MALLOC(block_length,(my_nels+2)) 4832 ABI_MALLOC(block_type,(my_nels+2)) 4833 ! 4834 ! * the view assumes that the file pointer used to instanciate the MPI-IO view 4835 ! points to the first element of the first column. In other words,the first Fortran record marker 4836 ! is not taken into account in the calculation of the displacements. 4837 my_offpad=xmpio_bsize_frm 4838 ! 4839 ! Byte size of the first two blocks including the markers. 4840 n1=block_sizes(1,1) 4841 bsize_mat(1) = (n1*(n1+1)/2)*bsize_old + 2*n1*bsize_frm 4842 4843 n2=block_sizes(1,2) 4844 bsize_mat(2) = (n2*(n2+1)/2)*bsize_old + 2*n2*bsize_frm 4845 4846 bsize_tot=SUM(bsize_mat) + PRODUCT(block_sizes(:,3))*bsize_old + block_sizes(2,3)*2*bsize_frm - bsize_frm 4847 write(std_out,*)"bsize_mat",bsize_mat,"bsize_tot",bsize_tot 4848 ! 4849 ! * Some matrix elements are read twice. This part has to be tested. 4850 offset_err=0; my_el=0; max_displ=0; min_displ=HUGE(address) 4851 do my_col=1,my_ncol 4852 col_glob = (my_col-1) + my_cols(1) 4853 do row_glob=1,sizes(1) 4854 ! 
4855 which_block=3 4856 if (row_glob<=block_sizes(1,1).and.col_glob<=block_sizes(2,1)) which_block=1 4857 if (row_glob >block_sizes(1,1).and.col_glob >block_sizes(2,1)) which_block=2 4858 4859 if ( ANY(which_block == (/1,2/)) ) then ! S1 or S2 4860 ! 4861 row_shift=(which_block-1)*block_sizes(1,1) 4862 col_shift=(which_block-1)*block_sizes(2,1) 4863 4864 ii_hpk = row_glob - row_shift 4865 jj_hpk = col_glob - col_shift 4866 if (jj_hpk<ii_hpk) then ! Exchange the indices so that the symmetric is read. 4867 swap = jj_hpk 4868 jj_hpk = ii_hpk 4869 ii_hpk = swap 4870 end if 4871 ijp = ii_hpk + jj_hpk*(jj_hpk-1)/2 ! Index for packed form 4872 my_offset = (ijp-1)*bsize_old + (jj_hpk-1)*2*bsize_frm 4873 if (which_block==2) my_offset=my_offset+bsize_mat(1) ! Shift the offset to account for S1. 4874 !my_offset=4 4875 ! 4876 else 4877 ! The element belongs either to F3 of F3^H. 4878 ! Now find whether it is the upper or the lower block since only F3 is stored on file. 4879 uplo=1; if (row_glob>block_sizes(1,1)) uplo=2 4880 4881 if (uplo==1) then 4882 row_shift=0 4883 col_shift=block_sizes(2,1) 4884 else 4885 row_shift=block_sizes(1,1) 4886 col_shift=0 4887 end if 4888 ii = row_glob - row_shift 4889 jj = col_glob - col_shift 4890 4891 if (uplo==2) then ! Exchange the indices since the symmetric element will be read. 4892 swap=jj 4893 jj =ii 4894 ii =swap 4895 end if 4896 4897 my_offset = (ii-1)*bsize_old + (jj-1)*block_sizes(1,3)*bsize_old + (jj-1)*2*bsize_frm 4898 my_offset = my_offset + SUM(bsize_mat) 4899 !if (uplo==1) my_offset=my_offset + bsize_mat(1) 4900 !my_offset=0 4901 !if (ii==1.and.jj==1) write(std_out,*)" (1,1) offset = ",my_offset 4902 !if (ii==block_sizes(1,3).and.jj==block_sizes(2,3)) write(std_out,*)" (n,n) offset =", my_offset 4903 if (my_offset>=bsize_tot-1*bsize_old) then 4904 write(std_out,*)"WARNING (my_offset>bsize_tot-bsize_old),",ii,jj,my_offset,bsize_tot 4905 end if 4906 end if 4907 4908 if (xmpio_max_address(my_offset)) offset_err=1 ! Check for wraparounds. 4909 my_el = my_el+1 4910 block_displ (my_el+1)=my_offset 4911 block_length(my_el+1)=1 4912 block_type (my_el+1)=old_type 4913 max_displ = MAX(max_displ,my_offset) 4914 min_displ = MIN(min_displ,my_offset) 4915 !if (which_block==3) write(std_out,*)" my_el, which, displ: ",my_el,which_block,block_displ(my_el+1) 4916 end do 4917 end do 4918 4919 write(std_out,*)" MAX displ = ",max_displ," my_nels = ",my_nels 4920 write(std_out,*)" MIN displ = ",MINVAL(block_displ(2:my_nels+1)) 4921 4922 !block_displ (1)=max_displ ! Do not change this value. 4923 !if (min_displ>0) block_displ (1)=min_displ ! Do not change this value. 4924 4925 block_displ (1)=min_displ 4926 block_displ (1)=0 4927 block_length(1)=0 4928 block_type (1)=MPI_LB 4929 4930 block_length(my_nels+2)=0 4931 !block_displ (my_nels+2)=bsize_tot 4932 block_displ (my_nels+2)=max_displ 4933 block_type (my_nels+2)=MPI_UB 4934 4935 call xmpio_type_struct(my_nels+2,block_length,block_displ,block_type,new_type,mpierr) 4936 !call xmpio_type_struct(my_nels,block_length(2:),block_displ(2:),block_type(2:),new_type,mpierr) 4937 4938 !call MPI_TYPE_CREATE_INDEXED_BLOCK(my_nels, block_length(2:), block_displ(2:), old_type, new_type, mpierr) 4939 4940 call MPI_TYPE_COMMIT(new_type,mpierr) 4941 4942 ABI_FREE(block_length) 4943 ABI_FREE(block_displ) 4944 ABI_FREE(block_type) 4945 4946 end subroutine xmpio_create_coldistr_from_fp3blocks
m_xmpi/xmpio_create_coldistr_from_fpacked [ Functions ]
[ Top ] [ m_xmpi ] [ Functions ]
NAME
xmpio_create_coldistr_from_fpacked
FUNCTION
Returns an MPI datatype that can be used to MPI-IO (read|write) the columns of a (Hermitian|symmetric) matrix whose upper triangle is written on a Fortran binary file. Note that the view assumes that the file pointer used to instantiate the MPI-IO view points to the first element of the first column. In other words, the first Fortran record marker (if any) is not taken into account in the calculation of the displacements.
INPUTS
sizes(2)=Number of elements of type old_type in each dimension of the full array (array of positive integers).
my_cols(2)=Initial and final column to (read|write). Array of positive integers, Fortran convention.
old_type=MPI datatype of the elements of the matrix.
OUTPUT
new_type=New MPI type that can be used to instantiate the MPI-IO view for the Fortran file.
my_offpad=Offset to be added to the file pointer giving the position of the first Fortran record marker (let's call it "base"). Each node should (read|write) using my_offset = base + my_offpad. my_offpad is used so that one can safely change the way the fileview is generated (for example to make it more efficient) without having to change the client code.
offset_err=Error code. A non-zero returned value signals that the global matrix is too large for a single MPI-IO access (see notes below).
NOTES
1) The matrix on file is written in the following FORTRAN format (let us assume a 3x3 matrix for simplicity)

   m (1,1) m
   m (1,2) (2,2) m
   m (1,3) (2,3) (3,3) m

where each Fortran record stores a column of the packed matrix and "m" denotes the Fortran record marker that introduces holes in the file view.
2) With (signed) Fortran integers, the maximum size of the file that can be read in one shot is around 2 GB when etype is set to byte. Using a larger etype might create portability problems (real data on machines using integer*16 for the marker) since etype must be a multiple of the Fortran record marker. For the above reason, block_displ is given in bytes but it has to be defined as a Fortran integer. If the displacement cannot be stored in a Fortran integer, the routine returns offset_err=1 so that the caller knows that several MPI-IO reads are needed to read the file.
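As a check of the packed-index formula used in the source below, the element (ii,jj) of the upper triangle (jj >= ii) has linear index ijp = ii + jj*(jj-1)/2: for the 3x3 layout above, (1,1) -> 1, (1,2) -> 2, (2,2) -> 3, (1,3) -> 4, (2,3) -> 5 and (3,3) -> 6.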
SOURCE
4646 #ifdef HAVE_MPI_IO 4647 4648 subroutine xmpio_create_coldistr_from_fpacked(sizes,my_cols,old_type,new_type,my_offpad,offset_err) 4649 4650 !Arguments ------------------------------------ 4651 !scalars 4652 integer,intent(in) :: old_type 4653 integer,intent(out) :: new_type,offset_err 4654 integer(XMPI_OFFSET_KIND),intent(out) :: my_offpad 4655 !arrays 4656 integer,intent(in) :: sizes(2),my_cols(2) 4657 4658 !Local variables------------------------------- 4659 !scalars 4660 integer :: my_ncol,bsize_old,my_col 4661 integer :: my_nels,my_el,row_glob,ii_hpk,jj_hpk,col_glob,bsize_frm,mpierr 4662 integer(XMPI_OFFSET_KIND) :: my_offset,ijp_glob 4663 !character(len=500) :: msg 4664 !arrays 4665 integer,allocatable :: block_length(:),block_type(:) 4666 integer(XMPI_ADDRESS_KIND),allocatable :: block_displ(:) 4667 4668 !************************************************************************ 4669 4670 ! Byte size of the Fortran record marker. 4671 bsize_frm = xmpio_bsize_frm 4672 4673 ! Byte size of old_type. 4674 call MPI_TYPE_SIZE(old_type,bsize_old,mpierr) 4675 4676 ! my number of columns and total numer of elements to be read. 4677 my_ncol = my_cols(2) - my_cols(1) + 1 4678 my_nels = my_ncol*sizes(1) 4679 ! 4680 ! block_displ(el+1) stores the displacement of the local element el taking into account the Fortran marker. 4681 ABI_MALLOC(block_displ,(my_nels+2)) 4682 ABI_MALLOC(block_length,(my_nels+2)) 4683 ABI_MALLOC(block_type,(my_nels+2)) 4684 4685 block_length(1)=1 4686 block_displ (1)=0 4687 block_type (1)=MPI_LB 4688 ! 4689 ! * the view assumes that the file pointer used to instanciate the MPI-IO view 4690 ! points to the first element of the first column. In other words,the first Fortran record marker 4691 ! is not taken into account in the calculation of the displacements. 4692 my_offpad=xmpio_bsize_frm 4693 4694 ! * Some matrix elements are read twice. This part has to be tested. 4695 offset_err=0; my_el=0 4696 do my_col=1,my_ncol 4697 col_glob = (my_col-1) + my_cols(1) 4698 do row_glob=1,sizes(1) 4699 if (col_glob>=row_glob) then 4700 ii_hpk = row_glob 4701 jj_hpk = col_glob 4702 ijp_glob = row_glob + col_glob*(col_glob-1)/2 ! Index for packed form 4703 else ! Exchange the indices as (jj,ii) will be read. 4704 ii_hpk = col_glob 4705 jj_hpk = row_glob 4706 ijp_glob = col_glob + row_glob*(row_glob-1)/2 ! Index for packed form 4707 end if 4708 my_el = my_el+1 4709 my_offset = (ijp_glob-1)* bsize_old + (jj_hpk-1)*2*bsize_frm 4710 if (xmpio_max_address(my_offset)) offset_err=1 ! Check for wraparounds. 4711 block_displ (my_el+1)=my_offset 4712 block_length(my_el+1)=1 4713 block_type (my_el+1)=old_type 4714 !write(std_out,*)" my_el, displ: ",my_el,block_displ(my_el+1) 4715 end do 4716 end do 4717 4718 block_length(my_nels+2)=1 4719 block_displ (my_nels+2)=my_offset 4720 block_type (my_nels+2)=MPI_UB 4721 4722 call xmpio_type_struct(my_nels+2,block_length,block_displ,block_type,new_type,mpierr) 4723 4724 call MPI_TYPE_COMMIT(new_type,mpierr) 4725 4726 ABI_FREE(block_length) 4727 ABI_FREE(block_displ) 4728 ABI_FREE(block_type) 4729 4730 end subroutine xmpio_create_coldistr_from_fpacked
m_xmpi/xmpio_create_fherm_packed [ Functions ]
[ Top ] [ m_xmpi ] [ Functions ]
NAME
xmpio_create_fherm_packed
FUNCTION
Returns an MPI datatype that can be used to (read|write) with MPI-IO the columns of a Hermitian matrix whose upper triangle is written on a Fortran binary file. Note that the view assumes that the file pointer used to create the MPI-IO view points to the first element of the first column. In other words, the first Fortran record marker (if any) is not taken into account in the calculation of the displacements.
INPUTS
array_of_starts(2)=Starting coordinates in the global Hermitian matrix (array of positive integers with jj>=ii, Fortran convention).
array_of_ends(2)=Final coordinates in the global Hermitian matrix (array of positive integers, jj>=ii, Fortran convention).
is_fortran_file=.FALSE. if a C stream is used; .TRUE. for writing Fortran binary files.
old_type=MPI datatype of the elements of the matrix.
OUTPUT
my_offset=Offset relative to the beginning of the matrix in the file.
hmat_type=New MPI type.
offset_err=Error code.
NOTES
The matrix on file is written in the following FORTRAN format (let us assume a 3x3 matrix for simplicity)

   m (1,1) m
   m (1,2) (2,2) m
   m (1,3) (2,3) (3,3) m

where each Fortran record stores a column of the packed Hermitian matrix and "m" denotes the Fortran record marker that introduces holes in the MPI-IO file view. To read the columns from (1,2) up to (2,2) one should use array_of_starts=(1,2) and array_of_ends=(2,2). The MPI-IO file view should be created by moving the file pointer so that it points to the element (1,2). The file view for C-streams is not optimal since one could use a single slice of contiguous data.
SOURCE
4498 #ifdef HAVE_MPI_IO 4499 4500 subroutine xmpio_create_fherm_packed(array_of_starts,array_of_ends,is_fortran_file,my_offset,old_type,hmat_type,offset_err) 4501 4502 !Arguments ------------------------------------ 4503 !scalars 4504 integer,intent(in) :: old_type 4505 integer,intent(out) :: offset_err,hmat_type 4506 integer(XMPI_OFFSET_KIND),intent(out) :: my_offset 4507 logical,intent(in) :: is_fortran_file 4508 !arrays 4509 integer,intent(in) :: array_of_starts(2),array_of_ends(2) 4510 4511 !Local variables------------------------------- 4512 !scalars 4513 integer :: nrow,my_ncol,ii,bsize_old,col,jj_glob,bsize_frm,prev_col,mpierr 4514 integer(XMPI_OFFSET_KIND) :: col_displ 4515 !arrays 4516 integer,allocatable :: col_type(:),block_length(:),block_type(:) 4517 integer(XMPI_ADDRESS_KIND),allocatable :: block_displ(:) 4518 4519 !************************************************************************ 4520 4521 offset_err=0 4522 4523 ! Byte size of old_type. 4524 call MPI_TYPE_SIZE(old_type,bsize_old,mpierr) 4525 4526 bsize_frm=0; if (is_fortran_file) bsize_frm = xmpio_bsize_frm 4527 4528 my_ncol = array_of_ends(2) - array_of_starts(2) + 1 4529 ! 4530 ! Calculate my offset relative to the beginning of the matrix in the file. 4531 prev_col = array_of_starts(2)-1 4532 my_offset = (prev_col*(prev_col+1)/2)*bsize_old + (array_of_starts(1)-1)*bsize_old + 2*prev_col*bsize_frm + bsize_frm 4533 ! 4534 ! col_type(col) describes the col-th column of the packed matrix. 4535 ! block_displ(col+1) stores its displacement taking into account the Fortran marker. 4536 ABI_MALLOC(col_type,(my_ncol)) 4537 ABI_MALLOC(block_displ,(my_ncol+2)) 4538 4539 if (my_ncol>1) then 4540 col_displ=0 4541 do col=1,my_ncol 4542 jj_glob = (col-1) + array_of_starts(2) 4543 nrow = jj_glob 4544 if (jj_glob==array_of_starts(2)) nrow = jj_glob - array_of_starts(1) + 1 ! First column treated by me. 4545 if (jj_glob==array_of_ends(2)) nrow = array_of_ends(1) ! Last column treated by me. 4546 call MPI_Type_contiguous(nrow,old_type,col_type(col),mpierr) 4547 ! 4548 if (xmpio_max_address(col_displ)) offset_err=1 ! Test for wraparounds 4549 block_displ(col+1) = col_displ 4550 col_displ = col_displ + nrow * bsize_old + 2 * bsize_frm ! Move to the next column. 4551 end do 4552 4553 else if (my_ncol==1) then ! The case of a single column is treated separately. 4554 block_displ(2) = 0 4555 nrow = array_of_ends(1) - array_of_starts(1) + 1 4556 call MPI_Type_contiguous(nrow,old_type,col_type(2),mpierr) 4557 col_displ= nrow*bsize_old 4558 if (xmpio_max_address(col_displ)) offset_err=1 ! 
Test for wraparounds 4559 else 4560 call xmpi_abort(msg="my_ncol cannot be negative!") 4561 end if 4562 4563 ABI_MALLOC(block_length,(my_ncol+2)) 4564 ABI_MALLOC(block_type,(my_ncol+2)) 4565 4566 block_length(1)=1 4567 block_displ (1)=0 4568 block_type (1)=MPI_LB 4569 4570 do ii=2,my_ncol+1 4571 block_length(ii)=1 4572 block_type(ii) =col_type(ii-1) 4573 !write(std_out,*)" ii-1, depl, length, type: ",ii-1,block_displ(ii),block_length(ii),block_type(ii) 4574 end do 4575 4576 block_length(my_ncol+2)= 1 4577 block_displ (my_ncol+2)= col_displ 4578 block_type (my_ncol+2)= MPI_UB 4579 4580 call xmpio_type_struct(my_ncol+2,block_length,block_displ,block_type,hmat_type,mpierr) 4581 4582 call MPI_TYPE_COMMIT(hmat_type,mpierr) 4583 4584 ABI_FREE(block_length) 4585 ABI_FREE(block_displ) 4586 ABI_FREE(block_type) 4587 4588 do col=1,my_ncol 4589 call MPI_TYPE_FREE(col_type(col),mpierr) 4590 end do 4591 4592 ABI_FREE(col_type) 4593 4594 end subroutine xmpio_create_fherm_packed
m_xmpi/xmpio_create_fstripes [ Functions ]
[ Top ] [ m_xmpi ] [ Functions ]
NAME
xmpio_create_fstripes
FUNCTION
Return an MPI type that can be used to (read|write) a set of interleaved Fortran records:

   <FRM> type(1), type(1), ... <FRM>   ! sizes(1) elements
   <FRM> type(2), type(2), ... <FRM>   ! sizes(2) elements
   <FRM> type(1), type(1), ... <FRM>   ! sizes(1) elements
   ....
INPUTS
ncount=Number of records with elements of type types(1) to (read|write).
sizes(1:2)=Number of elements of each type in the two sets of records.
types(1:2)=MPI type of the elements in the first and in the second record.
OUTPUT
my_offpad=Offset to be added to the file pointer giving the position of the first Fortran record marker locating the beginning of the matrix (let's call it "base"). Each node should (read|write) using my_offset = base + my_offpad. my_offpad is used so that one can safely change the way the fileview is generated (for example to make it more efficient) without having to change the client code.
new_type=New MPI type.
mpierr=MPI error code.
SOURCE
3546 #ifdef HAVE_MPI_IO 3547 3548 subroutine xmpio_create_fstripes(ncount, sizes, types, new_type, my_offpad, mpierr) 3549 3550 !Arguments ------------------------------------ 3551 !scalars 3552 integer,intent(in) :: ncount 3553 integer(XMPI_OFFSET_KIND),intent(out) :: my_offpad 3554 integer,intent(out) :: new_type,mpierr 3555 !arrays 3556 integer,intent(in) :: types(2),sizes(2) 3557 3558 !Local variables------------------------------- 3559 !scalars 3560 integer :: type_x,type_y,bsize_frm,bsize_x,bsize_y,nx,ny,column_type 3561 integer(MPI_ADDRESS_KIND) :: stride 3562 3563 !************************************************************************ 3564 3565 ! Byte size of the Fortran record marker. 3566 bsize_frm = xmpio_bsize_frm 3567 3568 ! Number of elements in the two stripes. 3569 nx = sizes(1) 3570 ny = sizes(2) 3571 3572 type_x = types(1) 3573 type_y = types(2) 3574 3575 ! Byte size of type_x and type_y 3576 call MPI_TYPE_SIZE(type_x,bsize_x,mpierr) 3577 ABI_HANDLE_MPIERR(mpierr) 3578 3579 call MPI_TYPE_SIZE(type_y,bsize_y,mpierr) 3580 ABI_HANDLE_MPIERR(mpierr) 3581 3582 ! The view starts at the first element of the first stripe. 3583 my_offpad = xmpio_bsize_frm 3584 3585 call MPI_Type_contiguous(nx,type_x,column_type,mpierr) 3586 ABI_HANDLE_MPIERR(mpierr) 3587 3588 ! Byte size of the Fortran record + the two markers. 3589 stride = nx*bsize_x + 2*bsize_frm + ny*bsize_y + 2*bsize_frm 3590 3591 ! ncount colum_type separated by stride bytes 3592 if (ncount>0) then 3593 call MPI_Type_create_hvector(ncount,1,stride,column_type,new_type,mpierr) 3594 else 3595 call MPI_Type_create_hvector(1,1,stride,column_type,new_type,mpierr) 3596 end if 3597 ABI_HANDLE_MPIERR(mpierr) 3598 3599 call MPI_TYPE_COMMIT(new_type,mpierr) 3600 ABI_HANDLE_MPIERR(mpierr) 3601 3602 call MPI_TYPE_FREE(column_type,mpierr) 3603 ABI_HANDLE_MPIERR(mpierr) 3604 3605 end subroutine xmpio_create_fstripes
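A hedged sketch of the intended call sequence (fh and base are illustrative names; the record layout on file must match the pattern described above):

  integer :: stripe_type, mpierr
  integer(XMPI_OFFSET_KIND) :: my_offpad

  ! File alternating a record of nx integers with a record of ny doubles, repeated ncount times.
  call xmpio_create_fstripes(ncount, [nx, ny], [MPI_INTEGER, MPI_DOUBLE_PRECISION], &
                             stripe_type, my_offpad, mpierr)
  call MPI_FILE_SET_VIEW(fh, base + my_offpad, MPI_BYTE, stripe_type, "native", MPI_INFO_NULL, mpierr)
  ! ... read the ncount*nx integers with MPI_FILE_READ_ALL; the interleaved records are skipped by the view ...
  call MPI_TYPE_FREE(stripe_type, mpierr)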
m_xmpi/xmpio_create_fsubarray_2D [ Functions ]
[ Top ] [ m_xmpi ] [ Functions ]
NAME
xmpio_create_fsubarray_2D
FUNCTION
Return an MPI type that can be used to (read|write) a 2D matrix of elements of type old_type stored in a Fortran file.
INPUTS
sizes(2)=Number of elements of type old_type in each dimension of the full array (array of positive integers).
subsizes(2)=Number of elements of type old_type in each dimension of the subarray (array of positive integers).
array_of_starts(2)=Starting coordinates of the subarray in each dimension (array of positive integers, >=1 and <=sizes).
old_type=Old MPI type.
OUTPUT
my_offpad=Offset to be added to the file pointer giving the position of the first Fortran record marker locating the beginning of the matrix (let's call it "base"). Each node should (read|write) using my_offset = base + my_offpad. my_offpad is used so that one can safely change the way the fileview is generated (for example to make it more efficient) without having to change the client code.
new_type=New MPI type.
mpierr=MPI error code.
SOURCE
3635 #ifdef HAVE_MPI_IO 3636 3637 subroutine xmpio_create_fsubarray_2D(sizes, subsizes, array_of_starts, old_type, new_type, my_offpad, mpierr) 3638 3639 !Arguments ------------------------------------ 3640 !scalars 3641 integer,intent(in) :: old_type 3642 integer(XMPI_OFFSET_KIND),intent(out) :: my_offpad 3643 integer,intent(out) :: mpierr,new_type 3644 !arrays 3645 integer,intent(in) :: sizes(2),subsizes(2),array_of_starts(2) 3646 !Local variables------------------------------- 3647 !scalars 3648 integer :: bsize_frm,bsize_old,nx,ny,column_type,ldx 3649 integer(XMPI_OFFSET_KIND) :: st_x,st_y 3650 integer(MPI_ADDRESS_KIND) :: stride_x 3651 !character(len=500) :: msg 3652 3653 !************************************************************************ 3654 3655 ! Byte size of the Fortran record marker. 3656 bsize_frm = xmpio_bsize_frm 3657 3658 ! Byte size of old_type. 3659 call MPI_TYPE_SIZE(old_type,bsize_old,mpierr) 3660 ABI_HANDLE_MPIERR(mpierr) 3661 ! 3662 ! Number of columns and rows of the submatrix. 3663 nx = subsizes(1) 3664 ny = subsizes(2) 3665 3666 ldx = sizes(1) 3667 st_x = array_of_starts(1) 3668 st_y = array_of_starts(2) 3669 3670 ! The view starts at the first element of the submatrix. 3671 my_offpad = (st_x-1)*bsize_old + (st_y-1)*(ldx*bsize_old+2*xmpio_bsize_frm) + xmpio_bsize_frm 3672 3673 ! Byte size of the Fortran record + the two markers. 3674 stride_x = ldx*bsize_old + 2*bsize_frm 3675 3676 call MPI_Type_contiguous(nx,old_type,column_type,mpierr) 3677 ABI_HANDLE_MPIERR(mpierr) 3678 3679 call MPI_Type_create_hvector(ny,1,stride_x,column_type,new_type,mpierr) 3680 ABI_HANDLE_MPIERR(mpierr) 3681 3682 call MPI_TYPE_COMMIT(new_type,mpierr) 3683 ABI_HANDLE_MPIERR(mpierr) 3684 3685 call MPI_TYPE_FREE(column_type, mpierr) 3686 ABI_HANDLE_MPIERR(mpierr) 3687 3688 end subroutine xmpio_create_fsubarray_2D
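For example, to read rows 11..20 of the first 5 columns of a 100x50 double-precision matrix stored one column per Fortran record (hedged sketch; fh and base are illustrative names):

  integer :: sub_type, mpierr
  integer(XMPI_OFFSET_KIND) :: my_offpad
  real(dp) :: buffer(10,5)

  call xmpio_create_fsubarray_2D([100, 50], [10, 5], [11, 1], MPI_DOUBLE_PRECISION, &
                                 sub_type, my_offpad, mpierr)
  call MPI_FILE_SET_VIEW(fh, base + my_offpad, MPI_BYTE, sub_type, "native", MPI_INFO_NULL, mpierr)
  call MPI_FILE_READ_ALL(fh, buffer, 10*5, MPI_DOUBLE_PRECISION, MPI_STATUS_IGNORE, mpierr)
  call MPI_TYPE_FREE(sub_type, mpierr)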
m_xmpi/xmpio_create_fsubarray_3D [ Functions ]
[ Top ] [ m_xmpi ] [ Functions ]
NAME
xmpio_create_fsubarray_3D
FUNCTION
Return an MPI type that can be used to (read|write) a 3D array of elements of type old_type stored in a Fortran file.
INPUTS
sizes(3)=Number of elements of type old_type in each dimension of the full array (array of positive integers).
subsizes(3)=Number of elements of type old_type in each dimension of the subarray (array of positive integers).
array_of_starts(3)=Starting coordinates of the subarray in each dimension (array of positive integers, >=1 and <=sizes).
old_type=Old MPI type.
OUTPUT
my_offpad=Offset to be added to the file pointer giving the position of the first Fortran record marker locating the beginning of the matrix (let's call it "base"). Each node should (read|write) using my_offset = base + my_offpad. my_offpad is used so that one can safely change the way the fileview is generated (for example to make it more efficient) without having to change the client code.
new_type=New MPI type.
mpierr=MPI error code.
SOURCE
3718 #ifdef HAVE_MPI_IO 3719 3720 subroutine xmpio_create_fsubarray_3D(sizes, subsizes, array_of_starts, old_type, new_type, my_offpad, mpierr) 3721 3722 !Arguments ------------------------------------ 3723 !scalars 3724 integer,intent(in) :: old_type 3725 integer,intent(out) :: mpierr,new_type 3726 integer(XMPI_OFFSET_KIND),intent(out) :: my_offpad 3727 !arrays 3728 integer,intent(in) :: sizes(3),subsizes(3),array_of_starts(3) 3729 !Local variables------------------------------- 3730 !scalars 3731 integer :: bsize_frm,bsize_old,nx,ny,nz 3732 integer :: column_type,plane_type,ldx,ldy,ldz 3733 integer(XMPI_OFFSET_KIND) :: st_x,st_y,st_z 3734 integer(MPI_ADDRESS_KIND) :: stride_x 3735 !character(len=500) :: msg 3736 3737 !************************************************************************ 3738 3739 bsize_frm = xmpio_bsize_frm ! Byte size of the Fortran record marker. 3740 3741 ! Byte size of old_type. 3742 call MPI_TYPE_SIZE(old_type,bsize_old,mpierr) 3743 ABI_HANDLE_MPIERR(mpierr) 3744 ! 3745 ! Number of columns and rows of the submatrix. 3746 nx = subsizes(1) 3747 ny = subsizes(2) 3748 nz = subsizes(3) 3749 3750 ldx = sizes(1) 3751 ldy = sizes(2) 3752 ldz = sizes(3) 3753 3754 st_x = array_of_starts(1) 3755 st_y = array_of_starts(2) 3756 st_z = array_of_starts(3) 3757 3758 ! The view starts at the first element of the submatrix. 3759 my_offpad = (st_x-1)*bsize_old + & 3760 (st_y-1)* (ldx*bsize_old+2*xmpio_bsize_frm) + & 3761 (st_z-1)*ldy*(ldx*bsize_old+2*xmpio_bsize_frm) + & 3762 xmpio_bsize_frm 3763 3764 ! Byte size of the Fortran record + the two markers. 3765 stride_x = ldx*bsize_old + 2*bsize_frm 3766 3767 call MPI_Type_contiguous(nx,old_type,column_type,mpierr) 3768 ABI_HANDLE_MPIERR(mpierr) 3769 3770 call MPI_Type_create_hvector(ny,1,stride_x,column_type,plane_type,mpierr) 3771 ABI_HANDLE_MPIERR(mpierr) 3772 3773 call MPI_Type_create_hvector(nz,1,ldy*stride_x,plane_type,new_type,mpierr) 3774 ABI_HANDLE_MPIERR(mpierr) 3775 3776 ! Commit the datatype 3777 call MPI_TYPE_COMMIT(new_type,mpierr) 3778 ABI_HANDLE_MPIERR(mpierr) 3779 3780 ! Free memory 3781 call MPI_TYPE_FREE(plane_type, mpierr) 3782 ABI_HANDLE_MPIERR(mpierr) 3783 3784 end subroutine xmpio_create_fsubarray_3D
m_xmpi/xmpio_create_fsubarray_4D [ Functions ]
[ Top ] [ m_xmpi ] [ Functions ]
NAME
xmpio_create_fsubarray_4D
FUNCTION
Return an MPI type that can be used to (read|write) a 4D array of elements of type old_type stored in a Fortran file.
INPUTS
sizes(4)=Number of elements of type old_type in each dimension of the full array (array of positive integers).
subsizes(4)=Number of elements of type old_type in each dimension of the subarray (array of positive integers).
array_of_starts(4)=Starting coordinates of the subarray in each dimension (array of positive integers, >=1 and <=sizes).
old_type=Old MPI type.
OUTPUT
my_offpad=Offset to be added to the file pointer giving the position of the first Fortran record marker locating the beginning of the matrix (let's call it "base"). Each node should (read|write) using my_offset = base + my_offpad. my_offpad is used so that one can safely change the way the fileview is generated (for example to make it more efficient) without having to change the client code.
new_type=New MPI type.
mpierr=MPI error code.
SOURCE
3814 #ifdef HAVE_MPI_IO
3815
3816 subroutine xmpio_create_fsubarray_4D(sizes, subsizes, array_of_starts, old_type, new_type, my_offpad, mpierr)
3817
3818 !Arguments ------------------------------------
3819 !scalars
3820  integer,intent(in) :: old_type
3821  integer,intent(out) :: mpierr,new_type
3822  integer(XMPI_OFFSET_KIND),intent(out) :: my_offpad
3823 !arrays
3824  integer,intent(in) :: sizes(4),subsizes(4),array_of_starts(4)
3825
3826 !Local variables-------------------------------
3827 !scalars
3828  integer :: bsize_frm,bsize_old,nx,ny,nz,na
3829  integer :: column_type,plane_type,ldx,ldy,ldz,lda,vol_type
3830  integer(XMPI_OFFSET_KIND) :: st_x,st_y,st_z,st_a
3831  integer(MPI_ADDRESS_KIND) :: stride_x
3832
3833 !************************************************************************
3834
3835  bsize_frm = xmpio_bsize_frm ! Byte size of the Fortran record marker.
3836
3837 ! Byte size of old_type.
3838  call MPI_TYPE_SIZE(old_type,bsize_old,mpierr)
3839  ABI_HANDLE_MPIERR(mpierr)
3840 !
3841 ! Number of columns and rows of the submatrix.
3842  nx = subsizes(1)
3843  ny = subsizes(2)
3844  nz = subsizes(3)
3845  na = subsizes(4)
3846
3847  ldx = sizes(1)
3848  ldy = sizes(2)
3849  ldz = sizes(3)
3850  lda = sizes(4)
3851
3852  st_x = array_of_starts(1)
3853  st_y = array_of_starts(2)
3854  st_z = array_of_starts(3)
3855  st_a = array_of_starts(4)
3856
3857 ! The view starts at the first element of the submatrix (the 4th-dim stride spans ldz*ldy records).
3858  my_offpad = (st_x-1)*bsize_old + &
3859              (st_y-1)*        (ldx*bsize_old+2*xmpio_bsize_frm) + &
3860              (st_z-1)*ldy*    (ldx*bsize_old+2*xmpio_bsize_frm) + &
3861              (st_a-1)*ldz*ldy*(ldx*bsize_old+2*xmpio_bsize_frm) + &
3862              xmpio_bsize_frm
3863
3864 ! Byte size of the Fortran record + the two markers.
3865  stride_x = ldx*bsize_old + 2*bsize_frm
3866
3867  call MPI_Type_contiguous(nx,old_type,column_type,mpierr)
3868  ABI_HANDLE_MPIERR(mpierr)
3869
3870  call MPI_Type_create_hvector(ny,1,stride_x,column_type,plane_type,mpierr)
3871  ABI_HANDLE_MPIERR(mpierr)
3872
3873  call MPI_Type_create_hvector(nz,1,ldy*stride_x,plane_type,vol_type,mpierr)
3874  ABI_HANDLE_MPIERR(mpierr)
3875
3876  call MPI_Type_create_hvector(na,1,ldz*ldy*stride_x,vol_type,new_type,mpierr)
3877  ABI_HANDLE_MPIERR(mpierr)
3878
3879 ! Commit the datatype
3880  call MPI_TYPE_COMMIT(new_type,mpierr)
3881  ABI_HANDLE_MPIERR(mpierr)
3882
3883 ! Free memory
3884  call MPI_TYPE_FREE(column_type, mpierr)
3885  ABI_HANDLE_MPIERR(mpierr)
3886
3887  call MPI_TYPE_FREE(plane_type, mpierr)
3888  ABI_HANDLE_MPIERR(mpierr)
3889
3890  call MPI_TYPE_FREE(vol_type, mpierr)
3891  ABI_HANDLE_MPIERR(mpierr)
3892
3893 end subroutine xmpio_create_fsubarray_4D
m_xmpi/xmpio_get_info_frm [ Functions ]
[ Top ] [ m_xmpi ] [ Functions ]
NAME
xmpio_get_info_frm
FUNCTION
Return the byte size of the Fortran record marker and the corresponding MPI type (compiler-dependent). These two values are needed to access sequential binary Fortran files with MPI-IO routines where C-streams are used.
INPUTS
comm=MPI communicator. Only the master node detects the values for the record marker; the results are then broadcast to all the other nodes in comm.
OUTPUT
bsize_frm=Byte size of the Fortran record marker.
mpi_type_frm=MPI type of the marker.
SOURCE
3122 subroutine xmpio_get_info_frm(bsize_frm, mpi_type_frm, comm)
3123
3124 !Arguments ------------------------------------
3125 !scalars
3126 integer,intent(in) :: comm
3127 integer,intent(out) :: mpi_type_frm,bsize_frm
3128
3129 !Local variables-------------------------------
3130 integer :: my_rank
3131 #ifdef HAVE_MPI_IO
3132 !scalars
3133 integer,parameter :: master=0
3134 integer :: spt,ept,ii
3135 integer :: f90_unt,iimax,mpio_fh,bsize_int,mpierr
3136 integer(XMPI_OFFSET_KIND) :: offset,rml
3137 character(len=fnlen) :: fname
3138 character(len=500) :: errmsg
3139 logical :: file_exists
3140 !arrays
3141 integer :: xvals(2),ivals(100),read_5ivals(5),ref_5ivals(5)
3142 integer :: rm_lengths(4)=(/4,8,2,16/)
3143 integer :: statux(MPI_STATUS_SIZE)
3144 real(dp) :: xrand(fnlen)
3145 #endif
3146
3147 !************************************************************************
3148
3149 bsize_frm=0; mpi_type_frm=0
3150
3151 my_rank = xmpi_comm_rank(comm) !; RETURN
3152
3153 #ifdef HAVE_MPI_IO
3154 if ( my_rank == master ) then
3155 ! Fortran scratch files cannot have a name, so we have to generate a random one.
3156 ! We cannot use pick_aname here since it is defined at a higher level.
3157 fname = "__MPI_IO_FRM__"
3158 spt=LEN(trim(fname))+1; ept=spt
3159
3160 inquire(file=trim(fname),exist=file_exists)
3161
3162 do while (file_exists)
3163 call RANDOM_NUMBER(xrand(spt:ept))
3164 xrand(spt:ept) = 64+xrand(spt:ept)*26
3165 do ii=spt,ept
3166 fname(ii:ii) = ACHAR(NINT(xrand(ii)))
3167 end do
3168 ept = MIN(ept+1,fnlen)
3169 inquire(file=trim(fname),exist=file_exists)
3170 end do
3171 !
3172 ! Write five integers to a binary file opened in Fortran mode, then try
3173 ! to reread the values with MPI-IO, using different trial sizes for the record marker.
3174 !
3175 f90_unt = xmpi_get_unit()
3176 if (f90_unt == -1) call xmpi_abort(msg="Cannot find free unit!!")
3177 ! MT dec 2013: suppress the "new" attribute: it often causes unwanted errors
3178 ! and is theoretically useless because of the previous inquire.
3179 open(unit=f90_unt,file=trim(fname),form="unformatted",err=10, iomsg=errmsg)
3180
3181 ref_5ivals = (/(ii, ii=5,9)/)
3182 ivals = HUGE(1); ivals(5:9)=ref_5ivals
3183 write(f90_unt, err=10, iomsg=errmsg) ivals
3184 close(f90_unt, err=10, iomsg=errmsg)
3185
3186 call MPI_FILE_OPEN(xmpi_comm_self, trim(fname), MPI_MODE_RDONLY, MPI_INFO_NULL, mpio_fh,mpierr)
3187
3188 iimax=3 ! Define the number of INTEGER kinds to be tested.
3189 #ifdef HAVE_FC_INT_QUAD
3190 iimax=4
3191 #endif
3192 !
3193 ! Try to read ivals(5:9) from the file.
3194 ii=0; bsize_frm=-1
3195 call MPI_TYPE_SIZE(MPI_INTEGER,bsize_int,mpierr)
3196
3197 do while (bsize_frm<=0 .and. ii<iimax)
3198 ii=ii+1
3199 rml = rm_lengths(ii)
3200 offset = rml + 4 * bsize_int ! Skip the leading marker and the first four integers.
3201 call MPI_FILE_READ_AT(mpio_fh,offset,read_5ivals,5,MPI_INTEGER,statux,mpierr)
3202 !write(std_out,*)read_5ivals
3203 if (mpierr==MPI_SUCCESS .and. ALL(read_5ivals==ref_5ivals) ) bsize_frm=rml
3204 end do
3205
3206 if (ii==iimax.and.bsize_frm<=0) then
3207 write(std_out,'(7a)') &
3208 'Error during FORTRAN file record marker detection:',ch10,&
3209 'It was not possible to read/write a small file!',ch10,&
3210 'ACTION: check your access permissions to the file system.',ch10,&
3211 'Common sources of this problem: quota limit exceeded, R/W incorrect permissions, ...'
3212 call xmpi_abort()
3213 else
3214 !write(std_out,'(a,i0)')' Detected FORTRAN record mark length: ',bsize_frm
3215 end if
3216
3217 call MPI_FILE_CLOSE(mpio_fh, mpierr)
3218 !
3219 ! Select the MPI datatype corresponding to the Fortran marker.
3220 SELECT CASE (bsize_frm) 3221 CASE (4) 3222 mpi_type_frm=MPI_INTEGER4 3223 CASE (8) 3224 mpi_type_frm=MPI_INTEGER8 3225 #if defined HAVE_FC_INT_QUAD && defined HAVE_MPI_INTEGER16 3226 CASE (16) 3227 mpi_type_frm=MPI_INTEGER16 3228 #endif 3229 CASE (2) 3230 mpi_type_frm=MPI_INTEGER2 3231 CASE DEFAULT 3232 write(std_out,'(a,i0)')" Wrong bsize_frm: ",bsize_frm 3233 call xmpi_abort() 3234 END SELECT 3235 3236 open(unit=f90_unt,file=trim(fname), err=10, iomsg=errmsg) 3237 close(f90_unt,status="delete", err=10, iomsg=errmsg) 3238 end if 3239 ! 3240 ! Broadcast data. 3241 xvals = (/bsize_frm,mpi_type_frm/) 3242 call xmpi_bcast(xvals,master,comm,mpierr) 3243 3244 bsize_frm = xvals(1) 3245 mpi_type_frm = xvals(2) 3246 3247 return 3248 3249 !HANDLE IO ERROR 3250 10 continue 3251 call xmpi_abort(msg=errmsg) 3252 #endif 3253 3254 end subroutine xmpio_get_info_frm
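Typical usage is a one-time initialization at startup (a sketch; within the module the results are actually cached in the global variables xmpio_bsize_frm and xmpio_mpi_type_frm):

   integer :: bsize_frm, mpi_type_frm
   call xmpio_get_info_frm(bsize_frm, mpi_type_frm, xmpi_world)
   ! With most compilers and default options one obtains bsize_frm = 4,
   ! i.e. the record marker is a 4-byte integer (mpi_type_frm = MPI_INTEGER4).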
m_xmpi/xmpio_max_address [ Functions ]
[ Top ] [ m_xmpi ] [ Functions ]
NAME
xmpio_max_address
FUNCTION
Returns .TRUE. if offset is too large to be stored in a Fortran integer of kind XMPI_ADDRESS_KIND (a small safety margin is included in the test).
SOURCE
4260 #ifdef HAVE_MPI_IO 4261 4262 function xmpio_max_address(offset) 4263 4264 !Arguments ------------------------------------ 4265 !scalars 4266 logical :: xmpio_max_address 4267 integer(XMPI_OFFSET_KIND),intent(in) :: offset 4268 !arrays 4269 4270 !Local variables------------------------------- 4271 !scalars 4272 integer(XMPI_ADDRESS_KIND) :: address 4273 integer(XMPI_OFFSET_KIND),parameter :: max_address=HUGE(address)-100 4274 4275 !************************************************************************ 4276 4277 xmpio_max_address = (offset >= max_address) 4278 4279 end function xmpio_max_address
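The helper is meant to guard loops that accumulate byte displacements, as xmpio_write_frmarkers does below (sketch taken from that pattern):

   displ = displ + bsize_frecord(irec) + 2*bsize_frm
   if (xmpio_max_address(displ)) then
     ierr = -1; return  ! Offset not representable: let the caller switch to a fallback algorithm.
   end if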
m_xmpi/xmpio_read_dp [ Functions ]
[ Top ] [ m_xmpi ] [ Functions ]
NAME
xmpio_read_dp
FUNCTION
Read a single Fortran record (record marker plus data) in a FORTRAN file at a given offset using MPI-IO. The file pointer is moved according to the value of advance. Target: double precision real array.
INPUTS
fh=MPI-IO file handler.
offset=MPI-IO file pointer.
sc_mode= xmpio_single ==> reading by the current proc; xmpio_collective ==> collective reading.
ncount=Number of elements in the buffer.
[advance]=By default the routine moves the file pointer to the next record. advance=.FALSE. can be used so that the next read will continue picking information off of the current record.
OUTPUT
buf(ncount)=Array with the values read from file.
fmarker=Content of the Fortran record marker.
mpierr=MPI error code.
SIDE EFFECTS
offset= input: file pointer used to access the Fortran marker. output: new offset updated after the reading, depending on advance.
SOURCE
4191 #ifdef HAVE_MPI_IO 4192 4193 subroutine xmpio_read_dp(fh, offset, sc_mode, ncount, buf, fmarker, mpierr, advance) 4194 4195 !Arguments ------------------------------------ 4196 !scalars 4197 integer,intent(in) :: fh,sc_mode,ncount 4198 integer(XMPI_OFFSET_KIND),intent(inout) :: offset 4199 integer(XMPI_OFFSET_KIND),intent(out) :: fmarker 4200 integer,intent(out) :: mpierr 4201 logical,optional,intent(in) :: advance 4202 !arrays 4203 real(dp),intent(out) :: buf(ncount) 4204 4205 !Local variables------------------------------- 4206 !scalars 4207 integer :: bsize_frm,myfh 4208 integer(XMPI_OFFSET_KIND) :: my_offset 4209 character(len=500) :: msg 4210 !arrays 4211 integer :: statux(MPI_STATUS_SIZE) 4212 4213 !************************************************************************ 4214 4215 ! Workaround for XLF 4216 myfh = fh 4217 4218 my_offset = offset 4219 bsize_frm = xmpio_bsize_frm ! Byte size of the Fortran record marker. 4220 4221 call xmpio_read_frm(myfh,my_offset,sc_mode,fmarker,mpierr,advance=.FALSE.) 4222 4223 SELECT CASE (sc_mode) 4224 CASE (xmpio_single) 4225 call MPI_FILE_READ_AT(myfh, my_offset, buf, ncount, MPI_DOUBLE_PRECISION, statux, mpierr) 4226 4227 CASE (xmpio_collective) 4228 call MPI_FILE_READ_AT_ALL(myfh, my_offset, buf, ncount, MPI_DOUBLE_PRECISION, statux, mpierr) 4229 4230 CASE DEFAULT 4231 write(msg,"(a,i0)")" Wrong value for sc_mode: ",sc_mode 4232 call xmpi_abort(msg=msg) 4233 END SELECT 4234 4235 if (PRESENT(advance)) then 4236 if (advance) then 4237 offset = offset + fmarker + 2*bsize_frm ! Move the file pointer to the next record. 4238 else 4239 offset = offset + bsize_frm ! Move the pointer after the marker. 4240 end if 4241 else 4242 offset = offset + fmarker + 2*bsize_frm 4243 end if 4244 4245 end subroutine xmpio_read_dp
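For example, reading nrec consecutive records collectively could look like this (a sketch; fh, nrec, ncount and buf are assumed to be set up by the caller):

   offset = 0
   do irec=1,nrec
     ! advance defaults to .TRUE.: after the call, offset points to the
     ! leading marker of the next record.
     call xmpio_read_dp(fh, offset, xmpio_collective, ncount, buf, fmarker, mpierr)
   end do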
m_xmpi/xmpio_read_int [ Functions ]
[ Top ] [ m_xmpi ] [ Functions ]
NAME
xmpio_read_int
FUNCTION
Read a single Fortran record (record marker plus data) in a FORTRAN file at a given offset using MPI-IO. The file pointer is moved according to the value of advance. Target: integer array.
INPUTS
fh=MPI-IO file handler.
offset=MPI-IO file pointer.
sc_mode= xmpio_single ==> reading by the current proc; xmpio_collective ==> collective reading.
ncount=Number of elements in the buffer.
[advance]=By default the routine moves the file pointer to the next record. advance=.FALSE. can be used so that the next read will continue picking information off of the current record.
OUTPUT
buf(ncount)=Array with the values read from file.
fmarker=Content of the Fortran record marker.
mpierr=MPI error code.
SIDE EFFECTS
offset= input: file pointer used to access the Fortran marker. output: new offset updated after the reading, depending on advance.
SOURCE
4099 #ifdef HAVE_MPI_IO 4100 4101 subroutine xmpio_read_int(fh, offset, sc_mode, ncount, buf, fmarker, mpierr, advance) 4102 4103 !Arguments ------------------------------------ 4104 !scalars 4105 integer,intent(in) :: fh,sc_mode,ncount 4106 integer(XMPI_OFFSET_KIND),intent(inout) :: offset 4107 integer(XMPI_OFFSET_KIND),intent(out) :: fmarker 4108 integer,intent(out) :: mpierr 4109 logical,optional,intent(in) :: advance 4110 !arrays 4111 integer,intent(out) :: buf(ncount) 4112 4113 !Local variables------------------------------- 4114 !scalars 4115 integer :: myfh,bsize_frm 4116 integer(XMPI_OFFSET_KIND) :: my_offset 4117 character(len=500) :: msg 4118 !arrays 4119 integer :: statux(MPI_STATUS_SIZE) 4120 4121 !************************************************************************ 4122 4123 ! Workaround for XLF 4124 myfh = fh 4125 4126 my_offset = offset 4127 bsize_frm = xmpio_bsize_frm ! Byte size of the Fortran record marker. 4128 4129 call xmpio_read_frm(myfh,my_offset,sc_mode,fmarker,mpierr,advance=.FALSE.) 4130 4131 SELECT CASE (sc_mode) 4132 CASE (xmpio_single) 4133 call MPI_FILE_READ_AT(myfh, my_offset, buf, ncount, MPI_INTEGER, statux, mpierr) 4134 4135 CASE (xmpio_collective) 4136 call MPI_FILE_READ_AT_ALL(myfh, my_offset, buf, ncount, MPI_INTEGER, statux, mpierr) 4137 4138 CASE DEFAULT 4139 write(msg,"(a,i0)")" Wrong value for sc_mode: ",sc_mode 4140 call xmpi_abort(msg=msg) 4141 END SELECT 4142 4143 if (PRESENT(advance)) then 4144 if (advance) then 4145 offset = offset + fmarker + 2*bsize_frm ! Move the file pointer to the next record. 4146 else 4147 offset = offset + bsize_frm ! Move the pointer after the marker. 4148 end if 4149 else 4150 offset = offset + fmarker + 2*bsize_frm 4151 end if 4152 4153 end subroutine xmpio_read_int
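advance=.FALSE. is handy when a record stores a small integer header followed by a payload that is read with a different primitive. A sketch (hdr, payload, npay, bsize_int and statux are hypothetical names, assumed to be declared by the caller):

   ! Read the two-integer header but leave the pointer inside the record.
   call xmpio_read_int(fh, offset, xmpio_single, 2, hdr, fmarker, mpierr, advance=.FALSE.)
   ! offset now points just after the leading marker; skip the header
   ! integers to reach the payload stored in the same record.
   call MPI_FILE_READ_AT(fh, offset + 2*bsize_int, payload, npay, MPI_DOUBLE_PRECISION, statux, mpierr)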
m_xmpi/xmpio_type_struct [ Functions ]
[ Top ] [ m_xmpi ] [ Functions ]
NAME
xmpio_type_struct
FUNCTION
Some highly non-standard MPI implementations provide MPI-IO without implementing the full set of MPI-2 extensions. This wrapper calls the obsolete MPI_TYPE_STRUCT when MPI_TYPE_CREATE_STRUCT is not available. Note that MPI_TYPE_STRUCT requires the displacements to be an array of default integers, whereas the block_displ argument is an array of kind XMPI_ADDRESS_KIND. The routine aborts if a displacement cannot be represented with a default integer.
INPUTS
ncount=Number of blocks (integer); also the number of entries in the arrays block_length, block_displ and block_type.
block_length(ncount)=Number of elements in each block (array of integers).
block_displ(ncount)=Byte displacement of each block (array of integers of kind XMPI_ADDRESS_KIND).
block_type(ncount)=Type of the elements in each block (array of handles to datatype objects).
OUTPUT
new_type=New datatype (handle).
mpierr=MPI error code.
SOURCE
3063 #ifdef HAVE_MPI_IO
3064
3065 subroutine xmpio_type_struct(ncount, block_length, block_displ, block_type, new_type, mpierr)
3066
3067 !Arguments ------------------------------------
3068 !scalars
3069 integer,intent(in) :: ncount
3070 integer,intent(out) :: new_type,mpierr
3071 !arrays
3072 integer,intent(in) :: block_length(ncount),block_type(ncount)
3073 integer(XMPI_ADDRESS_KIND),intent(in) :: block_displ(ncount)
3074
3075 !Local variables-------------------
3076 #ifndef HAVE_MPI_TYPE_CREATE_STRUCT
3077 integer,allocatable :: tmp_displ(:)
3078 #endif
3079
3080 !************************************************************************
3081
3082 #ifdef HAVE_MPI_TYPE_CREATE_STRUCT
3083 call MPI_TYPE_CREATE_STRUCT(ncount,block_length,block_displ,block_type,new_type,mpierr)
3084 #else
3085
3086 ! Check the displacements before truncating them to default integers.
3087 if (ANY(block_displ > HUGE(1) ))then
3088 call xmpi_abort(msg=" byte displacement cannot be represented with a default integer")
3089 end if
3090
3091 ABI_MALLOC(tmp_displ,(ncount))
3092 tmp_displ = block_displ
3093 ! MPI_TYPE_STRUCT expects default-integer displacements, hence pass the copy.
3094 call MPI_TYPE_STRUCT(ncount,block_length,tmp_displ,block_type,new_type,mpierr)
3095 ABI_FREE(tmp_displ)
3096 #endif
3097
3098 end subroutine xmpio_type_struct
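For instance, the [marker | data | marker] layout of a single Fortran record containing ncount double precision values could be described as follows (a sketch; ncount is assumed to be set by the caller):

   integer :: block_length(3), block_type(3), rec_type, bsize_dp, mpierr
   integer(XMPI_ADDRESS_KIND) :: block_displ(3)

   call MPI_TYPE_SIZE(MPI_DOUBLE_PRECISION, bsize_dp, mpierr)

   block_type   = [xmpio_mpi_type_frm, MPI_DOUBLE_PRECISION, xmpio_mpi_type_frm]
   block_length = [1, ncount, 1]
   block_displ  = [0, xmpio_bsize_frm, xmpio_bsize_frm + ncount*bsize_dp]

   call xmpio_type_struct(3, block_length, block_displ, block_type, rec_type, mpierr)
   call MPI_TYPE_COMMIT(rec_type, mpierr)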
m_xmpi/xmpio_write_frmarkers [ Functions ]
[ Top ] [ m_xmpi ] [ Functions ]
NAME
xmpio_write_frmarkers
FUNCTION
Write a set of Fortran record markers starting at a given offset using MPI-IO.
INPUTS
fh=MPI-IO file handler.
offset=MPI-IO file pointer.
sc_mode=Option for individual or collective writing.
nfrec=Number of Fortran records to be written.
bsize_frecord(nfrec)=Byte size of the Fortran records to be written (markers are NOT included in the size).
OUTPUT
ierr=A non-zero error code signals failure.
SOURCE
4304 #ifdef HAVE_MPI_IO 4305 4306 subroutine xmpio_write_frmarkers(fh, offset, sc_mode, nfrec, bsize_frecord, ierr) 4307 4308 !Arguments ------------------------------------ 4309 !scalars 4310 integer,intent(in) :: fh,nfrec,sc_mode 4311 integer(XMPI_OFFSET_KIND),intent(in) :: offset 4312 integer,intent(out) :: ierr 4313 !arrays 4314 integer(XMPI_OFFSET_KIND),intent(in) :: bsize_frecord(nfrec) 4315 4316 !Local variables------------------------------- 4317 !scalars 4318 integer :: nb,irec,frmarkers_type,jj,bsize_frm,mpi_type_frm,mpierr,myfh 4319 integer(XMPI_OFFSET_KIND) :: displ 4320 !integer(XMPI_OFFSET_KIND) :: my_offset 4321 !character(len=500) :: msg 4322 !arrays 4323 integer(kind=int16),allocatable :: bufdelim2(:) 4324 integer(kind=int32),allocatable :: bufdelim4(:) 4325 integer(kind=int64),allocatable :: bufdelim8(:) 4326 #ifdef HAVE_FC_INT_QUAD 4327 integer*16,allocatable :: bufdelim16(:) 4328 #endif 4329 !integer :: statux(MPI_STATUS_SIZE) 4330 integer,allocatable :: block_length(:),block_type(:) 4331 integer(XMPI_ADDRESS_KIND),allocatable :: block_displ(:) 4332 integer(XMPI_OFFSET_KIND),allocatable :: delim_record(:) 4333 4334 !************************************************************************ 4335 4336 ! Workaround for XLF 4337 myfh = fh; ierr=0 4338 4339 !my_offset = offset 4340 !do irec=1,nfrec 4341 ! call xmpio_write_frm(myfh,my_offset,sc_mode,bsize_frecord(irec),mpierr) 4342 !end do 4343 !return 4344 4345 ! FIXME: This is buggy 4346 bsize_frm = xmpio_bsize_frm ! Byte size of the Fortran record marker. 4347 mpi_type_frm = xmpio_mpi_type_frm ! MPI type of the record marker. 4348 4349 ! Define the view for the file 4350 nb=2*nfrec 4351 ABI_MALLOC(block_length,(nb+2)) 4352 ABI_MALLOC(block_displ,(nb+2)) 4353 ABI_MALLOC(block_type,(nb+2)) 4354 block_length(1)=1 4355 block_displ (1)=0 4356 block_type (1)=MPI_LB 4357 4358 jj=2; displ=0 4359 do irec=1,nfrec 4360 block_type (jj:jj+1) = mpi_type_frm 4361 block_length(jj:jj+1) = 1 4362 block_displ(jj ) = displ 4363 block_displ(jj+1) = displ + bsize_frm + bsize_frecord(irec) 4364 jj=jj+2 4365 displ = displ + bsize_frecord(irec) + 2*bsize_frm ! Move to the beginning of the next column. 4366 if (xmpio_max_address(displ)) then ! Check for wraparound. 4367 ierr = -1; return 4368 end if 4369 end do 4370 4371 block_length(nb+2) = 1 4372 block_displ (nb+2) = displ 4373 block_type (nb+2) = MPI_UB 4374 4375 call xmpio_type_struct(nb+2,block_length,block_displ,block_type,frmarkers_type,mpierr) 4376 4377 ABI_FREE(block_length) 4378 ABI_FREE(block_displ) 4379 ABI_FREE(block_type) 4380 4381 call MPI_TYPE_COMMIT(frmarkers_type,mpierr) 4382 call MPI_FILE_SET_VIEW(myfh,offset,MPI_BYTE,frmarkers_type,"native",MPI_INFO_NULL,mpierr) 4383 4384 jj=1 4385 ABI_MALLOC(delim_record,(nb)) 4386 do irec=1,nfrec 4387 delim_record(jj:jj+1)=bsize_frecord(irec) 4388 jj=jj+2 4389 end do 4390 4391 ! Write all markers according to the MPI type of the Fortran marker. 
4392 SELECT CASE (bsize_frm) 4393 4394 CASE (4) 4395 ABI_MALLOC(bufdelim4,(nb)) 4396 bufdelim4=delim_record 4397 if (sc_mode==xmpio_single) then 4398 call MPI_FILE_WRITE (myfh,bufdelim4,2*nfrec,mpi_type_frm,MPI_STATUS_IGNORE,mpierr) 4399 else if (sc_mode==xmpio_collective) then 4400 call MPI_FILE_WRITE_ALL(myfh,bufdelim4,2*nfrec,mpi_type_frm,MPI_STATUS_IGNORE,mpierr) 4401 else 4402 ierr=2 4403 end if 4404 ABI_FREE(bufdelim4) 4405 4406 CASE (8) 4407 ABI_MALLOC(bufdelim8,(nb)) 4408 bufdelim8=delim_record 4409 if (sc_mode==xmpio_single) then 4410 call MPI_FILE_WRITE (myfh,bufdelim8,2*nfrec,mpi_type_frm,MPI_STATUS_IGNORE,mpierr) 4411 else if (sc_mode==xmpio_collective) then 4412 call MPI_FILE_WRITE_ALL(myfh,bufdelim8,2*nfrec,mpi_type_frm,MPI_STATUS_IGNORE,mpierr) 4413 else 4414 ierr=2 4415 end if 4416 ABI_FREE(bufdelim8) 4417 4418 #ifdef HAVE_FC_INT_QUAD 4419 CASE (16) 4420 ABI_MALLOC(bufdelim16,(nb)) 4421 bufdelim16=delim_record 4422 if (sc_mode==xmpio_single) then 4423 call MPI_FILE_WRITE (myfh,bufdelim16,2*nfrec,mpi_type_frm,MPI_STATUS_IGNORE,mpierr) 4424 else if (sc_mode==xmpio_collective) then 4425 call MPI_FILE_WRITE_ALL(myfh,bufdelim16,2*nfrec,mpi_type_frm,MPI_STATUS_IGNORE,mpierr) 4426 else 4427 ierr=2 4428 end if 4429 ABI_FREE(bufdelim16) 4430 #endif 4431 4432 CASE (2) 4433 ABI_MALLOC(bufdelim2,(nb)) 4434 bufdelim2=delim_record 4435 if (sc_mode==xmpio_single) then 4436 call MPI_FILE_WRITE (myfh,bufdelim2,2*nfrec,mpi_type_frm,MPI_STATUS_IGNORE,mpierr) 4437 else if (sc_mode==xmpio_collective) then 4438 call MPI_FILE_WRITE_ALL(myfh,bufdelim2,2*nfrec,mpi_type_frm,MPI_STATUS_IGNORE,mpierr) 4439 else 4440 ierr=2 4441 end if 4442 ABI_FREE(bufdelim2) 4443 4444 CASE DEFAULT 4445 ierr=-2 4446 END SELECT 4447 4448 ! Free memory 4449 call MPI_TYPE_FREE(frmarkers_type,mpierr) 4450 ABI_FREE(delim_record) 4451 4452 end subroutine xmpio_write_frmarkers 4453 #endif
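A writer typically lays down all the record markers first and then fills the data sections through a separate view. A sketch (the two record sizes are hypothetical; fh is an open MPI-IO handle):

   integer(XMPI_OFFSET_KIND) :: bsize_frecord(2), offset
   integer :: ierr

   bsize_frecord = [100*8_XMPI_OFFSET_KIND, 200*8_XMPI_OFFSET_KIND]  ! Two records of 100 and 200 dp values.
   offset = 0
   call xmpio_write_frmarkers(fh, offset, xmpio_collective, 2, bsize_frecord, ierr)
   if (ierr /= 0) call xmpi_abort(msg="cannot write Fortran record markers")
   ! The record payloads can now be written, e.g. through a view built with
   ! one of the xmpio_create_* constructors documented above.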