TABLE OF CONTENTS
- ABINIT/xmpi_ibcast
- ABINIT/xmpi_ibcast_dp1d
- ABINIT/xmpi_ibcast_dp2d
- ABINIT/xmpi_ibcast_dp3d
- ABINIT/xmpi_ibcast_dp4d
- ABINIT/xmpi_ibcast_dpc2d
- ABINIT/xmpi_ibcast_int1d
- ABINIT/xmpi_ibcast_int4d
- ABINIT/xmpi_ibcast_spc2d
ABINIT/xmpi_ibcast [ Functions ]
NAME
xmpi_ibcast
FUNCTION
This module contains functions that call the MPI routine MPI_IBCAST to broadcast data from one processor to the other processors inside a communicator, provided the code is compiled with the MPI CPP flags. xmpi_ibcast is the generic interface. A usage sketch is given after this header.
COPYRIGHT
Copyright (C) 2001-2022 ABINIT group (MG)
This file is distributed under the terms of the GNU General Public License, see ~ABINIT/COPYING or http://www.gnu.org/copyleft/gpl.txt .
SOURCE
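As a usage illustration (not part of this file), the sketch below shows how a caller might start a non-blocking broadcast through the generic xmpi_ibcast interface and complete it later. It assumes the dp kind from defs_basis and an xmpi_wait wrapper are available, as elsewhere in ABINIT; if xmpi_wait is not, MPI_WAIT can be called on the request directly.

 subroutine demo_ibcast(buffer, root, comm)
  use defs_basis, only : dp   ! assumed home of the dp kind parameter
  use m_xmpi
  implicit none
  real(dp), intent(inout) :: buffer(:)
  integer, intent(in) :: root, comm
  integer :: request, ierr

  ! Start the broadcast; every rank in comm must make this call.
  call xmpi_ibcast(buffer, root, comm, request, ierr)

  ! ... local work that does not touch buffer can be overlapped here ...

  ! Complete the operation. If MPI_IBCAST was not available at compile time,
  ! xmpi_ibcast already performed a blocking xmpi_bcast and returned
  ! xmpi_request_null, so this wait returns immediately.
  call xmpi_wait(request, ierr)

 end subroutine demo_ibcast

Because the fallback path returns xmpi_request_null, the same wait-based calling pattern works whether or not MPI_IBCAST is available.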
ABINIT/xmpi_ibcast_dp1d [ Functions ]
NAME
xmpi_ibcast_dp1d
FUNCTION
Sends data from one processor to others inside comm without blocking.
Target: one-dimensional double precision (dp) arrays.
INPUTS
root: rank of broadcast root (integer)
comm: MPI communicator
OUTPUT
ierr= exit status, a non-zero value meaning there is an error
request= MPI request handle of the non-blocking broadcast (xmpi_request_null if the blocking fallback is used)
SIDE EFFECTS
xval= buffer array
SOURCE
subroutine xmpi_ibcast_dp1d(xval, root, comm, request, ierr)

!Arguments-------------------------
 real(dp) ABI_ASYNC, intent(inout) :: xval(:)
 integer, intent(in) :: root, comm
 integer, intent(out) :: request, ierr

! *************************************************************************

 ierr=0
#ifdef HAVE_MPI_IBCAST
 if (comm /= MPI_COMM_SELF .and. comm /= MPI_COMM_NULL) then
   call MPI_IBCAST(xval, product(shape(xval)), MPI_DOUBLE_PRECISION, root, comm, request, ierr)
   xmpi_count_requests = xmpi_count_requests + 1
   return
 end if
#endif

 ! Call the blocking version and return null request.
 call xmpi_bcast(xval, root, comm, ierr)
 request = xmpi_request_null

end subroutine xmpi_ibcast_dp1d
ABINIT/xmpi_ibcast_dp2d [ Functions ]
NAME
xmpi_ibcast_dp2d
FUNCTION
Sends data from one processor to others inside comm without blocking.
Target: two-dimensional double precision (dp) arrays.
INPUTS
root: rank of broadcast root (integer)
comm: MPI communicator
OUTPUT
ierr= exit status, a non-zero value meaning there is an error
request= MPI request handle of the non-blocking broadcast (xmpi_request_null if the blocking fallback is used)
SIDE EFFECTS
xval= buffer array
SOURCE
subroutine xmpi_ibcast_dp2d(xval, root, comm, request, ierr)

!Arguments-------------------------
 real(dp) ABI_ASYNC, intent(inout) :: xval(:, :)
 integer, intent(in) :: root, comm
 integer, intent(out) :: request, ierr

! *************************************************************************

 ierr=0
#ifdef HAVE_MPI_IBCAST
 if (comm /= MPI_COMM_SELF .and. comm /= MPI_COMM_NULL) then
   call MPI_IBCAST(xval, product(shape(xval)), MPI_DOUBLE_PRECISION, root, comm, request, ierr)
   xmpi_count_requests = xmpi_count_requests + 1
   return
 end if
#endif

 ! Call the blocking version and return null request.
 call xmpi_bcast(xval, root, comm, ierr)
 request = xmpi_request_null

end subroutine xmpi_ibcast_dp2d
ABINIT/xmpi_ibcast_dp3d [ Functions ]
NAME
xmpi_ibcast_dp3d
FUNCTION
Sends data from one processor to others inside comm without blocking.
Target: three-dimensional double precision (dp) arrays.
INPUTS
root: rank of broadcast root (integer)
comm: MPI communicator
OUTPUT
ierr= exit status, a non-zero value meaning there is an error
request= MPI request handle of the non-blocking broadcast (xmpi_request_null if the blocking fallback is used)
SIDE EFFECTS
xval= buffer array
SOURCE
subroutine xmpi_ibcast_dp3d(xval, root, comm, request, ierr)

!Arguments-------------------------
 real(dp) ABI_ASYNC, intent(inout) :: xval(:, :, :)
 integer, intent(in) :: root, comm
 integer, intent(out) :: request, ierr

! *************************************************************************

 ierr=0
#ifdef HAVE_MPI_IBCAST
 if (comm /= MPI_COMM_SELF .and. comm /= MPI_COMM_NULL) then
   call MPI_IBCAST(xval, product(shape(xval)), MPI_DOUBLE_PRECISION, root, comm, request, ierr)
   xmpi_count_requests = xmpi_count_requests + 1
   return
 end if
#endif

 ! Call the blocking version and return null request.
 call xmpi_bcast(xval, root, comm, ierr)
 request = xmpi_request_null

end subroutine xmpi_ibcast_dp3d
ABINIT/xmpi_ibcast_dp4d [ Functions ]
NAME
xmpi_ibcast_dp4d
FUNCTION
Sends data from one processor to others inside comm without blocking.
Target: four-dimensional double precision (dp) arrays.
INPUTS
root: rank of broadcast root (integer)
comm: MPI communicator
OUTPUT
ierr= exit status, a non-zero value meaning there is an error
request= MPI request handle of the non-blocking broadcast (xmpi_request_null if the blocking fallback is used)
SIDE EFFECTS
xval= buffer array
SOURCE
subroutine xmpi_ibcast_dp4d(xval, root, comm, request, ierr)

!Arguments-------------------------
 real(dp) ABI_ASYNC, intent(inout) :: xval(:, :, :, :)
 integer, intent(in) :: root, comm
 integer, intent(out) :: request, ierr

! *************************************************************************

 ierr=0
#ifdef HAVE_MPI_IBCAST
 if (comm /= MPI_COMM_SELF .and. comm /= MPI_COMM_NULL) then
   call MPI_IBCAST(xval, product(shape(xval)), MPI_DOUBLE_PRECISION, root, comm, request, ierr)
   xmpi_count_requests = xmpi_count_requests + 1
   return
 end if
#endif

 ! Call the blocking version and return null request.
 call xmpi_bcast(xval, root, comm, ierr)
 request = xmpi_request_null

end subroutine xmpi_ibcast_dp4d
ABINIT/xmpi_ibcast_dpc2d [ Functions ]
NAME
xmpi_ibcast_dpc2d
FUNCTION
Sends data from one processor to others inside comm without blocking.
Target: two-dimensional double precision complex arrays.
INPUTS
root: rank of broadcast root (integer)
comm: MPI communicator
OUTPUT
ierr= exit status, a non-zero value meaning there is an error
request= MPI request handle of the non-blocking broadcast (xmpi_request_null if the blocking fallback is used)
SIDE EFFECTS
xval= buffer array
SOURCE
subroutine xmpi_ibcast_dpc2d(xval, root, comm, request, ierr)

!Arguments-------------------------
 complex(dp) ABI_ASYNC, intent(inout) :: xval(:, :)
 integer, intent(in) :: root, comm
 integer, intent(out) :: request, ierr

! *************************************************************************

 ierr=0
#ifdef HAVE_MPI_IBCAST
 if (comm /= MPI_COMM_SELF .and. comm /= MPI_COMM_NULL) then
   call MPI_IBCAST(xval, product(shape(xval)), MPI_DOUBLE_COMPLEX, root, comm, request, ierr)
   xmpi_count_requests = xmpi_count_requests + 1
   return
 end if
#endif

 ! Call the blocking version and return null request.
 call xmpi_bcast(xval, root, comm, ierr)
 request = xmpi_request_null

end subroutine xmpi_ibcast_dpc2d
ABINIT/xmpi_ibcast_int1d [ Functions ]
NAME
xmpi_ibcast_int1d
FUNCTION
Sends data from one processor to others inside comm without blocking.
Target: one-dimensional integer arrays.
INPUTS
root: rank of broadcast root (integer)
comm: MPI communicator
OUTPUT
ierr= exit status, a non-zero value meaning there is an error
request= MPI request handle of the non-blocking broadcast (xmpi_request_null if the blocking fallback is used)
SIDE EFFECTS
xval= buffer array
SOURCE
subroutine xmpi_ibcast_int1d(xval, root, comm, request, ierr)

!Arguments-------------------------
 integer ABI_ASYNC, intent(inout) :: xval(:)
 integer, intent(in) :: root, comm
 integer, intent(out) :: request, ierr

! *************************************************************************

 ierr=0
#ifdef HAVE_MPI_IBCAST
 if (comm /= MPI_COMM_SELF .and. comm /= MPI_COMM_NULL) then
   call MPI_IBCAST(xval, product(shape(xval)), MPI_INTEGER, root, comm, request, ierr)
   xmpi_count_requests = xmpi_count_requests + 1
   return
 end if
#endif

 ! Call the blocking version and return null request.
 call xmpi_bcast(xval, root, comm, ierr)
 request = xmpi_request_null

end subroutine xmpi_ibcast_int1d
ABINIT/xmpi_ibcast_int4d [ Functions ]
NAME
xmpi_ibcast_int4d
FUNCTION
Sends data from one processor to others inside comm without blocking.
Target: four-dimensional integer arrays.
INPUTS
root: rank of broadcast root (integer)
comm: MPI communicator
OUTPUT
ierr= exit status, a non-zero value meaning there is an error
request= MPI request handle of the non-blocking broadcast (xmpi_request_null if the blocking fallback is used)
SIDE EFFECTS
xval= buffer array
SOURCE
subroutine xmpi_ibcast_int4d(xval, root, comm, request, ierr)

!Arguments-------------------------
 integer ABI_ASYNC, intent(inout) :: xval(:,:,:,:)
 integer, intent(in) :: root, comm
 integer, intent(out) :: request, ierr

! *************************************************************************

 ierr=0
#ifdef HAVE_MPI_IBCAST
 if (comm /= MPI_COMM_SELF .and. comm /= MPI_COMM_NULL) then
   call MPI_IBCAST(xval, product(shape(xval)), MPI_INTEGER, root, comm, request, ierr)
   xmpi_count_requests = xmpi_count_requests + 1
   return
 end if
#endif

 ! Call the blocking version and return null request.
 call xmpi_bcast(xval, root, comm, ierr)
 request = xmpi_request_null

end subroutine xmpi_ibcast_int4d
ABINIT/xmpi_ibcast_spc2d [ Functions ]
NAME
xmpi_ibcast_spc2d
FUNCTION
Sends data from one processor to others inside comm without blocking.
Target: two-dimensional single precision complex arrays.
INPUTS
root: rank of broadcast root (integer)
comm: MPI communicator
OUTPUT
ierr= exit status, a non-zero value meaning there is an error
request= MPI request handle of the non-blocking broadcast (xmpi_request_null if the blocking fallback is used)
SIDE EFFECTS
xval= buffer array
SOURCE
subroutine xmpi_ibcast_spc2d(xval, root, comm, request, ierr)

!Arguments-------------------------
 complex(sp) ABI_ASYNC, intent(inout) :: xval(:, :)
 integer, intent(in) :: root, comm
 integer, intent(out) :: request, ierr

! *************************************************************************

 ierr=0
#ifdef HAVE_MPI_IBCAST
 if (comm /= MPI_COMM_SELF .and. comm /= MPI_COMM_NULL) then
   call MPI_IBCAST(xval, product(shape(xval)), MPI_COMPLEX, root, comm, request, ierr)
   xmpi_count_requests = xmpi_count_requests + 1
   return
 end if
#endif

 ! Call the blocking version and return null request.
 call xmpi_bcast(xval, root, comm, ierr)
 request = xmpi_request_null

end subroutine xmpi_ibcast_spc2d