TABLE OF CONTENTS
- ABINIT/xmpi_isum
- ABINIT/xmpi_isum_int0d
- ABINIT/xmpi_isum_ip_dp2d
- ABINIT/xmpi_isum_ip_dp3d
- ABINIT/xmpi_isum_ip_dp4d
- ABINIT/xmpi_isum_ip_dpc1d
- ABINIT/xmpi_isum_ip_dpc2d
- ABINIT/xmpi_isum_ip_dpc3d
- ABINIT/xmpi_isum_ip_spc1d
- ABINIT/xmpi_isum_ip_spc2d
- ABINIT/xmpi_isum_ip_spc3d
ABINIT/xmpi_isum [ Functions ]
NAME
xmpi_isum
FUNCTION
This module contains functions that call MPI routines, if we compile the code using the MPI CPP flags. xmpi_isum is the generic function.
COPYRIGHT
Copyright (C) 2001-2024 ABINIT group (MG) This file is distributed under the terms of the GNU General Public License, see ~ABINIT/COPYING or http://www.gnu.org/copyleft/gpl.txt .
NOTES
MPI_IALLREDUCE(SENDBUF, RECVBUF, COUNT, DATATYPE, OP, COMM, REQUEST, IERROR) <type> SENDBUF(*), RECVBUF(*) INTEGER COUNT, DATATYPE, OP, COMM, REQUEST, IERROR
SOURCE
ABINIT/xmpi_isum_int0d [ Functions ]
NAME
xmpi_isum_int0d
FUNCTION
Combines values from all processes and distribute the result back to all processes. Target: scalar integers. Non-blocking version.
INPUTS
OUTPUT
SOURCE
subroutine xmpi_isum_int0d(xval, xsum, comm, request, ierr)

! Non-blocking sum-reduction of a scalar integer over the processes of comm.
! The MPI request is returned to the caller; when MPI_IALLREDUCE is not
! available, the blocking xmpi_sum is used and a null request is returned.

!Arguments-------------------------
 integer ABI_ASYNC, intent(in), target :: xval
 integer ABI_ASYNC, intent(out), target :: xsum
 integer,intent(in) :: comm
 integer,intent(out) :: ierr, request

!Local variables-------------------
 integer :: local_copy
#ifdef HAVE_MPI_IALLREDUCE
 integer, pointer :: sendbuf(:), recvbuf(:)
 type(c_ptr) :: loc_send, loc_recv
#endif

! *************************************************************************

#ifdef HAVE_MPI_IALLREDUCE
 if (comm /= MPI_COMM_NULL .and. comm /= MPI_COMM_SELF) then
   ! View the target scalars as length-1 arrays so they can be passed to MPI.
   loc_send = c_loc(xval); call c_f_pointer(loc_send, sendbuf, [1])
   loc_recv = c_loc(xsum); call c_f_pointer(loc_recv, recvbuf, [1])
   call MPI_IALLREDUCE(sendbuf, recvbuf, 1, MPI_INTEGER, MPI_SUM, comm, request, ierr)
   xmpi_count_requests = xmpi_count_requests + 1
   return
 end if
 ! Self/null communicator: the sum is just the local value.
 xsum = xval; request = xmpi_request_null
 return
#endif

 ! Fallback: blocking reduction, then hand back a null request.
 local_copy = xval
 call xmpi_sum(local_copy, comm, ierr)
 xsum = local_copy; request = xmpi_request_null

end subroutine xmpi_isum_int0d
ABINIT/xmpi_isum_ip_dp2d [ Functions ]
NAME
xmpi_isum_ip_dp2d
FUNCTION
Combines values from all processes and distribute the result back to all processes. Target: 2d double precision real arrays. Non-blocking INPLACE version.
INPUTS
OUTPUT
SOURCE
subroutine xmpi_isum_ip_dp2d(xval, comm, request, ierr)

! Non-blocking in-place sum-reduction of a 2d real(dp) array over comm.

!Arguments-------------------------
 real(dp) ABI_ASYNC, intent(inout) :: xval(:,:)
 integer,intent(in) :: comm
 integer,intent(out) :: ierr, request
#if !defined HAVE_MPI2_INPLACE
 integer :: n1,n2
 real(dp) ABI_ASYNC, allocatable :: xsum(:,:)
#endif

! *************************************************************************

#ifdef HAVE_MPI_IALLREDUCE
 if (comm /= MPI_COMM_SELF .and. comm /= MPI_COMM_NULL) then
#if defined HAVE_MPI2_INPLACE
   call MPI_IALLREDUCE(MPI_IN_PLACE, xval, product(shape(xval)), MPI_DOUBLE_PRECISION, MPI_SUM, comm, request, ierr)
#else
   n1 = size(xval,1) ; n2 = size(xval,2)
   ABI_STAT_MALLOC(xsum,(n1,n2), ierr)
   if (ierr /= 0) call xmpi_abort(msg='error allocating xsum in xmpi_isum_ip_dp2d')
   ! BUGFIX: send xval and receive into the scratch buffer xsum. The arguments
   ! were swapped, so an uninitialized buffer was used as the send buffer and
   ! its garbage was then copied back over xval.
   call MPI_IALLREDUCE(xval, xsum, product(shape(xval)), MPI_DOUBLE_PRECISION, MPI_SUM, comm, request, ierr)
   ! NOTE(review): copying and freeing xsum before waiting on request assumes
   ! the reduction has completed -- TODO confirm against callers' wait logic.
   xval(:,:) = xsum(:,:)
   ABI_FREE(xsum)
#endif
   xmpi_count_requests = xmpi_count_requests + 1
   return
 end if
 request = xmpi_request_null
 return
#endif

 ! Call the blocking version and return null request.
 call xmpi_sum(xval, comm, ierr)
 request = xmpi_request_null

end subroutine xmpi_isum_ip_dp2d
ABINIT/xmpi_isum_ip_dp3d [ Functions ]
NAME
xmpi_isum_ip_dp3d
FUNCTION
Combines values from all processes and distribute the result back to all processes. Target: 3d double precision real arrays. Non-blocking INPLACE version.
INPUTS
OUTPUT
SOURCE
subroutine xmpi_isum_ip_dp3d(xval, comm, request, ierr)

! Non-blocking in-place sum-reduction of a 3d real(dp) array over comm.

!Arguments-------------------------
 real(dp) ABI_ASYNC, intent(inout) :: xval(:,:,:)
 integer,intent(in) :: comm
 integer,intent(out) :: ierr, request
#if !defined HAVE_MPI2_INPLACE
 integer :: n1,n2,n3
 real(dp) ABI_ASYNC, allocatable :: xsum(:,:,:)
#endif

! *************************************************************************

#ifdef HAVE_MPI_IALLREDUCE
 if (comm /= MPI_COMM_SELF .and. comm /= MPI_COMM_NULL) then
#if defined HAVE_MPI2_INPLACE
   call MPI_IALLREDUCE(MPI_IN_PLACE, xval, product(shape(xval)), MPI_DOUBLE_PRECISION, MPI_SUM, comm, request, ierr)
#else
   n1 = size(xval,1) ; n2 = size(xval,2) ; n3 = size(xval,3)
   ABI_STAT_MALLOC(xsum,(n1,n2,n3), ierr)
   if (ierr /= 0) call xmpi_abort(msg='error allocating xsum in xmpi_isum_ip_dp3d')
   ! BUGFIX: send xval, receive into xsum; the arguments were swapped, so an
   ! uninitialized buffer was reduced and copied back over xval.
   call MPI_IALLREDUCE(xval, xsum, product(shape(xval)), MPI_DOUBLE_PRECISION, MPI_SUM, comm, request, ierr)
   ! NOTE(review): copying and freeing xsum before waiting on request assumes
   ! the reduction has completed -- TODO confirm against callers' wait logic.
   xval(:,:,:) = xsum(:,:,:)
   ABI_FREE(xsum)
#endif
   xmpi_count_requests = xmpi_count_requests + 1
   return
 end if
 request = xmpi_request_null
 return
#endif

 ! Call the blocking version and return null request.
 call xmpi_sum(xval, comm, ierr)
 request = xmpi_request_null

end subroutine xmpi_isum_ip_dp3d
ABINIT/xmpi_isum_ip_dp4d [ Functions ]
NAME
xmpi_isum_ip_dp4d
FUNCTION
Combines values from all processes and distribute the result back to all processes. Target: 4d double precision real arrays. Non-blocking INPLACE version.
INPUTS
OUTPUT
SOURCE
subroutine xmpi_isum_ip_dp4d(xval, comm, request, ierr)

! Non-blocking in-place sum-reduction of a 4d real(dp) array over comm.

!Arguments-------------------------
 real(dp) ABI_ASYNC, intent(inout) :: xval(:,:,:,:)
 integer,intent(in) :: comm
 integer,intent(out) :: ierr, request
#if !defined HAVE_MPI2_INPLACE
 integer :: n1,n2,n3,n4
 real(dp) ABI_ASYNC, allocatable :: xsum(:,:,:,:)
#endif

! *************************************************************************

#ifdef HAVE_MPI_IALLREDUCE
 if (comm /= MPI_COMM_SELF .and. comm /= MPI_COMM_NULL) then
#if defined HAVE_MPI2_INPLACE
   call MPI_IALLREDUCE(MPI_IN_PLACE, xval, product(shape(xval)), MPI_DOUBLE_PRECISION, MPI_SUM, comm, request, ierr)
#else
   n1 = size(xval,1) ; n2 = size(xval,2) ; n3 = size(xval,3) ; n4 = size(xval,4)
   ABI_STAT_MALLOC(xsum,(n1,n2,n3,n4), ierr)
   if (ierr /= 0) call xmpi_abort(msg='error allocating xsum in xmpi_isum_ip_dp4d')
   ! BUGFIX: send xval, receive into xsum; the arguments were swapped, so an
   ! uninitialized buffer was reduced and copied back over xval.
   call MPI_IALLREDUCE(xval, xsum, product(shape(xval)), MPI_DOUBLE_PRECISION, MPI_SUM, comm, request, ierr)
   ! NOTE(review): copying and freeing xsum before waiting on request assumes
   ! the reduction has completed -- TODO confirm against callers' wait logic.
   xval(:,:,:,:) = xsum(:,:,:,:)
   ABI_FREE(xsum)
#endif
   xmpi_count_requests = xmpi_count_requests + 1
   return
 end if
 request = xmpi_request_null
 return
#endif

 ! Call the blocking version and return null request.
 call xmpi_sum(xval, comm, ierr)
 request = xmpi_request_null

end subroutine xmpi_isum_ip_dp4d
ABINIT/xmpi_isum_ip_dpc1d [ Functions ]
NAME
xmpi_isum_ip_dpc1d
FUNCTION
Combines values from all processes and distribute the result back to all processes. Target: 1d double precision complex arrays. Non-blocking INPLACE version.
INPUTS
OUTPUT
SOURCE
subroutine xmpi_isum_ip_dpc1d(xval, comm, request, ierr)

! Non-blocking in-place sum-reduction of a 1d complex(dp) array over comm.

!Arguments-------------------------
 complex(dp) ABI_ASYNC, intent(inout) :: xval(:)
 integer,intent(in) :: comm
 integer,intent(out) :: ierr, request
#if !defined HAVE_MPI2_INPLACE
 integer :: n1
 complex(dp) ABI_ASYNC, allocatable :: xsum(:)
#endif

! *************************************************************************

#ifdef HAVE_MPI_IALLREDUCE
 if (comm /= MPI_COMM_SELF .and. comm /= MPI_COMM_NULL) then
#if defined HAVE_MPI2_INPLACE
   call MPI_IALLREDUCE(MPI_IN_PLACE, xval, product(shape(xval)), MPI_DOUBLE_COMPLEX, MPI_SUM, comm, request, ierr)
#else
   n1 = size(xval,1)
   ABI_STAT_MALLOC(xsum,(n1), ierr)
   if (ierr /= 0) call xmpi_abort(msg='error allocating xsum in xmpi_isum_ip_dpc1d')
   ! BUGFIX: send xval, receive into xsum; the arguments were swapped, so an
   ! uninitialized buffer was reduced and copied back over xval.
   call MPI_IALLREDUCE(xval, xsum, product(shape(xval)), MPI_DOUBLE_COMPLEX, MPI_SUM, comm, request, ierr)
   ! NOTE(review): copying and freeing xsum before waiting on request assumes
   ! the reduction has completed -- TODO confirm against callers' wait logic.
   xval(:) = xsum(:)
   ABI_FREE(xsum)
#endif
   xmpi_count_requests = xmpi_count_requests + 1
   return
 end if
 request = xmpi_request_null
 return
#endif

 ! Call the blocking version and return null request.
 call xmpi_sum(xval, comm, ierr)
 request = xmpi_request_null

end subroutine xmpi_isum_ip_dpc1d
ABINIT/xmpi_isum_ip_dpc2d [ Functions ]
NAME
xmpi_isum_ip_dpc2d
FUNCTION
Combines values from all processes and distribute the result back to all processes. Target: 2d double precision complex arrays. Non-blocking INPLACE version.
INPUTS
OUTPUT
SOURCE
subroutine xmpi_isum_ip_dpc2d(xval, comm, request, ierr)

! Non-blocking in-place sum-reduction of a 2d complex(dp) array over comm.

!Arguments-------------------------
 complex(dp) ABI_ASYNC, intent(inout) :: xval(:,:)
 integer,intent(in) :: comm
 integer,intent(out) :: ierr, request
#if !defined HAVE_MPI2_INPLACE
 integer :: n1,n2
 complex(dp) ABI_ASYNC, allocatable :: xsum(:,:)
#endif

! *************************************************************************

#ifdef HAVE_MPI_IALLREDUCE
 if (comm /= MPI_COMM_SELF .and. comm /= MPI_COMM_NULL) then
#if defined HAVE_MPI2_INPLACE
   call MPI_IALLREDUCE(MPI_IN_PLACE, xval, product(shape(xval)), MPI_DOUBLE_COMPLEX, MPI_SUM, comm, request, ierr)
#else
   n1 = size(xval,1) ; n2 = size(xval,2)
   ABI_STAT_MALLOC(xsum,(n1,n2), ierr)
   ! BUGFIX: abort message previously named the wrong routine (spc2d).
   if (ierr /= 0) call xmpi_abort(msg='error allocating xsum in xmpi_isum_ip_dpc2d')
   ! BUGFIX: send xval, receive into xsum; the arguments were swapped, so an
   ! uninitialized buffer was reduced and copied back over xval.
   call MPI_IALLREDUCE(xval, xsum, product(shape(xval)), MPI_DOUBLE_COMPLEX, MPI_SUM, comm, request, ierr)
   ! NOTE(review): copying and freeing xsum before waiting on request assumes
   ! the reduction has completed -- TODO confirm against callers' wait logic.
   xval(:,:) = xsum(:,:)
   ABI_FREE(xsum)
#endif
   xmpi_count_requests = xmpi_count_requests + 1
   return
 end if
 request = xmpi_request_null
 return
#endif

 ! Call the blocking version and return null request.
 call xmpi_sum(xval, comm, ierr)
 request = xmpi_request_null

end subroutine xmpi_isum_ip_dpc2d
ABINIT/xmpi_isum_ip_dpc3d [ Functions ]
NAME
xmpi_isum_ip_dpc3d
FUNCTION
Combines values from all processes and distribute the result back to all processes. Target: 3d double precision complex arrays. Non-blocking INPLACE version.
INPUTS
OUTPUT
SOURCE
subroutine xmpi_isum_ip_dpc3d(xval, comm, request, ierr)

! Non-blocking in-place sum-reduction of a 3d complex(dp) array over comm.

!Arguments-------------------------
 complex(dp) ABI_ASYNC, intent(inout) :: xval(:,:,:)
 integer,intent(in) :: comm
 integer,intent(out) :: ierr, request
#if !defined HAVE_MPI2_INPLACE
 integer :: n1,n2,n3
 ! BUGFIX: xsum was declared complex(sp), mismatching both xval and the
 ! MPI_DOUBLE_COMPLEX datatype used in the reduction.
 complex(dp) ABI_ASYNC, allocatable :: xsum(:,:,:)
#endif

! *************************************************************************

#ifdef HAVE_MPI_IALLREDUCE
 if (comm /= MPI_COMM_SELF .and. comm /= MPI_COMM_NULL) then
#if defined HAVE_MPI2_INPLACE
   call MPI_IALLREDUCE(MPI_IN_PLACE, xval, product(shape(xval)), MPI_DOUBLE_COMPLEX, MPI_SUM, comm, request, ierr)
#else
   n1 = size(xval,1) ; n2 = size(xval,2) ; n3 = size(xval,3)
   ! BUGFIX: third extent was the literal 3 instead of n3.
   ABI_STAT_MALLOC(xsum,(n1,n2,n3), ierr)
   if (ierr /= 0) call xmpi_abort(msg='error allocating xsum in xmpi_isum_ip_dpc3d')
   ! BUGFIX: send xval, receive into xsum; the arguments were swapped, so an
   ! uninitialized buffer was reduced and copied back over xval.
   call MPI_IALLREDUCE(xval, xsum, product(shape(xval)), MPI_DOUBLE_COMPLEX, MPI_SUM, comm, request, ierr)
   ! NOTE(review): copying and freeing xsum before waiting on request assumes
   ! the reduction has completed -- TODO confirm against callers' wait logic.
   xval(:,:,:) = xsum(:,:,:)
   ABI_FREE(xsum)
#endif
   xmpi_count_requests = xmpi_count_requests + 1
   return
 end if
 request = xmpi_request_null
 return
#endif

 ! Call the blocking version and return null request.
 call xmpi_sum(xval, comm, ierr)
 request = xmpi_request_null

end subroutine xmpi_isum_ip_dpc3d
ABINIT/xmpi_isum_ip_spc1d [ Functions ]
NAME
xmpi_isum_ip_spc1d
FUNCTION
Combines values from all processes and distribute the result back to all processes. Target: 1d single precision complex arrays. Non-blocking INPLACE version.
INPUTS
OUTPUT
SOURCE
subroutine xmpi_isum_ip_spc1d(xval, comm, request, ierr)

! Non-blocking in-place sum-reduction of a 1d complex(sp) array over comm.

!Arguments-------------------------
 complex(sp) ABI_ASYNC, intent(inout) :: xval(:)
 integer,intent(in) :: comm
 integer,intent(out) :: ierr, request
#if !defined HAVE_MPI2_INPLACE
 integer :: n1
 complex(sp) ABI_ASYNC, allocatable :: xsum(:)
#endif

! *************************************************************************

#ifdef HAVE_MPI_IALLREDUCE
 if (comm /= MPI_COMM_SELF .and. comm /= MPI_COMM_NULL) then
#if defined HAVE_MPI2_INPLACE
   call MPI_IALLREDUCE(MPI_IN_PLACE, xval, product(shape(xval)), MPI_COMPLEX, MPI_SUM, comm, request, ierr)
#else
   n1 = size(xval,1)
   ABI_STAT_MALLOC(xsum,(n1), ierr)
   if (ierr /= 0) call xmpi_abort(msg='error allocating xsum in xmpi_isum_ip_spc1d')
   ! BUGFIX: send xval, receive into xsum; the arguments were swapped, so an
   ! uninitialized buffer was reduced and copied back over xval.
   call MPI_IALLREDUCE(xval, xsum, product(shape(xval)), MPI_COMPLEX, MPI_SUM, comm, request, ierr)
   ! NOTE(review): copying and freeing xsum before waiting on request assumes
   ! the reduction has completed -- TODO confirm against callers' wait logic.
   xval(:) = xsum(:)
   ABI_FREE(xsum)
#endif
   xmpi_count_requests = xmpi_count_requests + 1
   return
 end if
 request = xmpi_request_null
 return
#endif

 ! Call the blocking version and return null request.
 call xmpi_sum(xval, comm, ierr)
 request = xmpi_request_null

end subroutine xmpi_isum_ip_spc1d
ABINIT/xmpi_isum_ip_spc2d [ Functions ]
NAME
xmpi_isum_ip_spc2d
FUNCTION
Combines values from all processes and distribute the result back to all processes. Target: 2d single precision complex arrays. Non-blocking INPLACE version.
INPUTS
OUTPUT
SOURCE
subroutine xmpi_isum_ip_spc2d(xval, comm, request, ierr)

! Non-blocking in-place sum-reduction of a 2d complex(sp) array over comm.

!Arguments-------------------------
 complex(sp) ABI_ASYNC, intent(inout) :: xval(:,:)
 integer,intent(in) :: comm
 integer,intent(out) :: ierr, request
#if !defined HAVE_MPI2_INPLACE
 integer :: n1,n2
 complex(sp) ABI_ASYNC, allocatable :: xsum(:,:)
#endif

! *************************************************************************

#ifdef HAVE_MPI_IALLREDUCE
 if (comm /= MPI_COMM_SELF .and. comm /= MPI_COMM_NULL) then
#if defined HAVE_MPI2_INPLACE
   call MPI_IALLREDUCE(MPI_IN_PLACE, xval, product(shape(xval)), MPI_COMPLEX, MPI_SUM, comm, request, ierr)
#else
   n1 = size(xval,1) ; n2 = size(xval,2)
   ABI_STAT_MALLOC(xsum,(n1,n2), ierr)
   if (ierr /= 0) call xmpi_abort(msg='error allocating xsum in xmpi_isum_ip_spc2d')
   ! BUGFIX: send xval, receive into xsum (arguments were swapped), and use
   ! MPI_COMPLEX -- the data is single precision; MPI_DOUBLE_COMPLEX read/wrote
   ! twice the buffer size.
   call MPI_IALLREDUCE(xval, xsum, product(shape(xval)), MPI_COMPLEX, MPI_SUM, comm, request, ierr)
   ! NOTE(review): copying and freeing xsum before waiting on request assumes
   ! the reduction has completed -- TODO confirm against callers' wait logic.
   xval(:,:) = xsum(:,:)
   ABI_FREE(xsum)
#endif
   xmpi_count_requests = xmpi_count_requests + 1
   return
 end if
 request = xmpi_request_null
 return
#endif

 ! Call the blocking version and return null request.
 call xmpi_sum(xval, comm, ierr)
 request = xmpi_request_null

end subroutine xmpi_isum_ip_spc2d
ABINIT/xmpi_isum_ip_spc3d [ Functions ]
NAME
xmpi_isum_ip_spc3d
FUNCTION
Combines values from all processes and distribute the result back to all processes. Target: 3d single precision complex arrays. Non-blocking INPLACE version.
INPUTS
OUTPUT
SOURCE
subroutine xmpi_isum_ip_spc3d(xval, comm, request, ierr)

! Non-blocking in-place sum-reduction of a 3d complex(sp) array over comm.

!Arguments-------------------------
 complex(sp) ABI_ASYNC, intent(inout) :: xval(:,:,:)
 integer,intent(in) :: comm
 integer,intent(out) :: ierr, request
#if !defined HAVE_MPI2_INPLACE
 integer :: n1,n2,n3
 complex(sp) ABI_ASYNC, allocatable :: xsum(:,:,:)
#endif

! *************************************************************************

#ifdef HAVE_MPI_IALLREDUCE
 if (comm /= MPI_COMM_SELF .and. comm /= MPI_COMM_NULL) then
#if defined HAVE_MPI2_INPLACE
   call MPI_IALLREDUCE(MPI_IN_PLACE, xval, product(shape(xval)), MPI_COMPLEX, MPI_SUM, comm, request, ierr)
#else
   n1 = size(xval,1) ; n2 = size(xval,2) ; n3 = size(xval,3)
   ! BUGFIX: third extent was the literal 3 instead of n3.
   ABI_STAT_MALLOC(xsum,(n1,n2,n3), ierr)
   if (ierr /= 0) call xmpi_abort(msg='error allocating xsum in xmpi_isum_ip_spc3d')
   ! BUGFIX: send xval, receive into xsum (arguments were swapped), and use
   ! MPI_COMPLEX -- the data is single precision; MPI_DOUBLE_COMPLEX read/wrote
   ! twice the buffer size.
   call MPI_IALLREDUCE(xval, xsum, product(shape(xval)), MPI_COMPLEX, MPI_SUM, comm, request, ierr)
   ! NOTE(review): copying and freeing xsum before waiting on request assumes
   ! the reduction has completed -- TODO confirm against callers' wait logic.
   xval(:,:,:) = xsum(:,:,:)
   ABI_FREE(xsum)
#endif
   xmpi_count_requests = xmpi_count_requests + 1
   return
 end if
 request = xmpi_request_null
 return
#endif

 ! Call the blocking version and return null request.
 call xmpi_sum(xval, comm, ierr)
 request = xmpi_request_null

end subroutine xmpi_isum_ip_spc3d