TABLE OF CONTENTS


ABINIT/distrb2 [ Functions ]

[ Top ] [ Functions ]

NAME

  distrb2

FUNCTION

  Creates the tables of distribution of processors for sharing the jobs on k-points, spins and bands.

INPUTS

  mband = maximum number of bands
  nband(nkpt*nsppol) = number of bands per k point, for each spin
  nkpt = number of k-points
  nproc= number of processors available for this distribution
  nsppol = 1 for unpolarized, 2 for polarized

SIDE EFFECTS

   mpi_enreg%proc_distrb(nkpt,mband,nsppol)=number of the processor
       that will treat each band in each k point.
   mpi_enreg%nproc_kpt is set

NOTES

  For the time being, the band parallelisation works only
  when the number of bands is identical for spin up and spin down
  at the same k point. The problem is the most clearly seen
  in the kpgio routine, where a different parallel repartition
  of k points for spin up and spin down would conflict with the
  present computation of k+G sphere, independent of the spin.

PARENTS

      dfpt_looppert,eig2stern,eig2tot,mpi_setup

CHILDREN

SOURCE

2488 subroutine distrb2(mband,nband,nkpt,nproc,nsppol,mpi_enreg)
2489 
2490 
2491 !This section has been created automatically by the script Abilint (TD).
2492 !Do not modify the following lines by hand.
2493 #undef ABI_FUNC
2494 #define ABI_FUNC 'distrb2'
2495 !End of the abilint section
2496 
2497  implicit none
2498 
2499 !Arguments ------------------------------------
2500  integer,intent(in) :: mband,nkpt,nproc,nsppol
2501  integer,intent(in) :: nband(nkpt*nsppol)
2502  type(MPI_type),intent(inout) :: mpi_enreg
2503 
2504 !Local variables-------------------------------
2505  integer :: inb,inb1,ind,ind0,nband_k,proc_max,proc_min
2506  integer :: iiband,iikpt,iisppol,ikpt_this_proc,nbsteps,nproc_kpt,temp_unit
2507  integer :: kpt_distrb(nkpt)
2508  logical,save :: first=.true.,has_file
2509  character(len=500) :: message
2510 
2511 !******************************************************************
! Strategy: if a "kpt_distrb" file is present in the working directory, the
! distribution of (k-point, band, spin) states over processors is read from
! it; otherwise it is built by a simple block algorithm that never assigns
! the two spins of a given k-point to the same processor.
2512 
2513  nproc_kpt=mpi_enreg%nproc_kpt
2514  if (mpi_enreg%paral_pert==1) nproc_kpt=nproc
2515 
2516 !Initialization of proc_distrb
! (every entry defaults to the last processor of the k-point communicator)
2517  do iisppol=1,nsppol
2518    do iiband=1,mband
2519      do iikpt=1,nkpt
2520        mpi_enreg%proc_distrb(iikpt,iiband,iisppol)=nproc_kpt-1
2521      end do
2522    end do
2523  end do
2524 !That's all for an empty communication space
2525  if (nproc==0) return
2526 
2527 !Some checks
2528  if (mpi_enreg%paralbd==0) then
2529 !  Check if nkpt and nproc_kpt match
2530    if(nproc_kpt>nkpt*nsppol) then
2531 !    Too much proc. with respect to nkpt
2532      write(message,'(a,i0,a,i0,a,i0,2a)')&
2533 &     'nproc_kpt=',nproc_kpt,' >= nkpt=',nkpt,'* nsppol=',nsppol,ch10,&
2534 &     'The number of processors is larger than nkpt*nsppol. This is a waste.'
2535      MSG_WARNING(message)
2536    else if(mod(nkpt*nsppol,nproc_kpt)/=0) then
2537 !    nkpt not a multiple of nproc_kpt
2538      write(message,'(a,i0,a,i0,3a)')&
2539 &     'nkpt*nsppol (', nkpt*nsppol, ') is not a multiple of nproc_kpt (',nproc_kpt, ')', ch10,&
2540 &     'The k-point parallelisation is not efficient.'
2541      MSG_WARNING(message)
2542    end if
2543  end if
2544 
2545 !Inquire whether there exist a file containing the processor distribution
2546  if (first) then
2547 !  First call only: test whether the distribution file exists
2548 !  (only its existence is checked here; it is read below if present)
2549    first=.false.; has_file = file_exists("kpt_distrb")
2550  end if
2551 
2552 !Initialize the processor distribution, either from a file, or from an algorithm
2553  if (has_file) then
2554    if (open_file('kpt_distrb',message,newunit=temp_unit,form='formatted',status='old') /= 0) then
2555      MSG_ERROR(message)
2556    end if
2557    rewind(unit=temp_unit)
2558    if (mpi_enreg%paralbd == 1) then
2559 !    -> read bands distribution
2560      read(temp_unit,*) mpi_enreg%proc_distrb
2561    else
2562      read(temp_unit,*) kpt_distrb
2563    end if
2564    close(temp_unit)
2565    proc_max=0
2566    proc_min=nproc_kpt
2567 !  -> determine the range of proc. requested
2568    if (mpi_enreg%paralbd == 1) then
2569      do iisppol=1,nsppol
2570        do iikpt=1,nkpt
2571          nband_k = nband(iikpt+(iisppol-1)*nkpt)
!        NOTE(review): proc_max/proc_min are overwritten at each (k,spin)
!        iteration, so only the last k-point is actually checked below —
!        presumably max/min accumulation was intended; to be confirmed.
2572          proc_max=maxval(mpi_enreg%proc_distrb(iikpt,1:nband_k,iisppol))
2573          proc_min=minval(mpi_enreg%proc_distrb(iikpt,1:nband_k,iisppol))
2574        end do
2575      end do
2576    else
2577      proc_max=maxval(kpt_distrb(1:nkpt))
2578      proc_min=minval(kpt_distrb(1:nkpt))
2579 !    -> fill the tab proc_distrb with kpt_distrb
2580      do iisppol=1,nsppol
2581        do iikpt=1,nkpt
2582          nband_k = nband(iikpt+(iisppol-1)*nkpt)
2583          do iiband=1,nband_k
2584            mpi_enreg%proc_distrb(iikpt,iiband,iisppol)=kpt_distrb(iikpt)
2585          end do
2586        end do
2587      end do
2588    end if ! mpi_enreg%paralbd
2589 
2590    if(proc_max>(nproc_kpt-1)) then
2591 !    Too much proc. requested
2592      write(message, '(a,a,a,i0,a,a,a)' )&
2593 &     'The number of processors mentioned in the kpt_distrb file',ch10,&
2594 &     'must be lower or equal to the actual number of processors =',nproc_kpt-1,ch10,&
2595 &     'Action: change the kpt_distrb file, or increase the','  number of processors.'
2596      MSG_ERROR(message)
2597    end if
2598 
2599    if(proc_max/=(nproc_kpt-1)) then
2600 !    Too few proc. used
2601      write(message, '(a,i0,a,a,a,i0,a,a,a)' )&
2602 &     'Only ',proc_max+1,' processors are used (from kpt_distrb file),',ch10,&
2603 &     'when',nproc_kpt,' processors are available.',ch10,&
2604 &     'Action: adjust number of processors and kpt_distrb file.'
2605      MSG_ERROR(message)
2606    end if
2607 
2608    if(proc_min<0) then
2609      write(message, '(a,a,a)' )&
2610 &     'The number of processors must be bigger than 0 in kpt_distrb file.',ch10,&
2611 &     'Action: modify kpt_distrb file.'
2612      MSG_ERROR(message)
2613    end if
2614 
2615  else
2616 !  'kpt_distrb' file does not exist
2617 
2618    if (mpi_enreg%paralbd==1) then
2619 
2620 !    No possible band parallelization
2621      if (nproc<(nkpt*nsppol)) then
2622 
2623 !      Does not allow a processor to treat different spins
!      inb1 = number of (k,spin) states handled per processor (ceiling)
2624        ind0=0
2625        inb1=(nkpt*nsppol)/nproc;if (mod((nkpt*nsppol),nproc)/=0) inb1=inb1+1
2626        do iikpt=1,nkpt
2627          nband_k=nband(iikpt)
2628          ind=ind0/inb1
2629          do iiband=1,nband_k
2630            mpi_enreg%proc_distrb(iikpt,iiband,1)=ind
!          spin-down k-points are distributed from the last proc backwards
2631            if (nsppol==2) mpi_enreg%proc_distrb(iikpt,iiband,2)=nproc-ind-1
2632          end do
2633          ind0=ind0+1
2634        end do
2635 
2636 !      MT130831 : OLD CODING
2637 !      do iisppol=1,nsppol;do iikpt=1,nkpt
2638 !      ind=(iikpt+(iisppol-1)*nkpt-1)/inb1
2639 !      nband_k=nband(iikpt+(iisppol-1)*nkpt)
2640 !      do iiband=1,nband_k
2641 !      mpi_enreg%proc_distrb(iikpt,iiband,iisppol)=ind
2642 !      end do;end do;end do
2643 !      MT130831 : END OF OLD CODING
2644 
2645 !    Possible band parallelization
2646      else
2647 !      Does not allow a processor to treat different spins
!      inb = procs per (k,spin); inb1 = bands per proc within a k-point (ceiling)
2648        ind0=0;inb=nproc/(nkpt*nsppol)
2649        do iikpt=1,nkpt
2650          nband_k=nband(iikpt)
2651          inb1=nband_k/inb;if (mod(nband_k,inb)/=0) inb1=inb1+1
2652          do iiband=1,nband_k
2653            ind=(iiband-1)/inb1+ind0
2654            mpi_enreg%proc_distrb(iikpt,iiband,1)=ind
2655            if (nsppol==2) mpi_enreg%proc_distrb(iikpt,iiband,2)=nproc-ind-1
2656          end do
2657          ind0=ind+1
2658        end do
2659 
2660 !      MT130831 : OLD CODING
2661 !      ind0=0;inb=nproc/(nkpt*nsppol)
2662 !      do iisppol=1,nsppol;do iikpt=1,nkpt
2663 !      nband_k=nband(iikpt+(iisppol-1)*nkpt)
2664 !      inb1=nband_k/inb;if (mod(nband_k,inb)/=0) inb1=inb1+1
2665 !      do iiband=1,nband_k
2666 !      ind=(iiband-1)/inb1+ind0
2667 !      mpi_enreg%proc_distrb(iikpt,iiband,iisppol)=ind
2668 !      end do
2669 !      ind0=ind+1
2670 !      end do;end do
2671 !      MT130831 : END OF OLD CODING
2672 
2673      end if
2674 
2675 !    XG060807 : OLD CODING
2676 !    ind=0
2677 !    do iisppol=1,nsppol;do iikpt=1,nkpt
2678 !    nband_k=nband(iikpt+(iisppol-1)*nkpt)
2679 !    do iiband=1,nband_k
2680 !    mpi_enreg%proc_distrb(iikpt,iiband,iisppol)=ind/nbsteps
2681 !    ind=ind+1
2682 !    end do;end do;end do
2683 !    XG060807 : END OF OLD CODING
2684 
2685    elseif (mpi_enreg%paralbd==0) then
2686 
2687 !    Does not allow a processor to treat different spins
!    nbsteps = number of (k,spin) states handled per processor (ceiling)
2688      ind0=0
2689      nbsteps=(nsppol*nkpt)/nproc_kpt
2690      if (mod((nsppol*nkpt),nproc_kpt)/=0) nbsteps=nbsteps+1
2691      do iikpt=1,nkpt
2692        nband_k=nband(iikpt)
2693        ind=ind0/nbsteps
2694        do iiband=1,nband_k
2695          mpi_enreg%proc_distrb(iikpt,iiband,1)=ind
2696          if (nsppol==2) mpi_enreg%proc_distrb(iikpt,iiband,2)=nproc_kpt-ind-1
2697        end do
2698        ind0=ind0+1
2699      end do
2700 
2701 !    XG060807 : OLD CODING
2702 !    ind=0
2703 !    do iisppol=1,nsppol;do iikpt=1,nkpt
2704 !    nband_k = nband(iikpt+(iisppol-1)*nkpt)
2705 !    do iiband=1,nband_k
2706 !    Distribute k-points homogeneously
2707 !    proc_distrb(iikpt,iiband,iisppol)=mod(iikpt-1,nproc_kpt)
2708 !    mpi_enreg%proc_distrb(iikpt,iiband,iisppol)=ind/nbsteps
2709 !    end do
2710 !    ind=ind + 1
2711 !    end do;end do
2712 !    XG060807 : END OF OLD CODING
2713 
2714    end if ! mpi_enreg%paralbd
2715 
2716  end if ! has_file
2717 
! Build my_kpttab (local index of each k-point treated by this proc) and
! my_isppoltab (flag per spin treated by this proc) from proc_distrb.
2718  mpi_enreg%my_kpttab(:)=0
2719  mpi_enreg%my_isppoltab(:)=0
2720  do iisppol=1,nsppol
2721    ikpt_this_proc=0
2722    do iikpt=1,nkpt
2723      nband_k=nband(iikpt+(iisppol-1)*nkpt)
2724      if(proc_distrb_cycle(mpi_enreg%proc_distrb,iikpt,1,nband_k,iisppol,mpi_enreg%me_kpt)) cycle
2725      ikpt_this_proc=ikpt_this_proc+1
2726 !    This test should be done when the dataset is read and the work is split between processors
2727 !    If this test fails for one proc, the other procs fall into a deadlock -> so PERS mode and MPI_ABORT
2728 !    if (ikpt_this_proc > mkmem) then
2729 !    message = ' this bandfft tab cannot be allocated !'
2730 !    MSG_BUG(message)
2731 !    end if
2732      mpi_enreg%my_kpttab(iikpt)=ikpt_this_proc
2733      mpi_enreg%my_isppoltab(iisppol)=1
2734    end do
2735  end do
2736 
2737 end subroutine distrb2

ABINIT/distrb2_hf [ Functions ]

[ Top ] [ Functions ]

NAME

  distrb2_hf

FUNCTION

  Create the tables of distribution of processors for sharing the jobs
  on occupied states (labeled by k-points, bands and spin indices) for Hartree-Fock calculation.

INPUTS

  nbandhf = maximum number of occupied bands
  nkpthf = number of k-points in full BZ
  nproc= number of processors available for this distribution
  nsppol = 1 for unpolarized, 2 for polarized

SIDE EFFECTS

   mpi_enreg%proc_distrb(nkpthf,nbandhf,nsppol)=number of the processor
       that will treat each band in each k point.
   mpi_enreg%nproc_kpt is set

NOTES

  For the time being, the band parallelisation works only
  when the number of bands is identical for spin up and spin down
  at the same k point. The problem is the most clearly seen
  in the kpgio routine, where a different parallel repartition
  of k points for spin up and spin down would conflict with the
  present computation of k+G sphere, independent of the spin.

PARENTS

      mpi_setup

CHILDREN

SOURCE

2774 subroutine distrb2_hf(nbandhf,nkpthf, nproc, nsppol, mpi_enreg)
2775 
2776 
2777 !This section has been created automatically by the script Abilint (TD).
2778 !Do not modify the following lines by hand.
2779 #undef ABI_FUNC
2780 #define ABI_FUNC 'distrb2_hf'
2781 !End of the abilint section
2782 
2783  implicit none
2784 
2785 !Arguments ------------------------------------
2786  integer,intent(in) :: nbandhf,nkpthf,nproc,nsppol
2787  type(MPI_type),intent(inout) :: mpi_enreg
2788 
2789 !Local variables-------------------------------
2790  integer :: ind,iiband,iikpt,iistep,nproc_hf
2791  character(len=500) :: message
2792 
2793 !******************************************************************
! Distribute the nkpthf*nbandhf occupied (k-point,band) states uniformly
! over the nproc_hf processors of the Hartree-Fock communicator:
!  - nproc_hf < nkpthf : parallelize over k-points only;
!  - otherwise         : parallelize over all occupied states.
2794 
2795  nproc_hf=mpi_enreg%nproc_hf
2796 
2797 !* Initialize distrb_hf (the array exists necessarily)
!* (every entry defaults to the last processor of the HF communicator)
2798  do iiband=1,nbandhf
2799    do iikpt=1,nkpthf
2800      mpi_enreg%distrb_hf(iikpt,iiband,1)=nproc_hf-1
2801    end do
2802  end do
2803 
2804 !* End of the routine for an empty communication space
2805  if (nproc==0) return
2806 
2807 !*** Testing section ***
2808 
2809  if (nsppol==2) then
2810 !* Check that the distribution over (spin,k point) allow to consider spin up and spin dn independently.
2811    if (mpi_enreg%nproc_kpt/=1.and.mod(mpi_enreg%nproc_kpt,2)/=0) then
2812      MSG_ERROR( 'The variable nproc_kpt is not even but nsppol= 2')
2813 !* In this case, one processor will carry both spin. (this will create pbms for the calculation)
2814    end if
2815 !* Check that the number of band is the same for each spin, at each k-point. (It should be)
2816 !*   do iikpt=1,nkpthf
2817 !*     if (nband(iikpt)/=nband(iikpt+nkpthf)) then
2818 !*     message = ' WARNING - the number of bands is different for spin up or spin down. '
2819 !*     MSG_ERROR(message)
2820 !*     end if
2821 !*    end do
2822 !* If one of these tests fails for one proc, the other procs fall into a deadlock, as in distrb2.
2823 !* What does it mean ???
2824  end if
2825 
2826 
2827 !* Check if nkpthf and nproc_hf match
2828  if (nproc_hf>nkpthf*nbandhf) then
2829 !* There are too many processors with respect to nkpthf*nbandhf
2830    write(message, '(a,a,i4,a,i4,a,i4,a,a)' ) ch10,&
2831 &   'nproc_hf=',nproc_hf,' >= nkpthf=',nkpthf,'* nbandhf=',nbandhf,ch10,&
2832 &   'The number of processors is larger than nkpthf*nbandhf. This is a waste.'
2833    MSG_WARNING(message)
2834 
2835  else if(mod(nkpthf*nbandhf,nproc_hf)/=0) then
2836 !* nkpthf*nbandhf is not a multiple of nproc_hf
2837    write(message, '(2a,i5,a,i5,3a)' ) ch10,&
2838 &   'nkpthf*nbandhf (', nkpthf*nbandhf, ') is not a multiple of nproc_hf (',nproc_hf, ')', ch10,&
2839 &   'The parallelisation may not be efficient.'
2840    MSG_WARNING(message)
2841  end if
2842 
2843 !*** End of testing section ***
2844 
2845 !*** Initialize the processor distribution from a simple algorithm ***
2846 
2847  if (nproc_hf<nkpthf) then
2848 !* In this case, a parallelization over kpts only.
!* iistep = number of k-points handled per processor (ceiling)
2849    iistep=nkpthf/nproc_hf
2850    if (mod(nkpthf,nproc_hf) /=0) iistep=iistep+1
2851    ind=0
2852    do iikpt=1,nkpthf
2853 !*** Only the first "nbandhf" bands are considered (they are assumed to be the only occupied ones)
2854      do iiband=1,nbandhf
2855        mpi_enreg%distrb_hf(iikpt,iiband,1)=ind/iistep
2856      end do
2857      ind=ind+1
2858    end do
2859 
2860  else
2861 !* In this case, a parallelization over all the occupied states is possible.
!* iistep = number of occupied states handled per processor (ceiling, at least 1)
2862    if (nproc_hf < nbandhf*nkpthf) then
2863      iistep=(nbandhf*nkpthf)/nproc_hf
2864      if (mod((nbandhf*nkpthf),nproc_hf) /=0) iistep=iistep+1
2865    else
2866      iistep=1
2867    end if
2868    ind=0
2869    do iikpt=1,nkpthf
2870 !*** Only the first "nbandhf" bands are considered (they are assumed to be the only occupied ones)
2871      do iiband=1,nbandhf
2872        mpi_enreg%distrb_hf(iikpt,iiband,1)=ind/iistep
2873        ind=ind+1
2874      end do
2875    end do
2876  end if
2877 
2878 !*** Initialization of processor distribution from a file (simple copy from distrb2, not yet implemented) ***
2879 
2880 ! !* Inquire whether there exist a file containing the processor distribution
2881 !  if (first) then
2882 ! !  Case first time : test file to do
2883 ! !  Open the file containing the (k-points,bands) distribution
2884 !    open(unit=temp_unit,file='kpt_distrb_hf',form='formatted',status='old',iostat=ios)
2885 !    if(ios==0) then
2886 ! !    'kpt_distrb_hf' file exists
2887 !      file_exist=1
2888 !      close(temp_unit)
2889 !    else
2890 !      file_exist=0
2891 !    end if
2892 !    first=.false.
2893 !  end if
2894 !
2895 ! !* Initialize the processor distribution, either from a file, or from an algorithm
2896 !  if (file_exist == 1) then
2897 ! !* Read (k-points,bands) distribution out of the file
2898 !    open(unit=temp_unit,file='kpt_distrb_hf',form='formatted',status='old',iostat=ios)
2899 !    rewind(unit=temp_unit)
2900 !    read(temp_unit,*) mpi_enreg%distrb_hf
2901 !    close(temp_unit)
2902 ! !* Determine the range of processors requested
2903 !    proc_max=0
2904 !    proc_min=nproc_hf
2905 !    do iikpt=1,nkpthf
2906 !      mband_occ_k = mband_occ(iikpt+(iisppol-1)*nkpthf)
2907 !      proc_max=maxval(mpi_enreg%distrb_hf(iikpt,1:mband_occ_k,1))
2908 !      proc_min=minval(mpi_enreg%distrb_hf(iikpt,1:mband_occ_k,1))
2909 !    end do
2910 !
2911 !    if(proc_max>(nproc_hf-1)) then
2912 ! !*    Too much proc. requested
2913 !      write(message, '(a,a,a,i4,a,a,a)' )&
2914 ! &     '  The number of processors mentioned in the kpt_distrb file',ch10,&
2915 ! &     '  must be lower or equal to the actual number of processors =',&
2916 ! &     nproc_hf-1,ch10,&
2917 ! &     '  Action: change the kpt_distrb file, or increase the',&
2918 ! &     '  number of processors.'
2919 !      MSG_ERROR(message)
2920 !    end if
2921 !
2922 !    if(proc_max/=(nproc_hf-1)) then
2923 ! !*    Too few proc. used
2924 !      write(message, '(a,i4,a,a,a,i4,a,a,a)' )&
2925 ! &     '  Only ',proc_max+1,' processors are used (from kpt_distrb file),',ch10,&
2926 ! &     '  when',nproc_hf,' processors are available.',ch10,&
2927 ! &     '  Action: adjust number of processors and kpt_distrb file.'
2928 !      MSG_ERROR(message)
2929 !    end if
2930 !
2931 !    if(proc_min<0) then
2932 !      write(message, '(a,a,a)' )&
2933 ! &     '  The number of processors must be bigger than 0 in kpt_distrb file.',ch10,&
2934 ! &     ' Action: modify kpt_distrb file.'
2935 !      MSG_ERROR(message)
2936 !    end if
2937 !  else
2938 ! !* The file does not exist...
2939 !  end if ! file_exist
2940 
2941 !DEBUG
2942 !write(std_out,*)' distrb2_hf: exit '
2943 !ENDDEBUG
2944 
2945 end subroutine distrb2_hf

ABINIT/initmpi_atom [ Functions ]

[ Top ] [ Functions ]

NAME

  initmpi_atom

FUNCTION

  Initializes the mpi information for parallelism over atoms (PAW).

INPUTS

  dtset <type(dataset_type)>=all input variables for this dataset
  mpi_enreg= information about MPI parallelization

OUTPUT

  mpi_enreg= information about MPI parallelization
    comm_atom                 =communicator over atoms
    nproc_atom                =size of the communicator over atoms
    my_natom                  =number of atoms treated by current proc
    my_atmtab(mpi_enreg%natom)=indexes of the atoms treated by current processor

PARENTS

      m_paral_pert,mpi_setup

CHILDREN

      get_my_atmtab,get_my_natom

SOURCE

1084 subroutine initmpi_atom(dtset,mpi_enreg)
1085 
1086 
1087 !This section has been created automatically by the script Abilint (TD).
1088 !Do not modify the following lines by hand.
1089 #undef ABI_FUNC
1090 #define ABI_FUNC 'initmpi_atom'
1091 !End of the abilint section
1092 
1093  implicit none
1094 
1095 !Arguments ------------------------------------
1096 !scalars
1097  type(dataset_type),intent(in) :: dtset
1098  type(MPI_type),intent(inout) :: mpi_enreg
1099 
1100 !Local variables-------------------------------
1101 !scalars
1102  logical :: my_atmtab_allocated,paral_atom
1103  character(len=500) :: msg
1104  integer :: iatom
1105 
1106 ! ***********************************************************************
1107 
1108  DBG_ENTER("COLL")
1109 
! Default: no parallelism over atoms — the current proc treats all of them.
1110  mpi_enreg%nproc_atom=1
1111  mpi_enreg%comm_atom=xmpi_comm_self
1112  mpi_enreg%my_natom=dtset%natom
! Release any table left over from a previous call before rebuilding it.
1113  if (associated(mpi_enreg%my_atmtab))then
1114    ABI_DEALLOCATE(mpi_enreg%my_atmtab)
1115  end if
1116  nullify(mpi_enreg%my_atmtab)
1117 
! No MPI at all: leave an empty (size-0) table and return.
1118  if (xmpi_paral==0) then
1119    mpi_enreg%nproc_atom=0
1120    ABI_ALLOCATE(mpi_enreg%my_atmtab,(0))
1121    return
1122  end if
1123 
1124 !Check compatibility
1125  if (dtset%paral_atom>0) then
1126    msg=''
1127    if (dtset%usepaw==0)  msg= 'Parallelisation over atoms not compatible with usepaw=0 !'
1128    if (dtset%usedmft==1) msg=' Parallelisation over atoms not compatible with usedmft=1 !'
1129    if (dtset%usewvl==1)  msg= 'Parallelisation over atoms not compatible with usewvl=1 !'
1130    if (dtset%prtden>1.and.dtset%paral_kgb<=0) &
1131 &   msg= 'Parallelisation over atoms not compatible with prtden>1 (PAW AE densities) !'
1132    if (dtset%optdriver/=RUNL_GSTATE.and.dtset%optdriver/=RUNL_RESPFN) &
1133 &   msg=' Parallelisation over atoms only compatible with GS or RF !'
1134    if (dtset%macro_uj/=0)msg=' Parallelisation over atoms not compatible with macro_uj!=0 !'
1135    if (msg/='') then
1136      MSG_ERROR(msg)
1137    end if
1138  end if
1139 
! Proc outside the atom communicator: empty workload, empty table.
1140  if (mpi_enreg%comm_atom==xmpi_comm_null) then
1141    mpi_enreg%nproc_atom=0;mpi_enreg%my_natom=0
1142    ABI_ALLOCATE(mpi_enreg%my_atmtab,(0))
1143    return
1144  end if
1145 
1146  if (dtset%paral_atom>0) then
1147 
1148 !  Build correct atom communicator
!  (kpt+band communicator with paral_kgb==1 in ground state, cell otherwise)
1149    if (dtset%optdriver==RUNL_GSTATE.and.dtset%paral_kgb==1) then
1150      mpi_enreg%comm_atom=mpi_enreg%comm_kptband
1151    else
1152      mpi_enreg%comm_atom=mpi_enreg%comm_cell
1153    end if
1154 
1155 !  Get number of processors sharing the atomic data distribution
1156    mpi_enreg%nproc_atom=xmpi_comm_size(mpi_enreg%comm_atom)
1157 
1158 !  Get local number of atoms
1159    call get_my_natom(mpi_enreg%comm_atom,mpi_enreg%my_natom,dtset%natom)
!  paral_atom is true only if atoms are actually distributed over procs
1160    paral_atom=(mpi_enreg%my_natom/=dtset%natom)
1161 
1162 !  Build atom table
1163    if (mpi_enreg%my_natom>0.and.paral_atom) then
1164      my_atmtab_allocated=.false.
1165      call get_my_atmtab(mpi_enreg%comm_atom,mpi_enreg%my_atmtab,my_atmtab_allocated, &
1166 &     paral_atom,dtset%natom)
1167    else if (.not.paral_atom) then
!    No actual distribution: identity table (each atom is its own index)
1168      ABI_ALLOCATE(mpi_enreg%my_atmtab,(dtset%natom))
1169      mpi_enreg%my_atmtab(1:dtset%natom)=(/(iatom, iatom=1,dtset%natom)/)
1170    else if (mpi_enreg%my_natom==0) then
1171      ABI_ALLOCATE(mpi_enreg%my_atmtab,(0))
1172    end if
1173 
1174  end if
1175 
1176  DBG_EXIT("COLL")
1177 
1178 end subroutine initmpi_atom

ABINIT/initmpi_band [ Functions ]

[ Top ] [ Functions ]

NAME

  initmpi_band

FUNCTION

  Initializes the mpi information for band parallelism (paralbd=1).

INPUTS

  mpi_enreg= information about MPI parallelization
  nband(nkpt*nsppol)= number of bands per k point, for each spin
  nkpt= number of k-points
  nsppol= 1 for unpolarized, 2 for polarized

OUTPUT

  mpi_enreg=information about MPI parallelization
  mpi_enreg%comm_band=communicator of BAND set

PARENTS

      dfpt_looppert

CHILDREN

SOURCE

2224 subroutine initmpi_band(mpi_enreg,nband,nkpt,nsppol)
2225 
2226 
2227 !This section has been created automatically by the script Abilint (TD).
2228 !Do not modify the following lines by hand.
2229 #undef ABI_FUNC
2230 #define ABI_FUNC 'initmpi_band'
2231 !End of the abilint section
2232 
2233  implicit none
2234 
2235 !Arguments ------------------------------------
2236 !scalars
2237  integer,intent(in) :: nkpt,nsppol
2238  integer,intent(in) :: nband(nkpt*nsppol)
2239  type(MPI_type),intent(inout) :: mpi_enreg
2240 
2241 !Local variables-------------------------------
2242 !scalars
2243  integer :: ii,ikpt,iproc_min,iproc_max,irank,isppol
2244  integer :: me,nband_k,nproc,nbsteps,nrank,nstates,spacecomm
2245  character(len=500) :: msg
2246 !arrays
2247  integer,allocatable :: ranks(:)
2248 
2249 ! ***********************************************************************
2250 
! Default: each processor is alone in its band communicator.
2251  mpi_enreg%comm_band=xmpi_comm_self
2252 
2253  if (mpi_enreg%paralbd==1.and.xmpi_paral==1) then
2254 
2255 !  Comm_kpt is supposed to treat spins, k-points and bands
2256    spacecomm=mpi_enreg%comm_kpt
2257    nproc=mpi_enreg%nproc_kpt
2258    me=mpi_enreg%me_kpt
2259 
!  Total number of states, and states per processor (ceiling division)
2260    nstates=sum(nband(1:nkpt*nsppol))
2261    nbsteps=nstates/nproc
2262    if (mod(nstates,nproc)/=0) nbsteps=nbsteps+1
2263 
!  A band communicator is only needed if at least one k-point has more
!  bands than nbsteps, i.e. its bands are spread over several processors.
2264    if (nbsteps<maxval(nband(1:nkpt*nsppol))) then
2265 
2266      nrank=0
2267      do isppol=1,nsppol
2268        do ikpt=1,nkpt
2269          ii=ikpt+(isppol-1)*nkpt
2270          nband_k=nband(ii)
2271          if (nbsteps<nband_k) then
!          Range of procs sharing the bands of this k-point
2272            iproc_min=minval(mpi_enreg%proc_distrb(ikpt,:,isppol))
2273            iproc_max=maxval(mpi_enreg%proc_distrb(ikpt,:,isppol))
2274            if ((me>=iproc_min).and.(me<=iproc_max)) then
2275              nrank=iproc_max-iproc_min+1
2276              if (.not.allocated(ranks)) then
2277                ABI_ALLOCATE(ranks,(nrank))
2278                if (nrank>0) ranks=(/((iproc_min+irank-1),irank=1,nrank)/)
2279              else if (nrank/=size(ranks)) then
!              The subcommunicator can only be built once; all k-points
!              treated by this proc must span the same number of procs.
2280                msg='Number of bands per proc should be the same for all k-points!'
2281                MSG_BUG(msg)
2282              end if
2283            end if
2284          end if
2285        end do
2286      end do
!    Procs not involved in any band spread still take part in the
!    (collective) subcommunicator creation, with an empty rank list.
2287      if (.not.allocated(ranks)) then
2288        ABI_ALLOCATE(ranks,(0))
2289      end if
2290 
2291      mpi_enreg%comm_band=xmpi_subcomm(spacecomm,nrank,ranks)
2292 
2293      ABI_DEALLOCATE(ranks)
2294    end if
2295  end if
2296 
2297 end subroutine initmpi_band

ABINIT/initmpi_grid [ Functions ]

[ Top ] [ Functions ]

NAME

  initmpi_grid

FUNCTION

  Initializes the MPI information for the grid:
    * 2D if parallelization KPT/FFT (paral_kgb == 0 & MPI)
    * 3D if parallelization KPT/FFT/BAND (paral_kgb == 1 & MPI)
    * 2D in case of a Hartree-Fock calculation

INPUTS

OUTPUT

PARENTS

      mpi_setup

CHILDREN

      mpi_cart_coords,mpi_cart_create,mpi_cart_sub,mpi_comm_rank,wrtout
      xmpi_abort,xmpi_comm_free

SOURCE

1260 subroutine initmpi_grid(mpi_enreg)
1261 
1262 
1263 !This section has been created automatically by the script Abilint (TD).
1264 !Do not modify the following lines by hand.
1265 #undef ABI_FUNC
1266 #define ABI_FUNC 'initmpi_grid'
1267 !End of the abilint section
1268 
1269  implicit none
1270 
1271 !Arguments ------------------------------------
1272  type(MPI_type),intent(inout) :: mpi_enreg
1273 
1274 !Local variables-------------------------------
1275 !scalars
1276  integer :: nproc,nproc_eff,spacecomm
1277  character(len=500) :: msg
1278 #if defined HAVE_MPI
1279  integer :: commcart_4d,dimcart,ierr,me_cart_4d
1280  integer :: commcart_2d,me_cart_2d
1281  logical :: reorder
1282 !arrays
1283  integer,allocatable :: coords(:),sizecart(:)
1284  logical,allocatable :: periode(:), keepdim(:)
1285 #endif
1286 
1287 ! *********************************************************************
1288 
1289  DBG_ENTER("COLL")
1290 
1291 !Select the correct "world" communicator"
1292  nproc=mpi_enreg%nproc_cell
1293  if(mpi_enreg%paral_pert==1) nproc=mpi_enreg%nproc_cell
1294  spacecomm=mpi_enreg%comm_cell
1295 
1296 !Fake values for null communicator
1297  if (nproc==0) then
1298    mpi_enreg%nproc_fft    = 0
1299    mpi_enreg%nproc_band   = 0
1300    mpi_enreg%nproc_hf    = 0
1301    mpi_enreg%nproc_kpt    = 0
1302    mpi_enreg%nproc_spinor   = 0
1303    mpi_enreg%comm_fft            = xmpi_comm_null
1304    mpi_enreg%comm_band           = xmpi_comm_null
1305    mpi_enreg%comm_hf             = xmpi_comm_null
1306    mpi_enreg%comm_kpt            = xmpi_comm_null
1307    mpi_enreg%comm_kptband        = xmpi_comm_null
1308    mpi_enreg%comm_spinor         = xmpi_comm_null
1309    mpi_enreg%comm_bandspinor     = xmpi_comm_null
1310    mpi_enreg%comm_spinorfft      = xmpi_comm_null
1311    mpi_enreg%comm_bandfft        = xmpi_comm_null
1312    mpi_enreg%comm_bandspinorfft  = xmpi_comm_null
1313    mpi_enreg%bandpp       = 1
1314    return
1315  end if
1316 
1317 #if defined HAVE_MPI
1318  if (mpi_enreg%paral_hf==0) then
1319    ! either the option Fock exchange is not active or there is no parallelization on Fock exchange calculation.
1320 
1321    if (mpi_enreg%nproc_spinor>1) mpi_enreg%paral_spinor=1
1322 
1323     !Effective number of processors used for the grid
1324    nproc_eff=mpi_enreg%nproc_fft*mpi_enreg%nproc_band *mpi_enreg%nproc_kpt*mpi_enreg%nproc_spinor
1325    if(nproc_eff/=nproc) then
1326      write(msg,'(4a,5(a,i0))') &
1327 &     '  The number of band*FFT*kpt*spinor processors, npband*npfft*npkpt*npspinor should be',ch10,&
1328 &     '  equal to the total number of processors, nproc.',ch10,&
1329 &     '  However, npband   =',mpi_enreg%nproc_band,&
1330 &     '           npfft    =',mpi_enreg%nproc_fft,&
1331 &     '           npkpt    =',mpi_enreg%nproc_kpt,&
1332 &     '           npspinor =',mpi_enreg%nproc_spinor,&
1333 &     '       and nproc    =',nproc
1334      MSG_WARNING(msg)
1335    end if
1336 
1337    !Nothing to do if only 1 proc
1338    if (nproc_eff==1) return
1339 
1340    ! Initialize the communicator for Hartree-Fock to xmpi_comm_self
1341    mpi_enreg%me_hf =0
1342    mpi_enreg%comm_hf=xmpi_comm_self
1343 
1344    if(mpi_enreg%paral_kgb==0) then
1345      mpi_enreg%me_fft =0
1346      mpi_enreg%me_band=0
1347      mpi_enreg%me_kpt =mpi_enreg%me_cell
1348      mpi_enreg%me_spinor=0
1349      mpi_enreg%comm_fft=xmpi_comm_self
1350      mpi_enreg%comm_band=xmpi_comm_self
1351      mpi_enreg%comm_kpt=mpi_enreg%comm_cell
1352      mpi_enreg%comm_spinor=xmpi_comm_self
1353      mpi_enreg%comm_bandspinor=xmpi_comm_self
1354      mpi_enreg%comm_kptband=mpi_enreg%comm_cell
1355      mpi_enreg%comm_spinorfft=xmpi_comm_self
1356      mpi_enreg%comm_bandfft=xmpi_comm_self
1357      mpi_enreg%comm_bandspinorfft=xmpi_comm_self
1358    else
1359      !  CREATE THE 4D GRID
1360      !  ==================================================
1361 
1362      !  Create the global cartesian 4D- communicator
1363      !  valgrind claims this is not deallocated in test v5/72
1364      !  Can someone knowledgable check?
1365      dimcart=4
1366      ABI_ALLOCATE(sizecart,(dimcart))
1367      ABI_ALLOCATE(periode,(dimcart))
1368 !    MT 2012-june: change the order of the indexes; not sure this is efficient
1369 !    (not efficient on TGCC-Curie).
1370      sizecart(1)=mpi_enreg%nproc_kpt  ! mpi_enreg%nproc_kpt
1371      sizecart(2)=mpi_enreg%nproc_band ! mpi_enreg%nproc_band
1372      sizecart(3)=mpi_enreg%nproc_spinor ! mpi_enreg%nproc_spinor
1373      sizecart(4)=mpi_enreg%nproc_fft  ! mpi_enreg%nproc_fft
1374      periode(:)=.false.;reorder=.false.
1375      call MPI_CART_CREATE(spacecomm,dimcart,sizecart,periode,reorder,commcart_4d,ierr)
1376      ABI_DEALLOCATE(periode)
1377      ABI_DEALLOCATE(sizecart)
1378 
1379 !    Find the index and coordinates of the current processor
1380      call MPI_COMM_RANK(commcart_4d, me_cart_4d, ierr)
1381      ABI_ALLOCATE(coords,(dimcart))
1382      call MPI_CART_COORDS(commcart_4d, me_cart_4d,dimcart,coords,ierr)
1383      mpi_enreg%me_kpt =coords(1)
1384      mpi_enreg%me_band=coords(2)
1385      mpi_enreg%me_spinor=coords(3)
1386      mpi_enreg%me_fft =coords(4)
1387      ABI_DEALLOCATE(coords)
1388 
1389      ABI_ALLOCATE(keepdim,(dimcart))
1390 
1391 !    Create the communicator for fft distribution
1392      keepdim(1)=.false.
1393      keepdim(2)=.false.
1394      keepdim(3)=.false.
1395      keepdim(4)=.true.
1396      call MPI_CART_SUB(commcart_4d, keepdim, mpi_enreg%comm_fft,ierr)
1397 
1398 !    Create the communicator for band distribution
1399      keepdim(1)=.false.
1400      keepdim(2)=.true.
1401      keepdim(3)=.false.
1402      keepdim(4)=.false.
1403      call MPI_CART_SUB(commcart_4d, keepdim, mpi_enreg%comm_band,ierr)
1404 
1405 !    Create the communicator for kpt distribution
1406      keepdim(1)=.true.
1407      keepdim(2)=.false.
1408      keepdim(3)=.false.
1409      keepdim(4)=.false.
1410      call MPI_CART_SUB(commcart_4d, keepdim, mpi_enreg%comm_kpt,ierr)
1411 
1412 !    Create the communicator for spinor distribution
1413      keepdim(1)=.false.
1414      keepdim(2)=.false.
1415      keepdim(3)=.true.
1416      keepdim(4)=.false.
1417      call MPI_CART_SUB(commcart_4d, keepdim, mpi_enreg%comm_spinor,ierr)
1418 
1419 !    Create the communicator for band-spinor distribution
1420      keepdim(1)=.false.
1421      keepdim(2)=.true.
1422      keepdim(3)=.true.
1423      keepdim(4)=.false.
1424      call MPI_CART_SUB(commcart_4d, keepdim, mpi_enreg%comm_bandspinor,ierr)
1425      if (ierr /= MPI_SUCCESS ) then
1426        call xmpi_abort(mpi_enreg%comm_world,ierr)
1427      end if
1428 
1429 !    Create the communicator for kpt-band distribution
1430      keepdim(1)=.true.
1431      keepdim(2)=.true.
1432      keepdim(3)=.false.
1433      keepdim(4)=.false.
1434      call MPI_CART_SUB(commcart_4d, keepdim, mpi_enreg%comm_kptband,ierr)
1435 
1436 !    Create the communicator for fft-spinor distribution
1437      keepdim(1)=.false.
1438      keepdim(2)=.false.
1439      keepdim(3)=.true.
1440      keepdim(4)=.true.
1441      call MPI_CART_SUB(commcart_4d, keepdim, mpi_enreg%comm_spinorfft,ierr)
1442 
1443 !    Create the communicator for fft-band distribution
1444      keepdim(1)=.false.
1445      keepdim(2)=.true.
1446      keepdim(3)=.false.
1447      keepdim(4)=.true.
1448      call MPI_CART_SUB(commcart_4d, keepdim, mpi_enreg%comm_bandfft,ierr)
1449 
1450 !    Create the communicator for fft-band-spinor distribution
1451      keepdim(1)=.false.
1452      keepdim(2)=.true.
1453      keepdim(3)=.true.
1454      keepdim(4)=.true.
1455      call MPI_CART_SUB(commcart_4d, keepdim, mpi_enreg%comm_bandspinorfft,ierr)
1456 
1457      ABI_DEALLOCATE(keepdim)
1458      call xmpi_comm_free(commcart_4d)
1459    end if
1460 
1461 !  Write some data
1462    write(msg,'(a,4i5)') 'npfft, npband, npspinor and npkpt: ',&
1463 &   mpi_enreg%nproc_fft,mpi_enreg%nproc_band, &
1464 &   mpi_enreg%nproc_spinor,mpi_enreg%nproc_kpt
1465    call wrtout(std_out,msg,'COLL')
1466    write(msg,'(a,4i5)') 'me_fft, me_band, me_spinor , me_kpt: ',&
1467 &   mpi_enreg%me_fft,mpi_enreg%me_band,&
1468 &   mpi_enreg%me_spinor, mpi_enreg%me_kpt
1469 
1470  else ! paral_hf==1
1471 !* Option Hartree-Fock is active and more than 1 processor is dedicated to the parallelization over occupied states.
1472 
1473 !* Initialize the values of fft, band and spinor communicators, as in the case paral_kgb==0.
1474    mpi_enreg%me_fft =0
1475    mpi_enreg%me_band=0
1476    mpi_enreg%me_spinor=0
1477    mpi_enreg%comm_fft=xmpi_comm_self
1478    mpi_enreg%comm_band=xmpi_comm_self
1479    mpi_enreg%comm_spinor=xmpi_comm_self
1480    mpi_enreg%comm_bandspinor=xmpi_comm_self
1481    mpi_enreg%comm_kptband=mpi_enreg%comm_cell
1482    mpi_enreg%comm_spinorfft=xmpi_comm_self
1483    mpi_enreg%comm_bandfft=xmpi_comm_self
1484    mpi_enreg%comm_bandspinorfft=xmpi_comm_self
1485 
1486 !* Create the global cartesian 2D- communicator
1487    dimcart=2
1488    ABI_ALLOCATE(sizecart,(dimcart))
1489    ABI_ALLOCATE(periode,(dimcart))
1490    sizecart(1)=mpi_enreg%nproc_kpt  ! mpi_enreg%nproc_kpt
1491    sizecart(2)=mpi_enreg%nproc_hf   ! mpi_enreg%nproc_hf
1492    periode(:)=.false.;reorder=.false.
1493    call MPI_CART_CREATE(spacecomm,dimcart,sizecart,periode,reorder,commcart_2d,ierr)
1494    ABI_DEALLOCATE(periode)
1495    ABI_DEALLOCATE(sizecart)
1496 
1497 !* Find the index and coordinates of the current processor
1498    call MPI_COMM_RANK(commcart_2d, me_cart_2d, ierr)
1499    ABI_ALLOCATE(coords,(dimcart))
1500    call MPI_CART_COORDS(commcart_2d, me_cart_2d,dimcart,coords,ierr)
1501    mpi_enreg%me_kpt =coords(1)
1502    mpi_enreg%me_hf=coords(2)
1503    ABI_DEALLOCATE(coords)
1504 
1505    ABI_ALLOCATE(keepdim,(dimcart))
1506 
1507 !* Create the communicator for kpt distribution
1508    keepdim(1)=.true.
1509    keepdim(2)=.false.
1510    call MPI_CART_SUB(commcart_2d, keepdim, mpi_enreg%comm_kpt,ierr)
1511 
1512 !* Create the communicator for hf distribution
1513    keepdim(1)=.false.
1514    keepdim(2)=.true.
1515    call MPI_CART_SUB(commcart_2d, keepdim, mpi_enreg%comm_hf,ierr)
1516 
1517    ABI_DEALLOCATE(keepdim)
1518    call xmpi_comm_free(commcart_2d)
1519 
1520 !* Write some data
1521    write(msg,'(a,2(1x,i0))') 'nphf and npkpt: ',mpi_enreg%nproc_hf, mpi_enreg%nproc_kpt
1522    call wrtout(std_out,msg,'COLL')
1523    write(msg,'(a,2(1x,i0))') 'me_hf, me_kpt: ',mpi_enreg%me_hf, mpi_enreg%me_kpt
1524  end if
1525 #endif
1526 
1527  DBG_EXIT("COLL")
1528 
1529 end subroutine initmpi_grid

ABINIT/initmpi_img [ Functions ]

[ Top ] [ Functions ]

NAME

  initmpi_img

FUNCTION

  Initializes the mpi information for parallelism over images of the cell (npimage>1).

INPUTS

  dtset <type(dataset_type)>=all input variables in this dataset
  mpi_enreg= information about MPI parallelization
  option= see below

OUTPUT

  mpi_enreg%my_nimage= number of images of the cell treated by current proc
  ===== if option==1 or option==-1
    mpi_enreg%my_imgtab= indexes of images of the cell treated by current proc
  ===== if option==2 or option==3 or option==-1
    mpi_enreg%comm_cell=Communicator over all processors treating the same image
    mpi_enreg%nproc_cell=size of comm_cell
    mpi_enreg%me_cell=my rank in comm_cell
  ===== if option==3 or option==-1
    mpi_enreg%comm_img=Communicator over all images
    mpi_enreg%nproc_img=size of comm_img
    mpi_enreg%me_img=my rank in comm_img
    mpi_enreg%distrb_img(:)=index of processor treating each image (in comm_img communicator)

PARENTS

      mpi_setup

CHILDREN

      sort_int

SOURCE

subroutine initmpi_img(dtset,mpi_enreg,option)

 !use m_io_tools,  only: flush_unit

!This section has been created automatically by the script Abilint (TD).
!Do not modify the following lines by hand.
#undef ABI_FUNC
#define ABI_FUNC 'initmpi_img'
!End of the abilint section

 implicit none

!Arguments ------------------------------------
 integer,intent(in) :: option
 type(dataset_type),intent(in) :: dtset
 type(MPI_type),intent(inout) :: mpi_enreg

!Local variables-------------------------------
 integer :: imod,irank,iprocmax,iprocmin,jrank
 integer :: ndynimage_eff,nimage_eff,nproc_per_image,nrank
 logical,parameter :: debug=.false.
 integer,allocatable :: ranks(:)
 character(len=500) :: msg

!integer :: group_cell,ierr

! ***********************************************************************

 DBG_ENTER("COLL")

!Default: each process is alone in its image communicator and the whole
!world works on the same cell
 if (option/=0) then
   mpi_enreg%comm_img=xmpi_comm_self
   mpi_enreg%comm_cell=mpi_enreg%comm_world
 end if

!Image parallelism is only activated for GS calculations with 1<npimage<=nproc
 if (xmpi_paral==1.and.dtset%npimage>1.and.dtset%npimage<=mpi_enreg%nproc.and. &
&    dtset%optdriver==RUNL_GSTATE) then

!  Activate flag for parallelization over images
   mpi_enreg%paral_img=1

!  Dynamic images only make sense when more than one time step is performed
   ndynimage_eff=dtset%ndynimage;if (dtset%ntimimage<=1) ndynimage_eff=0

!  Print several warnings
   if (option==0) then
     nimage_eff=max(ndynimage_eff,dtset%nimage-ndynimage_eff)
     if (dtset%npimage>nimage_eff) then
       write(unit=msg,fmt='(3a,i4,a,i4,4a)') &
&       'The number of processors used for the parallelization',ch10,&
&       ' over images (npimage=',dtset%npimage,&
&       ') is greater than the number of dynamic (or static) images (',nimage_eff,') !',ch10,&
&       ' This is inefficient.',ch10
       MSG_WARNING(msg)
     end if
     if (dtset%npimage>mpi_enreg%nproc) then
       write(unit=msg,fmt='(3a,i6,a,i4,4a)') &
&       'The number of processors used for the parallelization',ch10,&
&       ' over images (nproc=',mpi_enreg%nproc,&
&       ') is smaller than npimage in input file (',dtset%npimage,&
&       ')!',ch10,' This is inconsistent.',ch10
       MSG_ERROR(msg)
     end if
     if (mod(nimage_eff,dtset%npimage)/=0) then
       write(unit=msg,fmt='(3a,i4,a,i4,4a)') &
&       'The number of processors used for the parallelization',ch10,&
&       ' over images (npimage=',dtset%npimage,&
&       ') does not divide the number of dynamic images (',nimage_eff,&
&       ') !',ch10,' This is inefficient (charge unbalancing).',ch10
       MSG_WARNING(msg)
     end if
   end if

!  # of images treated by current proc
!  Dynamic and static images are distributed independently, each in a
!  round-robin way over the npimage groups of nproc_per_image processors.
   nproc_per_image=mpi_enreg%nproc/dtset%npimage
   iprocmax=nproc_per_image*dtset%npimage-1   ! processes above iprocmax stay idle
   if (mpi_enreg%me<=iprocmax) then
     mpi_enreg%my_nimage=(ndynimage_eff/dtset%npimage)+((dtset%nimage-ndynimage_eff)/dtset%npimage)
     imod=mod(ndynimage_eff,dtset%npimage)-1
     if (mpi_enreg%me/nproc_per_image<=imod) mpi_enreg%my_nimage=mpi_enreg%my_nimage+1
     imod=mod((dtset%nimage-ndynimage_eff),dtset%npimage)-1
     if (mpi_enreg%me/nproc_per_image<=imod) mpi_enreg%my_nimage=mpi_enreg%my_nimage+1
   else
     mpi_enreg%my_nimage=0
   end if
   if (option==1.or.option==-1) then
!    Indexes of images treated by current proc
     if (mpi_enreg%me<=iprocmax) then
       ABI_ALLOCATE(mpi_enreg%my_imgtab,(mpi_enreg%my_nimage))
       nrank=0
       imod=mpi_enreg%me/nproc_per_image+1;imod=mod(imod,dtset%npimage)
!      Dynamic images
       irank=0
       do jrank=1,dtset%nimage
         if (dtset%dynimage(jrank)/=0.and.dtset%ntimimage>1) then
           irank=irank+1
           if (mod(irank,dtset%npimage)==imod) then
             nrank=nrank+1
             mpi_enreg%my_imgtab(nrank)=jrank
           end if
         end if
       end do
!      Static images
       irank=0
       do jrank=1,dtset%nimage
         if (dtset%dynimage(jrank)==0.or.dtset%ntimimage<=1) then
           irank=irank+1
           if (mod(irank,dtset%npimage)==imod) then
             nrank=nrank+1
             mpi_enreg%my_imgtab(nrank)=jrank
           end if
         end if
       end do
       if (nrank/=mpi_enreg%my_nimage) then
         MSG_BUG('Error on nrank !')
       end if
!      Sort images by increasing index (this step is MANDATORY !!)
       ABI_ALLOCATE(ranks,(nrank))
       call sort_int(nrank,mpi_enreg%my_imgtab,ranks)
       ABI_DEALLOCATE(ranks)
     else
       ABI_ALLOCATE(mpi_enreg%my_imgtab,(0))
     end if
   end if
   if (option==2.or.option==3.or.option==-1) then
!    Communicator over one image: the nproc_per_image consecutive processes
!    working on the same image(s) of the cell
     if (mpi_enreg%me<=iprocmax) then
       ABI_ALLOCATE(ranks,(nproc_per_image))
       iprocmin=(mpi_enreg%me/nproc_per_image)*nproc_per_image
       ranks=(/((iprocmin+irank-1),irank=1,nproc_per_image)/)
       mpi_enreg%comm_cell=xmpi_subcomm(mpi_enreg%comm_world,nproc_per_image,ranks)
       ABI_DEALLOCATE(ranks)
       mpi_enreg%me_cell=xmpi_comm_rank(mpi_enreg%comm_cell)
       mpi_enreg%nproc_cell=nproc_per_image
       if (mpi_enreg%me_cell==0.and.mod(mpi_enreg%me,nproc_per_image)/=0) then
         MSG_BUG('Error on me_cell !')
       end if
     else
!      Idle process: invalidate its CELL communicator.
!      (Bug fix: this used to null comm_img, which left comm_cell attached to
!      comm_world and clobbered comm_img before the option==3 section below.)
       mpi_enreg%comm_cell=xmpi_comm_null
       mpi_enreg%nproc_cell=0
       mpi_enreg%me_cell=-1
     end if
   end if
   if (option==3.or.option==-1) then
!    Communicator over all images: the npimage processes having the same
!    rank (iprocmin) inside their image group
     if (mpi_enreg%me<=iprocmax) then
       ABI_ALLOCATE(ranks,(dtset%npimage))
       iprocmin=mod(mpi_enreg%me,nproc_per_image)
       ranks=(/((iprocmin+(irank-1)*nproc_per_image),irank=1,dtset%npimage)/)
       mpi_enreg%comm_img=xmpi_subcomm(mpi_enreg%comm_world,dtset%npimage,ranks)
       ABI_DEALLOCATE(ranks)
       mpi_enreg%me_img=xmpi_comm_rank(mpi_enreg%comm_img)
       mpi_enreg%nproc_img=dtset%npimage
       if (iprocmin==0.and.mpi_enreg%me_img==0.and.mpi_enreg%me/=0) then
         MSG_BUG('Error on me_img!')
       end if
       ABI_ALLOCATE(mpi_enreg%distrb_img,(dtset%nimage))
!      Dynamic images: round-robin over the npimage groups (ranks 0..npimage-1)
       nrank=0
       do irank=1,dtset%nimage
         if (dtset%dynimage(irank)/=0.and.dtset%ntimimage>1) then
           nrank=nrank+1
           mpi_enreg%distrb_img(irank)=mod(nrank,dtset%npimage)-1
           if (mpi_enreg%distrb_img(irank)==-1) mpi_enreg%distrb_img(irank)=dtset%npimage-1
         end if
       end do
!      Static images: independent round-robin over the same groups
       nrank=0
       do irank=1,dtset%nimage
         if (dtset%dynimage(irank)==0.or.dtset%ntimimage<=1) then
           nrank=nrank+1
           mpi_enreg%distrb_img(irank)=mod(nrank,dtset%npimage)-1
           if (mpi_enreg%distrb_img(irank)==-1) mpi_enreg%distrb_img(irank)=dtset%npimage-1
         end if
       end do
     else
!      Idle process: invalidate its image communicator
       mpi_enreg%comm_img=xmpi_comm_null
       mpi_enreg%nproc_img=0
       mpi_enreg%me_img=-1
       ABI_ALLOCATE(mpi_enreg%distrb_img,(0))
     end if
   end if

!  if (debug) then
!  write(200+mpi_enreg%me,*) "=================================="
!  write(200+mpi_enreg%me,*) "DEBUGGING STATEMENTS IN INITMPI_IMG"
!  write(200+mpi_enreg%me,*) "=================================="
!  write(200+mpi_enreg%me,*) "option         =",option
!  write(200+mpi_enreg%me,*) "MPI_UNDEFINED  =",MPI_UNDEFINED
!  write(200+mpi_enreg%me,*) "MPI_IDENT      =",MPI_IDENT
!  write(200+mpi_enreg%me,*) "MPI_CONGRUENT  =",MPI_CONGRUENT
!  write(200+mpi_enreg%me,*) "MPI_SIMILAR    =",MPI_SIMILAR
!  write(200+mpi_enreg%me,*) "MPI_UNEQUAL    =",MPI_UNEQUAL
!  write(200+mpi_enreg%me,*) "null_comm      =",MPI_COMM_NULL
!  write(200+mpi_enreg%me,*) "self_comm      =",xmpi_comm_self
!  write(200+mpi_enreg%me,*) "world_comm     =",mpi_enreg%comm_world
!  write(200+mpi_enreg%me,*) "empty_group    =",MPI_GROUP_EMPTY
!  write(200+mpi_enreg%me,*) "nimage         =",mpi_enreg%my_nimage
!  write(200+mpi_enreg%me,*) "nproc_per_image=",nproc_per_image
!  call MPI_COMM_SIZE(mpi_enreg%comm_world,irank,ierr)
!  write(200+mpi_enreg%me,*) "Size of world_comm    =",irank
!  call MPI_COMM_RANK(mpi_enreg%comm_world,irank,ierr)
!  write(200+mpi_enreg%me,*) "My rank in world_comm =",irank
!  if (option==1.or.option==-1) then
!  write(200+mpi_enreg%me,*) "index_img=",mpi_enreg%my_imgtab(:)
!  end if
!  if (option==2.or.option==3.or.option==-1) then
!  write(200+mpi_enreg%me,*) "nproc_cell  =",mpi_enreg%nproc_cell
!  write(200+mpi_enreg%me,*) "me_cell     =",mpi_enreg%me_cell
!  call xmpi_comm_group(mpi_enreg%comm_cell,group_cell,ierr)
!  write(200+mpi_enreg%me,*) "group_cell  =",group_cell
!  write(200+mpi_enreg%me,*) "comm_cell   =",mpi_enreg%comm_cell
!  if (group_cell/=MPI_GROUP_EMPTY) then
!  call MPI_GROUP_SIZE(group_cell,irank,ierr)
!  write(200+mpi_enreg%me,*) "Size of group_cell   =",irank
!  call MPI_GROUP_RANK(group_cell,irank,ierr)
!  write(200+mpi_enreg%me,*) "My rank in group_cell=",irank
!  else
!  write(200+mpi_enreg%me,*) "Size of group_cell   =",0
!  write(200+mpi_enreg%me,*) "My rank in group_cell=",-1
!  end if
!  if (mpi_enreg%comm_cell/=MPI_COMM_NULL) then
!  call MPI_COMM_SIZE(mpi_enreg%comm_cell,irank,ierr)
!  write(200+mpi_enreg%me,*) "Size of comm_cell   =",irank
!  call MPI_COMM_RANK(mpi_enreg%comm_cell,irank,ierr)
!  write(200+mpi_enreg%me,*) "My rank in comm_cell=",irank
!  call MPI_COMM_COMPARE(mpi_enreg%comm_world,mpi_enreg%comm_cell,irank,ierr)
!  write(200+mpi_enreg%me,*) "Comparison world_comm/comm_cell=",irank
!  call MPI_COMM_COMPARE(xmpi_comm_self,mpi_enreg%comm_cell,irank,ierr)
!  write(200+mpi_enreg%me,*) "Comparison self_comm/comm_cell =",irank
!  else
!  write(200+mpi_enreg%me,*) "Size of comm_cell   =",0
!  write(200+mpi_enreg%me,*) "My rank in comm_cell=",-1
!  write(200+mpi_enreg%me,*) "Comparison world_comm/comm_cell=",MPI_UNEQUAL
!  write(200+mpi_enreg%me,*) "Comparison self_comm/comm_cell =",MPI_UNEQUAL
!  end if
!  end if
!  if (option==3.or.option==-1) then
!  write(200+mpi_enreg%me,*) "nproc_img  =",mpi_enreg%nproc_img
!  write(200+mpi_enreg%me,*) "me_img     =",mpi_enreg%me_img
!  write(200+mpi_enreg%me,*) "img_comm   =",mpi_enreg%comm_img
!  if (mpi_enreg%comm_img/=MPI_COMM_NULL) then
!  call MPI_COMM_SIZE(mpi_enreg%comm_img,irank,ierr)
!  write(200+mpi_enreg%me,*) "Size of img_comm   =",irank
!  call MPI_COMM_RANK(mpi_enreg%comm_img,irank,ierr)
!  write(200+mpi_enreg%me,*) "My rank in img_comm=",irank
!  call MPI_COMM_COMPARE(mpi_enreg%comm_world,mpi_enreg%comm_img,irank,ierr)
!  write(200+mpi_enreg%me,*) "Comparison world_comm/img_comm=",irank
!  call MPI_COMM_COMPARE(xmpi_comm_self,mpi_enreg%comm_img,irank,ierr)
!  write(200+mpi_enreg%me,*) "Comparison self_comm/img_comm =",irank
!  else
!  write(200+mpi_enreg%me,*) "Size of img_comm   =",0
!  write(200+mpi_enreg%me,*) "My rank in img_comm=",-1
!  write(200+mpi_enreg%me,*) "Comparison world_comm/img_comm=",MPI_UNEQUAL
!  write(200+mpi_enreg%me,*) "Comparison self_comm/img_comm =",MPI_UNEQUAL
!  end if
!  write(200+mpi_enreg%me,*) "distrb_img=",mpi_enreg%distrb_img(:)
!  end if
!  write(200+mpi_enreg%me,*)
!  call flush_unit(200+mpi_enreg%me)
!  if (option==-1) stop
!  end if

 else

!  Do not activate flag for parallelization over images
   mpi_enreg%paral_img=0
!  # of images treated by current proc
   if (dtset%optdriver==RUNL_GSTATE) then
     mpi_enreg%my_nimage=dtset%nimage
   else
     mpi_enreg%my_nimage=1
   end if
!  Indexes of images treated by current proc
   if (option==1.or.option==-1) then
     ABI_ALLOCATE(mpi_enreg%my_imgtab,(mpi_enreg%my_nimage))
     mpi_enreg%my_imgtab=(/(irank,irank=1,mpi_enreg%my_nimage)/)
   end if
!  Communicator over all images
   if (option==2.or.option==3.or.option==-1) then
!    Communicator for one image
     mpi_enreg%nproc_cell=mpi_enreg%nproc
     mpi_enreg%me_cell=mpi_enreg%me
   end if
   if (option==3.or.option==-1) then
!    Communicator over all images
     mpi_enreg%nproc_img=1
     mpi_enreg%comm_img=xmpi_comm_self
     mpi_enreg%me_img=0
     ABI_ALLOCATE(mpi_enreg%distrb_img,(dtset%nimage))
     mpi_enreg%distrb_img(:)=0
   end if
 end if

 DBG_EXIT("COLL")

end subroutine initmpi_img

ABINIT/initmpi_pert [ Functions ]

[ Top ] [ Functions ]

NAME

  initmpi_pert

FUNCTION

  Creates group for Parallelization over Perturbations.

INPUTS

  dtset <type(dataset_type)>=all input variables in this dataset

OUTPUT

SIDE EFFECTS

  mpi_enreg=information about MPI parallelization

PARENTS

      mpi_setup

CHILDREN

      get_npert_rbz,xmpi_comm_free

SOURCE

2040 subroutine initmpi_pert(dtset,mpi_enreg)
2041 
2042 
2043 !This section has been created automatically by the script Abilint (TD).
2044 !Do not modify the following lines by hand.
2045 #undef ABI_FUNC
2046 #define ABI_FUNC 'initmpi_pert'
2047 !End of the abilint section
2048 
2049  implicit none
2050 
2051 !Arguments ------------------------------------
2052 !scalars
2053  type(MPI_type),intent(inout) :: mpi_enreg
2054  type(dataset_type),intent(in) :: dtset
2055 
2056 !Local variables-------------------------------
2057 !scalars
2058  integer:: iprocmin,irank,npert,nproc_per_cell,nrank,numproc
2059  integer,allocatable :: ranks(:)
2060  character(len=500) :: msg
2061 !arrays
     ! nkpt_rbz/nband_rbz are filled (and allocated) by get_npert_rbz below
2062  integer,pointer :: nkpt_rbz(:)
2063  real(dp),pointer :: nband_rbz(:,:)
2064 
2065 ! ***********************************************************************
2066 
     ! Split the nproc processors into nppert groups ("cells"), one group per
     ! subset of DFPT perturbations: comm_pert links processors having the same
     ! in-cell rank across cells, comm_cell_pert links the processors of one
     ! cell, and distrb_pert(ipert) holds the comm_pert rank treating
     ! perturbation ipert.
     !
     ! A negative me_pert flags an invalid distribution detected upstream.
2067  if (mpi_enreg%me_pert<0) then
2068    msg='Error in MPI distribution! Change your proc(s) distribution or use autoparal>0.'
2069    MSG_ERROR(msg)
2070  end if
2071 
     ! npert = number of perturbations for this dataset (sizes distrb_pert)
2072  call get_npert_rbz(dtset,nband_rbz,nkpt_rbz,npert)
2073 
2074  if (dtset%nppert>=1) then
2075    if (mpi_enreg%comm_cell/=mpi_enreg%comm_world) then
2076      call xmpi_comm_free(mpi_enreg%comm_cell)
2077    end if
2078    mpi_enreg%comm_cell=mpi_enreg%comm_world
2079 
2080    ! These values will be properly set in set_pert_comm
2081    mpi_enreg%me_cell=mpi_enreg%me
2082    mpi_enreg%nproc_cell=mpi_enreg%nproc
2083 
2084    if (mpi_enreg%me>=0) then
       ! Number of processors dedicated to each perturbation group
2085      nproc_per_cell=mpi_enreg%nproc/dtset%nppert
       ! comm_pert gathers the nppert processors sharing the same in-cell
       ! rank (iprocmin), one taken from each consecutive cell
2086      ABI_ALLOCATE(ranks,(dtset%nppert))
2087      iprocmin=mod(mpi_enreg%me,nproc_per_cell)
2088      ranks=(/((iprocmin+(irank-1)*nproc_per_cell),irank=1,dtset%nppert)/)
2089      mpi_enreg%comm_pert=xmpi_subcomm(mpi_enreg%comm_world,dtset%nppert,ranks)
2090      ABI_DEALLOCATE(ranks)
2091      mpi_enreg%me_pert=xmpi_comm_rank(mpi_enreg%comm_pert)
2092      mpi_enreg%nproc_pert=dtset%nppert
2093      if (iprocmin==0.and.mpi_enreg%me_pert==0.and.mpi_enreg%me/=0) then
2094        MSG_BUG('Error on me_pert!')
2095      end if
2096 !    Define mpi_enreg%distrb_pert: round-robin mapping of the npert
!    perturbations onto the nppert groups (comm_pert ranks 0..nppert-1)
2097      ABI_ALLOCATE(mpi_enreg%distrb_pert,(npert))
2098      nrank=0
2099      do irank=1,npert
2100        nrank=nrank+1
2101        mpi_enreg%distrb_pert(irank)=mod(nrank,dtset%nppert)-1
2102        if (mpi_enreg%distrb_pert(irank)==-1) mpi_enreg%distrb_pert(irank)=dtset%nppert-1
2103      end do
2104      ! Make sure that subrank 0 is working on the last perturbation
2105      ! Swap the ranks if necessary
2106      numproc=mpi_enreg%distrb_pert(npert)
2107      if(numproc/=0) then
       ! -2/-3 are temporary markers used to exchange the roles of
       ! rank 0 and rank numproc without a second work array
2108        do irank=1,npert
2109          if (mpi_enreg%distrb_pert(irank)==numproc) mpi_enreg%distrb_pert(irank)=-2
2110          if (mpi_enreg%distrb_pert(irank)==0) mpi_enreg%distrb_pert(irank)=-3
2111        end do
2112        do irank=1,npert
2113          if (mpi_enreg%distrb_pert(irank)==-2) mpi_enreg%distrb_pert(irank)=0
2114          if (mpi_enreg%distrb_pert(irank)==-3) mpi_enreg%distrb_pert(irank)=numproc
2115        end do
2116      end if
2117 !    Communicator over one cell: the nproc_per_cell consecutive processors
!    working on the same perturbation subset
2118      ABI_ALLOCATE(ranks,(nproc_per_cell))
2119      iprocmin=(mpi_enreg%me/nproc_per_cell)*nproc_per_cell
2120      ranks=(/((iprocmin+irank-1),irank=1,nproc_per_cell)/)
2121      mpi_enreg%comm_cell_pert=xmpi_subcomm(mpi_enreg%comm_world,nproc_per_cell,ranks)
2122      ABI_DEALLOCATE(ranks)
2123    end if
2124 
2125  else  !nppert<1: no parallelism over perturbations, every perturbation
       !is treated by rank 0 of a self communicator
2126    mpi_enreg%nproc_pert=1
2127    mpi_enreg%comm_pert=xmpi_comm_self
2128    mpi_enreg%me_pert=0
2129    ABI_ALLOCATE(mpi_enreg%distrb_pert,(npert))
2130    mpi_enreg%distrb_pert(:)=0
2131  end if
2132 
     ! Free the arrays allocated by get_npert_rbz
2133  ABI_DEALLOCATE(nband_rbz)
2134  ABI_DEALLOCATE(nkpt_rbz)
2135 
2136 end subroutine initmpi_pert

ABINIT/initmpi_seq [ Functions ]

[ Top ] [ Functions ]

NAME

  initmpi_seq

FUNCTION

  Initializes the MPI information for sequential use.

INPUTS

OUTPUT

  mpi_enreg=information about MPI parallelization

PARENTS

      atm2fft,bethe_salpeter,bsepostproc,calc_vhxc_me,cut3d,debug_tools
      dfpt_atm2fft,dfpt_nstpaw,dieltcel,eph,fftprof,ks_ddiago
      linear_optics_paw,m_cut3d,m_dvdb,m_fft,m_fft_prof,m_fftcore,m_gsphere
      m_hamiltonian,m_ioarr,m_kxc,m_mpinfo,m_pawpwij,m_ppmodel,m_screening
      m_wfd,m_wfk,mlwfovlp_qp,mrggkk,mrgscr,partial_dos_fractions,pawmknhat
      pawmknhat_psipsi,pawsushat,posdoppler,scfcv,screening,sigma,suscep_stat
      susk,suskmm,ujdet,vdw_kernelgen,wfk_analyze

CHILDREN

      nullify_mpi_enreg

SOURCE

 972 subroutine initmpi_seq(mpi_enreg)
 973 
 974 
 975 !This section has been created automatically by the script Abilint (TD).
 976 !Do not modify the following lines by hand.
 977 #undef ABI_FUNC
 978 #define ABI_FUNC 'initmpi_seq'
 979 !End of the abilint section
 980 
 981  implicit none
 982 
 983 !Arguments ------------------------------------
 984  type(MPI_type),intent(out) :: mpi_enreg
 985 
 986 ! ***********************************************************************
 987 
 988  DBG_ENTER("COLL")
 989 
 990 !Set default seq values for scalars
 991  mpi_enreg%bandpp=1
 992  mpi_enreg%me=0
 993  mpi_enreg%me_band=0
 994  mpi_enreg%me_cell=0
 995  mpi_enreg%me_fft=0
 996  mpi_enreg%me_g0=1
 997  mpi_enreg%me_img=0
 998  mpi_enreg%me_hf=0
 999  mpi_enreg%me_kpt=0
1000  mpi_enreg%me_pert=0
1001  mpi_enreg%me_spinor=0
1002  mpi_enreg%me_wvl=0
1003  mpi_enreg%my_natom=0       ! Should be natom
1004  mpi_enreg%my_isppoltab=0   ! Should be (1,0) if nsppol=1 or (1,1) if nsppol=2
1005  mpi_enreg%ngfft3_ionic=1
1006  mpi_enreg%my_nimage=1
1007  mpi_enreg%nproc=1
1008  mpi_enreg%nproc_atom=1
1009  mpi_enreg%nproc_band=1
1010  mpi_enreg%nproc_cell=1
1011  mpi_enreg%nproc_fft=1
1012  mpi_enreg%nproc_img=1
1013  mpi_enreg%nproc_hf=1
1014  mpi_enreg%nproc_kpt=1
1015  mpi_enreg%nproc_pert=1
1016  mpi_enreg%nproc_spinor=1
1017  mpi_enreg%nproc_wvl=1
1018  mpi_enreg%paralbd=0
1019  mpi_enreg%paral_img=0
1020  mpi_enreg%paral_hf=0
1021  mpi_enreg%paral_kgb=0
1022  mpi_enreg%paral_pert=0
1023  mpi_enreg%paral_spinor=0
1024  mpi_enreg%pw_unbal_thresh=-1._dp
1025 
1026 !Set default seq values for communicators
1027  mpi_enreg%comm_world          = xmpi_world
1028  mpi_enreg%comm_atom           = xmpi_comm_self
1029  mpi_enreg%comm_band           = xmpi_comm_self
1030  mpi_enreg%comm_bandspinor     = xmpi_comm_self
1031  mpi_enreg%comm_bandfft        = xmpi_comm_self
1032  mpi_enreg%comm_bandspinorfft  = xmpi_comm_self
1033  mpi_enreg%comm_cell           = xmpi_comm_self
1034  mpi_enreg%comm_cell_pert      = xmpi_comm_self
1035  mpi_enreg%comm_fft            = xmpi_comm_self
1036  mpi_enreg%comm_hf             = xmpi_comm_self
1037  mpi_enreg%comm_img            = xmpi_comm_self
1038  mpi_enreg%comm_kpt            = xmpi_comm_self
1039  mpi_enreg%comm_kptband        = xmpi_comm_self
1040  mpi_enreg%comm_pert           = xmpi_comm_self
1041  mpi_enreg%comm_spinor         = xmpi_comm_self
1042  mpi_enreg%comm_spinorfft      = xmpi_comm_self
1043  mpi_enreg%comm_wvl            = xmpi_comm_self
1044 
1045 !Nullify all pointers
1046  call nullify_mpi_enreg(mpi_enreg)
1047 
1048 !Allocate and nullify distribfft datastructure
1049 ! This is not good since distribfft is not initialized here (even with 0s).
1050 ! It can be dangerous if use with no care (Valgrind might complain)
1051  ABI_DATATYPE_ALLOCATE(mpi_enreg%distribfft,)
1052 
1053  DBG_EXIT("COLL")
1054 
1055 end subroutine initmpi_seq

ABINIT/initmpi_world [ Functions ]

[ Top ] [ Functions ]

NAME

  initmpi_world

FUNCTION

  %comm_world is redefined for the number of processors on which ABINIT is launched

INPUTS

OUTPUT

PARENTS

      finddistrproc

CHILDREN

      abi_io_redirect,libpaw_write_comm_set

SOURCE

subroutine initmpi_world(mpi_enreg,nproc)


!This section has been created automatically by the script Abilint (TD).
!Do not modify the following lines by hand.
#undef ABI_FUNC
#define ABI_FUNC 'initmpi_world'
!End of the abilint section

 implicit none

!Arguments ------------------------------------
 integer, intent(in)::nproc
 type(MPI_type),intent(inout) :: mpi_enreg

!Local variables-------------------------------
!scalars
 integer :: iproc
!arrays
 integer,allocatable :: ranks(:)

! ***********************************************************************

 DBG_ENTER("COLL")

!Nothing to do when comm_world already spans exactly nproc processes
 if(nproc==mpi_enreg%nproc) return

!Restrict comm_world to the first nproc ranks of xmpi_world
 ABI_ALLOCATE(ranks,(0:nproc-1))
 ranks(0:nproc-1)=(/(iproc,iproc=0,nproc-1)/)
 mpi_enreg%comm_world=xmpi_subcomm(xmpi_world,nproc,ranks)
 ABI_DEALLOCATE(ranks)

 if(mpi_enreg%me<nproc)  then
!  Member of the new communicator: refresh rank/size and redirect I/O to it
   mpi_enreg%me=xmpi_comm_rank(mpi_enreg%comm_world)
   mpi_enreg%nproc=xmpi_comm_size(mpi_enreg%comm_world)
   call abi_io_redirect(new_io_comm=mpi_enreg%comm_world)
   call libpaw_write_comm_set(mpi_enreg%comm_world)
 else
!  Excluded process: flag it with a negative rank
   mpi_enreg%me=-1
 end if

 DBG_EXIT("COLL")

end subroutine initmpi_world

ABINIT/m_mpinfo [ Modules ]

[ Top ] [ Modules ]

NAME

 m_mpinfo

FUNCTION

COPYRIGHT

  Copyright (C) 2008-2018 ABINIT group (MT, GG, XG, FJ, AR, MB, CMartins)
  This file is distributed under the terms of the
  GNU General Public License, see ~abinit/COPYING
  or http://www.gnu.org/copyleft/gpl.txt .

PARENTS

CHILDREN

TODO

  Change the name of the datatype: (MPI_|mpi_) is a reserved keyword
  and should not be used in client code!

SOURCE

24 #if defined HAVE_CONFIG_H
25 #include "config.h"
26 #endif
27 
28 #include "abi_common.h"
29 
30 MODULE m_mpinfo
31 
32  use defs_basis
33  use m_errors
34  use m_abicore
35 #if defined HAVE_MPI2
36  use mpi
37 #endif
38  use m_xmpi
39  use m_sort
40  use m_distribfft
41 
42  use defs_abitypes,   only : MPI_type, dataset_type
43  use m_io_tools,      only : file_exists, open_file
44  use m_libpaw_tools,  only : libpaw_write_comm_set
45  use m_paral_atom,    only : get_my_natom, get_my_atmtab
46  use m_dtset,         only : get_npert_rbz
47 
48  implicit none
49 
50  private
51 
52 #if defined HAVE_MPI1 || (defined HAVE_MPI && defined FC_G95)
53  include 'mpif.h'
54 #endif
55 
56  public :: init_mpi_enreg        ! Initialise a mpi_enreg structure with dataset independent values.
57  public :: nullify_mpi_enreg     ! nullify a mpi_enreg datastructure
58  public :: destroy_mpi_enreg     ! Free memory
59  public :: copy_mpi_enreg        ! Copy a mpi_enreg datastructure into another.
60  public :: set_mpi_enreg_fft     ! Set the content of a MPI datastructure in order to call fourwf/fourdp
61  public :: unset_mpi_enreg_fft   ! Unset the content of a MPI datastructure used to call fourwf/fourdp
62  public :: ptabs_fourdp          ! Return *pointers* to the internal MPI-FFT tables used in fourdp
63  public :: ptabs_fourwf          ! Return *pointers* to the internal MPI-FFT tables used in fourwf
64  public :: mpi_distrib_is_ok     ! Check if a MPI datastructure contains number of processors
65                                  ! compatible (in terms of efficiency) with the number of spins/kpts/bands
66  public :: proc_distrb_cycle     ! Test a condition to cycle
67 
68  public :: initmpi_seq           ! Initializes the MPI information for sequential use.
69  public :: initmpi_world         ! %comm_world is redifined for the number of processors on which ABINIT is launched
70 
71  public :: initmpi_atom          ! Initializes the mpi information for parallelism over atoms (PAW).
72  public :: clnmpi_atom           ! Cleans-up the mpi information for the parallelism over atoms (PAW).
73 
74  public :: initmpi_grid          ! Initializes the MPI information for the grid
75  public :: clnmpi_grid           ! Cleans-up the mpi information for parallelism over grid (kpt/band/fft).
76 
77  public :: initmpi_img           ! Initializes the mpi information for parallelism over images of the cell (npimage>1).
78  public :: clnmpi_img            ! Cleans-up the mpi information for parallelism over images of the cell (npimage>1).
79 
80  public :: initmpi_pert          ! Creates group for Parallelization over Perturbations.
81  public :: clnmpi_pert           ! Cleans-up the mpi information for parallelization over perturbations.
82 
83  public :: initmpi_band          !  Initializes the mpi information for band parallelism (paralbd=1).
84 
85 ! Helper functions.
86  public :: pre_gather
87  public :: pre_scatter
88  public :: iwrite_fftdatar  ! Select the subset of processors that will write density/potential files.
89 
90  public :: distrb2          ! Creates the tabs of repartition of processors for sharing the jobs on k-points, spins and bands.
91  public :: distrb2_hf       ! Ceate the tabs of repartition for Hartree-Fock calculations.

ABINIT/proc_distrb_cycle [ Functions ]

[ Top ] [ Functions ]

NAME

  proc_distrb_cycle

FUNCTION

  Tests whether processor me should skip (cycle) the bands iband1..iband2 at k-point ikpt
  and spin isppol, i.e. whether none of these bands is assigned to me in the distrb table
  (isppol=-1 checks all spins; an unallocated table never cycles).

INPUTS

PARENTS

CHILDREN

SOURCE

function proc_distrb_cycle(distrb,ikpt,iband1,iband2,isppol,me)


!This section has been created automatically by the script Abilint (TD).
!Do not modify the following lines by hand.
#undef ABI_FUNC
#define ABI_FUNC 'proc_distrb_cycle'
!End of the abilint section

 implicit none

!Arguments ------------------------------------
!scalars
 integer,intent(in) :: ikpt,iband1,iband2,isppol,me
 integer,allocatable,intent(in) :: distrb(:,:,:)
 logical :: proc_distrb_cycle

! *************************************************************************

!An unallocated distribution table means no filtering: never cycle
 proc_distrb_cycle=.false.
 if (.not.allocated(distrb)) return

 if (isppol==-1) then
!  All spins considered: cycle only when me treats none of the bands
!  iband1..iband2 at this k-point, whatever the spin
   proc_distrb_cycle=all(distrb(ikpt,iband1:iband2,:)/=me)
 else
!  Cycle when me treats none of the bands iband1..iband2 for this spin
   proc_distrb_cycle=all(distrb(ikpt,iband1:iband2,isppol)/=me)
 end if

end function proc_distrb_cycle

m_mpinfo/clnmpi_atom [ Functions ]

[ Top ] [ m_mpinfo ] [ Functions ]

NAME

  clnmpi_atom

FUNCTION

  Cleans-up the mpi information for the parallelism over atoms (PAW).

SIDE EFFECTS

  mpi_enreg=information about MPI parallelization

PARENTS

      abinit

CHILDREN

SOURCE

1200 subroutine clnmpi_atom(mpi_enreg)
1201 
1202 
1203 !This section has been created automatically by the script Abilint (TD).
1204 !Do not modify the following lines by hand.
1205 #undef ABI_FUNC
1206 #define ABI_FUNC 'clnmpi_atom'
1207 !End of the abilint section
1208 
1209  implicit none
1210 
1211 !Arguments ------------------------------------
1212  type(MPI_type), intent(inout) :: mpi_enreg
1213 
1214 ! ***********************************************************************
1215 
1216  DBG_ENTER("COLL")
1217 
! Nothing to clean up in a purely sequential build/run.
1218  if (xmpi_paral==0) return
1219 
! Free the atom communicator only when it is distinct from the world
! communicator (the world communicator must never be freed here).
1220  if (mpi_enreg%comm_atom/=mpi_enreg%comm_world) then
1221    call xmpi_comm_free(mpi_enreg%comm_atom)
1222    mpi_enreg%comm_atom=xmpi_comm_null
1223  end if
1224 
1225  if(associated(mpi_enreg%my_atmtab)) then
1226    ABI_DEALLOCATE(mpi_enreg%my_atmtab)
1227  end if
! NOTE(review): my_atmtab is not nullified after deallocation here, whereas
! destroy_mpi_enreg does nullify it. Confirm ABI_DEALLOCATE nullifies pointer
! arguments; otherwise a later associated() test sees a dangling pointer.
1228 
! Reset the scalar atom-parallelism state to sequential defaults.
1229  mpi_enreg%nproc_atom=1
1230  mpi_enreg%my_natom=0 ! should be natom
1231 
1232  DBG_EXIT("COLL")
1233 
1234 end subroutine clnmpi_atom

m_mpinfo/clnmpi_grid [ Functions ]

[ Top ] [ m_mpinfo ] [ Functions ]

NAME

  clnmpi_grid

FUNCTION

  Cleans-up the mpi information for parallelism over grid (kpt/band/fft).

SIDE EFFECTS

  mpi_enreg=information about MPI parallelization

PARENTS

      abinit

CHILDREN

SOURCE

1551 subroutine clnmpi_grid(mpi_enreg)
1552 
1553 
1554 !This section has been created automatically by the script Abilint (TD).
1555 !Do not modify the following lines by hand.
1556 #undef ABI_FUNC
1557 #define ABI_FUNC 'clnmpi_grid'
1558 !End of the abilint section
1559 
1560  implicit none
1561 
1562 !Arguments ------------------------------------
1563  type(MPI_type), intent(inout) :: mpi_enreg
1564 
1565 ! ***********************************************************************
1566 
1567  DBG_ENTER("COLL")
1568 
! Nothing to clean up in a purely sequential build/run.
1569  if (xmpi_paral==0) return
1570 
! Free every grid-related communicator (composite communicators first,
! elementary ones last). Each is freed only if distinct from comm_world,
! and is reset to the null communicator afterwards so later tests are safe.
1571  if (mpi_enreg%comm_bandspinorfft/=mpi_enreg%comm_world) then
1572    call xmpi_comm_free(mpi_enreg%comm_bandspinorfft)
1573    mpi_enreg%comm_bandspinorfft=xmpi_comm_null
1574  end if
1575 
1576  if (mpi_enreg%comm_bandfft/=mpi_enreg%comm_world) then
1577    call xmpi_comm_free(mpi_enreg%comm_bandfft)
1578    mpi_enreg%comm_bandfft=xmpi_comm_null
1579  end if
1580 
1581  if (mpi_enreg%comm_spinorfft/=mpi_enreg%comm_world) then
1582    call xmpi_comm_free(mpi_enreg%comm_spinorfft)
1583    mpi_enreg%comm_spinorfft=xmpi_comm_null
1584  end if
1585 
1586  if (mpi_enreg%comm_bandspinor/=mpi_enreg%comm_world) then
1587    call xmpi_comm_free(mpi_enreg%comm_bandspinor)
1588    mpi_enreg%comm_bandspinor=xmpi_comm_null
1589  end if
1590 
1591  if (mpi_enreg%comm_kptband/=mpi_enreg%comm_world) then
1592    call xmpi_comm_free(mpi_enreg%comm_kptband)
1593    mpi_enreg%comm_kptband=xmpi_comm_null
1594  end if
1595 
1596  if (mpi_enreg%comm_fft/=mpi_enreg%comm_world) then
1597    call xmpi_comm_free(mpi_enreg%comm_fft)
1598    mpi_enreg%comm_fft=xmpi_comm_null
1599  end if
1600 
1601  if (mpi_enreg%comm_band/=mpi_enreg%comm_world) then
1602    call xmpi_comm_free(mpi_enreg%comm_band)
1603    mpi_enreg%comm_band=xmpi_comm_null
1604  end if
1605 
1606  if (mpi_enreg%comm_spinor/=mpi_enreg%comm_world) then
1607    call xmpi_comm_free(mpi_enreg%comm_spinor)
1608    mpi_enreg%comm_spinor=xmpi_comm_null
1609  end if
1610 
1611  if (mpi_enreg%comm_kpt/=mpi_enreg%comm_world) then
1612    call xmpi_comm_free(mpi_enreg%comm_kpt)
1613    mpi_enreg%comm_kpt=xmpi_comm_null
1614  end if
1615 
1616  DBG_EXIT("COLL")
1617 
1618 end subroutine clnmpi_grid

m_mpinfo/clnmpi_img [ Functions ]

[ Top ] [ m_mpinfo ] [ Functions ]

NAME

  clnmpi_img

FUNCTION

  Cleans-up the mpi information for parallelism over images of the cell (npimage>1).

PARENTS

      abinit

CHILDREN

SOURCE

1968 subroutine clnmpi_img(mpi_enreg)
1969 
1970 
1971 !This section has been created automatically by the script Abilint (TD).
1972 !Do not modify the following lines by hand.
1973 #undef ABI_FUNC
1974 #define ABI_FUNC 'clnmpi_img'
1975 !End of the abilint section
1976 
1977  implicit none
1978 
1979 !Arguments ------------------------------------
1980  type(MPI_type), intent(inout) :: mpi_enreg
1981 
1982 ! ***********************************************************************
1983 
1984  DBG_ENTER("COLL")
1985 
! Nothing to clean up in a purely sequential build/run.
1986  if (xmpi_paral==0) return
1987 
! Free the cell and image communicators when distinct from comm_world,
! resetting each to the null communicator afterwards.
1988  if (mpi_enreg%comm_cell/=mpi_enreg%comm_world) then
1989    call xmpi_comm_free(mpi_enreg%comm_cell)
1990    mpi_enreg%comm_cell=xmpi_comm_null
1991  end if
1992 
1993  if (mpi_enreg%comm_img/=mpi_enreg%comm_world) then
1994    call xmpi_comm_free(mpi_enreg%comm_img)
1995    mpi_enreg%comm_img=xmpi_comm_null
1996  end if
1997 
! Release the image index/distribution tables if they were allocated.
1998  if (allocated(mpi_enreg%my_imgtab))  then
1999    ABI_DEALLOCATE(mpi_enreg%my_imgtab)
2000  end if
2001  if (allocated(mpi_enreg%distrb_img))  then
2002    ABI_DEALLOCATE(mpi_enreg%distrb_img)
2003  end if
2004 
! Restore the image-parallelism scalars to their sequential defaults.
2005  mpi_enreg%paral_img=0
2006  mpi_enreg%my_nimage=1
2007  mpi_enreg%me_img=0
2008  mpi_enreg%me_cell=0
2009  mpi_enreg%nproc_img=1
2010  mpi_enreg%nproc_cell=1
2011 
2012  DBG_EXIT("COLL")
2013 
2014 end subroutine clnmpi_img

m_mpinfo/clnmpi_pert [ Functions ]

[ Top ] [ m_mpinfo ] [ Functions ]

NAME

  clnmpi_pert

FUNCTION

  Cleans-up the mpi information for parallelization over perturbations.

INPUTS

PARENTS

      abinit

CHILDREN

SOURCE

2157 subroutine clnmpi_pert(mpi_enreg)
2158 
2159 
2160 !This section has been created automatically by the script Abilint (TD).
2161 !Do not modify the following lines by hand.
2162 #undef ABI_FUNC
2163 #define ABI_FUNC 'clnmpi_pert'
2164 !End of the abilint section
2165 
2166  implicit none
2167 
2168 !Arguments ------------------------------------
2169  type(MPI_type),intent(inout) :: mpi_enreg
2170 
2171 ! ***********************************************************************
2172 
2173  DBG_ENTER("COLL")
2174 
! Nothing to clean up in a purely sequential build/run.
2175  if (xmpi_paral==0) return
2176 
! Only act when parallelization over perturbations was actually enabled.
2177  if(mpi_enreg%paral_pert == 1) then
2178 
2179    !  Reset communicators
2180    if (mpi_enreg%comm_pert/=mpi_enreg%comm_world) then
2181      call xmpi_comm_free(mpi_enreg%comm_pert)
2182      mpi_enreg%comm_pert=xmpi_comm_null
2183    end if
2184 
2185    if (allocated(mpi_enreg%distrb_pert))  then
2186      ABI_DEALLOCATE(mpi_enreg%distrb_pert)
2187    end if
2188 
! Reset both perturbation and cell scalars to sequential defaults
! (the cell values are tied to the perturbation grouping).
2189    mpi_enreg%me_pert=0
2190    mpi_enreg%me_cell=0
2191    mpi_enreg%nproc_pert=1
2192    mpi_enreg%nproc_cell=1
2193  end if
2194 
2195  DBG_EXIT("COLL")
2196 
2197 end subroutine clnmpi_pert

m_mpinfo/copy_mpi_enreg [ Functions ]

[ Top ] [ m_mpinfo ] [ Functions ]

NAME

 copy_mpi_enreg

FUNCTION

  Copy a mpi_enreg datastructure into another

INPUTS

  MPI_enreg1<MPI_type>=input mpi_enreg datastructure

OUTPUT

  MPI_enreg2<MPI_type>=output mpi_enreg datastructure

PARENTS

      gwls_hamiltonian,inwffil,m_fft_prof,m_fock,m_wfd

CHILDREN

SOURCE

315 subroutine copy_mpi_enreg(MPI_enreg1,MPI_enreg2)
316 
317 
318 !This section has been created automatically by the script Abilint (TD).
319 !Do not modify the following lines by hand.
320 #undef ABI_FUNC
321 #define ABI_FUNC 'copy_mpi_enreg'
322 !End of the abilint section
323 
324  implicit none
325 
326 !Arguments ------------------------------------
327 !scalars
328  type(MPI_type),intent(in) :: mpi_enreg1
329  type(MPI_type),intent(out) :: MPI_enreg2
330 
! NOTE(review): MPI_enreg2 is intent(out), so its allocatable components are
! deallocated on entry; confirm pointer components of MPI_type are
! default-initialized (=> null()) so the associated() tests below are safe.
331 !Local variables-------------------------------
332 !scalars
333  integer :: sz1,sz2,sz3
334 
335 ! *********************************************************************
336 
! Scalars are copied field by field; communicator HANDLES are copied as
! plain integers (the communicators themselves are not duplicated).
337 !scalars
338  mpi_enreg2%comm_world=mpi_enreg1%comm_world
339  mpi_enreg2%me=mpi_enreg1%me
340  mpi_enreg2%nproc=mpi_enreg1%nproc
341  mpi_enreg2%paral_spinor=mpi_enreg1%paral_spinor
342  mpi_enreg2%paralbd=mpi_enreg1%paralbd
343  mpi_enreg2%me_fft=mpi_enreg1%me_fft
344  mpi_enreg2%me_band=mpi_enreg1%me_band
345  mpi_enreg2%nproc_fft=mpi_enreg1%nproc_fft
346  mpi_enreg2%paral_kgb=mpi_enreg1%paral_kgb
347  mpi_enreg2%me_g0=mpi_enreg1%me_g0
348  mpi_enreg2%paral_pert=mpi_enreg1%paral_pert
349  mpi_enreg2%me_pert=mpi_enreg1%me_pert
350  mpi_enreg2%nproc_pert=mpi_enreg1%nproc_pert
351  mpi_enreg2%comm_pert=mpi_enreg1%comm_pert
352  mpi_enreg2%comm_bandfft=mpi_enreg1%comm_bandfft
353  mpi_enreg2%comm_band=mpi_enreg1%comm_band
354  mpi_enreg2%comm_fft=mpi_enreg1%comm_fft
355  mpi_enreg2%nproc_band=mpi_enreg1%nproc_band
356  mpi_enreg2%comm_bandspinorfft=mpi_enreg1%comm_bandspinorfft
357  mpi_enreg2%comm_kpt=mpi_enreg1%comm_kpt
358  mpi_enreg2%me_kpt=mpi_enreg1%me_kpt
359  mpi_enreg2%nproc_kpt=mpi_enreg1%nproc_kpt
360  mpi_enreg2%my_isppoltab=mpi_enreg1%my_isppoltab
361  mpi_enreg2%my_natom=mpi_enreg1%my_natom
362  mpi_enreg2%comm_atom=mpi_enreg1%comm_atom
363  mpi_enreg2%nproc_atom=mpi_enreg1%nproc_atom
364  mpi_enreg2%comm_kptband=mpi_enreg1%comm_kptband
365  mpi_enreg2%bandpp=mpi_enreg1%bandpp
366  mpi_enreg2%paral_img=mpi_enreg1%paral_img
367  mpi_enreg2%comm_img=mpi_enreg1%comm_img
368  mpi_enreg2%me_img=mpi_enreg1%me_img
369  mpi_enreg2%nproc_img=mpi_enreg1%nproc_img
370  mpi_enreg2%comm_cell=mpi_enreg1%comm_cell
371  mpi_enreg2%comm_cell_pert=mpi_enreg1%comm_cell_pert
372  mpi_enreg2%me_cell=mpi_enreg1%me_cell
373  mpi_enreg2%nproc_cell=mpi_enreg1%nproc_cell
374  mpi_enreg2%nproc_spinor=mpi_enreg1%nproc_spinor
375  mpi_enreg2%me_spinor=mpi_enreg1%me_spinor
376  mpi_enreg2%comm_spinorfft=mpi_enreg1%comm_spinorfft
377  mpi_enreg2%me_wvl      =mpi_enreg1%me_wvl
378  mpi_enreg2%nproc_wvl   =mpi_enreg1%nproc_wvl
379  mpi_enreg2%comm_wvl    =mpi_enreg1%comm_wvl
380  mpi_enreg2%me_hf      =mpi_enreg1%me_hf
381  mpi_enreg2%nproc_hf   =mpi_enreg1%nproc_hf
382  mpi_enreg2%comm_hf    =mpi_enreg1%comm_hf
383  mpi_enreg2%paral_hf=mpi_enreg1%paral_hf
384  mpi_enreg2%pw_unbal_thresh=mpi_enreg1%pw_unbal_thresh
385 
! Deep-copy the FFT distribution object, allocating the target first
! if it has not been associated yet.
386 !pointers
387  if (associated(mpi_enreg1%distribfft)) then
388    if (.not.associated(mpi_enreg2%distribfft)) then
389      ABI_DATATYPE_ALLOCATE(mpi_enreg2%distribfft,)
390    end if
391    call copy_distribfft(mpi_enreg1%distribfft,mpi_enreg2%distribfft)
392  end if
393 
! Deep-copy each allocatable table that is allocated in the source,
! allocating the destination to the same shape first.
394  if (allocated(mpi_enreg1%proc_distrb)) then
395    sz1=size(mpi_enreg1%proc_distrb,1)
396    sz2=size(mpi_enreg1%proc_distrb,2)
397    sz3=size(mpi_enreg1%proc_distrb,3)
398    ABI_ALLOCATE(mpi_enreg2%proc_distrb,(sz1,sz2,sz3))
399    mpi_enreg2%proc_distrb=mpi_enreg1%proc_distrb
400  end if
401  if (allocated(mpi_enreg1%kptdstrb)) then
402    sz1=size(mpi_enreg1%kptdstrb,1)
403    sz2=size(mpi_enreg1%kptdstrb,2)
404    sz3=size(mpi_enreg1%kptdstrb,3)
405    ABI_ALLOCATE(mpi_enreg2%kptdstrb,(sz1,sz2,sz3))
406    mpi_enreg2%kptdstrb=mpi_enreg1%kptdstrb
407  end if
! kpt_loc2fbz_sp/kpt_loc2ibz_sp carry a 0-based first dimension: sz1 is the
! size minus one, and the allocation restores the (0:sz1,...) bounds.
408  if (allocated(mpi_enreg1%kpt_loc2fbz_sp)) then
409    sz1=size(mpi_enreg1%kpt_loc2fbz_sp,1)-1
410    sz2=size(mpi_enreg1%kpt_loc2fbz_sp,2)
411    sz3=size(mpi_enreg1%kpt_loc2fbz_sp,3)
412    ABI_ALLOCATE(mpi_enreg2%kpt_loc2fbz_sp,(0:sz1,1:sz2,1:sz3))
413    mpi_enreg2%kpt_loc2fbz_sp=mpi_enreg1%kpt_loc2fbz_sp
414  end if
415  if (allocated(mpi_enreg1%kpt_loc2ibz_sp)) then
416    sz1=size(mpi_enreg1%kpt_loc2ibz_sp,1)-1
417    sz2=size(mpi_enreg1%kpt_loc2ibz_sp,2)
418    sz3=size(mpi_enreg1%kpt_loc2ibz_sp,3)
419    ABI_ALLOCATE(mpi_enreg2%kpt_loc2ibz_sp,(0:sz1,1:sz2,1:sz3))
420    mpi_enreg2%kpt_loc2ibz_sp=mpi_enreg1%kpt_loc2ibz_sp
421  end if
422  if (allocated(mpi_enreg1%mkmem)) then
423    ABI_ALLOCATE(mpi_enreg2%mkmem,(0:size(mpi_enreg1%mkmem,1)-1))
424    mpi_enreg2%mkmem=mpi_enreg1%mkmem
425  end if
! my_atmtab is a pointer: copied when associated, else explicitly nullified.
426  if (associated(mpi_enreg1%my_atmtab)) then
427    ABI_ALLOCATE(mpi_enreg2%my_atmtab,(size(mpi_enreg1%my_atmtab)))
428    mpi_enreg2%my_atmtab=mpi_enreg1%my_atmtab
429  else
430    nullify(mpi_enreg2%my_atmtab)
431  end if
432  if (allocated(mpi_enreg1%my_kgtab)) then
433    sz1=size(mpi_enreg1%my_kgtab,1)
434    sz2=size(mpi_enreg1%my_kgtab,2)
435    ABI_ALLOCATE(mpi_enreg2%my_kgtab,(sz1,sz2))
436    mpi_enreg2%my_kgtab=mpi_enreg1%my_kgtab
437  end if
438  if (allocated(mpi_enreg1%distrb_pert)) then
439    ABI_ALLOCATE(mpi_enreg2%distrb_pert,(size(mpi_enreg1%distrb_pert)))
440    mpi_enreg2%distrb_pert=mpi_enreg1%distrb_pert
441  end if
442  if (allocated(mpi_enreg1%distrb_img)) then
443    ABI_ALLOCATE(mpi_enreg2%distrb_img,(size(mpi_enreg1%distrb_img)))
444    mpi_enreg2%distrb_img=mpi_enreg1%distrb_img
445  end if
446  if (allocated(mpi_enreg1%my_imgtab)) then
447    ABI_ALLOCATE(mpi_enreg2%my_imgtab,(size(mpi_enreg1%my_imgtab)))
448    mpi_enreg2%my_imgtab=mpi_enreg1%my_imgtab
449  end if
450  if (allocated(mpi_enreg1%distrb_hf)) then
451    sz1=size(mpi_enreg1%distrb_hf,1)
452    sz2=size(mpi_enreg1%distrb_hf,2)
453    sz3=size(mpi_enreg1%distrb_hf,3)
454    ABI_ALLOCATE(mpi_enreg2%distrb_hf,(sz1,sz2,sz3))
455    mpi_enreg2%distrb_hf=mpi_enreg1%distrb_hf
456  end if
457 
458 !Optional pointers
459  if (allocated(mpi_enreg1%my_kpttab)) then
460    ABI_ALLOCATE(mpi_enreg2%my_kpttab,(size(mpi_enreg1%my_kpttab)))
461    mpi_enreg2%my_kpttab=mpi_enreg1%my_kpttab
462  end if
463 
! Wavelet arrays stay shared between source and copy (shallow association).
464 !Do not copy wavelet pointers, just associate.
465  mpi_enreg2%nscatterarr => mpi_enreg1%nscatterarr
466  mpi_enreg2%ngatherarr => mpi_enreg1%ngatherarr
467 
468 end subroutine copy_mpi_enreg

m_mpinfo/destroy_mpi_enreg [ Functions ]

[ Top ] [ m_mpinfo ] [ Functions ]

NAME

 destroy_mpi_enreg

FUNCTION

  Destroy a mpi_enreg datastructure

SIDE EFFECTS

  MPI_enreg<MPI_type>=Datatype gathering information on the parallelism.

PARENTS

      abinit,bethe_salpeter,bsepostproc,calc_vhxc_me,conducti,cut3d
      debug_tools,dfpt_nstpaw,dieltcel,eph,fftprof,gwls_hamiltonian,inwffil
      ks_ddiago,lapackprof,linear_optics_paw,m_cut3d,m_dvdb,m_fft,m_fft_prof
      m_fftcore,m_fock,m_gsphere,m_hamiltonian,m_ioarr,m_kxc,m_pawpwij
      m_ppmodel,m_screening,m_wfd,m_wfk,mlwfovlp_qp,mover_effpot,mrggkk
      mrgscr,partial_dos_fractions,posdoppler,scfcv,screening,sigma
      suscep_stat,susk,suskmm,ujdet,vdw_kernelgen,wfk_analyze

CHILDREN

SOURCE

226 subroutine destroy_mpi_enreg(MPI_enreg)
227 
228 
229 !This section has been created automatically by the script Abilint (TD).
230 !Do not modify the following lines by hand.
231 #undef ABI_FUNC
232 #define ABI_FUNC 'destroy_mpi_enreg'
233 !End of the abilint section
234 
235  implicit none
236 
237 !Arguments ------------------------------------
238 !scalars
239  type(MPI_type),intent(inout) :: MPI_enreg
240 
241 ! *********************************************************************
242 
! The FFT distribution object is destroyed, deallocated and nullified
! (it is a pointer component, unlike the allocatable tables below).
243  if (associated(mpi_enreg%distribfft)) then
244    call destroy_distribfft(mpi_enreg%distribfft)
245    ABI_DATATYPE_DEALLOCATE(mpi_enreg%distribfft)
246    nullify(mpi_enreg%distribfft)
247  end if
248 
! Release every allocatable table if present; the if-guards make this
! routine safe to call on a partially initialized structure.
249  if (allocated(mpi_enreg%proc_distrb)) then
250    ABI_DEALLOCATE(mpi_enreg%proc_distrb)
251  end if
252  if (allocated(mpi_enreg%kptdstrb)) then
253    ABI_DEALLOCATE(mpi_enreg%kptdstrb)
254  end if
255  if (allocated(mpi_enreg%kpt_loc2fbz_sp)) then
256    ABI_DEALLOCATE(mpi_enreg%kpt_loc2fbz_sp)
257  end if
258  if (allocated(mpi_enreg%kpt_loc2ibz_sp)) then
259    ABI_DEALLOCATE(mpi_enreg%kpt_loc2ibz_sp)
260  end if
261  if (allocated(mpi_enreg%mkmem)) then
262    ABI_DEALLOCATE(mpi_enreg%mkmem)
263  end if
264  if (allocated(mpi_enreg%my_kpttab)) then
265    ABI_DEALLOCATE(mpi_enreg%my_kpttab)
266  end if
! my_atmtab is a pointer, hence the associated() test and explicit nullify.
267  if (associated(mpi_enreg%my_atmtab)) then
268    ABI_DEALLOCATE(mpi_enreg%my_atmtab)
269    nullify(mpi_enreg%my_atmtab)
270  end if
271  if (allocated(mpi_enreg%distrb_pert)) then
272    ABI_DEALLOCATE(mpi_enreg%distrb_pert)
273  end if
274  if (allocated(mpi_enreg%distrb_img)) then
275    ABI_DEALLOCATE(mpi_enreg%distrb_img)
276  end if
277  if (allocated(mpi_enreg%my_imgtab)) then
278    ABI_DEALLOCATE(mpi_enreg%my_imgtab)
279  end if
280  if (allocated(mpi_enreg%my_kgtab)) then
281    ABI_DEALLOCATE(mpi_enreg%my_kgtab)
282  end if
283  if (allocated(mpi_enreg%distrb_hf)) then
284    ABI_DEALLOCATE(mpi_enreg%distrb_hf)
285  end if
286 
287 !Do not deallocate wavelet denspot distribution arrays,
288 !they are handled by BigDFT.
289 
290 end subroutine destroy_mpi_enreg

m_mpinfo/init_mpi_enreg [ Functions ]

[ Top ] [ m_mpinfo ] [ Functions ]

NAME

 init_mpi_enreg

FUNCTION

  Initialise a mpi_enreg structure with dataset independent values.
  Other values of mpi_enreg are dataset dependent, and should NOT be initialized
  inside abinit.F90 .
  XG 071118 : At present several other values are
  initialized temporarily inside invars1.F90, FROM THE DTSET
  VALUES. In order to relieve the present constraint of having mpi_enreg
  equal for all datasets, they should be reinitialized from the dtset values
  inside invars2m.F90 (where there is a loop over datasets, and finally,
  reinitialized from the dataset values inside each big routine called by driver,
  according to the kind of parallelisation that is needed there.
  One should have one init_mpi_dtset routine (or another name) per big routine (well, there is also
  the problem of TDDFT ...). Also, one should have a clean_mpi_dtset called at the end
  of each big routine, as well as invars1.F90 or invars2m.F90 .

INPUTS

SIDE EFFECTS

  MPI_enreg<MPI_type>=All pointer set to null().

PARENTS

      lapackprof,mpi_setup

CHILDREN

SOURCE

128 subroutine init_mpi_enreg(mpi_enreg)
129 
130 
131 !This section has been created automatically by the script Abilint (TD).
132 !Do not modify the following lines by hand.
133 #undef ABI_FUNC
134 #define ABI_FUNC 'init_mpi_enreg'
135 !End of the abilint section
136 
137  implicit none
138 
139 !Arguments ------------------------------------
140 !scalars
141  type(MPI_type),intent(inout) :: MPI_enreg
142 
143 ! *********************************************************************
144 
! Start from the sequential defaults; in a non-MPI build these defaults
! are the final state, since the #ifdef block below is compiled out.
145 !Default for sequential use
146  call initmpi_seq(mpi_enreg)
! Override world communicator, rank and size from the real MPI world.
147 !Initialize MPI
148 #if defined HAVE_MPI
149  mpi_enreg%comm_world=xmpi_world
150  mpi_enreg%me = xmpi_comm_rank(xmpi_world)
151  mpi_enreg%nproc = xmpi_comm_size(xmpi_world)
152 #endif
153 
154 end subroutine init_mpi_enreg

m_mpinfo/iwrite_fftdatar [ Functions ]

[ Top ] [ m_mpinfo ] [ Functions ]

NAME

  iwrite_fftdatar

FUNCTION

  This function selects the subset of processors that should write density/potential
  Return True if the processors should do IO.

INPUTS

  mpi_enreg<MPI_type>=Datatype gathering information on the parallelism

PARENTS

CHILDREN

SOURCE

2425 logical function iwrite_fftdatar(mpi_enreg) result(ans)
2426 
2427 
2428 !This section has been created automatically by the script Abilint (TD).
2429 !Do not modify the following lines by hand.
2430 #undef ABI_FUNC
2431 #define ABI_FUNC 'iwrite_fftdatar'
2432 !End of the abilint section
2433 
2434  implicit none
2435 
2436 !Arguments ------------------------------------
2437 !scalars
2438  type(MPI_type),intent(in) :: mpi_enreg
2439 
2440 ! *********************************************************************
2441 
! True when this process belongs to the subset allowed to write
! density/potential files; the four OR-ed cases are annotated inline.
2442  ans = (xmpi_paral==0 .or. &                                  ! No MPI
2443   (mpi_enreg%paral_kgb==0 .and. mpi_enreg%me==0) .or. &       ! paral_kgb=0 does not use MPI-FFT and cartesian communicators.
2444   (mpi_enreg%paral_kgb==1 .and. mpi_enreg%me_band==0 .and. &  ! select procs in one FFT communicator.
2445   mpi_enreg%me_kpt==0 .and. mpi_enreg%me_spinor==0) .or.   &
2446   (mpi_enreg%paral_pert==1 .and. mpi_enreg%me_cell==0) & ! Group master in perturbation communicator.
2447   )
2448 
2449 end function iwrite_fftdatar

m_mpinfo/mpi_distrib_is_ok [ Functions ]

[ Top ] [ m_mpinfo ] [ Functions ]

NAME

  mpi_distrib_is_ok

FUNCTION

  Check if a MPI datastructure contains number of processors
  compatible (in terms of efficiency) with the number of spins/k-points/bands

INPUTS

  MPI_enreg<MPI_type>=Datatype gathering information on the parallelism
  nband=number of bands
  nkpt=number of k-points
  nptk_current_proc=number of k-points handled by current MPI process
  nsppol= number of spins (1 or 2)

OUTPUT

  mpi_distrib_is_ok (current function)=TRUE if the current MPI distribution is optimal
                                       FALSE otherwise
  [msg]= -optional- warning message to be printed out

PARENTS

  driver,mpi_setup

CHILDREN

SOURCE

786 logical function mpi_distrib_is_ok(MPI_enreg,nband,nkpt,nkpt_current_proc,nsppol,msg)
787 
788 
789 !This section has been created automatically by the script Abilint (TD).
790 !Do not modify the following lines by hand.
791 #undef ABI_FUNC
792 #define ABI_FUNC 'mpi_distrib_is_ok'
793 !End of the abilint section
794 
795  implicit none
796 
797 !Arguments ------------------------------------
798 !scalars
799  integer,intent(in) :: nband,nkpt,nkpt_current_proc,nsppol
800  type(MPI_type),intent(in) :: MPI_enreg
801  character(len=*),optional,intent(out) :: msg
802 
803 ! *********************************************************************
804 
! Optimistic default; each branch below only ever downgrades it.
805  mpi_distrib_is_ok=.true.
806 
! Without band parallelism: heuristic test that nproc_kpt is not so large
! that some processors end up with no k-point at all.
807  if (MPI_enreg%paralbd==0) then
808    if (MPI_enreg%nproc_kpt-floor(nsppol*nkpt*one/nkpt_current_proc)>=nkpt_current_proc) then
809      mpi_distrib_is_ok=.false.
810      if (present(msg)) then
811        write(msg,'(a,i0,4a,i0,3a)') &
812 &        'Your number of spins*k-points (=',nsppol*nkpt,') ',&
813 &        'will not distribute correctly',ch10, &
814 &        'with the current number of processors (=',MPI_enreg%nproc_kpt,').',ch10,&
815 &        'You will leave some empty.'
816      end if
817    end if
818  else
! With band parallelism: the band count must divide evenly among the
! processors assigned per (spin, k-point) pair.
819    if (mod(nband,max(1,MPI_enreg%nproc_kpt/(nsppol*nkpt)))/=0) then
820      mpi_distrib_is_ok=.false.
821      if (present(msg)) then
822        write(msg,'(a,i0,2a,i0,4a,i0,7a)')&
823 &        'Your number of spins*k-points (=',nsppol*nkpt,') ',&
824 &         'and bands (=',nband,') ',&
825 &         'will not distribute correctly',ch10,&
826 &         'with the current number of processors (=',MPI_enreg%nproc_kpt,').',ch10,&
827 &         'You will leave some empty.'
828      end if
829    end if
830  end if
831 
832 end function mpi_distrib_is_ok

m_mpinfo/nullify_mpi_enreg [ Functions ]

[ Top ] [ m_mpinfo ] [ Functions ]

NAME

 nullify_mpi_enreg

FUNCTION

  nullify a mpi_enreg datastructure

SIDE EFFECTS

  MPI_enreg<MPI_type>=All pointer set to null().

PARENTS

      initmpi_seq,m_fft_prof,m_wfd

CHILDREN

SOURCE

176 subroutine nullify_mpi_enreg(MPI_enreg)
177 
178 
179 !This section has been created automatically by the script Abilint (TD).
180 !Do not modify the following lines by hand.
181 #undef ABI_FUNC
182 #define ABI_FUNC 'nullify_mpi_enreg'
183 !End of the abilint section
184 
185  implicit none
186 
187 !Arguments ------------------------------------
188 !scalars
189  type(MPI_type),intent(inout) :: MPI_enreg
190 
191 ! *********************************************************************
192 
! Disassociate all pointer components so associated() tests are
! well-defined before any allocation has taken place.
193  nullify(mpi_enreg%nscatterarr)
194  nullify(mpi_enreg%ngatherarr)
195  nullify(mpi_enreg%my_atmtab)
196  nullify(mpi_enreg%distribfft)
197 
198  end subroutine nullify_mpi_enreg

m_mpinfo/pre_gather [ Functions ]

[ Top ] [ m_mpinfo ] [ Functions ]

NAME

  pre_gather

FUNCTION

  Gathers data from FFT processors.

INPUTS

  n1,n2,n3= FFT grid dimensions
  n4= n3/mpi_enreg%nproc_fft
  array= data to gather among procs

OUTPUT

  None

SIDE EFFECTS

  array_allgather= gathered data

PARENTS

      fresid

CHILDREN

SOURCE

2329 subroutine pre_gather(array,array_allgather,n1,n2,n3,n4,mpi_enreg)
2330 
2331 
2332 !This section has been created automatically by the script Abilint (TD).
2333 !Do not modify the following lines by hand.
2334 #undef ABI_FUNC
2335 #define ABI_FUNC 'pre_gather'
2336 !End of the abilint section
2337 
2338  implicit none
2339 
2340 !Arguments ------------------------------------
2341  integer,intent(in) :: n1,n2,n3,n4
2342  real(dp),intent(in) :: array(n1,n2,n4,1)
2343  real(dp),intent(inout) :: array_allgather(n1,n2,n3,1)
2344  type(mpi_type),intent(in) :: mpi_enreg
2345 
2346 !Local variables-------------------------------
2347  integer :: ier
2348 
2349 ! *********************************************************************
2350 
! The per-process send count n1*n2*n3/nproc_fft equals n1*n2*n4 given the
! documented relation n4 = n3/mpi_enreg%nproc_fft; the gather therefore
! reassembles the full (n1,n2,n3) grid on every FFT processor.
2351 !Gather the array on all procs
2352  call xmpi_allgather(array,n1*n2*n3/mpi_enreg%nproc_fft,array_allgather,mpi_enreg%comm_fft,ier)
2353 
2354 end subroutine pre_gather

m_mpinfo/pre_scatter [ Functions ]

[ Top ] [ m_mpinfo ] [ Functions ]

NAME

  pre_scatter

FUNCTION

  Scatters data among FFT processors.

INPUTS

  n1,n2,n3= FFT grid dimensions
  n4= n3/mpi_enreg%nproc_fft
  array_allgather= data to scatter among FFT procs

OUTPUT

  array= scattered data

PARENTS

      fresid

CHILDREN

SOURCE

2382 subroutine pre_scatter(array,array_allgather,n1,n2,n3,n4,mpi_enreg)
2383 
2384 
2385 !This section has been created automatically by the script Abilint (TD).
2386 !Do not modify the following lines by hand.
2387 #undef ABI_FUNC
2388 #define ABI_FUNC 'pre_scatter'
2389 !End of the abilint section
2390 
2391  implicit none
2392 
2393 !Arguments ------------------------------------
2394  integer,intent(in) :: n1,n2,n3,n4
2395  real(dp),intent(out) :: array(n1,n2,n4,1)
2396  real(dp),intent(in) :: array_allgather(n1,n2,n3,1)
2397  type(mpi_type),intent(in) :: mpi_enreg
2398 
2399 ! *********************************************************************
2400 
! Each FFT processor extracts its own contiguous slab of n3/nproc_fft
! planes along the third dimension, selected by its rank me_fft; this is
! purely local (no communication), inverting pre_gather.
2401 !Perform the reverse operation
2402  array(:,:,:,:) = &
2403 &  array_allgather(:,:,n3/mpi_enreg%nproc_fft*mpi_enreg%me_fft+1:n3/mpi_enreg%nproc_fft*(mpi_enreg%me_fft+1),:)
2404 
2405 end subroutine pre_scatter

m_mpinfo/ptabs_fourdp [ Functions ]

[ Top ] [ m_mpinfo ] [ Functions ]

NAME

  ptabs_fourdp

FUNCTION

  Returns pointers to the tables used for the MPI FFT of densities and potentials (fourdp routine).

NOTES

   1) These pointers are references to the internal tables stored in MPI_enreg hence
      *** DO NOT DEALLOCATE THE POINTERS YOU HAVE RECEIVED! ***

   2) Client code should declare the pointers with the attribute ABI_CONTIGUOUS
      (this macro expands to F2008 CONTIGUOUS if the compiler supports it)

INPUTS

  MPI_enreg<MPI_type>=Datatype gathering information on the parallelism.
  n2,n3=Number of FFT divisions along y and z

OUTPUT

  fftn2_distrib(:)=  rank of the processor which own fft planes in 2nd dimension for fourdp
  ffti2_local(:) = local i2 indices in fourdp
  fftn3_distrib(:) = rank of the processor which own fft planes in 3rd dimension for fourdp
  ffti3_local(:) = local i3 indices in fourdp

PARENTS

      dfpt_eltfrhar,dfpt_eltfrloc,dfpt_vlocal,fftpac,fourdp,hartre,hartrestr
      indirect_parallel_Fourier,initro,laplacian,m_fock,m_ioarr,mag_constr
      make_efg_el,mkcore,mkcore_paw,mklocl_realspace,mklocl_recipspace
      moddiel,out1dm,posdoppler,prcrskerker2,strhar,symrhg,vlocalstr,xcden
      xcpot

CHILDREN

SOURCE

615 subroutine ptabs_fourdp(MPI_enreg,n2,n3,fftn2_distrib,ffti2_local,fftn3_distrib,ffti3_local)
616 
617 
618 !This section has been created automatically by the script Abilint (TD).
619 !Do not modify the following lines by hand.
620 #undef ABI_FUNC
621 #define ABI_FUNC 'ptabs_fourdp'
622 !End of the abilint section
623 
624  implicit none
625 
626 !Arguments ------------------------------------
627 !scalars
628  integer,intent(in) :: n2,n3
629  type(MPI_type),intent(in) :: MPI_enreg
630  integer, ABI_CONTIGUOUS pointer :: fftn2_distrib(:),ffti2_local(:)
631  integer, ABI_CONTIGUOUS pointer :: fftn3_distrib(:),ffti3_local(:)
632 
633 !Local variables-------------------------------
634 !scalars
635  logical :: grid_found
636 
637 ! *********************************************************************
638 
639  grid_found=.false.
640 
! Try the coarse grid first: it matches when n2 equals the stored coarse
! dimension AND n3 equals the length of the stored 3rd-dim table.
641  ! Get the distrib associated with this fft_grid => for i2 and i3 planes
642  grid_found=.false.
643  if (n2== mpi_enreg%distribfft%n2_coarse) then
644    if( n3 == size(mpi_enreg%distribfft%tab_fftdp3_distrib) )then
645      fftn2_distrib => mpi_enreg%distribfft%tab_fftdp2_distrib
646      ffti2_local   => mpi_enreg%distribfft%tab_fftdp2_local
647      fftn3_distrib => mpi_enreg%distribfft%tab_fftdp3_distrib
648      ffti3_local   => mpi_enreg%distribfft%tab_fftdp3_local
649      grid_found=.true.
650    end if
651  end if
652 
! Otherwise try the fine ("double grid") tables, skipped if already matched.
653  if((n2 == mpi_enreg%distribfft%n2_fine).and.(.not.(grid_found))) then
654    if( n3 == size(mpi_enreg%distribfft%tab_fftdp3dg_distrib) )then
655      fftn2_distrib => mpi_enreg%distribfft%tab_fftdp2dg_distrib
656      ffti2_local   => mpi_enreg%distribfft%tab_fftdp2dg_local
657      fftn3_distrib => mpi_enreg%distribfft%tab_fftdp3dg_distrib
658      ffti3_local   => mpi_enreg%distribfft%tab_fftdp3dg_local
659      grid_found=.true.
660    end if
661  end if
662 
! No matching grid is a programming error: the caller asked for an FFT
! distribution that was never initialized.
663  if(.not.(grid_found)) then
664    MSG_BUG("Unable to find an allocated distrib for this fft grid")
665  end if
666 
667 end subroutine ptabs_fourdp

m_mpinfo/ptabs_fourwf [ Functions ]

[ Top ] [ m_mpinfo ] [ Functions ]

NAME

  ptabs_fourwf

FUNCTION

  Returns pointers to the tables used for the MPI FFT of the wavefunctions (fourwf routine).

NOTES

   1) These pointers are references to the internal tables stored in MPI_enreg hence
      *** DO NOT DEALLOCATE THE POINTERS YOU HAVE RECEIVED! ***

   2) Client code should declare the pointers with the attribute ABI_CONTIGUOUS
      (this macro expands to F2008 CONTIGUOUS if the compiler supports it)

INPUTS

  MPI_enreg<MPI_type>=Datatype gathering information on the parallelism.
  n2,n3=Number of FFT divisions along y and z

OUTPUT

  fftn2_distrib(:)=  rank of the processors which own fft planes in 2nd dimension for fourwf
  ffti2_local(:) = local i2 indices in fourwf
  fftn3_distrib(:) = rank of the processors which own fft planes in 3rd dimension for fourwf
  ffti3_local(:) = local i3 indices in fourwf

PARENTS

      fourwf

CHILDREN

SOURCE

703 subroutine ptabs_fourwf(MPI_enreg,n2,n3,fftn2_distrib,ffti2_local,fftn3_distrib,ffti3_local)
704 
705 
706 !This section has been created automatically by the script Abilint (TD).
707 !Do not modify the following lines by hand.
708 #undef ABI_FUNC
709 #define ABI_FUNC 'ptabs_fourwf'
710 !End of the abilint section
711 
712  implicit none
713 
714 !Arguments ------------------------------------
715 !scalars
716  integer,intent(in) :: n2,n3
717  type(MPI_type),intent(in) :: MPI_enreg
718  integer, ABI_CONTIGUOUS pointer :: fftn2_distrib(:),ffti2_local(:)
719  integer, ABI_CONTIGUOUS pointer :: fftn3_distrib(:),ffti3_local(:)
720 
721 !Local variables-------------------------------
722 !scalars
723  logical :: grid_found
724 
725 ! *********************************************************************
726 
727  grid_found=.false.
728 
! Coarse grid first. Note the asymmetry with ptabs_fourdp: the 2nd-dim
! tables come from the wavefunction set (tab_fftwf2_*) while the 3rd-dim
! tables are shared with the density/potential set (tab_fftdp3_*).
729  ! Get the distrib associated with this fft_grid => for i2 and i3 planes
730  if (n2 == mpi_enreg%distribfft%n2_coarse) then
731    if (n3 == size(mpi_enreg%distribfft%tab_fftdp3_distrib))then
732      fftn2_distrib => mpi_enreg%distribfft%tab_fftwf2_distrib
733      ffti2_local   => mpi_enreg%distribfft%tab_fftwf2_local
734      fftn3_distrib => mpi_enreg%distribfft%tab_fftdp3_distrib
735      ffti3_local   => mpi_enreg%distribfft%tab_fftdp3_local
736      grid_found=.true.
737    end if
738  end if
739 
! Otherwise try the fine ("double grid") tables, skipped if already matched.
740  if((n2 == mpi_enreg%distribfft%n2_fine).and.(.not.(grid_found))) then
741    if (n3 == size(mpi_enreg%distribfft%tab_fftdp3dg_distrib) )then
742      fftn2_distrib => mpi_enreg%distribfft%tab_fftwf2dg_distrib
743      ffti2_local   => mpi_enreg%distribfft%tab_fftwf2dg_local
744      fftn3_distrib => mpi_enreg%distribfft%tab_fftdp3dg_distrib
745      ffti3_local   => mpi_enreg%distribfft%tab_fftdp3dg_local
746      grid_found=.true.
747    end if
748  end if
749 
! No matching grid is a programming error: the caller asked for an FFT
! distribution that was never initialized.
750  if(.not.(grid_found)) then
751    MSG_BUG("Unable to find an allocated distrib for this fft grid")
752  end if
753 
754 end subroutine ptabs_fourwf

m_mpinfo/set_mpi_enreg_fft [ Functions ]

[ Top ] [ m_mpinfo ] [ Functions ]

NAME

 set_mpi_enreg_fft

FUNCTION

  Set the content of a MPI datastructure in order to call fourwf/fourdp
  (in view of a wrapper for these routines)

INPUTS

  me_g0=1 if the current process treat the g=0 plane-wave
  comm_fft= MPI communicator over FFT components
  paral_kgb= flag used to activate "band-FFT" parallelism

SIDE EFFECTS

  MPI_enreg<MPI_type>=FFT pointer/flags initialized

PARENTS

      atm2fft,dfpt_atm2fft,pawmknhat,pawmknhat_psipsi,pawsushat,posdoppler

CHILDREN

SOURCE

496 subroutine set_mpi_enreg_fft(MPI_enreg,comm_fft,distribfft,me_g0,paral_kgb)
497 
498 
499 !This section has been created automatically by the script Abilint (TD).
500 !Do not modify the following lines by hand.
501 #undef ABI_FUNC
502 #define ABI_FUNC 'set_mpi_enreg_fft'
503 !End of the abilint section
504 
505  implicit none
506 
507 !Arguments ------------------------------------
508 !scalars
509  integer,intent(in) :: me_g0,comm_fft,paral_kgb
! distribfft must carry the TARGET attribute: mpi_enreg%distribfft is pointed
! at it below (no copy), so the actual argument must outlive mpi_enreg's use of it.
510  type(distribfft_type),intent(in),target :: distribfft
511  type(MPI_type),intent(inout) :: MPI_enreg
512 
513 ! *********************************************************************
514 
! Record the FFT communicator and parallelism flags, and derive the
! process's size/rank within comm_fft.
515  mpi_enreg%comm_fft=comm_fft
516  mpi_enreg%paral_kgb=paral_kgb
517  mpi_enreg%me_g0=me_g0
518  mpi_enreg%nproc_fft=xmpi_comm_size(comm_fft)
519  mpi_enreg%me_fft=xmpi_comm_rank(comm_fft)
! If mpi_enreg currently owns a distribfft, destroy and deallocate it before
! re-pointing, to avoid leaking the old allocation.
520  if (associated(mpi_enreg%distribfft)) then
521    call destroy_distribfft(mpi_enreg%distribfft)
522    ABI_DATATYPE_DEALLOCATE(mpi_enreg%distribfft)
523  end if
! Alias (not copy) the caller-provided distribution tables.
524  mpi_enreg%distribfft => distribfft
525 
526 end subroutine set_mpi_enreg_fft

m_mpinfo/unset_mpi_enreg_fft [ Functions ]

[ Top ] [ m_mpinfo ] [ Functions ]

NAME

 unset_mpi_enreg_fft

FUNCTION

  Unset the content of a MPI datastructure used to call fourwf/fourdp
  (in view of a wrapper for these routines)

INPUTS

SIDE EFFECTS

  MPI_enreg<MPI_type>=FFT pointer/flags initialized

PARENTS

      atm2fft,dfpt_atm2fft,pawmknhat,pawmknhat_psipsi,pawsushat,posdoppler

CHILDREN

SOURCE

551 subroutine unset_mpi_enreg_fft(MPI_enreg)
552 
553 
554 !This section has been created automatically by the script Abilint (TD).
555 !Do not modify the following lines by hand.
556 #undef ABI_FUNC
557 #define ABI_FUNC 'unset_mpi_enreg_fft'
558 !End of the abilint section
559 
560  implicit none
561 
562 !Arguments ------------------------------------
563 !scalars
564  type(MPI_type),intent(inout) :: MPI_enreg
565 
566 ! *********************************************************************
567 
! Reset the FFT-related fields to sequential (single-process) defaults:
! this process owns g=0, runs alone in its own communicator, and band-FFT
! parallelism is switched off.
568  mpi_enreg%me_g0=1
569  mpi_enreg%comm_fft=xmpi_comm_self
570  mpi_enreg%nproc_fft=1
571  mpi_enreg%me_fft=0
572  mpi_enreg%paral_kgb=0
! Only nullify the pointer: the distribfft target was supplied by the caller
! (see set_mpi_enreg_fft) and is not owned here, so it must not be deallocated.
573  nullify(mpi_enreg%distribfft)
574 
575 end subroutine unset_mpi_enreg_fft