ABINIT/m_cumulant [ Modules ]


NAME

  m_cumulant

FUNCTION

  Module to compute the e-ph spectral functions within the cumulant formalism
  and, optionally, transport properties within the Kubo formalism.
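
  Schematically (a sketch of the quantities computed below, in the notation of the code),
  the retarded Green's function is built from the second-order cumulant

    G_nk(t) = -i exp( C_nk(t) ),      A_nk(w) = -Im G_nk(w) / pi,

  where C_nk(t) is constructed from beta(w) = |Im Sigma_nk(e_nk + w)| / pi.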

COPYRIGHT

  Copyright (C) 2008-2024 ABINIT group (JCA, MG)
  This file is distributed under the terms of the
  GNU General Public License, see ~abinit/COPYING
  or http://www.gnu.org/copyleft/gpl.txt .

SOURCE

17 #if defined HAVE_CONFIG_H
18 #include "config.h"
19 #endif
20 
21 #include "abi_common.h"
22 
23 module m_cumulant
24 
25  use defs_basis
26  use m_abicore
27  use m_xmpi
28  use m_errors
29  use m_ebands
30  use m_nctk
31  use m_sigmaph
32  use m_dtset
33  use m_dtfil
34  use netcdf
35 
36  use defs_datatypes,   only : ebands_t
37  use defs_abitypes,    only : MPI_type
38  use m_io_tools,       only : open_file, file_exists, is_open
39  use m_time,           only : cwtime, cwtime_report
40  use m_crystal,        only : crystal_t
41  use m_numeric_tools,  only : simpson_cplx, arth, c2r, simpson, safe_div, simpson_int, ctrap, linfit, linspace
42  use m_fstrings,       only : strcat, sjoin, itoa, ltoa, stoa, ftoa
43  use m_distribfft,     only : init_distribfft_seq
44  !use m_kpts,           only : kpts_timrev_from_kptopt
45  use m_mpinfo,         only : destroy_mpi_enreg, initmpi_seq
46  use m_fft,            only : fourdp
47  use m_fftcore,        only : ngfft_seq,fftalg_isavailable
48  use m_occ,            only : occ_fd, occ_dfde
49 
50  implicit none
51 
52  private

m_cumulant/cumulant_compute [ Functions ]


NAME

 cumulant_compute

FUNCTION

  Compute the cumulant function C(t) and the retarded Green's functions G(t) and G(w)
  for each (k-point, band, temperature, spin) treated by this MPI rank.
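
  Schematically (a sketch in the notation of the code; see the body of the routine for the
  actual discretization and FFT conventions), the cumulant is assembled from three terms

    C(t)  = C1(t) + C2(t) + C3
    C1(t) = Int dw  beta(w)/w^2 * exp(-i w t)
    C2(t) = -i Re[ Sigma_nk(e_nk) ] * t
    C3    = -Int dw  beta(w)/w^2

  with beta(w) = |Im Sigma_nk(e_nk + w)| / pi and w measured from the KS energy.
  Then G(t) = -i exp( C(t) ) and G(w) is obtained by (fast) Fourier transform.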

INPUTS

SOURCE

 780 subroutine cumulant_compute(self)
 781 
 782 !Arguments ------------------------------------
 783  class(cumulant_t),intent(inout) :: self
 784 
 785 !Local variables ------------------------------
 786  integer,parameter :: master = 0
 787  integer :: nbands, ib, ikcalc, it, iw, spin, itemp, comm
 788  integer :: nwr, nwr_ce
 789  integer :: my_rank, nprocs, my_spin, my_ik
 790  real(dp) :: time, omega, init_t, time_step, time_step_init, time_max!, wmesh_step_init
 791  real(dp) :: cpu, wall, gflops, cpu_kloop, wall_kloop, gflops_kloop
 792  real(dp) :: wr_step, Ha_fs ! wr_step_ce
 793  character(len=500) :: msg
 794 !arrays
 795  real(dp),allocatable :: temp_g(:,:,:), temp_r(:,:), temp_r_cplx(:,:), temp_g_ce(:,:,:)
 796  real(dp),allocatable :: betaoverw2(:) !, dfft(:)
 797  !complex(dpc),allocatable :: temp_reflex(:) ! betaoverw2c(:),
 798  real(dp),allocatable :: wrmesh_shifted(:), wrmesh_shifted_ce(:), beta(:), c3(:)
 799  real(dp),allocatable :: time_mesh(:), time_mesh_temp(:)
 800  real(dp) :: output_c3!, output_test2r, output_test2i
 801  real(dp) :: m_fit_re, b_fit_re, m_fit_im, b_fit_im, res_re, res_im
 802  complex(dpc),allocatable :: c1(:), ct_temp(:), c_temp(:)
 803  complex(dpc),allocatable :: c2(:), ct(:), gt(:), gw(:), g1(:)
 804  integer :: fftalg, fftalga
 805  logical :: use_fft
 806 
 807 !************************************************************************
 808 
 809 ! Initialization of parallel variables
 810  comm = self%comm
 811  my_rank = xmpi_comm_rank(comm); nprocs = xmpi_comm_size(comm)
 812 
 813  call wrtout(std_out, " Computing cumulant. This may take some time depending on the dims of the problem...")
 814  call cwtime(cpu_kloop, wall_kloop, gflops_kloop, "start")
 815 
 816  ! Initialization of variables and allocation of arrays
 817  nwr = self%nwr
 818  nwr_ce = self%nwr_ce
 819  ABI_CALLOC(c1, (nwr))
 820  ABI_CALLOC(c_temp, (nwr_ce))
 821  !ABI_CALLOC(temp_reflex, (nwr/2))
 822  ABI_CALLOC(c2, (nwr))
 823  ABI_CALLOC(c3, (nwr))
 824  ABI_CALLOC(g1, (nwr_ce))
 825  ABI_CALLOC(gw, (nwr_ce))
 826  !ABI_CALLOC(dfft, (nwr_ce))
 827  ABI_CALLOC(time_mesh, (nwr_ce))
 828  ABI_CALLOC(time_mesh_temp, (nwr))
 829  ABI_CALLOC(ct, (nwr_ce))
 830  ABI_CALLOC(ct_temp, (nwr))
 831  ABI_CALLOC(gt, (nwr_ce))
 832  ABI_CALLOC(temp_g, (2,nwr,1))
 833  ABI_CALLOC(temp_g_ce, (2,nwr_ce,1))
 834 
 835  ABI_CALLOC(temp_r, (nwr,1))
 836  ABI_CALLOC(temp_r_cplx, (2*nwr_ce,1))
 837  !ABI_CALLOC(inv_wrmesh_shifted_sq, (nwr_ce))
 838  ABI_CALLOC(wrmesh_shifted, (nwr))
 839  ABI_CALLOC(wrmesh_shifted_ce, (nwr_ce))
 840  ABI_CALLOC(beta, (nwr))
 841  ABI_CALLOC(betaoverw2, (nwr))
 842  !ABI_CALLOC(betaoverw2c, (nwr))
 843  Ha_fs = 8.955433106
 844 
 845  ! Setting direct domain after fft
 846  ! 0 1 2 3 ... N/2    -(N-1)/2 ... -1    <= gc
 847  ! 1 2 3 4 ....N/2+1  N/2+2    ...  N    <= index ig
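 !  e.g. (illustration only) for N=8: gc = 0 1 2 3 4 -3 -2 -1   at   ig = 1 2 3 4 5 6 7 8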
 848 
 849  !do iw=1,nwr_ce
 850  !  dfft(iw) = ig2gfft(iw,nwr)
 851  !end do
 852  !ABI_FREE(dfft)
 853 
 854  fftalg = self%ce_ngfft(7)
 855  fftalga = fftalg/100
 856  ! Loops are MPI-parallelized over k-points and spin.
 857  do my_spin=1,self%my_nspins
 858    spin = self%my_spins(my_spin)
 859    do my_ik=1,self%my_nkcalc
 860      ikcalc= self%my_ikcalc(my_ik)
 861      !if (ikcalc /= 1) cycle ! For debugging purpose
 862      nbands = self%nbcalc_ks(ikcalc, spin)
 863      call cwtime(cpu, wall, gflops, "start")
 864 
 865      do ib=1,nbands
 866        !if (ib > 1) cycle ! MG HACK To be able to run tests quickly.
 867 
 868        ! Shift the w-mesh so that it is centered at the KS energy and exclude the w=0.0 point from the integrations (principal value)
 869        wr_step = self%wrmesh_b(2,ib,my_ik,spin) - self%wrmesh_b(1,ib,my_ik,spin)
 870        wrmesh_shifted(:) = self%wrmesh_b(:,ib,my_ik,spin) - self%e0vals(ib,my_ik,spin) &
 871                - 0.5 * wr_step
 872 
 873 
 874        do itemp=1,self%ntemp
 875          ! if (itemp > 1) cycle ! MG HACK To be able to run tests quickly.
 876 
 877          ! Define the time mesh.
 878          ! Variables of size nwr_ce are meant to be used after interpolation of the
 879          ! cumulant function, in case we want to add more points.
 880          time_max = (log(self%tolcum) / ( - abs(aimag(self%vals_e0ks(itemp, ib, my_ik,spin)) )) )
 881 
 882          init_t = 0.0
 883          time_step_init = 1.0/(nwr - 1.0)
 884          time_mesh_temp(:) = arth(init_t, time_step_init, nwr)
 885          time_mesh_temp = time_mesh_temp * 2*PI/wr_step
 886          time_step = time_mesh_temp(2) - time_mesh_temp(1)
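          ! NB: the resulting time_mesh_temp spans [0, 2*pi/wr_step] with nwr uniformly
          ! spaced points, i.e. the time window is set by the frequency resolution of Sigma(w).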
 887 
 888          time_mesh(:) = time_mesh_temp(:) !! arth(init_t, time_step_init, nwr_ce)
 889          time_mesh = time_mesh * 2*PI/wr_step
 890 
 891          ! Define the frequency mesh to be used after interpolation.
 892 
 893          !! TODO: Fix problems arising when extra points are added to the time mesh
 894          !! by interpolation.
 895 
 896          !!wmesh_step_init = 1.0/(nwr_ce/2.0 -1 )
 897          !!init_w = -1.0
 898          !!end_w = 0.0
 899          !!wrmesh_shifted_ce(:nwr_ce/2+1) = linspace(init_w,end_w, nwr_ce/2+1)
 900          !!init_w = 0.0
 901          !!end_w = 1.0
 902          !!wrmesh_shifted_ce(nwr_ce/2+1:) = linspace(init_w,end_w, nwr_ce/2+1)
 903 
 904          !!wr_step_ce = wrmesh_shifted_ce(2) - wrmesh_shifted_ce(1)
 905          !!wrmesh_shifted_ce(:) = wrmesh_shifted_ce(:) * PI/time_step - 0.5 *wr_step_ce
 906          !!self%wrmesh_ce(:,ib,my_ik,spin) = wrmesh_shifted_ce(:) + self%e0vals(ib,my_ik,spin) + 0.5 *wr_step_ce
 907          !!wrmesh_shifted_ce = wrmesh_shifted
 908 
 909          if (self%debug == 1) self%time_mesh(:,itemp,ib,my_ik,spin) = time_mesh_temp(:)! * 0.24188845385 *10E-4 ! picoseconds ( I think )
 910 
 911          if (time_mesh_temp(nwr) < time_max) ABI_WARNING(sjoin("KBT",itoa(my_ik),itoa(ib),itoa(itemp)))
 912          msg = sjoin( &
 913                  "Time mesh is smaller than needed to reach the tolerance value. Actual value:", &
 914                  ftoa(time_mesh_temp(nwr)), ", desirable:", ftoa(time_max), ". Increase nfreqsp.")
 915          if (time_mesh_temp(nwr) < time_max) ABI_WARNING(msg)
 916          beta(:) = abs( aimag( self%vals_wr(:, itemp, ib, my_ik, spin ) ) ) / pi
 917 
 918          ! Calculation of the different terms of the cumulant (c1, c2 and c3).
 919          betaoverw2(:) = beta(:) / (wrmesh_shifted(:) ** 2)
 920 
 921          c2(:) = -j_dpc * real( self%vals_e0ks( itemp, ib, my_ik, spin ) ) * time_mesh_temp(:)
 922 
 923          output_c3 = simpson(wr_step, betaoverw2)
 924          c3(:) = -1.0 * output_c3
 925 
 926 
 927          temp_r(:,1) = betaoverw2(:)
 928 
 929          !@Joao: I got different results when I set use_fft to .False.
 930          !Can you recheck this part and verify whether use_fft = .False. is still needed?
 931 
 932          use_fft = .True.
 933          !use_fft = .not. fftalga == FFT_SG
 934          if (use_fft) then
 935            call fourdp(1, temp_g, temp_r, -1, self%ce_mpi_enreg, nwr, 1, self%ce_ngfft , 0 )
 936            c1(:) = temp_g(1, :, 1) + j_dpc* temp_g(2, :, 1)
 937            c1 = c1 * wr_step * nwr
 938            c1(2::2) = -1.0 * c1(2::2)
 939          else
 940            msg = sjoin("FFT not available, using DFT instead.",  &
 941                        " Slower but reliable.",  &
 942                        " Parallelism over time points for the integration of C(t).")
 943            ABI_COMMENT(msg)
 944            do it=1, nwr
 945              ! if (mod(it, self%wt_comm%nproc) /= self%wt_comm%me) cycle  ! MPI parallelism over time
 946              time = time_mesh_temp(it)
 947 
 948              ! Discrete Fourier Transform of C1 (cumulant function without the +iwt and -1 parts)
 949              c_temp(:) = betaoverw2(:) *  exp( - j_dpc * time * wrmesh_shifted(:) )
 950              c1(it) = simpson_cplx( nwr, wr_step, c_temp)
 951            enddo
 952 
 953          endif
 954 
 955 
 956          ! The cumulant function as sum of the different components
 957          ct_temp(:) = c1 + c2 + c3
 958          ! Fitting the cumulant function
 959          res_re = linfit(nwr,time_mesh_temp(:), real(ct_temp(:)), m_fit_re, b_fit_re)
 960          res_im = linfit(nwr,time_mesh_temp(:), aimag(ct_temp(:)), m_fit_im, b_fit_im)
 961          !        call xmpi_sum(ct, self%wt_comm%value, ierr)
 962          if (self%debug == 1) then ! .and. self%wt_comm%nproc > 1) then
 963            self%ct_vals(:, itemp, ib, my_ik, spin) = ct(:)
 964            self%c1(:, itemp, ib, my_ik, spin) = c1(:)
 965            self%c2(:, itemp, ib, my_ik, spin) = c2(:)
 966            self%c3(:, itemp, ib, my_ik, spin) = c3(:)
 967          endif
 968 
 969          ! Adding extra interpolated points to the cumulant function
 970          !!ct(:nwr/2) = ct_temp(:nwr/2)
 971          !!ct(nwr/2:) = time_mesh(nwr/2:) * m_fit_re + b_fit_re + j_dpc * ( time_mesh(nwr/2:) * m_fit_im + b_fit_im )
 972 
 973          !!do iw=1, nwr/2
 974          !!    temp_reflex(nwr/2-iw) = - ct(iw) + time_mesh(iw) * m_fit_re + b_fit_re + j_dpc * ( time_mesh(iw) * m_fit_im + b_fit_im )
 975          !!enddo
 976          !!ct(nwr_ce - nwr/2:) =  ct(nwr_ce - nwr/2:) + temp_reflex(:)
 977          ct(:) = ct_temp(:)
 978          ! Retarded Green's function in time domain
 979          gt = - j_dpc * exp( ct )
 980 
 981          ! Collect data if wt parallelism.
 982          !call xmpi_sum(gt, self%wt_comm%value, ierr)
 983 
 984          if (self%debug == 1) then ! .and. self%wt_comm%nproc > 1) then
 985            self%gt_vals(:, itemp, ib, my_ik, spin) = gt(:)
 986          end if
 987 
 988          use_fft = .True.
 989          !use_fft = .not. fftalga == FFT_SG
 990          if (use_fft) then
 991 
 992            ! Fast Fourier Transform to obtain the Green's function in frequency
 993            ! domain
 994            temp_g_ce(1,:,1) = real(gt(:))
 995            temp_g_ce(2,:,1) = aimag(gt(:))
 996            call fourdp(2, temp_g_ce, temp_r_cplx, 1, self%ce_mpi_enreg, nwr_ce, 1, self%ce_ngfft_g , 0 )
 997            gw(:) = temp_r_cplx(1::2,1) + j_dpc* temp_r_cplx(2::2,1)
 998 
 999 
1000            ! TODO use ig2gfft from 52_fft_mpi_noabirule/m_fftcore.F90 instead of the two following lines
1001            if ( mod(nwr_ce,2) == 0 ) then
1002              self%gw_vals(1:int(nwr_ce/2.0), itemp, ib, my_ik, spin) = gw(int(nwr_ce/2.0):nwr_ce)
1003              self%gw_vals(int(nwr_ce/2.0)+1:nwr_ce, itemp, ib, my_ik, spin) = gw(1:int(nwr_ce/2.0)-1)
1004            else
1005              self%gw_vals(1:int(nwr_ce/2.0)+1, itemp, ib, my_ik, spin) = gw(int(nwr_ce/2.0)+1:nwr_ce)
1006              self%gw_vals(int(nwr_ce/2.0)+2:nwr_ce, itemp, ib, my_ik, spin) = gw(1:int(nwr_ce/2.0))
1007            endif
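            ! Illustration (hypothetical odd value nwr_ce = 7, else branch above):
            !   gw_vals(1:4) <- gw(4:7)  and  gw_vals(5:7) <- gw(1:3),
            ! i.e. an fftshift-like reordering, presumably so that gw_vals follows the
            ! monotonically increasing frequency mesh.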
1008 
1009 
1010            self%gw_vals(:, itemp, ib, my_ik, spin) = self%gw_vals(:, itemp, ib, my_ik, spin) * time_step
1011 
 1012            ! The plain FFT corresponds to a rectangle-rule sum; subtract half of the two
 1013            ! endpoint contributions to recover the trapezoidal rule.
1014            self%gw_vals(:, itemp, ib, my_ik, spin) = self%gw_vals(:, itemp, ib, my_ik, spin)  &
1015              - 0.5* gt(1)*time_step - 0.5 * gt(nwr_ce)*exp(j_dpc*wrmesh_shifted_ce(:)*time_mesh(nwr_ce)) * time_step
1016 
1017 
1018          else
1019            msg = sjoin("FFT not available, using DFT instead.",  &
1020                           " Slower but reliable.",  &
 1021                           " Parallelism over frequencies for the integration of G(w).")
1022            ABI_COMMENT(msg)
1023 
1024            do iw=1, nwr_ce
1025              ! if (mod(iw, self%wt_comm%nproc) /= self%wt_comm%me) cycle  ! MPI parallelism over freqs
1026              omega = wrmesh_shifted_ce(iw)
1027 
1028              ! Discrete Fourier Transform of the Green's function
1029              g1(:) = exp( j_dpc * omega * time_mesh(:) ) * gt(:)
1030 
1031              ! Retarded Green's function in frequency domain
1032              self%gw_vals(iw, itemp, ib, my_ik, spin) = simpson_cplx(nwr_ce, time_step, g1)
1033 
1034              !if (my_rank == 0) write(ab_out, *)"gw_vals",  self%gw_vals(iw, itemp, ib, my_ik, spin)
1035            end do ! iw
1036          end if
1037 
1038          ! Collect data if wt parallelism.
1039          ! call xmpi_sum(self%gw_vals(:, itemp, ib, my_ik, spin) , self%wt_comm%value, ierr)
1040 
1041        end do  ! itemp
1042      end do ! ib
1043 
 1044      write(msg,'(2(a,i0),a)') " k-point [", ikcalc, "/", self%nkcalc, "]"
1045      call cwtime_report(msg, cpu, wall, gflops)
1046    end do ! my_ik
1047  end do ! my_spin
1048 
1049  ABI_SFREE(c1)
1050  ABI_FREE(ct_temp)
1051  ABI_FREE(c_temp)
1052  ABI_SFREE(c2)
1053  ABI_SFREE(c3)
1054  ABI_SFREE(ct)
1055  ABI_SFREE(gt)
1056  ABI_SFREE(g1)
1057  ABI_SFREE(gw)
1058  ABI_SFREE(beta)
1059  ABI_SFREE(temp_g)
1060  ABI_SFREE(temp_g_ce)
1061  ABI_SFREE(temp_r)
1062  ABI_SFREE(temp_r_cplx)
1063  ABI_SFREE(time_mesh)
1064  ABI_SFREE(wrmesh_shifted)
1065  ABI_SFREE(wrmesh_shifted_ce)
1066  !ABI_SFREE(inv_wrmesh_shifted_sq)
1067  ABI_SFREE(betaoverw2)
1068  ABI_FREE(time_mesh_temp)
1069 
1070  call cwtime_report(" cumulant_compute", cpu_kloop, wall_kloop, gflops_kloop)
1071 
1072  ! contains
1073 
1074  ! real function lreg(x,y, n, nsteps) result(m,b)
1075  ! !
1076  ! ! Determines a linear regression from x and y values
1077  ! ! What are the best m and b values to produce y= m*x + b ?
1078  ! !
1079  ! !
1080  ! ! Cost function ( Root Mean Squared Error ): J = 1/n sum_i^n (pred_i - y_i)^2
1081  ! ! where pred is the predicted value and y the true value
1082  ! !
1083  ! ! Goal: minimize J
1084  ! ! How? Using Gradient Descent
 1085  ! ! - Learning rate is the step size of the update ( too small: takes longer; too large: it can be unstable )
1086  ! ! - Initial m or b are chosen randomly
1087  ! ! - n is the number of points
1088  ! !
1089  ! ! new b = old b - 2*(learning rate)/n sum_i^n (pred(x_i) - y) * x_i
1090  ! ! new m = old m  - 2*(learning rate)/n sum_i^n (pred(x_i) - y)
1091  ! !
1092  !
1093  ! integer, intent(in) :: n, nsteps
1094  ! real(dp), intent(in) :: x(n), y(n)
1095  ! !real(dp) :: m,b
1096  ! integer :: istep, i
1097  ! real(dp) :: lrate, cost, acc
1098  ! real(dp) :: pred(n)
1099  !
1100  ! m = 0 ! Initial guesses
1101  ! b = 0
1102  ! do istep=1,nsteps
1103  !
1104  !  pred(:) = m * x(:) + b ! Prediction with the new coefficients m, b
1105  !
1106  !  ! Check accuracy comparing the linear regression and the data
1107  !  do i=1, n
1108  !    if (abs(y(i) ) < 1e-4 ) cycle
1109  !    acc = acc + sum( abs(pred(i) - y(i))/y(i) )
1110  !  end do i
1111  !  acc = 1 - acc
1112  !  print *, "Accuracy: ",istep, acc
1113  !
1114  !  cost = 1.0/n * sum(pred(:) - y(:))**2
1115  !
1116  !  print *, "Cost: ", istep, cost
1117  !
1118  !  ! Update coefficients
1119  !
1120  !  m = m - 2.0*lrate/n * sum(pred(:) - y(:))
1121  !  b = b - 2.0*lrate/n * sum((pred(:) - y(:))*x(:))
1122  ! enddo ! istep
1123  !
1124  ! end function lreg
1125 
1126  !  complex function trapz(f_size, f_step, f)
1127  !
1128  !   integer,intent(in) :: f_size
1129  !   real(dp),intent(in) :: f_step
1130  !   complex(dpc),intent(in) :: f(f_size)
1131  !
1132  !   trapz = ( sum(f) - 0.5* f(1) - 0.5* f(f_size) )* f_step
1133  !
1134  !  end function trapz
1135 
1136 end subroutine cumulant_compute

m_cumulant/cumulant_driver [ Functions ]


NAME

 cumulant_driver

FUNCTION

 General driver to compute the e-ph spectral functions within the cumulant expansion and, optionally, transport properties within the Kubo formalism.

INPUTS

 dtfil<datafiles_type>=variables related to files.
 dtset<dataset_type>=All input variables for this dataset.
 ebands<ebands_t>=The GS KS band structure (energies, occupancies, k-weights...)
 cryst<crystal_t>=Crystalline structure
 comm=Initial MPI communicator with all procs provided by user.

SOURCE

360 subroutine cumulant_driver(dtfil, dtset, ebands, cryst, comm)
361 
362 !Arguments ------------------------------------
363 !scalars
364  integer, intent(in) :: comm
365  type(datafiles_type),intent(in) :: dtfil
366  type(dataset_type),intent(in) :: dtset
367  type(crystal_t),intent(in) :: cryst
368  type(ebands_t),intent(in) :: ebands
369 
370 !Local variables ------------------------------
371  integer,parameter :: master = 0
372  integer :: my_rank
373  character(len=fnlen) :: path
374  type(cumulant_t) :: cumulant
375  type(sigmaph_t) :: sigmaph
376 !arrays
377  integer :: unts(2)
378 
379 ! *************************************************************************
380 
381  my_rank = xmpi_comm_rank(comm)
382  unts = [std_out, ab_out]
383 
384  !sigeph_filepath = dtfil%filsigephin
385  call wrtout(unts, ch10//' Entering cumulant expansion computation driver.')
386 
387  ! Initialize cumulant object
388  call cumulant%init(dtset, dtfil, cryst, ebands, comm, sigmaph)
389  !if (cumulant%all_comm%me == -1) goto 100
390 
391  ! Compute C(t), G(t), G(w) and A(w) using cumulant expansion
392  call cumulant%compute()
393 
394  ! Compute transport properties within the Kubo formalism.
395  if (any(abs(dtset%sigma_erange) > zero)) call cumulant%kubo_transport(dtset, cryst)!path, ncid)
396 
397  ! Output netcdf file with cumulant results e.g. A_nk(w).
398  ! Use EPH prefix because we may also have EE and PHE cumulants.
399  path = strcat(dtfil%filnam_ds(4), "_EPH_CUMULANT.nc")
400  !call cumulant%ncwrite(path, cryst, ebands, dtset)
401  call cumulant%ncwrite(path, cryst, dtset)
402 
403  !if (my_rank == master) then
404  !  ! Print cumulant expansion results to stdout and other external txt files (for the test suite)
405  !  !call cumulant%print_txt_files(cryst, dtset, dtfil)
406  !end if
407 
408  ! Free memory
409 !100
410 call cumulant%free()
411 call sigmaph%free()
412 
413 end subroutine cumulant_driver

m_cumulant/cumulant_free [ Functions ]


NAME

 cumulant_free

FUNCTION

  Free dynamic memory.

INPUTS

SOURCE

1764 subroutine cumulant_free(self)
1765 
1766 !Arguments --------------------------------------
1767  class(cumulant_t),intent(inout) :: self
1768 
1769 !************************************************************************
1770 
1771  ABI_SFREE(self%kcalc2ebands)
1772  ABI_SFREE(self%linewidths)
1773  ABI_SFREE(self%vbks)
1774  ABI_SFREE(self%nbcalc_ks)
1775  ABI_SFREE(self%bstart_ks)
1776  !ABI_SFREE(self%bstop_ks)
1777  ABI_SFREE(self%kcalc2ibz)
1778  ABI_SFREE(self%coords_kws)
1779  ABI_SFREE(self%my_spins)
1780  ABI_SFREE(self%my_ikcalc)
1781  ABI_SFREE(self%e0vals)
1782  ABI_SFREE(self%dw_vals)
1783  ABI_SFREE(self%gw_vals)
1784  !ABI_SFREE(self%ce_spfunc_wr)
1785  ABI_SFREE(self%conductivity_mu)
1786  ABI_SFREE(self%mobility_mu)
1787  ABI_SFREE(self%transport_mu_e)
1788  ABI_SFREE(self%print_dfdw)
1789  ABI_SFREE(self%seebeck)
1790  ABI_SFREE(self%kappa)
1791  ABI_SFREE(self%l0)
1792  ABI_SFREE(self%l1)
1793  ABI_SFREE(self%l2)
1794  ABI_SFREE(self%time_mesh)
1795  ABI_SFREE(self%ct_vals)
1796  ABI_SFREE(self%c1)
1797  ABI_SFREE(self%c2)
1798  ABI_SFREE(self%c3)
1799  ABI_SFREE(self%gt_vals)
1800  ABI_SFREE(self%wrmesh_b)
1801  ABI_SFREE(self%wrmesh_ce)
1802  ABI_SFREE(self%vals_e0ks)
1803  ABI_SFREE(self%vals_wr)
1804  ABI_SFREE(self%kcalc)
1805  ABI_SFREE(self%kTmesh)
1806  ABI_SFREE(self%mu_e)
1807  ABI_SFREE(self%n_ehst)
1808  call destroy_mpi_enreg(self%ce_mpi_enreg)
1809  call ebands_free(self%ebands)
1810 
1811  call self%spin_comm%free()
1812  call self%kcalc_comm%free()
1813  call self%wt_comm%free()
1814  call self%ncwrite_comm%free()
1815 
1816 end subroutine cumulant_free

m_cumulant/cumulant_init [ Functions ]


NAME

 cumulant_init

FUNCTION

   Initialization of the cumulant_t object: read the Sigma results from the SIGEPH file, set up the FFT meshes and the MPI distribution.

INPUTS

 dtset<dataset_type>=All input variables for this dataset.
 dtfil<datafiles_type>=variables related to files.
 comm=Initial MPI communicator with all procs provided by user.

SOURCE

432 subroutine cumulant_init(self, dtset, dtfil, cryst, ebands, comm, sigmaph )
433 
434 !Arguments --------------------------------------
435  integer,intent(in) :: comm
436  type(dataset_type),intent(in) :: dtset
437  type(datafiles_type),intent(in) :: dtfil
438  type(crystal_t),intent(in) :: cryst
439  type(ebands_t),intent(in) :: ebands
440  type(sigmaph_t), intent(out) :: sigmaph
441  class(cumulant_t),intent(inout) :: self
442 
443 !Local variables --------------------------------
444  integer, parameter :: master = 0
445  integer :: ncerr, ncid, my_rank, nprocs, ierr, spin, ikcalc, ib,  color !my_spin, my_kcalc, cnt, nbands,
446  integer :: fftalg, fftalga
447  real(dp) :: cpu, wall, gflops, rsize
448  logical :: is_prime
449  character(len=500) :: msg
450  character(len=fnlen) :: sigeph_filepath
451  integer :: unts(2), facts(2)
452  real(dp), allocatable :: rtmp_vals_wr(:,:,:,:,:,:), rtmp_vals_e0ks(:,:,:,:,:)
453  !type(ebands_t) :: tmp_ebands
454  real(dp) :: extrael_fermie(2),sigma_erange(2)
455  integer :: sigma_ngkpt(3)
456 #ifdef HAVE_MPI
457  integer :: ndims, comm_cart, me_cart
458  logical :: reorder
459  integer,allocatable :: dims(:)
460  logical,allocatable :: periods(:), keepdim(:)
461 #endif
462 
463 !************************************************************************
464 
465  call cwtime(cpu, wall, gflops, "start")
466 
467  self%comm = comm
468  my_rank = xmpi_comm_rank(comm); nprocs = xmpi_comm_size(comm)
469 
470 
471  unts = [std_out, ab_out]
472 
 473  ! Read data from the out_SIGEPH.nc file
474  sigeph_filepath = dtfil%filsigephin
475  call wrtout(unts, sjoin("- Reading Sigma results from:", sigeph_filepath), newlines=1, do_flush=.True.)
476  call self%sigmaph_ncread(sigeph_filepath, ncid, comm)
477  sigmaph = sigmaph_read(sigeph_filepath, dtset, comm, msg, ierr, keep_open = .false., &
478          extrael_fermie=extrael_fermie, sigma_ngkpt=sigma_ngkpt, sigma_erange=sigma_erange)
479 
480  self%bmin = minval(sigmaph%bstart_ks); self%bmax = maxval(sigmaph%bstop_ks)
481  !ABI_CALLOC(self%vbks, (3, self%bmin:self%bmax, ebands%nkpt, sigmaph%nsppol))
482 
483  !if (any(abs(dtset%sigma_erange) > zero)) then
484  self%ebands = sigmaph%get_ebands(cryst, ebands, [self%bmin, self%bmax], &
485                                    self%kcalc2ebands, self%linewidths, self%vbks, xmpi_comm_self)
486  !end if
487 
488  self%eph_extrael = extrael_fermie(1)
489  self%eph_fermie = extrael_fermie(2)
490 
491  ABI_MALLOC(self%mu_e, (self%ntemp))
492  ! Possibility to increase nwr in case of interpolation in cumulant to add extra points
493  ! TODO: not working yet
494  self%nwr_ce = self%nwr!*4 -1 ! Odd
495 
496  self%ieta = j_dpc * sigmaph%ieta
497  !self%ebands = tmp_ebands
498  self%mu_e = sigmaph%mu_e
499 
500  ! Setting variables to launch 1d FFT calculations later on
501  call ngfft_seq(self%ce_ngfft, [self%nwr, 1, 1])
502  self%ce_ngfft(4:6) = self%ce_ngfft(1:3)
503 
504  fftalg = self%ce_ngfft(7); fftalga = fftalg/100
505  if (fftalga == FFT_SG) then
506    self%ce_ngfft(7)= 102
507    ABI_WARNING("Setting fftalg to 102. Please link with FFTW3 or DFTI for better performance!")
508  end if
509  !self%ce_ngfft(7)= 102
510 
511  call initmpi_seq(self%ce_mpi_enreg)
512  call init_distribfft_seq(self%ce_mpi_enreg%distribfft, 'c', self%ce_ngfft(2), self%ce_ngfft(3), 'all')
513  call init_distribfft_seq(self%ce_mpi_enreg%distribfft, 'f', self%ce_ngfft(2), self%ce_ngfft(3), 'all')
514 
515  call ngfft_seq(self%ce_ngfft_g, [self%nwr_ce, 1, 1])
516  self%ce_ngfft_g(4:6) = self%ce_ngfft_g(1:3)
517 
518  fftalg = self%ce_ngfft_g(7); fftalga = fftalg/100
519  if (fftalga == FFT_SG) then
520    self%ce_ngfft_g(7)= 102
521    ABI_WARNING("Setting fftalg to 102. Please link with FFTW3 or DFTI for better performance!")
522  end if
523  !self%ce_ngfft_g(7)= 102
524 
525  ! Setting debugging ( higher verbosity )
526  self%tolcum = dtset%tolcum
527  if (self%tolcum > zero) then
528    self%debug = 0
529  else
530    self%debug = 1
531  endif
532  self%tolcum = abs(self%tolcum)
533 
534  call wrtout(unts, " Cumulant parameters:")
535  call wrtout(unts, sjoin(" Number of spins:                   ", itoa(self%nsppol)))
536  call wrtout(unts, sjoin(" Number of spinor components:       ", itoa(self%nspinor)))
537  call wrtout(unts, sjoin(" Number of k-points computed:       ", itoa(self%nkcalc)))
538  call wrtout(unts, sjoin(" Maximum number of bands computed:  ", itoa(self%max_nbcalc)))
539  call wrtout(unts, sjoin(" Number of frequencies in Sigma(w): ", itoa(self%nwr)))
 540  call wrtout(unts, sjoin(" Number of frequencies in G(w):     ", itoa(self%nwr_ce)))
541  call wrtout(unts, sjoin(" Number of Temperatures:            ", itoa(self%ntemp)))
542 
543  ! ========================
544  ! === MPI DISTRIBUTION ===
545  ! ========================
546  ! At present, there are two levels of MPI parallelism: kcalc and time/frequencies.
 547  ! Only the kcalc parallelism leads to memory distribution (my_nkcalc) whereas the second level is only
548  ! used to distribute the loop over frequencies/time points.
549  ! Note the following:
550  !
 551  ! 1) We distribute contiguous blocks of kcalc k-points so that we can read/write data in my_nkcalc chunks.
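 !    e.g. (illustration only) nkcalc = 6 distributed over 3 MPI procs:
 !    rank 0 -> kcalc 1-2, rank 1 -> kcalc 3-4, rank 2 -> kcalc 5-6 (see xmpi_split_block below).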
552 
553  ! TODO:
554  !  *) Spin and nkcalc(nsppol)
 555  !  *) Decide whether it makes sense to create a new comm if nprocs > nkcalc  and mod(nprocs, nkcalc) /= 0
556  !  *) No need anymore for parallelism in time/frequency
557 
558 
559  self%kcalc_comm%nproc = nprocs
560 ! self%wt_comm%nproc = nprocs / self%kcalc_comm%nproc
561 
562  if (nprocs > self%nkcalc) then
563    call ifact2(nprocs, facts, is_prime)
564    if (is_prime) then
565      ! TODO
566      ABI_ERROR("prime numbers are evil!")
567    end if
568    if (facts(1) > self%nkcalc) then
569       ! Avoid procs with no k-points.
570       facts(1:2) = facts(2:1:-1)
571    end if
572    self%kcalc_comm%nproc = facts(1)
573  !  self%wt_comm%nproc = facts(2)
574    !do cnt=self%nkcalc,1,-1
575    !  if (mod(nprocs, cnt) == 0 .and. mod(self%nkcalc, cnt) == 0) then
576    !    self%kcalc_comm%nproc = cnt; self%my_nkcalc = self%nkcalc / cnt; exit
577    !  end if
578    !end do
579  end if
580 
581  ABI_ICALLOC(self%coords_kws, (2, self%nsppol))
582 #ifdef HAVE_MPI
583  ! Create 2d cartesian communicator: kpoints in Sigma_k, (time/frequency)
584  ! FIXME: Fix spin
585  spin = 1
586  ndims = 1
587  ABI_MALLOC(dims, (ndims))
588  ABI_MALLOC(periods, (ndims))
589  ABI_MALLOC(keepdim, (ndims))
590  periods(:) = .False.; reorder = .False.
591 ! dims = [self%kcalc_comm%nproc, self%wt_comm%nproc]
592  dims = [self%kcalc_comm%nproc]
593 
594  call MPI_CART_CREATE(comm, ndims, dims, periods, reorder, comm_cart, ierr)
595  ! Find the index and coordinates of the current processor
596  call MPI_COMM_RANK(comm_cart, me_cart, ierr)
597  call MPI_CART_COORDS(comm_cart, me_cart, ndims, self%coords_kws(:, spin), ierr)
598 
599  ! Create communicator for kpoints.
600  keepdim = .False.; keepdim(1) = .True.
601  call MPI_CART_SUB(comm_cart, keepdim, self%kcalc_comm%value, ierr); self%kcalc_comm%me = xmpi_comm_rank(self%kcalc_comm%value)
602 
603  ! Create communicator for w/t.
604 ! keepdim = .False.; keepdim(2) = .True.
605 ! call MPI_CART_SUB(comm_cart, keepdim, self%wt_comm%value, ierr); self%wt_comm%me = xmpi_comm_rank(self%wt_comm%value)
606 
607  ! Create communicator for spins.
608  !keepdim = .False.; keepdim(5) = .True.
609  !call MPI_CART_SUB(comm_cart, keepdim, self%spin_comm%value, ierr); self%spin_comm%me = xmpi_comm_rank(self%spin_comm%value)
610 
611  ! Create communicator for parallel IO
612  ! If parallelism over time/frequencies is ON, we select only a column of procs (kcalc dimension)
613  ! to avoid writing the same data multiple times as omega/time are not MPI distributed
614  ! Obviously I'm assuming HDF5 + MPI-IO
615  color = xmpi_undefined; if (self%coords_kws(2, spin) == 0) color = 1
616  call xmpi_comm_split(comm, color, my_rank, self%ncwrite_comm%value, ierr)
617  if (color == 1) then
618    self%ncwrite_comm%me = xmpi_comm_rank(self%ncwrite_comm%value)
619    self%ncwrite_comm%nproc = xmpi_comm_size(self%ncwrite_comm%value)
620  else
621    call self%ncwrite_comm%set_to_null()
622  end if
623 
624  ABI_FREE(dims)
625  ABI_FREE(periods)
626  ABI_FREE(keepdim)
627  call xmpi_comm_free(comm_cart)
628 #endif
629 
630  call wrtout(unts, &
631    sjoin("- Using: ", itoa(self%kcalc_comm%nproc), "MPI procs to distribute:", itoa(self%nkcalc), "k-points."))
632  !call wrtout(unts, &
633 !   sjoin("- Using: ", itoa(self%wt_comm%nproc), "MPI procs to parallelize:", itoa(self%nwr_ce), "frequency/time loops."))
634  call wrtout(std_out, sjoin("- Performing parallel IO with:", itoa(self%ncwrite_comm%nproc), "procs"))
635 
636  if (self%ncwrite_comm%nproc /= 1 .and. .not. nctk_has_mpiio) then
637    ABI_WARNING("Cumulant requires netcdf with MPI-IO support! Continuing anyway but runtime error is expected!")
638  end if
639 
640  ! Consistency check.
641  !if (self%kcalc_comm%nproc * self%wt_comm%nproc /= nprocs) then
642  if (self%kcalc_comm%nproc /= nprocs) then
643    write(msg, "(a,i0,3a, 3(a,1x,i0))") &
644      "Cannot create 2d Cartesian grid with total nprocs: ", nprocs, ch10, &
645      "Idle processes are not supported. The product of comm%nproc should be equal to nprocs.", ch10, &
646      "kcalc_nproc (", self%kcalc_comm%nproc, ") != ", & ! x wt_nproc (", self%wt_comm%nproc, ") != ", &
647      self%kcalc_comm%nproc ! * self%wt_comm%nproc
648    ABI_ERROR(msg)
649  end if
650 
651  ! Here we distribute the k-points inside kcalc_comm.
652  call xmpi_split_block(self%nkcalc, self%kcalc_comm%value, self%my_nkcalc, self%my_ikcalc)
653  ABI_CHECK(self%my_nkcalc > 0, sjoin("nkcalc (", itoa(self%nkcalc), ") > nprocs (", itoa(nprocs), ")"))
654 
655  ib = mod(self%nkcalc, self%my_nkcalc)
656  call xmpi_sum(ib, self%comm, ierr)
657  if (ib /= 0) then
 658    ABI_COMMENT("nkcalc should be divisible by the number of MPI procs to distribute memory equally!")
659  end if
660 
661  ! Distribute spins and create mapping to spin index.
662  if (self%nsppol == 2) then
663    ABI_ERROR("cumulant with nsppol 2 is not supported.")
664    call xmpi_split_block(self%nsppol, comm, self%my_nspins, self%my_spins)
665    !ABI_CHECK(self%my_nspins > 0, sjoin("nsppol (", itoa(self%nsppol), ") > spin_comm_nproc (", itoa(comm), ")"))
666  else
667    ! No nsppol parallelism DOH!
668    self%my_nspins = 1
669    ABI_MALLOC(self%my_spins, (self%my_nspins))
670    self%my_spins = 1
671  end if
672 
673  !ABI_CALLOC(self%ce_spfunc_wr, (self%nwr, self%ntemp, self%max_nbcalc, self%my_nkcalc, self%nsppol))
674  ABI_CALLOC(self%gw_vals, (self%nwr_ce, self%ntemp, self%max_nbcalc, self%my_nkcalc, self%nsppol))
675 
676  if (self%debug == 1) then
677    ABI_CALLOC(self%time_mesh, (self%nwr_ce, self%ntemp, self%max_nbcalc, self%my_nkcalc, self%nsppol))
678    ABI_CALLOC(self%ct_vals, (self%nwr_ce, self%ntemp, self%max_nbcalc, self%my_nkcalc, self%nsppol))
679    ABI_CALLOC(self%c1, (self%nwr_ce, self%ntemp, self%max_nbcalc, self%my_nkcalc, self%nsppol))
680    ABI_CALLOC(self%c2, (self%nwr_ce, self%ntemp, self%max_nbcalc, self%my_nkcalc, self%nsppol))
681    ABI_CALLOC(self%c3, (self%nwr_ce, self%ntemp, self%max_nbcalc, self%my_nkcalc, self%nsppol))
682    ABI_CALLOC(self%gt_vals, (self%nwr_ce, self%ntemp, self%max_nbcalc, self%my_nkcalc, self%nsppol))
683  endif
684 
685  ABI_MALLOC(rtmp_vals_wr, (2, self%nwr, self%ntemp, self%max_nbcalc, self%my_nkcalc, self%nsppol))
686  ABI_MALLOC(rtmp_vals_e0ks, (2, self%ntemp, self%max_nbcalc, self%my_nkcalc, self%nsppol))
687  ABI_MALLOC(self%vals_wr, (self%nwr, self%ntemp, self%max_nbcalc, self%my_nkcalc, self%nsppol))
688  ABI_MALLOC(self%vals_e0ks, (self%ntemp, self%max_nbcalc, self%my_nkcalc, self%nsppol))
689  ABI_MALLOC(self%wrmesh_b, (self%nwr, self%max_nbcalc, self%my_nkcalc, self%nsppol))
690  ABI_MALLOC(self%wrmesh_ce, (self%nwr_ce, self%max_nbcalc, self%my_nkcalc, self%nsppol))
691 ! ABI_MALLOC(self%wrmesh_ce, (self%nwr_ce, self%max_nbcalc, self%my_nkcalc, self%nsppol))
692  ABI_MALLOC(self%e0vals, (self%max_nbcalc, self%my_nkcalc, self%nsppol))
693  ABI_MALLOC(self%dw_vals, (self%ntemp, self%max_nbcalc, self%my_nkcalc, self%nsppol))
694 
695  ! Estimate memory (only nwr arrays)
696  rsize = five; if (self%debug == 1) rsize = rsize + 3
697  rsize = rsize * self%nwr_ce * self%ntemp * self%max_nbcalc * self%my_nkcalc * self%nsppol
698  call xmpi_max(rsize, self%comm, ierr)
699  write(msg,'(a,f8.1,a)')' Memory needed for cumulant arrays per MPI proc: ',dp * rsize * b2Mb, ' [Mb] <<< MEM'
700  call wrtout(std_out, msg)
701 
 702  ! Each MPI proc reads a chunk of my_nkcalc entries starting at (ikcalc, spin) from the SIGEPH.nc file
 703  ! so that memory is MPI-distributed.
704  ! On disk, we have the following netcdf arrays:
705  !
706  !         nctkarr_t("wrmesh_b", "dp", "nwr, max_nbcalc, nkcalc, nsppol"), &
707  !         nctkarr_t("vals_wr", "dp", "two, nwr, ntemp, max_nbcalc, nkcalc, nsppol"), &
708  !         nctkarr_t("vals_e0ks", "dp", "two, ntemp, max_nbcalc, nkcalc, nsppol"), &
709  !         nctkarr_t("ks_enes", "dp", "max_nbcalc, nkcalc, nsppol"), &
710  !
711  ! Note the nkcalc global dimension instead of my_nkcalc.
712  ! Results are in a.u.
713 
714  ! Offse of this proc
715  spin = self%my_spins(1)
716  ikcalc = self%my_ikcalc(1)
717 
718  ncerr = nf90_get_var(ncid, vid("vals_wr"), rtmp_vals_wr, start=[1,1,1,1,ikcalc,spin], &
719                       count=[2, self%nwr, self%ntemp, self%max_nbcalc, self%my_nkcalc, self%my_nspins])
720  NCF_CHECK(ncerr)
721 
722  ncerr = nf90_get_var(ncid, vid("wrmesh_b"), self%wrmesh_b, start=[1,1,ikcalc,spin], &
723                       count=[self%nwr, self%max_nbcalc, self%my_nkcalc, self%my_nspins])
724  NCF_CHECK(ncerr)
725 
726  ncerr = nf90_get_var(ncid, vid("vals_e0ks"), rtmp_vals_e0ks, start=[1,1,1,ikcalc,spin],  &
727                       count=[2, self%ntemp, self%max_nbcalc, self%my_nkcalc, self%my_nspins])
728  NCF_CHECK(ncerr)
729 
730  ncerr = nf90_get_var(ncid, vid("ks_enes"), self%e0vals, start=[1,ikcalc,spin], &
731                       count=[self%max_nbcalc, self%my_nkcalc, self%my_nspins])
732  NCF_CHECK(ncerr)
733 
734  ncerr = nf90_get_var(ncid, vid("dw_vals"), self%dw_vals, start=[1,1,ikcalc,spin], &
735                       count=[self%ntemp, self%max_nbcalc, self%my_nkcalc, self%my_nspins])
736  NCF_CHECK(ncerr)
737 
738  ! Close the file here but the Kubo equation needs to read v_nk from file.
739  ! We will have to rationalize this part.
740  NCF_CHECK(nf90_close(ncid))
741 
742  ! Convert from real arrays to Fortran complex
743  self%vals_wr =  (rtmp_vals_wr(1,:,:,:,:,:) + j_dpc * rtmp_vals_wr(2,:,:,:,:,:))!*Ha_eV
744  self%vals_e0ks = (rtmp_vals_e0ks(1,:,:,:,:) + j_dpc * rtmp_vals_e0ks(2,:,:,:,:))!*Ha_eV
745 
746 ! self%wrmesh_b = self%wrmesh_b*Ha_eV
747 ! self%e0vals = self%e0vals*Ha_eV
748 ! self%max_nbcalc=self%max_nbcalc*Ha_eV
749 
750 ! self%e0vals = self%e0vals * Ha_eV
751 ! self%wrmesh_b = self%wrmesh_b * Ha_eV
752 
753  ABI_SFREE(rtmp_vals_wr)
754  ABI_SFREE(rtmp_vals_e0ks)
755 
756  call cwtime_report(" cumulant init", cpu, wall, gflops)
757 
758 contains
759  integer function vid(vname)
760    character(len=*),intent(in) :: vname
761    vid = nctk_idname(ncid, vname)
762 end function vid
763 
764 end subroutine cumulant_init

m_cumulant/cumulant_kubo_transport [ Functions ]


NAME

 cumulant_kubo_transport

FUNCTION

  Compute the transport coefficients (conductivity, mobility, Seebeck and thermal conductivity)
  within the Kubo-Greenwood formalism, starting from the cumulant spectral functions.
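
  Schematically (a sketch in the notation of the routine below), the Onsager coefficient
  accumulated here is

    L0 = sum_nk w_k (v_nk x v_nk) Int dw [-df/dw] A_nk(w)^2,

  with A_nk(w) = -Im G_nk(w)/pi the cumulant spectral function; the conductivity, mobility,
  Seebeck and kappa tensors are then derived from L0, L1 and L2.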

INPUTS

SOURCE

1152 subroutine cumulant_kubo_transport(self, dtset, cryst)
1153 
1154 !Arguments ------------------------------------
1155  class(cumulant_t),intent(inout) :: self
1156  type(dataset_type),intent(in) :: dtset
1157  type(crystal_t),intent(in) :: cryst
1158 
1159 !Local variables ------------------------------
1160  integer,parameter :: master = 0
1161  integer :: cnt
1162  integer :: nbands, ib, ikcalc, iw, spin, itemp, comm, ik_ibz
1163  integer :: ieh, ib_eph
1164  integer :: ii, jj, time_opt, isym_k, trev_k
1165  integer :: my_rank, nprocs, my_spin, my_ik
1166  real(dp) :: omega!, time_step!, time_max
1167  real(dp) :: cpu, wall, gflops, cpu_kloop, wall_kloop, gflops_kloop
1168  real(dp) :: eig_nk, sp_func, wr_step
1169  real(dp) :: integration, mu_e, max_occ, fact0, fact
1170  character(len=500) :: msg
1171 !arrays
1172  real(dp),allocatable :: kernel(:), dfdw_acc(:),Aw(:),Aw_l0(:),dfdw_l0(:)!test_Aw(:), test_dfdw(:)
1173  real(dp) :: int_Aw, int_dfdw, dfdw
1174  real(dp) :: vr(3), vv_tens(3,3), S(3,3), wtk!, spfunc
1175 ! real(dp), allocatable :: onsager_coeff(:,:)
1176  real(dp) :: work_33(3,3),l0inv_33nw(3,3,2)
1177  real(dp) :: Tkelv
1178 
1179 
1180 !************************************************************************
1181 
1182  comm = self%comm
1183  my_rank = xmpi_comm_rank(comm); nprocs = xmpi_comm_size(comm)
1184 
1185  call wrtout(std_out, " Computing conductivity using Kubo-Greenwood method.")
1186  call cwtime(cpu_kloop, wall_kloop, gflops_kloop, "start")
1187 
1188 
1189  ABI_MALLOC(kernel, (self%nwr))
1190 ! ABI_MALLOC(test_Aw, (self%ntemp))
1191 ! ABI_MALLOC(test_dfdw, (self%ntemp))
1192  ABI_MALLOC(Aw, (self%nwr))
1193  ABI_MALLOC(dfdw_acc, (self%nwr))
 1194  ABI_CALLOC(Aw_l0, (self%ntemp))   ! zero-initialized: these arrays are accumulated below
 1195  ABI_CALLOC(dfdw_l0, (self%ntemp))
1196 
1197  ABI_CALLOC(self%l0, (3, 3, 2, self%nsppol, self%ntemp))
1198  ABI_CALLOC(self%l1, (3, 3, 2, self%nsppol, self%ntemp))
1199  ABI_CALLOC(self%l2, (3, 3, 2, self%nsppol, self%ntemp))
1200  ABI_CALLOC(self%seebeck, (3, 3, 2, self%nsppol, self%ntemp))
1201  ABI_CALLOC(self%kappa, (3, 3, 2, self%nsppol, self%ntemp))
1202 
1203 
1204 
1205  ABI_CALLOC(self%mobility_mu, (3, 3, 2, self%nsppol, self%ntemp))
1206  ABI_CALLOC(self%conductivity_mu, (3, 3, 2, self%nsppol, self%ntemp))
1207  ABI_CALLOC(self%print_dfdw, (self%nwr, self%ntemp))
1208 
1209  ABI_MALLOC(self%transport_mu_e, (self%ntemp))
1210 
1211  ABI_CALLOC(self%n_ehst, (2, self%nsppol, self%ntemp))
1212  self%transport_fermie = dtset%eph_fermie
1213  self%transport_extrael = dtset%eph_extrael
1214  self%transport_mu_e = self%mu_e
1215  if (self%transport_fermie /= zero) self%transport_mu_e = self%transport_fermie
1216 
1217  if (self%transport_fermie == zero .and. self%transport_extrael /= self%eph_extrael) then
1218 
1219    if (self%transport_extrael /= self%eph_extrael) then
1220      write(msg,'(2(a,e18.8),3a)') &
1221        ' extrael from SIGEPH: ',self%transport_extrael, ' and input file: ',self%eph_extrael, "differ", ch10, &
1222        ' Will recompute the chemical potential'
1223      call wrtout(std_out, msg)
1224    end if
1225 
1226    ! Compute Fermi level for different T values.
1227    call ebands_get_muT_with_fd(self%ebands, self%ntemp, self%kTmesh, dtset%spinmagntarget, dtset%prtvol, self%transport_mu_e, comm)
1228  end if
1229 
1230  call ebands_get_carriers(self%ebands, self%ntemp, self%kTmesh, self%transport_mu_e, self%n_ehst)
1231 
1232  time_opt =0
1233  cnt = 0
1234  do my_spin=1,self%my_nspins
1235    spin = self%my_spins(my_spin)
1236    do my_ik=1,self%my_nkcalc
1237      ikcalc= self%my_ikcalc(my_ik)
1238      !if (ikcalc > 1) cycle
1239      ik_ibz = self%kcalc2ibz(ikcalc, 1)
1240      isym_k = self%kcalc2ibz(ikcalc, 2)
1241      trev_k = self%kcalc2ibz(ikcalc, 6)
1242 
1243      wtk = self%ebands%wtk(ik_ibz)
1244      S = transpose(cryst%symrel_cart(:,:,isym_k))
1245 
1246      nbands = self%nbcalc_ks(ikcalc, spin)
1247      call cwtime(cpu, wall, gflops, "start")
1248 
1249      do ib=self%bmin,self%bmax
1250        !if (ib > self%bmin) cycle ! MG HACK To be able to run tests quickly.
1251        ib_eph = ib - self%bmin + 1
1252        eig_nk = self%ebands%eig(ib, ik_ibz, spin)
1253        wr_step = self%wrmesh_b(2,ib_eph,my_ik,spin) - self%wrmesh_b(1,ib_eph,my_ik,spin)
1254        vr(:) = self%vbks(:, ib, ik_ibz, spin)
1255        ! Store outer product (v_bks x v_bks) in vv_tens. This part does not depend on T and irta.
1256        do ii=1,3
1257          do jj=1,3
1258            vv_tens(ii, jj) = vr(ii) * vr(jj)
1259          end do
1260        end do
 1261        ! Symmetrize the velocity outer-product tensor
1262        vv_tens = cryst%symmetrize_cart_tens33(vv_tens, time_opt)
1263          do itemp=1,self%ntemp
1264      !if (itemp > 1) cycle
1265 
1266          Tkelv = self%kTmesh(itemp) / kb_HaK; if (Tkelv < one) Tkelv = one
1267            do iw=1, self%nwr
1268 !             if (mod(iw, self%wt_comm%nproc) /= self%wt_comm%me) cycle  ! MPI parallelism over freqs
1269 
1270                 !  Preparing all elements needed for conductivity
1271                 omega = self%wrmesh_b(iw,ib_eph,my_ik,my_spin)
1272                 sp_func = -aimag (self%gw_vals(iw, itemp, ib_eph, my_ik, my_spin) ) / pi
1273 !                test_Aw(itemp) = test_Aw(itemp) + sp_func
1274                 dfdw = occ_dfde(omega, self%kTmesh(itemp), self%mu_e(itemp))
1275                 self%print_dfdw(iw,itemp) = dfdw
1276 !                test_dfdw(itemp) = test_dfdw(itemp) + dfdw
1277                 kernel(iw) = - dfdw * sp_func**2
1278                 Aw(iw) = sp_func**2
1279                 dfdw_acc(iw) = dfdw
1280 
1281            end do !iw
1282          mu_e = self%transport_mu_e(itemp)
1283          ieh = 2; if (eig_nk >= mu_e) ieh = 1
1284          integration = simpson( wr_step, kernel)
1285          int_Aw = simpson(wr_step,Aw)
1286          int_dfdw = simpson(wr_step,dfdw_acc)
1287          ! Calculation of the conductivity
1288          self%l0( :, :, ieh, spin, itemp ) = self%l0( :, :, ieh, spin, itemp ) + integration*vv_tens(:,:)*wtk
1289          Aw_l0(itemp) = Aw_l0(itemp) + int_Aw*wtk*vv_tens(1,1)
1290          dfdw_l0(itemp) = dfdw_l0(itemp) + int_dfdw*wtk*vv_tens(1,1)
1291          call inv33(self%l0(:, :, ieh, spin, itemp), work_33)
1292 
1293          l0inv_33nw(:,:,ieh) = work_33
1294          self%l1( :, :, ieh, spin, itemp ) = self%l0( :, :, ieh, spin, itemp )*(eig_nk - self%mu_e(itemp))
1295          self%l2( :, :, ieh, spin, itemp ) = self%l1( :, :, ieh, spin, itemp )*(eig_nk - self%mu_e(itemp))
1296 
1297          self%seebeck(:,:,ieh,spin,itemp) = matmul(work_33, self%l1(:,:,ieh,spin,itemp)) / Tkelv
1298 
1299          work_33 = self%l1(:, :, ieh, spin, itemp)
1300          work_33 = self%l2(:, :, ieh, spin, itemp) - matmul(work_33, matmul(l0inv_33nw(:, :, ieh), work_33))
1301          self%kappa(:,:,ieh,spin,itemp) = work_33 / Tkelv
1302          !self%conductivity_mu( :, :, ieh, spin, itemp ) = self%conductivity_mu( :, :, ieh, spin, itemp ) + integration*vv_tens(:,:)*wtk
1303 !         call xmpi_sum(self%conductivity_mu(:, :, ieh, spin, itemp) , self%wt_comm%value, ierr)
1304          end do ! itemp
1305 
1306      end do !ib
1307 
1308    end do ! my_ik
1309    ! Collect data if k-points parallelism.
1310    !call xmpi_sum(self%conductivity_mu , self%kcalc_comm%value, ierr)
1311  end do !my_spin
1312 
1313  max_occ = two / (self%nspinor * self%nsppol)
1314  fact0 = max_occ * (siemens_SI / Bohr_meter / cryst%ucvol) / 100
1315  self%conductivity_mu = fact0 * self%l0  ! siemens cm^-1
1316  self%seebeck = - volt_SI  * max_occ * self%seebeck
1317  self%kappa = + volt_SI**2 * fact0 * self%kappa
1318 
1319  do itemp=1, self%ntemp
1320    Tkelv = self%kTmesh(itemp) / kb_HaK; if (Tkelv < one) Tkelv = one
1321  end do
1322 
1323  ! Scale by the carrier concentration
1324  fact = 100**3 / e_Cb
1325  do my_spin=1,self%my_nspins
1326    spin = self%my_spins(my_spin)
1327      do itemp=1,self%ntemp
1328        do ieh=1,2 ! e/h
1329          call safe_div(fact * self%conductivity_mu(:,:,ieh,spin,itemp), &
1330                        self%n_ehst(ieh, spin, itemp) / cryst%ucvol / Bohr_meter**3, zero, &
1331                        self%mobility_mu(:,:,ieh,spin,itemp))
1332        end do
1333      end do
1334    end do
1335 
1336 
1337  call cwtime_report(" cumulant_kubo_transport", cpu_kloop, wall_kloop, gflops_kloop)
1338 
1339  ABI_SFREE(kernel)
1340 ! ABI_SFREE(test_Aw)
1341 ! ABI_SFREE(test_dfdw)
1342  ABI_SFREE(Aw)
1343  ABI_SFREE(dfdw_acc)
1344  ABI_SFREE(Aw_l0)
1345  ABI_SFREE(dfdw_l0)
1346 
1347 end subroutine cumulant_kubo_transport

m_cumulant/cumulant_ncwrite [ Functions ]


NAME

 cumulant_ncwrite

FUNCTION

  Write the cumulant results (e.g. the spectral functions and, optionally, the transport tensors) to a netcdf file.

INPUTS

 path=Filename of the output netcdf file.
 cryst<crystal_t>=Crystalline structure
 dtset<dataset_type>=All input variables for this dataset.
 ncid=Netcdf file handle.

SOURCE

1449 !subroutine cumulant_ncwrite(self, path, cryst, ebands, dtset)
1450 subroutine cumulant_ncwrite(self, path, cryst, dtset)
1451 
1452 !Arguments --------------------------------------
1453  class(cumulant_t),intent(in) :: self
1454  type(crystal_t),intent(in) :: cryst
1455 ! type(ebands_t),intent(in) :: ebands
1456  type(dataset_type),intent(in) :: dtset
1457  character(len=*),intent(in) :: path
1458 
1459 !Local variables --------------------------------
1460  integer,parameter :: master = 0
1461  integer :: ncerr, ncid, ikcalc, spin, my_ik, ib, itemp, ntemp
1462  integer :: ii, ieh
1463  real(dp) :: cpu, wall, gflops
1464  character(len=1000) :: msg
1465 
1466 !************************************************************************
1467 
1468  !comm = self%comm my_rank =
1469  call wrtout([std_out, ab_out], ch10//sjoin("- Writing cumulant results to:", path))
1470  call cwtime(cpu, wall, gflops, "start")
1471 
 1472  ! Only one proc creates the file, writes the structure and defines the basic dimensions.
1473  ! Then we reopen the file in MPI-IO mode.
1474 
1475  if (xmpi_comm_rank(self%comm) == master) then
1476 
1477    NCF_CHECK(nctk_open_create(ncid, path, xmpi_comm_self))
1478 
1479    ! Write to netcdf file
1480    NCF_CHECK(cryst%ncwrite(ncid))
1481    ! FIXME: Cannot write ebands because it crashes in
1482    !   k_dependent = "no"; if (any(ebands%nband(1) /= ebands%nband)) k_dependent = "yes"
1483    ! Should understand why!
1484    !NCF_CHECK(ebands_ncwrite(ebands, ncid))
1485 
1486    ! Add cumulant dimensions.
1487    ncerr = nctk_def_dims(ncid, [ &
1488      nctkdim_t("nkcalc", self%nkcalc), nctkdim_t("max_nbcalc", self%max_nbcalc), &
1489      nctkdim_t("nsppol", self%nsppol), nctkdim_t("ntemp", self%ntemp), &
1490      nctkdim_t("nqbz", self%nsppol), nctkdim_t("nqibz", self%ntemp), &
1491      nctkdim_t("nwr", self%nwr)], &
1492      defmode=.True.)
1493    NCF_CHECK(ncerr)
1494    ncerr = nctk_def_iscalars(ncid, [character(len=nctk_slen) :: &
1495      "eph_task",  "nbsum", "bsum_start", "bsum_stop", "symdynmat", &
1496      "ph_intmeth", "eph_intmeth", "qint_method", "eph_transport", &
1497      "imag_only", "symv1scf", "dvdb_add_lr", "mrta", "ibte_prep"])
1498    NCF_CHECK(ncerr)
1499    ncerr = nctk_def_dpscalars(ncid, [character(len=nctk_slen) :: &
1500      "eta", "wr_step", "eph_fsewin", "eph_fsmear", "eph_extrael", "eph_fermie", "ph_wstep", "ph_smear", "eph_phwinfact"])
1501    NCF_CHECK(ncerr)
1502 
1503 
1504    ! Define arrays. Note nkcalc instead of my_nkcalc
1505    ncerr = nctk_def_arrays(ncid, [ &
1506      nctkarr_t("bstart_ks", "int", "nkcalc, nsppol"), &
1507      nctkarr_t("nbcalc_ks", "int", "nkcalc, nsppol"), &
1508      nctkarr_t("ngqpt", "dp", "three"), &
1509      nctkarr_t("kcalc", "dp", "three, nkcalc"), &
1510      nctkarr_t("kcalc2ibz", "dp", " nkcalc, six"), &
1511      nctkarr_t("kTmesh", "dp", "ntemp"), &
1512      !nctkarr_t("mu_e", "dp", "ntemp"), &
1513      nctkarr_t("wrmesh_b", "dp", "nwr, max_nbcalc, nkcalc, nsppol"), &
1514      !nctkarr_t("vals_wr", "dp", "two, nwr, ntemp, max_nbcalc, nkcalc, nsppol"), &
1515      nctkarr_t("gw_vals", "dp", "two, nwr, ntemp, max_nbcalc, nkcalc, nsppol"), &
1516      nctkarr_t("ks_enes", "dp", "max_nbcalc, nkcalc, nsppol"), &
1517      nctkarr_t("dw_vals", "dp", "ntemp, max_nbcalc, nkcalc, nsppol"), &
1518      nctkarr_t('dfdw',"dp", "nwr, ntemp"), &
1519      nctkarr_t('conductivity_mu',"dp", "three, three, two, nsppol, ntemp"), &
1520      nctkarr_t('mobility_mu', "dp", "three, three, two, nsppol, ntemp"), &
1521      nctkarr_t('seebeck',"dp", "three, three, two, nsppol, ntemp"), &
1522      nctkarr_t('kappa',"dp", "three, three, two, nsppol, ntemp") &
1523    ])
1524    NCF_CHECK(ncerr)
1525 
1526    if (self%debug == 1) then
1527      ncerr = nctk_def_arrays(ncid, [ &
1528      nctkarr_t("time_mesh", "dp", "nwr, ntemp, max_nbcalc, nkcalc, nsppol"), &
1529      nctkarr_t("ct_vals", "dp","two, nwr, ntemp, max_nbcalc, nkcalc, nsppol"), &
1530      nctkarr_t("c1", "dp","two, nwr, ntemp, max_nbcalc, nkcalc, nsppol"), &
1531      nctkarr_t("c2", "dp","two, nwr, ntemp, max_nbcalc, nkcalc, nsppol"), &
1532      nctkarr_t("c3", "dp","two, nwr, ntemp, max_nbcalc, nkcalc, nsppol"), &
1533      nctkarr_t("gt_vals", "dp","two, nwr, ntemp, max_nbcalc, nkcalc, nsppol") ] )
1534      NCF_CHECK(ncerr)
1535    endif
1536 
1537    !NCF_CHECK(nf90_put_var(ncid, nctk_idname(ncid, "eta"), self%ieta))
1538    NCF_CHECK(nf90_put_var(ncid, nctk_idname(ncid, "nbsum"), self%nbsum))
1539    NCF_CHECK(nf90_put_var(ncid, nctk_idname(ncid, "ngqpt"), self%ngqpt))
1540    NCF_CHECK(nf90_put_var(ncid, nctk_idname(ncid, "kcalc"), self%kcalc))
1541    NCF_CHECK(nf90_put_var(ncid, nctk_idname(ncid, "kTmesh"), self%kTmesh))
1542    NCF_CHECK(nf90_put_var(ncid, nctk_idname(ncid, "nbcalc_ks"), self%nbcalc_ks))
1543    NCF_CHECK(nf90_put_var(ncid, nctk_idname(ncid, "bstart_ks"), self%bstart_ks))
1544    NCF_CHECK(nf90_put_var(ncid, nctk_idname(ncid, "kcalc2ibz"), self%kcalc2ibz))
1545    NCF_CHECK(nf90_put_var(ncid, nctk_idname(ncid, "dfdw"), self%print_dfdw))
1546    ! FIXME This part is wrong since these arrays are MPI distributed
1547    !NCF_CHECK(nf90_put_var(ncid, nctk_idname(ncid, "conductivity_mu"), self%conductivity_mu))
1548    !NCF_CHECK(nf90_put_var(ncid, nctk_idname(ncid, "mobility_mu"), self%mobility_mu))
1549    !NCF_CHECK(nf90_put_var(ncid, nctk_idname(ncid, "seebeck"), self%seebeck))
1550    !NCF_CHECK(nf90_put_var(ncid, nctk_idname(ncid, "kappa"), self%kappa))
1551    !NCF_CHECK(nf90_put_var(ncid, nctk_idname(ncid, "wrmesh_b"), self%wrmesh_b))
1552    !NCF_CHECK(nf90_put_var(ncid, nctk_idname(ncid, "vals_wr"), c2r(self%vals_wr)))
1553    !NCF_CHECK(nf90_put_var(ncid, nctk_idname(ncid, "ks_enes"), self%e0vals))
1554 
1555    NCF_CHECK(nf90_close(ncid))
1556  end if ! master
1557 
1558  ! Barrier to avoid race conditions.
1559  !call wrtout(std_out, "before barrier")
1560  call xmpi_barrier(self%comm)
1561 
1562  ! Only the procs in the ncwrite_comm communicator write to disk.
1563  if (self%ncwrite_comm%value == xmpi_comm_null) goto 100
1564 
1565  ! open file for parallel-IO mode inside comm. All procs in ncwrite_comm enter this part.
1566  call wrtout(std_out, sjoin(" Performing parallel IO with:", itoa(self%ncwrite_comm%nproc), "procs"))
1567  NCF_CHECK(nctk_open_modify(ncid, path, self%ncwrite_comm%value))
1568 
1569  NCF_CHECK(nctk_set_datamode(ncid))
1570 
 1571  ! Activate collective IO.
1572  ncerr = nf90_var_par_access(ncid, nctk_idname(ncid, "gw_vals"), nf90_collective)
1573  NCF_CHECK(ncerr)
1574 
1575  ncerr = nf90_var_par_access(ncid, nctk_idname(ncid, "wrmesh_b"), nf90_collective)
1576  NCF_CHECK(ncerr)
1577 
1578  ncerr = nf90_var_par_access(ncid, nctk_idname(ncid, "ks_enes"), nf90_collective)
1579  NCF_CHECK(ncerr)
1580 
1581  ncerr = nf90_var_par_access(ncid, nctk_idname(ncid, "dw_vals"), nf90_collective)
1582  NCF_CHECK(ncerr)
1583 
1584  if (any(abs(dtset%sigma_erange) > zero)) then
1585    ncerr = nf90_var_par_access(ncid, nctk_idname(ncid, "dfdw"), nf90_collective)
1586    NCF_CHECK(ncerr)
1587 
1588    ncerr = nf90_var_par_access(ncid, nctk_idname(ncid, "conductivity_mu"), nf90_collective)
1589    NCF_CHECK(ncerr)
1590 
1591    ncerr = nf90_var_par_access(ncid, nctk_idname(ncid, "mobility_mu"), nf90_collective)
1592    NCF_CHECK(ncerr)
1593 
1594    ncerr = nf90_var_par_access(ncid, nctk_idname(ncid, "seebeck"), nf90_collective)
1595    NCF_CHECK(ncerr)
1596 
1597    ncerr = nf90_var_par_access(ncid, nctk_idname(ncid, "kappa"), nf90_collective)
1598    NCF_CHECK(ncerr)
1599 
1600 
1601  end if
1602 
1603  if (self%debug == 1) then
1604    ncerr = nf90_var_par_access(ncid, nctk_idname(ncid, "time_mesh"), nf90_collective)
1605    NCF_CHECK(ncerr)
1606 
1607    ncerr = nf90_var_par_access(ncid, nctk_idname(ncid, "ct_vals"), nf90_collective)
1608    NCF_CHECK(ncerr)
1609 
1610    ncerr = nf90_var_par_access(ncid, nctk_idname(ncid, "c1"), nf90_collective)
1611    NCF_CHECK(ncerr)
1612 
1613    ncerr = nf90_var_par_access(ncid, nctk_idname(ncid, "c2"), nf90_collective)
1614    NCF_CHECK(ncerr)
1615 
1616    ncerr = nf90_var_par_access(ncid, nctk_idname(ncid, "c3"), nf90_collective)
1617    NCF_CHECK(ncerr)
1618 
1619    ncerr = nf90_var_par_access(ncid, nctk_idname(ncid, "gt_vals"), nf90_collective)
1620    NCF_CHECK(ncerr)
1621  endif
1622  spin = self%my_spins(1)
1623  ikcalc = self%my_ikcalc(1) ! index of the first kcalc treated by this rank.
1624 
1625  ! Start to write my **contiguous block** of kpoints from this **global** location
1626  ! Each MPI proc writes my_nkcalc entries.
1627 
1628  ncerr = nf90_put_var(ncid, nctk_idname(ncid, "gw_vals"), c2r(self%gw_vals), &
1629                       start=[1,1,1,1,ikcalc,spin], &
1630                       count=[2, self%nwr, self%ntemp, self%max_nbcalc, self%my_nkcalc, self%my_nspins])
1631  NCF_CHECK(ncerr)
1632 
1633  ncerr = nf90_put_var(ncid, nctk_idname(ncid, "wrmesh_b"), self%wrmesh_b, &
1634                       start=[1,1,ikcalc,spin], &
1635                       count=[self%nwr, self%max_nbcalc, self%my_nkcalc, self%my_nspins])
1636  NCF_CHECK(ncerr)
1637 
1638  ncerr = nf90_put_var(ncid, nctk_idname(ncid, "ks_enes"), self%e0vals, &
1639                       start=[1,ikcalc,spin], &
1640                       count=[self%max_nbcalc, self%my_nkcalc, self%my_nspins])
1641  NCF_CHECK(ncerr)
1642 
1643  ncerr = nf90_put_var(ncid, nctk_idname(ncid, "dw_vals"), self%dw_vals, &
1644                       start=[1,1,ikcalc,spin], &
1645                       count=[self%ntemp, self%max_nbcalc, self%my_nkcalc, self%my_nspins])
1646  NCF_CHECK(ncerr)
1647 
1648 
1649  if (any(abs(dtset%sigma_erange) > zero)) then
1650 
1651  ncerr = nf90_put_var(ncid, nctk_idname(ncid, "seebeck"), self%seebeck, &
1652                       start=[1,1,1,spin,1], &
1653                       count=[3, 3, 2, self%my_nspins , self%ntemp])
1654  NCF_CHECK(ncerr)
1655 
1656  ncerr = nf90_put_var(ncid, nctk_idname(ncid, "kappa"), self%kappa, &
1657                       start=[1,1,1,spin,1], &
1658                       count=[3, 3, 2, self%my_nspins , self%ntemp])
1659  NCF_CHECK(ncerr)
1660 
1661 
1662  ncerr = nf90_put_var(ncid, nctk_idname(ncid, "mobility_mu"), self%mobility_mu, &
1663                       start=[1,1,1,spin,1], &
1664                       count=[3, 3, 2, self%my_nspins , self%ntemp])
1665  NCF_CHECK(ncerr)
1666 
1667  ncerr = nf90_put_var(ncid, nctk_idname(ncid, "conductivity_mu"), self%conductivity_mu, &
1668                       start=[1,1,1,spin,1], &
1669                       count=[3, 3, 2, self%my_nspins , self%ntemp])
1670  NCF_CHECK(ncerr)
1671 
1672  end if
1673 
1674 
1675  if (self%debug == 1) then
1676 
1677    ncerr = nf90_put_var(ncid, nctk_idname(ncid, "time_mesh"), self%time_mesh, &
1678                         start=[1,1,1,ikcalc,spin], &
1679                         count=[self%nwr, self%ntemp, self%max_nbcalc, self%my_nkcalc, self%my_nspins])
1680    NCF_CHECK(ncerr)
1681 
1682    ncerr = nf90_put_var(ncid, nctk_idname(ncid, "ct_vals"), c2r(self%ct_vals), &
1683                         start=[1,1,1,1,ikcalc,spin], &
1684                         count=[2, self%nwr, self%ntemp, self%max_nbcalc, self%my_nkcalc, self%my_nspins])
1685    NCF_CHECK(ncerr)
1686 
1687    ncerr = nf90_put_var(ncid, nctk_idname(ncid, "c1"), c2r(self%c1), &
1688                         start=[1,1,1,1,ikcalc,spin], &
1689                         count=[2, self%nwr, self%ntemp, self%max_nbcalc, self%my_nkcalc, self%my_nspins])
1690    NCF_CHECK(ncerr)
1691 
1692    ncerr = nf90_put_var(ncid, nctk_idname(ncid, "c2"), c2r(self%c2), &
1693                         start=[1,1,1,1,ikcalc,spin], &
1694                         count=[2, self%nwr, self%ntemp, self%max_nbcalc, self%my_nkcalc, self%my_nspins])
1695    NCF_CHECK(ncerr)
1696 
1697    ncerr = nf90_put_var(ncid, nctk_idname(ncid, "c3"), c2r(self%c3), &
1698                         start=[1,1,1,1,ikcalc,spin], &
1699                         count=[2, self%nwr, self%ntemp, self%max_nbcalc, self%my_nkcalc, self%my_nspins])
1700    NCF_CHECK(ncerr)
1701 
1702    ncerr = nf90_put_var(ncid, nctk_idname(ncid, "gt_vals"), c2r(self%gt_vals), &
1703                         start=[1,1,1,1,ikcalc,spin], &
1704                         count=[2, self%nwr, self%ntemp, self%max_nbcalc, self%my_nkcalc, self%my_nspins])
1705    NCF_CHECK(ncerr)
1706  endif
1707 
1708  NCF_CHECK(nf90_close(ncid))
1709 
1710  ! Write to ab_out for automatic testing.
1711  if (xmpi_comm_rank(self%comm) == master .and. is_open(ab_out)) then
1712    write(ab_out, "(/,a)")" Print first 10 frequencies in gw_vals array (re-im) for testing purposes:"
1713    write(ab_out, "(2(a, i0))")" spin: ", spin, ", ikcalc: ", ikcalc
1714    my_ik = 1
1715    do ib=1,self%nbcalc_ks(ikcalc, spin)
1716      do itemp=1,self%ntemp
 1717        write(ab_out, "(2(a,i0))")" gw_vals for itemp: ", itemp, ", ib: ", ib
1718        write(ab_out, "(*(es13.5))")dble(self%gw_vals(1:min(10, self%nwr), itemp, ib, my_ik, spin))
1719        write(ab_out, "(*(es13.5))")aimag(self%gw_vals(1:min(10, self%nwr), itemp, ib, my_ik, spin))
1720      end do
 1721    end do
1722  end if
1723  if (xmpi_comm_rank(self%comm) == master .and. is_open(ab_out) .and. any(abs(dtset%sigma_erange) > zero)) then
1724    msg = sjoin(" Print first 5 temperatures of diagonal mobility_mu", &
1725                " > 1e-6 (with ieh as electrons or holes) for testing purposes:")
1726    write(ab_out, "(/,a)") trim(msg)
 1727    write(ab_out, "(a, i0)")" spin: ", spin
1728    if (self%ntemp > 5) then
1729      ntemp = 5
1730    else
1731      ntemp = self%ntemp
1732    end if
1733    do ieh=1,2
1734      do ii=1,3
1735        do itemp=1,ntemp
1736          if (dble(self%mobility_mu(ii, ii, ieh, spin, itemp)) > 1e-6) then
1737            write(ab_out, "(3(a,i0))")" mobility_mu for itemp:", itemp, " ieh: ", ieh, " xyz: ", ii
1738            write(ab_out, "(*(es13.5))")dble(self%mobility_mu(ii, ii, ieh, spin, itemp))
1739          end if
1740        end do
1741      end do
1742    end do
1743  end if
1744 
1745 
1746  100 call cwtime_report(" cumulant_ncwrite", cpu, wall, gflops)
1747 
1748 end subroutine cumulant_ncwrite
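
 Each rank owns a contiguous range of k-points (and possibly spins), so a single
 nf90_put_var call with suitable start/count arguments writes the whole local block at
 the correct global location, while the nf90_var_par_access calls at the top of this
 routine enable collective parallel I/O. The toy program below is not part of ABINIT:
 the file name, variable names and sizes are made up, and it is a minimal serial sketch
 of the start/count hyperslab pattern only (the routine above applies the same idea to
 the trailing k-point/spin dimensions of higher-rank arrays).

   program putvar_hyperslab_sketch
     use netcdf
     implicit none
     integer,parameter :: nwr = 4, nkcalc = 6        ! global dimensions (hypothetical)
     integer,parameter :: ikcalc = 3, my_nkcalc = 2  ! this "rank" owns k-points 3 and 4
     integer :: ncid, dimids(2), varid, ierr
     real :: slab(nwr, my_nkcalc)

     slab = 1.0  ! data owned by this rank

     ierr = nf90_create("sketch.nc", nf90_clobber, ncid)
     if (ierr /= nf90_noerr) stop 1
     ierr = nf90_def_dim(ncid, "nwr", nwr, dimids(1))
     if (ierr /= nf90_noerr) stop 1
     ierr = nf90_def_dim(ncid, "nkcalc", nkcalc, dimids(2))
     if (ierr /= nf90_noerr) stop 1
     ierr = nf90_def_var(ncid, "vals", nf90_real, dimids, varid)
     if (ierr /= nf90_noerr) stop 1
     ierr = nf90_enddef(ncid)
     if (ierr /= nf90_noerr) stop 1

     ! Write only the slab owned by this "rank": all nwr frequencies,
     ! k-points ikcalc .. ikcalc + my_nkcalc - 1 of the global nkcalc axis.
     ierr = nf90_put_var(ncid, varid, slab, start=[1, ikcalc], count=[nwr, my_nkcalc])
     if (ierr /= nf90_noerr) stop 1

     ierr = nf90_close(ncid)
     if (ierr /= nf90_noerr) stop 1
   end program putvar_hyperslab_sketch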

m_cumulant/cumulant_sigmaph_ncread [ Functions ]

[ Top ] [ m_cumulant ] [ Functions ]

NAME

 cumulant_sigmaph_ncread

FUNCTION

  Read the SIGEPH.nc file produced by the e-ph self-energy calculation
  (dimensions, k-points, temperatures and tables needed by the cumulant driver).

INPUTS

 path=Path to the SIGEPH.nc file.
 comm=MPI communicator.
 ncid=Netcdf file handle (output: the file is opened here and left open for later reads).

SOURCE

1367 subroutine cumulant_sigmaph_ncread(self, path, ncid, comm)
1368 
1369 !Arguments --------------------------------------
1370  class(cumulant_t),intent(inout) :: self
1371  character(len=fnlen),intent(in) :: path
1372  integer,intent(in) :: comm
1373  integer,intent(out) :: ncid
1374 
1375 !Local variables --------------------------------
1376  integer :: ierr
1377  real(dp) :: cpu, wall, gflops
1378  character(len=1000) :: msg
1379  !real(dp), allocatable :: vals_wr(:,:,:,:,:,:),vals_e0ks(:,:,:,:,:)
1380 
1381 !************************************************************************
1382 
1383  call cwtime(cpu, wall, gflops, "start")
1384 
1385  ! Open netcdf file
1386  ierr = 0
1387 
1388  if (.not. file_exists(path)) then
 1389    msg = sjoin("Cannot find file:", path)
 1390    ABI_ERROR(msg)
1391  end if
1392 
1393  call cwtime(cpu, wall, gflops, "start")
1394  NCF_CHECK(nctk_open_read(ncid, path, comm))
1395 
1396  NCF_CHECK(nctk_get_dim(ncid, "nkcalc", self%nkcalc))
1397  NCF_CHECK(nctk_get_dim(ncid, "max_nbcalc", self%max_nbcalc))
1398  NCF_CHECK(nctk_get_dim(ncid, "nsppol", self%nsppol))
1399  NCF_CHECK(nctk_get_dim(ncid, "number_of_spinor_components", self%nspinor))
1400  NCF_CHECK(nctk_get_dim(ncid, "ntemp", self%ntemp))
1401  NCF_CHECK(nctk_get_dim(ncid, "nwr", self%nwr))
1402  NCF_CHECK(nctk_get_dim(ncid, "nqbz", self%nqbz))
1403  NCF_CHECK(nctk_get_dim(ncid, "nqibz", self%nqibz))
1404  NCF_CHECK(nctk_get_dim(ncid, "natom3", self%natom3))
1405 
1406  ABI_MALLOC(self%kcalc, (3, self%nkcalc))
1407  ABI_MALLOC(self%nbcalc_ks, (self%nkcalc, self%nsppol))
1408  ABI_MALLOC(self%bstart_ks, (self%nkcalc, self%nsppol))
1409  ABI_MALLOC(self%kcalc2ibz, (self%nkcalc, 6))
1410  ABI_MALLOC(self%kTmesh, (self%ntemp))
1411  !ABI_MALLOC(self%wrmesh_b, (self%nwr, self%max_nbcalc, self%nkcalc, self%nsppol))
1412  !ABI_MALLOC(self%vals_wr, ( self%nwr, self%ntemp, self%max_nbcalc, self%nkcalc, self%nsppol))
1413  !ABI_MALLOC(self%vals_e0ks, ( self%ntemp, self%max_nbcalc, self%nkcalc, self%nsppol))
1414  !ABI_MALLOC(self%e0vals, (self%max_nbcalc, self%nkcalc, self%nsppol))
1415  NCF_CHECK(nf90_get_var(ncid, vid("ngqpt"), self%ngqpt))
1416  NCF_CHECK(nf90_get_var(ncid, vid("kcalc"), self%kcalc))
1417  NCF_CHECK(nf90_get_var(ncid, vid("kTmesh"), self%kTmesh))
1418  NCF_CHECK(nf90_get_var(ncid, vid("nbcalc_ks"), self%nbcalc_ks))
1419  NCF_CHECK(nf90_get_var(ncid, vid("bstart_ks"), self%bstart_ks))
1420  NCF_CHECK(nf90_get_var(ncid, vid("kcalc2ibz"), self%kcalc2ibz))
1421 
1422  call cwtime_report(" sigmaph_ncread", cpu, wall, gflops)
1423 
1424 contains
1425  integer function vid(vname)
1426    character(len=*),intent(in) :: vname
1427    vid = nctk_idname(ncid, vname)
1428 end function vid
1429 
1430 end subroutine cumulant_sigmaph_ncread

m_cumulant/cumulant_t [ Types ]

[ Top ] [ m_cumulant ] [ Types ]

NAME

 cumulant_t

FUNCTION

 Container for cumulant and transport quantities

SOURCE

 69  type,public :: cumulant_t
 70 
 71   integer :: my_nspins
 72    ! Number of spins treated by this MPI rank
 73 
 74   integer :: nkcalc
 75    ! Total number of k-points computed. global variable
 76 
 77   integer :: my_nkcalc
 78    ! Number of k-points treated by this MPI rank i.e. local variable.
 79 
 80   integer :: max_nbcalc
 81    ! Maximum number of bands computed (max over nkcalc and spin). global variable
 82 
 83   integer :: nsppol
 84    ! Number of independent spin polarizations.
 85 
 86   integer :: nspinor
 87    ! Number of spinor components.
 88 
 89   integer :: nqbz
 90    ! Number of q-points in the (dense) BZ for sigma integration
 91 
 92   integer :: nqibz
 93    ! Number of q-points in the (dense) IBZ for sigma integration
 94 
 95   integer :: nbsum
 96    ! Total number of bands used in sum over states without taking into account MPI distribution.
 97 
 98   integer :: natom3
 99    ! 3 * natom.
100 
101   integer :: nwr
102    ! Number of frequency points along the real axis for Sigma(w) and spectral function A(w)
103    ! Read from SIGEPH.nc
104    ! Odd number so that the mesh is centered on the KS energy.
105    ! The spectral function is computed only if nwr > 0 (taken from dtset%nfreqsp)
106 
107   integer :: nwr_ce
 108    ! Number of frequency points along the real axis for the mesh used in the
 109    ! cumulant-expansion (ce) step (see wrmesh_ce).
 110    ! Odd number so that the mesh is centered on the KS energy.
 111    ! Same conventions as nwr above.
112 
113   integer :: ntemp
114    ! Number of temperatures.
115 
116   integer :: comm
117    ! MPI communicator
118 
119   !type(xcomm_t) :: all_comm
120 
121   type(xcomm_t) :: spin_comm
122    ! MPI communicator for parallelism over spins (high-level)
123 
124   type(xcomm_t) :: kcalc_comm
125    ! MPI communicator for parallelism over k-points
126 
127   type(xcomm_t) :: wt_comm
128     ! MPI communicator for time/frequency domain (low-level)
129     ! NB: memory is not distributed
130 
131   type(xcomm_t) :: ncwrite_comm
132    ! MPI communicator for parallel netcdf IO used to write results for the different k-points/spins
133 
134   !integer :: coords(5)
135    ! Cartesian coordinates of this processor in the Cartesian grid.
136 
137   ! real(dp) :: wr_step
138    ! Step of the linear mesh along the real axis (Ha units).
139 
140   integer :: ngqpt(3)
141    ! Number of divisions in the Q mesh in the BZ.
142 
143   integer,allocatable :: kcalc2ebands(:,:)
144    ! Mapping ikcalc --> ebands IBZ
 145    ! Note that this array is not necessarily equal to kcalc2ibz computed in sigmaph
146    ! because we may have used sigma_nkpt to downsample the initial nkpt mesh.
147    ! This array is computed in get_ebands and is equal to kcalc2ibz if sigma_nkpt == ngkpt
148 
149    real(dp),allocatable :: linewidths(:,:,:,:,:)
150    ! (ntemp, bmin:bmax, nkpt, nsppol, nrta)
151    ! Linewidth in the IBZ computed in the SERTA/MRTA.
152    ! Non-zero only for the kcalc k-points.
153 
154    real(dp),allocatable :: vbks(:,:,:,:)
 155    ! (3, bmin:bmax, nkpt, nsppol)
 156    ! Band velocity in Cartesian coordinates in the IBZ.
157    ! Non-zero only for the kcalc k-points.
158 
159    integer :: bmin, bmax, bsize
160    ! Only bands between bmin and bmax are considered in the integrals
161    ! as we don't compute linewidths for all states.
162    ! bmin = minval(%bstart_ks); bmax = maxval(%bstop_ks)
 163    ! bsize = bmax - bmin + 1
164 
165    type(ebands_t) :: ebands
 166    ! Band structure object used to compute the transport properties.
 167    ! Allocated using only the bands relevant for transport,
 168    ! including valence states so that different doping levels can be computed.
169 
170   complex(dpc) :: ieta
171    ! Used to shift the poles in the complex plane (Ha units)
172    ! Corresponds to `i eta` term in equations.
173 
174   real(dp) :: tolcum
175 
 176   integer :: debug   ! If 1, additional arrays (time_mesh, ct_vals, c1, c2, c3, gt_vals) are also written to the netcdf file.
177 
178   integer,allocatable :: bstart_ks(:,:)
179    ! bstart_ks(nkcalc, nsppol)
180    ! Initial KS band index included in self-energy matrix elements for each k-point in kcalc.
 181    ! Depends on spin because all degenerate states should be included when symmetries are used.
182 
183   !integer,allocatable :: bstop_ks(:,:)
184    ! bstop_ks(nkcalc, nsppol)
185 
186   integer,allocatable :: nbcalc_ks(:,:)
187    ! nbcalc_ks(nkcalc, nsppol)
188    ! Number of bands included in self-energy matrix elements for each k-point in kcalc.
 189    ! Depends on spin because all degenerate states should be included when symmetries are used.
190 
191   integer,allocatable :: coords_kws(:,:)
192    ! (2, self%nsppol))
193    ! Cartesian coordinates of this processor in the Cartesian grid.
194 
195   integer,allocatable :: my_spins(:)
196    ! my_spins(my_nspins)
197    ! Indirect table giving the spin indices treated by this rank.
 198    ! Used only in the collinear case with nspinor == 1.
199 
200   integer,allocatable :: my_ikcalc(:)
201    ! my_ikcalc(my_nkcalc)
202    ! List of ikcalc indices treated by this pool if k-point parallelism is activated.
203 
204   real(dp),allocatable :: time_mesh(:,:,:,:,:)
 205   ! time_mesh(nwr, ntemp, max_nbcalc, my_nkcalc, nsppol)
206   ! Time mesh in atomic units
207 
208   real(dp),allocatable :: kcalc(:,:)
209    ! kcalc(3, nkcalc)
210    ! List of k-points where the self-energy is computed.
211    ! This array is not MPI-distributed.
212 
213   real(dp),allocatable :: kTmesh(:)
214    ! kTmesh(ntemp)
215    ! List of temperatures (kT units).
216 
217   real(dp),allocatable :: mu_e(:)
218    ! mu_e(ntemp)
219    ! chemical potential of electrons for the different temperatures.
220 
221    real(dp),allocatable :: l0(:,:,:,:,:), l1(:,:,:,:,:), l2(:,:,:,:,:)
222    ! (3, 3, 2, nsppol, ntemp)
 223    ! Onsager coefficients in Cartesian coordinates.
224 
225   integer,allocatable :: kcalc2ibz(:,:)
226    !kcalc2ibz(nkcalc, 6))
227    ! Mapping ikcalc --> IBZ as reported by listkk.
228 
229   real(dp),allocatable :: dw_vals(:,:,:,:)
230    !  dw_vals(ntemp, max_nbcalc, my_nkcalc, nsppol)
231    !  Debye-Waller term (static).
232 
233   real(dp),allocatable :: e0vals(:,:,:)
234    ! (max_nbcalc, my_nkcalc, nsppol)
235    ! KS energies at the calculated states retrieved from EPH output file
236 
237   real(dp),allocatable :: wrmesh_b(:,:,:,:)
238   real(dp),allocatable :: wrmesh_ce(:,:,:,:)
 239   ! wrmesh_b(nwr, max_nbcalc, my_nkcalc, nsppol); wrmesh_ce is the analogous mesh with nwr_ce points.
 240   ! Frequency mesh along the real axis (Ha units) used for the different bands.
 241   ! Each mesh is **centered** on the corresponding KS energy.
242 
243   complex(dpc),allocatable :: vals_e0ks(:,:,:,:)
 244    ! vals_e0ks(ntemp, max_nbcalc, my_nkcalc, nsppol)
245    ! Sigma_eph(omega=eKS, kT, band, ikcalc, spin).
246    ! Fan-Migdal + Debye-Waller
247 
248   complex(dpc),allocatable :: vals_wr(:,:,:,:,:)
249    ! vals_wr(nwr, ntemp, max_nbcalc, my_nkcalc, nsppol)
250    ! Sigma_eph(omega, kT, band, ikcalc, spin).
 251    ! The KS energy e_nk corresponds to frequency index nwr/2 + 1 (e.g. index 101 for nwr = 201).
252 
 253   complex(dpc),allocatable :: ct_vals(:,:,:,:,:)
 254    ! ct_vals(nwr, ntemp, max_nbcalc, my_nkcalc, nsppol)
 255    ! Cumulant function C(t) (time, kT, band, ikcalc, spin).
 256 
 257   complex(dpc),allocatable :: c1(:,:,:,:,:)
 258    ! c1(nwr, ntemp, max_nbcalc, my_nkcalc, nsppol)
 259    ! Contribution to the cumulant function (time, kT, band, ikcalc, spin). Written only when debug == 1.
 260 
 261   complex(dpc),allocatable :: c2(:,:,:,:,:)
 262    ! c2(nwr, ntemp, max_nbcalc, my_nkcalc, nsppol)
 263    ! Contribution to the cumulant function (time, kT, band, ikcalc, spin). Written only when debug == 1.
 264 
 265   complex(dpc),allocatable :: c3(:,:,:,:,:)
 266    ! c3(nwr, ntemp, max_nbcalc, my_nkcalc, nsppol)
 267    ! Contribution to the cumulant function (time, kT, band, ikcalc, spin). Written only when debug == 1.
 268 
 269   complex(dpc),allocatable :: gt_vals(:,:,:,:,:)
 270    ! gt_vals(nwr, ntemp, max_nbcalc, my_nkcalc, nsppol)
 271    ! Green's function in the time domain (time, kT, band, ikcalc, spin).
 272 
 273   complex(dpc),allocatable :: gw_vals(:,:,:,:,:)
 274    ! gw_vals(nwr, ntemp, max_nbcalc, my_nkcalc, nsppol)
 275    ! Green's function in the frequency domain (omega, kT, band, ikcalc, spin).
276 
277    ! real(dp),allocatable :: ce_spfunc_wr(:,:,:,:,:)
278    ! ce_spfunc_wr(nwr, ntemp, max_nbcalc, my_nkcalc, nsppol)
279    ! Absorption spectrum (omega, kT, band, ikcalc, spin).
280 
281    real(dp),allocatable :: seebeck(:,:,:,:,:)
282    real(dp),allocatable :: kappa(:,:,:,:,:)
283    ! (3, 3, 2, nsppol, ntemp)
284    ! Transport coefficients in Cartesian coordinates
285 
286    real(dp),allocatable :: conductivity_mu(:,:,:,:,:)
287    ! (3, 3, 2, nsppol, ntemp)
 288    ! Conductivity in Siemens * cm-1,
 289    ! computed by summing over k-points rather than by performing an energy integration.
290 
291    real(dp),allocatable :: print_dfdw(:,:)
292 
293    real(dp),allocatable :: mobility_mu(:,:,:,:,:)
294    ! (3, 3, 2, nsppol, ntemp)
 295    ! Mobility for electrons and holes evaluated at the chemical potential transport_mu_e(ntemp).
 296    ! The third dimension distinguishes electrons from holes.
297 
298    real(dp),allocatable :: transport_mu_e(:)
299    ! (%ntemp)
300    ! Chemical potential at this carrier concentration and temperature
301 
302    real(dp) :: transport_fermie
303    ! Fermi level specified in the input file when computing the SIGEPH file.
304 
305    real(dp) :: transport_extrael
306    ! Extra electrons per unit cell specified in the input file when computing the SIGEPH file.
307 
308    real(dp),allocatable :: n_ehst(:,:,:)
309     ! (2, %nsppol, %ntemp)
310     ! Number of electrons (e) and holes (h) per unit cell
311     ! The first dimension is for electrons/holes.
 312     ! If nsppol == 2, the second dimension gives the number of e/h per spin channel, otherwise the total number of e/h summed over spins.
313 
314    real(dp) :: eph_extrael
315    ! Extra electrons per unit cell used to compute SERTA lifetimes in sigmaph.
316 
317    real(dp) :: eph_fermie
318    ! Fermi level specified in the input file when computing the SIGEPH file.
319 
 320    integer :: ce_ngfft(18)          ! FFT mesh descriptor used for the FFTs performed in the cumulant calculation.
 321    integer :: ce_ngfft_g(18)        ! Second FFT mesh descriptor used in the cumulant calculation.
 322    type(mpi_type) :: ce_mpi_enreg   ! Sequential MPI descriptor passed to the FFT routines.
323 
324  contains
325 
326     procedure :: init => cumulant_init
327     procedure :: compute => cumulant_compute
328     procedure :: kubo_transport => cumulant_kubo_transport
329     procedure :: sigmaph_ncread => cumulant_sigmaph_ncread
330     procedure :: ncwrite => cumulant_ncwrite
331     !procedure :: print_txt_files => cumulant_print_txt_files
332     procedure :: free => cumulant_free
333 
334  end type cumulant_t
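
 The fields ct_vals, gt_vals and gw_vals above store the three stages of the cumulant
 pipeline, C(t) -> G(t) -> G(omega), from which the spectral function
 A(omega) = -Im G(omega) / pi is obtained. As a purely illustrative aid (not part of
 ABINIT, and not the algorithm used by cumulant_compute), the toy program below runs
 that pipeline for a single state with the simplest possible cumulant, C(t) = -Gamma t,
 i.e. a constant linewidth; in that limit A(omega) is a Lorentzian of half-width Gamma
 centered on the energy e0. All numerical values and mesh sizes are made up, and a
 naive trapezoidal transform replaces the FFT/quadrature machinery of the module.

   program cumulant_toy
     implicit none
     integer,parameter :: dp = kind(1.0d0)
     integer,parameter :: nt = 4000, nw = 11
     real(dp),parameter :: e0  = 0.10_dp      ! "KS" energy in Ha (made up)
     real(dp),parameter :: gam = 0.01_dp      ! linewidth Gamma in Ha (made up)
     real(dp),parameter :: tmax = 3000.0_dp   ! length of the time mesh in a.u.; must be >> 1/gam
     real(dp),parameter :: pi = 3.141592653589793_dp
     complex(dp),parameter :: jj = (0.0_dp, 1.0_dp)
     complex(dp) :: gt(nt), gw
     real(dp) :: t(nt), dt, w, aw
     integer :: it, iw

     dt = tmax / (nt - 1)
     do it = 1, nt
       t(it) = (it - 1) * dt
       ! Toy cumulant C(t) = -gam*t, hence G(t) = -i exp(-i*e0*t) exp(C(t)) for t >= 0.
       gt(it) = -jj * exp(-jj*e0*t(it) - gam*t(it))
     end do

     ! G(w) = int_0^tmax dt exp(i*w*t) G(t) (trapezoidal rule), then A(w) = -Im G(w) / pi.
     ! For this C(t) the result is a Lorentzian of half-width gam centered on e0.
     do iw = 1, nw
       w = e0 + (iw - (nw + 1)/2) * 0.005_dp
       gw = 0.5_dp * dt * (exp(jj*w*t(1))*gt(1) + exp(jj*w*t(nt))*gt(nt))
       do it = 2, nt - 1
         gw = gw + dt * exp(jj*w*t(it)) * gt(it)
       end do
       aw = -aimag(gw) / pi
       write(*, "(2f12.6)") w, aw   ! peak height ~ 1/(pi*gam) at w = e0
     end do
   end program cumulant_toy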