Coverage for gpaw/directmin/functional/lcao/ks.py: 99%

76 statements  

coverage.py v7.7.1, created at 2025-07-19 00:19 +0000

import numpy as np
from gpaw.directmin.tools import d_matrix


class KSLCAO:
    """
    This class calculates the gradient of the KS functional
    with respect to the orbital rotation parameters in LCAO.
    """

    def __init__(self, wfs, dens, ham):
        self.name = 'ks'
        self.wfs = wfs
        self.dens = dens
        self.ham = ham

    def todict(self):
        return {'name': self.name}

    def get_gradients(self, h_mm, c_nm, f_n, evec, evals, kpt, wfs, timer,
                      matrix_exp, representation, ind_up, constraints):

        with timer('Construct Gradient Matrix'):
            use_egdecomp = matrix_exp == 'egdecomp'

            hc_mn, h_ij, h_ia = self.get_ham_in_mol_orb_representation(
                h_mm, c_nm, f_n, representation, use_egdecomp)

            with timer('Residual'):
                error = self.get_residual_error(
                    hc_mn, kpt.S_MM, c_nm, h_ij, f_n, wfs.nvalence,
                    constraints)

            if not use_egdecomp:
                if representation == 'u-invar':
                    h_ij = h_ia
                else:
                    # Mask the redundant (lower-triangular) elements with
                    # inf and drop them after ravelling, keeping only the
                    # independent rotation parameters.
                    if representation == 'full' and wfs.dtype == complex:
                        indl = np.tril_indices(h_ij.shape[0], -1)
                    else:
                        indl = np.tril_indices(h_ij.shape[0])
                    h_ij[indl] = np.inf
                    if representation == 'sparse':
                        h_ij = np.concatenate((h_ij, h_ia), axis=1)
                    h_ij = h_ij.ravel()
                    h_ij = h_ij[h_ij != np.inf]

                # Occupation-number differences f_j - f_i for each pair
                ones_mat = np.ones(shape=(len(f_n), len(f_n)))
                anti_occ = f_n * ones_mat - f_n[:, np.newaxis] * ones_mat
                anti_occ = anti_occ[ind_up]
                grad = np.ascontiguousarray(anti_occ * h_ij)
            else:
                h_ij = f_n * h_ij - f_n[:, np.newaxis] * h_ij
                grad = np.ascontiguousarray(h_ij)

                with timer('Use Eigendecomposition'):
                    grad = self.get_exact_gradient_matrix(grad, evec, evals)

                grad = grad[ind_up]

        if wfs.dtype == float:
            grad = grad.real

        if constraints:
            constrain_grad(grad, constraints, ind_up)

        return 2.0 * grad, error

    def get_ham_in_mol_orb_representation(self, h_mm, c_nm, f_n,
                                          representation, full_ham):
        """
        H = (C_nM @ H_MM @ C_nM.T.conj()).conj()

        For the 'sparse' and 'u-invar' representations only H_ij and H_ia
        are calculated, where i, j run over occupied and a over virtual
        orbitals.  H_ij is needed only to evaluate the residual later and
        is not required for the 'u-invar' representation.  When the matrix
        exponential is calculated with the 'egdecomp' method, the whole
        matrix H is needed.  (A toy check of this block equivalence is
        given after the class.)

        :return: H @ C_nM[:occ].T, H_ij, H_ia   or
                 H @ C_nM.T, H, H_ia
        """

        occ = sum(f_n > 1.0e-10)
        if representation in ['sparse', 'u-invar'] and not full_ham:
            hc_mn = h_mm.conj() @ c_nm[:occ].T
            h_ij = hc_mn.T.conj() @ c_nm[:occ].T
            h_ia = hc_mn.T.conj() @ c_nm[occ:].T
        else:
            hc_mn = h_mm.conj() @ c_nm.T
            h_ij = c_nm.conj() @ hc_mn
            h_ia = h_ij[:occ][:, occ:]

        return hc_mn, h_ij, h_ia

    def get_residual_error(
            self, hc_mn, S_MM, c_nm, h_ij, f_n, nvalence, constraints=None):
        """
        Calculate the residual error of the KS equations
        (see also the toy check after the class).
        """
        occ = sum(f_n > 1.0e-10)
        hc_mn = hc_mn[:, :occ] - S_MM.conj() @ c_nm[:occ].T @ h_ij[:occ, :occ]
        if constraints:
            # Zero out the components of the residual that are constrained,
            # so that the constrained degrees of freedom are always
            # considered converged
            for con in constraints:
                con1 = con[0]
                hc_mn[:, con1] = 0.0
        # Weight the squared residuals with the occupation numbers
        norm = sum(hc_mn.conj() * hc_mn * f_n[:occ])
        error = sum(norm.real)

        return error

    def get_exact_gradient_matrix(self, h_ij, evec, evals):
        """
        Given the eigendecomposition of A, calculate the exact gradient
        matrix, Eqs. (14) and (39)-(41) of
        arXiv:2101.12597 [physics.comp-ph],
        Comput. Phys. Commun. 267, 108047 (2021).

        :return: gradient matrix
        """

        # Rotate to the eigenbasis of A, scale elementwise by
        # d_matrix(evals), rotate back and halve the diagonal.
        grad = evec.T.conj() @ h_ij @ evec
        grad = grad * d_matrix(evals)
        grad = evec @ grad @ evec.T.conj()
        for i in range(grad.shape[0]):
            grad[i][i] *= 0.5

        return grad
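
def _toy_check_mol_orb_blocks():
    # Illustrative toy check, not part of GPAW: verify with random data that
    # the reduced ('sparse'/'u-invar') branch of
    # get_ham_in_mol_orb_representation gives the same occupied-occupied and
    # occupied-virtual blocks as the full transformation, assuming h_mm is
    # Hermitian (as the KS Hamiltonian matrix is).
    rng = np.random.default_rng(0)
    nbands, occ = 6, 3
    h_mm = rng.random((nbands, nbands)) + 1j * rng.random((nbands, nbands))
    h_mm = 0.5 * (h_mm + h_mm.T.conj())   # Hermitian toy Hamiltonian
    c_nm = rng.random((nbands, nbands)) + 1j * rng.random((nbands, nbands))

    # Reduced branch: only the occupied columns of H @ C†.
    hc_mn = h_mm.conj() @ c_nm[:occ].T
    h_ij = hc_mn.T.conj() @ c_nm[:occ].T
    h_ia = hc_mn.T.conj() @ c_nm[occ:].T

    # Full branch: whole matrix in the molecular-orbital representation.
    h_full = c_nm.conj() @ (h_mm.conj() @ c_nm.T)

    assert np.allclose(h_ij, h_full[:occ, :occ])
    assert np.allclose(h_ia, h_full[:occ, occ:])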

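
def _toy_check_residual_error():
    # Illustrative toy check, not part of GPAW: for coefficients that
    # diagonalize a real symmetric toy Hamiltonian in an orthonormal basis
    # (S_MM = identity), the residual evaluated as in get_residual_error
    # vanishes.
    rng = np.random.default_rng(1)
    nbands, occ = 5, 2
    h_mm = rng.random((nbands, nbands))
    h_mm = 0.5 * (h_mm + h_mm.T)          # symmetric toy Hamiltonian
    S_MM = np.eye(nbands)                 # orthonormal toy basis
    f_n = np.array([2.0, 2.0, 0.0, 0.0, 0.0])

    evals, evec = np.linalg.eigh(h_mm)
    c_nm = evec.T                         # rows are exact eigenstates

    hc_mn = h_mm.conj() @ c_nm[:occ].T
    h_ij = hc_mn.T.conj() @ c_nm[:occ].T
    res = hc_mn - S_MM.conj() @ c_nm[:occ].T @ h_ij
    error = np.sum((res.conj() * res * f_n[:occ]).real)
    assert error < 1.0e-18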

def constrain_grad(grad, constraints, ind_up):
    """
    Zero out the components of the gradient that are constrained, so that no
    optimization step is taken along the constrained degrees of freedom.
    (It would be better not to evaluate these components of the gradient in
    the first place.)
    """

    for con in constraints:
        for num, (ind1, ind2) in enumerate(zip(ind_up[0], ind_up[1])):
            if con == [ind1, ind2]:
                grad[num] = 0.0
    return grad
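
def _toy_check_occupation_prefactor_and_constraints():
    # Illustrative toy example, not part of GPAW: the occupation-difference
    # prefactor (f_j - f_i) built in get_gradients for the independent
    # orbital-rotation pairs, and the masking of a constrained pair via
    # constrain_grad.  ind_up mimics the strict upper-triangular index pairs
    # used for the 'full' representation with real orbitals.
    f_n = np.array([2.0, 2.0, 0.0, 0.0])        # toy occupation numbers
    h_ij = np.arange(16.0).reshape(4, 4)        # toy matrix in the MO basis
    ind_up = np.triu_indices(len(f_n), 1)       # strict upper triangle

    ones_mat = np.ones((len(f_n), len(f_n)))
    anti_occ = f_n * ones_mat - f_n[:, np.newaxis] * ones_mat
    grad = np.ascontiguousarray(anti_occ[ind_up] * h_ij[ind_up])

    # Zero the element that belongs to the constrained pair [0, 2].
    constraints = [[0, 2]]
    grad = constrain_grad(grad, constraints, ind_up)
    assert grad[1] == 0.0                       # (0, 2) is the second pair
    return grad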