Coverage for gpaw/directmin/sd_etdm.py: 88%

437 statements  

coverage.py v7.7.1, created at 2025-07-14 00:18 +0000

1""" 

2Search directions in space of skew-hermitian matrices 

3 

4LSR1 algorithm and application to excited states: 

5arXiv:2006.15922 [physics.chem-ph] 

6J. Chem. Theory Comput. 16, 6968 (2020). 

7https://doi.org/10.1021/acs.jctc.0c00597 

8""" 

9 

10import numpy as np 

11import copy 

12from gpaw.directmin.tools import array_to_dict, dict_to_array 

13 

14 

class SearchDirectionBase:
    """
    Base class for search direction algorithms
    """

    def __init__(self):
        self.iters = 0
        self.kp = None
        self.p = None
        self.k = None
        super().__init__()

    def __str__(self):
        raise NotImplementedError('Search direction class needs a string '
                                  'representation.')

    def todict(self):
        raise NotImplementedError("Search direction class needs a "
                                  "'todict' method.")

    def update_data(
            self, wfs, x_k1, g_k1, dimensions=None, precond=None, mode=None,
            subspace=False):
        raise NotImplementedError("Search direction class needs an "
                                  "'update_data' method.")

    def reset(self):
        self.iters = 0
        self.kp = {}
        self.p = 0
        self.k = 0


class ModeFollowingBase:
    """
    Base gradient partitioning and negation implementation for minimum mode
    following
    """

    def __init__(self, partial_diagonalizer, convex_step_length=0.1,
                 reset_on_convex=False):
        self.eigv = None
        self.eigvec = None
        self.eigvec_old = None
        self.partial_diagonalizer = partial_diagonalizer
        self.fixed_sp_order = None
        self.was_convex = False
        self.convex_step_length = convex_step_length
        self.reset_on_convex = reset_on_convex

    def update_eigenpairs(self, g_k1, wfs, ham, dens):
        """
        Perform a partial Hessian diagonalization to obtain the eigenvectors
        with negative eigenvalues.

        :param g_k1: Gradient.
        :param wfs: Wave functions.
        :param ham: Hamiltonian.
        :param dens: Density.
        """

        self.partial_diagonalizer.grad = g_k1
        use_prev = not (self.eigv is None
                        or (self.was_convex and self.reset_on_convex))
        self.partial_diagonalizer.run(wfs, ham, dens, use_prev)
        self.eigv = copy.deepcopy(self.partial_diagonalizer.lambda_w)
        self.eigvec_old = copy.deepcopy(self.eigvec)
        self.eigvec = copy.deepcopy(self.partial_diagonalizer.x_w.T)
        if wfs.dtype == complex:
            # The diagonalizer works with real arrays of twice the size:
            # the first half holds the real parts, the second half the
            # imaginary parts.
            dimtot = len(self.eigvec[0]) // 2
            eigvec = np.zeros(shape=(len(self.eigvec), dimtot),
                              dtype=complex)
            for i in range(len(self.eigvec)):
                eigvec[i] += self.eigvec[i][: dimtot] \
                    + 1.0j * self.eigvec[i][dimtot:]
            self.eigvec = eigvec
        self.fixed_sp_order = self.partial_diagonalizer.sp_order

    def invert_parallel_grad(self, g_k1):
        """
        Use the stored eigenpairs to invert the projections of the gradient
        parallel to the eigenvectors with negative eigenvalues.

        :param g_k1: Gradient.
        :return: Modified gradient.
        """

        grad, dim, dimtot = dict_to_array(g_k1)
        if self.fixed_sp_order is None:
            # Count the eigenvalues below the threshold; the eigenvalues
            # are sorted in ascending order.
            get_dots = 0
            for i in range(len(self.eigv)):
                if self.eigv[i] <= -1e-4:
                    get_dots += 1
                else:
                    break
        else:
            get_dots = self.fixed_sp_order
        grad_par = np.zeros_like(grad)
        if self.fixed_sp_order is not None:
            for i in range(get_dots):
                grad_par += self.eigvec[i] * np.dot(self.eigvec[i].conj(),
                                                    grad.T).real
            grad_mod = grad - 2.0 * grad_par
        else:
            for i in range(get_dots):
                grad_par += self.eigvec[i] \
                    * np.dot(self.eigvec[i].conj(), grad.T).real
            if get_dots == 0:
                # No negative eigenvalues were found; the landscape is
                # locally convex, so reset the search direction history.
                grad_mod = -self.convex_step_length * grad_par \
                    / np.linalg.norm(grad_par)
                self.partial_diagonalizer.etdm.searchdir_algo.reset()
                self.was_convex = True
            else:
                grad_mod = grad - 2.0 * grad_par
                if self.was_convex:
                    self.partial_diagonalizer.etdm.searchdir_algo.reset()
                    self.was_convex = False
        return array_to_dict(grad_mod, dim)

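
# Illustrative sketch (not part of the original module) of the projection
# inversion performed by ModeFollowingBase.invert_parallel_grad, on a plain
# numpy vector instead of the k-point dictionaries: the gradient component
# along each negative-curvature eigenvector v is reflected,
# g' = g - 2 v (v . g).
def _demo_invert_parallel_grad():
    g = np.array([1.0, 2.0, 3.0])
    v = np.array([1.0, 0.0, 0.0])  # normalized eigenvector with eigv < 0
    g_mod = g - 2.0 * v * np.dot(v, g)
    assert np.allclose(g_mod, [-1.0, 2.0, 3.0])
    return g_mod
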

class ModeFollowing(ModeFollowingBase, SearchDirectionBase):
    """
    Minimum mode following class handling the GMF tag of the search
    direction class for ETDM and the negation of the gradient projection.
    """

    def __init__(self, partial_diagonalizer, search_direction,
                 convex_step_length=0.1):
        self.sd = search_direction
        self.name = self.sd.name + '_gmf'
        self.type = self.sd.type + '_gmf'
        super().__init__(partial_diagonalizer, convex_step_length)

    @property
    def beta_0(self):
        return self.sd.beta_0

    def __str__(self):
        return self.sd.__str__() + ' with minimum mode following'

    def todict(self):
        res = self.sd.todict()
        res['name'] += '_gmf'  # the tag is removed again in ETDM
        return res

    def update_data(
            self, wfs, x_k1, g_k1, dimensions=None, precond=None, mode=None,
            subspace=False):
        if not subspace:
            g_k1 = self.invert_parallel_grad(g_k1)
        return self.sd.update_data(wfs, x_k1, g_k1, dimensions=dimensions,
                                   precond=precond, mode=mode)

    def reset(self):
        self.sd.reset()

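
# Sketch of how ModeFollowing decorates an existing search direction: the
# wrapped object does the actual work while name and type gain a '_gmf'
# tag. A partial diagonalizer is only needed once update_eigenpairs is
# called, so None is passed here for brevity.
def _demo_gmf_tagging():
    gmf = ModeFollowing(None, SteepestDescent())
    assert gmf.name == 'sd_gmf'
    assert gmf.todict() == {'name': 'sd_gmf'}
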

class SteepestDescent(SearchDirectionBase):
    """
    Steepest descent algorithm
    """

    def __init__(self):
        super().__init__()
        self.name = 'sd'
        self.type = 'steepest-descent'

    def __str__(self):
        return 'Steepest Descent algorithm'

    def todict(self):
        return {'name': self.name}

    def update_data(
            self, wfs, x_k1, g_k1, dimensions=None, precond=None, mode=None,
            subspace=False):
        if precond is None:
            p_k = minus(g_k1)
        else:
            p_k = apply_prec(precond, g_k1, -1.0, wfs, mode)
        self.iters += 1
        return p_k

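
# The search directions operate on dictionaries mapping k-point indices to
# coefficient arrays. Minimal sketch with a single 'k-point' and no
# preconditioner; steepest descent just returns the negated gradient, and
# the unused wfs and x_k1 arguments can stay None.
def _demo_steepest_descent():
    sd = SteepestDescent()
    g = {0: np.array([0.5, -1.0])}
    p = sd.update_data(None, None, g)
    assert np.allclose(p[0], [-0.5, 1.0])
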

class FRcg(SteepestDescent):
    """
    The Fletcher-Reeves conjugate gradient method.
    See Jorge Nocedal and Stephen J. Wright, 'Numerical Optimization',
    second edition, 2006 (p. 121).
    """

    def __init__(self):
        super().__init__()
        self.name = 'fr-cg'
        self.type = 'conjugate-gradients'

    def __str__(self):
        return 'Fletcher-Reeves conjugate gradient method'

    def todict(self):
        return {'name': self.name}

    def update_data(
            self, wfs, x_k1, g_k1, dimensions=None, precond=None, mode=None,
            subspace=False):

        if precond is not None:
            g_k1 = apply_prec(precond, g_k1, 1.0, wfs, mode)

        if self.iters == 0:
            self.p_k = minus(g_k1)
        else:
            dot_g_k1_g_k1 = dot_all_k_and_b(g_k1, g_k1, wfs, dimensions, mode)
            dot_g_g = dot_all_k_and_b(
                self.g_k, self.g_k, wfs, dimensions, mode)
            beta_k = dot_g_k1_g_k1 / dot_g_g
            self.p_k = calc_diff(self.p_k, g_k1, beta_k)

        # save this step
        self.g_k = copy.deepcopy(g_k1)

        self.iters += 1
        return self.p_k

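
# Fletcher-Reeves sketch on a toy pair of gradients:
# beta_k = (g_k1 . g_k1) / (g_k . g_k) and p_k1 = -g_k1 + beta_k * p_k.
# mode='lcao' is passed explicitly so that no wfs object is needed for the
# dot products.
def _demo_fr_cg():
    cg = FRcg()
    g0 = {0: np.array([2.0, 0.0])}
    g1 = {0: np.array([1.0, 1.0])}
    p0 = cg.update_data(None, None, g0, mode='lcao')  # p0 = -g0
    p1 = cg.update_data(None, None, g1, mode='lcao')
    beta = 2.0 / 4.0  # |g1|^2 / |g0|^2
    assert np.allclose(p1[0], -g1[0] + beta * p0[0])
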

class LBFGS(SearchDirectionBase):

    def __init__(self, memory=3):
        """
        :param memory: number of previous steps to use
        """
        super().__init__()

        self.s_k = {i: None for i in range(memory)}
        self.y_k = {i: None for i in range(memory)}

        self.rho_k = np.zeros(shape=memory)

        self.kp = {}
        self.p = 0
        self.k = 0

        self.memory = memory
        self.stable = True
        self.name = 'l-bfgs'
        self.type = 'quasi-newton'

    def __str__(self):
        return 'L-BFGS'

    def todict(self):
        return {'name': self.name,
                'memory': self.memory}

    def update_data(
            self, wfs, x_k1, g_k1, dimensions=None, precond=None, mode=None,
            subspace=False):

        self.iters += 1

        if precond is not None:
            g_k1 = apply_prec(precond, g_k1, 1.0, wfs, mode)

        if self.k == 0:
            self.kp[self.k] = self.p
            self.x_k = copy.deepcopy(x_k1)
            self.g_k = g_k1
            self.s_k[self.kp[self.k]] = zeros(g_k1)
            self.y_k[self.kp[self.k]] = zeros(g_k1)
            self.k += 1
            self.p += 1
            self.kp[self.k] = self.p

            return minus(g_k1)

        else:
            if self.p == self.memory:
                self.p = 0
                self.kp[self.k] = self.p

            s_k = self.s_k
            x_k = self.x_k
            y_k = self.y_k
            g_k = self.g_k

            x_k1 = copy.deepcopy(x_k1)

            rho_k = self.rho_k

            kp = self.kp
            k = self.k
            m = self.memory

            s_k[kp[k]] = calc_diff(x_k1, x_k)
            y_k[kp[k]] = calc_diff(g_k1, g_k)
            dot_ys = dot_all_k_and_b(
                y_k[kp[k]], s_k[kp[k]], wfs, dimensions, mode)

            if abs(dot_ys) > 1.0e-15:
                rho_k[kp[k]] = 1.0 / dot_ys
            else:
                rho_k[kp[k]] = 1.0e15

            if dot_ys < 0.0:
                self.stable = False

            # Two-loop recursion (Nocedal and Wright, Algorithm 7.4)
            q = copy.deepcopy(g_k1)

            alpha = np.zeros(np.minimum(k + 1, m))
            j = np.maximum(-1, k - m)

            for i in range(k, j, -1):
                dot_sq = dot_all_k_and_b(s_k[kp[i]], q, wfs, dimensions, mode)
                alpha[kp[i]] = rho_k[kp[i]] * dot_sq
                q = calc_diff(q, y_k[kp[i]], const=alpha[kp[i]])

            t = k
            dot_yy = dot_all_k_and_b(
                y_k[kp[t]], y_k[kp[t]], wfs, dimensions, mode)

            if abs(dot_yy) > 1.0e-15:
                r = multiply(q, 1.0 / (rho_k[kp[t]] * dot_yy))
            else:
                r = multiply(q, 1.0e15)

            for i in range(np.maximum(0, k - m + 1), k + 1):
                dot_yr = dot_all_k_and_b(
                    y_k[kp[i]], r, wfs, dimensions, mode)
                beta = rho_k[kp[i]] * dot_yr
                r = calc_diff(r, s_k[kp[i]], const=(beta - alpha[kp[i]]))

            # save this step:
            self.x_k = copy.deepcopy(x_k1)
            self.g_k = copy.deepcopy(g_k1)
            self.k += 1
            self.p += 1
            self.kp[self.k] = self.p

            return multiply(r, -1.0)

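
# Self-contained sketch of the L-BFGS two-loop recursion on a toy 2D
# quadratic f = (x^2 + 2 y^2) / 2, again with mode='lcao' so that no wfs
# object is required. Once a curvature pair (s, y) with s . y > 0 has been
# stored, the returned direction is a descent direction for the current
# gradient.
def _demo_lbfgs():
    lbfgs = LBFGS(memory=2)
    x0 = {0: np.array([1.0, 2.0])}
    g0 = {0: np.array([1.0, 4.0])}           # grad f at x0
    p0 = lbfgs.update_data(None, x0, g0, mode='lcao')
    x1 = {0: x0[0] + 0.5 * p0[0]}            # fixed step along -g0
    g1 = {0: np.array([x1[0][0], 2.0 * x1[0][1]])}
    p1 = lbfgs.update_data(None, x1, g1, mode='lcao')
    assert dot_all_k_and_b(p1, g1, None, mode='lcao') < 0.0
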

class LBFGS_P(SearchDirectionBase):

    def __init__(self, memory=3, beta_0=1.0):
        """
        :param memory: number of previous steps to use
        :param beta_0: initial scaling of the inverse Hessian approximation
        """
        super().__init__()
        self.s_k = {i: None for i in range(memory)}
        self.y_k = {i: None for i in range(memory)}
        self.rho_k = np.zeros(shape=memory)
        self.kp = {}
        self.p = 0
        self.k = 0
        self.memory = memory
        self.stable = True
        self.beta_0 = beta_0
        self.name = 'l-bfgs-p'
        self.type = 'quasi-newton'

    def __str__(self):
        return 'L-BFGS-P'

    def todict(self):
        return {'name': self.name,
                'memory': self.memory,
                'beta_0': self.beta_0}

    def update_data(
            self, wfs, x_k1, g_k1, dimensions=None, precond=None, mode=None,
            subspace=False):
        # For L-BFGS-P, the preconditioner passed here has to be
        # distinguished from the preconditioner passed in ETDM. To keep the
        # interface of this method consistent, the term precond is still
        # used in the signature.
        hess_1 = precond
        self.iters += 1
        if self.k == 0:
            self.kp[self.k] = self.p
            self.x_k = copy.deepcopy(x_k1)
            self.g_k = copy.deepcopy(g_k1)
            self.s_k[self.kp[self.k]] = zeros(g_k1)
            self.y_k[self.kp[self.k]] = zeros(g_k1)
            self.k += 1
            self.p += 1
            self.kp[self.k] = self.p
            if hess_1 is None:
                p = minus(g_k1)
            else:
                p = apply_prec(hess_1, g_k1, -1.0, wfs, mode)
            self.beta_0 = 1.0
            return p

        else:
            if self.p == self.memory:
                self.p = 0
                self.kp[self.k] = self.p

            s_k = self.s_k
            x_k = self.x_k
            y_k = self.y_k
            g_k = self.g_k
            rho_k = self.rho_k
            kp = self.kp
            k = self.k
            m = self.memory

            s_k[kp[k]] = calc_diff(x_k1, x_k)
            y_k[kp[k]] = calc_diff(g_k1, g_k)

            dot_ys = dot_all_k_and_b(
                y_k[kp[k]], s_k[kp[k]], wfs, dimensions, mode)

            if abs(dot_ys) > 1.0e-20:
                rho_k[kp[k]] = 1.0 / dot_ys
            else:
                rho_k[kp[k]] = 1.0e20

            if rho_k[kp[k]] < 0.0:
                # Negative curvature: discard the history and restart.
                self.stable = False
                self.__init__(memory=self.memory)
                return self.update_data(
                    wfs, x_k1, g_k1, precond=hess_1, dimensions=dimensions,
                    mode=mode)

            q = copy.deepcopy(g_k1)

            alpha = np.zeros(np.minimum(k + 1, m))
            j = np.maximum(-1, k - m)

            for i in range(k, j, -1):
                dot_sq = dot_all_k_and_b(s_k[kp[i]], q, wfs, dimensions, mode)
                alpha[kp[i]] = rho_k[kp[i]] * dot_sq
                q = calc_diff(q, y_k[kp[i]], const=alpha[kp[i]])

            t = k
            dot_yy = dot_all_k_and_b(
                y_k[kp[t]], y_k[kp[t]], wfs, dimensions, mode)
            rhoyy = rho_k[kp[t]] * dot_yy
            if abs(rhoyy) > 1.0e-20:
                self.beta_0 = 1.0 / rhoyy
            else:
                self.beta_0 = 1.0e20

            if hess_1 is not None:
                r = apply_prec(hess_1, q, wfs=wfs, mode=mode)
            else:
                r = multiply(q, self.beta_0)

            for i in range(np.maximum(0, k - m + 1), k + 1):
                dot_yr = dot_all_k_and_b(y_k[kp[i]], r, wfs, dimensions, mode)
                beta = rho_k[kp[i]] * dot_yr
                r = calc_diff(r, s_k[kp[i]], const=(beta - alpha[kp[i]]))

            # save this step:
            self.x_k = copy.deepcopy(x_k1)
            self.g_k = copy.deepcopy(g_k1)

            self.k += 1
            self.p += 1

            self.kp[self.k] = self.p

            return multiply(r, -1.0)

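
# L-BFGS-P discards its history when negative curvature is detected
# (s . y < 0): the update then falls back to a plain steepest-descent step.
# Sketch in one dimension with mode='lcao' and no wfs object.
def _demo_lbfgs_p_reset():
    lbfgs_p = LBFGS_P(memory=2)
    x0 = {0: np.array([0.0])}
    g0 = {0: np.array([1.0])}
    lbfgs_p.update_data(None, x0, g0, mode='lcao')
    x1 = {0: np.array([-1.0])}
    g1 = {0: np.array([2.0])}   # gradient grew along -g0, so s . y < 0
    p1 = lbfgs_p.update_data(None, x1, g1, mode='lcao')
    assert np.allclose(p1[0], -g1[0])
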

class LSR1P(SearchDirectionBase):
    """
    Limited-memory versions of SR1, Powell and their combinations
    (such as Bofill).
    """

    def __init__(self, memory=20, method='LSR1', phi=None):
        """
        :param memory: number of previous steps to use
        """
        super().__init__()

        self.u_k = {i: None for i in range(memory)}
        self.j_k = {i: None for i in range(memory)}
        self.yj_k = np.zeros(shape=memory)
        self.method = method
        self.phi = phi

        self.phi_k = np.zeros(shape=memory)
        if self.phi is None:
            assert self.method in ['LSR1', 'LP', 'LBofill',
                                   'Linverse_Bofill'], \
                'Unknown method: ' + str(self.method)
            if self.method == 'LP':
                self.phi_k.fill(1.0)
        else:
            self.phi_k.fill(self.phi)

        self.kp = {}
        self.p = 0
        self.k = 0

        self.memory = memory
        self.name = 'l-sr1p'
        self.type = 'quasi-newton'

    def __str__(self):
        return 'LSR1P'

    def todict(self):
        return {'name': self.name,
                'memory': self.memory,
                'method': self.method}

    def update_data(
            self, wfs, x_k1, g_k1, dimensions=None, precond=None, mode=None,
            subspace=False):

        if precond is not None:
            bg_k1 = apply_prec(precond, g_k1, 1.0, wfs, mode)
        else:
            bg_k1 = g_k1.copy()

        if self.k == 0:
            self.kp[self.k] = self.p
            self.x_k = copy.deepcopy(x_k1)
            self.g_k = copy.deepcopy(g_k1)
            self.u_k[self.kp[self.k]] = zeros(g_k1)
            self.j_k[self.kp[self.k]] = zeros(g_k1)
            self.k += 1
            self.p += 1
            self.kp[self.k] = self.p
        else:
            if self.p == self.memory:
                self.p = 0
                self.kp[self.k] = self.p

            x_k = self.x_k
            g_k = self.g_k
            u_k = self.u_k
            j_k = self.j_k
            yj_k = self.yj_k
            phi_k = self.phi_k

            x_k1 = copy.deepcopy(x_k1)

            kp = self.kp
            k = self.k
            m = self.memory

            s_k = calc_diff(x_k1, x_k)
            y_k = calc_diff(g_k1, g_k)
            if precond is not None:
                by_k = apply_prec(precond, y_k, 1.0, wfs, mode)
            else:
                by_k = y_k.copy()

            by_k = self.update_bv(wfs, by_k, y_k, u_k, j_k, yj_k, phi_k,
                                  np.maximum(1, k - m), k, dimensions, mode)

            j_k[kp[k]] = calc_diff(s_k, by_k)
            yj_k[kp[k]] = dot_all_k_and_b(
                y_k, j_k[kp[k]], wfs, dimensions, mode)

            if self.method == 'LSR1':
                if abs(yj_k[kp[k]]) < 1e-12:
                    yj_k[kp[k]] = 1e-12

            dot_yy = dot_all_k_and_b(y_k, y_k, wfs, dimensions, mode)
            if abs(dot_yy) > 1.0e-15:
                u_k[kp[k]] = multiply(y_k, 1.0 / dot_yy)
            else:
                u_k[kp[k]] = multiply(y_k, 1.0e15)

            if self.method == 'LBofill' and self.phi is None:
                jj_k = dot_all_k_and_b(
                    j_k[kp[k]], j_k[kp[k]], wfs, dimensions, mode)
                phi_k[kp[k]] = 1 - yj_k[kp[k]]**2 / (dot_yy * jj_k)
            elif self.method == 'Linverse_Bofill' and self.phi is None:
                jj_k = dot_all_k_and_b(
                    j_k[kp[k]], j_k[kp[k]], wfs, dimensions, mode)
                phi_k[kp[k]] = yj_k[kp[k]] ** 2 / (dot_yy * jj_k)

            bg_k1 = self.update_bv(wfs, bg_k1, g_k1, u_k, j_k, yj_k, phi_k,
                                   np.maximum(1, k - m + 1), k + 1,
                                   dimensions, mode)

            # save this step:
            self.x_k = copy.deepcopy(x_k1)
            self.g_k = copy.deepcopy(g_k1)
            self.k += 1
            self.p += 1
            self.kp[self.k] = self.p

        self.iters += 1
        return multiply(bg_k1, -1.0)

    def update_bv(
            self, wfs, bv, v, u_k, j_k, yj_k, phi_k, i_0, i_m,
            dimensions=None, mode=None):
        if mode is None:
            mode = wfs.mode

        kp = self.kp
        for i in range(i_0, i_m):
            dot_uv = dot_all_k_and_b(u_k[kp[i]], v, wfs, dimensions, mode)
            dot_jv = dot_all_k_and_b(j_k[kp[i]], v, wfs, dimensions, mode)
            alpha = dot_jv - yj_k[kp[i]] * dot_uv
            beta_p = calc_diff(j_k[kp[i]], u_k[kp[i]], dot_uv, -alpha)
            beta_ms = multiply(j_k[kp[i]], dot_jv / yj_k[kp[i]])
            beta = calc_diff(beta_ms, beta_p, 1 - phi_k[kp[i]], -phi_k[kp[i]])
            bv = calc_diff(bv, beta, const=-1.0)

        return bv

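
# Dense-matrix sketch of the SR1 secant update that LSR1P applies in
# limited-memory form: with j = s - H y, the inverse Hessian approximation
# is updated as H' = H + j j^T / (j . y), after which H' satisfies the
# secant condition H' y = s.
def _demo_sr1_dense():
    H = np.eye(2)
    s = np.array([1.0, 0.0])
    y = np.array([2.0, 0.0])       # curvature 2 along the first axis
    j = s - H @ y
    H1 = H + np.outer(j, j) / np.dot(j, y)
    assert np.allclose(H1 @ y, s)  # secant condition holds after update
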

def multiply(x, const=1.0):
    """
    Must not modify x!
    :param x: dictionary of arrays
    :param const: scalar
    :return: new dictionary y = const * x
    """
    y = {}
    for k in x.keys():
        y[k] = const * x[k]
    return y


def zeros(x):
    y = {}
    for k in x.keys():
        y[k] = np.zeros_like(x[k])
    return y


def minus(x):
    return multiply(x, -1.0)


def calc_diff(x1, x2, const_0=1.0, const=1.0):
    y_k = {}
    for k in x1.keys():
        y_k[k] = const_0 * x1[k] - const * x2[k]
    return y_k

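
# The helpers above implement elementwise arithmetic on dictionaries keyed
# by k-point index, e.g. y = const_0 * x1 - const * x2 for calc_diff:
def _demo_dict_helpers():
    a = {0: np.array([1.0, 2.0])}
    b = {0: np.array([3.0, 4.0])}
    assert np.allclose(multiply(a, 2.0)[0], [2.0, 4.0])
    assert np.allclose(minus(a)[0], [-1.0, -2.0])
    assert np.allclose(calc_diff(a, b, 2.0, 0.5)[0], [0.5, 2.0])
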

def dot_all_k_and_b(x1, x2, wfs, dimensions=None, mode=None):
    if mode is None:
        mode = wfs.mode
    dot_pr_x1x2 = 0.0
    if mode == 'lcao':
        for k in x1.keys():
            dot_pr_x1x2 += np.dot(x1[k].conj(), x2[k]).real
    else:
        dot_pr_x1x2 = 0.0j if wfs.dtype == complex else 0.0
        for k, kpt in enumerate(wfs.kpt_u):
            for i in range(dimensions[k]):
                dot_prod = wfs.integrate(x1[k][i], x2[k][i], False)
                dot_prod = wfs.gd.comm.sum_scalar(dot_prod)
                dot_pr_x1x2 += dot_prod
        dot_pr_x1x2 = wfs.kd.comm.sum_scalar(dot_pr_x1x2)
        dot_pr_x1x2 = 2.0 * dot_pr_x1x2.real

    return dot_pr_x1x2

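
# In 'lcao' mode the dot product is a plain real inner product summed over
# the k-point blocks:
def _demo_dot_lcao():
    a = {0: np.array([1.0, 2.0]), 1: np.array([3.0])}
    b = {0: np.array([4.0, 5.0]), 1: np.array([6.0])}
    assert dot_all_k_and_b(a, b, None, mode='lcao') == 32.0
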

def apply_prec(prec, x, const=1.0, wfs=None, mode=None):
    if mode is None:
        mode = wfs.mode
    y = {}
    if mode == 'lcao':
        for k in x.keys():
            if prec[k].dtype == complex:
                y[k] = const * (prec[k].real * x[k].real
                                + 1.0j * prec[k].imag * x[k].imag)
            else:
                y[k] = const * prec[k] * x[k]
        return y
    elif mode == 'pw':
        deg = 2.0 * (3.0 - wfs.kd.nspins)
        for k, kpt in enumerate(wfs.kpt_u):
            y[k] = x[k].copy()
            for i, z in enumerate(x[k]):
                psit_G = kpt.psit.array[i]
                ekin = prec.calculate_kinetic_energy(psit_G, kpt)
                y[k][i] = - const * prec(z, kpt, ekin) / deg
    else:
        deg = (3.0 - wfs.kd.nspins)
        for k, kpt in enumerate(wfs.kpt_u):
            y[k] = x[k].copy()
            for i, z in enumerate(x[k]):
                y[k][i] = - const * prec(z, kpt, None) / deg
    return y
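
# In 'lcao' mode the preconditioner is a dict of diagonal scalings applied
# elementwise; for complex preconditioners the real and imaginary parts are
# scaled separately. Sketch with a real diagonal and const=-1.0:
def _demo_apply_prec_lcao():
    prec = {0: np.array([0.5, 2.0])}
    g = {0: np.array([2.0, 2.0])}
    p = apply_prec(prec, g, -1.0, mode='lcao')
    assert np.allclose(p[0], [-1.0, -4.0])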