Coverage for gpaw/test/parallel/test_parallel_eigh.py: 79%

53 statements  

coverage.py v7.7.1, created at 2025-07-14 00:18 +0000

import pytest
import numpy as np
from gpaw.mpi import world
from gpaw.blacs import BlacsGrid
from gpaw.blacs import Redistributor


def parallel_eigh(matrixfile, blacsgrid=(4, 2), blocksize=64):
    """Diagonalize matrix in parallel"""
    assert np.prod(blacsgrid) == world.size
    grid = BlacsGrid(world, *blacsgrid)

    if world.rank == 0:
        H_MM = np.load(matrixfile, allow_pickle=True)
        assert H_MM.ndim == 2
        assert H_MM.shape[0] == H_MM.shape[1]
        NM = len(H_MM)
    else:
        NM = 0
    NM = world.sum_scalar(NM)  # distribute matrix shape to all nodes

    # descriptor for the individual blocks
    block_desc = grid.new_descriptor(NM, NM, blocksize, blocksize)

    # descriptor for global array on MASTER
    local_desc = grid.new_descriptor(NM, NM, NM, NM)

    # Make some dummy array on all the slaves
    if world.rank != 0:
        H_MM = local_desc.zeros()
    assert local_desc.check(H_MM)

    # The local version of the matrix
    H_mm = block_desc.empty()

    # Distribute global array to smaller blocks
    redistributor = Redistributor(world, local_desc, block_desc)
    redistributor.redistribute(H_MM, H_mm)

    # Allocate arrays for eigenvalues and -vectors
    eps_M = np.empty(NM)
    C_mm = block_desc.empty()
    block_desc.diagonalize_ex(H_mm, C_mm, eps_M)

    # Collect eigenvectors on MASTER
    C_MM = local_desc.empty()
    redistributor2 = Redistributor(world, block_desc, local_desc)
    redistributor2.redistribute(C_mm, C_MM)

    # Return eigenvalues and -vectors on Master
    if world.rank == 0:
        return eps_M, C_MM
    else:
        return None, None


@pytest.mark.intel
def test_parallel_parallel_eigh(in_tmp_dir, scalapack):
    # Test script which should be run on 1, 2, 4, or 8 CPUs

    if world.size == 1:
        blacsgrid = (1, 1)
    elif world.size == 2:
        blacsgrid = (2, 1)
    elif world.size == 4:
        blacsgrid = (2, 2)
    elif world.size == 8:
        blacsgrid = (4, 2)
    else:
        raise RuntimeError('Please use 1, 2, 4, or 8 nodes for this test')

    if world.rank == 0:
        a = np.diag(range(1, 51)).astype(float)
        a.dump('H_50x50.pckl')
        eps, U = np.linalg.eigh(a)

    eps, U = parallel_eigh('H_50x50.pckl', blacsgrid, blocksize=6)
    if world.rank == 0:
        assert abs(eps - range(1, 51)).sum() < 1e-5
        assert abs(U - np.identity(50)).sum() < 1e-5

if __name__ == '__main__':
    test_parallel_parallel_eigh(0, None)  # dummy stand-ins for the pytest fixtures
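For reference, a minimal standalone sketch of calling parallel_eigh outside pytest; the import path and the 2x1 grid are assumptions, and it must be launched under exactly two MPI ranks, since parallel_eigh asserts np.prod(blacsgrid) == world.size.

import numpy as np
from gpaw.mpi import world
from gpaw.test.parallel.test_parallel_eigh import parallel_eigh  # assumed import path

if world.rank == 0:
    # Build and dump a small symmetric test matrix on the master rank only;
    # parallel_eigh reads the file on rank 0 and broadcasts the size itself
    A = np.random.rand(20, 20)
    A = 0.5 * (A + A.T)
    A.dump('A_20x20.pckl')

# Grid size must equal the number of MPI ranks: here 2 * 1 = 2 ranks
eps, C = parallel_eigh('A_20x20.pckl', blacsgrid=(2, 1), blocksize=4)
if world.rank == 0:
    # Eigenvalues are only returned on the master rank; compare to LAPACK
    assert np.allclose(np.sort(eps), np.linalg.eigvalsh(A))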