Coverage for gpaw/benchmark/__main__.py: 0%

40 statements  

coverage.py v7.7.1, created at 2025-07-08 00:17 +0000

import argparse

description = """\
GPAW benchmark suite. Provides a list of unchanging benchmarks which allow \
users to track speed and memory usage of GPAW calculations over time.\
"""

run_benchmark_help = """\
Run a list of benchmarks. Alternatively, users can use special filter \
commands to select benchmarks containing a particular style of calculation, \
and thus easily create subsets of benchmarks.\
"""

benchmarks_help = """\
Run the given list of benchmarks and produce benchmarking data. \
Benchmarks can be either nicknames or long description strings. \
Run `python -m gpaw.benchmark list` for an extended list of standard \
benchmark nicknames and an explanation of how to create custom \
benchmarks.\
"""

list_benchmark_help = """\
List all benchmark nicknames, their respective long names, and a brief \
description of the systems and parameter sets used in the long names. \
Users can then use this information to build their own benchmark \
calculation parameter sets for particular systems.\
"""

view_benchmark_help = """\
Usage: `python -m gpaw.benchmark view benchmarkfile.json`.

Displays a pretty-formatted version of the benchmark run.\
"""

benchmarks_output_help = 'Output JSON with all the gathered information.'

version = 'May 2025'
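
What `gather_benchmarks` writes is not defined in this file. A minimal
sketch, assuming each benchmark yields one JSON-serializable record and
using an assumed `run_one` callable:

import json


def gather_benchmarks_sketch(names, output, run_one):
    # Hypothetical stand-in for gpaw.benchmark.gather_benchmarks: `run_one`
    # is an assumed callable mapping a benchmark name to a result record.
    # All records are written to one JSON file, matching the -o/--output
    # default of 'benchmarks.json' used below.
    results = {name: run_one(name) for name in names}
    with open(output, 'w') as fd:
        json.dump(results, fd, indent=2)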

if __name__ == '__main__':
    from gpaw.benchmark import (benchmark_main,
                                list_benchmarks,
                                view_benchmark,
                                parse_name,
                                gather_benchmarks)
    parser = argparse.ArgumentParser(prog='gpaw.benchmark',
                                     description=description)
    subparsers = parser.add_subparsers(help='subcommand help', dest='command')
    run_parser = subparsers.add_parser('run', help=run_benchmark_help)
    run_parser.add_argument('benchmarks', nargs='*', help=benchmarks_help)
    subparsers.add_parser('list', help=list_benchmark_help)
    view_parser = subparsers.add_parser('view', help=view_benchmark_help)
    view_parser.add_argument('benchmarkfile')
    gather_parser = subparsers.add_parser('gather', help='')
    gather_parser.add_argument('benchmarks', nargs='*', help=benchmarks_help)
    gather_parser.add_argument('-o', '--output', help=benchmarks_output_help,
                               default='benchmarks.json')
    subparsers.add_parser('test', help='')
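
    # The CLI surface wired up above, summarized from the parsers:
    #   python -m gpaw.benchmark run [BENCHMARK ...]
    #   python -m gpaw.benchmark list
    #   python -m gpaw.benchmark view BENCHMARKFILE
    #   python -m gpaw.benchmark gather [BENCHMARK ...] [-o OUTPUT]
    #   python -m gpaw.benchmark test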

    args = parser.parse_args()
    if args.command == 'run':
        for benchmark in args.benchmarks:
            benchmark_main(benchmark)
    elif args.command == 'list':
        print(list_benchmarks())
    elif args.command == 'view':
        view_benchmark(args.benchmarkfile)
    elif args.command == 'gather':
        gather_benchmarks(args.benchmarks, args.output)
    elif args.command == 'test':
        # Exercise name parsing and setup for every registered benchmark.
        from gpaw.benchmark import benchmarks, benchmark_atoms_and_calc
        for benchmark in benchmarks:
            print(benchmark)
            _, long_name, calc_info = parse_name(benchmark)
            benchmark_atoms_and_calc(long_name, calc_info)
    else:
        if args.command is None:
            raise ValueError('Run `python -m gpaw.benchmark '
                             '--help` to see how to use the program.')
        raise ValueError(f'Invalid command {args.command}.')
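
As a quick sanity sketch of how the subcommands above parse, using the
`parser` object defined in the main block; the nickname 'si-pw' is a
placeholder, not a name guaranteed to exist in the suite:

args = parser.parse_args(['gather', 'si-pw', '-o', 'out.json'])
assert args.command == 'gather'
assert args.benchmarks == ['si-pw']
assert args.output == 'out.json'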