Mirror of https://github.com/paboyle/Grid.git (synced 2025-11-01 04:24:32 +00:00)

Compare commits: v0.7.0...feature/la (430 commits)

	| Author | SHA1 | Date | |
|---|---|---|---|
|  | 27ea2afe86 | ||
|  | 78e8704eac | ||
|  | 67131d82f2 | ||
|  | 615a9448b9 | ||
|  | 00164f5ce5 | ||
|  | a7f72eb994 | ||
|  | 501fa1614a | ||
|  | 5bf42e1e15 | ||
|  | fe4d9b003c | ||
|  | 4a699b4da3 | ||
|  | 689323f4ee | ||
|  | 84b441800f | ||
|  | 1ef424b139 | ||
|  | aa66f41c69 | ||
|  | f96c800d25 | ||
|  | 32a52d7583 | ||
|  | fa04b6d3c2 | ||
|  | 7fab183c0e | ||
|  | 9ec9850bdb | ||
|  | 0c4ddaea0b | ||
|  | 00ebc150ad | ||
|  | 0f3e9ae57d | ||
|  | 034de160bf | ||
|  | 14507fd6e4 | ||
|  | 2db05ac214 | ||
|  | 31f99574fa | ||
|  | a34c8a2961 | ||
|  | ccd20df827 | ||
|  | e9be293444 | ||
|  | d577211cc3 | ||
|  | f4336e480a | ||
|  | e4d461cb03 | ||
|  | 3d63b4894e | ||
|  | 08583afaff | ||
|  | b395a312af | ||
|  | 66295b99aa | ||
|  | b8654be0ef | ||
|  | a479325349 | ||
|  | f6c3f6bf2d | ||
|  | d83868fdbb | ||
|  | 303e0b927d | ||
|  | 28ba8a0f48 | ||
|  | f9e28577f3 | ||
|  | 8a3aae98f6 | ||
|  | 8309f2364b | ||
|  | cac1750078 | ||
|  | 27936900e6 | ||
|  | e325929851 | ||
|  | 47af3565f4 | ||
|  | 4b4d187935 | ||
|  | 9aff354ab5 | ||
|  | cb9ff20249 | ||
|  | 9fe6ac71ea | ||
|  | f1fa00b71b | ||
|  | bf58557fb1 | ||
|  | 10cb37f504 | ||
|  | 1374c943d4 | ||
|  | a1d80282ec | ||
|  | 4eb8bbbebe | ||
|  | d1c6288c5f | ||
|  | dd949bc428 | ||
|  | bb7378cfc3 | ||
|  | f0e084a88c | ||
|  | 153672d8ec | ||
|  | 08ca338875 | ||
|  | f7cbf82c04 | ||
|  | 07009c569a | ||
|  | 09f4cdb11e | ||
|  | 1e54882f71 | ||
|  | d54807b8c0 | ||
|  | 5625b47c7d | ||
|  | 1edcf902b7 | ||
|  | e5c19e1fd7 | ||
|  | a11d0a33d1 | ||
|  | 4f8b6f26b4 | ||
|  | 073525c5b3 | ||
|  | eb6153080a | ||
|  | f7072d1ac2 | ||
|  | fddeb29d6b | ||
|  | a9ec5cf564 | ||
|  | 946a8671b9 | ||
|  | a6eeea777b | ||
|  | 771a1b8e79 | ||
|  | bfb68e6f02 | ||
|  | 77f7737ccc | ||
|  | 18c335198a | ||
|  | f9df685cde | ||
|  | 17c5b0f152 | ||
|  | 5918769f97 | ||
|  | bbaf1ada91 | ||
|  | 1950ac9294 | ||
|  | 13fa70ac1a | ||
|  | 7cb2b11f26 | ||
|  | 1184ed29ae | ||
|  | 203c7bf6fa | ||
|  | c709883f3f | ||
|  | aed5de4d50 | ||
|  | ba27cc6571 | ||
|  | d856327250 | ||
|  | d75369cb56 | ||
|  | bf973d0d56 | ||
|  | 837bf8a5be | ||
|  | c05b2199f6 | ||
|  | a5fe07c077 | ||
|  | b83b2b1415 | ||
|  | b331be9101 | ||
|  | 49c20a9fa8 | ||
|  | 7359df3501 | ||
|  | 59bd1fe21b | ||
|  | 4e907fef2c | ||
|  | 67888b657f | ||
|  | 74af885d4e | ||
|  | d36d2fb40d | ||
|  | 5b9267e88d | ||
|  | 15fd4003ef | ||
|  | 4b4c2a715b | ||
|  | 54a5e6c1d0 | ||
|  | 73aeca7dea | ||
|  | ad89abb018 | ||
|  | 80c5bce5bb | ||
|  | f68b5de9c8 | ||
|  | d0f3d525d5 | ||
|  | f365a83fae | ||
|  | 3a58217405 | ||
|  | c289699d9a | ||
|  | c3b1263e75 | ||
|  | 34a9aeb331 | ||
| 102ea9ae66 | |||
|  | 5fa386ddc9 | ||
|  | edabb3577f | ||
|  | ce5df177ee | ||
|  | a0bb8e5b46 | ||
|  | 46f88e6d72 | ||
|  | dd8f1ea189 | ||
|  | b61835c1a5 | ||
|  | d9cd4f0273 | ||
|  | 459f70e8d4 | ||
|  | 061e48fd73 | ||
|  | ab50145001 | ||
|  | b49bec0cec | ||
|  | ae56e556c6 | ||
|  | 1cdf999668 | ||
|  | 11062fb686 | ||
|  | 383ca7d392 | ||
|  | a446d95c33 | ||
|  | be66e7dd95 | ||
|  | 6d0d064a6c | ||
|  | bfef525ed2 | ||
|  | 0b0cf62193 | ||
|  | 7d88198387 | ||
|  | 2f619482b8 | ||
|  | d6472eda8d | ||
|  | 9e658de238 | ||
|  | bcefdd7c4e | ||
|  | 9d45fca8bc | ||
|  | ac9e6b63c0 | ||
|  | e140b3f802 | ||
|  | d9d3d30cc7 | ||
|  | 47a12ec7b5 | ||
|  | ec1e2f7a40 | ||
|  | 41f73ec083 | ||
|  | fd367d8bfd | ||
|  | 6d0786ff9d | ||
|  | b7f93aeb4d | ||
|  | 202a7fe900 | ||
|  | 8a3fe60a27 | ||
|  | 44051aecd1 | ||
|  | 06e6f8de00 | ||
|  | dbe4d7850c | ||
|  | 4fe182e5a7 | ||
|  | 175f393f9d | ||
|  | 7d867a8134 | ||
|  | 9939b267d2 | ||
|  | 14d53e1c9e | ||
|  | 8bd869da37 | ||
|  | c7036f6717 | ||
|  | c0485d799d | ||
|  | 7abc5613bd | ||
|  | 237cfd11ab | ||
|  | a4b7dddb67 | ||
|  | 5696781862 | ||
|  | 8f4b3049cd | ||
|  | 2a6e673a91 | ||
|  | 9b6cde173f | ||
|  | 9f280b82c4 | ||
| c3f0889eda | |||
|  | 7a53dc3715 | ||
|  | 0f214ad427 | ||
|  | fe4912880d | ||
|  | f038c6babe | ||
|  | 169f4b2711 | ||
|  | 2d8aff36fe | ||
|  | 9fa07eecde | ||
|  | 659d7d1a40 | ||
|  | f64fb7bd77 | ||
|  | 2a35449b91 | ||
|  | 184af5bd05 | ||
|  | 097c9637ee | ||
|  | dc6f078246 | ||
|  | 8a4714a4a6 | ||
|  | 40e119c61c | ||
|  | d9593c4b81 | ||
|  | ac740f73ce | ||
|  | 75dc7794b9 | ||
|  | dee68fc728 | ||
|  | a2d3643634 | ||
|  | 57002924bc | ||
|  | 7b0237b081 | ||
|  | b68ad0cc0b | ||
|  | 37263fd9b1 | ||
|  | 3d09e3e9e0 | ||
|  | 1354b46338 | ||
|  | 251a97fe1b | ||
|  | e18929eaa0 | ||
|  | f3b0a92e71 | ||
|  | a0be3f7330 | ||
|  | b5a6e4f1fd | ||
|  | 7a788db3dc | ||
|  | f20eceb6cd | ||
|  | 38325ebbc6 | ||
|  | b73bd151bb | ||
|  | 694b305cab | ||
|  | 2d3737a133 | ||
|  | ac1f1838bc | ||
|  | 09d09d0fe5 | ||
|  | bf630a6821 | ||
|  | 8859a151cc | ||
|  | 688a39cfd9 | ||
|  | 6f5a5cd9b3 | ||
|  | 0933aeefd4 | ||
|  | 322f61acee | ||
|  | 08e04b9676 | ||
| feaa2ac947 | |||
| 07de925127 | |||
|  | a9c816a268 | ||
|  | e43a8b6b8a | ||
|  | bf729766dd | ||
|  | dafb351d38 | ||
| 0b707b861c | |||
| 15e87a4607 | |||
| 7d7220cbd7 | |||
|  | 54e94360ad | ||
| 0af740dc15 | |||
| d2e8372df3 | |||
|  | 869b99ec1e | ||
|  | 4a29ab0d0a | ||
|  | 0165bcb58e | ||
| 4372d04ad4 | |||
|  | 349d75e483 | ||
|  | 56abbdf4c2 | ||
|  | af71c63f4c | ||
|  | e51475703a | ||
|  | 1feddf4ba6 | ||
|  | 600d7ddc2e | ||
|  | e504260f3d | ||
|  | 0440d4ce66 | ||
|  | 5e4bea8f20 | ||
|  | 6ebf9f15b7 | ||
|  | 1d7aa673a4 | ||
|  | b9104f3072 | ||
| b22eab8c8b | |||
|  | a7d56523ab | ||
|  | 9e56c65730 | ||
|  | ef4f2b8c41 | ||
|  | e8b95bd35b | ||
|  | 7e35286860 | ||
|  | 0486ff8e79 | ||
| 1e8a2e1621 | |||
| 7587df831a | |||
|  | e9cc21900f | ||
|  | 0a8faac271 | ||
|  | abc4de0fd2 | ||
| b672717096 | |||
| 284ee194b1 | |||
|  | cfe3cd76d1 | ||
|  | 3fa5e3109f | ||
|  | 8b7049f737 | ||
|  | c85024683e | ||
|  | 1300b0b04b | ||
|  | e6d984b484 | ||
|  | 1d18d95d4f | ||
|  | ae39ec85a3 | ||
|  | b96daf53a0 | ||
|  | 46879e1658 | ||
|  | ae4de94798 | ||
|  | 0ab555b4f5 | ||
|  | 8e9be9f84f | ||
|  | d572170170 | ||
| 81b18f843a | |||
|  | a833f88c32 | ||
|  | 07b2c1b253 | ||
|  | 735cbdb983 | ||
|  | 2ad54c5a02 | ||
|  | 12ccc73cf5 | ||
|  | 3d04dc33c6 | ||
|  | e7564f8330 | ||
|  | 91199a8ea0 | ||
|  | 0494feec98 | ||
|  | a16b1e134e | ||
|  | 769ad578f5 | ||
|  | eaac0044b5 | ||
|  | 56042f002c | ||
|  | 3bfd1f13e6 | ||
|  | 70ab598c96 | ||
|  | 1d0ca65e28 | ||
|  | 2bc4d0a20e | ||
| 2490816297 | |||
| 5f55bca378 | |||
| f6aa82b7f2 | |||
| 22749699a3 | |||
| 0503c028be | |||
|  | 092dcd4e04 | ||
|  | 4a8c4ccfba | ||
|  | 9b44189d5a | ||
|  | 7da4856e8e | ||
|  | aaf1e33a77 | ||
|  | 094c3d091a | ||
|  | 4b98e524a0 | ||
|  | 1a1f6d55f9 | ||
|  | 21421656ab | ||
|  | 6f687a67cd | ||
|  | b30754e762 | ||
|  | 1e429a0d57 | ||
|  | d38a4de36c | ||
|  | ef1b7db374 | ||
|  | 53a9aeb965 | ||
|  | e30fa9f4b8 | ||
|  | 58e8d0a10d | ||
|  | 62cf9cf638 | ||
|  | 0fb458879d | ||
|  | 725c513d94 | ||
| d8648307ff | |||
| 064315c00b | |||
|  | 7c6cc85df6 | ||
|  | a6691ef87c | ||
|  | 8e0ced627a | ||
|  | 0de314870d | ||
|  | ffb91e53d2 | ||
|  | f4e8bf2858 | ||
| a74c34315c | |||
|  | 69470ccc10 | ||
|  | b8b5934193 | ||
|  | 75856f2945 | ||
|  | 3c112a7a25 | ||
|  | ab3596d4d3 | ||
|  | a8c10b1933 | ||
|  | 15e801af3f | ||
|  | 0ffc235741 | ||
|  | 8e19c99c7d | ||
|  | a0bc0ad06f | ||
|  | a8fb2835ca | ||
|  | bc862ce3ab | ||
| 22f4feee7b | |||
| 3f858d6755 | |||
|  | 3267683e22 | ||
|  | f46a67ffb3 | ||
|  | f7b8383ef5 | ||
|  | 10f2872aae | ||
| 35fa3d1dfd | |||
|  | cd73897b8d | ||
| d1ece74137 | |||
| 43c817cc67 | |||
|  | 741bc836f6 | ||
|  | 8546d01a4c | ||
| 1407418755 | |||
| a6a0da873f | |||
|  | 7b03d8d087 | ||
|  | 4b759b8f2a | ||
|  | 038b6ee9cd | ||
|  | 38806343a8 | ||
|  | 831ca4e3bf | ||
| eedcaf6470 | |||
| b39f0d1fb6 | |||
| 9f1267dfe6 | |||
| 2e90285232 | |||
| e254de982e | |||
| 28d99b5297 | |||
|  | ee93f0218b | ||
| 161ed102a5 | |||
|  | f65a585236 | ||
|  | ae99e99da2 | ||
| f3ca29af6c | |||
| 37988221a8 | |||
| 7a327a3f28 | |||
| 92f8950a56 | |||
| 65987a8a58 | |||
| 889d828bc2 | |||
| ad98b6193d | |||
| fc760016b3 | |||
| 2da86f7dae | |||
| 97843e2b58 | |||
| 82b3f54697 | |||
| 673994b281 | |||
| bbc0eff078 | |||
| 4c60e31070 | |||
| afbf7d4c37 | |||
| 8c3cc32364 | |||
| 4c3fd9fa3f | |||
| 17b3a10d46 | |||
| 149a46b92c | |||
| db9c28a773 | |||
| 9ac3ac41df | |||
| 2af9ab9034 | |||
| 6f1ea96293 | |||
| 2e3c5890b6 | |||
| bc6678732f | |||
| b10ae00c8a | |||
|  | 0cd6b1858c | ||
|  | 6ad73145bc | ||
| f7293f2ddb | |||
|  | 6b8ee7bae0 | ||
|  | 739c2308b5 | ||
|  | a71b69389b | ||
|  | d49e502f53 | ||
|  | 92ec3404f8 | ||
|  | f4ebea3381 | ||
|  | cf167d0cd1 | ||
|  | c30d96ea50 | ||
|  | 7ffe17ada1 | ||
| 330a9b3f4c | |||
|  | 28ff66a381 | ||
|  | 78c7bcee36 | ||
| 00a7b95631 | |||
| 94d8321d01 | |||
|  | ac24cc9f99 | ||
|  | 3ab4c8c0bb | ||
| 26d124283e | |||
| 0d889b7041 | |||
| ab31ad006a | |||
|  | 6e4a06e180 | ||
							
								
								
									
.travis.yml (68 changed lines)

							| @@ -9,68 +9,6 @@ matrix: | ||||
|     - os:        osx | ||||
|       osx_image: xcode8.3 | ||||
|       compiler: clang | ||||
|     - compiler: gcc | ||||
|       dist: trusty | ||||
|       sudo: required | ||||
|       addons: | ||||
|         apt: | ||||
|           sources: | ||||
|             - ubuntu-toolchain-r-test | ||||
|           packages: | ||||
|             - g++-4.9 | ||||
|             - libmpfr-dev | ||||
|             - libgmp-dev | ||||
|             - libmpc-dev | ||||
|             - libopenmpi-dev | ||||
|             - openmpi-bin | ||||
|             - binutils-dev | ||||
|       env: VERSION=-4.9 | ||||
|     - compiler: gcc | ||||
|       dist: trusty | ||||
|       sudo: required | ||||
|       addons: | ||||
|         apt: | ||||
|           sources: | ||||
|             - ubuntu-toolchain-r-test | ||||
|           packages: | ||||
|             - g++-5 | ||||
|             - libmpfr-dev | ||||
|             - libgmp-dev | ||||
|             - libmpc-dev | ||||
|             - libopenmpi-dev | ||||
|             - openmpi-bin | ||||
|             - binutils-dev | ||||
|       env: VERSION=-5 | ||||
|     - compiler: clang | ||||
|       dist: trusty | ||||
|       addons: | ||||
|         apt: | ||||
|           sources: | ||||
|             - ubuntu-toolchain-r-test | ||||
|           packages: | ||||
|             - g++-4.8 | ||||
|             - libmpfr-dev | ||||
|             - libgmp-dev | ||||
|             - libmpc-dev | ||||
|             - libopenmpi-dev | ||||
|             - openmpi-bin | ||||
|             - binutils-dev | ||||
|       env: CLANG_LINK=http://llvm.org/releases/3.8.0/clang+llvm-3.8.0-x86_64-linux-gnu-ubuntu-14.04.tar.xz | ||||
|     - compiler: clang | ||||
|       dist: trusty | ||||
|       addons: | ||||
|         apt: | ||||
|           sources: | ||||
|             - ubuntu-toolchain-r-test | ||||
|           packages: | ||||
|             - g++-4.8 | ||||
|             - libmpfr-dev | ||||
|             - libgmp-dev | ||||
|             - libmpc-dev | ||||
|             - libopenmpi-dev | ||||
|             - openmpi-bin | ||||
|             - binutils-dev | ||||
|       env: CLANG_LINK=http://llvm.org/releases/3.7.0/clang+llvm-3.7.0-x86_64-linux-gnu-ubuntu-14.04.tar.xz | ||||
|        | ||||
| before_install: | ||||
|     - export GRIDDIR=`pwd` | ||||
| @@ -106,9 +44,3 @@ script: | ||||
|     - make -j4 | ||||
|     - ./benchmarks/Benchmark_dwf --threads 1 --debug-signals | ||||
|     - make check | ||||
|     - echo make clean | ||||
|     - if [[ "$TRAVIS_OS_NAME" == "linux" ]] && [[ "$CC" == "clang" ]]; then ../configure --enable-precision=single --enable-simd=SSE4 --enable-comms=mpi-auto ; fi | ||||
|     - if [[ "$TRAVIS_OS_NAME" == "linux" ]] && [[ "$CC" == "clang" ]]; then make -j4; fi | ||||
|     - if [[ "$TRAVIS_OS_NAME" == "linux" ]] && [[ "$CC" == "clang" ]]; then mpirun.openmpi -n 2 ./benchmarks/Benchmark_dwf --threads 1 --mpi 2.1.1.1; fi | ||||
|  | ||||
|  | ||||
|   | ||||
							
								
								
									
README.md (277 changed lines)

							| @@ -1,27 +1,44 @@ | ||||
| # Grid | ||||
| <table> | ||||
| <tr> | ||||
|     <td>Last stable release</td> | ||||
|     <td><a href="https://travis-ci.org/paboyle/Grid"> | ||||
|     <img src="https://travis-ci.org/paboyle/Grid.svg?branch=master"></a> | ||||
|     </td> | ||||
| </tr> | ||||
| <tr> | ||||
|     <td>Development branch</td> | ||||
|     <td><a href="https://travis-ci.org/paboyle/Grid"> | ||||
|     <img src="https://travis-ci.org/paboyle/Grid.svg?branch=develop"></a> | ||||
|     </td> | ||||
| </tr> | ||||
| </table> | ||||
| # Grid [![Teamcity status](http://ci.cliath.ph.ed.ac.uk/app/rest/builds/aggregated/strob:(buildType:(affectedProject(id:Grid)),branch:name:develop)/statusIcon.svg)](http://ci.cliath.ph.ed.ac.uk/project.html?projectId=Grid&tab=projectOverview) [![Travis status](https://travis-ci.org/paboyle/Grid.svg?branch=develop)](https://travis-ci.org/paboyle/Grid) | ||||
|  | ||||
| **Data parallel C++ mathematical object library.** | ||||
|  | ||||
| License: GPL v2. | ||||
|  | ||||
| Last update Nov 2016. | ||||
| Last update June 2017. | ||||
|  | ||||
| _Please do not send pull requests to the `master` branch which is reserved for releases._ | ||||
|  | ||||
|  | ||||
|  | ||||
| ### Description | ||||
| This library provides data parallel C++ container classes with internal memory layout | ||||
| that is transformed to map efficiently to SIMD architectures. CSHIFT facilities | ||||
| are provided, similar to HPF and cmfortran, and user control is given over the mapping of | ||||
| array indices to both MPI tasks and SIMD processing elements. | ||||
|  | ||||
| * Identically shaped arrays can then be processed with perfect data parallelisation. | ||||
| * Such identically shaped arrays are called conformable arrays. | ||||
|  | ||||
| The transformation is based on the observation that Cartesian array processing involves | ||||
| identical processing to be performed on different regions of the Cartesian array. | ||||
|  | ||||
| The library will both geometrically decompose into MPI tasks and across SIMD lanes. | ||||
| Local vector loops are parallelised with OpenMP pragmas. | ||||
|  | ||||
| Data parallel array operations can then be specified with a SINGLE data parallel paradigm, but | ||||
| optimally use MPI, OpenMP and SIMD parallelism under the hood. This is a significant simplification | ||||
| for most programmers. | ||||
|  | ||||
| The layout transformations are parametrised by the SIMD vector length. This adapts according to the architecture. | ||||
| Presently SSE4, ARM NEON (128 bits), AVX, AVX2, QPX (256 bits), IMCI and AVX512 (512 bits) targets are supported. | ||||
|  | ||||
| These are presented as `vRealF`, `vRealD`, `vComplexF`, and `vComplexD` internal vector data types.  | ||||
| The corresponding scalar types are named `RealF`, `RealD`, `ComplexF` and `ComplexD`. | ||||
|  | ||||
| MPI, OpenMP, and SIMD parallelism are present in the library. | ||||
| Please see [this paper](https://arxiv.org/abs/1512.03487) for more detail. | ||||
|  | ||||
|  | ||||
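| As a minimal illustration of the interface described above (a sketch adapted from the `Benchmark_ITT.cc` memory benchmark shown further down this page; the 16^4 lattice size and the `Nvec=8` payload are arbitrary choices): | ||||
|  | ||||
| ``` cpp | ||||
| #include <Grid/Grid.h> | ||||
|  | ||||
| using namespace Grid; | ||||
| using namespace Grid::QCD; | ||||
|  | ||||
| int main(int argc, char **argv) { | ||||
|   Grid_init(&argc, &argv); | ||||
|  | ||||
|   // A 4-d lattice decomposed over MPI ranks and SIMD lanes; users only see the global shape. | ||||
|   const int Nvec = 8; | ||||
|   typedef Lattice<iVector<vReal, Nvec> > LatticeVec; | ||||
|  | ||||
|   std::vector<int> latt_size({16, 16, 16, 16});                       // illustrative global volume | ||||
|   std::vector<int> simd_layout = GridDefaultSimd(Nd, vReal::Nsimd()); // lanes fixed by --enable-simd | ||||
|   std::vector<int> mpi_layout  = GridDefaultMpi();                    // ranks fixed at run time | ||||
|   GridCartesian Grid(latt_size, simd_layout, mpi_layout); | ||||
|  | ||||
|   GridSerialRNG sRNG; | ||||
|   sRNG.SeedFixedIntegers(std::vector<int>({45, 12, 81, 9})); | ||||
|   iVector<vReal, Nvec> rn; | ||||
|   random(sRNG, rn); | ||||
|  | ||||
|   // Three conformable arrays: identical shape, identical layout. | ||||
|   LatticeVec x(&Grid); x = rn; | ||||
|   LatticeVec y(&Grid); y = rn; | ||||
|   LatticeVec z(&Grid); | ||||
|  | ||||
|   // One data parallel statement; MPI, OpenMP and SIMD parallelism are used under the hood. | ||||
|   double a = 2.0; | ||||
|   z = a * x - y; | ||||
|  | ||||
|   // CSHIFT facility: circular shift of a conformable array by one site in direction 0. | ||||
|   LatticeVec zs(&Grid); | ||||
|   zs = Cshift(z, 0, 1); | ||||
|  | ||||
|   Grid_finalize(); | ||||
|   return 0; | ||||
| } | ||||
| ``` | ||||
|  | ||||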
| ### Compilers | ||||
|  | ||||
| Intel ICPC v16.0.3 and later | ||||
| @@ -56,35 +73,25 @@ When you file an issue, please go though the following checklist: | ||||
| 6. Attach the output of `make V=1`. | ||||
| 7. Describe the issue and any previous attempt to solve it. If relevant, show how to reproduce the issue using a minimal working example. | ||||
|  | ||||
| ### Required libraries | ||||
| Grid requires: | ||||
|  | ||||
| [GMP](https://gmplib.org/),  | ||||
|  | ||||
| ### Description | ||||
| This library provides data parallel C++ container classes with internal memory layout | ||||
| that is transformed to map efficiently to SIMD architectures. CSHIFT facilities | ||||
| are provided, similar to HPF and cmfortran, and user control is given over the mapping of | ||||
| array indices to both MPI tasks and SIMD processing elements. | ||||
| [MPFR](http://www.mpfr.org/)  | ||||
|  | ||||
| * Identically shaped arrays then be processed with perfect data parallelisation. | ||||
| * Such identically shaped arrays are called conformable arrays. | ||||
| Bootstrapping Grid downloads the Eigen library and uses it for internal dense matrix (non-QCD) operations. | ||||
|  | ||||
| The transformation is based on the observation that Cartesian array processing involves | ||||
| identical processing to be performed on different regions of the Cartesian array. | ||||
| Grid optionally uses: | ||||
|  | ||||
| The library will both geometrically decompose into MPI tasks and across SIMD lanes. | ||||
| Local vector loops are parallelised with OpenMP pragmas. | ||||
| [HDF5](https://support.hdfgroup.org/HDF5/)   | ||||
|  | ||||
| Data parallel array operations can then be specified with a SINGLE data parallel paradigm, but | ||||
| optimally use MPI, OpenMP and SIMD parallelism under the hood. This is a significant simplification | ||||
| for most programmers. | ||||
| [LIME](http://usqcd-software.github.io/c-lime/) for ILDG and SciDAC file format support.  | ||||
|  | ||||
| The layout transformations are parametrised by the SIMD vector length. This adapts according to the architecture. | ||||
| Presently SSE4 (128 bit) AVX, AVX2, QPX (256 bit), IMCI, and AVX512 (512 bit) targets are supported (ARM NEON on the way). | ||||
| [FFTW](http://www.fftw.org) either generic version or via the Intel MKL library. | ||||
|  | ||||
| These are presented as `vRealF`, `vRealD`, `vComplexF`, and `vComplexD` internal vector data types. These may be useful in themselves for other programmers. | ||||
| The corresponding scalar types are named `RealF`, `RealD`, `ComplexF` and `ComplexD`. | ||||
| LAPACK either generic version or Intel MKL library. | ||||
|  | ||||
| MPI, OpenMP, and SIMD parallelism are present in the library. | ||||
| Please see https://arxiv.org/abs/1512.03487 for more detail. | ||||
|  | ||||
| ### Quick start | ||||
| First, start by cloning the repository: | ||||
| @@ -155,7 +162,6 @@ The following options can be use with the `--enable-comms=` option to target dif | ||||
| | `none`         | no communications                                             | | ||||
| | `mpi[-auto]`   | MPI communications                                            | | ||||
| | `mpi3[-auto]`  | MPI communications using MPI 3 shared memory                  | | ||||
| | `mpi3l[-auto]` | MPI communications using MPI 3 shared memory and leader model | | ||||
| | `shmem `       | Cray SHMEM communications                                     | | ||||
|  | ||||
| For the MPI interfaces the optional `-auto` suffix instructs the `configure` scripts to determine all the necessary compilation and linking flags. This is done by extracting the information from the MPI wrapper specified in the environment variable `MPICXX` (if not specified `configure` will scan through a list of default names). The `-auto` suffix is not supported by the Cray environment wrapper scripts. Use the standard versions instead.   | ||||
| @@ -173,7 +179,8 @@ The following options can be use with the `--enable-simd=` option to target diff | ||||
| | `AVXFMA4`   | AVX (256 bit) + FMA4                   | | ||||
| | `AVX2`      | AVX 2 (256 bit)                        | | ||||
| | `AVX512`    | AVX 512 bit                            | | ||||
| | `QPX`       | QPX (256 bit)                          | | ||||
| | `NEONv8`    | [ARM NEON](http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.den0024a/ch07s03.html) (128 bit)                     | | ||||
| | `QPX`       | IBM QPX (256 bit)                      | | ||||
|  | ||||
| Alternatively, some CPU codenames can be directly used: | ||||
|  | ||||
| @@ -196,20 +203,204 @@ The following configuration is recommended for the Intel Knights Landing platfor | ||||
| ../configure --enable-precision=double\ | ||||
|              --enable-simd=KNL        \ | ||||
|              --enable-comms=mpi-auto  \ | ||||
|              --with-gmp=<path>        \ | ||||
|              --with-mpfr=<path>       \ | ||||
|              --enable-mkl             \ | ||||
|              CXX=icpc MPICXX=mpiicpc | ||||
| ``` | ||||
| The MKL flag enables use of BLAS and FFTW from the Intel Math Kernels Library. | ||||
|  | ||||
| where `<path>` is the UNIX prefix where GMP and MPFR are installed. If you are working on a Cray machine that does not use the `mpiicpc` wrapper, please use: | ||||
| If you are working on a Cray machine that does not use the `mpiicpc` wrapper, please use: | ||||
|  | ||||
| ``` bash | ||||
| ../configure --enable-precision=double\ | ||||
|              --enable-simd=KNL        \ | ||||
|              --enable-comms=mpi       \ | ||||
|              --with-gmp=<path>        \ | ||||
|              --with-mpfr=<path>       \ | ||||
|              --enable-mkl             \ | ||||
|              CXX=CC CC=cc | ||||
| ``` | ||||
|  | ||||
| If gmp and mpfr are NOT in standard places (/usr/) these flags may be needed: | ||||
| ``` bash | ||||
|                --with-gmp=<path>        \ | ||||
|                --with-mpfr=<path>       \ | ||||
| ``` | ||||
| where `<path>` is the UNIX prefix where GMP and MPFR are installed.  | ||||
|  | ||||
| Knights Landing nodes with two Intel Omni-Path adapters per node  | ||||
| presently perform better with more than one MPI rank per node, using shared memory  | ||||
| for intra-node communication. This is the mpi3 communications implementation.  | ||||
| We recommend four ranks per node for best performance, but the optimum is local volume dependent. | ||||
|  | ||||
| ``` bash | ||||
| ../configure --enable-precision=double\ | ||||
|              --enable-simd=KNL        \ | ||||
|              --enable-comms=mpi3-auto \ | ||||
|              --enable-mkl             \ | ||||
|              CXX=icpc MPICXX=mpiicpc  | ||||
| ``` | ||||
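|  | ||||
| With such a build, a run using four ranks per node might look like the following (an illustrative sketch only; the `--mpi` decomposition, thread count and lattice size are placeholders to adapt to the machine): | ||||
|  | ||||
| ``` bash | ||||
| # e.g. one KNL node, 4 MPI ranks, 16 threads per rank (illustrative values) | ||||
| mpirun -n 4 ./benchmarks/Benchmark_dwf --mpi 1.1.2.2 --threads 16 --grid 16.16.16.16 | ||||
| ``` | ||||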
|  | ||||
| ### Build setup for Intel Haswell Xeon platform | ||||
|  | ||||
| The following configuration is recommended for the Intel Haswell platform: | ||||
|  | ||||
| ``` bash | ||||
| ../configure --enable-precision=double\ | ||||
|              --enable-simd=AVX2       \ | ||||
|              --enable-comms=mpi3-auto \ | ||||
|              --enable-mkl             \ | ||||
|              CXX=icpc MPICXX=mpiicpc | ||||
| ``` | ||||
| The MKL flag enables use of BLAS and FFTW from the Intel Math Kernels Library. | ||||
|  | ||||
| If gmp and mpfr are NOT in standard places (/usr/) these flags may be needed: | ||||
| ``` bash | ||||
|                --with-gmp=<path>        \ | ||||
|                --with-mpfr=<path>       \ | ||||
| ``` | ||||
| where `<path>` is the UNIX prefix where GMP and MPFR are installed.  | ||||
|  | ||||
| If you are working on a Cray machine that does not use the `mpiicpc` wrapper, please use: | ||||
|  | ||||
| ``` bash | ||||
| ../configure --enable-precision=double\ | ||||
|              --enable-simd=AVX2       \ | ||||
|              --enable-comms=mpi3      \ | ||||
|              --enable-mkl             \ | ||||
|              CXX=CC CC=cc | ||||
| ``` | ||||
| Since dual socket nodes are commonplace, we recommend MPI-3 as the default, with the use of  | ||||
| one rank per socket. If using the Intel MPI library, threads should be pinned to NUMA domains using | ||||
| ``` | ||||
|         export I_MPI_PIN=1 | ||||
| ``` | ||||
| This is the default. | ||||
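|  | ||||
| For example, a single dual socket node then runs two ranks, one per socket (an illustrative sketch; adapt `--mpi`, `--threads` and `--grid` to the machine): | ||||
|  | ||||
| ``` bash | ||||
| # e.g. 2 ranks (one per socket), 16 threads per rank on a 16+16 core node | ||||
| mpirun -n 2 ./benchmarks/Benchmark_dwf --mpi 2.1.1.1 --threads 16 --grid 16.16.16.16 | ||||
| ``` | ||||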
|  | ||||
| ### Build setup for Intel Skylake Xeon platform | ||||
|  | ||||
| The following configuration is recommended for the Intel Skylake platform: | ||||
|  | ||||
| ``` bash | ||||
| ../configure --enable-precision=double\ | ||||
|              --enable-simd=AVX512     \ | ||||
|              --enable-comms=mpi3      \ | ||||
|              --enable-mkl             \ | ||||
|              CXX=mpiicpc | ||||
| ``` | ||||
| The MKL flag enables use of BLAS and FFTW from the Intel Math Kernels Library. | ||||
|  | ||||
| If gmp and mpfr are NOT in standard places (/usr/) these flags may be needed: | ||||
| ``` bash | ||||
|                --with-gmp=<path>        \ | ||||
|                --with-mpfr=<path>       \ | ||||
| ``` | ||||
| where `<path>` is the UNIX prefix where GMP and MPFR are installed.  | ||||
|  | ||||
| If you are working on a Cray machine that does not use the `mpiicpc` wrapper, please use: | ||||
|  | ||||
| ``` bash | ||||
| ../configure --enable-precision=double\ | ||||
|              --enable-simd=AVX512     \ | ||||
|              --enable-comms=mpi3      \ | ||||
|              --enable-mkl             \ | ||||
|              CXX=CC CC=cc | ||||
| ``` | ||||
| Since dual socket nodes are commonplace, we recommend MPI-3 as the default, with the use of  | ||||
| one rank per socket. If using the Intel MPI library, threads should be pinned to NUMA domains using | ||||
| ```  | ||||
|         export I_MPI_PIN=1 | ||||
| ``` | ||||
| This is the default.  | ||||
|  | ||||
| #### Expected Skylake Gold 6148 dual socket (single prec, single node 20+20 cores) performance using NUMA MPI mapping:  | ||||
|  | ||||
| mpirun -n 2 benchmarks/Benchmark_dwf --grid 16.16.16.16 --mpi 2.1.1.1 --cacheblocking 2.2.2.2 --dslash-asm --shm 1024 --threads 18  | ||||
|  | ||||
| TBA | ||||
|  | ||||
|  | ||||
| ### Build setup for AMD EPYC / RYZEN | ||||
|  | ||||
| The AMD EPYC is a multichip module comprising 32 cores spread over four distinct chips each with 8 cores. | ||||
| So, even with a single socket node there is a quad-chip module. Dual socket nodes with 64 cores total | ||||
| are common. Each chip within the module exposes a separate NUMA domain. | ||||
| There are four NUMA domains per socket and we recommend one MPI rank per NUMA domain. | ||||
| MPI-3 is recommended with the use of four ranks per socket, | ||||
| and 8 threads per rank.  | ||||
|  | ||||
| The following configuration is recommended for the AMD EPYC platform. | ||||
|  | ||||
| ``` bash | ||||
| ../configure --enable-precision=double\ | ||||
|              --enable-simd=AVX2       \ | ||||
|              --enable-comms=mpi3 \ | ||||
|              CXX=mpicxx  | ||||
| ``` | ||||
|  | ||||
| If gmp and mpfr are NOT in standard places (/usr/) these flags may be needed: | ||||
| ``` bash | ||||
|                --with-gmp=<path>        \ | ||||
|                --with-mpfr=<path>       \ | ||||
| ``` | ||||
| where `<path>` is the UNIX prefix where GMP and MPFR are installed.  | ||||
|  | ||||
| Using MPICH and g++ v4.9.2, best performance can be obtained using explicit GOMP_CPU_AFFINITY flags for each MPI rank. | ||||
| This can be done by invoking MPI through a wrapper script, omp_bind.sh, shown below.  | ||||
|  | ||||
| It is recommended to run 8 MPI ranks on a single dual socket AMD EPYC, with 8 threads per rank using MPI3 and | ||||
| shared memory to communicate within this node: | ||||
|  | ||||
| mpirun -np 8 ./omp_bind.sh ./Benchmark_dwf --mpi 2.2.2.1 --dslash-unroll --threads 8 --grid 16.16.16.16 --cacheblocking 4.4.4.4  | ||||
|  | ||||
| Where omp_bind.sh does the following: | ||||
| ``` | ||||
| #!/bin/bash | ||||
|  | ||||
| numanode=` expr $PMI_RANK % 8 ` | ||||
| basecore=`expr $numanode \* 16` | ||||
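| # (added note, an assumption about this node's CPU enumeration: each of the 8 NUMA | ||||
| #  domains exposes 16 logical CPUs with SMT siblings adjacent, so the stride-2 list | ||||
| #  below picks one hardware thread per physical core of the domain)                 | ||||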
| core0=`expr $basecore + 0 ` | ||||
| core1=`expr $basecore + 2 ` | ||||
| core2=`expr $basecore + 4 ` | ||||
| core3=`expr $basecore + 6 ` | ||||
| core4=`expr $basecore + 8 ` | ||||
| core5=`expr $basecore + 10 ` | ||||
| core6=`expr $basecore + 12 ` | ||||
| core7=`expr $basecore + 14 ` | ||||
|  | ||||
| export GOMP_CPU_AFFINITY="$core0 $core1 $core2 $core3 $core4 $core5 $core6 $core7" | ||||
| echo GOMP_CPU_AFFINITY $GOMP_CPU_AFFINITY | ||||
|  | ||||
| $@ | ||||
| ``` | ||||
|  | ||||
| Performance: | ||||
|  | ||||
| #### Expected AMD EPYC 7601 dual socket (single prec, single node 32+32 cores) performance using NUMA MPI mapping:  | ||||
|  | ||||
| mpirun  -np 8 ./omp_bind.sh ./Benchmark_dwf --threads 8 --mpi 2.2.2.1 --dslash-unroll --grid 16.16.16.16 --cacheblocking 4.4.4.4 | ||||
|  | ||||
| TBA | ||||
|  | ||||
| ### Build setup for BlueGene/Q | ||||
|  | ||||
| To be written... | ||||
|  | ||||
| ### Build setup for ARM Neon | ||||
|  | ||||
| To be written... | ||||
|  | ||||
| ### Build setup for laptops, other compilers, non-cluster builds | ||||
|  | ||||
| Many versions of g++ and clang++ work with Grid; simply replace CXX (and MPICXX) accordingly | ||||
| and omit the --enable-mkl flag.  | ||||
|  | ||||
| Single node builds are enabled with  | ||||
| ``` | ||||
|             --enable-comms=none | ||||
| ``` | ||||
|  | ||||
| FFTW support that is not in the default search path may then be enabled with | ||||
| ``` | ||||
|     --with-fftw=<installpath> | ||||
| ``` | ||||
|  | ||||
| BLAS will not be compiled in by default, and Lanczos will default to Eigen diagonalisation. | ||||
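|  | ||||
| Putting these options together, a complete single node configure line might look like this (a sketch only; the compiler, SIMD target and FFTW prefix are placeholders to adapt): | ||||
|  | ||||
| ``` bash | ||||
| ../configure --enable-precision=double \ | ||||
|              --enable-simd=AVX2        \ | ||||
|              --enable-comms=none       \ | ||||
|              --with-fftw=<installpath> \ | ||||
|              CXX=clang++ | ||||
| ``` | ||||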
|  | ||||
|   | ||||
							
								
								
									
TODO (33 changed lines)

							| @@ -1,23 +1,32 @@ | ||||
| TODO: | ||||
| --------------- | ||||
|  | ||||
| Peter's work list: | ||||
| 2)- Precision conversion and sort out localConvert      <--  | ||||
| 3)- Remove DenseVector, DenseMatrix; Use Eigen instead. <-- started  | ||||
| 4)- Binary I/O speed up & x-strips | ||||
| -- Profile CG, BlockCG, etc... Flop count/rate -- PARTIAL, time but no flop/s yet | ||||
| -- Physical propagator interface | ||||
| -- Conserved currents | ||||
| -- GaugeFix into central location | ||||
| -- Multigrid Wilson and DWF, compare to other Multigrid implementations | ||||
| -- HDCR resume | ||||
| Large item work list: | ||||
|  | ||||
| 1)- BG/Q port and check ; Andrew says ok. | ||||
| 2)- Christoph's local basis expansion Lanczos | ||||
| -- | ||||
| 3a)- RNG I/O in ILDG/SciDAC (minor) | ||||
| 3b)- Precision conversion and sort out localConvert      <-- partial/easy | ||||
| 3c)- Consistent linear solver flop count/rate -- PARTIAL, time but no flop/s yet | ||||
| 4)- Physical propagator interface | ||||
| 5)- Conserved currents | ||||
| 6)- Multigrid Wilson and DWF, compare to other Multigrid implementations | ||||
| 7)- HDCR resume | ||||
|  | ||||
| Recent DONE  | ||||
| -- MultiRHS with spread out extra dim -- Go through filesystem with SciDAC I/O ; <-- DONE ; bmark cori | ||||
| -- Lanczos Remove DenseVector, DenseMatrix; Use Eigen instead. <-- DONE | ||||
| -- GaugeFix into central location                      <-- DONE | ||||
| -- Scidac and Ildg metadata handling                   <-- DONE | ||||
| -- Binary I/O MPI2 IO                                  <-- DONE | ||||
| -- Binary I/O speed up & x-strips                      <-- DONE | ||||
| -- Cut down the exterior overhead                      <-- DONE | ||||
| -- Interior legs from SHM comms                        <-- DONE | ||||
| -- Half-precision comms                                <-- DONE | ||||
| -- Merge high precision reduction into develop         | ||||
| -- multiRHS DWF; benchmark on Cori/BNL for comms elimination | ||||
| -- Merge high precision reduction into develop         <-- DONE | ||||
| -- BlockCG, BCGrQ                                      <-- DONE | ||||
| -- multiRHS DWF; benchmark on Cori/BNL for comms elimination <-- DONE | ||||
|    -- slice* linalg routines for multiRHS, BlockCG     | ||||
|  | ||||
| ----- | ||||
|   | ||||
							
								
								
									
benchmarks/Benchmark_ITT.cc (800 lines, new file)

							| @@ -0,0 +1,800 @@ | ||||
|     /************************************************************************************* | ||||
|  | ||||
|     Grid physics library, www.github.com/paboyle/Grid  | ||||
|  | ||||
|     Source file: ./benchmarks/Benchmark_ITT.cc | ||||
|  | ||||
|     Copyright (C) 2015 | ||||
|  | ||||
| Author: Peter Boyle <paboyle@ph.ed.ac.uk> | ||||
| Author: paboyle <paboyle@ph.ed.ac.uk> | ||||
|  | ||||
|     This program is free software; you can redistribute it and/or modify | ||||
|     it under the terms of the GNU General Public License as published by | ||||
|     the Free Software Foundation; either version 2 of the License, or | ||||
|     (at your option) any later version. | ||||
|  | ||||
|     This program is distributed in the hope that it will be useful, | ||||
|     but WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
|     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the | ||||
|     GNU General Public License for more details. | ||||
|  | ||||
|     You should have received a copy of the GNU General Public License along | ||||
|     with this program; if not, write to the Free Software Foundation, Inc., | ||||
|     51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||||
|  | ||||
|     See the full license in the file "LICENSE" in the top level distribution directory | ||||
|     *************************************************************************************/ | ||||
|     /*  END LEGAL */ | ||||
| #include <Grid/Grid.h> | ||||
|  | ||||
| using namespace std; | ||||
| using namespace Grid; | ||||
| using namespace Grid::QCD; | ||||
|  | ||||
| typedef WilsonFermion5D<DomainWallVec5dImplR> WilsonFermion5DR; | ||||
| typedef WilsonFermion5D<DomainWallVec5dImplF> WilsonFermion5DF; | ||||
| typedef WilsonFermion5D<DomainWallVec5dImplD> WilsonFermion5DD; | ||||
|  | ||||
|  | ||||
| std::vector<int> L_list; | ||||
| std::vector<int> Ls_list; | ||||
| std::vector<double> mflop_list; | ||||
|  | ||||
| double mflop_ref; | ||||
| double mflop_ref_err; | ||||
|  | ||||
| int NN_global; | ||||
|  | ||||
| struct time_statistics{ | ||||
|   double mean; | ||||
|   double err; | ||||
|   double min; | ||||
|   double max; | ||||
|  | ||||
|   void statistics(std::vector<double> v){ | ||||
|       double sum = std::accumulate(v.begin(), v.end(), 0.0); | ||||
|       mean = sum / v.size(); | ||||
|  | ||||
|       std::vector<double> diff(v.size()); | ||||
|       std::transform(v.begin(), v.end(), diff.begin(), [=](double x) { return x - mean; }); | ||||
|       double sq_sum = std::inner_product(diff.begin(), diff.end(), diff.begin(), 0.0); | ||||
|       err = std::sqrt(sq_sum / (v.size()*(v.size() - 1))); | ||||
|  | ||||
|       auto result = std::minmax_element(v.begin(), v.end()); | ||||
|       min = *result.first; | ||||
|       max = *result.second; | ||||
| } | ||||
| }; | ||||
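|  | ||||
| /* A worked note on the statistics helper above (added commentary; the example timings are made up): | ||||
|    err is the standard error of the mean, sqrt( sum_i (x_i - mean)^2 / (N*(N-1)) ).                  | ||||
|    E.g. for timings {10, 12, 11, 13} us: mean = 11.5 us, the squared deviations sum to 5.0,          | ||||
|    so err = sqrt(5.0/(4*3)) ~ 0.645 us. This is the error figure quoted alongside the bandwidth      | ||||
|    and mflop/s results printed below. */                                                             | ||||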
|  | ||||
| void comms_header(){ | ||||
|   std::cout <<GridLogMessage << " L  "<<"\t"<<" Ls  "<<"\t" | ||||
|             <<std::setw(11)<<"bytes"<<"MB/s uni (err/min/max)"<<"\t\t"<<"MB/s bidi (err/min/max)"<<std::endl; | ||||
| }; | ||||
|  | ||||
| Gamma::Algebra Gmu [] = { | ||||
|   Gamma::Algebra::GammaX, | ||||
|   Gamma::Algebra::GammaY, | ||||
|   Gamma::Algebra::GammaZ, | ||||
|   Gamma::Algebra::GammaT | ||||
| }; | ||||
| struct controls { | ||||
|   int Opt; | ||||
|   int CommsOverlap; | ||||
|   Grid::CartesianCommunicator::CommunicatorPolicy_t CommsAsynch; | ||||
|   //  int HugePages; | ||||
| }; | ||||
|  | ||||
| class Benchmark { | ||||
| public: | ||||
|   static void Decomposition (void ) { | ||||
|  | ||||
|     int threads = GridThread::GetThreads(); | ||||
|     std::cout<<GridLogMessage << "=================================================================================="<<std::endl; | ||||
|     std::cout<<GridLogMessage << "= Grid is setup to use "<<threads<<" threads"<<std::endl; | ||||
|     std::cout<<GridLogMessage << "=================================================================================="<<std::endl; | ||||
|     std::cout<<GridLogMessage<<"Grid Default Decomposition patterns\n"; | ||||
|     std::cout<<GridLogMessage<<"\tOpenMP threads : "<<GridThread::GetThreads()<<std::endl; | ||||
|     std::cout<<GridLogMessage<<"\tMPI tasks      : "<<GridCmdVectorIntToString(GridDefaultMpi())<<std::endl; | ||||
|     std::cout<<GridLogMessage<<"\tvReal          : "<<sizeof(vReal )*8    <<"bits ; " <<GridCmdVectorIntToString(GridDefaultSimd(4,vReal::Nsimd()))<<std::endl; | ||||
|     std::cout<<GridLogMessage<<"\tvRealF         : "<<sizeof(vRealF)*8    <<"bits ; " <<GridCmdVectorIntToString(GridDefaultSimd(4,vRealF::Nsimd()))<<std::endl; | ||||
|     std::cout<<GridLogMessage<<"\tvRealD         : "<<sizeof(vRealD)*8    <<"bits ; " <<GridCmdVectorIntToString(GridDefaultSimd(4,vRealD::Nsimd()))<<std::endl; | ||||
|     std::cout<<GridLogMessage<<"\tvComplex       : "<<sizeof(vComplex )*8 <<"bits ; " <<GridCmdVectorIntToString(GridDefaultSimd(4,vComplex::Nsimd()))<<std::endl; | ||||
|     std::cout<<GridLogMessage<<"\tvComplexF      : "<<sizeof(vComplexF)*8 <<"bits ; " <<GridCmdVectorIntToString(GridDefaultSimd(4,vComplexF::Nsimd()))<<std::endl; | ||||
|     std::cout<<GridLogMessage<<"\tvComplexD      : "<<sizeof(vComplexD)*8 <<"bits ; " <<GridCmdVectorIntToString(GridDefaultSimd(4,vComplexD::Nsimd()))<<std::endl; | ||||
|     std::cout<<GridLogMessage << "=================================================================================="<<std::endl; | ||||
|  | ||||
|   } | ||||
|  | ||||
|   static void Comms(void) | ||||
|   { | ||||
|     int Nloop=200; | ||||
|     int nmu=0; | ||||
|     int maxlat=32; | ||||
|  | ||||
|     std::vector<int> simd_layout = GridDefaultSimd(Nd,vComplexD::Nsimd()); | ||||
|     std::vector<int> mpi_layout  = GridDefaultMpi(); | ||||
|  | ||||
|     for(int mu=0;mu<Nd;mu++) if (mpi_layout[mu]>1) nmu++; | ||||
|  | ||||
|     std::vector<double> t_time(Nloop); | ||||
|     time_statistics timestat; | ||||
|  | ||||
|     std::cout<<GridLogMessage << "===================================================================================================="<<std::endl; | ||||
|     std::cout<<GridLogMessage << "= Benchmarking threaded STENCIL halo exchange in "<<nmu<<" dimensions"<<std::endl; | ||||
|     std::cout<<GridLogMessage << "===================================================================================================="<<std::endl; | ||||
|     comms_header(); | ||||
|  | ||||
|     for(int lat=4;lat<=maxlat;lat+=4){ | ||||
|       for(int Ls=8;Ls<=8;Ls*=2){ | ||||
|  | ||||
| 	std::vector<int> latt_size  ({lat*mpi_layout[0], | ||||
| 	      lat*mpi_layout[1], | ||||
| 	      lat*mpi_layout[2], | ||||
| 	      lat*mpi_layout[3]}); | ||||
|  | ||||
| 	GridCartesian     Grid(latt_size,simd_layout,mpi_layout); | ||||
| 	RealD Nrank = Grid._Nprocessors; | ||||
| 	RealD Nnode = Grid.NodeCount(); | ||||
| 	RealD ppn = Nrank/Nnode; | ||||
|  | ||||
| 	std::vector<HalfSpinColourVectorD *> xbuf(8); | ||||
| 	std::vector<HalfSpinColourVectorD *> rbuf(8); | ||||
| 	Grid.ShmBufferFreeAll(); | ||||
| 	for(int d=0;d<8;d++){ | ||||
| 	  xbuf[d] = (HalfSpinColourVectorD *)Grid.ShmBufferMalloc(lat*lat*lat*Ls*sizeof(HalfSpinColourVectorD)); | ||||
| 	  rbuf[d] = (HalfSpinColourVectorD *)Grid.ShmBufferMalloc(lat*lat*lat*Ls*sizeof(HalfSpinColourVectorD)); | ||||
| 	  bzero((void *)xbuf[d],lat*lat*lat*Ls*sizeof(HalfSpinColourVectorD)); | ||||
| 	  bzero((void *)rbuf[d],lat*lat*lat*Ls*sizeof(HalfSpinColourVectorD)); | ||||
| 	} | ||||
|  | ||||
| 	int bytes=lat*lat*lat*Ls*sizeof(HalfSpinColourVectorD); | ||||
| 	int ncomm; | ||||
| 	double dbytes; | ||||
| 	std::vector<double> times(Nloop); | ||||
| 	for(int i=0;i<Nloop;i++){ | ||||
|  | ||||
| 	  double start=usecond(); | ||||
|  | ||||
| 	  dbytes=0; | ||||
| 	  ncomm=0; | ||||
|  | ||||
| 	  parallel_for(int dir=0;dir<8;dir++){ | ||||
|  | ||||
| 	    double tbytes; | ||||
| 	    int mu =dir % 4; | ||||
|  | ||||
| 	    if (mpi_layout[mu]>1 ) { | ||||
| 	         | ||||
| 	      int xmit_to_rank; | ||||
| 	      int recv_from_rank; | ||||
| 	      if ( dir == mu ) {  | ||||
| 		int comm_proc=1; | ||||
| 		Grid.ShiftedRanks(mu,comm_proc,xmit_to_rank,recv_from_rank); | ||||
| 	      } else {  | ||||
| 		int comm_proc = mpi_layout[mu]-1; | ||||
| 		Grid.ShiftedRanks(mu,comm_proc,xmit_to_rank,recv_from_rank); | ||||
| 	      } | ||||
| 	      tbytes= Grid.StencilSendToRecvFrom((void *)&xbuf[dir][0], xmit_to_rank, | ||||
| 						 (void *)&rbuf[dir][0], recv_from_rank, | ||||
| 						 bytes,dir); | ||||
| 	   | ||||
| #ifdef GRID_OMP | ||||
| #pragma omp atomic | ||||
| #endif | ||||
| 	      ncomm++; | ||||
|  | ||||
| #ifdef GRID_OMP | ||||
| #pragma omp atomic | ||||
| #endif | ||||
| 	      dbytes+=tbytes; | ||||
| 	    } | ||||
| 	  } | ||||
| 	  Grid.Barrier(); | ||||
| 	  double stop=usecond(); | ||||
| 	  t_time[i] = stop-start; // microseconds | ||||
| 	} | ||||
|  | ||||
| 	timestat.statistics(t_time); | ||||
| 	//	for(int i=0;i<t_time.size();i++){ | ||||
| 	//	  std::cout << i<<" "<<t_time[i]<<std::endl; | ||||
| 	//	} | ||||
|  | ||||
| 	dbytes=dbytes*ppn; | ||||
| 	double xbytes    = dbytes*0.5; | ||||
| 	double rbytes    = dbytes*0.5; | ||||
| 	double bidibytes = dbytes; | ||||
|  | ||||
| 	std::cout<<GridLogMessage << std::setw(4) << lat<<"\t"<<Ls<<"\t" | ||||
| 		 <<std::setw(11) << bytes<< std::fixed << std::setprecision(1) << std::setw(7) | ||||
| 		 <<std::right<< xbytes/timestat.mean<<"  "<< xbytes*timestat.err/(timestat.mean*timestat.mean)<< " " | ||||
| 		 <<xbytes/timestat.max <<" "<< xbytes/timestat.min   | ||||
| 		 << "\t\t"<<std::setw(7)<< bidibytes/timestat.mean<< "  " << bidibytes*timestat.err/(timestat.mean*timestat.mean) << " " | ||||
| 		 << bidibytes/timestat.max << " " << bidibytes/timestat.min << std::endl; | ||||
|  | ||||
|   | ||||
| 	 | ||||
| 	    } | ||||
|     }     | ||||
|  | ||||
|     return; | ||||
|   } | ||||
|  | ||||
|   static void Memory(void) | ||||
|   { | ||||
|     const int Nvec=8; | ||||
|     typedef Lattice< iVector< vReal,Nvec> > LatticeVec; | ||||
|     typedef iVector<vReal,Nvec> Vec; | ||||
|  | ||||
|     std::vector<int> simd_layout = GridDefaultSimd(Nd,vReal::Nsimd()); | ||||
|     std::vector<int> mpi_layout  = GridDefaultMpi(); | ||||
|  | ||||
|     std::cout<<GridLogMessage << "=================================================================================="<<std::endl; | ||||
|     std::cout<<GridLogMessage << "= Benchmarking a*x + y bandwidth"<<std::endl; | ||||
|     std::cout<<GridLogMessage << "=================================================================================="<<std::endl; | ||||
|     std::cout<<GridLogMessage << "  L  "<<"\t\t"<<"bytes"<<"\t\t\t"<<"GB/s"<<"\t\t"<<"Gflop/s"<<"\t\t seconds"<< "\t\tGB/s / node"<<std::endl; | ||||
|     std::cout<<GridLogMessage << "----------------------------------------------------------"<<std::endl; | ||||
|    | ||||
|     uint64_t NP; | ||||
|     uint64_t NN; | ||||
|  | ||||
|  | ||||
|   uint64_t lmax=48; | ||||
| #define NLOOP (100*lmax*lmax*lmax*lmax/lat/lat/lat/lat) | ||||
|  | ||||
|     GridSerialRNG          sRNG;      sRNG.SeedFixedIntegers(std::vector<int>({45,12,81,9})); | ||||
|     for(int lat=8;lat<=lmax;lat+=4){ | ||||
|  | ||||
|       std::vector<int> latt_size  ({lat*mpi_layout[0],lat*mpi_layout[1],lat*mpi_layout[2],lat*mpi_layout[3]}); | ||||
|       int64_t vol= latt_size[0]*latt_size[1]*latt_size[2]*latt_size[3]; | ||||
|       GridCartesian     Grid(latt_size,simd_layout,mpi_layout); | ||||
|  | ||||
|       NP= Grid.RankCount(); | ||||
|       NN =Grid.NodeCount(); | ||||
|  | ||||
|       Vec rn ; random(sRNG,rn); | ||||
|  | ||||
|       LatticeVec z(&Grid); z=rn; | ||||
|       LatticeVec x(&Grid); x=rn; | ||||
|       LatticeVec y(&Grid); y=rn; | ||||
|       double a=2.0; | ||||
|  | ||||
|       uint64_t Nloop=NLOOP; | ||||
|  | ||||
|       double start=usecond(); | ||||
|       for(int i=0;i<Nloop;i++){ | ||||
| 	z=a*x-y; | ||||
|         x._odata[0]=z._odata[0]; // force serial dependency to prevent optimise away | ||||
|         y._odata[4]=z._odata[4]; | ||||
|       } | ||||
|       double stop=usecond(); | ||||
|       double time = (stop-start)/Nloop*1000; | ||||
|       | ||||
|       double flops=vol*Nvec*2;// mul,add | ||||
|       double bytes=3.0*vol*Nvec*sizeof(Real); | ||||
|       std::cout<<GridLogMessage<<std::setprecision(3)  | ||||
| 	       << lat<<"\t\t"<<bytes<<"   \t\t"<<bytes/time<<"\t\t"<<flops/time<<"\t\t"<<(stop-start)/1000./1000. | ||||
| 	       << "\t\t"<< bytes/time/NN <<std::endl; | ||||
|  | ||||
|     } | ||||
|   }; | ||||
|  | ||||
|   static double DWF5(int Ls,int L) | ||||
|   { | ||||
|     RealD mass=0.1; | ||||
|     RealD M5  =1.8; | ||||
|  | ||||
|     double mflops; | ||||
|     double mflops_best = 0; | ||||
|     double mflops_worst= 0; | ||||
|     std::vector<double> mflops_all; | ||||
|  | ||||
|     /////////////////////////////////////////////////////// | ||||
|     // Set/Get the layout & grid size | ||||
|     /////////////////////////////////////////////////////// | ||||
|     int threads = GridThread::GetThreads(); | ||||
|     std::vector<int> mpi = GridDefaultMpi(); assert(mpi.size()==4); | ||||
|     std::vector<int> local({L,L,L,L}); | ||||
|  | ||||
|     GridCartesian         * TmpGrid   = SpaceTimeGrid::makeFourDimGrid(std::vector<int>({64,64,64,64}),  | ||||
| 								       GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi()); | ||||
|     uint64_t NP = TmpGrid->RankCount(); | ||||
|     uint64_t NN = TmpGrid->NodeCount(); | ||||
|     NN_global=NN; | ||||
|     uint64_t SHM=NP/NN; | ||||
|  | ||||
|     std::vector<int> internal; | ||||
|     if      ( SHM == 1 )   internal = std::vector<int>({1,1,1,1}); | ||||
|     else if ( SHM == 2 )   internal = std::vector<int>({2,1,1,1}); | ||||
|     else if ( SHM == 4 )   internal = std::vector<int>({2,2,1,1}); | ||||
|     else if ( SHM == 8 )   internal = std::vector<int>({2,2,2,1}); | ||||
|     else assert(0); | ||||
|  | ||||
|     std::vector<int> nodes({mpi[0]/internal[0],mpi[1]/internal[1],mpi[2]/internal[2],mpi[3]/internal[3]}); | ||||
|     std::vector<int> latt4({local[0]*nodes[0],local[1]*nodes[1],local[2]*nodes[2],local[3]*nodes[3]}); | ||||
|  | ||||
|     ///////// Welcome message //////////// | ||||
|     std::cout<<GridLogMessage << "=================================================================================="<<std::endl; | ||||
|     std::cout<<GridLogMessage << "Benchmark DWF Ls vec on "<<L<<"^4 local volume "<<std::endl; | ||||
|     std::cout<<GridLogMessage << "* Global volume  : "<<GridCmdVectorIntToString(latt4)<<std::endl; | ||||
|     std::cout<<GridLogMessage << "* Ls             : "<<Ls<<std::endl; | ||||
|     std::cout<<GridLogMessage << "* MPI ranks      : "<<GridCmdVectorIntToString(mpi)<<std::endl; | ||||
|     std::cout<<GridLogMessage << "* Intranode      : "<<GridCmdVectorIntToString(internal)<<std::endl; | ||||
|     std::cout<<GridLogMessage << "* nodes          : "<<GridCmdVectorIntToString(nodes)<<std::endl; | ||||
|     std::cout<<GridLogMessage << "* Using "<<threads<<" threads"<<std::endl; | ||||
|     std::cout<<GridLogMessage << "=================================================================================="<<std::endl; | ||||
|  | ||||
|     ///////// Lattice Init //////////// | ||||
|     GridCartesian         * UGrid    = SpaceTimeGrid::makeFourDimGrid(latt4, GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi()); | ||||
|     GridRedBlackCartesian * UrbGrid  = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid); | ||||
|     GridCartesian         * sUGrid   = SpaceTimeGrid::makeFourDimDWFGrid(latt4,GridDefaultMpi()); | ||||
|     GridRedBlackCartesian * sUrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(sUGrid); | ||||
|     GridCartesian         * sFGrid   = SpaceTimeGrid::makeFiveDimDWFGrid(Ls,UGrid); | ||||
|     GridRedBlackCartesian * sFrbGrid = SpaceTimeGrid::makeFiveDimDWFRedBlackGrid(Ls,UGrid); | ||||
|  | ||||
|     ///////// RNG Init //////////// | ||||
|     std::vector<int> seeds4({1,2,3,4}); | ||||
|     std::vector<int> seeds5({5,6,7,8}); | ||||
|     GridParallelRNG          RNG4(UGrid);  RNG4.SeedFixedIntegers(seeds4); | ||||
|     GridParallelRNG          RNG5(sFGrid);  RNG5.SeedFixedIntegers(seeds5); | ||||
|     std::cout << GridLogMessage << "Initialised RNGs" << std::endl; | ||||
|  | ||||
|     ///////// Source preparation //////////// | ||||
|     LatticeFermion src   (sFGrid); random(RNG5,src); | ||||
|     LatticeFermion tmp   (sFGrid); | ||||
|  | ||||
|     RealD N2 = 1.0/::sqrt(norm2(src)); | ||||
|     src = src*N2; | ||||
|      | ||||
|     LatticeGaugeField Umu(UGrid);  SU3::HotConfiguration(RNG4,Umu);  | ||||
|  | ||||
|     WilsonFermion5DR sDw(Umu,*sFGrid,*sFrbGrid,*sUGrid,*sUrbGrid,M5); | ||||
|     LatticeFermion src_e (sFrbGrid); | ||||
|     LatticeFermion src_o (sFrbGrid); | ||||
|     LatticeFermion r_e   (sFrbGrid); | ||||
|     LatticeFermion r_o   (sFrbGrid); | ||||
|     LatticeFermion r_eo  (sFGrid); | ||||
|     LatticeFermion err   (sFGrid); | ||||
|     { | ||||
|  | ||||
|       pickCheckerboard(Even,src_e,src); | ||||
|       pickCheckerboard(Odd,src_o,src); | ||||
|  | ||||
| #if defined(AVX512)  | ||||
|       const int num_cases = 6; | ||||
|       std::string fmt("A/S ; A/O ; U/S ; U/O ; G/S ; G/O "); | ||||
| #else | ||||
|       const int num_cases = 4; | ||||
|       std::string fmt("U/S ; U/O ; G/S ; G/O "); | ||||
| #endif | ||||
|       controls Cases [] = { | ||||
| #ifdef AVX512 | ||||
| 	{ QCD::WilsonKernelsStatic::OptInlineAsm , QCD::WilsonKernelsStatic::CommsThenCompute ,CartesianCommunicator::CommunicatorPolicySequential  }, | ||||
| 	{ QCD::WilsonKernelsStatic::OptInlineAsm , QCD::WilsonKernelsStatic::CommsAndCompute  ,CartesianCommunicator::CommunicatorPolicySequential  }, | ||||
| #endif | ||||
| 	{ QCD::WilsonKernelsStatic::OptHandUnroll, QCD::WilsonKernelsStatic::CommsThenCompute ,CartesianCommunicator::CommunicatorPolicySequential  }, | ||||
| 	{ QCD::WilsonKernelsStatic::OptHandUnroll, QCD::WilsonKernelsStatic::CommsAndCompute  ,CartesianCommunicator::CommunicatorPolicySequential  }, | ||||
| 	{ QCD::WilsonKernelsStatic::OptGeneric   , QCD::WilsonKernelsStatic::CommsThenCompute ,CartesianCommunicator::CommunicatorPolicySequential  }, | ||||
| 	{ QCD::WilsonKernelsStatic::OptGeneric   , QCD::WilsonKernelsStatic::CommsAndCompute  ,CartesianCommunicator::CommunicatorPolicySequential  } | ||||
|       };  | ||||
|  | ||||
|       for(int c=0;c<num_cases;c++) { | ||||
|  | ||||
| 	QCD::WilsonKernelsStatic::Comms = Cases[c].CommsOverlap; | ||||
| 	QCD::WilsonKernelsStatic::Opt   = Cases[c].Opt; | ||||
| 	CartesianCommunicator::SetCommunicatorPolicy(Cases[c].CommsAsynch); | ||||
|  | ||||
| 	std::cout<<GridLogMessage << "=================================================================================="<<std::endl; | ||||
| 	if ( WilsonKernelsStatic::Opt == WilsonKernelsStatic::OptGeneric   ) std::cout << GridLogMessage<< "* Using GENERIC Nc WilsonKernels" <<std::endl; | ||||
| 	if ( WilsonKernelsStatic::Opt == WilsonKernelsStatic::OptHandUnroll) std::cout << GridLogMessage<< "* Using Nc=3       WilsonKernels" <<std::endl; | ||||
| 	if ( WilsonKernelsStatic::Opt == WilsonKernelsStatic::OptInlineAsm ) std::cout << GridLogMessage<< "* Using Asm Nc=3   WilsonKernels" <<std::endl; | ||||
| 	if ( WilsonKernelsStatic::Comms == WilsonKernelsStatic::CommsAndCompute ) std::cout << GridLogMessage<< "* Using Overlapped Comms/Compute" <<std::endl; | ||||
| 	if ( WilsonKernelsStatic::Comms == WilsonKernelsStatic::CommsThenCompute) std::cout << GridLogMessage<< "* Using sequential comms compute" <<std::endl; | ||||
| 	if ( sizeof(Real)==4 )   std::cout << GridLogMessage<< "* SINGLE precision "<<std::endl; | ||||
| 	if ( sizeof(Real)==8 )   std::cout << GridLogMessage<< "* DOUBLE precision "<<std::endl; | ||||
| 	std::cout<<GridLogMessage << "=================================================================================="<<std::endl; | ||||
|  | ||||
| 	int nwarm = 100; | ||||
| 	uint64_t ncall = 1000; | ||||
|  | ||||
| 	double t0=usecond(); | ||||
| 	sFGrid->Barrier(); | ||||
| 	for(int i=0;i<nwarm;i++){ | ||||
| 	  sDw.DhopEO(src_o,r_e,DaggerNo); | ||||
| 	} | ||||
| 	sFGrid->Barrier(); | ||||
| 	double t1=usecond(); | ||||
|  | ||||
| 	sDw.ZeroCounters(); | ||||
| 	time_statistics timestat; | ||||
| 	std::vector<double> t_time(ncall); | ||||
| 	for(uint64_t i=0;i<ncall;i++){ | ||||
| 	  t0=usecond(); | ||||
| 	  sDw.DhopEO(src_o,r_e,DaggerNo); | ||||
| 	  t1=usecond(); | ||||
| 	  t_time[i] = t1-t0; | ||||
| 	} | ||||
| 	sFGrid->Barrier(); | ||||
| 	 | ||||
| 	double volume=Ls;  for(int mu=0;mu<Nd;mu++) volume=volume*latt4[mu]; | ||||
| 	double flops=(1344.0*volume)/2; | ||||
| 	double mf_hi, mf_lo, mf_err; | ||||
|  | ||||
| 	timestat.statistics(t_time); | ||||
| 	mf_hi = flops/timestat.min; | ||||
| 	mf_lo = flops/timestat.max; | ||||
| 	mf_err= flops/timestat.min * timestat.err/timestat.mean; | ||||
|  | ||||
| 	mflops = flops/timestat.mean; | ||||
| 	mflops_all.push_back(mflops); | ||||
| 	if ( mflops_best == 0   ) mflops_best = mflops; | ||||
| 	if ( mflops_worst== 0   ) mflops_worst= mflops; | ||||
| 	if ( mflops>mflops_best ) mflops_best = mflops; | ||||
| 	if ( mflops<mflops_worst) mflops_worst= mflops; | ||||
|  | ||||
| 	std::cout<<GridLogMessage << std::fixed << std::setprecision(1)<<"sDeo mflop/s =   "<< mflops << " ("<<mf_err<<") " << mf_lo<<"-"<<mf_hi <<std::endl; | ||||
| 	std::cout<<GridLogMessage << std::fixed << std::setprecision(1)<<"sDeo mflop/s per rank   "<< mflops/NP<<std::endl; | ||||
| 	std::cout<<GridLogMessage << std::fixed << std::setprecision(1)<<"sDeo mflop/s per node   "<< mflops/NN<<std::endl; | ||||
|  | ||||
| 	sDw.Report(); | ||||
|  | ||||
|       } | ||||
|       double robust = mflops_worst/mflops_best; | ||||
|       std::cout<<GridLogMessage << "=================================================================================="<<std::endl; | ||||
|       std::cout<<GridLogMessage << L<<"^4 x "<<Ls<< " sDeo Best  mflop/s        =   "<< mflops_best << " ; " << mflops_best/NN<<" per node " <<std::endl; | ||||
|       std::cout<<GridLogMessage << L<<"^4 x "<<Ls<< " sDeo Worst mflop/s        =   "<< mflops_worst<< " ; " << mflops_worst/NN<<" per node " <<std::endl; | ||||
|  | ||||
|       std::cout<<GridLogMessage <<std::setprecision(3)<< L<<"^4 x "<<Ls<< " Performance Robustness   =   "<< robust <<std::endl; | ||||
|       std::cout<<GridLogMessage <<fmt << std::endl; | ||||
|       std::cout<<GridLogMessage; | ||||
|  | ||||
|       for(int i=0;i<mflops_all.size();i++){ | ||||
| 	std::cout<<mflops_all[i]/NN<<" ; " ; | ||||
|       } | ||||
|       std::cout<<std::endl; | ||||
|       std::cout<<GridLogMessage << "=================================================================================="<<std::endl; | ||||
|  | ||||
|     } | ||||
|     return mflops_best; | ||||
|   } | ||||
|  | ||||
|   static double DWF(int Ls,int L, double & robust) | ||||
|   { | ||||
|     RealD mass=0.1; | ||||
|     RealD M5  =1.8; | ||||
|  | ||||
|     double mflops; | ||||
|     double mflops_best = 0; | ||||
|     double mflops_worst= 0; | ||||
|     std::vector<double> mflops_all; | ||||
|  | ||||
|     /////////////////////////////////////////////////////// | ||||
|     // Set/Get the layout & grid size | ||||
|     /////////////////////////////////////////////////////// | ||||
|     int threads = GridThread::GetThreads(); | ||||
|     std::vector<int> mpi = GridDefaultMpi(); assert(mpi.size()==4); | ||||
|     std::vector<int> local({L,L,L,L}); | ||||
|  | ||||
|     GridCartesian         * TmpGrid   = SpaceTimeGrid::makeFourDimGrid(std::vector<int>({64,64,64,64}),  | ||||
| 								       GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi()); | ||||
|     uint64_t NP = TmpGrid->RankCount(); | ||||
|     uint64_t NN = TmpGrid->NodeCount(); | ||||
|     NN_global=NN; | ||||
|     uint64_t SHM=NP/NN; | ||||
|  | ||||
|     std::vector<int> internal; | ||||
|     if      ( SHM == 1 )   internal = std::vector<int>({1,1,1,1}); | ||||
|     else if ( SHM == 2 )   internal = std::vector<int>({2,1,1,1}); | ||||
|     else if ( SHM == 4 )   internal = std::vector<int>({2,2,1,1}); | ||||
|     else if ( SHM == 8 )   internal = std::vector<int>({2,2,2,1}); | ||||
|     else assert(0); | ||||
|  | ||||
|     std::vector<int> nodes({mpi[0]/internal[0],mpi[1]/internal[1],mpi[2]/internal[2],mpi[3]/internal[3]}); | ||||
|     std::vector<int> latt4({local[0]*nodes[0],local[1]*nodes[1],local[2]*nodes[2],local[3]*nodes[3]}); | ||||
|  | ||||
|     ///////// Welcome message //////////// | ||||
|     std::cout<<GridLogMessage << "=================================================================================="<<std::endl; | ||||
|     std::cout<<GridLogMessage << "Benchmark DWF on "<<L<<"^4 local volume "<<std::endl; | ||||
|     std::cout<<GridLogMessage << "* Global volume  : "<<GridCmdVectorIntToString(latt4)<<std::endl; | ||||
|     std::cout<<GridLogMessage << "* Ls             : "<<Ls<<std::endl; | ||||
|     std::cout<<GridLogMessage << "* MPI ranks      : "<<GridCmdVectorIntToString(mpi)<<std::endl; | ||||
|     std::cout<<GridLogMessage << "* Intranode      : "<<GridCmdVectorIntToString(internal)<<std::endl; | ||||
|     std::cout<<GridLogMessage << "* nodes          : "<<GridCmdVectorIntToString(nodes)<<std::endl; | ||||
|     std::cout<<GridLogMessage << "* Using "<<threads<<" threads"<<std::endl; | ||||
|     std::cout<<GridLogMessage << "=================================================================================="<<std::endl; | ||||
|  | ||||
|  | ||||
|     ///////// Lattice Init //////////// | ||||
|     GridCartesian         * UGrid   = SpaceTimeGrid::makeFourDimGrid(latt4, GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi()); | ||||
|     GridRedBlackCartesian * UrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid); | ||||
|     GridCartesian         * FGrid   = SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid); | ||||
|     GridRedBlackCartesian * FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGrid); | ||||
|  | ||||
|      | ||||
|     ///////// RNG Init //////////// | ||||
|     std::vector<int> seeds4({1,2,3,4}); | ||||
|     std::vector<int> seeds5({5,6,7,8}); | ||||
|     GridParallelRNG          RNG4(UGrid);  RNG4.SeedFixedIntegers(seeds4); | ||||
|     GridParallelRNG          RNG5(FGrid);  RNG5.SeedFixedIntegers(seeds5); | ||||
|     std::cout << GridLogMessage << "Initialised RNGs" << std::endl; | ||||
|  | ||||
|     ///////// Source preparation //////////// | ||||
|     LatticeFermion src   (FGrid); random(RNG5,src); | ||||
|     LatticeFermion ref   (FGrid); | ||||
|     LatticeFermion tmp   (FGrid); | ||||
|  | ||||
|     RealD N2 = 1.0/::sqrt(norm2(src)); | ||||
|     src = src*N2; | ||||
|      | ||||
|     LatticeGaugeField Umu(UGrid);  SU3::HotConfiguration(RNG4,Umu);  | ||||
|  | ||||
|     DomainWallFermionR Dw(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5); | ||||
|  | ||||
|     //////////////////////////////////// | ||||
|     // Naive wilson implementation | ||||
|     //////////////////////////////////// | ||||
|     { | ||||
|       LatticeGaugeField Umu5d(FGrid);  | ||||
|       std::vector<LatticeColourMatrix> U(4,FGrid); | ||||
|       for(int ss=0;ss<Umu._grid->oSites();ss++){ | ||||
| 	for(int s=0;s<Ls;s++){ | ||||
| 	  Umu5d._odata[Ls*ss+s] = Umu._odata[ss]; | ||||
| 	} | ||||
|       } | ||||
|       ref = zero; | ||||
|       for(int mu=0;mu<Nd;mu++){ | ||||
| 	U[mu] = PeekIndex<LorentzIndex>(Umu5d,mu); | ||||
|       } | ||||
|       for(int mu=0;mu<Nd;mu++){ | ||||
| 	 | ||||
| 	tmp = U[mu]*Cshift(src,mu+1,1); | ||||
| 	ref=ref + tmp - Gamma(Gmu[mu])*tmp; | ||||
| 	 | ||||
| 	tmp =adj(U[mu])*src; | ||||
| 	tmp =Cshift(tmp,mu+1,-1); | ||||
| 	ref=ref + tmp + Gamma(Gmu[mu])*tmp; | ||||
|       } | ||||
|       ref = -0.5*ref; | ||||
|     } | ||||
|  | ||||
|     LatticeFermion src_e (FrbGrid); | ||||
|     LatticeFermion src_o (FrbGrid); | ||||
|     LatticeFermion r_e   (FrbGrid); | ||||
|     LatticeFermion r_o   (FrbGrid); | ||||
|     LatticeFermion r_eo  (FGrid); | ||||
|     LatticeFermion err   (FGrid); | ||||
|     { | ||||
|  | ||||
|       pickCheckerboard(Even,src_e,src); | ||||
|       pickCheckerboard(Odd,src_o,src); | ||||
|  | ||||
| #if defined(AVX512)  | ||||
|       const int num_cases = 6; | ||||
|       std::string fmt("A/S ; A/O ; U/S ; U/O ; G/S ; G/O "); | ||||
| #else | ||||
|       const int num_cases = 4; | ||||
|       std::string fmt("U/S ; U/O ; G/S ; G/O "); | ||||
| #endif | ||||
|       controls Cases [] = { | ||||
| #ifdef AVX512 | ||||
| 	{ QCD::WilsonKernelsStatic::OptInlineAsm , QCD::WilsonKernelsStatic::CommsThenCompute ,CartesianCommunicator::CommunicatorPolicySequential  }, | ||||
| 	{ QCD::WilsonKernelsStatic::OptInlineAsm , QCD::WilsonKernelsStatic::CommsAndCompute  ,CartesianCommunicator::CommunicatorPolicySequential  }, | ||||
| #endif | ||||
| 	{ QCD::WilsonKernelsStatic::OptHandUnroll, QCD::WilsonKernelsStatic::CommsThenCompute ,CartesianCommunicator::CommunicatorPolicySequential  }, | ||||
| 	{ QCD::WilsonKernelsStatic::OptHandUnroll, QCD::WilsonKernelsStatic::CommsAndCompute  ,CartesianCommunicator::CommunicatorPolicySequential  }, | ||||
| 	{ QCD::WilsonKernelsStatic::OptGeneric   , QCD::WilsonKernelsStatic::CommsThenCompute ,CartesianCommunicator::CommunicatorPolicySequential  }, | ||||
| 	{ QCD::WilsonKernelsStatic::OptGeneric   , QCD::WilsonKernelsStatic::CommsAndCompute  ,CartesianCommunicator::CommunicatorPolicySequential  } | ||||
|       };  | ||||
|  | ||||
|       for(int c=0;c<num_cases;c++) { | ||||
|  | ||||
| 	QCD::WilsonKernelsStatic::Comms = Cases[c].CommsOverlap; | ||||
| 	QCD::WilsonKernelsStatic::Opt   = Cases[c].Opt; | ||||
| 	CartesianCommunicator::SetCommunicatorPolicy(Cases[c].CommsAsynch); | ||||
|  | ||||
| 	std::cout<<GridLogMessage << "=================================================================================="<<std::endl; | ||||
| 	if ( WilsonKernelsStatic::Opt == WilsonKernelsStatic::OptGeneric   ) std::cout << GridLogMessage<< "* Using GENERIC Nc WilsonKernels" <<std::endl; | ||||
| 	if ( WilsonKernelsStatic::Opt == WilsonKernelsStatic::OptHandUnroll) std::cout << GridLogMessage<< "* Using Nc=3       WilsonKernels" <<std::endl; | ||||
| 	if ( WilsonKernelsStatic::Opt == WilsonKernelsStatic::OptInlineAsm ) std::cout << GridLogMessage<< "* Using Asm Nc=3   WilsonKernels" <<std::endl; | ||||
| 	if ( WilsonKernelsStatic::Comms == WilsonKernelsStatic::CommsAndCompute ) std::cout << GridLogMessage<< "* Using Overlapped Comms/Compute" <<std::endl; | ||||
| 	if ( WilsonKernelsStatic::Comms == WilsonKernelsStatic::CommsThenCompute) std::cout << GridLogMessage<< "* Using sequential comms compute" <<std::endl; | ||||
| 	if ( sizeof(Real)==4 )   std::cout << GridLogMessage<< "* SINGLE precision "<<std::endl; | ||||
| 	if ( sizeof(Real)==8 )   std::cout << GridLogMessage<< "* DOUBLE precision "<<std::endl; | ||||
| 	std::cout<<GridLogMessage << "=================================================================================="<<std::endl; | ||||
|  | ||||
| 	int nwarm = 200; | ||||
| 	double t0=usecond(); | ||||
| 	FGrid->Barrier(); | ||||
| 	for(int i=0;i<nwarm;i++){ | ||||
| 	  Dw.DhopEO(src_o,r_e,DaggerNo); | ||||
| 	} | ||||
| 	FGrid->Barrier(); | ||||
| 	double t1=usecond(); | ||||
| 	//	uint64_t ncall = (uint64_t) 2.5*1000.0*1000.0*nwarm/(t1-t0); | ||||
| 	//	if (ncall < 500) ncall = 500; | ||||
| 	uint64_t ncall = 1000; | ||||
|  | ||||
| 	FGrid->Broadcast(0,&ncall,sizeof(ncall)); | ||||
|  | ||||
| 	//	std::cout << GridLogMessage << " Estimate " << ncall << " calls per second"<<std::endl; | ||||
| 	Dw.ZeroCounters(); | ||||
|  | ||||
| 	time_statistics timestat; | ||||
| 	std::vector<double> t_time(ncall); | ||||
| 	for(uint64_t i=0;i<ncall;i++){ | ||||
| 	  t0=usecond(); | ||||
| 	  Dw.DhopEO(src_o,r_e,DaggerNo); | ||||
| 	  t1=usecond(); | ||||
| 	  t_time[i] = t1-t0; | ||||
| 	} | ||||
| 	FGrid->Barrier(); | ||||
| 	 | ||||
| 	double volume=Ls;  for(int mu=0;mu<Nd;mu++) volume=volume*latt4[mu]; | ||||
| 	double flops=(1344.0*volume)/2; | ||||
| 	double mf_hi, mf_lo, mf_err; | ||||
|  | ||||
| 	timestat.statistics(t_time); | ||||
| 	mf_hi = flops/timestat.min; | ||||
| 	mf_lo = flops/timestat.max; | ||||
| 	mf_err= flops/timestat.min * timestat.err/timestat.mean; | ||||
|  | ||||
| 	mflops = flops/timestat.mean; | ||||
| 	mflops_all.push_back(mflops); | ||||
| 	if ( mflops_best == 0   ) mflops_best = mflops; | ||||
| 	if ( mflops_worst== 0   ) mflops_worst= mflops; | ||||
| 	if ( mflops>mflops_best ) mflops_best = mflops; | ||||
| 	if ( mflops<mflops_worst) mflops_worst= mflops; | ||||
|  | ||||
| 	std::cout<<GridLogMessage << std::fixed << std::setprecision(1)<<"Deo mflop/s =   "<< mflops << " ("<<mf_err<<") " << mf_lo<<"-"<<mf_hi <<std::endl; | ||||
| 	std::cout<<GridLogMessage << std::fixed << std::setprecision(1)<<"Deo mflop/s per rank   "<< mflops/NP<<std::endl; | ||||
| 	std::cout<<GridLogMessage << std::fixed << std::setprecision(1)<<"Deo mflop/s per node   "<< mflops/NN<<std::endl; | ||||
|  | ||||
| 	Dw.Report(); | ||||
|  | ||||
| 	Dw.DhopEO(src_o,r_e,DaggerNo); | ||||
| 	Dw.DhopOE(src_e,r_o,DaggerNo); | ||||
| 	setCheckerboard(r_eo,r_o); | ||||
| 	setCheckerboard(r_eo,r_e); | ||||
| 	err = r_eo-ref;  | ||||
| 	std::cout<<GridLogMessage << "norm diff   "<< norm2(err)<<std::endl; | ||||
| 	assert((norm2(err)<1.0e-4)); | ||||
|  | ||||
|       } | ||||
|       robust = mflops_worst/mflops_best; | ||||
|       std::cout<<GridLogMessage << "=================================================================================="<<std::endl; | ||||
|       std::cout<<GridLogMessage << L<<"^4 x "<<Ls<< " Deo Best  mflop/s        =   "<< mflops_best << " ; " << mflops_best/NN<<" per node " <<std::endl; | ||||
|       std::cout<<GridLogMessage << L<<"^4 x "<<Ls<< " Deo Worst mflop/s        =   "<< mflops_worst<< " ; " << mflops_worst/NN<<" per node " <<std::endl; | ||||
|       std::cout<<GridLogMessage << std::fixed<<std::setprecision(3)<< L<<"^4 x "<<Ls<< " Performance Robustness   =   "<< robust  <<std::endl; | ||||
|       std::cout<<GridLogMessage <<fmt << std::endl; | ||||
|       std::cout<<GridLogMessage ; | ||||
|  | ||||
|       for(int i=0;i<mflops_all.size();i++){ | ||||
| 	std::cout<<mflops_all[i]/NN<<" ; " ; | ||||
|       } | ||||
|       std::cout<<std::endl; | ||||
|       std::cout<<GridLogMessage << "=================================================================================="<<std::endl; | ||||
|  | ||||
|     } | ||||
|     return mflops_best; | ||||
|   } | ||||
|  | ||||
| }; | ||||
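(Editorial aside, not part of the commits: a minimal sketch of how the Mflop/s figures above are formed. The even-odd Deo call counts 1344 flops per 5d site over half the volume, and since the timings are kept in microseconds, flops divided by the mean time per call is already Mflop/s. The helper below is illustrative only; its inputs are invented, not taken from a run.)

// Editorial sketch; assumes timings in microseconds, values fed in are hypothetical.
static double mflops_estimate(double L, double Ls, double mean_time_us) {
  double volume = Ls * L * L * L * L;      // 5d site count
  double flops  = 1344.0 * volume / 2.0;   // Deo acts on one checkerboard: 1344 flops/site, half the sites
  return flops / mean_time_us;             // flop per microsecond equals Mflop/s
}
// e.g. mflops_estimate(16, 16, 900.0) for a made-up 16^4 x 16 lattice timed at 900 us per call.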
|  | ||||
| int main (int argc, char ** argv) | ||||
| { | ||||
|   Grid_init(&argc,&argv); | ||||
|  | ||||
|   CartesianCommunicator::SetCommunicatorPolicy(CartesianCommunicator::CommunicatorPolicySequential); | ||||
| #ifdef KNL | ||||
|   LebesgueOrder::Block = std::vector<int>({8,2,2,2}); | ||||
| #else | ||||
|   LebesgueOrder::Block = std::vector<int>({2,2,2,2}); | ||||
| #endif | ||||
|   Benchmark::Decomposition(); | ||||
|  | ||||
|   int do_memory=1; | ||||
|   int do_comms =1; | ||||
|   int do_su3   =0; | ||||
|   int do_wilson=1; | ||||
|   int do_dwf   =1; | ||||
|  | ||||
|   if ( do_su3 ) { | ||||
|     // empty for now | ||||
|   } | ||||
| #if 1 | ||||
|   int sel=2; | ||||
|   std::vector<int> L_list({8,12,16,24}); | ||||
| #else | ||||
|   int sel=1; | ||||
|   std::vector<int> L_list({8,12}); | ||||
| #endif | ||||
|   int selm1=sel-1; | ||||
|   std::vector<double> robust_list; | ||||
|  | ||||
|   std::vector<double> wilson; | ||||
|   std::vector<double> dwf4; | ||||
|   std::vector<double> dwf5; | ||||
|  | ||||
|   if ( do_wilson ) { | ||||
|     int Ls=1; | ||||
|     std::cout<<GridLogMessage << "=================================================================================="<<std::endl; | ||||
|     std::cout<<GridLogMessage << " Wilson dslash 4D vectorised" <<std::endl; | ||||
|     std::cout<<GridLogMessage << "=================================================================================="<<std::endl; | ||||
|     for(int l=0;l<L_list.size();l++){ | ||||
|       double robust; | ||||
|       wilson.push_back(Benchmark::DWF(1,L_list[l],robust)); | ||||
|     } | ||||
|   } | ||||
|  | ||||
|   int Ls=16; | ||||
|   if ( do_dwf ) { | ||||
|     std::cout<<GridLogMessage << "=================================================================================="<<std::endl; | ||||
|     std::cout<<GridLogMessage << " Domain wall dslash 4D vectorised" <<std::endl; | ||||
|     std::cout<<GridLogMessage << "=================================================================================="<<std::endl; | ||||
|     for(int l=0;l<L_list.size();l++){ | ||||
|       double robust; | ||||
|       double result = Benchmark::DWF(Ls,L_list[l],robust) ; | ||||
|       dwf4.push_back(result); | ||||
|       robust_list.push_back(robust); | ||||
|     } | ||||
|   } | ||||
|  | ||||
|   if ( do_dwf ) { | ||||
|     std::cout<<GridLogMessage << "=================================================================================="<<std::endl; | ||||
|     std::cout<<GridLogMessage << " Domain wall dslash 4D vectorised" <<std::endl; | ||||
|     std::cout<<GridLogMessage << "=================================================================================="<<std::endl; | ||||
|     for(int l=0;l<L_list.size();l++){ | ||||
|       dwf5.push_back(Benchmark::DWF5(Ls,L_list[l])); | ||||
|     } | ||||
|  | ||||
|   } | ||||
|  | ||||
|   if ( do_dwf ) { | ||||
|  | ||||
|   std::cout<<GridLogMessage << "=================================================================================="<<std::endl; | ||||
|   std::cout<<GridLogMessage << " Summary table Ls="<<Ls <<std::endl; | ||||
|   std::cout<<GridLogMessage << "=================================================================================="<<std::endl; | ||||
|   std::cout<<GridLogMessage << "L \t\t Wilson \t DWF4 \t DWF5 " <<std::endl; | ||||
|   for(int l=0;l<L_list.size();l++){ | ||||
|     std::cout<<GridLogMessage << L_list[l] <<" \t\t "<< wilson[l]<<" \t "<<dwf4[l]<<" \t "<<dwf5[l] <<std::endl; | ||||
|   } | ||||
|   std::cout<<GridLogMessage << "=================================================================================="<<std::endl; | ||||
|   } | ||||
|  | ||||
|   int NN=NN_global; | ||||
|   if ( do_memory ) { | ||||
|     std::cout<<GridLogMessage << "=================================================================================="<<std::endl; | ||||
|     std::cout<<GridLogMessage << " Memory benchmark " <<std::endl; | ||||
|     std::cout<<GridLogMessage << "=================================================================================="<<std::endl; | ||||
|     Benchmark::Memory(); | ||||
|   } | ||||
|  | ||||
|   if ( do_comms && (NN>1) ) { | ||||
|     std::cout<<GridLogMessage << "=================================================================================="<<std::endl; | ||||
|     std::cout<<GridLogMessage << " Communications benchmark " <<std::endl; | ||||
|     std::cout<<GridLogMessage << "=================================================================================="<<std::endl; | ||||
|     Benchmark::Comms(); | ||||
|   } | ||||
|  | ||||
|   if ( do_dwf ) { | ||||
|   std::cout<<GridLogMessage << "=================================================================================="<<std::endl; | ||||
|   std::cout<<GridLogMessage << " Per Node Summary table Ls="<<Ls <<std::endl; | ||||
|   std::cout<<GridLogMessage << "=================================================================================="<<std::endl; | ||||
|   std::cout<<GridLogMessage << " L \t\t Wilson\t\t DWF4  \t\t DWF5 " <<std::endl; | ||||
|   for(int l=0;l<L_list.size();l++){ | ||||
|     std::cout<<GridLogMessage << L_list[l] <<" \t\t "<< wilson[l]/NN<<" \t "<<dwf4[l]/NN<<" \t "<<dwf5[l] /NN<<std::endl; | ||||
|   } | ||||
|   std::cout<<GridLogMessage << "=================================================================================="<<std::endl; | ||||
|  | ||||
|   std::cout<<GridLogMessage << "=================================================================================="<<std::endl; | ||||
|   std::cout<<GridLogMessage << " Comparison point     result: "  << 0.5*(dwf4[sel]+dwf4[selm1])/NN << " Mflop/s per node"<<std::endl; | ||||
|   std::cout<<GridLogMessage << " Comparison point is 0.5*("<<dwf4[sel]/NN<<"+"<<dwf4[selm1]/NN << ") "<<std::endl; | ||||
|   std::cout<<std::setprecision(3); | ||||
|   std::cout<<GridLogMessage << " Comparison point robustness: "  << robust_list[sel] <<std::endl; | ||||
|   std::cout<<GridLogMessage << "=================================================================================="<<std::endl; | ||||
|  | ||||
|   } | ||||
|  | ||||
|  | ||||
|   Grid_finalize(); | ||||
| } | ||||
| @@ -31,6 +31,32 @@ using namespace std; | ||||
| using namespace Grid; | ||||
| using namespace Grid::QCD; | ||||
|  | ||||
| struct time_statistics{ | ||||
|   double mean; | ||||
|   double err; | ||||
|   double min; | ||||
|   double max; | ||||
|  | ||||
|   void statistics(std::vector<double> v){ | ||||
|       double sum = std::accumulate(v.begin(), v.end(), 0.0); | ||||
|       mean = sum / v.size(); | ||||
|  | ||||
|       std::vector<double> diff(v.size()); | ||||
|       std::transform(v.begin(), v.end(), diff.begin(), [=](double x) { return x - mean; }); | ||||
|       double sq_sum = std::inner_product(diff.begin(), diff.end(), diff.begin(), 0.0); | ||||
|       err = std::sqrt(sq_sum / (v.size()*(v.size() - 1))); | ||||
|  | ||||
|       auto result = std::minmax_element(v.begin(), v.end()); | ||||
|       min = *result.first; | ||||
|       max = *result.second; | ||||
| } | ||||
| }; | ||||
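(Editorial note on the struct above: err is the standard error of the mean, i.e. the sample variance divided by N and square-rooted, so a rate X/mean is quoted with an uncertainty of roughly X*err/mean^2 and a spread X/max to X/min. A minimal usage sketch, assuming the time_statistics struct as defined above and invented timings:)

// Editorial usage sketch; the sample timings are made up.
std::vector<double> t_time = {101.0, 99.0, 105.0, 95.0};  // per-iteration times in microseconds
time_statistics ts;
ts.statistics(t_time);
// ts.mean ~ 100 us, ts.err ~ 2.1 us (sigma/sqrt(N)), ts.min = 95, ts.max = 105.
// A per-iteration quantity X (bytes or flops) is then reported as
//   X/ts.mean  +/-  X*ts.err/(ts.mean*ts.mean),  with spread  X/ts.max .. X/ts.min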
|  | ||||
| void header(){ | ||||
|   std::cout <<GridLogMessage << " L  "<<"\t"<<" Ls  "<<"\t" | ||||
|             <<std::setw(11)<<"bytes"<<"MB/s uni (err/min/max)"<<"\t\t"<<"MB/s bidi (err/min/max)"<<std::endl; | ||||
| }; | ||||
|  | ||||
| int main (int argc, char ** argv) | ||||
| { | ||||
|   Grid_init(&argc,&argv); | ||||
| @@ -40,17 +66,21 @@ int main (int argc, char ** argv) | ||||
|   int threads = GridThread::GetThreads(); | ||||
|   std::cout<<GridLogMessage << "Grid is setup to use "<<threads<<" threads"<<std::endl; | ||||
|  | ||||
|   int Nloop=10; | ||||
|   int Nloop=100; | ||||
|   int nmu=0; | ||||
|   int maxlat=32; | ||||
|   for(int mu=0;mu<Nd;mu++) if (mpi_layout[mu]>1) nmu++; | ||||
|  | ||||
|   std::cout << GridLogMessage << "Number of iterations to average: "<< Nloop << std::endl; | ||||
|   std::vector<double> t_time(Nloop); | ||||
|   time_statistics timestat; | ||||
|  | ||||
|   std::cout<<GridLogMessage << "===================================================================================================="<<std::endl; | ||||
|   std::cout<<GridLogMessage << "= Benchmarking concurrent halo exchange in "<<nmu<<" dimensions"<<std::endl; | ||||
|   std::cout<<GridLogMessage << "===================================================================================================="<<std::endl; | ||||
|   std::cout<<GridLogMessage << "  L  "<<"\t\t"<<" Ls  "<<"\t\t"<<"bytes"<<"\t\t"<<"MB/s uni"<<"\t\t"<<"MB/s bidi"<<std::endl; | ||||
|   int maxlat=24; | ||||
|   header(); | ||||
|   for(int lat=4;lat<=maxlat;lat+=4){ | ||||
|     for(int Ls=8;Ls<=32;Ls*=2){ | ||||
|     for(int Ls=8;Ls<=8;Ls*=2){ | ||||
|  | ||||
|       std::vector<int> latt_size  ({lat*mpi_layout[0], | ||||
|       				    lat*mpi_layout[1], | ||||
| @@ -58,15 +88,23 @@ int main (int argc, char ** argv) | ||||
|       				    lat*mpi_layout[3]}); | ||||
|  | ||||
|       GridCartesian     Grid(latt_size,simd_layout,mpi_layout); | ||||
|       RealD Nrank = Grid._Nprocessors; | ||||
|       RealD Nnode = Grid.NodeCount(); | ||||
|       RealD ppn = Nrank/Nnode; | ||||
|  | ||||
|       std::vector<std::vector<HalfSpinColourVectorD> > xbuf(8,std::vector<HalfSpinColourVectorD>(lat*lat*lat*Ls)); | ||||
|       std::vector<std::vector<HalfSpinColourVectorD> > rbuf(8,std::vector<HalfSpinColourVectorD>(lat*lat*lat*Ls)); | ||||
|       std::vector<Vector<HalfSpinColourVectorD> > xbuf(8);	 | ||||
|       std::vector<Vector<HalfSpinColourVectorD> > rbuf(8); | ||||
|  | ||||
|       int ncomm; | ||||
|       int bytes=lat*lat*lat*Ls*sizeof(HalfSpinColourVectorD); | ||||
|       for(int mu=0;mu<8;mu++){ | ||||
| 	xbuf[mu].resize(lat*lat*lat*Ls); | ||||
| 	rbuf[mu].resize(lat*lat*lat*Ls); | ||||
| 	//	std::cout << " buffers " << std::hex << (uint64_t)&xbuf[mu][0] <<" " << (uint64_t)&rbuf[mu][0] <<std::endl; | ||||
|       } | ||||
|  | ||||
|       double start=usecond(); | ||||
|       for(int i=0;i<Nloop;i++){ | ||||
|       double start=usecond(); | ||||
|  | ||||
| 	std::vector<CartesianCommunicator::CommsRequest_t> requests; | ||||
|  | ||||
| @@ -79,7 +117,6 @@ int main (int argc, char ** argv) | ||||
| 	    int comm_proc=1; | ||||
| 	    int xmit_to_rank; | ||||
| 	    int recv_from_rank; | ||||
| 	     | ||||
| 	    Grid.ShiftedRanks(mu,comm_proc,xmit_to_rank,recv_from_rank); | ||||
| 	    Grid.SendToRecvFromBegin(requests, | ||||
| 				   (void *)&xbuf[mu][0], | ||||
| @@ -102,18 +139,24 @@ int main (int argc, char ** argv) | ||||
| 	} | ||||
| 	Grid.SendToRecvFromComplete(requests); | ||||
| 	Grid.Barrier(); | ||||
|  | ||||
|       } | ||||
| 	double stop=usecond(); | ||||
| 	t_time[i] = stop-start; // microseconds | ||||
|       } | ||||
|  | ||||
|       double dbytes    = bytes; | ||||
|       double xbytes    = Nloop*dbytes*2.0*ncomm; | ||||
|       timestat.statistics(t_time); | ||||
|  | ||||
|       double dbytes    = bytes*ppn; | ||||
|       double xbytes    = dbytes*2.0*ncomm; | ||||
|       double rbytes    = xbytes; | ||||
|       double bidibytes = xbytes+rbytes; | ||||
|  | ||||
|       double time = stop-start; // microseconds | ||||
|       std::cout<<GridLogMessage << std::setw(4) << lat<<"\t"<<Ls<<"\t" | ||||
|                <<std::setw(11) << bytes<< std::fixed << std::setprecision(1) << std::setw(7) | ||||
|                <<std::right<< xbytes/timestat.mean<<"  "<< xbytes*timestat.err/(timestat.mean*timestat.mean)<< " " | ||||
|                <<xbytes/timestat.max <<" "<< xbytes/timestat.min   | ||||
|                << "\t\t"<<std::setw(7)<< bidibytes/timestat.mean<< "  " << bidibytes*timestat.err/(timestat.mean*timestat.mean) << " " | ||||
|                << bidibytes/timestat.max << " " << bidibytes/timestat.min << std::endl; | ||||
|  | ||||
|       std::cout<<GridLogMessage << lat<<"\t\t"<<Ls<<"\t\t"<<bytes<<"\t\t"<<xbytes/time<<"\t\t"<<bidibytes/time<<std::endl; | ||||
|     } | ||||
|   }     | ||||
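(Editorial worked example, with invented numbers, of the MB/s columns above: each halo buffer holds lat^3 * Ls HalfSpinColourVectorD elements of 96 bytes each (2 spins x 3 colours x complex double); the per-iteration traffic multiplies this by two directions per communicating dimension and by ranks-per-node; and since the times are in microseconds, bytes per microsecond is reported directly as MB/s.)

// Editorial sketch; lat, Ls, ncomm, ppn and mean_us are hypothetical values.
int    lat = 24, Ls = 8, ncomm = 4;                     // communicating dimensions
double ppn = 1.0;                                       // ranks per node
double bytes   = double(lat)*lat*lat*Ls * 96.0;         // 96 = sizeof(HalfSpinColourVectorD)
double xbytes  = bytes * ppn * 2.0 * ncomm;             // forward + backward in each communicating dim
double mean_us = 5000.0;                                // made-up mean time per iteration
double MBs_uni  = xbytes / mean_us;                     // bytes per microsecond == MB/s
double MBs_bidi = 2.0 * MBs_uni;                        // send + receive counted separately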
|  | ||||
| @@ -121,25 +164,32 @@ int main (int argc, char ** argv) | ||||
|   std::cout<<GridLogMessage << "===================================================================================================="<<std::endl; | ||||
|   std::cout<<GridLogMessage << "= Benchmarking sequential halo exchange in "<<nmu<<" dimensions"<<std::endl; | ||||
|   std::cout<<GridLogMessage << "===================================================================================================="<<std::endl; | ||||
|   std::cout<<GridLogMessage << "  L  "<<"\t\t"<<" Ls  "<<"\t\t"<<"bytes"<<"\t\t"<<"MB/s uni"<<"\t\t"<<"MB/s bidi"<<std::endl; | ||||
|  | ||||
|   header(); | ||||
|  | ||||
|   for(int lat=4;lat<=maxlat;lat+=4){ | ||||
|     for(int Ls=8;Ls<=32;Ls*=2){ | ||||
|     for(int Ls=8;Ls<=8;Ls*=2){ | ||||
|  | ||||
|       std::vector<int> latt_size  ({lat,lat,lat,lat}); | ||||
|  | ||||
|       GridCartesian     Grid(latt_size,simd_layout,mpi_layout); | ||||
|       RealD Nrank = Grid._Nprocessors; | ||||
|       RealD Nnode = Grid.NodeCount(); | ||||
|       RealD ppn = Nrank/Nnode; | ||||
|  | ||||
|       std::vector<std::vector<HalfSpinColourVectorD> > xbuf(8,std::vector<HalfSpinColourVectorD>(lat*lat*lat*Ls)); | ||||
|       std::vector<std::vector<HalfSpinColourVectorD> > rbuf(8,std::vector<HalfSpinColourVectorD>(lat*lat*lat*Ls)); | ||||
|       std::vector<Vector<HalfSpinColourVectorD> > xbuf(8); | ||||
|       std::vector<Vector<HalfSpinColourVectorD> > rbuf(8); | ||||
|  | ||||
|       for(int mu=0;mu<8;mu++){ | ||||
| 	xbuf[mu].resize(lat*lat*lat*Ls); | ||||
| 	rbuf[mu].resize(lat*lat*lat*Ls); | ||||
| 	//	std::cout << " buffers " << std::hex << (uint64_t)&xbuf[mu][0] <<" " << (uint64_t)&rbuf[mu][0] <<std::endl; | ||||
|       } | ||||
|  | ||||
|       int ncomm; | ||||
|       int bytes=lat*lat*lat*Ls*sizeof(HalfSpinColourVectorD); | ||||
|  | ||||
|       double start=usecond(); | ||||
|       for(int i=0;i<Nloop;i++){ | ||||
|       double start=usecond(); | ||||
|      | ||||
| 	ncomm=0; | ||||
| 	for(int mu=0;mu<4;mu++){ | ||||
| @@ -178,30 +228,37 @@ int main (int argc, char ** argv) | ||||
| 	  } | ||||
| 	} | ||||
| 	Grid.Barrier(); | ||||
| 	double stop=usecond(); | ||||
| 	t_time[i] = stop-start; // microseconds | ||||
|  | ||||
|       } | ||||
|  | ||||
|       double stop=usecond(); | ||||
|       timestat.statistics(t_time); | ||||
|        | ||||
|       double dbytes    = bytes; | ||||
|       double xbytes    = Nloop*dbytes*2.0*ncomm; | ||||
|       double dbytes    = bytes*ppn; | ||||
|       double xbytes    = dbytes*2.0*ncomm; | ||||
|       double rbytes    = xbytes; | ||||
|       double bidibytes = xbytes+rbytes; | ||||
|  | ||||
|       double time = stop-start; | ||||
|     std::cout<<GridLogMessage << std::setw(4) << lat<<"\t"<<Ls<<"\t" | ||||
|                <<std::setw(11) << bytes<< std::fixed << std::setprecision(1) << std::setw(7) | ||||
|                <<std::right<< xbytes/timestat.mean<<"  "<< xbytes*timestat.err/(timestat.mean*timestat.mean)<< " " | ||||
|                <<xbytes/timestat.max <<" "<< xbytes/timestat.min   | ||||
|                << "\t\t"<<std::setw(7)<< bidibytes/timestat.mean<< "  " << bidibytes*timestat.err/(timestat.mean*timestat.mean) << " " | ||||
|                << bidibytes/timestat.max << " " << bidibytes/timestat.min << std::endl; | ||||
|  | ||||
|        | ||||
|       std::cout<<GridLogMessage << lat<<"\t\t"<<Ls<<"\t\t"<<bytes<<"\t\t"<<xbytes/time<<"\t\t"<<bidibytes/time<<std::endl; | ||||
|     } | ||||
|   }   | ||||
|  | ||||
|  | ||||
|   Nloop=10; | ||||
|   std::cout<<GridLogMessage << "===================================================================================================="<<std::endl; | ||||
|   std::cout<<GridLogMessage << "= Benchmarking concurrent STENCIL halo exchange in "<<nmu<<" dimensions"<<std::endl; | ||||
|   std::cout<<GridLogMessage << "===================================================================================================="<<std::endl; | ||||
|   std::cout<<GridLogMessage << "  L  "<<"\t\t"<<" Ls  "<<"\t\t"<<"bytes"<<"\t\t"<<"MB/s uni"<<"\t\t"<<"MB/s bidi"<<std::endl; | ||||
|   header(); | ||||
|  | ||||
|   for(int lat=4;lat<=maxlat;lat+=4){ | ||||
|     for(int Ls=8;Ls<=32;Ls*=2){ | ||||
|     for(int Ls=8;Ls<=8;Ls*=2){ | ||||
|  | ||||
|       std::vector<int> latt_size  ({lat*mpi_layout[0], | ||||
|       				    lat*mpi_layout[1], | ||||
| @@ -209,6 +266,9 @@ int main (int argc, char ** argv) | ||||
|       				    lat*mpi_layout[3]}); | ||||
|  | ||||
|       GridCartesian     Grid(latt_size,simd_layout,mpi_layout); | ||||
|       RealD Nrank = Grid._Nprocessors; | ||||
|       RealD Nnode = Grid.NodeCount(); | ||||
|       RealD ppn = Nrank/Nnode; | ||||
|  | ||||
|       std::vector<HalfSpinColourVectorD *> xbuf(8); | ||||
|       std::vector<HalfSpinColourVectorD *> rbuf(8); | ||||
| @@ -216,73 +276,86 @@ int main (int argc, char ** argv) | ||||
|       for(int d=0;d<8;d++){ | ||||
| 	xbuf[d] = (HalfSpinColourVectorD *)Grid.ShmBufferMalloc(lat*lat*lat*Ls*sizeof(HalfSpinColourVectorD)); | ||||
| 	rbuf[d] = (HalfSpinColourVectorD *)Grid.ShmBufferMalloc(lat*lat*lat*Ls*sizeof(HalfSpinColourVectorD)); | ||||
| 	bzero((void *)xbuf[d],lat*lat*lat*Ls*sizeof(HalfSpinColourVectorD)); | ||||
| 	bzero((void *)rbuf[d],lat*lat*lat*Ls*sizeof(HalfSpinColourVectorD)); | ||||
|       } | ||||
|  | ||||
|       int ncomm; | ||||
|       int bytes=lat*lat*lat*Ls*sizeof(HalfSpinColourVectorD); | ||||
|  | ||||
|       double start=usecond(); | ||||
|       double dbytes; | ||||
|       for(int i=0;i<Nloop;i++){ | ||||
| 	double start=usecond(); | ||||
|  | ||||
| 	dbytes=0; | ||||
| 	ncomm=0; | ||||
|  | ||||
| 	std::vector<CartesianCommunicator::CommsRequest_t> requests; | ||||
|  | ||||
| 	ncomm=0; | ||||
| 	for(int mu=0;mu<4;mu++){ | ||||
| 	 | ||||
|  | ||||
| 	  if (mpi_layout[mu]>1 ) { | ||||
| 	   | ||||
| 	    ncomm++; | ||||
| 	    int comm_proc=1; | ||||
| 	    int xmit_to_rank; | ||||
| 	    int recv_from_rank; | ||||
| 	     | ||||
| 	    Grid.ShiftedRanks(mu,comm_proc,xmit_to_rank,recv_from_rank); | ||||
| 	    dbytes+= | ||||
| 	      Grid.StencilSendToRecvFromBegin(requests, | ||||
| 					      (void *)&xbuf[mu][0], | ||||
| 					      xmit_to_rank, | ||||
| 					      (void *)&rbuf[mu][0], | ||||
| 					      recv_from_rank, | ||||
| 					    bytes); | ||||
| 					      bytes,mu); | ||||
| 	 | ||||
| 	    comm_proc = mpi_layout[mu]-1; | ||||
| 	   | ||||
| 	    Grid.ShiftedRanks(mu,comm_proc,xmit_to_rank,recv_from_rank); | ||||
| 	    dbytes+= | ||||
| 	      Grid.StencilSendToRecvFromBegin(requests, | ||||
| 					      (void *)&xbuf[mu+4][0], | ||||
| 					      xmit_to_rank, | ||||
| 					      (void *)&rbuf[mu+4][0], | ||||
| 					      recv_from_rank, | ||||
| 					    bytes); | ||||
| 					      bytes,mu+4); | ||||
| 	   | ||||
| 	  } | ||||
| 	} | ||||
| 	Grid.StencilSendToRecvFromComplete(requests); | ||||
| 	Grid.StencilSendToRecvFromComplete(requests,0); | ||||
| 	Grid.Barrier(); | ||||
|  | ||||
|       } | ||||
| 	double stop=usecond(); | ||||
| 	t_time[i] = stop-start; // microseconds | ||||
| 	 | ||||
|       double dbytes    = bytes; | ||||
|       double xbytes    = Nloop*dbytes*2.0*ncomm; | ||||
|       double rbytes    = xbytes; | ||||
|       double bidibytes = xbytes+rbytes; | ||||
|       } | ||||
|  | ||||
|       timestat.statistics(t_time); | ||||
|  | ||||
|       dbytes=dbytes*ppn; | ||||
|       double xbytes    = dbytes*0.5; | ||||
|       double rbytes    = dbytes*0.5; | ||||
|       double bidibytes = dbytes; | ||||
|  | ||||
|       std::cout<<GridLogMessage << std::setw(4) << lat<<"\t"<<Ls<<"\t" | ||||
|                <<std::setw(11) << bytes<< std::fixed << std::setprecision(1) << std::setw(7) | ||||
|                <<std::right<< xbytes/timestat.mean<<"  "<< xbytes*timestat.err/(timestat.mean*timestat.mean)<< " " | ||||
|                <<xbytes/timestat.max <<" "<< xbytes/timestat.min   | ||||
|                << "\t\t"<<std::setw(7)<< bidibytes/timestat.mean<< "  " << bidibytes*timestat.err/(timestat.mean*timestat.mean) << " " | ||||
|                << bidibytes/timestat.max << " " << bidibytes/timestat.min << std::endl; | ||||
|  | ||||
|       double time = stop-start; // microseconds | ||||
|  | ||||
|       std::cout<<GridLogMessage << lat<<"\t\t"<<Ls<<"\t\t"<<bytes<<"\t\t"<<xbytes/time<<"\t\t"<<bidibytes/time<<std::endl; | ||||
|     } | ||||
|   }     | ||||
|  | ||||
|  | ||||
|  | ||||
|   Nloop=100; | ||||
|   std::cout<<GridLogMessage << "===================================================================================================="<<std::endl; | ||||
|   std::cout<<GridLogMessage << "= Benchmarking sequential STENCIL halo exchange in "<<nmu<<" dimensions"<<std::endl; | ||||
|   std::cout<<GridLogMessage << "===================================================================================================="<<std::endl; | ||||
|   std::cout<<GridLogMessage << "  L  "<<"\t\t"<<" Ls  "<<"\t\t"<<"bytes"<<"\t\t"<<"MB/s uni"<<"\t\t"<<"MB/s bidi"<<std::endl; | ||||
|   header(); | ||||
|  | ||||
|   for(int lat=4;lat<=maxlat;lat+=4){ | ||||
|     for(int Ls=8;Ls<=32;Ls*=2){ | ||||
|     for(int Ls=8;Ls<=8;Ls*=2){ | ||||
|  | ||||
|       std::vector<int> latt_size  ({lat*mpi_layout[0], | ||||
|       				    lat*mpi_layout[1], | ||||
| @@ -290,6 +363,9 @@ int main (int argc, char ** argv) | ||||
|       				    lat*mpi_layout[3]}); | ||||
|  | ||||
|       GridCartesian     Grid(latt_size,simd_layout,mpi_layout); | ||||
|       RealD Nrank = Grid._Nprocessors; | ||||
|       RealD Nnode = Grid.NodeCount(); | ||||
|       RealD ppn = Nrank/Nnode; | ||||
|  | ||||
|       std::vector<HalfSpinColourVectorD *> xbuf(8); | ||||
|       std::vector<HalfSpinColourVectorD *> rbuf(8); | ||||
| @@ -297,16 +373,18 @@ int main (int argc, char ** argv) | ||||
|       for(int d=0;d<8;d++){ | ||||
| 	xbuf[d] = (HalfSpinColourVectorD *)Grid.ShmBufferMalloc(lat*lat*lat*Ls*sizeof(HalfSpinColourVectorD)); | ||||
| 	rbuf[d] = (HalfSpinColourVectorD *)Grid.ShmBufferMalloc(lat*lat*lat*Ls*sizeof(HalfSpinColourVectorD)); | ||||
| 	bzero((void *)xbuf[d],lat*lat*lat*Ls*sizeof(HalfSpinColourVectorD)); | ||||
| 	bzero((void *)rbuf[d],lat*lat*lat*Ls*sizeof(HalfSpinColourVectorD)); | ||||
|       } | ||||
|  | ||||
|       int ncomm; | ||||
|       int bytes=lat*lat*lat*Ls*sizeof(HalfSpinColourVectorD); | ||||
|  | ||||
|       double start=usecond(); | ||||
|       double dbytes; | ||||
|       for(int i=0;i<Nloop;i++){ | ||||
| 	double start=usecond(); | ||||
|  | ||||
| 	std::vector<CartesianCommunicator::CommsRequest_t> requests; | ||||
|  | ||||
| 	dbytes=0; | ||||
| 	ncomm=0; | ||||
| 	for(int mu=0;mu<4;mu++){ | ||||
| 	 | ||||
| @@ -318,44 +396,146 @@ int main (int argc, char ** argv) | ||||
| 	    int recv_from_rank; | ||||
| 	     | ||||
| 	    Grid.ShiftedRanks(mu,comm_proc,xmit_to_rank,recv_from_rank); | ||||
| 	    dbytes+= | ||||
| 	      Grid.StencilSendToRecvFromBegin(requests, | ||||
| 					      (void *)&xbuf[mu][0], | ||||
| 					      xmit_to_rank, | ||||
| 					      (void *)&rbuf[mu][0], | ||||
| 					      recv_from_rank, | ||||
| 					    bytes); | ||||
| 	    Grid.StencilSendToRecvFromComplete(requests); | ||||
| 					      bytes,mu); | ||||
| 	    Grid.StencilSendToRecvFromComplete(requests,mu); | ||||
| 	    requests.resize(0); | ||||
|  | ||||
| 	    comm_proc = mpi_layout[mu]-1; | ||||
| 	   | ||||
| 	    Grid.ShiftedRanks(mu,comm_proc,xmit_to_rank,recv_from_rank); | ||||
| 	    dbytes+= | ||||
| 	      Grid.StencilSendToRecvFromBegin(requests, | ||||
| 					      (void *)&xbuf[mu+4][0], | ||||
| 					      xmit_to_rank, | ||||
| 					      (void *)&rbuf[mu+4][0], | ||||
| 					      recv_from_rank, | ||||
| 					    bytes); | ||||
| 	    Grid.StencilSendToRecvFromComplete(requests); | ||||
| 					      bytes,mu+4); | ||||
| 	    Grid.StencilSendToRecvFromComplete(requests,mu+4); | ||||
| 	    requests.resize(0); | ||||
| 	   | ||||
| 	  } | ||||
| 	} | ||||
| 	Grid.Barrier(); | ||||
|  | ||||
|       } | ||||
| 	double stop=usecond(); | ||||
| 	t_time[i] = stop-start; // microseconds | ||||
| 	 | ||||
|       double dbytes    = bytes; | ||||
|       double xbytes    = Nloop*dbytes*2.0*ncomm; | ||||
|       double rbytes    = xbytes; | ||||
|       double bidibytes = xbytes+rbytes; | ||||
|       } | ||||
|  | ||||
|       double time = stop-start; // microseconds | ||||
|       timestat.statistics(t_time); | ||||
|  | ||||
|       dbytes=dbytes*ppn; | ||||
|       double xbytes    = dbytes*0.5; | ||||
|       double rbytes    = dbytes*0.5; | ||||
|       double bidibytes = dbytes; | ||||
|  | ||||
|  | ||||
|       std::cout<<GridLogMessage << std::setw(4) << lat<<"\t"<<Ls<<"\t" | ||||
|                <<std::setw(11) << bytes<< std::fixed << std::setprecision(1) << std::setw(7) | ||||
|                <<std::right<< xbytes/timestat.mean<<"  "<< xbytes*timestat.err/(timestat.mean*timestat.mean)<< " " | ||||
|                <<xbytes/timestat.max <<" "<< xbytes/timestat.min   | ||||
|                << "\t\t"<<std::setw(7)<< bidibytes/timestat.mean<< "  " << bidibytes*timestat.err/(timestat.mean*timestat.mean) << " " | ||||
|                << bidibytes/timestat.max << " " << bidibytes/timestat.min << std::endl; | ||||
|   | ||||
|       std::cout<<GridLogMessage << lat<<"\t\t"<<Ls<<"\t\t"<<bytes<<"\t\t"<<xbytes/time<<"\t\t"<<bidibytes/time<<std::endl; | ||||
|     } | ||||
|   }     | ||||
|  | ||||
|  | ||||
|  | ||||
|   std::cout<<GridLogMessage << "===================================================================================================="<<std::endl; | ||||
|   std::cout<<GridLogMessage << "= Benchmarking threaded STENCIL halo exchange in "<<nmu<<" dimensions"<<std::endl; | ||||
|   std::cout<<GridLogMessage << "===================================================================================================="<<std::endl; | ||||
|   header(); | ||||
|  | ||||
|   for(int lat=4;lat<=maxlat;lat+=4){ | ||||
|     for(int Ls=8;Ls<=8;Ls*=2){ | ||||
|  | ||||
|       std::vector<int> latt_size  ({lat*mpi_layout[0], | ||||
|       				    lat*mpi_layout[1], | ||||
|       				    lat*mpi_layout[2], | ||||
|       				    lat*mpi_layout[3]}); | ||||
|  | ||||
|       GridCartesian     Grid(latt_size,simd_layout,mpi_layout); | ||||
|       RealD Nrank = Grid._Nprocessors; | ||||
|       RealD Nnode = Grid.NodeCount(); | ||||
|       RealD ppn = Nrank/Nnode; | ||||
|  | ||||
|       std::vector<HalfSpinColourVectorD *> xbuf(8); | ||||
|       std::vector<HalfSpinColourVectorD *> rbuf(8); | ||||
|       Grid.ShmBufferFreeAll(); | ||||
|       for(int d=0;d<8;d++){ | ||||
| 	xbuf[d] = (HalfSpinColourVectorD *)Grid.ShmBufferMalloc(lat*lat*lat*Ls*sizeof(HalfSpinColourVectorD)); | ||||
| 	rbuf[d] = (HalfSpinColourVectorD *)Grid.ShmBufferMalloc(lat*lat*lat*Ls*sizeof(HalfSpinColourVectorD)); | ||||
| 	bzero((void *)xbuf[d],lat*lat*lat*Ls*sizeof(HalfSpinColourVectorD)); | ||||
| 	bzero((void *)rbuf[d],lat*lat*lat*Ls*sizeof(HalfSpinColourVectorD)); | ||||
|       } | ||||
|  | ||||
|       int ncomm; | ||||
|       int bytes=lat*lat*lat*Ls*sizeof(HalfSpinColourVectorD); | ||||
|       double dbytes; | ||||
|       for(int i=0;i<Nloop;i++){ | ||||
| 	double start=usecond(); | ||||
|  | ||||
| 	std::vector<CartesianCommunicator::CommsRequest_t> requests; | ||||
| 	dbytes=0; | ||||
| 	ncomm=0; | ||||
|  | ||||
| 	parallel_for(int dir=0;dir<8;dir++){ | ||||
|  | ||||
| 	  double tbytes; | ||||
| 	  int mu =dir % 4; | ||||
|  | ||||
| 	  if (mpi_layout[mu]>1 ) { | ||||
| 	   | ||||
| 	    ncomm++; | ||||
| 	    int xmit_to_rank; | ||||
| 	    int recv_from_rank; | ||||
| 	    if ( dir == mu ) {  | ||||
| 	      int comm_proc=1; | ||||
| 	      Grid.ShiftedRanks(mu,comm_proc,xmit_to_rank,recv_from_rank); | ||||
| 	    } else {  | ||||
| 	      int comm_proc = mpi_layout[mu]-1; | ||||
| 	      Grid.ShiftedRanks(mu,comm_proc,xmit_to_rank,recv_from_rank); | ||||
| 	    } | ||||
|  | ||||
| 	    tbytes= Grid.StencilSendToRecvFrom((void *)&xbuf[dir][0], xmit_to_rank, | ||||
| 					       (void *)&rbuf[dir][0], recv_from_rank, bytes,dir); | ||||
|  | ||||
| #pragma omp atomic | ||||
| 	    dbytes+=tbytes; | ||||
| 	  } | ||||
| 	} | ||||
| 	Grid.Barrier(); | ||||
| 	double stop=usecond(); | ||||
| 	t_time[i] = stop-start; // microseconds | ||||
|       } | ||||
|  | ||||
|       timestat.statistics(t_time); | ||||
|  | ||||
|       dbytes=dbytes*ppn; | ||||
|       double xbytes    = dbytes*0.5; | ||||
|       double rbytes    = dbytes*0.5; | ||||
|       double bidibytes = dbytes; | ||||
|  | ||||
|  | ||||
|       std::cout<<GridLogMessage << std::setw(4) << lat<<"\t"<<Ls<<"\t" | ||||
|                <<std::setw(11) << bytes<< std::fixed << std::setprecision(1) << std::setw(7) | ||||
|                <<std::right<< xbytes/timestat.mean<<"  "<< xbytes*timestat.err/(timestat.mean*timestat.mean)<< " " | ||||
|                <<xbytes/timestat.max <<" "<< xbytes/timestat.min   | ||||
|                << "\t\t"<<std::setw(7)<< bidibytes/timestat.mean<< "  " << bidibytes*timestat.err/(timestat.mean*timestat.mean) << " " | ||||
|                << bidibytes/timestat.max << " " << bidibytes/timestat.min << std::endl; | ||||
|   | ||||
|     } | ||||
|   }     | ||||
|  | ||||
|   std::cout<<GridLogMessage << "===================================================================================================="<<std::endl; | ||||
|   std::cout<<GridLogMessage << "= All done; Bye Bye"<<std::endl; | ||||
|   std::cout<<GridLogMessage << "===================================================================================================="<<std::endl; | ||||
|  | ||||
|   Grid_finalize(); | ||||
| } | ||||
|   | ||||
| @@ -51,7 +51,13 @@ int main (int argc, char ** argv) | ||||
|   std::cout<<GridLogMessage << "Grid is setup to use "<<threads<<" threads"<<std::endl; | ||||
|  | ||||
|   std::vector<int> latt4 = GridDefaultLatt(); | ||||
|   const int Ls=16; | ||||
|   int Ls=16; | ||||
|   for(int i=0;i<argc;i++) | ||||
|     if(std::string(argv[i]) == "-Ls"){ | ||||
|       std::stringstream ss(argv[i+1]); ss >> Ls; | ||||
|     } | ||||
|  | ||||
|  | ||||
|   GridCartesian         * UGrid   = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi()); | ||||
|   GridRedBlackCartesian * UrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid); | ||||
|   GridCartesian         * FGrid   = SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid); | ||||
| @@ -165,7 +171,7 @@ int main (int argc, char ** argv) | ||||
|   std::cout << GridLogMessage<< "*****************************************************************" <<std::endl; | ||||
|  | ||||
|   DomainWallFermionR Dw(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5); | ||||
|   int ncall =1000; | ||||
|   int ncall =500; | ||||
|   if (1) { | ||||
|     FGrid->Barrier(); | ||||
|     Dw.ZeroCounters(); | ||||
| @@ -303,6 +309,7 @@ int main (int argc, char ** argv) | ||||
|     } | ||||
|     assert(sum < 1.0e-4); | ||||
|  | ||||
|      | ||||
|     if(1){ | ||||
|       std::cout << GridLogMessage<< "*********************************************************" <<std::endl; | ||||
|       std::cout << GridLogMessage<< "* Benchmarking WilsonFermion5D<DomainWallVec5dImplR>::DhopEO "<<std::endl; | ||||
| @@ -381,7 +388,22 @@ int main (int argc, char ** argv) | ||||
|       } | ||||
|       assert(error<1.0e-4); | ||||
|     } | ||||
|  | ||||
|   if(0){ | ||||
|     std::cout << "Single cache warm call to sDw.Dhop " <<std::endl; | ||||
|     for(int i=0;i< PerformanceCounter::NumTypes(); i++ ){ | ||||
|       sDw.Dhop(ssrc,sresult,0); | ||||
|       PerformanceCounter Counter(i); | ||||
|       Counter.Start(); | ||||
|       sDw.Dhop(ssrc,sresult,0); | ||||
|       Counter.Stop(); | ||||
|       Counter.Report(); | ||||
|     } | ||||
|   } | ||||
|  | ||||
|   } | ||||
|  | ||||
|  | ||||
|  | ||||
|   if (1) | ||||
|   { // Naive wilson dag implementation | ||||
| @@ -487,9 +509,9 @@ int main (int argc, char ** argv) | ||||
|   std::cout<<GridLogMessage << "norm diff even  "<< norm2(src_e)<<std::endl; | ||||
|   std::cout<<GridLogMessage << "norm diff odd   "<< norm2(src_o)<<std::endl; | ||||
|  | ||||
|   //assert(norm2(src_e)<1.0e-4); | ||||
|   //assert(norm2(src_o)<1.0e-4); | ||||
|  | ||||
|   assert(norm2(src_e)<1.0e-4); | ||||
|   assert(norm2(src_o)<1.0e-4); | ||||
|   Grid_finalize(); | ||||
|   exit(0); | ||||
| } | ||||
|  | ||||
|   | ||||
							
								
								
									
benchmarks/Benchmark_gparity.cc  (new file, 190 lines)
							| @@ -0,0 +1,190 @@ | ||||
| #include <Grid/Grid.h> | ||||
| #include <sstream> | ||||
| using namespace std; | ||||
| using namespace Grid; | ||||
| using namespace Grid::QCD; | ||||
|  | ||||
| template<class d> | ||||
| struct scal { | ||||
|   d internal; | ||||
| }; | ||||
|  | ||||
|   Gamma::Algebra Gmu [] = { | ||||
|     Gamma::Algebra::GammaX, | ||||
|     Gamma::Algebra::GammaY, | ||||
|     Gamma::Algebra::GammaZ, | ||||
|     Gamma::Algebra::GammaT | ||||
|   }; | ||||
|  | ||||
| typedef typename GparityDomainWallFermionF::FermionField GparityLatticeFermionF; | ||||
| typedef typename GparityDomainWallFermionD::FermionField GparityLatticeFermionD; | ||||
|  | ||||
|  | ||||
|  | ||||
| int main (int argc, char ** argv) | ||||
| { | ||||
|   Grid_init(&argc,&argv); | ||||
|  | ||||
|   int Ls=16; | ||||
|   for(int i=0;i<argc;i++) | ||||
|     if(std::string(argv[i]) == "-Ls"){ | ||||
|       std::stringstream ss(argv[i+1]); ss >> Ls; | ||||
|     } | ||||
|  | ||||
|  | ||||
|   int threads = GridThread::GetThreads(); | ||||
|   std::cout<<GridLogMessage << "Grid is setup to use "<<threads<<" threads"<<std::endl; | ||||
|   std::cout<<GridLogMessage << "Ls = " << Ls << std::endl; | ||||
|  | ||||
|   std::vector<int> latt4 = GridDefaultLatt(); | ||||
|  | ||||
|   GridCartesian         * UGrid   = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd,vComplexF::Nsimd()),GridDefaultMpi()); | ||||
|   GridRedBlackCartesian * UrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid); | ||||
|   GridCartesian         * FGrid   = SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid); | ||||
|   GridRedBlackCartesian * FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGrid); | ||||
|  | ||||
|   std::vector<int> seeds4({1,2,3,4}); | ||||
|   std::vector<int> seeds5({5,6,7,8}); | ||||
|    | ||||
|   std::cout << GridLogMessage << "Initialising 4d RNG" << std::endl; | ||||
|   GridParallelRNG          RNG4(UGrid);  RNG4.SeedFixedIntegers(seeds4); | ||||
|   std::cout << GridLogMessage << "Initialising 5d RNG" << std::endl; | ||||
|   GridParallelRNG          RNG5(FGrid);  RNG5.SeedFixedIntegers(seeds5); | ||||
|   std::cout << GridLogMessage << "Initialised RNGs" << std::endl; | ||||
|  | ||||
|   GparityLatticeFermionF src   (FGrid); random(RNG5,src); | ||||
|   RealD N2 = 1.0/::sqrt(norm2(src)); | ||||
|   src = src*N2; | ||||
|  | ||||
|   GparityLatticeFermionF result(FGrid); result=zero; | ||||
|   GparityLatticeFermionF    ref(FGrid);    ref=zero; | ||||
|   GparityLatticeFermionF    tmp(FGrid); | ||||
|   GparityLatticeFermionF    err(FGrid); | ||||
|  | ||||
|   std::cout << GridLogMessage << "Drawing gauge field" << std::endl; | ||||
|   LatticeGaugeFieldF Umu(UGrid);  | ||||
|   SU3::HotConfiguration(RNG4,Umu);  | ||||
|   std::cout << GridLogMessage << "Random gauge initialised " << std::endl; | ||||
|  | ||||
|   RealD mass=0.1; | ||||
|   RealD M5  =1.8; | ||||
|  | ||||
|   RealD NP = UGrid->_Nprocessors; | ||||
|   RealD NN = UGrid->NodeCount(); | ||||
|  | ||||
|   std::cout << GridLogMessage<< "*****************************************************************" <<std::endl; | ||||
|   std::cout << GridLogMessage<< "* Kernel options --dslash-generic, --dslash-unroll, --dslash-asm" <<std::endl; | ||||
|   std::cout << GridLogMessage<< "*****************************************************************" <<std::endl; | ||||
|   std::cout << GridLogMessage<< "*****************************************************************" <<std::endl; | ||||
|   std::cout << GridLogMessage<< "* Benchmarking DomainWallFermion::Dhop                  "<<std::endl; | ||||
|   std::cout << GridLogMessage<< "* Vectorising space-time by "<<vComplexF::Nsimd()<<std::endl; | ||||
| #ifdef GRID_OMP | ||||
|   if ( WilsonKernelsStatic::Comms == WilsonKernelsStatic::CommsAndCompute ) std::cout << GridLogMessage<< "* Using Overlapped Comms/Compute" <<std::endl; | ||||
|   if ( WilsonKernelsStatic::Comms == WilsonKernelsStatic::CommsThenCompute) std::cout << GridLogMessage<< "* Using sequential comms compute" <<std::endl; | ||||
| #endif | ||||
|   if ( WilsonKernelsStatic::Opt == WilsonKernelsStatic::OptGeneric   ) std::cout << GridLogMessage<< "* Using GENERIC Nc WilsonKernels" <<std::endl; | ||||
|   if ( WilsonKernelsStatic::Opt == WilsonKernelsStatic::OptHandUnroll) std::cout << GridLogMessage<< "* Using Nc=3       WilsonKernels" <<std::endl; | ||||
|   if ( WilsonKernelsStatic::Opt == WilsonKernelsStatic::OptInlineAsm ) std::cout << GridLogMessage<< "* Using Asm Nc=3   WilsonKernels" <<std::endl; | ||||
|   std::cout << GridLogMessage<< "*****************************************************************" <<std::endl; | ||||
|  | ||||
|  | ||||
|  | ||||
|   std::cout << GridLogMessage<< "* SINGLE/SINGLE"<<std::endl; | ||||
|   GparityDomainWallFermionF Dw(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5); | ||||
|   int ncall =1000; | ||||
|   if (1) { | ||||
|     FGrid->Barrier(); | ||||
|     Dw.ZeroCounters(); | ||||
|     Dw.Dhop(src,result,0); | ||||
|     std::cout<<GridLogMessage<<"Called warmup"<<std::endl; | ||||
|     double t0=usecond(); | ||||
|     for(int i=0;i<ncall;i++){ | ||||
|       __SSC_START; | ||||
|       Dw.Dhop(src,result,0); | ||||
|       __SSC_STOP; | ||||
|     } | ||||
|     double t1=usecond(); | ||||
|     FGrid->Barrier(); | ||||
|      | ||||
|     double volume=Ls;  for(int mu=0;mu<Nd;mu++) volume=volume*latt4[mu]; | ||||
|     double flops=2*1344*volume*ncall; | ||||
|  | ||||
|     std::cout<<GridLogMessage << "Called Dw "<<ncall<<" times in "<<t1-t0<<" us"<<std::endl; | ||||
|     //    std::cout<<GridLogMessage << "norm result "<< norm2(result)<<std::endl; | ||||
|     //    std::cout<<GridLogMessage << "norm ref    "<< norm2(ref)<<std::endl; | ||||
|     std::cout<<GridLogMessage << "mflop/s =   "<< flops/(t1-t0)<<std::endl; | ||||
|     std::cout<<GridLogMessage << "mflop/s per rank =  "<< flops/(t1-t0)/NP<<std::endl; | ||||
|     std::cout<<GridLogMessage << "mflop/s per node =  "<< flops/(t1-t0)/NN<<std::endl; | ||||
|     Dw.Report(); | ||||
|   } | ||||
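  // (Editorial note, not part of the commit: the flop count above, 2*1344*volume*ncall,
  //  carries an extra factor of 2 relative to the usual 1344 flops/site Wilson dslash
  //  count, which presumably reflects the two-component G-parity flavour doublet that
  //  GparityDomainWallFermion::Dhop acts on.)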
|  | ||||
|   std::cout << GridLogMessage<< "* SINGLE/HALF"<<std::endl; | ||||
|   GparityDomainWallFermionFH DwH(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5); | ||||
|   if (1) { | ||||
|     FGrid->Barrier(); | ||||
|     DwH.ZeroCounters(); | ||||
|     DwH.Dhop(src,result,0); | ||||
|     double t0=usecond(); | ||||
|     for(int i=0;i<ncall;i++){ | ||||
|       __SSC_START; | ||||
|       DwH.Dhop(src,result,0); | ||||
|       __SSC_STOP; | ||||
|     } | ||||
|     double t1=usecond(); | ||||
|     FGrid->Barrier(); | ||||
|      | ||||
|     double volume=Ls;  for(int mu=0;mu<Nd;mu++) volume=volume*latt4[mu]; | ||||
|     double flops=2*1344*volume*ncall; | ||||
|  | ||||
|     std::cout<<GridLogMessage << "Called half prec comms Dw "<<ncall<<" times in "<<t1-t0<<" us"<<std::endl; | ||||
|     std::cout<<GridLogMessage << "mflop/s =   "<< flops/(t1-t0)<<std::endl; | ||||
|     std::cout<<GridLogMessage << "mflop/s per rank =  "<< flops/(t1-t0)/NP<<std::endl; | ||||
|     std::cout<<GridLogMessage << "mflop/s per node =  "<< flops/(t1-t0)/NN<<std::endl; | ||||
|     DwH.Report(); | ||||
|   } | ||||
|  | ||||
|   GridCartesian         * UGrid_d   = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd,vComplexD::Nsimd()),GridDefaultMpi()); | ||||
|   GridRedBlackCartesian * UrbGrid_d = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid_d); | ||||
|   GridCartesian         * FGrid_d   = SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid_d); | ||||
|   GridRedBlackCartesian * FrbGrid_d = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGrid_d); | ||||
|  | ||||
|    | ||||
|   std::cout << GridLogMessage<< "* DOUBLE/DOUBLE"<<std::endl; | ||||
|   GparityLatticeFermionD src_d(FGrid_d); | ||||
|   precisionChange(src_d,src); | ||||
|  | ||||
|   LatticeGaugeFieldD Umu_d(UGrid_d);  | ||||
|   precisionChange(Umu_d,Umu); | ||||
|  | ||||
|   GparityLatticeFermionD result_d(FGrid_d); | ||||
|  | ||||
|   GparityDomainWallFermionD DwD(Umu_d,*FGrid_d,*FrbGrid_d,*UGrid_d,*UrbGrid_d,mass,M5); | ||||
|   if (1) { | ||||
|     FGrid_d->Barrier(); | ||||
|     DwD.ZeroCounters(); | ||||
|     DwD.Dhop(src_d,result_d,0); | ||||
|     std::cout<<GridLogMessage<<"Called warmup"<<std::endl; | ||||
|     double t0=usecond(); | ||||
|     for(int i=0;i<ncall;i++){ | ||||
|       __SSC_START; | ||||
|       DwD.Dhop(src_d,result_d,0); | ||||
|       __SSC_STOP; | ||||
|     } | ||||
|     double t1=usecond(); | ||||
|     FGrid_d->Barrier(); | ||||
|      | ||||
|     double volume=Ls;  for(int mu=0;mu<Nd;mu++) volume=volume*latt4[mu]; | ||||
|     double flops=2*1344*volume*ncall; | ||||
|  | ||||
|     std::cout<<GridLogMessage << "Called Dw "<<ncall<<" times in "<<t1-t0<<" us"<<std::endl; | ||||
|     //    std::cout<<GridLogMessage << "norm result "<< norm2(result)<<std::endl; | ||||
|     //    std::cout<<GridLogMessage << "norm ref    "<< norm2(ref)<<std::endl; | ||||
|     std::cout<<GridLogMessage << "mflop/s =   "<< flops/(t1-t0)<<std::endl; | ||||
|     std::cout<<GridLogMessage << "mflop/s per rank =  "<< flops/(t1-t0)/NP<<std::endl; | ||||
|     std::cout<<GridLogMessage << "mflop/s per node =  "<< flops/(t1-t0)/NN<<std::endl; | ||||
|     DwD.Report(); | ||||
|   } | ||||
|  | ||||
|   Grid_finalize(); | ||||
| } | ||||
|  | ||||
| @@ -55,21 +55,21 @@ int main (int argc, char ** argv) | ||||
|   std::cout<<GridLogMessage << "===================================================================================================="<<std::endl; | ||||
|   std::cout<<GridLogMessage << "  L  "<<"\t\t"<<"bytes"<<"\t\t\t"<<"GB/s"<<"\t\t"<<"Gflop/s"<<"\t\t seconds"<<std::endl; | ||||
|   std::cout<<GridLogMessage << "----------------------------------------------------------"<<std::endl; | ||||
|   uint64_t lmax=44; | ||||
| #define NLOOP (1*lmax*lmax*lmax*lmax/vol) | ||||
|   for(int lat=4;lat<=lmax;lat+=4){ | ||||
|   uint64_t lmax=96; | ||||
| #define NLOOP (10*lmax*lmax*lmax*lmax/vol) | ||||
|   for(int lat=8;lat<=lmax;lat+=8){ | ||||
|  | ||||
|       std::vector<int> latt_size  ({lat*mpi_layout[0],lat*mpi_layout[1],lat*mpi_layout[2],lat*mpi_layout[3]}); | ||||
|       int vol = latt_size[0]*latt_size[1]*latt_size[2]*latt_size[3]; | ||||
|       int64_t vol= latt_size[0]*latt_size[1]*latt_size[2]*latt_size[3]; | ||||
|       GridCartesian     Grid(latt_size,simd_layout,mpi_layout); | ||||
|  | ||||
|       uint64_t Nloop=NLOOP; | ||||
|  | ||||
|       //      GridParallelRNG          pRNG(&Grid);      pRNG.SeedFixedIntegers(std::vector<int>({45,12,81,9}); | ||||
|       //      GridParallelRNG          pRNG(&Grid);      pRNG.SeedFixedIntegers(std::vector<int>({45,12,81,9})); | ||||
|  | ||||
|       LatticeVec z(&Grid); //random(pRNG,z); | ||||
|       LatticeVec x(&Grid); //random(pRNG,x); | ||||
|       LatticeVec y(&Grid); //random(pRNG,y); | ||||
|       LatticeVec z(&Grid);// random(pRNG,z); | ||||
|       LatticeVec x(&Grid);// random(pRNG,x); | ||||
|       LatticeVec y(&Grid);// random(pRNG,y); | ||||
|       double a=2.0; | ||||
|  | ||||
|  | ||||
| @@ -83,7 +83,7 @@ int main (int argc, char ** argv) | ||||
|       double time = (stop-start)/Nloop*1000; | ||||
|        | ||||
|       double flops=vol*Nvec*2;// mul,add | ||||
|       double bytes=3*vol*Nvec*sizeof(Real); | ||||
|       double bytes=3.0*vol*Nvec*sizeof(Real); | ||||
|       std::cout<<GridLogMessage<<std::setprecision(3) << lat<<"\t\t"<<bytes<<"   \t\t"<<bytes/time<<"\t\t"<<flops/time<<"\t\t"<<(stop-start)/1000./1000.<<std::endl; | ||||
|  | ||||
|     } | ||||
| @@ -94,17 +94,17 @@ int main (int argc, char ** argv) | ||||
|   std::cout<<GridLogMessage << "  L  "<<"\t\t"<<"bytes"<<"\t\t\t"<<"GB/s"<<"\t\t"<<"Gflop/s"<<"\t\t seconds"<<std::endl; | ||||
|   std::cout<<GridLogMessage << "----------------------------------------------------------"<<std::endl; | ||||
|    | ||||
|   for(int lat=4;lat<=lmax;lat+=4){ | ||||
|   for(int lat=8;lat<=lmax;lat+=8){ | ||||
|  | ||||
|       std::vector<int> latt_size  ({lat*mpi_layout[0],lat*mpi_layout[1],lat*mpi_layout[2],lat*mpi_layout[3]}); | ||||
|       int vol = latt_size[0]*latt_size[1]*latt_size[2]*latt_size[3]; | ||||
|       int64_t vol= latt_size[0]*latt_size[1]*latt_size[2]*latt_size[3]; | ||||
|       GridCartesian     Grid(latt_size,simd_layout,mpi_layout); | ||||
|  | ||||
|       //      GridParallelRNG          pRNG(&Grid);      pRNG.SeedFixedIntegers(std::vector<int>({45,12,81,9}); | ||||
|       //      GridParallelRNG          pRNG(&Grid);      pRNG.SeedFixedIntegers(std::vector<int>({45,12,81,9})); | ||||
|  | ||||
|       LatticeVec z(&Grid); //random(pRNG,z); | ||||
|       LatticeVec x(&Grid); //random(pRNG,x); | ||||
|       LatticeVec y(&Grid); //random(pRNG,y); | ||||
|       LatticeVec z(&Grid);// random(pRNG,z); | ||||
|       LatticeVec x(&Grid);// random(pRNG,x); | ||||
|       LatticeVec y(&Grid);// random(pRNG,y); | ||||
|       double a=2.0; | ||||
|  | ||||
|       uint64_t Nloop=NLOOP; | ||||
| @@ -119,7 +119,7 @@ int main (int argc, char ** argv) | ||||
|       double time = (stop-start)/Nloop*1000; | ||||
|       | ||||
|       double flops=vol*Nvec*2;// mul,add | ||||
|       double bytes=3*vol*Nvec*sizeof(Real); | ||||
|       double bytes=3.0*vol*Nvec*sizeof(Real); | ||||
|       std::cout<<GridLogMessage<<std::setprecision(3) << lat<<"\t\t"<<bytes<<"   \t\t"<<bytes/time<<"\t\t"<<flops/time<<"\t\t"<<(stop-start)/1000./1000.<<std::endl; | ||||
|  | ||||
|     } | ||||
| @@ -129,20 +129,20 @@ int main (int argc, char ** argv) | ||||
|   std::cout<<GridLogMessage << "===================================================================================================="<<std::endl; | ||||
|   std::cout<<GridLogMessage << "  L  "<<"\t\t"<<"bytes"<<"\t\t\t"<<"GB/s"<<"\t\t"<<"Gflop/s"<<"\t\t seconds"<<std::endl; | ||||
|  | ||||
|   for(int lat=4;lat<=lmax;lat+=4){ | ||||
|   for(int lat=8;lat<=lmax;lat+=8){ | ||||
|  | ||||
|  | ||||
|       std::vector<int> latt_size  ({lat*mpi_layout[0],lat*mpi_layout[1],lat*mpi_layout[2],lat*mpi_layout[3]}); | ||||
|       int vol = latt_size[0]*latt_size[1]*latt_size[2]*latt_size[3]; | ||||
|       int64_t vol= latt_size[0]*latt_size[1]*latt_size[2]*latt_size[3]; | ||||
|       uint64_t Nloop=NLOOP; | ||||
|  | ||||
|       GridCartesian     Grid(latt_size,simd_layout,mpi_layout); | ||||
|  | ||||
|       //      GridParallelRNG          pRNG(&Grid);      pRNG.SeedFixedIntegers(std::vector<int>({45,12,81,9}); | ||||
|       //      GridParallelRNG          pRNG(&Grid);      pRNG.SeedFixedIntegers(std::vector<int>({45,12,81,9})); | ||||
|  | ||||
|       LatticeVec z(&Grid); //random(pRNG,z); | ||||
|       LatticeVec x(&Grid); //random(pRNG,x); | ||||
|       LatticeVec y(&Grid); //random(pRNG,y); | ||||
|       LatticeVec z(&Grid);// random(pRNG,z); | ||||
|       LatticeVec x(&Grid);// random(pRNG,x); | ||||
|       LatticeVec y(&Grid);// random(pRNG,y); | ||||
|       RealD a=2.0; | ||||
|  | ||||
|  | ||||
| @@ -154,7 +154,7 @@ int main (int argc, char ** argv) | ||||
|       double stop=usecond(); | ||||
|       double time = (stop-start)/Nloop*1000; | ||||
|        | ||||
|       double bytes=2*vol*Nvec*sizeof(Real); | ||||
|       double bytes=2.0*vol*Nvec*sizeof(Real); | ||||
|       double flops=vol*Nvec*1;// mul | ||||
|       std::cout<<GridLogMessage <<std::setprecision(3) << lat<<"\t\t"<<bytes<<"   \t\t"<<bytes/time<<"\t\t"<<flops/time<<"\t\t"<<(stop-start)/1000./1000.<<std::endl; | ||||
|  | ||||
| @@ -166,17 +166,17 @@ int main (int argc, char ** argv) | ||||
|   std::cout<<GridLogMessage << "  L  "<<"\t\t"<<"bytes"<<"\t\t\t"<<"GB/s"<<"\t\t"<<"Gflop/s"<<"\t\t seconds"<<std::endl; | ||||
|   std::cout<<GridLogMessage << "----------------------------------------------------------"<<std::endl; | ||||
|  | ||||
|   for(int lat=4;lat<=lmax;lat+=4){ | ||||
|   for(int lat=8;lat<=lmax;lat+=8){ | ||||
|  | ||||
|       std::vector<int> latt_size  ({lat*mpi_layout[0],lat*mpi_layout[1],lat*mpi_layout[2],lat*mpi_layout[3]}); | ||||
|       int vol = latt_size[0]*latt_size[1]*latt_size[2]*latt_size[3]; | ||||
|       int64_t vol= latt_size[0]*latt_size[1]*latt_size[2]*latt_size[3]; | ||||
|       uint64_t Nloop=NLOOP; | ||||
|       GridCartesian     Grid(latt_size,simd_layout,mpi_layout); | ||||
|  | ||||
|       //      GridParallelRNG          pRNG(&Grid);      pRNG.SeedFixedIntegers(std::vector<int>({45,12,81,9}); | ||||
|       LatticeVec z(&Grid); //random(pRNG,z); | ||||
|       LatticeVec x(&Grid); //random(pRNG,x); | ||||
|       LatticeVec y(&Grid); //random(pRNG,y); | ||||
|       //      GridParallelRNG          pRNG(&Grid);      pRNG.SeedFixedIntegers(std::vector<int>({45,12,81,9})); | ||||
|       LatticeVec z(&Grid);// random(pRNG,z); | ||||
|       LatticeVec x(&Grid);// random(pRNG,x); | ||||
|       LatticeVec y(&Grid);// random(pRNG,y); | ||||
|       RealD a=2.0; | ||||
|       Real nn;       | ||||
|       double start=usecond(); | ||||
| @@ -187,7 +187,7 @@ int main (int argc, char ** argv) | ||||
|       double stop=usecond(); | ||||
|       double time = (stop-start)/Nloop*1000; | ||||
|        | ||||
|       double bytes=vol*Nvec*sizeof(Real); | ||||
|       double bytes=1.0*vol*Nvec*sizeof(Real); | ||||
|       double flops=vol*Nvec*2;// mul,add | ||||
|       std::cout<<GridLogMessage<<std::setprecision(3) << lat<<"\t\t"<<bytes<<"  \t\t"<<bytes/time<<"\t\t"<<flops/time<< "\t\t"<<(stop-start)/1000./1000.<< "\t\t " <<std::endl; | ||||
|  | ||||
|   | ||||
| @@ -40,7 +40,7 @@ int main (int argc, char ** argv) | ||||
|   std::vector<int> simd_layout = GridDefaultSimd(Nd,vComplex::Nsimd()); | ||||
|   std::vector<int> mpi_layout  = GridDefaultMpi(); | ||||
|   GridCartesian               Grid(latt_size,simd_layout,mpi_layout); | ||||
|   GridRedBlackCartesian     RBGrid(latt_size,simd_layout,mpi_layout); | ||||
|   GridRedBlackCartesian     RBGrid(&Grid); | ||||
|  | ||||
|   int threads = GridThread::GetThreads(); | ||||
|   std::cout<<GridLogMessage << "Grid is setup to use "<<threads<<" threads"<<std::endl; | ||||
|   | ||||
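The one-line change above recurs in two later hunks: GridRedBlackCartesian is now constructed directly from the parent grid, RBGrid(&Grid), instead of repeating the latt_size/simd_layout/mpi_layout vectors, so the red-black grid always inherits its layout from the full GridCartesian it is derived from.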
| @@ -35,14 +35,14 @@ using namespace Grid::QCD; | ||||
| int main (int argc, char ** argv) | ||||
| { | ||||
|   Grid_init(&argc,&argv); | ||||
| #define LMAX (32) | ||||
| #define LMAX (64) | ||||
|  | ||||
|   int Nloop=200; | ||||
|   int64_t Nloop=20; | ||||
|  | ||||
|   std::vector<int> simd_layout = GridDefaultSimd(Nd,vComplex::Nsimd()); | ||||
|   std::vector<int> mpi_layout  = GridDefaultMpi(); | ||||
|  | ||||
|   int threads = GridThread::GetThreads(); | ||||
|   int64_t threads = GridThread::GetThreads(); | ||||
|   std::cout<<GridLogMessage << "Grid is setup to use "<<threads<<" threads"<<std::endl; | ||||
|  | ||||
|   std::cout<<GridLogMessage << "===================================================================================================="<<std::endl; | ||||
| @@ -54,16 +54,16 @@ int main (int argc, char ** argv) | ||||
|   for(int lat=2;lat<=LMAX;lat+=2){ | ||||
|  | ||||
|       std::vector<int> latt_size  ({lat*mpi_layout[0],lat*mpi_layout[1],lat*mpi_layout[2],lat*mpi_layout[3]}); | ||||
|       int vol = latt_size[0]*latt_size[1]*latt_size[2]*latt_size[3]; | ||||
|       int64_t vol = latt_size[0]*latt_size[1]*latt_size[2]*latt_size[3]; | ||||
|       GridCartesian     Grid(latt_size,simd_layout,mpi_layout); | ||||
|       //      GridParallelRNG          pRNG(&Grid);      pRNG.SeedFixedIntegers(std::vector<int>({45,12,81,9}); | ||||
|       GridParallelRNG          pRNG(&Grid);      pRNG.SeedFixedIntegers(std::vector<int>({45,12,81,9})); | ||||
|  | ||||
|       LatticeColourMatrix z(&Grid);// random(pRNG,z); | ||||
|       LatticeColourMatrix x(&Grid);// random(pRNG,x); | ||||
|       LatticeColourMatrix y(&Grid);// random(pRNG,y); | ||||
|       LatticeColourMatrix z(&Grid); random(pRNG,z); | ||||
|       LatticeColourMatrix x(&Grid); random(pRNG,x); | ||||
|       LatticeColourMatrix y(&Grid); random(pRNG,y); | ||||
|  | ||||
|       double start=usecond(); | ||||
|       for(int i=0;i<Nloop;i++){ | ||||
|       for(int64_t i=0;i<Nloop;i++){ | ||||
| 	x=x*y; | ||||
|       } | ||||
|       double stop=usecond(); | ||||
| @@ -86,17 +86,17 @@ int main (int argc, char ** argv) | ||||
|   for(int lat=2;lat<=LMAX;lat+=2){ | ||||
|  | ||||
|       std::vector<int> latt_size  ({lat*mpi_layout[0],lat*mpi_layout[1],lat*mpi_layout[2],lat*mpi_layout[3]}); | ||||
|       int vol = latt_size[0]*latt_size[1]*latt_size[2]*latt_size[3]; | ||||
|       int64_t vol = latt_size[0]*latt_size[1]*latt_size[2]*latt_size[3]; | ||||
|  | ||||
|       GridCartesian     Grid(latt_size,simd_layout,mpi_layout); | ||||
|       //      GridParallelRNG          pRNG(&Grid);      pRNG.SeedFixedIntegers(std::vector<int>({45,12,81,9}); | ||||
|       GridParallelRNG          pRNG(&Grid);      pRNG.SeedFixedIntegers(std::vector<int>({45,12,81,9})); | ||||
|  | ||||
|       LatticeColourMatrix z(&Grid); //random(pRNG,z); | ||||
|       LatticeColourMatrix x(&Grid); //random(pRNG,x); | ||||
|       LatticeColourMatrix y(&Grid); //random(pRNG,y); | ||||
|       LatticeColourMatrix z(&Grid); random(pRNG,z); | ||||
|       LatticeColourMatrix x(&Grid); random(pRNG,x); | ||||
|       LatticeColourMatrix y(&Grid); random(pRNG,y); | ||||
|  | ||||
|       double start=usecond(); | ||||
|       for(int i=0;i<Nloop;i++){ | ||||
|       for(int64_t i=0;i<Nloop;i++){ | ||||
| 	z=x*y; | ||||
|       } | ||||
|       double stop=usecond(); | ||||
| @@ -117,17 +117,17 @@ int main (int argc, char ** argv) | ||||
|   for(int lat=2;lat<=LMAX;lat+=2){ | ||||
|  | ||||
|       std::vector<int> latt_size  ({lat*mpi_layout[0],lat*mpi_layout[1],lat*mpi_layout[2],lat*mpi_layout[3]}); | ||||
|       int vol = latt_size[0]*latt_size[1]*latt_size[2]*latt_size[3]; | ||||
|       int64_t vol = latt_size[0]*latt_size[1]*latt_size[2]*latt_size[3]; | ||||
|  | ||||
|       GridCartesian     Grid(latt_size,simd_layout,mpi_layout); | ||||
|       //      GridParallelRNG          pRNG(&Grid);      pRNG.SeedFixedIntegers(std::vector<int>({45,12,81,9}); | ||||
|       GridParallelRNG          pRNG(&Grid);      pRNG.SeedFixedIntegers(std::vector<int>({45,12,81,9})); | ||||
|  | ||||
|       LatticeColourMatrix z(&Grid); //random(pRNG,z); | ||||
|       LatticeColourMatrix x(&Grid); //random(pRNG,x); | ||||
|       LatticeColourMatrix y(&Grid); //random(pRNG,y); | ||||
|       LatticeColourMatrix z(&Grid); random(pRNG,z); | ||||
|       LatticeColourMatrix x(&Grid); random(pRNG,x); | ||||
|       LatticeColourMatrix y(&Grid); random(pRNG,y); | ||||
|  | ||||
|       double start=usecond(); | ||||
|       for(int i=0;i<Nloop;i++){ | ||||
|       for(int64_t i=0;i<Nloop;i++){ | ||||
| 	mult(z,x,y); | ||||
|       } | ||||
|       double stop=usecond(); | ||||
| @@ -148,17 +148,17 @@ int main (int argc, char ** argv) | ||||
|   for(int lat=2;lat<=LMAX;lat+=2){ | ||||
|  | ||||
|       std::vector<int> latt_size  ({lat*mpi_layout[0],lat*mpi_layout[1],lat*mpi_layout[2],lat*mpi_layout[3]}); | ||||
|       int vol = latt_size[0]*latt_size[1]*latt_size[2]*latt_size[3]; | ||||
|       int64_t vol = latt_size[0]*latt_size[1]*latt_size[2]*latt_size[3]; | ||||
|  | ||||
|       GridCartesian     Grid(latt_size,simd_layout,mpi_layout); | ||||
|       //      GridParallelRNG          pRNG(&Grid);      pRNG.SeedFixedIntegers(std::vector<int>({45,12,81,9}); | ||||
|       GridParallelRNG          pRNG(&Grid);      pRNG.SeedFixedIntegers(std::vector<int>({45,12,81,9})); | ||||
|  | ||||
|       LatticeColourMatrix z(&Grid); //random(pRNG,z); | ||||
|       LatticeColourMatrix x(&Grid); //random(pRNG,x); | ||||
|       LatticeColourMatrix y(&Grid); //random(pRNG,y); | ||||
|       LatticeColourMatrix z(&Grid); random(pRNG,z); | ||||
|       LatticeColourMatrix x(&Grid); random(pRNG,x); | ||||
|       LatticeColourMatrix y(&Grid); random(pRNG,y); | ||||
|  | ||||
|       double start=usecond(); | ||||
|       for(int i=0;i<Nloop;i++){ | ||||
|       for(int64_t i=0;i<Nloop;i++){ | ||||
| 	mac(z,x,y); | ||||
|       } | ||||
|       double stop=usecond(); | ||||
|   | ||||
| @@ -58,7 +58,7 @@ int main (int argc, char ** argv) | ||||
|   std::vector<int> simd_layout = GridDefaultSimd(Nd,vComplex::Nsimd()); | ||||
|   std::vector<int> mpi_layout  = GridDefaultMpi(); | ||||
|   GridCartesian               Grid(latt_size,simd_layout,mpi_layout); | ||||
|   GridRedBlackCartesian     RBGrid(latt_size,simd_layout,mpi_layout); | ||||
|   GridRedBlackCartesian     RBGrid(&Grid); | ||||
|  | ||||
|   int threads = GridThread::GetThreads(); | ||||
|   std::cout<<GridLogMessage << "Grid is setup to use "<<threads<<" threads"<<std::endl; | ||||
|   | ||||
| @@ -93,7 +93,7 @@ int main (int argc, char ** argv) | ||||
| 	  std::cout << latt_size.back() << "\t\t"; | ||||
|  | ||||
| 	  GridCartesian           Grid(latt_size,simd_layout,mpi_layout); | ||||
| 	  GridRedBlackCartesian RBGrid(latt_size,simd_layout,mpi_layout); | ||||
| 	  GridRedBlackCartesian RBGrid(&Grid); | ||||
|  | ||||
| 	  GridParallelRNG  pRNG(&Grid); pRNG.SeedFixedIntegers(seeds); | ||||
| 	  LatticeGaugeField Umu(&Grid); random(pRNG,Umu); | ||||
|   | ||||
| @@ -1,4 +1,4 @@ | ||||
| ]#!/usr/bin/env bash | ||||
| #!/usr/bin/env bash | ||||
|  | ||||
| EIGEN_URL='http://bitbucket.org/eigen/eigen/get/3.3.3.tar.bz2' | ||||
|  | ||||
|   | ||||
configure.ac (70 changed lines)
							| @@ -13,6 +13,10 @@ m4_ifdef([AM_SILENT_RULES], [AM_SILENT_RULES([yes])]) | ||||
| ################ Get git info | ||||
| #AC_REVISION([m4_esyscmd_s([./scripts/configure.commit])]) | ||||
|  | ||||
| ################ Set flags | ||||
| # do not move! | ||||
| CXXFLAGS="-O3 $CXXFLAGS" | ||||
|  | ||||
| ############### Checks for programs | ||||
| AC_PROG_CXX | ||||
| AC_PROG_RANLIB | ||||
| @@ -27,7 +31,6 @@ AX_GXX_VERSION | ||||
| AC_DEFINE_UNQUOTED([GXX_VERSION],["$GXX_VERSION"], | ||||
|       [version of g++ that will compile the code]) | ||||
|  | ||||
| CXXFLAGS="-O3 $CXXFLAGS" | ||||
|  | ||||
|  | ||||
| ############### Checks for typedefs, structures, and compiler characteristics | ||||
| @@ -51,9 +54,14 @@ AC_CHECK_HEADERS(malloc/malloc.h) | ||||
| AC_CHECK_HEADERS(malloc.h) | ||||
| AC_CHECK_HEADERS(endian.h) | ||||
| AC_CHECK_HEADERS(execinfo.h) | ||||
| AC_CHECK_HEADERS(numaif.h) | ||||
| AC_CHECK_DECLS([ntohll],[], [], [[#include <arpa/inet.h>]]) | ||||
| AC_CHECK_DECLS([be64toh],[], [], [[#include <arpa/inet.h>]]) | ||||
|  | ||||
| ############## Standard libraries | ||||
| AC_CHECK_LIB([m],[cos]) | ||||
| AC_CHECK_LIB([stdc++],[abort]) | ||||
|  | ||||
| ############### GMP and MPFR | ||||
| AC_ARG_WITH([gmp], | ||||
|     [AS_HELP_STRING([--with-gmp=prefix], | ||||
| @@ -184,6 +192,15 @@ AC_SEARCH_LIBS([limeCreateReader], [lime], | ||||
| In order to use ILDG file format please install or provide the correct path to your installation | ||||
| Info at: http://usqcd.jlab.org/usqcd-docs/c-lime/)]) | ||||
|  | ||||
| AC_SEARCH_LIBS([crc32], [z], | ||||
|                [AC_DEFINE([HAVE_ZLIB], [1], [Define to 1 if you have the `LIBZ' library])] | ||||
|                [have_zlib=true] [LIBS="${LIBS} -lz"], | ||||
| 	       [AC_MSG_ERROR(zlib library was not found in your system.)]) | ||||
|  | ||||
| AC_SEARCH_LIBS([move_pages], [numa], | ||||
|                [AC_DEFINE([HAVE_LIBNUMA], [1], [Define to 1 if you have the `LIBNUMA' library])] | ||||
|                [have_libnuma=true] [LIBS="${LIBS} -lnuma"], | ||||
| 	       [AC_MSG_WARN(libnuma library was not found in your system. Some optimisations will not apply)]) | ||||
|  | ||||
| AC_SEARCH_LIBS([H5Fopen], [hdf5_cpp], | ||||
|                [AC_DEFINE([HAVE_HDF5], [1], [Define to 1 if you have the `HDF5' library])] | ||||
| @@ -237,6 +254,7 @@ case ${ax_cv_cxx_compiler_vendor} in | ||||
|         SIMD_FLAGS='';; | ||||
|       KNL) | ||||
|         AC_DEFINE([AVX512],[1],[AVX512 intrinsics]) | ||||
|         AC_DEFINE([KNL],[1],[Knights landing processor]) | ||||
|         SIMD_FLAGS='-march=knl';; | ||||
|       GEN) | ||||
|         AC_DEFINE([GEN],[1],[generic vector code]) | ||||
| @@ -244,6 +262,9 @@ case ${ax_cv_cxx_compiler_vendor} in | ||||
|                            [generic SIMD vector width (in bytes)]) | ||||
|         SIMD_GEN_WIDTH_MSG=" (width= $ac_gen_simd_width)" | ||||
|         SIMD_FLAGS='';; | ||||
|       NEONv8) | ||||
|         AC_DEFINE([NEONV8],[1],[ARMv8 NEON]) | ||||
|         SIMD_FLAGS='-march=armv8-a';; | ||||
|       QPX|BGQ) | ||||
|         AC_DEFINE([QPX],[1],[QPX intrinsics for BG/Q]) | ||||
|         SIMD_FLAGS='';; | ||||
| @@ -272,6 +293,7 @@ case ${ax_cv_cxx_compiler_vendor} in | ||||
|         SIMD_FLAGS='';; | ||||
|       KNL) | ||||
|         AC_DEFINE([AVX512],[1],[AVX512 intrinsics for Knights Landing]) | ||||
|         AC_DEFINE([KNL],[1],[Knights landing processor]) | ||||
|         SIMD_FLAGS='-xmic-avx512';; | ||||
|       GEN) | ||||
|         AC_DEFINE([GEN],[1],[generic vector code]) | ||||
| @@ -309,8 +331,41 @@ case ${ac_PRECISION} in | ||||
|      double) | ||||
|        AC_DEFINE([GRID_DEFAULT_PRECISION_DOUBLE],[1],[GRID_DEFAULT_PRECISION is DOUBLE] ) | ||||
|      ;; | ||||
|      *) | ||||
|      AC_MSG_ERROR([${ac_PRECISION} unsupported --enable-precision option]); | ||||
|      ;; | ||||
| esac | ||||
|  | ||||
| ######################  Shared memory allocation technique under MPI3 | ||||
| AC_ARG_ENABLE([shm],[AC_HELP_STRING([--enable-shm=shmget|shmopen|hugetlbfs], | ||||
|               [Select SHM allocation technique])],[ac_SHM=${enable_shm}],[ac_SHM=shmopen]) | ||||
|  | ||||
| case ${ac_SHM} in | ||||
|  | ||||
|      shmget) | ||||
|      AC_DEFINE([GRID_MPI3_SHMGET],[1],[GRID_MPI3_SHMGET] ) | ||||
|      ;; | ||||
|  | ||||
|      shmopen) | ||||
|      AC_DEFINE([GRID_MPI3_SHMOPEN],[1],[GRID_MPI3_SHMOPEN] ) | ||||
|      ;; | ||||
|  | ||||
|      hugetlbfs) | ||||
|      AC_DEFINE([GRID_MPI3_SHMMMAP],[1],[GRID_MPI3_SHMMMAP] ) | ||||
|      ;; | ||||
|  | ||||
|      *) | ||||
|      AC_MSG_ERROR([${ac_SHM} unsupported --enable-shm option]); | ||||
|      ;; | ||||
| esac | ||||
|  | ||||
| ######################  Shared base path for SHMMMAP | ||||
| AC_ARG_ENABLE([shmpath],[AC_HELP_STRING([--enable-shmpath=path], | ||||
|               [Select SHM mmap base path for hugetlbfs])], | ||||
| 	      [ac_SHMPATH=${enable_shmpath}], | ||||
| 	      [ac_SHMPATH=/var/lib/hugetlbfs/pagesize-2MB/]) | ||||
| AC_DEFINE_UNQUOTED([GRID_SHM_PATH],["$ac_SHMPATH"],[Path to a hugetlbfs filesystem for MMAPing]) | ||||
|  | ||||
| ############### communication type selection | ||||
| AC_ARG_ENABLE([comms],[AC_HELP_STRING([--enable-comms=none|mpi|mpi-auto|mpi3|mpi3-auto|shmem], | ||||
|               [Select communications])],[ac_COMMS=${enable_comms}],[ac_COMMS=none]) | ||||
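Taken together, the configure.ac changes above add two user-facing options: --enable-shm=shmget|shmopen|hugetlbfs selects how the MPI3 shared-memory segments are allocated (default shmopen), and --enable-shmpath=<path> sets the hugetlbfs base path used by the mmap variant (default /var/lib/hugetlbfs/pagesize-2MB/); both selections are echoed in the configure summary further down. An invocation along the lines of ./configure --enable-comms=mpi3 --enable-shm=hugetlbfs --enable-shmpath=/var/lib/hugetlbfs/pagesize-2MB/ (illustrative only, combine with your usual SIMD and precision flags) exercises the new code paths, and the comms hunks that follow swap the old mpi3l target for a new mpit one.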
| @@ -320,14 +375,14 @@ case ${ac_COMMS} in | ||||
|         AC_DEFINE([GRID_COMMS_NONE],[1],[GRID_COMMS_NONE] ) | ||||
|         comms_type='none' | ||||
|      ;; | ||||
|      mpi3l*) | ||||
|        AC_DEFINE([GRID_COMMS_MPI3L],[1],[GRID_COMMS_MPI3L] ) | ||||
|        comms_type='mpi3l' | ||||
|      ;; | ||||
|      mpi3*) | ||||
|         AC_DEFINE([GRID_COMMS_MPI3],[1],[GRID_COMMS_MPI3] ) | ||||
|         comms_type='mpi3' | ||||
|      ;; | ||||
|      mpit) | ||||
|         AC_DEFINE([GRID_COMMS_MPIT],[1],[GRID_COMMS_MPIT] ) | ||||
|         comms_type='mpit' | ||||
|      ;; | ||||
|      mpi*) | ||||
|         AC_DEFINE([GRID_COMMS_MPI],[1],[GRID_COMMS_MPI] ) | ||||
|         comms_type='mpi' | ||||
| @@ -355,7 +410,7 @@ esac | ||||
| AM_CONDITIONAL(BUILD_COMMS_SHMEM, [ test "${comms_type}X" == "shmemX" ]) | ||||
| AM_CONDITIONAL(BUILD_COMMS_MPI,   [ test "${comms_type}X" == "mpiX" ]) | ||||
| AM_CONDITIONAL(BUILD_COMMS_MPI3,  [ test "${comms_type}X" == "mpi3X" ] ) | ||||
| AM_CONDITIONAL(BUILD_COMMS_MPI3L, [ test "${comms_type}X" == "mpi3lX" ] ) | ||||
| AM_CONDITIONAL(BUILD_COMMS_MPIT,  [ test "${comms_type}X" == "mpitX" ] ) | ||||
| AM_CONDITIONAL(BUILD_COMMS_NONE,  [ test "${comms_type}X" == "noneX" ]) | ||||
|  | ||||
| ############### RNG selection | ||||
| @@ -460,6 +515,8 @@ compiler version            : ${ax_cv_gxx_version} | ||||
| SIMD                        : ${ac_SIMD}${SIMD_GEN_WIDTH_MSG} | ||||
| Threading                   : ${ac_openmp} | ||||
| Communications type         : ${comms_type} | ||||
| Shared memory allocator     : ${ac_SHM} | ||||
| Shared memory mmap path     : ${ac_SHMPATH} | ||||
| Default precision           : ${ac_PRECISION} | ||||
| Software FP16 conversion    : ${ac_SFW_FP16} | ||||
| RNG choice                  : ${ac_RNG} | ||||
| @@ -493,6 +550,7 @@ AC_CONFIG_FILES(tests/forces/Makefile) | ||||
| AC_CONFIG_FILES(tests/hadrons/Makefile) | ||||
| AC_CONFIG_FILES(tests/hmc/Makefile) | ||||
| AC_CONFIG_FILES(tests/solver/Makefile) | ||||
| AC_CONFIG_FILES(tests/lanczos/Makefile) | ||||
| AC_CONFIG_FILES(tests/smearing/Makefile) | ||||
| AC_CONFIG_FILES(tests/qdpxx/Makefile) | ||||
| AC_CONFIG_FILES(tests/testu01/Makefile) | ||||
|   | ||||
| @@ -41,9 +41,10 @@ using namespace Hadrons; | ||||
| // constructor ///////////////////////////////////////////////////////////////// | ||||
| Environment::Environment(void) | ||||
| { | ||||
|     nd_ = GridDefaultLatt().size(); | ||||
|     dim_ = GridDefaultLatt(); | ||||
|     nd_  = dim_.size(); | ||||
|     grid4d_.reset(SpaceTimeGrid::makeFourDimGrid( | ||||
|         GridDefaultLatt(), GridDefaultSimd(nd_, vComplex::Nsimd()), | ||||
|         dim_, GridDefaultSimd(nd_, vComplex::Nsimd()), | ||||
|         GridDefaultMpi())); | ||||
|     gridRb4d_.reset(SpaceTimeGrid::makeFourDimRedBlackGrid(grid4d_.get())); | ||||
|     auto loc = getGrid()->LocalDimensions(); | ||||
| @@ -132,6 +133,16 @@ unsigned int Environment::getNd(void) const | ||||
|     return nd_; | ||||
| } | ||||
|  | ||||
| std::vector<int> Environment::getDim(void) const | ||||
| { | ||||
|     return dim_; | ||||
| } | ||||
|  | ||||
| int Environment::getDim(const unsigned int mu) const | ||||
| { | ||||
|     return dim_[mu]; | ||||
| } | ||||
|  | ||||
| // random number generator ///////////////////////////////////////////////////// | ||||
| void Environment::setSeed(const std::vector<int> &seed) | ||||
| { | ||||
| @@ -271,6 +282,21 @@ std::string Environment::getModuleType(const std::string name) const | ||||
|     return getModuleType(getModuleAddress(name)); | ||||
| } | ||||
|  | ||||
| std::string Environment::getModuleNamespace(const unsigned int address) const | ||||
| { | ||||
|     std::string type = getModuleType(address), ns; | ||||
|      | ||||
|     auto pos2 = type.rfind("::"); | ||||
|     auto pos1 = type.rfind("::", pos2 - 2); | ||||
|      | ||||
|     return type.substr(pos1 + 2, pos2 - pos1 - 2); | ||||
| } | ||||
|  | ||||
| std::string Environment::getModuleNamespace(const std::string name) const | ||||
| { | ||||
|     return getModuleNamespace(getModuleAddress(name)); | ||||
| } | ||||
|  | ||||
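The two rfind calls above peel the second-to-last ::-separated component out of the fully qualified module type name, i.e. the module namespace. A standalone illustration of that string manipulation (the example type name is hypothetical; the real string comes from typeName):

    #include <iostream>
    #include <string>

    int main(void)
    {
        // hypothetical fully qualified module type name
        std::string type = "Hadrons::MContraction::TMeson<WilsonImplR, WilsonImplR>";
        auto pos2 = type.rfind("::");            // "::" before the class name
        auto pos1 = type.rfind("::", pos2 - 2);  // "::" before the namespace
        std::cout << type.substr(pos1 + 2, pos2 - pos1 - 2) << std::endl; // MContraction
        return 0;
    }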
| bool Environment::hasModule(const unsigned int address) const | ||||
| { | ||||
|     return (address < module_.size()); | ||||
| @@ -491,9 +517,16 @@ std::string Environment::getObjectName(const unsigned int address) const | ||||
| std::string Environment::getObjectType(const unsigned int address) const | ||||
| { | ||||
|     if (hasRegisteredObject(address)) | ||||
|     { | ||||
|         if (object_[address].type) | ||||
|         { | ||||
|             return typeName(object_[address].type); | ||||
|         } | ||||
|         else | ||||
|         { | ||||
|             return "<no type>"; | ||||
|         } | ||||
|     } | ||||
|     else if (hasObject(address)) | ||||
|     { | ||||
|         HADRON_ERROR("object with address " + std::to_string(address) | ||||
| @@ -532,6 +565,23 @@ Environment::Size Environment::getObjectSize(const std::string name) const | ||||
|     return getObjectSize(getObjectAddress(name)); | ||||
| } | ||||
|  | ||||
| unsigned int Environment::getObjectModule(const unsigned int address) const | ||||
| { | ||||
|     if (hasObject(address)) | ||||
|     { | ||||
|         return object_[address].module; | ||||
|     } | ||||
|     else | ||||
|     { | ||||
|         HADRON_ERROR("no object with address " + std::to_string(address)); | ||||
|     } | ||||
| } | ||||
|  | ||||
| unsigned int Environment::getObjectModule(const std::string name) const | ||||
| { | ||||
|     return getObjectModule(getObjectAddress(name)); | ||||
| } | ||||
|  | ||||
| unsigned int Environment::getObjectLs(const unsigned int address) const | ||||
| { | ||||
|     if (hasRegisteredObject(address)) | ||||
|   | ||||
| @@ -106,6 +106,8 @@ public: | ||||
|     void                    createGrid(const unsigned int Ls); | ||||
|     GridCartesian *         getGrid(const unsigned int Ls = 1) const; | ||||
|     GridRedBlackCartesian * getRbGrid(const unsigned int Ls = 1) const; | ||||
|     std::vector<int>        getDim(void) const; | ||||
|     int                     getDim(const unsigned int mu) const; | ||||
|     unsigned int            getNd(void) const; | ||||
|     // random number generator | ||||
|     void                    setSeed(const std::vector<int> &seed); | ||||
| @@ -131,6 +133,8 @@ public: | ||||
|     std::string             getModuleName(const unsigned int address) const; | ||||
|     std::string             getModuleType(const unsigned int address) const; | ||||
|     std::string             getModuleType(const std::string name) const; | ||||
|     std::string             getModuleNamespace(const unsigned int address) const; | ||||
|     std::string             getModuleNamespace(const std::string name) const; | ||||
|     bool                    hasModule(const unsigned int address) const; | ||||
|     bool                    hasModule(const std::string name) const; | ||||
|     Graph<unsigned int>     makeModuleGraph(void) const; | ||||
| @@ -171,6 +175,8 @@ public: | ||||
|     std::string             getObjectType(const std::string name) const; | ||||
|     Size                    getObjectSize(const unsigned int address) const; | ||||
|     Size                    getObjectSize(const std::string name) const; | ||||
|     unsigned int            getObjectModule(const unsigned int address) const; | ||||
|     unsigned int            getObjectModule(const std::string name) const; | ||||
|     unsigned int            getObjectLs(const unsigned int address) const; | ||||
|     unsigned int            getObjectLs(const std::string name) const; | ||||
|     bool                    hasObject(const unsigned int address) const; | ||||
| @@ -181,6 +187,10 @@ public: | ||||
|     bool                    hasCreatedObject(const std::string name) const; | ||||
|     bool                    isObject5d(const unsigned int address) const; | ||||
|     bool                    isObject5d(const std::string name) const; | ||||
|     template <typename T> | ||||
|     bool                    isObjectOfType(const unsigned int address) const; | ||||
|     template <typename T> | ||||
|     bool                    isObjectOfType(const std::string name) const; | ||||
|     Environment::Size       getTotalSize(void) const; | ||||
|     void                    addOwnership(const unsigned int owner, | ||||
|                                          const unsigned int property); | ||||
| @@ -197,6 +207,7 @@ private: | ||||
|     bool                                   dryRun_{false}; | ||||
|     unsigned int                           traj_, locVol_; | ||||
|     // grids | ||||
|     std::vector<int>                       dim_; | ||||
|     GridPt                                 grid4d_; | ||||
|     std::map<unsigned int, GridPt>         grid5d_; | ||||
|     GridRbPt                               gridRb4d_; | ||||
| @@ -343,7 +354,7 @@ T * Environment::getObject(const unsigned int address) const | ||||
|         else | ||||
|         { | ||||
|             HADRON_ERROR("object with address " + std::to_string(address) + | ||||
|                          " does not have type '" + typeid(T).name() + | ||||
|                          " does not have type '" + typeName(&typeid(T)) + | ||||
|                          "' (has type '" + getObjectType(address) + "')"); | ||||
|         } | ||||
|     } | ||||
| @@ -380,6 +391,37 @@ T * Environment::createLattice(const std::string name) | ||||
|     return createLattice<T>(getObjectAddress(name)); | ||||
| } | ||||
|  | ||||
| template <typename T> | ||||
| bool Environment::isObjectOfType(const unsigned int address) const | ||||
| { | ||||
|     if (hasRegisteredObject(address)) | ||||
|     { | ||||
|         if (auto h = dynamic_cast<Holder<T> *>(object_[address].data.get())) | ||||
|         { | ||||
|             return true; | ||||
|         } | ||||
|         else | ||||
|         { | ||||
|             return false; | ||||
|         } | ||||
|     } | ||||
|     else if (hasObject(address)) | ||||
|     { | ||||
|         HADRON_ERROR("object with address " + std::to_string(address) + | ||||
|                      " exists but is not registered"); | ||||
|     } | ||||
|     else | ||||
|     { | ||||
|         HADRON_ERROR("no object with address " + std::to_string(address)); | ||||
|     } | ||||
| } | ||||
|  | ||||
| template <typename T> | ||||
| bool Environment::isObjectOfType(const std::string name) const | ||||
| { | ||||
|     return isObjectOfType<T>(getObjectAddress(name)); | ||||
| } | ||||
|  | ||||
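The isObjectOfType<T> helper added here lets a module probe the concrete type of a stored object without fetching it: for registered objects it simply attempts a dynamic_cast of the underlying Holder<T> and returns the result, and it only throws for unregistered or missing addresses. Its first user is the meson module further down, which calls env().template isObjectOfType<SlicedPropagator1>(par().q1) to decide whether a propagator has already been sinked.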
| END_HADRONS_NAMESPACE | ||||
|  | ||||
| #endif // Hadrons_Environment_hpp_ | ||||
|   | ||||
| @@ -51,23 +51,43 @@ using Grid::operator<<; | ||||
|  * error with GCC 5 (clang & GCC 6 compile fine without it). | ||||
|  */ | ||||
|  | ||||
| // FIXME: find a way to do that in a more general fashion | ||||
| #ifndef FIMPL | ||||
| #define FIMPL WilsonImplR | ||||
| #endif | ||||
| #ifndef SIMPL | ||||
| #define SIMPL ScalarImplCR | ||||
| #endif | ||||
|  | ||||
| BEGIN_HADRONS_NAMESPACE | ||||
|  | ||||
| // type aliases | ||||
| #define TYPE_ALIASES(FImpl, suffix)\ | ||||
| #define FERM_TYPE_ALIASES(FImpl, suffix)\ | ||||
| typedef FermionOperator<FImpl>                       FMat##suffix;             \ | ||||
| typedef typename FImpl::FermionField                 FermionField##suffix;     \ | ||||
| typedef typename FImpl::PropagatorField              PropagatorField##suffix;  \ | ||||
| typedef typename FImpl::SitePropagator               SitePropagator##suffix;   \ | ||||
| typedef typename FImpl::DoubledGaugeField            DoubledGaugeField##suffix;\ | ||||
| typedef std::function<void(FermionField##suffix &,                             \ | ||||
| typedef std::vector<typename FImpl::SitePropagator::scalar_object>             \ | ||||
|                                                      SlicedPropagator##suffix; | ||||
|  | ||||
| #define GAUGE_TYPE_ALIASES(FImpl, suffix)\ | ||||
| typedef typename FImpl::DoubledGaugeField DoubledGaugeField##suffix; | ||||
|  | ||||
| #define SCALAR_TYPE_ALIASES(SImpl, suffix)\ | ||||
| typedef typename SImpl::Field ScalarField##suffix;\ | ||||
| typedef typename SImpl::Field PropagatorField##suffix; | ||||
|  | ||||
| #define SOLVER_TYPE_ALIASES(FImpl, suffix)\ | ||||
| typedef std::function<void(FermionField##suffix &,\ | ||||
|                       const FermionField##suffix &)> SolverFn##suffix; | ||||
|  | ||||
| #define SINK_TYPE_ALIASES(suffix)\ | ||||
| typedef std::function<SlicedPropagator##suffix(const PropagatorField##suffix &)> SinkFn##suffix; | ||||
|  | ||||
| #define FGS_TYPE_ALIASES(FImpl, suffix)\ | ||||
| FERM_TYPE_ALIASES(FImpl, suffix)\ | ||||
| GAUGE_TYPE_ALIASES(FImpl, suffix)\ | ||||
| SOLVER_TYPE_ALIASES(FImpl, suffix) | ||||
|  | ||||
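The single TYPE_ALIASES macro is split here into narrower pieces: FERM_TYPE_ALIASES for fermion-matrix and propagator types, GAUGE_TYPE_ALIASES for the doubled gauge field, SOLVER_TYPE_ALIASES for the solver function type, SCALAR_TYPE_ALIASES and SINK_TYPE_ALIASES for scalar fields and sink functions, with FGS_TYPE_ALIASES bundling the fermion, gauge and solver sets. The module hunks below follow this split: actions, solvers and the gauge propagator pull in FGS_TYPE_ALIASES, while contraction modules that only touch propagators switch to FERM_TYPE_ALIASES (plus SINK_TYPE_ALIASES where a sink is consumed).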
| // logger | ||||
| class HadronsLogger: public Logger | ||||
| { | ||||
|   | ||||
| @@ -1,31 +1,3 @@ | ||||
| /************************************************************************************* | ||||
|  | ||||
| Grid physics library, www.github.com/paboyle/Grid  | ||||
|  | ||||
| Source file: extras/Hadrons/Modules.hpp | ||||
|  | ||||
| Copyright (C) 2015 | ||||
| Copyright (C) 2016 | ||||
|  | ||||
| Author: Antonin Portelli <antonin.portelli@me.com> | ||||
|  | ||||
| This program is free software; you can redistribute it and/or modify | ||||
| it under the terms of the GNU General Public License as published by | ||||
| the Free Software Foundation; either version 2 of the License, or | ||||
| (at your option) any later version. | ||||
|  | ||||
| This program is distributed in the hope that it will be useful, | ||||
| but WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
| MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the | ||||
| GNU General Public License for more details. | ||||
|  | ||||
| You should have received a copy of the GNU General Public License along | ||||
| with this program; if not, write to the Free Software Foundation, Inc., | ||||
| 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||||
|  | ||||
| See the full license in the file "LICENSE" in the top level distribution directory | ||||
| *************************************************************************************/ | ||||
| /*  END LEGAL */ | ||||
| #include <Grid/Hadrons/Modules/MAction/DWF.hpp> | ||||
| #include <Grid/Hadrons/Modules/MAction/Wilson.hpp> | ||||
| #include <Grid/Hadrons/Modules/MContraction/Baryon.hpp> | ||||
| @@ -36,13 +8,18 @@ See the full license in the file "LICENSE" in the top level distribution directo | ||||
| #include <Grid/Hadrons/Modules/MContraction/WeakHamiltonianEye.hpp> | ||||
| #include <Grid/Hadrons/Modules/MContraction/WeakHamiltonianNonEye.hpp> | ||||
| #include <Grid/Hadrons/Modules/MContraction/WeakNeutral4ptDisc.hpp> | ||||
| #include <Grid/Hadrons/Modules/MFermion/GaugeProp.hpp> | ||||
| #include <Grid/Hadrons/Modules/MGauge/Load.hpp> | ||||
| #include <Grid/Hadrons/Modules/MGauge/Random.hpp> | ||||
| #include <Grid/Hadrons/Modules/MGauge/StochEm.hpp> | ||||
| #include <Grid/Hadrons/Modules/MGauge/Unit.hpp> | ||||
| #include <Grid/Hadrons/Modules/MLoop/NoiseLoop.hpp> | ||||
| #include <Grid/Hadrons/Modules/MScalar/ChargedProp.hpp> | ||||
| #include <Grid/Hadrons/Modules/MScalar/FreeProp.hpp> | ||||
| #include <Grid/Hadrons/Modules/MScalar/Scalar.hpp> | ||||
| #include <Grid/Hadrons/Modules/MSink/Point.hpp> | ||||
| #include <Grid/Hadrons/Modules/MSolver/RBPrecCG.hpp> | ||||
| #include <Grid/Hadrons/Modules/MSource/Point.hpp> | ||||
| #include <Grid/Hadrons/Modules/MSource/SeqGamma.hpp> | ||||
| #include <Grid/Hadrons/Modules/MSource/Wall.hpp> | ||||
| #include <Grid/Hadrons/Modules/MSource/Z2.hpp> | ||||
| #include <Grid/Hadrons/Modules/Quark.hpp> | ||||
|   | ||||
| @@ -27,8 +27,8 @@ See the full license in the file "LICENSE" in the top level distribution directo | ||||
| *************************************************************************************/ | ||||
| /*  END LEGAL */ | ||||
|  | ||||
| #ifndef Hadrons_DWF_hpp_ | ||||
| #define Hadrons_DWF_hpp_ | ||||
| #ifndef Hadrons_MAction_DWF_hpp_ | ||||
| #define Hadrons_MAction_DWF_hpp_ | ||||
|  | ||||
| #include <Grid/Hadrons/Global.hpp> | ||||
| #include <Grid/Hadrons/Module.hpp> | ||||
| @@ -48,14 +48,15 @@ public: | ||||
|                                     std::string, gauge, | ||||
|                                     unsigned int, Ls, | ||||
|                                     double      , mass, | ||||
|                                     double      , M5); | ||||
|                                     double      , M5, | ||||
|                                     std::string , boundary); | ||||
| }; | ||||
|  | ||||
| template <typename FImpl> | ||||
| class TDWF: public Module<DWFPar> | ||||
| { | ||||
| public: | ||||
|     TYPE_ALIASES(FImpl,); | ||||
|     FGS_TYPE_ALIASES(FImpl,); | ||||
| public: | ||||
|     // constructor | ||||
|     TDWF(const std::string name); | ||||
| @@ -116,14 +117,19 @@ void TDWF<FImpl>::execute(void) | ||||
|                  << par().mass << ", M5= " << par().M5 << " and Ls= " | ||||
|                  << par().Ls << " using gauge field '" << par().gauge << "'" | ||||
|                  << std::endl; | ||||
|     LOG(Message) << "Fermion boundary conditions: " << par().boundary  | ||||
|                  << std::endl; | ||||
|     env().createGrid(par().Ls); | ||||
|     auto &U      = *env().template getObject<LatticeGaugeField>(par().gauge); | ||||
|     auto &g4     = *env().getGrid(); | ||||
|     auto &grb4   = *env().getRbGrid(); | ||||
|     auto &g5     = *env().getGrid(par().Ls); | ||||
|     auto &grb5   = *env().getRbGrid(par().Ls); | ||||
|     std::vector<Complex> boundary = strToVec<Complex>(par().boundary); | ||||
|     typename DomainWallFermion<FImpl>::ImplParams implParams(boundary); | ||||
|     FMat *fMatPt = new DomainWallFermion<FImpl>(U, g5, grb5, g4, grb4, | ||||
|                                                 par().mass, par().M5); | ||||
|                                                 par().mass, par().M5, | ||||
|                                                 implParams); | ||||
|     env().setObject(getName(), fMatPt); | ||||
| } | ||||
|  | ||||
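The DWF and Wilson actions now carry a boundary parameter that is converted with strToVec<Complex> into one phase per direction and handed to the fermion ImplParams. A standalone sketch of what such a string is assumed to contain, with whitespace-separated phases and antiperiodic boundary conditions in time (the exact format accepted by strToVec is an assumption here):

    #include <complex>
    #include <iostream>
    #include <sstream>
    #include <string>
    #include <vector>

    int main(void)
    {
        std::string boundary = "1 1 1 -1";   // hypothetical parameter value
        std::istringstream is(boundary);
        std::vector<std::complex<double>> phases;
        std::complex<double> z;
        while (is >> z) phases.push_back(z); // strToVec is assumed to tokenise similarly
        for (auto &p: phases) std::cout << p << " ";   // (1,0) (1,0) (1,0) (-1,0)
        std::cout << std::endl;
        return 0;
    }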
| @@ -131,4 +137,4 @@ END_MODULE_NAMESPACE | ||||
|  | ||||
| END_HADRONS_NAMESPACE | ||||
|  | ||||
| #endif // Hadrons_DWF_hpp_ | ||||
| #endif // Hadrons_MAction_DWF_hpp_ | ||||
|   | ||||
| @@ -27,8 +27,8 @@ See the full license in the file "LICENSE" in the top level distribution directo | ||||
| *************************************************************************************/ | ||||
| /*  END LEGAL */ | ||||
|  | ||||
| #ifndef Hadrons_Wilson_hpp_ | ||||
| #define Hadrons_Wilson_hpp_ | ||||
| #ifndef Hadrons_MAction_Wilson_hpp_ | ||||
| #define Hadrons_MAction_Wilson_hpp_ | ||||
|  | ||||
| #include <Grid/Hadrons/Global.hpp> | ||||
| #include <Grid/Hadrons/Module.hpp> | ||||
| @@ -46,14 +46,15 @@ class WilsonPar: Serializable | ||||
| public: | ||||
|     GRID_SERIALIZABLE_CLASS_MEMBERS(WilsonPar, | ||||
|                                     std::string, gauge, | ||||
|                                     double     , mass); | ||||
|                                     double     , mass, | ||||
|                                     std::string, boundary); | ||||
| }; | ||||
|  | ||||
| template <typename FImpl> | ||||
| class TWilson: public Module<WilsonPar> | ||||
| { | ||||
| public: | ||||
|     TYPE_ALIASES(FImpl,); | ||||
|     FGS_TYPE_ALIASES(FImpl,); | ||||
| public: | ||||
|     // constructor | ||||
|     TWilson(const std::string name); | ||||
| @@ -112,10 +113,15 @@ void TWilson<FImpl>::execute() | ||||
| { | ||||
|     LOG(Message) << "Setting up TWilson fermion matrix with m= " << par().mass | ||||
|                  << " using gauge field '" << par().gauge << "'" << std::endl; | ||||
|     LOG(Message) << "Fermion boundary conditions: " << par().boundary  | ||||
|                  << std::endl; | ||||
|     auto &U      = *env().template getObject<LatticeGaugeField>(par().gauge); | ||||
|     auto &grid   = *env().getGrid(); | ||||
|     auto &gridRb = *env().getRbGrid(); | ||||
|     FMat *fMatPt = new WilsonFermion<FImpl>(U, grid, gridRb, par().mass); | ||||
|     std::vector<Complex> boundary = strToVec<Complex>(par().boundary); | ||||
|     typename WilsonFermion<FImpl>::ImplParams implParams(boundary); | ||||
|     FMat *fMatPt = new WilsonFermion<FImpl>(U, grid, gridRb, par().mass, | ||||
|                                             implParams); | ||||
|     env().setObject(getName(), fMatPt); | ||||
| } | ||||
|  | ||||
|   | ||||
| @@ -27,8 +27,8 @@ See the full license in the file "LICENSE" in the top level distribution directo | ||||
| *************************************************************************************/ | ||||
| /*  END LEGAL */ | ||||
|  | ||||
| #ifndef Hadrons_Baryon_hpp_ | ||||
| #define Hadrons_Baryon_hpp_ | ||||
| #ifndef Hadrons_MContraction_Baryon_hpp_ | ||||
| #define Hadrons_MContraction_Baryon_hpp_ | ||||
|  | ||||
| #include <Grid/Hadrons/Global.hpp> | ||||
| #include <Grid/Hadrons/Module.hpp> | ||||
| @@ -55,9 +55,9 @@ template <typename FImpl1, typename FImpl2, typename FImpl3> | ||||
| class TBaryon: public Module<BaryonPar> | ||||
| { | ||||
| public: | ||||
|     TYPE_ALIASES(FImpl1, 1); | ||||
|     TYPE_ALIASES(FImpl2, 2); | ||||
|     TYPE_ALIASES(FImpl3, 3); | ||||
|     FERM_TYPE_ALIASES(FImpl1, 1); | ||||
|     FERM_TYPE_ALIASES(FImpl2, 2); | ||||
|     FERM_TYPE_ALIASES(FImpl3, 3); | ||||
|     class Result: Serializable | ||||
|     { | ||||
|     public: | ||||
| @@ -121,11 +121,11 @@ void TBaryon<FImpl1, FImpl2, FImpl3>::execute(void) | ||||
|      | ||||
|     // FIXME: do contractions | ||||
|      | ||||
|     write(writer, "meson", result); | ||||
|     // write(writer, "meson", result); | ||||
| } | ||||
|  | ||||
| END_MODULE_NAMESPACE | ||||
|  | ||||
| END_HADRONS_NAMESPACE | ||||
|  | ||||
| #endif // Hadrons_Baryon_hpp_ | ||||
| #endif // Hadrons_MContraction_Baryon_hpp_ | ||||
|   | ||||
| @@ -26,8 +26,8 @@ See the full license in the file "LICENSE" in the top level distribution directo | ||||
| *************************************************************************************/ | ||||
| /*  END LEGAL */ | ||||
|  | ||||
| #ifndef Hadrons_DiscLoop_hpp_ | ||||
| #define Hadrons_DiscLoop_hpp_ | ||||
| #ifndef Hadrons_MContraction_DiscLoop_hpp_ | ||||
| #define Hadrons_MContraction_DiscLoop_hpp_ | ||||
|  | ||||
| #include <Grid/Hadrons/Global.hpp> | ||||
| #include <Grid/Hadrons/Module.hpp> | ||||
| @@ -52,7 +52,7 @@ public: | ||||
| template <typename FImpl> | ||||
| class TDiscLoop: public Module<DiscLoopPar> | ||||
| { | ||||
|     TYPE_ALIASES(FImpl,); | ||||
|     FERM_TYPE_ALIASES(FImpl,); | ||||
|     class Result: Serializable | ||||
|     { | ||||
|     public: | ||||
| @@ -141,4 +141,4 @@ END_MODULE_NAMESPACE | ||||
|  | ||||
| END_HADRONS_NAMESPACE | ||||
|  | ||||
| #endif // Hadrons_DiscLoop_hpp_ | ||||
| #endif // Hadrons_MContraction_DiscLoop_hpp_ | ||||
|   | ||||
| @@ -26,8 +26,8 @@ See the full license in the file "LICENSE" in the top level distribution directo | ||||
| *************************************************************************************/ | ||||
| /*  END LEGAL */ | ||||
|  | ||||
| #ifndef Hadrons_Gamma3pt_hpp_ | ||||
| #define Hadrons_Gamma3pt_hpp_ | ||||
| #ifndef Hadrons_MContraction_Gamma3pt_hpp_ | ||||
| #define Hadrons_MContraction_Gamma3pt_hpp_ | ||||
|  | ||||
| #include <Grid/Hadrons/Global.hpp> | ||||
| #include <Grid/Hadrons/Module.hpp> | ||||
| @@ -72,9 +72,9 @@ public: | ||||
| template <typename FImpl1, typename FImpl2, typename FImpl3> | ||||
| class TGamma3pt: public Module<Gamma3ptPar> | ||||
| { | ||||
|     TYPE_ALIASES(FImpl1, 1); | ||||
|     TYPE_ALIASES(FImpl2, 2); | ||||
|     TYPE_ALIASES(FImpl3, 3); | ||||
|     FERM_TYPE_ALIASES(FImpl1, 1); | ||||
|     FERM_TYPE_ALIASES(FImpl2, 2); | ||||
|     FERM_TYPE_ALIASES(FImpl3, 3); | ||||
|     class Result: Serializable | ||||
|     { | ||||
|     public: | ||||
| @@ -167,4 +167,4 @@ END_MODULE_NAMESPACE | ||||
|  | ||||
| END_HADRONS_NAMESPACE | ||||
|  | ||||
| #endif // Hadrons_Gamma3pt_hpp_ | ||||
| #endif // Hadrons_MContraction_Gamma3pt_hpp_ | ||||
|   | ||||
| @@ -29,8 +29,8 @@ See the full license in the file "LICENSE" in the top level distribution directo | ||||
| *************************************************************************************/ | ||||
| /*  END LEGAL */ | ||||
|  | ||||
| #ifndef Hadrons_Meson_hpp_ | ||||
| #define Hadrons_Meson_hpp_ | ||||
| #ifndef Hadrons_MContraction_Meson_hpp_ | ||||
| #define Hadrons_MContraction_Meson_hpp_ | ||||
|  | ||||
| #include <Grid/Hadrons/Global.hpp> | ||||
| #include <Grid/Hadrons/Module.hpp> | ||||
| @@ -69,7 +69,7 @@ public: | ||||
|                                     std::string, q1, | ||||
|                                     std::string, q2, | ||||
|                                     std::string, gammas, | ||||
|                                     std::string, mom, | ||||
|                                     std::string, sink, | ||||
|                                     std::string, output); | ||||
| }; | ||||
|  | ||||
| @@ -77,8 +77,10 @@ template <typename FImpl1, typename FImpl2> | ||||
| class TMeson: public Module<MesonPar> | ||||
| { | ||||
| public: | ||||
|     TYPE_ALIASES(FImpl1, 1); | ||||
|     TYPE_ALIASES(FImpl2, 2); | ||||
|     FERM_TYPE_ALIASES(FImpl1, 1); | ||||
|     FERM_TYPE_ALIASES(FImpl2, 2); | ||||
|     FERM_TYPE_ALIASES(ScalarImplCR, Scalar); | ||||
|     SINK_TYPE_ALIASES(Scalar); | ||||
|     class Result: Serializable | ||||
|     { | ||||
|     public: | ||||
| @@ -115,7 +117,7 @@ TMeson<FImpl1, FImpl2>::TMeson(const std::string name) | ||||
| template <typename FImpl1, typename FImpl2> | ||||
| std::vector<std::string> TMeson<FImpl1, FImpl2>::getInput(void) | ||||
| { | ||||
|     std::vector<std::string> input = {par().q1, par().q2}; | ||||
|     std::vector<std::string> input = {par().q1, par().q2, par().sink}; | ||||
|      | ||||
|     return input; | ||||
| } | ||||
| @@ -131,12 +133,11 @@ std::vector<std::string> TMeson<FImpl1, FImpl2>::getOutput(void) | ||||
| template <typename FImpl1, typename FImpl2> | ||||
| void TMeson<FImpl1, FImpl2>::parseGammaString(std::vector<GammaPair> &gammaList) | ||||
| { | ||||
|     gammaList.clear(); | ||||
|     // Determine gamma matrices to insert at source/sink. | ||||
|     if (par().gammas.compare("all") == 0) | ||||
|     { | ||||
|         // Do all contractions. | ||||
|         unsigned int n_gam = Ns * Ns; | ||||
|         gammaList.resize(n_gam*n_gam); | ||||
|         for (unsigned int i = 1; i < Gamma::nGamma; i += 2) | ||||
|         { | ||||
|             for (unsigned int j = 1; j < Gamma::nGamma; j += 2) | ||||
| @@ -155,6 +156,9 @@ void TMeson<FImpl1, FImpl2>::parseGammaString(std::vector<GammaPair> &gammaList) | ||||
|  | ||||
|  | ||||
| // execution /////////////////////////////////////////////////////////////////// | ||||
| #define mesonConnected(q1, q2, gSnk, gSrc) \ | ||||
| (g5*(gSnk))*(q1)*(adj(gSrc)*g5)*adj(q2) | ||||
|  | ||||
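Written out, the mesonConnected macro combined with the trace and time-slice sum in the execute body below computes the standard connected meson two-point function, with $\gamma_5$-hermiticity used to turn the backward propagator into adj(q2):

$$ C_{\Gamma_{\rm snk}\Gamma_{\rm src}}(t) \;=\; \sum_{\vec{x}} {\rm tr}\!\left[\, \gamma_5\,\Gamma_{\rm snk}\, q_1(\vec{x},t)\, \Gamma_{\rm src}^{\dagger}\,\gamma_5\, q_2^{\dagger}(\vec{x},t) \,\right] $$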
| template <typename FImpl1, typename FImpl2> | ||||
| void TMeson<FImpl1, FImpl2>::execute(void) | ||||
| { | ||||
| @@ -163,44 +167,73 @@ void TMeson<FImpl1, FImpl2>::execute(void) | ||||
|                  << std::endl; | ||||
|      | ||||
|     CorrWriter             writer(par().output); | ||||
|     PropagatorField1       &q1 = *env().template getObject<PropagatorField1>(par().q1); | ||||
|     PropagatorField2       &q2 = *env().template getObject<PropagatorField2>(par().q2); | ||||
|     LatticeComplex         c(env().getGrid()); | ||||
|     Gamma                  g5(Gamma::Algebra::Gamma5); | ||||
|     std::vector<GammaPair> gammaList; | ||||
|     std::vector<TComplex>  buf; | ||||
|     std::vector<Result>    result; | ||||
|     std::vector<Real>      p; | ||||
|  | ||||
|     p  = strToVec<Real>(par().mom); | ||||
|     LatticeComplex         ph(env().getGrid()), coor(env().getGrid()); | ||||
|     Complex                i(0.0,1.0); | ||||
|     ph = zero; | ||||
|     for(unsigned int mu = 0; mu < env().getNd(); mu++) | ||||
|     { | ||||
|         LatticeCoordinate(coor, mu); | ||||
|         ph = ph + p[mu]*coor*((1./(env().getGrid()->_fdimensions[mu]))); | ||||
|     } | ||||
|     ph = exp((Real)(2*M_PI)*i*ph); | ||||
|     Gamma                  g5(Gamma::Algebra::Gamma5); | ||||
|     std::vector<GammaPair> gammaList; | ||||
|     int                    nt = env().getDim(Tp); | ||||
|      | ||||
|     parseGammaString(gammaList); | ||||
|  | ||||
|     result.resize(gammaList.size()); | ||||
|     for (unsigned int i = 0; i < result.size(); ++i) | ||||
|     { | ||||
|         result[i].gamma_snk = gammaList[i].first; | ||||
|         result[i].gamma_src = gammaList[i].second; | ||||
|         result[i].corr.resize(nt); | ||||
|     } | ||||
|     if (env().template isObjectOfType<SlicedPropagator1>(par().q1) and | ||||
|         env().template isObjectOfType<SlicedPropagator2>(par().q2)) | ||||
|     { | ||||
|         SlicedPropagator1 &q1 = *env().template getObject<SlicedPropagator1>(par().q1); | ||||
|         SlicedPropagator2 &q2 = *env().template getObject<SlicedPropagator2>(par().q2); | ||||
|          | ||||
|         LOG(Message) << "(propagator already sinked)" << std::endl; | ||||
|         for (unsigned int i = 0; i < result.size(); ++i) | ||||
|         { | ||||
|             Gamma gSnk(gammaList[i].first); | ||||
|             Gamma gSrc(gammaList[i].second); | ||||
|         c = trace((g5*gSnk)*q1*(adj(gSrc)*g5)*adj(q2))*ph; | ||||
|         sliceSum(c, buf, Tp); | ||||
|              | ||||
|         result[i].gamma_snk = gammaList[i].first; | ||||
|         result[i].gamma_src = gammaList[i].second; | ||||
|         result[i].corr.resize(buf.size()); | ||||
|             for (unsigned int t = 0; t < buf.size(); ++t) | ||||
|             { | ||||
|                 result[i].corr[t] = TensorRemove(trace(mesonConnected(q1[t], q2[t], gSnk, gSrc))); | ||||
|             } | ||||
|         } | ||||
|     } | ||||
|     else | ||||
|     { | ||||
|         PropagatorField1 &q1   = *env().template getObject<PropagatorField1>(par().q1); | ||||
|         PropagatorField2 &q2   = *env().template getObject<PropagatorField2>(par().q2); | ||||
|         LatticeComplex   c(env().getGrid()); | ||||
|          | ||||
|         LOG(Message) << "(using sink '" << par().sink << "')" << std::endl; | ||||
|         for (unsigned int i = 0; i < result.size(); ++i) | ||||
|         { | ||||
|             Gamma       gSnk(gammaList[i].first); | ||||
|             Gamma       gSrc(gammaList[i].second); | ||||
|             std::string ns; | ||||
|                  | ||||
|             ns = env().getModuleNamespace(env().getObjectModule(par().sink)); | ||||
|             if (ns == "MSource") | ||||
|             { | ||||
|                 PropagatorField1 &sink = | ||||
|                     *env().template getObject<PropagatorField1>(par().sink); | ||||
|                  | ||||
|                 c = trace(mesonConnected(q1, q2, gSnk, gSrc)*sink); | ||||
|                 sliceSum(c, buf, Tp); | ||||
|             } | ||||
|             else if (ns == "MSink") | ||||
|             { | ||||
|                 SinkFnScalar &sink = *env().template getObject<SinkFnScalar>(par().sink); | ||||
|                  | ||||
|                 c   = trace(mesonConnected(q1, q2, gSnk, gSrc)); | ||||
|                 buf = sink(c); | ||||
|             } | ||||
|             for (unsigned int t = 0; t < buf.size(); ++t) | ||||
|             { | ||||
|                 result[i].corr[t] = TensorRemove(buf[t]); | ||||
|             } | ||||
|         } | ||||
|     } | ||||
|     write(writer, "meson", result); | ||||
| } | ||||
|  | ||||
| @@ -208,4 +241,4 @@ END_MODULE_NAMESPACE | ||||
|  | ||||
| END_HADRONS_NAMESPACE | ||||
|  | ||||
| #endif // Hadrons_Meson_hpp_ | ||||
| #endif // Hadrons_MContraction_Meson_hpp_ | ||||
|   | ||||
| @@ -26,8 +26,8 @@ See the full license in the file "LICENSE" in the top level distribution directo | ||||
| *************************************************************************************/ | ||||
| /*  END LEGAL */ | ||||
|  | ||||
| #ifndef Hadrons_WeakHamiltonian_hpp_ | ||||
| #define Hadrons_WeakHamiltonian_hpp_ | ||||
| #ifndef Hadrons_MContraction_WeakHamiltonian_hpp_ | ||||
| #define Hadrons_MContraction_WeakHamiltonian_hpp_ | ||||
|  | ||||
| #include <Grid/Hadrons/Global.hpp> | ||||
| #include <Grid/Hadrons/Module.hpp> | ||||
| @@ -83,7 +83,7 @@ public: | ||||
| class T##modname: public Module<WeakHamiltonianPar>\ | ||||
| {\ | ||||
| public:\ | ||||
|     TYPE_ALIASES(FIMPL,)\ | ||||
|     FERM_TYPE_ALIASES(FIMPL,)\ | ||||
|     class Result: Serializable\ | ||||
|     {\ | ||||
|     public:\ | ||||
| @@ -111,4 +111,4 @@ END_MODULE_NAMESPACE | ||||
|  | ||||
| END_HADRONS_NAMESPACE | ||||
|  | ||||
| #endif // Hadrons_WeakHamiltonian_hpp_ | ||||
| #endif // Hadrons_MContraction_WeakHamiltonian_hpp_ | ||||
|   | ||||
| @@ -26,8 +26,8 @@ See the full license in the file "LICENSE" in the top level distribution directo | ||||
| *************************************************************************************/ | ||||
| /*  END LEGAL */ | ||||
|  | ||||
| #ifndef Hadrons_WeakHamiltonianEye_hpp_ | ||||
| #define Hadrons_WeakHamiltonianEye_hpp_ | ||||
| #ifndef Hadrons_MContraction_WeakHamiltonianEye_hpp_ | ||||
| #define Hadrons_MContraction_WeakHamiltonianEye_hpp_ | ||||
|  | ||||
| #include <Grid/Hadrons/Modules/MContraction/WeakHamiltonian.hpp> | ||||
|  | ||||
| @@ -55,4 +55,4 @@ END_MODULE_NAMESPACE | ||||
|  | ||||
| END_HADRONS_NAMESPACE | ||||
|  | ||||
| #endif // Hadrons_WeakHamiltonianEye_hpp_ | ||||
| #endif // Hadrons_MContraction_WeakHamiltonianEye_hpp_ | ||||
|   | ||||
| @@ -26,8 +26,8 @@ See the full license in the file "LICENSE" in the top level distribution directo | ||||
| *************************************************************************************/ | ||||
| /*  END LEGAL */ | ||||
|  | ||||
| #ifndef Hadrons_WeakHamiltonianNonEye_hpp_ | ||||
| #define Hadrons_WeakHamiltonianNonEye_hpp_ | ||||
| #ifndef Hadrons_MContraction_WeakHamiltonianNonEye_hpp_ | ||||
| #define Hadrons_MContraction_WeakHamiltonianNonEye_hpp_ | ||||
|  | ||||
| #include <Grid/Hadrons/Modules/MContraction/WeakHamiltonian.hpp> | ||||
|  | ||||
| @@ -54,4 +54,4 @@ END_MODULE_NAMESPACE | ||||
|  | ||||
| END_HADRONS_NAMESPACE | ||||
|  | ||||
| #endif // Hadrons_WeakHamiltonianNonEye_hpp_ | ||||
| #endif // Hadrons_MContraction_WeakHamiltonianNonEye_hpp_ | ||||
|   | ||||
| @@ -26,8 +26,8 @@ See the full license in the file "LICENSE" in the top level distribution directo | ||||
| *************************************************************************************/ | ||||
| /*  END LEGAL */ | ||||
|  | ||||
| #ifndef Hadrons_WeakNeutral4ptDisc_hpp_ | ||||
| #define Hadrons_WeakNeutral4ptDisc_hpp_ | ||||
| #ifndef Hadrons_MContraction_WeakNeutral4ptDisc_hpp_ | ||||
| #define Hadrons_MContraction_WeakNeutral4ptDisc_hpp_ | ||||
|  | ||||
| #include <Grid/Hadrons/Modules/MContraction/WeakHamiltonian.hpp> | ||||
|  | ||||
| @@ -56,4 +56,4 @@ END_MODULE_NAMESPACE | ||||
|  | ||||
| END_HADRONS_NAMESPACE | ||||
|  | ||||
| #endif // Hadrons_WeakNeutral4ptDisc_hpp_ | ||||
| #endif // Hadrons_MContraction_WeakNeutral4ptDisc_hpp_ | ||||
|   | ||||
| @@ -1,34 +1,5 @@ | ||||
| /*************************************************************************************
 | ||||
| 
 | ||||
| Grid physics library, www.github.com/paboyle/Grid  | ||||
| 
 | ||||
| Source file: extras/Hadrons/Modules/Quark.hpp | ||||
| 
 | ||||
| Copyright (C) 2015 | ||||
| Copyright (C) 2016 | ||||
| 
 | ||||
| Author: Antonin Portelli <antonin.portelli@me.com> | ||||
| 
 | ||||
| This program is free software; you can redistribute it and/or modify | ||||
| it under the terms of the GNU General Public License as published by | ||||
| the Free Software Foundation; either version 2 of the License, or | ||||
| (at your option) any later version. | ||||
| 
 | ||||
| This program is distributed in the hope that it will be useful, | ||||
| but WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
| MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the | ||||
| GNU General Public License for more details. | ||||
| 
 | ||||
| You should have received a copy of the GNU General Public License along | ||||
| with this program; if not, write to the Free Software Foundation, Inc., | ||||
| 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||||
| 
 | ||||
| See the full license in the file "LICENSE" in the top level distribution directory | ||||
| *************************************************************************************/ | ||||
| /*  END LEGAL */ | ||||
| 
 | ||||
| #ifndef Hadrons_Quark_hpp_ | ||||
| #define Hadrons_Quark_hpp_ | ||||
| #ifndef Hadrons_MFermion_GaugeProp_hpp_ | ||||
| #define Hadrons_MFermion_GaugeProp_hpp_ | ||||
| 
 | ||||
| #include <Grid/Hadrons/Global.hpp> | ||||
| #include <Grid/Hadrons/Module.hpp> | ||||
| @@ -37,27 +8,29 @@ See the full license in the file "LICENSE" in the top level distribution directo | ||||
| BEGIN_HADRONS_NAMESPACE | ||||
| 
 | ||||
| /******************************************************************************
 | ||||
|  *                               TQuark                                       * | ||||
|  *                                GaugeProp                                   * | ||||
|  ******************************************************************************/ | ||||
| class QuarkPar: Serializable | ||||
| BEGIN_MODULE_NAMESPACE(MFermion) | ||||
| 
 | ||||
| class GaugePropPar: Serializable | ||||
| { | ||||
| public: | ||||
|     GRID_SERIALIZABLE_CLASS_MEMBERS(QuarkPar, | ||||
|     GRID_SERIALIZABLE_CLASS_MEMBERS(GaugePropPar, | ||||
|                                     std::string, source, | ||||
|                                     std::string, solver); | ||||
| }; | ||||
| 
 | ||||
| template <typename FImpl> | ||||
| class TQuark: public Module<QuarkPar> | ||||
| class TGaugeProp: public Module<GaugePropPar> | ||||
| { | ||||
| public: | ||||
|     TYPE_ALIASES(FImpl,); | ||||
|     FGS_TYPE_ALIASES(FImpl,); | ||||
| public: | ||||
|     // constructor
 | ||||
|     TQuark(const std::string name); | ||||
|     TGaugeProp(const std::string name); | ||||
|     // destructor
 | ||||
|     virtual ~TQuark(void) = default; | ||||
|     // dependencies/products
 | ||||
|     virtual ~TGaugeProp(void) = default; | ||||
|     // dependency relation
 | ||||
|     virtual std::vector<std::string> getInput(void); | ||||
|     virtual std::vector<std::string> getOutput(void); | ||||
|     // setup
 | ||||
| @@ -69,20 +42,20 @@ private: | ||||
|     SolverFn     *solver_{nullptr}; | ||||
| }; | ||||
| 
 | ||||
| MODULE_REGISTER(Quark, TQuark<FIMPL>); | ||||
| MODULE_REGISTER_NS(GaugeProp, TGaugeProp<FIMPL>, MFermion); | ||||
| 
 | ||||
| /****************************************************************************** | ||||
|  *                          TQuark implementation                             * | ||||
|  *                      TGaugeProp implementation                             * | ||||
|  ******************************************************************************/ | ||||
| // constructor ///////////////////////////////////////////////////////////////// | ||||
| template <typename FImpl> | ||||
| TQuark<FImpl>::TQuark(const std::string name) | ||||
| : Module(name) | ||||
| TGaugeProp<FImpl>::TGaugeProp(const std::string name) | ||||
| : Module<GaugePropPar>(name) | ||||
| {} | ||||
| 
 | ||||
| // dependencies/products /////////////////////////////////////////////////////// | ||||
| template <typename FImpl> | ||||
| std::vector<std::string> TQuark<FImpl>::getInput(void) | ||||
| std::vector<std::string> TGaugeProp<FImpl>::getInput(void) | ||||
| { | ||||
|     std::vector<std::string> in = {par().source, par().solver}; | ||||
|      | ||||
| @@ -90,7 +63,7 @@ std::vector<std::string> TQuark<FImpl>::getInput(void) | ||||
| } | ||||
| 
 | ||||
| template <typename FImpl> | ||||
| std::vector<std::string> TQuark<FImpl>::getOutput(void) | ||||
| std::vector<std::string> TGaugeProp<FImpl>::getOutput(void) | ||||
| { | ||||
|     std::vector<std::string> out = {getName(), getName() + "_5d"}; | ||||
|      | ||||
| @@ -99,7 +72,7 @@ std::vector<std::string> TQuark<FImpl>::getOutput(void) | ||||
| 
 | ||||
| // setup /////////////////////////////////////////////////////////////////////// | ||||
| template <typename FImpl> | ||||
| void TQuark<FImpl>::setup(void) | ||||
| void TGaugeProp<FImpl>::setup(void) | ||||
| { | ||||
|     Ls_ = env().getObjectLs(par().solver); | ||||
|     env().template registerLattice<PropagatorField>(getName()); | ||||
| @@ -111,7 +84,7 @@ void TQuark<FImpl>::setup(void) | ||||
| 
 | ||||
| // execution /////////////////////////////////////////////////////////////////// | ||||
| template <typename FImpl> | ||||
| void TQuark<FImpl>::execute(void) | ||||
| void TGaugeProp<FImpl>::execute(void) | ||||
| { | ||||
|     LOG(Message) << "Computing quark propagator '" << getName() << "'" | ||||
|     << std::endl; | ||||
| @@ -180,6 +153,8 @@ void TQuark<FImpl>::execute(void) | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| END_MODULE_NAMESPACE | ||||
| 
 | ||||
| END_HADRONS_NAMESPACE | ||||
| 
 | ||||
| #endif // Hadrons_Quark_hpp_ | ||||
| #endif // Hadrons_MFermion_GaugeProp_hpp_ | ||||
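Editor's note (not part of the diff): the module takes a source and a solver by name, and registers both a 4d propagator under getName() and a getName() + "_5d" companion for the case where the solver's action has Ls > 1 (cf. Ls_ = env().getObjectLs(par().solver) in setup() above). As a hedged reading of the elided execute() body, the solver is applied to each spin-colour column of the source,

\[
\psi_{(\alpha,a)} \;=\; \mathcal{D}^{-1}\,\eta_{(\alpha,a)},
\qquad \alpha = 1,\dots,4,\quad a = 1,\dots,N_c,
\]

and the columns are reassembled into the propagator field registered in setup().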
| @@ -65,7 +65,7 @@ void TLoad::setup(void) | ||||
| // execution /////////////////////////////////////////////////////////////////// | ||||
| void TLoad::execute(void) | ||||
| { | ||||
|     NerscField  header; | ||||
|     FieldMetaData  header; | ||||
|     std::string fileName = par().file + "." | ||||
|                            + std::to_string(env().getTrajectory()); | ||||
|      | ||||
| @@ -74,5 +74,5 @@ void TLoad::execute(void) | ||||
|     LatticeGaugeField &U = *env().createLattice<LatticeGaugeField>(getName()); | ||||
|     NerscIO::readConfiguration(U, header, fileName); | ||||
|     LOG(Message) << "NERSC header:" << std::endl; | ||||
|     dump_nersc_header(header, LOG(Message)); | ||||
|     dump_meta_data(header, LOG(Message)); | ||||
| } | ||||
|   | ||||
| @@ -27,8 +27,8 @@ See the full license in the file "LICENSE" in the top level distribution directo | ||||
| *************************************************************************************/ | ||||
| /*  END LEGAL */ | ||||
|  | ||||
| #ifndef Hadrons_Load_hpp_ | ||||
| #define Hadrons_Load_hpp_ | ||||
| #ifndef Hadrons_MGauge_Load_hpp_ | ||||
| #define Hadrons_MGauge_Load_hpp_ | ||||
|  | ||||
| #include <Grid/Hadrons/Global.hpp> | ||||
| #include <Grid/Hadrons/Module.hpp> | ||||
| @@ -70,4 +70,4 @@ END_MODULE_NAMESPACE | ||||
|  | ||||
| END_HADRONS_NAMESPACE | ||||
|  | ||||
| #endif // Hadrons_Load_hpp_ | ||||
| #endif // Hadrons_MGauge_Load_hpp_ | ||||
|   | ||||
| @@ -27,8 +27,8 @@ See the full license in the file "LICENSE" in the top level distribution directo | ||||
| *************************************************************************************/ | ||||
| /*  END LEGAL */ | ||||
|  | ||||
| #ifndef Hadrons_Random_hpp_ | ||||
| #define Hadrons_Random_hpp_ | ||||
| #ifndef Hadrons_MGauge_Random_hpp_ | ||||
| #define Hadrons_MGauge_Random_hpp_ | ||||
|  | ||||
| #include <Grid/Hadrons/Global.hpp> | ||||
| #include <Grid/Hadrons/Module.hpp> | ||||
| @@ -63,4 +63,4 @@ END_MODULE_NAMESPACE | ||||
|  | ||||
| END_HADRONS_NAMESPACE | ||||
|  | ||||
| #endif // Hadrons_Random_hpp_ | ||||
| #endif // Hadrons_MGauge_Random_hpp_ | ||||
|   | ||||
							
								
								
									
extras/Hadrons/Modules/MGauge/StochEm.cc (new file, 88 lines)
| @@ -0,0 +1,88 @@ | ||||
| /************************************************************************************* | ||||
|  | ||||
| Grid physics library, www.github.com/paboyle/Grid  | ||||
|  | ||||
| Source file: extras/Hadrons/Modules/MGauge/StochEm.cc | ||||
|  | ||||
| Copyright (C) 2015 | ||||
| Copyright (C) 2016 | ||||
|  | ||||
|  | ||||
| This program is free software; you can redistribute it and/or modify | ||||
| it under the terms of the GNU General Public License as published by | ||||
| the Free Software Foundation; either version 2 of the License, or | ||||
| (at your option) any later version. | ||||
|  | ||||
| This program is distributed in the hope that it will be useful, | ||||
| but WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
| MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the | ||||
| GNU General Public License for more details. | ||||
|  | ||||
| You should have received a copy of the GNU General Public License along | ||||
| with this program; if not, write to the Free Software Foundation, Inc., | ||||
| 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||||
|  | ||||
| See the full license in the file "LICENSE" in the top level distribution directory | ||||
| *************************************************************************************/ | ||||
| /*  END LEGAL */ | ||||
| #include <Grid/Hadrons/Modules/MGauge/StochEm.hpp> | ||||
|  | ||||
| using namespace Grid; | ||||
| using namespace Hadrons; | ||||
| using namespace MGauge; | ||||
|  | ||||
| /****************************************************************************** | ||||
| *                  TStochEm implementation                             * | ||||
| ******************************************************************************/ | ||||
| // constructor ///////////////////////////////////////////////////////////////// | ||||
| TStochEm::TStochEm(const std::string name) | ||||
| : Module<StochEmPar>(name) | ||||
| {} | ||||
|  | ||||
| // dependencies/products /////////////////////////////////////////////////////// | ||||
| std::vector<std::string> TStochEm::getInput(void) | ||||
| { | ||||
|     std::vector<std::string> in; | ||||
|      | ||||
|     return in; | ||||
| } | ||||
|  | ||||
| std::vector<std::string> TStochEm::getOutput(void) | ||||
| { | ||||
|     std::vector<std::string> out = {getName()}; | ||||
|      | ||||
|     return out; | ||||
| } | ||||
|  | ||||
| // setup /////////////////////////////////////////////////////////////////////// | ||||
| void TStochEm::setup(void) | ||||
| { | ||||
|     if (!env().hasRegisteredObject("_" + getName() + "_weight")) | ||||
|     { | ||||
|         env().registerLattice<EmComp>("_" + getName() + "_weight"); | ||||
|     } | ||||
|     env().registerLattice<EmField>(getName()); | ||||
| } | ||||
|  | ||||
| // execution /////////////////////////////////////////////////////////////////// | ||||
| void TStochEm::execute(void) | ||||
| { | ||||
|     PhotonR photon(par().gauge, par().zmScheme); | ||||
|     EmField &a = *env().createLattice<EmField>(getName()); | ||||
|     EmComp  *w; | ||||
|      | ||||
|     if (!env().hasCreatedObject("_" + getName() + "_weight")) | ||||
|     { | ||||
|         LOG(Message) << "Caching stochastic EM potential weight (gauge: " | ||||
|                      << par().gauge << ", zero-mode scheme: " | ||||
|                      << par().zmScheme << ")..." << std::endl; | ||||
|         w = env().createLattice<EmComp>("_" + getName() + "_weight"); | ||||
|         photon.StochasticWeight(*w); | ||||
|     } | ||||
|     else | ||||
|     { | ||||
|         w = env().getObject<EmComp>("_" + getName() + "_weight"); | ||||
|     } | ||||
|     LOG(Message) << "Generating stochastic EM potential..." << std::endl; | ||||
|     photon.StochasticField(a, *env().get4dRng(), *w); | ||||
| } | ||||
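Editor's note: the setup()/execute() pair above uses the Hadrons environment as a cache. The stochastic weight is registered under an underscore-prefixed name, computed once on the first trajectory, and merely looked up afterwards. A condensed sketch of that create-or-reuse idiom, using only the environment calls visible in the diff (the free-standing wrapper functions and their names are hypothetical, and the Hadrons Environment/Photon headers from this tree are assumed):

// Hypothetical wrappers illustrating the caching idiom of TStochEm above.
// Environment, EmComp, PhotonR and the env calls are exactly those used in the diff.
void registerWeight(Environment &env, const std::string &cacheName)
{
    if (!env.hasRegisteredObject(cacheName))   // register the cache slot only once
    {
        env.registerLattice<EmComp>(cacheName);
    }
}

EmComp *getOrComputeWeight(Environment &env, const std::string &cacheName,
                           PhotonR &photon)
{
    if (!env.hasCreatedObject(cacheName))      // first use: allocate and fill
    {
        EmComp *w = env.createLattice<EmComp>(cacheName);
        photon.StochasticWeight(*w);
        return w;
    }
    return env.getObject<EmComp>(cacheName);   // later uses: reuse the cached field
}

The same idiom reappears below in TChargedProp and TFreeProp for the free momentum-space scalar propagator.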
							
								
								
									
extras/Hadrons/Modules/MGauge/StochEm.hpp (new file, 75 lines)
| @@ -0,0 +1,75 @@ | ||||
| /************************************************************************************* | ||||
|  | ||||
| Grid physics library, www.github.com/paboyle/Grid  | ||||
|  | ||||
| Source file: extras/Hadrons/Modules/MGauge/StochEm.hpp | ||||
|  | ||||
| Copyright (C) 2015 | ||||
| Copyright (C) 2016 | ||||
|  | ||||
|  | ||||
| This program is free software; you can redistribute it and/or modify | ||||
| it under the terms of the GNU General Public License as published by | ||||
| the Free Software Foundation; either version 2 of the License, or | ||||
| (at your option) any later version. | ||||
|  | ||||
| This program is distributed in the hope that it will be useful, | ||||
| but WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
| MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the | ||||
| GNU General Public License for more details. | ||||
|  | ||||
| You should have received a copy of the GNU General Public License along | ||||
| with this program; if not, write to the Free Software Foundation, Inc., | ||||
| 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||||
|  | ||||
| See the full license in the file "LICENSE" in the top level distribution directory | ||||
| *************************************************************************************/ | ||||
| /*  END LEGAL */ | ||||
| #ifndef Hadrons_MGauge_StochEm_hpp_ | ||||
| #define Hadrons_MGauge_StochEm_hpp_ | ||||
|  | ||||
| #include <Grid/Hadrons/Global.hpp> | ||||
| #include <Grid/Hadrons/Module.hpp> | ||||
| #include <Grid/Hadrons/ModuleFactory.hpp> | ||||
|  | ||||
| BEGIN_HADRONS_NAMESPACE | ||||
|  | ||||
| /****************************************************************************** | ||||
|  *                         StochEm                                 * | ||||
|  ******************************************************************************/ | ||||
| BEGIN_MODULE_NAMESPACE(MGauge) | ||||
|  | ||||
| class StochEmPar: Serializable | ||||
| { | ||||
| public: | ||||
|     GRID_SERIALIZABLE_CLASS_MEMBERS(StochEmPar, | ||||
|                                     PhotonR::Gauge,    gauge, | ||||
|                                     PhotonR::ZmScheme, zmScheme); | ||||
| }; | ||||
|  | ||||
| class TStochEm: public Module<StochEmPar> | ||||
| { | ||||
| public: | ||||
|     typedef PhotonR::GaugeField     EmField; | ||||
|     typedef PhotonR::GaugeLinkField EmComp; | ||||
| public: | ||||
|     // constructor | ||||
|     TStochEm(const std::string name); | ||||
|     // destructor | ||||
|     virtual ~TStochEm(void) = default; | ||||
|     // dependency relation | ||||
|     virtual std::vector<std::string> getInput(void); | ||||
|     virtual std::vector<std::string> getOutput(void); | ||||
|     // setup | ||||
|     virtual void setup(void); | ||||
|     // execution | ||||
|     virtual void execute(void); | ||||
| }; | ||||
|  | ||||
| MODULE_REGISTER_NS(StochEm, TStochEm, MGauge); | ||||
|  | ||||
| END_MODULE_NAMESPACE | ||||
|  | ||||
| END_HADRONS_NAMESPACE | ||||
|  | ||||
| #endif // Hadrons_MGauge_StochEm_hpp_ | ||||
| @@ -27,8 +27,8 @@ See the full license in the file "LICENSE" in the top level distribution directo | ||||
| *************************************************************************************/ | ||||
| /*  END LEGAL */ | ||||
|  | ||||
| #ifndef Hadrons_Unit_hpp_ | ||||
| #define Hadrons_Unit_hpp_ | ||||
| #ifndef Hadrons_MGauge_Unit_hpp_ | ||||
| #define Hadrons_MGauge_Unit_hpp_ | ||||
|  | ||||
| #include <Grid/Hadrons/Global.hpp> | ||||
| #include <Grid/Hadrons/Module.hpp> | ||||
| @@ -63,4 +63,4 @@ END_MODULE_NAMESPACE | ||||
|  | ||||
| END_HADRONS_NAMESPACE | ||||
|  | ||||
| #endif // Hadrons_Unit_hpp_ | ||||
| #endif // Hadrons_MGauge_Unit_hpp_ | ||||
|   | ||||
| @@ -26,8 +26,8 @@ See the full license in the file "LICENSE" in the top level distribution directo | ||||
| *************************************************************************************/ | ||||
| /*  END LEGAL */ | ||||
|  | ||||
| #ifndef Hadrons_NoiseLoop_hpp_ | ||||
| #define Hadrons_NoiseLoop_hpp_ | ||||
| #ifndef Hadrons_MLoop_NoiseLoop_hpp_ | ||||
| #define Hadrons_MLoop_NoiseLoop_hpp_ | ||||
|  | ||||
| #include <Grid/Hadrons/Global.hpp> | ||||
| #include <Grid/Hadrons/Module.hpp> | ||||
| @@ -65,7 +65,7 @@ template <typename FImpl> | ||||
| class TNoiseLoop: public Module<NoiseLoopPar> | ||||
| { | ||||
| public: | ||||
|     TYPE_ALIASES(FImpl,); | ||||
|     FERM_TYPE_ALIASES(FImpl,); | ||||
| public: | ||||
|     // constructor | ||||
|     TNoiseLoop(const std::string name); | ||||
| @@ -129,4 +129,4 @@ END_MODULE_NAMESPACE | ||||
|  | ||||
| END_HADRONS_NAMESPACE | ||||
|  | ||||
| #endif // Hadrons_NoiseLoop_hpp_ | ||||
| #endif // Hadrons_MLoop_NoiseLoop_hpp_ | ||||
|   | ||||
							
								
								
									
extras/Hadrons/Modules/MScalar/ChargedProp.cc (new file, 226 lines)
| @@ -0,0 +1,226 @@ | ||||
| #include <Grid/Hadrons/Modules/MScalar/ChargedProp.hpp> | ||||
| #include <Grid/Hadrons/Modules/MScalar/Scalar.hpp> | ||||
|  | ||||
| using namespace Grid; | ||||
| using namespace Hadrons; | ||||
| using namespace MScalar; | ||||
|  | ||||
| /****************************************************************************** | ||||
| *                     TChargedProp implementation                             * | ||||
| ******************************************************************************/ | ||||
| // constructor ///////////////////////////////////////////////////////////////// | ||||
| TChargedProp::TChargedProp(const std::string name) | ||||
| : Module<ChargedPropPar>(name) | ||||
| {} | ||||
|  | ||||
| // dependencies/products /////////////////////////////////////////////////////// | ||||
| std::vector<std::string> TChargedProp::getInput(void) | ||||
| { | ||||
|     std::vector<std::string> in = {par().source, par().emField}; | ||||
|      | ||||
|     return in; | ||||
| } | ||||
|  | ||||
| std::vector<std::string> TChargedProp::getOutput(void) | ||||
| { | ||||
|     std::vector<std::string> out = {getName()}; | ||||
|      | ||||
|     return out; | ||||
| } | ||||
|  | ||||
| // setup /////////////////////////////////////////////////////////////////////// | ||||
| void TChargedProp::setup(void) | ||||
| { | ||||
|     freeMomPropName_ = FREEMOMPROP(par().mass); | ||||
|     phaseName_.clear(); | ||||
|     for (unsigned int mu = 0; mu < env().getNd(); ++mu) | ||||
|     { | ||||
|         phaseName_.push_back("_shiftphase_" + std::to_string(mu)); | ||||
|     } | ||||
|     GFSrcName_ = "_" + getName() + "_DinvSrc"; | ||||
|     if (!env().hasRegisteredObject(freeMomPropName_)) | ||||
|     { | ||||
|         env().registerLattice<ScalarField>(freeMomPropName_); | ||||
|     } | ||||
|     if (!env().hasRegisteredObject(phaseName_[0])) | ||||
|     { | ||||
|         for (unsigned int mu = 0; mu < env().getNd(); ++mu) | ||||
|         { | ||||
|             env().registerLattice<ScalarField>(phaseName_[mu]); | ||||
|         } | ||||
|     } | ||||
|     if (!env().hasRegisteredObject(GFSrcName_)) | ||||
|     { | ||||
|         env().registerLattice<ScalarField>(GFSrcName_); | ||||
|     } | ||||
|     env().registerLattice<ScalarField>(getName()); | ||||
| } | ||||
|  | ||||
| // execution /////////////////////////////////////////////////////////////////// | ||||
| void TChargedProp::execute(void) | ||||
| { | ||||
|     // CACHING ANALYTIC EXPRESSIONS | ||||
|     ScalarField &source = *env().getObject<ScalarField>(par().source); | ||||
|     Complex     ci(0.0,1.0); | ||||
|     FFT         fft(env().getGrid()); | ||||
|      | ||||
|     // cache free scalar propagator | ||||
|     if (!env().hasCreatedObject(freeMomPropName_)) | ||||
|     { | ||||
|         LOG(Message) << "Caching momentum space free scalar propagator" | ||||
|                      << " (mass= " << par().mass << ")..." << std::endl; | ||||
|         freeMomProp_ = env().createLattice<ScalarField>(freeMomPropName_); | ||||
|         SIMPL::MomentumSpacePropagator(*freeMomProp_, par().mass); | ||||
|     } | ||||
|     else | ||||
|     { | ||||
|         freeMomProp_ = env().getObject<ScalarField>(freeMomPropName_); | ||||
|     } | ||||
|     // cache G*F*src | ||||
|     if (!env().hasCreatedObject(GFSrcName_)) | ||||
|          | ||||
|     { | ||||
|         GFSrc_ = env().createLattice<ScalarField>(GFSrcName_); | ||||
|         fft.FFT_all_dim(*GFSrc_, source, FFT::forward); | ||||
|         *GFSrc_ = (*freeMomProp_)*(*GFSrc_); | ||||
|     } | ||||
|     else | ||||
|     { | ||||
|         GFSrc_ = env().getObject<ScalarField>(GFSrcName_); | ||||
|     } | ||||
|     // cache phases | ||||
|     if (!env().hasCreatedObject(phaseName_[0])) | ||||
|     { | ||||
|         std::vector<int> &l = env().getGrid()->_fdimensions; | ||||
|          | ||||
|         LOG(Message) << "Caching shift phases..." << std::endl; | ||||
|         for (unsigned int mu = 0; mu < env().getNd(); ++mu) | ||||
|         { | ||||
|             Real    twoPiL = M_PI*2./l[mu]; | ||||
|              | ||||
|             phase_.push_back(env().createLattice<ScalarField>(phaseName_[mu])); | ||||
|             LatticeCoordinate(*(phase_[mu]), mu); | ||||
|             *(phase_[mu]) = exp(ci*twoPiL*(*(phase_[mu]))); | ||||
|         } | ||||
|     } | ||||
|     else | ||||
|     { | ||||
|         for (unsigned int mu = 0; mu < env().getNd(); ++mu) | ||||
|         { | ||||
|             phase_.push_back(env().getObject<ScalarField>(phaseName_[mu])); | ||||
|         } | ||||
|     } | ||||
|  | ||||
|     // PROPAGATOR CALCULATION | ||||
|     LOG(Message) << "Computing charged scalar propagator" | ||||
|                  << " (mass= " << par().mass | ||||
|                  << ", charge= " << par().charge << ")..." << std::endl; | ||||
|      | ||||
|     ScalarField &prop   = *env().createLattice<ScalarField>(getName()); | ||||
|     ScalarField buf(env().getGrid()); | ||||
|     ScalarField &GFSrc = *GFSrc_, &G = *freeMomProp_; | ||||
|     double      q = par().charge; | ||||
|      | ||||
|     // G*F*Src | ||||
|     prop = GFSrc; | ||||
|  | ||||
|     // - q*G*momD1*G*F*Src (momD1 = F*D1*Finv) | ||||
|     buf = GFSrc; | ||||
|     momD1(buf, fft); | ||||
|     buf = G*buf; | ||||
|     prop = prop - q*buf; | ||||
|  | ||||
|     // + q^2*G*momD1*G*momD1*G*F*Src (here buf = G*momD1*G*F*Src) | ||||
|     momD1(buf, fft); | ||||
|     prop = prop + q*q*G*buf; | ||||
|  | ||||
|     // - q^2*G*momD2*G*F*Src (momD2 = F*D2*Finv) | ||||
|     buf = GFSrc; | ||||
|     momD2(buf, fft); | ||||
|     prop = prop - q*q*G*buf; | ||||
|  | ||||
|     // final FT | ||||
|     fft.FFT_all_dim(prop, prop, FFT::backward); | ||||
|      | ||||
|     // OUTPUT IF NECESSARY | ||||
|     if (!par().output.empty()) | ||||
|     { | ||||
|         std::string           filename = par().output + "." + | ||||
|                                          std::to_string(env().getTrajectory()); | ||||
|          | ||||
|         LOG(Message) << "Saving zero-momentum projection to '" | ||||
|                      << filename << "'..." << std::endl; | ||||
|          | ||||
|         CorrWriter            writer(filename); | ||||
|         std::vector<TComplex> vecBuf; | ||||
|         std::vector<Complex>  result; | ||||
|          | ||||
|         sliceSum(prop, vecBuf, Tp); | ||||
|         result.resize(vecBuf.size()); | ||||
|         for (unsigned int t = 0; t < vecBuf.size(); ++t) | ||||
|         { | ||||
|             result[t] = TensorRemove(vecBuf[t]); | ||||
|         } | ||||
|         write(writer, "charge", q); | ||||
|         write(writer, "prop", result); | ||||
|     } | ||||
| } | ||||
|  | ||||
| void TChargedProp::momD1(ScalarField &s, FFT &fft) | ||||
| { | ||||
|     EmField     &A = *env().getObject<EmField>(par().emField); | ||||
|     ScalarField buf(env().getGrid()), result(env().getGrid()), | ||||
|                 Amu(env().getGrid()); | ||||
|     Complex     ci(0.0,1.0); | ||||
|  | ||||
|     result = zero; | ||||
|  | ||||
|     for (unsigned int mu = 0; mu < env().getNd(); ++mu) | ||||
|     { | ||||
|         Amu = peekLorentz(A, mu); | ||||
|         buf = (*phase_[mu])*s; | ||||
|         fft.FFT_all_dim(buf, buf, FFT::backward); | ||||
|         buf = Amu*buf; | ||||
|         fft.FFT_all_dim(buf, buf, FFT::forward); | ||||
|         result = result - ci*buf; | ||||
|     } | ||||
|     fft.FFT_all_dim(s, s, FFT::backward); | ||||
|     for (unsigned int mu = 0; mu < env().getNd(); ++mu) | ||||
|     { | ||||
|         Amu = peekLorentz(A, mu); | ||||
|         buf = Amu*s; | ||||
|         fft.FFT_all_dim(buf, buf, FFT::forward); | ||||
|         result = result + ci*adj(*phase_[mu])*buf; | ||||
|     } | ||||
|  | ||||
|     s = result; | ||||
| } | ||||
|  | ||||
| void TChargedProp::momD2(ScalarField &s, FFT &fft) | ||||
| { | ||||
|     EmField     &A = *env().getObject<EmField>(par().emField); | ||||
|     ScalarField buf(env().getGrid()), result(env().getGrid()), | ||||
|                 Amu(env().getGrid()); | ||||
|  | ||||
|     result = zero; | ||||
|      | ||||
|     for (unsigned int mu = 0; mu < env().getNd(); ++mu) | ||||
|     { | ||||
|         Amu = peekLorentz(A, mu); | ||||
|         buf = (*phase_[mu])*s; | ||||
|         fft.FFT_all_dim(buf, buf, FFT::backward); | ||||
|         buf = Amu*Amu*buf; | ||||
|         fft.FFT_all_dim(buf, buf, FFT::forward); | ||||
|         result = result + .5*buf; | ||||
|     } | ||||
|     fft.FFT_all_dim(s, s, FFT::backward); | ||||
|     for (unsigned int mu = 0; mu < env().getNd(); ++mu) | ||||
|     { | ||||
|         Amu = peekLorentz(A, mu);         | ||||
|         buf = Amu*Amu*s; | ||||
|         fft.FFT_all_dim(buf, buf, FFT::forward); | ||||
|         result = result + .5*adj(*phase_[mu])*buf; | ||||
|     } | ||||
|  | ||||
|     s = result; | ||||
| } | ||||
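Editor's note: the comments in execute() above describe an expansion in the charge q. Writing G for the free momentum-space propagator, S for the source, F for the lattice Fourier transform, and D_1, D_2 for the one- and two-photon insertion vertices (so momD1 = F D_1 F^{-1} and momD2 = F D_2 F^{-1}, as implemented by the two member functions), the field assembled before the final backward FFT is, to second order in q,

\[
\tilde{\psi} \;=\; G F S
\;-\; q\, G\,(F D_1 F^{-1})\, G F S
\;+\; q^{2}\, G\,(F D_1 F^{-1})\, G\,(F D_1 F^{-1})\, G F S
\;-\; q^{2}\, G\,(F D_2 F^{-1})\, G F S,
\qquad
\psi \;=\; F^{-1}\tilde{\psi}.
\]

This is only a restatement of the code comments, not additional material from the commit.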
							
								
								
									
extras/Hadrons/Modules/MScalar/ChargedProp.hpp (new file, 61 lines)
| @@ -0,0 +1,61 @@ | ||||
| #ifndef Hadrons_MScalar_ChargedProp_hpp_ | ||||
| #define Hadrons_MScalar_ChargedProp_hpp_ | ||||
|  | ||||
| #include <Grid/Hadrons/Global.hpp> | ||||
| #include <Grid/Hadrons/Module.hpp> | ||||
| #include <Grid/Hadrons/ModuleFactory.hpp> | ||||
|  | ||||
| BEGIN_HADRONS_NAMESPACE | ||||
|  | ||||
| /****************************************************************************** | ||||
|  *                       Charged scalar propagator                            * | ||||
|  ******************************************************************************/ | ||||
| BEGIN_MODULE_NAMESPACE(MScalar) | ||||
|  | ||||
| class ChargedPropPar: Serializable | ||||
| { | ||||
| public: | ||||
|     GRID_SERIALIZABLE_CLASS_MEMBERS(ChargedPropPar, | ||||
|                                     std::string, emField, | ||||
|                                     std::string, source, | ||||
|                                     double,      mass, | ||||
|                                     double,      charge, | ||||
|                                     std::string, output); | ||||
| }; | ||||
|  | ||||
| class TChargedProp: public Module<ChargedPropPar> | ||||
| { | ||||
| public: | ||||
|     SCALAR_TYPE_ALIASES(SIMPL,); | ||||
|     typedef PhotonR::GaugeField     EmField; | ||||
|     typedef PhotonR::GaugeLinkField EmComp; | ||||
| public: | ||||
|     // constructor | ||||
|     TChargedProp(const std::string name); | ||||
|     // destructor | ||||
|     virtual ~TChargedProp(void) = default; | ||||
|     // dependency relation | ||||
|     virtual std::vector<std::string> getInput(void); | ||||
|     virtual std::vector<std::string> getOutput(void); | ||||
|     // setup | ||||
|     virtual void setup(void); | ||||
|     // execution | ||||
|     virtual void execute(void); | ||||
| private: | ||||
|     void momD1(ScalarField &s, FFT &fft); | ||||
|     void momD2(ScalarField &s, FFT &fft); | ||||
| private: | ||||
|     std::string                freeMomPropName_, GFSrcName_; | ||||
|     std::vector<std::string>   phaseName_; | ||||
|     ScalarField                *freeMomProp_, *GFSrc_; | ||||
|     std::vector<ScalarField *> phase_; | ||||
|     EmField                    *A; | ||||
| }; | ||||
|  | ||||
| MODULE_REGISTER_NS(ChargedProp, TChargedProp, MScalar); | ||||
|  | ||||
| END_MODULE_NAMESPACE | ||||
|  | ||||
| END_HADRONS_NAMESPACE | ||||
|  | ||||
| #endif // Hadrons_MScalar_ChargedProp_hpp_ | ||||
							
								
								
									
extras/Hadrons/Modules/MScalar/FreeProp.cc (new file, 79 lines)
| @@ -0,0 +1,79 @@ | ||||
| #include <Grid/Hadrons/Modules/MScalar/FreeProp.hpp> | ||||
| #include <Grid/Hadrons/Modules/MScalar/Scalar.hpp> | ||||
|  | ||||
| using namespace Grid; | ||||
| using namespace Hadrons; | ||||
| using namespace MScalar; | ||||
|  | ||||
| /****************************************************************************** | ||||
| *                        TFreeProp implementation                             * | ||||
| ******************************************************************************/ | ||||
| // constructor ///////////////////////////////////////////////////////////////// | ||||
| TFreeProp::TFreeProp(const std::string name) | ||||
| : Module<FreePropPar>(name) | ||||
| {} | ||||
|  | ||||
| // dependencies/products /////////////////////////////////////////////////////// | ||||
| std::vector<std::string> TFreeProp::getInput(void) | ||||
| { | ||||
|     std::vector<std::string> in = {par().source}; | ||||
|      | ||||
|     return in; | ||||
| } | ||||
|  | ||||
| std::vector<std::string> TFreeProp::getOutput(void) | ||||
| { | ||||
|     std::vector<std::string> out = {getName()}; | ||||
|      | ||||
|     return out; | ||||
| } | ||||
|  | ||||
| // setup /////////////////////////////////////////////////////////////////////// | ||||
| void TFreeProp::setup(void) | ||||
| { | ||||
|     freeMomPropName_ = FREEMOMPROP(par().mass); | ||||
|      | ||||
|     if (!env().hasRegisteredObject(freeMomPropName_)) | ||||
|     { | ||||
|         env().registerLattice<ScalarField>(freeMomPropName_); | ||||
|     } | ||||
|     env().registerLattice<ScalarField>(getName()); | ||||
| } | ||||
|  | ||||
| // execution /////////////////////////////////////////////////////////////////// | ||||
| void TFreeProp::execute(void) | ||||
| { | ||||
|     ScalarField &prop   = *env().createLattice<ScalarField>(getName()); | ||||
|     ScalarField &source = *env().getObject<ScalarField>(par().source); | ||||
|     ScalarField *freeMomProp; | ||||
|  | ||||
|     if (!env().hasCreatedObject(freeMomPropName_)) | ||||
|     { | ||||
|         LOG(Message) << "Caching momentum space free scalar propagator" | ||||
|                      << " (mass= " << par().mass << ")..." << std::endl; | ||||
|         freeMomProp = env().createLattice<ScalarField>(freeMomPropName_); | ||||
|         SIMPL::MomentumSpacePropagator(*freeMomProp, par().mass); | ||||
|     } | ||||
|     else | ||||
|     { | ||||
|         freeMomProp = env().getObject<ScalarField>(freeMomPropName_); | ||||
|     } | ||||
|     LOG(Message) << "Computing free scalar propagator..." << std::endl; | ||||
|     SIMPL::FreePropagator(source, prop, *freeMomProp); | ||||
|      | ||||
|     if (!par().output.empty()) | ||||
|     { | ||||
|         TextWriter            writer(par().output + "." + | ||||
|                                      std::to_string(env().getTrajectory())); | ||||
|         std::vector<TComplex> buf; | ||||
|         std::vector<Complex>  result; | ||||
|          | ||||
|         sliceSum(prop, buf, Tp); | ||||
|         result.resize(buf.size()); | ||||
|         for (unsigned int t = 0; t < buf.size(); ++t) | ||||
|         { | ||||
|             result[t] = TensorRemove(buf[t]); | ||||
|         } | ||||
|         write(writer, "prop", result); | ||||
|     } | ||||
| } | ||||
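Editor's note: in the same notation, TFreeProp::execute() is the q = 0 case, applying the cached momentum-space propagator to the Fourier-transformed source and transforming back,

\[
\psi \;=\; F^{-1}\big[\, \tilde{G}\, (F S) \,\big],
\]

where, presumably, \(\tilde{G}(p) = 1/(\hat{p}^{2} + m^{2})\) with \(\hat{p}_\mu = 2\sin(p_\mu/2)\) is the usual free lattice boson propagator; the exact form is whatever SIMPL::MomentumSpacePropagator produces and is not shown in this diff.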
							
								
								
									
extras/Hadrons/Modules/MScalar/FreeProp.hpp (new file, 50 lines)
| @@ -0,0 +1,50 @@ | ||||
| #ifndef Hadrons_MScalar_FreeProp_hpp_ | ||||
| #define Hadrons_MScalar_FreeProp_hpp_ | ||||
|  | ||||
| #include <Grid/Hadrons/Global.hpp> | ||||
| #include <Grid/Hadrons/Module.hpp> | ||||
| #include <Grid/Hadrons/ModuleFactory.hpp> | ||||
|  | ||||
| BEGIN_HADRONS_NAMESPACE | ||||
|  | ||||
| /****************************************************************************** | ||||
|  *                               FreeProp                                     * | ||||
|  ******************************************************************************/ | ||||
| BEGIN_MODULE_NAMESPACE(MScalar) | ||||
|  | ||||
| class FreePropPar: Serializable | ||||
| { | ||||
| public: | ||||
|     GRID_SERIALIZABLE_CLASS_MEMBERS(FreePropPar, | ||||
|                                     std::string, source, | ||||
|                                     double,      mass, | ||||
|                                     std::string, output); | ||||
| }; | ||||
|  | ||||
| class TFreeProp: public Module<FreePropPar> | ||||
| { | ||||
| public: | ||||
|     SCALAR_TYPE_ALIASES(SIMPL,); | ||||
| public: | ||||
|     // constructor | ||||
|     TFreeProp(const std::string name); | ||||
|     // destructor | ||||
|     virtual ~TFreeProp(void) = default; | ||||
|     // dependency relation | ||||
|     virtual std::vector<std::string> getInput(void); | ||||
|     virtual std::vector<std::string> getOutput(void); | ||||
|     // setup | ||||
|     virtual void setup(void); | ||||
|     // execution | ||||
|     virtual void execute(void); | ||||
| private: | ||||
|     std::string freeMomPropName_; | ||||
| }; | ||||
|  | ||||
| MODULE_REGISTER_NS(FreeProp, TFreeProp, MScalar); | ||||
|  | ||||
| END_MODULE_NAMESPACE | ||||
|  | ||||
| END_HADRONS_NAMESPACE | ||||
|  | ||||
| #endif // Hadrons_MScalar_FreeProp_hpp_ | ||||
							
								
								
									
extras/Hadrons/Modules/MScalar/Scalar.hpp (new file, 6 lines)
| @@ -0,0 +1,6 @@ | ||||
| #ifndef Hadrons_Scalar_hpp_ | ||||
| #define Hadrons_Scalar_hpp_ | ||||
|  | ||||
| #define FREEMOMPROP(m) "_scalar_mom_prop_" + std::to_string(m) | ||||
|  | ||||
| #endif // Hadrons_Scalar_hpp_ | ||||
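Editor's note: FREEMOMPROP gives every scalar module the same cache key for a given mass, which is what lets TChargedProp and TFreeProp above share one cached momentum-space propagator. A small illustration (the variable is hypothetical; std::to_string prints six decimals, so masses differing only beyond that resolution would map to the same key):

#include <string>
// Same macro as in Scalar.hpp above.
#define FREEMOMPROP(m) "_scalar_mom_prop_" + std::to_string(m)

std::string key = FREEMOMPROP(0.1);   // "_scalar_mom_prop_0.100000"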
							
								
								
									
extras/Hadrons/Modules/MSink/Point.hpp (new file, 114 lines)
| @@ -0,0 +1,114 @@ | ||||
| #ifndef Hadrons_MSink_Point_hpp_ | ||||
| #define Hadrons_MSink_Point_hpp_ | ||||
|  | ||||
| #include <Grid/Hadrons/Global.hpp> | ||||
| #include <Grid/Hadrons/Module.hpp> | ||||
| #include <Grid/Hadrons/ModuleFactory.hpp> | ||||
|  | ||||
| BEGIN_HADRONS_NAMESPACE | ||||
|  | ||||
| /****************************************************************************** | ||||
|  *                                   Point                                    * | ||||
|  ******************************************************************************/ | ||||
| BEGIN_MODULE_NAMESPACE(MSink) | ||||
|  | ||||
| class PointPar: Serializable | ||||
| { | ||||
| public: | ||||
|     GRID_SERIALIZABLE_CLASS_MEMBERS(PointPar, | ||||
|                                     std::string, mom); | ||||
| }; | ||||
|  | ||||
| template <typename FImpl> | ||||
| class TPoint: public Module<PointPar> | ||||
| { | ||||
| public: | ||||
|     FERM_TYPE_ALIASES(FImpl,); | ||||
|     SINK_TYPE_ALIASES(); | ||||
| public: | ||||
|     // constructor | ||||
|     TPoint(const std::string name); | ||||
|     // destructor | ||||
|     virtual ~TPoint(void) = default; | ||||
|     // dependency relation | ||||
|     virtual std::vector<std::string> getInput(void); | ||||
|     virtual std::vector<std::string> getOutput(void); | ||||
|     // setup | ||||
|     virtual void setup(void); | ||||
|     // execution | ||||
|     virtual void execute(void); | ||||
| }; | ||||
|  | ||||
| MODULE_REGISTER_NS(Point,       TPoint<FIMPL>,        MSink); | ||||
| MODULE_REGISTER_NS(ScalarPoint, TPoint<ScalarImplCR>, MSink); | ||||
|  | ||||
| /****************************************************************************** | ||||
|  *                          TPoint implementation                             * | ||||
|  ******************************************************************************/ | ||||
| // constructor ///////////////////////////////////////////////////////////////// | ||||
| template <typename FImpl> | ||||
| TPoint<FImpl>::TPoint(const std::string name) | ||||
| : Module<PointPar>(name) | ||||
| {} | ||||
|  | ||||
| // dependencies/products /////////////////////////////////////////////////////// | ||||
| template <typename FImpl> | ||||
| std::vector<std::string> TPoint<FImpl>::getInput(void) | ||||
| { | ||||
|     std::vector<std::string> in; | ||||
|      | ||||
|     return in; | ||||
| } | ||||
|  | ||||
| template <typename FImpl> | ||||
| std::vector<std::string> TPoint<FImpl>::getOutput(void) | ||||
| { | ||||
|     std::vector<std::string> out = {getName()}; | ||||
|      | ||||
|     return out; | ||||
| } | ||||
|  | ||||
| // setup /////////////////////////////////////////////////////////////////////// | ||||
| template <typename FImpl> | ||||
| void TPoint<FImpl>::setup(void) | ||||
| { | ||||
|     unsigned int size; | ||||
|      | ||||
|     size = env().template lattice4dSize<LatticeComplex>(); | ||||
|     env().registerObject(getName(), size); | ||||
| } | ||||
|  | ||||
| // execution /////////////////////////////////////////////////////////////////// | ||||
| template <typename FImpl> | ||||
| void TPoint<FImpl>::execute(void) | ||||
| { | ||||
|     std::vector<Real> p = strToVec<Real>(par().mom); | ||||
|     LatticeComplex    ph(env().getGrid()), coor(env().getGrid()); | ||||
|     Complex           i(0.0,1.0); | ||||
|      | ||||
|     LOG(Message) << "Setting up point sink function for momentum [" | ||||
|                  << par().mom << "]" << std::endl; | ||||
|     ph = zero; | ||||
|     for(unsigned int mu = 0; mu < env().getNd(); mu++) | ||||
|     { | ||||
|         LatticeCoordinate(coor, mu); | ||||
|         ph = ph + (p[mu]/env().getGrid()->_fdimensions[mu])*coor; | ||||
|     } | ||||
|     ph = exp((Real)(2*M_PI)*i*ph); | ||||
|     auto sink = [ph](const PropagatorField &field) | ||||
|     { | ||||
|         SlicedPropagator res; | ||||
|         PropagatorField  tmp = ph*field; | ||||
|          | ||||
|         sliceSum(tmp, res, Tp); | ||||
|          | ||||
|         return res; | ||||
|     }; | ||||
|     env().setObject(getName(), new SinkFn(sink)); | ||||
| } | ||||
|  | ||||
| END_MODULE_NAMESPACE | ||||
|  | ||||
| END_HADRONS_NAMESPACE | ||||
|  | ||||
| #endif // Hadrons_MSink_Point_hpp_ | ||||
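Editor's note: the sink closure built in TPoint<FImpl>::execute() above is a plane-wave projection. ph accumulates p_mu x_mu / L_mu over all directions, is exponentiated, and the returned lambda multiplies the propagator by the phase and sums each timeslice. In symbols, with L_\mu the lattice extents taken from _fdimensions,

\[
\varphi_{p}(x) \;=\; \exp\!\Big( 2\pi i \sum_{\mu} \frac{p_\mu x_\mu}{L_\mu} \Big),
\qquad
\mathrm{sink}[S](t) \;=\; \sum_{\vec{x}} \varphi_{p}(\vec{x},t)\, S(\vec{x},t).
\]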
| @@ -27,8 +27,8 @@ See the full license in the file "LICENSE" in the top level distribution directo | ||||
| *************************************************************************************/ | ||||
| /*  END LEGAL */ | ||||
|  | ||||
| #ifndef Hadrons_RBPrecCG_hpp_ | ||||
| #define Hadrons_RBPrecCG_hpp_ | ||||
| #ifndef Hadrons_MSolver_RBPrecCG_hpp_ | ||||
| #define Hadrons_MSolver_RBPrecCG_hpp_ | ||||
|  | ||||
| #include <Grid/Hadrons/Global.hpp> | ||||
| #include <Grid/Hadrons/Module.hpp> | ||||
| @@ -53,7 +53,7 @@ template <typename FImpl> | ||||
| class TRBPrecCG: public Module<RBPrecCGPar> | ||||
| { | ||||
| public: | ||||
|     TYPE_ALIASES(FImpl,); | ||||
|     FGS_TYPE_ALIASES(FImpl,); | ||||
| public: | ||||
|     // constructor | ||||
|     TRBPrecCG(const std::string name); | ||||
| @@ -129,4 +129,4 @@ END_MODULE_NAMESPACE | ||||
|  | ||||
| END_HADRONS_NAMESPACE | ||||
|  | ||||
| #endif // Hadrons_RBPrecCG_hpp_ | ||||
| #endif // Hadrons_MSolver_RBPrecCG_hpp_ | ||||
|   | ||||
| @@ -27,8 +27,8 @@ See the full license in the file "LICENSE" in the top level distribution directo | ||||
| *************************************************************************************/ | ||||
| /*  END LEGAL */ | ||||
|  | ||||
| #ifndef Hadrons_Point_hpp_ | ||||
| #define Hadrons_Point_hpp_ | ||||
| #ifndef Hadrons_MSource_Point_hpp_ | ||||
| #define Hadrons_MSource_Point_hpp_ | ||||
|  | ||||
| #include <Grid/Hadrons/Global.hpp> | ||||
| #include <Grid/Hadrons/Module.hpp> | ||||
| @@ -63,7 +63,7 @@ template <typename FImpl> | ||||
| class TPoint: public Module<PointPar> | ||||
| { | ||||
| public: | ||||
|     TYPE_ALIASES(FImpl,); | ||||
|     FERM_TYPE_ALIASES(FImpl,); | ||||
| public: | ||||
|     // constructor | ||||
|     TPoint(const std::string name); | ||||
| @@ -79,6 +79,7 @@ public: | ||||
| }; | ||||
|  | ||||
| MODULE_REGISTER_NS(Point,       TPoint<FIMPL>,        MSource); | ||||
| MODULE_REGISTER_NS(ScalarPoint, TPoint<ScalarImplCR>, MSource); | ||||
|  | ||||
| /****************************************************************************** | ||||
|  *                       TPoint template implementation                       * | ||||
| @@ -132,4 +133,4 @@ END_MODULE_NAMESPACE | ||||
|  | ||||
| END_HADRONS_NAMESPACE | ||||
|  | ||||
| #endif // Hadrons_Point_hpp_ | ||||
| #endif // Hadrons_MSource_Point_hpp_ | ||||
|   | ||||
| @@ -28,8 +28,8 @@ See the full license in the file "LICENSE" in the top level distribution directo | ||||
| *************************************************************************************/ | ||||
| /*  END LEGAL */ | ||||
|  | ||||
| #ifndef Hadrons_SeqGamma_hpp_ | ||||
| #define Hadrons_SeqGamma_hpp_ | ||||
| #ifndef Hadrons_MSource_SeqGamma_hpp_ | ||||
| #define Hadrons_MSource_SeqGamma_hpp_ | ||||
|  | ||||
| #include <Grid/Hadrons/Global.hpp> | ||||
| #include <Grid/Hadrons/Module.hpp> | ||||
| @@ -72,7 +72,7 @@ template <typename FImpl> | ||||
| class TSeqGamma: public Module<SeqGammaPar> | ||||
| { | ||||
| public: | ||||
|     TYPE_ALIASES(FImpl,); | ||||
|     FGS_TYPE_ALIASES(FImpl,); | ||||
| public: | ||||
|     // constructor | ||||
|     TSeqGamma(const std::string name); | ||||
| @@ -161,4 +161,4 @@ END_MODULE_NAMESPACE | ||||
|  | ||||
| END_HADRONS_NAMESPACE | ||||
|  | ||||
| #endif // Hadrons_SeqGamma_hpp_ | ||||
| #endif // Hadrons_MSource_SeqGamma_hpp_ | ||||
|   | ||||
| @@ -26,8 +26,8 @@ See the full license in the file "LICENSE" in the top level distribution directo | ||||
| *************************************************************************************/ | ||||
| /*  END LEGAL */ | ||||
|  | ||||
| #ifndef Hadrons_WallSource_hpp_ | ||||
| #define Hadrons_WallSource_hpp_ | ||||
| #ifndef Hadrons_MSource_WallSource_hpp_ | ||||
| #define Hadrons_MSource_WallSource_hpp_ | ||||
|  | ||||
| #include <Grid/Hadrons/Global.hpp> | ||||
| #include <Grid/Hadrons/Module.hpp> | ||||
| @@ -64,7 +64,7 @@ template <typename FImpl> | ||||
| class TWall: public Module<WallPar> | ||||
| { | ||||
| public: | ||||
|     TYPE_ALIASES(FImpl,); | ||||
|     FERM_TYPE_ALIASES(FImpl,); | ||||
| public: | ||||
|     // constructor | ||||
|     TWall(const std::string name); | ||||
| @@ -144,4 +144,4 @@ END_MODULE_NAMESPACE | ||||
|  | ||||
| END_HADRONS_NAMESPACE | ||||
|  | ||||
| #endif // Hadrons_WallSource_hpp_ | ||||
| #endif // Hadrons_MSource_WallSource_hpp_ | ||||
|   | ||||
| @@ -27,8 +27,8 @@ See the full license in the file "LICENSE" in the top level distribution directo | ||||
| *************************************************************************************/ | ||||
| /*  END LEGAL */ | ||||
|  | ||||
| #ifndef Hadrons_Z2_hpp_ | ||||
| #define Hadrons_Z2_hpp_ | ||||
| #ifndef Hadrons_MSource_Z2_hpp_ | ||||
| #define Hadrons_MSource_Z2_hpp_ | ||||
|  | ||||
| #include <Grid/Hadrons/Global.hpp> | ||||
| #include <Grid/Hadrons/Module.hpp> | ||||
| @@ -67,7 +67,7 @@ template <typename FImpl> | ||||
| class TZ2: public Module<Z2Par> | ||||
| { | ||||
| public: | ||||
|     TYPE_ALIASES(FImpl,); | ||||
|     FERM_TYPE_ALIASES(FImpl,); | ||||
| public: | ||||
|     // constructor | ||||
|     TZ2(const std::string name); | ||||
| @@ -83,6 +83,7 @@ public: | ||||
| }; | ||||
|  | ||||
| MODULE_REGISTER_NS(Z2,       TZ2<FIMPL>,        MSource); | ||||
| MODULE_REGISTER_NS(ScalarZ2, TZ2<ScalarImplCR>, MSource); | ||||
|  | ||||
| /****************************************************************************** | ||||
|  *                       TZ2 template implementation                          * | ||||
| @@ -148,4 +149,4 @@ END_MODULE_NAMESPACE | ||||
|  | ||||
| END_HADRONS_NAMESPACE | ||||
|  | ||||
| #endif // Hadrons_Z2_hpp_ | ||||
| #endif // Hadrons_MSource_Z2_hpp_ | ||||
|   | ||||
| @@ -1,5 +1,5 @@ | ||||
| #ifndef Hadrons____FILEBASENAME____hpp_ | ||||
| #define Hadrons____FILEBASENAME____hpp_ | ||||
| #ifndef Hadrons____NAMESPACE_______FILEBASENAME____hpp_ | ||||
| #define Hadrons____NAMESPACE_______FILEBASENAME____hpp_ | ||||
|  | ||||
| #include <Grid/Hadrons/Global.hpp> | ||||
| #include <Grid/Hadrons/Module.hpp> | ||||
| @@ -41,4 +41,4 @@ END_MODULE_NAMESPACE | ||||
|  | ||||
| END_HADRONS_NAMESPACE | ||||
|  | ||||
| #endif // Hadrons____FILEBASENAME____hpp_ | ||||
| #endif // Hadrons____NAMESPACE_______FILEBASENAME____hpp_ | ||||
|   | ||||
| @@ -1,5 +1,5 @@ | ||||
| #ifndef Hadrons____FILEBASENAME____hpp_ | ||||
| #define Hadrons____FILEBASENAME____hpp_ | ||||
| #ifndef Hadrons____NAMESPACE_______FILEBASENAME____hpp_ | ||||
| #define Hadrons____NAMESPACE_______FILEBASENAME____hpp_ | ||||
|  | ||||
| #include <Grid/Hadrons/Global.hpp> | ||||
| #include <Grid/Hadrons/Module.hpp> | ||||
| @@ -82,4 +82,4 @@ END_MODULE_NAMESPACE | ||||
|  | ||||
| END_HADRONS_NAMESPACE | ||||
|  | ||||
| #endif // Hadrons____FILEBASENAME____hpp_ | ||||
| #endif // Hadrons____NAMESPACE_______FILEBASENAME____hpp_ | ||||
|   | ||||
| @@ -4,7 +4,10 @@ modules_cc =\ | ||||
|   Modules/MContraction/WeakNeutral4ptDisc.cc \ | ||||
|   Modules/MGauge/Load.cc \ | ||||
|   Modules/MGauge/Random.cc \ | ||||
|   Modules/MGauge/Unit.cc | ||||
|   Modules/MGauge/StochEm.cc \ | ||||
|   Modules/MGauge/Unit.cc \ | ||||
|   Modules/MScalar/ChargedProp.cc \ | ||||
|   Modules/MScalar/FreeProp.cc | ||||
|  | ||||
| modules_hpp =\ | ||||
|   Modules/MAction/DWF.hpp \ | ||||
| @@ -17,14 +20,19 @@ modules_hpp =\ | ||||
|   Modules/MContraction/WeakHamiltonianEye.hpp \ | ||||
|   Modules/MContraction/WeakHamiltonianNonEye.hpp \ | ||||
|   Modules/MContraction/WeakNeutral4ptDisc.hpp \ | ||||
|   Modules/MFermion/GaugeProp.hpp \ | ||||
|   Modules/MGauge/Load.hpp \ | ||||
|   Modules/MGauge/Random.hpp \ | ||||
|   Modules/MGauge/StochEm.hpp \ | ||||
|   Modules/MGauge/Unit.hpp \ | ||||
|   Modules/MLoop/NoiseLoop.hpp \ | ||||
|   Modules/MScalar/ChargedProp.hpp \ | ||||
|   Modules/MScalar/FreeProp.hpp \ | ||||
|   Modules/MScalar/Scalar.hpp \ | ||||
|   Modules/MSink/Point.hpp \ | ||||
|   Modules/MSolver/RBPrecCG.hpp \ | ||||
|   Modules/MSource/Point.hpp \ | ||||
|   Modules/MSource/SeqGamma.hpp \ | ||||
|   Modules/MSource/Wall.hpp \ | ||||
|   Modules/MSource/Z2.hpp \ | ||||
|   Modules/Quark.hpp | ||||
|   Modules/MSource/Z2.hpp | ||||
|  | ||||
|   | ||||
							
								
								
									
extras/qed-fvol/Global.cc (new file, 11 lines)
| @@ -0,0 +1,11 @@ | ||||
| #include <qed-fvol/Global.hpp> | ||||
|  | ||||
| using namespace Grid; | ||||
| using namespace QCD; | ||||
| using namespace QedFVol; | ||||
|  | ||||
| QedFVolLogger QedFVol::QedFVolLogError(1,"Error"); | ||||
| QedFVolLogger QedFVol::QedFVolLogWarning(1,"Warning"); | ||||
| QedFVolLogger QedFVol::QedFVolLogMessage(1,"Message"); | ||||
| QedFVolLogger QedFVol::QedFVolLogIterative(1,"Iterative"); | ||||
| QedFVolLogger QedFVol::QedFVolLogDebug(1,"Debug"); | ||||
							
								
								
									
extras/qed-fvol/Global.hpp (new file, 42 lines)
| @@ -0,0 +1,42 @@ | ||||
| #ifndef QedFVol_Global_hpp_ | ||||
| #define QedFVol_Global_hpp_ | ||||
|  | ||||
| #include <Grid/Grid.h> | ||||
|  | ||||
| #define BEGIN_QEDFVOL_NAMESPACE \ | ||||
| namespace Grid {\ | ||||
| using namespace QCD;\ | ||||
| namespace QedFVol {\ | ||||
| using Grid::operator<<; | ||||
| #define END_QEDFVOL_NAMESPACE }} | ||||
|  | ||||
| /* the 'using Grid::operator<<;' statement prevents a very nasty compilation | ||||
|  * error with GCC (clang compiles fine without it). | ||||
|  */ | ||||
|  | ||||
| BEGIN_QEDFVOL_NAMESPACE | ||||
|  | ||||
| class QedFVolLogger: public Logger | ||||
| { | ||||
| public: | ||||
|     QedFVolLogger(int on, std::string nm): Logger("QedFVol", on, nm, | ||||
|                                                   GridLogColours, "BLACK"){}; | ||||
| }; | ||||
|  | ||||
| #define LOG(channel) std::cout << QedFVolLog##channel | ||||
| #define QEDFVOL_ERROR(msg)\ | ||||
| LOG(Error) << msg << " (" << __FUNCTION__ << " at " << __FILE__ << ":"\ | ||||
|            << __LINE__ << ")" << std::endl;\ | ||||
| abort(); | ||||
|  | ||||
| #define DEBUG_VAR(var) LOG(Debug) << #var << "= " << (var) << std::endl; | ||||
|  | ||||
| extern QedFVolLogger QedFVolLogError; | ||||
| extern QedFVolLogger QedFVolLogWarning; | ||||
| extern QedFVolLogger QedFVolLogMessage; | ||||
| extern QedFVolLogger QedFVolLogIterative; | ||||
| extern QedFVolLogger QedFVolLogDebug; | ||||
|  | ||||
| END_QEDFVOL_NAMESPACE | ||||
|  | ||||
| #endif // QedFVol_Global_hpp_ | ||||
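Editor's note: the LOG and DEBUG_VAR macros above mirror the Grid/Hadrons logging pattern used throughout the modules in this compare. A minimal usage sketch for code compiled inside the QedFVol namespace (the variable and the message are hypothetical; #include <qed-fvol/Global.hpp> is assumed):

// LOG(Message) expands to std::cout << QedFVolLogMessage, so it streams like any ostream.
double plaq = 0.5;                                   // hypothetical quantity to report
LOG(Message) << "average plaquette = " << plaq << std::endl;
DEBUG_VAR(plaq);                                     // prints "plaq= 0.5" on the Debug channel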
							
								
								
									
extras/qed-fvol/Makefile.am (new file, 9 lines)
| @@ -0,0 +1,9 @@ | ||||
| AM_CXXFLAGS += -I$(top_srcdir)/extras | ||||
|  | ||||
| bin_PROGRAMS = qed-fvol | ||||
|  | ||||
| qed_fvol_SOURCES =   \ | ||||
|     qed-fvol.cc      \ | ||||
|     Global.cc | ||||
|  | ||||
| qed_fvol_LDADD   = -lGrid | ||||
							
								
								
									
extras/qed-fvol/WilsonLoops.h (new file, 265 lines)
| @@ -0,0 +1,265 @@ | ||||
| #ifndef QEDFVOL_WILSONLOOPS_H | ||||
| #define QEDFVOL_WILSONLOOPS_H | ||||
|  | ||||
| #include <Global.hpp> | ||||
|  | ||||
| BEGIN_QEDFVOL_NAMESPACE | ||||
|  | ||||
| template <class Gimpl> class NewWilsonLoops : public Gimpl { | ||||
| public: | ||||
|   INHERIT_GIMPL_TYPES(Gimpl); | ||||
|  | ||||
|   typedef typename Gimpl::GaugeLinkField GaugeMat; | ||||
|   typedef typename Gimpl::GaugeField GaugeLorentz; | ||||
|  | ||||
|   ////////////////////////////////////////////////// | ||||
|   // directed plaquette oriented in mu,nu plane | ||||
|   ////////////////////////////////////////////////// | ||||
|   static void dirPlaquette(GaugeMat &plaq, const std::vector<GaugeMat> &U, | ||||
|                            const int mu, const int nu) { | ||||
|     // Annoyingly, must use either scope resolution to find dependent base | ||||
|     // class, | ||||
|     // or this-> ; there is no "this" in a static method. This forces explicit | ||||
|     // Gimpl scope | ||||
|     // resolution throughout the usage in this file, and rather defeats the | ||||
|     // purpose of deriving | ||||
|     // from Gimpl. | ||||
|     plaq = Gimpl::CovShiftBackward( | ||||
|         U[mu], mu, Gimpl::CovShiftBackward( | ||||
|                        U[nu], nu, Gimpl::CovShiftForward(U[mu], mu, U[nu]))); | ||||
|   } | ||||
|   ////////////////////////////////////////////////// | ||||
|   // trace of directed plaquette oriented in mu,nu plane | ||||
|   ////////////////////////////////////////////////// | ||||
|   static void traceDirPlaquette(LatticeComplex &plaq, | ||||
|                                 const std::vector<GaugeMat> &U, const int mu, | ||||
|                                 const int nu) { | ||||
|     GaugeMat sp(U[0]._grid); | ||||
|     dirPlaquette(sp, U, mu, nu); | ||||
|     plaq = trace(sp); | ||||
|   } | ||||
|   ////////////////////////////////////////////////// | ||||
|   // sum over all planes of plaquette | ||||
|   ////////////////////////////////////////////////// | ||||
|   static void sitePlaquette(LatticeComplex &Plaq, | ||||
|                             const std::vector<GaugeMat> &U) { | ||||
|     LatticeComplex sitePlaq(U[0]._grid); | ||||
|     Plaq = zero; | ||||
|     for (int mu = 1; mu < U[0]._grid->_ndimension; mu++) { | ||||
|       for (int nu = 0; nu < mu; nu++) { | ||||
|         traceDirPlaquette(sitePlaq, U, mu, nu); | ||||
|         Plaq = Plaq + sitePlaq; | ||||
|       } | ||||
|     } | ||||
|   } | ||||
|   ////////////////////////////////////////////////// | ||||
|   // sum over all x,y,z,t and over all planes of plaquette | ||||
|   ////////////////////////////////////////////////// | ||||
|   static Real sumPlaquette(const GaugeLorentz &Umu) { | ||||
|     std::vector<GaugeMat> U(4, Umu._grid); | ||||
|  | ||||
|     for (int mu = 0; mu < Umu._grid->_ndimension; mu++) { | ||||
|       U[mu] = PeekIndex<LorentzIndex>(Umu, mu); | ||||
|     } | ||||
|  | ||||
|     LatticeComplex Plaq(Umu._grid); | ||||
|  | ||||
|     sitePlaquette(Plaq, U); | ||||
|  | ||||
|     TComplex Tp = sum(Plaq); | ||||
|     Complex p = TensorRemove(Tp); | ||||
|     return p.real(); | ||||
|   } | ||||
|   ////////////////////////////////////////////////// | ||||
|   // average over all x,y,z,t and over all planes of plaquette | ||||
|   ////////////////////////////////////////////////// | ||||
|   static Real avgPlaquette(const GaugeLorentz &Umu) { | ||||
|     int ndim = Umu._grid->_ndimension; | ||||
|     Real sumplaq = sumPlaquette(Umu); | ||||
|     Real vol = Umu._grid->gSites(); | ||||
|     Real faces = (1.0 * ndim * (ndim - 1)) / 2.0; | ||||
|     return sumplaq / vol / faces / Nc; // Nc dependent... FIXME | ||||
|   } | ||||
|  | ||||
|   ////////////////////////////////////////////////// | ||||
|   // Wilson loop of size (R1, R2), oriented in mu,nu plane | ||||
|   ////////////////////////////////////////////////// | ||||
|   static void wilsonLoop(GaugeMat &wl, const std::vector<GaugeMat> &U, | ||||
|                            const int Rmu, const int Rnu, | ||||
|                            const int mu, const int nu) { | ||||
|     wl = U[nu]; | ||||
|  | ||||
|     for(int i = 0; i < Rnu-1; i++){ | ||||
|       wl = Gimpl::CovShiftForward(U[nu], nu, wl); | ||||
|     } | ||||
|  | ||||
|     for(int i = 0; i < Rmu; i++){ | ||||
|       wl = Gimpl::CovShiftForward(U[mu], mu, wl); | ||||
|     } | ||||
|  | ||||
|     for(int i = 0; i < Rnu; i++){ | ||||
|       wl = Gimpl::CovShiftBackward(U[nu], nu, wl); | ||||
|     } | ||||
|  | ||||
|     for(int i = 0; i < Rmu; i++){ | ||||
|       wl = Gimpl::CovShiftBackward(U[mu], mu, wl); | ||||
|     } | ||||
|   } | ||||
|   ////////////////////////////////////////////////// | ||||
|   // trace of Wilson Loop oriented in mu,nu plane | ||||
|   ////////////////////////////////////////////////// | ||||
|   static void traceWilsonLoop(LatticeComplex &wl, | ||||
|                                 const std::vector<GaugeMat> &U, | ||||
|                                 const int Rmu, const int Rnu, | ||||
|                                 const int mu, const int nu) { | ||||
|     GaugeMat sp(U[0]._grid); | ||||
|     wilsonLoop(sp, U, Rmu, Rnu, mu, nu); | ||||
|     wl = trace(sp); | ||||
|   } | ||||
|   ////////////////////////////////////////////////// | ||||
|   // sum over all planes of Wilson loop | ||||
|   ////////////////////////////////////////////////// | ||||
|   static void siteWilsonLoop(LatticeComplex &Wl, | ||||
|                             const std::vector<GaugeMat> &U, | ||||
|                             const int R1, const int R2) { | ||||
|     LatticeComplex siteWl(U[0]._grid); | ||||
|     Wl = zero; | ||||
|     for (int mu = 1; mu < U[0]._grid->_ndimension; mu++) { | ||||
|       for (int nu = 0; nu < mu; nu++) { | ||||
|         traceWilsonLoop(siteWl, U, R1, R2, mu, nu); | ||||
|         Wl = Wl + siteWl; | ||||
|         traceWilsonLoop(siteWl, U, R2, R1, mu, nu); | ||||
|         Wl = Wl + siteWl; | ||||
|       } | ||||
|     } | ||||
|   } | ||||
|   ////////////////////////////////////////////////// | ||||
|   // sum over planes of Wilson loop with length R1 | ||||
|   // in the time direction | ||||
|   ////////////////////////////////////////////////// | ||||
|   static void siteTimelikeWilsonLoop(LatticeComplex &Wl, | ||||
|                             const std::vector<GaugeMat> &U, | ||||
|                             const int R1, const int R2) { | ||||
|     LatticeComplex siteWl(U[0]._grid); | ||||
|  | ||||
|     int ndim = U[0]._grid->_ndimension; | ||||
|  | ||||
|     Wl = zero; | ||||
|     for (int nu = 0; nu < ndim - 1; nu++) { | ||||
|       traceWilsonLoop(siteWl, U, R1, R2, ndim-1, nu); | ||||
|       Wl = Wl + siteWl; | ||||
|     } | ||||
|   } | ||||
|   ////////////////////////////////////////////////// | ||||
|   // sum Wilson loop over all planes orthogonal to the time direction | ||||
|   ////////////////////////////////////////////////// | ||||
|   static void siteSpatialWilsonLoop(LatticeComplex &Wl, | ||||
|                             const std::vector<GaugeMat> &U, | ||||
|                             const int R1, const int R2) { | ||||
|     LatticeComplex siteWl(U[0]._grid); | ||||
|  | ||||
|     Wl = zero; | ||||
|     for (int mu = 1; mu < U[0]._grid->_ndimension - 1; mu++) { | ||||
|       for (int nu = 0; nu < mu; nu++) { | ||||
|         traceWilsonLoop(siteWl, U, R1, R2, mu, nu); | ||||
|         Wl = Wl + siteWl; | ||||
|         traceWilsonLoop(siteWl, U, R2, R1, mu, nu); | ||||
|         Wl = Wl + siteWl; | ||||
|       } | ||||
|     } | ||||
|   } | ||||
|   ////////////////////////////////////////////////// | ||||
|   // sum over all x,y,z,t and over all planes of Wilson loop | ||||
|   ////////////////////////////////////////////////// | ||||
|   static Real sumWilsonLoop(const GaugeLorentz &Umu, | ||||
|                             const int R1, const int R2) { | ||||
|     std::vector<GaugeMat> U(4, Umu._grid); | ||||
|  | ||||
|     for (int mu = 0; mu < Umu._grid->_ndimension; mu++) { | ||||
|       U[mu] = PeekIndex<LorentzIndex>(Umu, mu); | ||||
|     } | ||||
|  | ||||
|     LatticeComplex Wl(Umu._grid); | ||||
|  | ||||
|     siteWilsonLoop(Wl, U, R1, R2); | ||||
|  | ||||
|     TComplex Tp = sum(Wl); | ||||
|     Complex p = TensorRemove(Tp); | ||||
|     return p.real(); | ||||
|   } | ||||
|   ////////////////////////////////////////////////// | ||||
|   // sum over all x,y,z,t and over all planes of timelike Wilson loop | ||||
|   ////////////////////////////////////////////////// | ||||
|   static Real sumTimelikeWilsonLoop(const GaugeLorentz &Umu, | ||||
|                             const int R1, const int R2) { | ||||
|     std::vector<GaugeMat> U(4, Umu._grid); | ||||
|  | ||||
|     for (int mu = 0; mu < Umu._grid->_ndimension; mu++) { | ||||
|       U[mu] = PeekIndex<LorentzIndex>(Umu, mu); | ||||
|     } | ||||
|  | ||||
|     LatticeComplex Wl(Umu._grid); | ||||
|  | ||||
|     siteTimelikeWilsonLoop(Wl, U, R1, R2); | ||||
|  | ||||
|     TComplex Tp = sum(Wl); | ||||
|     Complex p = TensorRemove(Tp); | ||||
|     return p.real(); | ||||
|   } | ||||
|   ////////////////////////////////////////////////// | ||||
|   // sum over all x,y,z,t and over all planes of spatial Wilson loop | ||||
|   ////////////////////////////////////////////////// | ||||
|   static Real sumSpatialWilsonLoop(const GaugeLorentz &Umu, | ||||
|                             const int R1, const int R2) { | ||||
|     std::vector<GaugeMat> U(4, Umu._grid); | ||||
|  | ||||
|     for (int mu = 0; mu < Umu._grid->_ndimension; mu++) { | ||||
|       U[mu] = PeekIndex<LorentzIndex>(Umu, mu); | ||||
|     } | ||||
|  | ||||
|     LatticeComplex Wl(Umu._grid); | ||||
|  | ||||
|     siteSpatialWilsonLoop(Wl, U, R1, R2); | ||||
|  | ||||
|     TComplex Tp = sum(Wl); | ||||
|     Complex p = TensorRemove(Tp); | ||||
|     return p.real(); | ||||
|   } | ||||
|   ////////////////////////////////////////////////// | ||||
|   // average over all x,y,z,t and over all planes of Wilson loop | ||||
|   ////////////////////////////////////////////////// | ||||
|   static Real avgWilsonLoop(const GaugeLorentz &Umu, | ||||
|                             const int R1, const int R2) { | ||||
|     int ndim = Umu._grid->_ndimension; | ||||
|     Real sumWl = sumWilsonLoop(Umu, R1, R2); | ||||
|     Real vol = Umu._grid->gSites(); | ||||
|     Real faces = 1.0 * ndim * (ndim - 1); | ||||
|     return sumWl / vol / faces / Nc; // Nc dependent... FIXME | ||||
|   } | ||||
|   ////////////////////////////////////////////////// | ||||
|   // average over all x,y,z,t and over all planes of timelike Wilson loop | ||||
|   ////////////////////////////////////////////////// | ||||
|   static Real avgTimelikeWilsonLoop(const GaugeLorentz &Umu, | ||||
|                             const int R1, const int R2) { | ||||
|     int ndim = Umu._grid->_ndimension; | ||||
|     Real sumWl = sumTimelikeWilsonLoop(Umu, R1, R2); | ||||
|     Real vol = Umu._grid->gSites(); | ||||
|     Real faces = 1.0 * (ndim - 1); | ||||
|     return sumWl / vol / faces / Nc; // Nc dependent... FIXME | ||||
|   } | ||||
|   ////////////////////////////////////////////////// | ||||
|   // average over all x,y,z,t and over all planes of spatial Wilson loop | ||||
|   ////////////////////////////////////////////////// | ||||
|   static Real avgSpatialWilsonLoop(const GaugeLorentz &Umu, | ||||
|                             const int R1, const int R2) { | ||||
|     int ndim = Umu._grid->_ndimension; | ||||
|     Real sumWl = sumSpatialWilsonLoop(Umu, R1, R2); | ||||
|     Real vol = Umu._grid->gSites(); | ||||
|     Real faces = 1.0 * (ndim - 1) * (ndim - 2); | ||||
|     return sumWl / vol / faces / Nc; // Nc dependent... FIXME | ||||
|   } | ||||
| }; | ||||
|  | ||||
| END_QEDFVOL_NAMESPACE | ||||
|  | ||||
| #endif // QEDFVOL_WILSONLOOPS_H | ||||
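For reference, the averaging conventions implemented above can be written as one formula (a reading of the code, with d the number of dimensions, V the global site count and Nc the colour trace normalisation):

\[
\langle W(R_1,R_2)\rangle \;=\; \frac{1}{V\,N_{\mathrm{planes}}\,N_c}\;\mathrm{Re}\sum_{x}\sum_{\mu>\nu}\mathrm{Tr}\big[\,W_{\mu\nu}(x;R_1,R_2)+W_{\mu\nu}(x;R_2,R_1)\,\big],
\]

where \(N_{\mathrm{planes}}=d(d-1)\) for avgWilsonLoop and \((d-1)(d-2)\) for avgSpatialWilsonLoop (both orientations of every plane are accumulated), while avgTimelikeWilsonLoop keeps a single orientation per plane, with R1 along the time direction, and uses \(N_{\mathrm{planes}}=d-1\). The plaquette average above is the same construction at \(R_1=R_2=1\) with \(N_{\mathrm{planes}}=d(d-1)/2\).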
							
								
								
									
88    extras/qed-fvol/qed-fvol.cc    Normal file
							| @@ -0,0 +1,88 @@ | ||||
| #include <Global.hpp> | ||||
| #include <WilsonLoops.h> | ||||
|  | ||||
| using namespace Grid; | ||||
| using namespace QCD; | ||||
| using namespace QedFVol; | ||||
|  | ||||
| typedef PeriodicGaugeImpl<QedGimplR>    QedPeriodicGimplR; | ||||
| typedef PhotonR::GaugeField             EmField; | ||||
| typedef PhotonR::GaugeLinkField         EmComp; | ||||
|  | ||||
| const int NCONFIGS = 10; | ||||
| const int NWILSON = 10; | ||||
|  | ||||
| int main(int argc, char *argv[]) | ||||
| { | ||||
|     // parse command line | ||||
|     std::string parameterFileName; | ||||
|      | ||||
|     if (argc < 2) | ||||
|     { | ||||
|         std::cerr << "usage: " << argv[0] << " <parameter file> [Grid options]"; | ||||
|         std::cerr << std::endl; | ||||
|         std::exit(EXIT_FAILURE); | ||||
|     } | ||||
|     parameterFileName = argv[1]; | ||||
|      | ||||
|     // initialization | ||||
|     Grid_init(&argc, &argv); | ||||
|     QedFVolLogError.Active(GridLogError.isActive()); | ||||
|     QedFVolLogWarning.Active(GridLogWarning.isActive()); | ||||
|     QedFVolLogMessage.Active(GridLogMessage.isActive()); | ||||
|     QedFVolLogIterative.Active(GridLogIterative.isActive()); | ||||
|     QedFVolLogDebug.Active(GridLogDebug.isActive()); | ||||
|     LOG(Message) << "Grid initialized" << std::endl; | ||||
|      | ||||
|     // QED stuff | ||||
|     std::vector<int> latt_size   = GridDefaultLatt(); | ||||
|     std::vector<int> simd_layout = GridDefaultSimd(4, vComplex::Nsimd()); | ||||
|     std::vector<int> mpi_layout  = GridDefaultMpi(); | ||||
|     GridCartesian    grid(latt_size,simd_layout,mpi_layout); | ||||
|     GridParallelRNG  pRNG(&grid); | ||||
|     PhotonR          photon(PhotonR::Gauge::feynman, | ||||
|                             PhotonR::ZmScheme::qedL); | ||||
|     EmField          a(&grid); | ||||
|     EmField          expA(&grid); | ||||
|  | ||||
|     Complex imag_unit(0, 1); | ||||
|  | ||||
|     Real wlA; | ||||
|     std::vector<Real> logWlAvg(NWILSON, 0.0), logWlTime(NWILSON, 0.0), logWlSpace(NWILSON, 0.0); | ||||
|  | ||||
|     pRNG.SeedRandomDevice(); | ||||
|  | ||||
|     LOG(Message) << "Wilson loop calculation beginning" << std::endl; | ||||
|     for(int ic = 0; ic < NCONFIGS; ic++){ | ||||
|         LOG(Message) << "Configuration " << ic <<std::endl; | ||||
|         photon.StochasticField(a, pRNG); | ||||
|  | ||||
|         // Exponentiate photon field | ||||
|         expA = exp(imag_unit*a); | ||||
|  | ||||
|         // Calculate Wilson loops | ||||
|         for(int iw=1; iw<=NWILSON; iw++){ | ||||
|             wlA = NewWilsonLoops<QedPeriodicGimplR>::avgWilsonLoop(expA, iw, iw) * 3; | ||||
|             logWlAvg[iw-1] -= 2*log(wlA); | ||||
|             wlA = NewWilsonLoops<QedPeriodicGimplR>::avgTimelikeWilsonLoop(expA, iw, iw) * 3; | ||||
|             logWlTime[iw-1] -= 2*log(wlA); | ||||
|             wlA = NewWilsonLoops<QedPeriodicGimplR>::avgSpatialWilsonLoop(expA, iw, iw) * 3; | ||||
|             logWlSpace[iw-1] -= 2*log(wlA); | ||||
|         } | ||||
|     } | ||||
|     LOG(Message) << "Wilson loop calculation completed" << std::endl; | ||||
|      | ||||
|     // Print per-size Wilson loop averages over the NCONFIGS configurations | ||||
|     for(int iw=1; iw<=NWILSON; iw++){ | ||||
|         LOG(Message) << iw << 'x' << iw << " Wilson loop" << std::endl; | ||||
|         LOG(Message) << "-2log(W) average: " << logWlAvg[iw-1]/NCONFIGS << std::endl; | ||||
|         LOG(Message) << "-2log(W) timelike: " << logWlTime[iw-1]/NCONFIGS << std::endl; | ||||
|         LOG(Message) << "-2log(W) spatial: " << logWlSpace[iw-1]/NCONFIGS << std::endl; | ||||
|     } | ||||
|  | ||||
|     // epilogue | ||||
|     LOG(Message) << "Grid is finalizing now" << std::endl; | ||||
|     Grid_finalize(); | ||||
|      | ||||
|     return EXIT_SUCCESS; | ||||
| } | ||||
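The numbers printed at the end are, for each loop size R, configuration averages of minus twice the log of the rescaled plane-averaged loop; reading the accumulation loop above,

\[
\overline{-2\ln W}(R) \;=\; -\frac{2}{N_{\mathrm{cfg}}}\sum_{c=1}^{N_{\mathrm{cfg}}}\ln\!\big(3\,\langle W(R,R)\rangle_c\big),
\]

with \(N_{\mathrm{cfg}}=\) NCONFIGS. The factor of 3 compensates the 1/Nc divisor applied inside avgWilsonLoop (read off the multiplication by 3 in the loop; presumably because the exponentiated photon links are U(1) and the QCD colour normalisation does not apply). The same combination is accumulated separately for the timelike and spatial plane sets.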
| @@ -41,7 +41,9 @@ Author: paboyle <paboyle@ph.ed.ac.uk> | ||||
| #include <Grid/GridCore.h> | ||||
| #include <Grid/GridQCDcore.h> | ||||
| #include <Grid/qcd/action/Action.h> | ||||
| #include <Grid/qcd/utils/GaugeFix.h> | ||||
| #include <Grid/qcd/smearing/Smearing.h> | ||||
| #include <Grid/parallelIO/MetaData.h> | ||||
| #include <Grid/qcd/hmc/HMC_aggregate.h> | ||||
|  | ||||
| #endif | ||||
|   | ||||
| @@ -7,6 +7,7 @@ | ||||
| #include <cassert> | ||||
| #include <complex> | ||||
| #include <vector> | ||||
| #include <string> | ||||
| #include <iostream> | ||||
| #include <iomanip> | ||||
| #include <random> | ||||
| @@ -18,6 +19,7 @@ | ||||
| #include <ctime> | ||||
| #include <sys/time.h> | ||||
| #include <chrono> | ||||
| #include <zlib.h> | ||||
|  | ||||
| /////////////////// | ||||
| // Grid config | ||||
|   | ||||
| @@ -10,8 +10,8 @@ if BUILD_COMMS_MPI3 | ||||
|   extra_sources+=communicator/Communicator_base.cc | ||||
| endif | ||||
|  | ||||
| if BUILD_COMMS_MPI3L | ||||
|   extra_sources+=communicator/Communicator_mpi3_leader.cc | ||||
| if BUILD_COMMS_MPIT | ||||
|   extra_sources+=communicator/Communicator_mpit.cc | ||||
|   extra_sources+=communicator/Communicator_base.cc | ||||
| endif | ||||
|  | ||||
|   | ||||
| @@ -37,6 +37,7 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk> | ||||
| #include <Grid/algorithms/approx/Chebyshev.h> | ||||
| #include <Grid/algorithms/approx/Remez.h> | ||||
| #include <Grid/algorithms/approx/MultiShiftFunction.h> | ||||
| #include <Grid/algorithms/approx/Forecast.h> | ||||
|  | ||||
| #include <Grid/algorithms/iterative/ConjugateGradient.h> | ||||
| #include <Grid/algorithms/iterative/ConjugateResidual.h> | ||||
| @@ -44,30 +45,16 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk> | ||||
| #include <Grid/algorithms/iterative/SchurRedBlack.h> | ||||
| #include <Grid/algorithms/iterative/ConjugateGradientMultiShift.h> | ||||
| #include <Grid/algorithms/iterative/ConjugateGradientMixedPrec.h> | ||||
|  | ||||
| // Lanczos support | ||||
| //#include <Grid/algorithms/iterative/MatrixUtils.h> | ||||
| #include <Grid/algorithms/iterative/BlockConjugateGradient.h> | ||||
| #include <Grid/algorithms/iterative/ConjugateGradientReliableUpdate.h> | ||||
| #include <Grid/algorithms/iterative/ImplicitlyRestartedLanczos.h> | ||||
| #include <Grid/algorithms/CoarsenedMatrix.h> | ||||
| #include <Grid/algorithms/FFT.h> | ||||
|  | ||||
| // Eigen/lanczos | ||||
| // EigCg | ||||
| // MCR | ||||
| // Pcg | ||||
| // Multishift CG | ||||
| // Hdcg | ||||
| // GCR | ||||
| // etc.. | ||||
|  | ||||
| // integrator/Leapfrog | ||||
| // integrator/Omelyan | ||||
| // integrator/ForceGradient | ||||
|  | ||||
| // montecarlo/hmc | ||||
| // montecarlo/rhmc | ||||
| // montecarlo/metropolis | ||||
| // etc... | ||||
|  | ||||
|  | ||||
| #endif | ||||
|   | ||||
| @@ -103,29 +103,32 @@ namespace Grid { | ||||
|     GridBase *CoarseGrid; | ||||
|     GridBase *FineGrid; | ||||
|     std::vector<Lattice<Fobj> > subspace; | ||||
|     int checkerboard; | ||||
|  | ||||
|     Aggregation(GridBase *_CoarseGrid,GridBase *_FineGrid) :  | ||||
|   Aggregation(GridBase *_CoarseGrid,GridBase *_FineGrid,int _checkerboard) :  | ||||
|     CoarseGrid(_CoarseGrid), | ||||
|       FineGrid(_FineGrid), | ||||
|       subspace(nbasis,_FineGrid) | ||||
|       subspace(nbasis,_FineGrid), | ||||
|       checkerboard(_checkerboard) | ||||
| 	{ | ||||
| 	}; | ||||
|    | ||||
|     void Orthogonalise(void){ | ||||
|       CoarseScalar InnerProd(CoarseGrid);  | ||||
|       std::cout << GridLogMessage <<" Gram-Schmidt pass 1"<<std::endl; | ||||
|       blockOrthogonalise(InnerProd,subspace); | ||||
|       std::cout << GridLogMessage <<" Gram-Schmidt pass 2"<<std::endl; | ||||
|       blockOrthogonalise(InnerProd,subspace); | ||||
|       //      std::cout << GridLogMessage <<" Gram-Schmidt checking orthogonality"<<std::endl; | ||||
|       //      CheckOrthogonal(); | ||||
|     }  | ||||
|     void CheckOrthogonal(void){ | ||||
|       CoarseVector iProj(CoarseGrid);  | ||||
|       CoarseVector eProj(CoarseGrid);  | ||||
|       Lattice<CComplex> pokey(CoarseGrid); | ||||
|  | ||||
|        | ||||
|       for(int i=0;i<nbasis;i++){ | ||||
| 	blockProject(iProj,subspace[i],subspace); | ||||
|  | ||||
| 	eProj=zero;  | ||||
| 	for(int ss=0;ss<CoarseGrid->oSites();ss++){ | ||||
| 	parallel_for(int ss=0;ss<CoarseGrid->oSites();ss++){ | ||||
| 	  eProj._odata[ss](i)=CComplex(1.0); | ||||
| 	} | ||||
| 	eProj=eProj - iProj; | ||||
| @@ -137,6 +140,7 @@ namespace Grid { | ||||
|       blockProject(CoarseVec,FineVec,subspace); | ||||
|     } | ||||
|     void PromoteFromSubspace(const CoarseVector &CoarseVec,FineField &FineVec){ | ||||
|       FineVec.checkerboard = subspace[0].checkerboard; | ||||
|       blockPromote(CoarseVec,FineVec,subspace); | ||||
|     } | ||||
|     void CreateSubspaceRandom(GridParallelRNG &RNG){ | ||||
| @@ -147,6 +151,7 @@ namespace Grid { | ||||
|       Orthogonalise(); | ||||
|     } | ||||
|  | ||||
|     /* | ||||
|     virtual void CreateSubspaceLanczos(GridParallelRNG  &RNG,LinearOperatorBase<FineField> &hermop,int nn=nbasis)  | ||||
|     { | ||||
|       // Run a Lanczos with sloppy convergence | ||||
| @@ -195,7 +200,7 @@ namespace Grid { | ||||
| 	  std::cout << GridLogMessage <<"subspace["<<b<<"] = "<<norm2(subspace[b])<<std::endl; | ||||
| 	} | ||||
|     } | ||||
|  | ||||
|     */ | ||||
|     virtual void CreateSubspace(GridParallelRNG  &RNG,LinearOperatorBase<FineField> &hermop,int nn=nbasis) { | ||||
|  | ||||
|       RealD scale; | ||||
|   | ||||
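The Aggregation changes above record which checkerboard the subspace vectors live on and propagate it in PromoteFromSubspace. A minimal usage sketch for a red-black setup follows; the template arguments, the grids (Coarse5d, FrbGrid), the RNG and the value of nbasis are illustrative assumptions, not taken from this patch:

    const int nbasis = 20;                                      // assumed basis size
    typedef Aggregation<vSpinColourVector, vTComplex, nbasis> Subspace;

    Subspace Aggregates(Coarse5d, FrbGrid, Odd);   // checkerboard stored with the basis
    Aggregates.CreateSubspaceRandom(RNG5);         // random basis, orthogonalised internally

    Subspace::CoarseVector coarse(Coarse5d);
    Subspace::FineField    fine(FrbGrid);
    Aggregates.PromoteFromSubspace(coarse, fine);  // fine.checkerboard now follows the subspace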
| @@ -230,6 +230,7 @@ namespace Grid { | ||||
|       // Barrel shift and collect global pencil | ||||
|       std::vector<int> lcoor(Nd), gcoor(Nd); | ||||
|       result = source; | ||||
|       int pc = processor_coor[dim]; | ||||
|       for(int p=0;p<processors[dim];p++) { | ||||
|         PARALLEL_REGION | ||||
|         { | ||||
| @@ -240,7 +241,8 @@ namespace Grid { | ||||
|           for(int idx=0;idx<sgrid->lSites();idx++) { | ||||
|             sgrid->LocalIndexToLocalCoor(idx,cbuf); | ||||
|             peekLocalSite(s,result,cbuf); | ||||
|             cbuf[dim]+=p*L; | ||||
| 	    cbuf[dim]+=((pc+p) % processors[dim])*L; | ||||
| 	    //            cbuf[dim]+=p*L; | ||||
|             pokeLocalSite(s,pgbuf,cbuf); | ||||
|           } | ||||
|         } | ||||
| @@ -278,7 +280,6 @@ namespace Grid { | ||||
|       flops+= flops_call*NN; | ||||
|        | ||||
|       // writing out result | ||||
|       int pc = processor_coor[dim]; | ||||
|       PARALLEL_REGION | ||||
|       { | ||||
|         std::vector<int> clbuf(Nd), cgbuf(Nd); | ||||
|   | ||||
| @@ -162,15 +162,10 @@ namespace Grid { | ||||
| 	_Mat.M(in,out); | ||||
|       } | ||||
|       void HermOpAndNorm(const Field &in, Field &out,RealD &n1,RealD &n2){ | ||||
| 	ComplexD dot; | ||||
|  | ||||
| 	_Mat.M(in,out); | ||||
| 	 | ||||
| 	dot= innerProduct(in,out); | ||||
| 	n1=real(dot); | ||||
|  | ||||
| 	dot = innerProduct(out,out); | ||||
| 	n2=real(dot); | ||||
| 	ComplexD dot= innerProduct(in,out); n1=real(dot); | ||||
| 	n2=norm2(out); | ||||
|       } | ||||
|       void HermOp(const Field &in, Field &out){ | ||||
| 	_Mat.M(in,out); | ||||
| @@ -192,10 +187,10 @@ namespace Grid { | ||||
| 	ni=Mpc(in,tmp); | ||||
| 	no=MpcDag(tmp,out); | ||||
|       } | ||||
|       void HermOpAndNorm(const Field &in, Field &out,RealD &n1,RealD &n2){ | ||||
|       virtual void HermOpAndNorm(const Field &in, Field &out,RealD &n1,RealD &n2){ | ||||
| 	MpcDagMpc(in,out,n1,n2); | ||||
|       } | ||||
|       void HermOp(const Field &in, Field &out){ | ||||
|       virtual void HermOp(const Field &in, Field &out){ | ||||
| 	RealD n1,n2; | ||||
| 	HermOpAndNorm(in,out,n1,n2); | ||||
|       } | ||||
| @@ -212,7 +207,6 @@ namespace Grid { | ||||
|       void OpDir  (const Field &in, Field &out,int dir,int disp) { | ||||
| 	assert(0); | ||||
|       } | ||||
|  | ||||
|     }; | ||||
|     template<class Matrix,class Field> | ||||
|       class SchurDiagMooeeOperator :  public SchurOperatorBase<Field> { | ||||
| @@ -270,7 +264,6 @@ namespace Grid { | ||||
| 	return axpy_norm(out,-1.0,tmp,in); | ||||
|       } | ||||
|     }; | ||||
|  | ||||
|     template<class Matrix,class Field> | ||||
|       class SchurDiagTwoOperator :  public SchurOperatorBase<Field> { | ||||
|     protected: | ||||
| @@ -299,6 +292,45 @@ namespace Grid { | ||||
| 	return axpy_norm(out,-1.0,tmp,in); | ||||
|       } | ||||
|     }; | ||||
|     /////////////////////////////////////////////////////////////////////////////////////////////////// | ||||
|     // Left  handed Moo^-1 ; (Moo - Moe Mee^-1 Meo) psi = eta  -->  ( 1 - Moo^-1 Moe Mee^-1 Meo ) psi = Moo^-1 eta | ||||
|     // Right handed Moo^-1 ; (Moo - Moe Mee^-1 Meo) Moo^-1 Moo psi = eta  -->  ( 1 - Moe Mee^-1 Meo Moo^-1 ) phi = eta ; psi = Moo^-1 phi | ||||
|     /////////////////////////////////////////////////////////////////////////////////////////////////// | ||||
|     template<class Matrix,class Field> using SchurDiagOneRH = SchurDiagTwoOperator<Matrix,Field> ; | ||||
|     template<class Matrix,class Field> using SchurDiagOneLH = SchurDiagOneOperator<Matrix,Field> ; | ||||
|     /////////////////////////////////////////////////////////////////////////////////////////////////// | ||||
|     //  Staggered use | ||||
|     /////////////////////////////////////////////////////////////////////////////////////////////////// | ||||
|     template<class Matrix,class Field> | ||||
|       class SchurStaggeredOperator :  public SchurOperatorBase<Field> { | ||||
|     protected: | ||||
|       Matrix &_Mat; | ||||
|     public: | ||||
|       SchurStaggeredOperator (Matrix &Mat): _Mat(Mat){}; | ||||
|       virtual void HermOpAndNorm(const Field &in, Field &out,RealD &n1,RealD &n2){ | ||||
| 	n2 = Mpc(in,out); | ||||
| 	ComplexD dot= innerProduct(in,out); | ||||
| 	n1 = real(dot); | ||||
|       } | ||||
|       virtual void HermOp(const Field &in, Field &out){ | ||||
| 	Mpc(in,out); | ||||
|       } | ||||
|       virtual  RealD Mpc      (const Field &in, Field &out) { | ||||
| 	Field tmp(in._grid); | ||||
| 	_Mat.Meooe(in,tmp); | ||||
| 	_Mat.MooeeInv(tmp,out); | ||||
| 	_Mat.Meooe(out,tmp); | ||||
| 	_Mat.Mooee(in,out); | ||||
|         return axpy_norm(out,-1.0,tmp,out); | ||||
|       } | ||||
|       virtual  RealD MpcDag   (const Field &in, Field &out){ | ||||
| 	return Mpc(in,out); | ||||
|       } | ||||
|       virtual void MpcDagMpc(const Field &in, Field &out,RealD &ni,RealD &no) { | ||||
| 	assert(0);// Never need with staggered | ||||
|       } | ||||
|     }; | ||||
|     template<class Matrix,class Field> using SchurStagOperator = SchurStaggeredOperator<Matrix,Field>; | ||||
|  | ||||
|  | ||||
|     ///////////////////////////////////////////////////////////// | ||||
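Reading SchurStaggeredOperator::Mpc above, the preconditioned operator applied to a single-checkerboard field is

\[
\widehat M\,\psi \;=\; M_{oo}\,\psi \;-\; M_{oe}\,M_{ee}^{-1}\,M_{eo}\,\psi ,
\]

and HermOpAndNorm returns \(n_1=\mathrm{Re}\,\langle\psi,\widehat M\psi\rangle\) together with \(n_2=\|\widehat M\psi\|^2\) from the axpy_norm. MpcDag simply forwards to Mpc, i.e. the staggered \(\widehat M\) is treated as Hermitian, which is why MpcDagMpc is asserted never to be needed. The SchurDiagOneRH/LH aliases correspond to the two factorisations quoted in the comment: left preconditioning solves \((1-M_{oo}^{-1}M_{oe}M_{ee}^{-1}M_{eo})\,\psi=M_{oo}^{-1}\eta\), right preconditioning solves \((1-M_{oe}M_{ee}^{-1}M_{eo}M_{oo}^{-1})\,\phi=\eta\) and recovers \(\psi=M_{oo}^{-1}\phi\).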
| @@ -314,6 +346,14 @@ namespace Grid { | ||||
|       virtual void operator() (const Field &in, Field &out) = 0; | ||||
|     }; | ||||
|  | ||||
|     template<class Field> class IdentityLinearFunction : public LinearFunction<Field> { | ||||
|     public: | ||||
|       void operator() (const Field &in, Field &out){ | ||||
| 	out = in; | ||||
|       }; | ||||
|     }; | ||||
|  | ||||
|  | ||||
|     ///////////////////////////////////////////////////////////// | ||||
|     // Base classes for Multishift solvers for operators | ||||
|     ///////////////////////////////////////////////////////////// | ||||
| @@ -336,6 +376,64 @@ namespace Grid { | ||||
|      }; | ||||
|     */ | ||||
|  | ||||
|   //////////////////////////////////////////////////////////////////////////////////////////// | ||||
|   // Hermitian operator Linear function and operator function | ||||
|   //////////////////////////////////////////////////////////////////////////////////////////// | ||||
|     template<class Field> | ||||
|       class HermOpOperatorFunction : public OperatorFunction<Field> { | ||||
|       void operator() (LinearOperatorBase<Field> &Linop, const Field &in, Field &out) { | ||||
| 	Linop.HermOp(in,out); | ||||
|       }; | ||||
|     }; | ||||
|  | ||||
|     template<typename Field> | ||||
|       class PlainHermOp : public LinearFunction<Field> { | ||||
|     public: | ||||
|       LinearOperatorBase<Field> &_Linop; | ||||
|        | ||||
|       PlainHermOp(LinearOperatorBase<Field>& linop) : _Linop(linop)  | ||||
|       {} | ||||
|        | ||||
|       void operator()(const Field& in, Field& out) { | ||||
| 	_Linop.HermOp(in,out); | ||||
|       } | ||||
|     }; | ||||
|  | ||||
|     template<typename Field> | ||||
|     class FunctionHermOp : public LinearFunction<Field> { | ||||
|     public: | ||||
|       OperatorFunction<Field>   & _poly; | ||||
|       LinearOperatorBase<Field> &_Linop; | ||||
|        | ||||
|       FunctionHermOp(OperatorFunction<Field> & poly,LinearOperatorBase<Field>& linop)  | ||||
| 	: _poly(poly), _Linop(linop) {}; | ||||
|        | ||||
|       void operator()(const Field& in, Field& out) { | ||||
| 	_poly(_Linop,in,out); | ||||
|       } | ||||
|     }; | ||||
|  | ||||
|   template<class Field> | ||||
|   class Polynomial : public OperatorFunction<Field> { | ||||
|   private: | ||||
|     std::vector<RealD> Coeffs; | ||||
|   public: | ||||
|     Polynomial(std::vector<RealD> &_Coeffs) : Coeffs(_Coeffs) { }; | ||||
|  | ||||
|     // Implement the required interface | ||||
|     void operator() (LinearOperatorBase<Field> &Linop, const Field &in, Field &out) { | ||||
|  | ||||
|       Field AtoN(in._grid); | ||||
|       Field Mtmp(in._grid); | ||||
|       AtoN = in; | ||||
|       out = AtoN*Coeffs[0]; | ||||
|       for(int n=1;n<Coeffs.size();n++){ | ||||
| 	Mtmp = AtoN; | ||||
| 	Linop.HermOp(Mtmp,AtoN); | ||||
| 	out=out+AtoN*Coeffs[n]; | ||||
|       } | ||||
|     }; | ||||
|   }; | ||||
|  | ||||
| } | ||||
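PlainHermOp, FunctionHermOp and Polynomial above are thin adaptors between LinearOperatorBase, LinearFunction and OperatorFunction. In particular Polynomial::operator() accumulates, with one Linop.HermOp application per loop iteration,

\[
\mathrm{out}\;=\;\sum_{n=0}^{N-1} c_n\,A^{\,n}\,\mathrm{in},
\]

where \(A\) is the operator applied by Linop.HermOp, \(c_n\) are the supplied Coeffs and AtoN carries the running power \(A^{\,n}\,\mathrm{in}\).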
|  | ||||
|   | ||||
| @@ -8,6 +8,7 @@ | ||||
|  | ||||
| Author: Peter Boyle <paboyle@ph.ed.ac.uk> | ||||
| Author: paboyle <paboyle@ph.ed.ac.uk> | ||||
| Author: Christoph Lehner <clehner@bnl.gov> | ||||
|  | ||||
|     This program is free software; you can redistribute it and/or modify | ||||
|     it under the terms of the GNU General Public License as published by | ||||
| @@ -33,41 +34,12 @@ Author: paboyle <paboyle@ph.ed.ac.uk> | ||||
|  | ||||
| namespace Grid { | ||||
|  | ||||
|   //////////////////////////////////////////////////////////////////////////////////////////// | ||||
|   // Simple general polynomial with user supplied coefficients | ||||
|   //////////////////////////////////////////////////////////////////////////////////////////// | ||||
|   template<class Field> | ||||
|   class HermOpOperatorFunction : public OperatorFunction<Field> { | ||||
|     void operator() (LinearOperatorBase<Field> &Linop, const Field &in, Field &out) { | ||||
|       Linop.HermOp(in,out); | ||||
|     }; | ||||
|   }; | ||||
|  | ||||
|   template<class Field> | ||||
|   class Polynomial : public OperatorFunction<Field> { | ||||
|   private: | ||||
|     std::vector<RealD> Coeffs; | ||||
|   public: | ||||
|     Polynomial(std::vector<RealD> &_Coeffs) : Coeffs(_Coeffs) { }; | ||||
|  | ||||
|     // Implement the required interface | ||||
|     void operator() (LinearOperatorBase<Field> &Linop, const Field &in, Field &out) { | ||||
|  | ||||
|       Field AtoN(in._grid); | ||||
|       Field Mtmp(in._grid); | ||||
|       AtoN = in; | ||||
|       out = AtoN*Coeffs[0]; | ||||
| //            std::cout <<"Poly in " <<norm2(in)<<" size "<< Coeffs.size()<<std::endl; | ||||
| //            std::cout <<"Coeffs[0]= "<<Coeffs[0]<< " 0 " <<norm2(out)<<std::endl; | ||||
|       for(int n=1;n<Coeffs.size();n++){ | ||||
| 	Mtmp = AtoN; | ||||
| 	Linop.HermOp(Mtmp,AtoN); | ||||
| 	out=out+AtoN*Coeffs[n]; | ||||
| //            std::cout <<"Coeffs "<<n<<"= "<< Coeffs[n]<< " 0 " <<std::endl; | ||||
| //		std::cout << n<<" " <<norm2(out)<<std::endl; | ||||
|       } | ||||
|     }; | ||||
|   }; | ||||
| struct ChebyParams : Serializable { | ||||
|   GRID_SERIALIZABLE_CLASS_MEMBERS(ChebyParams, | ||||
| 				  RealD, alpha,   | ||||
| 				  RealD, beta,    | ||||
| 				  int, Npoly); | ||||
| }; | ||||
|  | ||||
|   //////////////////////////////////////////////////////////////////////////////////////////// | ||||
|   // Generic Chebyshev approximations | ||||
| @@ -83,7 +55,9 @@ namespace Grid { | ||||
|   public: | ||||
|     void csv(std::ostream &out){ | ||||
|       RealD diff = hi-lo; | ||||
|       for (RealD x=lo-0.2*diff; x<hi+0.2*diff; x+=(hi-lo)/1000) { | ||||
|       RealD delta = (hi-lo)*1.0e-9; | ||||
|       for (RealD x=lo; x<hi; x+=delta) { | ||||
| 	delta*=1.1; | ||||
| 	RealD f = approx(x); | ||||
| 	out<< x<<" "<<f<<std::endl; | ||||
|       } | ||||
| @@ -99,6 +73,7 @@ namespace Grid { | ||||
|     }; | ||||
|  | ||||
|     Chebyshev(){}; | ||||
|     Chebyshev(ChebyParams p){ Init(p.alpha,p.beta,p.Npoly);}; | ||||
|     Chebyshev(RealD _lo,RealD _hi,int _order, RealD (* func)(RealD) ) {Init(_lo,_hi,_order,func);}; | ||||
|     Chebyshev(RealD _lo,RealD _hi,int _order) {Init(_lo,_hi,_order);}; | ||||
|  | ||||
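The new ChebyParams struct and the Chebyshev(ChebyParams) constructor make the filter window serialisable. A small sketch of the intended use; the field type and the numerical values are assumptions, only the member names and the mapping onto Init(alpha, beta, Npoly) come from the patch:

    ChebyParams p;
    p.alpha = 0.5;     // maps to the lower end of the window (lo); assumed value
    p.beta  = 60.0;    // maps to the upper end (hi); assumed value
    p.Npoly = 128;     // polynomial order

    Chebyshev<LatticeFermion> Cheb(p);   // same as Chebyshev<LatticeFermion>(0.5, 60.0, 128)

Since ChebyParams is declared with GRID_SERIALIZABLE_CLASS_MEMBERS, the same three numbers can equally be read from a serialised parameter file instead of being set by hand.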
| @@ -193,6 +168,47 @@ namespace Grid { | ||||
|       return sum; | ||||
|     }; | ||||
|  | ||||
|     RealD approxD(RealD x) | ||||
|     { | ||||
|       RealD Un; | ||||
|       RealD Unm; | ||||
|       RealD Unp; | ||||
|        | ||||
|       RealD y=( x-0.5*(hi+lo))/(0.5*(hi-lo)); | ||||
|        | ||||
|       RealD U0=1; | ||||
|       RealD U1=2*y; | ||||
|        | ||||
|       RealD sum; | ||||
|       sum = Coeffs[1]*U0; | ||||
|       sum+= Coeffs[2]*U1*2.0; | ||||
|        | ||||
|       Un =U1; | ||||
|       Unm=U0; | ||||
|       for(int i=2;i<order-1;i++){ | ||||
| 	Unp=2*y*Un-Unm; | ||||
| 	Unm=Un; | ||||
| 	Un =Unp; | ||||
| 	sum+= Un*Coeffs[i+1]*(i+1.0); | ||||
|       } | ||||
|       return sum/(0.5*(hi-lo)); | ||||
|     }; | ||||
|      | ||||
|     RealD approxInv(RealD z, RealD x0, int maxiter, RealD resid) { | ||||
|       RealD x = x0; | ||||
|       RealD eps; | ||||
|        | ||||
|       int i; | ||||
|       for (i=0;i<maxiter;i++) { | ||||
| 	eps = approx(x) - z; | ||||
| 	if (fabs(eps / z) < resid) | ||||
| 	  return x; | ||||
| 	x = x - eps / approxD(x); | ||||
|       } | ||||
|        | ||||
|       return std::numeric_limits<double>::quiet_NaN(); | ||||
|     } | ||||
|      | ||||
|     // Implement the required interface | ||||
|     void operator() (LinearOperatorBase<Field> &Linop, const Field &in, Field &out) { | ||||
|  | ||||
|   | ||||
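approxD and approxInv above add a derivative and a pointwise functional inverse to the Chebyshev class. With \(y=(x-\tfrac12(hi+lo))/(\tfrac12(hi-lo))\) and the identity \(T_n'(y)=n\,U_{n-1}(y)\), the Un/Unm/Unp recursion in approxD evaluates

\[
f'(x)\;=\;\frac{2}{hi-lo}\sum_{n\ge 1} c_n\,n\,U_{n-1}(y),
\]

using the second-kind recurrence \(U_{n+1}=2y\,U_n-U_{n-1}\). approxInv then solves \(f(x)=z\) by Newton iteration, \(x_{k+1}=x_k-\big(f(x_k)-z\big)/f'(x_k)\), starting from \(x_0\) and returning NaN when the relative residual is not reached within maxiter steps.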
							
								
								
									
152    lib/algorithms/approx/Forecast.h    Normal file
							| @@ -0,0 +1,152 @@ | ||||
| /************************************************************************************* | ||||
|  | ||||
| Grid physics library, www.github.com/paboyle/Grid | ||||
|  | ||||
| Source file: ./lib/algorithms/approx/Forecast.h | ||||
|  | ||||
| Copyright (C) 2015 | ||||
|  | ||||
| Author: Peter Boyle <paboyle@ph.ed.ac.uk> | ||||
| Author: paboyle <paboyle@ph.ed.ac.uk> | ||||
| Author: David Murphy <dmurphy@phys.columbia.edu> | ||||
|  | ||||
| This program is free software; you can redistribute it and/or modify | ||||
| it under the terms of the GNU General Public License as published by | ||||
| the Free Software Foundation; either version 2 of the License, or | ||||
| (at your option) any later version. | ||||
|  | ||||
| This program is distributed in the hope that it will be useful, | ||||
| but WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
| MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the | ||||
| GNU General Public License for more details. | ||||
|  | ||||
| You should have received a copy of the GNU General Public License along | ||||
| with this program; if not, write to the Free Software Foundation, Inc., | ||||
| 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||||
|  | ||||
| See the full license in the file "LICENSE" in the top level distribution directory | ||||
| *************************************************************************************/ | ||||
| /*  END LEGAL */ | ||||
|  | ||||
| #ifndef INCLUDED_FORECAST_H | ||||
| #define INCLUDED_FORECAST_H | ||||
|  | ||||
| namespace Grid { | ||||
|  | ||||
|   // Abstract base class. | ||||
|   // Takes a matrix (Mat), a source (phi), and a vector of Fields (chi) | ||||
|   // and returns a forecasted solution to the system D*psi = phi (psi). | ||||
|   template<class Matrix, class Field> | ||||
|   class Forecast | ||||
|   { | ||||
|     public: | ||||
|       virtual Field operator()(Matrix &Mat, const Field& phi, const std::vector<Field>& chi) = 0; | ||||
|   }; | ||||
|  | ||||
|   // Implementation of Brower et al.'s chronological inverter (arXiv:hep-lat/9509012), | ||||
|   // used to forecast solutions across poles of the EOFA heatbath. | ||||
|   // | ||||
|   // Modified from CPS (cps_pp/src/util/dirac_op/d_op_base/comsrc/minresext.C) | ||||
|   template<class Matrix, class Field> | ||||
|   class ChronoForecast : public Forecast<Matrix,Field> | ||||
|   { | ||||
|     public: | ||||
|       Field operator()(Matrix &Mat, const Field& phi, const std::vector<Field>& prev_solns) | ||||
|       { | ||||
|         int degree = prev_solns.size(); | ||||
|         Field chi(phi); // forecasted solution | ||||
|  | ||||
|         // Trivial cases | ||||
|         if(degree == 0){ chi = zero; return chi; } | ||||
|         else if(degree == 1){ return prev_solns[0]; } | ||||
|  | ||||
|         RealD dot; | ||||
|         ComplexD xp; | ||||
|         Field r(phi); // residual | ||||
|         Field Mv(phi); | ||||
|         std::vector<Field> v(prev_solns); // orthonormalized previous solutions | ||||
|         std::vector<Field> MdagMv(degree,phi); | ||||
|  | ||||
|         // Array to hold the matrix elements | ||||
|         std::vector<std::vector<ComplexD>> G(degree, std::vector<ComplexD>(degree)); | ||||
|  | ||||
|         // Solution and source vectors | ||||
|         std::vector<ComplexD> a(degree); | ||||
|         std::vector<ComplexD> b(degree); | ||||
|  | ||||
|         // Orthonormalize the vector basis | ||||
|         for(int i=0; i<degree; i++){ | ||||
|           v[i] *= 1.0/std::sqrt(norm2(v[i])); | ||||
|           for(int j=i+1; j<degree; j++){ v[j] -= innerProduct(v[i],v[j]) * v[i]; } | ||||
|         } | ||||
|  | ||||
|         // Perform sparse matrix multiplication and construct rhs | ||||
|         for(int i=0; i<degree; i++){ | ||||
|           b[i] = innerProduct(v[i],phi); | ||||
|           Mat.M(v[i],Mv); | ||||
|           Mat.Mdag(Mv,MdagMv[i]); | ||||
|           G[i][i] = innerProduct(v[i],MdagMv[i]); | ||||
|         } | ||||
|  | ||||
|         // Construct the matrix | ||||
|         for(int j=0; j<degree; j++){ | ||||
|         for(int k=j+1; k<degree; k++){ | ||||
|           G[j][k] = innerProduct(v[j],MdagMv[k]); | ||||
|           G[k][j] = std::conj(G[j][k]); | ||||
|         }} | ||||
|  | ||||
|         // Gauss-Jordan elimination with partial pivoting | ||||
|         for(int i=0; i<degree; i++){ | ||||
|  | ||||
|           // Perform partial pivoting | ||||
|           int k = i; | ||||
|           for(int j=i+1; j<degree; j++){ if(std::abs(G[j][j]) > std::abs(G[k][k])){ k = j; } } | ||||
|           if(k != i){ | ||||
|             xp = b[k]; | ||||
|             b[k] = b[i]; | ||||
|             b[i] = xp; | ||||
|             for(int j=0; j<degree; j++){ | ||||
|               xp = G[k][j]; | ||||
|               G[k][j] = G[i][j]; | ||||
|               G[i][j] = xp; | ||||
|             } | ||||
|           } | ||||
|  | ||||
|           // Convert matrix to upper triangular form | ||||
|           for(int j=i+1; j<degree; j++){ | ||||
|             xp = G[j][i]/G[i][i]; | ||||
|             b[j] -= xp * b[i]; | ||||
|             for(int k=0; k<degree; k++){ G[j][k] -= xp*G[i][k]; } | ||||
|           } | ||||
|         } | ||||
|  | ||||
|         // Use Gaussian elimination to solve equations and calculate initial guess | ||||
|         chi = zero; | ||||
|         r = phi; | ||||
|         for(int i=degree-1; i>=0; i--){ | ||||
|           a[i] = 0.0; | ||||
|           for(int j=i+1; j<degree; j++){ a[i] += G[i][j] * a[j]; } | ||||
|           a[i] = (b[i]-a[i])/G[i][i]; | ||||
|           chi += a[i]*v[i]; | ||||
|           r -= a[i]*MdagMv[i]; | ||||
|         } | ||||
|  | ||||
|         RealD true_r(0.0); | ||||
|         ComplexD tmp; | ||||
|         for(int i=0; i<degree; i++){ | ||||
|           tmp = -b[i]; | ||||
|           for(int j=0; j<degree; j++){ tmp += G[i][j]*a[j]; } | ||||
|           tmp = std::conj(tmp)*tmp; | ||||
|           true_r += std::sqrt(tmp.real()); | ||||
|         } | ||||
|  | ||||
|         RealD error = std::sqrt(norm2(r)/norm2(phi)); | ||||
|         std::cout << GridLogMessage << "ChronoForecast: |res|/|src| = " << error << std::endl; | ||||
|  | ||||
|         return chi; | ||||
|       }; | ||||
|   }; | ||||
|  | ||||
| } | ||||
|  | ||||
| #endif | ||||
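ChronoForecast above is the minimal-residual extrapolation of Brower et al.: the previous solutions are Gram-Schmidt orthonormalised into a basis \(v_i\), and the guess \(\chi=\sum_i a_i v_i\) is fixed by requiring the residual \(r=\phi-M^\dagger M\,\chi\) to be orthogonal to every \(v_i\), i.e. by solving the small dense system

\[
\sum_j G_{ij}\,a_j=b_i,\qquad G_{ij}=\langle v_i,\,M^\dagger M\,v_j\rangle,\qquad b_i=\langle v_i,\,\phi\rangle,
\]

by Gauss-Jordan elimination with partial pivoting. The figure printed at the end, \(\sqrt{\mathrm{norm2}(r)/\mathrm{norm2}(\phi)}\), is the relative residual of this normal-equation system and measures the quality of the starting guess handed back to the solver.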
| @@ -1,137 +0,0 @@ | ||||
|     /************************************************************************************* | ||||
|  | ||||
|     Grid physics library, www.github.com/paboyle/Grid  | ||||
|  | ||||
|     Source file: ./lib/algorithms/iterative/DenseMatrix.h | ||||
|  | ||||
|     Copyright (C) 2015 | ||||
|  | ||||
| Author: Peter Boyle <paboyle@ph.ed.ac.uk> | ||||
| Author: paboyle <paboyle@ph.ed.ac.uk> | ||||
|  | ||||
|     This program is free software; you can redistribute it and/or modify | ||||
|     it under the terms of the GNU General Public License as published by | ||||
|     the Free Software Foundation; either version 2 of the License, or | ||||
|     (at your option) any later version. | ||||
|  | ||||
|     This program is distributed in the hope that it will be useful, | ||||
|     but WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
|     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the | ||||
|     GNU General Public License for more details. | ||||
|  | ||||
|     You should have received a copy of the GNU General Public License along | ||||
|     with this program; if not, write to the Free Software Foundation, Inc., | ||||
|     51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||||
|  | ||||
|     See the full license in the file "LICENSE" in the top level distribution directory | ||||
|     *************************************************************************************/ | ||||
|     /*  END LEGAL */ | ||||
| #ifndef GRID_DENSE_MATRIX_H | ||||
| #define GRID_DENSE_MATRIX_H | ||||
|  | ||||
| namespace Grid { | ||||
|     ///////////////////////////////////////////////////////////// | ||||
|     // Matrix utils | ||||
|     ///////////////////////////////////////////////////////////// | ||||
|  | ||||
| template<class T> using DenseVector = std::vector<T>; | ||||
| template<class T> using DenseMatrix = DenseVector<DenseVector<T> >; | ||||
|  | ||||
| template<class T> void Size(DenseVector<T> & vec, int &N)  | ||||
| {  | ||||
|   N= vec.size(); | ||||
| } | ||||
| template<class T> void Size(DenseMatrix<T> & mat, int &N,int &M)  | ||||
| {  | ||||
|   N= mat.size(); | ||||
|   M= mat[0].size(); | ||||
| } | ||||
|  | ||||
| template<class T> void SizeSquare(DenseMatrix<T> & mat, int &N)  | ||||
| {  | ||||
|   int M; Size(mat,N,M); | ||||
|   assert(N==M); | ||||
| } | ||||
|  | ||||
| template<class T> void Resize(DenseVector<T > & mat, int N) {  | ||||
|   mat.resize(N); | ||||
| } | ||||
| template<class T> void Resize(DenseMatrix<T > & mat, int N, int M) {  | ||||
|   mat.resize(N); | ||||
|   for(int i=0;i<N;i++){ | ||||
|     mat[i].resize(M); | ||||
|   } | ||||
| } | ||||
| template<class T> void Fill(DenseMatrix<T> & mat, T&val) {  | ||||
|   int N,M; | ||||
|   Size(mat,N,M); | ||||
|   for(int i=0;i<N;i++){ | ||||
|   for(int j=0;j<M;j++){ | ||||
|     mat[i][j] = val; | ||||
|   }} | ||||
| } | ||||
|  | ||||
| /** Transpose of a matrix **/ | ||||
| template<class T> DenseMatrix<T> Transpose(DenseMatrix<T> & mat){ | ||||
|   int N,M; | ||||
|   Size(mat,N,M); | ||||
|   DenseMatrix<T> C; Resize(C,M,N); | ||||
|   for(int i=0;i<M;i++){ | ||||
|   for(int j=0;j<N;j++){ | ||||
|     C[i][j] = mat[j][i]; | ||||
|   }}  | ||||
|   return C; | ||||
| } | ||||
| /** Set DenseMatrix to unit matrix **/ | ||||
| template<class T> void Unity(DenseMatrix<T> &A){ | ||||
|   int N;  SizeSquare(A,N); | ||||
|   for(int i=0;i<N;i++){ | ||||
|     for(int j=0;j<N;j++){ | ||||
|       if ( i==j ) A[i][j] = 1; | ||||
|       else        A[i][j] = 0; | ||||
|     }  | ||||
|   }  | ||||
| } | ||||
|  | ||||
| /** Add C * I to matrix **/ | ||||
| template<class T> | ||||
| void PlusUnit(DenseMatrix<T> & A,T c){ | ||||
|   int dim;  SizeSquare(A,dim); | ||||
|   for(int i=0;i<dim;i++){A[i][i] = A[i][i] + c;}  | ||||
| } | ||||
|  | ||||
| /** return the Hermitian conjugate of matrix **/ | ||||
| template<class T> | ||||
| DenseMatrix<T> HermitianConj(DenseMatrix<T> &mat){ | ||||
|  | ||||
|   int dim; SizeSquare(mat,dim); | ||||
|  | ||||
|   DenseMatrix<T> C; Resize(C,dim,dim); | ||||
|  | ||||
|   for(int i=0;i<dim;i++){ | ||||
|     for(int j=0;j<dim;j++){ | ||||
|       C[i][j] = conj(mat[j][i]); | ||||
|     }  | ||||
|   }  | ||||
|   return C; | ||||
| } | ||||
| /**Get a square submatrix**/ | ||||
| template <class T> | ||||
| DenseMatrix<T> GetSubMtx(DenseMatrix<T> &A,int row_st, int row_end, int col_st, int col_end) | ||||
| { | ||||
|   DenseMatrix<T> H; Resize(H,row_end - row_st,col_end-col_st); | ||||
|  | ||||
|   for(int i = row_st; i<row_end; i++){ | ||||
|   for(int j = col_st; j<col_end; j++){ | ||||
|     H[i-row_st][j-col_st]=A[i][j]; | ||||
|   }} | ||||
|   return H; | ||||
| } | ||||
|  | ||||
| } | ||||
|  | ||||
| #include "Householder.h" | ||||
| #include "Francis.h" | ||||
|  | ||||
| #endif | ||||
|  | ||||
| @@ -1,525 +0,0 @@ | ||||
|     /************************************************************************************* | ||||
|  | ||||
|     Grid physics library, www.github.com/paboyle/Grid  | ||||
|  | ||||
|     Source file: ./lib/algorithms/iterative/Francis.h | ||||
|  | ||||
|     Copyright (C) 2015 | ||||
|  | ||||
| Author: Peter Boyle <paboyle@ph.ed.ac.uk> | ||||
|  | ||||
|     This program is free software; you can redistribute it and/or modify | ||||
|     it under the terms of the GNU General Public License as published by | ||||
|     the Free Software Foundation; either version 2 of the License, or | ||||
|     (at your option) any later version. | ||||
|  | ||||
|     This program is distributed in the hope that it will be useful, | ||||
|     but WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
|     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the | ||||
|     GNU General Public License for more details. | ||||
|  | ||||
|     You should have received a copy of the GNU General Public License along | ||||
|     with this program; if not, write to the Free Software Foundation, Inc., | ||||
|     51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||||
|  | ||||
|     See the full license in the file "LICENSE" in the top level distribution directory | ||||
|     *************************************************************************************/ | ||||
|     /*  END LEGAL */ | ||||
| #ifndef FRANCIS_H | ||||
| #define FRANCIS_H | ||||
|  | ||||
| #include <cstdlib> | ||||
| #include <string> | ||||
| #include <cmath> | ||||
| #include <iostream> | ||||
| #include <sstream> | ||||
| #include <stdexcept> | ||||
| #include <fstream> | ||||
| #include <complex> | ||||
| #include <algorithm> | ||||
|  | ||||
| //#include <timer.h> | ||||
| //#include <lapacke.h> | ||||
| //#include <Eigen/Dense> | ||||
|  | ||||
| namespace Grid { | ||||
|  | ||||
| template <class T> int SymmEigensystem(DenseMatrix<T > &Ain, DenseVector<T> &evals, DenseMatrix<T> &evecs, RealD small); | ||||
| template <class T> int     Eigensystem(DenseMatrix<T > &Ain, DenseVector<T> &evals, DenseMatrix<T> &evecs, RealD small); | ||||
|  | ||||
| /** | ||||
|   Find the eigenvalues of an upper hessenberg matrix using the Francis QR algorithm. | ||||
| H = | ||||
|       x  x  x  x  x  x  x  x  x | ||||
|       x  x  x  x  x  x  x  x  x | ||||
|       0  x  x  x  x  x  x  x  x | ||||
|       0  0  x  x  x  x  x  x  x | ||||
|       0  0  0  x  x  x  x  x  x | ||||
|       0  0  0  0  x  x  x  x  x | ||||
|       0  0  0  0  0  x  x  x  x | ||||
|       0  0  0  0  0  0  x  x  x | ||||
|       0  0  0  0  0  0  0  x  x | ||||
| Factorization is P T P^H where T is upper triangular (mod cc blocks) and P is orthogonal/unitary. | ||||
| **/ | ||||
| template <class T> | ||||
| int QReigensystem(DenseMatrix<T> &Hin, DenseVector<T> &evals, DenseMatrix<T> &evecs, RealD small) | ||||
| { | ||||
|   DenseMatrix<T> H = Hin;  | ||||
|  | ||||
|   int N ; SizeSquare(H,N); | ||||
|   int M = N; | ||||
|  | ||||
|   Fill(evals,0); | ||||
|   Fill(evecs,0); | ||||
|  | ||||
|   T s,t,x=0,y=0,z=0; | ||||
|   T u,d; | ||||
|   T apd,amd,bc; | ||||
|   DenseVector<T> p(N,0); | ||||
|   T nrm = Norm(H);    ///DenseMatrix Norm | ||||
|   int n, m; | ||||
|   int e = 0; | ||||
|   int it = 0; | ||||
|   int tot_it = 0; | ||||
|   int l = 0; | ||||
|   int r = 0; | ||||
|   DenseMatrix<T> P; Resize(P,N,N); Unity(P); | ||||
|   DenseVector<int> trows(N,0); | ||||
|  | ||||
|   /// Check if the matrix is really hessenberg, if not abort | ||||
|   RealD sth = 0; | ||||
|   for(int j=0;j<N;j++){ | ||||
|     for(int i=j+2;i<N;i++){ | ||||
|       sth = abs(H[i][j]); | ||||
|       if(sth > small){ | ||||
| 	std::cout << "Non hessenberg H = " << sth << " > " << small << std::endl; | ||||
| 	exit(1); | ||||
|       } | ||||
|     } | ||||
|   } | ||||
|  | ||||
|   do{ | ||||
|     std::cout << "Francis QR Step N = " << N << std::endl; | ||||
|     /** Check for convergence | ||||
|       x  x  x  x  x | ||||
|       0  x  x  x  x | ||||
|       0  0  x  x  x | ||||
|       0  0  x  x  x | ||||
|       0  0  0  0  x | ||||
|       for this matrix l = 4 | ||||
|      **/ | ||||
|     do{ | ||||
|       l = Chop_subdiag(H,nrm,e,small); | ||||
|       r = 0;    ///May have converged on more than one eval | ||||
|       ///Single eval | ||||
|       if(l == N-1){ | ||||
|         evals[e] = H[l][l]; | ||||
|         N--; e++; r++; it = 0; | ||||
|       } | ||||
|       ///RealD eval | ||||
|       if(l == N-2){ | ||||
|         trows[l+1] = 1;    ///Needed for UTSolve | ||||
|         apd = H[l][l] + H[l+1][l+1]; | ||||
|         amd = H[l][l] - H[l+1][l+1]; | ||||
|         bc =  (T)4.0*H[l+1][l]*H[l][l+1]; | ||||
|         evals[e]   = (T)0.5*( apd + sqrt(amd*amd + bc) ); | ||||
|         evals[e+1] = (T)0.5*( apd - sqrt(amd*amd + bc) ); | ||||
|         N-=2; e+=2; r++; it = 0; | ||||
|       } | ||||
|     } while(r>0); | ||||
|  | ||||
|     if(N ==0) break; | ||||
|  | ||||
|     DenseVector<T > ck; Resize(ck,3); | ||||
|     DenseVector<T> v;   Resize(v,3); | ||||
|  | ||||
|     for(int m = N-3; m >= l; m--){ | ||||
|       ///Starting vector essentially random shift. | ||||
|       if(it%10 == 0 && N >= 3 && it > 0){ | ||||
|         s = (T)1.618033989*( abs( H[N-1][N-2] ) + abs( H[N-2][N-3] ) ); | ||||
|         t = (T)0.618033989*( abs( H[N-1][N-2] ) + abs( H[N-2][N-3] ) ); | ||||
|         x = H[m][m]*H[m][m] + H[m][m+1]*H[m+1][m] - s*H[m][m] + t; | ||||
|         y = H[m+1][m]*(H[m][m] + H[m+1][m+1] - s); | ||||
|         z = H[m+1][m]*H[m+2][m+1]; | ||||
|       } | ||||
|       ///Starting vector implicit Q theorem | ||||
|       else{ | ||||
|         s = (H[N-2][N-2] + H[N-1][N-1]); | ||||
|         t = (H[N-2][N-2]*H[N-1][N-1] - H[N-2][N-1]*H[N-1][N-2]); | ||||
|         x = H[m][m]*H[m][m] + H[m][m+1]*H[m+1][m] - s*H[m][m] + t; | ||||
|         y = H[m+1][m]*(H[m][m] + H[m+1][m+1] - s); | ||||
|         z = H[m+1][m]*H[m+2][m+1]; | ||||
|       } | ||||
|       ck[0] = x; ck[1] = y; ck[2] = z; | ||||
|  | ||||
|       if(m == l) break; | ||||
|  | ||||
|       /** Some stupid thing from Numerical Recipes, seems to work**/ | ||||
|       // PAB.. for heaven's sake quote page, purpose, evidence it works. | ||||
|       //       what sort of comment is that!?!?!? | ||||
|       u=abs(H[m][m-1])*(abs(y)+abs(z)); | ||||
|       d=abs(x)*(abs(H[m-1][m-1])+abs(H[m][m])+abs(H[m+1][m+1])); | ||||
|       if ((T)abs(u+d) == (T)abs(d) ){ | ||||
| 	l = m; break; | ||||
|       } | ||||
|  | ||||
|       //if (u < small){l = m; break;} | ||||
|     } | ||||
|     if(it > 100000){ | ||||
|      std::cout << "QReigensystem: bugger it got stuck after 100000 iterations" << std::endl; | ||||
|      std::cout << "got " << e << " evals " << l << " " << N << std::endl; | ||||
|       exit(1); | ||||
|     } | ||||
|     normalize(ck);    ///Normalization cancels in PHP anyway | ||||
|     T beta; | ||||
|     Householder_vector<T >(ck, 0, 2, v, beta); | ||||
|     Householder_mult<T >(H,v,beta,0,l,l+2,0); | ||||
|     Householder_mult<T >(H,v,beta,0,l,l+2,1); | ||||
|     ///Accumulate eigenvector | ||||
|     Householder_mult<T >(P,v,beta,0,l,l+2,1); | ||||
|     int sw = 0;      ///Are we on the last row? | ||||
|     for(int k=l;k<N-2;k++){ | ||||
|       x = H[k+1][k]; | ||||
|       y = H[k+2][k]; | ||||
|       z = (T)0.0; | ||||
|       if(k+3 <= N-1){ | ||||
| 	z = H[k+3][k]; | ||||
|       } else{ | ||||
| 	sw = 1;  | ||||
| 	v[2] = (T)0.0; | ||||
|       } | ||||
|       ck[0] = x; ck[1] = y; ck[2] = z; | ||||
|       normalize(ck); | ||||
|       Householder_vector<T >(ck, 0, 2-sw, v, beta); | ||||
|       Householder_mult<T >(H,v, beta,0,k+1,k+3-sw,0); | ||||
|       Householder_mult<T >(H,v, beta,0,k+1,k+3-sw,1); | ||||
|       ///Accumulate eigenvector | ||||
|       Householder_mult<T >(P,v, beta,0,k+1,k+3-sw,1); | ||||
|     } | ||||
|     it++; | ||||
|     tot_it++; | ||||
|   }while(N > 1); | ||||
|   N = evals.size(); | ||||
|   ///Annoying - UT solves in reverse order; | ||||
|   DenseVector<T> tmp; Resize(tmp,N); | ||||
|   for(int i=0;i<N;i++){ | ||||
|     tmp[i] = evals[N-i-1]; | ||||
|   }  | ||||
|   evals = tmp; | ||||
|   UTeigenvectors(H, trows, evals, evecs); | ||||
|   for(int i=0;i<evals.size();i++){evecs[i] = P*evecs[i]; normalize(evecs[i]);} | ||||
|   return tot_it; | ||||
| } | ||||
|  | ||||
| template <class T> | ||||
| int my_Wilkinson(DenseMatrix<T> &Hin, DenseVector<T> &evals, DenseMatrix<T> &evecs, RealD small) | ||||
| { | ||||
|   /** | ||||
|   Find the eigenvalues of an upper Hessenberg matrix using the Wilkinson QR algorithm. | ||||
|   H = | ||||
|   x  x  0  0  0  0 | ||||
|   x  x  x  0  0  0 | ||||
|   0  x  x  x  0  0 | ||||
|   0  0  x  x  x  0 | ||||
|   0  0  0  x  x  x | ||||
|   0  0  0  0  x  x | ||||
|   Factorization is P T P^H where T is upper triangular (mod cc blocks) and P is orthogonal/unitary.  **/ | ||||
|   return my_Wilkinson(Hin, evals, evecs, small, small); | ||||
| } | ||||
|  | ||||
| template <class T> | ||||
| int my_Wilkinson(DenseMatrix<T> &Hin, DenseVector<T> &evals, DenseMatrix<T> &evecs, RealD small, RealD tol) | ||||
| { | ||||
|   int N; SizeSquare(Hin,N); | ||||
|   int M = N; | ||||
|  | ||||
|   ///I don't want to modify the input but matrices must be passed by reference | ||||
|   //Scale a matrix by its "norm" | ||||
|   //RealD Hnorm = abs( Hin.LargestDiag() ); H =  H*(1.0/Hnorm); | ||||
|   DenseMatrix<T> H;  H = Hin; | ||||
|    | ||||
|   RealD Hnorm = abs(Norm(Hin)); | ||||
|   H = H * (1.0 / Hnorm); | ||||
|  | ||||
|   // TODO use openmp and memset | ||||
|   Fill(evals,0); | ||||
|   Fill(evecs,0); | ||||
|  | ||||
|   T s, t, x = 0, y = 0, z = 0; | ||||
|   T u, d; | ||||
|   T apd, amd, bc; | ||||
|   DenseVector<T> p; Resize(p,N); Fill(p,0); | ||||
|  | ||||
|   T nrm = Norm(H);    ///DenseMatrix Norm | ||||
|   int n, m; | ||||
|   int e = 0; | ||||
|   int it = 0; | ||||
|   int tot_it = 0; | ||||
|   int l = 0; | ||||
|   int r = 0; | ||||
|   DenseMatrix<T> P; Resize(P,N,N); | ||||
|   Unity(P); | ||||
|   DenseVector<int> trows(N, 0); | ||||
|   /// Check if the matrix is really symm tridiag | ||||
|   RealD sth = 0; | ||||
|   for(int j = 0; j < N; ++j) | ||||
|   { | ||||
|     for(int i = j + 2; i < N; ++i) | ||||
|     { | ||||
|       if(abs(H[i][j]) > tol || abs(H[j][i]) > tol) | ||||
|       { | ||||
| 	std::cout << "Non Tridiagonal H(" << i << ","<< j << ") = |" << Real( real( H[j][i] ) ) << "| > " << tol << std::endl; | ||||
| 	std::cout << "Warning tridiagonalize and call again" << std::endl; | ||||
|         // exit(1); // see what is going on | ||||
|         //return; | ||||
|       } | ||||
|     } | ||||
|   } | ||||
|  | ||||
|   do{ | ||||
|     do{ | ||||
|       //Jasper | ||||
|       //Check if the subdiagonal term is small enough (<small) | ||||
|       //if true then it is converged. | ||||
|       //check start from H.dim - e - 1 | ||||
|       //How to deal with more than 2 are converged? | ||||
|       //What if Chop_symm_subdiag return something int the middle? | ||||
|       //-------------- | ||||
|       l = Chop_symm_subdiag(H,nrm, e, small); | ||||
|       r = 0;    ///May have converged on more than one eval | ||||
|       //Jasper | ||||
|       //In this case | ||||
|       // x  x  0  0  0  0 | ||||
|       // x  x  x  0  0  0 | ||||
|       // 0  x  x  x  0  0 | ||||
|       // 0  0  x  x  x  0 | ||||
|       // 0  0  0  x  x  0 | ||||
|       // 0  0  0  0  0  x  <- l | ||||
|       //-------------- | ||||
|       ///Single eval | ||||
|       if(l == N - 1) | ||||
|       { | ||||
|         evals[e] = H[l][l]; | ||||
|         N--; | ||||
|         e++; | ||||
|         r++; | ||||
|         it = 0; | ||||
|       } | ||||
|       //Jasper | ||||
|       // x  x  0  0  0  0 | ||||
|       // x  x  x  0  0  0 | ||||
|       // 0  x  x  x  0  0 | ||||
|       // 0  0  x  x  0  0 | ||||
|       // 0  0  0  0  x  x  <- l | ||||
|       // 0  0  0  0  x  x | ||||
|       //-------------- | ||||
|       ///RealD eval | ||||
|       if(l == N - 2) | ||||
|       { | ||||
|         trows[l + 1] = 1;    ///Needed for UTSolve | ||||
|         apd = H[l][l] + H[l + 1][ l + 1]; | ||||
|         amd = H[l][l] - H[l + 1][l + 1]; | ||||
|         bc =  (T) 4.0 * H[l + 1][l] * H[l][l + 1]; | ||||
|         evals[e] = (T) 0.5 * (apd + sqrt(amd * amd + bc)); | ||||
|         evals[e + 1] = (T) 0.5 * (apd - sqrt(amd * amd + bc)); | ||||
|         N -= 2; | ||||
|         e += 2; | ||||
|         r++; | ||||
|         it = 0; | ||||
|       } | ||||
|     }while(r > 0); | ||||
|     //Jasper | ||||
|     //Already converged | ||||
|     //-------------- | ||||
|     if(N == 0) break; | ||||
|  | ||||
|     DenseVector<T> ck,v; Resize(ck,2); Resize(v,2); | ||||
|  | ||||
|     for(int m = N - 3; m >= l; m--) | ||||
|     { | ||||
|       ///Starting vector essentially random shift. | ||||
|       if(it%10 == 0 && N >= 3 && it > 0) | ||||
|       { | ||||
|         t = abs(H[N - 1][N - 2]) + abs(H[N - 2][N - 3]); | ||||
|         x = H[m][m] - t; | ||||
|         z = H[m + 1][m]; | ||||
|       } else { | ||||
|       ///Starting vector implicit Q theorem | ||||
|         d = (H[N - 2][N - 2] - H[N - 1][N - 1]) * (T) 0.5; | ||||
|         t =  H[N - 1][N - 1] - H[N - 1][N - 2] * H[N - 1][N - 2]  | ||||
| 	  / (d + sign(d) * sqrt(d * d + H[N - 1][N - 2] * H[N - 1][N - 2])); | ||||
|         x = H[m][m] - t; | ||||
|         z = H[m + 1][m]; | ||||
|       } | ||||
|       //Jasper | ||||
|       //why it is here???? | ||||
|       //----------------------- | ||||
|       if(m == l) | ||||
|         break; | ||||
|  | ||||
|       u = abs(H[m][m - 1]) * (abs(y) + abs(z)); | ||||
|       d = abs(x) * (abs(H[m - 1][m - 1]) + abs(H[m][m]) + abs(H[m + 1][m + 1])); | ||||
|       if ((T)abs(u + d) == (T)abs(d)) | ||||
|       { | ||||
|         l = m; | ||||
|         break; | ||||
|       } | ||||
|     } | ||||
|     //Jasper | ||||
|     if(it > 1000000) | ||||
|     { | ||||
|       std::cout << "Wilkinson: bugger it got stuck after 100000 iterations" << std::endl; | ||||
|       std::cout << "got " << e << " evals " << l << " " << N << std::endl; | ||||
|       exit(1); | ||||
|     } | ||||
|     // | ||||
|     T s, c; | ||||
|     Givens_calc<T>(x, z, c, s); | ||||
|     Givens_mult<T>(H, l, l + 1, c, -s, 0); | ||||
|     Givens_mult<T>(H, l, l + 1, c,  s, 1); | ||||
|     Givens_mult<T>(P, l, l + 1, c,  s, 1); | ||||
|     // | ||||
|     for(int k = l; k < N - 2; ++k) | ||||
|     { | ||||
|       x = H.A[k + 1][k]; | ||||
|       z = H.A[k + 2][k]; | ||||
|       Givens_calc<T>(x, z, c, s); | ||||
|       Givens_mult<T>(H, k + 1, k + 2, c, -s, 0); | ||||
|       Givens_mult<T>(H, k + 1, k + 2, c,  s, 1); | ||||
|       Givens_mult<T>(P, k + 1, k + 2, c,  s, 1); | ||||
|     } | ||||
|     it++; | ||||
|     tot_it++; | ||||
|   }while(N > 1); | ||||
|  | ||||
|   N = evals.size(); | ||||
|   ///Annoying - UT solves in reverse order; | ||||
|   DenseVector<T> tmp(N); | ||||
|   for(int i = 0; i < N; ++i) | ||||
|     tmp[i] = evals[N-i-1]; | ||||
|   evals = tmp; | ||||
|   // | ||||
|   UTeigenvectors(H, trows, evals, evecs); | ||||
|   //UTSymmEigenvectors(H, trows, evals, evecs); | ||||
|   for(int i = 0; i < evals.size(); ++i) | ||||
|   { | ||||
|     evecs[i] = P * evecs[i]; | ||||
|     normalize(evecs[i]); | ||||
|     evals[i] = evals[i] * Hnorm; | ||||
|   } | ||||
|   // // FIXME this is to test | ||||
|   // Hin.write("evecs3", evecs); | ||||
|   // Hin.write("evals3", evals); | ||||
|   // // check rsd | ||||
|   // for(int i = 0; i < M; i++) { | ||||
|   //   vector<T> Aevec = Hin * evecs[i]; | ||||
|   //   RealD norm2(0.); | ||||
|   //   for(int j = 0; j < M; j++) { | ||||
|   //     norm2 += (Aevec[j] - evals[i] * evecs[i][j]) * (Aevec[j] - evals[i] * evecs[i][j]); | ||||
|   //   } | ||||
|   // } | ||||
|   return tot_it; | ||||
| } | ||||
|  | ||||
| template <class T> | ||||
| void Hess(DenseMatrix<T > &A, DenseMatrix<T> &Q, int start){ | ||||
|  | ||||
|   /** | ||||
|   turn a matrix A = | ||||
|   x  x  x  x  x | ||||
|   x  x  x  x  x | ||||
|   x  x  x  x  x | ||||
|   x  x  x  x  x | ||||
|   x  x  x  x  x | ||||
|   into | ||||
|   x  x  x  x  x | ||||
|   x  x  x  x  x | ||||
|   0  x  x  x  x | ||||
|   0  0  x  x  x | ||||
|   0  0  0  x  x | ||||
|   with householder rotations | ||||
|   Slow. | ||||
|   */ | ||||
|   int N ; SizeSquare(A,N); | ||||
|   DenseVector<T > p; Resize(p,N); Fill(p,0); | ||||
|  | ||||
|   for(int k=start;k<N-2;k++){ | ||||
|     //cerr << "hess" << k << std::endl; | ||||
|     DenseVector<T > ck,v; Resize(ck,N-k-1); Resize(v,N-k-1); | ||||
|     for(int i=k+1;i<N;i++){ck[i-k-1] = A(i,k);}  ///kth column | ||||
|     normalize(ck);    ///Normalization cancels in PHP anyway | ||||
|     T beta; | ||||
|     Householder_vector<T >(ck, 0, ck.size()-1, v, beta);  ///Householder vector | ||||
|     Householder_mult<T>(A,v,beta,start,k+1,N-1,0);  ///A -> PA | ||||
|     Householder_mult<T >(A,v,beta,start,k+1,N-1,1);  ///PA -> PAP^H | ||||
|     ///Accumulate eigenvector | ||||
|     Householder_mult<T >(Q,v,beta,start,k+1,N-1,1);  ///Q -> QP^H | ||||
|   } | ||||
|   /*for(int l=0;l<N-2;l++){ | ||||
|     for(int k=l+2;k<N;k++){ | ||||
|     A(0,k,l); | ||||
|     } | ||||
|     }*/ | ||||
| } | ||||
|  | ||||
| template <class T> | ||||
| void Tri(DenseMatrix<T > &A, DenseMatrix<T> &Q, int start){ | ||||
| ///Tridiagonalize a matrix | ||||
|   int N; SizeSquare(A,N); | ||||
|   Hess(A,Q,start); | ||||
|   /*for(int l=0;l<N-2;l++){ | ||||
|     for(int k=l+2;k<N;k++){ | ||||
|     A(0,l,k); | ||||
|     } | ||||
|     }*/ | ||||
| } | ||||
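// Note: Tri relies on the input being Hermitian. For a Hermitian A the Householder
// similarity transform P A P^H produced by Hess is both upper Hessenberg and Hermitian,
// hence tridiagonal, which is why no extra work is needed here beyond calling Hess.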
|  | ||||
| template <class T> | ||||
| void ForceTridiagonal(DenseMatrix<T> &A){ | ||||
| ///Tridiagonalize a matrix | ||||
|   int N ; SizeSquare(A,N); | ||||
|   for(int l=0;l<N-2;l++){ | ||||
|     for(int k=l+2;k<N;k++){ | ||||
|       A[l][k]=0; | ||||
|       A[k][l]=0; | ||||
|     } | ||||
|   } | ||||
| } | ||||
|  | ||||
| template <class T> | ||||
| int my_SymmEigensystem(DenseMatrix<T > &Ain, DenseVector<T> &evals, DenseVector<DenseVector<T> > &evecs, RealD small){ | ||||
|   ///Solve a symmetric eigensystem, not necessarily in tridiagonal form | ||||
|   int N; SizeSquare(Ain,N); | ||||
|   DenseMatrix<T > A; A = Ain; | ||||
|   DenseMatrix<T > Q; Resize(Q,N,N); Unity(Q); | ||||
|   Tri(A,Q,0); | ||||
|   int it = my_Wilkinson<T>(A, evals, evecs, small); | ||||
|   for(int k=0;k<N;k++){evecs[k] = Q*evecs[k];} | ||||
|   return it; | ||||
| } | ||||
|  | ||||
|  | ||||
| template <class T> | ||||
| int Wilkinson(DenseMatrix<T> &Ain, DenseVector<T> &evals, DenseVector<DenseVector<T> > &evecs, RealD small){ | ||||
|   return my_Wilkinson(Ain, evals, evecs, small); | ||||
| } | ||||
|  | ||||
| template <class T> | ||||
| int SymmEigensystem(DenseMatrix<T> &Ain, DenseVector<T> &evals, DenseVector<DenseVector<T> > &evecs, RealD small){ | ||||
|   return my_SymmEigensystem(Ain, evals, evecs, small); | ||||
| } | ||||
|  | ||||
| template <class T> | ||||
| int Eigensystem(DenseMatrix<T > &Ain, DenseVector<T> &evals, DenseVector<DenseVector<T> > &evecs, RealD small){ | ||||
| ///Solve a general eigensystem, not necessarily in tridiagonal form | ||||
|   int N = Ain.dim; | ||||
|   DenseMatrix<T > A(N); A = Ain; | ||||
|   DenseMatrix<T > Q(N);Q.Unity(); | ||||
|   Hess(A,Q,0); | ||||
|   int it = QReigensystem<T>(A, evals, evecs, small); | ||||
|   for(int k=0;k<N;k++){evecs[k] = Q*evecs[k];} | ||||
|   return it; | ||||
| } | ||||
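template <class T>
int my_SymmEigensystem_example(void){
  ///Illustrative sketch only (placeholder name, not part of the library interface):
  ///a minimal call of SymmEigensystem on a small dense test matrix, followed by the
  ///residual check A v - lambda v, mirroring the commented-out check above. It assumes
  ///DenseMatrix*DenseVector is defined (as used for P*evecs[i] above) and that the
  ///solver fills evals/evecs itself, as the QR driver does.
  const int N = 4;
  DenseMatrix<T> A; Resize(A,N,N); Fill(A,0);
  for(int i=0;i<N;i++){
    for(int j=0;j<N;j++){
      A[i][j] = (T)1.0/(T)(1+i+j);              // symmetric (Hilbert-like) test matrix
    }
  }
  DenseVector<T> evals;
  DenseVector<DenseVector<T> > evecs;
  int it = SymmEigensystem(A, evals, evecs, 1.0e-12);
  for(int k=0;k<N;k++){
    DenseVector<T> Av = A*evecs[k];             // A is unchanged; the solver works on a copy
    RealD rsd = 0;
    for(int i=0;i<N;i++){
      T d = Av[i] - evals[k]*evecs[k][i];
      RealD ad = (RealD)abs(d);
      rsd = rsd + ad*ad;                        // should be ~0 for a converged pair
    }
    std::cout << "eval " << evals[k] << " |A v - lambda v|^2 = " << rsd << std::endl;
  }
  return it;                                    // number of QR iterations taken
}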
|  | ||||
| } | ||||
| #endif | ||||
| @@ -1,242 +0,0 @@ | ||||
|     /************************************************************************************* | ||||
|  | ||||
|     Grid physics library, www.github.com/paboyle/Grid  | ||||
|  | ||||
|     Source file: ./lib/algorithms/iterative/Householder.h | ||||
|  | ||||
|     Copyright (C) 2015 | ||||
|  | ||||
| Author: Peter Boyle <paboyle@ph.ed.ac.uk> | ||||
|  | ||||
|     This program is free software; you can redistribute it and/or modify | ||||
|     it under the terms of the GNU General Public License as published by | ||||
|     the Free Software Foundation; either version 2 of the License, or | ||||
|     (at your option) any later version. | ||||
|  | ||||
|     This program is distributed in the hope that it will be useful, | ||||
|     but WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
|     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the | ||||
|     GNU General Public License for more details. | ||||
|  | ||||
|     You should have received a copy of the GNU General Public License along | ||||
|     with this program; if not, write to the Free Software Foundation, Inc., | ||||
|     51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||||
|  | ||||
|     See the full license in the file "LICENSE" in the top level distribution directory | ||||
|     *************************************************************************************/ | ||||
|     /*  END LEGAL */ | ||||
| #ifndef HOUSEHOLDER_H | ||||
| #define HOUSEHOLDER_H | ||||
|  | ||||
#define TIMER(A) std::cout << GridLogMessage << __func__ << " file "<< __FILE__ <<" line " << __LINE__ << std::endl;
#define ENTER()  std::cout << GridLogMessage << "ENTRY "<<__func__ << " file "<< __FILE__ <<" line " << __LINE__ << std::endl;
#define LEAVE()  std::cout << GridLogMessage << "EXIT  "<<__func__ << " file "<< __FILE__ <<" line " << __LINE__ << std::endl;
|  | ||||
| #include <cstdlib> | ||||
| #include <string> | ||||
| #include <cmath> | ||||
| #include <iostream> | ||||
| #include <sstream> | ||||
| #include <stdexcept> | ||||
| #include <fstream> | ||||
| #include <complex> | ||||
| #include <algorithm> | ||||
|  | ||||
| namespace Grid { | ||||
| /** Comparison function for finding the max element in a vector **/ | ||||
| template <class T> bool cf(T i, T j) {  | ||||
|   return abs(i) < abs(j);  | ||||
| } | ||||
|  | ||||
| /**  | ||||
| 	Calculate a real Givens angle  | ||||
|  **/ | ||||
| template <class T> inline void Givens_calc(T y, T z, T &c, T &s){ | ||||
|  | ||||
|   RealD mz = (RealD)abs(z); | ||||
|    | ||||
  if(mz==0.0){
    c = 1; s = 0;
    return;   // nothing to rotate; avoids 0/0 below when y is also zero
  }
|   if(mz >= (RealD)abs(y)){ | ||||
|     T t = -y/z; | ||||
|     s = (T)1.0 / sqrt ((T)1.0 + t * t); | ||||
|     c = s * t; | ||||
|   } else { | ||||
|     T t = -z/y; | ||||
|     c = (T)1.0 / sqrt ((T)1.0 + t * t); | ||||
|     s = c * t; | ||||
|   } | ||||
| } | ||||
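/**
	Convention check (worked through from the formulas above, not an extra code path):
	Givens_calc(y,z,c,s) is intended to be paired with Givens_mult(A,i,k,c,-s,0)
	(defined below), which updates the two rows as
	  A[i][j] <-  c*A[i][j] - s*A[k][j]
	  A[k][j] <-  s*A[i][j] + c*A[k][j]
	Applied to the column holding (y,z), the branch |z| >= |y| gives t = -y/z,
	s = 1/sqrt(1+t*t), c = s*t, hence s*y + c*z = (y + t*z)/sqrt(1+t*t) = 0,
	i.e. the lower of the two entries is annihilated; the other branch works the same way.
 **/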
|  | ||||
| template <class T> inline void Givens_mult(DenseMatrix<T> &A,  int i, int k, T c, T s, int dir) | ||||
| { | ||||
|   int q ; SizeSquare(A,q); | ||||
|  | ||||
|   if(dir == 0){ | ||||
|     for(int j=0;j<q;j++){ | ||||
|       T nu = A[i][j]; | ||||
|       T w  = A[k][j]; | ||||
|       A[i][j] = (c*nu + s*w); | ||||
|       A[k][j] = (-s*nu + c*w); | ||||
|     } | ||||
|   } | ||||
|  | ||||
|   if(dir == 1){ | ||||
|     for(int j=0;j<q;j++){ | ||||
|       T nu = A[j][i]; | ||||
|       T w  = A[j][k]; | ||||
|       A[j][i] = (c*nu - s*w); | ||||
|       A[j][k] = (s*nu + c*w); | ||||
|     } | ||||
|   } | ||||
| } | ||||
|  | ||||
| /** | ||||
| 	from input = x; | ||||
| 	Compute the complex Householder vector, v, such that | ||||
| 	P = (I - b v transpose(v) ) | ||||
| 	b = 2/v.v | ||||
|  | ||||
| 	P | x |    | x | k = 0 | ||||
| 	| x |    | 0 |  | ||||
| 	| x | =  | 0 | | ||||
| 	| x |    | 0 | j = 3 | ||||
| 	| x |	   | x | | ||||
|  | ||||
| 	These are the "Unreduced" Householder vectors. | ||||
|  | ||||
|  **/ | ||||
| template <class T> inline void Householder_vector(DenseVector<T> input, int k, int j, DenseVector<T> &v, T &beta) | ||||
| { | ||||
|   int N ; Size(input,N); | ||||
|   T m = *max_element(input.begin() + k, input.begin() + j + 1, cf<T> ); | ||||
|  | ||||
|   if(abs(m) > 0.0){ | ||||
|     T alpha = 0; | ||||
|  | ||||
|     for(int i=k; i<j+1; i++){ | ||||
|       v[i] = input[i]/m; | ||||
|       alpha = alpha + v[i]*conj(v[i]); | ||||
|     } | ||||
|     alpha = sqrt(alpha); | ||||
|     beta = (T)1.0/(alpha*(alpha + abs(v[k]) )); | ||||
|  | ||||
|     if(abs(v[k]) > 0.0)  v[k] = v[k] + (v[k]/abs(v[k]))*alpha; | ||||
|     else                 v[k] = -alpha; | ||||
|   } else{ | ||||
|     for(int i=k; i<j+1; i++){ | ||||
|       v[i] = 0.0; | ||||
|     }  | ||||
|   } | ||||
| } | ||||
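/**
	Note on beta (follows directly from the lines above): before the update the scaled
	column satisfies sum_i |v[i]|^2 = alpha^2.  After v[k] -> v[k] + sign(v[k])*alpha,
	  v.v = (alpha^2 - |v[k]|^2) + (|v[k]| + alpha)^2 = 2*alpha*(alpha + |v[k]|),
	with |v[k]| the pre-update value, so beta = 1/(alpha*(alpha + |v[k]|)) is exactly
	the b = 2/v.v quoted in the comment above.
 **/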
|  | ||||
| /** | ||||
| 	from input = x; | ||||
| 	Compute the complex Householder vector, v, such that | ||||
| 	P = (I - b v transpose(v) ) | ||||
| 	b = 2/v.v | ||||
|  | ||||
| 	Px = alpha*e_dir | ||||
|  | ||||
| 	These are the "Unreduced" Householder vectors. | ||||
|  | ||||
|  **/ | ||||
|  | ||||
| template <class T> inline void Householder_vector(DenseVector<T> input, int k, int j, int dir, DenseVector<T> &v, T &beta) | ||||
| { | ||||
|   int N = input.size(); | ||||
  T m = *max_element(input.begin() + k, input.begin() + j + 1, cf<T>);
|    | ||||
|   if(abs(m) > 0.0){ | ||||
|     T alpha = 0; | ||||
|  | ||||
|     for(int i=k; i<j+1; i++){ | ||||
|       v[i] = input[i]/m; | ||||
|       alpha = alpha + v[i]*conj(v[i]); | ||||
|     } | ||||
|      | ||||
|     alpha = sqrt(alpha); | ||||
|     beta = 1.0/(alpha*(alpha + abs(v[dir]) )); | ||||
| 	 | ||||
|     if(abs(v[dir]) > 0.0) v[dir] = v[dir] + (v[dir]/abs(v[dir]))*alpha; | ||||
|     else                  v[dir] = -alpha; | ||||
|   }else{ | ||||
|     for(int i=k; i<j+1; i++){ | ||||
|       v[i] = 0.0; | ||||
|     }  | ||||
|   } | ||||
| } | ||||
|  | ||||
| /** | ||||
| 	Compute the product PA if trans = 0 | ||||
| 	AP if trans = 1 | ||||
| 	P = (I - b v transpose(v) ) | ||||
| 	b = 2/v.v | ||||
| 	start at element l of matrix A | ||||
	v is of length j - k + 1 (only this nonzero part of the Householder vector is used)
|  **/ | ||||
|  | ||||
| template <class T> inline void Householder_mult(DenseMatrix<T> &A , DenseVector<T> v, T beta, int l, int k, int j, int trans) | ||||
| { | ||||
|   int N ; SizeSquare(A,N); | ||||
|  | ||||
|   if(abs(beta) > 0.0){ | ||||
|     for(int p=l; p<N; p++){ | ||||
|       T s = 0; | ||||
|       if(trans==0){ | ||||
| 	for(int i=k;i<j+1;i++) s += conj(v[i-k])*A[i][p]; | ||||
| 	s *= beta; | ||||
| 	for(int i=k;i<j+1;i++){ A[i][p] = A[i][p]-s*conj(v[i-k]);} | ||||
|       } else { | ||||
| 	for(int i=k;i<j+1;i++){ s += conj(v[i-k])*A[p][i];} | ||||
| 	s *= beta; | ||||
| 	for(int i=k;i<j+1;i++){ A[p][i]=A[p][i]-s*conj(v[i-k]);} | ||||
|       } | ||||
|     } | ||||
|   } | ||||
| } | ||||
|  | ||||
| /** | ||||
| 	Compute the product PA if trans = 0 | ||||
| 	AP if trans = 1 | ||||
| 	P = (I - b v transpose(v) ) | ||||
| 	b = 2/v.v | ||||
| 	start at element l of matrix A | ||||
	v is of length j - k + 1 (only this nonzero part of the Householder vector is used)
| 	A is tridiagonal | ||||
|  **/ | ||||
| template <class T> inline void Householder_mult_tri(DenseMatrix<T> &A , DenseVector<T> v, T beta, int l, int M, int k, int j, int trans) | ||||
| { | ||||
|   if(abs(beta) > 0.0){ | ||||
|  | ||||
|     int N ; SizeSquare(A,N); | ||||
|  | ||||
|     DenseMatrix<T> tmp; Resize(tmp,N,N); Fill(tmp,0);  | ||||
|  | ||||
|     T s; | ||||
|     for(int p=l; p<M; p++){ | ||||
|       s = 0; | ||||
|       if(trans==0){ | ||||
| 	for(int i=k;i<j+1;i++) s = s + conj(v[i-k])*A[i][p]; | ||||
|       }else{ | ||||
| 	for(int i=k;i<j+1;i++) s = s + v[i-k]*A[p][i]; | ||||
|       } | ||||
|       s = beta*s; | ||||
|       if(trans==0){ | ||||
	for(int i=k;i<j+1;i++) tmp[i][p] = tmp[i][p] - s*v[i-k];
|       }else{ | ||||
| 	for(int i=k;i<j+1;i++) tmp[p][i] = tmp[p][i] - s*conj(v[i-k]); | ||||
|       } | ||||
|     } | ||||
|     for(int p=l; p<M; p++){ | ||||
|       if(trans==0){ | ||||
| 	for(int i=k;i<j+1;i++) A[i][p] = A[i][p] + tmp[i][p]; | ||||
|       }else{ | ||||
| 	for(int i=k;i<j+1;i++) A[p][i] = A[p][i] + tmp[p][i]; | ||||
|       } | ||||
|     } | ||||
|   } | ||||
| } | ||||
| } | ||||
| #endif | ||||
| @@ -33,6 +33,8 @@ directory | ||||
|  | ||||
| namespace Grid { | ||||
|  | ||||
| enum BlockCGtype { BlockCG, BlockCGrQ, CGmultiRHS }; | ||||
|  | ||||
| ////////////////////////////////////////////////////////////////////////// | ||||
| // Block conjugate gradient. Dimension zero should be the block direction | ||||
| ////////////////////////////////////////////////////////////////////////// | ||||
| @@ -40,25 +42,286 @@ template <class Field> | ||||
| class BlockConjugateGradient : public OperatorFunction<Field> { | ||||
|  public: | ||||
|  | ||||
|  | ||||
|   typedef typename Field::scalar_type scomplex; | ||||
|  | ||||
|   const int blockDim = 0; | ||||
|  | ||||
|   int blockDim ; | ||||
|   int Nblock; | ||||
|  | ||||
|   BlockCGtype CGtype; | ||||
|   bool ErrorOnNoConverge;  // throw an assert when the CG fails to converge. | ||||
|                            // Defaults true. | ||||
|   RealD Tolerance; | ||||
|   Integer MaxIterations; | ||||
|   Integer IterationsToComplete; //Number of iterations the CG took to finish. Filled in upon completion | ||||
|    | ||||
|   BlockConjugateGradient(RealD tol, Integer maxit, bool err_on_no_conv = true) | ||||
|     : Tolerance(tol), | ||||
|     MaxIterations(maxit), | ||||
|     ErrorOnNoConverge(err_on_no_conv){}; | ||||
|   BlockConjugateGradient(BlockCGtype cgtype,int _Orthog,RealD tol, Integer maxit, bool err_on_no_conv = true) | ||||
|     : Tolerance(tol), CGtype(cgtype),   blockDim(_Orthog),  MaxIterations(maxit), ErrorOnNoConverge(err_on_no_conv) | ||||
|   {}; | ||||
|  | ||||
| //////////////////////////////////////////////////////////////////////////////////////////////////// | ||||
| // Thin QR factorisation (google it) | ||||
| //////////////////////////////////////////////////////////////////////////////////////////////////// | ||||
| void ThinQRfact (Eigen::MatrixXcd &m_rr, | ||||
| 		 Eigen::MatrixXcd &C, | ||||
| 		 Eigen::MatrixXcd &Cinv, | ||||
| 		 Field & Q, | ||||
| 		 const Field & R) | ||||
| { | ||||
|   int Orthog = blockDim; // First dimension is block dim; this is an assumption | ||||
|   //////////////////////////////////////////////////////////////////////////////////////////////////// | ||||
|   //Dimensions | ||||
|   // R_{ferm x Nblock} =  Q_{ferm x Nblock} x  C_{Nblock x Nblock} -> ferm x Nblock | ||||
|   // | ||||
|   // Rdag R = m_rr = Herm = L L^dag        <-- Cholesky decomposition (LLT routine in Eigen) | ||||
|   // | ||||
|   //   Q  C = R => Q = R C^{-1} | ||||
|   // | ||||
|   // Want  Ident = Q^dag Q = C^{-dag} R^dag R C^{-1} = C^{-dag} L L^dag C^{-1} = 1_{Nblock x Nblock}  | ||||
|   // | ||||
|   // Set C = L^{dag}, and then Q^dag Q = ident  | ||||
|   // | ||||
|   // Checks: | ||||
|   // Cdag C = Rdag R ; passes. | ||||
|   // QdagQ  = 1      ; passes | ||||
|   //////////////////////////////////////////////////////////////////////////////////////////////////// | ||||
|   sliceInnerProductMatrix(m_rr,R,R,Orthog); | ||||
|  | ||||
|   // Force manifest hermitian to avoid rounding related | ||||
|   m_rr = 0.5*(m_rr+m_rr.adjoint()); | ||||
|  | ||||
| #if 0 | ||||
|   std::cout << " Calling Cholesky  ldlt on m_rr "  << m_rr <<std::endl; | ||||
|   Eigen::MatrixXcd L_ldlt = m_rr.ldlt().matrixL();  | ||||
|   std::cout << " Called Cholesky  ldlt on m_rr "  << L_ldlt <<std::endl; | ||||
|   auto  D_ldlt = m_rr.ldlt().vectorD();  | ||||
|   std::cout << " Called Cholesky  ldlt on m_rr "  << D_ldlt <<std::endl; | ||||
| #endif | ||||
|  | ||||
|   //  std::cout << " Calling Cholesky  llt on m_rr "  <<std::endl; | ||||
|   Eigen::MatrixXcd L    = m_rr.llt().matrixL();  | ||||
|   //  std::cout << " Called Cholesky  llt on m_rr "  << L <<std::endl; | ||||
|   C    = L.adjoint(); | ||||
|   Cinv = C.inverse(); | ||||
|   //////////////////////////////////////////////////////////////////////////////////////////////////// | ||||
|   // Q = R C^{-1} | ||||
|   // | ||||
|   // Q_j  = R_i Cinv(i,j)  | ||||
|   // | ||||
|   // NB maddMatrix conventions are Right multiplication X[j] a[j,i] already | ||||
|   //////////////////////////////////////////////////////////////////////////////////////////////////// | ||||
|   sliceMulMatrix(Q,Cinv,R,Orthog); | ||||
| } | ||||
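////////////////////////////////////////////////////////////////////////////////////////////////////
// Illustrative sketch (not used by the solver): the same thin-QR-via-Cholesky identity on plain
// Eigen matrices, with "nrow"/"ncol" as arbitrary placeholder sizes (nrow >= ncol). Given R,
//   m_rr = R^dag R = L L^dag  (Cholesky),   C = L^dag,   Q = R C^{-1}  =>  Q^dag Q = 1 up to rounding.
////////////////////////////////////////////////////////////////////////////////////////////////////
static void ThinQRfactEigenSketch(int nrow, int ncol)
{
  Eigen::MatrixXcd R    = Eigen::MatrixXcd::Random(nrow,ncol); // stand-in for the Nblock field slices
  Eigen::MatrixXcd m_rr = R.adjoint()*R;
  m_rr = 0.5*(m_rr+m_rr.adjoint());                            // force manifest hermiticity
  Eigen::MatrixXcd L    = m_rr.llt().matrixL();                // Cholesky factor
  Eigen::MatrixXcd C    = L.adjoint();
  Eigen::MatrixXcd Cinv = C.inverse();
  Eigen::MatrixXcd Q    = R * Cinv;                            // orthonormalised block
  Eigen::MatrixXcd test = Q.adjoint()*Q;                       // ~ identity (ncol x ncol)
  std::cout << " ThinQR sketch |Q^dag Q - 1| = "
            << (test - Eigen::MatrixXcd::Identity(ncol,ncol)).norm() << std::endl;
}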
| //////////////////////////////////////////////////////////////////////////////////////////////////// | ||||
| // Call one of several implementations | ||||
| //////////////////////////////////////////////////////////////////////////////////////////////////// | ||||
| void operator()(LinearOperatorBase<Field> &Linop, const Field &Src, Field &Psi)  | ||||
| { | ||||
|   int Orthog = 0; // First dimension is block dim | ||||
|   if ( CGtype == BlockCGrQ ) { | ||||
|     BlockCGrQsolve(Linop,Src,Psi); | ||||
|   } else if (CGtype == BlockCG ) { | ||||
|     BlockCGsolve(Linop,Src,Psi); | ||||
|   } else if (CGtype == CGmultiRHS ) { | ||||
|     CGmultiRHSsolve(Linop,Src,Psi); | ||||
|   } else { | ||||
|     assert(0); | ||||
|   } | ||||
| } | ||||
|  | ||||
| //////////////////////////////////////////////////////////////////////////// | ||||
| // BlockCGrQ implementation: | ||||
| //-------------------------- | ||||
| // X is guess/Solution | ||||
| // B is RHS | ||||
| // Solve A X_i = B_i    ;        i refers to Nblock index | ||||
| //////////////////////////////////////////////////////////////////////////// | ||||
| void BlockCGrQsolve(LinearOperatorBase<Field> &Linop, const Field &B, Field &X)  | ||||
| { | ||||
|   int Orthog = blockDim; // First dimension is block dim; this is an assumption | ||||
|   Nblock = B._grid->_fdimensions[Orthog]; | ||||
|  | ||||
|   std::cout<<GridLogMessage<<" Block Conjugate Gradient : Orthog "<<Orthog<<" Nblock "<<Nblock<<std::endl; | ||||
|  | ||||
|   X.checkerboard = B.checkerboard; | ||||
|   conformable(X, B); | ||||
|  | ||||
|   Field tmp(B); | ||||
|   Field Q(B); | ||||
|   Field D(B); | ||||
|   Field Z(B); | ||||
|   Field AD(B); | ||||
|  | ||||
|   Eigen::MatrixXcd m_DZ     = Eigen::MatrixXcd::Identity(Nblock,Nblock); | ||||
|   Eigen::MatrixXcd m_M      = Eigen::MatrixXcd::Identity(Nblock,Nblock); | ||||
|   Eigen::MatrixXcd m_rr     = Eigen::MatrixXcd::Zero(Nblock,Nblock); | ||||
|  | ||||
|   Eigen::MatrixXcd m_C      = Eigen::MatrixXcd::Zero(Nblock,Nblock); | ||||
|   Eigen::MatrixXcd m_Cinv   = Eigen::MatrixXcd::Zero(Nblock,Nblock); | ||||
|   Eigen::MatrixXcd m_S      = Eigen::MatrixXcd::Zero(Nblock,Nblock); | ||||
|   Eigen::MatrixXcd m_Sinv   = Eigen::MatrixXcd::Zero(Nblock,Nblock); | ||||
|  | ||||
|   Eigen::MatrixXcd m_tmp    = Eigen::MatrixXcd::Identity(Nblock,Nblock); | ||||
|   Eigen::MatrixXcd m_tmp1   = Eigen::MatrixXcd::Identity(Nblock,Nblock); | ||||
|  | ||||
|   // Initial residual computation & set up | ||||
|   std::vector<RealD> residuals(Nblock); | ||||
|   std::vector<RealD> ssq(Nblock); | ||||
|  | ||||
|   sliceNorm(ssq,B,Orthog); | ||||
|   RealD sssum=0; | ||||
|   for(int b=0;b<Nblock;b++) sssum+=ssq[b]; | ||||
|  | ||||
|   sliceNorm(residuals,B,Orthog); | ||||
|   for(int b=0;b<Nblock;b++){ assert(std::isnan(residuals[b])==0); } | ||||
|  | ||||
|   sliceNorm(residuals,X,Orthog); | ||||
|   for(int b=0;b<Nblock;b++){ assert(std::isnan(residuals[b])==0); } | ||||
|  | ||||
|   /************************************************************************ | ||||
|    * Block conjugate gradient rQ (Sebastien Birk Thesis, after Dubrulle 2001) | ||||
|    ************************************************************************ | ||||
|    * Dimensions: | ||||
|    * | ||||
|    *   X,B==(Nferm x Nblock) | ||||
|    *   A==(Nferm x Nferm) | ||||
|    *   | ||||
|    * Nferm = Nspin x Ncolour x Ncomplex x Nlattice_site | ||||
|    *  | ||||
|    * QC = R = B-AX, D = Q     ; QC => Thin QR factorisation (google it) | ||||
|    * for k:  | ||||
|    *   Z  = AD | ||||
|    *   M  = [D^dag Z]^{-1} | ||||
|    *   X  = X + D MC | ||||
|    *   QS = Q - ZM | ||||
|    *   D  = Q + D S^dag | ||||
|    *   C  = S C | ||||
|    */ | ||||
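  ///////////////////////////////////////
  // Bookkeeping for the steps above: X, B, Q, D, Z, tmp are lattice fields carrying
  // Nblock columns along the Orthog direction, while C, M, S (and their inverses) are
  // the small Nblock x Nblock Eigen matrices declared earlier; sliceInnerProductMatrix,
  // sliceMaddMatrix and ThinQRfact move between the two representations.
  ///////////////////////////////////////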
|   /////////////////////////////////////// | ||||
|   // Initial block: initial search dir is guess | ||||
|   /////////////////////////////////////// | ||||
|   std::cout << GridLogMessage<<"BlockCGrQ algorithm initialisation " <<std::endl; | ||||
|  | ||||
|   //1.  QC = R = B-AX, D = Q     ; QC => Thin QR factorisation (google it) | ||||
|  | ||||
|   Linop.HermOp(X, AD); | ||||
|   tmp = B - AD;   | ||||
|   //std::cout << GridLogMessage << " initial tmp " << norm2(tmp)<< std::endl; | ||||
|   ThinQRfact (m_rr, m_C, m_Cinv, Q, tmp); | ||||
|   //std::cout << GridLogMessage << " initial Q " << norm2(Q)<< std::endl; | ||||
|   //std::cout << GridLogMessage << " m_rr " << m_rr<<std::endl; | ||||
|   //std::cout << GridLogMessage << " m_C " << m_C<<std::endl; | ||||
|   //std::cout << GridLogMessage << " m_Cinv " << m_Cinv<<std::endl; | ||||
|   D=Q; | ||||
|  | ||||
|   std::cout << GridLogMessage<<"BlockCGrQ computed initial residual and QR fact " <<std::endl; | ||||
|  | ||||
|   /////////////////////////////////////// | ||||
|   // Timers | ||||
|   /////////////////////////////////////// | ||||
|   GridStopWatch sliceInnerTimer; | ||||
|   GridStopWatch sliceMaddTimer; | ||||
|   GridStopWatch QRTimer; | ||||
|   GridStopWatch MatrixTimer; | ||||
|   GridStopWatch SolverTimer; | ||||
|   SolverTimer.Start(); | ||||
|  | ||||
|   int k; | ||||
|   for (k = 1; k <= MaxIterations; k++) { | ||||
|  | ||||
|     //3. Z  = AD | ||||
|     MatrixTimer.Start(); | ||||
|     Linop.HermOp(D, Z);       | ||||
|     MatrixTimer.Stop(); | ||||
|     //std::cout << GridLogMessage << " norm2 Z " <<norm2(Z)<<std::endl; | ||||
|  | ||||
|     //4. M  = [D^dag Z]^{-1} | ||||
|     sliceInnerTimer.Start(); | ||||
|     sliceInnerProductMatrix(m_DZ,D,Z,Orthog); | ||||
|     sliceInnerTimer.Stop(); | ||||
|     m_M       = m_DZ.inverse(); | ||||
|     //std::cout << GridLogMessage << " m_DZ " <<m_DZ<<std::endl; | ||||
|      | ||||
|     //5. X  = X + D MC | ||||
|     m_tmp     = m_M * m_C; | ||||
|     sliceMaddTimer.Start(); | ||||
|     sliceMaddMatrix(X,m_tmp, D,X,Orthog);      | ||||
|     sliceMaddTimer.Stop(); | ||||
|  | ||||
|     //6. QS = Q - ZM | ||||
|     sliceMaddTimer.Start(); | ||||
|     sliceMaddMatrix(tmp,m_M,Z,Q,Orthog,-1.0); | ||||
|     sliceMaddTimer.Stop(); | ||||
|     QRTimer.Start(); | ||||
|     ThinQRfact (m_rr, m_S, m_Sinv, Q, tmp); | ||||
|     QRTimer.Stop(); | ||||
|      | ||||
|     //7. D  = Q + D S^dag | ||||
|     m_tmp = m_S.adjoint(); | ||||
|     sliceMaddTimer.Start(); | ||||
|     sliceMaddMatrix(D,m_tmp,D,Q,Orthog); | ||||
|     sliceMaddTimer.Stop(); | ||||
|  | ||||
|     //8. C  = S C | ||||
|     m_C = m_S*m_C; | ||||
|      | ||||
|     /********************* | ||||
|      * convergence monitor | ||||
|      ********************* | ||||
|      */ | ||||
|     m_rr = m_C.adjoint() * m_C; | ||||
|  | ||||
|     RealD max_resid=0; | ||||
|     RealD rrsum=0; | ||||
|     RealD rr; | ||||
|  | ||||
|     for(int b=0;b<Nblock;b++) { | ||||
|       rrsum+=real(m_rr(b,b)); | ||||
|       rr = real(m_rr(b,b))/ssq[b]; | ||||
|       if ( rr > max_resid ) max_resid = rr; | ||||
|     } | ||||
|  | ||||
|     std::cout << GridLogIterative << "\titeration "<<k<<" rr_sum "<<rrsum<<" ssq_sum "<< sssum | ||||
| 	      <<" ave "<<std::sqrt(rrsum/sssum) << " max "<< max_resid <<std::endl; | ||||
|  | ||||
|     if ( max_resid < Tolerance*Tolerance ) {  | ||||
|  | ||||
|       SolverTimer.Stop(); | ||||
|  | ||||
|       std::cout << GridLogMessage<<"BlockCGrQ converged in "<<k<<" iterations"<<std::endl; | ||||
|  | ||||
|       for(int b=0;b<Nblock;b++){ | ||||
| 	std::cout << GridLogMessage<< "\t\tblock "<<b<<" computed resid " | ||||
| 		  << std::sqrt(real(m_rr(b,b))/ssq[b])<<std::endl; | ||||
|       } | ||||
|       std::cout << GridLogMessage<<"\tMax residual is "<<std::sqrt(max_resid)<<std::endl; | ||||
|  | ||||
|       Linop.HermOp(X, AD); | ||||
|       AD = AD-B; | ||||
|       std::cout << GridLogMessage <<"\t True residual is " << std::sqrt(norm2(AD)/norm2(B)) <<std::endl; | ||||
|  | ||||
|       std::cout << GridLogMessage << "Time Breakdown "<<std::endl; | ||||
|       std::cout << GridLogMessage << "\tElapsed    " << SolverTimer.Elapsed()     <<std::endl; | ||||
|       std::cout << GridLogMessage << "\tMatrix     " << MatrixTimer.Elapsed()     <<std::endl; | ||||
|       std::cout << GridLogMessage << "\tInnerProd  " << sliceInnerTimer.Elapsed() <<std::endl; | ||||
|       std::cout << GridLogMessage << "\tMaddMatrix " << sliceMaddTimer.Elapsed()  <<std::endl; | ||||
|       std::cout << GridLogMessage << "\tThinQRfact " << QRTimer.Elapsed()  <<std::endl; | ||||
| 	     | ||||
|       IterationsToComplete = k; | ||||
|       return; | ||||
|     } | ||||
|  | ||||
|   } | ||||
|   std::cout << GridLogMessage << "BlockConjugateGradient(rQ) did NOT converge" << std::endl; | ||||
|  | ||||
|   if (ErrorOnNoConverge) assert(0); | ||||
|   IterationsToComplete = k; | ||||
| } | ||||
| ////////////////////////////////////////////////////////////////////////// | ||||
| // Block conjugate gradient; Original O'Leary Dimension zero should be the block direction | ||||
| ////////////////////////////////////////////////////////////////////////// | ||||
| void BlockCGsolve(LinearOperatorBase<Field> &Linop, const Field &Src, Field &Psi)  | ||||
| { | ||||
|   int Orthog = blockDim; // First dimension is block dim; this is an assumption | ||||
|   Nblock = Src._grid->_fdimensions[Orthog]; | ||||
|  | ||||
|   std::cout<<GridLogMessage<<" Block Conjugate Gradient : Orthog "<<Orthog<<" Nblock "<<Nblock<<std::endl; | ||||
| @@ -162,8 +425,9 @@ void operator()(LinearOperatorBase<Field> &Linop, const Field &Src, Field &Psi) | ||||
|      ********************* | ||||
|      */ | ||||
|     RealD max_resid=0; | ||||
|     RealD rr; | ||||
|     for(int b=0;b<Nblock;b++){ | ||||
|       RealD rr = real(m_rr(b,b))/ssq[b]; | ||||
|       rr = real(m_rr(b,b))/ssq[b]; | ||||
|       if ( rr > max_resid ) max_resid = rr; | ||||
|     } | ||||
|      | ||||
| @@ -173,13 +437,14 @@ void operator()(LinearOperatorBase<Field> &Linop, const Field &Src, Field &Psi) | ||||
|  | ||||
|       std::cout << GridLogMessage<<"BlockCG converged in "<<k<<" iterations"<<std::endl; | ||||
|       for(int b=0;b<Nblock;b++){ | ||||
| 	std::cout << GridLogMessage<< "\t\tblock "<<b<<" resid "<< std::sqrt(real(m_rr(b,b))/ssq[b])<<std::endl; | ||||
| 	std::cout << GridLogMessage<< "\t\tblock "<<b<<" computed resid " | ||||
| 		  << std::sqrt(real(m_rr(b,b))/ssq[b])<<std::endl; | ||||
|       } | ||||
|       std::cout << GridLogMessage<<"\tMax residual is "<<std::sqrt(max_resid)<<std::endl; | ||||
|  | ||||
|       Linop.HermOp(Psi, AP); | ||||
|       AP = AP-Src; | ||||
|       std::cout << GridLogMessage <<"\tTrue residual is " << std::sqrt(norm2(AP)/norm2(Src)) <<std::endl; | ||||
|       std::cout << GridLogMessage <<"\t True residual is " << std::sqrt(norm2(AP)/norm2(Src)) <<std::endl; | ||||
|  | ||||
|       std::cout << GridLogMessage << "Time Breakdown "<<std::endl; | ||||
|       std::cout << GridLogMessage << "\tElapsed    " << SolverTimer.Elapsed()     <<std::endl; | ||||
| @@ -197,35 +462,13 @@ void operator()(LinearOperatorBase<Field> &Linop, const Field &Src, Field &Psi) | ||||
|   if (ErrorOnNoConverge) assert(0); | ||||
|   IterationsToComplete = k; | ||||
| } | ||||
| }; | ||||
|  | ||||
|  | ||||
| ////////////////////////////////////////////////////////////////////////// | ||||
| // multiRHS conjugate gradient. Dimension zero should be the block direction | ||||
| // Use this for spread out across nodes | ||||
| ////////////////////////////////////////////////////////////////////////// | ||||
| template <class Field> | ||||
| class MultiRHSConjugateGradient : public OperatorFunction<Field> { | ||||
|  public: | ||||
|  | ||||
|   typedef typename Field::scalar_type scomplex; | ||||
|  | ||||
|   const int blockDim = 0; | ||||
|  | ||||
|   int Nblock; | ||||
|   bool ErrorOnNoConverge;  // throw an assert when the CG fails to converge. | ||||
|                            // Defaults true. | ||||
|   RealD Tolerance; | ||||
|   Integer MaxIterations; | ||||
|   Integer IterationsToComplete; //Number of iterations the CG took to finish. Filled in upon completion | ||||
|    | ||||
|    MultiRHSConjugateGradient(RealD tol, Integer maxit, bool err_on_no_conv = true) | ||||
|     : Tolerance(tol), | ||||
|     MaxIterations(maxit), | ||||
|     ErrorOnNoConverge(err_on_no_conv){}; | ||||
|  | ||||
| void operator()(LinearOperatorBase<Field> &Linop, const Field &Src, Field &Psi)  | ||||
| void CGmultiRHSsolve(LinearOperatorBase<Field> &Linop, const Field &Src, Field &Psi)  | ||||
| { | ||||
|   int Orthog = 0; // First dimension is block dim | ||||
|   int Orthog = blockDim; // First dimension is block dim | ||||
|   Nblock = Src._grid->_fdimensions[Orthog]; | ||||
|  | ||||
|   std::cout<<GridLogMessage<<"MultiRHS Conjugate Gradient : Orthog "<<Orthog<<" Nblock "<<Nblock<<std::endl; | ||||
| @@ -285,12 +528,10 @@ void operator()(LinearOperatorBase<Field> &Linop, const Field &Src, Field &Psi) | ||||
|     MatrixTimer.Stop(); | ||||
|  | ||||
|     // Alpha | ||||
|     //    sliceInnerProductVectorTest(v_pAp_test,P,AP,Orthog); | ||||
|     sliceInnerTimer.Start(); | ||||
|     sliceInnerProductVector(v_pAp,P,AP,Orthog); | ||||
|     sliceInnerTimer.Stop(); | ||||
|     for(int b=0;b<Nblock;b++){ | ||||
|       //      std::cout << " "<< v_pAp[b]<<" "<< v_pAp_test[b]<<std::endl; | ||||
|       v_alpha[b] = v_rr[b]/real(v_pAp[b]); | ||||
|     } | ||||
|  | ||||
| @@ -332,7 +573,7 @@ void operator()(LinearOperatorBase<Field> &Linop, const Field &Src, Field &Psi) | ||||
|  | ||||
|       std::cout << GridLogMessage<<"MultiRHS solver converged in " <<k<<" iterations"<<std::endl; | ||||
|       for(int b=0;b<Nblock;b++){ | ||||
| 	std::cout << GridLogMessage<< "\t\tBlock "<<b<<" resid "<< std::sqrt(v_rr[b]/ssq[b])<<std::endl; | ||||
| 	std::cout << GridLogMessage<< "\t\tBlock "<<b<<" computed resid "<< std::sqrt(v_rr[b]/ssq[b])<<std::endl; | ||||
|       } | ||||
|       std::cout << GridLogMessage<<"\tMax residual is "<<std::sqrt(max_resid)<<std::endl; | ||||
|  | ||||
| @@ -358,9 +599,8 @@ void operator()(LinearOperatorBase<Field> &Linop, const Field &Src, Field &Psi) | ||||
|   if (ErrorOnNoConverge) assert(0); | ||||
|   IterationsToComplete = k; | ||||
| } | ||||
|  | ||||
| }; | ||||
|  | ||||
|  | ||||
|  | ||||
| } | ||||
| #endif | ||||
|   | ||||
| @@ -52,8 +52,8 @@ class ConjugateGradient : public OperatorFunction<Field> { | ||||
|         MaxIterations(maxit), | ||||
|         ErrorOnNoConverge(err_on_no_conv){}; | ||||
|  | ||||
|   void operator()(LinearOperatorBase<Field> &Linop, const Field &src, | ||||
|                   Field &psi) { | ||||
|   void operator()(LinearOperatorBase<Field> &Linop, const Field &src, Field &psi) { | ||||
|  | ||||
|     psi.checkerboard = src.checkerboard; | ||||
|     conformable(psi, src); | ||||
|  | ||||
| @@ -78,12 +78,12 @@ class ConjugateGradient : public OperatorFunction<Field> { | ||||
|     cp = a; | ||||
|     ssq = norm2(src); | ||||
|  | ||||
|     std::cout << GridLogIterative << std::setprecision(4) << "ConjugateGradient: guess " << guess << std::endl; | ||||
|     std::cout << GridLogIterative << std::setprecision(4) << "ConjugateGradient:   src " << ssq << std::endl; | ||||
|     std::cout << GridLogIterative << std::setprecision(4) << "ConjugateGradient:    mp " << d << std::endl; | ||||
|     std::cout << GridLogIterative << std::setprecision(4) << "ConjugateGradient:   mmp " << b << std::endl; | ||||
|     std::cout << GridLogIterative << std::setprecision(4) << "ConjugateGradient:  cp,r " << cp << std::endl; | ||||
|     std::cout << GridLogIterative << std::setprecision(4) << "ConjugateGradient:     p " << a << std::endl; | ||||
|     std::cout << GridLogIterative << std::setprecision(8) << "ConjugateGradient: guess " << guess << std::endl; | ||||
|     std::cout << GridLogIterative << std::setprecision(8) << "ConjugateGradient:   src " << ssq << std::endl; | ||||
|     std::cout << GridLogIterative << std::setprecision(8) << "ConjugateGradient:    mp " << d << std::endl; | ||||
|     std::cout << GridLogIterative << std::setprecision(8) << "ConjugateGradient:   mmp " << b << std::endl; | ||||
|     std::cout << GridLogIterative << std::setprecision(8) << "ConjugateGradient:  cp,r " << cp << std::endl; | ||||
|     std::cout << GridLogIterative << std::setprecision(8) << "ConjugateGradient:     p " << a << std::endl; | ||||
|  | ||||
|     RealD rsq = Tolerance * Tolerance * ssq; | ||||
|  | ||||
| @@ -92,7 +92,7 @@ class ConjugateGradient : public OperatorFunction<Field> { | ||||
|       return; | ||||
|     } | ||||
|  | ||||
|     std::cout << GridLogIterative << std::setprecision(4) | ||||
|     std::cout << GridLogIterative << std::setprecision(8) | ||||
|               << "ConjugateGradient: k=0 residual " << cp << " target " << rsq << std::endl; | ||||
|  | ||||
|     GridStopWatch LinalgTimer; | ||||
|   | ||||
							
								
								
									
lib/algorithms/iterative/ConjugateGradientReliableUpdate.h (new file, 256 lines)
									
								
							| @@ -0,0 +1,256 @@ | ||||
|     /************************************************************************************* | ||||
|  | ||||
|     Grid physics library, www.github.com/paboyle/Grid  | ||||
|  | ||||
|     Source file: ./lib/algorithms/iterative/ConjugateGradientReliableUpdate.h | ||||
|  | ||||
|     Copyright (C) 2015 | ||||
|  | ||||
| Author: Christopher Kelly <ckelly@phys.columbia.edu> | ||||
|  | ||||
|     This program is free software; you can redistribute it and/or modify | ||||
|     it under the terms of the GNU General Public License as published by | ||||
|     the Free Software Foundation; either version 2 of the License, or | ||||
|     (at your option) any later version. | ||||
|  | ||||
|     This program is distributed in the hope that it will be useful, | ||||
|     but WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
|     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the | ||||
|     GNU General Public License for more details. | ||||
|  | ||||
|     You should have received a copy of the GNU General Public License along | ||||
|     with this program; if not, write to the Free Software Foundation, Inc., | ||||
|     51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||||
|  | ||||
|     See the full license in the file "LICENSE" in the top level distribution directory | ||||
|     *************************************************************************************/ | ||||
|     /*  END LEGAL */ | ||||
| #ifndef GRID_CONJUGATE_GRADIENT_RELIABLE_UPDATE_H | ||||
| #define GRID_CONJUGATE_GRADIENT_RELIABLE_UPDATE_H | ||||
|  | ||||
| namespace Grid { | ||||
|  | ||||
|   template<class FieldD,class FieldF, typename std::enable_if< getPrecision<FieldD>::value == 2, int>::type = 0,typename std::enable_if< getPrecision<FieldF>::value == 1, int>::type = 0>  | ||||
|   class ConjugateGradientReliableUpdate : public LinearFunction<FieldD> { | ||||
|   public: | ||||
|     bool ErrorOnNoConverge;  // throw an assert when the CG fails to converge. | ||||
|     // Defaults true. | ||||
|     RealD Tolerance; | ||||
|     Integer MaxIterations; | ||||
|     Integer IterationsToComplete; //Number of iterations the CG took to finish. Filled in upon completion | ||||
|     Integer ReliableUpdatesPerformed; | ||||
|  | ||||
|     bool DoFinalCleanup; //Final DP cleanup, defaults to true | ||||
|     Integer IterationsToCleanup; //Final DP cleanup step iterations | ||||
|      | ||||
|     LinearOperatorBase<FieldF> &Linop_f; | ||||
|     LinearOperatorBase<FieldD> &Linop_d; | ||||
|     GridBase* SinglePrecGrid; | ||||
|     RealD Delta; //reliable update parameter | ||||
|  | ||||
|     //Optional ability to switch to a different linear operator once the tolerance reaches a certain point. Useful for single/half -> single/single | ||||
|     LinearOperatorBase<FieldF> *Linop_fallback; | ||||
|     RealD fallback_transition_tol; | ||||
|  | ||||
|      | ||||
|     ConjugateGradientReliableUpdate(RealD tol, Integer maxit, RealD _delta, GridBase* _sp_grid, LinearOperatorBase<FieldF> &_Linop_f, LinearOperatorBase<FieldD> &_Linop_d, bool err_on_no_conv = true) | ||||
|       : Tolerance(tol), | ||||
|         MaxIterations(maxit), | ||||
| 	Delta(_delta), | ||||
| 	Linop_f(_Linop_f), | ||||
| 	Linop_d(_Linop_d), | ||||
| 	SinglePrecGrid(_sp_grid), | ||||
|         ErrorOnNoConverge(err_on_no_conv), | ||||
| 	DoFinalCleanup(true), | ||||
| 	Linop_fallback(NULL) | ||||
|     {}; | ||||
|  | ||||
|     void setFallbackLinop(LinearOperatorBase<FieldF> &_Linop_fallback, const RealD _fallback_transition_tol){ | ||||
|       Linop_fallback = &_Linop_fallback; | ||||
|       fallback_transition_tol = _fallback_transition_tol;       | ||||
|     } | ||||
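    /**
       Overview of the solve below (as implemented here): the CG recursion runs in single
       precision (psi_f, p_f, r_f, mmp_f).  Whenever the single-precision residual cp falls
       below Delta * MaxResidSinceLastRelUp, the accumulated single-precision solution is
       folded into the double-precision psi, the true residual r = src - A psi is recomputed
       in double precision, and the single-precision accumulators are reset (a "reliable
       update").  Convergence is declared on cp <= rsq, optionally followed by a full
       double-precision CG cleanup when DoFinalCleanup is set.
    **/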
|      | ||||
|     void operator()(const FieldD &src, FieldD &psi) { | ||||
|       LinearOperatorBase<FieldF> *Linop_f_use = &Linop_f; | ||||
|       bool using_fallback = false; | ||||
|        | ||||
|       psi.checkerboard = src.checkerboard; | ||||
|       conformable(psi, src); | ||||
|  | ||||
|       RealD cp, c, a, d, b, ssq, qq, b_pred; | ||||
|  | ||||
|       FieldD p(src); | ||||
|       FieldD mmp(src); | ||||
|       FieldD r(src); | ||||
|  | ||||
|       // Initial residual computation & set up | ||||
|       RealD guess = norm2(psi); | ||||
|       assert(std::isnan(guess) == 0); | ||||
|      | ||||
|       Linop_d.HermOpAndNorm(psi, mmp, d, b); | ||||
|      | ||||
|       r = src - mmp; | ||||
|       p = r; | ||||
|  | ||||
|       a = norm2(p); | ||||
|       cp = a; | ||||
|       ssq = norm2(src); | ||||
|  | ||||
|       std::cout << GridLogIterative << std::setprecision(4) << "ConjugateGradientReliableUpdate: guess " << guess << std::endl; | ||||
|       std::cout << GridLogIterative << std::setprecision(4) << "ConjugateGradientReliableUpdate:   src " << ssq << std::endl; | ||||
|       std::cout << GridLogIterative << std::setprecision(4) << "ConjugateGradientReliableUpdate:    mp " << d << std::endl; | ||||
|       std::cout << GridLogIterative << std::setprecision(4) << "ConjugateGradientReliableUpdate:   mmp " << b << std::endl; | ||||
|       std::cout << GridLogIterative << std::setprecision(4) << "ConjugateGradientReliableUpdate:  cp,r " << cp << std::endl; | ||||
|       std::cout << GridLogIterative << std::setprecision(4) << "ConjugateGradientReliableUpdate:     p " << a << std::endl; | ||||
|  | ||||
|       RealD rsq = Tolerance * Tolerance * ssq; | ||||
|  | ||||
|       // Check if guess is really REALLY good :) | ||||
|       if (cp <= rsq) { | ||||
| 	std::cout << GridLogMessage << "ConjugateGradientReliableUpdate guess was REALLY good\n"; | ||||
| 	std::cout << GridLogMessage << "\tComputed residual " << sqrt(cp / ssq)<<std::endl; | ||||
| 	return; | ||||
|       } | ||||
|  | ||||
|       //Single prec initialization | ||||
|       FieldF r_f(SinglePrecGrid); | ||||
|       r_f.checkerboard = r.checkerboard; | ||||
|       precisionChange(r_f, r); | ||||
|  | ||||
|       FieldF psi_f(r_f); | ||||
|       psi_f = zero; | ||||
|  | ||||
|       FieldF p_f(r_f); | ||||
|       FieldF mmp_f(r_f); | ||||
|  | ||||
|       RealD MaxResidSinceLastRelUp = cp; //initial residual     | ||||
|      | ||||
|       std::cout << GridLogIterative << std::setprecision(4) | ||||
| 		<< "ConjugateGradient: k=0 residual " << cp << " target " << rsq << std::endl; | ||||
|  | ||||
|       GridStopWatch LinalgTimer; | ||||
|       GridStopWatch MatrixTimer; | ||||
|       GridStopWatch SolverTimer; | ||||
|  | ||||
|       SolverTimer.Start(); | ||||
|       int k = 0; | ||||
|       int l = 0; | ||||
|      | ||||
|       for (k = 1; k <= MaxIterations; k++) { | ||||
| 	c = cp; | ||||
|  | ||||
| 	MatrixTimer.Start(); | ||||
| 	Linop_f_use->HermOpAndNorm(p_f, mmp_f, d, qq); | ||||
| 	MatrixTimer.Stop(); | ||||
|  | ||||
| 	LinalgTimer.Start(); | ||||
|  | ||||
| 	a = c / d; | ||||
| 	b_pred = a * (a * qq - d) / c; | ||||
|  | ||||
| 	cp = axpy_norm(r_f, -a, mmp_f, r_f); | ||||
| 	b = cp / c; | ||||
|  | ||||
| 	// Fuse these loops ; should be really easy | ||||
| 	psi_f = a * p_f + psi_f; | ||||
| 	//p_f = p_f * b + r_f; | ||||
|  | ||||
| 	LinalgTimer.Stop(); | ||||
|  | ||||
| 	std::cout << GridLogIterative << "ConjugateGradientReliableUpdate: Iteration " << k | ||||
| 		  << " residual " << cp << " target " << rsq << std::endl; | ||||
| 	std::cout << GridLogDebug << "a = "<< a << " b_pred = "<< b_pred << "  b = "<< b << std::endl; | ||||
| 	std::cout << GridLogDebug << "qq = "<< qq << " d = "<< d << "  c = "<< c << std::endl; | ||||
|  | ||||
| 	if(cp > MaxResidSinceLastRelUp){ | ||||
| 	  std::cout << GridLogIterative << "ConjugateGradientReliableUpdate: updating MaxResidSinceLastRelUp : " << MaxResidSinceLastRelUp << " -> " << cp << std::endl; | ||||
| 	  MaxResidSinceLastRelUp = cp; | ||||
| 	} | ||||
| 	   | ||||
| 	// Stopping condition | ||||
| 	if (cp <= rsq) { | ||||
| 	  //Although not written in the paper, I assume that I have to add on the final solution | ||||
| 	  precisionChange(mmp, psi_f); | ||||
| 	  psi = psi + mmp; | ||||
| 	 | ||||
| 	 | ||||
| 	  SolverTimer.Stop(); | ||||
| 	  Linop_d.HermOpAndNorm(psi, mmp, d, qq); | ||||
| 	  p = mmp - src; | ||||
|  | ||||
| 	  RealD srcnorm = sqrt(norm2(src)); | ||||
| 	  RealD resnorm = sqrt(norm2(p)); | ||||
| 	  RealD true_residual = resnorm / srcnorm; | ||||
|  | ||||
| 	  std::cout << GridLogMessage << "ConjugateGradientReliableUpdate Converged on iteration " << k << " after " << l << " reliable updates" << std::endl; | ||||
| 	  std::cout << GridLogMessage << "\tComputed residual " << sqrt(cp / ssq)<<std::endl; | ||||
| 	  std::cout << GridLogMessage << "\tTrue residual " << true_residual<<std::endl; | ||||
| 	  std::cout << GridLogMessage << "\tTarget " << Tolerance << std::endl; | ||||
|  | ||||
| 	  std::cout << GridLogMessage << "Time breakdown "<<std::endl; | ||||
| 	  std::cout << GridLogMessage << "\tElapsed    " << SolverTimer.Elapsed() <<std::endl; | ||||
| 	  std::cout << GridLogMessage << "\tMatrix     " << MatrixTimer.Elapsed() <<std::endl; | ||||
| 	  std::cout << GridLogMessage << "\tLinalg     " << LinalgTimer.Elapsed() <<std::endl; | ||||
|  | ||||
| 	  IterationsToComplete = k;	 | ||||
| 	  ReliableUpdatesPerformed = l; | ||||
| 	   | ||||
| 	  if(DoFinalCleanup){ | ||||
| 	    //Do a final CG to cleanup | ||||
| 	    std::cout << GridLogMessage << "ConjugateGradientReliableUpdate performing final cleanup.\n"; | ||||
| 	    ConjugateGradient<FieldD> CG(Tolerance,MaxIterations); | ||||
| 	    CG.ErrorOnNoConverge = ErrorOnNoConverge; | ||||
| 	    CG(Linop_d,src,psi); | ||||
| 	    IterationsToCleanup = CG.IterationsToComplete; | ||||
| 	  } | ||||
| 	  else if (ErrorOnNoConverge) assert(true_residual / Tolerance < 10000.0); | ||||
|  | ||||
| 	  std::cout << GridLogMessage << "ConjugateGradientReliableUpdate complete.\n"; | ||||
| 	  return; | ||||
| 	} | ||||
| 	else if(cp < Delta * MaxResidSinceLastRelUp) { //reliable update | ||||
| 	  std::cout << GridLogMessage << "ConjugateGradientReliableUpdate " | ||||
| 		    << cp << "(residual) < " << Delta << "(Delta) * " << MaxResidSinceLastRelUp << "(MaxResidSinceLastRelUp) on iteration " << k << " : performing reliable update\n"; | ||||
| 	  precisionChange(mmp, psi_f); | ||||
| 	  psi = psi + mmp; | ||||
|  | ||||
| 	  Linop_d.HermOpAndNorm(psi, mmp, d, qq); | ||||
| 	  r = src - mmp; | ||||
|  | ||||
| 	  psi_f = zero; | ||||
| 	  precisionChange(r_f, r); | ||||
| 	  cp = norm2(r); | ||||
| 	  MaxResidSinceLastRelUp = cp; | ||||
|  | ||||
| 	  b = cp/c; | ||||
| 	   | ||||
| 	  std::cout << GridLogMessage << "ConjugateGradientReliableUpdate new residual " << cp << std::endl; | ||||
| 	   | ||||
| 	  l = l+1; | ||||
| 	} | ||||
|  | ||||
| 	p_f = p_f * b + r_f; //update search vector after reliable update appears to help convergence | ||||
|  | ||||
| 	if(!using_fallback && Linop_fallback != NULL && cp < fallback_transition_tol){ | ||||
| 	  std::cout << GridLogMessage << "ConjugateGradientReliableUpdate switching to fallback linear operator on iteration " << k << " at residual " << cp << std::endl; | ||||
| 	  Linop_f_use = Linop_fallback; | ||||
| 	  using_fallback = true; | ||||
| 	} | ||||
|  | ||||
| 	 | ||||
|       } | ||||
|       std::cout << GridLogMessage << "ConjugateGradientReliableUpdate did NOT converge" | ||||
| 		<< std::endl; | ||||
|        | ||||
|       if (ErrorOnNoConverge) assert(0); | ||||
|       IterationsToComplete = k; | ||||
|       ReliableUpdatesPerformed = l;       | ||||
|     }     | ||||
|   }; | ||||
|  | ||||
|  | ||||
| }; | ||||
|  | ||||
|  | ||||
|  | ||||
| #endif | ||||
| @@ -1,81 +0,0 @@ | ||||
|     /************************************************************************************* | ||||
|  | ||||
|     Grid physics library, www.github.com/paboyle/Grid  | ||||
|  | ||||
|     Source file: ./lib/algorithms/iterative/EigenSort.h | ||||
|  | ||||
|     Copyright (C) 2015 | ||||
|  | ||||
| Author: Peter Boyle <paboyle@ph.ed.ac.uk> | ||||
|  | ||||
|     This program is free software; you can redistribute it and/or modify | ||||
|     it under the terms of the GNU General Public License as published by | ||||
|     the Free Software Foundation; either version 2 of the License, or | ||||
|     (at your option) any later version. | ||||
|  | ||||
|     This program is distributed in the hope that it will be useful, | ||||
|     but WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
|     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the | ||||
|     GNU General Public License for more details. | ||||
|  | ||||
|     You should have received a copy of the GNU General Public License along | ||||
|     with this program; if not, write to the Free Software Foundation, Inc., | ||||
|     51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||||
|  | ||||
|     See the full license in the file "LICENSE" in the top level distribution directory | ||||
|     *************************************************************************************/ | ||||
|     /*  END LEGAL */ | ||||
| #ifndef GRID_EIGENSORT_H | ||||
| #define GRID_EIGENSORT_H | ||||
|  | ||||
|  | ||||
| namespace Grid { | ||||
|     ///////////////////////////////////////////////////////////// | ||||
|     // Eigen sorter to begin with | ||||
|     ///////////////////////////////////////////////////////////// | ||||
|  | ||||
| template<class Field> | ||||
| class SortEigen { | ||||
|  private: | ||||
|    | ||||
| //hacking for testing for now | ||||
|  private: | ||||
|   static bool less_lmd(RealD left,RealD right){ | ||||
|     return left > right; | ||||
|   }   | ||||
|   static bool less_pair(std::pair<RealD,Field const*>& left, | ||||
|                         std::pair<RealD,Field const*>& right){ | ||||
|     return left.first > (right.first); | ||||
|   }   | ||||
|    | ||||
|    | ||||
|  public: | ||||
|  | ||||
|   void push(DenseVector<RealD>& lmd, | ||||
|             DenseVector<Field>& evec,int N) { | ||||
|     DenseVector<Field> cpy(lmd.size(),evec[0]._grid); | ||||
|     for(int i=0;i<lmd.size();i++) cpy[i] = evec[i]; | ||||
|      | ||||
|     DenseVector<std::pair<RealD, Field const*> > emod(lmd.size());     | ||||
|     for(int i=0;i<lmd.size();++i) | ||||
|       emod[i] = std::pair<RealD,Field const*>(lmd[i],&cpy[i]); | ||||
|  | ||||
    std::partial_sort(emod.begin(),emod.begin()+N,emod.end(),less_pair);
|  | ||||
|     typename DenseVector<std::pair<RealD, Field const*> >::iterator it = emod.begin(); | ||||
|     for(int i=0;i<N;++i){ | ||||
|       lmd[i]=it->first; | ||||
|       evec[i]=*(it->second); | ||||
|       ++it; | ||||
|     } | ||||
|   } | ||||
|   void push(DenseVector<RealD>& lmd,int N) { | ||||
|     std::partial_sort(lmd.begin(),lmd.begin()+N,lmd.end(),less_lmd); | ||||
|   } | ||||
|   bool saturated(RealD lmd, RealD thrs) { | ||||
|     return fabs(lmd) > fabs(thrs); | ||||
|   } | ||||
| }; | ||||
|  | ||||
| } | ||||
| #endif | ||||
										
											
(File diff suppressed because it is too large)
lib/algorithms/iterative/LocalCoherenceLanczos.h (new file, 352 lines)
									
								
							| @@ -0,0 +1,352 @@ | ||||
|     /************************************************************************************* | ||||
|  | ||||
|     Grid physics library, www.github.com/paboyle/Grid  | ||||
|  | ||||
|     Source file: ./lib/algorithms/iterative/LocalCoherenceLanczos.h | ||||
|  | ||||
|     Copyright (C) 2015 | ||||
|  | ||||
| Author: Christoph Lehner <clehner@bnl.gov> | ||||
| Author: paboyle <paboyle@ph.ed.ac.uk> | ||||
|  | ||||
|     This program is free software; you can redistribute it and/or modify | ||||
|     it under the terms of the GNU General Public License as published by | ||||
|     the Free Software Foundation; either version 2 of the License, or | ||||
|     (at your option) any later version. | ||||
|  | ||||
|     This program is distributed in the hope that it will be useful, | ||||
|     but WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
|     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the | ||||
|     GNU General Public License for more details. | ||||
|  | ||||
|     You should have received a copy of the GNU General Public License along | ||||
|     with this program; if not, write to the Free Software Foundation, Inc., | ||||
|     51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||||
|  | ||||
|     See the full license in the file "LICENSE" in the top level distribution directory | ||||
|     *************************************************************************************/ | ||||
|     /*  END LEGAL */ | ||||
| #ifndef GRID_LOCAL_COHERENCE_IRL_H | ||||
| #define GRID_LOCAL_COHERENCE_IRL_H | ||||
| namespace Grid {  | ||||
| struct LanczosParams : Serializable { | ||||
|  public: | ||||
|   GRID_SERIALIZABLE_CLASS_MEMBERS(LanczosParams, | ||||
| 				  ChebyParams, Cheby,/*Chebyshev*/ | ||||
| 				  int, Nstop,    /*Vecs in Lanczos must converge Nstop < Nk < Nm*/ | ||||
| 				  int, Nk,       /*Vecs in Lanczos seek converge*/ | ||||
| 				  int, Nm,       /*Total vecs in Lanczos include restart*/ | ||||
| 				  RealD, resid,  /*residual*/ | ||||
|  				  int, MaxIt,  | ||||
| 				  RealD, betastp,  /* ? */ | ||||
| 				  int, MinRes);    // Must restart | ||||
| }; | ||||
|  | ||||
| struct LocalCoherenceLanczosParams : Serializable { | ||||
|  public: | ||||
|   GRID_SERIALIZABLE_CLASS_MEMBERS(LocalCoherenceLanczosParams, | ||||
| 				  bool, doFine, | ||||
| 				  bool, doFineRead, | ||||
| 				  bool, doCoarse, | ||||
| 	       			  bool, doCoarseRead, | ||||
| 				  LanczosParams, FineParams, | ||||
| 				  LanczosParams, CoarseParams, | ||||
| 				  ChebyParams,   Smoother, | ||||
| 				  RealD        , coarse_relax_tol, | ||||
| 				  std::vector<int>, blockSize, | ||||
| 				  std::string, config, | ||||
| 				  std::vector < std::complex<double>  >, omega, | ||||
| 				  RealD, mass, | ||||
| 				  RealD, M5); | ||||
| }; | ||||
|  | ||||
| // Duplicate functionality; ProjectedFunctionHermOp could be used with the trivial function | ||||
| template<class Fobj,class CComplex,int nbasis> | ||||
| class ProjectedHermOp : public LinearFunction<Lattice<iVector<CComplex,nbasis > > > { | ||||
| public: | ||||
|   typedef iVector<CComplex,nbasis >           CoarseSiteVector; | ||||
|   typedef Lattice<CoarseSiteVector>           CoarseField; | ||||
|   typedef Lattice<CComplex>   CoarseScalar; // used for inner products on fine field | ||||
|   typedef Lattice<Fobj>          FineField; | ||||
|  | ||||
|   LinearOperatorBase<FineField> &_Linop; | ||||
|   Aggregation<Fobj,CComplex,nbasis> &_Aggregate; | ||||
|  | ||||
|   ProjectedHermOp(LinearOperatorBase<FineField>& linop,  Aggregation<Fobj,CComplex,nbasis> &aggregate) :  | ||||
|     _Linop(linop), | ||||
|     _Aggregate(aggregate)  {  }; | ||||
|  | ||||
|   void operator()(const CoarseField& in, CoarseField& out) { | ||||
|  | ||||
|     GridBase *FineGrid = _Aggregate.FineGrid; | ||||
|     FineField fin(FineGrid); | ||||
|     FineField fout(FineGrid); | ||||
|  | ||||
|     _Aggregate.PromoteFromSubspace(in,fin);    std::cout<<GridLogIRL<<"ProjectedHermop : Promote to fine"<<std::endl; | ||||
|     _Linop.HermOp(fin,fout);                   std::cout<<GridLogIRL<<"ProjectedHermop : HermOp (fine) "<<std::endl; | ||||
|     _Aggregate.ProjectToSubspace(out,fout);    std::cout<<GridLogIRL<<"ProjectedHermop : Project to coarse "<<std::endl; | ||||
|   } | ||||
| }; | ||||
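// Viewed on the coarse grid, ProjectedHermOp therefore implements out = P^dag H P in,
// where P promotes a coarse vector into the fine subspace held by the Aggregation and
// P^dag projects back; ProjectedFunctionHermOp below applies a polynomial of H
// (e.g. a Chebyshev polynomial) in place of H itself.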
|  | ||||
| template<class Fobj,class CComplex,int nbasis> | ||||
| class ProjectedFunctionHermOp : public LinearFunction<Lattice<iVector<CComplex,nbasis > > > { | ||||
| public: | ||||
|   typedef iVector<CComplex,nbasis >           CoarseSiteVector; | ||||
|   typedef Lattice<CoarseSiteVector>           CoarseField; | ||||
|   typedef Lattice<CComplex>   CoarseScalar; // used for inner products on fine field | ||||
|   typedef Lattice<Fobj>          FineField; | ||||
|  | ||||
|  | ||||
|   OperatorFunction<FineField>   & _poly; | ||||
|   LinearOperatorBase<FineField> &_Linop; | ||||
|   Aggregation<Fobj,CComplex,nbasis> &_Aggregate; | ||||
|  | ||||
|   ProjectedFunctionHermOp(OperatorFunction<FineField> & poly,LinearOperatorBase<FineField>& linop,  | ||||
| 			  Aggregation<Fobj,CComplex,nbasis> &aggregate) :  | ||||
|     _poly(poly), | ||||
|     _Linop(linop), | ||||
|     _Aggregate(aggregate)  {  }; | ||||
|  | ||||
|   void operator()(const CoarseField& in, CoarseField& out) { | ||||
|  | ||||
|     GridBase *FineGrid = _Aggregate.FineGrid; | ||||
|  | ||||
|     FineField fin(FineGrid) ;fin.checkerboard  =_Aggregate.checkerboard; | ||||
|     FineField fout(FineGrid);fout.checkerboard =_Aggregate.checkerboard; | ||||
|      | ||||
|     _Aggregate.PromoteFromSubspace(in,fin);    std::cout<<GridLogIRL<<"ProjectedFunctionHermop : Promote to fine"<<std::endl; | ||||
|     _poly(_Linop,fin,fout);                    std::cout<<GridLogIRL<<"ProjectedFunctionHermop : Poly "<<std::endl; | ||||
|     _Aggregate.ProjectToSubspace(out,fout);    std::cout<<GridLogIRL<<"ProjectedFunctionHermop : Project to coarse "<<std::endl; | ||||
|   } | ||||
| }; | ||||
|  | ||||
| template<class Fobj,class CComplex,int nbasis> | ||||
| class ImplicitlyRestartedLanczosSmoothedTester  : public ImplicitlyRestartedLanczosTester<Lattice<iVector<CComplex,nbasis > > > | ||||
| { | ||||
|  public: | ||||
|   typedef iVector<CComplex,nbasis >           CoarseSiteVector; | ||||
|   typedef Lattice<CoarseSiteVector>           CoarseField; | ||||
|   typedef Lattice<CComplex>   CoarseScalar; // used for inner products on fine field | ||||
|   typedef Lattice<Fobj>          FineField; | ||||
|  | ||||
|   LinearFunction<CoarseField> & _Poly; | ||||
|   OperatorFunction<FineField>   & _smoother; | ||||
|   LinearOperatorBase<FineField> &_Linop; | ||||
|   Aggregation<Fobj,CComplex,nbasis> &_Aggregate; | ||||
|   RealD                             _coarse_relax_tol; | ||||
|   ImplicitlyRestartedLanczosSmoothedTester(LinearFunction<CoarseField>   &Poly, | ||||
| 					   OperatorFunction<FineField>   &smoother, | ||||
| 					   LinearOperatorBase<FineField> &Linop, | ||||
| 					   Aggregation<Fobj,CComplex,nbasis> &Aggregate, | ||||
| 					   RealD coarse_relax_tol=5.0e3)  | ||||
|     : _smoother(smoother), _Linop(Linop),_Aggregate(Aggregate), _Poly(Poly), _coarse_relax_tol(coarse_relax_tol)  {    }; | ||||
|  | ||||
|   int TestConvergence(int j,RealD eresid,CoarseField &B, RealD &eval,RealD evalMaxApprox) | ||||
|   { | ||||
|     CoarseField v(B); | ||||
|     RealD eval_poly = eval; | ||||
|     // Apply operator | ||||
|     _Poly(B,v); | ||||
|  | ||||
|     RealD vnum = real(innerProduct(B,v)); // HermOp. | ||||
|     RealD vden = norm2(B); | ||||
|     RealD vv0  = norm2(v); | ||||
|     eval   = vnum/vden; | ||||
|     v -= eval*B; | ||||
|  | ||||
|     RealD vv = norm2(v) / ::pow(evalMaxApprox,2.0); | ||||
|  | ||||
|     std::cout.precision(13); | ||||
|     std::cout<<GridLogIRL  << "[" << std::setw(3)<<j<<"] " | ||||
| 	     <<"eval = "<<std::setw(25)<< eval << " (" << eval_poly << ")" | ||||
| 	     <<" |H B[i] - eval[i]B[i]|^2 / evalMaxApprox^2 " << std::setw(25) << vv | ||||
| 	     <<std::endl; | ||||
|  | ||||
|     int conv=0; | ||||
|     if( (vv<eresid*eresid) ) conv = 1; | ||||
|     return conv; | ||||
|   } | ||||
|   int ReconstructEval(int j,RealD eresid,CoarseField &B, RealD &eval,RealD evalMaxApprox) | ||||
|   { | ||||
|     GridBase *FineGrid = _Aggregate.FineGrid; | ||||
|  | ||||
|     int checkerboard   = _Aggregate.checkerboard; | ||||
|  | ||||
|     FineField fB(FineGrid);fB.checkerboard =checkerboard; | ||||
|     FineField fv(FineGrid);fv.checkerboard =checkerboard; | ||||
|  | ||||
|     _Aggregate.PromoteFromSubspace(B,fv); | ||||
|     _smoother(_Linop,fv,fB);  | ||||
|  | ||||
|     RealD eval_poly = eval; | ||||
|     _Linop.HermOp(fB,fv); | ||||
|  | ||||
|     RealD vnum = real(innerProduct(fB,fv)); // HermOp. | ||||
|     RealD vden = norm2(fB); | ||||
|     RealD vv0  = norm2(fv); | ||||
|     eval   = vnum/vden; | ||||
|     fv -= eval*fB; | ||||
|     RealD vv = norm2(fv) / ::pow(evalMaxApprox,2.0); | ||||
|  | ||||
|     std::cout.precision(13); | ||||
|     std::cout<<GridLogIRL  << "[" << std::setw(3)<<j<<"] " | ||||
| 	     <<"eval = "<<std::setw(25)<< eval << " (" << eval_poly << ")" | ||||
| 	     <<" |H B[i] - eval[i]B[i]|^2 / evalMaxApprox^2 " << std::setw(25) << vv | ||||
| 	     <<std::endl; | ||||
|     if ( j > nbasis ) eresid = eresid*_coarse_relax_tol; | ||||
|     if( (vv<eresid*eresid) ) return 1; | ||||
|     return 0; | ||||
|   } | ||||
| }; | ||||
|  | ||||
| //////////////////////////////////////////// | ||||
| // Make serializable Lanczos params | ||||
| //////////////////////////////////////////// | ||||
| template<class Fobj,class CComplex,int nbasis> | ||||
| class LocalCoherenceLanczos  | ||||
| { | ||||
| public: | ||||
|   typedef iVector<CComplex,nbasis >           CoarseSiteVector; | ||||
|   typedef Lattice<CComplex>                   CoarseScalar; // used for inner products on fine field | ||||
|   typedef Lattice<CoarseSiteVector>           CoarseField; | ||||
|   typedef Lattice<Fobj>                       FineField; | ||||
|  | ||||
| protected: | ||||
|   GridBase *_CoarseGrid; | ||||
|   GridBase *_FineGrid; | ||||
|   int _checkerboard; | ||||
|   LinearOperatorBase<FineField>                 & _FineOp; | ||||
|    | ||||
  // FIXME: replace Aggregation with a vector of fine fields; the code reuse is too small
  // to justify the hassle and complexity of the cross coupling.
|   Aggregation<Fobj,CComplex,nbasis>               _Aggregate;   | ||||
|   std::vector<RealD>                              evals_fine; | ||||
|   std::vector<RealD>                              evals_coarse;  | ||||
|   std::vector<CoarseField>                        evec_coarse; | ||||
| public: | ||||
|   LocalCoherenceLanczos(GridBase *FineGrid, | ||||
| 		GridBase *CoarseGrid, | ||||
| 		LinearOperatorBase<FineField> &FineOp, | ||||
| 		int checkerboard) : | ||||
|     _CoarseGrid(CoarseGrid), | ||||
|     _FineGrid(FineGrid), | ||||
|     _Aggregate(CoarseGrid,FineGrid,checkerboard), | ||||
|     _FineOp(FineOp), | ||||
|     _checkerboard(checkerboard) | ||||
|   { | ||||
|     evals_fine.resize(0); | ||||
|     evals_coarse.resize(0); | ||||
|   }; | ||||
|   void Orthogonalise(void ) { _Aggregate.Orthogonalise(); } | ||||
|  | ||||
|   template<typename T>  static RealD normalise(T& v)  | ||||
|   { | ||||
|     RealD nn = norm2(v); | ||||
|     nn = ::sqrt(nn); | ||||
|     v = v * (1.0/nn); | ||||
|     return nn; | ||||
|   } | ||||
|  | ||||
|   void fakeFine(void) | ||||
|   { | ||||
|     int Nk = nbasis; | ||||
|     _Aggregate.subspace.resize(Nk,_FineGrid); | ||||
|     _Aggregate.subspace[0]=1.0; | ||||
|     _Aggregate.subspace[0].checkerboard=_checkerboard; | ||||
|     normalise(_Aggregate.subspace[0]); | ||||
|     PlainHermOp<FineField>    Op(_FineOp); | ||||
|     for(int k=1;k<Nk;k++){ | ||||
|       _Aggregate.subspace[k].checkerboard=_checkerboard; | ||||
|       Op(_Aggregate.subspace[k-1],_Aggregate.subspace[k]); | ||||
|       normalise(_Aggregate.subspace[k]); | ||||
|     } | ||||
|   } | ||||
|  | ||||
|   void testFine(RealD resid)  | ||||
|   { | ||||
|     assert(evals_fine.size() == nbasis); | ||||
|     assert(_Aggregate.subspace.size() == nbasis); | ||||
|     PlainHermOp<FineField>    Op(_FineOp); | ||||
|     ImplicitlyRestartedLanczosHermOpTester<FineField> SimpleTester(Op); | ||||
|     for(int k=0;k<nbasis;k++){ | ||||
|       assert(SimpleTester.ReconstructEval(k,resid,_Aggregate.subspace[k],evals_fine[k],1.0)==1); | ||||
|     } | ||||
|   } | ||||
|  | ||||
|   void testCoarse(RealD resid,ChebyParams cheby_smooth,RealD relax)  | ||||
|   { | ||||
|     assert(evals_fine.size() == nbasis); | ||||
|     assert(_Aggregate.subspace.size() == nbasis); | ||||
|     ////////////////////////////////////////////////////////////////////////////////////////////////// | ||||
|     // create a smoother and see if we can get a cheap convergence test and smooth inside the IRL | ||||
|     ////////////////////////////////////////////////////////////////////////////////////////////////// | ||||
|     Chebyshev<FineField>                          ChebySmooth(cheby_smooth); | ||||
|     ProjectedFunctionHermOp<Fobj,CComplex,nbasis> ChebyOp (ChebySmooth,_FineOp,_Aggregate); | ||||
|     ImplicitlyRestartedLanczosSmoothedTester<Fobj,CComplex,nbasis> ChebySmoothTester(ChebyOp,ChebySmooth,_FineOp,_Aggregate,relax); | ||||
|  | ||||
|     for(int k=0;k<evec_coarse.size();k++){ | ||||
|       if ( k < nbasis ) {  | ||||
| 	assert(ChebySmoothTester.ReconstructEval(k,resid,evec_coarse[k],evals_coarse[k],1.0)==1); | ||||
|       } else {  | ||||
| 	assert(ChebySmoothTester.ReconstructEval(k,resid*relax,evec_coarse[k],evals_coarse[k],1.0)==1); | ||||
|       } | ||||
|     } | ||||
|   } | ||||
|  | ||||
|   void calcFine(ChebyParams cheby_parms,int Nstop,int Nk,int Nm,RealD resid,  | ||||
| 		RealD MaxIt, RealD betastp, int MinRes) | ||||
|   { | ||||
|     assert(nbasis<=Nm); | ||||
|     Chebyshev<FineField>      Cheby(cheby_parms); | ||||
|     FunctionHermOp<FineField> ChebyOp(Cheby,_FineOp); | ||||
|     PlainHermOp<FineField>    Op(_FineOp); | ||||
|  | ||||
|     evals_fine.resize(Nm); | ||||
|     _Aggregate.subspace.resize(Nm,_FineGrid); | ||||
|  | ||||
|     ImplicitlyRestartedLanczos<FineField> IRL(ChebyOp,Op,Nstop,Nk,Nm,resid,MaxIt,betastp,MinRes); | ||||
|  | ||||
|     FineField src(_FineGrid); src=1.0; src.checkerboard = _checkerboard; | ||||
|  | ||||
|     int Nconv; | ||||
|     IRL.calc(evals_fine,_Aggregate.subspace,src,Nconv,false); | ||||
|      | ||||
|     // Shrink down to number saved | ||||
|     assert(Nstop>=nbasis); | ||||
|     assert(Nconv>=nbasis); | ||||
|     evals_fine.resize(nbasis); | ||||
|     _Aggregate.subspace.resize(nbasis,_FineGrid); | ||||
|   } | ||||
|   void calcCoarse(ChebyParams cheby_op,ChebyParams cheby_smooth,RealD relax, | ||||
| 		  int Nstop, int Nk, int Nm,RealD resid,  | ||||
| 		  RealD MaxIt, RealD betastp, int MinRes) | ||||
|   { | ||||
|     Chebyshev<FineField>                          Cheby(cheby_op); | ||||
|     ProjectedHermOp<Fobj,CComplex,nbasis>         Op(_FineOp,_Aggregate); | ||||
|     ProjectedFunctionHermOp<Fobj,CComplex,nbasis> ChebyOp (Cheby,_FineOp,_Aggregate); | ||||
|     ////////////////////////////////////////////////////////////////////////////////////////////////// | ||||
|     // create a smoother and see if we can get a cheap convergence test and smooth inside the IRL | ||||
|     ////////////////////////////////////////////////////////////////////////////////////////////////// | ||||
|  | ||||
|     Chebyshev<FineField>                                           ChebySmooth(cheby_smooth); | ||||
|     ImplicitlyRestartedLanczosSmoothedTester<Fobj,CComplex,nbasis> ChebySmoothTester(ChebyOp,ChebySmooth,_FineOp,_Aggregate,relax); | ||||
|  | ||||
|     evals_coarse.resize(Nm); | ||||
|     evec_coarse.resize(Nm,_CoarseGrid); | ||||
|  | ||||
|     CoarseField src(_CoarseGrid);     src=1.0;  | ||||
|  | ||||
|     ImplicitlyRestartedLanczos<CoarseField> IRL(ChebyOp,ChebyOp,ChebySmoothTester,Nstop,Nk,Nm,resid,MaxIt,betastp,MinRes); | ||||
|     int Nconv=0; | ||||
|     IRL.calc(evals_coarse,evec_coarse,src,Nconv,false); | ||||
|     assert(Nconv>=Nstop); | ||||
|     evals_coarse.resize(Nstop); | ||||
|     evec_coarse.resize (Nstop,_CoarseGrid); | ||||
|     for (int i=0;i<Nstop;i++){ | ||||
|       std::cout << i << " Coarse eval = " << evals_coarse[i]  << std::endl; | ||||
|     } | ||||
|   } | ||||
| }; | ||||
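////////////////////////////////////////////////////////////////////////////////
// Illustrative driver (a sketch, not part of the class): the intended call
// order for LocalCoherenceLanczos. The function name and the parameter values
// below are hypothetical, and the three ChebyParams are assumed to be supplied
// by the caller; production tests read all of these from an input file.
////////////////////////////////////////////////////////////////////////////////
template<class Fobj,class CComplex,int nbasis>
void LocalCoherenceLanczosSketch(GridBase *FineGrid,GridBase *CoarseGrid,
				 LinearOperatorBase<Lattice<Fobj> > &FineOp,
				 ChebyParams ChebyFine,ChebyParams ChebyOp,ChebyParams ChebySmooth)
{
  const int   Nstop=nbasis, Nk=nbasis+16, Nm=nbasis+48, MinRes=0;
  const RealD resid=1.0e-8, MaxIt=10000, betastp=0.0, relax=100.0;

  LocalCoherenceLanczos<Fobj,CComplex,nbasis> LCL(FineGrid,CoarseGrid,FineOp,Odd);
  LCL.calcFine (ChebyFine,Nstop,Nk,Nm,resid,MaxIt,betastp,MinRes); // fine IRL; keeps nbasis vectors
  LCL.Orthogonalise();                                             // block orthonormalise the subspace
  LCL.calcCoarse(ChebyOp,ChebySmooth,relax,Nstop,Nk,Nm,resid,MaxIt,betastp,MinRes);
  LCL.testFine  (resid);                   // reconstruct fine evals against the plain HermOp
  LCL.testCoarse(resid,ChebySmooth,relax); // smoothed residual test of the coarse evecs
}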
|  | ||||
| } | ||||
| #endif | ||||
| @@ -53,16 +53,119 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk> | ||||
|    *     M psi = eta | ||||
|    *********************** | ||||
|    *Odd | ||||
|    * i)   (D_oo)^{\dag} D_oo psi_o = (D_oo)^dag L^{-1}  eta_o | ||||
|    * i)                 D_oo psi_o =  L^{-1}  eta_o | ||||
|    *                        eta_o' = (D_oo)^dag (eta_o - Moe Mee^{-1} eta_e) | ||||
|    * | ||||
|    * Wilson: | ||||
|    *      (D_oo)^{\dag} D_oo psi_o = (D_oo)^dag L^{-1}  eta_o | ||||
|    * Stag: | ||||
|    *      D_oo psi_o = L^{-1}  eta =    (eta_o - Moe Mee^{-1} eta_e) | ||||
|    * | ||||
   * L^{-1} eta = ( 1              0 ) ( eta_e )
   *              ( -Moe Mee^{-1}  1 ) ( eta_o )
   *
   * so that L^{-1} eta_o = eta_o - Moe Mee^{-1} eta_e
   *
|    *Even | ||||
|    * ii)  Mee psi_e + Meo psi_o = src_e | ||||
|    * | ||||
|    *   => sol_e = M_ee^-1 * ( src_e - Meo sol_o )... | ||||
|    * | ||||
|    *  | ||||
|    * TODO: Other options: | ||||
|    *  | ||||
|    * a) change checkerboards for Schur e<->o | ||||
|    * | ||||
|    * Left precon by Moo^-1 | ||||
   * b) Doo^{dag} M_oo^-dag Moo^-1 Doo psi_o =  (D_oo)^dag M_oo^-dag Moo^-1 L^{-1}  eta_o
|    *                              eta_o'     = (D_oo)^dag  M_oo^-dag Moo^-1 (eta_o - Moe Mee^{-1} eta_e) | ||||
|    * | ||||
|    * Right precon by Moo^-1 | ||||
   * c) M_oo^-dag Doo^{dag} Doo Moo^-1 phi_o = M_oo^-dag (D_oo)^dag L^{-1}  eta_o
|    *                              eta_o'     = M_oo^-dag (D_oo)^dag (eta_o - Moe Mee^{-1} eta_e) | ||||
|    *                              psi_o = M_oo^-1 phi_o | ||||
|    * TODO: Deflation  | ||||
|    */ | ||||
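/*
 * For reference, the block LDU factorisation behind the L^{-1} used above
 * (a restatement consistent with the notes, with D_oo = Moo - Moe Mee^{-1} Meo):
 *
 *  M = ( Mee Meo )  =  ( 1             0 ) ( Mee  0    ) ( 1  Mee^{-1} Meo )
 *      ( Moe Moo )     ( Moe Mee^{-1}  1 ) ( 0    D_oo ) ( 0  1            )
 *
 * so M psi = eta reduces to the odd-odd system D_oo psi_o = eta_o - Moe Mee^{-1} eta_e,
 * followed by the even back-substitution psi_e = Mee^{-1} ( eta_e - Meo psi_o ).
 */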
| namespace Grid { | ||||
|  | ||||
|   /////////////////////////////////////////////////////////////////////////////////////////////////////// | ||||
|   // Take a matrix and form a Red Black solver calling a Herm solver | ||||
|   // Use of RB info prevents making SchurRedBlackSolve conform to standard interface | ||||
|   /////////////////////////////////////////////////////////////////////////////////////////////////////// | ||||
|  | ||||
|   template<class Field> class SchurRedBlackStaggeredSolve { | ||||
|   private: | ||||
|     OperatorFunction<Field> & _HermitianRBSolver; | ||||
|     int CBfactorise; | ||||
|   public: | ||||
|  | ||||
|     ///////////////////////////////////////////////////// | ||||
|     // Wrap the usual normal equations Schur trick | ||||
|     ///////////////////////////////////////////////////// | ||||
|   SchurRedBlackStaggeredSolve(OperatorFunction<Field> &HermitianRBSolver)  : | ||||
|      _HermitianRBSolver(HermitianRBSolver)  | ||||
|     {  | ||||
|       CBfactorise=0; | ||||
|     }; | ||||
|  | ||||
|     template<class Matrix> | ||||
|       void operator() (Matrix & _Matrix,const Field &in, Field &out){ | ||||
|  | ||||
|       // FIXME CGdiagonalMee not implemented virtual function | ||||
|       // FIXME use CBfactorise to control schur decomp | ||||
|       GridBase *grid = _Matrix.RedBlackGrid(); | ||||
|       GridBase *fgrid= _Matrix.Grid(); | ||||
|  | ||||
|       SchurStaggeredOperator<Matrix,Field> _HermOpEO(_Matrix); | ||||
|   | ||||
|       Field src_e(grid); | ||||
|       Field src_o(grid); | ||||
|       Field sol_e(grid); | ||||
|       Field sol_o(grid); | ||||
|       Field   tmp(grid); | ||||
|       Field  Mtmp(grid); | ||||
|       Field resid(fgrid); | ||||
|  | ||||
|       pickCheckerboard(Even,src_e,in); | ||||
|       pickCheckerboard(Odd ,src_o,in); | ||||
|       pickCheckerboard(Even,sol_e,out); | ||||
|       pickCheckerboard(Odd ,sol_o,out); | ||||
|      | ||||
|       ///////////////////////////////////////////////////// | ||||
|       // src_o = (source_o - Moe MeeInv source_e) | ||||
|       ///////////////////////////////////////////////////// | ||||
|       _Matrix.MooeeInv(src_e,tmp);     assert(  tmp.checkerboard ==Even); | ||||
|       _Matrix.Meooe   (tmp,Mtmp);      assert( Mtmp.checkerboard ==Odd);      | ||||
|       tmp=src_o-Mtmp;                  assert(  tmp.checkerboard ==Odd);      | ||||
|  | ||||
|       src_o = tmp;     assert(src_o.checkerboard ==Odd); | ||||
|       //  _Matrix.Mooee(tmp,src_o); // Extra factor of "m" in source | ||||
|  | ||||
|       ////////////////////////////////////////////////////////////// | ||||
|       // Call the red-black solver | ||||
|       ////////////////////////////////////////////////////////////// | ||||
|       std::cout<<GridLogMessage << "SchurRedBlackStaggeredSolver calling the Mpc solver" <<std::endl; | ||||
|       _HermitianRBSolver(_HermOpEO,src_o,sol_o);  assert(sol_o.checkerboard==Odd); | ||||
|  | ||||
|       /////////////////////////////////////////////////// | ||||
|       // sol_e = M_ee^-1 * ( src_e - Meo sol_o )... | ||||
|       /////////////////////////////////////////////////// | ||||
|       _Matrix.Meooe(sol_o,tmp);        assert(  tmp.checkerboard   ==Even); | ||||
|       src_e = src_e-tmp;               assert(  src_e.checkerboard ==Even); | ||||
|       _Matrix.MooeeInv(src_e,sol_e);   assert(  sol_e.checkerboard ==Even); | ||||
|       | ||||
|       setCheckerboard(out,sol_e); assert(  sol_e.checkerboard ==Even); | ||||
|       setCheckerboard(out,sol_o); assert(  sol_o.checkerboard ==Odd ); | ||||
|  | ||||
|       // Verify the unprec residual | ||||
|       _Matrix.M(out,resid);  | ||||
|       resid = resid-in; | ||||
|       RealD ns = norm2(in); | ||||
|       RealD nr = norm2(resid); | ||||
|  | ||||
|       std::cout<<GridLogMessage << "SchurRedBlackStaggered solver true unprec resid "<< std::sqrt(nr/ns) <<" nr "<< nr <<" ns "<<ns << std::endl; | ||||
|     }      | ||||
|   }; | ||||
|   template<class Field> using SchurRedBlackStagSolve = SchurRedBlackStaggeredSolve<Field>; | ||||
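  /////////////////////////////////////////////////////////////////////////////////////////////////////
  // Illustrative only (hypothetical helper, not part of the library interface): how the wrapper is
  // meant to be driven, assuming Matrix exposes the usual even-odd interface used above
  // (RedBlackGrid/Grid/MooeeInv/Meooe/M).
  /////////////////////////////////////////////////////////////////////////////////////////////////////
  template<class Field,class Matrix>
  void StaggeredSchurSolveExample(Matrix &Ds,const Field &src,Field &sol,
				  RealD tol=1.0e-8,int maxit=10000)
  {
    ConjugateGradient<Field>            CG(tol,maxit);
    SchurRedBlackStaggeredSolve<Field>  SchurSolver(CG);
    SchurSolver(Ds,src,sol);            // odd-checkerboard solve + even back-substitution
  }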
|  | ||||
|   /////////////////////////////////////////////////////////////////////////////////////////////////////// | ||||
|   // Take a matrix and form a Red Black solver calling a Herm solver | ||||
|   // Use of RB info prevents making SchurRedBlackSolve conform to standard interface | ||||
| @@ -76,12 +179,10 @@ namespace Grid { | ||||
|     ///////////////////////////////////////////////////// | ||||
|     // Wrap the usual normal equations Schur trick | ||||
|     ///////////////////////////////////////////////////// | ||||
|   SchurRedBlackDiagMooeeSolve(OperatorFunction<Field> &HermitianRBSolver)  : | ||||
|      _HermitianRBSolver(HermitianRBSolver)  | ||||
|   SchurRedBlackDiagMooeeSolve(OperatorFunction<Field> &HermitianRBSolver,int cb=0)  :  _HermitianRBSolver(HermitianRBSolver)  | ||||
|   {  | ||||
|       CBfactorise=0; | ||||
|     CBfactorise=cb; | ||||
|   }; | ||||
|  | ||||
|     template<class Matrix> | ||||
|       void operator() (Matrix & _Matrix,const Field &in, Field &out){ | ||||
|  | ||||
| @@ -141,5 +242,166 @@ namespace Grid { | ||||
|     }      | ||||
|   }; | ||||
|  | ||||
|  | ||||
|   /////////////////////////////////////////////////////////////////////////////////////////////////////// | ||||
|   // Take a matrix and form a Red Black solver calling a Herm solver | ||||
|   // Use of RB info prevents making SchurRedBlackSolve conform to standard interface | ||||
|   /////////////////////////////////////////////////////////////////////////////////////////////////////// | ||||
|   template<class Field> class SchurRedBlackDiagTwoSolve { | ||||
|   private: | ||||
|     OperatorFunction<Field> & _HermitianRBSolver; | ||||
|     int CBfactorise; | ||||
|   public: | ||||
|  | ||||
|     ///////////////////////////////////////////////////// | ||||
|     // Wrap the usual normal equations Schur trick | ||||
|     ///////////////////////////////////////////////////// | ||||
|   SchurRedBlackDiagTwoSolve(OperatorFunction<Field> &HermitianRBSolver)  : | ||||
|      _HermitianRBSolver(HermitianRBSolver)  | ||||
|     {  | ||||
|       CBfactorise=0; | ||||
|     }; | ||||
|  | ||||
|     template<class Matrix> | ||||
|       void operator() (Matrix & _Matrix,const Field &in, Field &out){ | ||||
|  | ||||
|       // FIXME CGdiagonalMee not implemented virtual function | ||||
|       // FIXME use CBfactorise to control schur decomp | ||||
|       GridBase *grid = _Matrix.RedBlackGrid(); | ||||
|       GridBase *fgrid= _Matrix.Grid(); | ||||
|  | ||||
|       SchurDiagTwoOperator<Matrix,Field> _HermOpEO(_Matrix); | ||||
|   | ||||
|       Field src_e(grid); | ||||
|       Field src_o(grid); | ||||
|       Field sol_e(grid); | ||||
|       Field sol_o(grid); | ||||
|       Field   tmp(grid); | ||||
|       Field  Mtmp(grid); | ||||
|       Field resid(fgrid); | ||||
|  | ||||
|       pickCheckerboard(Even,src_e,in); | ||||
|       pickCheckerboard(Odd ,src_o,in); | ||||
|       pickCheckerboard(Even,sol_e,out); | ||||
|       pickCheckerboard(Odd ,sol_o,out); | ||||
|      | ||||
|       ///////////////////////////////////////////////////// | ||||
|       // src_o = Mdag * (source_o - Moe MeeInv source_e) | ||||
|       ///////////////////////////////////////////////////// | ||||
|       _Matrix.MooeeInv(src_e,tmp);     assert(  tmp.checkerboard ==Even); | ||||
|       _Matrix.Meooe   (tmp,Mtmp);      assert( Mtmp.checkerboard ==Odd);      | ||||
|       tmp=src_o-Mtmp;                  assert(  tmp.checkerboard ==Odd);      | ||||
|  | ||||
|       // get the right MpcDag | ||||
|       _HermOpEO.MpcDag(tmp,src_o);     assert(src_o.checkerboard ==Odd);        | ||||
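      // src_o now holds MpcDag (eta_o - Moe Mee^{-1} eta_e), so the Hermitian solver below
      // works on the normal equations MpcDag Mpc phi_o = src_o.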
|  | ||||
|       ////////////////////////////////////////////////////////////// | ||||
|       // Call the red-black solver | ||||
|       ////////////////////////////////////////////////////////////// | ||||
|       std::cout<<GridLogMessage << "SchurRedBlack solver calling the MpcDagMp solver" <<std::endl; | ||||
| //      _HermitianRBSolver(_HermOpEO,src_o,sol_o);  assert(sol_o.checkerboard==Odd); | ||||
|       _HermitianRBSolver(_HermOpEO,src_o,tmp);  assert(tmp.checkerboard==Odd); | ||||
|       _Matrix.MooeeInv(tmp,sol_o);        assert(  sol_o.checkerboard   ==Odd); | ||||
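      // tmp is phi_o = Moo psi_o of the right-preconditioned system, option c) in the notes
      // at the top of this file; the physical odd solution sol_o = Moo^{-1} phi_o is recovered here.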
|  | ||||
|       /////////////////////////////////////////////////// | ||||
|       // sol_e = M_ee^-1 * ( src_e - Meo sol_o )... | ||||
|       /////////////////////////////////////////////////// | ||||
|       _Matrix.Meooe(sol_o,tmp);        assert(  tmp.checkerboard   ==Even); | ||||
|       src_e = src_e-tmp;               assert(  src_e.checkerboard ==Even); | ||||
|       _Matrix.MooeeInv(src_e,sol_e);   assert(  sol_e.checkerboard ==Even); | ||||
|       | ||||
|       setCheckerboard(out,sol_e); assert(  sol_e.checkerboard ==Even); | ||||
|       setCheckerboard(out,sol_o); assert(  sol_o.checkerboard ==Odd ); | ||||
|  | ||||
|       // Verify the unprec residual | ||||
|       _Matrix.M(out,resid);  | ||||
|       resid = resid-in; | ||||
|       RealD ns = norm2(in); | ||||
|       RealD nr = norm2(resid); | ||||
|  | ||||
|       std::cout<<GridLogMessage << "SchurRedBlackDiagTwo solver true unprec resid "<< std::sqrt(nr/ns) <<" nr "<< nr <<" ns "<<ns << std::endl; | ||||
|     }      | ||||
|   }; | ||||
|   /////////////////////////////////////////////////////////////////////////////////////////////////////// | ||||
|   // Take a matrix and form a Red Black solver calling a Herm solver | ||||
|   // Use of RB info prevents making SchurRedBlackSolve conform to standard interface | ||||
|   /////////////////////////////////////////////////////////////////////////////////////////////////////// | ||||
|   template<class Field> class SchurRedBlackDiagTwoMixed { | ||||
|   private: | ||||
|     LinearFunction<Field> & _HermitianRBSolver; | ||||
|     int CBfactorise; | ||||
|   public: | ||||
|  | ||||
|     ///////////////////////////////////////////////////// | ||||
|     // Wrap the usual normal equations Schur trick | ||||
|     ///////////////////////////////////////////////////// | ||||
|   SchurRedBlackDiagTwoMixed(LinearFunction<Field> &HermitianRBSolver)  : | ||||
|      _HermitianRBSolver(HermitianRBSolver)  | ||||
|     {  | ||||
|       CBfactorise=0; | ||||
|     }; | ||||
|  | ||||
|     template<class Matrix> | ||||
|       void operator() (Matrix & _Matrix,const Field &in, Field &out){ | ||||
|  | ||||
|       // FIXME CGdiagonalMee not implemented virtual function | ||||
|       // FIXME use CBfactorise to control schur decomp | ||||
|       GridBase *grid = _Matrix.RedBlackGrid(); | ||||
|       GridBase *fgrid= _Matrix.Grid(); | ||||
|  | ||||
|       SchurDiagTwoOperator<Matrix,Field> _HermOpEO(_Matrix); | ||||
|   | ||||
|       Field src_e(grid); | ||||
|       Field src_o(grid); | ||||
|       Field sol_e(grid); | ||||
|       Field sol_o(grid); | ||||
|       Field   tmp(grid); | ||||
|       Field  Mtmp(grid); | ||||
|       Field resid(fgrid); | ||||
|  | ||||
|       pickCheckerboard(Even,src_e,in); | ||||
|       pickCheckerboard(Odd ,src_o,in); | ||||
|       pickCheckerboard(Even,sol_e,out); | ||||
|       pickCheckerboard(Odd ,sol_o,out); | ||||
|      | ||||
|       ///////////////////////////////////////////////////// | ||||
|       // src_o = Mdag * (source_o - Moe MeeInv source_e) | ||||
|       ///////////////////////////////////////////////////// | ||||
|       _Matrix.MooeeInv(src_e,tmp);     assert(  tmp.checkerboard ==Even); | ||||
|       _Matrix.Meooe   (tmp,Mtmp);      assert( Mtmp.checkerboard ==Odd);      | ||||
|       tmp=src_o-Mtmp;                  assert(  tmp.checkerboard ==Odd);      | ||||
|  | ||||
|       // get the right MpcDag | ||||
|       _HermOpEO.MpcDag(tmp,src_o);     assert(src_o.checkerboard ==Odd);        | ||||
|  | ||||
|       ////////////////////////////////////////////////////////////// | ||||
|       // Call the red-black solver | ||||
|       ////////////////////////////////////////////////////////////// | ||||
|       std::cout<<GridLogMessage << "SchurRedBlack solver calling the MpcDagMp solver" <<std::endl; | ||||
| //      _HermitianRBSolver(_HermOpEO,src_o,sol_o);  assert(sol_o.checkerboard==Odd); | ||||
| //      _HermitianRBSolver(_HermOpEO,src_o,tmp);  assert(tmp.checkerboard==Odd); | ||||
|       _HermitianRBSolver(src_o,tmp);  assert(tmp.checkerboard==Odd); | ||||
|       _Matrix.MooeeInv(tmp,sol_o);        assert(  sol_o.checkerboard   ==Odd); | ||||
|  | ||||
|       /////////////////////////////////////////////////// | ||||
|       // sol_e = M_ee^-1 * ( src_e - Meo sol_o )... | ||||
|       /////////////////////////////////////////////////// | ||||
|       _Matrix.Meooe(sol_o,tmp);        assert(  tmp.checkerboard   ==Even); | ||||
|       src_e = src_e-tmp;               assert(  src_e.checkerboard ==Even); | ||||
|       _Matrix.MooeeInv(src_e,sol_e);   assert(  sol_e.checkerboard ==Even); | ||||
|       | ||||
|       setCheckerboard(out,sol_e); assert(  sol_e.checkerboard ==Even); | ||||
|       setCheckerboard(out,sol_o); assert(  sol_o.checkerboard ==Odd ); | ||||
|  | ||||
|       // Verify the unprec residual | ||||
|       _Matrix.M(out,resid);  | ||||
|       resid = resid-in; | ||||
|       RealD ns = norm2(in); | ||||
|       RealD nr = norm2(resid); | ||||
|  | ||||
|       std::cout<<GridLogMessage << "SchurRedBlackDiagTwo solver true unprec resid "<< std::sqrt(nr/ns) <<" nr "<< nr <<" ns "<<ns << std::endl; | ||||
|     }      | ||||
|   }; | ||||
|  | ||||
| } | ||||
| #endif | ||||
|   | ||||
| @@ -1,7 +1,5 @@ | ||||
|  | ||||
|  | ||||
|  | ||||
| #include <Grid/GridCore.h> | ||||
| #include <fcntl.h> | ||||
|  | ||||
| namespace Grid { | ||||
|  | ||||
| @@ -11,7 +9,7 @@ int PointerCache::victim; | ||||
|  | ||||
| void *PointerCache::Insert(void *ptr,size_t bytes) { | ||||
|  | ||||
|   if (bytes < 4096 ) return NULL; | ||||
|   if (bytes < 4096 ) return ptr; | ||||
|  | ||||
| #ifdef GRID_OMP | ||||
|   assert(omp_in_parallel()==0); | ||||
| @@ -63,4 +61,37 @@ void *PointerCache::Lookup(size_t bytes) { | ||||
|   return NULL; | ||||
| } | ||||
|  | ||||
|  | ||||
| void check_huge_pages(void *Buf,uint64_t BYTES) | ||||
| { | ||||
| #ifdef __linux__ | ||||
|   int fd = open("/proc/self/pagemap", O_RDONLY); | ||||
|   assert(fd >= 0); | ||||
|   const int page_size = 4096; | ||||
|   uint64_t virt_pfn = (uint64_t)Buf / page_size; | ||||
|   off_t offset = sizeof(uint64_t) * virt_pfn; | ||||
|   uint64_t npages = (BYTES + page_size-1) / page_size; | ||||
|   uint64_t pagedata[npages]; | ||||
|   uint64_t ret = lseek(fd, offset, SEEK_SET); | ||||
|   assert(ret == offset); | ||||
|   ret = ::read(fd, pagedata, sizeof(uint64_t)*npages); | ||||
|   assert(ret == sizeof(uint64_t) * npages); | ||||
|   int nhugepages = npages / 512; | ||||
|   int n4ktotal, nnothuge; | ||||
|   n4ktotal = 0; | ||||
|   nnothuge = 0; | ||||
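  // Each pagemap entry carries the physical frame number in bits 0-54; a 2MB huge page is
  // 512 contiguous 4KB frames, so each 512-page block is checked for physical contiguity.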
|   for (int i = 0; i < nhugepages; ++i) { | ||||
|     uint64_t baseaddr = (pagedata[i*512] & 0x7fffffffffffffULL) * page_size; | ||||
|     for (int j = 0; j < 512; ++j) { | ||||
|       uint64_t pageaddr = (pagedata[i*512+j] & 0x7fffffffffffffULL) * page_size; | ||||
|       ++n4ktotal; | ||||
|       if (pageaddr != baseaddr + j * page_size) | ||||
| 	++nnothuge; | ||||
|       } | ||||
|   } | ||||
|   int rank = CartesianCommunicator::RankWorld(); | ||||
|   printf("rank %d Allocated %d 4k pages, %d not in huge pages\n", rank, n4ktotal, nnothuge); | ||||
| #endif | ||||
| } | ||||
|  | ||||
| } | ||||
|   | ||||
| @@ -64,6 +64,8 @@ namespace Grid { | ||||
|  | ||||
|   }; | ||||
|  | ||||
|   void check_huge_pages(void *Buf,uint64_t BYTES); | ||||
|  | ||||
| //////////////////////////////////////////////////////////////////// | ||||
| // A lattice of something, but assume the something is SIMDized. | ||||
| //////////////////////////////////////////////////////////////////// | ||||
| @@ -92,18 +94,34 @@ public: | ||||
|     size_type bytes = __n*sizeof(_Tp); | ||||
|  | ||||
|     _Tp *ptr = (_Tp *) PointerCache::Lookup(bytes); | ||||
|     //    if ( ptr != NULL )  | ||||
|     //      std::cout << "alignedAllocator "<<__n << " cache hit "<< std::hex << ptr <<std::dec <<std::endl; | ||||
|  | ||||
|     ////////////////// | ||||
|     // Hack 2MB align; could make option probably doesn't need configurability | ||||
|     ////////////////// | ||||
//#define GRID_ALLOC_ALIGN (128)
| #define GRID_ALLOC_ALIGN (2*1024*1024) | ||||
| #ifdef HAVE_MM_MALLOC_H | ||||
|     if ( ptr == (_Tp *) NULL ) ptr = (_Tp *) _mm_malloc(bytes,128); | ||||
|     if ( ptr == (_Tp *) NULL ) ptr = (_Tp *) _mm_malloc(bytes,GRID_ALLOC_ALIGN); | ||||
| #else | ||||
|     if ( ptr == (_Tp *) NULL ) ptr = (_Tp *) memalign(128,bytes); | ||||
|     if ( ptr == (_Tp *) NULL ) ptr = (_Tp *) memalign(GRID_ALLOC_ALIGN,bytes); | ||||
| #endif | ||||
|  | ||||
|     //    std::cout << "alignedAllocator " << std::hex << ptr <<std::dec <<std::endl; | ||||
|     // First touch optimise in threaded loop | ||||
|     uint8_t *cp = (uint8_t *)ptr; | ||||
| #ifdef GRID_OMP | ||||
| #pragma omp parallel for | ||||
| #endif | ||||
|     for(size_type n=0;n<bytes;n+=4096){ | ||||
|       cp[n]=0; | ||||
|     } | ||||
|     return ptr; | ||||
|   } | ||||
|  | ||||
|   void deallocate(pointer __p, size_type __n) {  | ||||
|     size_type bytes = __n * sizeof(_Tp); | ||||
|  | ||||
|     pointer __freeme = (pointer)PointerCache::Insert((void *)__p,bytes); | ||||
|  | ||||
| #ifdef HAVE_MM_MALLOC_H | ||||
| @@ -182,10 +200,19 @@ public: | ||||
|   pointer allocate(size_type __n, const void* _p= 0)  | ||||
|   { | ||||
| #ifdef HAVE_MM_MALLOC_H | ||||
|     _Tp * ptr = (_Tp *) _mm_malloc(__n*sizeof(_Tp),128); | ||||
|     _Tp * ptr = (_Tp *) _mm_malloc(__n*sizeof(_Tp),GRID_ALLOC_ALIGN); | ||||
| #else | ||||
|     _Tp * ptr = (_Tp *) memalign(128,__n*sizeof(_Tp)); | ||||
|     _Tp * ptr = (_Tp *) memalign(GRID_ALLOC_ALIGN,__n*sizeof(_Tp)); | ||||
| #endif | ||||
|     size_type bytes = __n*sizeof(_Tp); | ||||
|     uint8_t *cp = (uint8_t *)ptr; | ||||
|     if ( ptr ) {  | ||||
    // One touch per 4k page; static OMP schedule so later parallel loops touch pages in the same order (NUMA first touch)
| #pragma omp parallel for schedule(static) | ||||
|       for(size_type n=0;n<bytes;n+=4096){ | ||||
| 	cp[n]=0; | ||||
|       } | ||||
|     } | ||||
|     return ptr; | ||||
|   } | ||||
|   void deallocate(pointer __p, size_type) {  | ||||
|   | ||||
| @@ -44,12 +44,20 @@ namespace Grid{ | ||||
|   class GridBase : public CartesianCommunicator , public GridThread { | ||||
|  | ||||
| public: | ||||
|  | ||||
|     int dummy; | ||||
|     // Give Lattice access | ||||
|     template<class object> friend class Lattice; | ||||
|  | ||||
|     GridBase(const std::vector<int> & processor_grid) : CartesianCommunicator(processor_grid) {}; | ||||
|     GridBase(const std::vector<int> & processor_grid, | ||||
| 	     const CartesianCommunicator &parent, | ||||
| 	     int &split_rank)  | ||||
|       : CartesianCommunicator(processor_grid,parent,split_rank) {}; | ||||
|     GridBase(const std::vector<int> & processor_grid, | ||||
| 	     const CartesianCommunicator &parent)  | ||||
|       : CartesianCommunicator(processor_grid,parent,dummy) {}; | ||||
|  | ||||
|     virtual ~GridBase() = default; | ||||
|  | ||||
|     // Physics Grid information. | ||||
|     std::vector<int> _simd_layout;// Which dimensions get relayed out over simd lanes. | ||||
| @@ -63,13 +71,12 @@ public: | ||||
|     int _isites; | ||||
|     int _fsites;                  // _isites*_osites = product(dimensions). | ||||
|     int _gsites; | ||||
|     std::vector<int> _slice_block;   // subslice information | ||||
|     std::vector<int> _slice_block;// subslice information | ||||
|     std::vector<int> _slice_stride; | ||||
|     std::vector<int> _slice_nblock; | ||||
|  | ||||
|     // Might need these at some point | ||||
|     //    std::vector<int> _lstart;     // local start of array in gcoors. _processor_coor[d]*_ldimensions[d] | ||||
|     //    std::vector<int> _lend;       // local end of array in gcoors    _processor_coor[d]*_ldimensions[d]+_ldimensions_[d]-1 | ||||
|     std::vector<int> _lstart;     // local start of array in gcoors _processor_coor[d]*_ldimensions[d] | ||||
|     std::vector<int> _lend  ;     // local end of array in gcoors   _processor_coor[d]*_ldimensions[d]+_ldimensions_[d]-1 | ||||
|  | ||||
| public: | ||||
|  | ||||
| @@ -176,6 +183,7 @@ public: | ||||
|     inline int gSites(void) const { return _isites*_osites*_Nprocessors; };  | ||||
|     inline int Nd    (void) const { return _ndimension;}; | ||||
|  | ||||
|     inline const std::vector<int> LocalStarts(void)             { return _lstart;    }; | ||||
|     inline const std::vector<int> &FullDimensions(void)         { return _fdimensions;}; | ||||
|     inline const std::vector<int> &GlobalDimensions(void)       { return _gdimensions;}; | ||||
|     inline const std::vector<int> &LocalDimensions(void)        { return _ldimensions;}; | ||||
| @@ -186,17 +194,18 @@ public: | ||||
|     //////////////////////////////////////////////////////////////// | ||||
|  | ||||
|     void show_decomposition(){ | ||||
|       std::cout << GridLogMessage << "Full Dimensions    : " << _fdimensions << std::endl; | ||||
|       std::cout << GridLogMessage << "Global Dimensions  : " << _gdimensions << std::endl; | ||||
|       std::cout << GridLogMessage << "Local Dimensions   : " << _ldimensions << std::endl; | ||||
|       std::cout << GridLogMessage << "Reduced Dimensions : " << _rdimensions << std::endl; | ||||
|       std::cout << GridLogMessage << "Outer strides      : " << _ostride << std::endl; | ||||
|       std::cout << GridLogMessage << "Inner strides      : " << _istride << std::endl; | ||||
|       std::cout << GridLogMessage << "iSites             : " << _isites << std::endl; | ||||
|       std::cout << GridLogMessage << "oSites             : " << _osites << std::endl; | ||||
|       std::cout << GridLogMessage << "lSites             : " << lSites() << std::endl;         | ||||
|       std::cout << GridLogMessage << "gSites             : " << gSites() << std::endl; | ||||
|       std::cout << GridLogMessage << "Nd                 : " << _ndimension << std::endl;              | ||||
|       std::cout << GridLogMessage << "\tFull Dimensions    : " << _fdimensions << std::endl; | ||||
|       std::cout << GridLogMessage << "\tSIMD layout        : " << _simd_layout << std::endl; | ||||
|       std::cout << GridLogMessage << "\tGlobal Dimensions  : " << _gdimensions << std::endl; | ||||
|       std::cout << GridLogMessage << "\tLocal Dimensions   : " << _ldimensions << std::endl; | ||||
|       std::cout << GridLogMessage << "\tReduced Dimensions : " << _rdimensions << std::endl; | ||||
|       std::cout << GridLogMessage << "\tOuter strides      : " << _ostride << std::endl; | ||||
|       std::cout << GridLogMessage << "\tInner strides      : " << _istride << std::endl; | ||||
|       std::cout << GridLogMessage << "\tiSites             : " << _isites << std::endl; | ||||
|       std::cout << GridLogMessage << "\toSites             : " << _osites << std::endl; | ||||
|       std::cout << GridLogMessage << "\tlSites             : " << lSites() << std::endl;         | ||||
|       std::cout << GridLogMessage << "\tgSites             : " << gSites() << std::endl; | ||||
|       std::cout << GridLogMessage << "\tNd                 : " << _ndimension << std::endl;              | ||||
|     }  | ||||
|  | ||||
|     //////////////////////////////////////////////////////////////// | ||||
| @@ -210,9 +219,6 @@ public: | ||||
|       assert(lidx<lSites()); | ||||
|       Lexicographic::CoorFromIndex(lcoor,lidx,_ldimensions); | ||||
|     } | ||||
|  | ||||
|  | ||||
|  | ||||
|     void GlobalCoorToGlobalIndex(const std::vector<int> & gcoor,int & gidx){ | ||||
|       gidx=0; | ||||
|       int mult=1; | ||||
|   | ||||
| @@ -38,7 +38,7 @@ namespace Grid{ | ||||
| class GridCartesian: public GridBase { | ||||
|  | ||||
| public: | ||||
|  | ||||
|     int dummy; | ||||
|     virtual int  CheckerBoardFromOindexTable (int Oindex) { | ||||
|       return 0; | ||||
|     } | ||||
| @@ -61,10 +61,38 @@ public: | ||||
|     virtual int CheckerBoardShift(int source_cb,int dim,int shift, int osite){ | ||||
|       return shift; | ||||
|     } | ||||
|     ///////////////////////////////////////////////////////////////////////// | ||||
|     // Constructor takes a parent grid and possibly subdivides communicator. | ||||
|     ///////////////////////////////////////////////////////////////////////// | ||||
|     GridCartesian(const std::vector<int> &dimensions, | ||||
| 		  const std::vector<int> &simd_layout, | ||||
| 		  const std::vector<int> &processor_grid | ||||
| 		  ) : GridBase(processor_grid) | ||||
| 		  const std::vector<int> &processor_grid, | ||||
| 		  const GridCartesian &parent) : GridBase(processor_grid,parent,dummy) | ||||
|     { | ||||
|       Init(dimensions,simd_layout,processor_grid); | ||||
|     } | ||||
|     GridCartesian(const std::vector<int> &dimensions, | ||||
| 		  const std::vector<int> &simd_layout, | ||||
| 		  const std::vector<int> &processor_grid, | ||||
| 		  const GridCartesian &parent,int &split_rank) : GridBase(processor_grid,parent,split_rank) | ||||
|     { | ||||
|       Init(dimensions,simd_layout,processor_grid); | ||||
|     } | ||||
|     ///////////////////////////////////////////////////////////////////////// | ||||
|     // Construct from comm world | ||||
|     ///////////////////////////////////////////////////////////////////////// | ||||
|     GridCartesian(const std::vector<int> &dimensions, | ||||
| 		  const std::vector<int> &simd_layout, | ||||
| 		  const std::vector<int> &processor_grid) : GridBase(processor_grid) | ||||
|     { | ||||
|       Init(dimensions,simd_layout,processor_grid); | ||||
|     } | ||||
|  | ||||
|     virtual ~GridCartesian() = default; | ||||
|  | ||||
|     void Init(const std::vector<int> &dimensions, | ||||
| 	      const std::vector<int> &simd_layout, | ||||
| 	      const std::vector<int> &processor_grid) | ||||
|     { | ||||
|       /////////////////////// | ||||
|       // Grid information | ||||
| @@ -76,34 +104,44 @@ public: | ||||
|       _ldimensions.resize(_ndimension); | ||||
|       _rdimensions.resize(_ndimension); | ||||
|       _simd_layout.resize(_ndimension); | ||||
|       _lstart.resize(_ndimension); | ||||
|       _lend.resize(_ndimension); | ||||
|  | ||||
|       _ostride.resize(_ndimension); | ||||
|       _istride.resize(_ndimension); | ||||
|  | ||||
|       _fsites = _gsites = _osites = _isites = 1; | ||||
|  | ||||
|         for(int d=0;d<_ndimension;d++){ | ||||
|       for (int d = 0; d < _ndimension; d++) | ||||
|       { | ||||
|         _fdimensions[d] = dimensions[d];   // Global dimensions | ||||
|         _gdimensions[d] = _fdimensions[d]; // Global dimensions | ||||
|         _simd_layout[d] = simd_layout[d]; | ||||
|         _fsites = _fsites * _fdimensions[d]; | ||||
|         _gsites = _gsites * _gdimensions[d]; | ||||
|  | ||||
| 	  //FIXME check for exact division | ||||
|  | ||||
|         // Use a reduced simd grid | ||||
| 	  _ldimensions[d]= _gdimensions[d]/_processors[d];  //local dimensions | ||||
| 	  _rdimensions[d]= _ldimensions[d]/_simd_layout[d]; //overdecomposition | ||||
|         _ldimensions[d] = _gdimensions[d] / _processors[d]; //local dimensions | ||||
|         assert(_ldimensions[d] * _processors[d] == _gdimensions[d]); | ||||
|  | ||||
|         _rdimensions[d] = _ldimensions[d] / _simd_layout[d]; //overdecomposition | ||||
|         assert(_rdimensions[d] * _simd_layout[d] == _ldimensions[d]); | ||||
|  | ||||
|         _lstart[d] = _processor_coor[d] * _ldimensions[d]; | ||||
|         _lend[d] = _processor_coor[d] * _ldimensions[d] + _ldimensions[d] - 1; | ||||
|         _osites *= _rdimensions[d]; | ||||
|         _isites *= _simd_layout[d]; | ||||
|  | ||||
|         // Addressing support | ||||
| 	  if ( d==0 ) { | ||||
|         if (d == 0) | ||||
|         { | ||||
|           _ostride[d] = 1; | ||||
|           _istride[d] = 1; | ||||
| 	  } else { | ||||
| 	    _ostride[d] = _ostride[d-1]*_rdimensions[d-1]; | ||||
| 	    _istride[d] = _istride[d-1]*_simd_layout[d-1]; | ||||
|         } | ||||
|         else | ||||
|         { | ||||
|           _ostride[d] = _ostride[d - 1] * _rdimensions[d - 1]; | ||||
|           _istride[d] = _istride[d - 1] * _simd_layout[d - 1]; | ||||
|         } | ||||
|       } | ||||
|  | ||||
| @@ -114,21 +152,20 @@ public: | ||||
|       _slice_stride.resize(_ndimension); | ||||
|       _slice_nblock.resize(_ndimension); | ||||
|  | ||||
|         int block =1; | ||||
|         int nblock=1; | ||||
|         for(int d=0;d<_ndimension;d++) nblock*=_rdimensions[d]; | ||||
|       int block = 1; | ||||
|       int nblock = 1; | ||||
|       for (int d = 0; d < _ndimension; d++) | ||||
|         nblock *= _rdimensions[d]; | ||||
|  | ||||
|         for(int d=0;d<_ndimension;d++){ | ||||
|             nblock/=_rdimensions[d]; | ||||
|             _slice_block[d] =block; | ||||
|             _slice_stride[d]=_ostride[d]*_rdimensions[d]; | ||||
|             _slice_nblock[d]=nblock; | ||||
|             block = block*_rdimensions[d]; | ||||
|       for (int d = 0; d < _ndimension; d++) | ||||
|       { | ||||
|         nblock /= _rdimensions[d]; | ||||
|         _slice_block[d] = block; | ||||
|         _slice_stride[d] = _ostride[d] * _rdimensions[d]; | ||||
|         _slice_nblock[d] = nblock; | ||||
|         block = block * _rdimensions[d]; | ||||
|       } | ||||
|  | ||||
|     }; | ||||
| }; | ||||
|  | ||||
|  | ||||
| } | ||||
| #endif | ||||
|   | ||||
| @@ -112,24 +112,59 @@ public: | ||||
|       } | ||||
|     }; | ||||
|  | ||||
|     GridRedBlackCartesian(const GridBase *base) : GridRedBlackCartesian(base->_fdimensions,base->_simd_layout,base->_processors)  {}; | ||||
|     //////////////////////////////////////////////////////////// | ||||
|     // Create Redblack from original grid; require full grid pointer ? | ||||
|     //////////////////////////////////////////////////////////// | ||||
|     GridRedBlackCartesian(const GridBase *base) : GridBase(base->_processors,*base) | ||||
|     { | ||||
|       int dims = base->_ndimension; | ||||
|       std::vector<int> checker_dim_mask(dims,1); | ||||
|       int checker_dim = 0; | ||||
|       Init(base->_fdimensions,base->_simd_layout,base->_processors,checker_dim_mask,checker_dim); | ||||
|     }; | ||||
|  | ||||
|     GridRedBlackCartesian(const std::vector<int> &dimensions, | ||||
|     //////////////////////////////////////////////////////////// | ||||
|     // Create redblack from original grid, with non-trivial checker dim mask | ||||
|     //////////////////////////////////////////////////////////// | ||||
|     GridRedBlackCartesian(const GridBase *base, | ||||
| 			  const std::vector<int> &checker_dim_mask, | ||||
| 			  int checker_dim | ||||
| 			  ) :  GridBase(base->_processors,*base)  | ||||
|     { | ||||
|       Init(base->_fdimensions,base->_simd_layout,base->_processors,checker_dim_mask,checker_dim)  ; | ||||
|     } | ||||
|  | ||||
|     virtual ~GridRedBlackCartesian() = default; | ||||
| #if 0 | ||||
|     //////////////////////////////////////////////////////////// | ||||
    // Create redblack grid; deprecate these. Should not need direct creation
    // of a redblack grid without a full grid to base it on
|     //////////////////////////////////////////////////////////// | ||||
|     GridRedBlackCartesian(const GridBase *base, | ||||
| 			  const std::vector<int> &dimensions, | ||||
| 			  const std::vector<int> &simd_layout, | ||||
| 			  const std::vector<int> &processor_grid, | ||||
| 			  const std::vector<int> &checker_dim_mask, | ||||
| 			  int checker_dim | ||||
| 			  ) :  GridBase(processor_grid)  | ||||
| 			  ) :  GridBase(processor_grid,*base)  | ||||
|     { | ||||
|       Init(dimensions,simd_layout,processor_grid,checker_dim_mask,checker_dim); | ||||
|     } | ||||
|     GridRedBlackCartesian(const std::vector<int> &dimensions, | ||||
|  | ||||
|     //////////////////////////////////////////////////////////// | ||||
|     // Create redblack grid | ||||
|     //////////////////////////////////////////////////////////// | ||||
|     GridRedBlackCartesian(const GridBase *base, | ||||
| 			  const std::vector<int> &dimensions, | ||||
| 			  const std::vector<int> &simd_layout, | ||||
| 			  const std::vector<int> &processor_grid) : GridBase(processor_grid)  | ||||
| 			  const std::vector<int> &processor_grid) : GridBase(processor_grid,*base)  | ||||
|     { | ||||
|       std::vector<int> checker_dim_mask(dimensions.size(),1); | ||||
|       Init(dimensions,simd_layout,processor_grid,checker_dim_mask,0); | ||||
|       int checker_dim = 0; | ||||
|       Init(dimensions,simd_layout,processor_grid,checker_dim_mask,checker_dim); | ||||
|     } | ||||
| #endif | ||||
|  | ||||
|     void Init(const std::vector<int> &dimensions, | ||||
|               const std::vector<int> &simd_layout, | ||||
|               const std::vector<int> &processor_grid, | ||||
| @@ -140,46 +175,58 @@ public: | ||||
|       // Grid information | ||||
|       /////////////////////// | ||||
|       _checker_dim = checker_dim; | ||||
|       assert(checker_dim_mask[checker_dim]==1); | ||||
|       assert(checker_dim_mask[checker_dim] == 1); | ||||
|       _ndimension = dimensions.size(); | ||||
|       assert(checker_dim_mask.size()==_ndimension); | ||||
|       assert(processor_grid.size()==_ndimension); | ||||
|       assert(simd_layout.size()==_ndimension); | ||||
|       assert(checker_dim_mask.size() == _ndimension); | ||||
|       assert(processor_grid.size() == _ndimension); | ||||
|       assert(simd_layout.size() == _ndimension); | ||||
|  | ||||
|       _fdimensions.resize(_ndimension); | ||||
|       _gdimensions.resize(_ndimension); | ||||
|       _ldimensions.resize(_ndimension); | ||||
|       _rdimensions.resize(_ndimension); | ||||
|       _simd_layout.resize(_ndimension); | ||||
|       _lstart.resize(_ndimension); | ||||
|       _lend.resize(_ndimension); | ||||
|  | ||||
|       _ostride.resize(_ndimension); | ||||
|       _istride.resize(_ndimension); | ||||
|  | ||||
|       _fsites = _gsites = _osites = _isites = 1; | ||||
|  | ||||
|       _checker_dim_mask=checker_dim_mask; | ||||
|       _checker_dim_mask = checker_dim_mask; | ||||
|  | ||||
|       for(int d=0;d<_ndimension;d++){ | ||||
|       for (int d = 0; d < _ndimension; d++) | ||||
|       { | ||||
|         _fdimensions[d] = dimensions[d]; | ||||
|         _gdimensions[d] = _fdimensions[d]; | ||||
|         _fsites = _fsites * _fdimensions[d]; | ||||
|         _gsites = _gsites * _gdimensions[d]; | ||||
|  | ||||
| 	if (d==_checker_dim) { | ||||
| 	  _gdimensions[d] = _gdimensions[d]/2; // Remove a checkerboard | ||||
|         if (d == _checker_dim) | ||||
|         { | ||||
|           assert((_gdimensions[d] & 0x1) == 0); | ||||
|           _gdimensions[d] = _gdimensions[d] / 2; // Remove a checkerboard | ||||
| 	  _gsites /= 2; | ||||
|         } | ||||
| 	_ldimensions[d] = _gdimensions[d]/_processors[d]; | ||||
|         _ldimensions[d] = _gdimensions[d] / _processors[d]; | ||||
|         assert(_ldimensions[d] * _processors[d] == _gdimensions[d]); | ||||
|         _lstart[d] = _processor_coor[d] * _ldimensions[d]; | ||||
|         _lend[d] = _processor_coor[d] * _ldimensions[d] + _ldimensions[d] - 1; | ||||
|  | ||||
|         // Use a reduced simd grid | ||||
|         _simd_layout[d] = simd_layout[d]; | ||||
| 	_rdimensions[d]= _ldimensions[d]/_simd_layout[d]; | ||||
| 	assert(_rdimensions[d]>0); | ||||
        _rdimensions[d] = _ldimensions[d] / _simd_layout[d]; // exact division is asserted on the next line
|         assert(_rdimensions[d] * _simd_layout[d] == _ldimensions[d]); | ||||
|         assert(_rdimensions[d] > 0); | ||||
|  | ||||
|         // all elements of a simd vector must have same checkerboard. | ||||
|         // If Ls vectorised, this must still be the case; e.g. dwf rb5d | ||||
| 	if ( _simd_layout[d]>1 ) { | ||||
| 	  if ( checker_dim_mask[d] ) {  | ||||
| 	    assert( (_rdimensions[d]&0x1) == 0 ); | ||||
|         if (_simd_layout[d] > 1) | ||||
|         { | ||||
|           if (checker_dim_mask[d]) | ||||
|           { | ||||
|             assert((_rdimensions[d] & 0x1) == 0); | ||||
|           } | ||||
|         } | ||||
|  | ||||
| @@ -187,15 +234,16 @@ public: | ||||
|         _isites *= _simd_layout[d]; | ||||
|  | ||||
|         // Addressing support | ||||
| 	if ( d==0 ) { | ||||
|         if (d == 0) | ||||
|         { | ||||
|           _ostride[d] = 1; | ||||
|           _istride[d] = 1; | ||||
| 	} else { | ||||
| 	  _ostride[d] = _ostride[d-1]*_rdimensions[d-1]; | ||||
| 	  _istride[d] = _istride[d-1]*_simd_layout[d-1]; | ||||
|         } | ||||
|  | ||||
|  | ||||
|         else | ||||
|         { | ||||
|           _ostride[d] = _ostride[d - 1] * _rdimensions[d - 1]; | ||||
|           _istride[d] = _istride[d - 1] * _simd_layout[d - 1]; | ||||
|         } | ||||
|       } | ||||
|  | ||||
|       //////////////////////////////////////////////////////////////////////////////////////////// | ||||
| @@ -205,40 +253,48 @@ public: | ||||
|       _slice_stride.resize(_ndimension); | ||||
|       _slice_nblock.resize(_ndimension); | ||||
|  | ||||
|       int block =1; | ||||
|       int nblock=1; | ||||
|       for(int d=0;d<_ndimension;d++) nblock*=_rdimensions[d]; | ||||
|       int block = 1; | ||||
|       int nblock = 1; | ||||
|       for (int d = 0; d < _ndimension; d++) | ||||
|         nblock *= _rdimensions[d]; | ||||
|  | ||||
|       for(int d=0;d<_ndimension;d++){ | ||||
| 	nblock/=_rdimensions[d]; | ||||
| 	_slice_block[d] =block; | ||||
| 	_slice_stride[d]=_ostride[d]*_rdimensions[d]; | ||||
| 	_slice_nblock[d]=nblock; | ||||
| 	block = block*_rdimensions[d]; | ||||
|       for (int d = 0; d < _ndimension; d++) | ||||
|       { | ||||
|         nblock /= _rdimensions[d]; | ||||
|         _slice_block[d] = block; | ||||
|         _slice_stride[d] = _ostride[d] * _rdimensions[d]; | ||||
|         _slice_nblock[d] = nblock; | ||||
|         block = block * _rdimensions[d]; | ||||
|       } | ||||
|  | ||||
|       //////////////////////////////////////////////// | ||||
|       // Create a checkerboard lookup table | ||||
|       //////////////////////////////////////////////// | ||||
|       int rvol = 1; | ||||
|       for(int d=0;d<_ndimension;d++){ | ||||
| 	rvol=rvol * _rdimensions[d]; | ||||
|       for (int d = 0; d < _ndimension; d++) | ||||
|       { | ||||
|         rvol = rvol * _rdimensions[d]; | ||||
|       } | ||||
|       _checker_board.resize(rvol); | ||||
|       for(int osite=0;osite<_osites;osite++){ | ||||
| 	_checker_board[osite] = CheckerBoardFromOindex (osite); | ||||
|       for (int osite = 0; osite < _osites; osite++) | ||||
|       { | ||||
|         _checker_board[osite] = CheckerBoardFromOindex(osite); | ||||
|       } | ||||
|        | ||||
|     }; | ||||
| protected: | ||||
|  | ||||
|   protected: | ||||
|     virtual int oIndex(std::vector<int> &coor) | ||||
|     { | ||||
|       int idx=0; | ||||
|       for(int d=0;d<_ndimension;d++) { | ||||
| 	if( d==_checker_dim ) { | ||||
| 	  idx+=_ostride[d]*((coor[d]/2)%_rdimensions[d]); | ||||
| 	} else { | ||||
| 	  idx+=_ostride[d]*(coor[d]%_rdimensions[d]); | ||||
|       int idx = 0; | ||||
|       for (int d = 0; d < _ndimension; d++) | ||||
|       { | ||||
|         if (d == _checker_dim) | ||||
|         { | ||||
|           idx += _ostride[d] * ((coor[d] / 2) % _rdimensions[d]); | ||||
|         } | ||||
|         else | ||||
|         { | ||||
|           idx += _ostride[d] * (coor[d] % _rdimensions[d]); | ||||
|         } | ||||
|       } | ||||
|       return idx; | ||||
| @@ -246,17 +302,20 @@ protected: | ||||
|  | ||||
|     virtual int iIndex(std::vector<int> &lcoor) | ||||
|     { | ||||
|         int idx=0; | ||||
|         for(int d=0;d<_ndimension;d++) { | ||||
| 	  if( d==_checker_dim ) { | ||||
| 	    idx+=_istride[d]*(lcoor[d]/(2*_rdimensions[d])); | ||||
| 	  } else {  | ||||
| 	    idx+=_istride[d]*(lcoor[d]/_rdimensions[d]); | ||||
|       int idx = 0; | ||||
|       for (int d = 0; d < _ndimension; d++) | ||||
|       { | ||||
|         if (d == _checker_dim) | ||||
|         { | ||||
|           idx += _istride[d] * (lcoor[d] / (2 * _rdimensions[d])); | ||||
|         } | ||||
|         else | ||||
|         { | ||||
|           idx += _istride[d] * (lcoor[d] / _rdimensions[d]); | ||||
|         } | ||||
|       } | ||||
|       return idx; | ||||
|     } | ||||
| }; | ||||
|  | ||||
| } | ||||
| #endif | ||||
|   | ||||
| @@ -26,6 +26,10 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk> | ||||
|     *************************************************************************************/ | ||||
|     /*  END LEGAL */ | ||||
| #include <Grid/GridCore.h> | ||||
| #include <fcntl.h> | ||||
| #include <unistd.h> | ||||
| #include <limits.h> | ||||
| #include <sys/mman.h> | ||||
|  | ||||
| namespace Grid { | ||||
|  | ||||
| @@ -33,8 +37,11 @@ namespace Grid { | ||||
| // Info that is setup once and indept of cartesian layout | ||||
| /////////////////////////////////////////////////////////////// | ||||
| void *              CartesianCommunicator::ShmCommBuf; | ||||
| uint64_t            CartesianCommunicator::MAX_MPI_SHM_BYTES   = 128*1024*1024;  | ||||
| CartesianCommunicator::CommunicatorPolicy_t  CartesianCommunicator::CommunicatorPolicy= CartesianCommunicator::CommunicatorPolicyConcurrent; | ||||
| uint64_t            CartesianCommunicator::MAX_MPI_SHM_BYTES   = 1024LL*1024LL*1024LL;  | ||||
| CartesianCommunicator::CommunicatorPolicy_t   | ||||
| CartesianCommunicator::CommunicatorPolicy= CartesianCommunicator::CommunicatorPolicyConcurrent; | ||||
| int CartesianCommunicator::nCommThreads = -1; | ||||
| int CartesianCommunicator::Hugepages = 0; | ||||
|  | ||||
| ///////////////////////////////// | ||||
| // Alloc, free shmem region | ||||
| @@ -60,6 +67,7 @@ void CartesianCommunicator::ShmBufferFreeAll(void) { | ||||
| ///////////////////////////////// | ||||
| // Grid information queries | ||||
| ///////////////////////////////// | ||||
| int                      CartesianCommunicator::Dimensions(void)        { return _ndimension; }; | ||||
| int                      CartesianCommunicator::IsBoss(void)            { return _processor==0; }; | ||||
| int                      CartesianCommunicator::BossRank(void)          { return 0; }; | ||||
| int                      CartesianCommunicator::ThisRank(void)          { return _processor; }; | ||||
| @@ -88,24 +96,175 @@ void CartesianCommunicator::GlobalSumVector(ComplexD *c,int N) | ||||
|   GlobalSumVector((double *)c,2*N); | ||||
| } | ||||
|  | ||||
| #if !defined( GRID_COMMS_MPI3) && !defined (GRID_COMMS_MPI3L) | ||||
|  | ||||
| #if defined( GRID_COMMS_MPI) || defined (GRID_COMMS_MPIT) || defined (GRID_COMMS_MPI3) | ||||
|  | ||||
| CartesianCommunicator::CartesianCommunicator(const std::vector<int> &processors,const CartesianCommunicator &parent,int &srank)  | ||||
| { | ||||
|   _ndimension = processors.size(); | ||||
  assert(_ndimension == parent._ndimension);
|    | ||||
|   ////////////////////////////////////////////////////////////////////////////////////////////////////// | ||||
|   // split the communicator | ||||
|   ////////////////////////////////////////////////////////////////////////////////////////////////////// | ||||
|   int Nparent; | ||||
|   MPI_Comm_size(parent.communicator,&Nparent); | ||||
|  | ||||
|   int childsize=1; | ||||
|   for(int d=0;d<processors.size();d++) { | ||||
|     childsize *= processors[d]; | ||||
|   } | ||||
|   int Nchild = Nparent/childsize; | ||||
|   assert (childsize * Nchild == Nparent); | ||||
|  | ||||
|   std::vector<int> ccoor(_ndimension); // coor within subcommunicator | ||||
|   std::vector<int> scoor(_ndimension); // coor of split within parent | ||||
  std::vector<int> ssize(_ndimension); // number of splits (child grids) per dimension of the parent
|  | ||||
|   for(int d=0;d<_ndimension;d++){ | ||||
|     ccoor[d] = parent._processor_coor[d] % processors[d]; | ||||
|     scoor[d] = parent._processor_coor[d] / processors[d]; | ||||
|     ssize[d] = parent._processors[d]     / processors[d]; | ||||
|   } | ||||
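  // Worked example: a {4,4} parent grid split with processors={2,2} gives ssize={2,2}, i.e. four
  // child grids; the parent rank at coordinate (3,1) gets ccoor=(1,1) within child scoor=(1,0).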
|   int crank;  // rank within subcomm ; srank is rank of subcomm within blocks of subcomms | ||||
  // MPI uses the reverse lexicographic ordering convention relative to ours
|   Lexicographic::IndexFromCoorReversed(ccoor,crank,processors); | ||||
|   Lexicographic::IndexFromCoorReversed(scoor,srank,ssize); | ||||
|  | ||||
|   MPI_Comm comm_split; | ||||
|   if ( Nchild > 1 ) {  | ||||
|  | ||||
|     /* | ||||
|     std::cout << GridLogMessage<<"Child communicator of "<< std::hex << parent.communicator << std::dec<<std::endl; | ||||
|     std::cout << GridLogMessage<<" parent grid["<< parent._ndimension<<"]    "; | ||||
|     for(int d=0;d<parent._processors.size();d++)  std::cout << parent._processors[d] << " "; | ||||
|     std::cout<<std::endl; | ||||
|  | ||||
|     std::cout << GridLogMessage<<" child grid["<< _ndimension <<"]    "; | ||||
|     for(int d=0;d<processors.size();d++)  std::cout << processors[d] << " "; | ||||
|     std::cout<<std::endl; | ||||
|  | ||||
|     std::cout << GridLogMessage<<" old rank "<< parent._processor<<" coor ["<< _ndimension <<"]    "; | ||||
|     for(int d=0;d<processors.size();d++)  std::cout << parent._processor_coor[d] << " "; | ||||
|     std::cout<<std::endl; | ||||
|  | ||||
|     std::cout << GridLogMessage<<" new rank "<< crank<<" coor ["<< _ndimension <<"]    "; | ||||
|     for(int d=0;d<processors.size();d++)  std::cout << ccoor[d] << " "; | ||||
|     std::cout<<std::endl; | ||||
|  | ||||
|     std::cout << GridLogMessage<<" new coor ["<< _ndimension <<"]    "; | ||||
|     for(int d=0;d<processors.size();d++)  std::cout << parent._processor_coor[d] << " "; | ||||
|     std::cout<<std::endl; | ||||
|     */ | ||||
|  | ||||
|     int ierr= MPI_Comm_split(parent.communicator,srank,crank,&comm_split); | ||||
|     assert(ierr==0); | ||||
|     ////////////////////////////////////////////////////////////////////////////////////////////////////// | ||||
|     // Declare victory | ||||
|     ////////////////////////////////////////////////////////////////////////////////////////////////////// | ||||
|     /* | ||||
|     std::cout << GridLogMessage<<"Divided communicator "<< parent._Nprocessors<<" into " | ||||
| 	      << Nchild <<" communicators with " << childsize << " ranks"<<std::endl; | ||||
|     */ | ||||
|   } else { | ||||
|     comm_split=parent.communicator; | ||||
|     srank = 0; | ||||
|   } | ||||
|  | ||||
|   ////////////////////////////////////////////////////////////////////////////////////////////////////// | ||||
|   // Set up from the new split communicator | ||||
|   ////////////////////////////////////////////////////////////////////////////////////////////////////// | ||||
|   InitFromMPICommunicator(processors,comm_split); | ||||
| } | ||||
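For orientation, a minimal usage sketch of the sub-dividing constructor above; the grid dimensions and variable names are illustrative and not taken from Grid's tests:

    // Illustrative only: split an 8-rank parent laid out as {4,2} into
    // four child communicators laid out as {2,1} each.
    std::vector<int> parent_dims = {4,2};
    std::vector<int> child_dims  = {2,1};

    CartesianCommunicator parent(parent_dims);

    int srank;   // filled with the index of the split this rank ends up in
    CartesianCommunicator child(child_dims, parent, srank);

    // childsize = 2, Nchild = 8/2 = 4; child.ThisRank() is the rank inside the
    // split and srank identifies the split within the parent.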
| ////////////////////////////////////////////////////////////////////////////////////////////////////// | ||||
| // Take an MPI_Comm and self assemble | ||||
| ////////////////////////////////////////////////////////////////////////////////////////////////////// | ||||
| void CartesianCommunicator::InitFromMPICommunicator(const std::vector<int> &processors, MPI_Comm communicator_base) | ||||
| { | ||||
|   _ndimension = processors.size(); | ||||
|   _processor_coor.resize(_ndimension); | ||||
|  | ||||
|   ///////////////////////////////// | ||||
|   // Count the requested nodes | ||||
|   ///////////////////////////////// | ||||
|   _Nprocessors=1; | ||||
|   _processors = processors; | ||||
|   for(int i=0;i<_ndimension;i++){ | ||||
|     _Nprocessors*=_processors[i]; | ||||
|   } | ||||
|  | ||||
|   std::vector<int> periodic(_ndimension,1); | ||||
|   MPI_Cart_create(communicator_base, _ndimension,&_processors[0],&periodic[0],0,&communicator); | ||||
|   MPI_Comm_rank(communicator,&_processor); | ||||
|   MPI_Cart_coords(communicator,_processor,_ndimension,&_processor_coor[0]); | ||||
|  | ||||
|   if ( communicator_base != communicator_world ) { | ||||
|     std::cout << "Cartesian communicator created with a non-world communicator"<<std::endl; | ||||
|      | ||||
|     std::cout << " new communicator rank "<<_processor<< " coor ["<<_ndimension<<"] "; | ||||
|     for(int d=0;d<_processors.size();d++){ | ||||
|       std::cout << _processor_coor[d]<<" "; | ||||
|     } | ||||
|     std::cout << std::endl; | ||||
|   } | ||||
|  | ||||
|   int Size; | ||||
|   MPI_Comm_size(communicator,&Size); | ||||
|  | ||||
| #ifdef GRID_COMMS_MPIT | ||||
|   communicator_halo.resize (2*_ndimension); | ||||
|   for(int i=0;i<_ndimension*2;i++){ | ||||
|     MPI_Comm_dup(communicator,&communicator_halo[i]); | ||||
|   } | ||||
| #endif | ||||
|    | ||||
|   assert(Size==_Nprocessors); | ||||
| } | ||||
|  | ||||
| CartesianCommunicator::CartesianCommunicator(const std::vector<int> &processors)  | ||||
| { | ||||
|   InitFromMPICommunicator(processors,communicator_world); | ||||
| } | ||||
|  | ||||
| #endif | ||||
|  | ||||
| #if !defined( GRID_COMMS_MPI3)  | ||||
|  | ||||
| int                      CartesianCommunicator::NodeCount(void)    { return ProcessorCount();}; | ||||
|  | ||||
| int                      CartesianCommunicator::RankCount(void)    { return ProcessorCount();}; | ||||
| #endif | ||||
| #if !defined( GRID_COMMS_MPI3) && !defined (GRID_COMMS_MPIT) | ||||
| double CartesianCommunicator::StencilSendToRecvFrom( void *xmit, | ||||
| 						     int xmit_to_rank, | ||||
| 						     void *recv, | ||||
| 						     int recv_from_rank, | ||||
| 						     int bytes, int dir) | ||||
| { | ||||
|   std::vector<CommsRequest_t> list; | ||||
|   // Discard the "dir" | ||||
|   SendToRecvFromBegin   (list,xmit,xmit_to_rank,recv,recv_from_rank,bytes); | ||||
|   SendToRecvFromComplete(list); | ||||
|   return 2.0*bytes; | ||||
| } | ||||
| double CartesianCommunicator::StencilSendToRecvFromBegin(std::vector<CommsRequest_t> &list, | ||||
| 							 void *xmit, | ||||
| 							 int xmit_to_rank, | ||||
| 							 void *recv, | ||||
| 							 int recv_from_rank, | ||||
| 							 int bytes, int dir) | ||||
| { | ||||
|   // Discard the "dir" | ||||
|   SendToRecvFromBegin(list,xmit,xmit_to_rank,recv,recv_from_rank,bytes); | ||||
|   return 2.0*bytes; | ||||
| } | ||||
| void CartesianCommunicator::StencilSendToRecvFromComplete(std::vector<CommsRequest_t> &waitall,int dir) | ||||
| { | ||||
|   SendToRecvFromComplete(waitall); | ||||
| } | ||||
| #endif | ||||
|  | ||||
| #if !defined( GRID_COMMS_MPI3)  | ||||
|  | ||||
| void CartesianCommunicator::StencilBarrier(void){}; | ||||
|  | ||||
| commVector<uint8_t> CartesianCommunicator::ShmBufStorageVector; | ||||
| @@ -119,8 +278,30 @@ void *CartesianCommunicator::ShmBufferTranslate(int rank,void * local_p) { | ||||
|   return NULL; | ||||
| } | ||||
| void CartesianCommunicator::ShmInitGeneric(void){ | ||||
| #if 1 | ||||
|   int mmap_flag =0; | ||||
| #ifdef MAP_ANONYMOUS | ||||
|   mmap_flag = mmap_flag| MAP_SHARED | MAP_ANONYMOUS; | ||||
| #endif | ||||
| #ifdef MAP_ANON | ||||
|   mmap_flag = mmap_flag| MAP_SHARED | MAP_ANON; | ||||
| #endif | ||||
| #ifdef MAP_HUGETLB | ||||
|   if ( Hugepages ) mmap_flag |= MAP_HUGETLB; | ||||
| #endif | ||||
|   ShmCommBuf =(void *) mmap(NULL, MAX_MPI_SHM_BYTES, PROT_READ | PROT_WRITE, mmap_flag, -1, 0);  | ||||
|   if (ShmCommBuf == (void *)MAP_FAILED) { | ||||
|     perror("mmap failed "); | ||||
|     exit(EXIT_FAILURE);   | ||||
|   } | ||||
| #ifdef MADV_HUGEPAGE | ||||
|   if (!Hugepages ) madvise(ShmCommBuf,MAX_MPI_SHM_BYTES,MADV_HUGEPAGE); | ||||
| #endif | ||||
| #else  | ||||
|   ShmBufStorageVector.resize(MAX_MPI_SHM_BYTES); | ||||
|   ShmCommBuf=(void *)&ShmBufStorageVector[0]; | ||||
| #endif | ||||
|   bzero(ShmCommBuf,MAX_MPI_SHM_BYTES); | ||||
| } | ||||
|  | ||||
| #endif | ||||
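The generic path above simply aborts if the mmap fails. As a point of comparison, a self-contained sketch (illustrative, not Grid code) of the same anonymous-mapping pattern that instead falls back to ordinary pages when the explicit huge-page pool cannot satisfy the request:

    #include <sys/mman.h>
    #include <cstdio>
    #include <cstdlib>

    // Allocate an anonymous shared mapping, preferring explicit huge pages but
    // retrying with normal pages if MAP_HUGETLB cannot be satisfied.
    static void *AllocCommBuf(size_t bytes, bool want_hugepages) {
      int flags = MAP_SHARED | MAP_ANONYMOUS;
    #ifdef MAP_HUGETLB
      if (want_hugepages) {
        void *p = mmap(NULL, bytes, PROT_READ | PROT_WRITE, flags | MAP_HUGETLB, -1, 0);
        if (p != MAP_FAILED) return p;
        perror("mmap with MAP_HUGETLB failed; falling back to small pages");
      }
    #endif
      void *p = mmap(NULL, bytes, PROT_READ | PROT_WRITE, flags, -1, 0);
      if (p == MAP_FAILED) { perror("mmap"); exit(EXIT_FAILURE); }
      return p;
    }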
|   | ||||
| @@ -38,7 +38,7 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk> | ||||
| #ifdef GRID_COMMS_MPI3 | ||||
| #include <mpi.h> | ||||
| #endif | ||||
| #ifdef GRID_COMMS_MPI3L | ||||
| #ifdef GRID_COMMS_MPIT | ||||
| #include <mpi.h> | ||||
| #endif | ||||
| #ifdef GRID_COMMS_SHMEM | ||||
| @@ -50,12 +50,24 @@ namespace Grid { | ||||
| class CartesianCommunicator { | ||||
|   public:     | ||||
|  | ||||
|   // 65536 ranks per node adequate for now | ||||
|  | ||||
|   //////////////////////////////////////////// | ||||
|   // Isend/Irecv/Wait, or Sendrecv blocking | ||||
|   //////////////////////////////////////////// | ||||
|   enum CommunicatorPolicy_t { CommunicatorPolicyConcurrent, CommunicatorPolicySequential }; | ||||
|   static CommunicatorPolicy_t CommunicatorPolicy; | ||||
|   static void SetCommunicatorPolicy(CommunicatorPolicy_t policy ) { CommunicatorPolicy = policy; } | ||||
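  // Illustrative use (not part of the class): select the blocking MPI_Sendrecv
  // path instead of Isend/Irecv pairs, before any grids are constructed:
  //   CartesianCommunicator::SetCommunicatorPolicy(CartesianCommunicator::CommunicatorPolicySequential);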
|  | ||||
|   /////////////////////////////////////////// | ||||
|   // Up to 65536 ranks per node adequate for now | ||||
|   // 128MB shared memory for comms, enough for 48^4 local volume comms | ||||
|   // Give external control (command-line override?) of this | ||||
|  | ||||
|   /////////////////////////////////////////// | ||||
|   static const int MAXLOG2RANKSPERNODE = 16;             | ||||
|   static uint64_t  MAX_MPI_SHM_BYTES; | ||||
|   static int       nCommThreads; | ||||
|   // use explicit huge pages | ||||
|   static int       Hugepages; | ||||
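  // Illustrative override (assumption: it has to happen before Init() allocates
  // the shared buffer), e.g. to request a 1 GB comms buffer:
  //   CartesianCommunicator::MAX_MPI_SHM_BYTES = 1024ULL*1024*1024;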
|  | ||||
|   // Communicator should know nothing of the physics grid, only processor grid. | ||||
|   int              _Nprocessors;     // How many in all | ||||
| @@ -64,14 +76,19 @@ class CartesianCommunicator { | ||||
|   std::vector<int> _processor_coor;  // linear processor coordinate | ||||
|   unsigned long _ndimension; | ||||
|  | ||||
| #if defined (GRID_COMMS_MPI) || defined (GRID_COMMS_MPI3) || defined (GRID_COMMS_MPI3L) | ||||
| #if defined (GRID_COMMS_MPI) || defined (GRID_COMMS_MPI3) || defined (GRID_COMMS_MPIT) | ||||
|   static MPI_Comm communicator_world; | ||||
|  | ||||
|   MPI_Comm              communicator; | ||||
|   std::vector<MPI_Comm> communicator_halo; | ||||
|  | ||||
|   typedef MPI_Request CommsRequest_t; | ||||
|  | ||||
| #else  | ||||
|   typedef int CommsRequest_t; | ||||
| #endif | ||||
|  | ||||
|  | ||||
|   //////////////////////////////////////////////////////////////////// | ||||
|   // Helper functionality for SHM Windows common to all other impls | ||||
|   //////////////////////////////////////////////////////////////////// | ||||
| @@ -117,10 +134,6 @@ class CartesianCommunicator { | ||||
|   ///////////////////////////////// | ||||
|   static void * ShmCommBuf; | ||||
|  | ||||
|   // Isend/Irecv/Wait, or Sendrecv blocking | ||||
|   enum CommunicatorPolicy_t { CommunicatorPolicyConcurrent, CommunicatorPolicySequential }; | ||||
|   static CommunicatorPolicy_t CommunicatorPolicy; | ||||
|   static void SetCommunicatorPolicy(CommunicatorPolicy_t policy ) { CommunicatorPolicy = policy; } | ||||
|    | ||||
|   size_t heap_top; | ||||
|   size_t heap_bytes; | ||||
| @@ -137,9 +150,22 @@ class CartesianCommunicator { | ||||
|   static void Init(int *argc, char ***argv); | ||||
|  | ||||
|   //////////////////////////////////////////////// | ||||
|   // Constructor of any given grid | ||||
|   // Constructors to sub-divide a parent communicator | ||||
|   // and default to comm world | ||||
|   //////////////////////////////////////////////// | ||||
|   CartesianCommunicator(const std::vector<int> &processors,const CartesianCommunicator &parent,int &srank); | ||||
|   CartesianCommunicator(const std::vector<int> &pdimensions_in); | ||||
|   virtual ~CartesianCommunicator(); | ||||
|  | ||||
|  private: | ||||
| #if defined (GRID_COMMS_MPI) || defined (GRID_COMMS_MPIT)  | ||||
|   //////////////////////////////////////////////// | ||||
|   // Private initialise from an MPI communicator | ||||
|   // Can use after an MPI_Comm_split, but hidden from user so private | ||||
|   //////////////////////////////////////////////// | ||||
|   void InitFromMPICommunicator(const std::vector<int> &processors, MPI_Comm communicator_base); | ||||
| #endif | ||||
|  public: | ||||
|    | ||||
|   //////////////////////////////////////////////////////////////////////////////////////// | ||||
|   // Wraps MPI_Cart routines, or implements equivalent on other impls | ||||
| @@ -148,6 +174,7 @@ class CartesianCommunicator { | ||||
|   int  RankFromProcessorCoor(std::vector<int> &coor); | ||||
|   void ProcessorCoorFromRank(int rank,std::vector<int> &coor); | ||||
|    | ||||
|   int                      Dimensions(void)        ; | ||||
|   int                      IsBoss(void)            ; | ||||
|   int                      BossRank(void)          ; | ||||
|   int                      ThisRank(void)          ; | ||||
| @@ -155,6 +182,7 @@ class CartesianCommunicator { | ||||
|   const std::vector<int> & ProcessorGrid(void)     ; | ||||
|   int                      ProcessorCount(void)    ; | ||||
|   int                      NodeCount(void)    ; | ||||
|   int                      RankCount(void)    ; | ||||
|  | ||||
|   //////////////////////////////////////////////////////////////////////////////// | ||||
|   // very VERY rarely (Log, serial RNG) we need world without a grid | ||||
| @@ -175,6 +203,8 @@ class CartesianCommunicator { | ||||
|   void GlobalSumVector(ComplexF *c,int N); | ||||
|   void GlobalSum(ComplexD &c); | ||||
|   void GlobalSumVector(ComplexD *c,int N); | ||||
|   void GlobalXOR(uint32_t &); | ||||
|   void GlobalXOR(uint64_t &); | ||||
|    | ||||
|   template<class obj> void GlobalSum(obj &o){ | ||||
|     typedef typename obj::scalar_type scalar_type; | ||||
| @@ -207,14 +237,21 @@ class CartesianCommunicator { | ||||
|    | ||||
|   void SendToRecvFromComplete(std::vector<CommsRequest_t> &waitall); | ||||
|  | ||||
|   double StencilSendToRecvFrom(void *xmit, | ||||
| 			       int xmit_to_rank, | ||||
| 			       void *recv, | ||||
| 			       int recv_from_rank, | ||||
| 			       int bytes,int dir); | ||||
|  | ||||
|   double StencilSendToRecvFromBegin(std::vector<CommsRequest_t> &list, | ||||
| 				    void *xmit, | ||||
| 				    int xmit_to_rank, | ||||
| 				    void *recv, | ||||
| 				    int recv_from_rank, | ||||
| 				    int bytes,int dir); | ||||
|    | ||||
|   void StencilSendToRecvFromComplete(std::vector<CommsRequest_t> &waitall); | ||||
|    | ||||
|   void StencilSendToRecvFromComplete(std::vector<CommsRequest_t> &waitall,int i); | ||||
|   void StencilBarrier(void); | ||||
|  | ||||
|   //////////////////////////////////////////////////////////// | ||||
| @@ -227,6 +264,27 @@ class CartesianCommunicator { | ||||
|   //////////////////////////////////////////////////////////// | ||||
|   void Broadcast(int root,void* data, int bytes); | ||||
|  | ||||
|   //////////////////////////////////////////////////////////// | ||||
|   // All2All down one dimension | ||||
|   //////////////////////////////////////////////////////////// | ||||
|   template<class T> void AllToAll(int dim,std::vector<T> &in, std::vector<T> &out){ | ||||
|     assert(dim>=0); | ||||
|     assert(dim<_ndimension); | ||||
|     int numnode = _processors[dim]; | ||||
|     //    std::cerr << " AllToAll in.size()  "<<in.size()<<std::endl; | ||||
|     //    std::cerr << " AllToAll out.size() "<<out.size()<<std::endl; | ||||
|     assert(in.size()==out.size()); | ||||
|     uint64_t bytes=sizeof(T); | ||||
|     uint64_t words=in.size()/numnode; | ||||
|  | ||||
|     assert(numnode * words == in.size()); | ||||
|     assert(words < (1ULL<<32)); | ||||
|  | ||||
|     AllToAll(dim,(void *)&in[0],(void *)&out[0],words,bytes); | ||||
|   } | ||||
|   void AllToAll(int dim  ,void *in,void *out,uint64_t words,uint64_t bytes); | ||||
|   void AllToAll(void  *in,void *out,uint64_t words         ,uint64_t bytes); | ||||
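  // Illustrative use (not part of the class): with a processor grid {2,2} and
  // dim==0, in.size() must divide evenly by _processors[0]==2; chunk j of rank r
  // lands as chunk r of 'out' on rank j of the sub-communicator along dim 0.
  //   std::vector<double> in(1024), out(1024);
  //   GridComm.AllToAll(0, in, out);        // GridComm: some CartesianCommunicator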
|    | ||||
|   template<class obj> void Broadcast(int root,obj &data) | ||||
|     { | ||||
|       Broadcast(root,(void *)&data,sizeof(data)); | ||||
|   | ||||
| @@ -53,28 +53,14 @@ void CartesianCommunicator::Init(int *argc, char ***argv) { | ||||
|   ShmInitGeneric(); | ||||
| } | ||||
|  | ||||
| CartesianCommunicator::CartesianCommunicator(const std::vector<int> &processors) | ||||
| CartesianCommunicator::~CartesianCommunicator() | ||||
| { | ||||
|   _ndimension = processors.size(); | ||||
|   std::vector<int> periodic(_ndimension,1); | ||||
|  | ||||
|   _Nprocessors=1; | ||||
|   _processors = processors; | ||||
|   _processor_coor.resize(_ndimension); | ||||
|    | ||||
|   MPI_Cart_create(communicator_world, _ndimension,&_processors[0],&periodic[0],1,&communicator); | ||||
|   MPI_Comm_rank(communicator,&_processor); | ||||
|   MPI_Cart_coords(communicator,_processor,_ndimension,&_processor_coor[0]); | ||||
|  | ||||
|   for(int i=0;i<_ndimension;i++){ | ||||
|     _Nprocessors*=_processors[i]; | ||||
|   } | ||||
|    | ||||
|   int Size;  | ||||
|   MPI_Comm_size(communicator,&Size); | ||||
|    | ||||
|   assert(Size==_Nprocessors); | ||||
|   int MPI_is_finalised; | ||||
|   MPI_Finalized(&MPI_is_finalised); | ||||
|   if (communicator && !MPI_is_finalised) | ||||
|     MPI_Comm_free(&communicator); | ||||
| } | ||||
|  | ||||
| void CartesianCommunicator::GlobalSum(uint32_t &u){ | ||||
|   int ierr=MPI_Allreduce(MPI_IN_PLACE,&u,1,MPI_UINT32_T,MPI_SUM,communicator); | ||||
|   assert(ierr==0); | ||||
| @@ -83,6 +69,14 @@ void CartesianCommunicator::GlobalSum(uint64_t &u){ | ||||
|   int ierr=MPI_Allreduce(MPI_IN_PLACE,&u,1,MPI_UINT64_T,MPI_SUM,communicator); | ||||
|   assert(ierr==0); | ||||
| } | ||||
| void CartesianCommunicator::GlobalXOR(uint32_t &u){ | ||||
|   int ierr=MPI_Allreduce(MPI_IN_PLACE,&u,1,MPI_UINT32_T,MPI_BXOR,communicator); | ||||
|   assert(ierr==0); | ||||
| } | ||||
| void CartesianCommunicator::GlobalXOR(uint64_t &u){ | ||||
|   int ierr=MPI_Allreduce(MPI_IN_PLACE,&u,1,MPI_UINT64_T,MPI_BXOR,communicator); | ||||
|   assert(ierr==0); | ||||
| } | ||||
| void CartesianCommunicator::GlobalSum(float &f){ | ||||
|   int ierr=MPI_Allreduce(MPI_IN_PLACE,&f,1,MPI_FLOAT,MPI_SUM,communicator); | ||||
|   assert(ierr==0); | ||||
| @@ -202,6 +196,36 @@ void CartesianCommunicator::Broadcast(int root,void* data, int bytes) | ||||
| 		     root, | ||||
| 		     communicator); | ||||
|   assert(ierr==0); | ||||
| } | ||||
| void CartesianCommunicator::AllToAll(int dim,void  *in,void *out,uint64_t words,uint64_t bytes) | ||||
| { | ||||
|   std::vector<int> row(_ndimension,1); | ||||
|   assert(dim>=0 && dim<_ndimension); | ||||
|  | ||||
|   //  Split the communicator | ||||
|   row[dim] = _processors[dim]; | ||||
|  | ||||
|   int me; | ||||
|   CartesianCommunicator Comm(row,*this,me); | ||||
|   Comm.AllToAll(in,out,words,bytes); | ||||
| } | ||||
| void CartesianCommunicator::AllToAll(void  *in,void *out,uint64_t words,uint64_t bytes) | ||||
| { | ||||
|   // MPI is a pain and uses "int" arguments. | ||||
|   // 64*64*64*128*16 is roughly 5.4x10^8 elements of data; | ||||
|   // at 24*4 bytes per element that is ~5x10^10 bytes, far above the ~2x10^9 int limit. | ||||
|   // (Turns up on 32^3 x 64 Gparity too.) | ||||
|   MPI_Datatype object; | ||||
|   int iwords;  | ||||
|   int ibytes; | ||||
|   iwords = words; | ||||
|   ibytes = bytes; | ||||
|   assert(words == iwords); // safe to cast to int ? | ||||
|   assert(bytes == ibytes); // safe to cast to int ? | ||||
|   MPI_Type_contiguous(ibytes,MPI_BYTE,&object); | ||||
|   MPI_Type_commit(&object); | ||||
|   MPI_Alltoall(in,iwords,object,out,iwords,object,communicator); | ||||
|   MPI_Type_free(&object); | ||||
| } | ||||
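A back-of-envelope check of the overflow the comment above guards against, using the numbers quoted there (the 96 bytes per element is an assumption: 24 reals at 4 bytes each):

    uint64_t words = 64ULL*64*64*128*16;   // ~5.4e8 elements
    uint64_t elem  = 96;                   // assumed: 24 single-precision reals per element
    uint64_t total = words*elem;           // ~5.2e10 bytes, far above the 2^31-1 int limit
    // A raw byte count would overflow MPI's int arguments; wrapping the element
    // size in an MPI_Type_contiguous keeps the MPI_Alltoall count down at 'words'.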
|   /////////////////////////////////////////////////////// | ||||
|   // Should only be used prior to Grid Init finished. | ||||
| @@ -222,5 +246,7 @@ void CartesianCommunicator::BroadcastWorld(int root,void* data, int bytes) | ||||
|   assert(ierr==0); | ||||
| } | ||||
|  | ||||
|  | ||||
|  | ||||
| } | ||||
|  | ||||
|   | ||||
| @@ -37,11 +37,12 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk> | ||||
| #include <sys/ipc.h> | ||||
| #include <sys/shm.h> | ||||
| #include <sys/mman.h> | ||||
#include <zlib.h> | ||||
| #ifdef HAVE_NUMAIF_H | ||||
| #include <numaif.h> | ||||
| #endif | ||||
|  | ||||
|  | ||||
| namespace Grid { | ||||
|  | ||||
| /////////////////////////////////////////////////////////////////////////////////////////////////// | ||||
| @@ -65,6 +66,7 @@ std::vector<int> CartesianCommunicator::MyGroup; | ||||
| std::vector<void *> CartesianCommunicator::ShmCommBufs; | ||||
|  | ||||
| int CartesianCommunicator::NodeCount(void)    { return GroupSize;}; | ||||
| int CartesianCommunicator::RankCount(void)    { return WorldSize;}; | ||||
|  | ||||
|  | ||||
| #undef FORCE_COMMS | ||||
| @@ -196,7 +198,46 @@ void CartesianCommunicator::Init(int *argc, char ***argv) { | ||||
|   ShmCommBuf = 0; | ||||
|   ShmCommBufs.resize(ShmSize); | ||||
|  | ||||
| #if 1 | ||||
|   //////////////////////////////////////////////////////////////////////////////////////////// | ||||
|   // Hugetlbf and others map filesystems as mappable huge pages | ||||
|   //////////////////////////////////////////////////////////////////////////////////////////// | ||||
| #ifdef GRID_MPI3_SHMMMAP | ||||
|   char shm_name [NAME_MAX]; | ||||
|   for(int r=0;r<ShmSize;r++){ | ||||
|      | ||||
|     size_t size = CartesianCommunicator::MAX_MPI_SHM_BYTES; | ||||
|     sprintf(shm_name,GRID_SHM_PATH "/Grid_mpi3_shm_%d_%d",GroupRank,r); | ||||
|     //sprintf(shm_name,"/var/lib/hugetlbfs/group/wheel/pagesize-2MB/" "Grid_mpi3_shm_%d_%d",GroupRank,r); | ||||
|     //    printf("Opening file %s \n",shm_name); | ||||
|     int fd=open(shm_name,O_RDWR|O_CREAT,0666); | ||||
|     if ( fd == -1) {  | ||||
|       printf("open %s failed\n",shm_name); | ||||
|       perror("open hugetlbfs"); | ||||
|       exit(EXIT_FAILURE); | ||||
|     } | ||||
|     int mmap_flag = MAP_SHARED ; | ||||
| #ifdef MAP_POPULATE     | ||||
|     mmap_flag|=MAP_POPULATE; | ||||
| #endif | ||||
| #ifdef MAP_HUGETLB | ||||
|     if ( Hugepages ) mmap_flag |= MAP_HUGETLB; | ||||
| #endif | ||||
|     void *ptr = (void *) mmap(NULL, MAX_MPI_SHM_BYTES, PROT_READ | PROT_WRITE, mmap_flag,fd, 0);  | ||||
|     if ( ptr == (void *)MAP_FAILED ) {     | ||||
|       printf("mmap %s failed\n",shm_name); | ||||
|       perror("failed mmap");      assert(0);     | ||||
|     } | ||||
|     assert(((uint64_t)ptr&0x3F)==0); | ||||
|     ShmCommBufs[r] =ptr; | ||||
|      | ||||
|   } | ||||
| #endif | ||||
|   //////////////////////////////////////////////////////////////////////////////////////////// | ||||
|   // POSIX shm_open; as far as I know Linux does not allow explicit huge pages in this case: | ||||
|   // tmpfs (according to Larry Meadows) does not support explicit huge pages, and it backs | ||||
|   // the POSIX shm virtual file system. | ||||
|   //////////////////////////////////////////////////////////////////////////////////////////// | ||||
| #ifdef GRID_MPI3_SHMOPEN | ||||
|   char shm_name [NAME_MAX]; | ||||
|   if ( ShmRank == 0 ) { | ||||
|     for(int r=0;r<ShmSize;r++){ | ||||
| @@ -210,9 +251,37 @@ void CartesianCommunicator::Init(int *argc, char ***argv) { | ||||
|       if ( fd < 0 ) {	perror("failed shm_open");	assert(0);      } | ||||
|       ftruncate(fd, size); | ||||
|        | ||||
|       void * ptr =  mmap(NULL,size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0); | ||||
|       if ( ptr == MAP_FAILED ) {       perror("failed mmap");      assert(0);    } | ||||
|       int mmap_flag = MAP_SHARED; | ||||
| #ifdef MAP_POPULATE  | ||||
|       mmap_flag |= MAP_POPULATE; | ||||
| #endif | ||||
| #ifdef MAP_HUGETLB | ||||
|       if (Hugepages) mmap_flag |= MAP_HUGETLB; | ||||
| #endif | ||||
|       void * ptr =  mmap(NULL,size, PROT_READ | PROT_WRITE, mmap_flag, fd, 0); | ||||
|  | ||||
|       if ( ptr == (void * )MAP_FAILED ) {       perror("failed mmap");      assert(0);    } | ||||
|       assert(((uint64_t)ptr&0x3F)==0); | ||||
|  | ||||
| // Experiments: try to force the NUMA domain of the shm segment if we have numaif.h | ||||
| #if 0 | ||||
| //#ifdef HAVE_NUMAIF_H | ||||
| 	int status; | ||||
| 	int flags=MPOL_MF_MOVE; | ||||
| #ifdef KNL | ||||
| 	int nodes=1; // numa domain == MCDRAM | ||||
| 	// Find out if in SNC2,SNC4 mode ? | ||||
| #else | ||||
| 	int nodes=r; // numa domain == MPI ID | ||||
| #endif | ||||
| 	unsigned long count=1; | ||||
| 	for(uint64_t page=0;page<size;page+=4096){ | ||||
| 	  void *pages = (void *) ( page + (uint64_t)ptr ); | ||||
| 	  uint64_t *cow_it = (uint64_t *)pages;	*cow_it = 1; | ||||
| 	  ierr= move_pages(0,count, &pages,&nodes,&status,flags); | ||||
| 	  if (ierr && (page==0)) perror("numa relocate command failed"); | ||||
| 	} | ||||
| #endif | ||||
| 	ShmCommBufs[r] =ptr; | ||||
|        | ||||
|     } | ||||
| @@ -235,23 +304,34 @@ void CartesianCommunicator::Init(int *argc, char ***argv) { | ||||
|       ShmCommBufs[r] =ptr; | ||||
|     } | ||||
|   } | ||||
|  | ||||
| #else | ||||
| #endif | ||||
|   //////////////////////////////////////////////////////////////////////////////////////////// | ||||
|   // SHMGET SHMAT and SHM_HUGETLB flag | ||||
|   //////////////////////////////////////////////////////////////////////////////////////////// | ||||
| #ifdef GRID_MPI3_SHMGET | ||||
|   std::vector<int> shmids(ShmSize); | ||||
|  | ||||
|   if ( ShmRank == 0 ) { | ||||
|     for(int r=0;r<ShmSize;r++){ | ||||
|       size_t size = CartesianCommunicator::MAX_MPI_SHM_BYTES; | ||||
|       key_t key   = 0x4545 + r; | ||||
|       if ((shmids[r]= shmget(key,size, SHM_HUGETLB | IPC_CREAT | SHM_R | SHM_W)) < 0) { | ||||
|       key_t key   = IPC_PRIVATE; | ||||
|       int flags = IPC_CREAT | SHM_R | SHM_W; | ||||
| #ifdef SHM_HUGETLB | ||||
|       if (Hugepages) flags|=SHM_HUGETLB; | ||||
| #endif | ||||
|       if ((shmids[r]= shmget(key,size, flags)) ==-1) { | ||||
| 	int errsv = errno; | ||||
| 	printf("Errno %d\n",errsv); | ||||
| 	printf("key   %d\n",key); | ||||
| 	printf("size  %zu\n",size); | ||||
| 	printf("flags %d\n",flags); | ||||
| 	perror("shmget"); | ||||
| 	exit(1); | ||||
|       } else {  | ||||
| 	printf("shmid: 0x%x\n", shmids[r]); | ||||
|       } | ||||
|     } | ||||
|   } | ||||
|   MPI_Barrier(ShmComm); | ||||
|   MPI_Bcast(&shmids[0],ShmSize*sizeof(int),MPI_BYTE,0,ShmComm); | ||||
|   MPI_Barrier(ShmComm); | ||||
| @@ -370,12 +450,27 @@ void  CartesianCommunicator::ProcessorCoorFromRank(int rank, std::vector<int> &c | ||||
|   assert(lr!=-1); | ||||
|   Lexicographic::CoorFromIndex(coor,lr,_processors); | ||||
| } | ||||
|  | ||||
| ////////////////////////////////// | ||||
| // Try to subdivide communicator | ||||
| ////////////////////////////////// | ||||
| CartesianCommunicator::CartesianCommunicator(const std::vector<int> &processors,const CartesianCommunicator &parent,int &srank)  | ||||
|   : CartesianCommunicator(processors)  | ||||
| { | ||||
|   srank=0; | ||||
|   std::cout << "Attempts to split MPI3 communicators will fail until implemented" <<std::endl; | ||||
| } | ||||
| CartesianCommunicator::CartesianCommunicator(const std::vector<int> &processors) | ||||
| {  | ||||
|   int ierr; | ||||
|   communicator=communicator_world; | ||||
|  | ||||
|   _ndimension = processors.size(); | ||||
|  | ||||
|   communicator_halo.resize (2*_ndimension); | ||||
|   for(int i=0;i<_ndimension*2;i++){ | ||||
|     MPI_Comm_dup(communicator,&communicator_halo[i]); | ||||
|   } | ||||
|  | ||||
|   //////////////////////////////////////////////////////////////// | ||||
|   // Assert power of two shm_size. | ||||
|   //////////////////////////////////////////////////////////////// | ||||
| @@ -509,6 +604,14 @@ void CartesianCommunicator::GlobalSum(uint64_t &u){ | ||||
|   int ierr=MPI_Allreduce(MPI_IN_PLACE,&u,1,MPI_UINT64_T,MPI_SUM,communicator); | ||||
|   assert(ierr==0); | ||||
| } | ||||
| void CartesianCommunicator::GlobalXOR(uint32_t &u){ | ||||
|   int ierr=MPI_Allreduce(MPI_IN_PLACE,&u,1,MPI_UINT32_T,MPI_BXOR,communicator); | ||||
|   assert(ierr==0); | ||||
| } | ||||
| void CartesianCommunicator::GlobalXOR(uint64_t &u){ | ||||
|   int ierr=MPI_Allreduce(MPI_IN_PLACE,&u,1,MPI_UINT64_T,MPI_BXOR,communicator); | ||||
|   assert(ierr==0); | ||||
| } | ||||
| void CartesianCommunicator::GlobalSum(float &f){ | ||||
|   int ierr=MPI_Allreduce(MPI_IN_PLACE,&f,1,MPI_FLOAT,MPI_SUM,communicator); | ||||
|   assert(ierr==0); | ||||
| @@ -590,13 +693,28 @@ void CartesianCommunicator::SendToRecvFromBegin(std::vector<CommsRequest_t> &lis | ||||
|   } | ||||
| } | ||||
|  | ||||
| double CartesianCommunicator::StencilSendToRecvFrom( void *xmit, | ||||
| 						     int dest, | ||||
| 						     void *recv, | ||||
| 						     int from, | ||||
| 						     int bytes,int dir) | ||||
| { | ||||
|   std::vector<CommsRequest_t> list; | ||||
|   double offbytes = StencilSendToRecvFromBegin(list,xmit,dest,recv,from,bytes,dir); | ||||
|   StencilSendToRecvFromComplete(list,dir); | ||||
|   return offbytes; | ||||
| } | ||||
|  | ||||
| double CartesianCommunicator::StencilSendToRecvFromBegin(std::vector<CommsRequest_t> &list, | ||||
| 							 void *xmit, | ||||
| 							 int dest, | ||||
| 							 void *recv, | ||||
| 							 int from, | ||||
| 							 int bytes,int dir) | ||||
| { | ||||
|   int ncomm  =communicator_halo.size();  | ||||
|   int commdir=dir%ncomm; | ||||
|  | ||||
|   MPI_Request xrq; | ||||
|   MPI_Request rrq; | ||||
|  | ||||
| @@ -615,26 +733,26 @@ double CartesianCommunicator::StencilSendToRecvFromBegin(std::vector<CommsReques | ||||
|   gfrom = MPI_UNDEFINED; | ||||
| #endif | ||||
|   if ( gfrom ==MPI_UNDEFINED) { | ||||
|     ierr=MPI_Irecv(recv, bytes, MPI_CHAR,from,from,communicator,&rrq); | ||||
|     ierr=MPI_Irecv(recv, bytes, MPI_CHAR,from,from,communicator_halo[commdir],&rrq); | ||||
|     assert(ierr==0); | ||||
|     list.push_back(rrq); | ||||
|     off_node_bytes+=bytes; | ||||
|   } | ||||
|  | ||||
|   if ( gdest == MPI_UNDEFINED ) { | ||||
|     ierr =MPI_Isend(xmit, bytes, MPI_CHAR,dest,_processor,communicator,&xrq); | ||||
|     ierr =MPI_Isend(xmit, bytes, MPI_CHAR,dest,_processor,communicator_halo[commdir],&xrq); | ||||
|     assert(ierr==0); | ||||
|     list.push_back(xrq); | ||||
|     off_node_bytes+=bytes; | ||||
|   } | ||||
|  | ||||
|   if ( CommunicatorPolicy == CommunicatorPolicySequential ) {  | ||||
|     this->StencilSendToRecvFromComplete(list); | ||||
|     this->StencilSendToRecvFromComplete(list,dir); | ||||
|   } | ||||
|  | ||||
|   return off_node_bytes; | ||||
| } | ||||
| void CartesianCommunicator::StencilSendToRecvFromComplete(std::vector<CommsRequest_t> &waitall,int dir) | ||||
| { | ||||
|   SendToRecvFromComplete(waitall); | ||||
| } | ||||
|   | ||||
							
								
								
									
268 lines added: lib/communicator/Communicator_mpit.cc (new file)
							| @@ -0,0 +1,268 @@ | ||||
|     /************************************************************************************* | ||||
|  | ||||
|     Grid physics library, www.github.com/paboyle/Grid  | ||||
|  | ||||
|     Source file: ./lib/communicator/Communicator_mpit.cc | ||||
|  | ||||
|     Copyright (C) 2015 | ||||
|  | ||||
| Author: Peter Boyle <paboyle@ph.ed.ac.uk> | ||||
|  | ||||
|     This program is free software; you can redistribute it and/or modify | ||||
|     it under the terms of the GNU General Public License as published by | ||||
|     the Free Software Foundation; either version 2 of the License, or | ||||
|     (at your option) any later version. | ||||
|  | ||||
|     This program is distributed in the hope that it will be useful, | ||||
|     but WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
|     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the | ||||
|     GNU General Public License for more details. | ||||
|  | ||||
|     You should have received a copy of the GNU General Public License along | ||||
|     with this program; if not, write to the Free Software Foundation, Inc., | ||||
|     51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||||
|  | ||||
|     See the full license in the file "LICENSE" in the top level distribution directory | ||||
|     *************************************************************************************/ | ||||
|     /*  END LEGAL */ | ||||
| #include <Grid/GridCore.h> | ||||
| #include <Grid/GridQCDcore.h> | ||||
| #include <Grid/qcd/action/ActionCore.h> | ||||
| #include <mpi.h> | ||||
|  | ||||
| namespace Grid { | ||||
|  | ||||
|  | ||||
| /////////////////////////////////////////////////////////////////////////////////////////////////// | ||||
| // Info that is set up once and independent of the Cartesian layout | ||||
| /////////////////////////////////////////////////////////////////////////////////////////////////// | ||||
| MPI_Comm CartesianCommunicator::communicator_world; | ||||
|  | ||||
| // Should error check all MPI calls. | ||||
| void CartesianCommunicator::Init(int *argc, char ***argv) { | ||||
|   int flag; | ||||
|   int provided; | ||||
|   MPI_Initialized(&flag); // needed to coexist with other libs apparently | ||||
|   if ( !flag ) { | ||||
|     MPI_Init_thread(argc,argv,MPI_THREAD_MULTIPLE,&provided); | ||||
|     if ( provided != MPI_THREAD_MULTIPLE ) { | ||||
|       QCD::WilsonKernelsStatic::Comms = QCD::WilsonKernelsStatic::CommsThenCompute; | ||||
|     } | ||||
|   } | ||||
|   MPI_Comm_dup (MPI_COMM_WORLD,&communicator_world); | ||||
|   ShmInitGeneric(); | ||||
| } | ||||
|  | ||||
| CartesianCommunicator::~CartesianCommunicator() | ||||
| { | ||||
|   int MPI_is_finalised; | ||||
|   MPI_Finalized(&MPI_is_finalised); | ||||
|   if (communicator && !MPI_is_finalised) | ||||
|     MPI_Comm_free(&communicator); | ||||
| } | ||||
|  | ||||
|  | ||||
| void CartesianCommunicator::GlobalSum(uint32_t &u){ | ||||
|   int ierr=MPI_Allreduce(MPI_IN_PLACE,&u,1,MPI_UINT32_T,MPI_SUM,communicator); | ||||
|   assert(ierr==0); | ||||
| } | ||||
| void CartesianCommunicator::GlobalSum(uint64_t &u){ | ||||
|   int ierr=MPI_Allreduce(MPI_IN_PLACE,&u,1,MPI_UINT64_T,MPI_SUM,communicator); | ||||
|   assert(ierr==0); | ||||
| } | ||||
| void CartesianCommunicator::GlobalXOR(uint32_t &u){ | ||||
|   int ierr=MPI_Allreduce(MPI_IN_PLACE,&u,1,MPI_UINT32_T,MPI_BXOR,communicator); | ||||
|   assert(ierr==0); | ||||
| } | ||||
| void CartesianCommunicator::GlobalXOR(uint64_t &u){ | ||||
|   int ierr=MPI_Allreduce(MPI_IN_PLACE,&u,1,MPI_UINT64_T,MPI_BXOR,communicator); | ||||
|   assert(ierr==0); | ||||
| } | ||||
| void CartesianCommunicator::GlobalSum(float &f){ | ||||
|   int ierr=MPI_Allreduce(MPI_IN_PLACE,&f,1,MPI_FLOAT,MPI_SUM,communicator); | ||||
|   assert(ierr==0); | ||||
| } | ||||
| void CartesianCommunicator::GlobalSumVector(float *f,int N) | ||||
| { | ||||
|   int ierr=MPI_Allreduce(MPI_IN_PLACE,f,N,MPI_FLOAT,MPI_SUM,communicator); | ||||
|   assert(ierr==0); | ||||
| } | ||||
| void CartesianCommunicator::GlobalSum(double &d) | ||||
| { | ||||
|   int ierr = MPI_Allreduce(MPI_IN_PLACE,&d,1,MPI_DOUBLE,MPI_SUM,communicator); | ||||
|   assert(ierr==0); | ||||
| } | ||||
| void CartesianCommunicator::GlobalSumVector(double *d,int N) | ||||
| { | ||||
|   int ierr = MPI_Allreduce(MPI_IN_PLACE,d,N,MPI_DOUBLE,MPI_SUM,communicator); | ||||
|   assert(ierr==0); | ||||
| } | ||||
| void CartesianCommunicator::ShiftedRanks(int dim,int shift,int &source,int &dest) | ||||
| { | ||||
|   int ierr=MPI_Cart_shift(communicator,dim,shift,&source,&dest); | ||||
|   assert(ierr==0); | ||||
| } | ||||
| int CartesianCommunicator::RankFromProcessorCoor(std::vector<int> &coor) | ||||
| { | ||||
|   int rank; | ||||
|   int ierr=MPI_Cart_rank  (communicator, &coor[0], &rank); | ||||
|   assert(ierr==0); | ||||
|   return rank; | ||||
| } | ||||
| void  CartesianCommunicator::ProcessorCoorFromRank(int rank, std::vector<int> &coor) | ||||
| { | ||||
|   coor.resize(_ndimension); | ||||
|   int ierr=MPI_Cart_coords  (communicator, rank, _ndimension,&coor[0]); | ||||
|   assert(ierr==0); | ||||
| } | ||||
|  | ||||
| // Basic Halo comms primitive | ||||
| void CartesianCommunicator::SendToRecvFrom(void *xmit, | ||||
| 					   int dest, | ||||
| 					   void *recv, | ||||
| 					   int from, | ||||
| 					   int bytes) | ||||
| { | ||||
|   std::vector<CommsRequest_t> reqs(0); | ||||
|   SendToRecvFromBegin(reqs,xmit,dest,recv,from,bytes); | ||||
|   SendToRecvFromComplete(reqs); | ||||
| } | ||||
|  | ||||
| void CartesianCommunicator::SendRecvPacket(void *xmit, | ||||
| 					   void *recv, | ||||
| 					   int sender, | ||||
| 					   int receiver, | ||||
| 					   int bytes) | ||||
| { | ||||
|   MPI_Status stat; | ||||
|   assert(sender != receiver); | ||||
|   int tag = sender; | ||||
|   if ( _processor == sender ) { | ||||
|     MPI_Send(xmit, bytes, MPI_CHAR,receiver,tag,communicator); | ||||
|   } | ||||
|   if ( _processor == receiver ) {  | ||||
|     MPI_Recv(recv, bytes, MPI_CHAR,sender,tag,communicator,&stat); | ||||
|   } | ||||
| } | ||||
|  | ||||
| // Basic Halo comms primitive | ||||
| void CartesianCommunicator::SendToRecvFromBegin(std::vector<CommsRequest_t> &list, | ||||
| 						void *xmit, | ||||
| 						int dest, | ||||
| 						void *recv, | ||||
| 						int from, | ||||
| 						int bytes) | ||||
| { | ||||
|   int myrank = _processor; | ||||
|   int ierr; | ||||
|   if ( CommunicatorPolicy == CommunicatorPolicyConcurrent ) {  | ||||
|     MPI_Request xrq; | ||||
|     MPI_Request rrq; | ||||
|  | ||||
|     ierr =MPI_Irecv(recv, bytes, MPI_CHAR,from,from,communicator,&rrq); | ||||
|     ierr|=MPI_Isend(xmit, bytes, MPI_CHAR,dest,_processor,communicator,&xrq); | ||||
|      | ||||
|     assert(ierr==0); | ||||
|     list.push_back(xrq); | ||||
|     list.push_back(rrq); | ||||
|   } else {  | ||||
|     // Give the CPU to MPI immediately; can use threads to overlap optionally | ||||
|     ierr=MPI_Sendrecv(xmit,bytes,MPI_CHAR,dest,myrank, | ||||
| 		      recv,bytes,MPI_CHAR,from, from, | ||||
| 		      communicator,MPI_STATUS_IGNORE); | ||||
|     assert(ierr==0); | ||||
|   } | ||||
| } | ||||
| void CartesianCommunicator::SendToRecvFromComplete(std::vector<CommsRequest_t> &list) | ||||
| { | ||||
|   if ( CommunicatorPolicy == CommunicatorPolicyConcurrent ) {  | ||||
|     int nreq=list.size(); | ||||
|     std::vector<MPI_Status> status(nreq); | ||||
|     int ierr = MPI_Waitall(nreq,&list[0],&status[0]); | ||||
|     assert(ierr==0); | ||||
|   } | ||||
| } | ||||
|  | ||||
| void CartesianCommunicator::Barrier(void) | ||||
| { | ||||
|   int ierr = MPI_Barrier(communicator); | ||||
|   assert(ierr==0); | ||||
| } | ||||
|  | ||||
| void CartesianCommunicator::Broadcast(int root,void* data, int bytes) | ||||
| { | ||||
|   int ierr=MPI_Bcast(data, | ||||
| 		     bytes, | ||||
| 		     MPI_BYTE, | ||||
| 		     root, | ||||
| 		     communicator); | ||||
|   assert(ierr==0); | ||||
| } | ||||
|   /////////////////////////////////////////////////////// | ||||
|   // Should only be used prior to Grid Init finished. | ||||
|   // Check for this? | ||||
|   /////////////////////////////////////////////////////// | ||||
| int CartesianCommunicator::RankWorld(void){  | ||||
|   int r;  | ||||
|   MPI_Comm_rank(communicator_world,&r); | ||||
|   return r; | ||||
| } | ||||
| void CartesianCommunicator::BroadcastWorld(int root,void* data, int bytes) | ||||
| { | ||||
|   int ierr= MPI_Bcast(data, | ||||
| 		      bytes, | ||||
| 		      MPI_BYTE, | ||||
| 		      root, | ||||
| 		      communicator_world); | ||||
|   assert(ierr==0); | ||||
| } | ||||
|  | ||||
| double CartesianCommunicator::StencilSendToRecvFromBegin(std::vector<CommsRequest_t> &list, | ||||
| 							 void *xmit, | ||||
| 							 int xmit_to_rank, | ||||
| 							 void *recv, | ||||
| 							 int recv_from_rank, | ||||
| 							 int bytes,int dir) | ||||
| { | ||||
|   int myrank = _processor; | ||||
|   int ierr; | ||||
|   int ncomm  =communicator_halo.size();  | ||||
|   int commdir=dir%ncomm; | ||||
|    | ||||
|   //  std::cout << " sending on communicator "<<dir<<" " <<communicator_halo[dir]<<std::endl; | ||||
|   // Give the CPU to MPI immediately; can use threads to overlap optionally | ||||
|   MPI_Request req[2]; | ||||
|   MPI_Irecv(recv,bytes,MPI_CHAR,recv_from_rank,recv_from_rank, communicator_halo[commdir],&req[1]); | ||||
|   MPI_Isend(xmit,bytes,MPI_CHAR,xmit_to_rank  ,myrank        , communicator_halo[commdir],&req[0]); | ||||
|  | ||||
|   list.push_back(req[0]); | ||||
|   list.push_back(req[1]); | ||||
|   return 2.0*bytes; | ||||
| } | ||||
| void CartesianCommunicator::StencilSendToRecvFromComplete(std::vector<CommsRequest_t> &waitall,int dir) | ||||
| {  | ||||
|   int nreq=waitall.size(); | ||||
|   MPI_Waitall(nreq, &waitall[0], MPI_STATUSES_IGNORE); | ||||
| }; | ||||
| double CartesianCommunicator::StencilSendToRecvFrom(void *xmit, | ||||
| 						    int xmit_to_rank, | ||||
| 						    void *recv, | ||||
| 						    int recv_from_rank, | ||||
| 						    int bytes,int dir) | ||||
| { | ||||
|   int myrank = _processor; | ||||
|   int ierr; | ||||
|   //  std::cout << " sending on communicator "<<dir<<" of "<<communicator_halo.size()<<std::endl; | ||||
|  | ||||
|   int ncomm  =communicator_halo.size();  | ||||
|   int commdir=dir%ncomm; | ||||
|   // Give the CPU to MPI immediately; can use threads to overlap optionally | ||||
|   MPI_Request req[2]; | ||||
|   MPI_Irecv(recv,bytes,MPI_CHAR,recv_from_rank,recv_from_rank, communicator_halo[commdir],&req[1]); | ||||
|   MPI_Isend(xmit,bytes,MPI_CHAR,xmit_to_rank  ,myrank        , communicator_halo[commdir],&req[0]); | ||||
|   MPI_Waitall(2, req, MPI_STATUSES_IGNORE); | ||||
|   return 2.0*bytes; | ||||
| } | ||||
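The per-direction communicator_halo duplicates exist so that several threads can progress independent halo exchanges without contending on a single communicator. A hedged sketch of the intended call pattern; comm, Nd, bytes and the per-direction buffer/rank arrays are illustrative placeholders, not Grid's stencil code:

    #pragma omp parallel for schedule(static)
    for (int dir = 0; dir < Nd; dir++) {
      std::vector<CartesianCommunicator::CommsRequest_t> reqs;
      comm.StencilSendToRecvFromBegin(reqs, xbuf[dir], to[dir], rbuf[dir], from[dir], bytes, dir);
      comm.StencilSendToRecvFromComplete(reqs, dir);
    }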
|  | ||||
|  | ||||
|  | ||||
| } | ||||
|  | ||||
| @@ -38,6 +38,9 @@ void CartesianCommunicator::Init(int *argc, char *** arv) | ||||
|   ShmInitGeneric(); | ||||
| } | ||||
|  | ||||
| CartesianCommunicator::CartesianCommunicator(const std::vector<int> &processors,const CartesianCommunicator &parent,int &srank)  | ||||
|   : CartesianCommunicator(processors) { srank=0;} | ||||
|  | ||||
| CartesianCommunicator::CartesianCommunicator(const std::vector<int> &processors) | ||||
| { | ||||
|   _processors = processors; | ||||
| @@ -53,12 +56,16 @@ CartesianCommunicator::CartesianCommunicator(const std::vector<int> &processors) | ||||
|   } | ||||
| } | ||||
|  | ||||
| CartesianCommunicator::~CartesianCommunicator(){} | ||||
|  | ||||
| void CartesianCommunicator::GlobalSum(float &){} | ||||
| void CartesianCommunicator::GlobalSumVector(float *,int N){} | ||||
| void CartesianCommunicator::GlobalSum(double &){} | ||||
| void CartesianCommunicator::GlobalSum(uint32_t &){} | ||||
| void CartesianCommunicator::GlobalSum(uint64_t &){} | ||||
| void CartesianCommunicator::GlobalSumVector(double *,int N){} | ||||
| void CartesianCommunicator::GlobalXOR(uint32_t &){} | ||||
| void CartesianCommunicator::GlobalXOR(uint64_t &){} | ||||
|  | ||||
| void CartesianCommunicator::SendRecvPacket(void *xmit, | ||||
| 					   void *recv, | ||||
| @@ -93,6 +100,14 @@ void CartesianCommunicator::SendToRecvFromComplete(std::vector<CommsRequest_t> & | ||||
| { | ||||
|   assert(0); | ||||
| } | ||||
| void CartesianCommunicator::AllToAll(int dim,void  *in,void *out,uint64_t words,uint64_t bytes) | ||||
| { | ||||
|   bcopy(in,out,bytes*words); | ||||
| } | ||||
| void CartesianCommunicator::AllToAll(void  *in,void *out,uint64_t words,uint64_t bytes) | ||||
| { | ||||
|   bcopy(in,out,bytes*words); | ||||
| } | ||||
|  | ||||
| int  CartesianCommunicator::RankWorld(void){return 0;} | ||||
| void CartesianCommunicator::Barrier(void){} | ||||
|   | ||||
| @@ -75,6 +75,11 @@ void CartesianCommunicator::Init(int *argc, char ***argv) { | ||||
|   ShmInitGeneric(); | ||||
| } | ||||
|  | ||||
| CartesianCommunicator::CartesianCommunicator(const std::vector<int> &processors,const CartesianCommunicator &parent,int &srank)  | ||||
|   : CartesianCommunicator(processors)  | ||||
| { | ||||
|   srank=0; | ||||
|   std::cout << "Attempts to split SHMEM communicators will fail " <<std::endl; | ||||
| } | ||||
| CartesianCommunicator::CartesianCommunicator(const std::vector<int> &processors) | ||||
| { | ||||
|   _ndimension = processors.size(); | ||||
|   | ||||
| @@ -42,7 +42,7 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk> | ||||
| #include <Grid/cshift/Cshift_mpi.h> | ||||
| #endif  | ||||
|  | ||||
| #ifdef GRID_COMMS_MPI3L | ||||
| #ifdef GRID_COMMS_MPIT | ||||
| #include <Grid/cshift/Cshift_mpi.h> | ||||
| #endif  | ||||
|  | ||||
|   | ||||
							
								
								
									
16182 lines changed: lib/json/json.hpp (file diff suppressed because it is too large)
											
										
									
								
							| @@ -1,4 +1,4 @@ | ||||
|  /************************************************************************************* | ||||
| /************************************************************************************* | ||||
|     Grid physics library, www.github.com/paboyle/Grid  | ||||
|     Source file: ./lib/lattice/Lattice_reduction.h | ||||
|     Copyright (C) 2015 | ||||
| @@ -328,6 +328,8 @@ static void sliceMaddVector(Lattice<vobj> &R,std::vector<RealD> &a,const Lattice | ||||
|   typedef typename vobj::vector_type vector_type; | ||||
|   typedef typename vobj::tensor_reduced tensor_reduced; | ||||
|    | ||||
|   scalar_type zscale(scale); | ||||
|  | ||||
|   GridBase *grid  = X._grid; | ||||
|  | ||||
|   int Nsimd  =grid->Nsimd(); | ||||
| @@ -353,7 +355,7 @@ static void sliceMaddVector(Lattice<vobj> &R,std::vector<RealD> &a,const Lattice | ||||
|       grid->iCoorFromIindex(icoor,l); | ||||
|       int ldx =r+icoor[orthogdim]*rd; | ||||
|       scalar_type *as =(scalar_type *)&av; | ||||
|       as[l] = scalar_type(a[ldx])*zscale; | ||||
|     } | ||||
|  | ||||
|     tensor_reduced at; at=av; | ||||
| @@ -367,71 +369,7 @@ static void sliceMaddVector(Lattice<vobj> &R,std::vector<RealD> &a,const Lattice | ||||
|   } | ||||
| }; | ||||
|  | ||||
|  | ||||
| /* | ||||
| template<class vobj> | ||||
| static void sliceMaddVectorSlow (Lattice<vobj> &R,std::vector<RealD> &a,const Lattice<vobj> &X,const Lattice<vobj> &Y, | ||||
| 			     int Orthog,RealD scale=1.0)  | ||||
| {     | ||||
|   // FIXME: Implementation is slow | ||||
|   // Best base the linear combination by constructing a  | ||||
|   // set of vectors of size grid->_rdimensions[Orthog]. | ||||
|   typedef typename vobj::scalar_object sobj; | ||||
|   typedef typename vobj::scalar_type scalar_type; | ||||
|   typedef typename vobj::vector_type vector_type; | ||||
|    | ||||
|   int Nblock = X._grid->GlobalDimensions()[Orthog]; | ||||
|    | ||||
|   GridBase *FullGrid  = X._grid; | ||||
|   GridBase *SliceGrid = makeSubSliceGrid(FullGrid,Orthog); | ||||
|    | ||||
|   Lattice<vobj> Xslice(SliceGrid); | ||||
|   Lattice<vobj> Rslice(SliceGrid); | ||||
|   // If we based this on Cshift it would work for spread out | ||||
|   // but it would be even slower | ||||
|   for(int i=0;i<Nblock;i++){ | ||||
|     ExtractSlice(Rslice,Y,i,Orthog); | ||||
|     ExtractSlice(Xslice,X,i,Orthog); | ||||
|     Rslice = Rslice + Xslice*(scale*a[i]); | ||||
|     InsertSlice(Rslice,R,i,Orthog); | ||||
|   } | ||||
| }; | ||||
| template<class vobj> | ||||
| static void sliceInnerProductVectorSlow( std::vector<ComplexD> & vec, const Lattice<vobj> &lhs,const Lattice<vobj> &rhs,int Orthog)  | ||||
|   { | ||||
|     // FIXME: Implementation is slow | ||||
|     // Look at localInnerProduct implementation, | ||||
|     // and do inside a site loop with block strided iterators | ||||
|     typedef typename vobj::scalar_object sobj; | ||||
|     typedef typename vobj::scalar_type scalar_type; | ||||
|     typedef typename vobj::vector_type vector_type; | ||||
|     typedef typename vobj::tensor_reduced scalar; | ||||
|     typedef typename scalar::scalar_object  scomplex; | ||||
|    | ||||
|     int Nblock = lhs._grid->GlobalDimensions()[Orthog]; | ||||
|     vec.resize(Nblock); | ||||
|     std::vector<scomplex> sip(Nblock); | ||||
|     Lattice<scalar> IP(lhs._grid);  | ||||
|     IP=localInnerProduct(lhs,rhs); | ||||
|     sliceSum(IP,sip,Orthog); | ||||
|    | ||||
|     for(int ss=0;ss<Nblock;ss++){ | ||||
|       vec[ss] = TensorRemove(sip[ss]); | ||||
|     } | ||||
|   } | ||||
| */ | ||||
|  | ||||
| ////////////////////////////////////////////////////////////////////////////////////////// | ||||
| // FIXME: Implementation is slow | ||||
| // If we based this on Cshift it would work for spread out | ||||
| // but it would be even slower | ||||
| // | ||||
| // Repeated extract slice is inefficient | ||||
| // | ||||
| // Best base the linear combination by constructing a  | ||||
| // set of vectors of size grid->_rdimensions[Orthog]. | ||||
| ////////////////////////////////////////////////////////////////////////////////////////// | ||||
|  | ||||
| inline GridBase         *makeSubSliceGrid(const GridBase *BlockSolverGrid,int Orthog) | ||||
| { | ||||
|   int NN    = BlockSolverGrid->_ndimension; | ||||
| @@ -450,7 +388,7 @@ inline GridBase         *makeSubSliceGrid(const GridBase *BlockSolverGrid,int Or | ||||
|   } | ||||
|   return (GridBase *)new GridCartesian(latt_phys,simd_phys,mpi_phys);  | ||||
| } | ||||
|  | ||||
| */ | ||||
|  | ||||
| template<class vobj> | ||||
| static void sliceMaddMatrix (Lattice<vobj> &R,Eigen::MatrixXcd &aa,const Lattice<vobj> &X,const Lattice<vobj> &Y,int Orthog,RealD scale=1.0)  | ||||
| @@ -462,55 +400,167 @@ static void sliceMaddMatrix (Lattice<vobj> &R,Eigen::MatrixXcd &aa,const Lattice | ||||
|   int Nblock = X._grid->GlobalDimensions()[Orthog]; | ||||
|  | ||||
|   GridBase *FullGrid  = X._grid; | ||||
|   GridBase *SliceGrid = makeSubSliceGrid(FullGrid,Orthog); | ||||
|   //  GridBase *SliceGrid = makeSubSliceGrid(FullGrid,Orthog); | ||||
|  | ||||
|   Lattice<vobj> Xslice(SliceGrid); | ||||
|   Lattice<vobj> Rslice(SliceGrid); | ||||
|   //  Lattice<vobj> Xslice(SliceGrid); | ||||
|   //  Lattice<vobj> Rslice(SliceGrid); | ||||
|  | ||||
|   assert( FullGrid->_simd_layout[Orthog]==1); | ||||
|   int nh =  FullGrid->_ndimension; | ||||
|   //  int nl = SliceGrid->_ndimension; | ||||
|   int nl = nh-1; | ||||
|  | ||||
|   //FIXME package in a convenient iterator | ||||
|   //Should loop over a plane orthogonal to direction "Orthog" | ||||
|   int stride=FullGrid->_slice_stride[Orthog]; | ||||
|   int block =FullGrid->_slice_block [Orthog]; | ||||
|   int nblock=FullGrid->_slice_nblock[Orthog]; | ||||
|   int ostride=FullGrid->_ostride[Orthog]; | ||||
| #pragma omp parallel  | ||||
|   { | ||||
|     std::vector<vobj> s_x(Nblock); | ||||
|  | ||||
| #pragma omp for collapse(2) | ||||
|     for(int n=0;n<nblock;n++){ | ||||
|     for(int b=0;b<block;b++){ | ||||
|       int o  = n*stride + b; | ||||
|  | ||||
|       for(int i=0;i<Nblock;i++){ | ||||
| 	s_x[i] = X[o+i*ostride]; | ||||
|       } | ||||
|  | ||||
|       vobj dot; | ||||
|       for(int i=0;i<Nblock;i++){ | ||||
| 	dot = Y[o+i*ostride]; | ||||
| 	for(int j=0;j<Nblock;j++){ | ||||
| 	  dot = dot + s_x[j]*(scale*aa(j,i)); | ||||
| 	} | ||||
| 	R[o+i*ostride]=dot; | ||||
|       } | ||||
|     }} | ||||
|   } | ||||
| }; | ||||
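Restated as a formula: for every site o in the plane orthogonal to Orthog and every slice index i along Orthog, the rewritten loop computes

    R_i(o) = Y_i(o) + scale * sum_{j=0..Nblock-1} aa(j,i) * X_j(o)

without ever forming explicit slice lattices; sliceMulMatrix below is the same with the Y term dropped.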
|  | ||||
| template<class vobj> | ||||
| static void sliceMulMatrix (Lattice<vobj> &R,Eigen::MatrixXcd &aa,const Lattice<vobj> &X,int Orthog,RealD scale=1.0)  | ||||
| {     | ||||
|   typedef typename vobj::scalar_object sobj; | ||||
|   typedef typename vobj::scalar_type scalar_type; | ||||
|   typedef typename vobj::vector_type vector_type; | ||||
|  | ||||
|   int Nblock = X._grid->GlobalDimensions()[Orthog]; | ||||
|  | ||||
|   GridBase *FullGrid  = X._grid; | ||||
|   //  GridBase *SliceGrid = makeSubSliceGrid(FullGrid,Orthog); | ||||
|   //  Lattice<vobj> Xslice(SliceGrid); | ||||
|   //  Lattice<vobj> Rslice(SliceGrid); | ||||
|  | ||||
|   assert( FullGrid->_simd_layout[Orthog]==1); | ||||
|   int nh =  FullGrid->_ndimension; | ||||
|   //  int nl = SliceGrid->_ndimension; | ||||
|   int nl=1; | ||||
|  | ||||
|   //FIXME package in a convenient iterator | ||||
|   //Should loop over a plane orthogonal to direction "Orthog" | ||||
|   int stride=FullGrid->_slice_stride[Orthog]; | ||||
|   int block =FullGrid->_slice_block [Orthog]; | ||||
|   int nblock=FullGrid->_slice_nblock[Orthog]; | ||||
|   int ostride=FullGrid->_ostride[Orthog]; | ||||
| #pragma omp parallel  | ||||
|   { | ||||
|     std::vector<vobj> s_x(Nblock); | ||||
|  | ||||
| #pragma omp for collapse(2) | ||||
|     for(int n=0;n<nblock;n++){ | ||||
|     for(int b=0;b<block;b++){ | ||||
|       int o  = n*stride + b; | ||||
|  | ||||
|       for(int i=0;i<Nblock;i++){ | ||||
| 	s_x[i] = X[o+i*ostride]; | ||||
|       } | ||||
|  | ||||
|       vobj dot; | ||||
|       for(int i=0;i<Nblock;i++){ | ||||
| 	dot = s_x[0]*(scale*aa(0,i)); | ||||
| 	for(int j=1;j<Nblock;j++){ | ||||
| 	  dot = dot + s_x[j]*(scale*aa(j,i)); | ||||
| 	} | ||||
| 	R[o+i*ostride]=dot; | ||||
|       } | ||||
|     }} | ||||
|   } | ||||
|  | ||||
| }; | ||||
|  | ||||
|  | ||||
| template<class vobj> | ||||
| static void sliceInnerProductMatrix(  Eigen::MatrixXcd &mat, const Lattice<vobj> &lhs,const Lattice<vobj> &rhs,int Orthog)  | ||||
| { | ||||
|   // FIXME: Implementation is slow | ||||
|   // Not sure of best solution.. think about it | ||||
|   typedef typename vobj::scalar_object sobj; | ||||
|   typedef typename vobj::scalar_type scalar_type; | ||||
|   typedef typename vobj::vector_type vector_type; | ||||
|    | ||||
|   GridBase *FullGrid  = lhs._grid; | ||||
|   GridBase *SliceGrid = makeSubSliceGrid(FullGrid,Orthog); | ||||
|   //  GridBase *SliceGrid = makeSubSliceGrid(FullGrid,Orthog); | ||||
|    | ||||
|   int Nblock = FullGrid->GlobalDimensions()[Orthog]; | ||||
|    | ||||
|   Lattice<vobj> Lslice(SliceGrid); | ||||
|   Lattice<vobj> Rslice(SliceGrid); | ||||
|   //  Lattice<vobj> Lslice(SliceGrid); | ||||
|   //  Lattice<vobj> Rslice(SliceGrid); | ||||
|    | ||||
|   mat = Eigen::MatrixXcd::Zero(Nblock,Nblock); | ||||
|  | ||||
|   assert( FullGrid->_simd_layout[Orthog]==1); | ||||
|   int nh =  FullGrid->_ndimension; | ||||
|   //  int nl = SliceGrid->_ndimension; | ||||
|   int nl = nh-1; | ||||
|  | ||||
|   //FIXME package in a convenient iterator | ||||
|   //Should loop over a plane orthogonal to direction "Orthog" | ||||
|   int stride=FullGrid->_slice_stride[Orthog]; | ||||
|   int block =FullGrid->_slice_block [Orthog]; | ||||
|   int nblock=FullGrid->_slice_nblock[Orthog]; | ||||
|   int ostride=FullGrid->_ostride[Orthog]; | ||||
|  | ||||
|   typedef typename vobj::vector_typeD vector_typeD; | ||||
|  | ||||
| #pragma omp parallel  | ||||
|   { | ||||
|     std::vector<vobj> Left(Nblock); | ||||
|     std::vector<vobj> Right(Nblock); | ||||
|     Eigen::MatrixXcd  mat_thread = Eigen::MatrixXcd::Zero(Nblock,Nblock); | ||||
|  | ||||
| #pragma omp for collapse(2) | ||||
|     for(int n=0;n<nblock;n++){ | ||||
|     for(int b=0;b<block;b++){ | ||||
|  | ||||
|       int o  = n*stride + b; | ||||
|  | ||||
|       for(int i=0;i<Nblock;i++){ | ||||
| 	Left [i] = lhs[o+i*ostride]; | ||||
| 	Right[i] = rhs[o+i*ostride]; | ||||
|       } | ||||
|  | ||||
|       for(int i=0;i<Nblock;i++){ | ||||
|       for(int j=0;j<Nblock;j++){ | ||||
| 	auto tmp  = innerProduct(Left[i],Right[j]); | ||||
| 	auto rtmp = TensorRemove(tmp); | ||||
| 	mat_thread(i,j) += Reduce(rtmp); | ||||
|       }} | ||||
|     }} | ||||
| #pragma omp critical | ||||
|     { | ||||
|       mat += mat_thread; | ||||
|     }   | ||||
|   } | ||||
|  | ||||
|   for(int i=0;i<Nblock;i++){ | ||||
|   for(int j=0;j<Nblock;j++){ | ||||
|     ComplexD sum = mat(i,j); | ||||
|     FullGrid->GlobalSum(sum); | ||||
|     mat(i,j)=sum; | ||||
|   }} | ||||
|  | ||||
|   return; | ||||
| } | ||||
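In the same notation, after the final GlobalSum the routine returns

    mat(i,j) = sum_o innerProduct( lhs_i(o), rhs_j(o) )

with o running over all sites of the plane orthogonal to Orthog, accumulated first per thread into mat_thread and then reduced across threads and MPI ranks.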
|  | ||||
|   | ||||
| @@ -109,8 +109,8 @@ inline void blockProject(Lattice<iVector<CComplex,nbasis > > &coarseData, | ||||
|  | ||||
|   coarseData=zero; | ||||
|  | ||||
|   // Loop over the fine sites in parallel; each accumulates into its associated coarse site. | ||||
|   parallel_for(int sf=0;sf<fine->oSites();sf++){ | ||||
|  | ||||
|     int sc; | ||||
|     std::vector<int> coor_c(_ndimension); | ||||
| @@ -119,6 +119,7 @@ inline void blockProject(Lattice<iVector<CComplex,nbasis > > &coarseData, | ||||
|     for(int d=0;d<_ndimension;d++) coor_c[d]=coor_f[d]/block_r[d]; | ||||
|     Lexicographic::IndexFromCoor(coor_c,sc,coarse->_rdimensions); | ||||
|  | ||||
| PARALLEL_CRITICAL | ||||
|     for(int i=0;i<nbasis;i++) { | ||||
|  | ||||
|       coarseData._odata[sc](i)=coarseData._odata[sc](i) | ||||
| @@ -139,6 +140,7 @@ inline void blockZAXPY(Lattice<vobj> &fineZ, | ||||
|   GridBase * coarse= coarseA._grid; | ||||
|  | ||||
|   fineZ.checkerboard=fineX.checkerboard; | ||||
|   assert(fineX.checkerboard==fineY.checkerboard); | ||||
|   subdivides(coarse,fine); // require they map | ||||
|   conformable(fineX,fineY); | ||||
|   conformable(fineX,fineZ); | ||||
| @@ -180,9 +182,10 @@ template<class vobj,class CComplex> | ||||
|   GridBase *coarse(CoarseInner._grid); | ||||
|   GridBase *fine  (fineX._grid); | ||||
|  | ||||
|   Lattice<dotp> fine_inner(fine); | ||||
|   Lattice<dotp> fine_inner(fine); fine_inner.checkerboard = fineX.checkerboard; | ||||
|   Lattice<dotp> coarse_inner(coarse); | ||||
|  | ||||
|   // Precision promotion? | ||||
|   fine_inner = localInnerProduct(fineX,fineY); | ||||
|   blockSum(coarse_inner,fine_inner); | ||||
|   parallel_for(int ss=0;ss<coarse->oSites();ss++){ | ||||
| @@ -193,7 +196,7 @@ template<class vobj,class CComplex> | ||||
| inline void blockNormalise(Lattice<CComplex> &ip,Lattice<vobj> &fineX) | ||||
| { | ||||
|   GridBase *coarse = ip._grid; | ||||
|   Lattice<vobj> zz(fineX._grid); zz=zero; | ||||
|   Lattice<vobj> zz(fineX._grid); zz=zero; zz.checkerboard=fineX.checkerboard; | ||||
|   blockInnerProduct(ip,fineX,fineX); | ||||
|   ip = pow(ip,-0.5); | ||||
|   blockZAXPY(fineX,ip,fineX,zz); | ||||
| @@ -216,20 +219,26 @@ inline void blockSum(Lattice<vobj> &coarseData,const Lattice<vobj> &fineData) | ||||
|     block_r[d] = fine->_rdimensions[d] / coarse->_rdimensions[d]; | ||||
|   } | ||||
|  | ||||
|   // Turn this around to loop threaded over sc and interior loop  | ||||
|   // over sf would thread better | ||||
|   coarseData=zero; | ||||
|   for(int sf=0;sf<fine->oSites();sf++){ | ||||
|   parallel_region { | ||||
|  | ||||
|     int sc; | ||||
|     std::vector<int> coor_c(_ndimension); | ||||
|     std::vector<int> coor_f(_ndimension); | ||||
|  | ||||
|     parallel_for_internal(int sf=0;sf<fine->oSites();sf++){ | ||||
|      | ||||
|       Lexicographic::CoorFromIndex(coor_f,sf,fine->_rdimensions); | ||||
|       for(int d=0;d<_ndimension;d++) coor_c[d]=coor_f[d]/block_r[d]; | ||||
|       Lexicographic::IndexFromCoor(coor_c,sc,coarse->_rdimensions); | ||||
|        | ||||
| PARALLEL_CRITICAL | ||||
|       coarseData._odata[sc]=coarseData._odata[sc]+fineData._odata[sf]; | ||||
|  | ||||
|     } | ||||
|   } | ||||
|   return; | ||||
| } | ||||
|  | ||||
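The comment removed above suggests the reverse loop ordering: thread over the coarse sites so each thread owns its output element and no critical section is needed. A minimal sketch of that ordering, assuming the same block_r, _ndimension and Lexicographic helpers as blockSum above (not the committed implementation):

  // Sketch only: one thread per coarse site; the accumulation is thread-private,
  // so no PARALLEL_CRITICAL is required.
  coarseData=zero;
  parallel_for(int sc=0;sc<coarse->oSites();sc++){
    std::vector<int> coor_c(_ndimension);
    std::vector<int> coor_b(_ndimension);
    std::vector<int> coor_f(_ndimension);
    Lexicographic::CoorFromIndex(coor_c,sc,coarse->_rdimensions);

    int blockVol=1;
    for(int d=0;d<_ndimension;d++) blockVol*=block_r[d];

    for(int sb=0;sb<blockVol;sb++){                        // fine sites inside this block
      Lexicographic::CoorFromIndex(coor_b,sb,block_r);
      for(int d=0;d<_ndimension;d++) coor_f[d]=coor_c[d]*block_r[d]+coor_b[d];
      int sf; Lexicographic::IndexFromCoor(coor_f,sf,fine->_rdimensions);
      coarseData._odata[sc]=coarseData._odata[sc]+fineData._odata[sf];
    }
  }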
| @@ -238,7 +247,7 @@ inline void blockPick(GridBase *coarse,const Lattice<vobj> &unpicked,Lattice<vob | ||||
| { | ||||
|   GridBase * fine = unpicked._grid; | ||||
|  | ||||
|   Lattice<vobj> zz(fine); | ||||
|   Lattice<vobj> zz(fine); zz.checkerboard = unpicked.checkerboard; | ||||
|   Lattice<iScalar<vInteger> > fcoor(fine); | ||||
|  | ||||
|   zz = zero; | ||||
| @@ -303,12 +312,13 @@ inline void blockPromote(const Lattice<iVector<CComplex,nbasis > > &coarseData, | ||||
|   } | ||||
|  | ||||
|   // Loop with a cache friendly loop ordering | ||||
|   for(int sf=0;sf<fine->oSites();sf++){ | ||||
|  | ||||
|   parallel_region { | ||||
|     int sc; | ||||
|     std::vector<int> coor_c(_ndimension); | ||||
|     std::vector<int> coor_f(_ndimension); | ||||
|  | ||||
|     parallel_for_internal(int sf=0;sf<fine->oSites();sf++){ | ||||
|  | ||||
|       Lexicographic::CoorFromIndex(coor_f,sf,fine->_rdimensions); | ||||
|       for(int d=0;d<_ndimension;d++) coor_c[d]=coor_f[d]/block_r[d]; | ||||
|       Lexicographic::IndexFromCoor(coor_c,sc,coarse->_rdimensions); | ||||
| @@ -316,7 +326,7 @@ inline void blockPromote(const Lattice<iVector<CComplex,nbasis > > &coarseData, | ||||
|       for(int i=0;i<nbasis;i++) { | ||||
| 	if(i==0) fineData._odata[sf]=coarseData._odata[sc](i) * Basis[i]._odata[sf]; | ||||
| 	else     fineData._odata[sf]=fineData._odata[sf]+coarseData._odata[sc](i)*Basis[i]._odata[sf]; | ||||
|  | ||||
|       } | ||||
|     } | ||||
|   } | ||||
|   return; | ||||
| @@ -551,7 +561,10 @@ void Replicate(Lattice<vobj> &coarse,Lattice<vobj> & fine) | ||||
|  | ||||
| //Copy SIMD-vectorized lattice to array of scalar objects in lexicographic order | ||||
| template<typename vobj, typename sobj> | ||||
| typename std::enable_if<isSIMDvectorized<vobj>::value && !isSIMDvectorized<sobj>::value, void>::type unvectorizeToLexOrdArray(std::vector<sobj> &out, const Lattice<vobj> &in){ | ||||
| typename std::enable_if<isSIMDvectorized<vobj>::value && !isSIMDvectorized<sobj>::value, void>::type  | ||||
| unvectorizeToLexOrdArray(std::vector<sobj> &out, const Lattice<vobj> &in) | ||||
| { | ||||
|  | ||||
|   typedef typename vobj::vector_type vtype; | ||||
|    | ||||
|   GridBase* in_grid = in._grid; | ||||
| @@ -590,6 +603,54 @@ typename std::enable_if<isSIMDvectorized<vobj>::value && !isSIMDvectorized<sobj> | ||||
|     extract1(in_vobj, out_ptrs, 0); | ||||
|   } | ||||
| } | ||||
| //Copy SIMD-vectorized lattice to array of scalar objects in lexicographic order | ||||
| template<typename vobj, typename sobj> | ||||
| typename std::enable_if<isSIMDvectorized<vobj>::value  | ||||
|                     && !isSIMDvectorized<sobj>::value, void>::type  | ||||
| vectorizeFromLexOrdArray( std::vector<sobj> &in, Lattice<vobj> &out) | ||||
| { | ||||
|  | ||||
|   typedef typename vobj::vector_type vtype; | ||||
|    | ||||
|   GridBase* grid = out._grid; | ||||
|   assert(in.size()==grid->lSites()); | ||||
|    | ||||
|   int ndim     = grid->Nd(); | ||||
|   int nsimd    = vtype::Nsimd(); | ||||
|  | ||||
|   std::vector<std::vector<int> > icoor(nsimd); | ||||
|        | ||||
|   for(int lane=0; lane < nsimd; lane++){ | ||||
|     icoor[lane].resize(ndim); | ||||
|     grid->iCoorFromIindex(icoor[lane],lane); | ||||
|   } | ||||
|    | ||||
|   parallel_for(uint64_t oidx = 0; oidx < grid->oSites(); oidx++){ //loop over outer index | ||||
|     //Assemble vector of pointers to output elements | ||||
|     std::vector<sobj*> ptrs(nsimd); | ||||
|  | ||||
|     std::vector<int> ocoor(ndim); | ||||
|     grid->oCoorFromOindex(ocoor, oidx); | ||||
|  | ||||
|     std::vector<int> lcoor(grid->Nd()); | ||||
|        | ||||
|     for(int lane=0; lane < nsimd; lane++){ | ||||
|  | ||||
|       for(int mu=0;mu<ndim;mu++){ | ||||
| 	lcoor[mu] = ocoor[mu] + grid->_rdimensions[mu]*icoor[lane][mu]; | ||||
|       } | ||||
|  | ||||
|       int lex; | ||||
|       Lexicographic::IndexFromCoor(lcoor, lex, grid->_ldimensions); | ||||
|       ptrs[lane] = &in[lex]; | ||||
|     } | ||||
|      | ||||
|     //pack from those ptrs | ||||
|     vobj vecobj; | ||||
|     merge1(vecobj, ptrs, 0); | ||||
|     out._odata[oidx] = vecobj;  | ||||
|   } | ||||
| } | ||||
|  | ||||
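A quick way to see the intent of the pair unvectorizeToLexOrdArray / vectorizeFromLexOrdArray is a round trip through the scalar, lexicographically ordered buffer; the field type below is illustrative:

  // Sketch only: SIMD lattice -> lex-ordered scalar array -> SIMD lattice.
  LatticeFermion src(grid), dst(grid);        // any vectorised lattice type works
  std::vector<SpinColourVector> buf(grid->lSites());
  unvectorizeToLexOrdArray(buf, src);         // scatter SIMD lanes to scalar sites
  vectorizeFromLexOrdArray(buf, dst);         // gather scalar sites back into lanes
  // dst now matches src site by site.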
| //Convert a Lattice from one precision to another | ||||
| template<class VobjOut, class VobjIn> | ||||
| @@ -615,7 +676,7 @@ void precisionChange(Lattice<VobjOut> &out, const Lattice<VobjIn> &in){ | ||||
|   std::vector<SobjOut> in_slex_conv(in_grid->lSites()); | ||||
|   unvectorizeToLexOrdArray(in_slex_conv, in); | ||||
|      | ||||
|   parallel_for(int out_oidx=0;out_oidx<out_grid->oSites();out_oidx++){ | ||||
|   parallel_for(uint64_t out_oidx=0;out_oidx<out_grid->oSites();out_oidx++){ | ||||
|     std::vector<int> out_ocoor(ndim); | ||||
|     out_grid->oCoorFromOindex(out_ocoor, out_oidx); | ||||
|  | ||||
| @@ -634,5 +695,314 @@ void precisionChange(Lattice<VobjOut> &out, const Lattice<VobjIn> &in){ | ||||
|   } | ||||
| } | ||||
|  | ||||
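For reference, a minimal use of precisionChange, assuming grid_d and grid_f describe the same global volume in double and single precision respectively (names illustrative):

  // Sketch only: demote a double-precision gauge field to single precision.
  LatticeGaugeFieldD Ud(grid_d);
  LatticeGaugeFieldF Uf(grid_f);
  precisionChange(Uf, Ud);   // (out, in)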
| //////////////////////////////////////////////////////////////////////////////// | ||||
| // Communicate between grids | ||||
| //////////////////////////////////////////////////////////////////////////////// | ||||
| // | ||||
| // All to all plan | ||||
| // | ||||
| // Subvolume on fine grid is v.    Vectors a,b,c,d  | ||||
| // | ||||
| /////////////////////////////////////////////////////////////////////////////////////////////////////////// | ||||
| // SIMPLEST CASE: | ||||
| /////////////////////////////////////////////////////////////////////////////////////////////////////////// | ||||
| // Mesh of nodes (2); subdivide into 1 subdivision | ||||
| // | ||||
| // Lex ord:    | ||||
| //          N0 va0 vb0  N1 va1 vb1  | ||||
| // | ||||
| // For each dimension do an all to all | ||||
| // | ||||
| // full AllToAll(0) | ||||
| //          N0 va0 va1    N1 vb0 vb1 | ||||
| // | ||||
| // REARRANGE | ||||
| //          N0 va01       N1 vb01 | ||||
| // | ||||
| // Must also rearrange the data into the NEW lex order of the grid at each stage. Some kind of "insert/extract". | ||||
| // NB: Easiest to program if we keep the data in lex order. | ||||
| // | ||||
| /////////////////////////////////////////////////////////////////////////////////////////////////////////// | ||||
| // SIMPLE CASE: | ||||
| /////////////////////////////////////////////////////////////////////////////////////////////////////////// | ||||
| // | ||||
| // Mesh of nodes (2x2); subdivide into 1x1 subdivisions | ||||
| // | ||||
| // Lex ord:    | ||||
| //          N0 va0 vb0 vc0 vd0       N1 va1 vb1 vc1 vd1   | ||||
| //          N2 va2 vb2 vc2 vd2       N3 va3 vb3 vc3 vd3  | ||||
| // | ||||
| // Ratio = full[dim] / split[dim] | ||||
| // | ||||
| // For each dimension do an all to all; get Nvec -> Nvec / ratio | ||||
| //                                          Ldim -> Ldim * ratio | ||||
| //                                          LocalVol -> LocalVol * ratio | ||||
| // full AllToAll(0) | ||||
| //          N0 va0 vb0 va1 vb1       N1 vc0 vd0 vc1 vd1    | ||||
| //          N2 va2 vb2 va3 vb3       N3 vc2 vd2 vc3 vd3  | ||||
| // | ||||
| // REARRANGE | ||||
| //          N0 va01 vb01      N1 vc01 vd01 | ||||
| //          N2 va23 vb23      N3 vc23 vd23 | ||||
| // | ||||
| // full AllToAll(1)           // Not what is wanted. FIXME | ||||
| //          N0 va01 va23      N1 vc01 vc23  | ||||
| //          N2 vb01 vb23      N3 vd01 vd23 | ||||
| //  | ||||
| // REARRANGE | ||||
| //          N0 va0123      N1 vc0123 | ||||
| //          N2 vb0123      N3 vd0123 | ||||
| // | ||||
| // Must also rearrange the data into the NEW lex order of the grid at each stage. Some kind of "insert/extract". | ||||
| // NB: Easiest to program if we keep the data in lex order. | ||||
| // | ||||
| ///////////////////////////////////////////////////////// | ||||
|  | ||||
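Before the implementation, the REARRANGE step can be illustrated with a tiny self-contained toy (plain C++, not Grid code): the post-AllToAll buffer holds `ratio` chunks of the old local volume back to back, and the rearrange interleaves them into lexicographic order on the enlarged volume. A 2x2 local volume with ratio 2 along dimension 0 reproduces the index map alldata[rsite] = tmpdata[lsite + r*lsites] used below once nvec has dropped to 1.

#include <cstdio>
#include <vector>

// Toy model only: 2-D local volume 2x2, split ratio 2 along dimension 0.
int main(void) {
  const int ratio    = 2;
  const int ldims[2] = {2, 2};                       // old local extents
  const int rdims[2] = {ldims[0]*ratio, ldims[1]};   // enlarged extent along dim 0
  const int lsites   = ldims[0]*ldims[1];

  std::vector<int> tmpdata(lsites*ratio), alldata(lsites*ratio);
  for (int s = 0; s < lsites*ratio; s++) tmpdata[s] = s;  // chunk r occupies [r*lsites, (r+1)*lsites)

  for (int lsite = 0; lsite < lsites; lsite++) {
    int lx = lsite % ldims[0], ly = lsite / ldims[0];     // lex coordinate, dim 0 fastest
    for (int r = 0; r < ratio; r++) {
      int rx    = lx + r*ldims[0];                        // shift along the split dimension
      int rsite = rx + rdims[0]*ly;                       // lex index in the enlarged volume
      alldata[rsite] = tmpdata[lsite + r*lsites];
    }
  }
  for (int s = 0; s < lsites*ratio; s++) std::printf("alldata[%d] = %d\n", s, alldata[s]);
  return 0; // prints 0 1 4 5 2 3 6 7 : each row now holds chunk 0 then chunk 1
}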
| template<class Vobj> | ||||
| void Grid_split(std::vector<Lattice<Vobj> > & full,Lattice<Vobj>   & split) | ||||
| { | ||||
|   typedef typename Vobj::scalar_object Sobj; | ||||
|  | ||||
|   int full_vecs   = full.size(); | ||||
|  | ||||
|   assert(full_vecs>=1); | ||||
|  | ||||
|   GridBase * full_grid = full[0]._grid; | ||||
|   GridBase *split_grid = split._grid; | ||||
|  | ||||
|   int       ndim  = full_grid->_ndimension; | ||||
|   int  full_nproc = full_grid->_Nprocessors; | ||||
|   int split_nproc =split_grid->_Nprocessors; | ||||
|  | ||||
|   //////////////////////////////// | ||||
|   // Checkerboard management | ||||
|   //////////////////////////////// | ||||
|   int cb = full[0].checkerboard; | ||||
|   split.checkerboard = cb; | ||||
|  | ||||
|   ////////////////////////////// | ||||
|   // Checks | ||||
|   ////////////////////////////// | ||||
|   assert(full_grid->_ndimension==split_grid->_ndimension); | ||||
|   for(int n=0;n<full_vecs;n++){ | ||||
|     assert(full[n].checkerboard == cb); | ||||
|     for(int d=0;d<ndim;d++){ | ||||
|       assert(full[n]._grid->_gdimensions[d]==split._grid->_gdimensions[d]); | ||||
|       assert(full[n]._grid->_fdimensions[d]==split._grid->_fdimensions[d]); | ||||
|     } | ||||
|   } | ||||
|  | ||||
|   int   nvector   =full_nproc/split_nproc;  | ||||
|   assert(nvector*split_nproc==full_nproc); | ||||
|   assert(nvector == full_vecs); | ||||
|  | ||||
|   std::vector<int> ratio(ndim); | ||||
|   for(int d=0;d<ndim;d++){ | ||||
|     ratio[d] = full_grid->_processors[d]/ split_grid->_processors[d]; | ||||
|   } | ||||
|  | ||||
|   uint64_t lsites = full_grid->lSites(); | ||||
|   uint64_t     sz = lsites * nvector; | ||||
|   std::vector<Sobj> tmpdata(sz); | ||||
|   std::vector<Sobj> alldata(sz); | ||||
|   std::vector<Sobj> scalardata(lsites);  | ||||
|  | ||||
|   for(int v=0;v<nvector;v++){ | ||||
|     unvectorizeToLexOrdArray(scalardata,full[v]);     | ||||
|     parallel_for(int site=0;site<lsites;site++){ | ||||
|       alldata[v*lsites+site] = scalardata[site]; | ||||
|     } | ||||
|   } | ||||
|  | ||||
|   int nvec = nvector; // Counts down to 1 as we collapse dims | ||||
|   std::vector<int> ldims = full_grid->_ldimensions; | ||||
|   std::vector<int> lcoor(ndim); | ||||
|  | ||||
|   for(int d=ndim-1;d>=0;d--){ | ||||
|  | ||||
|     if ( ratio[d] != 1 ) { | ||||
|  | ||||
|       full_grid ->AllToAll(d,alldata,tmpdata); | ||||
|       //      std::cout << GridLogMessage << "Grid_split: dim " <<d<<" ratio "<<ratio[d]<<" nvec "<<nvec<<" procs "<<split_grid->_processors[d]<<std::endl; | ||||
|       //      for(int v=0;v<nvec;v++){ | ||||
|       //	std::cout << "Grid_split: alldata["<<v<<"] " << alldata[v] <<std::endl; | ||||
|       //	std::cout << "Grid_split: tmpdata["<<v<<"] " << tmpdata[v] <<std::endl; | ||||
|       //      } | ||||
|       ////////////////////////////////////////// | ||||
|       //Local volume for this dimension is expanded by ratio of processor extents | ||||
|       // Number of vectors is decreased by same factor | ||||
|       // Rearrange to lexico for bigger volume | ||||
|       ////////////////////////////////////////// | ||||
|       nvec    /= ratio[d]; | ||||
|  | ||||
|       auto rdims = ldims; rdims[d]  *=   ratio[d]; | ||||
|       auto rsites= lsites*ratio[d]; | ||||
|       for(int v=0;v<nvec;v++){ | ||||
|  | ||||
| 	// For loop over each site within old subvol | ||||
| 	for(int lsite=0;lsite<lsites;lsite++){ | ||||
|  | ||||
| 	  Lexicographic::CoorFromIndex(lcoor, lsite, ldims);	   | ||||
|  | ||||
| 	  for(int r=0;r<ratio[d];r++){ // ratio*nvec terms | ||||
|  | ||||
| 	    auto rcoor = lcoor;	    rcoor[d]  += r*ldims[d]; | ||||
|  | ||||
| 	    int rsite; Lexicographic::IndexFromCoor(rcoor, rsite, rdims);	   | ||||
| 	    rsite += v * rsites; | ||||
|  | ||||
| 	    int rmul=nvec*lsites; | ||||
| 	    int vmul=     lsites; | ||||
| 	    alldata[rsite] = tmpdata[lsite+r*rmul+v*vmul]; | ||||
| 	    //	    if ( lsite==0 ) { | ||||
| 	    //	      std::cout << "Grid_split: grow alldata["<<rsite<<"] " << alldata[rsite] << " <- tmpdata["<< lsite+r*rmul+v*vmul<<"] "<<tmpdata[lsite+r*rmul+v*vmul]  <<std::endl; | ||||
| 	    //	    }	       | ||||
| 	  } | ||||
| 	} | ||||
|       } | ||||
|       ldims[d]*= ratio[d]; | ||||
|       lsites  *= ratio[d]; | ||||
|  | ||||
|       if ( split_grid->_processors[d] > 1 ) { | ||||
| 	tmpdata = alldata; | ||||
| 	split_grid->AllToAll(d,tmpdata,alldata); | ||||
|       } | ||||
|     } | ||||
|   } | ||||
|   vectorizeFromLexOrdArray(alldata,split);     | ||||
| } | ||||
|  | ||||
| template<class Vobj> | ||||
| void Grid_split(Lattice<Vobj> &full,Lattice<Vobj>   & split) | ||||
| { | ||||
|   int nvector = full._grid->_Nprocessors / split._grid->_Nprocessors; | ||||
|   std::vector<Lattice<Vobj> > full_v(nvector,full._grid); | ||||
|   for(int n=0;n<nvector;n++){ | ||||
|     full_v[n] = full; | ||||
|   } | ||||
|   Grid_split(full_v,split); | ||||
| } | ||||
|  | ||||
| template<class Vobj> | ||||
| void Grid_unsplit(std::vector<Lattice<Vobj> > & full,Lattice<Vobj>   & split) | ||||
| { | ||||
|   typedef typename Vobj::scalar_object Sobj; | ||||
|  | ||||
|   int full_vecs   = full.size(); | ||||
|  | ||||
|   assert(full_vecs>=1); | ||||
|  | ||||
|   GridBase * full_grid = full[0]._grid; | ||||
|   GridBase *split_grid = split._grid; | ||||
|  | ||||
|   int       ndim  = full_grid->_ndimension; | ||||
|   int  full_nproc = full_grid->_Nprocessors; | ||||
|   int split_nproc =split_grid->_Nprocessors; | ||||
|  | ||||
|   //////////////////////////////// | ||||
|   // Checkerboard management | ||||
|   //////////////////////////////// | ||||
|   int cb = full[0].checkerboard; | ||||
|   split.checkerboard = cb; | ||||
|  | ||||
|   ////////////////////////////// | ||||
|   // Checks | ||||
|   ////////////////////////////// | ||||
|   assert(full_grid->_ndimension==split_grid->_ndimension); | ||||
|   for(int n=0;n<full_vecs;n++){ | ||||
|     assert(full[n].checkerboard == cb); | ||||
|     for(int d=0;d<ndim;d++){ | ||||
|       assert(full[n]._grid->_gdimensions[d]==split._grid->_gdimensions[d]); | ||||
|       assert(full[n]._grid->_fdimensions[d]==split._grid->_fdimensions[d]); | ||||
|     } | ||||
|   } | ||||
|  | ||||
|   int   nvector   =full_nproc/split_nproc;  | ||||
|   assert(nvector*split_nproc==full_nproc); | ||||
|   assert(nvector == full_vecs); | ||||
|  | ||||
|   std::vector<int> ratio(ndim); | ||||
|   for(int d=0;d<ndim;d++){ | ||||
|     ratio[d] = full_grid->_processors[d]/ split_grid->_processors[d]; | ||||
|   } | ||||
|  | ||||
|   uint64_t lsites = full_grid->lSites(); | ||||
|   uint64_t     sz = lsites * nvector; | ||||
|   std::vector<Sobj> tmpdata(sz); | ||||
|   std::vector<Sobj> alldata(sz); | ||||
|   std::vector<Sobj> scalardata(lsites);  | ||||
|  | ||||
|   unvectorizeToLexOrdArray(alldata,split);     | ||||
|  | ||||
|   ///////////////////////////////////////////////////////////////// | ||||
|   // Start from split grid and work towards full grid | ||||
|   ///////////////////////////////////////////////////////////////// | ||||
|   std::vector<int> lcoor(ndim); | ||||
|   std::vector<int> rcoor(ndim); | ||||
|  | ||||
|   int nvec = 1; | ||||
|   lsites = split_grid->lSites(); | ||||
|   std::vector<int> ldims = split_grid->_ldimensions; | ||||
|  | ||||
|   //  for(int d=ndim-1;d>=0;d--){ | ||||
|   for(int d=0;d<ndim;d++){ | ||||
|  | ||||
|     if ( ratio[d] != 1 ) { | ||||
|  | ||||
|  | ||||
|       if ( split_grid->_processors[d] > 1 ) { | ||||
| 	tmpdata = alldata; | ||||
| 	split_grid->AllToAll(d,tmpdata,alldata); | ||||
|       } | ||||
|  | ||||
|       ////////////////////////////////////////// | ||||
|       //Local volume for this dimension is expanded by ratio of processor extents | ||||
|       // Number of vectors is decreased by same factor | ||||
|       // Rearrange to lexico for bigger volume | ||||
|       ////////////////////////////////////////// | ||||
|       auto rsites= lsites/ratio[d]; | ||||
|       auto rdims = ldims; rdims[d]/=ratio[d]; | ||||
|  | ||||
|       for(int v=0;v<nvec;v++){ | ||||
|  | ||||
| 	// rsite, rcoor --> smaller local volume | ||||
| 	// lsite, lcoor --> bigger original (single node?) volume | ||||
| 	// For loop over each site within smaller subvol | ||||
| 	for(int rsite=0;rsite<rsites;rsite++){ | ||||
|  | ||||
| 	  Lexicographic::CoorFromIndex(rcoor, rsite, rdims);	   | ||||
| 	  int lsite; | ||||
|  | ||||
| 	  for(int r=0;r<ratio[d];r++){  | ||||
|  | ||||
| 	    lcoor = rcoor; lcoor[d] += r*rdims[d]; | ||||
| 	    Lexicographic::IndexFromCoor(lcoor, lsite, ldims); lsite += v * lsites; | ||||
|  | ||||
| 	    int rmul=nvec*rsites; | ||||
| 	    int vmul=     rsites; | ||||
| 	    tmpdata[rsite+r*rmul+v*vmul]=alldata[lsite]; | ||||
|  | ||||
| 	  } | ||||
| 	} | ||||
|       } | ||||
|       nvec   *= ratio[d]; | ||||
|       ldims[d]=rdims[d]; | ||||
|       lsites  =rsites; | ||||
|  | ||||
|       full_grid ->AllToAll(d,tmpdata,alldata); | ||||
|     } | ||||
|   } | ||||
|  | ||||
|   lsites = full_grid->lSites(); | ||||
|   for(int v=0;v<nvector;v++){ | ||||
|     assert(v<full.size()); | ||||
|     parallel_for(int site=0;site<lsites;site++){ | ||||
|       scalardata[site] = alldata[v*lsites+site]; | ||||
|     } | ||||
|     vectorizeFromLexOrdArray(scalardata,full[v]);     | ||||
|   } | ||||
| } | ||||
|  | ||||
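Typical use of the pair, e.g. to run many solves on a smaller communicator, might look like the following; FullGrid, SplitGrid and nvector are assumed to satisfy the processor-ratio checks inside Grid_split (names illustrative):

  // Sketch only: scatter nvector full-grid sources onto the split grid, work
  // there, then gather the results back. The communication is one AllToAll
  // per dimension in each direction.
  std::vector<LatticeFermion> src(nvector, FullGrid);
  LatticeFermion              src_split(SplitGrid);

  Grid_split  (src, src_split);   // full grids -> one split-grid field
  // ... run a solver (or any local operation) on src_split ...
  Grid_unsplit(src, src_split);   // split-grid field -> full grids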
|   | ||||
| } | ||||
| #endif | ||||
|   | ||||
| @@ -62,14 +62,20 @@ namespace Grid { | ||||
|     return ret; | ||||
|   } | ||||
|  | ||||
|   template<class obj> Lattice<obj> expMat(const Lattice<obj> &rhs, ComplexD alpha, Integer Nexp = DEFAULT_MAT_EXP){ | ||||
|   template<class obj> Lattice<obj> expMat(const Lattice<obj> &rhs, RealD alpha, Integer Nexp = DEFAULT_MAT_EXP){ | ||||
|     Lattice<obj> ret(rhs._grid); | ||||
|     ret.checkerboard = rhs.checkerboard; | ||||
|     conformable(ret,rhs); | ||||
|     parallel_for(int ss=0;ss<rhs._grid->oSites();ss++){ | ||||
|       ret._odata[ss]=Exponentiate(rhs._odata[ss],alpha, Nexp); | ||||
|     } | ||||
|  | ||||
|     return ret; | ||||
|   } | ||||
|  | ||||
|  | ||||
|   | ||||
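With the scale changed to RealD, a typical call exponentiates an algebra-valued field site by site; the field type, scale and truncation order below are illustrative:

  // Sketch only: U = exp(alpha * P) evaluated with a 12-term Taylor series.
  LatticeColourMatrix P(grid);                  // e.g. a momentum / algebra field
  LatticeColourMatrix U = expMat(P, 1.0, 12);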
| @@ -50,7 +50,7 @@ namespace Grid { | ||||
|     return (status==0) ? res.get() : name ; | ||||
|   } | ||||
|    | ||||
| GridStopWatch Logger::StopWatch; | ||||
| GridStopWatch Logger::GlobalStopWatch; | ||||
| int Logger::timestamp; | ||||
| std::ostream Logger::devnull(0); | ||||
|  | ||||
| @@ -59,13 +59,15 @@ void GridLogTimestamp(int on){ | ||||
| } | ||||
|  | ||||
| Colours GridLogColours(0); | ||||
| GridLogger GridLogError(1, "Error", GridLogColours, "RED"); | ||||
| GridLogger GridLogIRL    (1, "IRL"   , GridLogColours, "NORMAL"); | ||||
| GridLogger GridLogSolver (1, "Solver", GridLogColours, "NORMAL"); | ||||
| GridLogger GridLogError  (1, "Error" , GridLogColours, "RED"); | ||||
| GridLogger GridLogWarning(1, "Warning", GridLogColours, "YELLOW"); | ||||
| GridLogger GridLogMessage(1, "Message", GridLogColours, "NORMAL"); | ||||
| GridLogger GridLogDebug(1, "Debug", GridLogColours, "PURPLE"); | ||||
| GridLogger GridLogDebug  (1, "Debug", GridLogColours, "PURPLE"); | ||||
| GridLogger GridLogPerformance(1, "Performance", GridLogColours, "GREEN"); | ||||
| GridLogger GridLogIterative(1, "Iterative", GridLogColours, "BLUE"); | ||||
| GridLogger GridLogIntegrator(1, "Integrator", GridLogColours, "BLUE"); | ||||
| GridLogger GridLogIterative  (1, "Iterative", GridLogColours, "BLUE"); | ||||
| GridLogger GridLogIntegrator (1, "Integrator", GridLogColours, "BLUE"); | ||||
|  | ||||
| void GridLogConfigure(std::vector<std::string> &logstreams) { | ||||
|   GridLogError.Active(0); | ||||
| @@ -95,7 +97,7 @@ void GridLogConfigure(std::vector<std::string> &logstreams) { | ||||
| //////////////////////////////////////////////////////////// | ||||
| void Grid_quiesce_nodes(void) { | ||||
|   int me = 0; | ||||
| #if defined(GRID_COMMS_MPI) || defined(GRID_COMMS_MPI3) || defined(GRID_COMMS_MPI3L) | ||||
| #if defined(GRID_COMMS_MPI) || defined(GRID_COMMS_MPI3) || defined(GRID_COMMS_MPIT) | ||||
|   MPI_Comm_rank(MPI_COMM_WORLD, &me); | ||||
| #endif | ||||
| #ifdef GRID_COMMS_SHMEM | ||||
|   | ||||
| @@ -85,12 +85,15 @@ class Logger { | ||||
| protected: | ||||
|   Colours &Painter; | ||||
|   int active; | ||||
|   int timing_mode; | ||||
|   static int timestamp; | ||||
|   std::string name, topName; | ||||
|   std::string COLOUR; | ||||
|  | ||||
| public: | ||||
|   static GridStopWatch StopWatch; | ||||
|   static GridStopWatch GlobalStopWatch; | ||||
|   GridStopWatch         LocalStopWatch; | ||||
|   GridStopWatch *StopWatch; | ||||
|   static std::ostream devnull; | ||||
|  | ||||
|   std::string background() {return Painter.colour["NORMAL"];} | ||||
| @@ -101,22 +104,38 @@ public: | ||||
|     name(nm), | ||||
|     topName(topNm), | ||||
|     Painter(col_class), | ||||
|     COLOUR(col) {} ; | ||||
|     timing_mode(0), | ||||
|     COLOUR(col)  | ||||
|     { | ||||
|       StopWatch = & GlobalStopWatch; | ||||
|     }; | ||||
|    | ||||
|   void Active(int on) {active = on;}; | ||||
|   int  isActive(void) {return active;}; | ||||
|   static void Timestamp(int on) {timestamp = on;}; | ||||
|   void Reset(void) {  | ||||
|     StopWatch->Reset();  | ||||
|     StopWatch->Start();  | ||||
|   } | ||||
|   void TimingMode(int on) {  | ||||
|     timing_mode = on;  | ||||
|     if(on) {  | ||||
|       StopWatch = &LocalStopWatch; | ||||
|       Reset();  | ||||
|     } | ||||
|   } | ||||
|  | ||||
|   friend std::ostream& operator<< (std::ostream& stream, Logger& log){ | ||||
|  | ||||
|     if ( log.active ) { | ||||
|       stream << log.background()<< std::setw(8) << std::left << log.topName << log.background()<< " : "; | ||||
|       stream << log.colour() << std::setw(10) << std::left << log.name << log.background() << " : "; | ||||
|       stream << log.background()<<  std::left << log.topName << log.background()<< " : "; | ||||
|       stream << log.colour() <<  std::left << log.name << log.background() << " : "; | ||||
|       if ( log.timestamp ) { | ||||
| 	StopWatch.Stop(); | ||||
| 	GridTime now = StopWatch.Elapsed(); | ||||
| 	StopWatch.Start(); | ||||
| 	stream << log.evidence()<< now << log.background() << " : " ; | ||||
| 	log.StopWatch->Stop(); | ||||
| 	GridTime now = log.StopWatch->Elapsed(); | ||||
| 	if ( log.timing_mode==1 ) log.StopWatch->Reset(); | ||||
| 	log.StopWatch->Start(); | ||||
| 	stream << log.evidence()<< std::setw(6)<<now << log.background() << " : " ; | ||||
|       } | ||||
|       stream << log.colour(); | ||||
|       return stream; | ||||
| @@ -135,6 +154,8 @@ public: | ||||
|  | ||||
| void GridLogConfigure(std::vector<std::string> &logstreams); | ||||
|  | ||||
| extern GridLogger GridLogIRL; | ||||
| extern GridLogger GridLogSolver; | ||||
| extern GridLogger GridLogError; | ||||
| extern GridLogger GridLogWarning; | ||||
| extern GridLogger GridLogMessage; | ||||
|   | ||||
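The split into a shared GlobalStopWatch and an optional per-stream LocalStopWatch means a stream can report time since its own last message rather than since program start. A minimal sketch of the intended use (stream choice illustrative):

// Sketch only: put one stream into per-message timing mode.
GridLogTimestamp(1);                                   // enable timestamps globally
GridLogIRL.TimingMode(1);                              // switch to the stream-local stopwatch and reset it
std::cout << GridLogIRL << "starting eigensolve" << std::endl;
// ... work ...
std::cout << GridLogIRL << "converged" << std::endl;   // timestamp shows time since the previous IRL message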
										
											
[File diff suppressed because it is too large]
							| @@ -27,6 +27,7 @@ directory | ||||
| #ifndef GRID_ILDG_IO_H | ||||
| #define GRID_ILDG_IO_H | ||||
|  | ||||
| #ifdef HAVE_LIME | ||||
| #include <algorithm> | ||||
| #include <fstream> | ||||
| #include <iomanip> | ||||
| @@ -37,213 +38,759 @@ directory | ||||
| #include <sys/utsname.h> | ||||
| #include <unistd.h> | ||||
|  | ||||
| #ifdef HAVE_LIME | ||||
|  | ||||
| extern "C" {  // for linkage | ||||
| //C-Lime is a must have for this functionality | ||||
| extern "C" {   | ||||
| #include "lime.h" | ||||
| } | ||||
|  | ||||
| namespace Grid { | ||||
| namespace QCD { | ||||
|  | ||||
| inline void ILDGGrid(GridBase *grid, ILDGField &header) { | ||||
|   assert(grid->_ndimension == 4);  // emit error if not | ||||
|   header.dimension.resize(4); | ||||
|   header.boundary.resize(4); | ||||
|   for (int d = 0; d < 4; d++) { | ||||
|     header.dimension[d] = grid->_fdimensions[d]; | ||||
|     // Read boundary conditions from ... ? | ||||
|     header.boundary[d] = std::string("periodic"); | ||||
|   } | ||||
| } | ||||
|   ///////////////////////////////// | ||||
|   // Encode word types as strings | ||||
|   ///////////////////////////////// | ||||
|  template<class word> inline std::string ScidacWordMnemonic(void){ return std::string("unknown"); } | ||||
|  template<> inline std::string ScidacWordMnemonic<double>  (void){ return std::string("D"); } | ||||
|  template<> inline std::string ScidacWordMnemonic<float>   (void){ return std::string("F"); } | ||||
|  template<> inline std::string ScidacWordMnemonic< int32_t>(void){ return std::string("I32_t"); } | ||||
|  template<> inline std::string ScidacWordMnemonic<uint32_t>(void){ return std::string("U32_t"); } | ||||
|  template<> inline std::string ScidacWordMnemonic< int64_t>(void){ return std::string("I64_t"); } | ||||
|  template<> inline std::string ScidacWordMnemonic<uint64_t>(void){ return std::string("U64_t"); } | ||||
|  | ||||
| inline void ILDGChecksum(uint32_t *buf, uint32_t buf_size_bytes, | ||||
|                          uint32_t &csum) { | ||||
|   BinaryIO::Uint32Checksum(buf, buf_size_bytes, csum); | ||||
| } | ||||
|   ///////////////////////////////////////// | ||||
|   // Encode a generic tensor as a string | ||||
|   ///////////////////////////////////////// | ||||
|  template<class vobj> std::string ScidacRecordTypeString(int &colors, int &spins, int & typesize,int &datacount) {  | ||||
|  | ||||
| ////////////////////////////////////////////////////////////////////// | ||||
| // Utilities ; these are QCD aware | ||||
| ////////////////////////////////////////////////////////////////////// | ||||
| template <class GaugeField> | ||||
| inline void ILDGStatistics(GaugeField &data, ILDGField &header) { | ||||
|   // How to convert data precision etc... | ||||
|   header.link_trace = Grid::QCD::WilsonLoops<PeriodicGimplR>::linkTrace(data); | ||||
|   header.plaquette = Grid::QCD::WilsonLoops<PeriodicGimplR>::avgPlaquette(data); | ||||
|   // header.polyakov = | ||||
| } | ||||
|    typedef typename getPrecision<vobj>::real_scalar_type stype; | ||||
|  | ||||
| // Forcing QCD here | ||||
| template <class fobj, class sobj> | ||||
| struct ILDGMunger { | ||||
|   void operator()(fobj &in, sobj &out, uint32_t &csum) { | ||||
|     for (int mu = 0; mu < 4; mu++) { | ||||
|       for (int i = 0; i < 3; i++) { | ||||
|         for (int j = 0; j < 3; j++) { | ||||
|           out(mu)()(i, j) = in(mu)()(i, j); | ||||
|    int _ColourN       = indexRank<ColourIndex,vobj>(); | ||||
|    int _ColourScalar  =  isScalar<ColourIndex,vobj>(); | ||||
|    int _ColourVector  =  isVector<ColourIndex,vobj>(); | ||||
|    int _ColourMatrix  =  isMatrix<ColourIndex,vobj>(); | ||||
|  | ||||
|    int _SpinN       = indexRank<SpinIndex,vobj>(); | ||||
|    int _SpinScalar  =  isScalar<SpinIndex,vobj>(); | ||||
|    int _SpinVector  =  isVector<SpinIndex,vobj>(); | ||||
|    int _SpinMatrix  =  isMatrix<SpinIndex,vobj>(); | ||||
|  | ||||
|    int _LorentzN       = indexRank<LorentzIndex,vobj>(); | ||||
|    int _LorentzScalar  =  isScalar<LorentzIndex,vobj>(); | ||||
|    int _LorentzVector  =  isVector<LorentzIndex,vobj>(); | ||||
|    int _LorentzMatrix  =  isMatrix<LorentzIndex,vobj>(); | ||||
|  | ||||
|    std::stringstream stream; | ||||
|  | ||||
|    stream << "GRID_"; | ||||
|    stream << ScidacWordMnemonic<stype>(); | ||||
|  | ||||
|    if ( _LorentzVector )   stream << "_LorentzVector"<<_LorentzN; | ||||
|    if ( _LorentzMatrix )   stream << "_LorentzMatrix"<<_LorentzN; | ||||
|  | ||||
|    if ( _SpinVector )   stream << "_SpinVector"<<_SpinN; | ||||
|    if ( _SpinMatrix )   stream << "_SpinMatrix"<<_SpinN; | ||||
|  | ||||
|    if ( _ColourVector )   stream << "_ColourVector"<<_ColourN; | ||||
|    if ( _ColourMatrix )   stream << "_ColourMatrix"<<_ColourN; | ||||
|  | ||||
|    if ( _ColourScalar && _LorentzScalar && _SpinScalar )   stream << "_Complex"; | ||||
|  | ||||
|  | ||||
|    typesize = sizeof(typename vobj::scalar_type); | ||||
|  | ||||
|    if ( _ColourMatrix ) typesize*= _ColourN*_ColourN; | ||||
|    else                 typesize*= _ColourN; | ||||
|  | ||||
|    if ( _SpinMatrix )   typesize*= _SpinN*_SpinN; | ||||
|    else                 typesize*= _SpinN; | ||||
|  | ||||
|    colors    = _ColourN; | ||||
|    spins     = _SpinN; | ||||
|    datacount = _LorentzN; | ||||
|  | ||||
|    return stream.str(); | ||||
|  } | ||||
|       } | ||||
|     } | ||||
|     ILDGChecksum((uint32_t *)&in, sizeof(in), csum); | ||||
|   | ||||
|  template<class vobj> std::string ScidacRecordTypeString(Lattice<vobj> & lat,int &colors, int &spins, int & typesize,int &datacount) {  | ||||
|    return ScidacRecordTypeString<vobj>(colors,spins,typesize,datacount); | ||||
|  }; | ||||
| }; | ||||
|  | ||||
| template <class fobj, class sobj> | ||||
| struct ILDGUnmunger { | ||||
|   void operator()(sobj &in, fobj &out, uint32_t &csum) { | ||||
|     for (int mu = 0; mu < 4; mu++) { | ||||
|       for (int i = 0; i < 3; i++) { | ||||
|         for (int j = 0; j < 3; j++) { | ||||
|           out(mu)()(i, j) = in(mu)()(i, j); | ||||
|         } | ||||
|       } | ||||
|     } | ||||
|     ILDGChecksum((uint32_t *)&out, sizeof(out), csum); | ||||
|   }; | ||||
| }; | ||||
|  | ||||
| //////////////////////////////////////////////////////////////////////////////// | ||||
| // Write and read from fstream; compute header offset for payload | ||||
| //////////////////////////////////////////////////////////////////////////////// | ||||
| enum ILDGstate {ILDGread, ILDGwrite}; | ||||
|  //////////////////////////////////////////////////////////// | ||||
|  // Helper to fill out metadata | ||||
|  //////////////////////////////////////////////////////////// | ||||
|  template<class vobj> void ScidacMetaData(Lattice<vobj> & field, | ||||
| 					  FieldMetaData &header, | ||||
| 					  scidacRecord & _scidacRecord, | ||||
| 					  scidacFile   & _scidacFile)  | ||||
|  { | ||||
|    typedef typename getPrecision<vobj>::real_scalar_type stype; | ||||
|  | ||||
|    ///////////////////////////////////// | ||||
|    // Pull Grid's metadata | ||||
|    ///////////////////////////////////// | ||||
|    PrepareMetaData(field,header); | ||||
|  | ||||
|    ///////////////////////////////////// | ||||
|    // Scidac Private File structure | ||||
|    ///////////////////////////////////// | ||||
|    _scidacFile              = scidacFile(field._grid); | ||||
|  | ||||
|    ///////////////////////////////////// | ||||
|    // Scidac Private Record structure | ||||
|    ///////////////////////////////////// | ||||
|    scidacRecord sr; | ||||
|    sr.datatype   = ScidacRecordTypeString(field,sr.colors,sr.spins,sr.typesize,sr.datacount); | ||||
|    sr.date       = header.creation_date; | ||||
|    sr.precision  = ScidacWordMnemonic<stype>(); | ||||
|    sr.recordtype = GRID_IO_FIELD; | ||||
|  | ||||
|    _scidacRecord = sr; | ||||
|  | ||||
|    //   std::cout << GridLogMessage << "Build SciDAC datatype " <<sr.datatype<<std::endl; | ||||
|  } | ||||
|   | ||||
|  /////////////////////////////////////////////////////// | ||||
|  // Scidac checksum | ||||
|  /////////////////////////////////////////////////////// | ||||
|  static int scidacChecksumVerify(scidacChecksum &scidacChecksum_,uint32_t scidac_csuma,uint32_t scidac_csumb) | ||||
|  { | ||||
|    uint32_t scidac_checksuma = stoull(scidacChecksum_.suma,0,16); | ||||
|    uint32_t scidac_checksumb = stoull(scidacChecksum_.sumb,0,16); | ||||
|    if ( scidac_csuma !=scidac_checksuma) return 0; | ||||
|    if ( scidac_csumb !=scidac_checksumb) return 0; | ||||
|    return 1; | ||||
|  } | ||||
|  | ||||
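As an illustration of what ScidacMetaData (above) assembles before a write, a hedged usage sketch; the field Umu is illustrative, and the quoted datatype string is what ScidacRecordTypeString is expected to produce rather than a guaranteed value:

  // Sketch only: build the three metadata blocks for a field before writing.
  FieldMetaData header;
  scidacRecord  record;
  scidacFile    file_md;
  ScidacMetaData(Umu, header, record, file_md);   // Umu: some Lattice field
  // record.datatype now encodes the site tensor, e.g. (expected, not guaranteed)
  // "GRID_D_LorentzVector4_ColourMatrix3" for LatticeGaugeFieldD.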
| //////////////////////////////////////////////////////////////////////////////////// | ||||
| // Lime, ILDG and Scidac I/O classes | ||||
| //////////////////////////////////////////////////////////////////////////////////// | ||||
| class GridLimeReader : public BinaryIO { | ||||
|  public: | ||||
|    /////////////////////////////////////////////////// | ||||
|    // FIXME: format for RNG? Now just binary out instead | ||||
|    /////////////////////////////////////////////////// | ||||
|  | ||||
| class ILDGIO : public BinaryIO { | ||||
|    FILE       *File; | ||||
|   LimeWriter *LimeW; | ||||
|   LimeRecordHeader *LimeHeader; | ||||
|    LimeReader *LimeR; | ||||
|    std::string filename; | ||||
|  | ||||
|  | ||||
|  public: | ||||
|   ILDGIO(std::string file, ILDGstate RW) { | ||||
|       filename = file; | ||||
|     if (RW == ILDGwrite){ | ||||
|       File = fopen(file.c_str(), "w"); | ||||
|       // check if opened correctly | ||||
|  | ||||
|       LimeW = limeCreateWriter(File); | ||||
|     } else { | ||||
|       File = fopen(file.c_str(), "r"); | ||||
|       // check if opened correctly | ||||
|  | ||||
|    ///////////////////////////////////////////// | ||||
|    // Open the file | ||||
|    ///////////////////////////////////////////// | ||||
|    void open(const std::string &_filename)  | ||||
|    { | ||||
|      filename= _filename; | ||||
|      File = fopen(filename.c_str(), "r"); | ||||
|      LimeR = limeCreateReader(File); | ||||
|    } | ||||
|    ///////////////////////////////////////////// | ||||
|    // Close the file | ||||
|    ///////////////////////////////////////////// | ||||
|    void close(void){ | ||||
|      fclose(File); | ||||
|      //     limeDestroyReader(LimeR); | ||||
|    } | ||||
|  | ||||
|   ~ILDGIO() { fclose(File); } | ||||
|   //////////////////////////////////////////// | ||||
|   // Read a generic lattice field and verify checksum | ||||
|   //////////////////////////////////////////// | ||||
|   template<class vobj> | ||||
|   void readLimeLatticeBinaryObject(Lattice<vobj> &field,std::string record_name) | ||||
|   { | ||||
|     typedef typename vobj::scalar_object sobj; | ||||
|     scidacChecksum scidacChecksum_; | ||||
|     uint32_t nersc_csum,scidac_csuma,scidac_csumb; | ||||
|  | ||||
|   int createHeader(std::string message, int MB, int ME, size_t PayloadSize, LimeWriter* L){ | ||||
|     std::string format = getFormatString<vobj>(); | ||||
|  | ||||
|     while ( limeReaderNextRecord(LimeR) == LIME_SUCCESS ) {  | ||||
|  | ||||
|       uint64_t file_bytes =limeReaderBytes(LimeR); | ||||
|  | ||||
|       //      std::cout << GridLogMessage << limeReaderType(LimeR) << " "<< file_bytes <<" bytes "<<std::endl; | ||||
|       //      std::cout << GridLogMessage<< " readLimeObject seeking "<<  record_name <<" found record :" <<limeReaderType(LimeR) <<std::endl; | ||||
|  | ||||
|       if ( !strncmp(limeReaderType(LimeR), record_name.c_str(),strlen(record_name.c_str()) )  ) { | ||||
|  | ||||
| 	//	std::cout << GridLogMessage<< " readLimeLatticeBinaryObject matches ! " <<std::endl; | ||||
|  | ||||
| 	uint64_t PayloadSize = sizeof(sobj) * field._grid->_gsites; | ||||
|  | ||||
| 	//	std::cout << "R sizeof(sobj)= " <<sizeof(sobj)<<std::endl; | ||||
| 	//	std::cout << "R Gsites " <<field._grid->_gsites<<std::endl; | ||||
| 	//	std::cout << "R Payload expected " <<PayloadSize<<std::endl; | ||||
| 	//	std::cout << "R file size " <<file_bytes <<std::endl; | ||||
|  | ||||
| 	assert(PayloadSize == file_bytes);// Must match or user error | ||||
|  | ||||
| 	uint64_t offset= ftello(File); | ||||
| 	//	std::cout << " ReadLatticeObject from offset "<<offset << std::endl; | ||||
| 	BinarySimpleMunger<sobj,sobj> munge; | ||||
| 	BinaryIO::readLatticeObject< vobj, sobj >(field, filename, munge, offset, format,nersc_csum,scidac_csuma,scidac_csumb); | ||||
|  | ||||
| 	///////////////////////////////////////////// | ||||
| 	// Insist checksum is next record | ||||
| 	///////////////////////////////////////////// | ||||
| 	readLimeObject(scidacChecksum_,std::string("scidacChecksum"),std::string(SCIDAC_CHECKSUM)); | ||||
|  | ||||
| 	///////////////////////////////////////////// | ||||
| 	// Verify checksums | ||||
| 	///////////////////////////////////////////// | ||||
| 	assert(scidacChecksumVerify(scidacChecksum_,scidac_csuma,scidac_csumb)==1); | ||||
| 	return; | ||||
|       } | ||||
|     } | ||||
|   } | ||||
|   //////////////////////////////////////////// | ||||
|   // Read a generic serialisable object | ||||
|   //////////////////////////////////////////// | ||||
|   template<class serialisable_object> | ||||
|   void readLimeObject(serialisable_object &object,std::string object_name,std::string record_name) | ||||
|   { | ||||
|     std::string xmlstring; | ||||
|     // should this be a do while; can we miss a first record?? | ||||
|     while ( limeReaderNextRecord(LimeR) == LIME_SUCCESS ) {  | ||||
|  | ||||
|       //      std::cout << GridLogMessage<< " readLimeObject seeking "<< record_name <<" found record :" <<limeReaderType(LimeR) <<std::endl; | ||||
|       uint64_t nbytes = limeReaderBytes(LimeR);//size of this record (configuration) | ||||
|  | ||||
|       if ( !strncmp(limeReaderType(LimeR), record_name.c_str(),strlen(record_name.c_str()) )  ) { | ||||
|  | ||||
| 	//	std::cout << GridLogMessage<< " readLimeObject matches ! " << record_name <<std::endl; | ||||
| 	std::vector<char> xmlc(nbytes+1,'\0'); | ||||
| 	limeReaderReadData((void *)&xmlc[0], &nbytes, LimeR);     | ||||
| 	//	std::cout << GridLogMessage<< " readLimeObject matches XML " << &xmlc[0] <<std::endl; | ||||
|  | ||||
| 	XmlReader RD(&xmlc[0],""); | ||||
| 	read(RD,object_name,object); | ||||
| 	return; | ||||
|       } | ||||
|  | ||||
|     }   | ||||
|     assert(0); | ||||
|   } | ||||
| }; | ||||
|  | ||||
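Read access mirrors the writer below: open the file, pull the serialisable records you expect, then the binary payload, whose checksum is verified against the following scidacChecksum record. A minimal sketch, with the file name and field illustrative:

  // Sketch only: read a Grid header record and its binary payload from a lime file.
  GridLimeReader RD;
  RD.open(std::string("field.scidac"));
  FieldMetaData header;
  RD.readLimeObject(header, std::string("FieldMetaData"), std::string(GRID_FORMAT));
  RD.readLimeLatticeBinaryObject(field, std::string(ILDG_BINARY_DATA));
  RD.close();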
| class GridLimeWriter : public BinaryIO { | ||||
|  public: | ||||
|    /////////////////////////////////////////////////// | ||||
|    // FIXME: format for RNG? Now just binary out instead | ||||
|    // FIXME: collective calls or not ? | ||||
|    //      : must know if I am the I/O boss | ||||
|    /////////////////////////////////////////////////// | ||||
|    FILE       *File; | ||||
|    LimeWriter *LimeW; | ||||
|    std::string filename; | ||||
|  | ||||
|    void open(const std::string &_filename) {  | ||||
|      filename= _filename; | ||||
|      File = fopen(filename.c_str(), "w"); | ||||
|      LimeW = limeCreateWriter(File); assert(LimeW != NULL ); | ||||
|    } | ||||
|    ///////////////////////////////////////////// | ||||
|    // Close the file | ||||
|    ///////////////////////////////////////////// | ||||
|    void close(void) { | ||||
|      fclose(File); | ||||
|      //  limeDestroyWriter(LimeW); | ||||
|    } | ||||
|   /////////////////////////////////////////////////////// | ||||
|   // Lime utility functions | ||||
|   /////////////////////////////////////////////////////// | ||||
|   int createLimeRecordHeader(std::string message, int MB, int ME, size_t PayloadSize) | ||||
|   { | ||||
|     LimeRecordHeader *h; | ||||
|     h = limeCreateHeader(MB, ME, const_cast<char *>(message.c_str()), PayloadSize); | ||||
|     int status = limeWriteRecordHeader(h, L); | ||||
|     if (status < 0) { | ||||
|       std::cerr << "ILDG Header error\n"; | ||||
|       return status; | ||||
|     } | ||||
|     assert(limeWriteRecordHeader(h, LimeW) >= 0); | ||||
|     limeDestroyHeader(h); | ||||
|     return LIME_SUCCESS; | ||||
|   } | ||||
|   //////////////////////////////////////////// | ||||
|   // Write a generic serialisable object | ||||
|   //////////////////////////////////////////// | ||||
|   template<class serialisable_object> | ||||
|   void writeLimeObject(int MB,int ME,serialisable_object &object,std::string object_name,std::string record_name) | ||||
|   { | ||||
|     std::string xmlstring; | ||||
|     { | ||||
|       XmlWriter WR("",""); | ||||
|       write(WR,object_name,object); | ||||
|       xmlstring = WR.XmlString(); | ||||
|     } | ||||
|     //    std::cout << "WriteLimeObject" << record_name <<std::endl; | ||||
|     uint64_t nbytes = xmlstring.size(); | ||||
|     //    std::cout << " xmlstring "<< nbytes<< " " << xmlstring <<std::endl; | ||||
|     int err; | ||||
|     LimeRecordHeader *h = limeCreateHeader(MB, ME,const_cast<char *>(record_name.c_str()), nbytes);  | ||||
|     assert(h!= NULL); | ||||
|  | ||||
|   unsigned int writeHeader(ILDGField &header) { | ||||
|     // write header in LIME | ||||
|     n_uint64_t nbytes; | ||||
|     int MB_flag = 1, ME_flag = 0; | ||||
|     err=limeWriteRecordHeader(h, LimeW);                    assert(err>=0); | ||||
|     err=limeWriteRecordData(&xmlstring[0], &nbytes, LimeW); assert(err>=0); | ||||
|     err=limeWriterCloseRecord(LimeW);                       assert(err>=0); | ||||
|     limeDestroyHeader(h); | ||||
|     //    std::cout << " File offset is now"<<ftello(File) << std::endl; | ||||
|   } | ||||
|   //////////////////////////////////////////// | ||||
|   // Write a generic lattice field and csum | ||||
|   //////////////////////////////////////////// | ||||
|   template<class vobj> | ||||
|   void writeLimeLatticeBinaryObject(Lattice<vobj> &field,std::string record_name) | ||||
|   { | ||||
|     //////////////////////////////////////////// | ||||
|     // Create record header | ||||
|     //////////////////////////////////////////// | ||||
|     typedef typename vobj::scalar_object sobj; | ||||
|     int err; | ||||
|     uint32_t nersc_csum,scidac_csuma,scidac_csumb; | ||||
|     uint64_t PayloadSize = sizeof(sobj) * field._grid->_gsites; | ||||
|     createLimeRecordHeader(record_name, 0, 0, PayloadSize); | ||||
|  | ||||
|     char message[] = "ildg-format"; | ||||
|     nbytes = strlen(message); | ||||
|     LimeHeader = limeCreateHeader(MB_flag, ME_flag, message, nbytes); | ||||
|     limeWriteRecordHeader(LimeHeader, LimeW); | ||||
|     limeDestroyHeader(LimeHeader); | ||||
|     // save the xml header here | ||||
|     // use the xml_writer to c++ streams in pugixml | ||||
|     // and convert to char message | ||||
|     limeWriteRecordData(message, &nbytes, LimeW); | ||||
|     limeWriterCloseRecord(LimeW); | ||||
|     //    std::cout << "W sizeof(sobj)"      <<sizeof(sobj)<<std::endl; | ||||
|     //    std::cout << "W Gsites "           <<field._grid->_gsites<<std::endl; | ||||
|     //    std::cout << "W Payload expected " <<PayloadSize<<std::endl; | ||||
|  | ||||
|     return 0; | ||||
|   } | ||||
|  | ||||
|   unsigned int readHeader(ILDGField &header) { | ||||
|     return 0; | ||||
|     //////////////////////////////////////////////////////////////////// | ||||
|     // NB: FILE and iostream are jointly writing disjoint sequences in the | ||||
|     // same file through different file handles (integer units). | ||||
|     //  | ||||
|     // These are both buffered, so why I think this code is right is as follows. | ||||
|     // | ||||
|     // i)  write record header to FILE *File, telegraphing the size.  | ||||
|     // ii) ftello reads the offset from FILE *File . | ||||
|     // iii) iostream / MPI Open independently seek this offset. Write sequence direct to disk. | ||||
|     //      Closes iostream and flushes. | ||||
|     // iv) fseek on FILE * to end of this disjoint section. | ||||
|     //  v) Continue writing scidac record. | ||||
|     //////////////////////////////////////////////////////////////////// | ||||
|     uint64_t offset = ftello(File); | ||||
|     //    std::cout << " Writing to offset "<<offset << std::endl; | ||||
|     std::string format = getFormatString<vobj>(); | ||||
|     BinarySimpleMunger<sobj,sobj> munge; | ||||
|     BinaryIO::writeLatticeObject<vobj,sobj>(field, filename, munge, offset, format,nersc_csum,scidac_csuma,scidac_csumb); | ||||
|     //    fseek(File,0,SEEK_END);    offset = ftello(File);std::cout << " offset now "<<offset << std::endl; | ||||
|     err=limeWriterCloseRecord(LimeW);  assert(err>=0); | ||||
|  | ||||
|     //////////////////////////////////////// | ||||
|     // Write checksum element, propagating forward from the BinaryIO | ||||
|     // Always pair a checksum with a binary object, and close message | ||||
|     //////////////////////////////////////// | ||||
|     scidacChecksum checksum; | ||||
|     std::stringstream streama; streama << std::hex << scidac_csuma; | ||||
|     std::stringstream streamb; streamb << std::hex << scidac_csumb; | ||||
|     checksum.suma= streama.str(); | ||||
|     checksum.sumb= streamb.str(); | ||||
|     //    std::cout << GridLogMessage<<" writing scidac checksums "<<std::hex<<scidac_csuma<<"/"<<scidac_csumb<<std::dec<<std::endl; | ||||
|     writeLimeObject(0,1,checksum,std::string("scidacChecksum"),std::string(SCIDAC_CHECKSUM)); | ||||
|   } | ||||
| }; | ||||
|  | ||||
| class ScidacWriter : public GridLimeWriter { | ||||
|  public: | ||||
|  | ||||
|    template<class SerialisableUserFile> | ||||
|    void writeScidacFileRecord(GridBase *grid,SerialisableUserFile &_userFile) | ||||
|    { | ||||
|      scidacFile    _scidacFile(grid); | ||||
|      writeLimeObject(1,0,_scidacFile,_scidacFile.SerialisableClassName(),std::string(SCIDAC_PRIVATE_FILE_XML)); | ||||
|      writeLimeObject(0,1,_userFile,_userFile.SerialisableClassName(),std::string(SCIDAC_FILE_XML)); | ||||
|    } | ||||
|   //////////////////////////////////////////////// | ||||
|   // Write generic lattice field in scidac format | ||||
|   //////////////////////////////////////////////// | ||||
|   template <class vobj, class userRecord> | ||||
|   void writeScidacFieldRecord(Lattice<vobj> &field,userRecord _userRecord)  | ||||
|   { | ||||
|     GridBase * grid = field._grid; | ||||
|  | ||||
|     //////////////////////////////////////// | ||||
|     // fill the Grid header | ||||
|     //////////////////////////////////////// | ||||
|     FieldMetaData header; | ||||
|     scidacRecord  _scidacRecord; | ||||
|     scidacFile    _scidacFile; | ||||
|  | ||||
|     ScidacMetaData(field,header,_scidacRecord,_scidacFile); | ||||
|  | ||||
|     ////////////////////////////////////////////// | ||||
|     // Fill the Lime file record by record | ||||
|     ////////////////////////////////////////////// | ||||
|     writeLimeObject(1,0,header ,std::string("FieldMetaData"),std::string(GRID_FORMAT)); // Open message  | ||||
|     writeLimeObject(0,0,_userRecord,_userRecord.SerialisableClassName(),std::string(SCIDAC_RECORD_XML)); | ||||
|     writeLimeObject(0,0,_scidacRecord,_scidacRecord.SerialisableClassName(),std::string(SCIDAC_PRIVATE_RECORD_XML)); | ||||
|     writeLimeLatticeBinaryObject(field,std::string(ILDG_BINARY_DATA));      // Closes message with checksum | ||||
|   } | ||||
| }; | ||||
|  | ||||
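Putting the writer together, a field is emitted as file-level records followed by one field record; the file name and the user-defined serialisable structures are illustrative:

  // Sketch only: SciDAC-format output of a single lattice field.
  ScidacWriter WR;
  WR.open(std::string("field.scidac"));
  WR.writeScidacFileRecord (field._grid, userFileXml);   // private + user file XML
  WR.writeScidacFieldRecord(field, userRecordXml);       // Grid header, user record, payload, checksum
  WR.close();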
|  | ||||
| class ScidacReader : public GridLimeReader { | ||||
|  public: | ||||
|  | ||||
|    template<class SerialisableUserFile> | ||||
|    void readScidacFileRecord(GridBase *grid,SerialisableUserFile &_userFile) | ||||
|    { | ||||
|      scidacFile    _scidacFile(grid); | ||||
|      readLimeObject(_scidacFile,_scidacFile.SerialisableClassName(),std::string(SCIDAC_PRIVATE_FILE_XML)); | ||||
|      readLimeObject(_userFile,_userFile.SerialisableClassName(),std::string(SCIDAC_FILE_XML)); | ||||
|    } | ||||
|   //////////////////////////////////////////////// | ||||
|   // Read generic lattice field in scidac format | ||||
|   //////////////////////////////////////////////// | ||||
|   template <class vobj, class userRecord> | ||||
|   void readScidacFieldRecord(Lattice<vobj> &field,userRecord &_userRecord)  | ||||
|   { | ||||
|     typedef typename vobj::scalar_object sobj; | ||||
|     GridBase * grid = field._grid; | ||||
|  | ||||
|     //////////////////////////////////////// | ||||
|     // fill the Grid header | ||||
|     //////////////////////////////////////// | ||||
|     FieldMetaData header; | ||||
|     scidacRecord  _scidacRecord; | ||||
|     scidacFile    _scidacFile; | ||||
|  | ||||
|     ////////////////////////////////////////////// | ||||
|     // Fill the Lime file record by record | ||||
|     ////////////////////////////////////////////// | ||||
|     readLimeObject(header ,std::string("FieldMetaData"),std::string(GRID_FORMAT)); // Open message  | ||||
|     readLimeObject(_userRecord,_userRecord.SerialisableClassName(),std::string(SCIDAC_RECORD_XML)); | ||||
|     readLimeObject(_scidacRecord,_scidacRecord.SerialisableClassName(),std::string(SCIDAC_PRIVATE_RECORD_XML)); | ||||
|     readLimeLatticeBinaryObject(field,std::string(ILDG_BINARY_DATA)); | ||||
|   } | ||||
|   void skipPastBinaryRecord(void) { | ||||
|     std::string rec_name(ILDG_BINARY_DATA); | ||||
|     while ( limeReaderNextRecord(LimeR) == LIME_SUCCESS ) {  | ||||
|       if ( !strncmp(limeReaderType(LimeR), rec_name.c_str(),strlen(rec_name.c_str()) )  ) { | ||||
| 	skipPastObjectRecord(std::string(SCIDAC_CHECKSUM)); | ||||
| 	return; | ||||
|       } | ||||
|     }     | ||||
|   } | ||||
|   void skipPastObjectRecord(std::string rec_name) { | ||||
|     while ( limeReaderNextRecord(LimeR) == LIME_SUCCESS ) {  | ||||
|       if ( !strncmp(limeReaderType(LimeR), rec_name.c_str(),strlen(rec_name.c_str()) )  ) { | ||||
| 	return; | ||||
|       } | ||||
|     } | ||||
|   } | ||||
|   void skipScidacFieldRecord() { | ||||
|     skipPastObjectRecord(std::string(GRID_FORMAT)); | ||||
|     skipPastObjectRecord(std::string(SCIDAC_RECORD_XML)); | ||||
|     skipPastObjectRecord(std::string(SCIDAC_PRIVATE_RECORD_XML)); | ||||
|     skipPastBinaryRecord(); | ||||
|   } | ||||
| }; | ||||
|  | ||||
|  | ||||
| class IldgWriter : public ScidacWriter { | ||||
|  public: | ||||
|  | ||||
|   /////////////////////////////////// | ||||
|   // A little helper | ||||
|   /////////////////////////////////// | ||||
|   void writeLimeIldgLFN(std::string &LFN) | ||||
|   { | ||||
|     uint64_t PayloadSize = LFN.size(); | ||||
|     int err; | ||||
|     createLimeRecordHeader(ILDG_DATA_LFN, 0 , 0, PayloadSize); | ||||
|     err=limeWriteRecordData(const_cast<char*>(LFN.c_str()), &PayloadSize,LimeW); assert(err>=0); | ||||
|     err=limeWriterCloseRecord(LimeW); assert(err>=0); | ||||
|   } | ||||
|  | ||||
|   //////////////////////////////////////////////////////////////// | ||||
|   // Special ILDG operations ; gauge configs only. | ||||
|   // Don't require scidac records EXCEPT checksum | ||||
|   // Use Grid MetaData object if present. | ||||
|   //////////////////////////////////////////////////////////////// | ||||
|   template <class vsimd> | ||||
|   uint32_t readConfiguration(Lattice<iLorentzColourMatrix<vsimd> > &Umu) { | ||||
|     typedef Lattice<iLorentzColourMatrix<vsimd> > GaugeField; | ||||
|     typedef LorentzColourMatrixD sobjd; | ||||
|     typedef LorentzColourMatrixF sobjf; | ||||
|     typedef iLorentzColourMatrix<vsimd> itype; | ||||
|     typedef LorentzColourMatrix sobj; | ||||
|     GridBase *grid = Umu._grid; | ||||
|  | ||||
|     ILDGField header; | ||||
|     readHeader(header); | ||||
|  | ||||
|     // now just the conf, ignore the header | ||||
|     std::string format = std::string("IEEE64BIG"); | ||||
|     do {limeReaderNextRecord(LimeR);} | ||||
|     while (strncmp(limeReaderType(LimeR), "ildg-binary-data",16)); | ||||
|  | ||||
|     n_uint64_t nbytes = limeReaderBytes(LimeR);//size of this record (configuration) | ||||
|  | ||||
|  | ||||
|     ILDGtype ILDGt(true, LimeR); | ||||
|     // this is special for double prec data, just for the moment | ||||
|     uint32_t csum = BinaryIO::readObjectParallel< itype, sobjd >( | ||||
|        Umu, filename, ILDGMunger<sobjd, sobj>(), 0, format, ILDGt); | ||||
|  | ||||
|     // Check configuration  | ||||
|     // todo | ||||
|  | ||||
|     return csum; | ||||
|   } | ||||
|  | ||||
|   template <class vsimd> | ||||
|   uint32_t writeConfiguration(Lattice<iLorentzColourMatrix<vsimd> > &Umu, std::string format) { | ||||
|   void writeConfiguration(Lattice<iLorentzColourMatrix<vsimd> > &Umu,int sequence,std::string LFN,std::string description)  | ||||
|   { | ||||
|     GridBase * grid = Umu._grid; | ||||
|     typedef Lattice<iLorentzColourMatrix<vsimd> > GaugeField; | ||||
|     typedef iLorentzColourMatrix<vsimd> vobj; | ||||
|     typedef typename vobj::scalar_object sobj; | ||||
|     typedef LorentzColourMatrixD fobj; | ||||
|  | ||||
|     ILDGField header; | ||||
|     // fill the header | ||||
|     header.floating_point = format; | ||||
|     //////////////////////////////////////// | ||||
|     // fill the Grid header | ||||
|     //////////////////////////////////////// | ||||
|     FieldMetaData header; | ||||
|     scidacRecord  _scidacRecord; | ||||
|     scidacFile    _scidacFile; | ||||
|  | ||||
|     ILDGUnmunger<fobj, sobj> munge; | ||||
|     unsigned int offset = writeHeader(header); | ||||
|     ScidacMetaData(Umu,header,_scidacRecord,_scidacFile); | ||||
|  | ||||
|     BinaryIO::Uint32Checksum<vobj, fobj>(Umu, munge, header.checksum); | ||||
|     std::string format = header.floating_point; | ||||
|     header.ensemble_id    = description; | ||||
|     header.ensemble_label = description; | ||||
|     header.sequence_number = sequence; | ||||
|     header.ildg_lfn = LFN; | ||||
|  | ||||
|     // Write data record header | ||||
|     n_uint64_t PayloadSize = sizeof(fobj) * Umu._grid->_gsites; | ||||
|     createHeader("ildg-binary-data", 0, 1, PayloadSize, LimeW); | ||||
|     assert ( (format == std::string("IEEE32BIG"))   | ||||
|            ||(format == std::string("IEEE64BIG")) ); | ||||
|  | ||||
|     ILDGtype ILDGt(true, LimeW); | ||||
|     uint32_t csum = BinaryIO::writeObjectParallel<vobj, fobj>( | ||||
|        Umu, filename, munge, 0, header.floating_point, ILDGt); | ||||
|     ////////////////////////////////////////////////////// | ||||
|     // Fill ILDG header data struct | ||||
|     ////////////////////////////////////////////////////// | ||||
|     ildgFormat ildgfmt ; | ||||
|     ildgfmt.field     = std::string("su3gauge"); | ||||
|  | ||||
|     limeWriterCloseRecord(LimeW); | ||||
|     if ( format == std::string("IEEE32BIG") ) {  | ||||
|       ildgfmt.precision = 32; | ||||
|     } else {  | ||||
|       ildgfmt.precision = 64; | ||||
|     } | ||||
|     ildgfmt.version = 1.0; | ||||
|     ildgfmt.lx = header.dimension[0]; | ||||
|     ildgfmt.ly = header.dimension[1]; | ||||
|     ildgfmt.lz = header.dimension[2]; | ||||
|     ildgfmt.lt = header.dimension[3]; | ||||
|     assert(header.nd==4); | ||||
|     assert(header.nd==header.dimension.size()); | ||||
|  | ||||
|     // Last record | ||||
|     // the logical file name LNF | ||||
|     // look into documentation on how to generate this string | ||||
|     std::string LNF = "empty";  | ||||
|     ////////////////////////////////////////////////////////////////////////////// | ||||
|     // Fill the USQCD info field | ||||
|     ////////////////////////////////////////////////////////////////////////////// | ||||
|     usqcdInfo info; | ||||
|     info.version=1.0; | ||||
|     info.plaq   = header.plaquette; | ||||
|     info.linktr = header.link_trace; | ||||
|  | ||||
|     std::cout << GridLogMessage << " Writing config; IldgIO "<<std::endl; | ||||
|     ////////////////////////////////////////////// | ||||
|     // Fill the Lime file record by record | ||||
|     ////////////////////////////////////////////// | ||||
|     writeLimeObject(1,0,header ,std::string("FieldMetaData"),std::string(GRID_FORMAT)); // Open message  | ||||
|     writeLimeObject(0,0,_scidacFile,_scidacFile.SerialisableClassName(),std::string(SCIDAC_PRIVATE_FILE_XML)); | ||||
|     writeLimeObject(0,1,info,info.SerialisableClassName(),std::string(SCIDAC_FILE_XML)); | ||||
|     writeLimeObject(1,0,_scidacRecord,_scidacRecord.SerialisableClassName(),std::string(SCIDAC_PRIVATE_RECORD_XML)); | ||||
|     writeLimeObject(0,0,info,info.SerialisableClassName(),std::string(SCIDAC_RECORD_XML)); | ||||
|     writeLimeObject(0,0,ildgfmt,std::string("ildgFormat")   ,std::string(ILDG_FORMAT)); // rec | ||||
|     writeLimeIldgLFN(header.ildg_lfn);                                                 // rec | ||||
|     writeLimeLatticeBinaryObject(Umu,std::string(ILDG_BINARY_DATA));      // Closes message with checksum | ||||
|     //    limeDestroyWriter(LimeW); | ||||
|     fclose(File); | ||||
|   } | ||||
| }; | ||||
|  | ||||
| class IldgReader : public GridLimeReader { | ||||
|  public: | ||||
|  | ||||
|   //////////////////////////////////////////////////////////////// | ||||
|   // Read either Grid/SciDAC/ILDG configuration | ||||
|   // Don't require scidac records EXCEPT checksum | ||||
|   // Use Grid MetaData object if present. | ||||
|   // Else use ILDG MetaData object if present. | ||||
|   // Else use SciDAC MetaData object if present. | ||||
|   //////////////////////////////////////////////////////////////// | ||||
|   template <class vsimd> | ||||
|   void readConfiguration(Lattice<iLorentzColourMatrix<vsimd> > &Umu, FieldMetaData &FieldMetaData_) { | ||||
|  | ||||
|     typedef Lattice<iLorentzColourMatrix<vsimd> > GaugeField; | ||||
|     typedef typename GaugeField::vector_object  vobj; | ||||
|     typedef typename vobj::scalar_object sobj; | ||||
|  | ||||
|     typedef LorentzColourMatrixF fobj; | ||||
|     typedef LorentzColourMatrixD dobj; | ||||
|  | ||||
|     GridBase *grid = Umu._grid; | ||||
|  | ||||
|     std::vector<int> dims = Umu._grid->FullDimensions(); | ||||
|  | ||||
|     assert(dims.size()==4); | ||||
|  | ||||
|     // Metadata holders | ||||
|     ildgFormat     ildgFormat_    ; | ||||
|     std::string    ildgLFN_       ; | ||||
|     scidacChecksum scidacChecksum_;  | ||||
|     usqcdInfo      usqcdInfo_     ; | ||||
|  | ||||
|     // track what we read from file | ||||
|     int found_ildgFormat    =0; | ||||
|     int found_ildgLFN       =0; | ||||
|     int found_scidacChecksum=0; | ||||
|     int found_usqcdInfo     =0; | ||||
|     int found_ildgBinary =0; | ||||
|     int found_FieldMetaData =0; | ||||
|  | ||||
|     uint32_t nersc_csum; | ||||
|     uint32_t scidac_csuma; | ||||
|     uint32_t scidac_csumb; | ||||
|  | ||||
|     // Binary format | ||||
|     std::string format; | ||||
|  | ||||
|     ////////////////////////////////////////////////////////////////////////// | ||||
|     // Loop over all records | ||||
|     // -- Record order is only weakly guaranteed, except that the ILDG header precedes the binary section. | ||||
|     // -- Run like an event loop. | ||||
|     // -- Impose a trust hierarchy: Grid metadata takes precedence, then ILDG, and failing | ||||
|     //    that SciDAC.  | ||||
|     // -- Insist on a SciDAC checksum record. | ||||
|     ////////////////////////////////////////////////////////////////////////// | ||||
|  | ||||
|     while ( limeReaderNextRecord(LimeR) == LIME_SUCCESS ) {  | ||||
|  | ||||
|       uint64_t nbytes = limeReaderBytes(LimeR);//size of this record (configuration) | ||||
|        | ||||
|       ////////////////////////////////////////////////////////////////// | ||||
|       // If not BINARY_DATA read a string and parse | ||||
|       ////////////////////////////////////////////////////////////////// | ||||
|       if ( strncmp(limeReaderType(LimeR), ILDG_BINARY_DATA,strlen(ILDG_BINARY_DATA) )  ) { | ||||
| 	 | ||||
| 	// Copy out the string | ||||
| 	std::vector<char> xmlc(nbytes+1,'\0'); | ||||
| 	limeReaderReadData((void *)&xmlc[0], &nbytes, LimeR);     | ||||
| 	//	std::cout << GridLogMessage<< "Non binary record :" <<limeReaderType(LimeR) <<std::endl; //<<"\n"<<(&xmlc[0])<<std::endl; | ||||
|  | ||||
| 	////////////////////////////////// | ||||
| 	// ILDG format record | ||||
| 	if ( !strncmp(limeReaderType(LimeR), ILDG_FORMAT,strlen(ILDG_FORMAT)) ) {  | ||||
|  | ||||
| 	  XmlReader RD(&xmlc[0],""); | ||||
| 	  read(RD,"ildgFormat",ildgFormat_); | ||||
|  | ||||
| 	  if ( ildgFormat_.precision == 64 ) format = std::string("IEEE64BIG"); | ||||
| 	  if ( ildgFormat_.precision == 32 ) format = std::string("IEEE32BIG"); | ||||
|  | ||||
| 	  assert( ildgFormat_.lx == dims[0]); | ||||
| 	  assert( ildgFormat_.ly == dims[1]); | ||||
| 	  assert( ildgFormat_.lz == dims[2]); | ||||
| 	  assert( ildgFormat_.lt == dims[3]); | ||||
|  | ||||
| 	  found_ildgFormat = 1; | ||||
| 	} | ||||
|  | ||||
| 	if ( !strncmp(limeReaderType(LimeR), ILDG_DATA_LFN,strlen(ILDG_DATA_LFN)) ) { | ||||
| 	  FieldMetaData_.ildg_lfn = std::string(&xmlc[0]); | ||||
| 	  found_ildgLFN = 1; | ||||
| 	} | ||||
|  | ||||
| 	if ( !strncmp(limeReaderType(LimeR), GRID_FORMAT,strlen(GRID_FORMAT)) ) {  | ||||
|  | ||||
| 	  XmlReader RD(&xmlc[0],""); | ||||
| 	  read(RD,"FieldMetaData",FieldMetaData_); | ||||
|  | ||||
| 	  format = FieldMetaData_.floating_point; | ||||
|  | ||||
| 	  assert(FieldMetaData_.dimension[0] == dims[0]); | ||||
| 	  assert(FieldMetaData_.dimension[1] == dims[1]); | ||||
| 	  assert(FieldMetaData_.dimension[2] == dims[2]); | ||||
| 	  assert(FieldMetaData_.dimension[3] == dims[3]); | ||||
|  | ||||
| 	  found_FieldMetaData = 1; | ||||
| 	} | ||||
|  | ||||
| 	if ( !strncmp(limeReaderType(LimeR), SCIDAC_RECORD_XML,strlen(SCIDAC_RECORD_XML)) ) {  | ||||
| 	  std::string xmls(&xmlc[0]); | ||||
| 	  // is it a USQCD info field | ||||
| 	  if ( xmls.find(std::string("usqcdInfo")) != std::string::npos ) {  | ||||
| 	    //	    std::cout << GridLogMessage<<"...found a usqcdInfo field"<<std::endl; | ||||
| 	    XmlReader RD(&xmlc[0],""); | ||||
| 	    read(RD,"usqcdInfo",usqcdInfo_); | ||||
| 	    found_usqcdInfo = 1; | ||||
| 	  } | ||||
| 	} | ||||
|  | ||||
| 	if ( !strncmp(limeReaderType(LimeR), SCIDAC_CHECKSUM,strlen(SCIDAC_CHECKSUM)) ) {  | ||||
| 	  XmlReader RD(&xmlc[0],""); | ||||
| 	  read(RD,"scidacChecksum",scidacChecksum_); | ||||
| 	  found_scidacChecksum = 1; | ||||
| 	} | ||||
|  | ||||
|       } else {   | ||||
| 	///////////////////////////////// | ||||
| 	// Binary data | ||||
| 	///////////////////////////////// | ||||
| 	std::cout << GridLogMessage << "ILDG Binary record found : "  ILDG_BINARY_DATA << std::endl; | ||||
| 	uint64_t offset= ftello(File); | ||||
| 	if ( format == std::string("IEEE64BIG") ) { | ||||
| 	  GaugeSimpleMunger<dobj, sobj> munge; | ||||
| 	  BinaryIO::readLatticeObject< vobj, dobj >(Umu, filename, munge, offset, format,nersc_csum,scidac_csuma,scidac_csumb); | ||||
| 	} else {  | ||||
| 	  GaugeSimpleMunger<fobj, sobj> munge; | ||||
| 	  BinaryIO::readLatticeObject< vobj, fobj >(Umu, filename, munge, offset, format,nersc_csum,scidac_csuma,scidac_csumb); | ||||
| 	} | ||||
|  | ||||
| 	found_ildgBinary = 1; | ||||
|       } | ||||
|  | ||||
|     } | ||||
|  | ||||
|     ////////////////////////////////////////////////////// | ||||
|     // Minimally must find binary segment and checksum | ||||
|     // Since this is an ILDG reader require ILDG format | ||||
|     ////////////////////////////////////////////////////// | ||||
|     assert(found_ildgBinary); | ||||
|     assert(found_ildgFormat); | ||||
|     assert(found_scidacChecksum); | ||||
|  | ||||
|     // Must find something with the lattice dimensions | ||||
|     assert(found_FieldMetaData||found_ildgFormat); | ||||
|  | ||||
|     if ( found_FieldMetaData ) { | ||||
|  | ||||
|       std::cout << GridLogMessage<<"Grid MetaData was record found: configuration was probably written by Grid ! Yay ! "<<std::endl; | ||||
|  | ||||
|     } else {  | ||||
|  | ||||
|       assert(found_ildgFormat); | ||||
|       assert ( ildgFormat_.field == std::string("su3gauge") ); | ||||
|  | ||||
|       /////////////////////////////////////////////////////////////////////////////////////// | ||||
|       // Populate our Grid metadata as best we can | ||||
|       /////////////////////////////////////////////////////////////////////////////////////// | ||||
|  | ||||
|       std::ostringstream vers; vers << ildgFormat_.version; | ||||
|       FieldMetaData_.hdr_version = vers.str(); | ||||
|       FieldMetaData_.data_type = std::string("4D_SU3_GAUGE_3X3"); | ||||
|  | ||||
|       FieldMetaData_.nd=4; | ||||
|       FieldMetaData_.dimension.resize(4); | ||||
|  | ||||
|       FieldMetaData_.dimension[0] = ildgFormat_.lx ; | ||||
|       FieldMetaData_.dimension[1] = ildgFormat_.ly ; | ||||
|       FieldMetaData_.dimension[2] = ildgFormat_.lz ; | ||||
|       FieldMetaData_.dimension[3] = ildgFormat_.lt ; | ||||
|  | ||||
|       if ( found_usqcdInfo ) {  | ||||
| 	FieldMetaData_.plaquette = usqcdInfo_.plaq; | ||||
| 	FieldMetaData_.link_trace= usqcdInfo_.linktr; | ||||
| 	std::cout << GridLogMessage <<"This configuration was probably written by USQCD "<<std::endl; | ||||
| 	std::cout << GridLogMessage <<"USQCD xml record Plaquette : "<<FieldMetaData_.plaquette<<std::endl; | ||||
| 	std::cout << GridLogMessage <<"USQCD xml record LinkTrace : "<<FieldMetaData_.link_trace<<std::endl; | ||||
|       } else {  | ||||
| 	FieldMetaData_.plaquette = 0.0; | ||||
| 	FieldMetaData_.link_trace= 0.0; | ||||
| 	std::cout << GridLogWarning << "This configuration is unsafe: there are no plaquette records to verify it !!! "<<std::endl; | ||||
|       } | ||||
|     } | ||||
|  | ||||
|     //////////////////////////////////////////////////////////// | ||||
|     // Really really want to mandate a scidac checksum | ||||
|     //////////////////////////////////////////////////////////// | ||||
|     if ( found_scidacChecksum ) { | ||||
|       FieldMetaData_.scidac_checksuma = stoull(scidacChecksum_.suma,0,16); | ||||
|       FieldMetaData_.scidac_checksumb = stoull(scidacChecksum_.sumb,0,16); | ||||
|       scidacChecksumVerify(scidacChecksum_,scidac_csuma,scidac_csumb); | ||||
|       assert( scidac_csuma ==FieldMetaData_.scidac_checksuma); | ||||
|       assert( scidac_csumb ==FieldMetaData_.scidac_checksumb); | ||||
|       std::cout << GridLogMessage<<"SciDAC checksums match " << std::endl; | ||||
|     } else {  | ||||
|       std::cout << GridLogWarning<<"SciDAC checksums not found. This is unsafe. " << std::endl; | ||||
|       assert(0); // Should we always insist on a checksum ? | ||||
|     } | ||||
|  | ||||
|     if ( found_FieldMetaData || found_usqcdInfo ) { | ||||
|       FieldMetaData checker; | ||||
|       GaugeStatistics(Umu,checker); | ||||
|       assert(fabs(checker.plaquette  - FieldMetaData_.plaquette )<1.0e-5); | ||||
|       assert(fabs(checker.link_trace - FieldMetaData_.link_trace)<1.0e-5); | ||||
|       std::cout << GridLogMessage<<"Plaquette and link trace match " << std::endl; | ||||
|     } | ||||
|   } | ||||
|  }; | ||||
|  | ||||
| }} | ||||
|  | ||||
| //HAVE_LIME | ||||
| #endif | ||||
|   | ||||
| @@ -34,47 +34,203 @@ extern "C" { // for linkage | ||||
|  | ||||
| namespace Grid { | ||||
|  | ||||
| ///////////////////////////////////////////////////////////////////////////////// | ||||
| // Data representation of records that enter ILDG and SciDac formats | ||||
| ///////////////////////////////////////////////////////////////////////////////// | ||||
|  | ||||
| #define GRID_FORMAT      "grid-format" | ||||
| #define ILDG_FORMAT      "ildg-format" | ||||
| #define ILDG_BINARY_DATA "ildg-binary-data" | ||||
| #define ILDG_DATA_LFN    "ildg-data-lfn" | ||||
| #define SCIDAC_CHECKSUM           "scidac-checksum" | ||||
| #define SCIDAC_PRIVATE_FILE_XML   "scidac-private-file-xml" | ||||
| #define SCIDAC_FILE_XML           "scidac-file-xml" | ||||
| #define SCIDAC_PRIVATE_RECORD_XML "scidac-private-record-xml" | ||||
| #define SCIDAC_RECORD_XML         "scidac-record-xml" | ||||
| #define SCIDAC_BINARY_DATA        "scidac-binary-data" | ||||
| // Unused SciDAC record names; kept in case we later move to support this functionality | ||||
| #define SCIDAC_SITELIST           "scidac-sitelist" | ||||
|  | ||||
|   //////////////////////////////////////////////////////////// | ||||
|   const int GRID_IO_SINGLEFILE = 0; // hardcode lift from QIO compat | ||||
|   const int GRID_IO_MULTIFILE  = 1; // hardcode lift from QIO compat | ||||
|   const int GRID_IO_FIELD      = 0; // hardcode lift from QIO compat | ||||
|   const int GRID_IO_GLOBAL     = 1; // hardcode lift from QIO compat | ||||
|   //////////////////////////////////////////////////////////// | ||||
|  | ||||
| ///////////////////////////////////////////////////////////////////////////////// | ||||
| // QIO uses mandatory "private" records with a fixed format. | ||||
| // The private records are in principle "opaque", but they cannot change now without breaking existing  | ||||
| // file compatibility, so it should be safe to assume the undocumented but de facto file structure. | ||||
| ///////////////////////////////////////////////////////////////////////////////// | ||||
|  | ||||
| struct emptyUserRecord : Serializable {  | ||||
|   GRID_SERIALIZABLE_CLASS_MEMBERS(emptyUserRecord,int,dummy); | ||||
|   emptyUserRecord() { dummy=0; }; | ||||
| }; | ||||
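| // (Note: emptyUserRecord is a placeholder for writers that carry no per-record user metadata; | ||||
| //  it serialises as a single dummy int.) | ||||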
|  | ||||
| //////////////////////// | ||||
| // Scidac private file xml | ||||
| // <?xml version="1.0" encoding="UTF-8"?><scidacFile><version>1.1</version><spacetime>4</spacetime><dims>16 16 16 32 </dims><volfmt>0</volfmt></scidacFile> | ||||
| //////////////////////// | ||||
| struct scidacFile : Serializable { | ||||
|  public: | ||||
|   GRID_SERIALIZABLE_CLASS_MEMBERS(scidacFile, | ||||
|                                   double, version, | ||||
|                                   int, spacetime, | ||||
| 				  std::string, dims, // must convert to int | ||||
|                                   int, volfmt); | ||||
|  | ||||
|   std::vector<int> getDimensions(void) {  | ||||
|     std::stringstream stream(dims); | ||||
|     std::vector<int> dimensions; | ||||
|     int n; | ||||
|     while(stream >> n){ | ||||
|       dimensions.push_back(n); | ||||
|     } | ||||
|     return dimensions; | ||||
|   } | ||||
|  | ||||
|   void setDimensions(std::vector<int> dimensions) {  | ||||
|     char delimiter = ' '; | ||||
|     std::stringstream stream; | ||||
|     for(int i=0;i<dimensions.size();i++){  | ||||
|       stream << dimensions[i]; | ||||
|       if ( i != dimensions.size()-1) {  | ||||
| 	stream << delimiter <<std::endl; | ||||
|       } | ||||
|     } | ||||
|     dims = stream.str(); | ||||
|   } | ||||
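|   // Example (hypothetical values): setDimensions({16,16,16,32}) stores the whitespace-separated | ||||
|   // string "16 16 16 32", and getDimensions() parses it back to {16,16,16,32}. | ||||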
|  | ||||
|   // Constructor provides Grid | ||||
|   scidacFile() =default; // default constructor | ||||
|   scidacFile(GridBase * grid){ | ||||
|     version      = 1.0; | ||||
|     spacetime    = grid->_ndimension; | ||||
|     setDimensions(grid->FullDimensions());  | ||||
|     volfmt       = GRID_IO_SINGLEFILE; | ||||
|   } | ||||
|  | ||||
| }; | ||||
|  | ||||
| /////////////////////////////////////////////////////////////////////// | ||||
| // scidac-private-record-xml : example | ||||
| // <scidacRecord> | ||||
| // <version>1.1</version><date>Tue Jul 26 21:14:44 2011 UTC</date><recordtype>0</recordtype> | ||||
| // <datatype>QDP_D3_ColorMatrix</datatype><precision>D</precision><colors>3</colors><spins>4</spins> | ||||
| // <typesize>144</typesize><datacount>4</datacount> | ||||
| // </scidacRecord> | ||||
| /////////////////////////////////////////////////////////////////////// | ||||
|  | ||||
| struct scidacRecord : Serializable { | ||||
|  public: | ||||
|   GRID_SERIALIZABLE_CLASS_MEMBERS(scidacRecord, | ||||
|                                   double, version, | ||||
|                                   std::string, date, | ||||
| 				  int, recordtype, | ||||
| 				  std::string, datatype, | ||||
| 				  std::string, precision, | ||||
| 				  int, colors, | ||||
| 				  int, spins, | ||||
| 				  int, typesize, | ||||
| 				  int, datacount); | ||||
|  | ||||
|   scidacRecord() { version =1.0; } | ||||
|  | ||||
| }; | ||||
|  | ||||
| //////////////////////// | ||||
| // ILDG format | ||||
| //////////////////////// | ||||
| struct ildgFormat : Serializable { | ||||
| public: | ||||
|   GRID_SERIALIZABLE_CLASS_MEMBERS(ildgFormat, | ||||
| 				  double, version, | ||||
| 				  std::string, field, | ||||
| 				  int, precision, | ||||
| 				  int, lx, | ||||
| 				  int, ly, | ||||
| 				  int, lz, | ||||
| 				  int, lt); | ||||
|   ildgFormat() { version=1.0; }; | ||||
| }; | ||||
| //////////////////////// | ||||
| // USQCD info | ||||
| //////////////////////// | ||||
| struct usqcdInfo : Serializable {  | ||||
|  public: | ||||
|   GRID_SERIALIZABLE_CLASS_MEMBERS(usqcdInfo, | ||||
| 				  double, version, | ||||
| 				  double, plaq, | ||||
| 				  double, linktr, | ||||
| 				  std::string, info); | ||||
|   usqcdInfo() {  | ||||
|     version=1.0;  | ||||
|   }; | ||||
| }; | ||||
| //////////////////////// | ||||
| // Scidac Checksum | ||||
| //////////////////////// | ||||
| struct scidacChecksum : Serializable {  | ||||
|  public: | ||||
|   GRID_SERIALIZABLE_CLASS_MEMBERS(scidacChecksum, | ||||
| 				  double, version, | ||||
| 				  std::string, suma, | ||||
| 				  std::string, sumb); | ||||
|   scidacChecksum() {  | ||||
|     version=1.0;  | ||||
|   }; | ||||
| }; | ||||
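| // In the QIO/SciDAC convention (as we understand it), suma and sumb are hex strings of the two | ||||
| // site-CRC32 XOR accumulations, rotated by the global site index mod 29 and mod 31 respectively. | ||||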
| //////////////////////////////////////////////////////////////////////////////////////////////////////////////// | ||||
| // Type:           scidac-file-xml         <title>MILC ILDG archival gauge configuration</title> | ||||
| //////////////////////////////////////////////////////////////////////////////////////////////////////////////// | ||||
|  | ||||
| //////////////////////////////////////////////////////////////////////////////////////////////////////////////// | ||||
| // Type:            | ||||
| //////////////////////////////////////////////////////////////////////////////////////////////////////////////// | ||||
|  | ||||
| //////////////////////// | ||||
| // Scidac private file xml  | ||||
| // <?xml version="1.0" encoding="UTF-8"?><scidacFile><version>1.1</version><spacetime>4</spacetime><dims>16 16 16 32 </dims><volfmt>0</volfmt></scidacFile>  | ||||
| ////////////////////////                                                                                                                                                                               | ||||
|  | ||||
| #if 0 | ||||
| //////////////////////////////////////////////////////////////////////////////////////// | ||||
| // From http://www.physics.utah.edu/~detar/scidac/qio_2p3.pdf | ||||
| //////////////////////////////////////////////////////////////////////////////////////// | ||||
| struct usqcdPropFile : Serializable {  | ||||
|  public: | ||||
|   GRID_SERIALIZABLE_CLASS_MEMBERS(usqcdPropFile, | ||||
| 				  double, version, | ||||
| 				  std::string, type, | ||||
| 				  std::string, info); | ||||
|   usqcdPropFile() {  | ||||
|     version=1.0;  | ||||
|   }; | ||||
| }; | ||||
| struct usqcdSourceInfo : Serializable {  | ||||
|  public: | ||||
|   GRID_SERIALIZABLE_CLASS_MEMBERS(usqcdSourceInfo, | ||||
| 				  double, version, | ||||
| 				  std::string, info); | ||||
|   usqcdSourceInfo() {  | ||||
|     version=1.0;  | ||||
|   }; | ||||
| }; | ||||
| struct usqcdPropInfo : Serializable {  | ||||
|  public: | ||||
|   GRID_SERIALIZABLE_CLASS_MEMBERS(usqcdPropInfo, | ||||
| 				  double, version, | ||||
| 				  int, spin, | ||||
| 				  int, color, | ||||
| 				  std::string, info); | ||||
|   usqcdPropInfo() {  | ||||
|     version=1.0;  | ||||
|   }; | ||||
| }; | ||||
| #endif | ||||
|  | ||||
| } | ||||
| #endif | ||||
| #endif | ||||
|   | ||||
329  lib/parallelIO/MetaData.h  (Normal file)
							| @@ -0,0 +1,329 @@ | ||||
| /************************************************************************************* | ||||
|  | ||||
|     Grid physics library, www.github.com/paboyle/Grid  | ||||
|  | ||||
|     Source file: ./lib/parallelIO/NerscIO.h | ||||
|  | ||||
|     Copyright (C) 2015 | ||||
|  | ||||
|  | ||||
|     Author: Peter Boyle <paboyle@ph.ed.ac.uk> | ||||
|  | ||||
|     This program is free software; you can redistribute it and/or modify | ||||
|     it under the terms of the GNU General Public License as published by | ||||
|     the Free Software Foundation; either version 2 of the License, or | ||||
|     (at your option) any later version. | ||||
|  | ||||
|     This program is distributed in the hope that it will be useful, | ||||
|     but WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
|     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the | ||||
|     GNU General Public License for more details. | ||||
|  | ||||
|     You should have received a copy of the GNU General Public License along | ||||
|     with this program; if not, write to the Free Software Foundation, Inc., | ||||
|     51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||||
|  | ||||
|     See the full license in the file "LICENSE" in the top level distribution directory | ||||
| *************************************************************************************/ | ||||
| /*  END LEGAL */ | ||||
|  | ||||
| #include <algorithm> | ||||
| #include <iostream> | ||||
| #include <iomanip> | ||||
| #include <fstream> | ||||
| #include <map> | ||||
| #include <unistd.h> | ||||
| #include <sys/utsname.h> | ||||
| #include <pwd.h> | ||||
|  | ||||
| namespace Grid { | ||||
|  | ||||
|   /////////////////////////////////////////////////////// | ||||
|   // Precision mapping | ||||
|   /////////////////////////////////////////////////////// | ||||
|   template<class vobj> static std::string getFormatString (void) | ||||
|   { | ||||
|     std::string format; | ||||
|     typedef typename getPrecision<vobj>::real_scalar_type stype; | ||||
|     if ( sizeof(stype) == sizeof(float) ) { | ||||
|       format = std::string("IEEE32BIG"); | ||||
|     } | ||||
|     if ( sizeof(stype) == sizeof(double) ) { | ||||
|       format = std::string("IEEE64BIG"); | ||||
|     } | ||||
|     return format; | ||||
|   } | ||||
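|   // e.g. getFormatString<vLorentzColourMatrixF>() returns "IEEE32BIG" and | ||||
|   //      getFormatString<vLorentzColourMatrixD>() returns "IEEE64BIG". | ||||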
|   //////////////////////////////////////////////////////////////////////////////// | ||||
|   // header specification/interpretation | ||||
|   //////////////////////////////////////////////////////////////////////////////// | ||||
|     class FieldMetaData : Serializable { | ||||
|     public: | ||||
|  | ||||
|       GRID_SERIALIZABLE_CLASS_MEMBERS(FieldMetaData, | ||||
| 				      int, nd, | ||||
| 				      std::vector<int>, dimension, | ||||
| 				      std::vector<std::string>, boundary, | ||||
| 				      int, data_start, | ||||
| 				      std::string, hdr_version, | ||||
| 				      std::string, storage_format, | ||||
| 				      double, link_trace, | ||||
| 				      double, plaquette, | ||||
| 				      uint32_t, checksum, | ||||
| 				      uint32_t, scidac_checksuma, | ||||
| 				      uint32_t, scidac_checksumb, | ||||
| 				      unsigned int, sequence_number, | ||||
| 				      std::string, data_type, | ||||
| 				      std::string, ensemble_id, | ||||
| 				      std::string, ensemble_label, | ||||
| 				      std::string, ildg_lfn, | ||||
| 				      std::string, creator, | ||||
| 				      std::string, creator_hardware, | ||||
| 				      std::string, creation_date, | ||||
| 				      std::string, archive_date, | ||||
| 				      std::string, floating_point); | ||||
|       FieldMetaData(void) {  | ||||
| 	nd=4; | ||||
| 	dimension.resize(4); | ||||
| 	boundary.resize(4); | ||||
| 	scidac_checksuma=0; | ||||
| 	scidac_checksumb=0; | ||||
| 	checksum=0; | ||||
|       } | ||||
|     }; | ||||
|  | ||||
|  | ||||
|  | ||||
|   namespace QCD { | ||||
|  | ||||
|     using namespace Grid; | ||||
|  | ||||
|  | ||||
|     ////////////////////////////////////////////////////////////////////// | ||||
|     // Bit and Physical Checksumming and QA of data | ||||
|     ////////////////////////////////////////////////////////////////////// | ||||
|     inline void GridMetaData(GridBase *grid,FieldMetaData &header) | ||||
|     { | ||||
|       int nd = grid->_ndimension; | ||||
|       header.nd = nd; | ||||
|       header.dimension.resize(nd); | ||||
|       header.boundary.resize(nd); | ||||
|       header.data_start = 0; | ||||
|       for(int d=0;d<nd;d++) { | ||||
| 	header.dimension[d] = grid->_fdimensions[d]; | ||||
|       } | ||||
|       for(int d=0;d<nd;d++) { | ||||
| 	header.boundary[d] = std::string("PERIODIC"); | ||||
|       } | ||||
|     } | ||||
|  | ||||
|     inline void MachineCharacteristics(FieldMetaData &header) | ||||
|     { | ||||
|       // Who | ||||
|       struct passwd *pw = getpwuid (getuid()); | ||||
|       if (pw) header.creator = std::string(pw->pw_name);  | ||||
|  | ||||
|       // When | ||||
|       std::time_t t = std::time(nullptr); | ||||
|       std::tm tm_ = *std::localtime(&t); | ||||
|       std::ostringstream oss;  | ||||
|       //      oss << std::put_time(&tm_, "%c %Z"); | ||||
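|       // NB: with put_time commented out above, oss is never filled, so creation_date | ||||
|       //     and archive_date end up as empty strings. | ||||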
|       header.creation_date = oss.str(); | ||||
|       header.archive_date  = header.creation_date; | ||||
|  | ||||
|       // What | ||||
|       struct utsname name;  uname(&name); | ||||
|       header.creator_hardware = std::string(name.nodename)+"-"; | ||||
|       header.creator_hardware+= std::string(name.machine)+"-"; | ||||
|       header.creator_hardware+= std::string(name.sysname)+"-"; | ||||
|       header.creator_hardware+= std::string(name.release); | ||||
|     } | ||||
|  | ||||
| #define dump_meta_data(field, s)					\ | ||||
|       s << "BEGIN_HEADER"      << std::endl;				\ | ||||
|       s << "HDR_VERSION = "    << field.hdr_version    << std::endl;	\ | ||||
|       s << "DATATYPE = "       << field.data_type      << std::endl;	\ | ||||
|       s << "STORAGE_FORMAT = " << field.storage_format << std::endl;	\ | ||||
|       for(int i=0;i<4;i++){						\ | ||||
| 	s << "DIMENSION_" << i+1 << " = " << field.dimension[i] << std::endl ; \ | ||||
|       }									\ | ||||
|       s << "LINK_TRACE = " << std::setprecision(10) << field.link_trace << std::endl; \ | ||||
|       s << "PLAQUETTE  = " << std::setprecision(10) << field.plaquette  << std::endl; \ | ||||
|       for(int i=0;i<4;i++){						\ | ||||
| 	s << "BOUNDARY_"<<i+1<<" = " << field.boundary[i] << std::endl;	\ | ||||
|       }									\ | ||||
| 									\ | ||||
|       s << "CHECKSUM = "<< std::hex << std::setw(10) << field.checksum << std::dec<<std::endl; \ | ||||
|       s << "SCIDAC_CHECKSUMA = "<< std::hex << std::setw(10) << field.scidac_checksuma << std::dec<<std::endl; \ | ||||
|       s << "SCIDAC_CHECKSUMB = "<< std::hex << std::setw(10) << field.scidac_checksumb << std::dec<<std::endl; \ | ||||
|       s << "ENSEMBLE_ID = "     << field.ensemble_id      << std::endl;	\ | ||||
|       s << "ENSEMBLE_LABEL = "  << field.ensemble_label   << std::endl;	\ | ||||
|       s << "SEQUENCE_NUMBER = " << field.sequence_number  << std::endl;	\ | ||||
|       s << "CREATOR = "         << field.creator          << std::endl;	\ | ||||
|       s << "CREATOR_HARDWARE = "<< field.creator_hardware << std::endl;	\ | ||||
|       s << "CREATION_DATE = "   << field.creation_date    << std::endl;	\ | ||||
|       s << "ARCHIVE_DATE = "    << field.archive_date     << std::endl;	\ | ||||
|       s << "FLOATING_POINT = "  << field.floating_point   << std::endl;	\ | ||||
|       s << "END_HEADER"         << std::endl; | ||||
|  | ||||
| template<class vobj> inline void PrepareMetaData(Lattice<vobj> & field, FieldMetaData &header) | ||||
| { | ||||
|   GridBase *grid = field._grid; | ||||
|   std::string format = getFormatString<vobj>(); | ||||
|    header.floating_point = format; | ||||
|    header.checksum = 0x0; // Nersc checksum unused in ILDG, Scidac | ||||
|    GridMetaData(grid,header);  | ||||
|    MachineCharacteristics(header); | ||||
|  } | ||||
|  inline void GaugeStatistics(Lattice<vLorentzColourMatrixF> & data,FieldMetaData &header) | ||||
|  { | ||||
|    // How to convert data precision etc... | ||||
|    header.link_trace=Grid::QCD::WilsonLoops<PeriodicGimplF>::linkTrace(data); | ||||
|    header.plaquette =Grid::QCD::WilsonLoops<PeriodicGimplF>::avgPlaquette(data); | ||||
|  } | ||||
|  inline void GaugeStatistics(Lattice<vLorentzColourMatrixD> & data,FieldMetaData &header) | ||||
|  { | ||||
|    // How to convert data precision etc... | ||||
|    header.link_trace=Grid::QCD::WilsonLoops<PeriodicGimplD>::linkTrace(data); | ||||
|    header.plaquette =Grid::QCD::WilsonLoops<PeriodicGimplD>::avgPlaquette(data); | ||||
|  } | ||||
|  template<> inline void PrepareMetaData<vLorentzColourMatrixF>(Lattice<vLorentzColourMatrixF> & field, FieldMetaData &header) | ||||
|  { | ||||
|     | ||||
|    GridBase *grid = field._grid; | ||||
|    std::string format = getFormatString<vLorentzColourMatrixF>(); | ||||
|    header.floating_point = format; | ||||
|    header.checksum = 0x0; // Nersc checksum unused in ILDG, Scidac | ||||
|    GridMetaData(grid,header);  | ||||
|    GaugeStatistics(field,header); | ||||
|    MachineCharacteristics(header); | ||||
|  } | ||||
|  template<> inline void PrepareMetaData<vLorentzColourMatrixD>(Lattice<vLorentzColourMatrixD> & field, FieldMetaData &header) | ||||
|  { | ||||
|    GridBase *grid = field._grid; | ||||
|    std::string format = getFormatString<vLorentzColourMatrixD>(); | ||||
|    header.floating_point = format; | ||||
|    header.checksum = 0x0; // Nersc checksum unused in ILDG, Scidac | ||||
|    GridMetaData(grid,header);  | ||||
|    GaugeStatistics(field,header); | ||||
|    MachineCharacteristics(header); | ||||
|  } | ||||
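|  // The LorentzColourMatrix specialisations additionally fill the plaquette and link trace via | ||||
|  // GaugeStatistics; the generic PrepareMetaData template cannot, since it knows nothing about gauge fields. | ||||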
|  | ||||
|     ////////////////////////////////////////////////////////////////////// | ||||
|     // Utilities ; these are QCD aware | ||||
|     ////////////////////////////////////////////////////////////////////// | ||||
|     inline void reconstruct3(LorentzColourMatrix & cm) | ||||
|     { | ||||
|       const int x=0; | ||||
|       const int y=1; | ||||
|       const int z=2; | ||||
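|       // For SU(3) the third row is the complex-conjugate cross product of the first two rows, | ||||
|       // so two-row (3x2) compressed storage can be expanded losslessly. | ||||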
|       for(int mu=0;mu<Nd;mu++){ | ||||
| 	cm(mu)()(2,x) = adj(cm(mu)()(0,y)*cm(mu)()(1,z)-cm(mu)()(0,z)*cm(mu)()(1,y)); //x= yz-zy | ||||
| 	cm(mu)()(2,y) = adj(cm(mu)()(0,z)*cm(mu)()(1,x)-cm(mu)()(0,x)*cm(mu)()(1,z)); //y= zx-xz | ||||
| 	cm(mu)()(2,z) = adj(cm(mu)()(0,x)*cm(mu)()(1,y)-cm(mu)()(0,y)*cm(mu)()(1,x)); //z= xy-yx | ||||
|       } | ||||
|     } | ||||
|  | ||||
|     //////////////////////////////////////////////////////////////////////////////// | ||||
|     // Some data types for intermediate storage | ||||
|     //////////////////////////////////////////////////////////////////////////////// | ||||
|     template<typename vtype> using iLorentzColour2x3 = iVector<iVector<iVector<vtype, Nc>, 2>, Nd >; | ||||
|  | ||||
|     typedef iLorentzColour2x3<Complex>  LorentzColour2x3; | ||||
|     typedef iLorentzColour2x3<ComplexF> LorentzColour2x3F; | ||||
|     typedef iLorentzColour2x3<ComplexD> LorentzColour2x3D; | ||||
|  | ||||
| ///////////////////////////////////////////////////////////////////////////////// | ||||
| // Simple classes for precision conversion | ||||
| ///////////////////////////////////////////////////////////////////////////////// | ||||
| template <class fobj, class sobj> | ||||
| struct BinarySimpleUnmunger { | ||||
|   typedef typename getPrecision<fobj>::real_scalar_type fobj_stype; | ||||
|   typedef typename getPrecision<sobj>::real_scalar_type sobj_stype; | ||||
|    | ||||
|   void operator()(sobj &in, fobj &out) { | ||||
|     // take word by word and convert the precision on the fly | ||||
|     fobj_stype *out_buffer = (fobj_stype *)&out; | ||||
|     sobj_stype *in_buffer  = (sobj_stype *)&in; | ||||
|     size_t fobj_words = sizeof(out) / sizeof(fobj_stype); | ||||
|     size_t sobj_words = sizeof(in) / sizeof(sobj_stype); | ||||
|     assert(fobj_words == sobj_words); | ||||
|      | ||||
|     for (unsigned int word = 0; word < sobj_words; word++) | ||||
|       out_buffer[word] = in_buffer[word];  // type conversion on the fly | ||||
|      | ||||
|   } | ||||
| }; | ||||
|  | ||||
| template <class fobj, class sobj> | ||||
| struct BinarySimpleMunger { | ||||
|   typedef typename getPrecision<fobj>::real_scalar_type fobj_stype; | ||||
|   typedef typename getPrecision<sobj>::real_scalar_type sobj_stype; | ||||
|  | ||||
|   void operator()(fobj &in, sobj &out) { | ||||
|     // take word by word and convert the precision on the fly | ||||
|     fobj_stype *in_buffer  = (fobj_stype *)&in; | ||||
|     sobj_stype *out_buffer = (sobj_stype *)&out; | ||||
|     size_t fobj_words = sizeof(in) / sizeof(fobj_stype); | ||||
|     size_t sobj_words = sizeof(out) / sizeof(sobj_stype); | ||||
|     assert(fobj_words == sobj_words); | ||||
|      | ||||
|     for (unsigned int word = 0; word < sobj_words; word++) | ||||
|       out_buffer[word] = in_buffer[word];  // type conversion on the fly | ||||
|      | ||||
|   } | ||||
| }; | ||||
|  | ||||
|  | ||||
|     template<class fobj,class sobj> | ||||
|     struct GaugeSimpleMunger{ | ||||
|       void operator()(fobj &in, sobj &out) { | ||||
|         for (int mu = 0; mu < Nd; mu++) { | ||||
|           for (int i = 0; i < Nc; i++) { | ||||
|           for (int j = 0; j < Nc; j++) { | ||||
| 	    out(mu)()(i, j) = in(mu)()(i, j); | ||||
| 	  }} | ||||
|         } | ||||
|       }; | ||||
|     }; | ||||
|  | ||||
|     template <class fobj, class sobj> | ||||
|     struct GaugeSimpleUnmunger { | ||||
|  | ||||
|       void operator()(sobj &in, fobj &out) { | ||||
|         for (int mu = 0; mu < Nd; mu++) { | ||||
|           for (int i = 0; i < Nc; i++) { | ||||
|           for (int j = 0; j < Nc; j++) { | ||||
| 	    out(mu)()(i, j) = in(mu)()(i, j); | ||||
| 	  }} | ||||
|         } | ||||
|       }; | ||||
|     }; | ||||
|  | ||||
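|     // Gauge3x2munger expands two-row compressed links (e.g. the NERSC 4D_SU3_GAUGE format) to full | ||||
|     // 3x3 matrices via reconstruct3; Gauge3x2unmunger performs the reverse truncation. | ||||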
|     template<class fobj,class sobj> | ||||
|     struct Gauge3x2munger{ | ||||
|       void operator() (fobj &in,sobj &out){ | ||||
| 	for(int mu=0;mu<Nd;mu++){ | ||||
| 	  for(int i=0;i<2;i++){ | ||||
| 	  for(int j=0;j<3;j++){ | ||||
| 	    out(mu)()(i,j) = in(mu)(i)(j); | ||||
| 	  }} | ||||
| 	} | ||||
| 	reconstruct3(out); | ||||
|       } | ||||
|     }; | ||||
|  | ||||
|     template<class fobj,class sobj> | ||||
|     struct Gauge3x2unmunger{ | ||||
|       void operator() (sobj &in,fobj &out){ | ||||
| 	for(int mu=0;mu<Nd;mu++){ | ||||
| 	  for(int i=0;i<2;i++){ | ||||
| 	  for(int j=0;j<3;j++){ | ||||
| 	    out(mu)(i)(j) = in(mu)()(i,j); | ||||
| 	  }} | ||||
| 	} | ||||
|       } | ||||
|     }; | ||||
|   } | ||||
|  | ||||
|  | ||||
| } | ||||
| @@ -30,182 +30,11 @@ | ||||
| #ifndef GRID_NERSC_IO_H | ||||
| #define GRID_NERSC_IO_H | ||||
|  | ||||
| #include <algorithm> | ||||
| #include <iostream> | ||||
| #include <iomanip> | ||||
| #include <fstream> | ||||
| #include <map> | ||||
|  | ||||
| #include <unistd.h> | ||||
| #include <sys/utsname.h> | ||||
| #include <pwd.h> | ||||
|  | ||||
| namespace Grid { | ||||
|   namespace QCD { | ||||
|  | ||||
|     using namespace Grid; | ||||
|  | ||||
|     //////////////////////////////////////////////////////////////////////////////// | ||||
|     // Some data types for intermediate storage | ||||
|     //////////////////////////////////////////////////////////////////////////////// | ||||
|     template<typename vtype> using iLorentzColour2x3 = iVector<iVector<iVector<vtype, Nc>, 2>, 4 >; | ||||
|  | ||||
|     typedef iLorentzColour2x3<Complex>  LorentzColour2x3; | ||||
|     typedef iLorentzColour2x3<ComplexF> LorentzColour2x3F; | ||||
|     typedef iLorentzColour2x3<ComplexD> LorentzColour2x3D; | ||||
|  | ||||
|     //////////////////////////////////////////////////////////////////////////////// | ||||
|     // header specification/interpretation | ||||
|     //////////////////////////////////////////////////////////////////////////////// | ||||
|     class NerscField { | ||||
|     public: | ||||
|       // header strings (not in order) | ||||
|       int dimension[4]; | ||||
|       std::string boundary[4];  | ||||
|       int data_start; | ||||
|       std::string hdr_version; | ||||
|       std::string storage_format; | ||||
|       // Checks on data | ||||
|       double link_trace; | ||||
|       double plaquette; | ||||
|       uint32_t checksum; | ||||
|       unsigned int sequence_number; | ||||
|       std::string data_type; | ||||
|       std::string ensemble_id ; | ||||
|       std::string ensemble_label ; | ||||
|       std::string creator ; | ||||
|       std::string creator_hardware ; | ||||
|       std::string creation_date ; | ||||
|       std::string archive_date ; | ||||
|       std::string floating_point; | ||||
|     }; | ||||
|  | ||||
|     ////////////////////////////////////////////////////////////////////// | ||||
|     // Bit and Physical Checksumming and QA of data | ||||
|     ////////////////////////////////////////////////////////////////////// | ||||
|  | ||||
|     inline void NerscGrid(GridBase *grid,NerscField &header) | ||||
|     { | ||||
|       assert(grid->_ndimension==4); | ||||
|       for(int d=0;d<4;d++) { | ||||
| 	header.dimension[d] = grid->_fdimensions[d]; | ||||
|       } | ||||
|       for(int d=0;d<4;d++) { | ||||
| 	header.boundary[d] = std::string("PERIODIC"); | ||||
|       } | ||||
|     } | ||||
|     template<class GaugeField> | ||||
|     inline void NerscStatistics(GaugeField & data,NerscField &header) | ||||
|     { | ||||
|       // How to convert data precision etc... | ||||
|       header.link_trace=Grid::QCD::WilsonLoops<PeriodicGimplR>::linkTrace(data); | ||||
|       header.plaquette =Grid::QCD::WilsonLoops<PeriodicGimplR>::avgPlaquette(data); | ||||
|     } | ||||
|  | ||||
|     inline void NerscMachineCharacteristics(NerscField &header) | ||||
|     { | ||||
|       // Who | ||||
|       struct passwd *pw = getpwuid (getuid()); | ||||
|       if (pw) header.creator = std::string(pw->pw_name);  | ||||
|  | ||||
|       // When | ||||
|       std::time_t t = std::time(nullptr); | ||||
|       std::tm tm = *std::localtime(&t); | ||||
|       std::ostringstream oss;  | ||||
|       //  oss << std::put_time(&tm, "%c %Z"); | ||||
|       header.creation_date = oss.str(); | ||||
|       header.archive_date  = header.creation_date; | ||||
|  | ||||
|       // What | ||||
|       struct utsname name;  uname(&name); | ||||
|       header.creator_hardware = std::string(name.nodename)+"-"; | ||||
|       header.creator_hardware+= std::string(name.machine)+"-"; | ||||
|       header.creator_hardware+= std::string(name.sysname)+"-"; | ||||
|       header.creator_hardware+= std::string(name.release); | ||||
|  | ||||
|     } | ||||
|     ////////////////////////////////////////////////////////////////////// | ||||
|     // Utilities ; these are QCD aware | ||||
|     ////////////////////////////////////////////////////////////////////// | ||||
|     inline void NerscChecksum(uint32_t *buf,uint32_t buf_size_bytes,uint32_t &csum) | ||||
|     { | ||||
|       BinaryIO::Uint32Checksum(buf,buf_size_bytes,csum); | ||||
|     } | ||||
|     inline void reconstruct3(LorentzColourMatrix & cm) | ||||
|     { | ||||
|       const int x=0; | ||||
|       const int y=1; | ||||
|       const int z=2; | ||||
|       for(int mu=0;mu<4;mu++){ | ||||
| 	cm(mu)()(2,x) = adj(cm(mu)()(0,y)*cm(mu)()(1,z)-cm(mu)()(0,z)*cm(mu)()(1,y)); //x= yz-zy | ||||
| 	cm(mu)()(2,y) = adj(cm(mu)()(0,z)*cm(mu)()(1,x)-cm(mu)()(0,x)*cm(mu)()(1,z)); //y= zx-xz | ||||
| 	cm(mu)()(2,z) = adj(cm(mu)()(0,x)*cm(mu)()(1,y)-cm(mu)()(0,y)*cm(mu)()(1,x)); //z= xy-yx | ||||
|       } | ||||
|     } | ||||
|  | ||||
|     template<class fobj,class sobj> | ||||
|     struct NerscSimpleMunger{ | ||||
|       void operator()(fobj &in, sobj &out, uint32_t &csum) { | ||||
|         for (int mu = 0; mu < Nd; mu++) { | ||||
|           for (int i = 0; i < Nc; i++) { | ||||
|             for (int j = 0; j < Nc; j++) { | ||||
|               out(mu)()(i, j) = in(mu)()(i, j); | ||||
|             } | ||||
|           } | ||||
|         } | ||||
|         NerscChecksum((uint32_t *)&in, sizeof(in), csum); | ||||
|       }; | ||||
|     }; | ||||
|  | ||||
|     template <class fobj, class sobj> | ||||
|     struct NerscSimpleUnmunger { | ||||
|       void operator()(sobj &in, fobj &out, uint32_t &csum) { | ||||
|         for (int mu = 0; mu < Nd; mu++) { | ||||
|           for (int i = 0; i < Nc; i++) { | ||||
|             for (int j = 0; j < Nc; j++) { | ||||
|               out(mu)()(i, j) = in(mu)()(i, j); | ||||
|             } | ||||
|           } | ||||
|         } | ||||
|         NerscChecksum((uint32_t *)&out, sizeof(out), csum); | ||||
|       }; | ||||
|     }; | ||||
|  | ||||
|     template<class fobj,class sobj> | ||||
|     struct Nersc3x2munger{ | ||||
|       void operator() (fobj &in,sobj &out,uint32_t &csum){ | ||||
|       | ||||
| 	NerscChecksum((uint32_t *)&in,sizeof(in),csum);  | ||||
|  | ||||
| 	for(int mu=0;mu<4;mu++){ | ||||
| 	  for(int i=0;i<2;i++){ | ||||
| 	    for(int j=0;j<3;j++){ | ||||
| 	      out(mu)()(i,j) = in(mu)(i)(j); | ||||
| 	    }} | ||||
| 	} | ||||
| 	reconstruct3(out); | ||||
|       } | ||||
|     }; | ||||
|  | ||||
|     template<class fobj,class sobj> | ||||
|     struct Nersc3x2unmunger{ | ||||
|  | ||||
|       void operator() (sobj &in,fobj &out,uint32_t &csum){ | ||||
|  | ||||
|  | ||||
| 	for(int mu=0;mu<4;mu++){ | ||||
| 	  for(int i=0;i<2;i++){ | ||||
| 	    for(int j=0;j<3;j++){ | ||||
| 	      out(mu)(i)(j) = in(mu)()(i,j); | ||||
| 	    }} | ||||
| 	} | ||||
|  | ||||
| 	NerscChecksum((uint32_t *)&out,sizeof(out),csum);  | ||||
|  | ||||
|       } | ||||
|     }; | ||||
|  | ||||
|  | ||||
|     //////////////////////////////////////////////////////////////////////////////// | ||||
|     // Write and read from fstream; compute header offset for payload | ||||
|     //////////////////////////////////////////////////////////////////////////////// | ||||
| @@ -216,42 +45,17 @@ namespace Grid { | ||||
| 	std::ofstream fout(file,std::ios::out); | ||||
|       } | ||||
|    | ||||
| #define dump_nersc_header(field, s)					\ | ||||
|       s << "BEGIN_HEADER"      << std::endl;				\ | ||||
|       s << "HDR_VERSION = "    << field.hdr_version    << std::endl;	\ | ||||
|       s << "DATATYPE = "       << field.data_type      << std::endl;	\ | ||||
|       s << "STORAGE_FORMAT = " << field.storage_format << std::endl;	\ | ||||
|       for(int i=0;i<4;i++){						\ | ||||
| 	s << "DIMENSION_" << i+1 << " = " << field.dimension[i] << std::endl ; \ | ||||
|       }									\ | ||||
|       s << "LINK_TRACE = " << std::setprecision(10) << field.link_trace << std::endl; \ | ||||
|       s << "PLAQUETTE  = " << std::setprecision(10) << field.plaquette  << std::endl; \ | ||||
|       for(int i=0;i<4;i++){						\ | ||||
| 	s << "BOUNDARY_"<<i+1<<" = " << field.boundary[i] << std::endl;	\ | ||||
|       }									\ | ||||
| 									\ | ||||
|       s << "CHECKSUM = "<< std::hex << std::setw(10) << field.checksum << std::dec<<std::endl; \ | ||||
|       s << "ENSEMBLE_ID = "     << field.ensemble_id      << std::endl;	\ | ||||
|       s << "ENSEMBLE_LABEL = "  << field.ensemble_label   << std::endl;	\ | ||||
|       s << "SEQUENCE_NUMBER = " << field.sequence_number  << std::endl;	\ | ||||
|       s << "CREATOR = "         << field.creator          << std::endl;	\ | ||||
|       s << "CREATOR_HARDWARE = "<< field.creator_hardware << std::endl;	\ | ||||
|       s << "CREATION_DATE = "   << field.creation_date    << std::endl;	\ | ||||
|       s << "ARCHIVE_DATE = "    << field.archive_date     << std::endl;	\ | ||||
|       s << "FLOATING_POINT = "  << field.floating_point   << std::endl;	\ | ||||
|       s << "END_HEADER"         << std::endl; | ||||
|    | ||||
|       static inline unsigned int writeHeader(NerscField &field,std::string file) | ||||
|       static inline unsigned int writeHeader(FieldMetaData &field,std::string file) | ||||
|       { | ||||
|       std::ofstream fout(file,std::ios::out|std::ios::in); | ||||
|       fout.seekp(0,std::ios::beg); | ||||
|       dump_nersc_header(field, fout); | ||||
|       dump_meta_data(field, fout); | ||||
|       field.data_start = fout.tellp(); | ||||
|       return field.data_start; | ||||
|     } | ||||
|  | ||||
|       // for the header-reader | ||||
|       static inline int readHeader(std::string file,GridBase *grid,  NerscField &field) | ||||
|       static inline int readHeader(std::string file,GridBase *grid,  FieldMetaData &field) | ||||
|       { | ||||
|       int offset=0; | ||||
|       std::map<std::string,std::string> header; | ||||
| @@ -326,18 +130,18 @@ namespace Grid { | ||||
|     ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////// | ||||
|     // Now the meat: the object readers | ||||
|     ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////// | ||||
| #define PARALLEL_READ | ||||
| #define PARALLEL_WRITE | ||||
|  | ||||
|     template<class vsimd> | ||||
|       static inline void readConfiguration(Lattice<iLorentzColourMatrix<vsimd> > &Umu,NerscField& header,std::string file) | ||||
|     static inline void readConfiguration(Lattice<iLorentzColourMatrix<vsimd> > &Umu, | ||||
| 					 FieldMetaData& header, | ||||
| 					 std::string file) | ||||
|     { | ||||
|       typedef Lattice<iLorentzColourMatrix<vsimd> > GaugeField; | ||||
|  | ||||
|       GridBase *grid = Umu._grid; | ||||
|       int offset = readHeader(file,Umu._grid,header); | ||||
|  | ||||
|       NerscField clone(header); | ||||
|       FieldMetaData clone(header); | ||||
|  | ||||
|       std::string format(header.floating_point); | ||||
|  | ||||
| @@ -346,76 +150,78 @@ namespace Grid { | ||||
|       int ieee64big = (format == std::string("IEEE64BIG")); | ||||
|       int ieee64    = (format == std::string("IEEE64")); | ||||
|  | ||||
|       uint32_t csum; | ||||
|       uint32_t nersc_csum,scidac_csuma,scidac_csumb; | ||||
|       // depending on datatype, set up munger; | ||||
|       // munger is a function of <floating point, Real, data_type> | ||||
|       if ( header.data_type == std::string("4D_SU3_GAUGE") ) { | ||||
| 	if ( ieee32 || ieee32big ) { | ||||
| #ifdef PARALLEL_READ | ||||
| 	csum=BinaryIO::readObjectParallel<iLorentzColourMatrix<vsimd>, LorentzColour2x3F>  | ||||
| 	  (Umu,file,Nersc3x2munger<LorentzColour2x3F,LorentzColourMatrix>(), offset,format); | ||||
| #else | ||||
| 	csum=BinaryIO::readObjectSerial<iLorentzColourMatrix<vsimd>, LorentzColour2x3F>  | ||||
| 	  (Umu,file,Nersc3x2munger<LorentzColour2x3F,LorentzColourMatrix>(), offset,format); | ||||
| #endif | ||||
| 	  BinaryIO::readLatticeObject<iLorentzColourMatrix<vsimd>, LorentzColour2x3F>  | ||||
| 	    (Umu,file,Gauge3x2munger<LorentzColour2x3F,LorentzColourMatrix>(), offset,format, | ||||
| 	     nersc_csum,scidac_csuma,scidac_csumb); | ||||
| 	} | ||||
| 	if ( ieee64 || ieee64big ) { | ||||
| #ifdef PARALLEL_READ | ||||
| 	csum=BinaryIO::readObjectParallel<iLorentzColourMatrix<vsimd>, LorentzColour2x3D>  | ||||
| 	  (Umu,file,Nersc3x2munger<LorentzColour2x3D,LorentzColourMatrix>(),offset,format); | ||||
| #else  | ||||
| 	csum=BinaryIO::readObjectSerial<iLorentzColourMatrix<vsimd>, LorentzColour2x3D>  | ||||
| 	  (Umu,file,Nersc3x2munger<LorentzColour2x3D,LorentzColourMatrix>(),offset,format); | ||||
| #endif | ||||
| 	  BinaryIO::readLatticeObject<iLorentzColourMatrix<vsimd>, LorentzColour2x3D>  | ||||
| 	    (Umu,file,Gauge3x2munger<LorentzColour2x3D,LorentzColourMatrix>(),offset,format, | ||||
| 	     nersc_csum,scidac_csuma,scidac_csumb); | ||||
| 	} | ||||
|       } else if ( header.data_type == std::string("4D_SU3_GAUGE_3x3") ) { | ||||
| 	if ( ieee32 || ieee32big ) { | ||||
| #ifdef PARALLEL_READ | ||||
| 	  csum=BinaryIO::readObjectParallel<iLorentzColourMatrix<vsimd>,LorentzColourMatrixF> | ||||
| 	    (Umu,file,NerscSimpleMunger<LorentzColourMatrixF,LorentzColourMatrix>(),offset,format); | ||||
| #else | ||||
| 	  csum=BinaryIO::readObjectSerial<iLorentzColourMatrix<vsimd>,LorentzColourMatrixF> | ||||
| 	    (Umu,file,NerscSimpleMunger<LorentzColourMatrixF,LorentzColourMatrix>(),offset,format); | ||||
| #endif | ||||
| 	  BinaryIO::readLatticeObject<iLorentzColourMatrix<vsimd>,LorentzColourMatrixF> | ||||
| 	    (Umu,file,GaugeSimpleMunger<LorentzColourMatrixF,LorentzColourMatrix>(),offset,format, | ||||
| 	     nersc_csum,scidac_csuma,scidac_csumb); | ||||
| 	} | ||||
| 	if ( ieee64 || ieee64big ) { | ||||
| #ifdef PARALLEL_READ | ||||
| 	  csum=BinaryIO::readObjectParallel<iLorentzColourMatrix<vsimd>,LorentzColourMatrixD> | ||||
| 	    (Umu,file,NerscSimpleMunger<LorentzColourMatrixD,LorentzColourMatrix>(),offset,format); | ||||
| #else | ||||
| 	  csum=BinaryIO::readObjectSerial<iLorentzColourMatrix<vsimd>,LorentzColourMatrixD> | ||||
| 	    (Umu,file,NerscSimpleMunger<LorentzColourMatrixD,LorentzColourMatrix>(),offset,format); | ||||
| #endif | ||||
| 	  BinaryIO::readLatticeObject<iLorentzColourMatrix<vsimd>,LorentzColourMatrixD> | ||||
| 	    (Umu,file,GaugeSimpleMunger<LorentzColourMatrixD,LorentzColourMatrix>(),offset,format, | ||||
| 	     nersc_csum,scidac_csuma,scidac_csumb); | ||||
| 	} | ||||
|       } else { | ||||
| 	assert(0); | ||||
|       } | ||||
|  | ||||
|       NerscStatistics<GaugeField>(Umu,clone); | ||||
|       GaugeStatistics(Umu,clone); | ||||
|  | ||||
|       std::cout<<GridLogMessage <<"NERSC Configuration "<<file<<" checksum "<<std::hex<<            csum<< std::dec | ||||
|       std::cout<<GridLogMessage <<"NERSC Configuration "<<file<<" checksum "<<std::hex<<nersc_csum<< std::dec | ||||
| 	       <<" header   "<<std::hex<<header.checksum<<std::dec <<std::endl; | ||||
|       std::cout<<GridLogMessage <<"NERSC Configuration "<<file<<" plaquette "<<clone.plaquette | ||||
| 	       <<" header    "<<header.plaquette<<std::endl; | ||||
|       std::cout<<GridLogMessage <<"NERSC Configuration "<<file<<" link_trace "<<clone.link_trace | ||||
| 	       <<" header    "<<header.link_trace<<std::endl; | ||||
|  | ||||
|       if ( fabs(clone.plaquette -header.plaquette ) >=  1.0e-5 ) {  | ||||
| 	std::cout << " Plaquette mismatch "<<std::endl; | ||||
| 	std::cout << Umu[0]<<std::endl; | ||||
| 	std::cout << Umu[1]<<std::endl; | ||||
|       } | ||||
|       if ( nersc_csum != header.checksum ) {  | ||||
| 	std::cerr << " checksum mismatch " << std::endl; | ||||
| 	std::cerr << " plaqs " << clone.plaquette << " " << header.plaquette << std::endl; | ||||
| 	std::cerr << " trace " << clone.link_trace<< " " << header.link_trace<< std::endl; | ||||
| 	std::cerr << " nersc_csum  " <<std::hex<< nersc_csum << " " << header.checksum<< std::dec<< std::endl; | ||||
| 	exit(0); | ||||
|       } | ||||
|       assert(fabs(clone.plaquette -header.plaquette ) < 1.0e-5 ); | ||||
|       assert(fabs(clone.link_trace-header.link_trace) < 1.0e-6 ); | ||||
|       assert(csum == header.checksum ); | ||||
|       assert(nersc_csum == header.checksum ); | ||||
|        | ||||
|       std::cout<<GridLogMessage <<"NERSC Configuration "<<file<< " and plaquette, link trace, and checksum agree"<<std::endl; | ||||
|     } | ||||
|  | ||||
|       template<class vsimd> | ||||
|       static inline void writeConfiguration(Lattice<iLorentzColourMatrix<vsimd> > &Umu,std::string file, int two_row,int bits32) | ||||
|       static inline void writeConfiguration(Lattice<iLorentzColourMatrix<vsimd> > &Umu, | ||||
| 					    std::string file,  | ||||
| 					    int two_row, | ||||
| 					    int bits32) | ||||
|       { | ||||
| 	typedef Lattice<iLorentzColourMatrix<vsimd> > GaugeField; | ||||
|  | ||||
| 	typedef iLorentzColourMatrix<vsimd> vobj; | ||||
| 	typedef typename vobj::scalar_object sobj; | ||||
|  | ||||
| 	FieldMetaData header; | ||||
| 	/////////////////////////////////////////// | ||||
| 	// Following should become arguments | ||||
| 	NerscField header; | ||||
| 	/////////////////////////////////////////// | ||||
| 	header.sequence_number = 1; | ||||
| 	header.ensemble_id     = "UKQCD"; | ||||
| 	header.ensemble_label  = "DWF"; | ||||
| @@ -425,45 +231,32 @@ namespace Grid { | ||||
|    | ||||
| 	GridBase *grid = Umu._grid; | ||||
|  | ||||
| 	NerscGrid(grid,header); | ||||
| 	NerscStatistics<GaugeField>(Umu,header); | ||||
| 	NerscMachineCharacteristics(header); | ||||
| 	GridMetaData(grid,header); | ||||
| 	assert(header.nd==4); | ||||
| 	GaugeStatistics(Umu,header); | ||||
| 	MachineCharacteristics(header); | ||||
|  | ||||
| 	uint32_t csum; | ||||
| 	int offset; | ||||
|    | ||||
| 	truncate(file); | ||||
|  | ||||
| 	if ( two_row ) {  | ||||
|  | ||||
| 	  header.floating_point = std::string("IEEE64BIG"); | ||||
| 	  header.data_type      = std::string("4D_SU3_GAUGE"); | ||||
| 	  Nersc3x2unmunger<fobj2D,sobj> munge; | ||||
| 	  BinaryIO::Uint32Checksum<vobj,fobj2D>(Umu, munge,header.checksum); | ||||
| 	  offset = writeHeader(header,file); | ||||
| #ifdef PARALLEL_WRITE | ||||
| 	  csum=BinaryIO::writeObjectParallel<vobj,fobj2D>(Umu,file,munge,offset,header.floating_point); | ||||
| #else | ||||
| 	  csum=BinaryIO::writeObjectSerial<vobj,fobj2D>(Umu,file,munge,offset,header.floating_point); | ||||
| #endif | ||||
| 	} else {  | ||||
| 	// Sod it -- always write 3x3 double | ||||
| 	header.floating_point = std::string("IEEE64BIG"); | ||||
| 	header.data_type      = std::string("4D_SU3_GAUGE_3x3"); | ||||
| 	  NerscSimpleUnmunger<fobj3D,sobj> munge; | ||||
| 	  BinaryIO::Uint32Checksum<vobj,fobj3D>(Umu, munge,header.checksum); | ||||
| 	GaugeSimpleUnmunger<fobj3D,sobj> munge; | ||||
| 	offset = writeHeader(header,file); | ||||
| #ifdef PARALLEL_WRITE | ||||
| 	  csum=BinaryIO::writeObjectParallel<vobj,fobj3D>(Umu,file,munge,offset,header.floating_point); | ||||
| #else | ||||
| 	  csum=BinaryIO::writeObjectSerial<vobj,fobj3D>(Umu,file,munge,offset,header.floating_point); | ||||
| #endif | ||||
| 	} | ||||
|  | ||||
| 	std::cout<<GridLogMessage <<"Written NERSC Configuration on "<< file << " checksum "<<std::hex<<csum<< std::dec<<" plaq "<< header.plaquette <<std::endl; | ||||
| 	uint32_t nersc_csum,scidac_csuma,scidac_csumb; | ||||
| 	BinaryIO::writeLatticeObject<vobj,fobj3D>(Umu,file,munge,offset,header.floating_point, | ||||
| 								  nersc_csum,scidac_csuma,scidac_csumb); | ||||
| 	header.checksum = nersc_csum; | ||||
| 	writeHeader(header,file); | ||||
|  | ||||
| 	std::cout<<GridLogMessage <<"Written NERSC Configuration on "<< file << " checksum " | ||||
| 		 <<std::hex<<header.checksum | ||||
| 		 <<std::dec<<" plaq "<< header.plaquette <<std::endl; | ||||
|  | ||||
|       } | ||||
|  | ||||
|  | ||||
|       /////////////////////////////// | ||||
|       // RNG state | ||||
|       /////////////////////////////// | ||||
| @@ -472,19 +265,19 @@ namespace Grid { | ||||
| 	typedef typename GridParallelRNG::RngStateType RngStateType; | ||||
|  | ||||
| 	// Following should become arguments | ||||
| 	NerscField header; | ||||
| 	FieldMetaData header; | ||||
| 	header.sequence_number = 1; | ||||
| 	header.ensemble_id     = "UKQCD"; | ||||
| 	header.ensemble_label  = "DWF"; | ||||
|  | ||||
| 	GridBase *grid = parallel._grid; | ||||
|  | ||||
| 	NerscGrid(grid,header); | ||||
| 	GridMetaData(grid,header); | ||||
| 	assert(header.nd==4); | ||||
| 	header.link_trace=0.0; | ||||
| 	header.plaquette=0.0; | ||||
| 	NerscMachineCharacteristics(header); | ||||
| 	MachineCharacteristics(header); | ||||
|  | ||||
| 	uint32_t csum; | ||||
| 	int offset; | ||||
|    | ||||
| #ifdef RNG_RANLUX | ||||
| @@ -502,15 +295,19 @@ namespace Grid { | ||||
|  | ||||
| 	truncate(file); | ||||
| 	offset = writeHeader(header,file); | ||||
| 	csum=BinaryIO::writeRNGSerial(serial,parallel,file,offset); | ||||
| 	header.checksum = csum; | ||||
| 	uint32_t nersc_csum,scidac_csuma,scidac_csumb; | ||||
| 	BinaryIO::writeRNG(serial,parallel,file,offset,nersc_csum,scidac_csuma,scidac_csumb); | ||||
| 	header.checksum = nersc_csum; | ||||
| 	offset = writeHeader(header,file); | ||||
|  | ||||
| 	std::cout<<GridLogMessage <<"Written NERSC RNG STATE "<<file<< " checksum "<<std::hex<<csum<<std::dec<<std::endl; | ||||
| 	std::cout<<GridLogMessage  | ||||
| 		 <<"Written NERSC RNG STATE "<<file<< " checksum " | ||||
| 		 <<std::hex<<header.checksum | ||||
| 		 <<std::dec<<std::endl; | ||||
|  | ||||
|       } | ||||
|      | ||||
|       static inline void readRNGState(GridSerialRNG &serial,GridParallelRNG & parallel,NerscField& header,std::string file) | ||||
|       static inline void readRNGState(GridSerialRNG &serial,GridParallelRNG & parallel,FieldMetaData& header,std::string file) | ||||
|       { | ||||
| 	typedef typename GridParallelRNG::RngStateType RngStateType; | ||||
|  | ||||
| @@ -518,7 +315,7 @@ namespace Grid { | ||||
|  | ||||
| 	int offset = readHeader(file,grid,header); | ||||
|  | ||||
| 	NerscField clone(header); | ||||
| 	FieldMetaData clone(header); | ||||
|  | ||||
| 	std::string format(header.floating_point); | ||||
| 	std::string data_type(header.data_type); | ||||
| @@ -538,15 +335,19 @@ namespace Grid { | ||||
|  | ||||
| 	// depending on datatype, set up munger; | ||||
| 	// munger is a function of <floating point, Real, data_type> | ||||
| 	uint32_t csum=BinaryIO::readRNGSerial(serial,parallel,file,offset); | ||||
| 	uint32_t nersc_csum,scidac_csuma,scidac_csumb; | ||||
| 	BinaryIO::readRNG(serial,parallel,file,offset,nersc_csum,scidac_csuma,scidac_csumb); | ||||
|  | ||||
| 	assert(csum == header.checksum ); | ||||
| 	if ( nersc_csum != header.checksum ) {  | ||||
| 	  std::cerr << "checksum mismatch "<<std::hex<< nersc_csum <<" "<<header.checksum<<std::dec<<std::endl; | ||||
| 	  exit(0); | ||||
| 	} | ||||
| 	assert(nersc_csum == header.checksum ); | ||||
|  | ||||
| 	std::cout<<GridLogMessage <<"Read NERSC RNG file "<<file<< " format "<< data_type <<std::endl; | ||||
|       } | ||||
|  | ||||
|     }; | ||||
|  | ||||
|  | ||||
|   }} | ||||
| #endif | ||||
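The hunks above replace the old NerscField / writeObjectSerial / writeRNGSerial interface with FieldMetaData and the unified writeLatticeObject / writeRNG / readRNG calls, which now report a NERSC checksum together with two SciDAC checksums and write the checksum back into the header. Below is a minimal checkpointing sketch against that interface, not part of this diff: the NerscIO class name, the writeConfiguration(Umu,file,two_row,bits32) and writeRNGState(serial,parallel,file) signatures, the SU3::HotConfiguration helper, the seeds and the file names are assumptions; only readRNGState's signature appears verbatim in the hunk above.

// Sketch only: write a gauge configuration plus RNG state, then read the RNG
// state back (the read aborts if the recomputed nersc_csum differs from
// header.checksum, as in the hunk above).
#include <Grid/Grid.h>

using namespace Grid;
using namespace Grid::QCD;

int main(int argc, char** argv) {
  Grid_init(&argc, &argv);

  // Assumed standard Grid setup helpers for a 4-d grid.
  GridCartesian* UGrid = SpaceTimeGrid::makeFourDimGrid(
      GridDefaultLatt(), GridDefaultSimd(Nd, vComplex::Nsimd()), GridDefaultMpi());

  GridSerialRNG   sRNG;
  GridParallelRNG pRNG(UGrid);
  sRNG.SeedFixedIntegers({1, 2, 3, 4});
  pRNG.SeedFixedIntegers({5, 6, 7, 8});

  LatticeGaugeField Umu(UGrid);
  SU3::HotConfiguration(pRNG, Umu);   // any gauge field would do here

  // Assumed signatures; the header (FieldMetaData) is filled and written inside.
  NerscIO::writeConfiguration(Umu, "ckpoint_lat.0", 0 /*two_row*/, 0 /*bits32*/);
  NerscIO::writeRNGState(sRNG, pRNG, "ckpoint_rng.0");

  // Signature shown in the diff: readRNGState(serial, parallel, header, file).
  FieldMetaData header;
  NerscIO::readRNGState(sRNG, pRNG, header, "ckpoint_rng.0");

  Grid_finalize();
  return 0;
}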
|   | ||||
| @@ -40,7 +40,7 @@ const PerformanceCounter::PerformanceCounterConfig PerformanceCounter::Performan | ||||
|   { PERF_TYPE_HARDWARE, PERF_COUNT_HW_CPU_CYCLES          ,  "CPUCYCLES.........." , INSTRUCTIONS}, | ||||
|   { PERF_TYPE_HARDWARE, PERF_COUNT_HW_INSTRUCTIONS        ,  "INSTRUCTIONS......." , CPUCYCLES   }, | ||||
|     // 4 | ||||
| #ifdef AVX512 | ||||
| #ifdef KNL | ||||
|     { PERF_TYPE_RAW, RawConfig(0x40,0x04), "ALL_LOADS..........", CPUCYCLES    }, | ||||
|     { PERF_TYPE_RAW, RawConfig(0x01,0x04), "L1_MISS_LOADS......", L1D_READ_ACCESS  }, | ||||
|     { PERF_TYPE_RAW, RawConfig(0x40,0x04), "ALL_LOADS..........", L1D_READ_ACCESS    }, | ||||
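The table above registers raw (PERF_TYPE_RAW) hardware events when Grid is built for KNL instead of generic AVX512. For orientation only, here is a minimal sketch of how one such raw counter is opened and read through the standard Linux perf_event_open interface; it is not part of this diff, and the assumption that RawConfig(0x40,0x04) packs umask 0x40 and event code 0x04 into the usual Intel encoding (event in bits 0-7, umask in bits 8-15) is not shown in the hunk.

// Sketch: open one PERF_TYPE_RAW counter and read it around a region of code.
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <cstring>
#include <cstdint>
#include <cstdio>

static long perf_event_open(perf_event_attr* attr, pid_t pid, int cpu,
                            int group_fd, unsigned long flags) {
  return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main() {
  perf_event_attr attr;
  std::memset(&attr, 0, sizeof(attr));
  attr.type           = PERF_TYPE_RAW;
  attr.size           = sizeof(attr);
  attr.config         = (0x40u << 8) | 0x04u;  // assumed decoding of RawConfig(0x40,0x04)
  attr.disabled       = 1;
  attr.exclude_kernel = 1;

  int fd = (int)perf_event_open(&attr, 0 /*this process*/, -1 /*any cpu*/, -1, 0);
  if (fd < 0) { std::perror("perf_event_open"); return 1; }

  ioctl(fd, PERF_EVENT_IOC_RESET, 0);
  ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
  /* ... timed region ... */
  ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

  uint64_t count = 0;
  read(fd, &count, sizeof(count));
  std::printf("raw counter: %llu\n", (unsigned long long)count);
  close(fd);
  return 0;
}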
lib/qcd/action/fermion/AbstractEOFAFermion.h (new normal file, 100 lines)
							| @@ -0,0 +1,100 @@ | ||||
| /************************************************************************************* | ||||
|  | ||||
| Grid physics library, www.github.com/paboyle/Grid | ||||
|  | ||||
| Source file: ./lib/qcd/action/fermion/AbstractEOFAFermion.h | ||||
|  | ||||
| Copyright (C) 2017 | ||||
|  | ||||
| Author: Peter Boyle <pabobyle@ph.ed.ac.uk> | ||||
| Author: Peter Boyle <paboyle@ph.ed.ac.uk> | ||||
| Author: David Murphy <dmurphy@phys.columbia.edu> | ||||
|  | ||||
| This program is free software; you can redistribute it and/or modify | ||||
| it under the terms of the GNU General Public License as published by | ||||
| the Free Software Foundation; either version 2 of the License, or | ||||
| (at your option) any later version. | ||||
|  | ||||
| This program is distributed in the hope that it will be useful, | ||||
| but WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
| MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the | ||||
| GNU General Public License for more details. | ||||
|  | ||||
| You should have received a copy of the GNU General Public License along | ||||
| with this program; if not, write to the Free Software Foundation, Inc., | ||||
| 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||||
|  | ||||
| See the full license in the file "LICENSE" in the top level distribution directory | ||||
| *************************************************************************************/ | ||||
| /*  END LEGAL */ | ||||
| #ifndef  GRID_QCD_ABSTRACT_EOFA_FERMION_H | ||||
| #define  GRID_QCD_ABSTRACT_EOFA_FERMION_H | ||||
|  | ||||
| #include <Grid/qcd/action/fermion/CayleyFermion5D.h> | ||||
|  | ||||
| namespace Grid { | ||||
| namespace QCD { | ||||
|  | ||||
|   // DJM: Abstract base class for EOFA fermion types. | ||||
|   // Defines layout of additional EOFA-specific parameters and operators. | ||||
|   // Use to construct EOFA pseudofermion actions that are agnostic to | ||||
|   // Shamir / Mobius / etc., and ensure that no one can construct EOFA | ||||
|   // pseudofermion action with non-EOFA fermion type. | ||||
|   template<class Impl> | ||||
|   class AbstractEOFAFermion : public CayleyFermion5D<Impl> { | ||||
|     public: | ||||
|       INHERIT_IMPL_TYPES(Impl); | ||||
|  | ||||
|     public: | ||||
|       // Fermion operator: D(mq1) + shift*\gamma_{5}*R_{5}*\Delta_{\pm}(mq2,mq3)*P_{\pm} | ||||
|       RealD mq1; | ||||
|       RealD mq2; | ||||
|       RealD mq3; | ||||
|       RealD shift; | ||||
|       int pm; | ||||
|  | ||||
|       RealD alpha; // Mobius scale | ||||
|       RealD k;     // EOFA normalization constant | ||||
|  | ||||
|       virtual void Instantiatable(void) = 0; | ||||
|  | ||||
|       // EOFA-specific operations | ||||
|       // Force user to implement in derived classes | ||||
|       virtual void  Omega    (const FermionField& in, FermionField& out, int sign, int dag) = 0; | ||||
|       virtual void  Dtilde   (const FermionField& in, FermionField& out) = 0; | ||||
|       virtual void  DtildeInv(const FermionField& in, FermionField& out) = 0; | ||||
|  | ||||
|       // Implement derivatives in base class: | ||||
|       // for EOFA both DWF and Mobius just need d(Dw)/dU | ||||
|       virtual void MDeriv(GaugeField& mat, const FermionField& U, const FermionField& V, int dag){ | ||||
|         this->DhopDeriv(mat, U, V, dag); | ||||
|       }; | ||||
|       virtual void MoeDeriv(GaugeField& mat, const FermionField& U, const FermionField& V, int dag){ | ||||
|         this->DhopDerivOE(mat, U, V, dag); | ||||
|       }; | ||||
|       virtual void MeoDeriv(GaugeField& mat, const FermionField& U, const FermionField& V, int dag){ | ||||
|         this->DhopDerivEO(mat, U, V, dag); | ||||
|       }; | ||||
|  | ||||
|       // Recompute 5D coefficients for different value of shift constant | ||||
|       // (needed for heatbath loop over poles) | ||||
|       virtual void RefreshShiftCoefficients(RealD new_shift) = 0; | ||||
|  | ||||
|       // Constructors | ||||
|       AbstractEOFAFermion(GaugeField& _Umu, GridCartesian& FiveDimGrid, GridRedBlackCartesian& FiveDimRedBlackGrid, | ||||
|         GridCartesian& FourDimGrid, GridRedBlackCartesian& FourDimRedBlackGrid, | ||||
|         RealD _mq1, RealD _mq2, RealD _mq3, RealD _shift, int _pm, | ||||
|         RealD _M5, RealD _b, RealD _c, const ImplParams& p=ImplParams()) | ||||
|         : CayleyFermion5D<Impl>(_Umu, FiveDimGrid, FiveDimRedBlackGrid, FourDimGrid, FourDimRedBlackGrid, | ||||
|           _mq1, _M5, p), mq1(_mq1), mq2(_mq2), mq3(_mq3), shift(_shift), pm(_pm) | ||||
|       { | ||||
|         int Ls = this->Ls; | ||||
|         this->alpha = _b + _c; | ||||
|         this->k = this->alpha * (_mq3-_mq2) * std::pow(this->alpha+1.0,2*Ls) / | ||||
|                     ( std::pow(this->alpha+1.0,Ls) + _mq2*std::pow(this->alpha-1.0,Ls) ) / | ||||
|                     ( std::pow(this->alpha+1.0,Ls) + _mq3*std::pow(this->alpha-1.0,Ls) ); | ||||
|       }; | ||||
|   }; | ||||
| }} | ||||
|  | ||||
| #endif | ||||
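The new header defines only an interface: concrete EOFA actions derive from AbstractEOFAFermion and supply the operators it leaves pure virtual, while the constructor precomputes alpha = b + c and the normalisation constant k = alpha*(mq3-mq2)*(alpha+1)^(2Ls) / ( [(alpha+1)^Ls + mq2*(alpha-1)^Ls] * [(alpha+1)^Ls + mq3*(alpha-1)^Ls] ). As a rough illustration (not part of this commit), the sketch below shows the members a derived class has to provide; the class name and the trivial bodies are placeholders, not a valid Dirac operator.

// Hypothetical derived class, only to illustrate the pure-virtual surface of
// AbstractEOFAFermion as declared above.
#include <Grid/qcd/action/fermion/AbstractEOFAFermion.h>

namespace Grid {
namespace QCD {

  template<class Impl>
  class ToyEOFAFermion : public AbstractEOFAFermion<Impl> {
    public:
      INHERIT_IMPL_TYPES(Impl);

      virtual void Instantiatable(void) {};  // marks the class as constructible

      // EOFA-specific operators every concrete implementation must define
      // (placeholder bodies: copy input to output).
      virtual void Omega    (const FermionField& in, FermionField& out, int sign, int dag) { out = in; }
      virtual void Dtilde   (const FermionField& in, FermionField& out) { out = in; }
      virtual void DtildeInv(const FermionField& in, FermionField& out) { out = in; }

      // Called once per pole during the heatbath loop, as noted above.
      virtual void RefreshShiftCoefficients(RealD new_shift) { this->shift = new_shift; }

      ToyEOFAFermion(GaugeField& _Umu, GridCartesian& FiveDimGrid, GridRedBlackCartesian& FiveDimRedBlackGrid,
        GridCartesian& FourDimGrid, GridRedBlackCartesian& FourDimRedBlackGrid,
        RealD _mq1, RealD _mq2, RealD _mq3, RealD _shift, int _pm,
        RealD _M5, RealD _b, RealD _c, const ImplParams& p = ImplParams())
        : AbstractEOFAFermion<Impl>(_Umu, FiveDimGrid, FiveDimRedBlackGrid, FourDimGrid, FourDimRedBlackGrid,
            _mq1, _mq2, _mq3, _shift, _pm, _M5, _b, _c, p) {};
  };

}}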
Some files were not shown because too many files have changed in this diff.