Mirror of https://github.com/paboyle/Grid.git, synced 2025-11-03 21:44:33 +00:00
Compare commits

feature/mu...feature/co (1236 commits)
Commit list: 1236 commits in this comparison, newest first, beginning at de8b2dcca3 and ending at 0cd6b1858c. The per-commit author and date columns are empty in this mirror view, so the individual rows are not reproduced here.
.gitignore (vendored): 21 lines changed
@@ -83,6 +83,7 @@ ltmain.sh
 .Trashes
 ehthumbs.db
 Thumbs.db
+.dirstamp
 
 # build directory #
 ###################
@@ -93,14 +94,12 @@ build*/*
 *.xcodeproj/*
 build.sh
 .vscode
 *.code-workspace
 
 # Eigen source #
 ################
-lib/Eigen/*
-
-# FFTW source #
-################
-lib/fftw/*
+Grid/Eigen
+Eigen/*
 
 # libtool macros #
 ##################
@@ -111,15 +110,7 @@ m4/libtool.m4
 ################
 gh-pages/
 
-# Buck files #
-##############
-.buck*
-buck-out
-BUCK
-make-bin-BUCK.sh
-
 # generated sources #
 #####################
-lib/qcd/spin/gamma-gen/*.h
-lib/qcd/spin/gamma-gen/*.cc
+Grid/qcd/spin/gamma-gen/*.h
+Grid/qcd/spin/gamma-gen/*.cc
.travis.yml: 25 lines changed
@@ -9,6 +9,11 @@ matrix:
     - os:        osx
       osx_image: xcode8.3
       compiler: clang
+      env: PREC=single
+    - os:        osx
+      osx_image: xcode8.3
+      compiler: clang
+      env: PREC=double
       
 before_install:
     - export GRIDDIR=`pwd`
@@ -16,9 +21,11 @@ before_install:
     - if [[ "$TRAVIS_OS_NAME" == "linux" ]] && [[ "$CC" == "clang" ]]; then export PATH="${GRIDDIR}/clang/bin:${PATH}"; fi
     - if [[ "$TRAVIS_OS_NAME" == "linux" ]] && [[ "$CC" == "clang" ]]; then export LD_LIBRARY_PATH="${GRIDDIR}/clang/lib:${LD_LIBRARY_PATH}"; fi
     - if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then brew update; fi
-    - if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then brew install libmpc; fi
+    - if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then brew install libmpc openssl; fi
     
 install:
+    - export CWD=`pwd`
+    - echo $CWD
     - export CC=$CC$VERSION
    - export CXX=$CXX$VERSION
     - echo $PATH
@@ -31,16 +38,24 @@ install:
     - which $CXX
     - $CXX --version
+    - if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then export LDFLAGS='-L/usr/local/lib'; fi
+    - if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then export EXTRACONF='--with-openssl=/usr/local/opt/openssl'; fi
     
 script:
     - ./bootstrap.sh
     - mkdir build
     - cd build
-    - ../configure --enable-precision=single --enable-simd=SSE4 --enable-comms=none
+    - mkdir lime
+    - cd lime
+    - mkdir build
+    - cd build
+    - wget http://usqcd-software.github.io/downloads/c-lime/lime-1.3.2.tar.gz
+    - tar xf lime-1.3.2.tar.gz
+    - cd lime-1.3.2
+    - ./configure --prefix=$CWD/build/lime/install
     - make -j4
-    - ./benchmarks/Benchmark_dwf --threads 1 --debug-signals
-    - echo make clean
-    - ../configure --enable-precision=double --enable-simd=SSE4 --enable-comms=none
+    - make install
+    - cd $CWD/build
+    - ../configure --enable-precision=$PREC --enable-simd=SSE4 --enable-comms=none --with-lime=$CWD/build/lime/install ${EXTRACONF}
    - make -j4 
     - ./benchmarks/Benchmark_dwf --threads 1 --debug-signals
     - make check
@@ -48,6 +48,7 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
 #include <Grid/serialisation/Serialisation.h>
 #include <Grid/threads/Threads.h>
 #include <Grid/util/Util.h>
+#include <Grid/util/Sha.h>
 #include <Grid/communicator/Communicator.h> 
 #include <Grid/cartesian/Cartesian.h>    
 #include <Grid/tensors/Tensors.h>      
@@ -1,4 +1,9 @@
 #pragma once
+// Force Eigen to use MKL if Grid has been configured with --enable-mkl
+#ifdef USE_MKL
+#define EIGEN_USE_MKL_ALL
+#endif
+
 #if defined __GNUC__
 #pragma GCC diagnostic push
 #pragma GCC diagnostic ignored "-Wdeprecated-declarations"
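The hunk above makes the bundled Eigen route its kernels through MKL whenever the build defines USE_MKL, the macro Grid's --enable-mkl configure option emits. A minimal standalone sketch of the same pattern follows; the matrix sizes and the main() harness are illustrative, not taken from Grid:

// Opt into an optional backend only when the build system defines the macro.
// EIGEN_USE_MKL_ALL must be defined before the first Eigen header is seen.
#ifdef USE_MKL
#define EIGEN_USE_MKL_ALL
#endif
#include <Eigen/Dense>

int main() {
  Eigen::MatrixXd a = Eigen::MatrixXd::Random(64, 64);
  Eigen::MatrixXd b = Eigen::MatrixXd::Random(64, 64);
  // With EIGEN_USE_MKL_ALL defined this product dispatches to MKL BLAS;
  // without it Eigen's built-in kernels run. Results agree either way.
  Eigen::MatrixXd c = a * b;
  return c(0, 0) == c(0, 0) ? 0 : 1; // consume the result
}

Compiling with -DUSE_MKL (plus the MKL link flags) exercises the MKL path; omitting it falls back to plain Eigen, which is what makes the guard safe to ship.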
							
								
								
									
Grid/Makefile.am: 63 lines (new file)
@@ -0,0 +1,63 @@
+extra_sources=
+extra_headers=
+
+if BUILD_COMMS_MPI3
+  extra_sources+=communicator/Communicator_mpi3.cc
+  extra_sources+=communicator/Communicator_base.cc
+  extra_sources+=communicator/SharedMemoryMPI.cc
+  extra_sources+=communicator/SharedMemory.cc
+endif
+
+if BUILD_COMMS_NONE
+  extra_sources+=communicator/Communicator_none.cc
+  extra_sources+=communicator/Communicator_base.cc
+  extra_sources+=communicator/SharedMemoryNone.cc
+  extra_sources+=communicator/SharedMemory.cc
+endif
+
+if BUILD_HDF5
+  extra_sources+=serialisation/Hdf5IO.cc 
+  extra_headers+=serialisation/Hdf5IO.h
+  extra_headers+=serialisation/Hdf5Type.h
+endif
+
+all: version-cache
+
+version-cache:
+	@if [ `git status --porcelain | grep -v '??' | wc -l` -gt 0 ]; then\
+		a="uncommited changes";\
+	else\
+		a="clean";\
+	fi;\
+	echo "`git log -n 1 --format=format:"#define GITHASH \\"%H:%d $$a\\"%n" HEAD`" > vertmp;\
+	if [ -e version-cache ]; then\
+		d=`diff vertmp version-cache`;\
+		if [ "$${d}" != "" ]; then\
+			mv vertmp version-cache;\
+			rm -f Version.h;\
+		fi;\
+	else\
+		mv vertmp version-cache;\
+		rm -f Version.h;\
+	fi;\
+	rm -f vertmp
+
+Version.h:
+	cp version-cache Version.h
+
+.PHONY: version-cache
+
+#
+# Libraries
+#
+include Make.inc
+include Eigen.inc
+
+lib_LIBRARIES = libGrid.a
+
+CCFILES += $(extra_sources)
+HFILES  += $(extra_headers) Config.h Version.h
+
+libGrid_a_SOURCES              = $(CCFILES)
+libGrid_adir                   = $(includedir)/Grid
+nobase_dist_pkginclude_HEADERS = $(HFILES) $(eigen_files) $(eigen_unsupp_files)
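The version-cache rule above regenerates the GITHASH definition on every build, but only replaces Version.h when the hash or the clean/dirty state actually changes, so incremental builds are not invalidated each time. A sketch of how such a generated header is typically consumed, assuming Version.h carries the single GITHASH macro the rule writes (the consumer file itself is hypothetical, not part of this diff):

// Hypothetical consumer of the generated header.
#include <iostream>
#include "Version.h"  // assumed to provide: #define GITHASH "<hash>:<branch> <state>"

int main() {
  std::cout << "built from " << GITHASH << std::endl;
  return 0;
}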
@@ -37,37 +37,31 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
 #include <Grid/algorithms/approx/Chebyshev.h>
 #include <Grid/algorithms/approx/Remez.h>
 #include <Grid/algorithms/approx/MultiShiftFunction.h>
 #include <Grid/algorithms/approx/Forecast.h>
 
 #include <Grid/algorithms/iterative/Deflation.h>
 #include <Grid/algorithms/iterative/ConjugateGradient.h>
 #include <Grid/algorithms/iterative/ConjugateResidual.h>
 #include <Grid/algorithms/iterative/NormalEquations.h>
 #include <Grid/algorithms/iterative/SchurRedBlack.h>
 #include <Grid/algorithms/iterative/ConjugateGradientMultiShift.h>
 #include <Grid/algorithms/iterative/ConjugateGradientMixedPrec.h>
 
 // Lanczos support
 //#include <Grid/algorithms/iterative/MatrixUtils.h>
 #include <Grid/algorithms/iterative/BlockConjugateGradient.h>
 #include <Grid/algorithms/iterative/ConjugateGradientReliableUpdate.h>
+#include <Grid/algorithms/iterative/MinimalResidual.h>
+#include <Grid/algorithms/iterative/GeneralisedMinimalResidual.h>
+#include <Grid/algorithms/iterative/CommunicationAvoidingGeneralisedMinimalResidual.h>
+#include <Grid/algorithms/iterative/FlexibleGeneralisedMinimalResidual.h>
+#include <Grid/algorithms/iterative/FlexibleCommunicationAvoidingGeneralisedMinimalResidual.h>
+#include <Grid/algorithms/iterative/MixedPrecisionFlexibleGeneralisedMinimalResidual.h>
 #include <Grid/algorithms/iterative/ImplicitlyRestartedLanczos.h>
 #include <Grid/algorithms/CoarsenedMatrix.h>
 #include <Grid/algorithms/FFT.h>
 
-// Eigen/lanczos
-// EigCg
-// MCR
-// Pcg
-// Multishift CG
-// Hdcg
-// GCR
-// etc..
-
-// integrator/Leapfrog
-// integrator/Omelyan
-// integrator/ForceGradient
-
-// montecarlo/hmc
-// montecarlo/rhmc
-// montecarlo/metropolis
-// etc...
-
 
 #endif
@@ -103,29 +103,32 @@ namespace Grid {
     GridBase *CoarseGrid;
     GridBase *FineGrid;
     std::vector<Lattice<Fobj> > subspace;
+    int checkerboard;
 
-    Aggregation(GridBase *_CoarseGrid,GridBase *_FineGrid) : 
+  Aggregation(GridBase *_CoarseGrid,GridBase *_FineGrid,int _checkerboard) : 
     CoarseGrid(_CoarseGrid),
       FineGrid(_FineGrid),
-      subspace(nbasis,_FineGrid)
+      subspace(nbasis,_FineGrid),
+      checkerboard(_checkerboard)
 	{
 	};
   
     void Orthogonalise(void){
       CoarseScalar InnerProd(CoarseGrid); 
       std::cout << GridLogMessage <<" Gramm-Schmidt pass 1"<<std::endl;
       blockOrthogonalise(InnerProd,subspace);
       std::cout << GridLogMessage <<" Gramm-Schmidt pass 2"<<std::endl;
       blockOrthogonalise(InnerProd,subspace);
       //      std::cout << GridLogMessage <<" Gramm-Schmidt checking orthogonality"<<std::endl;
       //      CheckOrthogonal();
     } 
     void CheckOrthogonal(void){
       CoarseVector iProj(CoarseGrid); 
       CoarseVector eProj(CoarseGrid); 
       Lattice<CComplex> pokey(CoarseGrid);
       
       for(int i=0;i<nbasis;i++){
 	blockProject(iProj,subspace[i],subspace);
 
 	eProj=zero; 
-	for(int ss=0;ss<CoarseGrid->oSites();ss++){
+	parallel_for(int ss=0;ss<CoarseGrid->oSites();ss++){
 	  eProj._odata[ss](i)=CComplex(1.0);
 	}
 	eProj=eProj - iProj;
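Orthogonalise above runs blockOrthogonalise twice: a single Gram-Schmidt pass over nearly linearly dependent vectors can lose orthogonality to rounding, and one re-orthogonalisation pass restores it ("twice is enough"). A plain-vector illustration with classical Gram-Schmidt standing in for the block kernel; the deliberately ill-conditioned vectors and eps are assumptions for the demo, not Grid data:

// Demonstrates why one orthogonalisation pass can be insufficient.
#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>

using Vec = std::vector<double>;

static double dot(const Vec &a, const Vec &b) {
  double s = 0;
  for (size_t i = 0; i < a.size(); i++) s += a[i] * b[i];
  return s;
}

static void normalise(Vec &a) {
  double n = std::sqrt(dot(a, a));
  for (auto &x : a) x /= n;
}

// One classical Gram-Schmidt pass: all coefficients for v[i] are computed
// against the original v[i], which is where the precision loss comes from.
static void cgs_pass(std::vector<Vec> &v) {
  for (size_t i = 0; i < v.size(); i++) {
    std::vector<double> coef(i);
    for (size_t j = 0; j < i; j++) coef[j] = dot(v[j], v[i]);
    for (size_t j = 0; j < i; j++)
      for (size_t k = 0; k < v[i].size(); k++) v[i][k] -= coef[j] * v[j][k];
    normalise(v[i]);
  }
}

static double worst_overlap(const std::vector<Vec> &v) {
  double w = 0;
  for (size_t i = 0; i < v.size(); i++)
    for (size_t j = 0; j < i; j++) w = std::max(w, std::fabs(dot(v[i], v[j])));
  return w;
}

int main() {
  const double eps = 1e-8; // near-degeneracy scale (illustrative)
  std::vector<Vec> v = {{1, eps, 0, 0}, {1, 0, eps, 0}, {1, 0, 0, eps}};
  cgs_pass(v);
  std::printf("worst overlap after pass 1: %g\n", worst_overlap(v)); // large, ~0.5
  cgs_pass(v);
  std::printf("worst overlap after pass 2: %g\n", worst_overlap(v)); // ~machine eps
  return 0;
}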
@@ -137,6 +140,7 @@ namespace Grid {
       blockProject(CoarseVec,FineVec,subspace);
     }
     void PromoteFromSubspace(const CoarseVector &CoarseVec,FineField &FineVec){
+      FineVec.checkerboard = subspace[0].checkerboard;
       blockPromote(CoarseVec,FineVec,subspace);
     }
     void CreateSubspaceRandom(GridParallelRNG &RNG){
@@ -147,6 +151,7 @@ namespace Grid {
       Orthogonalise();
     }
 
+    /*
     virtual void CreateSubspaceLanczos(GridParallelRNG  &RNG,LinearOperatorBase<FineField> &hermop,int nn=nbasis) 
     {
       // Run a Lanczos with sloppy convergence
@@ -195,7 +200,7 @@ namespace Grid {
 	  std::cout << GridLogMessage <<"subspace["<<b<<"] = "<<norm2(subspace[b])<<std::endl;
 	}
     }
-
+    */
     virtual void CreateSubspace(GridParallelRNG  &RNG,LinearOperatorBase<FineField> &hermop,int nn=nbasis) {
 
       RealD scale;
@@ -206,6 +211,7 @@ namespace Grid {
 
       for(int b=0;b<nn;b++){
 	
+	subspace[b] = zero;
 	gaussian(RNG,noise);
 	scale = std::pow(norm2(noise),-0.5); 
 	noise=noise*scale;
@@ -291,12 +297,57 @@ namespace Grid {
     };
 
     RealD Mdag (const CoarseVector &in, CoarseVector &out){
-      return M(in,out);
+      // // corresponds to Petrov-Galerkin coarsening
+      // return M(in,out);
+
+      // corresponds to Galerkin coarsening
+      CoarseVector tmp(Grid());
+      G5C(tmp, in);
+      M(tmp, out);
+      G5C(out, out);
+      return norm2(out);
     };
 
-    // Defer support for further coarsening for now
-    void Mdiag    (const CoarseVector &in,  CoarseVector &out){};
-    void Mdir     (const CoarseVector &in,  CoarseVector &out,int dir, int disp){};
+    void Mdir(const CoarseVector &in, CoarseVector &out, int dir, int disp){
+
+      conformable(_grid,in._grid);
+      conformable(in._grid,out._grid);
+
+      SimpleCompressor<siteVector> compressor;
+      Stencil.HaloExchange(in,compressor);
+
+      auto point = [dir, disp](){
+        if(dir == 0 and disp == 0)
+          return 8;
+        else
+          return (4 * dir + 1 - disp) / 2;
+      }();
+
+      parallel_for(int ss=0;ss<Grid()->oSites();ss++){
+        siteVector res = zero;
+        siteVector nbr;
+        int ptype;
+        StencilEntry *SE;
+
+        SE=Stencil.GetEntry(ptype,point,ss);
+
+        if(SE->_is_local&&SE->_permute) {
+          permute(nbr,in._odata[SE->_offset],ptype);
+        } else if(SE->_is_local) {
+          nbr = in._odata[SE->_offset];
+        } else {
+          nbr = Stencil.CommBuf()[SE->_offset];
+        }
+
+        res = res + A[point]._odata[ss]*nbr;
+
+        vstream(out._odata[ss],res);
+      }
+    };
+
+    void Mdiag(const CoarseVector &in, CoarseVector &out){
+      Mdir(in, out, 0, 0); // use the self coupling (= last) point of the stencil
+    };
 
     CoarsenedMatrix(GridCartesian &CoarseGrid) 	: 
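Two things happen in the hunk above. Mdag switches to Galerkin coarsening, wrapping M in G5C calls, which evidently relies on the coarse operator inheriting gamma-5 hermiticity (Mdag = G5 M G5) from the fine one. And Mdir gains a real implementation that picks a single stencil point from (dir, disp). A standalone check of that point mapping, assuming four directions with unit displacements and point 8 as the self coupling, exactly as the lambda computes it:

// Standalone check of the (dir, disp) -> stencil-point mapping used by Mdir.
#include <cassert>

static int stencil_point(int dir, int disp) {
  if (dir == 0 && disp == 0) return 8;  // self coupling, last stencil point
  return (4 * dir + 1 - disp) / 2;      // forward/backward hops map to 0..7
}

int main() {
  assert(stencil_point(0, +1) == 0 && stencil_point(0, -1) == 1);
  assert(stencil_point(1, +1) == 2 && stencil_point(1, -1) == 3);
  assert(stencil_point(2, +1) == 4 && stencil_point(2, -1) == 5);
  assert(stencil_point(3, +1) == 6 && stencil_point(3, -1) == 7);
  assert(stencil_point(0, 0) == 8);
  return 0;
}

Forward displacements land on even points and backward ones on odd points, one pair per direction, which is why Mdiag can reuse Mdir with (0, 0) to read the self-coupling term.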
@@ -412,7 +463,7 @@ namespace Grid {
       std::cout<<GridLogMessage<<"Computed Coarse Operator"<<std::endl;
 #endif
       //      ForceHermitian();
-      AssertHermitian();
+      // AssertHermitian();
       // ForceDiagonal();
     }
     void ForceDiagonal(void) {
@@ -230,6 +230,7 @@ namespace Grid {
       // Barrel shift and collect global pencil
       std::vector<int> lcoor(Nd), gcoor(Nd);
       result = source;
+      int pc = processor_coor[dim];
       for(int p=0;p<processors[dim];p++) {
         PARALLEL_REGION
         {
@@ -240,7 +241,8 @@ namespace Grid {
           for(int idx=0;idx<sgrid->lSites();idx++) {
             sgrid->LocalIndexToLocalCoor(idx,cbuf);
             peekLocalSite(s,result,cbuf);
-            cbuf[dim]+=p*L;
+	    cbuf[dim]+=((pc+p) % processors[dim])*L;
+	    //            cbuf[dim]+=p*L;
             pokeLocalSite(s,pgbuf,cbuf);
           }
         }
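A sketch of the corrected barrel-shift offset in the hunk above: with P ranks along the FFT dimension, each of local extent L, the slab a rank holds after p rotation steps originated on rank (pc + p) % P, so it belongs at global offset ((pc + p) % P) * L. The old p * L was only correct for pc == 0. The P and L values here are illustrative:

// Compare the corrected offset with the old one for every rank and step.
#include <cstdio>

int main() {
  const int P = 4, L = 8; // assumed processor count and local extent
  for (int pc = 0; pc < P; pc++)
    for (int p = 0; p < P; p++)
      std::printf("rank %d, step %d -> offset %d (old code: %d)\n",
                  pc, p, ((pc + p) % P) * L, p * L);
  return 0;
}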
@@ -278,7 +280,6 @@ namespace Grid {
       flops+= flops_call*NN;
       
       // writing out result
-      int pc = processor_coor[dim];
       PARALLEL_REGION
       {
         std::vector<int> clbuf(Nd), cgbuf(Nd);
@@ -51,7 +51,7 @@ namespace Grid {
 
       virtual void Op     (const Field &in, Field &out) = 0; // Abstract base
       virtual void AdjOp  (const Field &in, Field &out) = 0; // Abstract base
-      virtual void HermOpAndNorm(const Field &in, Field &out,RealD &n1,RealD &n2)=0;
+      virtual void HermOpAndNorm(const Field &in, Field &out,RealD &n1,RealD &n2) = 0;
       virtual void HermOp(const Field &in, Field &out)=0;
     };
 
@@ -162,15 +162,10 @@ namespace Grid {
 	_Mat.M(in,out);
       }
       void HermOpAndNorm(const Field &in, Field &out,RealD &n1,RealD &n2){
-	ComplexD dot;
-
 	_Mat.M(in,out);
 	
-	dot= innerProduct(in,out);
-	n1=real(dot);
-
-	dot = innerProduct(out,out);
-	n2=real(dot);
+	ComplexD dot= innerProduct(in,out); n1=real(dot);
+	n2=norm2(out);
       }
       void HermOp(const Field &in, Field &out){
 	_Mat.M(in,out);
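The rewrite above folds real(innerProduct(out,out)) into norm2(out), which is safe because for any complex vector the self inner product is real and equals the squared norm. A minimal check with std::complex standing in for Grid fields:

// Verifies real(<x,x>) == norm2(x) for a complex vector.
#include <cassert>
#include <cmath>
#include <complex>
#include <vector>

int main() {
  std::vector<std::complex<double>> x = {{1.0, 2.0}, {3.0, -1.0}, {0.5, 0.25}};
  std::complex<double> ip = 0;
  double n2 = 0;
  for (const auto &v : x) {
    ip += std::conj(v) * v;  // innerProduct(x, x)
    n2 += std::norm(v);      // norm2(x): sum of |v|^2
  }
  assert(std::abs(ip.imag()) < 1e-14);
  assert(std::abs(ip.real() - n2) < 1e-12);
  return 0;
}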
@@ -189,13 +184,15 @@ namespace Grid {
       virtual  RealD MpcDag   (const Field &in, Field &out) =0;
       virtual void MpcDagMpc(const Field &in, Field &out,RealD &ni,RealD &no) {
       Field tmp(in._grid);
+      tmp.checkerboard = in.checkerboard;
 	ni=Mpc(in,tmp);
 	no=MpcDag(tmp,out);
       }
-      void HermOpAndNorm(const Field &in, Field &out,RealD &n1,RealD &n2){
+      virtual void HermOpAndNorm(const Field &in, Field &out,RealD &n1,RealD &n2){
+      out.checkerboard = in.checkerboard;
 	MpcDagMpc(in,out,n1,n2);
       }
-      void HermOp(const Field &in, Field &out){
+      virtual void HermOp(const Field &in, Field &out){
 	RealD n1,n2;
 	HermOpAndNorm(in,out,n1,n2);
       }
@@ -212,7 +209,6 @@ namespace Grid {
       void OpDir  (const Field &in, Field &out,int dir,int disp) {
 	assert(0);
       }
-
     };
     template<class Matrix,class Field>
       class SchurDiagMooeeOperator :  public SchurOperatorBase<Field> {
@@ -222,12 +218,14 @@ namespace Grid {
       SchurDiagMooeeOperator (Matrix &Mat): _Mat(Mat){};
       virtual  RealD Mpc      (const Field &in, Field &out) {
       Field tmp(in._grid);
-//	std::cout <<"grid pointers: in._grid="<< in._grid << " out._grid=" << out._grid << "  _Mat.Grid=" << _Mat.Grid() << " _Mat.RedBlackGrid=" << _Mat.RedBlackGrid() << std::endl;
+      tmp.checkerboard = !in.checkerboard;
+	//std::cout <<"grid pointers: in._grid="<< in._grid << " out._grid=" << out._grid << "  _Mat.Grid=" << _Mat.Grid() << " _Mat.RedBlackGrid=" << _Mat.RedBlackGrid() << std::endl;
 
 	_Mat.Meooe(in,tmp);
 	_Mat.MooeeInv(tmp,out);
 	_Mat.Meooe(out,tmp);
 
+      //std::cout << "cb in " << in.checkerboard << "  cb out " << out.checkerboard << std::endl;
 	_Mat.Mooee(in,out);
 	return axpy_norm(out,-1.0,tmp,out);
       }
@@ -270,7 +268,6 @@ namespace Grid {
 | 
			
		||||
	return axpy_norm(out,-1.0,tmp,in);
 | 
			
		||||
      }
 | 
			
		||||
    };
 | 
			
		||||
 | 
			
		||||
    template<class Matrix,class Field>
 | 
			
		||||
      class SchurDiagTwoOperator :  public SchurOperatorBase<Field> {
 | 
			
		||||
    protected:
 | 
			
		||||
@@ -299,6 +296,82 @@ namespace Grid {
 | 
			
		||||
	return axpy_norm(out,-1.0,tmp,in);
 | 
			
		||||
      }
 | 
			
		||||
    };
 | 
			
		||||
    ///////////////////////////////////////////////////////////////////////////////////////////////////
 | 
			
		||||
    // Left  handed Moo^-1 ; (Moo - Moe Mee^-1 Meo) psi = eta  -->  ( 1 - Moo^-1 Moe Mee^-1 Meo ) psi = Moo^-1 eta
 | 
			
		||||
    // Right handed Moo^-1 ; (Moo - Moe Mee^-1 Meo) Moo^-1 Moo psi = eta  -->  ( 1 - Moe Mee^-1 Meo ) Moo^-1 phi=eta ; psi = Moo^-1 phi
 | 
			
		||||
    ///////////////////////////////////////////////////////////////////////////////////////////////////
 | 
			
		||||
    template<class Matrix,class Field> using SchurDiagOneRH = SchurDiagTwoOperator<Matrix,Field> ;
 | 
			
		||||
    template<class Matrix,class Field> using SchurDiagOneLH = SchurDiagOneOperator<Matrix,Field> ;
 | 
			
		||||
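The left/right-handed placement of Moo^-1 in the comment above can be checked on a small dense system; an Eigen sketch (block sizes and entries are made up, only the algebra mirrors the comment) verifying both placements reproduce the direct Schur-complement solve:

#include <Eigen/Dense>
#include <iostream>

int main(void) {
  using M = Eigen::Matrix2d;
  using V = Eigen::Vector2d;
  M Moo, Mee, Moe, Meo;
  Moo << 4, 1, 0, 3;  Mee << 5, 0, 1, 4;
  Moe << 1, 0, 0, 1;  Meo << 0, 1, 1, 0;
  V eta(1.0, 2.0);

  M S = Moo - Moe * Mee.inverse() * Meo;   // Schur complement
  V psi_direct = S.inverse() * eta;

  // Left handed: (1 - Moo^-1 Moe Mee^-1 Meo) psi = Moo^-1 eta
  M I = M::Identity();
  V psi_lh = (I - Moo.inverse()*Moe*Mee.inverse()*Meo).inverse()
             * (Moo.inverse()*eta);

  // Right handed: solve for phi = Moo psi, then psi = Moo^-1 phi
  V phi    = (I - Moe*Mee.inverse()*Meo*Moo.inverse()).inverse() * eta;
  V psi_rh = Moo.inverse() * phi;

  std::cout << (psi_lh - psi_direct).norm() << " "
            << (psi_rh - psi_direct).norm() << std::endl; // both ~0
  return 0;
}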
    ///////////////////////////////////////////////////////////////////////////////////////////////////
    //  Staggered use
    ///////////////////////////////////////////////////////////////////////////////////////////////////
    template<class Matrix,class Field>
      class SchurStaggeredOperator :  public SchurOperatorBase<Field> {
    protected:
      Matrix &_Mat;
      Field tmp;
      RealD mass;
      double tMpc;
      double tIP;
      double tMeo;
      double taxpby_norm;
      uint64_t ncall;
    public:
      void Report(void)
      {
	std::cout << GridLogMessage << " HermOpAndNorm.Mpc "<< tMpc/ncall<<" usec "<<std::endl;
	std::cout << GridLogMessage << " HermOpAndNorm.IP  "<< tIP /ncall<<" usec "<<std::endl;
	std::cout << GridLogMessage << " Mpc.MeoMoe        "<< tMeo/ncall<<" usec "<<std::endl;
	std::cout << GridLogMessage << " Mpc.axpby_norm    "<< taxpby_norm/ncall<<" usec "<<std::endl;
      }
      SchurStaggeredOperator (Matrix &Mat): _Mat(Mat), tmp(_Mat.RedBlackGrid())
      {
	assert( _Mat.isTrivialEE() );
	mass = _Mat.Mass();
	tMpc=0;
	tIP =0;
        tMeo=0;
        taxpby_norm=0;
	ncall=0;
      }
      virtual void HermOpAndNorm(const Field &in, Field &out,RealD &n1,RealD &n2){
	ncall++;
	tMpc-=usecond();
	n2 = Mpc(in,out);
	tMpc+=usecond();
	tIP-=usecond();
	ComplexD dot= innerProduct(in,out);
	tIP+=usecond();
	n1 = real(dot);
      }
      virtual void HermOp(const Field &in, Field &out){
	ncall++;
	tMpc-=usecond();
	_Mat.Meooe(in,out);
	_Mat.Meooe(out,tmp);
	tMpc+=usecond();
	taxpby_norm-=usecond();
	axpby(out,-1.0,mass*mass,tmp,in);
	taxpby_norm+=usecond();
      }
      virtual  RealD Mpc      (const Field &in, Field &out) {
	tMeo-=usecond();
	_Mat.Meooe(in,out);
	_Mat.Meooe(out,tmp);
	tMeo+=usecond();
	taxpby_norm-=usecond();
	RealD nn=axpby_norm(out,-1.0,mass*mass,tmp,in);
	taxpby_norm+=usecond();
	return nn;
      }
      virtual  RealD MpcDag   (const Field &in, Field &out){
	return Mpc(in,out);
      }
      virtual void MpcDagMpc(const Field &in, Field &out,RealD &ni,RealD &no) {
	assert(0);// Never need with staggered
      }
    };
    template<class Matrix,class Field> using SchurStagOperator = SchurStaggeredOperator<Matrix,Field>;

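Mpc above relies on the axpby_norm(out,a,b,x,y) contract: out = a*x + b*y with norm2(out) returned, so the staggered Schur operator mass^2 - Meooe^2 and its norm come out of one fused pass. A minimal stand-in with plain doubles (everything here is illustrative, only the call contract mirrors the code):

#include <cstdio>
#include <vector>

double axpby_norm(std::vector<double> &out, double a, double b,
                  const std::vector<double> &x, const std::vector<double> &y) {
  double nn = 0;
  for (size_t i = 0; i < x.size(); i++) {
    out[i] = a * x[i] + b * y[i]; // fused update...
    nn += out[i] * out[i];        // ...and norm accumulation
  }
  return nn;
}

int main(void) {
  double mass = 0.1;
  std::vector<double> MeoMoe_in = {0.3, -0.2}, in = {1.0, 2.0}, out(2);
  // Mpc: out = -1.0*(Meooe Meooe in) + mass^2 * in, returning |out|^2
  double nn = axpby_norm(out, -1.0, mass * mass, MeoMoe_in, in);
  std::printf("norm2 = %f\n", nn);
  return 0;
}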
    /////////////////////////////////////////////////////////////
@@ -307,6 +380,12 @@ namespace Grid {
    template<class Field> class OperatorFunction {
    public:
      virtual void operator() (LinearOperatorBase<Field> &Linop, const Field &in, Field &out) = 0;
      virtual void operator() (LinearOperatorBase<Field> &Linop, const std::vector<Field> &in,std::vector<Field> &out) {
	assert(in.size()==out.size());
	for(int k=0;k<in.size();k++){
	  (*this)(Linop,in[k],out[k]);
	}
      };
    };

    template<class Field> class LinearFunction {
@@ -314,6 +393,14 @@ namespace Grid {
      virtual void operator() (const Field &in, Field &out) = 0;
    };

    template<class Field> class IdentityLinearFunction : public LinearFunction<Field> {
    public:
      void operator() (const Field &in, Field &out){
	out = in;
      };
    };


    /////////////////////////////////////////////////////////////
    // Base classes for Multishift solvers for operators
    /////////////////////////////////////////////////////////////
@@ -336,6 +423,64 @@ namespace Grid {
     };
    */

  ////////////////////////////////////////////////////////////////////////////////////////////
  // Hermitian operator Linear function and operator function
  ////////////////////////////////////////////////////////////////////////////////////////////
    template<class Field>
    class HermOpOperatorFunction : public OperatorFunction<Field> {
      void operator() (LinearOperatorBase<Field> &Linop, const Field &in, Field &out) {
	Linop.HermOp(in,out);
      };
    };

    template<typename Field>
      class PlainHermOp : public LinearFunction<Field> {
    public:
      LinearOperatorBase<Field> &_Linop;

      PlainHermOp(LinearOperatorBase<Field>& linop) : _Linop(linop)
      {}

      void operator()(const Field& in, Field& out) {
	_Linop.HermOp(in,out);
      }
    };

    template<typename Field>
    class FunctionHermOp : public LinearFunction<Field> {
    public:
      OperatorFunction<Field>   & _poly;
      LinearOperatorBase<Field> &_Linop;

      FunctionHermOp(OperatorFunction<Field> & poly,LinearOperatorBase<Field>& linop)
	: _poly(poly), _Linop(linop) {};

      void operator()(const Field& in, Field& out) {
	_poly(_Linop,in,out);
      }
    };

  template<class Field>
  class Polynomial : public OperatorFunction<Field> {
  private:
    std::vector<RealD> Coeffs;
  public:
    Polynomial(std::vector<RealD> &_Coeffs) : Coeffs(_Coeffs) { };

    // Implement the required interface
    void operator() (LinearOperatorBase<Field> &Linop, const Field &in, Field &out) {

      Field AtoN(in._grid);
      Field Mtmp(in._grid);
      AtoN = in;
      out = AtoN*Coeffs[0];
      for(int n=1;n<Coeffs.size();n++){
	Mtmp = AtoN;
	Linop.HermOp(Mtmp,AtoN);
	out=out+AtoN*Coeffs[n];
      }
    };
  };

}

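Polynomial above evaluates out = sum_n Coeffs[n] A^n in by repeated HermOp application, keeping only the running power A^n in. A dense Eigen sketch of the same accumulation (matrix and coefficients are made-up stand-ins):

#include <Eigen/Dense>
#include <iostream>
#include <vector>

int main(void) {
  Eigen::Matrix2d A;
  A << 2, 1, 1, 3;                       // Hermitian stand-in for HermOp
  Eigen::Vector2d in(1.0, -1.0);
  std::vector<double> Coeffs = {0.5, -1.0, 0.25}; // p(x)=0.5 - x + 0.25x^2

  Eigen::Vector2d AtoN = in;             // holds A^n in
  Eigen::Vector2d out  = Coeffs[0] * AtoN;
  for (size_t n = 1; n < Coeffs.size(); n++) {
    AtoN = A * AtoN;                     // Linop.HermOp(Mtmp, AtoN)
    out += Coeffs[n] * AtoN;
  }
  std::cout << out.transpose() << std::endl;
  return 0;
}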
@@ -55,6 +55,14 @@ namespace Grid {
    template<class Field> class CheckerBoardedSparseMatrixBase : public SparseMatrixBase<Field> {
    public:
      virtual GridBase *RedBlackGrid(void)=0;

      //////////////////////////////////////////////////////////////////////
      // Query the even even properties to make algorithmic decisions
      //////////////////////////////////////////////////////////////////////
      virtual RealD  Mass(void)        { return 0.0; };
      virtual int    ConstEE(void)     { return 0; }; // Disable assumptions unless overridden
      virtual int    isTrivialEE(void) { return 0; }; // by a derived class that knows better

      // half checkerboard operations
      virtual  void Meooe    (const Field &in, Field &out)=0;
      virtual  void Mooee    (const Field &in, Field &out)=0;
@@ -8,6 +8,7 @@

Author: Peter Boyle <paboyle@ph.ed.ac.uk>
Author: paboyle <paboyle@ph.ed.ac.uk>
Author: Christoph Lehner <clehner@bnl.gov>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
@@ -33,41 +34,12 @@ Author: paboyle <paboyle@ph.ed.ac.uk>

namespace Grid {

  ////////////////////////////////////////////////////////////////////////////////////////////
  // Simple general polynomial with user supplied coefficients
  ////////////////////////////////////////////////////////////////////////////////////////////
  template<class Field>
  class HermOpOperatorFunction : public OperatorFunction<Field> {
    void operator() (LinearOperatorBase<Field> &Linop, const Field &in, Field &out) {
      Linop.HermOp(in,out);
    };
  };

  template<class Field>
  class Polynomial : public OperatorFunction<Field> {
  private:
    std::vector<RealD> Coeffs;
  public:
    Polynomial(std::vector<RealD> &_Coeffs) : Coeffs(_Coeffs) { };

    // Implement the required interface
    void operator() (LinearOperatorBase<Field> &Linop, const Field &in, Field &out) {

      Field AtoN(in._grid);
      Field Mtmp(in._grid);
      AtoN = in;
      out = AtoN*Coeffs[0];
//            std::cout <<"Poly in " <<norm2(in)<<" size "<< Coeffs.size()<<std::endl;
//            std::cout <<"Coeffs[0]= "<<Coeffs[0]<< " 0 " <<norm2(out)<<std::endl;
      for(int n=1;n<Coeffs.size();n++){
	Mtmp = AtoN;
	Linop.HermOp(Mtmp,AtoN);
	out=out+AtoN*Coeffs[n];
//            std::cout <<"Coeffs "<<n<<"= "<< Coeffs[n]<< " 0 " <<std::endl;
//		std::cout << n<<" " <<norm2(out)<<std::endl;
      }
    };
  };
struct ChebyParams : Serializable {
  GRID_SERIALIZABLE_CLASS_MEMBERS(ChebyParams,
				  RealD, alpha,
				  RealD, beta,
				  int, Npoly);
};

  ////////////////////////////////////////////////////////////////////////////////////////////
  // Generic Chebyshev approximations
@@ -83,7 +55,9 @@ namespace Grid {
  public:
    void csv(std::ostream &out){
      RealD diff = hi-lo;
      for (RealD x=lo-0.2*diff; x<hi+0.2*diff; x+=(hi-lo)/1000) {
      RealD delta = (hi-lo)*1.0e-9;
      for (RealD x=lo; x<hi; x+=delta) {
	delta*=1.1;
	RealD f = approx(x);
	out<< x<<" "<<f<<std::endl;
      }
@@ -99,6 +73,7 @@ namespace Grid {
    };

    Chebyshev(){};
    Chebyshev(ChebyParams p){ Init(p.alpha,p.beta,p.Npoly);};
    Chebyshev(RealD _lo,RealD _hi,int _order, RealD (* func)(RealD) ) {Init(_lo,_hi,_order,func);};
    Chebyshev(RealD _lo,RealD _hi,int _order) {Init(_lo,_hi,_order);};

@@ -193,6 +168,47 @@ namespace Grid {
      return sum;
    };

    RealD approxD(RealD x)
    {
      RealD Un;
      RealD Unm;
      RealD Unp;

      RealD y=( x-0.5*(hi+lo))/(0.5*(hi-lo));

      RealD U0=1;
      RealD U1=2*y;

      RealD sum;
      sum = Coeffs[1]*U0;
      sum+= Coeffs[2]*U1*2.0;

      Un =U1;
      Unm=U0;
      for(int i=2;i<order-1;i++){
	Unp=2*y*Un-Unm;
	Unm=Un;
	Un =Unp;
	sum+= Un*Coeffs[i+1]*(i+1.0);
      }
      return sum/(0.5*(hi-lo));
    };

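approxD above uses the identity T_n'(y) = n U_{n-1}(y) (Chebyshev polynomials of the first and second kind) plus the chain-rule factor 1/(0.5*(hi-lo)) from the linear map of [lo,hi] onto [-1,1]. A short finite-difference check of that identity with made-up coefficients:

#include <cmath>
#include <cstdio>
#include <vector>

double T(int n, double y) { return std::cos(n * std::acos(y)); }
double U(int n, double y) {                 // sin((n+1)t)/sin(t), y=cos(t)
  double t = std::acos(y);
  return std::sin((n + 1) * t) / std::sin(t);
}

int main(void) {
  std::vector<double> c = {0.3, -0.7, 0.2, 0.05};
  double y = 0.4, h = 1e-6, f1 = 0, f0 = 0, d = 0;
  for (int n = 0; n < (int)c.size(); n++) {
    f1 += c[n] * T(n, y + h);
    f0 += c[n] * T(n, y - h);
    if (n >= 1) d += c[n] * n * U(n - 1, y); // analytic derivative
  }
  std::printf("fd=%.8f  analytic=%.8f\n", (f1 - f0) / (2 * h), d);
  return 0;
}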
    RealD approxInv(RealD z, RealD x0, int maxiter, RealD resid) {
      RealD x = x0;
      RealD eps;

      int i;
      for (i=0;i<maxiter;i++) {
	eps = approx(x) - z;
	if (fabs(eps / z) < resid)
	  return x;
	x = x - eps / approxD(x);
      }

      return std::numeric_limits<double>::quiet_NaN();
    }

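approxInv above is a plain Newton iteration x <- x - (approx(x)-z)/approxD(x) on the Chebyshev interpolant, returning quiet_NaN on non-convergence. The same scheme on a toy cubic, with illustrative stand-ins for approx/approxD:

#include <cmath>
#include <cstdio>
#include <limits>

double approxInv(double z, double x0, int maxiter, double resid) {
  double x = x0;
  for (int i = 0; i < maxiter; i++) {
    double f   = x * x * x;   // stand-in for approx(x)
    double fd  = 3 * x * x;   // stand-in for approxD(x)
    double eps = f - z;
    if (std::fabs(eps / z) < resid) return x;
    x = x - eps / fd;         // Newton update
  }
  return std::numeric_limits<double>::quiet_NaN();
}

int main(void) {
  std::printf("cbrt(8) ~ %f\n", approxInv(8.0, 1.5, 50, 1e-12)); // ~2
  return 0;
}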
    // Implement the required interface
    void operator() (LinearOperatorBase<Field> &Linop, const Field &in, Field &out) {

Grid/algorithms/approx/Forecast.h (new file, 152 lines)
@@ -0,0 +1,152 @@
/*************************************************************************************

Grid physics library, www.github.com/paboyle/Grid

Source file: ./lib/algorithms/approx/Forecast.h

Copyright (C) 2015

Author: Peter Boyle <paboyle@ph.ed.ac.uk>
Author: paboyle <paboyle@ph.ed.ac.uk>
Author: David Murphy <dmurphy@phys.columbia.edu>

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/*  END LEGAL */

#ifndef INCLUDED_FORECAST_H
#define INCLUDED_FORECAST_H

namespace Grid {

  // Abstract base class.
  // Takes a matrix (Mat), a source (phi), and a vector of Fields (chi)
  // and returns a forecasted solution to the system D*psi = phi (psi).
  template<class Matrix, class Field>
  class Forecast
  {
    public:
      virtual Field operator()(Matrix &Mat, const Field& phi, const std::vector<Field>& chi) = 0;
  };

  // Implementation of Brower et al.'s chronological inverter (arXiv:hep-lat/9509012),
  // used to forecast solutions across poles of the EOFA heatbath.
  //
  // Modified from CPS (cps_pp/src/util/dirac_op/d_op_base/comsrc/minresext.C)
  template<class Matrix, class Field>
  class ChronoForecast : public Forecast<Matrix,Field>
  {
    public:
      Field operator()(Matrix &Mat, const Field& phi, const std::vector<Field>& prev_solns)
      {
        int degree = prev_solns.size();
        Field chi(phi); // forecasted solution

        // Trivial cases
        if(degree == 0){ chi = zero; return chi; }
        else if(degree == 1){ return prev_solns[0]; }

        RealD dot;
        ComplexD xp;
        Field r(phi); // residual
        Field Mv(phi);
        std::vector<Field> v(prev_solns); // orthonormalized previous solutions
        std::vector<Field> MdagMv(degree,phi);

        // Array to hold the matrix elements
        std::vector<std::vector<ComplexD>> G(degree, std::vector<ComplexD>(degree));

        // Solution and source vectors
        std::vector<ComplexD> a(degree);
        std::vector<ComplexD> b(degree);

        // Orthonormalize the vector basis
        for(int i=0; i<degree; i++){
          v[i] *= 1.0/std::sqrt(norm2(v[i]));
          for(int j=i+1; j<degree; j++){ v[j] -= innerProduct(v[i],v[j]) * v[i]; }
        }

        // Perform sparse matrix multiplication and construct rhs
        for(int i=0; i<degree; i++){
          b[i] = innerProduct(v[i],phi);
          Mat.M(v[i],Mv);
          Mat.Mdag(Mv,MdagMv[i]);
          G[i][i] = innerProduct(v[i],MdagMv[i]);
        }

        // Construct the matrix
        for(int j=0; j<degree; j++){
        for(int k=j+1; k<degree; k++){
          G[j][k] = innerProduct(v[j],MdagMv[k]);
          G[k][j] = std::conj(G[j][k]);
        }}

        // Gauss-Jordan elimination with partial pivoting
        for(int i=0; i<degree; i++){

          // Perform partial pivoting
          int k = i;
          for(int j=i+1; j<degree; j++){ if(std::abs(G[j][j]) > std::abs(G[k][k])){ k = j; } }
          if(k != i){
            xp = b[k];
            b[k] = b[i];
            b[i] = xp;
            for(int j=0; j<degree; j++){
              xp = G[k][j];
              G[k][j] = G[i][j];
              G[i][j] = xp;
            }
          }

          // Convert matrix to upper triangular form
          for(int j=i+1; j<degree; j++){
            xp = G[j][i]/G[i][i];
            b[j] -= xp * b[i];
            for(int k=0; k<degree; k++){ G[j][k] -= xp*G[i][k]; }
          }
        }

        // Use Gaussian elimination to solve equations and calculate initial guess
        chi = zero;
        r = phi;
        for(int i=degree-1; i>=0; i--){
          a[i] = 0.0;
          for(int j=i+1; j<degree; j++){ a[i] += G[i][j] * a[j]; }
          a[i] = (b[i]-a[i])/G[i][i];
          chi += a[i]*v[i];
          r -= a[i]*MdagMv[i];
        }

        RealD true_r(0.0);
        ComplexD tmp;
        for(int i=0; i<degree; i++){
          tmp = -b[i];
          for(int j=0; j<degree; j++){ tmp += G[i][j]*a[j]; }
          tmp = std::conj(tmp)*tmp;
          true_r += std::sqrt(tmp.real());
        }

        RealD error = std::sqrt(norm2(r)/norm2(phi));
        std::cout << GridLogMessage << "ChronoForecast: |res|/|src| = " << error << std::endl;

        return chi;
      };
  };

}

#endif
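ChronoForecast above reduces to a small dense solve: after orthonormalising the past solutions v_i, it builds G_ij = <v_i, MdagM v_j> and b_i = <v_i, phi>, solves G a = b, and takes chi = sum_i a_i v_i with residual r = phi - sum_i a_i MdagM v_i. A dense Eigen sketch of exactly that projection step (Eigen's pivoted LU replaces the hand-rolled Gauss-Jordan; sizes and data are made up):

#include <Eigen/Dense>
#include <iostream>

int main(void) {
  const int N = 4, degree = 2;
  Eigen::MatrixXcd M   = Eigen::MatrixXcd::Random(N, N);
  Eigen::MatrixXcd V   = Eigen::MatrixXcd::Random(N, degree); // past solutions
  Eigen::VectorXcd phi = Eigen::VectorXcd::Random(N);

  Eigen::MatrixXcd MdagMV = M.adjoint() * (M * V);
  Eigen::MatrixXcd G = V.adjoint() * MdagMV;    // G_ij = <v_i, MdagM v_j>
  Eigen::VectorXcd b = V.adjoint() * phi;       // b_i  = <v_i, phi>
  Eigen::VectorXcd a = G.fullPivLu().solve(b);

  Eigen::VectorXcd chi = V * a;                 // forecasted guess
  Eigen::VectorXcd r   = phi - MdagMV * a;      // projected residual
  std::cout << "|res|/|src| = " << r.norm() / phi.norm() << std::endl;
  return 0;
}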
@@ -33,7 +33,7 @@ directory

namespace Grid {

enum BlockCGtype { BlockCG, BlockCGrQ, CGmultiRHS };
enum BlockCGtype { BlockCG, BlockCGrQ, CGmultiRHS, BlockCGVec, BlockCGrQVec };

//////////////////////////////////////////////////////////////////////////
// Block conjugate gradient. Dimension zero should be the block direction
@@ -42,7 +42,6 @@ template <class Field>
class BlockConjugateGradient : public OperatorFunction<Field> {
 public:

  typedef typename Field::scalar_type scomplex;

  int blockDim ;
@@ -54,21 +53,15 @@ class BlockConjugateGradient : public OperatorFunction<Field> {
  RealD Tolerance;
  Integer MaxIterations;
  Integer IterationsToComplete; //Number of iterations the CG took to finish. Filled in upon completion
  Integer PrintInterval; //GridLogMessages or Iterative

  BlockConjugateGradient(BlockCGtype cgtype,int _Orthog,RealD tol, Integer maxit, bool err_on_no_conv = true)
    : Tolerance(tol), CGtype(cgtype),   blockDim(_Orthog),  MaxIterations(maxit), ErrorOnNoConverge(err_on_no_conv)
    : Tolerance(tol), CGtype(cgtype),   blockDim(_Orthog),  MaxIterations(maxit), ErrorOnNoConverge(err_on_no_conv),PrintInterval(100)
  {};

////////////////////////////////////////////////////////////////////////////////////////////////////
// Thin QR factorisation (google it)
////////////////////////////////////////////////////////////////////////////////////////////////////
void ThinQRfact (Eigen::MatrixXcd &m_rr,
		 Eigen::MatrixXcd &C,
		 Eigen::MatrixXcd &Cinv,
		 Field & Q,
		 const Field & R)
{
  int Orthog = blockDim; // First dimension is block dim; this is an assumption
  ////////////////////////////////////////////////////////////////////////////////////////////////////
  //Dimensions
  // R_{ferm x Nblock} =  Q_{ferm x Nblock} x  C_{Nblock x Nblock} -> ferm x Nblock
@@ -85,17 +78,22 @@ void ThinQRfact (Eigen::MatrixXcd &m_rr,
  // Cdag C = Rdag R ; passes.
  // QdagQ  = 1      ; passes
  ////////////////////////////////////////////////////////////////////////////////////////////////////
void ThinQRfact (Eigen::MatrixXcd &m_rr,
		 Eigen::MatrixXcd &C,
		 Eigen::MatrixXcd &Cinv,
		 Field & Q,
		 const Field & R)
{
  int Orthog = blockDim; // First dimension is block dim; this is an assumption
  sliceInnerProductMatrix(m_rr,R,R,Orthog);

  ////////////////////////////////////////////////////////////////////////////////////////////////////
  // Cholesky from Eigen
  // There exists a ldlt that is documented as more stable
  ////////////////////////////////////////////////////////////////////////////////////////////////////
  // Force manifest hermitian to avoid rounding related
  m_rr = 0.5*(m_rr+m_rr.adjoint());

  Eigen::MatrixXcd L    = m_rr.llt().matrixL();

  C    = L.adjoint();
  Cinv = C.inverse();

  ////////////////////////////////////////////////////////////////////////////////////////////////////
  // Q = R C^{-1}
  //
@@ -103,9 +101,27 @@ void ThinQRfact (Eigen::MatrixXcd &m_rr,
  //
  // NB maddMatrix conventions are Right multiplication X[j] a[j,i] already
  ////////////////////////////////////////////////////////////////////////////////////////////////////
  // FIXME:: make a sliceMulMatrix to avoid zero vector
  sliceMulMatrix(Q,Cinv,R,Orthog);
}
// see comments above
void ThinQRfact (Eigen::MatrixXcd &m_rr,
		 Eigen::MatrixXcd &C,
		 Eigen::MatrixXcd &Cinv,
		 std::vector<Field> & Q,
		 const std::vector<Field> & R)
{
  InnerProductMatrix(m_rr,R,R);

  m_rr = 0.5*(m_rr+m_rr.adjoint());

  Eigen::MatrixXcd L    = m_rr.llt().matrixL();

  C    = L.adjoint();
  Cinv = C.inverse();

  MulMatrix(Q,Cinv,R);
}

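The Cholesky-based thin QR above computes C from Cdag C = Rdag R and then Q = R C^{-1}, so Q has orthonormal columns and R = Q C. A standalone Eigen sketch, with a tall-skinny random matrix standing in for the Nblock fermion fields, checking both properties noted in the comments:

#include <Eigen/Dense>
#include <iostream>

int main(void) {
  const int Nferm = 50, Nblock = 4;
  Eigen::MatrixXcd R = Eigen::MatrixXcd::Random(Nferm, Nblock);

  Eigen::MatrixXcd m_rr = R.adjoint() * R;      // sliceInnerProductMatrix
  m_rr = 0.5 * (m_rr + m_rr.adjoint());         // force manifest hermitian

  Eigen::MatrixXcd L    = m_rr.llt().matrixL(); // Cholesky from Eigen
  Eigen::MatrixXcd C    = L.adjoint();
  Eigen::MatrixXcd Cinv = C.inverse();

  Eigen::MatrixXcd Q = R * Cinv;                // sliceMulMatrix step
  std::cout << "|QdagQ-1| = "
            << (Q.adjoint()*Q - Eigen::MatrixXcd::Identity(Nblock,Nblock)).norm()
            << "  |R-QC| = " << (R - Q*C).norm() << std::endl;
  return 0;
}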
////////////////////////////////////////////////////////////////////////////////////////////////////
// Call one of several implementations
////////////////////////////////////////////////////////////////////////////////////////////////////
@@ -113,14 +129,20 @@ void operator()(LinearOperatorBase<Field> &Linop, const Field &Src, Field &Psi)
{
  if ( CGtype == BlockCGrQ ) {
    BlockCGrQsolve(Linop,Src,Psi);
  } else if (CGtype == BlockCG ) {
    BlockCGsolve(Linop,Src,Psi);
  } else if (CGtype == CGmultiRHS ) {
    CGmultiRHSsolve(Linop,Src,Psi);
  } else {
    assert(0);
  }
}
virtual void operator()(LinearOperatorBase<Field> &Linop, const std::vector<Field> &Src, std::vector<Field> &Psi)
{
  if ( CGtype == BlockCGrQVec ) {
    BlockCGrQsolveVec(Linop,Src,Psi);
  } else {
    assert(0);
  }
}

////////////////////////////////////////////////////////////////////////////
// BlockCGrQ implementation:
@@ -133,7 +155,8 @@ void BlockCGrQsolve(LinearOperatorBase<Field> &Linop, const Field &B, Field &X)
{
  int Orthog = blockDim; // First dimension is block dim; this is an assumption
  Nblock = B._grid->_fdimensions[Orthog];

/* FAKE */
  Nblock=8;
  std::cout<<GridLogMessage<<" Block Conjugate Gradient : Orthog "<<Orthog<<" Nblock "<<Nblock<<std::endl;

  X.checkerboard = B.checkerboard;
@@ -196,15 +219,10 @@ void BlockCGrQsolve(LinearOperatorBase<Field> &Linop, const Field &B, Field &X)
  std::cout << GridLogMessage<<"BlockCGrQ algorithm initialisation " <<std::endl;

  //1.  QC = R = B-AX, D = Q     ; QC => Thin QR factorisation (google it)

  Linop.HermOp(X, AD);
  tmp = B - AD;
  //std::cout << GridLogMessage << " initial tmp " << norm2(tmp)<< std::endl;

  ThinQRfact (m_rr, m_C, m_Cinv, Q, tmp);
  //std::cout << GridLogMessage << " initial Q " << norm2(Q)<< std::endl;
  //std::cout << GridLogMessage << " m_rr " << m_rr<<std::endl;
  //std::cout << GridLogMessage << " m_C " << m_C<<std::endl;
  //std::cout << GridLogMessage << " m_Cinv " << m_Cinv<<std::endl;
  D=Q;

  std::cout << GridLogMessage<<"BlockCGrQ computed initial residual and QR fact " <<std::endl;
@@ -226,14 +244,12 @@ void BlockCGrQsolve(LinearOperatorBase<Field> &Linop, const Field &B, Field &X)
    MatrixTimer.Start();
    Linop.HermOp(D, Z);
    MatrixTimer.Stop();
    //std::cout << GridLogMessage << " norm2 Z " <<norm2(Z)<<std::endl;

    //4. M  = [D^dag Z]^{-1}
    sliceInnerTimer.Start();
    sliceInnerProductMatrix(m_DZ,D,Z,Orthog);
    sliceInnerTimer.Stop();
    m_M       = m_DZ.inverse();
    //std::cout << GridLogMessage << " m_DZ " <<m_DZ<<std::endl;

    //5. X  = X + D MC
    m_tmp     = m_M * m_C;
@@ -251,6 +267,7 @@ void BlockCGrQsolve(LinearOperatorBase<Field> &Linop, const Field &B, Field &X)

    //7. D  = Q + D S^dag
    m_tmp = m_S.adjoint();

    sliceMaddTimer.Start();
    sliceMaddMatrix(D,m_tmp,D,Q,Orthog);
    sliceMaddTimer.Stop();
@@ -311,152 +328,6 @@ void BlockCGrQsolve(LinearOperatorBase<Field> &Linop, const Field &B, Field &X)
  IterationsToComplete = k;
}
//////////////////////////////////////////////////////////////////////////
// Block conjugate gradient; Original O'Leary Dimension zero should be the block direction
//////////////////////////////////////////////////////////////////////////
void BlockCGsolve(LinearOperatorBase<Field> &Linop, const Field &Src, Field &Psi)
{
  int Orthog = blockDim; // First dimension is block dim; this is an assumption
  Nblock = Src._grid->_fdimensions[Orthog];

  std::cout<<GridLogMessage<<" Block Conjugate Gradient : Orthog "<<Orthog<<" Nblock "<<Nblock<<std::endl;

  Psi.checkerboard = Src.checkerboard;
  conformable(Psi, Src);

  Field P(Src);
  Field AP(Src);
  Field R(Src);

  Eigen::MatrixXcd m_pAp    = Eigen::MatrixXcd::Identity(Nblock,Nblock);
  Eigen::MatrixXcd m_pAp_inv= Eigen::MatrixXcd::Identity(Nblock,Nblock);
  Eigen::MatrixXcd m_rr     = Eigen::MatrixXcd::Zero(Nblock,Nblock);
  Eigen::MatrixXcd m_rr_inv = Eigen::MatrixXcd::Zero(Nblock,Nblock);

  Eigen::MatrixXcd m_alpha      = Eigen::MatrixXcd::Zero(Nblock,Nblock);
  Eigen::MatrixXcd m_beta   = Eigen::MatrixXcd::Zero(Nblock,Nblock);

  // Initial residual computation & set up
  std::vector<RealD> residuals(Nblock);
  std::vector<RealD> ssq(Nblock);

  sliceNorm(ssq,Src,Orthog);
  RealD sssum=0;
  for(int b=0;b<Nblock;b++) sssum+=ssq[b];

  sliceNorm(residuals,Src,Orthog);
  for(int b=0;b<Nblock;b++){ assert(std::isnan(residuals[b])==0); }

  sliceNorm(residuals,Psi,Orthog);
  for(int b=0;b<Nblock;b++){ assert(std::isnan(residuals[b])==0); }

  // Initial search dir is guess
  Linop.HermOp(Psi, AP);


  /************************************************************************
   * Block conjugate gradient (Stephen Pickles, thesis 1995, pp 71, O'Leary 1980)
   ************************************************************************
   * O'Leary : R = B - A X
   * O'Leary : P = M R ; preconditioner M = 1
   * O'Leary : alpha = PAP^{-1} RMR
   * O'Leary : beta  = RMR^{-1}_old RMR_new
   * O'Leary : X=X+Palpha
   * O'Leary : R_new=R_old-AP alpha
   * O'Leary : P=MR_new+P beta
   */

  R = Src - AP;
  P = R;
  sliceInnerProductMatrix(m_rr,R,R,Orthog);

  GridStopWatch sliceInnerTimer;
  GridStopWatch sliceMaddTimer;
  GridStopWatch MatrixTimer;
  GridStopWatch SolverTimer;
  SolverTimer.Start();

  int k;
  for (k = 1; k <= MaxIterations; k++) {

    RealD rrsum=0;
    for(int b=0;b<Nblock;b++) rrsum+=real(m_rr(b,b));

    std::cout << GridLogIterative << "\titeration "<<k<<" rr_sum "<<rrsum<<" ssq_sum "<< sssum
	      <<" / "<<std::sqrt(rrsum/sssum) <<std::endl;

    MatrixTimer.Start();
    Linop.HermOp(P, AP);
    MatrixTimer.Stop();

    // Alpha
    sliceInnerTimer.Start();
    sliceInnerProductMatrix(m_pAp,P,AP,Orthog);
    sliceInnerTimer.Stop();
    m_pAp_inv = m_pAp.inverse();
    m_alpha   = m_pAp_inv * m_rr ;

    // Psi, R update
    sliceMaddTimer.Start();
    sliceMaddMatrix(Psi,m_alpha, P,Psi,Orthog);     // add alpha *  P to psi
    sliceMaddMatrix(R  ,m_alpha,AP,  R,Orthog,-1.0);// sub alpha * AP to resid
    sliceMaddTimer.Stop();

    // Beta
    m_rr_inv = m_rr.inverse();
    sliceInnerTimer.Start();
    sliceInnerProductMatrix(m_rr,R,R,Orthog);
    sliceInnerTimer.Stop();
    m_beta = m_rr_inv *m_rr;

    // Search update
    sliceMaddTimer.Start();
    sliceMaddMatrix(AP,m_beta,P,R,Orthog);
    sliceMaddTimer.Stop();
    P= AP;

    /*********************
     * convergence monitor
     *********************
     */
    RealD max_resid=0;
    RealD rr;
    for(int b=0;b<Nblock;b++){
      rr = real(m_rr(b,b))/ssq[b];
      if ( rr > max_resid ) max_resid = rr;
    }

    if ( max_resid < Tolerance*Tolerance ) {

      SolverTimer.Stop();

      std::cout << GridLogMessage<<"BlockCG converged in "<<k<<" iterations"<<std::endl;
      for(int b=0;b<Nblock;b++){
	std::cout << GridLogMessage<< "\t\tblock "<<b<<" computed resid "
		  << std::sqrt(real(m_rr(b,b))/ssq[b])<<std::endl;
      }
      std::cout << GridLogMessage<<"\tMax residual is "<<std::sqrt(max_resid)<<std::endl;

      Linop.HermOp(Psi, AP);
      AP = AP-Src;
      std::cout << GridLogMessage <<"\t True residual is " << std::sqrt(norm2(AP)/norm2(Src)) <<std::endl;

      std::cout << GridLogMessage << "Time Breakdown "<<std::endl;
      std::cout << GridLogMessage << "\tElapsed    " << SolverTimer.Elapsed()     <<std::endl;
      std::cout << GridLogMessage << "\tMatrix     " << MatrixTimer.Elapsed()     <<std::endl;
      std::cout << GridLogMessage << "\tInnerProd  " << sliceInnerTimer.Elapsed() <<std::endl;
      std::cout << GridLogMessage << "\tMaddMatrix " << sliceMaddTimer.Elapsed()  <<std::endl;

      IterationsToComplete = k;
      return;
    }

  }
  std::cout << GridLogMessage << "BlockConjugateGradient did NOT converge" << std::endl;

  if (ErrorOnNoConverge) assert(0);
  IterationsToComplete = k;
}
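The comment block inside BlockCGsolve above lists the O'Leary recurrences with M = 1: alpha = (Pdag A P)^{-1} Rdag R, X += P alpha, R -= AP alpha, beta = (Rdag R)_old^{-1} (Rdag R)_new, P = R + P beta. A dense, self-contained Eigen sketch of the same iteration (SPD test matrix and sizes are made up) mirroring the sliceInnerProductMatrix/sliceMaddMatrix right-multiplication convention:

#include <Eigen/Dense>
#include <iostream>

int main(void) {
  const int N = 30, Nblock = 3;
  Eigen::MatrixXd T = Eigen::MatrixXd::Random(N, N);
  Eigen::MatrixXd A = T.transpose()*T + double(N)*Eigen::MatrixXd::Identity(N, N);
  Eigen::MatrixXd B = Eigen::MatrixXd::Random(N, Nblock);
  Eigen::MatrixXd X = Eigen::MatrixXd::Zero(N, Nblock);

  Eigen::MatrixXd R = B - A * X, P = R;      // R = B - AX ; P = R
  Eigen::MatrixXd m_rr = R.transpose() * R;
  for (int k = 1; k <= 100; k++) {
    Eigen::MatrixXd AP      = A * P;
    Eigen::MatrixXd m_alpha = (P.transpose() * AP).inverse() * m_rr;
    X += P * m_alpha;                        // X = X + P alpha
    R -= AP * m_alpha;                       // R = R - AP alpha
    Eigen::MatrixXd m_rr_new = R.transpose() * R;
    Eigen::MatrixXd m_beta   = m_rr.inverse() * m_rr_new;
    P    = R + P * m_beta;                   // P = R + P beta
    m_rr = m_rr_new;
    if (R.norm() / B.norm() < 1e-10) {
      std::cout << "converged in " << k << " iterations, true resid "
                << (B - A * X).norm() / B.norm() << std::endl;
      break;
    }
  }
  return 0;
}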
//////////////////////////////////////////////////////////////////////////
// multiRHS conjugate gradient. Dimension zero should be the block direction
// Use this for spread out across nodes
//////////////////////////////////////////////////////////////////////////
@@ -594,6 +465,233 @@ void CGmultiRHSsolve(LinearOperatorBase<Field> &Linop, const Field &Src, Field &
  IterationsToComplete = k;
}

void InnerProductMatrix(Eigen::MatrixXcd &m , const std::vector<Field> &X, const std::vector<Field> &Y){
  for(int b=0;b<Nblock;b++){
  for(int bp=0;bp<Nblock;bp++) {
    m(b,bp) = innerProduct(X[b],Y[bp]);
  }}
}
void MaddMatrix(std::vector<Field> &AP, Eigen::MatrixXcd &m , const std::vector<Field> &X,const std::vector<Field> &Y,RealD scale=1.0){
  // Should make this cache friendly with site outermost, parallel_for
  // Deal with case AP aliases with either Y or X
  std::vector<Field> tmp(Nblock,X[0]);
  for(int b=0;b<Nblock;b++){
    tmp[b]   = Y[b];
    for(int bp=0;bp<Nblock;bp++) {
      tmp[b] = tmp[b] + (scale*m(bp,b))*X[bp];
    }
  }
  for(int b=0;b<Nblock;b++){
    AP[b] = tmp[b];
  }
}
void MulMatrix(std::vector<Field> &AP, Eigen::MatrixXcd &m , const std::vector<Field> &X){
  // Should make this cache friendly with site outermost, parallel_for
  for(int b=0;b<Nblock;b++){
    AP[b] = zero;
    for(int bp=0;bp<Nblock;bp++) {
      AP[b] += (m(bp,b))*X[bp];
    }
  }
}
double normv(const std::vector<Field> &P){
  double nn = 0.0;
  for(int b=0;b<Nblock;b++) {
    nn+=norm2(P[b]);
  }
  return nn;
}

////////////////////////////////////////////////////////////////////////////
// BlockCGrQvec implementation:
//--------------------------
// X is guess/Solution
// B is RHS
// Solve A X_i = B_i    ;        i refers to Nblock index
////////////////////////////////////////////////////////////////////////////
void BlockCGrQsolveVec(LinearOperatorBase<Field> &Linop, const std::vector<Field> &B, std::vector<Field> &X)
{
  Nblock = B.size();
  assert(Nblock == X.size());

  std::cout<<GridLogMessage<<" Block Conjugate Gradient Vec rQ : Nblock "<<Nblock<<std::endl;

  for(int b=0;b<Nblock;b++){
    X[b].checkerboard = B[b].checkerboard;
    conformable(X[b], B[b]);
    conformable(X[b], X[0]);
  }

  Field Fake(B[0]);

  std::vector<Field> tmp(Nblock,Fake);
  std::vector<Field>   Q(Nblock,Fake);
  std::vector<Field>   D(Nblock,Fake);
  std::vector<Field>   Z(Nblock,Fake);
  std::vector<Field>  AD(Nblock,Fake);

  Eigen::MatrixXcd m_DZ     = Eigen::MatrixXcd::Identity(Nblock,Nblock);
  Eigen::MatrixXcd m_M      = Eigen::MatrixXcd::Identity(Nblock,Nblock);
  Eigen::MatrixXcd m_rr     = Eigen::MatrixXcd::Zero(Nblock,Nblock);

  Eigen::MatrixXcd m_C      = Eigen::MatrixXcd::Zero(Nblock,Nblock);
  Eigen::MatrixXcd m_Cinv   = Eigen::MatrixXcd::Zero(Nblock,Nblock);
  Eigen::MatrixXcd m_S      = Eigen::MatrixXcd::Zero(Nblock,Nblock);
  Eigen::MatrixXcd m_Sinv   = Eigen::MatrixXcd::Zero(Nblock,Nblock);

  Eigen::MatrixXcd m_tmp    = Eigen::MatrixXcd::Identity(Nblock,Nblock);
  Eigen::MatrixXcd m_tmp1   = Eigen::MatrixXcd::Identity(Nblock,Nblock);

  // Initial residual computation & set up
  std::vector<RealD> residuals(Nblock);
  std::vector<RealD> ssq(Nblock);

  RealD sssum=0;
  for(int b=0;b<Nblock;b++){ ssq[b] = norm2(B[b]);}
  for(int b=0;b<Nblock;b++) sssum+=ssq[b];

  for(int b=0;b<Nblock;b++){ residuals[b] = norm2(B[b]);}
  for(int b=0;b<Nblock;b++){ assert(std::isnan(residuals[b])==0); }

  for(int b=0;b<Nblock;b++){ residuals[b] = norm2(X[b]);}
  for(int b=0;b<Nblock;b++){ assert(std::isnan(residuals[b])==0); }

  /************************************************************************
   * Block conjugate gradient rQ (Sebastien Birk Thesis, after Dubrulle 2001)
   ************************************************************************
   * Dimensions:
   *
   *   X,B==(Nferm x Nblock)
   *   A==(Nferm x Nferm)
   *
   * Nferm = Nspin x Ncolour x Ncomplex x Nlattice_site
   *
   * QC = R = B-AX, D = Q     ; QC => Thin QR factorisation (google it)
   * for k:
   *   Z  = AD
   *   M  = [D^dag Z]^{-1}
   *   X  = X + D MC
   *   QS = Q - ZM
   *   D  = Q + D S^dag
   *   C  = S C
   */
  ///////////////////////////////////////
  // Initial block: initial search dir is guess
  ///////////////////////////////////////
  std::cout << GridLogMessage<<"BlockCGrQvec algorithm initialisation " <<std::endl;

  //1.  QC = R = B-AX, D = Q     ; QC => Thin QR factorisation (google it)
  for(int b=0;b<Nblock;b++) {
    Linop.HermOp(X[b], AD[b]);
    tmp[b] = B[b] - AD[b];
  }

  ThinQRfact (m_rr, m_C, m_Cinv, Q, tmp);

  for(int b=0;b<Nblock;b++) D[b]=Q[b];

  std::cout << GridLogMessage<<"BlockCGrQ vec computed initial residual and QR fact " <<std::endl;

  ///////////////////////////////////////
  // Timers
  ///////////////////////////////////////
  GridStopWatch sliceInnerTimer;
  GridStopWatch sliceMaddTimer;
  GridStopWatch QRTimer;
  GridStopWatch MatrixTimer;
  GridStopWatch SolverTimer;
  SolverTimer.Start();

  int k;
  for (k = 1; k <= MaxIterations; k++) {

    //3. Z  = AD
    MatrixTimer.Start();
    for(int b=0;b<Nblock;b++) Linop.HermOp(D[b], Z[b]);
    MatrixTimer.Stop();

    //4. M  = [D^dag Z]^{-1}
    sliceInnerTimer.Start();
    InnerProductMatrix(m_DZ,D,Z);
    sliceInnerTimer.Stop();
    m_M       = m_DZ.inverse();

    //5. X  = X + D MC
    m_tmp     = m_M * m_C;
    sliceMaddTimer.Start();
    MaddMatrix(X,m_tmp, D,X);
    sliceMaddTimer.Stop();

    //6. QS = Q - ZM
    sliceMaddTimer.Start();
    MaddMatrix(tmp,m_M,Z,Q,-1.0);
    sliceMaddTimer.Stop();
    QRTimer.Start();
    ThinQRfact (m_rr, m_S, m_Sinv, Q, tmp);
    QRTimer.Stop();

    //7. D  = Q + D S^dag
    m_tmp = m_S.adjoint();
    sliceMaddTimer.Start();
    MaddMatrix(D,m_tmp,D,Q);
    sliceMaddTimer.Stop();

    //8. C  = S C
    m_C = m_S*m_C;

    /*********************
     * convergence monitor
     *********************
     */
    m_rr = m_C.adjoint() * m_C;

    RealD max_resid=0;
    RealD rrsum=0;
    RealD rr;

    for(int b=0;b<Nblock;b++) {
      rrsum+=real(m_rr(b,b));
      rr = real(m_rr(b,b))/ssq[b];
      if ( rr > max_resid ) max_resid = rr;
    }

    std::cout << GridLogIterative << "\t Block Iteration "<<k<<" ave resid "<< sqrt(rrsum/sssum) << " max "<< sqrt(max_resid) <<std::endl;

    if ( max_resid < Tolerance*Tolerance ) {

      SolverTimer.Stop();

      std::cout << GridLogMessage<<"BlockCGrQ converged in "<<k<<" iterations"<<std::endl;

      for(int b=0;b<Nblock;b++){
	std::cout << GridLogMessage<< "\t\tblock "<<b<<" computed resid "<< std::sqrt(real(m_rr(b,b))/ssq[b])<<std::endl;
      }
      std::cout << GridLogMessage<<"\tMax residual is "<<std::sqrt(max_resid)<<std::endl;

      for(int b=0;b<Nblock;b++) Linop.HermOp(X[b], AD[b]);
      for(int b=0;b<Nblock;b++) AD[b] = AD[b]-B[b];
      std::cout << GridLogMessage <<"\t True residual is " << std::sqrt(normv(AD)/normv(B)) <<std::endl;

      std::cout << GridLogMessage << "Time Breakdown "<<std::endl;
      std::cout << GridLogMessage << "\tElapsed    " << SolverTimer.Elapsed()     <<std::endl;
      std::cout << GridLogMessage << "\tMatrix     " << MatrixTimer.Elapsed()     <<std::endl;
      std::cout << GridLogMessage << "\tInnerProd  " << sliceInnerTimer.Elapsed() <<std::endl;
      std::cout << GridLogMessage << "\tMaddMatrix " << sliceMaddTimer.Elapsed()  <<std::endl;
      std::cout << GridLogMessage << "\tThinQRfact " << QRTimer.Elapsed()  <<std::endl;

      IterationsToComplete = k;
      return;
    }

  }
  std::cout << GridLogMessage << "BlockConjugateGradient(rQ) did NOT converge" << std::endl;

  if (ErrorOnNoConverge) assert(0);
  IterationsToComplete = k;
}

};

}
Grid/algorithms/iterative/CommunicationAvoidingGeneralisedMinimalResidual.h (new file, 244 lines)
@@ -0,0 +1,244 @@
/*************************************************************************************

Grid physics library, www.github.com/paboyle/Grid

Source file: ./lib/algorithms/iterative/CommunicationAvoidingGeneralisedMinimalResidual.h

Copyright (C) 2015

Author: Daniel Richtmann <daniel.richtmann@ur.de>

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

See the full license in the file "LICENSE" in the top level distribution
directory
*************************************************************************************/
/*  END LEGAL */
#ifndef GRID_COMMUNICATION_AVOIDING_GENERALISED_MINIMAL_RESIDUAL_H
#define GRID_COMMUNICATION_AVOIDING_GENERALISED_MINIMAL_RESIDUAL_H

namespace Grid {

template<class Field>
class CommunicationAvoidingGeneralisedMinimalResidual : public OperatorFunction<Field> {
 public:
  bool ErrorOnNoConverge; // Throw an assert when CAGMRES fails to converge,
                          // defaults to true

  RealD   Tolerance;

  Integer MaxIterations;
  Integer RestartLength;
  Integer MaxNumberOfRestarts;
  Integer IterationCount; // Number of iterations the CAGMRES took to finish,
                          // filled in upon completion

  GridStopWatch MatrixTimer;
  GridStopWatch LinalgTimer;
  GridStopWatch QrTimer;
  GridStopWatch CompSolutionTimer;

  Eigen::MatrixXcd H;

  std::vector<std::complex<double>> y;
  std::vector<std::complex<double>> gamma;
  std::vector<std::complex<double>> c;
  std::vector<std::complex<double>> s;

  CommunicationAvoidingGeneralisedMinimalResidual(RealD   tol,
                                                  Integer maxit,
                                                  Integer restart_length,
                                                  bool    err_on_no_conv = true)
      : Tolerance(tol)
      , MaxIterations(maxit)
      , RestartLength(restart_length)
      , MaxNumberOfRestarts(MaxIterations/RestartLength + ((MaxIterations%RestartLength == 0) ? 0 : 1))
      , ErrorOnNoConverge(err_on_no_conv)
      , H(Eigen::MatrixXcd::Zero(RestartLength, RestartLength + 1)) // sizes taken from DD-αAMG code base
      , y(RestartLength + 1, 0.)
      , gamma(RestartLength + 1, 0.)
      , c(RestartLength + 1, 0.)
      , s(RestartLength + 1, 0.) {};

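MaxNumberOfRestarts in the initialiser list above is just a ceiling division; a one-line check of the same expression (values are made up):

#include <cassert>

int main(void) {
  int MaxIterations = 100, RestartLength = 30;
  int MaxNumberOfRestarts = MaxIterations / RestartLength
                          + ((MaxIterations % RestartLength == 0) ? 0 : 1);
  assert(MaxNumberOfRestarts == 4); // ceil(100/30)
  return 0;
}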
  void operator()(LinearOperatorBase<Field> &LinOp, const Field &src, Field &psi) {

    std::cout << GridLogWarning << "This algorithm currently doesn't differ from regular GMRES" << std::endl;

    psi.checkerboard = src.checkerboard;
    conformable(psi, src);

    RealD guess = norm2(psi);
    assert(std::isnan(guess) == 0);

    RealD cp;
    RealD ssq = norm2(src);
    RealD rsq = Tolerance * Tolerance * ssq;

    Field r(src._grid);

    std::cout << std::setprecision(4) << std::scientific;
    std::cout << GridLogIterative << "CommunicationAvoidingGeneralisedMinimalResidual: guess " << guess << std::endl;
    std::cout << GridLogIterative << "CommunicationAvoidingGeneralisedMinimalResidual:   src " << ssq   << std::endl;

    MatrixTimer.Reset();
    LinalgTimer.Reset();
    QrTimer.Reset();
    CompSolutionTimer.Reset();

    GridStopWatch SolverTimer;
    SolverTimer.Start();

    IterationCount = 0;

    for (int k=0; k<MaxNumberOfRestarts; k++) {

      cp = outerLoopBody(LinOp, src, psi, rsq);

      // Stopping condition
      if (cp <= rsq) {

        SolverTimer.Stop();

        LinOp.Op(psi,r);
        axpy(r,-1.0,src,r);

        RealD srcnorm       = sqrt(ssq);
        RealD resnorm       = sqrt(norm2(r));
        RealD true_residual = resnorm / srcnorm;

        std::cout << GridLogMessage        << "CommunicationAvoidingGeneralisedMinimalResidual: Converged on iteration " << IterationCount
                  << " computed residual " << sqrt(cp / ssq)
                  << " true residual "     << true_residual
                  << " target "            << Tolerance << std::endl;

        std::cout << GridLogMessage << "CAGMRES Time elapsed: Total   " <<       SolverTimer.Elapsed() << std::endl;
        std::cout << GridLogMessage << "CAGMRES Time elapsed: Matrix  " <<       MatrixTimer.Elapsed() << std::endl;
        std::cout << GridLogMessage << "CAGMRES Time elapsed: Linalg  " <<       LinalgTimer.Elapsed() << std::endl;
        std::cout << GridLogMessage << "CAGMRES Time elapsed: QR      " <<           QrTimer.Elapsed() << std::endl;
        std::cout << GridLogMessage << "CAGMRES Time elapsed: CompSol " << CompSolutionTimer.Elapsed() << std::endl;
        return;
      }
    }

    std::cout << GridLogMessage << "CommunicationAvoidingGeneralisedMinimalResidual did NOT converge" << std::endl;

    if (ErrorOnNoConverge)
      assert(0);
  }

  RealD outerLoopBody(LinearOperatorBase<Field> &LinOp, const Field &src, Field &psi, RealD rsq) {

    RealD cp = 0;

    Field w(src._grid);
    Field r(src._grid);

    // this should probably be made a class member so that it is only allocated once, not in every restart
    std::vector<Field> v(RestartLength + 1, src._grid); for (auto &elem : v) elem = zero;

    MatrixTimer.Start();
    LinOp.Op(psi, w);
    MatrixTimer.Stop();

    LinalgTimer.Start();
    r = src - w;

    gamma[0] = sqrt(norm2(r));

    v[0] = (1. / gamma[0]) * r;
    LinalgTimer.Stop();

    for (int i=0; i<RestartLength; i++) {

      IterationCount++;

      arnoldiStep(LinOp, v, w, i);

      qrUpdate(i);

      cp = std::norm(gamma[i+1]);

      std::cout << GridLogIterative << "CommunicationAvoidingGeneralisedMinimalResidual: Iteration " << IterationCount
                << " residual " << cp << " target " << rsq << std::endl;

      if ((i == RestartLength - 1) || (IterationCount == MaxIterations) || (cp <= rsq)) {

        computeSolution(v, psi, i);

        return cp;
      }
    }

    assert(0); // Never reached
 | 
			
		||||
    return cp;
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  void arnoldiStep(LinearOperatorBase<Field> &LinOp, std::vector<Field> &v, Field &w, int iter) {
 | 
			
		||||
 | 
			
		||||
    MatrixTimer.Start();
 | 
			
		||||
    LinOp.Op(v[iter], w);
 | 
			
		||||
    MatrixTimer.Stop();
 | 
			
		||||
 | 
			
		||||
    LinalgTimer.Start();
 | 
			
		||||
    for (int i = 0; i <= iter; ++i) {
 | 
			
		||||
      H(iter, i) = innerProduct(v[i], w);
 | 
			
		||||
      w = w - H(iter, i) * v[i];
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    H(iter, iter + 1) = sqrt(norm2(w));
 | 
			
		||||
    v[iter + 1] = (1. / H(iter, iter + 1)) * w;
 | 
			
		||||
    LinalgTimer.Stop();
 | 
			
		||||
  }
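  // arnoldiStep above is one step of the Arnoldi process with modified
  // Gram-Schmidt: after step 'iter' the relation
  //   A v[iter] = sum_{i=0..iter} H(iter, i) v[i] + H(iter, iter+1) v[iter+1]
  // holds, with v[0..iter+1] orthonormal. Note that H is indexed with the
  // Krylov step first, i.e. transposed relative to the textbook Hessenberg matrix.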

  void qrUpdate(int iter) {

    QrTimer.Start();
    for (int i = 0; i < iter ; ++i) {
      auto tmp       = -s[i] * H(iter, i) + c[i] * H(iter, i + 1);
      H(iter, i)     = std::conj(c[i]) * H(iter, i) + std::conj(s[i]) * H(iter, i + 1);
      H(iter, i + 1) = tmp;
    }

    // Compute new Givens Rotation
    ComplexD nu = sqrt(std::norm(H(iter, iter)) + std::norm(H(iter, iter + 1)));
    c[iter]     = H(iter, iter) / nu;
    s[iter]     = H(iter, iter + 1) / nu;

    // Apply new Givens rotation
    H(iter, iter)     = nu;
    H(iter, iter + 1) = 0.;

    gamma[iter + 1] = -s[iter] * gamma[iter];
    gamma[iter]     = std::conj(c[iter]) * gamma[iter];
    QrTimer.Stop();
  }
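  // The rotation computed in qrUpdate is chosen so that, acting on rows
  // iter and iter+1 of the current column,
  //   [ conj(c)  conj(s) ] [ H(iter, iter)   ]   [ nu ]
  //   [   -s       c     ] [ H(iter, iter+1) ] = [ 0  ],
  // with nu = sqrt(|H(iter,iter)|^2 + |H(iter,iter+1)|^2). Applying the same
  // rotation to gamma makes std::norm(gamma[iter+1]) the squared residual
  // norm, which is exactly what outerLoopBody compares against rsq.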

  void computeSolution(std::vector<Field> const &v, Field &psi, int iter) {

    CompSolutionTimer.Start();
    for (int i = iter; i >= 0; i--) {
      y[i] = gamma[i];
      for (int k = i + 1; k <= iter; k++)
        y[i] = y[i] - H(k, i) * y[k];
      y[i] = y[i] / H(i, i);
    }

    for (int i = 0; i <= iter; i++)
      psi = psi + v[i] * y[i];
    CompSolutionTimer.Stop();
  }
};
}
#endif
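
For orientation, a minimal usage sketch of the class above; the action and
field types are illustrative assumptions, not part of this diff:

  // Sketch: solve HermOp psi = src with restarted (CA)GMRES, restart length 20.
  MdagMLinearOperator<ActionType, LatticeFermion> HermOp(D); // D: the Dirac matrix in use
  CommunicationAvoidingGeneralisedMinimalResidual<LatticeFermion> CAGMRES(1.0e-8, 10000, 20);
  CAGMRES(HermOp, src, psi); // psi serves as initial guess and receives the solution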

Grid/algorithms/iterative/ConjugateGradient.h
@@ -52,8 +52,9 @@ class ConjugateGradient : public OperatorFunction<Field> {
         MaxIterations(maxit),
         ErrorOnNoConverge(err_on_no_conv){};
 
-  void operator()(LinearOperatorBase<Field> &Linop, const Field &src,
-                  Field &psi) {
+  void operator()(LinearOperatorBase<Field> &Linop, const Field &src, Field &psi) {
+
 
     psi.checkerboard = src.checkerboard;
     conformable(psi, src);
@@ -70,7 +71,6 @@ class ConjugateGradient : public OperatorFunction<Field> {
     
     Linop.HermOpAndNorm(psi, mmp, d, b);
-
 
     r = src - mmp;
     p = r;
 
@@ -78,12 +78,12 @@ class ConjugateGradient : public OperatorFunction<Field> {
     cp = a;
     ssq = norm2(src);
 
-    std::cout << GridLogIterative << std::setprecision(4) << "ConjugateGradient: guess " << guess << std::endl;
-    std::cout << GridLogIterative << std::setprecision(4) << "ConjugateGradient:   src " << ssq << std::endl;
-    std::cout << GridLogIterative << std::setprecision(4) << "ConjugateGradient:    mp " << d << std::endl;
-    std::cout << GridLogIterative << std::setprecision(4) << "ConjugateGradient:   mmp " << b << std::endl;
-    std::cout << GridLogIterative << std::setprecision(4) << "ConjugateGradient:  cp,r " << cp << std::endl;
-    std::cout << GridLogIterative << std::setprecision(4) << "ConjugateGradient:     p " << a << std::endl;
+    std::cout << GridLogIterative << std::setprecision(8) << "ConjugateGradient: guess " << guess << std::endl;
+    std::cout << GridLogIterative << std::setprecision(8) << "ConjugateGradient:   src " << ssq << std::endl;
+    std::cout << GridLogIterative << std::setprecision(8) << "ConjugateGradient:    mp " << d << std::endl;
+    std::cout << GridLogIterative << std::setprecision(8) << "ConjugateGradient:   mmp " << b << std::endl;
+    std::cout << GridLogIterative << std::setprecision(8) << "ConjugateGradient:  cp,r " << cp << std::endl;
+    std::cout << GridLogIterative << std::setprecision(8) << "ConjugateGradient:     p " << a << std::endl;
 
     RealD rsq = Tolerance * Tolerance * ssq;
 
@@ -92,42 +92,48 @@ class ConjugateGradient : public OperatorFunction<Field> {
       return;
     }
 
-    std::cout << GridLogIterative << std::setprecision(4)
+    std::cout << GridLogIterative << std::setprecision(8)
               << "ConjugateGradient: k=0 residual " << cp << " target " << rsq << std::endl;
 
     GridStopWatch LinalgTimer;
+    GridStopWatch InnerTimer;
+    GridStopWatch AxpyNormTimer;
+    GridStopWatch LinearCombTimer;
    GridStopWatch MatrixTimer;
     GridStopWatch SolverTimer;
 
     SolverTimer.Start();
     int k;
-    for (k = 1; k <= MaxIterations; k++) {
+    for (k = 1; k <= MaxIterations*1000; k++) {
      c = cp;
 
       MatrixTimer.Start();
-      Linop.HermOpAndNorm(p, mmp, d, qq);
+      Linop.HermOp(p, mmp);
       MatrixTimer.Stop();
 
       LinalgTimer.Start();
       //  RealD    qqck = norm2(mmp);
       //  ComplexD dck  = innerProduct(p,mmp);
+      InnerTimer.Start();
+      ComplexD dc  = innerProduct(p,mmp);
+      InnerTimer.Stop();
+      d = dc.real();
       a = c / d;
       b_pred = a * (a * qq - d) / c;
 
+      AxpyNormTimer.Start();
       cp = axpy_norm(r, -a, mmp, r);
+      AxpyNormTimer.Stop();
       b = cp / c;
 
       // Fuse these loops ; should be really easy
-      psi = a * p + psi;
-      p = p * b + r;
+      LinearCombTimer.Start();
+      parallel_for(int ss=0;ss<src._grid->oSites();ss++){
+	vstream(psi[ss], a      *  p[ss] + psi[ss]);
+	vstream(p  [ss], b      *  p[ss] + r[ss]);
+      }
+      LinearCombTimer.Stop();
       LinalgTimer.Stop();
 
       std::cout << GridLogIterative << "ConjugateGradient: Iteration " << k
-                << " residual " << cp << " target " << rsq << std::endl;
+                << " residual^2 " << sqrt(cp/ssq) << " target " << Tolerance << std::endl;
+      std::cout << GridLogDebug << "a = "<< a << " b_pred = "<< b_pred << "  b = "<< b << std::endl;
+      std::cout << GridLogDebug << "qq = "<< qq << " d = "<< d << "  c = "<< c << std::endl;
 
       // Stopping condition
       if (cp <= rsq) {
@@ -148,6 +154,9 @@ class ConjugateGradient : public OperatorFunction<Field> {
 	std::cout << GridLogMessage << "\tElapsed    " << SolverTimer.Elapsed() <<std::endl;
 	std::cout << GridLogMessage << "\tMatrix     " << MatrixTimer.Elapsed() <<std::endl;
 	std::cout << GridLogMessage << "\tLinalg     " << LinalgTimer.Elapsed() <<std::endl;
+	std::cout << GridLogMessage << "\tInner      " << InnerTimer.Elapsed() <<std::endl;
+	std::cout << GridLogMessage << "\tAxpyNorm   " << AxpyNormTimer.Elapsed() <<std::endl;
+	std::cout << GridLogMessage << "\tLinearComb " << LinearCombTimer.Elapsed() <<std::endl;
 
         if (ErrorOnNoConverge) assert(true_residual / Tolerance < 10000.0);
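
The replacement of the two field-wide updates by a single site loop in the hunk
above is a bandwidth optimisation: psi, p and r are each streamed through memory
once per iteration instead of p being re-read by a second axpy kernel. A sketch
of the fused pattern in isolation (names are illustrative; parallel_for and
vstream are the primitives used in the hunk):

  // Fused CG update: psi <- a*p + psi and p <- b*p + r in one pass over sites.
  parallel_for(int ss = 0; ss < grid->oSites(); ss++) {
    vstream(psi[ss], a * p[ss] + psi[ss]); // streaming store of the new psi
    vstream(p[ss],   b * p[ss] + r[ss]);   // streaming store of the new search direction
  }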

Grid/algorithms/iterative/ConjugateGradientMultiShift.h
@@ -43,6 +43,7 @@ namespace Grid {
 public:                                                
     RealD   Tolerance;
     Integer MaxIterations;
+    Integer IterationsToComplete; //Number of iterations the CG took to finish. Filled in upon completion
     int verbose;
     MultiShiftFunction shifts;
 
@@ -164,6 +165,15 @@ void operator() (LinearOperatorBase<Field> &Linop, const Field &src, std::vector
     axpby(psi[s],0.,-bs[s]*alpha[s],src,src);
   }
 
+  ///////////////////////////////////////
+  // Timers
+  ///////////////////////////////////////
+  GridStopWatch AXPYTimer;
+  GridStopWatch ShiftTimer;
+  GridStopWatch QRTimer;
+  GridStopWatch MatrixTimer;
   GridStopWatch SolverTimer;
   SolverTimer.Start();
+  
+  // Iteration loop
   int k;
@@ -171,7 +181,9 @@ void operator() (LinearOperatorBase<Field> &Linop, const Field &src, std::vector
   for (k=1;k<=MaxIterations;k++){
     
     a = c /cp;
+    AXPYTimer.Start();
     axpy(p,a,p,r);
+    AXPYTimer.Stop();
     
     // Note to self - direction ps is iterated separately
     // for each shift. Does not appear to have any scope
@@ -180,6 +192,7 @@ void operator() (LinearOperatorBase<Field> &Linop, const Field &src, std::vector
     // However SAME r is used. Could load "r" and update
     // ALL ps[s]. 2/3 Bandwidth saving
     // New Kernel: Load r, vector of coeffs, vector of pointers ps
+    AXPYTimer.Start();
     for(int s=0;s<nshift;s++){
       if ( ! converged[s] ) { 
 	if (s==0){
@@ -190,22 +203,34 @@ void operator() (LinearOperatorBase<Field> &Linop, const Field &src, std::vector
 	}
       }
     }
+    AXPYTimer.Stop();
     
     cp=c;
-    Linop.HermOpAndNorm(p,mmp,d,qq);
+    MatrixTimer.Start();  
+    //Linop.HermOpAndNorm(p,mmp,d,qq); // d is used
+    // The below is faster on KNL
+    Linop.HermOp(p,mmp); 
+    d=real(innerProduct(p,mmp));
+    
+    MatrixTimer.Stop();  
 
+    AXPYTimer.Start();
     axpy(mmp,mass[0],p,mmp);
+    AXPYTimer.Stop();
     RealD rn = norm2(p);
     d += rn*mass[0];
     
     bp=b;
     b=-cp/d;
     
+    AXPYTimer.Start();
     c=axpy_norm(r,b,mmp,r);
+    AXPYTimer.Stop();
 
     // Toggle the recurrence history
     bs[0] = b;
     iz = 1-iz;
+    ShiftTimer.Start();
     for(int s=1;s<nshift;s++){
       if((!converged[s])){
 	RealD z0 = z[s][1-iz];
@@ -215,6 +240,7 @@ void operator() (LinearOperatorBase<Field> &Linop, const Field &src, std::vector
 	bs[s] = b*z[s][iz]/z0; // NB sign  rel to Mike
       }
     }
+    ShiftTimer.Stop();
     
     for(int s=0;s<nshift;s++){
       int ss = s;
@@ -257,6 +283,9 @@ void operator() (LinearOperatorBase<Field> &Linop, const Field &src, std::vector
     
     if ( all_converged ){
 
+    SolverTimer.Stop();
+
+
       std::cout<<GridLogMessage<< "CGMultiShift: All shifts have converged iteration "<<k<<std::endl;
       std::cout<<GridLogMessage<< "CGMultiShift: Checking solutions"<<std::endl;
       
@@ -269,8 +298,19 @@ void operator() (LinearOperatorBase<Field> &Linop, const Field &src, std::vector
 	RealD cn = norm2(src);
 	std::cout<<GridLogMessage<<"CGMultiShift: shift["<<s<<"] true residual "<<std::sqrt(rn/cn)<<std::endl;
       }
 
+      std::cout << GridLogMessage << "Time Breakdown "<<std::endl;
+      std::cout << GridLogMessage << "\tElapsed    " << SolverTimer.Elapsed()     <<std::endl;
+      std::cout << GridLogMessage << "\tAXPY    " << AXPYTimer.Elapsed()     <<std::endl;
+      std::cout << GridLogMessage << "\tMatrix    " << MatrixTimer.Elapsed()     <<std::endl;
+      std::cout << GridLogMessage << "\tShift    " << ShiftTimer.Elapsed()     <<std::endl;
+
+      IterationsToComplete = k;	
+
       return;
     }
 
    
   }
   // ugly hack
   std::cout<<GridLogMessage<<"CG multi shift did not converge"<<std::endl;
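
For context, the multi-shift solver above exploits the shift invariance of
Krylov spaces, K(M, b) = K(M + sigma, b): one matrix application per iteration
serves every shift, and only the scalar coefficients are corrected per shift
through the two-entry history z[s][iz] toggled by iz (hence
bs[s] = b*z[s][iz]/z0 in the ShiftTimer block). A minimal usage sketch, with
the operator and field types as illustrative assumptions:

  // Sketch: solve (MdagM + sigma_s) psi_s = src for all shifts in one Krylov sequence.
  MultiShiftFunction shifts;  // poles/residues filled from e.g. a Remez approximation
  ConjugateGradientMultiShift<LatticeFermion> MSCG(10000, shifts);
  std::vector<LatticeFermion> psi(shifts.order, src._grid);
  MSCG(HermOp, src, psi);     // one solution field per shift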

Grid/algorithms/iterative/ConjugateGradientReliableUpdate.h (new file, 256 lines)
@@ -0,0 +1,256 @@
    /*************************************************************************************

    Grid physics library, www.github.com/paboyle/Grid 

    Source file: ./lib/algorithms/iterative/ConjugateGradientReliableUpdate.h

    Copyright (C) 2015

Author: Christopher Kelly <ckelly@phys.columbia.edu>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License along
    with this program; if not, write to the Free Software Foundation, Inc.,
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

    See the full license in the file "LICENSE" in the top level distribution directory
    *************************************************************************************/
    /*  END LEGAL */
#ifndef GRID_CONJUGATE_GRADIENT_RELIABLE_UPDATE_H
#define GRID_CONJUGATE_GRADIENT_RELIABLE_UPDATE_H

namespace Grid {

  template<class FieldD,class FieldF, typename std::enable_if< getPrecision<FieldD>::value == 2, int>::type = 0,typename std::enable_if< getPrecision<FieldF>::value == 1, int>::type = 0> 
  class ConjugateGradientReliableUpdate : public LinearFunction<FieldD> {
  public:
    bool ErrorOnNoConverge;  // throw an assert when the CG fails to converge.
    // Defaults true.
    RealD Tolerance;
    Integer MaxIterations;
    Integer IterationsToComplete; //Number of iterations the CG took to finish. Filled in upon completion
    Integer ReliableUpdatesPerformed;

    bool DoFinalCleanup; //Final DP cleanup, defaults to true
    Integer IterationsToCleanup; //Final DP cleanup step iterations
    
    LinearOperatorBase<FieldF> &Linop_f;
    LinearOperatorBase<FieldD> &Linop_d;
    GridBase* SinglePrecGrid;
    RealD Delta; //reliable update parameter

    //Optional ability to switch to a different linear operator once the tolerance reaches a certain point. Useful for single/half -> single/single
    LinearOperatorBase<FieldF> *Linop_fallback;
    RealD fallback_transition_tol;

    
    ConjugateGradientReliableUpdate(RealD tol, Integer maxit, RealD _delta, GridBase* _sp_grid, LinearOperatorBase<FieldF> &_Linop_f, LinearOperatorBase<FieldD> &_Linop_d, bool err_on_no_conv = true)
      : Tolerance(tol),
        MaxIterations(maxit),
        Delta(_delta),
        Linop_f(_Linop_f),
        Linop_d(_Linop_d),
        SinglePrecGrid(_sp_grid),
        ErrorOnNoConverge(err_on_no_conv),
        DoFinalCleanup(true),
        Linop_fallback(NULL)
    {};

    void setFallbackLinop(LinearOperatorBase<FieldF> &_Linop_fallback, const RealD _fallback_transition_tol){
      Linop_fallback = &_Linop_fallback;
      fallback_transition_tol = _fallback_transition_tol;      
    }
    
    void operator()(const FieldD &src, FieldD &psi) {
      LinearOperatorBase<FieldF> *Linop_f_use = &Linop_f;
      bool using_fallback = false;
      
      psi.checkerboard = src.checkerboard;
      conformable(psi, src);

      RealD cp, c, a, d, b, ssq, qq, b_pred;

      FieldD p(src);
      FieldD mmp(src);
      FieldD r(src);

      // Initial residual computation & set up
      RealD guess = norm2(psi);
      assert(std::isnan(guess) == 0);
    
      Linop_d.HermOpAndNorm(psi, mmp, d, b);
    
      r = src - mmp;
      p = r;

      a = norm2(p);
      cp = a;
      ssq = norm2(src);

      std::cout << GridLogIterative << std::setprecision(4) << "ConjugateGradientReliableUpdate: guess " << guess << std::endl;
      std::cout << GridLogIterative << std::setprecision(4) << "ConjugateGradientReliableUpdate:   src " << ssq << std::endl;
      std::cout << GridLogIterative << std::setprecision(4) << "ConjugateGradientReliableUpdate:    mp " << d << std::endl;
      std::cout << GridLogIterative << std::setprecision(4) << "ConjugateGradientReliableUpdate:   mmp " << b << std::endl;
      std::cout << GridLogIterative << std::setprecision(4) << "ConjugateGradientReliableUpdate:  cp,r " << cp << std::endl;
      std::cout << GridLogIterative << std::setprecision(4) << "ConjugateGradientReliableUpdate:     p " << a << std::endl;

      RealD rsq = Tolerance * Tolerance * ssq;

      // Check if guess is really REALLY good :)
      if (cp <= rsq) {
	std::cout << GridLogMessage << "ConjugateGradientReliableUpdate guess was REALLY good\n";
	std::cout << GridLogMessage << "\tComputed residual " << sqrt(cp / ssq)<<std::endl;
	return;
      }

      //Single prec initialization
      FieldF r_f(SinglePrecGrid);
      r_f.checkerboard = r.checkerboard;
      precisionChange(r_f, r);

      FieldF psi_f(r_f);
      psi_f = zero;

      FieldF p_f(r_f);
      FieldF mmp_f(r_f);

      RealD MaxResidSinceLastRelUp = cp; //initial residual    
    
      std::cout << GridLogIterative << std::setprecision(4)
		<< "ConjugateGradient: k=0 residual " << cp << " target " << rsq << std::endl;

      GridStopWatch LinalgTimer;
      GridStopWatch MatrixTimer;
      GridStopWatch SolverTimer;

      SolverTimer.Start();
      int k = 0;
      int l = 0;
    
      for (k = 1; k <= MaxIterations; k++) {
	c = cp;

	MatrixTimer.Start();
	Linop_f_use->HermOpAndNorm(p_f, mmp_f, d, qq);
	MatrixTimer.Stop();

	LinalgTimer.Start();

	a = c / d;
	b_pred = a * (a * qq - d) / c;

	cp = axpy_norm(r_f, -a, mmp_f, r_f);
	b = cp / c;

	// Fuse these loops ; should be really easy
	psi_f = a * p_f + psi_f;
	//p_f = p_f * b + r_f;

	LinalgTimer.Stop();

	std::cout << GridLogIterative << "ConjugateGradientReliableUpdate: Iteration " << k
		  << " residual " << cp << " target " << rsq << std::endl;
	std::cout << GridLogDebug << "a = "<< a << " b_pred = "<< b_pred << "  b = "<< b << std::endl;
	std::cout << GridLogDebug << "qq = "<< qq << " d = "<< d << "  c = "<< c << std::endl;

	if(cp > MaxResidSinceLastRelUp){
	  std::cout << GridLogIterative << "ConjugateGradientReliableUpdate: updating MaxResidSinceLastRelUp : " << MaxResidSinceLastRelUp << " -> " << cp << std::endl;
	  MaxResidSinceLastRelUp = cp;
	}
	  
	// Stopping condition
	if (cp <= rsq) {
	  //Although not written in the paper, I assume that I have to add on the final solution
	  precisionChange(mmp, psi_f);
	  psi = psi + mmp;
	
	
	  SolverTimer.Stop();
	  Linop_d.HermOpAndNorm(psi, mmp, d, qq);
	  p = mmp - src;

	  RealD srcnorm = sqrt(norm2(src));
	  RealD resnorm = sqrt(norm2(p));
	  RealD true_residual = resnorm / srcnorm;

	  std::cout << GridLogMessage << "ConjugateGradientReliableUpdate Converged on iteration " << k << " after " << l << " reliable updates" << std::endl;
	  std::cout << GridLogMessage << "\tComputed residual " << sqrt(cp / ssq)<<std::endl;
	  std::cout << GridLogMessage << "\tTrue residual " << true_residual<<std::endl;
	  std::cout << GridLogMessage << "\tTarget " << Tolerance << std::endl;

	  std::cout << GridLogMessage << "Time breakdown "<<std::endl;
	  std::cout << GridLogMessage << "\tElapsed    " << SolverTimer.Elapsed() <<std::endl;
	  std::cout << GridLogMessage << "\tMatrix     " << MatrixTimer.Elapsed() <<std::endl;
	  std::cout << GridLogMessage << "\tLinalg     " << LinalgTimer.Elapsed() <<std::endl;

	  IterationsToComplete = k;	
	  ReliableUpdatesPerformed = l;
	  
	  if(DoFinalCleanup){
	    //Do a final CG to cleanup
	    std::cout << GridLogMessage << "ConjugateGradientReliableUpdate performing final cleanup.\n";
	    ConjugateGradient<FieldD> CG(Tolerance,MaxIterations);
	    CG.ErrorOnNoConverge = ErrorOnNoConverge;
	    CG(Linop_d,src,psi);
	    IterationsToCleanup = CG.IterationsToComplete;
	  }
	  else if (ErrorOnNoConverge) assert(true_residual / Tolerance < 10000.0);

	  std::cout << GridLogMessage << "ConjugateGradientReliableUpdate complete.\n";
	  return;
	}
	else if(cp < Delta * MaxResidSinceLastRelUp) { //reliable update
	  std::cout << GridLogMessage << "ConjugateGradientReliableUpdate "
		    << cp << "(residual) < " << Delta << "(Delta) * " << MaxResidSinceLastRelUp << "(MaxResidSinceLastRelUp) on iteration " << k << " : performing reliable update\n";
	  precisionChange(mmp, psi_f);
	  psi = psi + mmp;

	  Linop_d.HermOpAndNorm(psi, mmp, d, qq);
	  r = src - mmp;

	  psi_f = zero;
	  precisionChange(r_f, r);
	  cp = norm2(r);
	  MaxResidSinceLastRelUp = cp;

	  b = cp/c;
	  
	  std::cout << GridLogMessage << "ConjugateGradientReliableUpdate new residual " << cp << std::endl;
	  
	  l = l+1;
	}

	p_f = p_f * b + r_f; //update search vector after reliable update appears to help convergence

	if(!using_fallback && Linop_fallback != NULL && cp < fallback_transition_tol){
	  std::cout << GridLogMessage << "ConjugateGradientReliableUpdate switching to fallback linear operator on iteration " << k << " at residual " << cp << std::endl;
	  Linop_f_use = Linop_fallback;
	  using_fallback = true;
	}

	
      }
      std::cout << GridLogMessage << "ConjugateGradientReliableUpdate did NOT converge"
		<< std::endl;
      
      if (ErrorOnNoConverge) assert(0);
      IterationsToComplete = k;
      ReliableUpdatesPerformed = l;      
    }    
  };


};


#endif
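
A minimal usage sketch for the class above (the grids, operators and the Delta
value are illustrative; the constructor signature is the one defined in this file):

  // Sketch: double precision accuracy from single precision inner iterations.
  // D_f/D_d are single/double precision copies of the same operator,
  // UGrid_f the matching single precision grid, Delta = 0.1 the update trigger.
  MdagMLinearOperator<ActionF, LatticeFermionF> Linop_f(D_f);
  MdagMLinearOperator<ActionD, LatticeFermionD> Linop_d(D_d);
  ConjugateGradientReliableUpdate<LatticeFermionD, LatticeFermionF>
      RUCG(1.0e-8, 10000, 0.1, UGrid_f, Linop_f, Linop_d);
  RUCG(src_d, psi_d); // psi_d is corrected in double precision at every reliable update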

Grid/algorithms/iterative/Deflation.h (new file, 104 lines)
@@ -0,0 +1,104 @@
    /*************************************************************************************

    Grid physics library, www.github.com/paboyle/Grid 

    Source file: ./lib/algorithms/iterative/Deflation.h

    Copyright (C) 2015

Author: Peter Boyle <paboyle@ph.ed.ac.uk>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License along
    with this program; if not, write to the Free Software Foundation, Inc.,
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

    See the full license in the file "LICENSE" in the top level distribution directory
    *************************************************************************************/
    /*  END LEGAL */
#ifndef GRID_DEFLATION_H
#define GRID_DEFLATION_H

namespace Grid { 

template<class Field>
class ZeroGuesser: public LinearFunction<Field> {
public:
  virtual void operator()(const Field &src, Field &guess) { guess = zero; };
};

template<class Field>
class SourceGuesser: public LinearFunction<Field> {
public:
  virtual void operator()(const Field &src, Field &guess) { guess = src; };
};

////////////////////////////////
// Fine grid deflation
////////////////////////////////
template<class Field>
class DeflatedGuesser: public LinearFunction<Field> {
private:
  const std::vector<Field> &evec;
  const std::vector<RealD> &eval;

public:

  DeflatedGuesser(const std::vector<Field> & _evec,const std::vector<RealD> & _eval) : evec(_evec), eval(_eval) {};

  virtual void operator()(const Field &src,Field &guess) {
    guess = zero;
    assert(evec.size()==eval.size());
    auto N = evec.size();
    for (int i=0;i<N;i++) {
      const Field& tmp = evec[i];
      axpy(guess,TensorRemove(innerProduct(tmp,src)) / eval[i],tmp,guess);
    }
    guess.checkerboard = src.checkerboard;
  }
};

template<class FineField, class CoarseField>
class LocalCoherenceDeflatedGuesser: public LinearFunction<FineField> {
private:
  const std::vector<FineField>   &subspace;
  const std::vector<CoarseField> &evec_coarse;
  const std::vector<RealD>       &eval_coarse;
public:
  
  LocalCoherenceDeflatedGuesser(const std::vector<FineField>   &_subspace,
				const std::vector<CoarseField> &_evec_coarse,
				const std::vector<RealD>       &_eval_coarse)
    : subspace(_subspace), 
      evec_coarse(_evec_coarse), 
      eval_coarse(_eval_coarse)  
  {
  }
  
  void operator()(const FineField &src,FineField &guess) { 
    int N = (int)evec_coarse.size();
    CoarseField src_coarse(evec_coarse[0]._grid);
    CoarseField guess_coarse(evec_coarse[0]._grid);    guess_coarse = zero;
    blockProject(src_coarse,src,subspace);    
    for (int i=0;i<N;i++) {
      const CoarseField & tmp = evec_coarse[i];
      axpy(guess_coarse,TensorRemove(innerProduct(tmp,src_coarse)) / eval_coarse[i],tmp,guess_coarse);
    }
    blockPromote(guess_coarse,guess,subspace);
    guess.checkerboard = src.checkerboard;
  };
};



}
#endif
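
The deflated guess assembled above is the spectral low-mode approximation of
the solution: for eigenpairs M v_i = lambda_i v_i, the loop accumulates
guess = sum_i v_i <v_i, src> / lambda_i, i.e. the exact inverse restricted to
the deflation subspace. A short usage sketch (the Lanczos run producing
evec/eval is assumed and not shown):

  // Sketch: seed a CG solve with the low-mode part of the solution.
  DeflatedGuesser<LatticeFermion> Guesser(evec, eval); // evec/eval from e.g. an IRL run
  LatticeFermion guess(src._grid);
  Guesser(src, guess);    // low modes solved exactly by projection
  CG(HermOp, src, guess); // CG only has to resolve the high-mode remainder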

Grid/algorithms/iterative/FlexibleCommunicationAvoidingGeneralisedMinimalResidual.h (new file, 256 lines)
@@ -0,0 +1,256 @@
/*************************************************************************************

Grid physics library, www.github.com/paboyle/Grid

Source file: ./lib/algorithms/iterative/FlexibleCommunicationAvoidingGeneralisedMinimalResidual.h

Copyright (C) 2015

Author: Daniel Richtmann <daniel.richtmann@ur.de>

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

See the full license in the file "LICENSE" in the top level distribution
directory
*************************************************************************************/
/*  END LEGAL */
#ifndef GRID_FLEXIBLE_COMMUNICATION_AVOIDING_GENERALISED_MINIMAL_RESIDUAL_H
#define GRID_FLEXIBLE_COMMUNICATION_AVOIDING_GENERALISED_MINIMAL_RESIDUAL_H

namespace Grid {

template<class Field>
class FlexibleCommunicationAvoidingGeneralisedMinimalResidual : public OperatorFunction<Field> {
 public:
  bool ErrorOnNoConverge; // Throw an assert when FCAGMRES fails to converge,
                          // defaults to true

  RealD   Tolerance;

  Integer MaxIterations;
  Integer RestartLength;
  Integer MaxNumberOfRestarts;
  Integer IterationCount; // Number of iterations the FCAGMRES took to finish,
                          // filled in upon completion

  GridStopWatch MatrixTimer;
  GridStopWatch PrecTimer;
  GridStopWatch LinalgTimer;
  GridStopWatch QrTimer;
  GridStopWatch CompSolutionTimer;

  Eigen::MatrixXcd H;

  std::vector<std::complex<double>> y;
  std::vector<std::complex<double>> gamma;
  std::vector<std::complex<double>> c;
  std::vector<std::complex<double>> s;

  LinearFunction<Field> &Preconditioner;

  FlexibleCommunicationAvoidingGeneralisedMinimalResidual(RealD   tol,
                                                          Integer maxit,
                                                          LinearFunction<Field> &Prec,
                                                          Integer restart_length,
                                                          bool    err_on_no_conv = true)
      : Tolerance(tol)
      , MaxIterations(maxit)
      , RestartLength(restart_length)
      , MaxNumberOfRestarts(MaxIterations/RestartLength + ((MaxIterations%RestartLength == 0) ? 0 : 1))
      , ErrorOnNoConverge(err_on_no_conv)
      , H(Eigen::MatrixXcd::Zero(RestartLength, RestartLength + 1)) // sizes taken from DD-αAMG code base
      , y(RestartLength + 1, 0.)
      , gamma(RestartLength + 1, 0.)
      , c(RestartLength + 1, 0.)
      , s(RestartLength + 1, 0.)
      , Preconditioner(Prec) {};

  void operator()(LinearOperatorBase<Field> &LinOp, const Field &src, Field &psi) {

    std::cout << GridLogWarning << "This algorithm currently doesn't differ from regular FGMRES" << std::endl;

    psi.checkerboard = src.checkerboard;
    conformable(psi, src);

    RealD guess = norm2(psi);
    assert(std::isnan(guess) == 0);

    RealD cp;
    RealD ssq = norm2(src);
    RealD rsq = Tolerance * Tolerance * ssq;

    Field r(src._grid);

    std::cout << std::setprecision(4) << std::scientific;
    std::cout << GridLogIterative << "FlexibleCommunicationAvoidingGeneralisedMinimalResidual: guess " << guess << std::endl;
    std::cout << GridLogIterative << "FlexibleCommunicationAvoidingGeneralisedMinimalResidual:   src " << ssq   << std::endl;

    PrecTimer.Reset();
    MatrixTimer.Reset();
    LinalgTimer.Reset();
    QrTimer.Reset();
    CompSolutionTimer.Reset();

    GridStopWatch SolverTimer;
    SolverTimer.Start();

    IterationCount = 0;

    for (int k=0; k<MaxNumberOfRestarts; k++) {

      cp = outerLoopBody(LinOp, src, psi, rsq);

      // Stopping condition
      if (cp <= rsq) {

        SolverTimer.Stop();

        LinOp.Op(psi,r);
        axpy(r,-1.0,src,r);

        RealD srcnorm       = sqrt(ssq);
        RealD resnorm       = sqrt(norm2(r));
        RealD true_residual = resnorm / srcnorm;

        std::cout << GridLogMessage        << "FlexibleCommunicationAvoidingGeneralisedMinimalResidual: Converged on iteration " << IterationCount
                  << " computed residual " << sqrt(cp / ssq)
                  << " true residual "     << true_residual
                  << " target "            << Tolerance << std::endl;

        std::cout << GridLogMessage << "FCAGMRES Time elapsed: Total   " <<       SolverTimer.Elapsed() << std::endl;
        std::cout << GridLogMessage << "FCAGMRES Time elapsed: Precon  " <<         PrecTimer.Elapsed() << std::endl;
        std::cout << GridLogMessage << "FCAGMRES Time elapsed: Matrix  " <<       MatrixTimer.Elapsed() << std::endl;
        std::cout << GridLogMessage << "FCAGMRES Time elapsed: Linalg  " <<       LinalgTimer.Elapsed() << std::endl;
        std::cout << GridLogMessage << "FCAGMRES Time elapsed: QR      " <<           QrTimer.Elapsed() << std::endl;
        std::cout << GridLogMessage << "FCAGMRES Time elapsed: CompSol " << CompSolutionTimer.Elapsed() << std::endl;
        return;
      }
    }

    std::cout << GridLogMessage << "FlexibleCommunicationAvoidingGeneralisedMinimalResidual did NOT converge" << std::endl;

    if (ErrorOnNoConverge)
      assert(0);
  }

  RealD outerLoopBody(LinearOperatorBase<Field> &LinOp, const Field &src, Field &psi, RealD rsq) {

    RealD cp = 0;

    Field w(src._grid);
    Field r(src._grid);

    // these should probably be made class members so that they are only allocated once, not in every restart
    std::vector<Field> v(RestartLength + 1, src._grid); for (auto &elem : v) elem = zero;
    std::vector<Field> z(RestartLength + 1, src._grid); for (auto &elem : z) elem = zero;

    MatrixTimer.Start();
    LinOp.Op(psi, w);
    MatrixTimer.Stop();

    LinalgTimer.Start();
    r = src - w;

    gamma[0] = sqrt(norm2(r));

    v[0] = (1. / gamma[0]) * r;
    LinalgTimer.Stop();

    for (int i=0; i<RestartLength; i++) {

      IterationCount++;

      arnoldiStep(LinOp, v, z, w, i);

      qrUpdate(i);

      cp = std::norm(gamma[i+1]);

      std::cout << GridLogIterative << "FlexibleCommunicationAvoidingGeneralisedMinimalResidual: Iteration " << IterationCount
                << " residual " << cp << " target " << rsq << std::endl;

      if ((i == RestartLength - 1) || (IterationCount == MaxIterations) || (cp <= rsq)) {

        computeSolution(z, psi, i);

        return cp;
      }
    }

    assert(0); // Never reached
    return cp;
  }

  void arnoldiStep(LinearOperatorBase<Field> &LinOp, std::vector<Field> &v, std::vector<Field> &z, Field &w, int iter) {

    PrecTimer.Start();
    Preconditioner(v[iter], z[iter]);
    PrecTimer.Stop();

    MatrixTimer.Start();
    LinOp.Op(z[iter], w);
    MatrixTimer.Stop();

    LinalgTimer.Start();
    for (int i = 0; i <= iter; ++i) {
      H(iter, i) = innerProduct(v[i], w);
      w = w - H(iter, i) * v[i];
    }

    H(iter, iter + 1) = sqrt(norm2(w));
    v[iter + 1] = (1. / H(iter, iter + 1)) * w;
    LinalgTimer.Stop();
  }

  void qrUpdate(int iter) {

    QrTimer.Start();
    for (int i = 0; i < iter ; ++i) {
      auto tmp       = -s[i] * H(iter, i) + c[i] * H(iter, i + 1);
      H(iter, i)     = std::conj(c[i]) * H(iter, i) + std::conj(s[i]) * H(iter, i + 1);
      H(iter, i + 1) = tmp;
    }

    // Compute new Givens Rotation
    ComplexD nu = sqrt(std::norm(H(iter, iter)) + std::norm(H(iter, iter + 1)));
    c[iter]     = H(iter, iter) / nu;
    s[iter]     = H(iter, iter + 1) / nu;

    // Apply new Givens rotation
    H(iter, iter)     = nu;
    H(iter, iter + 1) = 0.;

    gamma[iter + 1] = -s[iter] * gamma[iter];
    gamma[iter]     = std::conj(c[iter]) * gamma[iter];
    QrTimer.Stop();
  }

  void computeSolution(std::vector<Field> const &z, Field &psi, int iter) {

    CompSolutionTimer.Start();
    for (int i = iter; i >= 0; i--) {
      y[i] = gamma[i];
      for (int k = i + 1; k <= iter; k++)
        y[i] = y[i] - H(k, i) * y[k];
      y[i] = y[i] / H(i, i);
    }

    for (int i = 0; i <= iter; i++)
      psi = psi + z[i] * y[i];
    CompSolutionTimer.Stop();
  }
};
}
#endif
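
The flexible variant differs from plain GMRES in one essential point visible
above: arnoldiStep first applies the (possibly changing) preconditioner,
z[iter] = Preconditioner(v[iter]), the operator then acts on z[iter], and
computeSolution assembles psi from the z vectors rather than the v vectors.
A usage sketch; TrivialPrecon as a stand-in identity preconditioner is an
assumption here, but any LinearFunction<Field> can be passed:

  // Sketch: FCAGMRES with a placeholder preconditioner.
  TrivialPrecon<LatticeFermion> Prec; // swap in e.g. a multigrid V-cycle
  FlexibleCommunicationAvoidingGeneralisedMinimalResidual<LatticeFermion> FCAGMRES(1.0e-8, 10000, Prec, 20);
  FCAGMRES(HermOp, src, psi);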

Grid/algorithms/iterative/FlexibleGeneralisedMinimalResidual.h (new file, 254 lines)
@@ -0,0 +1,254 @@
 | 
			
		||||
/*************************************************************************************
 | 
			
		||||
 | 
			
		||||
Grid physics library, www.github.com/paboyle/Grid
 | 
			
		||||
 | 
			
		||||
Source file: ./lib/algorithms/iterative/FlexibleGeneralisedMinimalResidual.h
 | 
			
		||||
 | 
			
		||||
Copyright (C) 2015
 | 
			
		||||
 | 
			
		||||
Author: Daniel Richtmann <daniel.richtmann@ur.de>
 | 
			
		||||
 | 
			
		||||
This program is free software; you can redistribute it and/or modify
 | 
			
		||||
it under the terms of the GNU General Public License as published by
 | 
			
		||||
the Free Software Foundation; either version 2 of the License, or
 | 
			
		||||
(at your option) any later version.
 | 
			
		||||
 | 
			
		||||
This program is distributed in the hope that it will be useful,
 | 
			
		||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
 | 
			
		||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 | 
			
		||||
GNU General Public License for more details.
 | 
			
		||||
 | 
			
		||||
You should have received a copy of the GNU General Public License along
 | 
			
		||||
with this program; if not, write to the Free Software Foundation, Inc.,
 | 
			
		||||
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 | 
			
		||||
 | 
			
		||||
See the full license in the file "LICENSE" in the top level distribution
 | 
			
		||||
directory
 | 
			
		||||
*************************************************************************************/
 | 
			
		||||
/*  END LEGAL */
 | 
			
		||||
#ifndef GRID_FLEXIBLE_GENERALISED_MINIMAL_RESIDUAL_H
 | 
			
		||||
#define GRID_FLEXIBLE_GENERALISED_MINIMAL_RESIDUAL_H
 | 
			
		||||
 | 
			
		||||
namespace Grid {
 | 
			
		||||
 | 
			
		||||
template<class Field>
 | 
			
		||||
class FlexibleGeneralisedMinimalResidual : public OperatorFunction<Field> {
 | 
			
		||||
 public:
 | 
			
		||||
  bool ErrorOnNoConverge; // Throw an assert when FGMRES fails to converge,
 | 
			
		||||
                          // defaults to true
 | 
			
		||||
 | 
			
		||||
  RealD   Tolerance;
 | 
			
		||||
 | 
			
		||||
  Integer MaxIterations;
 | 
			
		||||
  Integer RestartLength;
 | 
			
		||||
  Integer MaxNumberOfRestarts;
 | 
			
		||||
  Integer IterationCount; // Number of iterations the FGMRES took to finish,
 | 
			
		||||
                          // filled in upon completion
 | 
			
		||||
 | 
			
		||||
  GridStopWatch MatrixTimer;
 | 
			
		||||
  GridStopWatch PrecTimer;
 | 
			
		||||
  GridStopWatch LinalgTimer;
 | 
			
		||||
  GridStopWatch QrTimer;
 | 
			
		||||
  GridStopWatch CompSolutionTimer;
 | 
			
		||||
 | 
			
		||||
  Eigen::MatrixXcd H;
 | 
			
		||||
 | 
			
		||||
  std::vector<std::complex<double>> y;
 | 
			
		||||
  std::vector<std::complex<double>> gamma;
 | 
			
		||||
  std::vector<std::complex<double>> c;
 | 
			
		||||
  std::vector<std::complex<double>> s;
 | 
			
		||||
 | 
			
		||||
  LinearFunction<Field> &Preconditioner;
 | 
			
		||||
 | 
			
		||||
  FlexibleGeneralisedMinimalResidual(RealD   tol,
 | 
			
		||||
                                     Integer maxit,
 | 
			
		||||
                                     LinearFunction<Field> &Prec,
 | 
			
		||||
                                     Integer restart_length,
 | 
			
		||||
                                     bool    err_on_no_conv = true)
 | 
			
		||||
      : Tolerance(tol)
 | 
			
		||||
      , MaxIterations(maxit)
 | 
			
		||||
      , RestartLength(restart_length)
 | 
			
		||||
      , MaxNumberOfRestarts(MaxIterations/RestartLength + ((MaxIterations%RestartLength == 0) ? 0 : 1))
 | 
			
		||||
      , ErrorOnNoConverge(err_on_no_conv)
 | 
			
		||||
      , H(Eigen::MatrixXcd::Zero(RestartLength, RestartLength + 1)) // sizes taken from DD-αAMG code base
 | 
			
		||||
      , y(RestartLength + 1, 0.)
 | 
			
		||||
      , gamma(RestartLength + 1, 0.)
 | 
			
		||||
      , c(RestartLength + 1, 0.)
 | 
			
		||||
      , s(RestartLength + 1, 0.)
 | 
			
		||||
      , Preconditioner(Prec) {};

  void operator()(LinearOperatorBase<Field> &LinOp, const Field &src, Field &psi) {

    psi.checkerboard = src.checkerboard;
    conformable(psi, src);

    RealD guess = norm2(psi);
    assert(std::isnan(guess) == 0);

    RealD cp;
    RealD ssq = norm2(src);
    RealD rsq = Tolerance * Tolerance * ssq;

    Field r(src._grid);

    std::cout << std::setprecision(4) << std::scientific;
    std::cout << GridLogIterative << "FlexibleGeneralisedMinimalResidual: guess " << guess << std::endl;
    std::cout << GridLogIterative << "FlexibleGeneralisedMinimalResidual:   src " << ssq   << std::endl;

    PrecTimer.Reset();
    MatrixTimer.Reset();
    LinalgTimer.Reset();
    QrTimer.Reset();
    CompSolutionTimer.Reset();

    GridStopWatch SolverTimer;
    SolverTimer.Start();

    IterationCount = 0;

    for (int k=0; k<MaxNumberOfRestarts; k++) {

      cp = outerLoopBody(LinOp, src, psi, rsq);

      // Stopping condition
      if (cp <= rsq) {

        SolverTimer.Stop();

        LinOp.Op(psi,r);
        axpy(r,-1.0,src,r);

        RealD srcnorm       = sqrt(ssq);
        RealD resnorm       = sqrt(norm2(r));
        RealD true_residual = resnorm / srcnorm;

        std::cout << GridLogMessage        << "FlexibleGeneralisedMinimalResidual: Converged on iteration " << IterationCount
                  << " computed residual " << sqrt(cp / ssq)
                  << " true residual "     << true_residual
                  << " target "            << Tolerance << std::endl;

        std::cout << GridLogMessage << "FGMRES Time elapsed: Total   " <<       SolverTimer.Elapsed() << std::endl;
        std::cout << GridLogMessage << "FGMRES Time elapsed: Precon  " <<         PrecTimer.Elapsed() << std::endl;
        std::cout << GridLogMessage << "FGMRES Time elapsed: Matrix  " <<       MatrixTimer.Elapsed() << std::endl;
        std::cout << GridLogMessage << "FGMRES Time elapsed: Linalg  " <<       LinalgTimer.Elapsed() << std::endl;
        std::cout << GridLogMessage << "FGMRES Time elapsed: QR      " <<           QrTimer.Elapsed() << std::endl;
        std::cout << GridLogMessage << "FGMRES Time elapsed: CompSol " << CompSolutionTimer.Elapsed() << std::endl;
        return;
      }
    }

    std::cout << GridLogMessage << "FlexibleGeneralisedMinimalResidual did NOT converge" << std::endl;

    if (ErrorOnNoConverge)
      assert(0);
  }

  RealD outerLoopBody(LinearOperatorBase<Field> &LinOp, const Field &src, Field &psi, RealD rsq) {

    RealD cp = 0;

    Field w(src._grid);
    Field r(src._grid);

    // these should probably be made class members so that they are only allocated once, not in every restart
    std::vector<Field> v(RestartLength + 1, src._grid); for (auto &elem : v) elem = zero;
    std::vector<Field> z(RestartLength + 1, src._grid); for (auto &elem : z) elem = zero;

    MatrixTimer.Start();
    LinOp.Op(psi, w);
    MatrixTimer.Stop();

    LinalgTimer.Start();
    r = src - w;

    gamma[0] = sqrt(norm2(r));

    v[0] = (1. / gamma[0]) * r;
    LinalgTimer.Stop();

    for (int i=0; i<RestartLength; i++) {

      IterationCount++;

      arnoldiStep(LinOp, v, z, w, i);

      qrUpdate(i);

      cp = std::norm(gamma[i+1]);

      std::cout << GridLogIterative << "FlexibleGeneralisedMinimalResidual: Iteration " << IterationCount
                << " residual " << cp << " target " << rsq << std::endl;

      if ((i == RestartLength - 1) || (IterationCount == MaxIterations) || (cp <= rsq)) {

        computeSolution(z, psi, i);

        return cp;
      }
    }

    assert(0); // Never reached
    return cp;
  }
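
  // In this flexible variant the preconditioned vectors z[i] = Prec(v[i]) are
  // kept alongside the orthonormal Krylov basis v[i], and computeSolution()
  // forms the update from the z[i]. This is what allows the preconditioner to
  // change from iteration to iteration (e.g. an inner solver run to loose
  // tolerance), which plain GMRES does not permit.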

  void arnoldiStep(LinearOperatorBase<Field> &LinOp, std::vector<Field> &v, std::vector<Field> &z, Field &w, int iter) {

    PrecTimer.Start();
    Preconditioner(v[iter], z[iter]);
    PrecTimer.Stop();

    MatrixTimer.Start();
    LinOp.Op(z[iter], w);
    MatrixTimer.Stop();

    LinalgTimer.Start();
    for (int i = 0; i <= iter; ++i) {
      H(iter, i) = innerProduct(v[i], w);
      w = w - H(iter, i) * v[i];
    }

    H(iter, iter + 1) = sqrt(norm2(w));
    v[iter + 1] = (1. / H(iter, iter + 1)) * w;
    LinalgTimer.Stop();
  }
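
  // The QR update below applies one complex Givens rotation per Arnoldi step.
  // For the new rotation acting on entries (h_i, h_{i+1}) of the current
  // Hessenberg column the transformation is
  //
  //   [ conj(c)  conj(s) ] [ h_i     ]   [ nu ]
  //   [ -s       c       ] [ h_{i+1} ] = [ 0  ],  c = h_i/nu, s = h_{i+1}/nu,
  //
  // with nu = sqrt(|h_i|^2 + |h_{i+1}|^2). This zeroes the subdiagonal entry
  // and rotates gamma along with it, so |gamma[iter+1]|^2 tracks the squared
  // residual norm without ever forming the residual vector explicitly.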

  void qrUpdate(int iter) {

    QrTimer.Start();
    for (int i = 0; i < iter ; ++i) {
      auto tmp       = -s[i] * H(iter, i) + c[i] * H(iter, i + 1);
      H(iter, i)     = std::conj(c[i]) * H(iter, i) + std::conj(s[i]) * H(iter, i + 1);
      H(iter, i + 1) = tmp;
    }

    // Compute new Givens Rotation
    ComplexD nu = sqrt(std::norm(H(iter, iter)) + std::norm(H(iter, iter + 1)));
    c[iter]     = H(iter, iter) / nu;
    s[iter]     = H(iter, iter + 1) / nu;

    // Apply new Givens rotation
    H(iter, iter)     = nu;
    H(iter, iter + 1) = 0.;

    gamma[iter + 1] = -s[iter] * gamma[iter];
    gamma[iter]     = std::conj(c[iter]) * gamma[iter];
    QrTimer.Stop();
  }
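
  // computeSolution() back-substitutes through the rotated, now upper
  // triangular least-squares system R y = gamma. Note the transposed storage
  // convention: H(k, i) holds Hessenberg element (row i, column k), so H(i, i)
  // is the diagonal of R and the inner loop subtracts the already-solved
  // columns k > i.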

  void computeSolution(std::vector<Field> const &z, Field &psi, int iter) {

    CompSolutionTimer.Start();
    for (int i = iter; i >= 0; i--) {
      y[i] = gamma[i];
      for (int k = i + 1; k <= iter; k++)
        y[i] = y[i] - H(k, i) * y[k];
      y[i] = y[i] / H(i, i);
    }

    for (int i = 0; i <= iter; i++)
      psi = psi + z[i] * y[i];
    CompSolutionTimer.Stop();
  }
};
}
#endif

Grid/algorithms/iterative/GeneralisedMinimalResidual.h (new file, 242 lines)
@@ -0,0 +1,242 @@
/*************************************************************************************

Grid physics library, www.github.com/paboyle/Grid

Source file: ./lib/algorithms/iterative/GeneralisedMinimalResidual.h

Copyright (C) 2015

Author: Daniel Richtmann <daniel.richtmann@ur.de>

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

See the full license in the file "LICENSE" in the top level distribution
directory
*************************************************************************************/
/*  END LEGAL */
#ifndef GRID_GENERALISED_MINIMAL_RESIDUAL_H
#define GRID_GENERALISED_MINIMAL_RESIDUAL_H

namespace Grid {

template<class Field>
class GeneralisedMinimalResidual : public OperatorFunction<Field> {
 public:
  bool ErrorOnNoConverge; // Throw an assert when GMRES fails to converge,
                          // defaults to true

  RealD   Tolerance;

  Integer MaxIterations;
  Integer RestartLength;
  Integer MaxNumberOfRestarts;
  Integer IterationCount; // Number of iterations the GMRES took to finish,
                          // filled in upon completion

  GridStopWatch MatrixTimer;
  GridStopWatch LinalgTimer;
  GridStopWatch QrTimer;
  GridStopWatch CompSolutionTimer;

  Eigen::MatrixXcd H;

  std::vector<std::complex<double>> y;
  std::vector<std::complex<double>> gamma;
  std::vector<std::complex<double>> c;
  std::vector<std::complex<double>> s;

  GeneralisedMinimalResidual(RealD   tol,
                             Integer maxit,
                             Integer restart_length,
                             bool    err_on_no_conv = true)
      : Tolerance(tol)
      , MaxIterations(maxit)
      , RestartLength(restart_length)
      , MaxNumberOfRestarts(MaxIterations/RestartLength + ((MaxIterations%RestartLength == 0) ? 0 : 1))
      , ErrorOnNoConverge(err_on_no_conv)
      , H(Eigen::MatrixXcd::Zero(RestartLength, RestartLength + 1)) // sizes taken from DD-αAMG code base
      , y(RestartLength + 1, 0.)
      , gamma(RestartLength + 1, 0.)
      , c(RestartLength + 1, 0.)
      , s(RestartLength + 1, 0.) {};
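
  // This is the unpreconditioned counterpart of FlexibleGeneralisedMinimalResidual
  // above: arnoldiStep() applies the operator directly to v[iter] and the
  // solution update in computeSolution() is formed from the Krylov basis v
  // itself, so no second set of preconditioned vectors is needed.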

  void operator()(LinearOperatorBase<Field> &LinOp, const Field &src, Field &psi) {

    psi.checkerboard = src.checkerboard;
    conformable(psi, src);

    RealD guess = norm2(psi);
    assert(std::isnan(guess) == 0);

    RealD cp;
    RealD ssq = norm2(src);
    RealD rsq = Tolerance * Tolerance * ssq;

    Field r(src._grid);

    std::cout << std::setprecision(4) << std::scientific;
    std::cout << GridLogIterative << "GeneralisedMinimalResidual: guess " << guess << std::endl;
    std::cout << GridLogIterative << "GeneralisedMinimalResidual:   src " << ssq   << std::endl;

    MatrixTimer.Reset();
    LinalgTimer.Reset();
    QrTimer.Reset();
    CompSolutionTimer.Reset();

    GridStopWatch SolverTimer;
    SolverTimer.Start();

    IterationCount = 0;

    for (int k=0; k<MaxNumberOfRestarts; k++) {

      cp = outerLoopBody(LinOp, src, psi, rsq);

      // Stopping condition
      if (cp <= rsq) {

        SolverTimer.Stop();

        LinOp.Op(psi,r);
        axpy(r,-1.0,src,r);

        RealD srcnorm       = sqrt(ssq);
        RealD resnorm       = sqrt(norm2(r));
        RealD true_residual = resnorm / srcnorm;

        std::cout << GridLogMessage        << "GeneralisedMinimalResidual: Converged on iteration " << IterationCount
                  << " computed residual " << sqrt(cp / ssq)
                  << " true residual "     << true_residual
                  << " target "            << Tolerance << std::endl;

        std::cout << GridLogMessage << "GMRES Time elapsed: Total   " <<       SolverTimer.Elapsed() << std::endl;
        std::cout << GridLogMessage << "GMRES Time elapsed: Matrix  " <<       MatrixTimer.Elapsed() << std::endl;
        std::cout << GridLogMessage << "GMRES Time elapsed: Linalg  " <<       LinalgTimer.Elapsed() << std::endl;
        std::cout << GridLogMessage << "GMRES Time elapsed: QR      " <<           QrTimer.Elapsed() << std::endl;
        std::cout << GridLogMessage << "GMRES Time elapsed: CompSol " << CompSolutionTimer.Elapsed() << std::endl;
        return;
      }
    }

    std::cout << GridLogMessage << "GeneralisedMinimalResidual did NOT converge" << std::endl;

    if (ErrorOnNoConverge)
      assert(0);
  }

  RealD outerLoopBody(LinearOperatorBase<Field> &LinOp, const Field &src, Field &psi, RealD rsq) {

    RealD cp = 0;

    Field w(src._grid);
    Field r(src._grid);

    // this should probably be made a class member so that it is only allocated once, not in every restart
    std::vector<Field> v(RestartLength + 1, src._grid); for (auto &elem : v) elem = zero;

    MatrixTimer.Start();
    LinOp.Op(psi, w);
    MatrixTimer.Stop();

    LinalgTimer.Start();
    r = src - w;

    gamma[0] = sqrt(norm2(r));

    v[0] = (1. / gamma[0]) * r;
    LinalgTimer.Stop();

    for (int i=0; i<RestartLength; i++) {

      IterationCount++;

      arnoldiStep(LinOp, v, w, i);

      qrUpdate(i);

      cp = std::norm(gamma[i+1]);

      std::cout << GridLogIterative << "GeneralisedMinimalResidual: Iteration " << IterationCount
                << " residual " << cp << " target " << rsq << std::endl;

      if ((i == RestartLength - 1) || (IterationCount == MaxIterations) || (cp <= rsq)) {

        computeSolution(v, psi, i);

        return cp;
      }
    }

    assert(0); // Never reached
    return cp;
  }

  void arnoldiStep(LinearOperatorBase<Field> &LinOp, std::vector<Field> &v, Field &w, int iter) {

    MatrixTimer.Start();
    LinOp.Op(v[iter], w);
    MatrixTimer.Stop();

    LinalgTimer.Start();
    for (int i = 0; i <= iter; ++i) {
      H(iter, i) = innerProduct(v[i], w);
      w = w - H(iter, i) * v[i];
    }

    H(iter, iter + 1) = sqrt(norm2(w));
    v[iter + 1] = (1. / H(iter, iter + 1)) * w;
    LinalgTimer.Stop();
  }

  void qrUpdate(int iter) {

    QrTimer.Start();
    for (int i = 0; i < iter ; ++i) {
      auto tmp       = -s[i] * H(iter, i) + c[i] * H(iter, i + 1);
      H(iter, i)     = std::conj(c[i]) * H(iter, i) + std::conj(s[i]) * H(iter, i + 1);
      H(iter, i + 1) = tmp;
    }

    // Compute new Givens Rotation
    ComplexD nu = sqrt(std::norm(H(iter, iter)) + std::norm(H(iter, iter + 1)));
    c[iter]     = H(iter, iter) / nu;
    s[iter]     = H(iter, iter + 1) / nu;

    // Apply new Givens rotation
    H(iter, iter)     = nu;
    H(iter, iter + 1) = 0.;

    gamma[iter + 1] = -s[iter] * gamma[iter];
    gamma[iter]     = std::conj(c[iter]) * gamma[iter];
    QrTimer.Stop();
  }

  void computeSolution(std::vector<Field> const &v, Field &psi, int iter) {

    CompSolutionTimer.Start();
    for (int i = iter; i >= 0; i--) {
      y[i] = gamma[i];
      for (int k = i + 1; k <= iter; k++)
        y[i] = y[i] - H(k, i) * y[k];
      y[i] = y[i] / H(i, i);
    }

    for (int i = 0; i <= iter; i++)
      psi = psi + v[i] * y[i];
    CompSolutionTimer.Stop();
  }
};
}
#endif

Grid/algorithms/iterative/ImplicitlyRestartedLanczos.h (new file, 842 lines)
@@ -0,0 +1,842 @@
    /*************************************************************************************

    Grid physics library, www.github.com/paboyle/Grid 

    Source file: ./lib/algorithms/iterative/ImplicitlyRestartedLanczos.h

    Copyright (C) 2015

Author: Peter Boyle <paboyle@ph.ed.ac.uk>
Author: paboyle <paboyle@ph.ed.ac.uk>
Author: Chulwoo Jung <chulwoo@bnl.gov>
Author: Christoph Lehner <clehner@bnl.gov>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License along
    with this program; if not, write to the Free Software Foundation, Inc.,
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

    See the full license in the file "LICENSE" in the top level distribution directory
    *************************************************************************************/
    /*  END LEGAL */
#ifndef GRID_BIRL_H
#define GRID_BIRL_H

#include <string.h> //memset
//#include <zlib.h>
#include <sys/stat.h>

namespace Grid { 

  ////////////////////////////////////////////////////////
  // Move following 100 LOC to lattice/Lattice_basis.h
  ////////////////////////////////////////////////////////
template<class Field>
void basisOrthogonalize(std::vector<Field> &basis,Field &w,int k) 
{
  for(int j=0; j<k; ++j){
    auto ip = innerProduct(basis[j],w);
    w = w - ip*basis[j];
  }
}

template<class Field>
void basisRotate(std::vector<Field> &basis,Eigen::MatrixXd& Qt,int j0, int j1, int k0,int k1,int Nm) 
{
  typedef typename Field::vector_object vobj;
  GridBase* grid = basis[0]._grid;

  parallel_region
  {

    std::vector < vobj , commAllocator<vobj> > B(Nm); // Thread private

    parallel_for_internal(int ss=0;ss < grid->oSites();ss++){
      for(int j=j0; j<j1; ++j) B[j]=0.;

      for(int j=j0; j<j1; ++j){
	for(int k=k0; k<k1; ++k){
	  B[j] +=Qt(j,k) * basis[k]._odata[ss];
	}
      }
      for(int j=j0; j<j1; ++j){
	  basis[j]._odata[ss] = B[j];
      }
    }
  }
}
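
// basisRotate applies the rotation matrix Qt in place over the site-local
// data: for each lattice site ss it forms basis[j] <- sum_k Qt(j,k) basis[k]
// for j in [j0,j1) and k in [k0,k1), buffering into the thread-private B so
// that vectors still needed as inputs are not overwritten mid-rotation.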

// Extract a single rotated vector
template<class Field>
void basisRotateJ(Field &result,std::vector<Field> &basis,Eigen::MatrixXd& Qt,int j, int k0,int k1,int Nm) 
{
  typedef typename Field::vector_object vobj;
  GridBase* grid = basis[0]._grid;

  result.checkerboard = basis[0].checkerboard;
  parallel_for(int ss=0;ss < grid->oSites();ss++){
    vobj B = zero;
    for(int k=k0; k<k1; ++k){
      B +=Qt(j,k) * basis[k]._odata[ss];
    }
    result._odata[ss] = B;
  }
}

template<class Field>
void basisReorderInPlace(std::vector<Field> &_v,std::vector<RealD>& sort_vals, std::vector<int>& idx) 
{
  int vlen = idx.size();

  assert(vlen>=1);
  assert(vlen<=sort_vals.size());
  assert(vlen<=_v.size());

  for (size_t i=0;i<vlen;i++) {

    if (idx[i] != i) {

      //////////////////////////////////////
      // idx[i] is a table of desired sources giving a permutation.
      // Swap v[i] with v[idx[i]].
      // Find  j>i for which _vnew[j] = _vold[i],
      // track the move idx[j] => idx[i]
      // track the move idx[i] => i
      //////////////////////////////////////
      size_t j;
      for (j=i;j<idx.size();j++)
	if (idx[j]==i)
	  break;

      assert(idx[i] > i);     assert(j!=idx.size());      assert(idx[j]==i);

      std::swap(_v[i]._odata,_v[idx[i]]._odata); // should use vector move constructor, no data copy
      std::swap(sort_vals[i],sort_vals[idx[i]]);

      idx[j] = idx[i];
      idx[i] = i;
    }
  }
}

inline std::vector<int> basisSortGetIndex(std::vector<RealD>& sort_vals) 
{
  std::vector<int> idx(sort_vals.size());
  std::iota(idx.begin(), idx.end(), 0);

  // sort indexes based on comparing values in v
  std::sort(idx.begin(), idx.end(), [&sort_vals](int i1, int i2) {
    return ::fabs(sort_vals[i1]) < ::fabs(sort_vals[i2]);
  });
  return idx;
}

template<class Field>
void basisSortInPlace(std::vector<Field> & _v,std::vector<RealD>& sort_vals, bool reverse) 
{
  std::vector<int> idx = basisSortGetIndex(sort_vals);
  if (reverse)
    std::reverse(idx.begin(), idx.end());

  basisReorderInPlace(_v,sort_vals,idx);
}

/////////////////////////////////////////////////////////////
// Implicitly restarted lanczos
/////////////////////////////////////////////////////////////
template<class Field> class ImplicitlyRestartedLanczosTester 
{
 public:
  virtual int TestConvergence(int j,RealD resid,Field &evec, RealD &eval,RealD evalMaxApprox)=0;
  virtual int ReconstructEval(int j,RealD resid,Field &evec, RealD &eval,RealD evalMaxApprox)=0;
};

enum IRLdiagonalisation { 
  IRLdiagonaliseWithDSTEGR,
  IRLdiagonaliseWithQR,
  IRLdiagonaliseWithEigen
};

template<class Field> class ImplicitlyRestartedLanczosHermOpTester  : public ImplicitlyRestartedLanczosTester<Field>
{
 public:

  LinearFunction<Field>       &_HermOp;
  ImplicitlyRestartedLanczosHermOpTester(LinearFunction<Field> &HermOp) : _HermOp(HermOp)  {  };
  int ReconstructEval(int j,RealD resid,Field &B, RealD &eval,RealD evalMaxApprox)
  {
    return TestConvergence(j,resid,B,eval,evalMaxApprox);
  }
  int TestConvergence(int j,RealD eresid,Field &B, RealD &eval,RealD evalMaxApprox)
  {
    Field v(B);
    RealD eval_poly = eval;
    // Apply operator
    _HermOp(B,v);

    RealD vnum = real(innerProduct(B,v)); // HermOp.
    RealD vden = norm2(B);
    RealD vv0  = norm2(v);
    eval   = vnum/vden;
    v -= eval*B;

    RealD vv = norm2(v) / ::pow(evalMaxApprox,2.0);

    std::cout.precision(13);
    std::cout<<GridLogIRL  << "[" << std::setw(3)<<j<<"] "
	     <<"eval = "<<std::setw(25)<< eval << " (" << eval_poly << ")"
	     <<" |H B[i] - eval[i]B[i]|^2 / evalMaxApprox^2 " << std::setw(25) << vv
	     <<std::endl;

    int conv=0;
    if( (vv<eresid*eresid) ) conv = 1;

    return conv;
  }
};

template<class Field> 
class ImplicitlyRestartedLanczos {
 private:
  const RealD small = 1.0e-8;
  int MaxIter;
  int MinRestart; // Minimum number of restarts; only check for convergence after this many
  int Nstop;   // Number of evecs checked for convergence
  int Nk;      // Number of converged vectors sought
  //  int Np;      // Np -- Number of spare vecs in krylov space //  == Nm - Nk
  int Nm;      // Nm -- total number of vectors
  IRLdiagonalisation diagonalisation;
  int orth_period;

  RealD OrthoTime;
  RealD eresid, betastp;
  ////////////////////////////////
  // Embedded objects
  ////////////////////////////////
  LinearFunction<Field>       &_PolyOp;
  LinearFunction<Field>       &_HermOp;
  ImplicitlyRestartedLanczosTester<Field> &_Tester;
  // Default tester provided (we need a ref to something in default case)
  ImplicitlyRestartedLanczosHermOpTester<Field> SimpleTester;
  /////////////////////////
  // Constructor
  /////////////////////////

public:       

  //////////////////////////////////////////////////////////////////
  // PAB:
  //////////////////////////////////////////////////////////////////
  // Too many options  & knobs. 
  // Eliminate:
  //   orth_period
  //   betastp
  //   MinRestart
  //
  // Do we really need orth_period?
  // What is the theoretical basis & guarantees of betastp ?
  // Nstop=Nk viable?
  // MinRestart avoidable with new convergence test?
  // Could cut to PolyOp, HermOp, Tester, Nk, Nm, resid, maxiter (+diagonalisation)
  // HermOp could be eliminated if we dropped the Power method for max eval.
  // -- also: The eval, eval2, eval2_copy stuff is still unnecessarily unclear
  //////////////////////////////////////////////////////////////////
 ImplicitlyRestartedLanczos(LinearFunction<Field> & PolyOp,
			    LinearFunction<Field> & HermOp,
			    ImplicitlyRestartedLanczosTester<Field> & Tester,
			    int _Nstop, // sought vecs
			    int _Nk, // sought vecs
			    int _Nm, // spare vecs
			    RealD _eresid, // resid in lmdue deficit 
			    int _MaxIter, // Max iterations
			    RealD _betastp=0.0, // if beta(k) < betastp: converged
			    int _MinRestart=1, int _orth_period = 1,
			    IRLdiagonalisation _diagonalisation= IRLdiagonaliseWithEigen) :
    SimpleTester(HermOp), _PolyOp(PolyOp),      _HermOp(HermOp), _Tester(Tester),
    Nstop(_Nstop)  ,      Nk(_Nk),      Nm(_Nm),
    eresid(_eresid),      betastp(_betastp),
    MaxIter(_MaxIter)  ,      MinRestart(_MinRestart),
    orth_period(_orth_period), diagonalisation(_diagonalisation)  { };

    ImplicitlyRestartedLanczos(LinearFunction<Field> & PolyOp,
			       LinearFunction<Field> & HermOp,
			       int _Nstop, // sought vecs
			       int _Nk, // sought vecs
			       int _Nm, // spare vecs
			       RealD _eresid, // resid in lmdue deficit 
			       int _MaxIter, // Max iterations
			       RealD _betastp=0.0, // if beta(k) < betastp: converged
			       int _MinRestart=1, int _orth_period = 1,
			       IRLdiagonalisation _diagonalisation= IRLdiagonaliseWithEigen) :
    SimpleTester(HermOp),  _PolyOp(PolyOp),      _HermOp(HermOp), _Tester(SimpleTester),
    Nstop(_Nstop)  ,      Nk(_Nk),      Nm(_Nm),
    eresid(_eresid),      betastp(_betastp),
    MaxIter(_MaxIter)  ,      MinRestart(_MinRestart),
    orth_period(_orth_period), diagonalisation(_diagonalisation)  { };

  ////////////////////////////////
  // Helpers
  ////////////////////////////////
  template<typename T>  static RealD normalise(T& v) 
  {
    RealD nn = norm2(v);
    nn = sqrt(nn);
    v = v * (1.0/nn);
    return nn;
  }

  void orthogonalize(Field& w, std::vector<Field>& evec,int k)
  {
    OrthoTime-=usecond()/1e6;
    basisOrthogonalize(evec,w,k);
    normalise(w);
    OrthoTime+=usecond()/1e6;
  }

/* Rudy Arthur's thesis pp.137
------------------------
Require: M > K, P = M - K
Compute the factorization A V_M = V_M H_M + f_M e_M^†
repeat
  Q = I
  for i = 1,...,P do
    Q_i R_i = H_M - θ_i I ;  Q = Q Q_i
    H_M = Q_i^† H_M Q_i
  end for
  β_K = H_M(K+1,K) ;  σ_K = Q(M,K)
  r = v_{K+1} β_K + r σ_K
  V_K = V_M(1:M) Q(1:M,1:K)
  H_K = H_M(1:K,1:K)
  → A V_K = V_K H_K + f_K e_K^† ;  extend to an M = K + P step factorization A V_M = V_M H_M + f_M e_M^†
until convergence
*/
  void calc(std::vector<RealD>& eval, std::vector<Field>& evec,  const Field& src, int& Nconv, bool reverse=false)
  {
    GridBase *grid = src._grid;
    assert(grid == evec[0]._grid);

    GridLogIRL.TimingMode(1);
    std::cout << GridLogIRL <<"**************************************************************************"<< std::endl;
    std::cout << GridLogIRL <<" ImplicitlyRestartedLanczos::calc() starting iteration 0 /  "<< MaxIter<< std::endl;
    std::cout << GridLogIRL <<"**************************************************************************"<< std::endl;
    std::cout << GridLogIRL <<" -- seek   Nk    = " << Nk    <<" vectors"<< std::endl;
    std::cout << GridLogIRL <<" -- accept Nstop = " << Nstop <<" vectors"<< std::endl;
    std::cout << GridLogIRL <<" -- total  Nm    = " << Nm    <<" vectors"<< std::endl;
    std::cout << GridLogIRL <<" -- size of eval = " << eval.size() << std::endl;
    std::cout << GridLogIRL <<" -- size of evec = " << evec.size() << std::endl;
    if ( diagonalisation == IRLdiagonaliseWithDSTEGR ) {
      std::cout << GridLogIRL << "Diagonalisation is DSTEGR "<<std::endl;
    } else if ( diagonalisation == IRLdiagonaliseWithQR ) { 
      std::cout << GridLogIRL << "Diagonalisation is QR "<<std::endl;
    }  else if ( diagonalisation == IRLdiagonaliseWithEigen ) { 
      std::cout << GridLogIRL << "Diagonalisation is Eigen "<<std::endl;
    }
    std::cout << GridLogIRL <<"**************************************************************************"<< std::endl;

    assert(Nm <= evec.size() && Nm <= eval.size());

    // quickly get an idea of the largest eigenvalue to more properly normalize the residuum
    RealD evalMaxApprox = 0.0;
    {
      auto src_n = src;
      auto tmp = src;
      const int _MAX_ITER_IRL_MEVAPP_ = 50;
      for (int i=0;i<_MAX_ITER_IRL_MEVAPP_;i++) {
	normalise(src_n);
	_HermOp(src_n,tmp);
	RealD vnum = real(innerProduct(src_n,tmp)); // HermOp.
	RealD vden = norm2(src_n);
	RealD na = vnum/vden;
	if (fabs(evalMaxApprox/na - 1.0) < 0.05)
	  i=_MAX_ITER_IRL_MEVAPP_;
	evalMaxApprox = na;
	std::cout << GridLogIRL << " Approximation of largest eigenvalue: " << evalMaxApprox << std::endl;
	src_n = tmp;
      }
    }
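
    // The block above is a plain power iteration on _HermOp: the Rayleigh
    // quotient na of successive iterates is accepted as evalMaxApprox once two
    // consecutive estimates agree to within 5% (or after 50 applications),
    // which is accurate enough for its only use, normalising the convergence
    // residual in the testers.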

    std::vector<RealD> lme(Nm);  
    std::vector<RealD> lme2(Nm);
    std::vector<RealD> eval2(Nm);
    std::vector<RealD> eval2_copy(Nm);
    Eigen::MatrixXd Qt = Eigen::MatrixXd::Zero(Nm,Nm);

    Field f(grid);
    Field v(grid);
    int k1 = 1;
    int k2 = Nk;
    RealD beta_k;

    Nconv = 0;

    // Set initial vector
    evec[0] = src;
    normalise(evec[0]);

    // Initial Nk steps
    OrthoTime=0.;
    for(int k=0; k<Nk; ++k) step(eval,lme,evec,f,Nm,k);
    std::cout<<GridLogIRL <<"Initial "<< Nk <<" steps done "<<std::endl;
    std::cout<<GridLogIRL <<"Initial steps:OrthoTime "<<OrthoTime<< " seconds"<<std::endl;

    //////////////////////////////////
    // Restarting loop begins
    //////////////////////////////////
    int iter;
    for(iter = 0; iter<MaxIter; ++iter){

      OrthoTime=0.;

      std::cout<< GridLogMessage <<" **********************"<< std::endl;
      std::cout<< GridLogMessage <<" Restart iteration = "<< iter << std::endl;
      std::cout<< GridLogMessage <<" **********************"<< std::endl;

      std::cout<<GridLogIRL <<" running "<<Nm-Nk <<" steps: "<<std::endl;
      for(int k=Nk; k<Nm; ++k) step(eval,lme,evec,f,Nm,k);
      f *= lme[Nm-1];

      std::cout<<GridLogIRL <<" "<<Nm-Nk <<" steps done "<<std::endl;
      std::cout<<GridLogIRL <<"Restart steps:OrthoTime "<<OrthoTime<< " seconds"<<std::endl;

      //////////////////////////////////
      // getting eigenvalues
      //////////////////////////////////
      for(int k=0; k<Nm; ++k){
	eval2[k] = eval[k+k1-1];
	lme2[k] = lme[k+k1-1];
      }
      Qt = Eigen::MatrixXd::Identity(Nm,Nm);
      diagonalize(eval2,lme2,Nm,Nm,Qt,grid);
      std::cout<<GridLogIRL <<" diagonalized "<<std::endl;

      //////////////////////////////////
      // sorting
      //////////////////////////////////
      eval2_copy = eval2;
      std::partial_sort(eval2.begin(),eval2.begin()+Nm,eval2.end(),std::greater<RealD>());
      std::cout<<GridLogIRL <<" evals sorted "<<std::endl;
      const int chunk=8;
      for(int io=0; io<k2;io+=chunk){
	std::cout<<GridLogIRL << "eval "<< std::setw(3) << io ;
	for(int ii=0;ii<chunk;ii++){
	  if ( (io+ii)<k2 )
	    std::cout<< " "<< std::setw(12)<< eval2[io+ii];
	}
	std::cout << std::endl;
      }

      //////////////////////////////////
      // Implicitly shifted QR transformations
      //////////////////////////////////
      Qt = Eigen::MatrixXd::Identity(Nm,Nm);
      for(int ip=k2; ip<Nm; ++ip){ 
	QR_decomp(eval,lme,Nm,Nm,Qt,eval2[ip],k1,Nm);
      }
      std::cout<<GridLogIRL <<"QR decomposed "<<std::endl;

      assert(k2<Nm);      assert(k1>0);

      basisRotate(evec,Qt,k1-1,k2+1,0,Nm,Nm); /// big constraint on the basis
      std::cout<<GridLogIRL <<"basisRotated  by Qt"<<std::endl;

      ////////////////////////////////////////////////////
      // Compressed vector f and beta(k2)
      ////////////////////////////////////////////////////
      f *= Qt(k2-1,Nm-1);
      f += lme[k2-1] * evec[k2];
      beta_k = norm2(f);
      beta_k = sqrt(beta_k);
      std::cout<<GridLogIRL<<" beta(k) = "<<beta_k<<std::endl;

      RealD betar = 1.0/beta_k;
      evec[k2] = betar * f;
      lme[k2-1] = beta_k;

      ////////////////////////////////////////////////////
      // Convergence test
      ////////////////////////////////////////////////////
      for(int k=0; k<Nm; ++k){    
	eval2[k] = eval[k];
	lme2[k] = lme[k];
      }
      Qt = Eigen::MatrixXd::Identity(Nm,Nm);
      diagonalize(eval2,lme2,Nk,Nm,Qt,grid);
      std::cout<<GridLogIRL <<" Diagonalized "<<std::endl;

      Nconv = 0;
      if (iter >= MinRestart) {

	std::cout << GridLogIRL << "Test convergence: rotate subset of vectors to test convergence " << std::endl;

	Field B(grid); B.checkerboard = evec[0].checkerboard;

	//  power of two search pattern;  not every evalue in eval2 is assessed.
	int allconv =1;
	for(int jj = 1; jj<=Nstop; jj*=2){
	  int j = Nstop-jj;
	  RealD e = eval2_copy[j]; // Discard the evalue
	  basisRotateJ(B,evec,Qt,j,0,Nk,Nm);	    
	  if( !_Tester.TestConvergence(j,eresid,B,e,evalMaxApprox) ) {
	    allconv=0;
	  }
	}
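
	// With Nstop = 30, for example, the jj sweep above tests j = 29, 28,
	// 26, 22 and 14 (Nstop - 1, 2, 4, 8, 16): a power-of-two subset of the
	// modes rather than all of them, which keeps the per-restart test cheap.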

	// Do evec[0] for good measure
	{ 
	  int j=0;
	  RealD e = eval2_copy[0]; 
	  basisRotateJ(B,evec,Qt,j,0,Nk,Nm);	    
	  if( !_Tester.TestConvergence(j,eresid,B,e,evalMaxApprox) ) allconv=0;
	}
	if ( allconv ) Nconv = Nstop;

	// test if we converged, if so, terminate
	std::cout<<GridLogIRL<<" #modes converged: >= "<<Nconv<<"/"<<Nstop<<std::endl;
	//	if( Nconv>=Nstop || beta_k < betastp){
	if( Nconv>=Nstop){
	  goto converged;
	}

      } else {
	std::cout << GridLogIRL << "iter < MinRestart: do not yet test for convergence\n";
      }
    } // end of iter loop

    std::cout<<GridLogError<<"\n NOT converged.\n";
    abort();

  converged:
    {
      Field B(grid); B.checkerboard = evec[0].checkerboard;
      basisRotate(evec,Qt,0,Nk,0,Nk,Nm);	    
      std::cout << GridLogIRL << " Rotated basis"<<std::endl;
      Nconv=0;
      //////////////////////////////////////////////////////////////////////
      // Full final convergence test; unconditionally applied
      //////////////////////////////////////////////////////////////////////
      for(int j = 0; j<=Nk; j++){
	B=evec[j];
	if( _Tester.ReconstructEval(j,eresid,B,eval2[j],evalMaxApprox) ) {
	  Nconv++;
	}
      }

      if ( Nconv < Nstop )
	std::cout << GridLogIRL << "Nconv ("<<Nconv<<") < Nstop ("<<Nstop<<")"<<std::endl;

      eval=eval2;

      //Keep only converged
      eval.resize(Nconv);// Nstop?
      evec.resize(Nconv,grid);// Nstop?
      basisSortInPlace(evec,eval,reverse);

    }

    std::cout << GridLogIRL <<"**************************************************************************"<< std::endl;
    std::cout << GridLogIRL << "ImplicitlyRestartedLanczos CONVERGED ; Summary :\n";
    std::cout << GridLogIRL <<"**************************************************************************"<< std::endl;
    std::cout << GridLogIRL << " -- Iterations  = "<< iter   << "\n";
    std::cout << GridLogIRL << " -- beta(k)     = "<< beta_k << "\n";
    std::cout << GridLogIRL << " -- Nconv       = "<< Nconv  << "\n";
    std::cout << GridLogIRL <<"**************************************************************************"<< std::endl;
  }

 private:
/* Saad PP. 195
1. Choose an initial vector v_1 of 2-norm unity. Set β_1 ≡ 0, v_0 ≡ 0
2. For k = 1,2,...,m Do:
3.   w_k := A v_k − β_k v_{k−1}
4.   α_k := (w_k, v_k)
5.   w_k := w_k − α_k v_k        // w_k orthogonal to v_k
6.   β_{k+1} := ∥w_k∥_2. If β_{k+1} = 0 then Stop
7.   v_{k+1} := w_k / β_{k+1}
8. EndDo
 */
  void step(std::vector<RealD>& lmd,
	    std::vector<RealD>& lme, 
	    std::vector<Field>& evec,
	    Field& w,int Nm,int k)
  {
    const RealD tiny = 1.0e-20;
    assert( k< Nm );

    GridStopWatch gsw_op,gsw_o;

    Field& evec_k = evec[k];

    _PolyOp(evec_k,w);    std::cout<<GridLogIRL << "PolyOp" <<std::endl;

    if(k>0) w -= lme[k-1] * evec[k-1];

    ComplexD zalph = innerProduct(evec_k,w); // 4. α_k := (w_k, v_k)
    RealD     alph = real(zalph);

    w = w - alph * evec_k;// 5. w_k := w_k − α_k v_k

    RealD beta = normalise(w); // 6. β_{k+1} := ∥w_k∥_2. If β_{k+1} = 0 then Stop
    // 7. v_{k+1} := w_k / β_{k+1}

    lmd[k] = alph;
    lme[k] = beta;

    if (k>0 && k % orth_period == 0) {
      orthogonalize(w,evec,k); // orthonormalise
      std::cout<<GridLogIRL << "Orthogonalised " <<std::endl;
    }

    if(k < Nm-1) evec[k+1] = w;

    std::cout<<GridLogIRL << "alpha[" << k << "] = " << zalph << " beta[" << k << "] = "<<beta<<std::endl;
    if ( beta < tiny ) 
      std::cout<<GridLogIRL << " beta is tiny "<<beta<<std::endl;
  }

  void diagonalize_Eigen(std::vector<RealD>& lmd, std::vector<RealD>& lme, 
			 int Nk, int Nm,  
			 Eigen::MatrixXd & Qt, // Nm x Nm
			 GridBase *grid)
  {
    Eigen::MatrixXd TriDiag = Eigen::MatrixXd::Zero(Nk,Nk);

    for(int i=0;i<Nk;i++)   TriDiag(i,i)   = lmd[i];
    for(int i=0;i<Nk-1;i++) TriDiag(i,i+1) = lme[i];
    for(int i=0;i<Nk-1;i++) TriDiag(i+1,i) = lme[i];

    Eigen::SelfAdjointEigenSolver<Eigen::MatrixXd> eigensolver(TriDiag);

    for (int i = 0; i < Nk; i++) {
      lmd[Nk-1-i] = eigensolver.eigenvalues()(i);
    }
    for (int i = 0; i < Nk; i++) {
      for (int j = 0; j < Nk; j++) {
	Qt(Nk-1-i,j) = eigensolver.eigenvectors()(j,i);
      }
    }
  }

  ///////////////////////////////////////////////////////////////////////////
  // File could end here if settle on Eigen ??? !!!
  ///////////////////////////////////////////////////////////////////////////
  void QR_decomp(std::vector<RealD>& lmd,   // Nm 
		 std::vector<RealD>& lme,   // Nm 
		 int Nk, int Nm,            // Nk, Nm
		 Eigen::MatrixXd& Qt,       // Nm x Nm matrix
		 RealD Dsh, int kmin, int kmax)
  {
    int k = kmin-1;
    RealD x;

    RealD Fden = 1.0/hypot(lmd[k]-Dsh,lme[k]);
    RealD c = ( lmd[k] -Dsh) *Fden;
    RealD s = -lme[k] *Fden;

    RealD tmpa1 = lmd[k];
    RealD tmpa2 = lmd[k+1];
    RealD tmpb  = lme[k];

    lmd[k]   = c*c*tmpa1 +s*s*tmpa2 -2.0*c*s*tmpb;
    lmd[k+1] = s*s*tmpa1 +c*c*tmpa2 +2.0*c*s*tmpb;
    lme[k]   = c*s*(tmpa1-tmpa2) +(c*c-s*s)*tmpb;
    x        =-s*lme[k+1];
    lme[k+1] = c*lme[k+1];

    for(int i=0; i<Nk; ++i){
      RealD Qtmp1 = Qt(k,i);
      RealD Qtmp2 = Qt(k+1,i);
      Qt(k,i)  = c*Qtmp1 - s*Qtmp2;
      Qt(k+1,i)= s*Qtmp1 + c*Qtmp2; 
    }

    // Givens transformations
    for(int k = kmin; k < kmax-1; ++k){

      RealD Fden = 1.0/hypot(x,lme[k-1]);
      RealD c = lme[k-1]*Fden;
      RealD s = - x*Fden;

      RealD tmpa1 = lmd[k];
      RealD tmpa2 = lmd[k+1];
      RealD tmpb  = lme[k];

      lmd[k]   = c*c*tmpa1 +s*s*tmpa2 -2.0*c*s*tmpb;
      lmd[k+1] = s*s*tmpa1 +c*c*tmpa2 +2.0*c*s*tmpb;
      lme[k]   = c*s*(tmpa1-tmpa2) +(c*c-s*s)*tmpb;
      lme[k-1] = c*lme[k-1] -s*x;

      if(k != kmax-2){
	x = -s*lme[k+1];
	lme[k+1] = c*lme[k+1];
      }

      for(int i=0; i<Nk; ++i){
	RealD Qtmp1 = Qt(k,i);
	RealD Qtmp2 = Qt(k+1,i);
	Qt(k,i)     = c*Qtmp1 -s*Qtmp2;
	Qt(k+1,i)   = s*Qtmp1 +c*Qtmp2;
      }
    }
  }

  void diagonalize(std::vector<RealD>& lmd, std::vector<RealD>& lme, 
		   int Nk, int Nm,   
		   Eigen::MatrixXd & Qt,
		   GridBase *grid)
  {
    Qt = Eigen::MatrixXd::Identity(Nm,Nm);
    if ( diagonalisation == IRLdiagonaliseWithDSTEGR ) {
      diagonalize_lapack(lmd,lme,Nk,Nm,Qt,grid);
    } else if ( diagonalisation == IRLdiagonaliseWithQR ) { 
      diagonalize_QR(lmd,lme,Nk,Nm,Qt,grid);
    }  else if ( diagonalisation == IRLdiagonaliseWithEigen ) { 
      diagonalize_Eigen(lmd,lme,Nk,Nm,Qt,grid);
    } else { 
      assert(0);
    }
  }

#ifdef USE_LAPACK
void LAPACK_dstegr(char *jobz, char *range, int *n, double *d, double *e,
                   double *vl, double *vu, int *il, int *iu, double *abstol,
                   int *m, double *w, double *z, int *ldz, int *isuppz,
                   double *work, int *lwork, int *iwork, int *liwork,
                   int *info);
#endif

void diagonalize_lapack(std::vector<RealD>& lmd,
			std::vector<RealD>& lme, 
			int Nk, int Nm,  
			Eigen::MatrixXd& Qt,
			GridBase *grid)
{
#ifdef USE_LAPACK
  const int size = Nm;
  int NN = Nk;
  double evals_tmp[NN];
  double evec_tmp[NN][NN];
  memset(evec_tmp[0],0,sizeof(double)*NN*NN);
  double DD[NN];
  double EE[NN];
  for (int i = 0; i< NN; i++) {
    for (int j = i - 1; j <= i + 1; j++) {
      if ( j < NN && j >= 0 ) {
	if (i==j) DD[i] = lmd[i];
	if (i==j) evals_tmp[i] = lmd[i];
	if (j==(i-1)) EE[j] = lme[j];
      }
    }
  }
  int evals_found;
  int lwork = ( (18*NN) > (1+4*NN+NN*NN)? (18*NN):(1+4*NN+NN*NN)) ;
  int liwork =  3+NN*10 ;
  int iwork[liwork];
  double work[lwork];
  int isuppz[2*NN];
  char jobz = 'V'; // calculate evals & evecs
  char range = 'I'; // calculate evals il through iu
  //    char range = 'A'; // calculate all evals
  char uplo = 'U'; // refer to upper half of original matrix
  char compz = 'I'; // Compute eigenvectors of tridiagonal matrix
  int ifail[NN];
  int info;
  int total = grid->_Nprocessors;
  int node  = grid->_processor;
  int interval = (NN/total)+1;
  double vl = 0.0, vu = 0.0;
  int il = interval*node+1 , iu = interval*(node+1);
  if (iu > NN)  iu=NN;
  double tol = 0.0;
  if (1) {
    memset(evals_tmp,0,sizeof(double)*NN);
    if ( il <= NN){
      LAPACK_dstegr(&jobz, &range, &NN,
		    (double*)DD, (double*)EE,
		    &vl, &vu, &il, &iu, // these four are ignored if the second parameter is 'A'
		    &tol, // tolerance
		    &evals_found, evals_tmp, (double*)evec_tmp, &NN,
		    isuppz,
		    work, &lwork, iwork, &liwork,
		    &info);
      for (int i = iu-1; i>= il-1; i--){
	evals_tmp[i] = evals_tmp[i - (il-1)];
	if (il>1) evals_tmp[i-(il-1)]=0.;
	for (int j = 0; j< NN; j++){
	  evec_tmp[i][j] = evec_tmp[i - (il-1)][j];
	  if (il>1) evec_tmp[i-(il-1)][j]=0.;
	}
      }
    }
    {
      grid->GlobalSumVector(evals_tmp,NN);
      grid->GlobalSumVector((double*)evec_tmp,NN*NN);
    }
  } 
  // Safer to sort instead of just reversing it, 
  // but the documentation of the routine says evals are sorted in increasing order. 
  // qr gives evals in decreasing order.
  for(int i=0;i<NN;i++){
    lmd [NN-1-i]=evals_tmp[i];
    for(int j=0;j<NN;j++){
      Qt((NN-1-i),j)=evec_tmp[i][j];
    }
  }
#else 
  assert(0);
#endif
}

void diagonalize_QR(std::vector<RealD>& lmd, std::vector<RealD>& lme, 
		    int Nk, int Nm,   
		    Eigen::MatrixXd & Qt,
		    GridBase *grid)
{
  int QRiter = 100*Nm;
  int kmin = 1;
  int kmax = Nk;

  // (this should be more sophisticated)
  for(int iter=0; iter<QRiter; ++iter){

    // determination of 2x2 leading submatrix
    RealD dsub = lmd[kmax-1]-lmd[kmax-2];
    RealD dd = sqrt(dsub*dsub + 4.0*lme[kmax-2]*lme[kmax-2]);
    RealD Dsh = 0.5*(lmd[kmax-2]+lmd[kmax-1] +dd*(dsub/fabs(dsub)));
    // (Dsh: shift)
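
    // Dsh is the eigenvalue of the trailing 2x2 block
    //   [ lmd[kmax-2]  lme[kmax-2] ]
    //   [ lme[kmax-2]  lmd[kmax-1] ]
    // closer to lmd[kmax-1], i.e. a Wilkinson-style shift for the implicit QR sweep.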

    // transformation
    QR_decomp(lmd,lme,Nk,Nm,Qt,Dsh,kmin,kmax); // Nk, Nm

    // Convergence criterion (redefinition of kmin and kmax)
    for(int j=kmax-1; j>= kmin; --j){
      RealD dds = fabs(lmd[j-1])+fabs(lmd[j]);
      if(fabs(lme[j-1])+dds > dds){
	kmax = j+1;
	goto continued;
      }
    }
    QRiter = iter;
    return;

  continued:
    for(int j=0; j<kmax-1; ++j){
      RealD dds = fabs(lmd[j])+fabs(lmd[j+1]);
      if(fabs(lme[j])+dds > dds){
	kmin = j+1;
	break;
      }
    }
  }
  std::cout << GridLogError << "[QL method] Error - Too many iterations: "<<QRiter<<"\n";
  abort();
}
};
}
#endif

Grid/algorithms/iterative/LocalCoherenceLanczos.h (new file, 406 lines)
@@ -0,0 +1,406 @@
    /*************************************************************************************

    Grid physics library, www.github.com/paboyle/Grid 

    Source file: ./lib/algorithms/iterative/LocalCoherenceLanczos.h

    Copyright (C) 2015

Author: Christoph Lehner <clehner@bnl.gov>
Author: paboyle <paboyle@ph.ed.ac.uk>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License along
    with this program; if not, write to the Free Software Foundation, Inc.,
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

    See the full license in the file "LICENSE" in the top level distribution directory
    *************************************************************************************/
    /*  END LEGAL */
#ifndef GRID_LOCAL_COHERENCE_IRL_H
#define GRID_LOCAL_COHERENCE_IRL_H

namespace Grid { 


struct LanczosParams : Serializable {
 public:
  GRID_SERIALIZABLE_CLASS_MEMBERS(LanczosParams,
				  ChebyParams, Cheby,/*Chebyshev*/
				  int, Nstop,    /*Vecs in Lanczos must converge Nstop < Nk < Nm*/
				  int, Nk,       /*Vecs in Lanczos seek converge*/
				  int, Nm,       /*Total vecs in Lanczos include restart*/
				  RealD, resid,  /*residual*/
 				  int, MaxIt, 
				  RealD, betastp,  /* ? */
				  int, MinRes);    // Must restart
};

struct LocalCoherenceLanczosParams : Serializable {
 public:
  GRID_SERIALIZABLE_CLASS_MEMBERS(LocalCoherenceLanczosParams,
				  bool, saveEvecs,
				  bool, doFine,
				  bool, doFineRead,
				  bool, doCoarse,
	       			  bool, doCoarseRead,
				  LanczosParams, FineParams,
				  LanczosParams, CoarseParams,
				  ChebyParams,   Smoother,
				  RealD        , coarse_relax_tol,
				  std::vector<int>, blockSize,
				  std::string, config,
				  std::vector < std::complex<double>  >, omega,
				  RealD, mass,
				  RealD, M5);
};
 | 
			
		||||
 | 
			
		||||
// Duplicate functionality; ProjectedFunctionHermOp could be used with the trivial function
 | 
			
		||||
template<class Fobj,class CComplex,int nbasis>
 | 
			
		||||
class ProjectedHermOp : public LinearFunction<Lattice<iVector<CComplex,nbasis > > > {
 | 
			
		||||
public:
 | 
			
		||||
  typedef iVector<CComplex,nbasis >           CoarseSiteVector;
 | 
			
		||||
  typedef Lattice<CoarseSiteVector>           CoarseField;
 | 
			
		||||
  typedef Lattice<CComplex>   CoarseScalar; // used for inner products on fine field
 | 
			
		||||
  typedef Lattice<Fobj>          FineField;
 | 
			
		||||
 | 
			
		||||
  LinearOperatorBase<FineField> &_Linop;
 | 
			
		||||
  std::vector<FineField>        &subspace;
 | 
			
		||||
 | 
			
		||||
  ProjectedHermOp(LinearOperatorBase<FineField>& linop, std::vector<FineField> & _subspace) : 
 | 
			
		||||
    _Linop(linop), subspace(_subspace)
 | 
			
		||||
  {  
 | 
			
		||||
    assert(subspace.size() >0);
 | 
			
		||||
  };
 | 
			
		||||
 | 
			
		||||
  void operator()(const CoarseField& in, CoarseField& out) {
 | 
			
		||||
    GridBase *FineGrid = subspace[0]._grid;    
 | 
			
		||||
    int   checkerboard = subspace[0].checkerboard;
 | 
			
		||||
      
 | 
			
		||||
    FineField fin (FineGrid);     fin.checkerboard= checkerboard;
 | 
			
		||||
    FineField fout(FineGrid);   fout.checkerboard = checkerboard;
 | 
			
		||||
 | 
			
		||||
    blockPromote(in,fin,subspace);       std::cout<<GridLogIRL<<"ProjectedHermop : Promote to fine"<<std::endl;
 | 
			
		||||
    _Linop.HermOp(fin,fout);             std::cout<<GridLogIRL<<"ProjectedHermop : HermOp (fine) "<<std::endl;
 | 
			
		||||
    blockProject(out,fout,subspace);     std::cout<<GridLogIRL<<"ProjectedHermop : Project to coarse "<<std::endl;
 | 
			
		||||
  }
 | 
			
		||||
};
 | 
			
		||||
 | 
			
		||||
template<class Fobj,class CComplex,int nbasis>
 | 
			
		||||
class ProjectedFunctionHermOp : public LinearFunction<Lattice<iVector<CComplex,nbasis > > > {
 | 
			
		||||
public:
 | 
			
		||||
  typedef iVector<CComplex,nbasis >           CoarseSiteVector;
 | 
			
		||||
  typedef Lattice<CoarseSiteVector>           CoarseField;
 | 
			
		||||
  typedef Lattice<CComplex>   CoarseScalar; // used for inner products on fine field
 | 
			
		||||
  typedef Lattice<Fobj>          FineField;
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
  OperatorFunction<FineField>   & _poly;
 | 
			
		||||
  LinearOperatorBase<FineField> &_Linop;
 | 
			
		||||
  std::vector<FineField>        &subspace;
 | 
			
		||||
 | 
			
		||||
  ProjectedFunctionHermOp(OperatorFunction<FineField> & poly,
 | 
			
		||||
			  LinearOperatorBase<FineField>& linop, 
 | 
			
		||||
			  std::vector<FineField> & _subspace) :
 | 
			
		||||
    _poly(poly),
 | 
			
		||||
    _Linop(linop),
 | 
			
		||||
    subspace(_subspace)
 | 
			
		||||
  {  };
 | 
			
		||||
 | 
			
		||||
  void operator()(const CoarseField& in, CoarseField& out) {
 | 
			
		||||
    
 | 
			
		||||
    GridBase *FineGrid = subspace[0]._grid;    
 | 
			
		||||
    int   checkerboard = subspace[0].checkerboard;
 | 
			
		||||
 | 
			
		||||
    FineField fin (FineGrid); fin.checkerboard =checkerboard;
 | 
			
		||||
    FineField fout(FineGrid);fout.checkerboard =checkerboard;
 | 
			
		||||
    
 | 
			
		||||
    blockPromote(in,fin,subspace);             std::cout<<GridLogIRL<<"ProjectedFunctionHermop : Promote to fine"<<std::endl;
 | 
			
		||||
    _poly(_Linop,fin,fout);                    std::cout<<GridLogIRL<<"ProjectedFunctionHermop : Poly "<<std::endl;
 | 
			
		||||
    blockProject(out,fout,subspace);           std::cout<<GridLogIRL<<"ProjectedFunctionHermop : Project to coarse "<<std::endl;
 | 
			
		||||
  }
 | 
			
		||||
};
 | 
			
		||||
 | 
			
		||||
template<class Fobj,class CComplex,int nbasis>
 | 
			
		||||
class ImplicitlyRestartedLanczosSmoothedTester  : public ImplicitlyRestartedLanczosTester<Lattice<iVector<CComplex,nbasis > > >
 | 
			
		||||
{
 | 
			
		||||
 public:
 | 
			
		||||
  typedef iVector<CComplex,nbasis >           CoarseSiteVector;
 | 
			
		||||
  typedef Lattice<CoarseSiteVector>           CoarseField;
 | 
			
		||||
  typedef Lattice<CComplex>   CoarseScalar; // used for inner products on fine field
 | 
			
		||||
  typedef Lattice<Fobj>          FineField;
 | 
			
		||||
 | 
			
		||||
  LinearFunction<CoarseField> & _Poly;
 | 
			
		||||
  OperatorFunction<FineField>   & _smoother;
 | 
			
		||||
  LinearOperatorBase<FineField> &_Linop;
 | 
			
		||||
  RealD                          _coarse_relax_tol;
 | 
			
		||||
  std::vector<FineField>        &_subspace;
 | 
			
		||||
  
 | 
			
		||||
  ImplicitlyRestartedLanczosSmoothedTester(LinearFunction<CoarseField>   &Poly,
 | 
			
		||||
					   OperatorFunction<FineField>   &smoother,
 | 
			
		||||
					   LinearOperatorBase<FineField> &Linop,
 | 
			
		||||
					   std::vector<FineField>        &subspace,
 | 
			
		||||
					   RealD coarse_relax_tol=5.0e3) 
 | 
			
		||||
    : _smoother(smoother), _Linop(Linop), _Poly(Poly), _subspace(subspace),
 | 
			
		||||
      _coarse_relax_tol(coarse_relax_tol)  
 | 
			
		||||
  {    };
 | 
			
		||||
 | 
			
		||||
  int TestConvergence(int j,RealD eresid,CoarseField &B, RealD &eval,RealD evalMaxApprox)
 | 
			
		||||
  {
 | 
			
		||||
    CoarseField v(B);
 | 
			
		||||
    RealD eval_poly = eval;
 | 
			
		||||
 | 
			
		||||
    // Apply operator
 | 
			
		||||
    _Poly(B,v);
 | 
			
		||||
 | 
			
		||||
    RealD vnum = real(innerProduct(B,v)); // HermOp.
 | 
			
		||||
    RealD vden = norm2(B);
 | 
			
		||||
    RealD vv0  = norm2(v);
 | 
			
		||||
    eval   = vnum/vden;
 | 
			
		||||
    v -= eval*B;
 | 
			
		||||
 | 
			
		||||
    RealD vv = norm2(v) / ::pow(evalMaxApprox,2.0);
 | 
			
		||||
 | 
			
		||||
    std::cout.precision(13);
 | 
			
		||||
    std::cout<<GridLogIRL  << "[" << std::setw(3)<<j<<"] "
 | 
			
		||||
	     <<"eval = "<<std::setw(25)<< eval << " (" << eval_poly << ")"
 | 
			
		||||
	     <<" |H B[i] - eval[i]B[i]|^2 / evalMaxApprox^2 " << std::setw(25) << vv
 | 
			
		||||
	     <<std::endl;
 | 
			
		||||
 | 
			
		||||
    int conv=0;
 | 
			
		||||
    if( (vv<eresid*eresid) ) conv = 1;
 | 
			
		||||
    return conv;
 | 
			
		||||
  }
 | 
			
		||||
  int ReconstructEval(int j,RealD eresid,CoarseField &B, RealD &eval,RealD evalMaxApprox)
 | 
			
		||||
  {
 | 
			
		||||
    GridBase *FineGrid = _subspace[0]._grid;    
 | 
			
		||||
    int checkerboard   = _subspace[0].checkerboard;
 | 
			
		||||
    FineField fB(FineGrid);fB.checkerboard =checkerboard;
 | 
			
		||||
    FineField fv(FineGrid);fv.checkerboard =checkerboard;
 | 
			
		||||
 | 
			
		||||
    blockPromote(B,fv,_subspace);  
 | 
			
		||||
    
 | 
			
		||||
    _smoother(_Linop,fv,fB); 
 | 
			
		||||
 | 
			
		||||
    RealD eval_poly = eval;
 | 
			
		||||
    _Linop.HermOp(fB,fv);
 | 
			
		||||
 | 
			
		||||
    RealD vnum = real(innerProduct(fB,fv)); // HermOp.
 | 
			
		||||
    RealD vden = norm2(fB);
 | 
			
		||||
    RealD vv0  = norm2(fv);
 | 
			
		||||
    eval   = vnum/vden;
 | 
			
		||||
    fv -= eval*fB;
 | 
			
		||||
    RealD vv = norm2(fv) / ::pow(evalMaxApprox,2.0);
 | 
			
		||||
 | 
			
		||||
    std::cout.precision(13);
 | 
			
		||||
    std::cout<<GridLogIRL  << "[" << std::setw(3)<<j<<"] "
 | 
			
		||||
	     <<"eval = "<<std::setw(25)<< eval << " (" << eval_poly << ")"
 | 
			
		||||
	     <<" |H B[i] - eval[i]B[i]|^2 / evalMaxApprox^2 " << std::setw(25) << vv
 | 
			
		||||
	     <<std::endl;
 | 
			
		||||
    if ( j > nbasis ) eresid = eresid*_coarse_relax_tol;
 | 
			
		||||
    if( (vv<eresid*eresid) ) return 1;
 | 
			
		||||
    return 0;
 | 
			
		||||
  }
 | 
			
		||||
};
 | 
			
		||||
 | 
			
		||||
////////////////////////////////////////////
 | 
			
		||||
// Make serializable Lanczos params
 | 
			
		||||
////////////////////////////////////////////
 | 
			
		||||
template<class Fobj,class CComplex,int nbasis>
 | 
			
		||||
class LocalCoherenceLanczos 
 | 
			
		||||
{
 | 
			
		||||
public:
 | 
			
		||||
  typedef iVector<CComplex,nbasis >           CoarseSiteVector;
 | 
			
		||||
  typedef Lattice<CComplex>                   CoarseScalar; // used for inner products on fine field
 | 
			
		||||
  typedef Lattice<CoarseSiteVector>           CoarseField;
 | 
			
		||||
  typedef Lattice<Fobj>                       FineField;
 | 
			
		||||
 | 
			
		||||
protected:
 | 
			
		||||
  GridBase *_CoarseGrid;
 | 
			
		||||
  GridBase *_FineGrid;
 | 
			
		||||
  int _checkerboard;
 | 
			
		||||
  LinearOperatorBase<FineField>                 & _FineOp;
 | 
			
		||||
  
 | 
			
		||||
  std::vector<RealD>                              &evals_fine;
 | 
			
		||||
  std::vector<RealD>                              &evals_coarse; 
 | 
			
		||||
  std::vector<FineField>                          &subspace;
 | 
			
		||||
  std::vector<CoarseField>                        &evec_coarse;
 | 
			
		||||
 | 
			
		||||
private:
 | 
			
		||||
  std::vector<RealD>                              _evals_fine;
 | 
			
		||||
  std::vector<RealD>                              _evals_coarse; 
 | 
			
		||||
  std::vector<FineField>                          _subspace;
 | 
			
		||||
  std::vector<CoarseField>                        _evec_coarse;
 | 
			
		||||
 | 
			
		||||
public:
 | 
			
		||||
 | 
			
		||||
  LocalCoherenceLanczos(GridBase *FineGrid,
 | 
			
		||||
			GridBase *CoarseGrid,
 | 
			
		||||
			LinearOperatorBase<FineField> &FineOp,
 | 
			
		||||
			int checkerboard) :
 | 
			
		||||
    _CoarseGrid(CoarseGrid),
 | 
			
		||||
    _FineGrid(FineGrid),
 | 
			
		||||
    _FineOp(FineOp),
 | 
			
		||||
    _checkerboard(checkerboard),
 | 
			
		||||
    evals_fine  (_evals_fine),
 | 
			
		||||
    evals_coarse(_evals_coarse),
 | 
			
		||||
    subspace    (_subspace),
 | 
			
		||||
    evec_coarse(_evec_coarse)
 | 
			
		||||
  {
 | 
			
		||||
    evals_fine.resize(0);
 | 
			
		||||
    evals_coarse.resize(0);
 | 
			
		||||
  };
 | 
			
		||||
  //////////////////////////////////////////////////////////////////////////
 | 
			
		||||
  // Alternate constructore, external storage for use by Hadrons module
 | 
			
		||||
  //////////////////////////////////////////////////////////////////////////
 | 
			
		||||
  LocalCoherenceLanczos(GridBase *FineGrid,
 | 
			
		||||
			GridBase *CoarseGrid,
 | 
			
		||||
			LinearOperatorBase<FineField> &FineOp,
 | 
			
		||||
			int checkerboard,
 | 
			
		||||
			std::vector<FineField>   &ext_subspace,
 | 
			
		||||
			std::vector<CoarseField> &ext_coarse,
 | 
			
		||||
			std::vector<RealD>       &ext_eval_fine,
 | 
			
		||||
			std::vector<RealD>       &ext_eval_coarse
 | 
			
		||||
			) :
 | 
			
		||||
    _CoarseGrid(CoarseGrid),
 | 
			
		||||
    _FineGrid(FineGrid),
 | 
			
		||||
    _FineOp(FineOp),
 | 
			
		||||
    _checkerboard(checkerboard),
 | 
			
		||||
    evals_fine  (ext_eval_fine), 
 | 
			
		||||
    evals_coarse(ext_eval_coarse),
 | 
			
		||||
    subspace    (ext_subspace),
 | 
			
		||||
    evec_coarse (ext_coarse)
 | 
			
		||||
  {
 | 
			
		||||
    evals_fine.resize(0);
 | 
			
		||||
    evals_coarse.resize(0);
 | 
			
		||||
  };
 | 
			
		||||
 | 
			
		||||
  void Orthogonalise(void ) {
 | 
			
		||||
    CoarseScalar InnerProd(_CoarseGrid);
 | 
			
		||||
    std::cout << GridLogMessage <<" Gramm-Schmidt pass 1"<<std::endl;
 | 
			
		||||
    blockOrthogonalise(InnerProd,subspace);
 | 
			
		||||
    std::cout << GridLogMessage <<" Gramm-Schmidt pass 2"<<std::endl;
 | 
			
		||||
    blockOrthogonalise(InnerProd,subspace);
 | 
			
		||||
  };
 | 
			
		||||
 | 
			
		||||
  template<typename T>  static RealD normalise(T& v) 
 | 
			
		||||
  {
 | 
			
		||||
    RealD nn = norm2(v);
 | 
			
		||||
    nn = ::sqrt(nn);
 | 
			
		||||
    v = v * (1.0/nn);
 | 
			
		||||
    return nn;
 | 
			
		||||
  }
 | 
			
		||||
  /*
 | 
			
		||||
  void fakeFine(void)
 | 
			
		||||
  {
 | 
			
		||||
    int Nk = nbasis;
 | 
			
		||||
    subspace.resize(Nk,_FineGrid);
 | 
			
		||||
    subspace[0]=1.0;
 | 
			
		||||
    subspace[0].checkerboard=_checkerboard;
 | 
			
		||||
    normalise(subspace[0]);
 | 
			
		||||
    PlainHermOp<FineField>    Op(_FineOp);
 | 
			
		||||
    for(int k=1;k<Nk;k++){
 | 
			
		||||
      subspace[k].checkerboard=_checkerboard;
 | 
			
		||||
      Op(subspace[k-1],subspace[k]);
 | 
			
		||||
      normalise(subspace[k]);
 | 
			
		||||
    }
 | 
			
		||||
  }
 | 
			
		||||
  */
 | 
			
		||||
 | 
			
		||||
  void testFine(RealD resid) 
 | 
			
		||||
  {
 | 
			
		||||
    assert(evals_fine.size() == nbasis);
 | 
			
		||||
    assert(subspace.size() == nbasis);
 | 
			
		||||
    PlainHermOp<FineField>    Op(_FineOp);
 | 
			
		||||
    ImplicitlyRestartedLanczosHermOpTester<FineField> SimpleTester(Op);
 | 
			
		||||
    for(int k=0;k<nbasis;k++){
 | 
			
		||||
      assert(SimpleTester.ReconstructEval(k,resid,subspace[k],evals_fine[k],1.0)==1);
 | 
			
		||||
    }
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  void testCoarse(RealD resid,ChebyParams cheby_smooth,RealD relax) 
 | 
			
		||||
  {
 | 
			
		||||
    assert(evals_fine.size() == nbasis);
 | 
			
		||||
    assert(subspace.size() == nbasis);
 | 
			
		||||
    //////////////////////////////////////////////////////////////////////////////////////////////////
 | 
			
		||||
    // create a smoother and see if we can get a cheap convergence test and smooth inside the IRL
 | 
			
		||||
    //////////////////////////////////////////////////////////////////////////////////////////////////
 | 
			
		||||
    Chebyshev<FineField>                          ChebySmooth(cheby_smooth);
 | 
			
		||||
    ProjectedFunctionHermOp<Fobj,CComplex,nbasis> ChebyOp (ChebySmooth,_FineOp,subspace);
 | 
			
		||||
    ImplicitlyRestartedLanczosSmoothedTester<Fobj,CComplex,nbasis> ChebySmoothTester(ChebyOp,ChebySmooth,_FineOp,subspace,relax);
 | 
			
		||||
 | 
			
		||||
    for(int k=0;k<evec_coarse.size();k++){
 | 
			
		||||
      if ( k < nbasis ) { 
 | 
			
		||||
	assert(ChebySmoothTester.ReconstructEval(k,resid,evec_coarse[k],evals_coarse[k],1.0)==1);
 | 
			
		||||
      } else { 
 | 
			
		||||
	assert(ChebySmoothTester.ReconstructEval(k,resid*relax,evec_coarse[k],evals_coarse[k],1.0)==1);
 | 
			
		||||
      }
 | 
			
		||||
    }
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  void calcFine(ChebyParams cheby_parms,int Nstop,int Nk,int Nm,RealD resid, 
 | 
			
		||||
		RealD MaxIt, RealD betastp, int MinRes)
 | 
			
		||||
  {
 | 
			
		||||
    assert(nbasis<=Nm);
 | 
			
		||||
    Chebyshev<FineField>      Cheby(cheby_parms);
 | 
			
		||||
    FunctionHermOp<FineField> ChebyOp(Cheby,_FineOp);
 | 
			
		||||
    PlainHermOp<FineField>    Op(_FineOp);
 | 
			
		||||
 | 
			
		||||
    evals_fine.resize(Nm);
 | 
			
		||||
    subspace.resize(Nm,_FineGrid);
 | 
			
		||||
 | 
			
		||||
    ImplicitlyRestartedLanczos<FineField> IRL(ChebyOp,Op,Nstop,Nk,Nm,resid,MaxIt,betastp,MinRes);
 | 
			
		||||
 | 
			
		||||
    FineField src(_FineGrid); src=1.0; src.checkerboard = _checkerboard;
 | 
			
		||||
 | 
			
		||||
    int Nconv;
 | 
			
		||||
    IRL.calc(evals_fine,subspace,src,Nconv,false);
 | 
			
		||||
    
 | 
			
		||||
    // Shrink down to number saved
 | 
			
		||||
    assert(Nstop>=nbasis);
 | 
			
		||||
    assert(Nconv>=nbasis);
 | 
			
		||||
    evals_fine.resize(nbasis);
 | 
			
		||||
    subspace.resize(nbasis,_FineGrid);
 | 
			
		||||
  }
 | 
			
		||||
  void calcCoarse(ChebyParams cheby_op,ChebyParams cheby_smooth,RealD relax,
 | 
			
		||||
		  int Nstop, int Nk, int Nm,RealD resid, 
 | 
			
		||||
		  RealD MaxIt, RealD betastp, int MinRes)
 | 
			
		||||
  {
 | 
			
		||||
    Chebyshev<FineField>                          Cheby(cheby_op);
 | 
			
		||||
    ProjectedHermOp<Fobj,CComplex,nbasis>         Op(_FineOp,subspace);
 | 
			
		||||
    ProjectedFunctionHermOp<Fobj,CComplex,nbasis> ChebyOp (Cheby,_FineOp,subspace);
 | 
			
		||||
    //////////////////////////////////////////////////////////////////////////////////////////////////
 | 
			
		||||
    // create a smoother and see if we can get a cheap convergence test and smooth inside the IRL
 | 
			
		||||
    //////////////////////////////////////////////////////////////////////////////////////////////////
 | 
			
		||||
 | 
			
		||||
    Chebyshev<FineField>                                           ChebySmooth(cheby_smooth);
 | 
			
		||||
    ImplicitlyRestartedLanczosSmoothedTester<Fobj,CComplex,nbasis> ChebySmoothTester(ChebyOp,ChebySmooth,_FineOp,subspace,relax);
 | 
			
		||||
 | 
			
		||||
    evals_coarse.resize(Nm);
 | 
			
		||||
    evec_coarse.resize(Nm,_CoarseGrid);
 | 
			
		||||
 | 
			
		||||
    CoarseField src(_CoarseGrid);     src=1.0; 
 | 
			
		||||
 | 
			
		||||
    ImplicitlyRestartedLanczos<CoarseField> IRL(ChebyOp,ChebyOp,ChebySmoothTester,Nstop,Nk,Nm,resid,MaxIt,betastp,MinRes);
 | 
			
		||||
    int Nconv=0;
 | 
			
		||||
    IRL.calc(evals_coarse,evec_coarse,src,Nconv,false);
 | 
			
		||||
    assert(Nconv>=Nstop);
 | 
			
		||||
    evals_coarse.resize(Nstop);
 | 
			
		||||
    evec_coarse.resize (Nstop,_CoarseGrid);
 | 
			
		||||
    for (int i=0;i<Nstop;i++){
 | 
			
		||||
      std::cout << i << " Coarse eval = " << evals_coarse[i]  << std::endl;
 | 
			
		||||
    }
 | 
			
		||||
  }
 | 
			
		||||
};
 | 
			
		||||
 | 
			
		||||
}
 | 
			
		||||
#endif
 | 
			
		||||
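A minimal usage sketch of the class above (not part of this commit; the grids, the Hermitian operator HermOp and the Chebyshev parameter sets are assumed application-level objects, and the instantiation types follow Grid's usual conventions):

    const int nbasis = 60;
    typedef LocalCoherenceLanczos<vSpinColourVector,vTComplex,nbasis> LCL;

    LCL lanczos(FineRBGrid, CoarseGrid, HermOp, Odd);

    // Fine pass: build the nbasis-vector subspace, then block-orthogonalise it
    lanczos.calcFine(fineCheby, /*Nstop*/ 60, /*Nk*/ 80, /*Nm*/ 120,
                     /*resid*/ 1.0e-8, /*MaxIt*/ 5000, /*betastp*/ 0.0, /*MinRes*/ 1);
    lanczos.Orthogonalise();

    // Coarse pass: IRL in the projected space with the smoothed convergence tester
    lanczos.calcCoarse(opCheby, smoothCheby, /*relax*/ 5.0e3,
                       /*Nstop*/ 120, /*Nk*/ 160, /*Nm*/ 200,
                       /*resid*/ 1.0e-8, /*MaxIt*/ 5000, /*betastp*/ 0.0, /*MinRes*/ 1);
    lanczos.testCoarse(1.0e-8, smoothCheby, 5.0e3); // optional smoothed residual check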
							
								
								
									
Grid/algorithms/iterative/MinimalResidual.h (new file, 156 lines)
@@ -0,0 +1,156 @@
/*************************************************************************************

Grid physics library, www.github.com/paboyle/Grid

Source file: ./lib/algorithms/iterative/MinimalResidual.h

Copyright (C) 2015

Author: Daniel Richtmann <daniel.richtmann@ur.de>

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

See the full license in the file "LICENSE" in the top level distribution
directory
*************************************************************************************/
/*  END LEGAL */
#ifndef GRID_MINIMAL_RESIDUAL_H
#define GRID_MINIMAL_RESIDUAL_H

namespace Grid {

template<class Field> class MinimalResidual : public OperatorFunction<Field> {
 public:
  bool ErrorOnNoConverge; // throw an assert when the MR fails to converge.
                          // Defaults true.
  RealD   Tolerance;
  Integer MaxIterations;
  RealD   overRelaxParam;
  Integer IterationsToComplete; // Number of iterations the MR took to finish.
                                // Filled in upon completion

  MinimalResidual(RealD tol, Integer maxit, Real ovrelparam = 1.0, bool err_on_no_conv = true)
    : Tolerance(tol), MaxIterations(maxit), overRelaxParam(ovrelparam), ErrorOnNoConverge(err_on_no_conv){};

  void operator()(LinearOperatorBase<Field> &Linop, const Field &src, Field &psi) {

    psi.checkerboard = src.checkerboard;
    conformable(psi, src);

    Complex a, c;
    Real    d = 0.; // set inside the loop; initialised so the "mp" log line below is well-defined

    Field Mr(src);
    Field r(src);

    // Initial residual computation & set up
    RealD guess = norm2(psi);
    assert(std::isnan(guess) == 0);

    RealD ssq = norm2(src);
    RealD rsq = Tolerance * Tolerance * ssq;

    Linop.Op(psi, Mr);

    r = src - Mr;

    RealD cp = norm2(r);

    std::cout << std::setprecision(4) << std::scientific;
    std::cout << GridLogIterative << "MinimalResidual: guess " << guess << std::endl;
    std::cout << GridLogIterative << "MinimalResidual:   src " << ssq << std::endl;
    std::cout << GridLogIterative << "MinimalResidual:    mp " << d << std::endl;
    std::cout << GridLogIterative << "MinimalResidual:  cp,r " << cp << std::endl;

    if (cp <= rsq) {
      return;
    }

    std::cout << GridLogIterative << "MinimalResidual: k=0 residual " << cp << " target " << rsq << std::endl;

    GridStopWatch LinalgTimer;
    GridStopWatch MatrixTimer;
    GridStopWatch SolverTimer;

    SolverTimer.Start();
    int k;
    for (k = 1; k <= MaxIterations; k++) {

      MatrixTimer.Start();
      Linop.Op(r, Mr);
      MatrixTimer.Stop();

      LinalgTimer.Start();

      c = innerProduct(Mr, r);

      d = norm2(Mr);

      a = c / d;

      a = a * overRelaxParam;

      psi = psi + r * a;

      r = r - Mr * a;

      cp = norm2(r);

      LinalgTimer.Stop();

      std::cout << GridLogIterative << "MinimalResidual: Iteration " << k
                << " residual " << cp << " target " << rsq << std::endl;
      std::cout << GridLogDebug << "a = " << a << " c = " << c << " d = " << d << std::endl;

      // Stopping condition
      if (cp <= rsq) {
        SolverTimer.Stop();

        Linop.Op(psi, Mr);
        r = src - Mr;

        RealD srcnorm       = sqrt(ssq);
        RealD resnorm       = sqrt(norm2(r));
        RealD true_residual = resnorm / srcnorm;

        std::cout << GridLogMessage        << "MinimalResidual Converged on iteration " << k
                  << " computed residual " << sqrt(cp / ssq)
                  << " true residual "     << true_residual
                  << " target "            << Tolerance << std::endl;

        std::cout << GridLogMessage << "MR Time elapsed: Total   " << SolverTimer.Elapsed() << std::endl;
        std::cout << GridLogMessage << "MR Time elapsed: Matrix  " << MatrixTimer.Elapsed() << std::endl;
        std::cout << GridLogMessage << "MR Time elapsed: Linalg  " << LinalgTimer.Elapsed() << std::endl;

        if (ErrorOnNoConverge)
          assert(true_residual / Tolerance < 10000.0);

        IterationsToComplete = k;

        return;
      }
    }

    std::cout << GridLogMessage << "MinimalResidual did NOT converge"
              << std::endl;

    if (ErrorOnNoConverge)
      assert(0);

    IterationsToComplete = k;
  }
};
} // namespace Grid
#endif
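One step of the loop above minimises the residual norm along the direction \(M r_k\); with \(\omega\) = overRelaxParam the update reads

\[
\alpha_k=\frac{\langle M r_k,\,r_k\rangle}{\lVert M r_k\rVert^{2}},\qquad
\psi_{k+1}=\psi_k+\omega\,\alpha_k\,r_k,\qquad
r_{k+1}=r_k-\omega\,\alpha_k\,M r_k,
\]

which is exactly the c, d, a and the two field updates in the code; \(\omega = 1\) recovers plain MR.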
Grid/algorithms/iterative/MixedPrecisionFlexibleGeneralisedMinimalResidual.h (new file, 273 lines)
@@ -0,0 +1,273 @@
/*************************************************************************************

Grid physics library, www.github.com/paboyle/Grid

Source file: ./lib/algorithms/iterative/MixedPrecisionFlexibleGeneralisedMinimalResidual.h

Copyright (C) 2015

Author: Daniel Richtmann <daniel.richtmann@ur.de>

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

See the full license in the file "LICENSE" in the top level distribution
directory
*************************************************************************************/
/*  END LEGAL */
#ifndef GRID_MIXED_PRECISION_FLEXIBLE_GENERALISED_MINIMAL_RESIDUAL_H
#define GRID_MIXED_PRECISION_FLEXIBLE_GENERALISED_MINIMAL_RESIDUAL_H

namespace Grid {

template<class FieldD, class FieldF, typename std::enable_if<getPrecision<FieldD>::value == 2, int>::type = 0, typename std::enable_if< getPrecision<FieldF>::value == 1, int>::type = 0>
class MixedPrecisionFlexibleGeneralisedMinimalResidual : public OperatorFunction<FieldD> {
 public:
  bool ErrorOnNoConverge; // Throw an assert when MPFGMRES fails to converge,
                          // defaults to true

  RealD   Tolerance;

  Integer MaxIterations;
  Integer RestartLength;
  Integer MaxNumberOfRestarts;
  Integer IterationCount; // Number of iterations the MPFGMRES took to finish,
                          // filled in upon completion

  GridStopWatch MatrixTimer;
  GridStopWatch PrecTimer;
  GridStopWatch LinalgTimer;
  GridStopWatch QrTimer;
  GridStopWatch CompSolutionTimer;
  GridStopWatch ChangePrecTimer;

  Eigen::MatrixXcd H;

  std::vector<std::complex<double>> y;
  std::vector<std::complex<double>> gamma;
  std::vector<std::complex<double>> c;
  std::vector<std::complex<double>> s;

  GridBase* SinglePrecGrid;

  LinearFunction<FieldF> &Preconditioner;

  MixedPrecisionFlexibleGeneralisedMinimalResidual(RealD   tol,
                                                   Integer maxit,
                                                   GridBase * sp_grid,
                                                   LinearFunction<FieldF> &Prec,
                                                   Integer restart_length,
                                                   bool    err_on_no_conv = true)
      : Tolerance(tol)
      , MaxIterations(maxit)
      , RestartLength(restart_length)
      , MaxNumberOfRestarts(MaxIterations/RestartLength + ((MaxIterations%RestartLength == 0) ? 0 : 1))
      , ErrorOnNoConverge(err_on_no_conv)
      , H(Eigen::MatrixXcd::Zero(RestartLength, RestartLength + 1)) // sizes taken from DD-αAMG code base
      , y(RestartLength + 1, 0.)
      , gamma(RestartLength + 1, 0.)
      , c(RestartLength + 1, 0.)
      , s(RestartLength + 1, 0.)
      , SinglePrecGrid(sp_grid)
      , Preconditioner(Prec) {};

  void operator()(LinearOperatorBase<FieldD> &LinOp, const FieldD &src, FieldD &psi) {

    psi.checkerboard = src.checkerboard;
    conformable(psi, src);

    RealD guess = norm2(psi);
    assert(std::isnan(guess) == 0);

    RealD cp;
    RealD ssq = norm2(src);
    RealD rsq = Tolerance * Tolerance * ssq;

    FieldD r(src._grid);

    std::cout << std::setprecision(4) << std::scientific;
    std::cout << GridLogIterative << "MPFGMRES: guess " << guess << std::endl;
    std::cout << GridLogIterative << "MPFGMRES:   src " << ssq   << std::endl;

    PrecTimer.Reset();
    MatrixTimer.Reset();
    LinalgTimer.Reset();
    QrTimer.Reset();
    CompSolutionTimer.Reset();
    ChangePrecTimer.Reset();

    GridStopWatch SolverTimer;
    SolverTimer.Start();

    IterationCount = 0;

    for (int k=0; k<MaxNumberOfRestarts; k++) {

      cp = outerLoopBody(LinOp, src, psi, rsq);

      // Stopping condition
      if (cp <= rsq) {

        SolverTimer.Stop();

        LinOp.Op(psi,r);
        axpy(r,-1.0,src,r);

        RealD srcnorm       = sqrt(ssq);
        RealD resnorm       = sqrt(norm2(r));
        RealD true_residual = resnorm / srcnorm;

        std::cout << GridLogMessage        << "MPFGMRES: Converged on iteration " << IterationCount
                  << " computed residual " << sqrt(cp / ssq)
                  << " true residual "     << true_residual
                  << " target "            << Tolerance << std::endl;

        std::cout << GridLogMessage << "MPFGMRES Time elapsed: Total      " <<       SolverTimer.Elapsed() << std::endl;
        std::cout << GridLogMessage << "MPFGMRES Time elapsed: Precon     " <<         PrecTimer.Elapsed() << std::endl;
        std::cout << GridLogMessage << "MPFGMRES Time elapsed: Matrix     " <<       MatrixTimer.Elapsed() << std::endl;
        std::cout << GridLogMessage << "MPFGMRES Time elapsed: Linalg     " <<       LinalgTimer.Elapsed() << std::endl;
        std::cout << GridLogMessage << "MPFGMRES Time elapsed: QR         " <<           QrTimer.Elapsed() << std::endl;
        std::cout << GridLogMessage << "MPFGMRES Time elapsed: CompSol    " << CompSolutionTimer.Elapsed() << std::endl;
        std::cout << GridLogMessage << "MPFGMRES Time elapsed: PrecChange " <<   ChangePrecTimer.Elapsed() << std::endl;
        return;
      }
    }

    std::cout << GridLogMessage << "MPFGMRES did NOT converge" << std::endl;

    if (ErrorOnNoConverge)
      assert(0);
  }

  RealD outerLoopBody(LinearOperatorBase<FieldD> &LinOp, const FieldD &src, FieldD &psi, RealD rsq) {

    RealD cp = 0;

    FieldD w(src._grid);
    FieldD r(src._grid);

    // these should probably be made class members so that they are only allocated once, not in every restart
    std::vector<FieldD> v(RestartLength + 1, src._grid); for (auto &elem : v) elem = zero;
    std::vector<FieldD> z(RestartLength + 1, src._grid); for (auto &elem : z) elem = zero;

    MatrixTimer.Start();
    LinOp.Op(psi, w);
    MatrixTimer.Stop();

    LinalgTimer.Start();
    r = src - w;

    gamma[0] = sqrt(norm2(r));

    v[0] = (1. / gamma[0]) * r;
    LinalgTimer.Stop();

    for (int i=0; i<RestartLength; i++) {

      IterationCount++;

      arnoldiStep(LinOp, v, z, w, i);

      qrUpdate(i);

      cp = std::norm(gamma[i+1]);

      std::cout << GridLogIterative << "MPFGMRES: Iteration " << IterationCount
                << " residual " << cp << " target " << rsq << std::endl;

      if ((i == RestartLength - 1) || (IterationCount == MaxIterations) || (cp <= rsq)) {

        computeSolution(z, psi, i);

        return cp;
      }
    }

    assert(0); // Never reached
    return cp;
  }

  void arnoldiStep(LinearOperatorBase<FieldD> &LinOp, std::vector<FieldD> &v, std::vector<FieldD> &z, FieldD &w, int iter) {

    FieldF v_f(SinglePrecGrid);
    FieldF z_f(SinglePrecGrid);

    ChangePrecTimer.Start();
    precisionChange(v_f, v[iter]);
    precisionChange(z_f, z[iter]);
    ChangePrecTimer.Stop();

    PrecTimer.Start();
    Preconditioner(v_f, z_f);
    PrecTimer.Stop();

    ChangePrecTimer.Start();
    precisionChange(z[iter], z_f);
    ChangePrecTimer.Stop();

    MatrixTimer.Start();
    LinOp.Op(z[iter], w);
    MatrixTimer.Stop();

    LinalgTimer.Start();
    for (int i = 0; i <= iter; ++i) {
      H(iter, i) = innerProduct(v[i], w);
      w = w - H(iter, i) * v[i];
    }

    H(iter, iter + 1) = sqrt(norm2(w));
    v[iter + 1] = (1. / H(iter, iter + 1)) * w;
    LinalgTimer.Stop();
  }

  void qrUpdate(int iter) {

    QrTimer.Start();
    for (int i = 0; i < iter ; ++i) {
      auto tmp       = -s[i] * H(iter, i) + c[i] * H(iter, i + 1);
      H(iter, i)     = std::conj(c[i]) * H(iter, i) + std::conj(s[i]) * H(iter, i + 1);
      H(iter, i + 1) = tmp;
    }

    // Compute new Givens rotation
    ComplexD nu = sqrt(std::norm(H(iter, iter)) + std::norm(H(iter, iter + 1)));
    c[iter]     = H(iter, iter) / nu;
    s[iter]     = H(iter, iter + 1) / nu;

    // Apply new Givens rotation
    H(iter, iter)     = nu;
    H(iter, iter + 1) = 0.;

    gamma[iter + 1] = -s[iter] * gamma[iter];
    gamma[iter]     = std::conj(c[iter]) * gamma[iter];
    QrTimer.Stop();
  }

  void computeSolution(std::vector<FieldD> const &z, FieldD &psi, int iter) {

    CompSolutionTimer.Start();
    for (int i = iter; i >= 0; i--) {
      y[i] = gamma[i];
      for (int k = i + 1; k <= iter; k++)
        y[i] = y[i] - H(k, i) * y[k];
      y[i] = y[i] / H(i, i);
    }

    for (int i = 0; i <= iter; i++)
      psi = psi + z[i] * y[i];
    CompSolutionTimer.Stop();
  }
};
}
#endif
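As a sketch of qrUpdate above: each new Hessenberg column is rotated into upper-triangular form by a complex Givens rotation, and the running residual is read off the rotated right-hand side,

\[
\nu=\sqrt{|H_{i,i}|^{2}+|H_{i,i+1}|^{2}},\qquad c_i=\frac{H_{i,i}}{\nu},\qquad s_i=\frac{H_{i,i+1}}{\nu},
\]
\[
\gamma_{i+1}=-s_i\,\gamma_i,\qquad \gamma_i\leftarrow\bar{c}_i\,\gamma_i,
\]

so cp = \(|\gamma_{i+1}|^2\) can be tested against rsq each iteration without forming the residual vector (indices follow the code's H(iter, i) storage convention).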
@@ -139,7 +139,10 @@ namespace Grid {
      MatTimer.Start();
      Linop.HermOpAndNorm(psi,Az,zAz,zAAz); 
      MatTimer.Stop();

      LinalgTimer.Start();
      r=src-Az;
      LinalgTimer.Stop();

      /////////////////////
      // p = Prec(r)
@@ -152,8 +155,10 @@ namespace Grid {
      Linop.HermOp(z,tmp); 
      MatTimer.Stop();

      LinalgTimer.Start();
      ttmp=tmp;
      tmp=tmp-r;
      LinalgTimer.Stop();

      /*
      std::cout<<GridLogMessage<<r<<std::endl;
@@ -166,12 +171,14 @@ namespace Grid {
      Linop.HermOpAndNorm(z,Az,zAz,zAAz); 
      MatTimer.Stop();

      LinalgTimer.Start();
      //p[0],q[0],qq[0] 
      p[0]= z;
      q[0]= Az;
      qq[0]= zAAz;

      cp =norm2(r);
      LinalgTimer.Stop();

      for(int k=0;k<nstep;k++){

@@ -181,12 +188,14 @@ namespace Grid {
	int peri_k = k %mmax;
	int peri_kp= kp%mmax;

        LinalgTimer.Start();
	rq= real(innerProduct(r,q[peri_k])); // what if rAr not real?
	a = rq/qq[peri_k];

	axpy(psi,a,p[peri_k],psi);         

	cp = axpy_norm(r,-a,q[peri_k],r);
        LinalgTimer.Stop();

	if((k==nstep-1)||(cp<rsq)){
	  return cp;
@@ -202,6 +211,8 @@ namespace Grid {
	Linop.HermOpAndNorm(z,Az,zAz,zAAz);
	Linop.HermOp(z,tmp);
	MatTimer.Stop();

        LinalgTimer.Start();
        tmp=tmp-r;
	std::cout<<GridLogMessage<< " Preconditioner resid " <<sqrt(norm2(tmp)/norm2(r))<<std::endl; 

@@ -219,9 +230,9 @@ namespace Grid {

	}
	qq[peri_kp]=norm2(q[peri_kp]); // could use axpy_norm

        LinalgTimer.Stop();
      }

      assert(0); // never reached
      return cp;
    }
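These hunks apply one instrumentation pattern throughout; a standalone sketch of it, using the GridStopWatch interface exercised above:

    GridStopWatch LinalgTimer;   // one stopwatch per phase of the iteration
    LinalgTimer.Reset();
    LinalgTimer.Start();
    // ... linear-algebra phase ...
    LinalgTimer.Stop();          // time accumulates over repeated Start/Stop pairs
    std::cout << GridLogMessage << "Linalg " << LinalgTimer.Elapsed() << std::endl;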
							
								
								
									
Grid/algorithms/iterative/SchurRedBlack.h (new file, 473 lines)
@@ -0,0 +1,473 @@
    /*************************************************************************************
 | 
			
		||||
 | 
			
		||||
    Grid physics library, www.github.com/paboyle/Grid 
 | 
			
		||||
 | 
			
		||||
    Source file: ./lib/algorithms/iterative/SchurRedBlack.h
 | 
			
		||||
 | 
			
		||||
    Copyright (C) 2015
 | 
			
		||||
 | 
			
		||||
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
 | 
			
		||||
 | 
			
		||||
    This program is free software; you can redistribute it and/or modify
 | 
			
		||||
    it under the terms of the GNU General Public License as published by
 | 
			
		||||
    the Free Software Foundation; either version 2 of the License, or
 | 
			
		||||
    (at your option) any later version.
 | 
			
		||||
 | 
			
		||||
    This program is distributed in the hope that it will be useful,
 | 
			
		||||
    but WITHOUT ANY WARRANTY; without even the implied warranty of
 | 
			
		||||
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 | 
			
		||||
    GNU General Public License for more details.
 | 
			
		||||
 | 
			
		||||
    You should have received a copy of the GNU General Public License along
 | 
			
		||||
    with this program; if not, write to the Free Software Foundation, Inc.,
 | 
			
		||||
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 | 
			
		||||
 | 
			
		||||
    See the full license in the file "LICENSE" in the top level distribution directory
 | 
			
		||||
    *************************************************************************************/
 | 
			
		||||
    /*  END LEGAL */
 | 
			
		||||
#ifndef GRID_SCHUR_RED_BLACK_H
 | 
			
		||||
#define GRID_SCHUR_RED_BLACK_H
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
  /*
 | 
			
		||||
   * Red black Schur decomposition
 | 
			
		||||
   *
 | 
			
		||||
   *  M = (Mee Meo) =  (1             0 )   (Mee   0               )  (1 Mee^{-1} Meo)
 | 
			
		||||
   *      (Moe Moo)    (Moe Mee^-1    1 )   (0   Moo-Moe Mee^-1 Meo)  (0   1         )
 | 
			
		||||
   *                =         L                     D                     U
 | 
			
		||||
   *
 | 
			
		||||
   * L^-1 = (1              0 )
 | 
			
		||||
   *        (-MoeMee^{-1}   1 )   
 | 
			
		||||
   * L^{dag} = ( 1       Mee^{-dag} Moe^{dag} )
 | 
			
		||||
   *           ( 0       1                    )
 | 
			
		||||
   * L^{-d}  = ( 1      -Mee^{-dag} Moe^{dag} )
 | 
			
		||||
   *           ( 0       1                    )
 | 
			
		||||
   *
 | 
			
		||||
   * U^-1 = (1   -Mee^{-1} Meo)
 | 
			
		||||
   *        (0    1           )
 | 
			
		||||
   * U^{dag} = ( 1                 0)
 | 
			
		||||
   *           (Meo^dag Mee^{-dag} 1)
 | 
			
		||||
   * U^{-dag} = (  1                 0)
 | 
			
		||||
   *            (-Meo^dag Mee^{-dag} 1)
 | 
			
		||||
   ***********************
 | 
			
		||||
   *     M psi = eta
 | 
			
		||||
   ***********************
 | 
			
		||||
   *Odd
 | 
			
		||||
   * i)                 D_oo psi_o =  L^{-1}  eta_o
 | 
			
		||||
   *                        eta_o' = (D_oo)^dag (eta_o - Moe Mee^{-1} eta_e)
 | 
			
		||||
   *
 | 
			
		||||
   * Wilson:
 | 
			
		||||
   *      (D_oo)^{\dag} D_oo psi_o = (D_oo)^dag L^{-1}  eta_o
 | 
			
		||||
   * Stag:
 | 
			
		||||
   *      D_oo psi_o = L^{-1}  eta =    (eta_o - Moe Mee^{-1} eta_e)
 | 
			
		||||
   *
 | 
			
		||||
   * L^-1 eta_o= (1              0 ) (e
 | 
			
		||||
   *             (-MoeMee^{-1}   1 )   
 | 
			
		||||
   *
 | 
			
		||||
   *Even
 | 
			
		||||
   * ii)  Mee psi_e + Meo psi_o = src_e
 | 
			
		||||
   *
 | 
			
		||||
   *   => sol_e = M_ee^-1 * ( src_e - Meo sol_o )...
 | 
			
		||||
   *
 | 
			
		||||
   * 
 | 
			
		||||
   * TODO: Other options:
 | 
			
		||||
   * 
 | 
			
		||||
   * a) change checkerboards for Schur e<->o
 | 
			
		||||
   *
 | 
			
		||||
   * Left precon by Moo^-1
 | 
			
		||||
   * b) Doo^{dag} M_oo^-dag Moo^-1 Doo psi_0 =  (D_oo)^dag M_oo^-dag Moo^-1 L^{-1}  eta_o
 | 
			
		||||
   *                              eta_o'     = (D_oo)^dag  M_oo^-dag Moo^-1 (eta_o - Moe Mee^{-1} eta_e)
 | 
			
		||||
   *
 | 
			
		||||
   * Right precon by Moo^-1
 | 
			
		||||
   * c) M_oo^-dag Doo^{dag} Doo Moo^-1 phi_0 = M_oo^-dag (D_oo)^dag L^{-1}  eta_o
 | 
			
		||||
   *                              eta_o'     = M_oo^-dag (D_oo)^dag (eta_o - Moe Mee^{-1} eta_e)
 | 
			
		||||
   *                              psi_o = M_oo^-1 phi_o
 | 
			
		||||
   * TODO: Deflation 
 | 
			
		||||
   */
 | 
			
		||||
namespace Grid {
 | 
			
		||||
 | 
			
		||||
  ///////////////////////////////////////////////////////////////////////////////////////////////////////
 | 
			
		||||
  // Use base class to share code
 | 
			
		||||
  ///////////////////////////////////////////////////////////////////////////////////////////////////////
 | 
			
		||||
  ///////////////////////////////////////////////////////////////////////////////////////////////////////
 | 
			
		||||
  // Take a matrix and form a Red Black solver calling a Herm solver
 | 
			
		||||
  // Use of RB info prevents making SchurRedBlackSolve conform to standard interface
 | 
			
		||||
  ///////////////////////////////////////////////////////////////////////////////////////////////////////
 | 
			
		||||
  template<class Field> class SchurRedBlackBase {
 | 
			
		||||
  protected:
 | 
			
		||||
    typedef CheckerBoardedSparseMatrixBase<Field> Matrix;
 | 
			
		||||
    OperatorFunction<Field> & _HermitianRBSolver;
 | 
			
		||||
    int CBfactorise;
 | 
			
		||||
    bool subGuess;
 | 
			
		||||
  public:
 | 
			
		||||
 | 
			
		||||
    SchurRedBlackBase(OperatorFunction<Field> &HermitianRBSolver, const bool initSubGuess = false)  :
 | 
			
		||||
    _HermitianRBSolver(HermitianRBSolver) 
 | 
			
		||||
    { 
 | 
			
		||||
      CBfactorise = 0;
 | 
			
		||||
      subtractGuess(initSubGuess);
 | 
			
		||||
    };
 | 
			
		||||
    void subtractGuess(const bool initSubGuess)
 | 
			
		||||
    {
 | 
			
		||||
      subGuess = initSubGuess;
 | 
			
		||||
    }
 | 
			
		||||
    bool isSubtractGuess(void)
 | 
			
		||||
    {
 | 
			
		||||
      return subGuess;
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    /////////////////////////////////////////////////////////////
 | 
			
		||||
    // Shared code
 | 
			
		||||
    /////////////////////////////////////////////////////////////
 | 
			
		||||
    void operator() (Matrix & _Matrix,const Field &in, Field &out){
 | 
			
		||||
      ZeroGuesser<Field> guess;
 | 
			
		||||
      (*this)(_Matrix,in,out,guess);
 | 
			
		||||
    }
 | 
			
		||||
    void operator()(Matrix &_Matrix, const std::vector<Field> &in, std::vector<Field> &out) 
 | 
			
		||||
    {
 | 
			
		||||
      ZeroGuesser<Field> guess;
 | 
			
		||||
      (*this)(_Matrix,in,out,guess);
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    template<class Guesser>
 | 
			
		||||
    void operator()(Matrix &_Matrix, const std::vector<Field> &in, std::vector<Field> &out,Guesser &guess) 
 | 
			
		||||
    {
 | 
			
		||||
      GridBase *grid = _Matrix.RedBlackGrid();
 | 
			
		||||
      GridBase *fgrid= _Matrix.Grid();
 | 
			
		||||
      int nblock = in.size();
 | 
			
		||||
 | 
			
		||||
      std::vector<Field> src_o(nblock,grid);
 | 
			
		||||
      std::vector<Field> sol_o(nblock,grid);
 | 
			
		||||
      
 | 
			
		||||
      std::vector<Field> guess_save;
 | 
			
		||||
 | 
			
		||||
      Field resid(fgrid);
 | 
			
		||||
      Field tmp(grid);
 | 
			
		||||
 | 
			
		||||
      ////////////////////////////////////////////////
 | 
			
		||||
      // Prepare RedBlack source
 | 
			
		||||
      ////////////////////////////////////////////////
 | 
			
		||||
      for(int b=0;b<nblock;b++){
 | 
			
		||||
	RedBlackSource(_Matrix,in[b],tmp,src_o[b]);
 | 
			
		||||
      }
 | 
			
		||||
      ////////////////////////////////////////////////
 | 
			
		||||
      // Make the guesses
 | 
			
		||||
      ////////////////////////////////////////////////
 | 
			
		||||
      if ( subGuess ) guess_save.resize(nblock,grid);
 | 
			
		||||
 | 
			
		||||
      for(int b=0;b<nblock;b++){
 | 
			
		||||
	guess(src_o[b],sol_o[b]); 
 | 
			
		||||
 | 
			
		||||
	if ( subGuess ) { 
 | 
			
		||||
	  guess_save[b] = sol_o[b];
 | 
			
		||||
	}
 | 
			
		||||
      }
 | 
			
		||||
      //////////////////////////////////////////////////////////////
 | 
			
		||||
      // Call the block solver
 | 
			
		||||
      //////////////////////////////////////////////////////////////
 | 
			
		||||
      std::cout<<GridLogMessage << "SchurRedBlackBase calling the solver for "<<nblock<<" RHS" <<std::endl;
 | 
			
		||||
      RedBlackSolve(_Matrix,src_o,sol_o);
 | 
			
		||||
 | 
			
		||||
      ////////////////////////////////////////////////
 | 
			
		||||
      // A2A boolean behavioural control & reconstruct other checkerboard
 | 
			
		||||
      ////////////////////////////////////////////////
 | 
			
		||||
      for(int b=0;b<nblock;b++) {
 | 
			
		||||
 | 
			
		||||
	if (subGuess)   sol_o[b] = sol_o[b] - guess_save[b];
 | 
			
		||||
 | 
			
		||||
	///////// Needs even source //////////////
 | 
			
		||||
	pickCheckerboard(Even,tmp,in[b]);
 | 
			
		||||
	RedBlackSolution(_Matrix,sol_o[b],tmp,out[b]);
 | 
			
		||||
 | 
			
		||||
	/////////////////////////////////////////////////
 | 
			
		||||
	// Check unprec residual if possible
 | 
			
		||||
	/////////////////////////////////////////////////
 | 
			
		||||
	if ( ! subGuess ) {
 | 
			
		||||
	  _Matrix.M(out[b],resid); 
 | 
			
		||||
	  resid = resid-in[b];
 | 
			
		||||
	  RealD ns = norm2(in[b]);
 | 
			
		||||
	  RealD nr = norm2(resid);
 | 
			
		||||
	
 | 
			
		||||
	  std::cout<<GridLogMessage<< "SchurRedBlackBase solver true unprec resid["<<b<<"] "<<std::sqrt(nr/ns) << std::endl;
 | 
			
		||||
	} else {
 | 
			
		||||
	  std::cout<<GridLogMessage<< "SchurRedBlackBase Guess subtracted after solve["<<b<<"] " << std::endl;
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
      }
 | 
			
		||||
    }
 | 
			
		||||
    template<class Guesser>
    void operator() (Matrix & _Matrix,const Field &in, Field &out,Guesser &guess){

      // FIXME CGdiagonalMee not implemented virtual function
      // FIXME use CBfactorise to control schur decomp
      GridBase *grid = _Matrix.RedBlackGrid();
      GridBase *fgrid= _Matrix.Grid();

      Field resid(fgrid);
      Field src_o(grid);
      Field src_e(grid);
      Field sol_o(grid);

      ////////////////////////////////////////////////
      // RedBlack source
      ////////////////////////////////////////////////
      RedBlackSource(_Matrix,in,src_e,src_o);

      ////////////////////////////////
      // Construct the guess
      ////////////////////////////////
      Field   tmp(grid);
      guess(src_o,sol_o);

      Field  guess_save(grid);
      guess_save = sol_o;

      //////////////////////////////////////////////////////////////
      // Call the red-black solver
      //////////////////////////////////////////////////////////////
      RedBlackSolve(_Matrix,src_o,sol_o);

      ////////////////////////////////////////////////
      // Fionn A2A boolean behavioural control
      ////////////////////////////////////////////////
      if (subGuess)      sol_o= sol_o-guess_save;

      ///////////////////////////////////////////////////
      // RedBlack solution needs the even source
      ///////////////////////////////////////////////////
      RedBlackSolution(_Matrix,sol_o,src_e,out);

      // Verify the unprec residual
      if ( ! subGuess ) {
        _Matrix.M(out,resid); 
        resid = resid-in;
        RealD ns = norm2(in);
        RealD nr = norm2(resid);

        std::cout<<GridLogMessage << "SchurRedBlackBase solver true unprec resid "<< std::sqrt(nr/ns) << std::endl;
      } else {
        std::cout << GridLogMessage << "SchurRedBlackBase Guess subtracted after solve." << std::endl;
      }
    }

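Note on subGuess: when guess subtraction is enabled the odd-checkerboard guess is removed again after the solve, so the caller receives the correction on top of the supplied guess (sol - guess) rather than the full solution; this is the behaviour the A2A vector machinery relies on, and it is why the true unpreconditioned residual is only checked when subGuess is off.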
    /////////////////////////////////////////////////////////////
    // Override in derived. Not virtual as template methods
    /////////////////////////////////////////////////////////////
    virtual void RedBlackSource  (Matrix & _Matrix,const Field &src, Field &src_e,Field &src_o)                =0;
    virtual void RedBlackSolution(Matrix & _Matrix,const Field &sol_o, const Field &src_e,Field &sol)          =0;
    virtual void RedBlackSolve   (Matrix & _Matrix,const Field &src_o, Field &sol_o)                           =0;
    virtual void RedBlackSolve   (Matrix & _Matrix,const std::vector<Field> &src_o,  std::vector<Field> &sol_o)=0;

  };

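The three virtual hooks split the solve into the standard even-odd Schur steps; a sketch of the algebra they share, writing the checkerboarded matrix in block form:

    M = \begin{pmatrix} M_{ee} & M_{eo} \\ M_{oe} & M_{oo} \end{pmatrix},
    \qquad
    M_{pc} \equiv M_{oo} - M_{oe} M_{ee}^{-1} M_{eo}

RedBlackSource prepares the odd-site source \eta_o' = \eta_o - M_{oe} M_{ee}^{-1} \eta_e (the derived classes differ in the extra factor they fold in, e.g. M_{pc}^\dagger for the normal equations), RedBlackSolve inverts M_{pc} on the odd checkerboard, and RedBlackSolution reconstructs the even checkerboard from

    \psi_e = M_{ee}^{-1} \left( \eta_e - M_{eo}\, \psi_o \right).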
  template<class Field> class SchurRedBlackStaggeredSolve : public SchurRedBlackBase<Field> {
  public:
    typedef CheckerBoardedSparseMatrixBase<Field> Matrix;

    SchurRedBlackStaggeredSolve(OperatorFunction<Field> &HermitianRBSolver, const bool initSubGuess = false) 
      :    SchurRedBlackBase<Field> (HermitianRBSolver,initSubGuess) 
    {
    }

    //////////////////////////////////////////////////////
    // Override RedBlack specialisation
    //////////////////////////////////////////////////////
    virtual void RedBlackSource(Matrix & _Matrix,const Field &src, Field &src_e,Field &src_o)
    {
      GridBase *grid = _Matrix.RedBlackGrid();
      GridBase *fgrid= _Matrix.Grid();

      Field   tmp(grid);
      Field  Mtmp(grid);

      pickCheckerboard(Even,src_e,src);
      pickCheckerboard(Odd ,src_o,src);

      /////////////////////////////////////////////////////
      // src_o = (source_o - Moe MeeInv source_e)
      /////////////////////////////////////////////////////
      _Matrix.MooeeInv(src_e,tmp);     assert(  tmp.checkerboard ==Even);
      _Matrix.Meooe   (tmp,Mtmp);      assert( Mtmp.checkerboard ==Odd);     
      tmp=src_o-Mtmp;                  assert(  tmp.checkerboard ==Odd);     

      _Matrix.Mooee(tmp,src_o); // Extra factor of "m" in source from dumb choice of matrix norm.
    }
    virtual void RedBlackSolution(Matrix & _Matrix,const Field &sol_o, const Field &src_e_c,Field &sol)
    {
      GridBase *grid = _Matrix.RedBlackGrid();
      GridBase *fgrid= _Matrix.Grid();

      Field   tmp(grid);
      Field   sol_e(grid);
      Field   src_e(grid);

      src_e = src_e_c; // Const correctness

      ///////////////////////////////////////////////////
      // sol_e = M_ee^-1 * ( src_e - Meo sol_o )...
      ///////////////////////////////////////////////////
      _Matrix.Meooe(sol_o,tmp);        assert(  tmp.checkerboard   ==Even);
      src_e = src_e-tmp;               assert(  src_e.checkerboard ==Even);
      _Matrix.MooeeInv(src_e,sol_e);   assert(  sol_e.checkerboard ==Even);

      setCheckerboard(sol,sol_e); assert(  sol_e.checkerboard ==Even);
      setCheckerboard(sol,sol_o); assert(  sol_o.checkerboard ==Odd );
    }
    virtual void RedBlackSolve   (Matrix & _Matrix,const Field &src_o, Field &sol_o)
    {
      SchurStaggeredOperator<Matrix,Field> _HermOpEO(_Matrix);
      this->_HermitianRBSolver(_HermOpEO,src_o,sol_o);  assert(sol_o.checkerboard==Odd);
    };
    virtual void RedBlackSolve   (Matrix & _Matrix,const std::vector<Field> &src_o,  std::vector<Field> &sol_o)
    {
      SchurStaggeredOperator<Matrix,Field> _HermOpEO(_Matrix);
      this->_HermitianRBSolver(_HermOpEO,src_o,sol_o); 
    }
  };
  template<class Field> using SchurRedBlackStagSolve = SchurRedBlackStaggeredSolve<Field>;
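A minimal usage sketch (names are illustrative: StagOp is any CheckerBoardedSparseMatrixBase<FermionField>, and ConjugateGradient is the usual Krylov solver passed in as the OperatorFunction):

    ConjugateGradient<FermionField> CG(1.0e-8,10000);
    SchurRedBlackStaggeredSolve<FermionField> SchurSolver(CG);
    SchurSolver(StagOp,src,sol);  // full-grid source in, full-grid solution out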

  ///////////////////////////////////////////////////////////////////////////////////////////////////////
  // Site diagonal has Mooee on it.
  ///////////////////////////////////////////////////////////////////////////////////////////////////////
  template<class Field> class SchurRedBlackDiagMooeeSolve : public SchurRedBlackBase<Field> {
  public:
    typedef CheckerBoardedSparseMatrixBase<Field> Matrix;

    SchurRedBlackDiagMooeeSolve(OperatorFunction<Field> &HermitianRBSolver, const bool initSubGuess = false)  
      : SchurRedBlackBase<Field> (HermitianRBSolver,initSubGuess) {};

    //////////////////////////////////////////////////////
    // Override RedBlack specialisation
    //////////////////////////////////////////////////////
    virtual void RedBlackSource(Matrix & _Matrix,const Field &src, Field &src_e,Field &src_o)
    {
      GridBase *grid = _Matrix.RedBlackGrid();
      GridBase *fgrid= _Matrix.Grid();

      Field   tmp(grid);
      Field  Mtmp(grid);

      pickCheckerboard(Even,src_e,src);
      pickCheckerboard(Odd ,src_o,src);

      /////////////////////////////////////////////////////
      // src_o = Mdag * (source_o - Moe MeeInv source_e)
      /////////////////////////////////////////////////////
      _Matrix.MooeeInv(src_e,tmp);     assert(  tmp.checkerboard ==Even);
      _Matrix.Meooe   (tmp,Mtmp);      assert( Mtmp.checkerboard ==Odd);     
      tmp=src_o-Mtmp;                  assert(  tmp.checkerboard ==Odd);     

      // get the right MpcDag
      SchurDiagMooeeOperator<Matrix,Field> _HermOpEO(_Matrix);
      _HermOpEO.MpcDag(tmp,src_o);     assert(src_o.checkerboard ==Odd);       
    }
    virtual void RedBlackSolution(Matrix & _Matrix,const Field &sol_o, const Field &src_e,Field &sol)
    {
      GridBase *grid = _Matrix.RedBlackGrid();
      GridBase *fgrid= _Matrix.Grid();

      Field   tmp(grid);
      Field  sol_e(grid);
      Field  src_e_i(grid);

      ///////////////////////////////////////////////////
      // sol_e = M_ee^-1 * ( src_e - Meo sol_o )...
      ///////////////////////////////////////////////////
      _Matrix.Meooe(sol_o,tmp);          assert(  tmp.checkerboard   ==Even);
      src_e_i = src_e-tmp;               assert(  src_e_i.checkerboard ==Even);
      _Matrix.MooeeInv(src_e_i,sol_e);   assert(  sol_e.checkerboard ==Even);

      setCheckerboard(sol,sol_e); assert(  sol_e.checkerboard ==Even);
      setCheckerboard(sol,sol_o); assert(  sol_o.checkerboard ==Odd );
    }
    virtual void RedBlackSolve   (Matrix & _Matrix,const Field &src_o, Field &sol_o)
    {
      SchurDiagMooeeOperator<Matrix,Field> _HermOpEO(_Matrix);
      this->_HermitianRBSolver(_HermOpEO,src_o,sol_o);  assert(sol_o.checkerboard==Odd);
    };
    virtual void RedBlackSolve   (Matrix & _Matrix,const std::vector<Field> &src_o,  std::vector<Field> &sol_o)
    {
      SchurDiagMooeeOperator<Matrix,Field> _HermOpEO(_Matrix);
      this->_HermitianRBSolver(_HermOpEO,src_o,sol_o); 
    }
  };

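The same pattern with an initial-guess functor; a sketch assuming a ZeroGuesser-style callable guess(src_o,sol_o) and a Wilson-type operator FermOp (both names illustrative):

    ConjugateGradient<LatticeFermion> CG(1.0e-10,30000);
    SchurRedBlackDiagMooeeSolve<LatticeFermion> RBSolver(CG);
    ZeroGuesser<LatticeFermion> Guess;
    RBSolver(FermOp,src,sol,Guess);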
  ///////////////////////////////////////////////////////////////////////////////////////////////////////
  // Site diagonal is identity, right preconditioned by Mee^inv
  // ( 1 - Meo Moo^inv Moe Mee^inv ) phi = ( 1 - Meo Moo^inv Moe Mee^inv ) Mee psi = eta
  // => psi = MeeInv phi
  ///////////////////////////////////////////////////////////////////////////////////////////////////////
  template<class Field> class SchurRedBlackDiagTwoSolve : public SchurRedBlackBase<Field> {
  public:
    typedef CheckerBoardedSparseMatrixBase<Field> Matrix;

    /////////////////////////////////////////////////////
    // Wrap the usual normal equations Schur trick
    /////////////////////////////////////////////////////
    SchurRedBlackDiagTwoSolve(OperatorFunction<Field> &HermitianRBSolver, const bool initSubGuess = false)  
      : SchurRedBlackBase<Field>(HermitianRBSolver,initSubGuess) {};

    virtual void RedBlackSource(Matrix & _Matrix,const Field &src, Field &src_e,Field &src_o)
    {
      GridBase *grid = _Matrix.RedBlackGrid();
      GridBase *fgrid= _Matrix.Grid();

      SchurDiagTwoOperator<Matrix,Field> _HermOpEO(_Matrix);

      Field   tmp(grid);
      Field  Mtmp(grid);

      pickCheckerboard(Even,src_e,src);
      pickCheckerboard(Odd ,src_o,src);

      /////////////////////////////////////////////////////
      // src_o = Mdag * (source_o - Moe MeeInv source_e)
      /////////////////////////////////////////////////////
      _Matrix.MooeeInv(src_e,tmp);     assert(  tmp.checkerboard ==Even);
      _Matrix.Meooe   (tmp,Mtmp);      assert( Mtmp.checkerboard ==Odd);     
      tmp=src_o-Mtmp;                  assert(  tmp.checkerboard ==Odd);     

      // get the right MpcDag
      _HermOpEO.MpcDag(tmp,src_o);     assert(src_o.checkerboard ==Odd);       
    }

    virtual void RedBlackSolution(Matrix & _Matrix,const Field &sol_o, const Field &src_e,Field &sol)
    {
      GridBase *grid = _Matrix.RedBlackGrid();
      GridBase *fgrid= _Matrix.Grid();

      Field   sol_o_i(grid);
      Field   tmp(grid);
      Field   sol_e(grid);

      ////////////////////////////////////////////////
      // MooeeInv due to precond
      ////////////////////////////////////////////////
      _Matrix.MooeeInv(sol_o,tmp);
      sol_o_i = tmp;

      ///////////////////////////////////////////////////
      // sol_e = M_ee^-1 * ( src_e - Meo sol_o )...
      ///////////////////////////////////////////////////
      _Matrix.Meooe(sol_o_i,tmp);    assert(  tmp.checkerboard   ==Even);
      tmp = src_e-tmp;               assert(  src_e.checkerboard ==Even);
      _Matrix.MooeeInv(tmp,sol_e);   assert(  sol_e.checkerboard ==Even);

      setCheckerboard(sol,sol_e);    assert(  sol_e.checkerboard ==Even);
      setCheckerboard(sol,sol_o_i);  assert(  sol_o_i.checkerboard ==Odd );
    };

    virtual void RedBlackSolve   (Matrix & _Matrix,const Field &src_o, Field &sol_o)
    {
      SchurDiagTwoOperator<Matrix,Field> _HermOpEO(_Matrix);
      this->_HermitianRBSolver(_HermOpEO,src_o,sol_o);
    };
    virtual void RedBlackSolve   (Matrix & _Matrix,const std::vector<Field> &src_o,  std::vector<Field> &sol_o)
    {
      SchurDiagTwoOperator<Matrix,Field> _HermOpEO(_Matrix);
      this->_HermitianRBSolver(_HermOpEO,src_o,sol_o); 
    }
  };
}
#endif

Grid/allocator/AlignedAllocator.cc (new file, 125 lines)
@@ -0,0 +1,125 @@
#include <Grid/GridCore.h>
#include <fcntl.h>

namespace Grid {

MemoryStats *MemoryProfiler::stats = nullptr;
bool         MemoryProfiler::debug = false;

int PointerCache::victim;

PointerCache::PointerCacheEntry PointerCache::Entries[PointerCache::Ncache];

void *PointerCache::Insert(void *ptr,size_t bytes) {

  if (bytes < 4096 ) return ptr;

#ifdef GRID_OMP
  assert(omp_in_parallel()==0);
#endif 

  void * ret = NULL;
  int v = -1;

  for(int e=0;e<Ncache;e++) {
    if ( Entries[e].valid==0 ) {
      v=e; 
      break;
    }
  }

  if ( v==-1 ) {
    v=victim;
    victim = (victim+1)%Ncache;
  }

  if ( Entries[v].valid ) {
    ret = Entries[v].address;
    Entries[v].valid = 0;
    Entries[v].address = NULL;
    Entries[v].bytes = 0;
  }

  Entries[v].address=ptr;
  Entries[v].bytes  =bytes;
  Entries[v].valid  =1;

  return ret;
}

void *PointerCache::Lookup(size_t bytes) {

  if (bytes < 4096 ) return NULL;

#ifdef _OPENMP
  assert(omp_in_parallel()==0);
#endif 

  for(int e=0;e<Ncache;e++){
    if ( Entries[e].valid && ( Entries[e].bytes == bytes ) ) {
      Entries[e].valid = 0;
      return Entries[e].address;
    }
  }
  return NULL;
}

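// How the allocator is expected to drive this cache (a sketch; the actual calls
// live in AlignedAllocator.h, and the alignment value here is illustrative):
//
//   void *p = PointerCache::Lookup(bytes);         // try to recycle a block of this size
//   if ( p == NULL ) p = _mm_malloc(bytes,64);     // miss: allocate fresh
//   ...
//   void *evict = PointerCache::Insert(p,bytes);   // on free: park the block in the cache
//   if ( evict != NULL ) _mm_free(evict);          // only an evicted victim is really freed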
void check_huge_pages(void *Buf,uint64_t BYTES)
{
#ifdef __linux__
  int fd = open("/proc/self/pagemap", O_RDONLY);
  assert(fd >= 0);
  const int page_size = 4096;
  uint64_t virt_pfn = (uint64_t)Buf / page_size;
  off_t offset = sizeof(uint64_t) * virt_pfn;
  uint64_t npages = (BYTES + page_size-1) / page_size;
  uint64_t pagedata[npages];
  uint64_t ret = lseek(fd, offset, SEEK_SET);
  assert(ret == offset);
  ret = ::read(fd, pagedata, sizeof(uint64_t)*npages);
  assert(ret == sizeof(uint64_t) * npages);
  int nhugepages = npages / 512;
  int n4ktotal, nnothuge;
  n4ktotal = 0;
  nnothuge = 0;
  for (int i = 0; i < nhugepages; ++i) {
    uint64_t baseaddr = (pagedata[i*512] & 0x7fffffffffffffULL) * page_size;
    for (int j = 0; j < 512; ++j) {
      uint64_t pageaddr = (pagedata[i*512+j] & 0x7fffffffffffffULL) * page_size;
      ++n4ktotal;
      if (pageaddr != baseaddr + j * page_size)
	++nnothuge;
    }
  }
  int rank = CartesianCommunicator::RankWorld();
  printf("rank %d Allocated %d 4k pages, %d not in huge pages\n", rank, n4ktotal, nnothuge);
#endif
}

std::string sizeString(const size_t bytes)
{
  constexpr unsigned int bufSize = 256;
  const char             *suffixes[7] = {"", "K", "M", "G", "T", "P", "E"};
  char                   buf[256];
  size_t                 s     = 0;
  double                 count = bytes;

  while (count >= 1024 && s < 7)
  {
      s++;
      count /= 1024;
  }
  if (count - floor(count) == 0.0)
  {
      snprintf(buf, bufSize, "%d %sB", (int)count, suffixes[s]);
  }
  else
  {
      snprintf(buf, bufSize, "%.1f %sB", count, suffixes[s]);
  }

  return std::string(buf);
}

}
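For reference, a few values the formatter above produces (derived directly from the loop and the suffix table):

    sizeString(512)      -> "512 B"
    sizeString(1536)     -> "1.5 KB"
    sizeString(3UL<<30)  -> "3 GB"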
@@ -64,6 +64,66 @@ namespace Grid {

  };
  
  std::string sizeString(size_t bytes);

  struct MemoryStats
  {
    size_t totalAllocated{0}, maxAllocated{0}, 
           currentlyAllocated{0}, totalFreed{0};
  };
    
  class MemoryProfiler
  {
  public:
    static MemoryStats *stats;
    static bool        debug;
  };

  #define memString(bytes) std::to_string(bytes) + " (" + sizeString(bytes) + ")"
  #define profilerDebugPrint \
  if (MemoryProfiler::stats)\
  {\
    auto s = MemoryProfiler::stats;\
    std::cout << GridLogDebug << "[Memory debug] Stats " << MemoryProfiler::stats << std::endl;\
    std::cout << GridLogDebug << "[Memory debug] total  : " << memString(s->totalAllocated) \
              << std::endl;\
    std::cout << GridLogDebug << "[Memory debug] max    : " << memString(s->maxAllocated) \
              << std::endl;\
    std::cout << GridLogDebug << "[Memory debug] current: " << memString(s->currentlyAllocated) \
              << std::endl;\
    std::cout << GridLogDebug << "[Memory debug] freed  : " << memString(s->totalFreed) \
              << std::endl;\
  }

  #define profilerAllocate(bytes)\
  if (MemoryProfiler::stats)\
  {\
    auto s = MemoryProfiler::stats;\
    s->totalAllocated     += (bytes);\
    s->currentlyAllocated += (bytes);\
    s->maxAllocated        = std::max(s->maxAllocated, s->currentlyAllocated);\
  }\
  if (MemoryProfiler::debug)\
  {\
    std::cout << GridLogDebug << "[Memory debug] allocating " << memString(bytes) << std::endl;\
    profilerDebugPrint;\
  }

  #define profilerFree(bytes)\
  if (MemoryProfiler::stats)\
  {\
    auto s = MemoryProfiler::stats;\
    s->totalFreed         += (bytes);\
    s->currentlyAllocated -= (bytes);\
  }\
  if (MemoryProfiler::debug)\
  {\
    std::cout << GridLogDebug << "[Memory debug] freeing " << memString(bytes) << std::endl;\
    profilerDebugPrint;\
  }

  void check_huge_pages(void *Buf,uint64_t BYTES);

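  // Sketch of enabling the profiler from user code (the MemoryStats object must
  // outlive the allocations it tracks; names as declared above):
  //
  //   MemoryStats memStats;
  //   MemoryProfiler::stats = &memStats;   // start accounting
  //   MemoryProfiler::debug = true;        // per-(de)allocation GridLogDebug output
  //   ... create and destroy Lattice fields ...
  //   std::cout << "peak " << memString(memStats.maxAllocated) << std::endl;
  //   MemoryProfiler::stats = nullptr;     // stop accounting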
////////////////////////////////////////////////////////////////////
// A lattice of something, but assume the something is SIMDized.
////////////////////////////////////////////////////////////////////
@@ -90,6 +150,7 @@ public:
  pointer allocate(size_type __n, const void* _p= 0)
  { 
    size_type bytes = __n*sizeof(_Tp);
    profilerAllocate(bytes);

    _Tp *ptr = (_Tp *) PointerCache::Lookup(bytes);
    //    if ( ptr != NULL ) 
@@ -120,6 +181,8 @@ public:
  void deallocate(pointer __p, size_type __n) { 
    size_type bytes = __n * sizeof(_Tp);

    profilerFree(bytes);

    pointer __freeme = (pointer)PointerCache::Insert((void *)__p,bytes);

#ifdef HAVE_MM_MALLOC_H
@@ -170,10 +233,13 @@ public:
#ifdef GRID_COMMS_SHMEM
  pointer allocate(size_type __n, const void* _p= 0)
  {
    size_type bytes = __n*sizeof(_Tp);

+   profilerAllocate(bytes);
#ifdef CRAY
-   _Tp *ptr = (_Tp *) shmem_align(__n*sizeof(_Tp),64);
+   _Tp *ptr = (_Tp *) shmem_align(bytes,64);
#else
-   _Tp *ptr = (_Tp *) shmem_align(64,__n*sizeof(_Tp));
+   _Tp *ptr = (_Tp *) shmem_align(64,bytes);
#endif
#ifdef PARANOID_SYMMETRIC_HEAP
    static void * bcast;
@@ -191,27 +257,39 @@ public:
#endif 
    return ptr;
  }
- void deallocate(pointer __p, size_type) { 
+ void deallocate(pointer __p, size_type __n) { 
+   size_type bytes = __n*sizeof(_Tp);

+   profilerFree(bytes);
    shmem_free((void *)__p);
  }
#else
  pointer allocate(size_type __n, const void* _p= 0) 
  {
-#ifdef HAVE_MM_MALLOC_H
-   _Tp * ptr = (_Tp *) _mm_malloc(__n*sizeof(_Tp),GRID_ALLOC_ALIGN);
-#else
-   _Tp * ptr = (_Tp *) memalign(GRID_ALLOC_ALIGN,__n*sizeof(_Tp));
-#endif
+   size_type bytes = __n*sizeof(_Tp);
+
+   profilerAllocate(bytes);
+#ifdef HAVE_MM_MALLOC_H
+   _Tp * ptr = (_Tp *) _mm_malloc(bytes, GRID_ALLOC_ALIGN);
+#else
+   _Tp * ptr = (_Tp *) memalign(GRID_ALLOC_ALIGN, bytes);
+#endif
    uint8_t *cp = (uint8_t *)ptr;
    if ( ptr ) { 
    // One touch per 4k page, static OMP loop to catch same loop order
#ifdef GRID_OMP
#pragma omp parallel for schedule(static)
#endif
      for(size_type n=0;n<bytes;n+=4096){
	cp[n]=0;
      }
    }
    return ptr;
  }
- void deallocate(pointer __p, size_type) { 
+ void deallocate(pointer __p, size_type __n) {
+   size_type bytes = __n*sizeof(_Tp);

+   profilerFree(bytes);
#ifdef HAVE_MM_MALLOC_H
    _mm_free((void *)__p); 
#else
@@ -44,11 +44,21 @@ namespace Grid{
  class GridBase : public CartesianCommunicator , public GridThread {

public:

    int dummy;
    // Give Lattice access
    template<class object> friend class Lattice;

    GridBase(const std::vector<int> & processor_grid) : CartesianCommunicator(processor_grid) {};
    GridBase(const std::vector<int> & processor_grid,
	     const CartesianCommunicator &parent,
	     int &split_rank) 
      : CartesianCommunicator(processor_grid,parent,split_rank) {};
    GridBase(const std::vector<int> & processor_grid,
	     const CartesianCommunicator &parent) 
      : CartesianCommunicator(processor_grid,parent,dummy) {};

    virtual ~GridBase() = default;

    // Physics Grid information.
    std::vector<int> _simd_layout;// Which dimensions get relayed out over simd lanes.
@@ -69,6 +79,8 @@ public:
    std::vector<int> _lstart;     // local start of array in gcoors _processor_coor[d]*_ldimensions[d]
    std::vector<int> _lend  ;     // local end of array in gcoors   _processor_coor[d]*_ldimensions[d]+_ldimensions_[d]-1

    bool _isCheckerBoarded; 

public:

    ////////////////////////////////////////////////////////////////
@@ -210,9 +222,6 @@ public:
      assert(lidx<lSites());
      Lexicographic::CoorFromIndex(lcoor,lidx,_ldimensions);
    }

    void GlobalCoorToGlobalIndex(const std::vector<int> & gcoor,int & gidx){
      gidx=0;
      int mult=1;
@@ -38,7 +38,7 @@ namespace Grid{
class GridCartesian: public GridBase {

public:

    int dummy;
    virtual int  CheckerBoardFromOindexTable (int Oindex) {
      return 0;
    }
@@ -61,13 +61,43 @@ public:
    virtual int CheckerBoardShift(int source_cb,int dim,int shift, int osite){
      return shift;
    }
    /////////////////////////////////////////////////////////////////////////
    // Constructor takes a parent grid and possibly subdivides communicator.
    /////////////////////////////////////////////////////////////////////////
    GridCartesian(const std::vector<int> &dimensions,
		  const std::vector<int> &simd_layout,
		  const std::vector<int> &processor_grid,
		  const GridCartesian &parent) : GridBase(processor_grid,parent,dummy)
    {
      Init(dimensions,simd_layout,processor_grid);
    }
    GridCartesian(const std::vector<int> &dimensions,
		  const std::vector<int> &simd_layout,
		  const std::vector<int> &processor_grid,
		  const GridCartesian &parent,int &split_rank) : GridBase(processor_grid,parent,split_rank)
    {
      Init(dimensions,simd_layout,processor_grid);
    }
    /////////////////////////////////////////////////////////////////////////
    // Construct from comm world
    /////////////////////////////////////////////////////////////////////////
    GridCartesian(const std::vector<int> &dimensions,
		  const std::vector<int> &simd_layout,
		  const std::vector<int> &processor_grid) : GridBase(processor_grid)
    {
      Init(dimensions,simd_layout,processor_grid);
    }

    virtual ~GridCartesian() = default;
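    //////////////////////////////////////////////////////////////////////
    // Construction sketch (illustrative; GridDefaultSimd/GridDefaultMpi are
    // the usual helper names assumed here):
    //   std::vector<int> latt({16,16,16,16});
    //   GridCartesian Parent(latt,GridDefaultSimd(4,vComplex::Nsimd()),GridDefaultMpi());
    //   GridCartesian Child (latt,Parent._simd_layout,std::vector<int>({1,1,1,1}),Parent);
    // Child runs on a [1,1,1,1] processor grid carved out of Parent's communicator.
    //////////////////////////////////////////////////////////////////////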

    void Init(const std::vector<int> &dimensions,
	      const std::vector<int> &simd_layout,
	      const std::vector<int> &processor_grid)
    {
      ///////////////////////
      // Grid information
      ///////////////////////
      _isCheckerBoarded = false;
      _ndimension = dimensions.size();

      _fdimensions.resize(_ndimension);
@@ -93,6 +123,7 @@ public:

        // Use a reduced simd grid
        _ldimensions[d] = _gdimensions[d] / _processors[d]; //local dimensions
        //std::cout << _ldimensions[d] << "  " << _gdimensions[d] << "  " << _processors[d] << std::endl;
        assert(_ldimensions[d] * _processors[d] == _gdimensions[d]);

        _rdimensions[d] = _ldimensions[d] / _simd_layout[d]; //overdecomposition
@@ -137,6 +168,7 @@ public:
        block = block * _rdimensions[d];
      }
    };

};
}
#endif
@@ -112,33 +112,67 @@ public:
      }
    };

-   GridRedBlackCartesian(const GridBase *base) : GridRedBlackCartesian(base->_fdimensions,base->_simd_layout,base->_processors)  {};
+   ////////////////////////////////////////////////////////////
+   // Create Redblack from original grid; require full grid pointer ?
+   ////////////////////////////////////////////////////////////
+   GridRedBlackCartesian(const GridBase *base) : GridBase(base->_processors,*base)
+   {
+     int dims = base->_ndimension;
+     std::vector<int> checker_dim_mask(dims,1);
+     int checker_dim = 0;
+     Init(base->_fdimensions,base->_simd_layout,base->_processors,checker_dim_mask,checker_dim);
+   };

-   GridRedBlackCartesian(const std::vector<int> &dimensions,
+   ////////////////////////////////////////////////////////////
+   // Create redblack from original grid, with non-trivial checker dim mask
+   ////////////////////////////////////////////////////////////
+   GridRedBlackCartesian(const GridBase *base,
+			  const std::vector<int> &checker_dim_mask,
+			  int checker_dim
+			  ) :  GridBase(base->_processors,*base) 
+   {
+     Init(base->_fdimensions,base->_simd_layout,base->_processors,checker_dim_mask,checker_dim)  ;
+   }
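    ////////////////////////////////////////////////////////////
    // Usage sketch: the preferred path is now to checkerboard an
    // existing full grid, e.g.
    //   GridCartesian         Fine(latt,simd,mpi);
    //   GridRedBlackCartesian RBFine(&Fine);   // halves the checker dimension
    ////////////////////////////////////////////////////////////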

    virtual ~GridRedBlackCartesian() = default;
#if 0
    ////////////////////////////////////////////////////////////
    // Create redblack grid ;; deprecate these. Should not
    // need direct creation of redblack without a full grid to base on
    ////////////////////////////////////////////////////////////
    GridRedBlackCartesian(const GridBase *base,
			  const std::vector<int> &dimensions,
			  const std::vector<int> &simd_layout,
			  const std::vector<int> &processor_grid,
			  const std::vector<int> &checker_dim_mask,
			  int checker_dim
-			  ) :  GridBase(processor_grid) 
+			  ) :  GridBase(processor_grid,*base) 
    {
      Init(dimensions,simd_layout,processor_grid,checker_dim_mask,checker_dim);
    }
-   GridRedBlackCartesian(const std::vector<int> &dimensions,

    ////////////////////////////////////////////////////////////
    // Create redblack grid
    ////////////////////////////////////////////////////////////
    GridRedBlackCartesian(const GridBase *base,
			  const std::vector<int> &dimensions,
			  const std::vector<int> &simd_layout,
-			  const std::vector<int> &processor_grid) : GridBase(processor_grid) 
+			  const std::vector<int> &processor_grid) : GridBase(processor_grid,*base) 
    {
      std::vector<int> checker_dim_mask(dimensions.size(),1);
-     Init(dimensions,simd_layout,processor_grid,checker_dim_mask,0);
+     int checker_dim = 0;
+     Init(dimensions,simd_layout,processor_grid,checker_dim_mask,checker_dim);
    }
#endif

    void Init(const std::vector<int> &dimensions,
              const std::vector<int> &simd_layout,
              const std::vector<int> &processor_grid,
              const std::vector<int> &checker_dim_mask,
              int checker_dim)
    {
      ///////////////////////
      // Grid information
      ///////////////////////
      _isCheckerBoarded = true;
      _checker_dim = checker_dim;
      assert(checker_dim_mask[checker_dim] == 1);
      _ndimension = dimensions.size();
@@ -172,6 +206,7 @@ public:
        {
          assert((_gdimensions[d] & 0x1) == 0);
          _gdimensions[d] = _gdimensions[d] / 2; // Remove a checkerboard
	  _gsites /= 2;
        }
        _ldimensions[d] = _gdimensions[d] / _processors[d];
        assert(_ldimensions[d] * _processors[d] == _gdimensions[d]);
@@ -28,6 +28,7 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
#ifndef GRID_COMMUNICATOR_H
#define GRID_COMMUNICATOR_H

#include <Grid/communicator/SharedMemory.h>
#include <Grid/communicator/Communicator_base.h>

#endif

Grid/communicator/Communicator_base.cc (new file, 76 lines)
@@ -0,0 +1,76 @@
    /*************************************************************************************

    Grid physics library, www.github.com/paboyle/Grid 

    Source file: ./lib/communicator/Communicator_none.cc

    Copyright (C) 2015

Author: Peter Boyle <paboyle@ph.ed.ac.uk>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License along
    with this program; if not, write to the Free Software Foundation, Inc.,
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

    See the full license in the file "LICENSE" in the top level distribution directory
    *************************************************************************************/
    /*  END LEGAL */
#include <Grid/GridCore.h>
#include <fcntl.h>
#include <unistd.h>
#include <limits.h>
#include <sys/mman.h>

namespace Grid {

///////////////////////////////////////////////////////////////
// Info that is setup once and independent of cartesian layout
///////////////////////////////////////////////////////////////
CartesianCommunicator::CommunicatorPolicy_t  
CartesianCommunicator::CommunicatorPolicy= CartesianCommunicator::CommunicatorPolicyConcurrent;
int CartesianCommunicator::nCommThreads = -1;

/////////////////////////////////
// Grid information queries
/////////////////////////////////
int                      CartesianCommunicator::Dimensions(void)        { return _ndimension; };
int                      CartesianCommunicator::IsBoss(void)            { return _processor==0; };
int                      CartesianCommunicator::BossRank(void)          { return 0; };
int                      CartesianCommunicator::ThisRank(void)          { return _processor; };
const std::vector<int> & CartesianCommunicator::ThisProcessorCoor(void) { return _processor_coor; };
const std::vector<int> & CartesianCommunicator::ProcessorGrid(void)     { return _processors; };
int                      CartesianCommunicator::ProcessorCount(void)    { return _Nprocessors; };

////////////////////////////////////////////////////////////////////////////////
// very VERY rarely (Log, serial RNG) we need world without a grid
////////////////////////////////////////////////////////////////////////////////

void CartesianCommunicator::GlobalSum(ComplexF &c)
{
  GlobalSumVector((float *)&c,2);
}
void CartesianCommunicator::GlobalSumVector(ComplexF *c,int N)
{
  GlobalSumVector((float *)c,2*N);
}
void CartesianCommunicator::GlobalSum(ComplexD &c)
{
  GlobalSumVector((double *)&c,2);
}
void CartesianCommunicator::GlobalSumVector(ComplexD *c,int N)
{
  GlobalSumVector((double *)c,2*N);
}
  
}

@@ -32,116 +32,33 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
///////////////////////////////////
// Processor layout information
///////////////////////////////////
-#ifdef GRID_COMMS_MPI
-#include <mpi.h>
-#endif
-#ifdef GRID_COMMS_MPI3
-#include <mpi.h>
-#endif
-#ifdef GRID_COMMS_MPIT
-#include <mpi.h>
-#endif
-#ifdef GRID_COMMS_SHMEM
-#include <mpp/shmem.h>
-#endif
+#include <Grid/communicator/SharedMemory.h>

namespace Grid {

-class CartesianCommunicator {
-  public:    
+class CartesianCommunicator : public SharedMemory {
+
+public:    

  ////////////////////////////////////////////
  // Isend/Irecv/Wait, or Sendrecv blocking
  // Policies
  ////////////////////////////////////////////
  enum CommunicatorPolicy_t { CommunicatorPolicyConcurrent, CommunicatorPolicySequential };
  static CommunicatorPolicy_t CommunicatorPolicy;
  static void SetCommunicatorPolicy(CommunicatorPolicy_t policy ) { CommunicatorPolicy = policy; }

  ///////////////////////////////////////////
  // Up to 65536 ranks per node adequate for now
  // 128MB shared memory for comms enough for 48^4 local vol comms
  // Give external control (command line override?) of this
  ///////////////////////////////////////////
  static const int MAXLOG2RANKSPERNODE = 16;            
  static uint64_t  MAX_MPI_SHM_BYTES;
  static int       nCommThreads;
  // use explicit huge pages
  static int       Hugepages;

  ////////////////////////////////////////////
  // Communicator should know nothing of the physics grid, only processor grid.
  ////////////////////////////////////////////
  int              _Nprocessors;     // How many in all
  std::vector<int> _processors;      // Which dimensions get relayed out over processors lanes.
  int              _processor;       // linear processor rank
  std::vector<int> _processor_coor;  // linear processor coordinate
  unsigned long    _ndimension;

#if defined (GRID_COMMS_MPI) || defined (GRID_COMMS_MPI3) || defined (GRID_COMMS_MPIT)
  static MPI_Comm communicator_world;

  MPI_Comm              communicator;
  std::vector<MPI_Comm> communicator_halo;

  typedef MPI_Request CommsRequest_t;
#else 
  typedef int CommsRequest_t;
#endif

  ////////////////////////////////////////////////////////////////////
  // Helper functionality for SHM Windows common to all other impls
  ////////////////////////////////////////////////////////////////////
  // Longer term; drop this in favour of a master / slave model with 
  // cartesian communicator on a subset of ranks, slave ranks controlled
  // by group leader with data xfer via shared memory
  ////////////////////////////////////////////////////////////////////
#ifdef GRID_COMMS_MPI3

  static int ShmRank;
  static int ShmSize;
  static int GroupRank;
  static int GroupSize;
  static int WorldRank;
  static int WorldSize;

  std::vector<int>  WorldDims;
  std::vector<int>  GroupDims;
  std::vector<int>  ShmDims;
  
  std::vector<int> GroupCoor;
  std::vector<int> ShmCoor;
  std::vector<int> WorldCoor;

  static std::vector<int> GroupRanks; 
  static std::vector<int> MyGroup;
  static int ShmSetup;
  static MPI_Win ShmWindow; 
  static MPI_Comm ShmComm;
  
  std::vector<int>  LexicographicToWorldRank;
  
  static std::vector<void *> ShmCommBufs;

#else 
  static void ShmInitGeneric(void);
  static commVector<uint8_t> ShmBufStorageVector;
#endif 

  /////////////////////////////////
  // Grid information and queries
  // Implemented in Communicator_base.C
  /////////////////////////////////
  static void * ShmCommBuf;

  size_t heap_top;
  size_t heap_bytes;

  void *ShmBufferSelf(void);
  void *ShmBuffer(int rank);
  void *ShmBufferTranslate(int rank,void * local_p);
  void *ShmBufferMalloc(size_t bytes);
  void ShmBufferFreeAll(void) ;
  static Grid_MPI_Comm      communicator_world;
  Grid_MPI_Comm             communicator;
  std::vector<Grid_MPI_Comm> communicator_halo;
  
  ////////////////////////////////////////////////
  // Must call in Grid startup
@@ -149,9 +66,23 @@ class CartesianCommunicator {
  static void Init(int *argc, char ***argv);

  ////////////////////////////////////////////////
-  // Constructor of any given grid
+  // Constructors to sub-divide a parent communicator
+  // and default to comm world
  ////////////////////////////////////////////////
  CartesianCommunicator(const std::vector<int> &processors,const CartesianCommunicator &parent,int &srank);
  CartesianCommunicator(const std::vector<int> &pdimensions_in);
  virtual ~CartesianCommunicator();

 private:

  ////////////////////////////////////////////////
  // Private initialise from an MPI communicator
  // Can use after an MPI_Comm_split, but hidden from user so private
  ////////////////////////////////////////////////
  void InitFromMPICommunicator(const std::vector<int> &processors, Grid_MPI_Comm communicator_base);

 public:
  
  ////////////////////////////////////////////////////////////////////////////////////////
  // Wraps MPI_Cart routines, or implements equivalent on other impls
@@ -167,8 +98,6 @@ class CartesianCommunicator {
  const std::vector<int> & ThisProcessorCoor(void) ;
  const std::vector<int> & ProcessorGrid(void)     ;
  int                      ProcessorCount(void)    ;
-  int                      NodeCount(void)    ;
-  int                      RankCount(void)    ;

  ////////////////////////////////////////////////////////////////////////////////
  // very VERY rarely (Log, serial RNG) we need world without a grid
@@ -250,6 +179,23 @@ class CartesianCommunicator {
  ////////////////////////////////////////////////////////////
  void Broadcast(int root,void* data, int bytes);

  ////////////////////////////////////////////////////////////
  // All2All down one dimension
  ////////////////////////////////////////////////////////////
  template<class T> void AllToAll(int dim,std::vector<T> &in, std::vector<T> &out){
    assert(dim>=0);
    assert(dim<_ndimension);
    assert(in.size()==out.size());
    int numnode = _processors[dim];
    uint64_t bytes=sizeof(T);
    uint64_t words=in.size()/numnode;
    assert(numnode * words == in.size());
    assert(words < (1ULL<<31));
    AllToAll(dim,(void *)&in[0],(void *)&out[0],words,bytes);
  }
  void AllToAll(int dim  ,void *in,void *out,uint64_t words,uint64_t bytes);
  void AllToAll(void  *in,void *out,uint64_t words         ,uint64_t bytes);
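  // Usage sketch for the typed AllToAll (illustrative; 'comm' is any object of
  // this class): with _processors[dim]==4 and in.size()==8, 'in' is treated as
  // 4 blocks of words=2 elements, one block exchanged with each rank along dim.
  //   std::vector<ComplexD> in(8), out(8);
  //   comm.AllToAll(0,in,out);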
  
  template<class obj> void Broadcast(int root,obj &data)
    {
      Broadcast(root,(void *)&data,sizeof(data));

Grid/communicator/Communicator_mpi3.cc (new file, 508 lines)
@@ -0,0 +1,508 @@
/*************************************************************************************

    Grid physics library, www.github.com/paboyle/Grid 

    Source file: ./lib/communicator/Communicator_mpi.cc

    Copyright (C) 2015

Author: Peter Boyle <paboyle@ph.ed.ac.uk>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License along
    with this program; if not, write to the Free Software Foundation, Inc.,
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

    See the full license in the file "LICENSE" in the top level distribution directory
    *************************************************************************************/
    /*  END LEGAL */
#include <Grid/GridCore.h>
#include <Grid/communicator/SharedMemory.h>

namespace Grid {

Grid_MPI_Comm       CartesianCommunicator::communicator_world;

////////////////////////////////////////////
// First initialise of comms system
////////////////////////////////////////////
void CartesianCommunicator::Init(int *argc, char ***argv) 
{

  int flag;
  int provided;

  MPI_Initialized(&flag); // needed to coexist with other libs apparently
  if ( !flag ) {
    MPI_Init_thread(argc,argv,MPI_THREAD_MULTIPLE,&provided);
    //If only 1 comms thread we require any threading mode other than SINGLE, but for multiple comms threads we need MULTIPLE
    if( (nCommThreads == 1 && provided == MPI_THREAD_SINGLE) ||
        (nCommThreads > 1 && provided != MPI_THREAD_MULTIPLE) )
      assert(0);
  }

  // Never clean up as done once.
  MPI_Comm_dup (MPI_COMM_WORLD,&communicator_world);

  GlobalSharedMemory::Init(communicator_world);
  GlobalSharedMemory::SharedMemoryAllocate(
		   GlobalSharedMemory::MAX_MPI_SHM_BYTES,
		   GlobalSharedMemory::Hugepages);
}

///////////////////////////////////////////////////////////////////////////
// Use cartesian communicators now even in MPI3
///////////////////////////////////////////////////////////////////////////
void CartesianCommunicator::ShiftedRanks(int dim,int shift,int &source,int &dest)
{
  int ierr=MPI_Cart_shift(communicator,dim,shift,&source,&dest);
  assert(ierr==0);
}
int CartesianCommunicator::RankFromProcessorCoor(std::vector<int> &coor)
{
  int rank;
  int ierr=MPI_Cart_rank  (communicator, &coor[0], &rank);
  assert(ierr==0);
  return rank;
}
void  CartesianCommunicator::ProcessorCoorFromRank(int rank, std::vector<int> &coor)
{
  coor.resize(_ndimension);
  int ierr=MPI_Cart_coords  (communicator, rank, _ndimension,&coor[0]);
  assert(ierr==0);
}

////////////////////////////////////////////////////////////////////////////////////////////////////////
// Initialises from communicator_world
////////////////////////////////////////////////////////////////////////////////////////////////////////
CartesianCommunicator::CartesianCommunicator(const std::vector<int> &processors) 
{
  MPI_Comm optimal_comm;
  ////////////////////////////////////////////////////
  // Remap using the shared memory optimising routine
  // The remap creates a comm which must be freed
  ////////////////////////////////////////////////////
  GlobalSharedMemory::OptimalCommunicator    (processors,optimal_comm);
  InitFromMPICommunicator(processors,optimal_comm);
  SetCommunicator(optimal_comm);
  ///////////////////////////////////////////////////
  // Free the temp communicator
  ///////////////////////////////////////////////////
  MPI_Comm_free(&optimal_comm);
}

//////////////////////////////////
// Try to subdivide communicator
//////////////////////////////////
CartesianCommunicator::CartesianCommunicator(const std::vector<int> &processors,const CartesianCommunicator &parent,int &srank)    
{
  _ndimension = processors.size();

  int parent_ndimension = parent._ndimension; assert(_ndimension >= parent._ndimension);
  std::vector<int> parent_processor_coor(_ndimension,0);
  std::vector<int> parent_processors    (_ndimension,1);

  // Can make 5d grid from 4d etc...
  int pad = _ndimension-parent_ndimension;
  for(int d=0;d<parent_ndimension;d++){
    parent_processor_coor[pad+d]=parent._processor_coor[d];
    parent_processors    [pad+d]=parent._processors[d];
  }

  //////////////////////////////////////////////////////////////////////////////////////////////////////
  // split the communicator
  //////////////////////////////////////////////////////////////////////////////////////////////////////
  //  int Nparent = parent._processors ; 
  int Nparent;
  MPI_Comm_size(parent.communicator,&Nparent);

  int childsize=1;
  for(int d=0;d<processors.size();d++) {
    childsize *= processors[d];
  }
  int Nchild = Nparent/childsize;
  assert (childsize * Nchild == Nparent);

  std::vector<int> ccoor(_ndimension); // coor within subcommunicator
  std::vector<int> scoor(_ndimension); // coor of split within parent
  std::vector<int> ssize(_ndimension); // number of splits within parent

  for(int d=0;d<_ndimension;d++){
    ccoor[d] = parent_processor_coor[d] % processors[d];
    scoor[d] = parent_processor_coor[d] / processors[d];
    ssize[d] = parent_processors[d]     / processors[d];
  }

  // rank within subcomm ; srank is rank of subcomm within blocks of subcomms
  int crank;  
  // MPI uses the reverse lexicographic convention to ours, so the reversed routines are called
  Lexicographic::IndexFromCoorReversed(ccoor,crank,processors); // processors is the split grid dimensions
  Lexicographic::IndexFromCoorReversed(scoor,srank,ssize);      // ssize is the number of split grids
 | 
			
		||||
  MPI_Comm comm_split;
 | 
			
		||||
  if ( Nchild > 1 ) { 
 | 
			
		||||
 | 
			
		||||
    if(0){
 | 
			
		||||
      std::cout << GridLogMessage<<"Child communicator of "<< std::hex << parent.communicator << std::dec<<std::endl;
 | 
			
		||||
      std::cout << GridLogMessage<<" parent grid["<< parent._ndimension<<"]    ";
 | 
			
		||||
      for(int d=0;d<parent._ndimension;d++)  std::cout << parent._processors[d] << " ";
 | 
			
		||||
      std::cout<<std::endl;
 | 
			
		||||
      
 | 
			
		||||
      std::cout << GridLogMessage<<" child grid["<< _ndimension <<"]    ";
 | 
			
		||||
      for(int d=0;d<processors.size();d++)  std::cout << processors[d] << " ";
 | 
			
		||||
      std::cout<<std::endl;
 | 
			
		||||
      
 | 
			
		||||
      std::cout << GridLogMessage<<" old rank "<< parent._processor<<" coor ["<< parent._ndimension <<"]    ";
 | 
			
		||||
      for(int d=0;d<parent._ndimension;d++)  std::cout << parent._processor_coor[d] << " ";
 | 
			
		||||
      std::cout<<std::endl;
 | 
			
		||||
      
 | 
			
		||||
      std::cout << GridLogMessage<<" new split "<< srank<<" scoor ["<< _ndimension <<"]    ";
 | 
			
		||||
      for(int d=0;d<processors.size();d++)  std::cout << scoor[d] << " ";
 | 
			
		||||
      std::cout<<std::endl;
 | 
			
		||||
      
 | 
			
		||||
      std::cout << GridLogMessage<<" new rank "<< crank<<" coor ["<< _ndimension <<"]    ";
 | 
			
		||||
      for(int d=0;d<processors.size();d++)  std::cout << ccoor[d] << " ";
 | 
			
		||||
      std::cout<<std::endl;
 | 
			
		||||
 | 
			
		||||
      //////////////////////////////////////////////////////////////////////////////////////////////////////
 | 
			
		||||
      // Declare victory
 | 
			
		||||
      //////////////////////////////////////////////////////////////////////////////////////////////////////
 | 
			
		||||
      std::cout << GridLogMessage<<"Divided communicator "<< parent._Nprocessors<<" into "
 | 
			
		||||
		<< Nchild <<" communicators with " << childsize << " ranks"<<std::endl;
 | 
			
		||||
      std::cout << " Split communicator " <<comm_split <<std::endl;
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    ////////////////////////////////////////////////////////////////
 | 
			
		||||
    // Split the communicator
 | 
			
		||||
    ////////////////////////////////////////////////////////////////
 | 
			
		||||
    int ierr= MPI_Comm_split(parent.communicator,srank,crank,&comm_split);
 | 
			
		||||
    assert(ierr==0);
 | 
			
		||||
 | 
			
		||||
  } else {
 | 
			
		||||
    srank = 0;
 | 
			
		||||
    int ierr = MPI_Comm_dup (parent.communicator,&comm_split);
 | 
			
		||||
    assert(ierr==0);
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  //////////////////////////////////////////////////////////////////////////////////////////////////////
 | 
			
		||||
  // Set up from the new split communicator
 | 
			
		||||
  //////////////////////////////////////////////////////////////////////////////////////////////////////
 | 
			
		||||
  InitFromMPICommunicator(processors,comm_split);
 | 
			
		||||
 | 
			
		||||
  //////////////////////////////////////////////////////////////////////////////////////////////////////
 | 
			
		||||
  // Take the right SHM buffers
 | 
			
		||||
  //////////////////////////////////////////////////////////////////////////////////////////////////////
 | 
			
		||||
  SetCommunicator(comm_split);
 | 
			
		||||
  
 | 
			
		||||
  ///////////////////////////////////////////////
 | 
			
		||||
  // Free the temp communicator 
 | 
			
		||||
  ///////////////////////////////////////////////
 | 
			
		||||
  MPI_Comm_free(&comm_split);
 | 
			
		||||
 | 
			
		||||
  if(0){ 
 | 
			
		||||
    std::cout << " ndim " <<_ndimension<<" " << parent._ndimension << std::endl;
 | 
			
		||||
    for(int d=0;d<processors.size();d++){
 | 
			
		||||
      std::cout << d<< " " << _processor_coor[d] <<" " <<  ccoor[d]<<std::endl;
 | 
			
		||||
    }
 | 
			
		||||
  }
 | 
			
		||||
  for(int d=0;d<processors.size();d++){
 | 
			
		||||
    assert(_processor_coor[d] == ccoor[d] );
 | 
			
		||||
  }
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
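// Example (sketch, not from the Grid source): requesting a split of an
// 8-rank 2x2x2x1 parent grid into two 2x2x1x1 children. The dimension
// choices are hypothetical; "srank" returns which child this rank landed in.
//
//   std::vector<int> parent_dims = {2,2,2,1};            // Nparent = 8
//   CartesianCommunicator parent(parent_dims);
//   std::vector<int> child_dims  = {2,2,1,1};            // childsize = 4
//   int srank;
//   CartesianCommunicator child(child_dims,parent,srank);
//   // Nchild = 8/4 = 2 subcommunicators; srank is 0 or 1.
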
void CartesianCommunicator::InitFromMPICommunicator(const std::vector<int> &processors, MPI_Comm communicator_base)
{
  ////////////////////////////////////////////////////
  // Creates communicator, and the communicator_halo
  ////////////////////////////////////////////////////
  _ndimension = processors.size();
  _processor_coor.resize(_ndimension);

  /////////////////////////////////
  // Count the requested nodes
  /////////////////////////////////
  _Nprocessors=1;
  _processors = processors;
  for(int i=0;i<_ndimension;i++){
    _Nprocessors*=_processors[i];
  }

  std::vector<int> periodic(_ndimension,1);
  MPI_Cart_create(communicator_base, _ndimension,&_processors[0],&periodic[0],0,&communicator);
  MPI_Comm_rank(communicator,&_processor);
  MPI_Cart_coords(communicator,_processor,_ndimension,&_processor_coor[0]);

  if ( 0 && (communicator_base != communicator_world) ) {
    std::cout << "InitFromMPICommunicator Cartesian communicator created with a non-world communicator"<<std::endl;
    std::cout << " new communicator rank "<<_processor<< " coor ["<<_ndimension<<"] ";
    for(int d=0;d<_processors.size();d++){
      std::cout << _processor_coor[d]<<" ";
    }
    std::cout << std::endl;
  }

  int Size;
  MPI_Comm_size(communicator,&Size);

  communicator_halo.resize (2*_ndimension);
  for(int i=0;i<_ndimension*2;i++){
    MPI_Comm_dup(communicator,&communicator_halo[i]);
  }
  assert(Size==_Nprocessors);
}

CartesianCommunicator::~CartesianCommunicator()
{
  int MPI_is_finalised;
  MPI_Finalized(&MPI_is_finalised);
  if (communicator && !MPI_is_finalised) {
    MPI_Comm_free(&communicator);
    for(int i=0;i<communicator_halo.size();i++){
      MPI_Comm_free(&communicator_halo[i]);
    }
  }  
}
void CartesianCommunicator::GlobalSum(uint32_t &u){
  int ierr=MPI_Allreduce(MPI_IN_PLACE,&u,1,MPI_UINT32_T,MPI_SUM,communicator);
  assert(ierr==0);
}
void CartesianCommunicator::GlobalSum(uint64_t &u){
  int ierr=MPI_Allreduce(MPI_IN_PLACE,&u,1,MPI_UINT64_T,MPI_SUM,communicator);
  assert(ierr==0);
}
void CartesianCommunicator::GlobalXOR(uint32_t &u){
  int ierr=MPI_Allreduce(MPI_IN_PLACE,&u,1,MPI_UINT32_T,MPI_BXOR,communicator);
  assert(ierr==0);
}
void CartesianCommunicator::GlobalXOR(uint64_t &u){
  int ierr=MPI_Allreduce(MPI_IN_PLACE,&u,1,MPI_UINT64_T,MPI_BXOR,communicator);
  assert(ierr==0);
}
void CartesianCommunicator::GlobalSum(float &f){
  int ierr=MPI_Allreduce(MPI_IN_PLACE,&f,1,MPI_FLOAT,MPI_SUM,communicator);
  assert(ierr==0);
}
void CartesianCommunicator::GlobalSumVector(float *f,int N)
{
  int ierr=MPI_Allreduce(MPI_IN_PLACE,f,N,MPI_FLOAT,MPI_SUM,communicator);
  assert(ierr==0);
}
void CartesianCommunicator::GlobalSum(double &d)
{
  int ierr = MPI_Allreduce(MPI_IN_PLACE,&d,1,MPI_DOUBLE,MPI_SUM,communicator);
  assert(ierr==0);
}
void CartesianCommunicator::GlobalSumVector(double *d,int N)
{
  int ierr = MPI_Allreduce(MPI_IN_PLACE,d,N,MPI_DOUBLE,MPI_SUM,communicator);
  assert(ierr==0);
}
// Basic Halo comms primitive
void CartesianCommunicator::SendToRecvFrom(void *xmit,
					   int dest,
					   void *recv,
					   int from,
					   int bytes)
{
  std::vector<CommsRequest_t> reqs(0);
  //    unsigned long  xcrc = crc32(0L, Z_NULL, 0);
  //    unsigned long  rcrc = crc32(0L, Z_NULL, 0);
  //    xcrc = crc32(xcrc,(unsigned char *)xmit,bytes);
  SendToRecvFromBegin(reqs,xmit,dest,recv,from,bytes);
  SendToRecvFromComplete(reqs);
  //    rcrc = crc32(rcrc,(unsigned char *)recv,bytes);
  //    printf("proc %d SendToRecvFrom %d bytes %lx %lx\n",_processor,bytes,xcrc,rcrc);
}
void CartesianCommunicator::SendRecvPacket(void *xmit,
					   void *recv,
					   int sender,
					   int receiver,
					   int bytes)
{
  MPI_Status stat;
  assert(sender != receiver);
  int tag = sender;
  if ( _processor == sender ) {
    MPI_Send(xmit, bytes, MPI_CHAR,receiver,tag,communicator);
  }
  if ( _processor == receiver ) { 
    MPI_Recv(recv, bytes, MPI_CHAR,sender,tag,communicator,&stat);
  }
}
// Basic Halo comms primitive
void CartesianCommunicator::SendToRecvFromBegin(std::vector<CommsRequest_t> &list,
						void *xmit,
						int dest,
						void *recv,
						int from,
						int bytes)
{
  int myrank = _processor;
  int ierr;

  if ( CommunicatorPolicy == CommunicatorPolicyConcurrent ) { 
    MPI_Request xrq;
    MPI_Request rrq;

    ierr =MPI_Irecv(recv, bytes, MPI_CHAR,from,from,communicator,&rrq);
    ierr|=MPI_Isend(xmit, bytes, MPI_CHAR,dest,_processor,communicator,&xrq);

    assert(ierr==0);
    list.push_back(xrq);
    list.push_back(rrq);
  } else { 
    // Give the CPU to MPI immediately; can use threads to overlap optionally
    ierr=MPI_Sendrecv(xmit,bytes,MPI_CHAR,dest,myrank,
		      recv,bytes,MPI_CHAR,from, from,
		      communicator,MPI_STATUS_IGNORE);
    assert(ierr==0);
  }
}

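// Example (sketch, not from the Grid source): the two communicator policies
// above trade latency hiding for simplicity. Concurrent posts non-blocking
// Isend/Irecv pairs and defers completion; Sequential completes inside the
// blocking MPI_Sendrecv before returning, so the request list stays empty.
//
//   std::vector<CommsRequest_t> reqs;
//   grid.SendToRecvFromBegin(reqs,sendbuf,dest,recvbuf,from,bytes);
//   // ... overlap computation here under CommunicatorPolicyConcurrent ...
//   grid.SendToRecvFromComplete(reqs); // returns immediately under Sequential
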
double CartesianCommunicator::StencilSendToRecvFrom( void *xmit,
						     int dest,
						     void *recv,
						     int from,
						     int bytes,int dir)
{
  std::vector<CommsRequest_t> list;
  double offbytes = StencilSendToRecvFromBegin(list,xmit,dest,recv,from,bytes,dir);
  StencilSendToRecvFromComplete(list,dir);
  return offbytes;
}

double CartesianCommunicator::StencilSendToRecvFromBegin(std::vector<CommsRequest_t> &list,
							 void *xmit,
							 int dest,
							 void *recv,
							 int from,
							 int bytes,int dir)
{
  int ncomm  =communicator_halo.size(); 
  int commdir=dir%ncomm;

  MPI_Request xrq;
  MPI_Request rrq;

  int ierr;
  int gdest = ShmRanks[dest];
  int gfrom = ShmRanks[from];
  int gme   = ShmRanks[_processor];

  assert(dest != _processor);
  assert(from != _processor);
  assert(gme  == ShmRank);
  double off_node_bytes=0.0;

  if ( gfrom ==MPI_UNDEFINED) {
    ierr=MPI_Irecv(recv, bytes, MPI_CHAR,from,from,communicator_halo[commdir],&rrq);
    assert(ierr==0);
    list.push_back(rrq);
    off_node_bytes+=bytes;
  }

  if ( gdest == MPI_UNDEFINED ) {
    ierr =MPI_Isend(xmit, bytes, MPI_CHAR,dest,_processor,communicator_halo[commdir],&xrq);
    assert(ierr==0);
    list.push_back(xrq);
    off_node_bytes+=bytes;
  }

  if ( CommunicatorPolicy == CommunicatorPolicySequential ) { 
    this->StencilSendToRecvFromComplete(list,dir);
  }

  return off_node_bytes;
}
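// Example (sketch, not from the Grid source): ShmRanks maps a communicator
// rank to its rank inside this node's shared-memory group, or MPI_UNDEFINED
// when the rank lives on another node. Only the MPI_UNDEFINED (off-node)
// partners above generate real MPI traffic; on-node partners are reached
// through the shared-memory windows, so off_node_bytes counts inter-node
// volume only.
//
//   if ( ShmRanks[dest] == MPI_UNDEFINED ) { /* off node: post MPI_Isend */ }
//   else                                   { /* on node: use SHM buffers */ }
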
void CartesianCommunicator::StencilSendToRecvFromComplete(std::vector<CommsRequest_t> &waitall,int dir)
{
  SendToRecvFromComplete(waitall);
}
void CartesianCommunicator::StencilBarrier(void)
{
  MPI_Barrier  (ShmComm);
}
void CartesianCommunicator::SendToRecvFromComplete(std::vector<CommsRequest_t> &list)
{
  int nreq=list.size();

  if (nreq==0) return;

  std::vector<MPI_Status> status(nreq);
  int ierr = MPI_Waitall(nreq,&list[0],&status[0]);
  assert(ierr==0);
  list.resize(0);
}
void CartesianCommunicator::Barrier(void)
{
  int ierr = MPI_Barrier(communicator);
  assert(ierr==0);
}
void CartesianCommunicator::Broadcast(int root,void* data, int bytes)
{
  int ierr=MPI_Bcast(data,
		     bytes,
		     MPI_BYTE,
		     root,
		     communicator);
  assert(ierr==0);
}
int CartesianCommunicator::RankWorld(void){ 
  int r; 
  MPI_Comm_rank(communicator_world,&r);
  return r;
}
void CartesianCommunicator::BroadcastWorld(int root,void* data, int bytes)
{
  int ierr= MPI_Bcast(data,
		      bytes,
		      MPI_BYTE,
		      root,
		      communicator_world);
  assert(ierr==0);
}

void CartesianCommunicator::AllToAll(int dim,void  *in,void *out,uint64_t words,uint64_t bytes)
{
  std::vector<int> row(_ndimension,1);
  assert(dim>=0 && dim<_ndimension);

  //  Split the communicator
  row[dim] = _processors[dim];

  int me;
  CartesianCommunicator Comm(row,*this,me);
  Comm.AllToAll(in,out,words,bytes);
}
void CartesianCommunicator::AllToAll(void  *in,void *out,uint64_t words,uint64_t bytes)
{
  // MPI is a pain and uses "int" arguments.
  // 64*64*64*128*16 is ~500 million elements of data; at 24*4 bytes each
  // that is ~5x10^10 bytes, far beyond the ~2x10^9 range of a 32-bit int.
  // (Turns up on 32^3 x 64 Gparity too.)
  // Pack each word into a contiguous MPI datatype so the count stays small.
  MPI_Datatype object;
  int iwords; 
  int ibytes;
  iwords = words;
  ibytes = bytes;
  assert(words == iwords); // safe to cast to int ?
  assert(bytes == ibytes); // safe to cast to int ?
  MPI_Type_contiguous(ibytes,MPI_BYTE,&object);
  MPI_Type_commit(&object);
  MPI_Alltoall(in,iwords,object,out,iwords,object,communicator);
  MPI_Type_free(&object);
}
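// Worked example (sketch, not from the Grid source) of the overflow the
// datatype trick above avoids: a 64.64.64.128 lattice with 16 words per site
// gives 64*64*64*128*16 = 536,870,912 elements; at 24*4 = 96 bytes per word
// that is ~5.15e10 bytes, which cannot be expressed as a 32-bit "int" count
// of MPI_BYTE. Committing one MPI_Type_contiguous datatype of "bytes" bytes
// keeps the MPI_Alltoall count at "words", safely inside int range.
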



}

@@ -32,10 +32,21 @@ namespace Grid {
///////////////////////////////////////////////////////////////////////////////////////////////////
// Info that is setup once and independent of cartesian layout
///////////////////////////////////////////////////////////////////////////////////////////////////
Grid_MPI_Comm       CartesianCommunicator::communicator_world;

void CartesianCommunicator::Init(int *argc, char *** arv)
{
  ShmInitGeneric();
  GlobalSharedMemory::Init(communicator_world);
  GlobalSharedMemory::SharedMemoryAllocate(
		   GlobalSharedMemory::MAX_MPI_SHM_BYTES,
		   GlobalSharedMemory::Hugepages);
}

CartesianCommunicator::CartesianCommunicator(const std::vector<int> &processors,const CartesianCommunicator &parent,int &srank) 
  : CartesianCommunicator(processors) 
{
  srank=0;
  SetCommunicator(communicator_world);
}

CartesianCommunicator::CartesianCommunicator(const std::vector<int> &processors)
@@ -51,8 +62,11 @@ CartesianCommunicator::CartesianCommunicator(const std::vector<int> &processors)
    assert(_processors[d]==1);
    _processor_coor[d] = 0;
  }
  SetCommunicator(communicator_world);
}

CartesianCommunicator::~CartesianCommunicator(){}

void CartesianCommunicator::GlobalSum(float &){}
void CartesianCommunicator::GlobalSumVector(float *,int N){}
void CartesianCommunicator::GlobalSum(double &){}
@@ -95,6 +109,14 @@ void CartesianCommunicator::SendToRecvFromComplete(std::vector<CommsRequest_t> &
{
  assert(0);
}
void CartesianCommunicator::AllToAll(int dim,void  *in,void *out,uint64_t words,uint64_t bytes)
{
  bcopy(in,out,bytes*words);
}
void CartesianCommunicator::AllToAll(void  *in,void *out,uint64_t words,uint64_t bytes)
{
  bcopy(in,out,bytes*words);
}

int  CartesianCommunicator::RankWorld(void){return 0;}
void CartesianCommunicator::Barrier(void){}
@@ -108,6 +130,36 @@ void CartesianCommunicator::ShiftedRanks(int dim,int shift,int &source,int &dest
  dest=0;
}

double CartesianCommunicator::StencilSendToRecvFrom( void *xmit,
						     int xmit_to_rank,
						     void *recv,
						     int recv_from_rank,
						     int bytes, int dir)
{
  std::vector<CommsRequest_t> list;
  // Discard the "dir"
  SendToRecvFromBegin   (list,xmit,xmit_to_rank,recv,recv_from_rank,bytes);
  SendToRecvFromComplete(list);
  return 2.0*bytes;
}
double CartesianCommunicator::StencilSendToRecvFromBegin(std::vector<CommsRequest_t> &list,
							 void *xmit,
							 int xmit_to_rank,
							 void *recv,
							 int recv_from_rank,
							 int bytes, int dir)
{
  // Discard the "dir"
  SendToRecvFromBegin(list,xmit,xmit_to_rank,recv,recv_from_rank,bytes);
  return 2.0*bytes;
}
void CartesianCommunicator::StencilSendToRecvFromComplete(std::vector<CommsRequest_t> &waitall,int dir)
{
  SendToRecvFromComplete(waitall);
}

void CartesianCommunicator::StencilBarrier(void){};


}

Grid/communicator/SharedMemory.cc (new file)
@@ -0,0 +1,92 @@
/*************************************************************************************

    Grid physics library, www.github.com/paboyle/Grid 

    Source file: ./lib/communicator/SharedMemory.cc

    Copyright (C) 2015

Author: Peter Boyle <paboyle@ph.ed.ac.uk>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License along
    with this program; if not, write to the Free Software Foundation, Inc.,
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

    See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/*  END LEGAL */

#include <Grid/GridCore.h>

namespace Grid { 

// static data

uint64_t            GlobalSharedMemory::MAX_MPI_SHM_BYTES   = 1024LL*1024LL*1024LL; 
int                 GlobalSharedMemory::Hugepages = 0;
int                 GlobalSharedMemory::_ShmSetup;
int                 GlobalSharedMemory::_ShmAlloc;
uint64_t            GlobalSharedMemory::_ShmAllocBytes;

std::vector<void *> GlobalSharedMemory::WorldShmCommBufs;

Grid_MPI_Comm       GlobalSharedMemory::WorldShmComm;
int                 GlobalSharedMemory::WorldShmRank;
int                 GlobalSharedMemory::WorldShmSize;
std::vector<int>    GlobalSharedMemory::WorldShmRanks;

Grid_MPI_Comm       GlobalSharedMemory::WorldComm;
int                 GlobalSharedMemory::WorldSize;
int                 GlobalSharedMemory::WorldRank;

int                 GlobalSharedMemory::WorldNodes;
int                 GlobalSharedMemory::WorldNode;

void GlobalSharedMemory::SharedMemoryFree(void)
{
  assert(_ShmAlloc);
  assert(_ShmAllocBytes>0);
  for(int r=0;r<WorldShmSize;r++){
    munmap(WorldShmCommBufs[r],_ShmAllocBytes);
  }
  _ShmAlloc = 0;
  _ShmAllocBytes = 0;
}
/////////////////////////////////
// Alloc, free shmem region
/////////////////////////////////
void *SharedMemory::ShmBufferMalloc(size_t bytes){
  //  bytes = (bytes+sizeof(vRealD))&(~(sizeof(vRealD)-1));// align up bytes
  void *ptr = (void *)heap_top;
  heap_top  += bytes;
  heap_bytes+= bytes;
  if (heap_bytes >= heap_size) {
    std::cout<< " ShmBufferMalloc exceeded shared heap size -- try increasing with --shm <MB> flag" <<std::endl;
    std::cout<< " Parameter specified in units of MB (megabytes) " <<std::endl;
    std::cout<< " Current value is " << (heap_size/(1024*1024)) <<std::endl;
    assert(heap_bytes<heap_size);
  }
  return ptr;
}
void SharedMemory::ShmBufferFreeAll(void) { 
  heap_top  =(size_t)ShmBufferSelf();
  heap_bytes=0;
}
void *SharedMemory::ShmBufferSelf(void)
{
  return ShmCommBufs[ShmRank];
}



}
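
// Example (sketch, not from the Grid source): ShmBufferMalloc above is a
// bump allocator over the node's shared window; there is no per-allocation
// free, only a wholesale reset. A hypothetical SharedMemory holder "shm":
//
//   void *a = shm.ShmBufferMalloc(1024);  // carve 1 KB off heap_top
//   void *b = shm.ShmBufferMalloc(2048);  // next 2 KB, contiguous after a
//   shm.ShmBufferFreeAll();               // rewind heap_top to the base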

Grid/communicator/SharedMemory.h (new file)
@@ -0,0 +1,165 @@
/*************************************************************************************

    Grid physics library, www.github.com/paboyle/Grid 

    Source file: ./lib/communicator/SharedMemory.h

    Copyright (C) 2015

Author: Peter Boyle <paboyle@ph.ed.ac.uk>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License along
    with this program; if not, write to the Free Software Foundation, Inc.,
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

    See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/*  END LEGAL */


// TODO
// 1) move includes into SharedMemory.cc
//
// 2) split shared memory into a) optimal communicator creation from comm world
// 
//                             b) shared memory buffers container
//                                -- static globally shared; init once
//                                -- per instance set of buffers.
//                                   

#pragma once 

#include <Grid/GridCore.h>

#if defined (GRID_COMMS_MPI3) 
#include <mpi.h>
#endif 
#include <semaphore.h>
#include <fcntl.h>
#include <unistd.h>
#include <limits.h>
#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/shm.h>
#include <sys/mman.h>
#include <zlib.h>
#ifdef HAVE_NUMAIF_H
#include <numaif.h>
#endif

namespace Grid {

#if defined (GRID_COMMS_MPI3) 
  typedef MPI_Comm    Grid_MPI_Comm;
  typedef MPI_Request CommsRequest_t;
#else 
  typedef int CommsRequest_t;
  typedef int Grid_MPI_Comm;
#endif

class GlobalSharedMemory {
 private:
  static const int     MAXLOG2RANKSPERNODE = 16;            

  // Init once lock on the buffer allocation
  static int      _ShmSetup;
  static int      _ShmAlloc;
  static uint64_t _ShmAllocBytes;

 public:
  static int      ShmSetup(void)      { return _ShmSetup; }
  static int      ShmAlloc(void)      { return _ShmAlloc; }
  static uint64_t ShmAllocBytes(void) { return _ShmAllocBytes; }
  static uint64_t      MAX_MPI_SHM_BYTES;
  static int           Hugepages;

  static std::vector<void *> WorldShmCommBufs;

  static Grid_MPI_Comm WorldComm;
  static int           WorldRank;
  static int           WorldSize;

  static Grid_MPI_Comm WorldShmComm;
  static int           WorldShmRank;
  static int           WorldShmSize;

  static int           WorldNodes;
  static int           WorldNode;

  static std::vector<int>  WorldShmRanks;

  //////////////////////////////////////////////////////////////////////////////////////
  // Create an optimal reordered communicator that makes MPI_Cart_create get it right
  //////////////////////////////////////////////////////////////////////////////////////
  static void Init(Grid_MPI_Comm comm); // Typically MPI_COMM_WORLD
  static void OptimalCommunicator(const std::vector<int> &processors,Grid_MPI_Comm & optimal_comm);  // Turns MPI_COMM_WORLD into right layout for Cartesian
  ///////////////////////////////////////////////////
  // Provide shared memory facilities off comm world
  ///////////////////////////////////////////////////
  static void SharedMemoryAllocate(uint64_t bytes, int flags);
  static void SharedMemoryFree(void);

};

//////////////////////////////
// one per communicator
//////////////////////////////
class SharedMemory 
{
 private:
  static const int     MAXLOG2RANKSPERNODE = 16;            

  size_t heap_top;
  size_t heap_bytes;
  size_t heap_size;

 protected:

  Grid_MPI_Comm    ShmComm; // for barriers
  int    ShmRank; 
  int    ShmSize;
  std::vector<void *> ShmCommBufs;
  std::vector<int>    ShmRanks;// Mapping comm ranks to Shm ranks

 public:
  SharedMemory() {};
  ~SharedMemory();
  ///////////////////////////////////////////////////////////////////////////////////////
  // set the buffers & sizes
  ///////////////////////////////////////////////////////////////////////////////////////
  void SetCommunicator(Grid_MPI_Comm comm);

  ////////////////////////////////////////////////////////////////////////
  // For this instance ; disjoint buffer sets between splits if split grid
  ////////////////////////////////////////////////////////////////////////
  void ShmBarrier(void); 

  ///////////////////////////////////////////////////
  // Call on any instance
  ///////////////////////////////////////////////////
  void SharedMemoryTest(void);
  void *ShmBufferSelf(void);
  void *ShmBuffer    (int rank);
  void *ShmBufferTranslate(int rank,void * local_p);
  void *ShmBufferMalloc(size_t bytes);
  void  ShmBufferFreeAll(void) ;
  
  //////////////////////////////////////////////////////////////////////////
  // Make info on Nodes & ranks and Shared memory available
  //////////////////////////////////////////////////////////////////////////
  int NodeCount(void) { return GlobalSharedMemory::WorldNodes;};
  int RankCount(void) { return GlobalSharedMemory::WorldSize;};

};

}
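
// Example (sketch, not from the Grid source): the intended two-level usage
// of the classes above. GlobalSharedMemory is initialised exactly once off
// the world communicator; each communicator instance then binds its own view
// of the node's buffers via SharedMemory::SetCommunicator (which Grid's
// CartesianCommunicator appears to reach through inheritance).
//
//   GlobalSharedMemory::Init(MPI_COMM_WORLD);              // once, world-wide
//   GlobalSharedMemory::SharedMemoryAllocate(bytes,flags); // once, per node
//   // ... later, per communicator instance:
//   //     SetCommunicator(comm);  // picks ShmComm, ShmRanks, ShmCommBufs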

Grid/communicator/SharedMemoryMPI.cc (new file)
@@ -0,0 +1,651 @@
/*************************************************************************************

    Grid physics library, www.github.com/paboyle/Grid 

    Source file: ./lib/communicator/SharedMemoryMPI.cc

    Copyright (C) 2015

Author: Peter Boyle <paboyle@ph.ed.ac.uk>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License along
    with this program; if not, write to the Free Software Foundation, Inc.,
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

    See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/*  END LEGAL */

#include <Grid/GridCore.h>
#include <pwd.h>

namespace Grid { 

/*Construct from an MPI communicator*/
void GlobalSharedMemory::Init(Grid_MPI_Comm comm)
{
  assert(_ShmSetup==0);
  WorldComm = comm;
  MPI_Comm_rank(WorldComm,&WorldRank);
  MPI_Comm_size(WorldComm,&WorldSize);
  // WorldComm, WorldSize, WorldRank

  /////////////////////////////////////////////////////////////////////
  // Split into groups that can share memory
  /////////////////////////////////////////////////////////////////////
  MPI_Comm_split_type(comm, MPI_COMM_TYPE_SHARED, 0, MPI_INFO_NULL,&WorldShmComm);
  MPI_Comm_rank(WorldShmComm     ,&WorldShmRank);
  MPI_Comm_size(WorldShmComm     ,&WorldShmSize);
  // WorldShmComm, WorldShmSize, WorldShmRank

  // WorldNodes
  WorldNodes = WorldSize/WorldShmSize;
  assert( (WorldNodes * WorldShmSize) == WorldSize );

  // FIXME: Check all WorldShmSize are the same ?

  /////////////////////////////////////////////////////////////////////
  // find world ranks in our SHM group (i.e. which ranks are on our node)
  /////////////////////////////////////////////////////////////////////
  MPI_Group WorldGroup, ShmGroup;
  MPI_Comm_group (WorldComm, &WorldGroup); 
  MPI_Comm_group (WorldShmComm, &ShmGroup);

  std::vector<int> world_ranks(WorldSize);   for(int r=0;r<WorldSize;r++) world_ranks[r]=r;

  WorldShmRanks.resize(WorldSize); 
  MPI_Group_translate_ranks (WorldGroup,WorldSize,&world_ranks[0],ShmGroup, &WorldShmRanks[0]); 

  ///////////////////////////////////////////////////////////////////
  // Identify who is in my group and nominate the leader
  ///////////////////////////////////////////////////////////////////
  int g=0;
  std::vector<int> MyGroup;
  MyGroup.resize(WorldShmSize);
  for(int rank=0;rank<WorldSize;rank++){
    if(WorldShmRanks[rank]!=MPI_UNDEFINED){
      assert(g<WorldShmSize);
      MyGroup[g++] = rank;
    }
  }
  
  std::sort(MyGroup.begin(),MyGroup.end(),std::less<int>());
  int myleader = MyGroup[0];
  
  std::vector<int> leaders_1hot(WorldSize,0);
  std::vector<int> leaders_group(WorldNodes,0);
  leaders_1hot [ myleader ] = 1;
    
  ///////////////////////////////////////////////////////////////////
  // global sum leaders over comm world
  ///////////////////////////////////////////////////////////////////
  int ierr=MPI_Allreduce(MPI_IN_PLACE,&leaders_1hot[0],WorldSize,MPI_INT,MPI_SUM,WorldComm);
  assert(ierr==0);

  ///////////////////////////////////////////////////////////////////
  // find the group leaders world rank
  ///////////////////////////////////////////////////////////////////
  int group=0;
  for(int l=0;l<WorldSize;l++){
    if(leaders_1hot[l]){
      leaders_group[group++] = l;
    }
  }

  ///////////////////////////////////////////////////////////////////
  // Identify the node of the group in which I (and my leader) live
  ///////////////////////////////////////////////////////////////////
  WorldNode=-1;
  for(int g=0;g<WorldNodes;g++){
    if (myleader == leaders_group[g]){
      WorldNode=g;
    }
  }
  assert(WorldNode!=-1);
  _ShmSetup=1;
}
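// Example (sketch, not from the Grid source): the node-detection idiom used
// by Init above, reduced to a standalone MPI fragment. Ranks that can share
// memory land in the same WorldShmComm, and WorldSize/WorldShmSize gives the
// node count when nodes are uniformly populated (asserted above).
//
//   MPI_Comm shm;
//   MPI_Comm_split_type(MPI_COMM_WORLD,MPI_COMM_TYPE_SHARED,0,MPI_INFO_NULL,&shm);
//   int nodeRank,nodeSize;
//   MPI_Comm_rank(shm,&nodeRank);   // my rank within the node
//   MPI_Comm_size(shm,&nodeSize);   // ranks per node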
// Gray encode support 
int BinaryToGray (int  binary) {
  int gray = (binary>>1)^binary;
  return gray;
}
int Log2Size(int TwoToPower,int MAXLOG2)
{
  int log2size = -1;
  for(int i=0;i<=MAXLOG2;i++){
    if ( (0x1<<i) == TwoToPower ) {
      log2size = i;
      break;
    }
  }
  return log2size;
}
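// Worked example (sketch, not from the Grid source): BinaryToGray maps
// consecutive integers to codes differing in exactly one bit, which is why
// it is used below to place neighbouring lattice nodes on adjacent
// hypercube links.
//
//   binary 0,1,2,3 -> (b>>1)^b -> gray 00,01,11,10
//   e.g. BinaryToGray(3) = (3>>1)^3 = 1^3 = 2 (binary 10)
//
// Log2Size(64,16) returns 6; Log2Size(48,16) returns -1 (not a power of two).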
void GlobalSharedMemory::OptimalCommunicator(const std::vector<int> &processors,Grid_MPI_Comm & optimal_comm)
{
#ifdef HYPERCUBE
  ////////////////////////////////////////////////////////////////
  // Assert power of two shm_size.
  ////////////////////////////////////////////////////////////////
  int log2size = Log2Size(WorldShmSize,MAXLOG2RANKSPERNODE);
  assert(log2size != -1);

  ////////////////////////////////////////////////////////////////
  // Identify the hypercube coordinate of this node using hostname
  ////////////////////////////////////////////////////////////////
  // n runs 0...7 9...16 18...25 27...34     (8*4)  5 bits
  // i runs 0..7                                    3 bits
  // r runs 0..3                                    2 bits
  // 2^10 = 1024 nodes
  const int maxhdim = 10; 
  std::vector<int> HyperCubeCoords(maxhdim,0);
  std::vector<int> RootHyperCubeCoords(maxhdim,0);
  int R;
  int I;
  int N;
  const int namelen = _POSIX_HOST_NAME_MAX;
  char name[namelen];

  // Parse ICE-XA hostname to get hypercube location
  gethostname(name,namelen);
  int nscan = sscanf(name,"r%di%dn%d",&R,&I,&N) ;
  assert(nscan==3);

  int nlo = N%9;
  int nhi = N/9;
  uint32_t hypercoor = (R<<8)|(I<<5)|(nhi<<3)|nlo ;
  uint32_t rootcoor  = hypercoor;

  //////////////////////////////////////////////////////////////////
  // Print debug info
  //////////////////////////////////////////////////////////////////
  for(int d=0;d<maxhdim;d++){
    HyperCubeCoords[d] = (hypercoor>>d)&0x1;
  }

  std::string hname(name);
  std::cout << "hostname "<<hname<<std::endl;
  std::cout << "R " << R << " I " << I << " N "<< N
            << " hypercoor 0x"<<std::hex<<hypercoor<<std::dec<<std::endl;

  //////////////////////////////////////////////////////////////////
  // broadcast node 0's base coordinate for this partition.
  //////////////////////////////////////////////////////////////////
  MPI_Bcast(&rootcoor, sizeof(rootcoor), MPI_BYTE, 0, WorldComm); 
  hypercoor=hypercoor-rootcoor;
  assert(hypercoor<WorldSize);
  assert(hypercoor>=0);

  //////////////////////////////////////
  // Printing
  //////////////////////////////////////
  for(int d=0;d<maxhdim;d++){
    HyperCubeCoords[d] = (hypercoor>>d)&0x1;
  }

  ////////////////////////////////////////////////////////////////
  // Identify subblock of ranks on node spreading across dims
  // in a maximally symmetrical way
  ////////////////////////////////////////////////////////////////
  int ndimension              = processors.size();
  std::vector<int> processor_coor(ndimension);
  std::vector<int> WorldDims = processors;   std::vector<int> ShmDims  (ndimension,1);  std::vector<int> NodeDims (ndimension);
  std::vector<int> ShmCoor  (ndimension);    std::vector<int> NodeCoor (ndimension);    std::vector<int> WorldCoor(ndimension);
  std::vector<int> HyperCoor(ndimension);
  int dim = 0;
  for(int l2=0;l2<log2size;l2++){
    while ( (WorldDims[dim] / ShmDims[dim]) <= 1 ) dim=(dim+1)%ndimension;
    ShmDims[dim]*=2;
    dim=(dim+1)%ndimension;
  }

  ////////////////////////////////////////////////////////////////
  // Establish torus of processes and nodes with sub-blockings
  ////////////////////////////////////////////////////////////////
  for(int d=0;d<ndimension;d++){
    NodeDims[d] = WorldDims[d]/ShmDims[d];
  }
  ////////////////////////////////////////////////////////////////
  // Map Hcube according to physical lattice 
  // must partition. Loop over dims and find out who would join.
  ////////////////////////////////////////////////////////////////
  int hcoor = hypercoor;
  for(int d=0;d<ndimension;d++){
     int bits = Log2Size(NodeDims[d],MAXLOG2RANKSPERNODE);
     int msk  = (0x1<<bits)-1;
     HyperCoor[d]=hcoor & msk;  
     HyperCoor[d]=BinaryToGray(HyperCoor[d]); // Space filling curve magic
     hcoor = hcoor >> bits;
  } 
  ////////////////////////////////////////////////////////////////
  // Check processor counts match
  ////////////////////////////////////////////////////////////////
  int Nprocessors=1;
  for(int i=0;i<ndimension;i++){
    Nprocessors*=processors[i];
  }
  assert(WorldSize==Nprocessors);

  ////////////////////////////////////////////////////////////////
  // Establish mapping between lexico physics coord and WorldRank
  ////////////////////////////////////////////////////////////////
  int rank;

  Lexicographic::CoorFromIndexReversed(NodeCoor,WorldNode   ,NodeDims);

  for(int d=0;d<ndimension;d++) NodeCoor[d]=HyperCoor[d];

  Lexicographic::CoorFromIndexReversed(ShmCoor ,WorldShmRank,ShmDims);
  for(int d=0;d<ndimension;d++) WorldCoor[d] = NodeCoor[d]*ShmDims[d]+ShmCoor[d];
  Lexicographic::IndexFromCoorReversed(WorldCoor,rank,WorldDims);

  /////////////////////////////////////////////////////////////////
  // Build the new communicator
  /////////////////////////////////////////////////////////////////
  int ierr= MPI_Comm_split(WorldComm,0,rank,&optimal_comm);
  assert(ierr==0);
#else 
  ////////////////////////////////////////////////////////////////
  // Assert power of two shm_size.
  ////////////////////////////////////////////////////////////////
  int log2size = Log2Size(WorldShmSize,MAXLOG2RANKSPERNODE);
  assert(log2size != -1);

  ////////////////////////////////////////////////////////////////
  // Identify subblock of ranks on node spreading across dims
  // in a maximally symmetrical way
  ////////////////////////////////////////////////////////////////
  int ndimension              = processors.size();
  std::vector<int> processor_coor(ndimension);
  std::vector<int> WorldDims = processors;   std::vector<int> ShmDims  (ndimension,1);  std::vector<int> NodeDims (ndimension);
  std::vector<int> ShmCoor  (ndimension);    std::vector<int> NodeCoor (ndimension);    std::vector<int> WorldCoor(ndimension);
  int dim = 0;
  for(int l2=0;l2<log2size;l2++){
    while ( (WorldDims[dim] / ShmDims[dim]) <= 1 ) dim=(dim+1)%ndimension;
    ShmDims[dim]*=2;
    dim=(dim+1)%ndimension;
  }

  ////////////////////////////////////////////////////////////////
  // Establish torus of processes and nodes with sub-blockings
  ////////////////////////////////////////////////////////////////
  for(int d=0;d<ndimension;d++){
    NodeDims[d] = WorldDims[d]/ShmDims[d];
  }

  ////////////////////////////////////////////////////////////////
  // Check processor counts match
  ////////////////////////////////////////////////////////////////
  int Nprocessors=1;
  for(int i=0;i<ndimension;i++){
    Nprocessors*=processors[i];
  }
  assert(WorldSize==Nprocessors);

  ////////////////////////////////////////////////////////////////
  // Establish mapping between lexico physics coord and WorldRank
  ////////////////////////////////////////////////////////////////
  int rank;

  Lexicographic::CoorFromIndexReversed(NodeCoor,WorldNode   ,NodeDims);
  Lexicographic::CoorFromIndexReversed(ShmCoor ,WorldShmRank,ShmDims);
  for(int d=0;d<ndimension;d++) WorldCoor[d] = NodeCoor[d]*ShmDims[d]+ShmCoor[d];
  Lexicographic::IndexFromCoorReversed(WorldCoor,rank,WorldDims);

  /////////////////////////////////////////////////////////////////
  // Build the new communicator
  /////////////////////////////////////////////////////////////////
  int ierr= MPI_Comm_split(WorldComm,0,rank,&optimal_comm);
  assert(ierr==0);
#endif
}
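// Worked example (sketch, not from the Grid source): the "Reversed" variants
// above make the last dimension run fastest, matching MPI_Cart_create's
// row-major rank ordering. For WorldDims = {2,2} the coordinate<->rank table
// would be
//
//   WorldCoor {0,0}->0  {0,1}->1  {1,0}->2  {1,1}->3
//
// so IndexFromCoorReversed({1,0},rank,{2,2}) yields rank==2, and the
// MPI_Comm_split(WorldComm,0,rank,...) call orders the new communicator's
// ranks by that lexicographic key.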
////////////////////////////////////////////////////////////////////////////////////////////
// SHMGET
////////////////////////////////////////////////////////////////////////////////////////////
#ifdef GRID_MPI3_SHMGET
void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
{
  std::cout << "SharedMemoryAllocate "<< bytes<< " shmget implementation "<<std::endl;
  assert(_ShmSetup==1);
  assert(_ShmAlloc==0);

  //////////////////////////////////////////////////////////////////////////////////////////////////////////
  // allocate the shared windows for our group
  //////////////////////////////////////////////////////////////////////////////////////////////////////////
  MPI_Barrier(WorldShmComm);
  WorldShmCommBufs.resize(WorldShmSize);
  std::vector<int> shmids(WorldShmSize);

  if ( WorldShmRank == 0 ) {
    for(int r=0;r<WorldShmSize;r++){
      size_t size = bytes;
      key_t key   = IPC_PRIVATE;
      int flags = IPC_CREAT | SHM_R | SHM_W; // shadows the function's flags parameter
#ifdef SHM_HUGETLB
      if (Hugepages) flags|=SHM_HUGETLB;
#endif
      if ((shmids[r]= shmget(key,size, flags)) ==-1) {
        int errsv = errno;
        printf("Errno %d\n",errsv);
        printf("key   %d\n",key);
        printf("size  %zu\n",size);
        printf("flags %d\n",flags);
        perror("shmget");
        exit(1);
      }
    }
  }
  MPI_Barrier(WorldShmComm);
  MPI_Bcast(&shmids[0],WorldShmSize*sizeof(int),MPI_BYTE,0,WorldShmComm);
  MPI_Barrier(WorldShmComm);

  for(int r=0;r<WorldShmSize;r++){
    WorldShmCommBufs[r] = (uint64_t *)shmat(shmids[r], NULL,0);
    if (WorldShmCommBufs[r] == (uint64_t *)-1) {
      perror("Shared memory attach failure");
      shmctl(shmids[r], IPC_RMID, NULL);
      exit(2);
    }
  }
  MPI_Barrier(WorldShmComm);
  ///////////////////////////////////
  // Mark for clean up
  ///////////////////////////////////
  for(int r=0;r<WorldShmSize;r++){
    shmctl(shmids[r], IPC_RMID,(struct shmid_ds *)NULL);
  }
  MPI_Barrier(WorldShmComm);

  _ShmAlloc=1;
  _ShmAllocBytes  = bytes;
}
#endif

////////////////////////////////////////////////////////////////////////////////////////////
 | 
			
		||||
// Hugetlbfs mapping intended
 | 
			
		||||
////////////////////////////////////////////////////////////////////////////////////////////
 | 
			
		||||
#ifdef GRID_MPI3_SHMMMAP
 | 
			
		||||
void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
 | 
			
		||||
{
 | 
			
		||||
  std::cout << "SharedMemoryAllocate "<< bytes<< " MMAP implementation "<< GRID_SHM_PATH <<std::endl;
 | 
			
		||||
  assert(_ShmSetup==1);
 | 
			
		||||
  assert(_ShmAlloc==0);
 | 
			
		||||
  //////////////////////////////////////////////////////////////////////////////////////////////////////////
 | 
			
		||||
  // allocate the shared windows for our group
 | 
			
		||||
  //////////////////////////////////////////////////////////////////////////////////////////////////////////
 | 
			
		||||
  MPI_Barrier(WorldShmComm);
 | 
			
		||||
  WorldShmCommBufs.resize(WorldShmSize);
 | 
			
		||||
  
 | 
			
		||||
  ////////////////////////////////////////////////////////////////////////////////////////////
 | 
			
		||||
  // Hugetlbfs and others map filesystems as mappable huge pages
 | 
			
		||||
  ////////////////////////////////////////////////////////////////////////////////////////////
 | 
			
		||||
  char shm_name [NAME_MAX];
 | 
			
		||||
  for(int r=0;r<WorldShmSize;r++){
 | 
			
		||||
    
 | 
			
		||||
    sprintf(shm_name,GRID_SHM_PATH "/Grid_mpi3_shm_%d_%d",WorldNode,r);
 | 
			
		||||
    int fd=open(shm_name,O_RDWR|O_CREAT,0666);
 | 
			
		||||
    if ( fd == -1) { 
 | 
			
		||||
      printf("open %s failed\n",shm_name);
 | 
			
		||||
      perror("open hugetlbfs");
 | 
			
		||||
      exit(0);
 | 
			
		||||
    }
 | 
			
		||||
    int mmap_flag = MAP_SHARED ;
 | 
			
		||||
#ifdef MAP_POPULATE    
 | 
			
		||||
    mmap_flag|=MAP_POPULATE;
 | 
			
		||||
#endif
 | 
			
		||||
#ifdef MAP_HUGETLB
 | 
			
		||||
    if ( flags ) mmap_flag |= MAP_HUGETLB;
 | 
			
		||||
#endif
 | 
			
		||||
    void *ptr = (void *) mmap(NULL, bytes, PROT_READ | PROT_WRITE, mmap_flag,fd, 0); 
 | 
			
		||||
    if ( ptr == (void *)MAP_FAILED ) {    
 | 
			
		||||
      printf("mmap %s failed\n",shm_name);
 | 
			
		||||
      perror("failed mmap");      assert(0);    
 | 
			
		||||
    }
 | 
			
		||||
    assert(((uint64_t)ptr&0x3F)==0);
 | 
			
		||||
    close(fd);
 | 
			
		||||
    WorldShmCommBufs[r] =ptr;
 | 
			
		||||
    //    std::cout << "Set WorldShmCommBufs["<<r<<"]="<<ptr<< "("<< bytes<< "bytes)"<<std::endl;
 | 
			
		||||
  }
 | 
			
		||||
  _ShmAlloc=1;
 | 
			
		||||
  _ShmAllocBytes  = bytes;
 | 
			
		||||
};
 | 
			
		||||
#endif // MMAP
 | 
			
		||||
 | 
			
#ifdef GRID_MPI3_SHM_NONE
void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
{
  std::cout << "SharedMemoryAllocate "<< bytes<< " MMAP anonymous implementation "<<std::endl;
  assert(_ShmSetup==1);
  assert(_ShmAlloc==0);
  //////////////////////////////////////////////////////////////////////////////////////////////////////////
  // allocate the shared windows for our group
  //////////////////////////////////////////////////////////////////////////////////////////////////////////
  MPI_Barrier(WorldShmComm);
  WorldShmCommBufs.resize(WorldShmSize);

  ////////////////////////////////////////////////////////////////////////////////////////////
  // Anonymous mapping; single rank per node, no backing file
  ////////////////////////////////////////////////////////////////////////////////////////////
  assert(WorldShmSize == 1);
  for(int r=0;r<WorldShmSize;r++){

    int mmap_flag = MAP_SHARED |MAP_ANONYMOUS ;
#ifdef MAP_POPULATE
    mmap_flag|=MAP_POPULATE;
#endif
#ifdef MAP_HUGETLB
    if ( flags ) mmap_flag |= MAP_HUGETLB;
#endif
    void *ptr = (void *) mmap(NULL, bytes, PROT_READ | PROT_WRITE, mmap_flag,-1, 0);
    if ( ptr == (void *)MAP_FAILED ) {
      printf("anonymous mmap failed\n"); // no file name here; original printed an uninitialised buffer
      perror("failed mmap");
      assert(0);
    }
    assert(((uint64_t)ptr&0x3F)==0);
    WorldShmCommBufs[r] =ptr;
    //    std::cout << "Set WorldShmCommBufs["<<r<<"]="<<ptr<< "("<< bytes<< "bytes)"<<std::endl;
  }
  _ShmAlloc=1;
  _ShmAllocBytes  = bytes;
};
#endif // MMAP
			
#ifdef GRID_MPI3_SHMOPEN
////////////////////////////////////////////////////////////////////////////////////////////
// POSIX SHMOPEN ; as far as I know Linux does not allow EXPLICIT HugePages with this case
// tmpfs (Larry Meadows says) does not support explicit huge pages, and this is used for
// the posix shm virtual file system
////////////////////////////////////////////////////////////////////////////////////////////
void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
{
  std::cout << "SharedMemoryAllocate "<< bytes<< " SHMOPEN implementation "<<std::endl;
  assert(_ShmSetup==1);
  assert(_ShmAlloc==0);
  MPI_Barrier(WorldShmComm);
  WorldShmCommBufs.resize(WorldShmSize);

  char shm_name [NAME_MAX];
  if ( WorldShmRank == 0 ) {
    for(int r=0;r<WorldShmSize;r++){

      size_t size = bytes;

      struct passwd *pw = getpwuid (getuid());
      sprintf(shm_name,"/Grid_%s_mpi3_shm_%d_%d",pw->pw_name,WorldNode,r);

      shm_unlink(shm_name);
      int fd=shm_open(shm_name,O_RDWR|O_CREAT,0666);
      if ( fd < 0 ) {	perror("failed shm_open");	assert(0);      }
      if ( ftruncate(fd, size) != 0 ) { perror("failed ftruncate"); assert(0); }

      int mmap_flag = MAP_SHARED;
#ifdef MAP_POPULATE
      mmap_flag |= MAP_POPULATE;
#endif
#ifdef MAP_HUGETLB
      if (flags) mmap_flag |= MAP_HUGETLB;
#endif
      void * ptr =  mmap(NULL,size, PROT_READ | PROT_WRITE, mmap_flag, fd, 0);

      //      std::cout << "Set WorldShmCommBufs["<<r<<"]="<<ptr<< "("<< size<< "bytes)"<<std::endl;
      if ( ptr == (void * )MAP_FAILED ) {
	perror("failed mmap");
	assert(0);
      }
      assert(((uint64_t)ptr&0x3F)==0);

      WorldShmCommBufs[r] =ptr;
      close(fd);
    }
  }

  MPI_Barrier(WorldShmComm);

  if ( WorldShmRank != 0 ) {
    for(int r=0;r<WorldShmSize;r++){

      size_t size = bytes ;

      struct passwd *pw = getpwuid (getuid());
      sprintf(shm_name,"/Grid_%s_mpi3_shm_%d_%d",pw->pw_name,WorldNode,r);

      int fd=shm_open(shm_name,O_RDWR,0666);
      if ( fd<0 ) {	perror("failed shm_open");	assert(0);      }

      void * ptr =  mmap(NULL,size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
      if ( ptr == MAP_FAILED ) {       perror("failed mmap");      assert(0);    }
      assert(((uint64_t)ptr&0x3F)==0);
      WorldShmCommBufs[r] =ptr;

      close(fd);
    }
  }
  _ShmAlloc=1;
  _ShmAllocBytes = bytes;
}
#endif
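The create/attach handshake above in isolation: rank 0 creates and sizes each named region, a barrier orders creation before the attach, then the remaining ranks on the node open the same name read-write. A sketch under assumed names ('node' is an intra-node MPI communicator, 'bytes' a size; not Grid API):

// assumes <mpi.h>, <sys/mman.h>, <fcntl.h>, <unistd.h>
int rank; MPI_Comm_rank(node,&rank);
const char *name = "/demo_shm_region";             // illustrative name
if ( rank == 0 ) {
  int cfd = shm_open(name, O_RDWR|O_CREAT, 0666);
  if ( cfd < 0 || ftruncate(cfd,bytes) != 0 ) { perror("create"); MPI_Abort(node,1); }
  close(cfd);
}
MPI_Barrier(node);                                 // creation happens-before attach
int fd = shm_open(name, O_RDWR, 0666);
void *p = mmap(NULL, bytes, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
if ( p == MAP_FAILED ) { perror("mmap"); MPI_Abort(node,1); }
close(fd);                                         // fd no longer needed once mapped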
			
  ////////////////////////////////////////////////////////
  // Global shared functionality finished
  // Now move to per communicator functionality
  ////////////////////////////////////////////////////////
void SharedMemory::SetCommunicator(Grid_MPI_Comm comm)
{
  int rank, size;
  MPI_Comm_rank(comm,&rank);
  MPI_Comm_size(comm,&size);
  ShmRanks.resize(size);

  /////////////////////////////////////////////////////////////////////
  // Split into groups that can share memory
  /////////////////////////////////////////////////////////////////////
  MPI_Comm_split_type(comm, MPI_COMM_TYPE_SHARED, 0, MPI_INFO_NULL,&ShmComm);
  MPI_Comm_rank(ShmComm     ,&ShmRank);
  MPI_Comm_size(ShmComm     ,&ShmSize);
  ShmCommBufs.resize(ShmSize);

  //////////////////////////////////////////////////////////////////////
  // Map ShmRank to WorldShmRank and use the right buffer
  //////////////////////////////////////////////////////////////////////
  assert (GlobalSharedMemory::ShmAlloc()==1);
  heap_size = GlobalSharedMemory::ShmAllocBytes();
  for(int r=0;r<ShmSize;r++){

    uint32_t wsr = (r==ShmRank) ? GlobalSharedMemory::WorldShmRank : 0 ;

    MPI_Allreduce(MPI_IN_PLACE,&wsr,1,MPI_UINT32_T,MPI_SUM,ShmComm);

    ShmCommBufs[r] = GlobalSharedMemory::WorldShmCommBufs[wsr];
    //    std::cout << "SetCommunicator ShmCommBufs ["<< r<< "] = "<< ShmCommBufs[r]<< "  wsr = "<<wsr<<std::endl;
  }
  ShmBufferFreeAll();

  /////////////////////////////////////////////////////////////////////
  // find comm ranks in our SHM group (i.e. which ranks are on our node)
  /////////////////////////////////////////////////////////////////////
  MPI_Group FullGroup, ShmGroup;
  MPI_Comm_group (comm   , &FullGroup);
  MPI_Comm_group (ShmComm, &ShmGroup);

  std::vector<int> ranks(size);   for(int r=0;r<size;r++) ranks[r]=r;
  MPI_Group_translate_ranks (FullGroup,size,&ranks[0],ShmGroup, &ShmRanks[0]);
}
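The wsr loop above is an allgather in disguise: for slot r only rank r contributes a non-zero term, so MPI_SUM over ShmComm reproduces rank r's WorldShmRank on every rank. The same exchange in one collective, as a sketch using the names from SetCommunicator:

std::vector<uint32_t> wsr(ShmSize, 0);
wsr[ShmRank] = GlobalSharedMemory::WorldShmRank;   // my slot only, zeros elsewhere
MPI_Allreduce(MPI_IN_PLACE, wsr.data(), ShmSize, MPI_UINT32_T, MPI_SUM, ShmComm);
// wsr[r] now holds rank r's WorldShmRank on every rank of ShmComm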
			
//////////////////////////////////////////////////////////////////
// On node barrier
//////////////////////////////////////////////////////////////////
void SharedMemory::ShmBarrier(void)
{
  MPI_Barrier  (ShmComm);
}
			
//////////////////////////////////////////////////////////////////////////////////////////////////////////
// Test the shared memory is working
//////////////////////////////////////////////////////////////////////////////////////////////////////////
void SharedMemory::SharedMemoryTest(void)
{
  ShmBarrier();
  if ( ShmRank == 0 ) {
    for(int r=0;r<ShmSize;r++){
      uint64_t * check = (uint64_t *) ShmCommBufs[r];
      check[0] = GlobalSharedMemory::WorldNode;
      check[1] = r;
      check[2] = 0x5A5A5A;
    }
  }
  ShmBarrier();
  for(int r=0;r<ShmSize;r++){
    uint64_t * check = (uint64_t *) ShmCommBufs[r];

    assert(check[0]==GlobalSharedMemory::WorldNode);
    assert(check[1]==r);
    assert(check[2]==0x5A5A5A);

  }
  ShmBarrier();
}
			
void *SharedMemory::ShmBuffer(int rank)
{
  int gpeer = ShmRanks[rank];
  if (gpeer == MPI_UNDEFINED){
    return NULL;
  } else {
    return ShmCommBufs[gpeer];
  }
}
			
void *SharedMemory::ShmBufferTranslate(int rank,void * local_p)
{
  static int count =0;
  int gpeer = ShmRanks[rank];
  assert(gpeer!=ShmRank); // never send to self
  if (gpeer == MPI_UNDEFINED){
    return NULL;
  } else {
    uint64_t offset = (uint64_t)local_p - (uint64_t)ShmCommBufs[ShmRank];
    uint64_t remote = (uint64_t)ShmCommBufs[gpeer]+offset;
    return (void *) remote;
  }
}
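Usage sketch for the translation above: all windows are the same size and carved up identically, so a pointer into my window maps into a peer's window by preserving the byte offset. Assumes a SharedMemory object in scope and a ShmBufferSelf() accessor returning the base of this rank's window:

void *mine  = ShmBufferSelf();                     // base of my window (assumed accessor)
void *local = (char *)mine + 4096;                 // an object I placed in it
void *peer  = ShmBufferTranslate(rank, local);     // same offset in rank's window
// peer is NULL when 'rank' is not on this node (ShmRanks[rank]==MPI_UNDEFINED)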
			
SharedMemory::~SharedMemory()
{
  int MPI_is_finalised;  MPI_Finalized(&MPI_is_finalised);
  if ( !MPI_is_finalised ) {
    MPI_Comm_free(&ShmComm);
  }
};

}

Grid/communicator/SharedMemoryNone.cc (new file, 128 lines)
@@ -0,0 +1,128 @@
			
/*************************************************************************************

    Grid physics library, www.github.com/paboyle/Grid 

    Source file: ./lib/communicator/SharedMemory.cc

    Copyright (C) 2015

Author: Peter Boyle <paboyle@ph.ed.ac.uk>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License along
    with this program; if not, write to the Free Software Foundation, Inc.,
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

    See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/*  END LEGAL */

#include <Grid/GridCore.h>

namespace Grid { 

/*Construct from an MPI communicator*/
void GlobalSharedMemory::Init(Grid_MPI_Comm comm)
{
  assert(_ShmSetup==0);
  WorldComm = 0;
  WorldRank = 0;
  WorldSize = 1;
  WorldShmComm = 0 ;
  WorldShmRank = 0 ;
  WorldShmSize = 1 ;
  WorldNodes   = 1 ;
  WorldNode    = 0 ;
  WorldShmRanks.resize(WorldSize); WorldShmRanks[0] = 0;
  WorldShmCommBufs.resize(1);
  _ShmSetup=1;
}

void GlobalSharedMemory::OptimalCommunicator(const std::vector<int> &processors,Grid_MPI_Comm & optimal_comm)
{
  optimal_comm = WorldComm;
}
			
////////////////////////////////////////////////////////////////////////////////////////////
// Hugetlbfs mapping intended, use anonymous mmap
////////////////////////////////////////////////////////////////////////////////////////////
void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
{
  void * ShmCommBuf ; 
  assert(_ShmSetup==1);
  assert(_ShmAlloc==0);
  int mmap_flag =0;
#ifdef MAP_ANONYMOUS
  mmap_flag = mmap_flag| MAP_SHARED | MAP_ANONYMOUS;
#endif
#ifdef MAP_ANON
  mmap_flag = mmap_flag| MAP_SHARED | MAP_ANON;
#endif
#ifdef MAP_HUGETLB
  if ( flags ) mmap_flag |= MAP_HUGETLB;
#endif
  ShmCommBuf =(void *) mmap(NULL, bytes, PROT_READ | PROT_WRITE, mmap_flag, -1, 0); 
  if (ShmCommBuf == (void *)MAP_FAILED) {
    perror("mmap failed ");
    exit(EXIT_FAILURE);  
  }
#ifdef MADV_HUGEPAGE
  if (!Hugepages ) madvise(ShmCommBuf,bytes,MADV_HUGEPAGE);
#endif
  bzero(ShmCommBuf,bytes);
  WorldShmCommBufs[0] = ShmCommBuf;
  _ShmAllocBytes=bytes;
  _ShmAlloc=1;
};
			
  ////////////////////////////////////////////////////////
  // Global shared functionality finished
  // Now move to per communicator functionality
  ////////////////////////////////////////////////////////
void SharedMemory::SetCommunicator(Grid_MPI_Comm comm)
{
  assert(GlobalSharedMemory::ShmAlloc()==1);
  ShmRanks.resize(1);
  ShmCommBufs.resize(1);
  ShmRanks[0] = 0;
  ShmRank     = 0;
  ShmSize     = 1;
  //////////////////////////////////////////////////////////////////////
  // Map ShmRank to WorldShmRank and use the right buffer
  //////////////////////////////////////////////////////////////////////
  ShmCommBufs[0] = GlobalSharedMemory::WorldShmCommBufs[0];
  heap_size      = GlobalSharedMemory::ShmAllocBytes();
  ShmBufferFreeAll();
  return;
}
//////////////////////////////////////////////////////////////////
// On node barrier
//////////////////////////////////////////////////////////////////
void SharedMemory::ShmBarrier(void){ return ; }

//////////////////////////////////////////////////////////////////////////////////////////////////////////
// Test the shared memory is working
//////////////////////////////////////////////////////////////////////////////////////////////////////////
void SharedMemory::SharedMemoryTest(void) { return; }

void *SharedMemory::ShmBuffer(int rank)
{
  return NULL;
}
void *SharedMemory::ShmBufferTranslate(int rank,void * local_p)
{
  return NULL;
}
SharedMemory::~SharedMemory()
{};

}
			
@@ -45,31 +45,33 @@ Gather_plane_simple (const Lattice<vobj> &rhs,commVector<vobj> &buffer,int dimen
  int so=plane*rhs._grid->_ostride[dimension]; // base offset for start of plane 
  int e1=rhs._grid->_slice_nblock[dimension];
  int e2=rhs._grid->_slice_block[dimension];
  int ent = 0;

  static std::vector<std::pair<int,int> > table; table.resize(e1*e2);

  int stride=rhs._grid->_slice_stride[dimension];
  if ( cbmask == 0x3 ) { 
    parallel_for_nest2(int n=0;n<e1;n++){
    for(int n=0;n<e1;n++){
      for(int b=0;b<e2;b++){
	int o  = n*stride;
	int bo = n*e2;
	buffer[off+bo+b]=rhs._odata[so+o+b];
	table[ent++] = std::pair<int,int>(off+bo+b,so+o+b);
      }
    }
  } else { 
     int bo=0;
     std::vector<std::pair<int,int> > table;
     for(int n=0;n<e1;n++){
       for(int b=0;b<e2;b++){
	 int o  = n*stride;
	 int ocb=1<<rhs._grid->CheckerBoardFromOindex(o+b);
	 if ( ocb &cbmask ) {
	   table.push_back(std::pair<int,int> (bo++,o+b));
	   table[ent++]=std::pair<int,int> (off+bo++,so+o+b);
	 }
       }
     }
     parallel_for(int i=0;i<table.size();i++){
       buffer[off+table[i].first]=rhs._odata[so+table[i].second];
  }
  parallel_for(int i=0;i<ent;i++){
    buffer[table[i].first]=rhs._odata[table[i].second];
  }
}
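The hunk above swaps the fused gather loops for a two-phase pattern: first build a (destination,source) index table, then apply it in one flat parallel loop with no nested bounds or checkerboard branch in the hot path. A generic sketch of the pattern, reusing the names from the hunk (e1, e2, off, so, stride are assumed from that context):

std::vector<std::pair<int,int> > table;            // (dst,src) pairs, built once
for(int n=0;n<e1;n++){
  for(int b=0;b<e2;b++){
    table.push_back(std::make_pair(off + n*e2 + b, so + n*stride + b));
  }
}
// flat copy loop: trivially parallel, branch-free in the hot path
for(std::size_t i=0;i<table.size();i++){
  buffer[table[i].first] = rhs._odata[table[i].second];
}

The same table/apply split recurs in the scatter and copy-plane hunks that follow.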
			
@@ -141,31 +143,35 @@ template<class vobj> void Scatter_plane_simple (Lattice<vobj> &rhs,commVector<vo
  int e2=rhs._grid->_slice_block[dimension];
  int stride=rhs._grid->_slice_stride[dimension];

  static std::vector<std::pair<int,int> > table; table.resize(e1*e2);
  int ent    =0;

  if ( cbmask ==0x3 ) {
    parallel_for_nest2(int n=0;n<e1;n++){

    for(int n=0;n<e1;n++){
      for(int b=0;b<e2;b++){
	int o   =n*rhs._grid->_slice_stride[dimension];
	int bo  =n*rhs._grid->_slice_block[dimension];
	rhs._odata[so+o+b]=buffer[bo+b];
	table[ent++] = std::pair<int,int>(so+o+b,bo+b);
      }
    }

  } else { 
    std::vector<std::pair<int,int> > table;
    int bo=0;
    for(int n=0;n<e1;n++){
      for(int b=0;b<e2;b++){
	int o   =n*rhs._grid->_slice_stride[dimension];
	int ocb=1<<rhs._grid->CheckerBoardFromOindex(o+b);// Could easily be a table lookup
	if ( ocb & cbmask ) {
	  table.push_back(std::pair<int,int> (so+o+b,bo++));
	  table[ent++]=std::pair<int,int> (so+o+b,bo++);
	}
      }
    }
    parallel_for(int i=0;i<table.size();i++){
       //       std::cout << "Rcv"<< table[i].first << " " << table[i].second << " " <<buffer[table[i].second]<<std::endl;
  }

  parallel_for(int i=0;i<ent;i++){
    rhs._odata[table[i].first]=buffer[table[i].second];
  }
  }
}
			
//////////////////////////////////////////////////////
@@ -228,29 +234,32 @@ template<class vobj> void Copy_plane(Lattice<vobj>& lhs,const Lattice<vobj> &rhs
  int e1=rhs._grid->_slice_nblock[dimension]; // clearly loop invariant for icpc
  int e2=rhs._grid->_slice_block[dimension];
  int stride = rhs._grid->_slice_stride[dimension];
  if(cbmask == 0x3 ){
    parallel_for_nest2(int n=0;n<e1;n++){
      for(int b=0;b<e2;b++){
  static std::vector<std::pair<int,int> > table; table.resize(e1*e2);
  int ent=0;

  if(cbmask == 0x3 ){
    for(int n=0;n<e1;n++){
      for(int b=0;b<e2;b++){
        int o =n*stride+b;
  	//lhs._odata[lo+o]=rhs._odata[ro+o];
	vstream(lhs._odata[lo+o],rhs._odata[ro+o]);
	table[ent++] = std::pair<int,int>(lo+o,ro+o);
      }
    }
  } else { 
    parallel_for_nest2(int n=0;n<e1;n++){
    for(int n=0;n<e1;n++){
      for(int b=0;b<e2;b++){
 
        int o =n*stride+b;
        int ocb=1<<lhs._grid->CheckerBoardFromOindex(o);
        if ( ocb&cbmask ) {
  	//lhs._odata[lo+o]=rhs._odata[ro+o];
	  vstream(lhs._odata[lo+o],rhs._odata[ro+o]);
	  table[ent++] = std::pair<int,int>(lo+o,ro+o);
	}
      }
    }
  }

  parallel_for(int i=0;i<ent;i++){
    lhs._odata[table[i].first]=rhs._odata[table[i].second];
  }

}
			
template<class vobj> void Copy_plane_permute(Lattice<vobj>& lhs,const Lattice<vobj> &rhs, int dimension,int lplane,int rplane,int cbmask,int permute_type)
@@ -269,16 +278,28 @@ template<class vobj> void Copy_plane_permute(Lattice<vobj>& lhs,const Lattice<vo
  int e2=rhs._grid->_slice_block [dimension];
  int stride = rhs._grid->_slice_stride[dimension];

  parallel_for_nest2(int n=0;n<e1;n++){
  for(int b=0;b<e2;b++){
  static std::vector<std::pair<int,int> > table;  table.resize(e1*e2);
  int ent=0;

  double t_tab,t_perm;
  if ( cbmask == 0x3 ) {
    for(int n=0;n<e1;n++){
    for(int b=0;b<e2;b++){
      int o  =n*stride;
      table[ent++] = std::pair<int,int>(lo+o+b,ro+o+b);
    }}
  } else {
    for(int n=0;n<e1;n++){
    for(int b=0;b<e2;b++){
      int o  =n*stride;
      int ocb=1<<lhs._grid->CheckerBoardFromOindex(o+b);
      if ( ocb&cbmask ) {
	permute(lhs._odata[lo+o+b],rhs._odata[ro+o+b],permute_type);
      if ( ocb&cbmask ) table[ent++] = std::pair<int,int>(lo+o+b,ro+o+b);
    }}
  }

  }}
  parallel_for(int i=0;i<ent;i++){
    permute(lhs._odata[table[i].first],rhs._odata[table[i].second],permute_type);
  }
}

//////////////////////////////////////////////////////
			
@@ -291,6 +312,8 @@ template<class vobj> void Cshift_local(Lattice<vobj>& ret,const Lattice<vobj> &r
  sshift[0] = rhs._grid->CheckerBoardShiftForCB(rhs.checkerboard,dimension,shift,Even);
  sshift[1] = rhs._grid->CheckerBoardShiftForCB(rhs.checkerboard,dimension,shift,Odd);

  double t_local;

  if ( sshift[0] == sshift[1] ) {
    Cshift_local(ret,rhs,dimension,shift,0x3);
  } else {
@@ -299,7 +322,7 @@ template<class vobj> void Cshift_local(Lattice<vobj>& ret,const Lattice<vobj> &r
  }
}

template<class vobj> Lattice<vobj> Cshift_local(Lattice<vobj> &ret,const Lattice<vobj> &rhs,int dimension,int shift,int cbmask)
template<class vobj> void Cshift_local(Lattice<vobj> &ret,const Lattice<vobj> &rhs,int dimension,int shift,int cbmask)
{
  GridBase *grid = rhs._grid;
  int fd = grid->_fdimensions[dimension];
@@ -326,10 +349,6 @@ template<class vobj> Lattice<vobj> Cshift_local(Lattice<vobj> &ret,const Lattice
    int sshift = grid->CheckerBoardShiftForCB(rhs.checkerboard,dimension,shift,cb);
    int sx     = (x+sshift)%rd;
    
    // FIXME : This must change where we have a 
    // Rotate slice.
    
    // Document how this works ; why didn't I do this when I first wrote it...
    // wrap is whether sshift > rd.
    //  num is sshift mod rd.
    // 
@@ -366,9 +385,7 @@ template<class vobj> Lattice<vobj> Cshift_local(Lattice<vobj> &ret,const Lattice
    if ( permute_slice ) Copy_plane_permute(ret,rhs,dimension,x,sx,cbmask,permute_type_dist);
    else                 Copy_plane(ret,rhs,dimension,x,sx,cbmask); 
  
  
  }
  return ret;
}
}
#endif
			
@@ -54,13 +54,13 @@ template<class vobj> Lattice<vobj> Cshift(const Lattice<vobj> &rhs,int dimension

  if ( !comm_dim ) {
    //    std::cout << "Cshift_local" <<std::endl;
    //std::cout << "CSHIFT: Cshift_local" <<std::endl;
    Cshift_local(ret,rhs,dimension,shift); // Handles checkerboarding
  } else if ( splice_dim ) {
    //    std::cout << "Cshift_comms_simd" <<std::endl;
    //std::cout << "CSHIFT: Cshift_comms_simd call - splice_dim = " << splice_dim << " shift " << shift << " dimension = " << dimension << std::endl;
    Cshift_comms_simd(ret,rhs,dimension,shift);
  } else {
    //    std::cout << "Cshift_comms" <<std::endl;
    //std::cout << "CSHIFT: Cshift_comms" <<std::endl;
    Cshift_comms(ret,rhs,dimension,shift);
  }
  return ret;
@@ -91,9 +91,12 @@ template<class vobj> void Cshift_comms_simd(Lattice<vobj>& ret,const Lattice<vob
  sshift[0] = rhs._grid->CheckerBoardShiftForCB(rhs.checkerboard,dimension,shift,Even);
  sshift[1] = rhs._grid->CheckerBoardShiftForCB(rhs.checkerboard,dimension,shift,Odd);

  //std::cout << "Cshift_comms_simd dim "<<dimension<<"cb "<<rhs.checkerboard<<"shift "<<shift<<" sshift " << sshift[0]<<" "<<sshift[1]<<std::endl;
  if ( sshift[0] == sshift[1] ) {
    //std::cout << "Single pass Cshift_comms" <<std::endl;
    Cshift_comms_simd(ret,rhs,dimension,shift,0x3);
  } else {
    //std::cout << "Two pass Cshift_comms" <<std::endl;
    Cshift_comms_simd(ret,rhs,dimension,shift,0x1);// if checkerboard is unfavourable take two passes
    Cshift_comms_simd(ret,rhs,dimension,shift,0x2);// both with block stride loop iteration
  }
@@ -175,6 +178,10 @@ template<class vobj> void  Cshift_comms_simd(Lattice<vobj> &ret,const Lattice<vo
  int simd_layout     = grid->_simd_layout[dimension];
  int comm_dim        = grid->_processors[dimension] >1 ;

  //std::cout << "Cshift_comms_simd dim "<< dimension << " fd "<<fd<<" rd "<<rd
  //    << " ld "<<ld<<" pd " << pd<<" simd_layout "<<simd_layout 
  //    << " comm_dim " << comm_dim << " cbmask " << cbmask <<std::endl;

  assert(comm_dim==1);
  assert(simd_layout==2);
  assert(shift>=0);

Grid/json/json.hpp (new file, 18920 lines)
(File diff suppressed because it is too large)
@@ -244,19 +244,11 @@ namespace Grid {

  template<class sobj,class vobj> strong_inline
  RealD axpy_norm(Lattice<vobj> &ret,sobj a,const Lattice<vobj> &x,const Lattice<vobj> &y){
    ret.checkerboard = x.checkerboard;
    conformable(ret,x);
    conformable(x,y);
    axpy(ret,a,x,y);
    return norm2(ret);
    return axpy_norm_fast(ret,a,x,y);
  }
  template<class sobj,class vobj> strong_inline
  RealD axpby_norm(Lattice<vobj> &ret,sobj a,sobj b,const Lattice<vobj> &x,const Lattice<vobj> &y){
    ret.checkerboard = x.checkerboard;
    conformable(ret,x);
    conformable(x,y);
    axpby(ret,a,b,x,y);
    return norm2(ret); // FIXME implement parallel norm in ss loop
    return axpby_norm_fast(ret,a,b,x,y);
  }

}
			
@@ -257,7 +257,40 @@ public:
    }  	
  }

  Lattice(Lattice&& r){ // move constructor
    _grid = r._grid;
    checkerboard = r.checkerboard;
    _odata=std::move(r._odata);
  }
  
  inline Lattice<vobj> & operator = (Lattice<vobj> && r)
  {
    _grid        = r._grid;
    checkerboard = r.checkerboard;
    _odata       =std::move(r._odata);
    return *this;
  }
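With the move operations above, handing a Lattice to another variable steals the _odata storage instead of copying oSites() elements. A usage sketch (make_field is a hypothetical factory returning a Lattice by value):

Lattice<vobj> tmp(grid);
// ... fill tmp ...
Lattice<vobj> result = std::move(tmp);   // move construct: storage pointer swap, no site loop
result = make_field(grid);               // move assign from an rvalue, same cost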
			
  inline Lattice<vobj> & operator = (const Lattice<vobj> & r){
    _grid        = r._grid;
    checkerboard = r.checkerboard;
    _odata.resize(_grid->oSites());// essential
    
    parallel_for(int ss=0;ss<_grid->oSites();ss++){
      _odata[ss]=r._odata[ss];
    }  	
    return *this;
  }

  template<class robj> strong_inline Lattice<vobj> & operator = (const Lattice<robj> & r){
    this->checkerboard = r.checkerboard;
    conformable(*this,r);
    
    parallel_for(int ss=0;ss<_grid->oSites();ss++){
      this->_odata[ss]=r._odata[ss];
    }
    return *this;
  }

  virtual ~Lattice(void) = default;
    
@@ -277,15 +310,6 @@ public:
    return *this;
  }
  
  template<class robj> strong_inline Lattice<vobj> & operator = (const Lattice<robj> & r){
    this->checkerboard = r.checkerboard;
    conformable(*this,r);
    
    parallel_for(int ss=0;ss<_grid->oSites();ss++){
      this->_odata[ss]=r._odata[ss];
    }
    return *this;
  }
  
  // *=,+=,-= operators inherit behaviour from the corresponding */+/- operations
  template<class T> strong_inline Lattice<vobj> &operator *=(const T &r) {
			
@@ -179,7 +179,7 @@ namespace Grid {
      return ret;
    }

#define DECLARE_RELATIONAL(op,functor) \
#define DECLARE_RELATIONAL_EQ(op,functor) \
  template<class vsimd,IfSimd<vsimd> = 0>\
    inline vInteger operator op (const vsimd & lhs, const vsimd & rhs)\
    {\
@@ -198,11 +198,6 @@ namespace Grid {
      typedef typename vsimd::scalar_type scalar;\
      return Comparison(functor<scalar,scalar>(),lhs,rhs);\
    }\
  template<class vsimd>\
    inline vInteger operator op(const iScalar<vsimd> &lhs,const iScalar<vsimd> &rhs)\
    {									\
      return lhs._internal op rhs._internal;				\
    }									\
  template<class vsimd>\
    inline vInteger operator op(const iScalar<vsimd> &lhs,const typename vsimd::scalar_type &rhs) \
    {									\
@@ -212,14 +207,21 @@ namespace Grid {
    inline vInteger operator op(const typename vsimd::scalar_type &lhs,const iScalar<vsimd> &rhs) \
    {									\
      return lhs op rhs._internal;					\
    }									
    }									\

#define DECLARE_RELATIONAL(op,functor) \
  DECLARE_RELATIONAL_EQ(op,functor)    \
  template<class vsimd>\
    inline vInteger operator op(const iScalar<vsimd> &lhs,const iScalar<vsimd> &rhs)\
    {									\
      return lhs._internal op rhs._internal;				\
    }									

DECLARE_RELATIONAL(<,slt);
DECLARE_RELATIONAL(<=,sle);
DECLARE_RELATIONAL(>,sgt);
DECLARE_RELATIONAL(>=,sge);
DECLARE_RELATIONAL(==,seq);
DECLARE_RELATIONAL_EQ(==,seq);
DECLARE_RELATIONAL(!=,sne);

#undef DECLARE_RELATIONAL
			
@@ -52,23 +52,5 @@ namespace Grid {
      }
    };

    // LatticeCoordinate();
    // FIXME for debug; deprecate this; made obsolete by 
    template<class vobj> void lex_sites(Lattice<vobj> &l){
      Real *v_ptr = (Real *)&l._odata[0];
      size_t o_len = l._grid->oSites();
      size_t v_len = sizeof(vobj)/sizeof(vRealF);
      size_t vec_len = vRealF::Nsimd();

      for(int i=0;i<o_len;i++){
	for(int j=0;j<v_len;j++){
          for(int vv=0;vv<vec_len;vv+=2){
	    v_ptr[i*v_len*vec_len+j*vec_len+vv  ]= i+vv*500;
	    v_ptr[i*v_len*vec_len+j*vec_len+vv+1]= i+vv*500;
	  }
	}}
    }


}
#endif
			
@@ -33,7 +33,7 @@ namespace Grid {
  // Deterministic Reduction operations
  ////////////////////////////////////////////////////////////////////////////////////////////////////
template<class vobj> inline RealD norm2(const Lattice<vobj> &arg){
  ComplexD nrm = innerProduct(arg,arg);
  auto nrm = innerProduct(arg,arg);
  return std::real(nrm); 
}
			
@@ -43,32 +43,85 @@ inline ComplexD innerProduct(const Lattice<vobj> &left,const Lattice<vobj> &righ
{
  typedef typename vobj::scalar_type scalar_type;
  typedef typename vobj::vector_typeD vector_type;
  scalar_type  nrm;
  
  GridBase *grid = left._grid;
  const int pad = 8;

  std::vector<vector_type,alignedAllocator<vector_type> > sumarray(grid->SumArraySize());
  ComplexD  inner;
  Vector<ComplexD> sumarray(grid->SumArraySize()*pad);

  parallel_for(int thr=0;thr<grid->SumArraySize();thr++){
    int nwork, mywork, myoff;
    GridThread::GetWork(left._grid->oSites(),thr,mywork,myoff);
    
    decltype(innerProductD(left._odata[0],right._odata[0])) vnrm=zero; // private to thread; sub summation
    decltype(innerProductD(left._odata[0],right._odata[0])) vinner=zero; // private to thread; sub summation
    for(int ss=myoff;ss<mywork+myoff; ss++){
      vnrm = vnrm + innerProductD(left._odata[ss],right._odata[ss]);
      vinner = vinner + innerProductD(left._odata[ss],right._odata[ss]);
    }
    sumarray[thr]=TensorRemove(vnrm) ;
    // All threads sum across SIMD; reduce serial work at end
    // one write per cacheline with streaming store
    ComplexD tmp = Reduce(TensorRemove(vinner)) ;
    vstream(sumarray[thr*pad],tmp);
  }
  
  vector_type vvnrm; vvnrm=zero;  // sum across threads
  inner=0.0;
  for(int i=0;i<grid->SumArraySize();i++){
    vvnrm = vvnrm+sumarray[i];
    inner = inner+sumarray[i*pad];
  } 
  nrm = Reduce(vvnrm);// sum across simd
  right._grid->GlobalSum(nrm);
  right._grid->GlobalSum(inner);
  return inner;
}
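Why the pad stride above: thread thr writes its partial sum only to sumarray[thr*pad], so neighbouring threads never touch the same cache line (8 ComplexD of 16 bytes each is a 128-byte stride) and the vstream store does not bounce line ownership between cores. The idiom in isolation, a sketch with an assumed thread count nthread:

const int pad = 8;                                 // 8 x sizeof(ComplexD) = 128B stride
std::vector<ComplexD> partial(nthread*pad, 0.0);
// in parallel: thread 'thr' writes only partial[thr*pad]
ComplexD total = 0.0;                              // cheap serial tail over strided slots
for(int t=0;t<nthread;t++) total += partial[t*pad];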
			
/////////////////////////
// Fast axpby_norm
// z = a x + b y
// return norm z
/////////////////////////
template<class sobj,class vobj> strong_inline RealD 
axpy_norm_fast(Lattice<vobj> &z,sobj a,const Lattice<vobj> &x,const Lattice<vobj> &y) 
{
  sobj one(1.0);
  return axpby_norm_fast(z,a,one,x,y);
}

template<class sobj,class vobj> strong_inline RealD 
axpby_norm_fast(Lattice<vobj> &z,sobj a,sobj b,const Lattice<vobj> &x,const Lattice<vobj> &y) 
{
  const int pad = 8;
  z.checkerboard = x.checkerboard;
  conformable(z,x);
  conformable(x,y);

  typedef typename vobj::scalar_type scalar_type;
  typedef typename vobj::vector_typeD vector_type;
  RealD  nrm;
  
  GridBase *grid = x._grid;
  
  Vector<RealD> sumarray(grid->SumArraySize()*pad);
  
  parallel_for(int thr=0;thr<grid->SumArraySize();thr++){
    int nwork, mywork, myoff;
    GridThread::GetWork(x._grid->oSites(),thr,mywork,myoff);
    
    // private to thread; sub summation
    decltype(innerProductD(z._odata[0],z._odata[0])) vnrm=zero; 
    for(int ss=myoff;ss<mywork+myoff; ss++){
      vobj tmp = a*x._odata[ss]+b*y._odata[ss];
      vnrm = vnrm + innerProductD(tmp,tmp);
      vstream(z._odata[ss],tmp);
    }
    vstream(sumarray[thr*pad],real(Reduce(TensorRemove(vnrm)))) ;
  }
  
  nrm = 0.0; // sum across threads; linear in thread count but fast
  for(int i=0;i<grid->SumArraySize();i++){
    nrm = nrm+sumarray[i*pad];
  } 
  z._grid->GlobalSum(nrm);
  return nrm; 
}
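Usage sketch: the fused kernel above replaces the two-pass sequence axpby(z,a,b,x,y) followed by norm2(z), which streams z through memory twice, with a single sweep that stores z and accumulates |z|^2 as it goes:

RealD n = axpby_norm_fast(z, a, b, x, y);          // z = a*x + b*y, returns norm2(z) in one pass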
			
template<class Op,class T1>
inline auto sum(const LatticeUnaryExpression<Op,T1> & expr)
  ->typename decltype(expr.first.func(eval(0,std::get<0>(expr.second))))::scalar_object
@@ -221,6 +274,115 @@ template<class vobj> inline void sliceSum(const Lattice<vobj> &Data,std::vector<
  }
}
			
template<class vobj>
static void mySliceInnerProductVector( std::vector<ComplexD> & result, const Lattice<vobj> &lhs,const Lattice<vobj> &rhs,int orthogdim)
{
  // std::cout << GridLogMessage << "Start mySliceInnerProductVector" << std::endl;

  typedef typename vobj::scalar_type scalar_type;
  std::vector<scalar_type> lsSum;
  localSliceInnerProductVector(result, lhs, rhs, lsSum, orthogdim);
  globalSliceInnerProductVector(result, lhs, lsSum, orthogdim);
  // std::cout << GridLogMessage << "End mySliceInnerProductVector" << std::endl;
}

template <class vobj>
static void localSliceInnerProductVector(std::vector<ComplexD> &result, const Lattice<vobj> &lhs, const Lattice<vobj> &rhs, std::vector<typename vobj::scalar_type> &lsSum, int orthogdim)
{
  // std::cout << GridLogMessage << "Start prep" << std::endl;
  typedef typename vobj::vector_type   vector_type;
  typedef typename vobj::scalar_type   scalar_type;
  GridBase  *grid = lhs._grid;
  assert(grid!=NULL);
  conformable(grid,rhs._grid);

  const int    Nd = grid->_ndimension;
  const int Nsimd = grid->Nsimd();

  assert(orthogdim >= 0);
  assert(orthogdim < Nd);

  int fd=grid->_fdimensions[orthogdim];
  int ld=grid->_ldimensions[orthogdim];
  int rd=grid->_rdimensions[orthogdim];
  // std::cout << GridLogMessage << "Start alloc" << std::endl;

  std::vector<vector_type,alignedAllocator<vector_type> > lvSum(rd); // will locally sum vectors first
  lsSum.resize(ld,scalar_type(0.0));                    // sum across these down to scalars
  std::vector<iScalar<scalar_type>> extracted(Nsimd);   // splitting the SIMD  
  // std::cout << GridLogMessage << "End alloc" << std::endl;

  result.resize(fd); // And then global sum to return the same vector to every node for IO to file
  for(int r=0;r<rd;r++){
    lvSum[r]=zero;
  }

  int e1=    grid->_slice_nblock[orthogdim];
  int e2=    grid->_slice_block [orthogdim];
  int stride=grid->_slice_stride[orthogdim];
  // std::cout << GridLogMessage << "End prep" << std::endl;
  // std::cout << GridLogMessage << "Start parallel inner product, _rd = " << rd << std::endl;
  parallel_for(int r=0;r<rd;r++)
  {
    vector_type vv; // thread private; was declared outside the parallel loop, a data race

    int so=r*grid->_ostride[orthogdim]; // base offset for start of plane 

    for(int n=0;n<e1;n++){
      for(int b=0;b<e2;b++){
        int ss = so + n * stride + b;
        vv = TensorRemove(innerProduct(lhs._odata[ss], rhs._odata[ss]));
        lvSum[r] = lvSum[r] + vv;
      }
    }
  }
  // std::cout << GridLogMessage << "End parallel inner product" << std::endl;

  // Sum across simd lanes in the plane, breaking out orthog dir.
  std::vector<int> icoor(Nd);
  for(int rt=0;rt<rd;rt++){

    iScalar<vector_type> temp; 
    temp._internal = lvSum[rt];
    extract(temp,extracted);

    for(int idx=0;idx<Nsimd;idx++){

      grid->iCoorFromIindex(icoor,idx);

      int ldx =rt+icoor[orthogdim]*rd;

      lsSum[ldx]=lsSum[ldx]+extracted[idx]._internal;

    }
  }
  // std::cout << GridLogMessage << "End sum over simd lanes" << std::endl;
}
			
template <class vobj>
static void globalSliceInnerProductVector(std::vector<ComplexD> &result, const Lattice<vobj> &lhs, std::vector<typename vobj::scalar_type> &lsSum, int orthogdim)
{
  typedef typename vobj::scalar_type scalar_type;
  GridBase *grid = lhs._grid;
  int fd = result.size();
  int ld = lsSum.size();
  // sum over nodes.
  std::vector<scalar_type> gsum;
  gsum.resize(fd, scalar_type(0.0));
  // std::cout << GridLogMessage << "Start of gsum[t] creation:" << std::endl;
  for(int t=0;t<fd;t++){
    int pt = t/ld; // processor plane
    int lt = t%ld;
    if ( pt == grid->_processor_coor[orthogdim] ) {
      gsum[t]=lsSum[lt];
    }
  }
  // std::cout << GridLogMessage << "End of gsum[t] creation:" << std::endl;
  // std::cout << GridLogMessage << "Start of GlobalSumVector:" << std::endl;
  grid->GlobalSumVector(&gsum[0], fd);
  // std::cout << GridLogMessage << "End of GlobalSumVector:" << std::endl;

  result = gsum;
}
template<class vobj>
static void sliceInnerProductVector( std::vector<ComplexD> & result, const Lattice<vobj> &lhs,const Lattice<vobj> &rhs,int orthogdim) 
{
@@ -544,7 +706,6 @@ static void sliceInnerProductMatrix(  Eigen::MatrixXcd &mat, const Lattice<vobj>
      for(int i=0;i<Nblock;i++){
      for(int j=0;j<Nblock;j++){
	auto tmp = innerProduct(Left[i],Right[j]);
	//	vector_typeD rtmp = TensorRemove(tmp);
	auto rtmp = TensorRemove(tmp);
	mat_thread(i,j) += Reduce(rtmp);
      }}
			
@@ -77,9 +77,6 @@ namespace Grid {

  
// merge of April 11 2017
//<<<<<<< HEAD


  // this function is necessary for the LS vectorised field
  inline int RNGfillable_general(GridBase *coarse,GridBase *fine)
  {
@@ -92,7 +89,6 @@ namespace Grid {
    for(int d=0;d<lowerdims;d++) assert(fine->_processors[d]==1);
    for(int d=0;d<rngdims;d++) assert(coarse->_processors[d] == fine->_processors[d+lowerdims]);


    // then divide the number of local sites
    // check that the total numbers of simd lanes agree, means the iSites are the same
    assert(fine->Nsimd() == coarse->Nsimd());
@@ -103,27 +99,6 @@ namespace Grid {
    return fine->lSites() / coarse->lSites();
  }
  
  /*
  // Wrap seed_seq to give common interface with random_device
  class fixedSeed {
  public:
    typedef std::seed_seq::result_type result_type;
    std::seed_seq src;
    
    fixedSeed(const std::vector<int> &seeds) : src(seeds.begin(),seeds.end()) {};

    result_type operator () (void){
      std::vector<result_type> list(1);
      src.generate(list.begin(),list.end());
      return list[0];
    }

  };

=======
>>>>>>> develop
  */
  
  // real scalars are one component
  template<class scalar,class distribution,class generator> 
  void fillScalar(scalar &s,distribution &dist,generator & gen)
			
@@ -171,7 +146,7 @@ namespace Grid {
    // support for parallel init
    ///////////////////////
#ifdef RNG_FAST_DISCARD
    static void Skip(RngEngine &eng)
    static void Skip(RngEngine &eng,uint64_t site)
    {
      /////////////////////////////////////////////////////////////////////////////////////
      // Skip by 2^40 elements between successive lattice sites
@@ -183,9 +158,21 @@ namespace Grid {
      // tens of seconds per trajectory so this is clean in all reasonable cases,
      // and margin of safety is orders of magnitude.
      // We could hack Sitmo to skip in the higher order words of state if necessary
      //
      // Replace with 2^30 ; avoid problem on large volumes
      //
      /////////////////////////////////////////////////////////////////////////////////////
      uint64_t skip = 0x1; skip = skip<<40;
      //      uint64_t skip = site+1;  //   Old init Skipped then drew.  Checked compat with faster init
      const int shift = 30;

      uint64_t skip = site;

      skip = skip<<shift;

      assert((skip >> shift)==site); // check for overflow

      eng.discard(skip);
      //      std::cout << " Engine  " <<site << " state " <<eng<<std::endl;
    } 
#endif
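The overflow guard above bounds the usable volume: with shift = 30, skip = site<<30 only round-trips while site < 2^34 (about 1.7e10 sites), and the assert fires beyond that rather than silently reusing RNG streams. A sketch of the check in isolation (the site value is illustrative):

uint64_t site  = 123456789ULL;                     // global site index, illustrative
const int shift = 30;
uint64_t skip  = site << shift;                    // 2^30 sequence elements per site
assert((skip >> shift) == site);                   // holds iff site < (1ULL << (64-shift))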
			
    static RngEngine Reseed(RngEngine &eng)
@@ -330,6 +317,19 @@ namespace Grid {
      std::seed_seq src(seeds.begin(),seeds.end());
      Seed(src,0);
    }

    void SeedUniqueString(const std::string &s){
      std::vector<int> seeds;
      std::stringstream sha;
      seeds = GridChecksum::sha256_seeds(s);
      for(int i=0;i<seeds.size();i++) { 
        sha << std::hex << seeds[i];
      }
      std::cout << GridLogMessage << "Initialising serial RNG with unique string '" 
                << s << "'" << std::endl;
      std::cout << GridLogMessage << "Seed SHA256: " << sha.str() << std::endl;
      SeedFixedIntegers(seeds);
    }
  };
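Usage sketch for the new seeding path above: derive reproducible seed integers from a human-readable tag, for example binding the serial RNG to an ensemble and trajectory (the tag string is illustrative):

GridSerialRNG sRNG;
sRNG.SeedUniqueString(std::string("ensemble-X/traj-1200"));  // sha256(tag) -> seed integers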
			
  class GridParallelRNG : public GridRNGbase {
@@ -390,6 +390,14 @@ namespace Grid {
      _time_counter += usecond()- inner_time_counter;
    };

    void SeedUniqueString(const std::string &s){
      std::vector<int> seeds;
      seeds = GridChecksum::sha256_seeds(s);
      std::cout << GridLogMessage << "Initialising parallel RNG with unique string '" 
                << s << "'" << std::endl;
      std::cout << GridLogMessage << "Seed SHA256: " << GridChecksum::sha256_string(seeds) << std::endl;
      SeedFixedIntegers(seeds);
    }
    void SeedFixedIntegers(const std::vector<int> &seeds){

      // Everyone generates the same seed_seq based on input seeds
@@ -407,15 +415,14 @@ namespace Grid {
      // MT implementation does not implement fast discard even though
      // in principle this is possible
      ////////////////////////////////////////////////
      std::vector<int> gcoor;
      int rank,o_idx,i_idx;

      // Everybody loops over global volume.
      for(int gidx=0;gidx<_grid->_gsites;gidx++){

	Skip(master_engine); // Skip to next RNG sequence
      parallel_for(int gidx=0;gidx<_grid->_gsites;gidx++){

	// Where is it?
	int rank,o_idx,i_idx;
	std::vector<int> gcoor;

	_grid->GlobalIndexToGlobalCoor(gidx,gcoor);
	_grid->GlobalCoorToRankIndex(rank,o_idx,i_idx,gcoor);

@@ -423,6 +430,7 @@ namespace Grid {
	if( rank == _grid->ThisRank() ){
	  int l_idx=generator_idx(o_idx,i_idx);
	  _generators[l_idx] = master_engine;
	  Skip(_generators[l_idx],gidx); // Skip to next RNG sequence
	}

      }
			
@@ -50,26 +50,22 @@ inline void subdivides(GridBase *coarse,GridBase *fine)
  ////////////////////////////////////////////////////////////////////////////////////////////
  template<class vobj> inline void pickCheckerboard(int cb,Lattice<vobj> &half,const Lattice<vobj> &full){
    half.checkerboard = cb;
    int ssh=0;
    //parallel_for
    for(int ss=0;ss<full._grid->oSites();ss++){
      std::vector<int> coor;
      int cbos;

    parallel_for(int ss=0;ss<full._grid->oSites();ss++){
      int cbos;
      std::vector<int> coor;
      full._grid->oCoorFromOindex(coor,ss);
      cbos=half._grid->CheckerBoard(coor);
      
      if (cbos==cb) {
	int ssh=half._grid->oIndex(coor);
	half._odata[ssh] = full._odata[ss];
	ssh++;
      }
    }
  }
  template<class vobj> inline void setCheckerboard(Lattice<vobj> &full,const Lattice<vobj> &half){
    int cb = half.checkerboard;
    int ssh=0;
    //parallel_for
    for(int ss=0;ss<full._grid->oSites();ss++){
    parallel_for(int ss=0;ss<full._grid->oSites();ss++){
      std::vector<int> coor;
      int cbos;

@@ -77,8 +73,8 @@ inline void subdivides(GridBase *coarse,GridBase *fine)
      cbos=half._grid->CheckerBoard(coor);
      
      if (cbos==cb) {
	int ssh=half._grid->oIndex(coor);
	full._odata[ss]=half._odata[ssh];
	ssh++;
      }
    }
  }
			
		||||
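The pick/set pair remains an exact inverse after the parallelisation, since ssh is now computed from the coordinate instead of accumulated serially. A round-trip sketch (Even/Odd and the red-black grid are the usual Grid conventions, assumed here):

    Lattice<vobj> even(rbGrid), odd(rbGrid);  // rbGrid: GridRedBlackCartesian*, assumed
    pickCheckerboard(Even, even, full);       // gather the even sites of full
    pickCheckerboard(Odd,  odd,  full);       // gather the odd sites
    setCheckerboard(full, even);              // scatter back
    setCheckerboard(full, odd);               // full is reassembled unchanged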
@@ -109,8 +105,8 @@ inline void blockProject(Lattice<iVector<CComplex,nbasis > > &coarseData,

  coarseData=zero;

-  // Loop with a cache friendly loop ordering
-  for(int sf=0;sf<fine->oSites();sf++){
+  // Loop over coarse sites in parallel, then over the fine sites associated with each coarse site.
+  parallel_for(int sf=0;sf<fine->oSites();sf++){

    int sc;
    std::vector<int> coor_c(_ndimension);

@@ -119,6 +115,7 @@ inline void blockProject(Lattice<iVector<CComplex,nbasis > > &coarseData,
    for(int d=0;d<_ndimension;d++) coor_c[d]=coor_f[d]/block_r[d];
    Lexicographic::IndexFromCoor(coor_c,sc,coarse->_rdimensions);

+PARALLEL_CRITICAL
    for(int i=0;i<nbasis;i++) {

      coarseData._odata[sc](i)=coarseData._odata[sc](i)

@@ -139,6 +136,7 @@ inline void blockZAXPY(Lattice<vobj> &fineZ,
  GridBase * coarse= coarseA._grid;

+  fineZ.checkerboard=fineX.checkerboard;
  assert(fineX.checkerboard==fineY.checkerboard);
  subdivides(coarse,fine); // require they map
  conformable(fineX,fineY);
  conformable(fineX,fineZ);

@@ -180,9 +178,10 @@ template<class vobj,class CComplex>
  GridBase *coarse(CoarseInner._grid);
  GridBase *fine  (fineX._grid);

-  Lattice<dotp> fine_inner(fine);
+  Lattice<dotp> fine_inner(fine); fine_inner.checkerboard = fineX.checkerboard;
  Lattice<dotp> coarse_inner(coarse);

  // Precision promotion?
  fine_inner = localInnerProduct(fineX,fineY);
  blockSum(coarse_inner,fine_inner);
  parallel_for(int ss=0;ss<coarse->oSites();ss++){

@@ -193,7 +192,7 @@ template<class vobj,class CComplex>
inline void blockNormalise(Lattice<CComplex> &ip,Lattice<vobj> &fineX)
{
  GridBase *coarse = ip._grid;
-  Lattice<vobj> zz(fineX._grid); zz=zero;
+  Lattice<vobj> zz(fineX._grid); zz=zero; zz.checkerboard=fineX.checkerboard;
  blockInnerProduct(ip,fineX,fineX);
  ip = pow(ip,-0.5);
  blockZAXPY(fineX,ip,fineX,zz);

@@ -216,20 +215,26 @@ inline void blockSum(Lattice<vobj> &coarseData,const Lattice<vobj> &fineData)
    block_r[d] = fine->_rdimensions[d] / coarse->_rdimensions[d];
  }

  // Turn this around to loop threaded over sc and interior loop 
  // over sf would thread better
  coarseData=zero;
-  for(int sf=0;sf<fine->oSites();sf++){
+  parallel_region {

    int sc;
    std::vector<int> coor_c(_ndimension);
    std::vector<int> coor_f(_ndimension);

+    parallel_for_internal(int sf=0;sf<fine->oSites();sf++){
    
      Lexicographic::CoorFromIndex(coor_f,sf,fine->_rdimensions);
      for(int d=0;d<_ndimension;d++) coor_c[d]=coor_f[d]/block_r[d];
      Lexicographic::IndexFromCoor(coor_c,sc,coarse->_rdimensions);
      
+PARALLEL_CRITICAL
      coarseData._odata[sc]=coarseData._odata[sc]+fineData._odata[sf];

    }
  }
  return;
}
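For reference, the parallel_region / parallel_for_internal / PARALLEL_CRITICAL structure in blockSum corresponds to the OpenMP pattern that the BinaryIO.h hunks later in this diff substitute explicitly (PARALLEL_REGION for #pragma omp parallel, PARALLEL_FOR_LOOP_INTERN for #pragma omp for, PARALLEL_CRITICAL for #pragma omp critical); roughly:

    #pragma omp parallel
    {
      int sc;                                // per-thread scratch
      std::vector<int> coor_c(_ndimension);
      std::vector<int> coor_f(_ndimension);
    #pragma omp for
      for(int sf=0;sf<fine->oSites();sf++){
        // ... index arithmetic as above ...
    #pragma omp critical
        coarseData._odata[sc]=coarseData._odata[sc]+fineData._odata[sf];
      }
    }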
@@ -238,7 +243,7 @@ inline void blockPick(GridBase *coarse,const Lattice<vobj> &unpicked,Lattice<vob
{
  GridBase * fine = unpicked._grid;

-  Lattice<vobj> zz(fine);
+  Lattice<vobj> zz(fine); zz.checkerboard = unpicked.checkerboard;
  Lattice<iScalar<vInteger> > fcoor(fine);

  zz = zero;

@@ -303,12 +308,13 @@ inline void blockPromote(const Lattice<iVector<CComplex,nbasis > > &coarseData,
  }

-  // Loop with a cache friendly loop ordering
-  for(int sf=0;sf<fine->oSites();sf++){
+  parallel_region {

    int sc;
    std::vector<int> coor_c(_ndimension);
    std::vector<int> coor_f(_ndimension);

+    parallel_for_internal(int sf=0;sf<fine->oSites();sf++){

      Lexicographic::CoorFromIndex(coor_f,sf,fine->_rdimensions);
      for(int d=0;d<_ndimension;d++) coor_c[d]=coor_f[d]/block_r[d];
      Lexicographic::IndexFromCoor(coor_c,sc,coarse->_rdimensions);

@@ -316,7 +322,7 @@ inline void blockPromote(const Lattice<iVector<CComplex,nbasis > > &coarseData,
      for(int i=0;i<nbasis;i++) {
	if(i==0) fineData._odata[sf]=coarseData._odata[sc](i) * Basis[i]._odata[sf];
	else     fineData._odata[sf]=fineData._odata[sf]+coarseData._odata[sc](i)*Basis[i]._odata[sf];

      }
    }
  }
  return;

@@ -458,9 +464,11 @@ void InsertSliceLocal(const Lattice<vobj> &lowDim, Lattice<vobj> & higherDim,int
  assert(orthog>=0);

  for(int d=0;d<nh;d++){
+    if ( d!=orthog ) {
      assert(lg->_processors[d]  == hg->_processors[d]);
      assert(lg->_ldimensions[d] == hg->_ldimensions[d]);
+    }
  }

  // the above should guarantee that the operations are local
  parallel_for(int idx=0;idx<lg->lSites();idx++){

@@ -479,7 +487,7 @@ void InsertSliceLocal(const Lattice<vobj> &lowDim, Lattice<vobj> & higherDim,int


template<class vobj>
-void ExtractSliceLocal(Lattice<vobj> &lowDim, Lattice<vobj> & higherDim,int slice_lo,int slice_hi, int orthog)
+void ExtractSliceLocal(Lattice<vobj> &lowDim,const Lattice<vobj> & higherDim,int slice_lo,int slice_hi, int orthog)
{
  typedef typename vobj::scalar_object sobj;

@@ -493,9 +501,11 @@ void ExtractSliceLocal(Lattice<vobj> &lowDim, Lattice<vobj> & higherDim,int slic
  assert(orthog>=0);

  for(int d=0;d<nh;d++){
+    if ( d!=orthog ) {
      assert(lg->_processors[d]  == hg->_processors[d]);
      assert(lg->_ldimensions[d] == hg->_ldimensions[d]);
+    }
  }

  // the above should guarantee that the operations are local
  parallel_for(int idx=0;idx<lg->lSites();idx++){
@@ -593,6 +603,51 @@ unvectorizeToLexOrdArray(std::vector<sobj> &out, const Lattice<vobj> &in)
    extract1(in_vobj, out_ptrs, 0);
  }
}

template<typename vobj, typename sobj>
typename std::enable_if<isSIMDvectorized<vobj>::value && !isSIMDvectorized<sobj>::value, void>::type 
unvectorizeToRevLexOrdArray(std::vector<sobj> &out, const Lattice<vobj> &in)
{
  typedef typename vobj::vector_type vtype;
  
  GridBase* in_grid = in._grid;
  out.resize(in_grid->lSites());
  
  int ndim = in_grid->Nd();
  int in_nsimd = vtype::Nsimd();

  std::vector<std::vector<int> > in_icoor(in_nsimd);
      
  for(int lane=0; lane < in_nsimd; lane++){
    in_icoor[lane].resize(ndim);
    in_grid->iCoorFromIindex(in_icoor[lane], lane);
  }
  
  parallel_for(int in_oidx = 0; in_oidx < in_grid->oSites(); in_oidx++){ //loop over outer index
    //Assemble vector of pointers to output elements
    std::vector<sobj*> out_ptrs(in_nsimd);

    std::vector<int> in_ocoor(ndim);
    in_grid->oCoorFromOindex(in_ocoor, in_oidx);

    std::vector<int> lcoor(in_grid->Nd());
      
    for(int lane=0; lane < in_nsimd; lane++){
      for(int mu=0;mu<ndim;mu++)
	lcoor[mu] = in_ocoor[mu] + in_grid->_rdimensions[mu]*in_icoor[lane][mu];

      int lex;
      Lexicographic::IndexFromCoorReversed(lcoor, lex, in_grid->_ldimensions);
      out_ptrs[lane] = &out[lex];
    }
    
    //Unpack into those ptrs
    const vobj & in_vobj = in._odata[in_oidx];
    extract1(in_vobj, out_ptrs, 0);
  }
}
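The Rev variant differs from unvectorizeToLexOrdArray only in calling IndexFromCoorReversed, i.e. building the stride from the last dimension first so the final coordinate runs fastest. A worked sketch of the assumed convention:

    // dims {L0,L1,L2,L3} = {4,4,4,8}, coordinate {x0,x1,x2,x3} = {1,2,3,4}
    // forward : lex = x0 + L0*(x1 + L1*(x2 + L2*x3)) = 1 + 4*(2 + 4*(3 + 4*4)) = 313
    // reversed: lex = x3 + L3*(x2 + L2*(x1 + L1*x0)) = 4 + 8*(3 + 4*(2 + 4*1)) = 220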
//Copy SIMD-vectorized lattice to array of scalar objects in lexicographic order
template<typename vobj, typename sobj>
typename std::enable_if<isSIMDvectorized<vobj>::value 

@@ -642,10 +697,59 @@ vectorizeFromLexOrdArray( std::vector<sobj> &in, Lattice<vobj> &out)
  }
}

template<typename vobj, typename sobj>
typename std::enable_if<isSIMDvectorized<vobj>::value 
                    && !isSIMDvectorized<sobj>::value, void>::type 
vectorizeFromRevLexOrdArray( std::vector<sobj> &in, Lattice<vobj> &out)
{
  typedef typename vobj::vector_type vtype;
  
  GridBase* grid = out._grid;
  assert(in.size()==grid->lSites());
  
  int ndim     = grid->Nd();
  int nsimd    = vtype::Nsimd();

  std::vector<std::vector<int> > icoor(nsimd);
      
  for(int lane=0; lane < nsimd; lane++){
    icoor[lane].resize(ndim);
    grid->iCoorFromIindex(icoor[lane],lane);
  }
  
  parallel_for(uint64_t oidx = 0; oidx < grid->oSites(); oidx++){ //loop over outer index
    //Assemble vector of pointers to output elements
    std::vector<sobj*> ptrs(nsimd);

    std::vector<int> ocoor(ndim);
    grid->oCoorFromOindex(ocoor, oidx);

    std::vector<int> lcoor(grid->Nd());
      
    for(int lane=0; lane < nsimd; lane++){

      for(int mu=0;mu<ndim;mu++){
	lcoor[mu] = ocoor[mu] + grid->_rdimensions[mu]*icoor[lane][mu];
      }

      int lex;
      Lexicographic::IndexFromCoorReversed(lcoor, lex, grid->_ldimensions);
      ptrs[lane] = &in[lex];
    }
    
    //pack from those ptrs
    vobj vecobj;
    merge1(vecobj, ptrs, 0);
    out._odata[oidx] = vecobj; 
  }
}

//Convert a Lattice from one precision to another
template<class VobjOut, class VobjIn>
void precisionChange(Lattice<VobjOut> &out, const Lattice<VobjIn> &in){
  assert(out._grid->Nd() == in._grid->Nd());
  assert(out._grid->FullDimensions() == in._grid->FullDimensions());
  out.checkerboard = in.checkerboard;
  GridBase *in_grid=in._grid;
  GridBase *out_grid = out._grid;

@@ -685,5 +789,301 @@ void precisionChange(Lattice<VobjOut> &out, const Lattice<VobjIn> &in){
  }
}
////////////////////////////////////////////////////////////////////////////////
// Communicate between grids
////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////
// SIMPLE CASE:
///////////////////////////////////////////////////////////////////////////////////////////////////////////
//
// Mesh of nodes (2x2) ; subdivide to  1x1 subdivisions
//
// Lex ord:   
//          N0 va0 vb0 vc0 vd0       N1 va1 vb1 vc1 vd1  
//          N2 va2 vb2 vc2 vd2       N3 va3 vb3 vc3 vd3 
//
// Ratio = full[dim] / split[dim]
//
// For each dimension do an all to all; get Nvec -> Nvec / ratio
//                                          Ldim -> Ldim * ratio
//                                          LocalVol -> LocalVol * ratio
// full AllToAll(0)
//          N0 va0 vb0 va1 vb1       N1 vc0 vd0 vc1 vd1   
//          N2 va2 vb2 va3 vb3       N3 vc2 vd2 vc3 vd3 
//
// REARRANGE
//          N0 va01 vb01      N1 vc01 vd01
//          N2 va23 vb23      N3 vc23 vd23
//
// full AllToAll(1)           // Not what is wanted. FIXME
//          N0 va01 va23      N1 vc01 vc23 
//          N2 vb01 vb23      N3 vd01 vd23
// 
// REARRANGE
//          N0 va0123      N1 vc0123
//          N2 vb0123      N3 vd0123
//
// Must also rearrange data to get into the NEW lex order of grid at each stage. Some kind of "insert/extract".
// NB: Easiest to programme if keep in lex order.
/*
 *  Let chunk = (fvol*nvec)/sP be size of a chunk.         ( Divide lexico vol * nvec into fP/sP = M chunks )
 *  
 *  2nd A2A (over sP nodes; subdivide the fP into sP chunks of M)
 * 
 *     node 0     1st chunk of node 0M..(1M-1); 2nd chunk of node 0M..(1M-1)..   data chunk x M x sP = fL / sP * M * sP = fL * M growth
 *     node 1     1st chunk of node 1M..(2M-1); 2nd chunk of node 1M..(2M-1)..
 *     node 2     1st chunk of node 2M..(3M-1); 2nd chunk of node 2M..(3M-1)..
 *     node 3     1st chunk of node 3M..(4M-1); 2nd chunk of node 3M..(4M-1)..
 *  etc...
 */
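A worked instance of the chunk bookkeeping, for the 2x2 -> 1x1 picture above (values follow the loop in Grid_split below as written; illustrative only):

    // First pass, split dimension d = 0:
    //   M     = fP/sP = 2/1 = 2            subdivision factor
    //   nvec  : 4 -> 4/M = 2               vectors remaining per node
    //   ldims[0], lsites : both * M        local volume doubles
    //   chunk = (nvec*fvol)/sP = 2*fvol    one site copied per (c,m,s) triple,
    //                                      M*sP = 2 chunks shuffled per block,
    // which reproduces the "REARRANGE" rows above: each node ends the pass
    // holding fewer vectors on a proportionally larger local volume.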
template<class Vobj>
void Grid_split(std::vector<Lattice<Vobj> > & full,Lattice<Vobj>   & split)
{
  typedef typename Vobj::scalar_object Sobj;

  int full_vecs   = full.size();

  assert(full_vecs>=1);

  GridBase * full_grid = full[0]._grid;
  GridBase *split_grid = split._grid;

  int       ndim  = full_grid->_ndimension;
  int  full_nproc = full_grid->_Nprocessors;
  int split_nproc =split_grid->_Nprocessors;

  ////////////////////////////////
  // Checkerboard management
  ////////////////////////////////
  int cb = full[0].checkerboard;
  split.checkerboard = cb;

  //////////////////////////////
  // Checks
  //////////////////////////////
  assert(full_grid->_ndimension==split_grid->_ndimension);
  for(int n=0;n<full_vecs;n++){
    assert(full[n].checkerboard == cb);
    for(int d=0;d<ndim;d++){
      assert(full[n]._grid->_gdimensions[d]==split._grid->_gdimensions[d]);
      assert(full[n]._grid->_fdimensions[d]==split._grid->_fdimensions[d]);
    }
  }

  int   nvector   =full_nproc/split_nproc; 
  assert(nvector*split_nproc==full_nproc);
  assert(nvector == full_vecs);

  std::vector<int> ratio(ndim);
  for(int d=0;d<ndim;d++){
    ratio[d] = full_grid->_processors[d]/ split_grid->_processors[d];
  }

  uint64_t lsites = full_grid->lSites();
  uint64_t     sz = lsites * nvector;
  std::vector<Sobj> tmpdata(sz);
  std::vector<Sobj> alldata(sz);
  std::vector<Sobj> scalardata(lsites); 

  for(int v=0;v<nvector;v++){
    unvectorizeToLexOrdArray(scalardata,full[v]);    
    parallel_for(int site=0;site<lsites;site++){
      alldata[v*lsites+site] = scalardata[site];
    }
  }

  int nvec = nvector; // Counts down to 1 as we collapse dims
  std::vector<int> ldims = full_grid->_ldimensions;

  for(int d=ndim-1;d>=0;d--){

    if ( ratio[d] != 1 ) {

      full_grid ->AllToAll(d,alldata,tmpdata);
      if ( split_grid->_processors[d] > 1 ) {
	alldata=tmpdata;
	split_grid->AllToAll(d,alldata,tmpdata);
      }

      auto rdims = ldims; 
      auto     M = ratio[d];
      auto rsites= lsites*M;// increases rsites by M
      nvec      /= M;       // Reduce nvec by subdivision factor
      rdims[d]  *= M;       // increase local dim by same factor

      int sP =   split_grid->_processors[d];
      int fP =    full_grid->_processors[d];

      int fvol   = lsites;
      
      int chunk  = (nvec*fvol)/sP;          assert(chunk*sP == nvec*fvol);

      // Loop over reordered data post A2A
      parallel_for(int c=0;c<chunk;c++){
	std::vector<int> coor(ndim);
	for(int m=0;m<M;m++){
	  for(int s=0;s<sP;s++){
	    
	    // addressing; use lexico
	    int lex_r;
	    uint64_t lex_c        = c+chunk*m+chunk*M*s;
	    uint64_t lex_fvol_vec = c+chunk*s;
	    uint64_t lex_fvol     = lex_fvol_vec%fvol;
	    uint64_t lex_vec      = lex_fvol_vec/fvol;

	    // which node adds an offset to the coordinate
	    Lexicographic::CoorFromIndex(coor, lex_fvol, ldims);	  
	    coor[d] += m*ldims[d];
	    Lexicographic::IndexFromCoor(coor, lex_r, rdims);	  
	    lex_r += lex_vec * rsites;

	    // LexicoFind coordinate & vector number within split lattice
	    alldata[lex_r] = tmpdata[lex_c];

	  }
	}
      }
      ldims[d]*= ratio[d];
      lsites  *= ratio[d];

    }
  }
  vectorizeFromLexOrdArray(alldata,split);    
}

template<class Vobj>
void Grid_split(Lattice<Vobj> &full,Lattice<Vobj>   & split)
{
  int nvector = full._grid->_Nprocessors / split._grid->_Nprocessors;
  std::vector<Lattice<Vobj> > full_v(nvector,full._grid);
  for(int n=0;n<nvector;n++){
    full_v[n] = full;
  }
  Grid_split(full_v,split);
}

template<class Vobj>
void Grid_unsplit(std::vector<Lattice<Vobj> > & full,Lattice<Vobj>   & split)
{
  typedef typename Vobj::scalar_object Sobj;

  int full_vecs   = full.size();

  assert(full_vecs>=1);

  GridBase * full_grid = full[0]._grid;
  GridBase *split_grid = split._grid;

  int       ndim  = full_grid->_ndimension;
  int  full_nproc = full_grid->_Nprocessors;
  int split_nproc =split_grid->_Nprocessors;

  ////////////////////////////////
  // Checkerboard management
  ////////////////////////////////
  int cb = full[0].checkerboard;
  split.checkerboard = cb;

  //////////////////////////////
  // Checks
  //////////////////////////////
  assert(full_grid->_ndimension==split_grid->_ndimension);
  for(int n=0;n<full_vecs;n++){
    assert(full[n].checkerboard == cb);
    for(int d=0;d<ndim;d++){
      assert(full[n]._grid->_gdimensions[d]==split._grid->_gdimensions[d]);
      assert(full[n]._grid->_fdimensions[d]==split._grid->_fdimensions[d]);
    }
  }

  int   nvector   =full_nproc/split_nproc; 
  assert(nvector*split_nproc==full_nproc);
  assert(nvector == full_vecs);

  std::vector<int> ratio(ndim);
  for(int d=0;d<ndim;d++){
    ratio[d] = full_grid->_processors[d]/ split_grid->_processors[d];
  }

  uint64_t lsites = full_grid->lSites();
  uint64_t     sz = lsites * nvector;
  std::vector<Sobj> tmpdata(sz);
  std::vector<Sobj> alldata(sz);
  std::vector<Sobj> scalardata(lsites); 

  unvectorizeToLexOrdArray(alldata,split);    

  /////////////////////////////////////////////////////////////////
  // Start from split grid and work towards full grid
  /////////////////////////////////////////////////////////////////

  int nvec = 1;
  uint64_t rsites        = split_grid->lSites();
  std::vector<int> rdims = split_grid->_ldimensions;

  for(int d=0;d<ndim;d++){

    if ( ratio[d] != 1 ) {

      auto     M = ratio[d];

      int sP =   split_grid->_processors[d];
      int fP =    full_grid->_processors[d];
      
      auto ldims = rdims;  ldims[d]  /= M;  // Decrease local dims by same factor
      auto lsites= rsites/M;                // Decreases rsites by M
      
      int fvol   = lsites;
      int chunk  = (nvec*fvol)/sP;          assert(chunk*sP == nvec*fvol);
	
      {
	// Loop over reordered data post A2A
	parallel_for(int c=0;c<chunk;c++){
	  std::vector<int> coor(ndim);
	  for(int m=0;m<M;m++){
	    for(int s=0;s<sP;s++){

	      // addressing; use lexico
	      int lex_r;
	      uint64_t lex_c = c+chunk*m+chunk*M*s;
	      uint64_t lex_fvol_vec = c+chunk*s;
	      uint64_t lex_fvol     = lex_fvol_vec%fvol;
	      uint64_t lex_vec      = lex_fvol_vec/fvol;
	      
	      // which node adds an offset to the coordinate
	      Lexicographic::CoorFromIndex(coor, lex_fvol, ldims);	  
	      coor[d] += m*ldims[d];
	      Lexicographic::IndexFromCoor(coor, lex_r, rdims);	  
	      lex_r += lex_vec * rsites;
	      
	      // LexicoFind coordinate & vector number within split lattice
	      tmpdata[lex_c] = alldata[lex_r];
	    }
	  }
	}
      }

      if ( split_grid->_processors[d] > 1 ) {
	split_grid->AllToAll(d,tmpdata,alldata);
	tmpdata=alldata;
      }
      full_grid ->AllToAll(d,tmpdata,alldata);
      rdims[d]/= M;
      rsites  /= M;
      nvec    *= M;       // Increase nvec by subdivision factor
    }
  }

  lsites = full_grid->lSites();
  for(int v=0;v<nvector;v++){
    //    assert(v<full.size());
    parallel_for(int site=0;site<lsites;site++){
      //      assert(v*lsites+site < alldata.size());
      scalardata[site] = alldata[v*lsites+site];
    }
    vectorizeFromLexOrdArray(scalardata,full[v]);    
  }
}

}
#endif
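A usage sketch of the split/unsplit pair (the grids and fermion type are assumed from the wider library; only Grid_split/Grid_unsplit come from this diff):

    int nrhs = UGrid->_Nprocessors / SGrid->_Nprocessors; // vectors per split copy
    std::vector<LatticeFermion> src(nrhs, UGrid);
    LatticeFermion src_split(SGrid);   // fewer ranks, proportionally larger local volume
    Grid_split  (src, src_split);      // scatter nrhs fields onto the split communicator
    // ... nrhs independent solves, one per split communicator ...
    Grid_unsplit(src, src_split);      // gather the results back onto the full grid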
@@ -50,7 +50,7 @@ namespace Grid {
    return (status==0) ? res.get() : name ;
  }
  
-GridStopWatch Logger::StopWatch;
+GridStopWatch Logger::GlobalStopWatch;
int Logger::timestamp;
std::ostream Logger::devnull(0);

@@ -59,13 +59,16 @@ void GridLogTimestamp(int on){
}

Colours GridLogColours(0);
-GridLogger GridLogError(1, "Error", GridLogColours, "RED");
+GridLogger GridLogMG     (1, "MG"    , GridLogColours, "NORMAL");
+GridLogger GridLogIRL    (1, "IRL"   , GridLogColours, "NORMAL");
+GridLogger GridLogSolver (1, "Solver", GridLogColours, "NORMAL");
+GridLogger GridLogError  (1, "Error" , GridLogColours, "RED");
GridLogger GridLogWarning(1, "Warning", GridLogColours, "YELLOW");
GridLogger GridLogMessage(1, "Message", GridLogColours, "NORMAL");
-GridLogger GridLogDebug(1, "Debug", GridLogColours, "PURPLE");
+GridLogger GridLogDebug  (1, "Debug", GridLogColours, "PURPLE");
GridLogger GridLogPerformance(1, "Performance", GridLogColours, "GREEN");
-GridLogger GridLogIterative(1, "Iterative", GridLogColours, "BLUE");
-GridLogger GridLogIntegrator(1, "Integrator", GridLogColours, "BLUE");
+GridLogger GridLogIterative  (1, "Iterative", GridLogColours, "BLUE");
+GridLogger GridLogIntegrator (1, "Integrator", GridLogColours, "BLUE");

void GridLogConfigure(std::vector<std::string> &logstreams) {
  GridLogError.Active(0);

@@ -85,12 +85,16 @@ class Logger {
protected:
  Colours &Painter;
  int active;
+  int timing_mode;
+  int topWidth{-1}, chanWidth{-1};
  static int timestamp;
  std::string name, topName;
  std::string COLOUR;

public:
-  static GridStopWatch StopWatch;
+  static GridStopWatch GlobalStopWatch;
+  GridStopWatch         LocalStopWatch;
+  GridStopWatch *StopWatch;
  static std::ostream devnull;

  std::string background() {return Painter.colour["NORMAL"];}

@@ -101,22 +105,52 @@ public:
    name(nm),
    topName(topNm),
    Painter(col_class),
-    COLOUR(col) {} ;
+    timing_mode(0),
+    COLOUR(col) 
+    {
+      StopWatch = & GlobalStopWatch;
+    };
  
  void Active(int on) {active = on;};
  int  isActive(void) {return active;};
  static void Timestamp(int on) {timestamp = on;};
+  void Reset(void) { 
+    StopWatch->Reset(); 
+    StopWatch->Start(); 
+  }
+  void TimingMode(int on) { 
+    timing_mode = on; 
+    if(on) { 
+      StopWatch = &LocalStopWatch;
+      Reset(); 
+    }
+  }
+  void setTopWidth(const int w) {topWidth = w;}
+  void setChanWidth(const int w) {chanWidth = w;}

  friend std::ostream& operator<< (std::ostream& stream, Logger& log){

    if ( log.active ) {
-      stream << log.background()<< std::setw(8) << std::left << log.topName << log.background()<< " : ";
-      stream << log.colour() << std::setw(10) << std::left << log.name << log.background() << " : ";
+      stream << log.background()<<  std::left;
+      if (log.topWidth > 0)
+      {
+        stream << std::setw(log.topWidth);
+      }
+      stream << log.topName << log.background()<< " : ";
+      stream << log.colour() <<  std::left;
+      if (log.chanWidth > 0)
+      {
+        stream << std::setw(log.chanWidth);
+      }
+      stream << log.name << log.background() << " : ";
      if ( log.timestamp ) {
-	StopWatch.Stop();
-	GridTime now = StopWatch.Elapsed();
-	StopWatch.Start();
-	stream << log.evidence()<< now << log.background() << " : " ;
+	log.StopWatch->Stop();
+	GridTime now = log.StopWatch->Elapsed();
+	
+	if ( log.timing_mode==1 ) log.StopWatch->Reset();
+	log.StopWatch->Start();
+	stream << log.evidence()
+	       << now	       << log.background() << " : " ;
      }
      stream << log.colour();
      return stream;

@@ -135,6 +169,9 @@ public:

void GridLogConfigure(std::vector<std::string> &logstreams);

+extern GridLogger GridLogMG;
+extern GridLogger GridLogIRL;
+extern GridLogger GridLogSolver;
extern GridLogger GridLogError;
extern GridLogger GridLogWarning;
extern GridLogger GridLogMessage;
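A sketch of the new Logger knobs in use (channel names are the ones declared above; widths are arbitrary):

    GridLogIRL.Active(1);
    GridLogIRL.TimingMode(1);        // switch to the per-channel LocalStopWatch,
                                     // reset on each print, so the timestamp
                                     // becomes time-since-last-message
    GridLogMessage.setTopWidth(8);   // pad the top-name column
    GridLogMessage.setChanWidth(12); // pad the channel-name column
    std::cout << GridLogIRL << "converged" << std::endl;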

Grid/parallelIO/BinaryIO.cc (new file, 3 lines)
@@ -0,0 +1,3 @@
#include <Grid/GridCore.h>

int Grid::BinaryIO::latticeWriteMaxRetry = -1;
@@ -81,6 +81,7 @@ inline void removeWhitespace(std::string &key)
///////////////////////////////////////////////////////////////////////////////////////////////////
class BinaryIO {
 public:
+  static int latticeWriteMaxRetry;

  /////////////////////////////////////////////////////////////////////////////
  // more byte manipulation helpers

@@ -91,7 +92,7 @@ class BinaryIO {
    typedef typename vobj::scalar_object sobj;

    GridBase *grid = lat._grid;
-    int lsites = grid->lSites();
+    uint64_t lsites = grid->lSites();

    std::vector<sobj> scalardata(lsites); 
    unvectorizeToLexOrdArray(scalardata,lat);    

@@ -110,11 +111,11 @@ class BinaryIO {
      lsites = 1;
    }

-    #pragma omp parallel
+PARALLEL_REGION
    {
      uint32_t nersc_csum_thr = 0;

-      #pragma omp for
+PARALLEL_FOR_LOOP_INTERN
      for (uint64_t local_site = 0; local_site < lsites; local_site++)
      {
        uint32_t *site_buf = (uint32_t *)&fbuf[local_site];

@@ -124,7 +125,7 @@ class BinaryIO {
        }
      }

-      #pragma omp critical
+PARALLEL_CRITICAL
      {
        nersc_csum += nersc_csum_thr;
      }

@@ -146,21 +147,23 @@ class BinaryIO {
    std::vector<int> local_start =grid->LocalStarts();
    std::vector<int> global_vol  =grid->FullDimensions();

-#pragma omp parallel
+PARALLEL_REGION
    { 
      std::vector<int> coor(nd);
      uint32_t scidac_csuma_thr=0;
      uint32_t scidac_csumb_thr=0;
      uint32_t site_crc=0;

-#pragma omp for
+PARALLEL_FOR_LOOP_INTERN
      for(uint64_t local_site=0;local_site<lsites;local_site++){

	uint32_t * site_buf = (uint32_t *)&fbuf[local_site];

	/* 
	 * Scidac csum  is rather more heavyweight
	 * FIXME -- 128^3 x 256 x 16 will overflow.
	 */
	
	int global_site;

	Lexicographic::CoorFromIndex(coor,local_site,local_vol);

@@ -181,7 +184,7 @@ class BinaryIO {
	scidac_csumb_thr ^= site_crc<<gsite31 | site_crc>>(32-gsite31);
      }

-#pragma omp critical
+PARALLEL_CRITICAL
      {
	scidac_csuma^= scidac_csuma_thr;
	scidac_csumb^= scidac_csumb_thr;

@@ -261,7 +264,7 @@ class BinaryIO {
			      GridBase *grid,
			      std::vector<fobj> &iodata,
			      std::string file,
-			      int offset,
+			      uint64_t& offset,
			      const std::string &format, int control,
			      uint32_t &nersc_csum,
			      uint32_t &scidac_csuma,

@@ -356,7 +359,7 @@ class BinaryIO {

      if ( (control & BINARYIO_LEXICOGRAPHIC) && (nrank > 1) ) {
#ifdef USE_MPI_IO
-	std::cout<< GridLogMessage<< "MPI read I/O "<< file<< std::endl;
+	std::cout<< GridLogMessage<<"IOobject: MPI read I/O "<< file<< std::endl;
	ierr=MPI_File_open(grid->communicator,(char *) file.c_str(), MPI_MODE_RDONLY, MPI_INFO_NULL, &fh);    assert(ierr==0);
	ierr=MPI_File_set_view(fh, disp, mpiObject, fileArray, "native", MPI_INFO_NULL);    assert(ierr==0);
	ierr=MPI_File_read_all(fh, &iodata[0], 1, localArray, &status);    assert(ierr==0);

@@ -367,8 +370,8 @@ class BinaryIO {
	assert(0);
#endif
      } else {
-        std::cout << GridLogMessage << "C++ read I/O " << file << " : "
-                  << iodata.size() * sizeof(fobj) << " bytes" << std::endl;
+	std::cout << GridLogMessage <<"IOobject: C++ read I/O " << file << " : "
+                  << iodata.size() * sizeof(fobj) << " bytes and offset " << offset << std::endl;
        std::ifstream fin;
	fin.open(file, std::ios::binary | std::ios::in);
        if (control & BINARYIO_MASTER_APPEND)

@@ -413,9 +416,9 @@ class BinaryIO {
      timer.Start();
      if ( (control & BINARYIO_LEXICOGRAPHIC) && (nrank > 1) ) {
#ifdef USE_MPI_IO
-        std::cout << GridLogMessage << "MPI write I/O " << file << std::endl;
+        std::cout << GridLogMessage <<"IOobject: MPI write I/O " << file << std::endl;
        ierr = MPI_File_open(grid->communicator, (char *)file.c_str(), MPI_MODE_RDWR | MPI_MODE_CREATE, MPI_INFO_NULL, &fh);
-        std::cout << GridLogMessage << "Checking for errors" << std::endl;
+	//        std::cout << GridLogMessage << "Checking for errors" << std::endl;
        if (ierr != MPI_SUCCESS)
        {
          char error_string[BUFSIZ];

@@ -429,14 +432,20 @@ class BinaryIO {
          MPI_Abort(MPI_COMM_WORLD, 1); //assert(ierr == 0);
        }

-        std::cout << GridLogDebug << "MPI read I/O set view " << file << std::endl;
+        std::cout << GridLogDebug << "MPI write I/O set view " << file << std::endl;
        ierr = MPI_File_set_view(fh, disp, mpiObject, fileArray, "native", MPI_INFO_NULL);
        assert(ierr == 0);

-        std::cout << GridLogDebug << "MPI read I/O write all " << file << std::endl;
+        std::cout << GridLogDebug << "MPI write I/O write all " << file << std::endl;
        ierr = MPI_File_write_all(fh, &iodata[0], 1, localArray, &status);
        assert(ierr == 0);

+        MPI_Offset os;
+        MPI_File_get_position(fh, &os);
+        MPI_File_get_byte_offset(fh, os, &disp);
+        offset = disp;

        MPI_File_close(&fh);
        MPI_Type_free(&fileArray);
        MPI_Type_free(&localArray);

@@ -445,27 +454,40 @@ class BinaryIO {
#endif
      } else { 

        std::cout << GridLogMessage << "IOobject: C++ write I/O " << file << " : "
                  << iodata.size() * sizeof(fobj) << " bytes and offset " << offset << std::endl;
        
	std::ofstream fout; 
	fout.exceptions ( std::fstream::failbit | std::fstream::badbit );
	try {
	  if (offset) { // Must already exist and contain data
	    fout.open(file,std::ios::binary|std::ios::out|std::ios::in);
	  } else {     // Allow create
	    fout.open(file,std::ios::binary|std::ios::out);
	  }
	} catch (const std::fstream::failure& exc) {
	  std::cout << GridLogError << "Error in opening the file " << file << " for output" <<std::endl;
	  std::cout << GridLogError << "Exception description: " << exc.what() << std::endl;
-    std::cout << GridLogError << "Probable cause: wrong path, inaccessible location "<< std::endl;
-    #ifdef USE_MPI_IO
+	  //	  std::cout << GridLogError << "Probable cause: wrong path, inaccessible location "<< std::endl;
+#ifdef USE_MPI_IO
	  MPI_Abort(MPI_COMM_WORLD,1);
-    #else
+#else
	  exit(1);
-    #endif
+#endif
	}
-	std::cout << GridLogMessage<< "C++ write I/O "<< file<<" : "
-		        << iodata.size()*sizeof(fobj)<<" bytes"<<std::endl;
	
	if ( control & BINARYIO_MASTER_APPEND )  {
	  try {
	    fout.seekp(0,fout.end);
	  } catch (const std::fstream::failure& exc) {
	    std::cout << "Exception in seeking file end " << file << std::endl;
	  }
	} else {
	  try { 
	    fout.seekp(offset+myrank*lsites*sizeof(fobj));
	  } catch (const std::fstream::failure& exc) {
	    std::cout << "Exception in seeking file " << file <<" offset "<< offset << std::endl;
	  }
	}

	try {

@@ -474,13 +496,13 @@ class BinaryIO {
	catch (const std::fstream::failure& exc) {
	  std::cout << "Exception in writing file " << file << std::endl;
	  std::cout << GridLogError << "Exception description: "<< exc.what() << std::endl;
-    #ifdef USE_MPI_IO
+#ifdef USE_MPI_IO
	  MPI_Abort(MPI_COMM_WORLD,1);
-    #else
+#else
	  exit(1);
-    #endif
+#endif
	}

	offset  = fout.tellp();
	fout.close();
      }
      timer.Stop();

@@ -515,7 +537,7 @@ class BinaryIO {
  static inline void readLatticeObject(Lattice<vobj> &Umu,
				       std::string file,
				       munger munge,
-				       int offset,
+				       uint64_t offset,
				       const std::string &format,
				       uint32_t &nersc_csum,
				       uint32_t &scidac_csuma,

@@ -525,7 +547,7 @@ class BinaryIO {
    typedef typename vobj::Realified::scalar_type word;    word w=0;

    GridBase *grid = Umu._grid;
-    int lsites = grid->lSites();
+    uint64_t lsites = grid->lSites();

    std::vector<sobj> scalardata(lsites); 
    std::vector<fobj>     iodata(lsites); // Munge, checksum, byte order in here

@@ -536,7 +558,7 @@ class BinaryIO {
    GridStopWatch timer; 
    timer.Start();

-    parallel_for(int x=0;x<lsites;x++) munge(iodata[x], scalardata[x]);
+    parallel_for(uint64_t x=0;x<lsites;x++) munge(iodata[x], scalardata[x]);

    vectorizeFromLexOrdArray(scalardata,Umu);    
    grid->Barrier();

@@ -552,7 +574,7 @@ class BinaryIO {
    static inline void writeLatticeObject(Lattice<vobj> &Umu,
					  std::string file,
					  munger munge,
-					  int offset,
+					  uint64_t offset,
					  const std::string &format,
					  uint32_t &nersc_csum,
					  uint32_t &scidac_csuma,

@@ -561,7 +583,9 @@ class BinaryIO {
    typedef typename vobj::scalar_object sobj;
    typedef typename vobj::Realified::scalar_type word;    word w=0;
    GridBase *grid = Umu._grid;
-    int lsites = grid->lSites();
+    uint64_t lsites = grid->lSites(), offsetCopy = offset;
+    int attemptsLeft = std::max(0, BinaryIO::latticeWriteMaxRetry);
+    bool checkWrite = (BinaryIO::latticeWriteMaxRetry >= 0);

    std::vector<sobj> scalardata(lsites); 
    std::vector<fobj>     iodata(lsites); // Munge, checksum, byte order in here

@@ -572,13 +596,39 @@ class BinaryIO {
    GridStopWatch timer; timer.Start();
    unvectorizeToLexOrdArray(scalardata,Umu);    

-    parallel_for(int x=0;x<lsites;x++) munge(scalardata[x],iodata[x]);
+    parallel_for(uint64_t x=0;x<lsites;x++) munge(scalardata[x],iodata[x]);

    grid->Barrier();
    timer.Stop();

+    while (attemptsLeft >= 0)
+    {
+      grid->Barrier();
+      IOobject(w,grid,iodata,file,offset,format,BINARYIO_WRITE|BINARYIO_LEXICOGRAPHIC,
+	             nersc_csum,scidac_csuma,scidac_csumb);
+      if (checkWrite)
+      {
+        std::vector<fobj> ckiodata(lsites);
+        uint32_t          cknersc_csum, ckscidac_csuma, ckscidac_csumb;
+        uint64_t          ckoffset = offsetCopy;
+
+        std::cout << GridLogMessage << "writeLatticeObject: read back object" << std::endl;
+        grid->Barrier();
+        IOobject(w,grid,ckiodata,file,ckoffset,format,BINARYIO_READ|BINARYIO_LEXICOGRAPHIC,
+	               cknersc_csum,ckscidac_csuma,ckscidac_csumb);
+        if ((cknersc_csum != nersc_csum) or (ckscidac_csuma != scidac_csuma) or (ckscidac_csumb != scidac_csumb))
+        {
+          std::cout << GridLogMessage << "writeLatticeObject: read test checksum failure, re-writing (" << attemptsLeft << " attempt(s) remaining)" << std::endl;
+          offset = offsetCopy;
+        }
+        else
+        {
+          std::cout << GridLogMessage << "writeLatticeObject: read test checksum correct" << std::endl;
+          break;
+        }
+      }
+      attemptsLeft--;
+    }

    std::cout<<GridLogMessage<<"writeLatticeObject: unvectorize overhead "<<timer.Elapsed()  <<std::endl;
  }
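The whole retry machinery is switched by the single static added in the new BinaryIO.cc; a sketch:

    BinaryIO::latticeWriteMaxRetry = 2;  // read back after each write, re-write up
                                         // to 2 times on checksum mismatch;
                                         // the default -1 keeps checkWrite false
                                         // and preserves the old single-shot path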
@@ -589,7 +639,7 @@ class BinaryIO {
  static inline void readRNG(GridSerialRNG &serial,
			     GridParallelRNG &parallel,
			     std::string file,
-			     int offset,
+			     uint64_t offset,
			     uint32_t &nersc_csum,
			     uint32_t &scidac_csuma,
			     uint32_t &scidac_csumb)

@@ -602,8 +652,8 @@ class BinaryIO {
    std::string format = "IEEE32BIG";

    GridBase *grid = parallel._grid;
-    int gsites = grid->gSites();
-    int lsites = grid->lSites();
+    uint64_t gsites = grid->gSites();
+    uint64_t lsites = grid->lSites();

    uint32_t nersc_csum_tmp   = 0;
    uint32_t scidac_csuma_tmp = 0;

@@ -618,7 +668,7 @@ class BinaryIO {
	     nersc_csum,scidac_csuma,scidac_csumb);

    timer.Start();
-    parallel_for(int lidx=0;lidx<lsites;lidx++){
+    parallel_for(uint64_t lidx=0;lidx<lsites;lidx++){
      std::vector<RngStateType> tmp(RngStateCount);
      std::copy(iodata[lidx].begin(),iodata[lidx].end(),tmp.begin());
      parallel.SetState(tmp,lidx);

@@ -651,7 +701,7 @@ class BinaryIO {
  static inline void writeRNG(GridSerialRNG &serial,
			      GridParallelRNG &parallel,
			      std::string file,
-			      int offset,
+			      uint64_t offset,
			      uint32_t &nersc_csum,
			      uint32_t &scidac_csuma,
			      uint32_t &scidac_csumb)

@@ -662,8 +712,8 @@ class BinaryIO {
    typedef std::array<RngStateType,RngStateCount> RNGstate;

    GridBase *grid = parallel._grid;
-    int gsites = grid->gSites();
-    int lsites = grid->lSites();
+    uint64_t gsites = grid->gSites();
+    uint64_t lsites = grid->lSites();

    uint32_t nersc_csum_tmp;
    uint32_t scidac_csuma_tmp;

@@ -676,7 +726,7 @@ class BinaryIO {

    timer.Start();
    std::vector<RNGstate> iodata(lsites);
-    parallel_for(int lidx=0;lidx<lsites;lidx++){
+    parallel_for(uint64_t lidx=0;lidx<lsites;lidx++){
      std::vector<RngStateType> tmp(RngStateCount);
      parallel.GetState(tmp,lidx);
      std::copy(tmp.begin(),tmp.end(),iodata[lidx].begin());

@@ -685,7 +735,6 @@ class BinaryIO {

    IOobject(w,grid,iodata,file,offset,format,BINARYIO_WRITE|BINARYIO_LEXICOGRAPHIC,
	     nersc_csum,scidac_csuma,scidac_csumb);

    iodata.resize(1);
    {
      std::vector<RngStateType> tmp(RngStateCount);

@@ -705,5 +754,6 @@ class BinaryIO {
    std::cout << GridLogMessage << "RNG state overhead " << timer.Elapsed() << std::endl;
  }
};

}
#endif
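With the offsets widened to uint64_t, an RNG checkpoint round trip looks like the following sketch (file name illustrative; sRNG/pRNG assumed seeded as above):

    uint64_t offset = 0;
    uint32_t nersc_csum, scidac_csuma, scidac_csumb;
    BinaryIO::writeRNG(sRNG, pRNG, std::string("ckpoint_rng.0"), offset,
                       nersc_csum, scidac_csuma, scidac_csumb);
    offset = 0;
    BinaryIO::readRNG (sRNG, pRNG, std::string("ckpoint_rng.0"), offset,
                       nersc_csum, scidac_csuma, scidac_csumb);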
@@ -84,10 +84,6 @@ namespace QCD {
   stream << "GRID_";
   stream << ScidacWordMnemonic<stype>();

-   //   std::cout << " Lorentz N/S/V/M : " << _LorentzN<<" "<<_LorentzScalar<<"/"<<_LorentzVector<<"/"<<_LorentzMatrix<<std::endl;
-   //   std::cout << " Spin    N/S/V/M : " << _SpinN   <<" "<<_SpinScalar   <<"/"<<_SpinVector   <<"/"<<_SpinMatrix<<std::endl;
-   //   std::cout << " Colour  N/S/V/M : " << _ColourN <<" "<<_ColourScalar <<"/"<<_ColourVector <<"/"<<_ColourMatrix<<std::endl;
-
   if ( _LorentzVector )   stream << "_LorentzVector"<<_LorentzN;
   if ( _LorentzMatrix )   stream << "_LorentzMatrix"<<_LorentzN;

@@ -151,7 +147,7 @@ namespace QCD {

   _scidacRecord = sr;

-   std::cout << GridLogMessage << "Build SciDAC datatype " <<sr.datatype<<std::endl;
+   //   std::cout << GridLogMessage << "Build SciDAC datatype " <<sr.datatype<<std::endl;
 }
 
 ///////////////////////////////////////////////////////

@@ -182,10 +178,15 @@ class GridLimeReader : public BinaryIO {
   /////////////////////////////////////////////
   // Open the file
   /////////////////////////////////////////////
-   void open(std::string &_filename) 
+   void open(const std::string &_filename) 
   {
     filename= _filename;
     File = fopen(filename.c_str(), "r");
+     if (File == nullptr)
+     {
+       std::cerr << "cannot open file '" << filename << "'" << std::endl;
+       abort();
+     }
     LimeR = limeCreateReader(File);
   }
   /////////////////////////////////////////////

@@ -210,24 +211,39 @@ class GridLimeReader : public BinaryIO {

    while ( limeReaderNextRecord(LimeR) == LIME_SUCCESS ) { 

-      std::cout << GridLogMessage << limeReaderType(LimeR) <<std::endl;
+      uint64_t file_bytes =limeReaderBytes(LimeR);

-      if ( strncmp(limeReaderType(LimeR), record_name.c_str(),strlen(record_name.c_str()) )  ) {
+      //      std::cout << GridLogMessage << limeReaderType(LimeR) << " "<< file_bytes <<" bytes "<<std::endl;
+      //      std::cout << GridLogMessage<< " readLimeObject seeking "<<  record_name <<" found record :" <<limeReaderType(LimeR) <<std::endl;
+
+      if ( !strncmp(limeReaderType(LimeR), record_name.c_str(),strlen(record_name.c_str()) )  ) {

-	off_t offset= ftell(File);
+	//	std::cout << GridLogMessage<< " readLimeLatticeBinaryObject matches ! " <<std::endl;
+
+	uint64_t PayloadSize = sizeof(sobj) * field._grid->_gsites;
+
+	//	std::cout << "R sizeof(sobj)= " <<sizeof(sobj)<<std::endl;
+	//	std::cout << "R Gsites " <<field._grid->_gsites<<std::endl;
+	//	std::cout << "R Payload expected " <<PayloadSize<<std::endl;
+	//	std::cout << "R file size " <<file_bytes <<std::endl;
+
+	assert(PayloadSize == file_bytes);// Must match or user error
+
+	uint64_t offset= ftello(File);
+	//	std::cout << " ReadLatticeObject from offset "<<offset << std::endl;
	BinarySimpleMunger<sobj,sobj> munge;
-	BinaryIO::readLatticeObject< sobj, sobj >(field, filename, munge, offset, format,nersc_csum,scidac_csuma,scidac_csumb);
+	BinaryIO::readLatticeObject< vobj, sobj >(field, filename, munge, offset, format,nersc_csum,scidac_csuma,scidac_csumb);
+
+	std::cout << GridLogMessage << "SciDAC checksum A " << std::hex << scidac_csuma << std::dec << std::endl;
+	std::cout << GridLogMessage << "SciDAC checksum B " << std::hex << scidac_csumb << std::dec << std::endl;
	/////////////////////////////////////////////
	// Insist checksum is next record
	/////////////////////////////////////////////
-	readLimeObject(scidacChecksum_,std::string("scidacChecksum"),record_name);
+	readLimeObject(scidacChecksum_,std::string("scidacChecksum"),std::string(SCIDAC_CHECKSUM));

	/////////////////////////////////////////////
	// Verify checksums
	/////////////////////////////////////////////
-	scidacChecksumVerify(scidacChecksum_,scidac_csuma,scidac_csumb);
+	assert(scidacChecksumVerify(scidacChecksum_,scidac_csuma,scidac_csumb)==1);
	return;
      }
    }
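After this change the reader verifies the checksum instead of merely computing it. A sketch of the resulting call path (file name illustrative; ILDG_BINARY_DATA is assumed defined elsewhere in this header):

    GridLimeReader LR;
    LR.open(std::string("ckpoint_lat.1000"));   // const-reference overload above
    LR.readLimeLatticeBinaryObject(Umu, std::string(ILDG_BINARY_DATA));
    // asserts PayloadSize == file_bytes, then reads the scidacChecksum record
    // that must follow the payload and asserts scidacChecksumVerify(...) == 1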
@@ -235,48 +251,70 @@ class GridLimeReader : public BinaryIO {
 | 
			
		||||
  ////////////////////////////////////////////
 | 
			
		||||
  // Read a generic serialisable object
 | 
			
		||||
  ////////////////////////////////////////////
 | 
			
		||||
  template<class serialisable_object>
 | 
			
		||||
  void readLimeObject(serialisable_object &object,std::string object_name,std::string record_name)
 | 
			
		||||
  void readLimeObject(std::string &xmlstring,std::string record_name)
 | 
			
		||||
  {
 | 
			
		||||
    std::string xmlstring;
 | 
			
		||||
    // should this be a do while; can we miss a first record??
 | 
			
		||||
    while ( limeReaderNextRecord(LimeR) == LIME_SUCCESS ) { 
 | 
			
		||||
 | 
			
		||||
      //      std::cout << GridLogMessage<< " readLimeObject seeking "<< record_name <<" found record :" <<limeReaderType(LimeR) <<std::endl;
 | 
			
		||||
      uint64_t nbytes = limeReaderBytes(LimeR);//size of this record (configuration)
 | 
			
		||||
 | 
			
		||||
      if ( strncmp(limeReaderType(LimeR), record_name.c_str(),strlen(record_name.c_str()) )  ) {
 | 
			
		||||
      if ( !strncmp(limeReaderType(LimeR), record_name.c_str(),strlen(record_name.c_str()) )  ) {
 | 
			
		||||
 | 
			
		||||
	//	std::cout << GridLogMessage<< " readLimeObject matches ! " << record_name <<std::endl;
 | 
			
		||||
	std::vector<char> xmlc(nbytes+1,'\0');
 | 
			
		||||
	limeReaderReadData((void *)&xmlc[0], &nbytes, LimeR);    
 | 
			
		||||
	XmlReader RD(&xmlc[0],"");
 | 
			
		||||
	read(RD,object_name,object);
 | 
			
		||||
	//	std::cout << GridLogMessage<< " readLimeObject matches XML " << &xmlc[0] <<std::endl;
 | 
			
		||||
 | 
			
		||||
   xmlstring = std::string(&xmlc[0]);
 | 
			
		||||
	return;
 | 
			
		||||
      }
 | 
			
		||||
 | 
			
		||||
    }  
 | 
			
		||||
    assert(0);
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  template<class serialisable_object>
 | 
			
		||||
  void readLimeObject(serialisable_object &object,std::string object_name,std::string record_name)
 | 
			
		||||
  {
 | 
			
		||||
    std::string xmlstring;
 | 
			
		||||
 | 
			
		||||
    readLimeObject(xmlstring, record_name);
 | 
			
		||||
	  XmlReader RD(xmlstring, true, "");
 | 
			
		||||
	  read(RD,object_name,object);
 | 
			
		||||
  }
 | 
			
		||||
};
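
The hunk above splits readLimeObject into a string-returning overload plus a thin typed wrapper. A minimal usage sketch, not from the diff: the file name, record ordering and close() are assumptions for illustration; emptyUserRecord is defined later in this diff.

  std::string fn("ckpoint.lime");            // hypothetical file name
  GridLimeReader RD;
  RD.open(fn);

  std::string xml;                           // new overload: fetch a record's raw XML
  RD.readLimeObject(xml, std::string(SCIDAC_FILE_XML));

  emptyUserRecord rec;                       // typed wrapper: deserialise the next record
  RD.readLimeObject(rec, rec.SerialisableClassName(), std::string(SCIDAC_RECORD_XML));
  RD.close();                                // assumed to mirror GridLimeWriter::close()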

class GridLimeWriter : public BinaryIO {
class GridLimeWriter : public BinaryIO 
{
 public:

   ///////////////////////////////////////////////////
   // FIXME: format for RNG? Now just binary out instead
   // FIXME: collective calls or not ?
   //      : must know if I am the I/O boss
   ///////////////////////////////////////////////////

   FILE       *File;
   LimeWriter *LimeW;
   std::string filename;

   void open(std::string &_filename) { 
   bool        boss_node;
   GridLimeWriter( bool isboss = true) {
     boss_node = isboss;
   }
   void open(const std::string &_filename) { 
     filename= _filename;
     if ( boss_node ) {
       File = fopen(filename.c_str(), "w");
       LimeW = limeCreateWriter(File); assert(LimeW != NULL );
     }
   }
   /////////////////////////////////////////////
   // Close the file
   /////////////////////////////////////////////
   void close(void) {
     if ( boss_node ) {
       fclose(File);
     }
     //  limeDestroyWriter(LimeW);
   }
  ///////////////////////////////////////////////////////
@@ -284,66 +322,122 @@ class GridLimeWriter : public BinaryIO {
  ///////////////////////////////////////////////////////
  int createLimeRecordHeader(std::string message, int MB, int ME, size_t PayloadSize)
  {
    if ( boss_node ) {
      LimeRecordHeader *h;
      h = limeCreateHeader(MB, ME, const_cast<char *>(message.c_str()), PayloadSize);
      assert(limeWriteRecordHeader(h, LimeW) >= 0);
      limeDestroyHeader(h);
    }
    return LIME_SUCCESS;
  }
  ////////////////////////////////////////////
  // Write a generic serialisable object
  ////////////////////////////////////////////
  template<class serialisable_object>
  void writeLimeObject(int MB,int ME,serialisable_object &object,std::string object_name,std::string record_name)
  void writeLimeObject(int MB,int ME,XmlWriter &writer,std::string object_name,std::string record_name)
  {
    std::string xmlstring;
    {
      XmlWriter WR("","");
      write(WR,object_name,object);
      xmlstring = WR.XmlString();
    }
    if ( boss_node ) {
      std::string xmlstring = writer.docString();

      //    std::cout << "WriteLimeObject" << record_name <<std::endl;
      uint64_t nbytes = xmlstring.size();
      //    std::cout << " xmlstring "<< nbytes<< " " << xmlstring <<std::endl;
      int err;
    LimeRecordHeader *h = limeCreateHeader(MB, ME,(char *)record_name.c_str(), nbytes); assert(h!= NULL);
      LimeRecordHeader *h = limeCreateHeader(MB, ME,const_cast<char *>(record_name.c_str()), nbytes); 
      assert(h!= NULL);

      err=limeWriteRecordHeader(h, LimeW);                    assert(err>=0);
      err=limeWriteRecordData(&xmlstring[0], &nbytes, LimeW); assert(err>=0);
      err=limeWriterCloseRecord(LimeW);                       assert(err>=0);
      limeDestroyHeader(h);
    }
  ////////////////////////////////////////////
  }

  template<class serialisable_object>
  void writeLimeObject(int MB,int ME,serialisable_object &object,std::string object_name,std::string record_name, const unsigned int scientificPrec = 0)
  {
    XmlWriter WR("","");

    if (scientificPrec)
    {
      WR.scientificFormat(true);
      WR.setPrecision(scientificPrec);
    }
    write(WR,object_name,object);
    writeLimeObject(MB, ME, WR, object_name, record_name);
  }
  ////////////////////////////////////////////////////
  // Write a generic lattice field and csum
  ////////////////////////////////////////////
  // This routine is Collectively called by all nodes
  // in communicator used by the field._grid
  ////////////////////////////////////////////////////
  template<class vobj>
  void writeLimeLatticeBinaryObject(Lattice<vobj> &field,std::string record_name)
  {
    ////////////////////////////////////////////
    // Create record header
    ////////////////////////////////////////////
    typedef typename vobj::scalar_object sobj;
    int err;
    uint32_t nersc_csum,scidac_csuma,scidac_csumb;
    uint64_t PayloadSize = sizeof(sobj) * field._grid->_gsites;
    createLimeRecordHeader(record_name, 0, 0, PayloadSize);

    ////////////////////////////////////////////////////////////////////
    // NB: FILE and iostream are jointly writing disjoint sequences in the
    // same file through different file handles (integer units).
    // 
    // These are both buffered, so why I think this code is right is as follows.
    //
    // i)  write record header to FILE *File, telegraphing the size. 
    // ii) ftell reads the offset from FILE *File .
    // i)  write record header to FILE *File, telegraphing the size; flush
    // ii) ftello reads the offset from FILE *File . 
    // iii) iostream / MPI Open independently seek this offset. Write sequence direct to disk.
    //      Closes iostream and flushes.
    // iv) fseek on FILE * to end of this disjoint section.
    //  v) Continue writing scidac record.
    ////////////////////////////////////////////////////////////////////
    off_t offset = ftell(File);

    GridBase *grid = field._grid;
    assert(boss_node == field._grid->IsBoss() );

    ////////////////////////////////////////////
    // Create record header
    ////////////////////////////////////////////
    typedef typename vobj::scalar_object sobj;
    int err;
    uint32_t nersc_csum,scidac_csuma,scidac_csumb;
    uint64_t PayloadSize = sizeof(sobj) * grid->_gsites;
    if ( boss_node ) {
      createLimeRecordHeader(record_name, 0, 0, PayloadSize);
      fflush(File);
    }

    //    std::cout << "W sizeof(sobj)"      <<sizeof(sobj)<<std::endl;
    //    std::cout << "W Gsites "           <<field._grid->_gsites<<std::endl;
    //    std::cout << "W Payload expected " <<PayloadSize<<std::endl;

    ////////////////////////////////////////////////
    // Check all nodes agree on file position
    ////////////////////////////////////////////////
    uint64_t offset1;
    if ( boss_node ) {
      offset1 = ftello(File);    
    }
    grid->Broadcast(0,(void *)&offset1,sizeof(offset1));

    ///////////////////////////////////////////
    // The above is collective. Write by other means into the binary record
    ///////////////////////////////////////////
    std::string format = getFormatString<vobj>();
    BinarySimpleMunger<sobj,sobj> munge;
    BinaryIO::writeLatticeObject<vobj,sobj>(field, filename, munge, offset, format,nersc_csum,scidac_csuma,scidac_csumb);
    BinaryIO::writeLatticeObject<vobj,sobj>(field, filename, munge, offset1, format,nersc_csum,scidac_csuma,scidac_csumb);

    ///////////////////////////////////////////
    // Wind forward and close the record
    ///////////////////////////////////////////
    if ( boss_node ) {
      fseek(File,0,SEEK_END);             
      uint64_t offset2 = ftello(File);     //    std::cout << " now at offset "<<offset2 << std::endl;
      assert( (offset2-offset1) == PayloadSize);
    }

    /////////////////////////////////////////////////////////////
    // Check MPI-2 I/O did what we expect to file
    /////////////////////////////////////////////////////////////

    if ( boss_node ) { 
      err=limeWriterCloseRecord(LimeW);  assert(err>=0);
    }
    ////////////////////////////////////////
    // Write checksum element, propagating forward from the BinaryIO
    // Always pair a checksum with a binary object, and close message
@@ -353,29 +447,33 @@ class GridLimeWriter : public BinaryIO {
    std::stringstream streamb; streamb << std::hex << scidac_csumb;
    checksum.suma= streama.str();
    checksum.sumb= streamb.str();
    std::cout << GridLogMessage<<" writing scidac checksums "<<std::hex<<scidac_csuma<<"/"<<scidac_csumb<<std::dec<<std::endl;
    writeLimeObject(0,1,checksum,std::string("scidacChecksum"    ),std::string(SCIDAC_CHECKSUM));
    if ( boss_node ) { 
      writeLimeObject(0,1,checksum,std::string("scidacChecksum"),std::string(SCIDAC_CHECKSUM));
    }
  }
};
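
The i)-v) comment above is the crux of this class: a buffered FILE* and an independent handle write disjoint byte ranges of the same file. A self-contained, single-process analogue of that protocol (plain C++ plus POSIX ftello/fseeko, as used above; no Grid or MPI involved):

  // Two handles write disjoint ranges of one file: FILE* writes the header,
  // an std::ofstream writes the payload at the telegraphed offset, then the
  // FILE* seeks past the payload and could continue with the next record.
  #include <cstdio>
  #include <cstdint>
  #include <cassert>
  #include <fstream>
  #include <string>

  int main(void) {
    const std::string payload(64, 'x');              // stand-in for binary data
    FILE *f = fopen("disjoint.bin", "w");
    fprintf(f, "header:%zu\n", payload.size());      // i)  header telegraphs the size
    fflush(f);                                       //     flush before the handover
    uint64_t offset = (uint64_t)ftello(f);           // ii) record the offset

    {                                                // iii) independent handle seeks
      std::ofstream o("disjoint.bin",                //      the offset, writes direct
                      std::ios::in | std::ios::out | std::ios::binary);
      o.seekp(offset);
      o.write(payload.data(), payload.size());
    }                                                //      closes and flushes

    fseeko(f, 0, SEEK_END);                          // iv) wind the FILE* forward
    assert((uint64_t)ftello(f) == offset + payload.size());
    fclose(f);                                       //  v) continue / close
    return 0;
  }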

class ScidacWriter : public GridLimeWriter {
 public:

  ScidacWriter(bool isboss =true ) : GridLimeWriter(isboss)  { };

  template<class SerialisableUserFile>
  void writeScidacFileRecord(GridBase *grid,SerialisableUserFile &_userFile)
  {
    scidacFile    _scidacFile(grid);
    if ( this->boss_node ) {
      writeLimeObject(1,0,_scidacFile,_scidacFile.SerialisableClassName(),std::string(SCIDAC_PRIVATE_FILE_XML));
      writeLimeObject(0,1,_userFile,_userFile.SerialisableClassName(),std::string(SCIDAC_FILE_XML));
    }
  }
  ////////////////////////////////////////////////
  // Write generic lattice field in scidac format
  ////////////////////////////////////////////////
  template <class vobj, class userRecord>
  void writeScidacFieldRecord(Lattice<vobj> &field,userRecord _userRecord) 
  void writeScidacFieldRecord(Lattice<vobj> &field,userRecord _userRecord,
                              const unsigned int recordScientificPrec = 0) 
  {
    typedef typename vobj::scalar_object sobj;
    uint64_t nbytes;
    GridBase * grid = field._grid;

    ////////////////////////////////////////
@@ -390,16 +488,81 @@ class ScidacWriter : public GridLimeWriter {
    //////////////////////////////////////////////
    // Fill the Lime file record by record
    //////////////////////////////////////////////
    if ( this->boss_node ) {
      writeLimeObject(1,0,header ,std::string("FieldMetaData"),std::string(GRID_FORMAT)); // Open message 
    writeLimeObject(0,0,_userRecord,_userRecord.SerialisableClassName(),std::string(SCIDAC_RECORD_XML));
      writeLimeObject(0,0,_userRecord,_userRecord.SerialisableClassName(),std::string(SCIDAC_RECORD_XML), recordScientificPrec);
      writeLimeObject(0,0,_scidacRecord,_scidacRecord.SerialisableClassName(),std::string(SCIDAC_PRIVATE_RECORD_XML));
    }
    // Collective call
    writeLimeLatticeBinaryObject(field,std::string(ILDG_BINARY_DATA));      // Closes message with checksum
  }
};
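
A sketch of the intended call pattern (the grid, field and file name are placeholders, not from the diff): every rank constructs the writer and makes the collective call, but only the boss rank touches the FILE*.

  std::string fn("field.scidac");            // placeholder file name
  ScidacWriter WR(grid->IsBoss());           // boss flag fixed at construction
  WR.open(fn);                               // fopen happens on the boss only
  emptyUserRecord record;                    // trivial user metadata (defined below)
  WR.writeScidacFieldRecord(field, record);  // XML on boss; binary payload collective
  WR.close();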


class ScidacReader : public GridLimeReader {
 public:

   template<class SerialisableUserFile>
   void readScidacFileRecord(GridBase *grid,SerialisableUserFile &_userFile)
   {
     scidacFile    _scidacFile(grid);
     readLimeObject(_scidacFile,_scidacFile.SerialisableClassName(),std::string(SCIDAC_PRIVATE_FILE_XML));
     readLimeObject(_userFile,_userFile.SerialisableClassName(),std::string(SCIDAC_FILE_XML));
   }
  ////////////////////////////////////////////////
  // Read generic lattice field in scidac format
  ////////////////////////////////////////////////
  template <class vobj, class userRecord>
  void readScidacFieldRecord(Lattice<vobj> &field,userRecord &_userRecord) 
  {
    typedef typename vobj::scalar_object sobj;
    GridBase * grid = field._grid;

    ////////////////////////////////////////
    // fill the Grid header
    ////////////////////////////////////////
    FieldMetaData header;
    scidacRecord  _scidacRecord;
    scidacFile    _scidacFile;

    //////////////////////////////////////////////
    // Fill the Lime file record by record
    //////////////////////////////////////////////
    readLimeObject(header ,std::string("FieldMetaData"),std::string(GRID_FORMAT)); // Open message 
    readLimeObject(_userRecord,_userRecord.SerialisableClassName(),std::string(SCIDAC_RECORD_XML));
    readLimeObject(_scidacRecord,_scidacRecord.SerialisableClassName(),std::string(SCIDAC_PRIVATE_RECORD_XML));
    readLimeLatticeBinaryObject(field,std::string(ILDG_BINARY_DATA));
  }
  void skipPastBinaryRecord(void) {
    std::string rec_name(ILDG_BINARY_DATA);
    while ( limeReaderNextRecord(LimeR) == LIME_SUCCESS ) { 
      if ( !strncmp(limeReaderType(LimeR), rec_name.c_str(),strlen(rec_name.c_str()) )  ) {
	skipPastObjectRecord(std::string(SCIDAC_CHECKSUM));
	return;
      }
    }    
  }
  void skipPastObjectRecord(std::string rec_name) {
    while ( limeReaderNextRecord(LimeR) == LIME_SUCCESS ) { 
      if ( !strncmp(limeReaderType(LimeR), rec_name.c_str(),strlen(rec_name.c_str()) )  ) {
	return;
      }
    }
  }
  void skipScidacFieldRecord() {
    skipPastObjectRecord(std::string(GRID_FORMAT));
    skipPastObjectRecord(std::string(SCIDAC_RECORD_XML));
    skipPastObjectRecord(std::string(SCIDAC_PRIVATE_RECORD_XML));
    skipPastBinaryRecord();
  }
};
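
The skip helpers let a reader step over a whole field record (GRID/SciDAC XML, binary payload, checksum) without unpacking it. A sketch for reaching the second field of a two-field file (file name and target field are placeholders):

  std::string fn("two_fields.scidac");        // placeholder file name
  ScidacReader RD;
  RD.open(fn);
  RD.skipScidacFieldRecord();                 // steps past XML + binary + checksum
  emptyUserRecord record;
  RD.readScidacFieldRecord(field2, record);   // field2: target Lattice, assumed declared
  RD.close();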


class IldgWriter : public ScidacWriter {
 public:

  IldgWriter(bool isboss) : ScidacWriter(isboss) {};

  ///////////////////////////////////
  // A little helper
  ///////////////////////////////////
@@ -425,8 +588,6 @@ class IldgWriter : public ScidacWriter {
    typedef iLorentzColourMatrix<vsimd> vobj;
    typedef typename vobj::scalar_object sobj;

    uint64_t nbytes;

    ////////////////////////////////////////
    // fill the Grid header
    ////////////////////////////////////////
@@ -485,7 +646,6 @@ class IldgWriter : public ScidacWriter {
    writeLimeIldgLFN(header.ildg_lfn);                                                 // rec
    writeLimeLatticeBinaryObject(Umu,std::string(ILDG_BINARY_DATA));      // Closes message with checksum
    //    limeDestroyWriter(LimeW);
    fclose(File);
  }
};
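
Usage sketch for the new isboss constructor. The writeConfiguration entry point and its (Umu, sequence, LFN, description) argument order are assumed from the wider file, not shown in this hunk; note the method closes the FILE* itself via the fclose above.

  IldgWriter IW(grid->IsBoss());               // boss flag per communicator
  std::string fn("ckpoint_ildg.1");            // placeholder file name
  std::string lfn("mc://run/ckpoint_lat.1");   // placeholder ILDG LFN
  IW.open(fn);
  IW.writeConfiguration(Umu, 1, lfn, std::string("test config")); // assumed signature
  // no IW.close(): writeConfiguration ends with fclose(File) as shown above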

@@ -557,13 +717,15 @@ class IldgReader : public GridLimeReader {
	// Copy out the string
	std::vector<char> xmlc(nbytes+1,'\0');
	limeReaderReadData((void *)&xmlc[0], &nbytes, LimeR);    
	std::cout << GridLogMessage<< "Non binary record :" <<limeReaderType(LimeR) <<std::endl; //<<"\n"<<(&xmlc[0])<<std::endl;
	//	std::cout << GridLogMessage<< "Non binary record :" <<limeReaderType(LimeR) <<std::endl; //<<"\n"<<(&xmlc[0])<<std::endl;

	//////////////////////////////////
	// ILDG format record

	std::string xmlstring(&xmlc[0]);
	if ( !strncmp(limeReaderType(LimeR), ILDG_FORMAT,strlen(ILDG_FORMAT)) ) { 

	  XmlReader RD(&xmlc[0],"");
	  XmlReader RD(xmlstring, true, "");
	  read(RD,"ildgFormat",ildgFormat_);

	  if ( ildgFormat_.precision == 64 ) format = std::string("IEEE64BIG");
@@ -578,13 +740,13 @@ class IldgReader : public GridLimeReader {
	}

	if ( !strncmp(limeReaderType(LimeR), ILDG_DATA_LFN,strlen(ILDG_DATA_LFN)) ) {
	  FieldMetaData_.ildg_lfn = std::string(&xmlc[0]);
	  FieldMetaData_.ildg_lfn = xmlstring;
	  found_ildgLFN = 1;
	}

	if ( !strncmp(limeReaderType(LimeR), GRID_FORMAT,strlen(GRID_FORMAT)) ) { 

	  XmlReader RD(&xmlc[0],"");
	  XmlReader RD(xmlstring, true, "");
	  read(RD,"FieldMetaData",FieldMetaData_);

	  format = FieldMetaData_.floating_point;
@@ -598,18 +760,17 @@ class IldgReader : public GridLimeReader {
	}

	if ( !strncmp(limeReaderType(LimeR), SCIDAC_RECORD_XML,strlen(SCIDAC_RECORD_XML)) ) { 
	  std::string xmls(&xmlc[0]);
	  // is it a USQCD info field
	  if ( xmls.find(std::string("usqcdInfo")) != std::string::npos ) { 
	    std::cout << GridLogMessage<<"...found a usqcdInfo field"<<std::endl;
	    XmlReader RD(&xmlc[0],"");
	  if ( xmlstring.find(std::string("usqcdInfo")) != std::string::npos ) { 
	    //	    std::cout << GridLogMessage<<"...found a usqcdInfo field"<<std::endl;
	    XmlReader RD(xmlstring, true, "");
	    read(RD,"usqcdInfo",usqcdInfo_);
	    found_usqcdInfo = 1;
	  }
	}

	if ( !strncmp(limeReaderType(LimeR), SCIDAC_CHECKSUM,strlen(SCIDAC_CHECKSUM)) ) { 
	  XmlReader RD(&xmlc[0],"");
	  XmlReader RD(xmlstring, true, "");
	  read(RD,"scidacChecksum",scidacChecksum_);
	  found_scidacChecksum = 1;
	}
@@ -619,8 +780,7 @@ class IldgReader : public GridLimeReader {
	// Binary data
	/////////////////////////////////
	std::cout << GridLogMessage << "ILDG Binary record found : "  ILDG_BINARY_DATA << std::endl;
	off_t offset= ftell(File);

	uint64_t offset= ftello(File);
	if ( format == std::string("IEEE64BIG") ) {
	  GaugeSimpleMunger<dobj, sobj> munge;
	  BinaryIO::readLatticeObject< vobj, dobj >(Umu, filename, munge, offset, format,nersc_csum,scidac_csuma,scidac_csumb);
@@ -64,6 +64,11 @@ namespace Grid {
// file compatibility, so it should be correct to assume the undocumented but de facto file structure.
/////////////////////////////////////////////////////////////////////////////////

struct emptyUserRecord : Serializable { 
  GRID_SERIALIZABLE_CLASS_MEMBERS(emptyUserRecord,int,dummy);
  emptyUserRecord() { dummy=0; };
};

////////////////////////
// Scidac private file xml
// <?xml version="1.0" encoding="UTF-8"?><scidacFile><version>1.1</version><spacetime>4</spacetime><dims>16 16 16 32 </dims><volfmt>0</volfmt></scidacFile>
@@ -131,8 +136,9 @@ struct scidacRecord : Serializable {
				  int, typesize,
				  int, datacount);

  scidacRecord() { version =1.0; }

  scidacRecord()
  : version(1.0), recordtype(0), colors(0), spins(0), typesize(0), datacount(0)
  {}
};

////////////////////////
@@ -81,15 +81,16 @@ namespace Grid {
				      std::string, creation_date,
				      std::string, archive_date,
				      std::string, floating_point);
      FieldMetaData(void) { 
	nd=4;
	dimension.resize(4);
	boundary.resize(4);
      }
      // WARNING: non-initialised values might lead to twisted parallel IO
      // issues; std::string is fine because it initialises to size 0
      // as per the C++ standard.
      FieldMetaData(void) 
      : nd(4), dimension(4,0), boundary(4, ""), data_start(0),
      link_trace(0.), plaquette(0.), checksum(0),
      scidac_checksuma(0), scidac_checksumb(0), sequence_number(0)
      {}
    };
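
A minimal illustration of the WARNING above, with hypothetical structs: an uninitialised POD member serialises whatever bytes happen to be on the stack, so different ranks can emit different headers; the member-initialiser list used by FieldMetaData makes the value well-defined everywhere.

  struct BadHeader  { int data_start;     };  // indeterminate until assigned: UB to read
  struct GoodHeader { int data_start = 0; };  // value-initialised: identical on all ranks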



  namespace QCD {

    using namespace Grid;
@@ -104,6 +105,7 @@ namespace Grid {
      header.nd = nd;
      header.dimension.resize(nd);
      header.boundary.resize(nd);
      header.data_start = 0;
      for(int d=0;d<nd;d++) {
	header.dimension[d] = grid->_fdimensions[d];
      }
@@ -57,7 +57,7 @@ namespace Grid {
      // for the header-reader
      static inline int readHeader(std::string file,GridBase *grid,  FieldMetaData &field)
      {
      int offset=0;
      uint64_t offset=0;
      std::map<std::string,std::string> header;
      std::string line;

@@ -139,7 +139,7 @@ namespace Grid {
      typedef Lattice<iLorentzColourMatrix<vsimd> > GaugeField;

      GridBase *grid = Umu._grid;
      int offset = readHeader(file,Umu._grid,header);
      uint64_t offset = readHeader(file,Umu._grid,header);

      FieldMetaData clone(header);

@@ -236,21 +236,25 @@ namespace Grid {
	GaugeStatistics(Umu,header);
	MachineCharacteristics(header);

	int offset;

	truncate(file);
	uint64_t offset;

	// Sod it -- always write 3x3 double
	header.floating_point = std::string("IEEE64BIG");
	header.data_type      = std::string("4D_SU3_GAUGE_3x3");
	GaugeSimpleUnmunger<fobj3D,sobj> munge;
	if ( grid->IsBoss() ) { 
	  truncate(file);
	  offset = writeHeader(header,file);
	}
	grid->Broadcast(0,(void *)&offset,sizeof(offset));

	uint32_t nersc_csum,scidac_csuma,scidac_csumb;
	BinaryIO::writeLatticeObject<vobj,fobj3D>(Umu,file,munge,offset,header.floating_point,
								  nersc_csum,scidac_csuma,scidac_csumb);
	header.checksum = nersc_csum;
	if ( grid->IsBoss() ) { 
	  writeHeader(header,file);
	}

	std::cout<<GridLogMessage <<"Written NERSC Configuration on "<< file << " checksum "
		 <<std::hex<<header.checksum
@@ -278,7 +282,7 @@ namespace Grid {
	header.plaquette=0.0;
	MachineCharacteristics(header);

	int offset;
	uint64_t offset;

#ifdef RNG_RANLUX
	header.floating_point = std::string("UINT64");
@@ -293,12 +297,18 @@ namespace Grid {
	header.data_type      = std::string("SITMO");
#endif

	if ( grid->IsBoss() ) { 
	  truncate(file);
	  offset = writeHeader(header,file);
	}
	grid->Broadcast(0,(void *)&offset,sizeof(offset));

	uint32_t nersc_csum,scidac_csuma,scidac_csumb;
	BinaryIO::writeRNG(serial,parallel,file,offset,nersc_csum,scidac_csuma,scidac_csumb);
	header.checksum = nersc_csum;
	if ( grid->IsBoss() ) { 
	  offset = writeHeader(header,file);
	}

	std::cout<<GridLogMessage 
		 <<"Written NERSC RNG STATE "<<file<< " checksum "
@@ -313,7 +323,7 @@ namespace Grid {

	GridBase *grid = parallel._grid;

	int offset = readHeader(file,grid,header);
	uint64_t offset = readHeader(file,grid,header);

	FieldMetaData clone(header);

@@ -49,14 +49,38 @@ inline double usecond(void) {

typedef  std::chrono::system_clock          GridClock;
typedef  std::chrono::time_point<GridClock> GridTimePoint;
typedef  std::chrono::milliseconds          GridTime;
typedef  std::chrono::microseconds          GridUsecs;

inline std::ostream& operator<< (std::ostream & stream, const std::chrono::milliseconds & time)
typedef  std::chrono::seconds               GridSecs;
typedef  std::chrono::milliseconds          GridMillisecs;
typedef  std::chrono::microseconds          GridUsecs;
typedef  std::chrono::microseconds          GridTime;

inline std::ostream& operator<< (std::ostream & stream, const GridSecs & time)
{
  stream << time.count()<<" ms";
  stream << time.count()<<" s";
  return stream;
}
inline std::ostream& operator<< (std::ostream & stream, const GridMillisecs & now)
{
  GridSecs second(1);
  auto     secs       = now/second ; 
  auto     subseconds = now%second ;
  auto     fill       = stream.fill();
  stream << secs<<"."<<std::setw(3)<<std::setfill('0')<<subseconds.count()<<" s";
  stream.fill(fill);
  return stream;
}
inline std::ostream& operator<< (std::ostream & stream, const GridUsecs & now)
{
  GridSecs second(1);
  auto     seconds    = now/second ; 
  auto     subseconds = now%second ;
  auto     fill       = stream.fill();
  stream << seconds<<"."<<std::setw(6)<<std::setfill('0')<<subseconds.count()<<" s";
  stream.fill(fill);
  return stream;
}
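
For reference, what the new inserters print: duration/second gives the whole seconds, duration%second the zero-padded remainder. A self-contained sketch, assuming the header above (Grid/Timer.h) is included and its typedefs are in scope:

  #include <iostream>

  int main(void) {
    GridUsecs     t_us(1234567);          // 1234567 microseconds
    GridMillisecs t_ms(1234);             // 1234 milliseconds
    GridSecs      t_s(12);                // 12 seconds
    std::cout << t_us << std::endl;       // prints "1.234567 s"
    std::cout << t_ms << std::endl;       // prints "1.234 s"
    std::cout << t_s  << std::endl;       // prints "12 s"
    return 0;
  }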


class GridStopWatch {
private:
@@ -96,6 +120,9 @@ public:
    assert(running == false);
    return (uint64_t) accumulator.count();
  }
  bool isRunning(void){
    return running;
  }
};
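
A sketch of the new isRunning() guard. Start/Stop/Elapsed are the pre-existing GridStopWatch interface, assumed here since this hunk only shows the accumulator accessor; doWork() is a hypothetical timed section.

  GridStopWatch SW;
  SW.Start();
  doWork();                                  // hypothetical timed section
  if ( SW.isRunning() ) SW.Stop();           // accessors assert running == false
  std::cout << "elapsed " << SW.Elapsed() << std::endl;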

}
@@ -1,7 +1,7 @@
/**
 * pugixml parser - version 1.6
 * pugixml parser - version 1.9
 * --------------------------------------------------------
 * Copyright (C) 2006-2015, by Arseny Kapoulkine (arseny.kapoulkine@gmail.com)
 * Copyright (C) 2006-2018, by Arseny Kapoulkine (arseny.kapoulkine@gmail.com)
 * Report bugs and download new versions at http://pugixml.org/
 *
 * This library is distributed under the MIT License. See notice at the end
@@ -17,6 +17,9 @@
// Uncomment this to enable wchar_t mode
// #define PUGIXML_WCHAR_MODE

// Uncomment this to enable compact mode
// #define PUGIXML_COMPACT

// Uncomment this to disable XPath
// #define PUGIXML_NO_XPATH

@@ -46,7 +49,7 @@
#endif

/**
 * Copyright (c) 2006-2015 Arseny Kapoulkine
 * Copyright (c) 2006-2018 Arseny Kapoulkine
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
Some files were not shown because too many files have changed in this diff.