	Merge branch 'develop' into feature/scalar_adjointFT
@@ -81,6 +81,8 @@ public:
       Init(dimensions,simd_layout,processor_grid);
     }
 
+    virtual ~GridCartesian() = default;
+
     void Init(const std::vector<int> &dimensions,
 	      const std::vector<int> &simd_layout,
 	      const std::vector<int> &processor_grid)
@@ -159,7 +161,6 @@ public:
       }
     };
 
-    virtual ~GridCartesian() = default;
 };
 }
 #endif
@@ -133,6 +133,8 @@ public:
     {
       Init(base->_fdimensions,base->_simd_layout,base->_processors,checker_dim_mask,checker_dim)  ;
     }
+
+    virtual ~GridRedBlackCartesian() = default;
 #if 0
     ////////////////////////////////////////////////////////////
     // Create redblack grid ;; deprecate these. Should not
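
The hunks above give GridCartesian and GridRedBlackCartesian explicitly defaulted virtual destructors. A minimal standalone sketch of why that matters when such an object is deleted through a base-class pointer; the class names below are illustrative stand-ins, not Grid's own types:

// Sketch only: illustrates the virtual-destructor pattern, not Grid code.
#include <memory>
#include <vector>
#include <iostream>

struct GridBase_ {                       // stand-in for a common grid base class
  virtual ~GridBase_() = default;        // virtual: deleting through a base
                                         // pointer runs the derived destructor
};

struct CartesianGrid_ : GridBase_ {
  std::vector<int> dims;
  explicit CartesianGrid_(std::vector<int> d) : dims(std::move(d)) {}
  ~CartesianGrid_() override { std::cout << "derived cleanup\n"; }
};

int main() {
  // Without a virtual destructor in the base, deleting through the base
  // pointer would be undefined behaviour and could skip the derived cleanup.
  std::unique_ptr<GridBase_> g =
      std::make_unique<CartesianGrid_>(std::vector<int>{4, 4, 4, 8});
  return 0;
}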
@@ -52,6 +52,13 @@ void CartesianCommunicator::Init(int *argc, char ***argv) {
   MPI_Comm_dup (MPI_COMM_WORLD,&communicator_world);
   ShmInitGeneric();
 }
+
+CartesianCommunicator::~CartesianCommunicator()
+{
+  if (communicator && !MPI::Is_finalized())
+    MPI_Comm_free(&communicator);
+}
+
 void CartesianCommunicator::GlobalSum(uint32_t &u){
   int ierr=MPI_Allreduce(MPI_IN_PLACE,&u,1,MPI_UINT32_T,MPI_SUM,communicator);
   assert(ierr==0);
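
The new CartesianCommunicator destructor frees the duplicated communicator only when MPI has not already been finalized, since MPI_Comm_free is not valid after MPI_Finalize. A standalone sketch of the same guard, using the plain C MPI call MPI_Finalized in place of the MPI::Is_finalized() binding the patch uses; the Communicator wrapper here is illustrative, not Grid's class:

// Sketch: guard communicator cleanup against running after MPI_Finalize.
#include <mpi.h>

struct Communicator {
  MPI_Comm comm = MPI_COMM_NULL;
  Communicator()  { MPI_Comm_dup(MPI_COMM_WORLD, &comm); }
  ~Communicator() {
    int finalized = 0;
    MPI_Finalized(&finalized);
    // Calling MPI_Comm_free after MPI_Finalize is erroneous, so skip it then.
    if (comm != MPI_COMM_NULL && !finalized) MPI_Comm_free(&comm);
  }
};

int main(int argc, char **argv) {
  MPI_Init(&argc, &argv);
  {
    Communicator local;        // freed normally here, before MPI_Finalize
  }
  static Communicator global;  // destroyed after main returns, i.e. after
                               // MPI_Finalize: the guard prevents the free
  MPI_Finalize();
  return 0;
}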
@@ -53,6 +53,13 @@ void CartesianCommunicator::Init(int *argc, char ***argv) {
   ShmInitGeneric();
 }
 
+CartesianCommunicator::~CartesianCommunicator()
+{
+  if (communicator && !MPI::Is_finalized())
+    MPI_Comm_free(&communicator);
+}
+
+
 void CartesianCommunicator::GlobalSum(uint32_t &u){
   int ierr=MPI_Allreduce(MPI_IN_PLACE,&u,1,MPI_UINT32_T,MPI_SUM,communicator);
   assert(ierr==0);
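
For reference, the GlobalSum(uint32_t&) context lines above perform an in-place MPI_Allreduce. A self-contained sketch of that pattern, assuming nothing about Grid itself:

// Sketch: in-place MPI_Allreduce of one 32-bit unsigned integer per rank.
#include <mpi.h>
#include <cassert>
#include <cstdint>
#include <cstdio>

int main(int argc, char **argv) {
  MPI_Init(&argc, &argv);

  int rank = 0, size = 1;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &size);

  uint32_t u = 1;  // each rank contributes 1, so the sum equals the rank count
  int ierr = MPI_Allreduce(MPI_IN_PLACE, &u, 1, MPI_UINT32_T, MPI_SUM, MPI_COMM_WORLD);
  assert(ierr == MPI_SUCCESS);
  assert(u == static_cast<uint32_t>(size));

  if (rank == 0) std::printf("global sum = %u\n", static_cast<unsigned>(u));
  MPI_Finalize();
  return 0;
}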
@@ -57,8 +57,7 @@ CartesianCommunicator::CartesianCommunicator(const std::vector<int> &processors)
 }
 
+CartesianCommunicator::~CartesianCommunicator(){}
-  
-  
 
 void CartesianCommunicator::GlobalSum(float &){}
 void CartesianCommunicator::GlobalSumVector(float *,int N){}
 void CartesianCommunicator::GlobalSum(double &){}
@@ -31,7 +31,8 @@ class ScalarActionParameters : Serializable {
  public:
   GRID_SERIALIZABLE_CLASS_MEMBERS(ScalarActionParameters,
     double, mass_squared,
-    double, lambda);
+    double, lambda,
+    double, g);
 
     template <class ReaderClass >
   ScalarActionParameters(Reader<ReaderClass>& Reader){
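
The extra coupling g added to GRID_SERIALIZABLE_CLASS_MEMBERS has to be threaded through to the action constructor, which is what the final hunk below does. A standalone analogue of that change (plain structs, not Grid's ScalarAction API), just to show the pattern:

// Sketch: a parameter struct gains a third coupling, and the action
// constructor gains the matching argument.  Names and values are illustrative.
#include <iostream>

struct ScalarParams {            // plays the role of ScalarActionParameters
  double mass_squared;
  double lambda;
  double g;                      // new coupling introduced by this commit
};

class ScalarActionSketch {       // plays the role of ScalarAction
  double m2_, lambda_, g_;
public:
  ScalarActionSketch(double m2, double lambda, double g)
    : m2_(m2), lambda_(lambda), g_(g) {}
  void print() const {
    std::cout << "m^2=" << m2_ << " lambda=" << lambda_ << " g=" << g_ << "\n";
  }
};

int main() {
  ScalarParams p{0.1, 0.01, 0.5};
  // mirrors Saction(SPar.mass_squared, SPar.lambda, SPar.g) in the hunk below
  ScalarActionSketch S(p.mass_squared, p.lambda, p.g);
  S.print();
  return 0;
}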
@@ -140,7 +141,7 @@ int main(int argc, char **argv) {
 
   // Scalar action in adjoint representation
   ScalarActionParameters SPar(Reader);
-  ScalarAction Saction(SPar.mass_squared, SPar.lambda);
+  ScalarAction Saction(SPar.mass_squared, SPar.lambda, SPar.g);
 
   // Collect actions
   ActionLevel<ScalarAction::Field, ScalarNxNMatrixFields<Ncolours>> Level1(1);