C++ mpi module for stochmagnet_main Package
SMOMPI_NoMasterMacroCellsDemagnetizedField.h
1 #ifndef SMOMPI_NoMasterMacroCellsDemagnetizedField_H
2 #define SMOMPI_NoMasterMacroCellsDemagnetizedField_H
3 
4 //base classes
5 #include "SM_MacroCellsDemagnetizedField.h"
6 
7 //mpi header
8 #include "MPI_Run.h"
9 #include "MPI_CoreToCoreIMessage.h"
10 
11 //network header
12 #include "SMOMPI_NoMasterMacroCellsNetwork.h"
13 
25 
26 private :
27 
30 
 //MPI message tags pairing the (P, M_P) exchanges between cores
31  //tag to send the center of mass of the network
32  static constexpr tMPITag TAG_P=99;
33  //tag to send the magnetization field
34  static constexpr tMPITag TAG_M=98;
35 
36  //single associations
37 
38  //mpi-communication requests (send requests are sized in discretize(): 2 per neighbor core)
39  std::valarray<tMPIRequest> mReceiveRequests;
40  std::valarray<tMPIRequest> mSendRequests;
41 
42  //MPI requests data
43  tMPIInteger mIsAvaliable;//probe flag: non-zero when a tagged message is available
44  tMPICount mValuesNumber;//the number of values received
45  tMPIStatus mStatus;//the status of the received request
46  std::map<tMPICoreId,tBoolean> mIsDataReceivedFromCore;//marks each core whose (P,M_P) data has been received in the current round
47 
48  //number of neighbor cores (all cores except this one)
49  tInteger mNeighborCoresNumber;
50 
51  //values of M at macro cells P
52  std::valarray<tReal> mMp_values;
53 
54  //values of center of mass of macro cells P
55  std::valarray<tReal> mP_values;
57 protected:
58  // CONSTRUCTORS
 // NOTE(review): the default-constructor signature (original lines 59-62) was lost in
 // extraction; the visible body only zeroes the neighbor-core counter.
62 
63  mNeighborCoresNumber=0;
64  }
65 
66  // DESTRUCTORS
 // NOTE(review): the destructor signature (original lines 67-69) was lost in extraction;
 // no explicit release is visible — members clean up through their own destructors.
70  }
71 
72 
73 public :
74 
75  //Instance building
76  //=================
77 
78 
 // build a new, empty instance of this operator wrapped in a unique pointer
 // NOTE(review): the second constructor argument (original line 84, presumably the
 // CORE_Object Free/deleter functor) was lost in extraction — confirm against the repository.
82  inline static CORE_UniquePointer<SelfClass> New() {
83  return CORE_UniquePointer<SelfClass>(new SelfClass(),
85  }
86 
90  virtual CORE_UniquePointer<SM_MacroCellsDemagnetizedField> newInstance() const override {
91  CORE_UniquePointer<SelfClass> p=New();
92  p->setStorage(getStorage());
93  return std::move(p);
94  }
95 
109  virtual tMemSize getMemorySize() const {
110  return sizeof(*this)+this->getContentsMemorySize();
111  }
112 
121  virtual tMemSize getContentsMemorySize() const {
122  tMemSize mem=SuperClass::getContentsMemorySize();
123  mem+=mReceiveRequests.size()*sizeof(tMPIRequest);
124  mem+=mSendRequests.size()*sizeof(tMPIRequest);
125  mem+=mMp_values.size()*sizeof(tReal);
126  mem+=mP_values.size()*sizeof(tReal);
127  mem+=mIsDataReceivedFromCore.size()*(sizeof(tMPICoreId)+sizeof(tBoolean));
128  return mem;
129  }
130 public:
131 
 /// @brief discretize the field and size the MPI communication requests
 /// @param material the material of the network (forwarded to the parent class)
 /// @param network the macro cells network (forwarded to the parent class)
144  virtual void discretize(const SM_Material& material,const SM_MacroCellsNetwork& network) final {
145 
146  SuperClass::discretize(material,network);
147 
148  //MPI environment
 // NOTE(review): original line 149 (presumably mpiEnv=MPI_Run::GetEnvironment()) was
 // lost in extraction; mpiEnv is referenced below.
150 
151  //current id of the MPI core
152  //const tMPICoreId& coreId=mpiEnv.getCoreId();
153 
154 
155  //number of cores of the environment
156  const tMPICoreId& coresNumber=mpiEnv.getCoresNumber();
157 
158  //number of neighboring cores : all cores except this one
159  mNeighborCoresNumber=coresNumber;
160  mNeighborCoresNumber--;
161 
162  //requests for messages: 2 sends (P and M) per neighbor, 2 receives (P and M)
163  mSendRequests.resize(2*mNeighborCoresNumber);
164  mReceiveRequests.resize(2);
165  }
166 
171  virtual void computeMatrix(const SM_Material& material,
172  const SM_MacroCellsNetwork& network) final {
173 
174 
175  throw CORE_Exception("stochmagnet/operators/fields/mpi",
176  "SMOMPI_NoMasterMacroCellsDemagnetizedField::computeMatrix()",
177  "method not valid for no master core ");
178 
179  }
180 
181 public:
182 
183 
 /// @brief compute the demagnetized field on the macro cells network
 /// @param network the macro cells network
 /// @param M the magnetization field used as the source term
 /// (defined in SMOMPI_NoMasterMacroCellsDemagnetizedField.cpp)
188  virtual void computeField(const SM_MacroCellsNetwork& network,
189  const SM_MacroCellsMagnetizationField& M) final;
190 
191 private:
 /// @brief post non-blocking sends of the (P, M_P) pair to every other core
 /// @param P the centers of mass of the local macro cells
 /// @param M the magnetization values at those macro cells
 /// Two messages (TAG_P then TAG_M) are posted per neighbor; the matching requests are
 /// stored in mSendRequests. Also resets the per-core reception bookkeeping.
194  inline void sendDataToAllNeighborCores(const SM_RealField& P,const SM_RealField& M) {
195 
196 
197  //number of values of P (= number of values of M_P)
198  tMPICount N_P=P.getSize();
199 
 // null buffers are sent when this core owns no macro cells (N_P==0)
200  const tReal *vP=null,*vM=null;
201  if (N_P>0) {
202  vP=&P[0];
203  vM=&M[0];
204  }
205 
206  //MPI environment
 // NOTE(review): original line 207 (presumably mpiEnv=MPI_Run::GetEnvironment()) was
 // lost in extraction; mpiEnv is referenced below.
208 
209  //current id of the MPI core
210  const tMPICoreId& coreId=mpiEnv.getCoreId();
211 
212  //number of cores
213  const tMPICoreId& coresNumber=mpiEnv.getCoresNumber();
214 
215 
216  //send the mass center (P,M_P) to all neighbor cores, skipping this core's own id
217  tMPICoreId c;
218  tMPIRequest *iSRequest=&mSendRequests[0];
219  for (c=0;c<coreId;c++) {
220  //send P to the neighbor core
221  MPI_CoreToCoreIMessage<tReal>::Send(mpiEnv,c,vP,N_P,TAG_P,*iSRequest);
222  iSRequest++;
223  //send M_P to the neighbor core
224  MPI_CoreToCoreIMessage<tReal>::Send(mpiEnv,c,vM,N_P,TAG_M,*iSRequest);
225  iSRequest++;
226  //std::cout<<"core "<<coreId<<" -> "<<N_P<<" (P,S) to "<<c<<"\n";
227  }
228  for (c=coreId+1;c<coresNumber;c++) {
229  //send P to the neighbor core
230  MPI_CoreToCoreIMessage<tReal>::Send(mpiEnv,c,vP,N_P,TAG_P,*iSRequest);
231  iSRequest++;
232  //send M_P to the neighbor core
233  MPI_CoreToCoreIMessage<tReal>::Send(mpiEnv,c,vM,N_P,TAG_M,*iSRequest);
234  iSRequest++;
235  //std::cout<<"core "<<coreId<<" -> "<<N_P<<" (P,S) to "<<c<<"\n";
236  }
237 
 // forget which cores were received from during the previous exchange round
238  mIsDataReceivedFromCore.clear();
239  }
240 
 /// @brief poll for and receive one pending (P, M_P) pair from a neighbor core
 /// @param c out: the id of the core whose data was received (set by the tag probe)
 /// @param nValues out: number of values received (0 when nothing was pending)
 /// @param Pvalues out: buffer for the received centers of mass (grown if too small)
 /// @param Mvalues out: buffer for the received magnetization values (grown if too small)
 /// @return false once data from every neighbor core has been received, true otherwise
249  inline tBoolean waitForDataFromNeighborCores(tMPICoreId& c,tMPICount& nValues,std::valarray<tReal>& Pvalues,std::valarray<tReal>& Mvalues) {
250 
 // all neighbors already accounted for: the exchange round is complete
251  if (mIsDataReceivedFromCore.size()==mNeighborCoresNumber) return false;
252 
253  //MPI environment
 // NOTE(review): original line 254 (presumably mpiEnv=MPI_Run::GetEnvironment()) was
 // lost in extraction; mpiEnv is referenced below.
255 
256 
257  //current id of the MPI core
258  //const tMPICoreId& coreId=mpiEnv.getCoreId();
259  nValues=0;//no data received
 // proceed only when BOTH the P and M messages of a not-yet-seen core are pending
260  if (MPI_Environment::IsTagMessageAvaliable(mpiEnv,TAG_P,c,mIsAvaliable,mStatus) &&
261  MPI_Environment::IsMessageAvaliable(mpiEnv,TAG_M,c,mIsAvaliable) &&
262  (mIsDataReceivedFromCore.find(c)==mIsDataReceivedFromCore.end())) {
263  //received data from core
264 
265  //get the number of values sent
266  MPI_Environment::GetCount<tReal>(mStatus,nValues);
267 
 // NOTE(review): std::valarray::resize discards previous contents; harmless here since
 // the buffers are fully overwritten by the receives below.
268  //resize (P,M) from core c if necessary
269  if (((tMPICount)Pvalues.size())<nValues) {
270  Pvalues.resize(nValues);
271  }
272  if (((tMPICount)Mvalues.size())<nValues) {
273  Mvalues.resize(nValues);
274  }
275 
276  //receive the messages in best order
277  MPI_CoreToCoreIMessage<tReal>::Receive(mpiEnv,c,&Pvalues[0],nValues,TAG_P,mReceiveRequests[0]);
278  MPI_CoreToCoreIMessage<tReal>::Receive(mpiEnv,c,&Mvalues[0],nValues,TAG_M,mReceiveRequests[1]);
279  //wait for receiving the 2 messages
280  MPI_Environment::WaitAll(mReceiveRequests);
281  mIsDataReceivedFromCore[c]=true;
282  //std::cout<<"core "<<coreId<<" receive "<<nValues<<" (P,S) from "<<c<<"\n";
283 
284  }
285 
286  return true;
287  }
288 
289 };
290 
291 
292 #endif
this class describes the exceptions raised for CORE package
Definition: CORE_Exception.h:17
tIndex getSize() const
return the number values of the container
Definition: CORE_Field.h:161
class Free introduced for deleting a smart pointer
Definition: CORE_Object.h:113
static void Send(const MPI_Environment &env, const tMPICoreId &dstCore, const T &data, const tMPITag &tag, tMPIRequest &request)
non-blocking send of a data with tag to dstCore
Definition: MPI_CoreToCoreIMessage.h:99
static tBoolean Receive(const MPI_Environment &env, const tMPICoreId &srcCore, T &data, const tMPITag &tag, tMPIRequest &request)
blocking receive a data with tag from source core
Definition: MPI_CoreToCoreIMessage.h:196
This class is a Environment class to define MPI world.
Definition: MPI_Environment.h:36
static tBoolean IsTagMessageAvaliable(const MPI_Environment &env, const tMPITag &tag, tMPICoreId &source, tMPIBoolean &isAvailable, tMPIStatus &status)
test if the message with tag is available
Definition: MPI_Environment.h:437
const tMPICoreId & getCoreId() const
get the id of the current process of this environment
Definition: MPI_Environment.h:200
const tMPICoreId & getCoresNumber() const
get the number of cores of this environment of common environment
Definition: MPI_Environment.h:180
static tBoolean WaitAll(const tMPICount &nRequests, tMPIRequest requests[], tMPIStatus statuses[])
wait for the end of the communication by blocking
Definition: MPI_Environment.h:308
static tBoolean IsMessageAvaliable(const MPI_Environment &env, tMPICoreId &source, tMPITag &tag, tMPIBoolean &isAvailable, tMPIStatus &status)
test if a message is available from any tag and any source
Definition: MPI_Environment.h:401
static MPI_Environment & GetEnvironment()
get the environment
Definition: MPI_Run.h:114
This class describes an MPI implementation of the demagnetized field based on no master core.
Definition: SMOMPI_NoMasterMacroCellsDemagnetizedField.h:24
virtual tMemSize getContentsMemorySize() const
return the memory size of the included associations
Definition: SMOMPI_NoMasterMacroCellsDemagnetizedField.h:121
virtual void discretize(const SM_Material &material, const SM_MacroCellsNetwork &network) final
discretize the field
Definition: SMOMPI_NoMasterMacroCellsDemagnetizedField.h:144
static CORE_UniquePointer< SelfClass > New()
build a new instance of the operator
Definition: SMOMPI_NoMasterMacroCellsDemagnetizedField.h:82
SMOMPI_NoMasterMacroCellsDemagnetizedField(void)
create a network class
Definition: SMOMPI_NoMasterMacroCellsDemagnetizedField.h:61
virtual void computeField(const SM_MacroCellsNetwork &network, const SM_MacroCellsMagnetizationField &M) final
compute the demagnetized field on macro cells network
Definition: SMOMPI_NoMasterMacroCellsDemagnetizedField.cpp:21
virtual ~SMOMPI_NoMasterMacroCellsDemagnetizedField(void)
destroy
Definition: SMOMPI_NoMasterMacroCellsDemagnetizedField.h:69
virtual tMemSize getMemorySize() const
return the memory size of the class and the memory size of all its attributes/associations
Definition: SMOMPI_NoMasterMacroCellsDemagnetizedField.h:109
virtual void computeMatrix(const SM_Material &material, const SM_MacroCellsNetwork &network) final
compute the matrix
Definition: SMOMPI_NoMasterMacroCellsDemagnetizedField.h:171
virtual CORE_UniquePointer< SM_MacroCellsDemagnetizedField > newInstance() const override
create a New instance of this
Definition: SMOMPI_NoMasterMacroCellsDemagnetizedField.h:90
This class describes a demagnetized operator.
Definition: SM_MacroCellsDemagnetizedField.h:50
virtual tMemSize getContentsMemorySize() const
return the memory size of the included associations
Definition: SM_MacroCellsDemagnetizedField.h:141
const tFlag & getStorage() const
yet storage type in {NO_STORAGE,PACKED_STORAGE}
Definition: SM_MacroCellsDemagnetizedField.h:162
virtual void discretize(const SM_Material &material, const SM_MacroCellsNetwork &network)
discretize the field
Definition: SM_MacroCellsDemagnetizedField.cpp:12
This class describes a Magnetization field.
Definition: SM_MacroCellsMagnetizationField.h:23
This class describes a macro cell network.
Definition: SM_MacroCellsNetwork.h:25
This class describes a material defined by state attributes:
Definition: SM_Material.h:61