#ifndef DUNE_MPICOLLECTIVECOMMUNICATION_HH
#define DUNE_MPICOLLECTIVECOMMUNICATION_HH

// Standard-library dependencies.
// <algorithm> : std::copy (already used below)
// <vector>    : exception-safe scratch buffer in the in-place allreduce
#include <algorithm>
#include <vector>
34 template<
typename Type,
typename BinaryFunction>
44 MPI_Op_create((
void (*)(
void*,
void*,
int*, MPI_Datatype*))&operation,
true,op.
get());
49 static void operation (Type *in, Type *inout,
int *len, MPI_Datatype *dptr)
53 for (
int i=0; i< *len; ++i, ++in, ++inout){
55 temp = func(*in, *inout);
60 Generic_MPI_Op (
const Generic_MPI_Op& ) {}
61 static shared_ptr<MPI_Op> op;
65 template<
typename Type,
typename BinaryFunction>
66 shared_ptr<MPI_Op> Generic_MPI_Op<Type,BinaryFunction>::op = shared_ptr<MPI_Op>(
static_cast<MPI_Op*
>(0));
68 #define ComposeMPIOp(type,func,op) \
70 class Generic_MPI_Op<type, func<type> >{ \
72 static MPI_Op get(){ \
77 Generic_MPI_Op (const Generic_MPI_Op& ) {}\
148 if(communicator!=MPI_COMM_NULL){
149 MPI_Comm_rank(communicator,&me);
150 MPI_Comm_size(communicator,&procs);
174 allreduce<std::plus<T> >(&in,&out,1);
180 int sum (T* inout,
int len)
const
182 return allreduce<std::plus<T> >(inout,len);
190 allreduce<std::multiplies<T> >(&in,&out,1);
196 int prod (T* inout,
int len)
const
198 return allreduce<std::plus<T> >(inout,len);
206 allreduce<Min<T> >(&in,&out,1);
212 int min (T* inout,
int len)
const
214 return allreduce<Min<T> >(inout,len);
223 allreduce<Max<T> >(&in,&out,1);
229 int max (T* inout,
int len)
const
231 return allreduce<Max<T> >(inout,len);
237 return MPI_Barrier(communicator);
249 int gather (T* in, T* out,
int len,
int root)
const
258 int scatter (T* send, T* recv,
int len,
int root)
const
265 operator MPI_Comm ()
const
271 template<
typename T,
typename T1>
280 template<
typename BinaryFunction,
typename Type>
283 Type* out =
new Type[len];
284 int ret = allreduce<BinaryFunction>(inout,out,len);
285 std::copy(out, out+len, inout);
291 template<
typename BinaryFunction,
typename Type>
299 MPI_Comm communicator;