/*----------------------------------------------------------------------
  PuReMD - Purdue ReaxFF Molecular Dynamics Program

  Copyright (2010) Purdue University
  Hasan Metin Aktulga, haktulga@cs.purdue.edu
  Joseph Fogarty, jcfogart@mail.usf.edu
  Sagar Pandit, pandit@usf.edu
  Ananth Y Grama, ayg@cs.purdue.edu

  This program is free software; you can redistribute it and/or
  modify it under the terms of the GNU General Public License as
  published by the Free Software Foundation; either version 2 of
  the License, or (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
  See the GNU General Public License for more details:
  <http://www.gnu.org/licenses/>.
  ----------------------------------------------------------------------*/
#include "reax_types.h"
#include "basic_comm.h"
#include "vector.h"
#include "reax_basic_comm.h"
#include "reax_vector.h"
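/* function pointer types for packing local data into MPI send buffers (Dist)
 * and for unpacking/merging received data back into local arrays (Coll) */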
typedef void (*dist_packer)( void const * const, mpi_out_data * const );
typedef void (*coll_unpacker)( void const * const, void * const,
mpi_out_data * const );
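/* packers: copy the entries selected by out_buf->index from the local buffer
 * into the contiguous outgoing MPI buffer, one routine per element type */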
static void int_packer( void const * const dummy, mpi_out_data * const out_buf )
{
int i;
int *buf = (int*) dummy;
int *out = (int*) out_buf->out_atoms;
for ( i = 0; i < out_buf->cnt; ++i )
{
out[i] = buf[ out_buf->index[i] ];
}
}
static void real_packer( void const * const dummy, mpi_out_data * const out_buf )
{
int i;
real *buf = (real*) dummy;
real *out = (real*) out_buf->out_atoms;
for ( i = 0; i < out_buf->cnt; ++i )
{
out[i] = buf[ out_buf->index[i] ];
}
}
static void rvec_packer( void const * const dummy, mpi_out_data * const out_buf )
{
int i;
rvec *buf, *out;
buf = (rvec*) dummy;
out = (rvec*) out_buf->out_atoms;
for ( i = 0; i < out_buf->cnt; ++i )
{
memcpy( out + i, buf + out_buf->index[i], sizeof(rvec) );
}
}
static void rvec2_packer( void const * const dummy, mpi_out_data * const out_buf )
{
int i;
rvec2 *buf, *out;
buf = (rvec2*) dummy;
out = (rvec2*) out_buf->out_atoms;
for ( i = 0; i < out_buf->cnt; ++i )
{
memcpy( out + i, buf + out_buf->index[i], sizeof(rvec2) );
}
}
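/* unpackers: merge data received from a neighbor back into the local buffer
 * at the positions given by out_buf->index; real/rvec/rvec2 values are accumulated */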
static void int_unpacker( void const * const dummy_in, void * const dummy_buf,
mpi_out_data * const out_buf )
{
int i;
int *in, *buf;
in = (int*) dummy_in;
buf = (int*) dummy_buf;
for ( i = 0; i < out_buf->cnt; ++i )
{
//TODO: used in SAI, purpose?
if ( buf[ out_buf->index[i] ] == -1 && in[i] != -1 )
{
buf[ out_buf->index[i] ] = in[i];
}
}
}
static void real_unpacker( void const * const dummy_in, void * const dummy_buf,
mpi_out_data * const out_buf )
{
int i;
real *in, *buf;
in = (real*) dummy_in;
buf = (real*) dummy_buf;
for ( i = 0; i < out_buf->cnt; ++i )
{
buf[ out_buf->index[i] ] += in[i];
}
}
static void rvec_unpacker( void const * const dummy_in, void * const dummy_buf,
mpi_out_data * const out_buf )
{
int i;
rvec *in, *buf;
in = (rvec*) dummy_in;
buf = (rvec*) dummy_buf;
for ( i = 0; i < out_buf->cnt; ++i )
{
rvec_Add( buf[ out_buf->index[i] ], in[i] );
#if defined(DEBUG_FOCUS)
fprintf( stderr, "rvec_unpacker: cnt=%d i =%d index[i]=%d\n",
out_buf->cnt, i, out_buf->index[i] );
#endif
}
}
static void rvec2_unpacker( void const * const dummy_in, void * const dummy_buf,
mpi_out_data * const out_buf )
{
int i;
rvec2 *in, *buf;
in = (rvec2*) dummy_in;
buf = (rvec2*) dummy_buf;
for ( i = 0; i < out_buf->cnt; ++i )
{
buf[ out_buf->index[i] ][0] += in[i][0];
buf[ out_buf->index[i] ][1] += in[i][1];
}
}
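/* compute the address of element 'offset' in an untyped buffer,
 * interpreting the buffer according to the requested pointer type */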
static void * Get_Buffer_Offset( void const * const buffer,
int offset, int type )
{
void * ptr;
switch ( type )
{
case INT_PTR_TYPE:
ptr = (int *) buffer + offset;
break;
case REAL_PTR_TYPE:
ptr = (real *) buffer + offset;
break;
case RVEC_PTR_TYPE:
ptr = (rvec *) buffer + offset;
break;
case RVEC2_PTR_TYPE:
ptr = (rvec2 *) buffer + offset;
break;
default:
fprintf( stderr, "[ERROR] unknown pointer type. Terminating...\n" );
exit( UNKNOWN_OPTION );
break;
}
return ptr;
}
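/* select the packer routine matching the buffer element type */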
static dist_packer Get_Packer( int type )
{
dist_packer ptr;
switch ( type )
{
case INT_PTR_TYPE:
ptr = &int_packer;
break;
case REAL_PTR_TYPE:
ptr = &real_packer;
break;
case RVEC_PTR_TYPE:
ptr = &rvec_packer;
break;
case RVEC2_PTR_TYPE:
ptr = &rvec2_packer;
break;
default:
fprintf( stderr, "[ERROR] unknown pointer type. Terminating...\n" );
exit( UNKNOWN_OPTION );
break;
}
return ptr;
}
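/* select the unpacker routine matching the buffer element type */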
static coll_unpacker Get_Unpacker( int type )
{
coll_unpacker ptr;
switch ( type )
{
case INT_PTR_TYPE:
ptr = &int_unpacker;
break;
case REAL_PTR_TYPE:
ptr = &real_unpacker;
break;
case RVEC_PTR_TYPE:
ptr = &rvec_unpacker;
break;
case RVEC2_PTR_TYPE:
ptr = &rvec2_unpacker;
break;
default:
fprintf( stderr, "[ERROR] unknown pointer type. Terminating...\n" );
exit( UNKNOWN_OPTION );
break;
}
return ptr;
}
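/* Dist: forward halo exchange -- locally owned entries of buf are sent to
 * neighbor processes and their ghost copies are received into buf, using
 * either the neutral-territory or the standard 3D mesh communication pattern */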
void Dist( reax_system const * const system, mpi_datatypes * const mpi_data,
void const * const buf, int buf_type, MPI_Datatype type )
{
#if defined(NEUTRAL_TERRITORY)
int d, count, index;
mpi_out_data *out_bufs;
MPI_Comm comm;
MPI_Request req[6];
MPI_Status stat[6];
dist_packer pack;
comm = mpi_data->comm_mesh3D;
out_bufs = mpi_data->out_nt_buffers;
pack = Get_Packer( buf_type );
count = 0;
/* initiate recvs */
for ( d = 0; d < 6; ++d )
{
if ( system->my_nt_nbrs[d].atoms_cnt > 0 )
{
count++;
MPI_Irecv( Get_Buffer_Offset( buf, system->my_nt_nbrs[d].atoms_str, buf_type ),
system->my_nt_nbrs[d].atoms_cnt, type,
system->my_nt_nbrs[d].receive_rank, d, comm, &req[d] );
}
}
for ( d = 0; d < 6; ++d )
{
/* send both messages in dimension d */
if ( out_bufs[d].cnt > 0 )
{
pack( buf, &out_bufs[d] );
MPI_Send( out_bufs[d].out_atoms, out_bufs[d].cnt, type,
system->my_nt_nbrs[d].rank, d, comm );
}
}
for ( d = 0; d < count; ++d )
{
MPI_Waitany( MAX_NT_NBRS, req, &index, stat);
}
#else
int d;
mpi_out_data *out_bufs;
MPI_Comm comm;
MPI_Request req1, req2;
MPI_Status stat1, stat2;
const neighbor_proc *nbr1, *nbr2;
dist_packer pack;
comm = mpi_data->comm_mesh3D;
out_bufs = mpi_data->out_buffers;
pack = Get_Packer( buf_type );
for ( d = 0; d < 3; ++d )
{
/* initiate recvs */
nbr1 = &system->my_nbrs[2 * d];
if ( nbr1->atoms_cnt > 0 )
{
MPI_Irecv( Get_Buffer_Offset( buf, nbr1->atoms_str, buf_type ),
nbr1->atoms_cnt, type, nbr1->rank, 2 * d + 1, comm, &req1 );
}
nbr2 = &system->my_nbrs[2 * d + 1];
if ( nbr2->atoms_cnt > 0 )
{
MPI_Irecv( Get_Buffer_Offset( buf, nbr2->atoms_str, buf_type ),
nbr2->atoms_cnt, type, nbr2->rank, 2 * d, comm, &req2 );
}
/* send both messages in dimension d */
if ( out_bufs[2 * d].cnt > 0 )
{
pack( buf, &out_bufs[2 * d] );
MPI_Send( out_bufs[2 * d].out_atoms, out_bufs[2 * d].cnt,
type, nbr1->rank, 2 * d, comm );
}
if ( out_bufs[2 * d + 1].cnt > 0 )
{
pack( buf, &out_bufs[2 * d + 1] );
MPI_Send( out_bufs[2 * d + 1].out_atoms, out_bufs[2 * d + 1].cnt,
type, nbr2->rank, 2 * d + 1, comm );
}
/* wait for the recvs posted above to complete */
if ( nbr1->atoms_cnt > 0 )
{
MPI_Wait( &req1, &stat1 );
}
if ( nbr2->atoms_cnt > 0 )
{
MPI_Wait( &req2, &stat2 );
}
}
#endif
}
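/* variant of Dist that always uses the two-neighbors-per-dimension
 * exchange, independent of the NEUTRAL_TERRITORY build option */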
void Dist_FS( reax_system const * const system, mpi_datatypes * const mpi_data,
void const * const buf, int buf_type, MPI_Datatype type )
{
int d;
mpi_out_data *out_bufs;
MPI_Comm comm;
MPI_Request req1, req2;
MPI_Status stat1, stat2;
const neighbor_proc *nbr1, *nbr2;
dist_packer pack;
comm = mpi_data->comm_mesh3D;
out_bufs = mpi_data->out_buffers;
pack = Get_Packer( buf_type );
for ( d = 0; d < 3; ++d )
{
/* initiate recvs */
nbr1 = &system->my_nbrs[2 * d];
if ( nbr1->atoms_cnt > 0 )
{
MPI_Irecv( Get_Buffer_Offset( buf, nbr1->atoms_str, buf_type ),
nbr1->atoms_cnt, type, nbr1->rank, 2 * d + 1, comm, &req1 );
}
nbr2 = &system->my_nbrs[2 * d + 1];
if ( nbr2->atoms_cnt > 0 )
{
MPI_Irecv( Get_Buffer_Offset( buf, nbr2->atoms_str, buf_type ),
nbr2->atoms_cnt, type, nbr2->rank, 2 * d, comm, &req2 );
}
/* send both messages in dimension d */
if ( out_bufs[2 * d].cnt > 0 )
{
pack( buf, &out_bufs[2 * d] );
MPI_Send( out_bufs[2 * d].out_atoms, out_bufs[2 * d].cnt,
type, nbr1->rank, 2 * d, comm );
}
if ( out_bufs[2 * d + 1].cnt > 0 )
{
pack( buf, &out_bufs[2 * d + 1] );
MPI_Send( out_bufs[2 * d + 1].out_atoms, out_bufs[2 * d + 1].cnt,
type, nbr2->rank, 2 * d + 1, comm );
}
if ( nbr1->atoms_cnt > 0 )
{
MPI_Wait( &req1, &stat1 );
}
if ( nbr2->atoms_cnt > 0 )
{
MPI_Wait( &req2, &stat2 );
}
}
}
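/* Coll: reverse halo exchange -- ghost-region entries of buf are sent back
 * to their owning processes and merged into the local data via the unpacker */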
void Coll( reax_system const * const system, mpi_datatypes * const mpi_data,
void * const buf, int buf_type, MPI_Datatype type )
{
#if defined(NEUTRAL_TERRITORY)
int d, count, index;
void *in[6];
mpi_out_data *out_bufs;
MPI_Comm comm;
MPI_Request req[6];
MPI_Status stat[6];
coll_unpacker unpack;
comm = mpi_data->comm_mesh3D;
out_bufs = mpi_data->out_nt_buffers;
unpack = Get_Unpacker( buf_type );
count = 0;
for ( d = 0; d < 6; ++d )
{
in[d] = mpi_data->in_nt_buffer[d];
if ( out_bufs[d].cnt > 0 )
{
count++;
MPI_Irecv( in[d], out_bufs[d].cnt, type,
system->my_nt_nbrs[d].rank, d, comm, &req[d] );
}
}
for ( d = 0; d < 6; ++d )
{
/* send both messages in direction d */
if ( system->my_nt_nbrs[d].atoms_cnt > 0 )
{
MPI_Send( Get_Buffer_Offset( buf, system->my_nt_nbrs[d].atoms_str, buf_type ),
system->my_nt_nbrs[d].atoms_cnt, type,
system->my_nt_nbrs[d].receive_rank, d, comm );
}
}
for ( d = 0; d < count; ++d )
{
MPI_Waitany( MAX_NT_NBRS, req, &index, stat);
unpack( in[index], buf, &out_bufs[index] );
}
#else
int d;
mpi_out_data *out_bufs;
MPI_Comm comm;
MPI_Request req1, req2;
MPI_Status stat1, stat2;
const neighbor_proc *nbr1, *nbr2;
coll_unpacker unpack;
comm = mpi_data->comm_mesh3D;
out_bufs = mpi_data->out_buffers;
unpack = Get_Unpacker( buf_type );
for ( d = 2; d >= 0; --d )
{
/* initiate recvs */
nbr1 = &system->my_nbrs[2 * d];
if ( out_bufs[2 * d].cnt > 0 )
{
MPI_Irecv( mpi_data->in1_buffer, out_bufs[2 * d].cnt,
type, nbr1->rank, 2 * d + 1, comm, &req1 );
}
nbr2 = &system->my_nbrs[2 * d + 1];
if ( out_bufs[2 * d + 1].cnt > 0 )
{
MPI_Irecv( mpi_data->in2_buffer, out_bufs[2 * d + 1].cnt,
type, nbr2->rank, 2 * d, comm, &req2 );
}
/* send both messages in dimension d */
if ( nbr1->atoms_cnt > 0 )
{
MPI_Send( Get_Buffer_Offset( buf, nbr1->atoms_str, buf_type ),
nbr1->atoms_cnt, type, nbr1->rank, 2 * d, comm );
}
if ( nbr2->atoms_cnt > 0 )
{
MPI_Send( Get_Buffer_Offset( buf, nbr2->atoms_str, buf_type ),
nbr2->atoms_cnt, type, nbr2->rank, 2 * d + 1, comm );
}
#if defined(DEBUG_FOCUS)
fprintf( stderr, "p%d coll[%d] nbr1: str=%d cnt=%d recv=%d\n",
system->my_rank, d, nbr1->atoms_str, nbr1->atoms_cnt,
out_bufs[2 * d].cnt );
fprintf( stderr, "p%d coll[%d] nbr2: str=%d cnt=%d recv=%d\n",
system->my_rank, d, nbr2->atoms_str, nbr2->atoms_cnt,
out_bufs[2 * d + 1].cnt );
#endif
if ( out_bufs[2 * d].cnt > 0 )
{
MPI_Wait( &req1, &stat1 );
unpack( mpi_data->in1_buffer, buf, &out_bufs[2 * d] );
}
if ( out_bufs[2 * d + 1].cnt > 0 )
{
MPI_Wait( &req2, &stat2 );
unpack( mpi_data->in2_buffer, buf, &out_bufs[2 * d + 1] );
}
}
#endif
}
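/* variant of Coll that always uses the two-neighbors-per-dimension
 * exchange, independent of the NEUTRAL_TERRITORY build option */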
void Coll_FS( reax_system const * const system, mpi_datatypes * const mpi_data,
void * const buf, int buf_type, MPI_Datatype type )
{
int d;
mpi_out_data *out_bufs;
MPI_Comm comm;
MPI_Request req1, req2;
MPI_Status stat1, stat2;
const neighbor_proc *nbr1, *nbr2;
coll_unpacker unpack;
comm = mpi_data->comm_mesh3D;
out_bufs = mpi_data->out_buffers;
unpack = Get_Unpacker( buf_type );
for ( d = 2; d >= 0; --d )
{
/* initiate recvs */
nbr1 = &system->my_nbrs[2 * d];
if ( out_bufs[2 * d].cnt )
{
MPI_Irecv( mpi_data->in1_buffer, out_bufs[2 * d].cnt,
type, nbr1->rank, 2 * d + 1, comm, &req1 );
}
nbr2 = &system->my_nbrs[2 * d + 1];
if ( out_bufs[2 * d + 1].cnt )
{
MPI_Irecv( mpi_data->in2_buffer, out_bufs[2 * d + 1].cnt,
type, nbr2->rank, 2 * d, comm, &req2 );
}
/* send both messages in dimension d */
if ( nbr1->atoms_cnt )
{
MPI_Send( Get_Buffer_Offset( buf, nbr1->atoms_str, buf_type ),
nbr1->atoms_cnt, type, nbr1->rank, 2 * d, comm );
}
if ( nbr2->atoms_cnt )
{
MPI_Send( Get_Buffer_Offset( buf, nbr2->atoms_str, buf_type ),
nbr2->atoms_cnt, type, nbr2->rank, 2 * d + 1, comm );
}
#if defined(DEBUG_FOCUS)
fprintf( stderr, "p%d coll[%d] nbr1: str=%d cnt=%d recv=%d\n",
system->my_rank, d, nbr1->atoms_str, nbr1->atoms_cnt,
out_bufs[2 * d].cnt );
fprintf( stderr, "p%d coll[%d] nbr2: str=%d cnt=%d recv=%d\n",
system->my_rank, d, nbr2->atoms_str, nbr2->atoms_cnt,
out_bufs[2 * d + 1].cnt );
#endif
if ( out_bufs[2 * d].cnt )
{
MPI_Wait( &req1, &stat1 );
unpack( mpi_data->in1_buffer, buf, &out_bufs[2 * d] );
}
if ( out_bufs[2 * d + 1].cnt )
{
MPI_Wait( &req2, &stat2 );
unpack( mpi_data->in2_buffer, buf, &out_bufs[2 * d + 1] );
}
}
}
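/* 2-norm of a vector distributed across the ranks of comm:
 * local sums of squares are combined with MPI_Allreduce */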
real Parallel_Norm( real const * const v, const int n, MPI_Comm comm )
{
int i;
real my_sum, norm_sqr;
my_sum = 0.0;
/* compute local part of vector 2-norm */
for ( i = 0; i < n; ++i )
{
my_sum += v[i] * v[i];
}
MPI_Allreduce( &my_sum, &norm_sqr, 1, MPI_DOUBLE, MPI_SUM, comm );
return sqrt( norm_sqr );
}
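/* inner product of two vectors distributed across the ranks of comm */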
real Parallel_Dot( real const * const v1, real const * const v2,
const int n, MPI_Comm comm )
{
int i;
real my_dot, res;
my_dot = 0.0;
/* compute local part of inner product */
for ( i = 0; i < n; ++i )
{
my_dot += v1[i] * v2[i];
}
MPI_Allreduce( &my_dot, &res, 1, MPI_DOUBLE, MPI_SUM, comm );
return res;
}
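/* sum of all elements of a vector distributed across the ranks of comm */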
real Parallel_Vector_Acc( real const * const v, const int n,
MPI_Comm comm )
{
int i;
real my_acc, res;
/* compute local part of vector element-wise sum */
my_acc = 0.0;
for ( i = 0; i < n; ++i )
{
my_acc += v[i];
}
MPI_Allreduce( &my_acc, &res, 1, MPI_DOUBLE, MPI_SUM, comm );
return res;
}
/*****************************************************************************/
#if defined(TEST_FORCES)
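/* gather the global IDs of all atoms at the master process
 * (TEST_FORCES builds only) */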
void Coll_ids_at_Master( reax_system *system, storage *workspace,
mpi_datatypes *mpi_data )
{
int i;
int *id_list;
MPI_Gather( &system->n, 1, MPI_INT, workspace->rcounts, 1, MPI_INT,
MASTER_NODE, mpi_data->world );
if ( system->my_rank == MASTER_NODE )
{
workspace->displs[0] = 0;
for ( i = 1; i < system->nprocs; ++i )
workspace->displs[i] = workspace->displs[i - 1] + workspace->rcounts[i - 1];
}
id_list = (int*) smalloc( system->n * sizeof(int), "Coll_ids_at_Master::id_list" );
/* fill with the global (original) IDs of locally owned atoms */
for ( i = 0; i < system->n; ++i )
{
id_list[i] = system->my_atoms[i].orig_id;
}
MPI_Gatherv( id_list, system->n, MPI_INT, workspace->id_all,
workspace->rcounts, workspace->displs, MPI_INT, MASTER_NODE,
mpi_data->world );
sfree( id_list, "Coll_ids_at_Master::id_list" );
#if defined(DEBUG_FOCUS)
if ( system->my_rank == MASTER_NODE )
{
for ( i = 0 ; i < system->bigN; ++i )
fprintf( stderr, "id_all[%d]: %d\n", i, workspace->id_all[i] );
void Coll_rvecs_at_Master( reax_system *system, storage *workspace,
mpi_datatypes *mpi_data, rvec * v )
{
MPI_Gatherv( v, system->n, mpi_data->mpi_rvec, workspace->f_all,
workspace->rcounts, workspace->displs, mpi_data->mpi_rvec,
MASTER_NODE, mpi_data->world );
}
#endif