sparse_alltoall_test Program

Uses

    • mpi
    • tem_sparse_comm_module

Calls

    • mpi_init
    • mpi_comm_rank
    • mpi_comm_size
    • tem_sparse_alltoall_int
    • mpi_reduce
    • mpi_finalize
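
A plausible explicit interface for tem_sparse_alltoall_int, inferred from the call in the source code below. The argument intents and the allocatable attributes of sources and recv_buffer are assumptions, not copied from tem_sparse_comm_module:

  interface
    subroutine tem_sparse_alltoall_int( targets, send_buffer, &
      &                                 sources, recv_buffer, comm )
      integer, intent(in)               :: targets(:)     ! ranks to send to
      integer, intent(in)               :: send_buffer(:) ! one value per target
      integer, allocatable, intent(out) :: sources(:)     ! ranks received from
      integer, allocatable, intent(out) :: recv_buffer(:) ! one value per source
      integer, intent(in)               :: comm           ! MPI communicator
    end subroutine tem_sparse_alltoall_int
  end interface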

Contents

Source Code


Variables

Type                 Attributes      Name               Initial
integer                              iError
integer                              myrank
integer                              nProcs
logical                              local_success
logical                              success
integer              allocatable     targets(:)
integer              allocatable     send_buffer(:)
integer              allocatable     sources(:)
integer              allocatable     recv_buffer(:)

Source Code

program sparse_alltoall_test
  use mpi
  use tem_sparse_comm_module, only: tem_sparse_alltoall_int

  implicit none

  ! Assume we have 4 procs: 0 - 1 - 2 - 3
  ! Each proc sends data to its direct neighbors.
  ! The data is a single integer whose value is the rank of the target proc:
  ! proc 0 sends value 1 to proc 1
  ! proc 1 sends value 0 to proc 0 and value 2 to proc 2
  ! proc 2 sends value 1 to proc 1 and value 3 to proc 3
  ! proc 3 sends value 2 to proc 2
  ! After the communication, each proc should hold its own rank once per
  ! neighbor:
  ! proc 0: 0
  ! proc 1: 1 1
  ! proc 2: 2 2
  ! proc 3: 3

  integer :: iError
  integer :: myrank, nProcs
  logical :: local_success, success
  ! integer :: rstat(MPI_STATUS_SIZE)

  integer, allocatable :: targets(:)
  integer, allocatable :: send_buffer(:)
  integer, allocatable :: sources(:)
  integer, allocatable :: recv_buffer(:)

  call MPI_Init(iError)
  call MPI_Comm_rank(MPI_COMM_WORLD, myrank, iError)
  call MPI_Comm_size(MPI_COMM_WORLD, nProcs, iError)

  if ( myrank == 0 ) then
    ! has one neighbor
    allocate( targets(1) )
    allocate( send_buffer(1) )
    targets(1)     = myrank + 1
    send_buffer(1) = myrank + 1
  else if ( myrank == (nProcs-1) ) then
    ! has one neighbor
    allocate( targets(1) )
    allocate( send_buffer(1) )
    targets(1)     = myrank - 1
    send_buffer(1) = myrank - 1
  else
    ! has two neighbors
    allocate( targets(2) )
    allocate( send_buffer(2) )
    targets(1)     = myrank - 1
    targets(2)     = myrank + 1
    send_buffer(1) = myrank - 1
    send_buffer(2) = myrank + 1
  end if

  call tem_sparse_alltoall_int( targets, send_buffer, &
    &                           sources, recv_buffer, MPI_COMM_WORLD)

  if ( myrank == 0 .or. myrank == (nProcs-1) ) then
    local_success = (recv_buffer(1) == myrank)
  else
    local_success = ((recv_buffer(1) == myrank) .and. (recv_buffer(2) == myrank))
  end if

  if ( .not. local_success ) then
    write(*,*) "Rank: ", myrank, " sources: ", sources
    write(*,*) "Rank: ", myrank, " recv_bu: ", recv_buffer
  end if

  call MPI_Reduce( local_success, success, 1, MPI_LOGICAL, MPI_LAND, &
    &              0, MPI_COMM_WORLD, iError )

  if ( myrank == 0 ) then
    if ( success ) then
      write(*,*) "PASSED"
    else
      write(*,*) "FAILED"
    end if
  end if

  call MPI_Finalize(iError)

end program sparse_alltoall_test
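
For context, a sparse all-to-all exchange is typically realized in two steps: every rank first announces with MPI_Alltoall how many items (here 0 or 1 integers) it will send to each other rank, and then the actual payload travels over matching point-to-point messages. The sketch below illustrates that idea in a self-contained way; it is only an illustration of what the test exercises, not the implementation inside tem_sparse_comm_module, and the name sketch_sparse_alltoall_int is made up for this example.

subroutine sketch_sparse_alltoall_int( targets, send_buffer, &
  &                                    sources, recv_buffer, comm )
  use mpi
  implicit none

  integer, intent(in)               :: targets(:)
  integer, intent(in)               :: send_buffer(:)
  integer, allocatable, intent(out) :: sources(:)
  integer, allocatable, intent(out) :: recv_buffer(:)
  integer, intent(in)               :: comm

  integer, allocatable :: sendflag(:), recvflag(:), send_req(:)
  integer :: rstat(MPI_STATUS_SIZE)
  integer :: nProcs, iError, iProc, iTgt, iSrc

  call MPI_Comm_size(comm, nProcs, iError)

  ! Step 1: tell every rank whether a message is coming (0 or 1 integers).
  allocate( sendflag(0:nProcs-1), recvflag(0:nProcs-1) )
  sendflag = 0
  do iTgt = 1, size(targets)
    sendflag(targets(iTgt)) = 1
  end do
  call MPI_Alltoall( sendflag, 1, MPI_INTEGER, &
    &                recvflag, 1, MPI_INTEGER, comm, iError )

  ! Step 2: post the actual point-to-point messages, one integer per target.
  allocate( send_req(size(targets)) )
  do iTgt = 1, size(targets)
    call MPI_Isend( send_buffer(iTgt), 1, MPI_INTEGER, targets(iTgt), &
      &             0, comm, send_req(iTgt), iError )
  end do

  ! Receive one integer from every rank that announced a message.
  allocate( sources(count(recvflag > 0)), recv_buffer(count(recvflag > 0)) )
  iSrc = 0
  do iProc = 0, nProcs-1
    if ( recvflag(iProc) > 0 ) then
      iSrc = iSrc + 1
      sources(iSrc) = iProc
      call MPI_Recv( recv_buffer(iSrc), 1, MPI_INTEGER, iProc, &
        &            0, comm, rstat, iError )
    end if
  end do
  call MPI_Waitall( size(targets), send_req, MPI_STATUSES_IGNORE, iError )

end subroutine sketch_sparse_alltoall_int

Run under the same setup as the test above (e.g. 4 MPI ranks), such an exchange delivers each rank its own rank once per neighbor, which is exactly the condition checked before the final MPI_Reduce.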