Skip to content

Navigation Menu

Sign in
Appearance settings

Search code, repositories, users, issues, pull requests...

Provide feedback

We read every piece of feedback, and take your input very seriously.

Saved searches

Use saved searches to filter your results more quickly

Appearance settings
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 3 additions & 0 deletions 3 tutorials.md
Original file line number Diff line number Diff line change
Expand Up @@ -30,3 +30,6 @@ The tutorials assume that the reader has a basic knowledge of C, some C++, and L

## Groups and communicators
* [Introduction to groups and communicators]({{ site.baseurl }}/tutorials/introduction-to-groups-and-communicators/) ([中文版]({{ site.baseurl }}/tutorials/introduction-to-groups-and-communicators/zh_cn))

## MPI and Fortran
* [Introduction and differences for MPI Fortran]({{ site.baseurl }}/tutorials/mpi-fortran/)
88 changes: 88 additions & 0 deletions 88 tutorials/mpi-fortran/code/all_avg.f90
Original file line number Diff line number Diff line change
@@ -0,0 +1,88 @@
program main
  ! Parallel averaging with MPI_Allgather: the root scatters equal-sized
  ! chunks of a random array, every rank averages its chunk, and the
  ! partial averages are gathered onto ALL ranks so each one can compute
  ! and print the global average.
  !
  ! Usage: all_avg num_elements_per_proc
  use mpi_f08
  use iso_fortran_env, only: error_unit

  implicit none

  ! Explicit interface for the external averaging helper defined below.
  interface
    function compute_avg(array, num_elements)
      real :: compute_avg
      integer, intent(in) :: num_elements
      real, intent(in) :: array(num_elements)
    end function compute_avg
  end interface

  integer :: num_args
  character(12) :: arg  ! buffer for the single command-line argument

  integer :: num_elements_per_proc
  integer :: world_size, world_rank
  real :: sub_avg, avg
  real, allocatable :: rand_nums(:), sub_rand_nums(:), sub_avgs(:)

  num_args = command_argument_count()

  if (num_args /= 1) then
    write (error_unit, *) 'Usage: all_avg num_elements_per_proc'
    stop
  end if

  call get_command_argument(1, arg)
  read (arg, *) num_elements_per_proc

  ! Seed the random number generator to get different results each time
  call random_seed()

  call MPI_Init()

  call MPI_Comm_size(MPI_COMM_WORLD, world_size)
  call MPI_Comm_rank(MPI_COMM_WORLD, world_rank)

  ! Create a random array of elements on the root process. Its total
  ! size will be the number of elements per process times the number
  ! of processes. Non-root ranks get a zero-sized allocation so that
  ! rand_nums is always an allocated, valid actual argument to
  ! MPI_Scatter (passing an unallocated allocatable is invalid Fortran).
  if (world_rank == 0) then
    allocate(rand_nums(num_elements_per_proc * world_size))
    call random_number(rand_nums)
  else
    allocate(rand_nums(0))
  end if

  allocate(sub_rand_nums(num_elements_per_proc))

  ! Use MPI_REAL for Fortran 'real' buffers; MPI_FLOAT is the C datatype
  ! and does not match a Fortran real under MPI's type-matching rules.
  call MPI_Scatter(rand_nums, num_elements_per_proc, MPI_REAL, sub_rand_nums, &
                   num_elements_per_proc, MPI_REAL, 0, MPI_COMM_WORLD)

  ! Compute the average of your subset
  sub_avg = compute_avg(sub_rand_nums, num_elements_per_proc)

  ! Gather all partial averages down to all the processes
  allocate(sub_avgs(world_size))
  call MPI_Allgather(sub_avg, 1, MPI_REAL, sub_avgs, 1, MPI_REAL, MPI_COMM_WORLD)

  ! Now that we have all of the partial averages, compute the
  ! total average of all numbers. Since we are assuming each process computed
  ! an average across an equal amount of elements, this computation will
  ! produce the correct answer.
  avg = compute_avg(sub_avgs, world_size)
  print '("Avg of all elements from proc ", I0, " is ", ES12.5)', world_rank, avg

  ! Clean up (rand_nums is allocated on every rank now, so no rank guard)
  deallocate(rand_nums)
  deallocate(sub_avgs)
  deallocate(sub_rand_nums)

  call MPI_Barrier(MPI_COMM_WORLD)
  call MPI_Finalize()

end program main


function compute_avg(array, num_elements) result(avg)
  ! Return the arithmetic mean of the num_elements values in array.
  implicit none
  integer, intent(in) :: num_elements
  real, intent(in) :: array(num_elements)
  real :: avg

  avg = sum(array) / real(num_elements)
end function compute_avg
95 changes: 95 additions & 0 deletions 95 tutorials/mpi-fortran/code/avg.f90
Original file line number Diff line number Diff line change
@@ -0,0 +1,95 @@

program main
  ! Parallel averaging with MPI_Gather: the root scatters equal-sized
  ! chunks of a random array, every rank averages its chunk, and the
  ! partial averages are gathered back to the root, which computes the
  ! global average and compares it against a direct serial average.
  !
  ! Usage: avg num_elements_per_proc
  use mpi_f08
  use iso_fortran_env, only: error_unit

  implicit none

  ! Explicit interface for the external averaging helper defined below.
  interface
    function compute_avg(array, num_elements)
      real :: compute_avg
      integer, intent(in) :: num_elements
      real, intent(in) :: array(num_elements)
    end function
  end interface

  integer :: num_args
  character(12) :: arg  ! buffer for the single command-line argument

  integer :: num_elements_per_proc
  integer :: world_size, world_rank
  real :: sub_avg, avg, original_data_avg
  real, allocatable :: rand_nums(:), sub_rand_nums(:), sub_avgs(:)

  num_args = command_argument_count()

  if (num_args /= 1) then
    write (error_unit, *) 'Usage: avg num_elements_per_proc'
    stop
  end if

  call get_command_argument(1, arg)
  read (arg, *) num_elements_per_proc

  ! Seed the random number generator to get different results each time
  call random_seed()

  call MPI_Init()

  call MPI_Comm_size(MPI_COMM_WORLD, world_size)
  call MPI_Comm_rank(MPI_COMM_WORLD, world_rank)

  ! Create a random array of elements on the root process. Its total
  ! size will be the number of elements per process times the number
  ! of processes. Non-root ranks get a zero-sized allocation so that
  ! the array is always an allocated, valid actual argument to
  ! MPI_Scatter (passing an unallocated allocatable is invalid Fortran).
  if (world_rank == 0) then
    allocate(rand_nums(num_elements_per_proc * world_size))
    call random_number(rand_nums)
  else
    allocate(rand_nums(0))
  end if

  allocate(sub_rand_nums(num_elements_per_proc))

  ! Use MPI_REAL for Fortran 'real' buffers; MPI_FLOAT is the C datatype
  ! and does not match a Fortran real under MPI's type-matching rules.
  call MPI_Scatter(rand_nums, num_elements_per_proc, MPI_REAL, sub_rand_nums, &
                   num_elements_per_proc, MPI_REAL, 0, MPI_COMM_WORLD)

  ! Compute the average of your subset
  sub_avg = compute_avg(sub_rand_nums, num_elements_per_proc)

  ! Gather all partial averages down to the root process.
  ! The receive buffer is only significant at the root, but every rank
  ! still needs an allocated array to pass, so allocate zero-size elsewhere.
  if (world_rank == 0) then
    allocate(sub_avgs(world_size))
  else
    allocate(sub_avgs(0))
  end if
  call MPI_Gather(sub_avg, 1, MPI_REAL, sub_avgs, 1, MPI_REAL, 0, MPI_COMM_WORLD)

  ! Now that we have all of the partial averages on the root, compute the
  ! total average of all numbers. Since we are assuming each process computed
  ! an average across an equal amount of elements, this computation will
  ! produce the correct answer.
  if (world_rank == 0) then
    avg = compute_avg(sub_avgs, world_size)
    print '("Avg of all elements is ", ES12.5)', avg
    ! Compute the average across the original data for comparison.
    ! (Bug fix: this line previously printed 'avg' again instead of
    ! the freshly computed original_data_avg.)
    original_data_avg = compute_avg(rand_nums, num_elements_per_proc * world_size)
    print '("Avg computed across original data is ", ES12.5)', original_data_avg
  end if

  ! Clean up (all arrays are allocated on every rank now, so no rank guard)
  deallocate(rand_nums)
  deallocate(sub_avgs)
  deallocate(sub_rand_nums)

  call MPI_Barrier(MPI_COMM_WORLD)
  call MPI_Finalize()

end program main

function compute_avg(array, num_elements) result(mean_value)
  ! Arithmetic mean of the num_elements entries of array.
  implicit none
  integer, intent(in) :: num_elements
  real, intent(in) :: array(num_elements)
  real :: mean_value

  mean_value = sum(array) / real(num_elements)
end function compute_avg
46 changes: 46 additions & 0 deletions 46 tutorials/mpi-fortran/code/check_status.f90
Original file line number Diff line number Diff line change
@@ -0,0 +1,46 @@
program check_status
  ! Demonstrates the MPI_Status object: rank 0 sends a random-length
  ! message to rank 1, which receives into an oversized buffer and uses
  ! MPI_Get_count plus the status fields (MPI_SOURCE, MPI_TAG) to find
  ! out how much actually arrived and from whom.
  use mpi_f08

  implicit none

  integer :: world_rank, world_size
  integer, parameter :: MAX_NUMBERS = 100
  ! The contents of 'numbers' are never inspected; only the message
  ! length matters for this demonstration.
  integer :: numbers(MAX_NUMBERS)
  integer :: number_amount
  type(MPI_Status) :: recv_status

  real :: r

  call MPI_Init()
  call MPI_Comm_size(MPI_COMM_WORLD, world_size)
  call MPI_Comm_rank(MPI_COMM_WORLD, world_rank)

  if (world_rank == 0) then
    ! Pick a random amount of integers to send to process one
    call random_seed()
    call random_number(r)
    number_amount = int(r * real(MAX_NUMBERS))
    ! Send the amount of integers to process one.
    ! Use MPI_INTEGER for Fortran 'integer' buffers; MPI_INT is the C
    ! datatype and does not match under MPI's type-matching rules.
    call MPI_Send(numbers, number_amount, MPI_INTEGER, 1, 9, &
                  MPI_COMM_WORLD)
    print '("0 sent ", I0, " numbers to 1")', number_amount
  else if (world_rank == 1) then
    ! Receive at most MAX_NUMBERS from process zero
    call MPI_Recv(numbers, MAX_NUMBERS, MPI_INTEGER, 0, 9, &
                  MPI_COMM_WORLD, recv_status)
    ! After receiving the message, check the status to determine how many
    ! numbers were actually received
    call MPI_Get_count(recv_status, MPI_INTEGER, number_amount)
    ! Print off the amount of numbers, and also print additional information
    ! in the status object
    print '("1 received ", I0, " numbers from 0. Message source = ", I0, ", tag = ", I0)', &
      number_amount, recv_status%MPI_SOURCE, recv_status%MPI_TAG
  end if

  call MPI_Barrier(MPI_COMM_WORLD)

  call MPI_Finalize()

end program check_status
51 changes: 51 additions & 0 deletions 51 tutorials/mpi-fortran/code/comm_groups.f90
Original file line number Diff line number Diff line change
@@ -0,0 +1,51 @@
program main
  ! Builds a sub-communicator containing only the "prime" world ranks
  ! via MPI groups (MPI_Comm_group -> MPI_Group_incl ->
  ! MPI_Comm_create_group), then reports each process's rank/size in
  ! both communicators. Requires at least 14 processes so that every
  ! listed rank exists in MPI_COMM_WORLD.
  use mpi_f08

  implicit none

  integer :: world_rank, world_size
  type(MPI_Group) :: world_group, prime_group
  type(MPI_Comm) :: prime_comm
  integer, parameter :: n = 7  ! number of ranks selected into the group
  integer :: ranks(n)
  integer :: prime_rank, prime_size

  call MPI_Init()

  ! Get the rank and size in the original communicator
  call MPI_Comm_rank(MPI_COMM_WORLD, world_rank)
  call MPI_Comm_size(MPI_COMM_WORLD, world_size)

  ! Get the group of processes in MPI_COMM_WORLD
  call MPI_Comm_group(MPI_COMM_WORLD, world_group)

  ranks = [1, 2, 3, 5, 7, 11, 13]

  ! Construct a group containing all of the prime ranks in world_group.
  ! (Use the named constant n rather than repeating the literal 7, so
  ! the count always matches the size of 'ranks'.)
  call MPI_Group_incl(world_group, n, ranks, prime_group)

  ! Create a new communicator based on the group
  call MPI_Comm_create_group(MPI_COMM_WORLD, prime_group, 0, prime_comm)

  prime_rank = -1
  prime_size = -1
  ! If this rank isn't in the new communicator, it will be MPI_COMM_NULL.
  ! Using MPI_COMM_NULL for MPI_Comm_rank or MPI_Comm_size is erroneous.
  if (prime_comm /= MPI_COMM_NULL) then
    call MPI_Comm_rank(prime_comm, prime_rank)
    call MPI_Comm_size(prime_comm, prime_size)
  end if

  print '("WORLD RANK/SIZE: ", I0, "/", I0, " --- PRIME RANK/SIZE: ", I0, "/", I0)', &
    world_rank, world_size, prime_rank, prime_size

  call MPI_Group_free(world_group)
  call MPI_Group_free(prime_group)

  if (prime_comm /= MPI_COMM_NULL) then
    call MPI_Comm_free(prime_comm)
  end if

  call MPI_Finalize()

end program main
32 changes: 32 additions & 0 deletions 32 tutorials/mpi-fortran/code/comm_split.f90
Original file line number Diff line number Diff line change
@@ -0,0 +1,32 @@
program main
  ! Splits MPI_COMM_WORLD into "row" sub-communicators of fixed width
  ! using MPI_Comm_split, then reports each process's rank/size in both
  ! the world and its row communicator.
  use mpi_f08

  implicit none

  ! Row width; previously a magic number 4 buried in the color formula.
  integer, parameter :: ranks_per_row = 4
  integer :: world_rank, world_size
  integer :: color
  type(MPI_Comm) :: row_comm
  integer :: row_rank, row_size

  call MPI_Init()

  ! Get the rank and size in the original communicator
  call MPI_Comm_rank(MPI_COMM_WORLD, world_rank)
  call MPI_Comm_size(MPI_COMM_WORLD, world_size)

  color = world_rank / ranks_per_row  ! Determine color based on row

  ! Split the communicator based on the color; use the original rank as
  ! the key so ordering within each row follows the world ordering.
  call MPI_Comm_split(MPI_COMM_WORLD, color, world_rank, row_comm)

  call MPI_Comm_rank(row_comm, row_rank)
  call MPI_Comm_size(row_comm, row_size)

  print '("WORLD RANK/SIZE: ", I0, "/", I0, " --- ROW RANK/SIZE: ", I0, "/", I0)', &
    world_rank, world_size, row_rank, row_size

  call MPI_Comm_free(row_comm)

  call MPI_Finalize()

end program main
Loading
Morty Proxy This is a proxified and sanitized view of the page, visit original site.