Doing Work in a Tree



Sample output from a run with seven tasks arranged as a binary tree (rank 0 at the root, ranks 1 and 2 as its children, and ranks 3 through 6 as leaves):

 Task 6 running on tc-node159.red.lan did some work and passed to 2.
 Task 3 running on tc-node160.red.lan did some work and passed to 1.
 Task 4 running on tc-node157.red.lan did some work and passed to 1.
 Task 5 running on tc-node158.red.lan did some work and passed to 2.
 Task 0 running on tc-node157.red.lan heard from 1 and 2 and finished the work.
 Task 1 running on tc-node158.red.lan heard from 3 and 4, did some work, and passed to 0.
 Task 2 running on tc-node159.red.lan heard from 5 and 6, did some work, and passed to 0.

#include <mpi.h>
#include <stdio.h>
/* A 7-node binary tree */
int main(int argc, char *argv[]) {
    int len;
    char name[MPI_MAX_PROCESSOR_NAME];
    int rank, i, j, tag_starter = 34;
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD,&rank);
    int reorder=1;   /* Allow MPI to reorder the processes; this may improve performance. */
    int index[] = { 2,5,8,9,10,11,12};
    int edges[] = { 1,2,0,3,4,0,5,6,1,1,2,2};
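    /* index[i] holds the cumulative neighbor count for nodes 0..i, and edges lists each
       node's neighbors in order: node 0 connects to 1 and 2, node 1 to 0, 3 and 4,
       node 2 to 0, 5 and 6, and leaf nodes 3-6 connect only to their parents. */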
    int nneighbors, send_rank=-2,receive_ranks[]={-2,-2};
    int tags[7][7];
    for (i = 0; i < 7; i++) {
        for (j = 0; j < 7; j++) {
            if (i < j) tags[i][j] = tag_starter++;
        }
    }
    /* The tags could be used in either direction, but this code only indexes them with
       the smaller (parent) rank first, which the loop above emphasizes by filling only
       the entries with i < j. */
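    /* For example, tags[0][1]=34, tags[0][2]=35, ..., tags[0][6]=39, then tags[1][2]=40, and so on. */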
    int * intptr=&nneighbors;
    int the_neighbors[10];
    MPI_Comm MPI_Tree_World;
    MPI_Graph_create(MPI_COMM_WORLD, 7, index, edges, reorder, &MPI_Tree_World);
    /* With reorder=1 each process may be given a different rank in the graph
       communicator, so query the rank again in MPI_Tree_World. */
    MPI_Comm_rank(MPI_Tree_World, &rank);
    /* int MPI_Graph_neighbors_count(MPI_Comm comm, int rank, int *nneighbors); */
    MPI_Graph_neighbors_count(MPI_Tree_World, rank, intptr);
    /* int MPI_Graph_neighbors(MPI_Comm comm, int rank, int maxneighbors, int *neighbors); */
    MPI_Graph_neighbors(MPI_Tree_World, rank, nneighbors, the_neighbors);
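    /* Sort the neighbors: the single neighbor with a smaller rank is this task's parent
       (the destination for its result), while larger-ranked neighbors are its children
       (the sources it waits on). Rank 0 has no parent; the leaves have no children. */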
    j=0;
    for (i=0;i<nneighbors;i++) {
        if (the_neighbors[i]<rank) {
            send_rank=the_neighbors[i];
        } else {
            receive_ranks[j++]=the_neighbors[i];
        }
    }
    MPI_Get_processor_name(name, &len);
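    /* If receive_ranks was never filled in, this task is a leaf: it does its own work
       and reports straight to its parent. Otherwise it hears from both children first. */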
    if (receive_ranks[0]==-2) {
        MPI_Send(&rank,1, MPI_INT, send_rank, tags[send_rank][rank], MPI_Tree_World);
        printf(" Task %i running on %s did some work and passed to %i.\n ",rank,name,send_rank);
    } else {
        int from_where[2];
        MPI_Recv(&from_where[0],1, MPI_INT, receive_ranks[0], tags[rank][receive_ranks[0]], MPI_Tree_World,  MPI_STATUS_IGNORE);
        MPI_Recv(&from_where[1],1, MPI_INT, receive_ranks[1], tags[rank][receive_ranks[1]], MPI_Tree_World,  MPI_STATUS_IGNORE);
        if (send_rank!=-2) {
            MPI_Send(&rank,1, MPI_INT, send_rank, tags[send_rank][rank], MPI_Tree_World);
            printf(" Task %i running on %s heard from %i and %i did some work and passed to %i.\n ",rank,name, from_where[0],from_where[1],send_rank);
        } else {
            printf(" Task %i running on %s heard from %i and %i and finished the work.\n ",rank,name, from_where[0],from_where[1]);
        }
    }
    MPI_Finalize();
    return 0;
}
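
Assuming the source above is saved as tree.c (any name will do), it can be compiled with the MPI wrapper compiler, for example:

mpicc -o tree tree.c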



tree.sh

#! /bin/bash
#SBATCH --partition=TC
#SBATCH --job-name=treetest
#SBATCH --error=treetest_%j.err
#SBATCH --output=treetest_%j.out
#SBATCH --time=00:00:05
## --mem memory required per node
#SBATCH --mem=1G
#SBATCH --ntasks=7
#SBATCH --nodes=4
#SBATCH --ntasks-per-node=2
mpirun --map-by node ./$1
exit 0
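
The script runs whatever executable name is passed as its first argument (./$1), so with the names assumed above a job could be submitted as:

sbatch tree.sh tree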