C $Header: /u/gcmpack/MITgcm/eesupp/src/Attic/gather_2d.F,v 1.10 2009/05/16 13:41:19 jmc dead $
C $Name: $

#include "CPP_EEOPTIONS.h"

      SUBROUTINE GATHER_2D( global, local, myThid )
C     Gather elements of a 2-D array from all MPI processes to process 0.
      IMPLICIT NONE
#include "SIZE.h"
#include "EEPARAMS.h"
#include "EESUPPORT.h"
C     myThid       - thread number for this instance of the routine.
C     global,local - working arrays used to transfer 2-D fields
      INTEGER myThid
      Real*8  global(Nx,Ny)
      _RL     local(1:sNx,1:sNy,nSx,nSy)

      INTEGER iG, jG, i, j, bi, bj
#ifdef ALLOW_USE_MPI
      _RL     temp(1:sNx,1:sNy,nSx,nSy)
      INTEGER istatus(MPI_STATUS_SIZE), ierr
      INTEGER lbuff, idest, itag, npe, ready_to_receive
#endif /* ALLOW_USE_MPI */
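
C     Usage sketch (illustrative names only, not defined here):
C       Real*8 fldGlobal(Nx,Ny)
C       _RL    fldTiled (1:sNx,1:sNy,nSx,nSy)
C       CALL GATHER_2D( fldGlobal, fldTiled, myThid )
C     The assembled fldGlobal is complete only on process 0.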

C--   Make everyone wait except for master thread.
      _BARRIER
      _BEGIN_MASTER( myThid )

#ifndef ALLOW_USE_MPI

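C--   Without MPI there is only one process: copy each tile of the
C     local array directly into its place in the global array.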
      DO bj=1,nSy
       DO bi=1,nSx
        DO j=1,sNy
         DO i=1,sNx
          iG = myXGlobalLo-1+(bi-1)*sNx+i
          jG = myYGlobalLo-1+(bj-1)*sNy+j
          global(iG,jG) = local(i,j,bi,bj)
         ENDDO
        ENDDO
       ENDDO
      ENDDO

#else /* ALLOW_USE_MPI */

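C--   MPI case: every process sends lbuff values (its full set of
C     tiles) to process 0 (idest), using a common message tag.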
      lbuff = sNx*nSx*sNy*nSy
      idest = 0
      itag  = 0
      ready_to_receive = 0

      IF( mpiMyId .EQ. 0 ) THEN

C--    Process 0 fills in its own local data
       npe = 0
       DO bj=1,nSy
        DO bi=1,nSx
         DO j=1,sNy
#ifdef TARGET_NEC_SX
!cdir novector
#endif
          DO i=1,sNx
           iG = mpi_myXGlobalLo(npe+1)-1+(bi-1)*sNx+i
           jG = mpi_myYGlobalLo(npe+1)-1+(bj-1)*sNy+j
           global(iG,jG) = local(i,j,bi,bj)
          ENDDO
         ENDDO
        ENDDO
       ENDDO

C--    Process 0 polls and receives data from each process in turn.
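C      (The small "ready to receive" handshake below makes each sender
C      wait until process 0 is ready for its message; it is skipped
C      when DISABLE_MPI_READY_TO_RECEIVE is defined.)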
       DO npe = 1, numberOfProcs-1
#ifndef DISABLE_MPI_READY_TO_RECEIVE
        CALL MPI_SEND (ready_to_receive, 1, MPI_INTEGER,
     &                 npe, itag, MPI_COMM_MODEL, ierr)
#endif
        CALL MPI_RECV (temp, lbuff, MPI_DOUBLE_PRECISION,
     &                 npe, itag, MPI_COMM_MODEL, istatus, ierr)

C--    Process 0 gathers the local arrays into a global array.
        DO bj=1,nSy
         DO bi=1,nSx
          DO j=1,sNy
#ifdef TARGET_NEC_SX
!cdir novector
#endif
           DO i=1,sNx
            iG = mpi_myXGlobalLo(npe+1)-1+(bi-1)*sNx+i
            jG = mpi_myYGlobalLo(npe+1)-1+(bj-1)*sNy+j
            global(iG,jG) = temp(i,j,bi,bj)
           ENDDO
          ENDDO
         ENDDO
        ENDDO
       ENDDO

      ELSE

C--    All processes except 0 wait to be polled, then send local array
#ifndef DISABLE_MPI_READY_TO_RECEIVE
       CALL MPI_RECV (ready_to_receive, 1, MPI_INTEGER,
     &                idest, itag, MPI_COMM_MODEL, istatus, ierr)
#endif
       CALL MPI_SEND (local, lbuff, MPI_DOUBLE_PRECISION,
     &                idest, itag, MPI_COMM_MODEL, ierr)

      ENDIF

#endif /* ALLOW_USE_MPI */

      _END_MASTER( myThid )
      _BARRIER

      RETURN
      END