C $Header: /u/gcmpack/MITgcm_contrib/MPMice/beaufort/code/eeboot_minimal.F,v 1.3 2012/03/08 21:52:27 dimitri Exp $
C $Name: $

#include "PACKAGES_CONFIG.h"
#include "CPP_EEOPTIONS.h"

CBOP
C     !ROUTINE: EEBOOT_MINIMAL

C     !INTERFACE:
      SUBROUTINE EEBOOT_MINIMAL

C     !DESCRIPTION:
C     *==========================================================*
C     | SUBROUTINE EEBOOT\_MINIMAL
C     | o Set an initial environment that is predictable, i.e.
C     |   behaves in a similar way on all machines, and stable.
C     *==========================================================*
C     | Under MPI this routine calls MPI\_INIT to set up the
C     | MPI environment (on some systems the code is running as
C     | a single process prior to MPI\_INIT, on others the mpirun
C     | script has already created multiple processes). Until
C     | MPI\_Init is called it is unclear what state the
C     | application is in. Once this routine has been run it is
C     | "safe" to do things like I/O to report errors and to get
C     | run parameters.
C     | Note: This routine can also be compiled with CPP
C     | directives set so that no multi-processing is initialised.
C     | This is OK and will work fine.
C     *==========================================================*

C     !USES:
      IMPLICIT NONE
C     == Global data ==
#include "SIZE.h"
#include "EEPARAMS.h"
#include "EESUPPORT.h"

C     !LOCAL VARIABLES:
C     == Local variables ==
C     myThid :: Temp. dummy thread number.
C     fNam   :: Used to build file name for standard and error output.
C     msgBuf :: Used to build messages for printing.
      INTEGER myThid
      CHARACTER*13 fNam
      CHARACTER*(MAX_LEN_MBUF) msgBuf
#ifdef ALLOW_USE_MPI
C     mpiRC  :: Error code reporting variable used with MPI.
      INTEGER mpiRC
      LOGICAL doReport
#ifdef COMPONENT_MODULE
      INTEGER mpiMyWId
#endif
#ifdef ALLOW_CPL_MPMICE
      COMMON /CPL_MPI_ID/
     &     myworldid, local_ocean_leader, local_ice_leader
      integer :: n, myid, numprocs, i, ierr, myworldid, numprocsworld
      integer :: mycomponent
      integer :: icesize, oceansize
      integer :: local_ocean_leader, local_ice_leader
      integer, dimension(:), allocatable :: components
      integer, dimension(:), allocatable :: icegroup, oceangroup
#endif /* ALLOW_CPL_MPMICE */
#if defined(ALLOW_NEST_PARENT) || defined(ALLOW_NEST_CHILD)
      INTEGER mpiMyWId, color
#endif
#endif /* ALLOW_USE_MPI */
CEOP

C--   Default values set to single processor case
      numberOfProcs = 1
      myProcId      = 0
      pidIO         = myProcId
      myProcessStr  = '------'
C     Set a dummy value for myThid because we are not multi-threading yet.
      myThid        = 1

C     Annoyingly there is no universal way to have the usingMPI
C     parameter work as one might expect. This is because, on some
C     systems, I/O does not work until MPI_Init has been called.
C     The solution for now is that the parameter below may need to
C     be changed manually!
#ifdef ALLOW_USE_MPI
      usingMPI = .TRUE.
#else
      usingMPI = .FALSE.
#endif

      IF ( .NOT.usingMPI ) THEN

        WRITE(myProcessStr,'(I4.4)') myProcId
        WRITE(fNam,'(A,A)') 'STDERR.', myProcessStr(1:4)
        OPEN(errorMessageUnit,FILE=fNam,STATUS='unknown')
c       WRITE(fNam,'(A,A)') 'STDOUT.', myProcessStr(1:4)
c       OPEN(standardMessageUnit,FILE=fNam,STATUS='unknown')

#ifdef ALLOW_USE_MPI
      ELSE
C--   MPI style multiple-process initialisation
C--   =========================================

C--   Initialise MPI multi-process parallel environment.
C     On some systems the program forks at this point. Others have
C     already forked within mpirun - now that's an open standard!
        CALL MPI_INIT( mpiRC )
        IF ( mpiRC .NE. MPI_SUCCESS ) THEN
          eeBootError = .TRUE.
          WRITE(msgBuf,'(A,I5)')
     &       'EEBOOT_MINIMAL: MPI_INIT return code', mpiRC
          CALL PRINT_ERROR( msgBuf, myThid )
          GOTO 999
        ENDIF

C--   MPI has now been initialized, but we still need to either
C     ask for a communicator or pretend that we have one:
C     Pretend that we have asked for a communicator
        MPI_COMM_MODEL = MPI_COMM_WORLD
        doReport = .FALSE.
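C     Note: the coupler, nesting, or MPMice code below may later
C     replace MPI_COMM_MODEL with a component sub-communicator.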

#ifdef ALLOW_OASIS
C     add a first, preliminary call to EESET_PARMS to set useOASIS
C     (needed to decide whether or not to call OASIS_INIT)
        CALL EESET_PARMS ( doReport )
        IF ( useOASIS ) CALL OASIS_INIT(MPI_COMM_MODEL)
#endif /* ALLOW_OASIS */

#ifdef COMPONENT_MODULE
C--   Set the running directory
        CALL MPI_COMM_RANK( MPI_COMM_WORLD, mpiMyWId, mpiRC )
        CALL SETDIR( mpiMyWId )

C-    jmc: test:
C     add a first, preliminary call to EESET_PARMS to set useCoupler
C     (needed to decide whether or not to call CPL_INIT)
        CALL EESET_PARMS ( doReport )
C-    jmc: test end ; otherwise, uncomment next line:
c       useCoupler = .TRUE.

C--   Ask coupler interface for a communicator
        IF ( useCoupler ) CALL CPL_INIT
#endif /* COMPONENT_MODULE */

C--   Case with Nest(ing)
#if defined(ALLOW_NEST_PARENT) || defined(ALLOW_NEST_CHILD)
C--   Set the running directory
        CALL MPI_COMM_RANK( MPI_COMM_WORLD, mpiMyWId, mpiRC )
        CALL SETDIR( mpiMyWId )

C--   Setup Nesting Execution Environment
        CALL NEST_EEINIT( mpiMyWId, color )
#endif /* ALLOW_NEST_PARENT | ALLOW_NEST_CHILD */

#if defined(ALLOW_CPL_MPMICE) && !defined(CPL_DEBUG)
        CALL SETDIR_OCEAN( )
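C       (SETDIR_OCEAN is assumed to move this process into the ocean
C        component's run directory, analogous to SETDIR above)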
        call MPI_comm_rank(MPI_COMM_WORLD, myworldid, ierr)
        call MPI_comm_size(MPI_COMM_WORLD, numprocsworld, ierr)

C       allocate array components based on the number of processors
        allocate(components(numprocsworld))

C       assign a component to the ocean code to organize processors
C       into a group
        mycomponent=0
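C       (component id 0 marks an ocean process; the ice model is
C        expected to register itself as component 1, which is what
C        the checks below assume)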

C       gather components to all processors,
C       so each knows who is ice and who is ocean
        call MPI_allgather(mycomponent,1,MPI_INTEGER,components,1,
     &       MPI_INTEGER,MPI_COMM_WORLD,ierr)
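C       on return, components(i) holds the component id registered
C       by world rank i-1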

C       form ice and ocean groups
C       count the processors in each group
        icesize=0
        oceansize=0
        do i=1,numprocsworld
          if(components(i).eq.0) then
            oceansize=oceansize+1
          elseif(components(i).eq.1) then
            icesize=icesize+1
          else
            write(6,*) 'error: processor', i,
     &           'not associated with ice or ocean'
            stop
          endif
        enddo

C       allocate group arrays
        allocate(icegroup(icesize))
        allocate(oceangroup(oceansize))
C       form the groups
C       (MPI ranks run from 0 to numprocsworld-1, hence i-1 below)
        icesize=0
        oceansize=0
        do i=1,numprocsworld
          if(components(i).eq.0) then
            oceansize=oceansize+1
            oceangroup(oceansize)=i-1
          elseif(components(i).eq.1) then
            icesize=icesize+1
            icegroup(icesize)=i-1
          else
            write(6,*) 'error: processor', i,
     &           'not associated with ice or ocean'
          endif
        enddo

C       pick the lowest rank in the group as the local group leader
        local_ocean_leader=oceangroup(1)
        local_ice_leader=icegroup(1)
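C       (the leader ranks are kept in COMMON /CPL_MPI_ID/ together
C        with myworldid, presumably so the coupling layer can address
C        the other component's lead process)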

C       form ocean communicator
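C       MPI_comm_split puts every process with the same color
C       (mycomponent) into one new communicator; using myworldid as
C       the key keeps ranks in their MPI_COMM_WORLD order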
        call MPI_comm_split(MPI_COMM_WORLD,mycomponent,myworldid,
     &       MPI_COMM_MODEL,ierr)
        call MPI_comm_rank(MPI_COMM_MODEL,myid,ierr)
        call MPI_comm_size(MPI_COMM_MODEL,numprocs,ierr)
#endif /* defined(ALLOW_CPL_MPMICE) && !defined(CPL_DEBUG) */

C---+----1----+----2----+----3----+----4----+----5----+----6----+----7-|--+----|

C--   Get my process number
        CALL MPI_COMM_RANK( MPI_COMM_MODEL, mpiMyId, mpiRC )
        IF ( mpiRC .NE. MPI_SUCCESS ) THEN
          eeBootError = .TRUE.
          WRITE(msgBuf,'(A,I5)')
     &       'EEBOOT_MINIMAL: MPI_COMM_RANK return code', mpiRC
          CALL PRINT_ERROR( msgBuf, myThid )
          GOTO 999
        ENDIF
        myProcId = mpiMyId
        WRITE(myProcessStr,'(I4.4)') myProcId
        mpiPidIo = myProcId
        pidIO    = mpiPidIo
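C--   Redirect standard and error output to per-process files
C     STDERR.nnnn / STDOUT.nnnn (nnnn = zero-padded process number)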
        IF ( mpiPidIo .EQ. myProcId ) THEN
          WRITE(fNam,'(A,A)') 'STDERR.', myProcessStr(1:4)
          OPEN(errorMessageUnit,FILE=fNam,STATUS='unknown')
          WRITE(fNam,'(A,A)') 'STDOUT.', myProcessStr(1:4)
          OPEN(standardMessageUnit,FILE=fNam,STATUS='unknown')
        ENDIF

#if defined(ALLOW_NEST_PARENT) || defined(ALLOW_NEST_CHILD)
        WRITE(standardMessageUnit,'(2(A,I6))')
     &       ' mpiMyWId =', mpiMyWId, ' , color =', color
#endif /* ALLOW_NEST_PARENT | ALLOW_NEST_CHILD */

C--   Synchronise all processes
C     Strictly this is superfluous, but by using it we can guarantee to
C     find out about processes that did not start up.
        CALL MPI_BARRIER( MPI_COMM_MODEL, mpiRC )
        IF ( mpiRC .NE. MPI_SUCCESS ) THEN
          eeBootError = .TRUE.
          WRITE(msgBuf,'(A,I6)')
     &       'EEBOOT_MINIMAL: MPI_BARRIER return code', mpiRC
          CALL PRINT_ERROR( msgBuf, myThid )
          GOTO 999
        ENDIF

C--   Get number of MPI processes
        CALL MPI_COMM_SIZE( MPI_COMM_MODEL, mpiNProcs, mpiRC )
        IF ( mpiRC .NE. MPI_SUCCESS ) THEN
          eeBootError = .TRUE.
          WRITE(msgBuf,'(A,I6)')
     &       'EEBOOT_MINIMAL: MPI_COMM_SIZE return code', mpiRC
          CALL PRINT_ERROR( msgBuf, myThid )
          GOTO 999
        ENDIF
        numberOfProcs = mpiNProcs

#endif /* ALLOW_USE_MPI */
      ENDIF

C--   Cannot have more processes than the compile-time MAX_NO_PROCS
      IF ( numberOfProcs .GT. MAX_NO_PROCS ) THEN
        eeBootError = .TRUE.
        WRITE(msgBuf,'(A,2I6)')
     &       'EEBOOT_MINIMAL: Nb. of procs exceeds MAX_NO_PROCS',
     &       numberOfProcs, MAX_NO_PROCS
        CALL PRINT_ERROR( msgBuf, myThid )
        WRITE(msgBuf,'(2A)')
     &       ' Needs to increase MAX_NO_PROCS',
     &       ' in file "EEPARAMS.h" and to re-compile'
        CALL PRINT_ERROR( msgBuf, myThid )
        GOTO 999
      ENDIF
C--   Under MPI only allow same number of processes as proc grid size.
C     Strictly we are allowed more procs but knowing there
C     is an exact match makes things easier.
      IF ( numberOfProcs .NE. nPx*nPy ) THEN
        eeBootError = .TRUE.
        WRITE(msgBuf,'(2(A,I6))')
     &       'EEBOOT_MINIMAL: No. of procs=', numberOfProcs,
     &       ' not equal to nPx*nPy=', nPx*nPy
        CALL PRINT_ERROR( msgBuf, myThid )
        GOTO 999
      ENDIF

#ifdef USE_LIBHPM
      CALL F_HPMINIT(myProcId, "mitgcmuv")
#endif

 999  CONTINUE
      RETURN
      END