MITgcm_contrib/MPMice/beaufort/code/eeboot_minimal.F


Revision 1.5 - Sat Oct 4 03:24:19 2014 UTC by dimitri
Branch: MAIN, CVS tag: HEAD
Changes since 1.4: +24 -18 lines
updating beaufort test to checkpoint65e

C $Header: /u/gcmpack/MITgcm_contrib/MPMice/beaufort/code/eeboot_minimal.F,v 1.4 2013/10/03 18:37:48 dimitri Exp $
C $Name: $

#include "PACKAGES_CONFIG.h"
#include "CPP_EEOPTIONS.h"

CBOP
C     !ROUTINE: EEBOOT_MINIMAL

C     !INTERFACE:
      SUBROUTINE EEBOOT_MINIMAL

C     !DESCRIPTION:
C     *==========================================================*
C     | SUBROUTINE EEBOOT\_MINIMAL
C     | o Set an initial environment that is predictable, i.e.
C     |   behaves in a similar way on all machines, and stable.
C     *==========================================================*
C     | Under MPI this routine calls MPI\_INIT to set up the
C     | MPI environment (on some systems the code is running as
C     | a single process prior to MPI\_INIT, on others the mpirun
C     | script has already created multiple processes). Until
C     | MPI\_Init is called it is unclear what state the
C     | application is in. Once this routine has been run it is
C     | "safe" to do things like I/O to report errors and to get
C     | run parameters.
C     | Note: This routine can also be compiled with CPP
C     | directives set so that no multi-processing is initialised.
C     | This is OK and will work fine.
C     *==========================================================*
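C     Note: in the standard MITgcm startup sequence this is the first
C     routine EEBOOT calls, before EESET_PARMS and the rest of the
C     execution-environment setup.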

C     !USES:
      IMPLICIT NONE
C     == Global data ==
#include "SIZE.h"
#include "EEPARAMS.h"
#include "EESUPPORT.h"

C     !LOCAL VARIABLES:
C     == Local variables ==
C     myThid :: Temp. dummy thread number.
C     fNam   :: Used to build file name for standard and error output.
C     msgBuf :: Used to build messages for printing.
      INTEGER myThid
      CHARACTER*13 fNam
      CHARACTER*(MAX_LEN_MBUF) msgBuf
#ifdef ALLOW_USE_MPI
C     mpiRC  :: Error code reporting variable used with MPI.
      INTEGER mpiRC
      LOGICAL doReport
#ifdef COMPONENT_MODULE
      INTEGER mpiMyWid
#endif
#ifdef ALLOW_CPL_MPMICE
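C     CPL_MPI_ID shares the world rank and the two group-leader ranks
C     with the rest of the MPMice coupling code, which is assumed to
C     declare the same common block; the values are set further below.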
      COMMON /CPL_MPI_ID/
     &       myworldid, local_ocean_leader, local_ice_leader
      integer :: n, myid, numprocs, i, ierr, myworldid, numprocsworld
      integer :: mycomponent
      integer :: icesize, oceansize
      integer :: local_ocean_leader, local_ice_leader
      integer, dimension(:), allocatable :: components
      integer, dimension(:), allocatable :: icegroup, oceangroup
#endif /* ALLOW_CPL_MPMICE */
#if defined(ALLOW_NEST_PARENT) || defined(ALLOW_NEST_CHILD)
      INTEGER mpiMyWid, color
#endif
#endif /* ALLOW_USE_MPI */
CEOP

C--   Default values set to single processor case
      numberOfProcs = 1
      myProcId = 0
      pidIO = myProcId
      myProcessStr = '------'
C     Set a dummy value for myThid because we are not multi-threading yet.
      myThid = 1

C     Annoyingly there is no universal way to have the usingMPI
C     parameter work as one might expect. This is because, on some
C     systems, I/O does not work until MPI_Init has been called.
C     The solution for now is that the parameter below may need to
C     be changed manually!
#ifdef ALLOW_USE_MPI
      usingMPI = .TRUE.
#else
      usingMPI = .FALSE.
#endif

      IF ( .NOT.usingMPI ) THEN

        WRITE(myProcessStr,'(I4.4)') myProcId
        WRITE(fNam,'(A,A)') 'STDERR.', myProcessStr(1:4)
        OPEN(errorMessageUnit,FILE=fNam,STATUS='unknown')
c       WRITE(fNam,'(A,A)') 'STDOUT.', myProcessStr(1:4)
c       OPEN(standardMessageUnit,FILE=fNam,STATUS='unknown')

#ifdef ALLOW_USE_MPI
      ELSE
C--   MPI style multiple-process initialisation
C--   =========================================

C--     Initialise MPI multi-process parallel environment.
C       On some systems the program forks at this point. Others have
C       already forked within mpirun - now that's an open standard!
        CALL MPI_INIT( mpiRC )
        IF ( mpiRC .NE. MPI_SUCCESS ) THEN
          eeBootError = .TRUE.
          WRITE(msgBuf,'(A,I5)')
     &         'EEBOOT_MINIMAL: MPI_INIT return code', mpiRC
          CALL PRINT_ERROR( msgBuf, myThid )
          GOTO 999
        ENDIF

C--     MPI has now been initialised, but we still need to either
C       ask for a communicator or pretend that we have one:
C       pretend that we have asked for a communicator
        MPI_COMM_MODEL = MPI_COMM_WORLD
        doReport = .FALSE.

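C       Note: the coupled and nested configurations below may replace
C       MPI_COMM_MODEL with a sub-communicator obtained by splitting
C       MPI_COMM_WORLD; all subsequent MPI calls in this routine use
C       MPI_COMM_MODEL rather than MPI_COMM_WORLD.
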
#ifdef ALLOW_OASIS
C       add a first preliminary call to EESET_PARMS to set useOASIS
C       (needed to decide whether or not to call OASIS_INIT)
        CALL EESET_PARMS ( doReport )
        IF ( useOASIS ) CALL OASIS_INIT(MPI_COMM_MODEL)
#endif /* ALLOW_OASIS */

#ifdef COMPONENT_MODULE
C--     Set the running directory
        CALL MPI_COMM_RANK( MPI_COMM_WORLD, mpiMyWId, mpiRC )
        CALL SETDIR( mpiMyWId )

C-      jmc: test:
C       add a first preliminary call to EESET_PARMS to set useCoupler
C       (needed to decide whether or not to call CPL_INIT)
        CALL EESET_PARMS ( doReport )
C-      jmc: test end ; otherwise, uncomment next line:
c       useCoupler = .TRUE.

C--     Ask coupler interface for a communicator
        IF ( useCoupler ) CALL CPL_INIT
#endif /* COMPONENT_MODULE */

C--   Case with Nest(ing)
#if defined(ALLOW_NEST_PARENT) || defined(ALLOW_NEST_CHILD)
C--     Set the running directory
        CALL MPI_COMM_RANK( MPI_COMM_WORLD, mpiMyWId, mpiRC )
        CALL SETDIR( mpiMyWId )

C--     Setup Nesting Execution Environment
        CALL NEST_EEINIT( mpiMyWId, color )
#endif /* ALLOW_NEST_PARENT | ALLOW_NEST_CHILD */

#if defined(ALLOW_CPL_MPMICE) && !defined(CPL_DEBUG)
        CALL SETDIR_OCEAN( )
        call MPI_comm_rank(MPI_COMM_WORLD, myworldid, ierr)
        call MPI_comm_size(MPI_COMM_WORLD, numprocsworld, ierr)

C       allocate array components based on the number of processors
        allocate(components(numprocsworld))

C       assign a component number to the ocean code, used to organise
C       processors into groups (ocean=0, ice=1)
        mycomponent=0

C       gather components to all processors,
C       so each knows who is ice and who is ocean
        call MPI_allgather(mycomponent,1,MPI_INTEGER,components,1,
     &                     MPI_INTEGER,MPI_COMM_WORLD,ierr)

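C       Worked example (hypothetical ranks): if the job runs on six
C       world processes, ranks 0-3 being ocean (mycomponent=0) and
C       ranks 4-5 being the ice code (assumed to contribute
C       mycomponent=1 to the same allgather), then every rank ends
C       up with components = (/0,0,0,0,1,1/), giving oceansize=4,
C       icesize=2, oceangroup=(/0,1,2,3/) and icegroup=(/4,5/).
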
C       form ice and ocean groups
C       count the processors in each group
        icesize=0
        oceansize=0
        do i=1,numprocsworld
          if(components(i).eq.0) then
            oceansize=oceansize+1
          elseif(components(i).eq.1) then
            icesize=icesize+1
          else
            write(6,*) 'error: processor ', i,
     &                 ' not associated with ice or ocean'
            stop
          endif
        enddo

C       allocate group arrays
        allocate(icegroup(icesize))
        allocate(oceangroup(oceansize))
C       form the groups
        icesize=0
        oceansize=0
        do i=1,numprocsworld
          if(components(i).eq.0) then
            oceansize=oceansize+1
            oceangroup(oceansize)=i-1 ! ranks are from 0 to numprocsworld-1
          elseif(components(i).eq.1) then
            icesize=icesize+1
            icegroup(icesize)=i-1 ! ranks are from 0 to numprocsworld-1
          else
            write(6,*) 'error: processor ', i,
     &                 ' not associated with ice or ocean'
          endif
        enddo

C       pick the lowest rank in each group as the local group leader
        local_ocean_leader=oceangroup(1)
        local_ice_leader=icegroup(1)

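C       Note: this assumes both groups are non-empty; with no ice
C       (or no ocean) process, icegroup(1) or oceangroup(1) above
C       would be an out-of-bounds reference.
C       MPI_comm_split below partitions MPI_COMM_WORLD by colour
C       (mycomponent), with the world rank as the ordering key, so
C       all ocean processes land in one new communicator, which
C       becomes MPI_COMM_MODEL; the ice code is assumed to make the
C       matching split call with its own colour.
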
C       form ocean communicator
        call MPI_comm_split(MPI_COMM_WORLD,mycomponent,myworldid,
     &                      MPI_COMM_MODEL,ierr)
        call MPI_comm_rank(MPI_COMM_MODEL,myid,ierr)
        call MPI_comm_size(MPI_COMM_MODEL,numprocs,ierr)
#endif /* defined(ALLOW_CPL_MPMICE) && !defined(CPL_DEBUG) */

C---+----1----+----2----+----3----+----4----+----5----+----6----+----7-|--+----|

C--     Get my process number
        CALL MPI_COMM_RANK( MPI_COMM_MODEL, mpiMyId, mpiRC )
        IF ( mpiRC .NE. MPI_SUCCESS ) THEN
          eeBootError = .TRUE.
          WRITE(msgBuf,'(A,I5)')
     &         'EEBOOT_MINIMAL: MPI_COMM_RANK return code', mpiRC
          CALL PRINT_ERROR( msgBuf, myThid )
          GOTO 999
        ENDIF
        myProcId = mpiMyId
        WRITE(myProcessStr,'(I4.4)') myProcId
        mpiPidIo = myProcId
        pidIO = mpiPidIo
        IF ( mpiPidIo .EQ. myProcId ) THEN
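C         With SINGLE_DISK_IO defined, only process 0 opens the usual
C         STDOUT./STDERR. files; every other process sends both
C         message streams to /dev/null, hence the warning printed
C         below.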
#ifdef SINGLE_DISK_IO
          IF( myProcId .EQ. 0 ) THEN
#endif
            WRITE(fNam,'(A,A)') 'STDERR.', myProcessStr(1:4)
            OPEN(errorMessageUnit,FILE=fNam,STATUS='unknown')
            WRITE(fNam,'(A,A)') 'STDOUT.', myProcessStr(1:4)
            OPEN(standardMessageUnit,FILE=fNam,STATUS='unknown')
#ifdef SINGLE_DISK_IO
          ELSE
            OPEN(errorMessageUnit,FILE='/dev/null',STATUS='unknown')
            standardMessageUnit=errorMessageUnit
          ENDIF
          IF( myProcId .EQ. 0 ) THEN
            WRITE(msgBuf,'(2A)') '** WARNING ** EEBOOT_MINIMAL: ',
     &          'defined SINGLE_DISK_IO will result in losing'
            CALL PRINT_MESSAGE( msgBuf, errorMessageUnit,
     &                          SQUEEZE_RIGHT, myThid )
            WRITE(msgBuf,'(2A)') '** WARNING ** EEBOOT_MINIMAL: ',
     &          'any message (error/warning) from any proc <> 0'
            CALL PRINT_MESSAGE( msgBuf, errorMessageUnit,
     &                          SQUEEZE_RIGHT, myThid )
          ENDIF
#endif
        ENDIF

#if defined(ALLOW_NEST_PARENT) || defined(ALLOW_NEST_CHILD)
        WRITE(standardMessageUnit,'(2(A,I6))')
     &       ' mpiMyWId =', mpiMyWId, ' , color =', color
#endif /* ALLOW_NEST_PARENT | ALLOW_NEST_CHILD */

C--     Synchronise all processes
C       Strictly this is superfluous, but by using it we can guarantee
C       to find out about processes that did not start up.
        CALL MPI_BARRIER( MPI_COMM_MODEL, mpiRC )
        IF ( mpiRC .NE. MPI_SUCCESS ) THEN
          eeBootError = .TRUE.
          WRITE(msgBuf,'(A,I6)')
     &         'EEBOOT_MINIMAL: MPI_BARRIER return code', mpiRC
          CALL PRINT_ERROR( msgBuf, myThid )
          GOTO 999
        ENDIF

C--     Get number of MPI processes
        CALL MPI_COMM_SIZE( MPI_COMM_MODEL, mpiNProcs, mpiRC )
        IF ( mpiRC .NE. MPI_SUCCESS ) THEN
          eeBootError = .TRUE.
          WRITE(msgBuf,'(A,I6)')
     &         'EEBOOT_MINIMAL: MPI_COMM_SIZE return code', mpiRC
          CALL PRINT_ERROR( msgBuf, myThid )
          GOTO 999
        ENDIF
        numberOfProcs = mpiNProcs

#endif /* ALLOW_USE_MPI */
      ENDIF

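C     For example, a configuration compiled with nPx=2 and nPy=4 in
C     SIZE.h must be launched on exactly 8 MPI processes, otherwise
C     the check below aborts the run.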
C--   Under MPI only allow the same number of processes as the proc
C     grid size. Strictly we are allowed more procs, but knowing there
C     is an exact match makes things easier.
      IF ( numberOfProcs .NE. nPx*nPy ) THEN
        eeBootError = .TRUE.
        WRITE(msgBuf,'(2(A,I6))')
     &       'EEBOOT_MINIMAL: No. of procs=', numberOfProcs,
     &       ' not equal to nPx*nPy=', nPx*nPy
        CALL PRINT_ERROR( msgBuf, myThid )
        GOTO 999
      ENDIF

#ifdef USE_LIBHPM
      CALL F_HPMINIT(myProcId, "mitgcmuv")
#endif

  999 CONTINUE
      RETURN
      END
