module_dm.F
1 !WRF:PACKAGE:RSL
2 !
3 MODULE module_dm
4
5 USE module_machine
6 USE module_configure
7 USE module_state_description
8 USE module_wrf_error
9 USE module_driver_constants
10 USE module_comm_dm
11 IMPLICIT NONE
12
13 #if ( NMM_CORE == 1 ) || defined( WRF_CHEM )
14 INTEGER, PARAMETER :: max_halo_width = 6
15 #else
16 INTEGER, PARAMETER :: max_halo_width = 5
17 #endif
18
19 INTEGER :: ips_save, ipe_save, jps_save, jpe_save, itrace
20
21 INTEGER ntasks, ntasks_y, ntasks_x, mytask, mytask_x, mytask_y
22 INTEGER local_communicator, local_communicator_periodic, local_iocommunicator
23 INTEGER local_communicator_x, local_communicator_y ! subcommunicators for rows and cols of mesh
24 LOGICAL :: dm_debug_flag = .FALSE.
25
26 INTERFACE wrf_dm_maxval
27 #ifdef PROMOTE_FLOAT
28 MODULE PROCEDURE wrf_dm_maxval_real , wrf_dm_maxval_integer
29 #else
30 MODULE PROCEDURE wrf_dm_maxval_real , wrf_dm_maxval_integer, wrf_dm_maxval_doubleprecision
31 #endif
32 END INTERFACE
33
34 INTERFACE wrf_dm_minval ! gopal's doing
35 #ifdef PROMOTE_FLOAT
36 MODULE PROCEDURE wrf_dm_minval_real , wrf_dm_minval_integer
37 #else
38 MODULE PROCEDURE wrf_dm_minval_real , wrf_dm_minval_integer, wrf_dm_minval_doubleprecision
39 #endif
40 END INTERFACE
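! The generic names wrf_dm_maxval and wrf_dm_minval resolve to the REAL, INTEGER,
! or DOUBLE PRECISION routines below according to the type of the first argument.
! For illustration (the names here are illustrative, not from this file), a caller might do
!    CALL wrf_dm_maxval( local_max, i_of_max, j_of_max )
! passing its local maximum and that value's (i,j) indices; on return every task
! holds the global maximum together with the indices reported by the task that owned it.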
41
42 CONTAINS
43
44
45 SUBROUTINE MPASPECT( P, MINM, MINN, PROCMIN_M, PROCMIN_N )
46 IMPLICIT NONE
47 INTEGER P, M, N, MINI, MINM, MINN, PROCMIN_M, PROCMIN_N
48 MINI = 2*P
49 MINM = 1
50 MINN = P
51 DO M = 1, P
52 IF ( MOD( P, M ) .EQ. 0 ) THEN
53 N = P / M
54 IF ( ABS(M-N) .LT. MINI &
55 .AND. M .GE. PROCMIN_M &
56 .AND. N .GE. PROCMIN_N &
57 ) THEN
58 MINI = ABS(M-N)
59 MINM = M
60 MINN = N
61 ENDIF
62 ENDIF
63 ENDDO
64 IF ( MINM .LT. PROCMIN_M .OR. MINN .LT. PROCMIN_N ) THEN
65 WRITE( wrf_err_message , * )'MPASPECT: UNABLE TO GENERATE PROCESSOR MESH. STOPPING.'
66 CALL wrf_message ( TRIM ( wrf_err_message ) )
67 WRITE(0,*)' PROCMIN_M ', PROCMIN_M
68 WRITE( wrf_err_message , * )' PROCMIN_M ', PROCMIN_M
69 CALL wrf_message ( TRIM ( wrf_err_message ) )
70 WRITE( wrf_err_message , * )' PROCMIN_N ', PROCMIN_N
71 CALL wrf_message ( TRIM ( wrf_err_message ) )
72 WRITE( wrf_err_message , * )' P ', P
73 CALL wrf_message ( TRIM ( wrf_err_message ) )
74 WRITE( wrf_err_message , * )' MINM ', MINM
75 CALL wrf_message ( TRIM ( wrf_err_message ) )
76 WRITE( wrf_err_message , * )' MINN ', MINN
77 CALL wrf_message ( TRIM ( wrf_err_message ) )
78 CALL wrf_error_fatal ( 'module_dm: mpaspect' )
79 ENDIF
80 RETURN
81 END SUBROUTINE MPASPECT
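! For illustration: MPASPECT factors the task count P into the mesh dimensions
! (MINM, MINN) whose difference is smallest, subject to the minimum requirements
! PROCMIN_M and PROCMIN_N.  For example,
!    CALL MPASPECT( 12, m, n, 1, 1 )   ! gives m = 3, n = 4
!    CALL MPASPECT( 16, m, n, 1, 1 )   ! gives m = 4, n = 4
! A prime task count such as 7 can only produce a 1 x 7 mesh.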
82
83 SUBROUTINE wrf_dm_initialize
84 IMPLICIT NONE
85 INCLUDE 'mpif.h'
86 INTEGER :: local_comm, local_comm2, new_local_comm, group, newgroup, p, p1, ierr
87 INTEGER, ALLOCATABLE, DIMENSION(:) :: ranks
88 INTEGER comdup
89 INTEGER, DIMENSION(2) :: dims, coords
90 LOGICAL, DIMENSION(2) :: isperiodic
91 LOGICAL :: reorder_mesh
92
93 CALL wrf_get_dm_communicator ( local_comm )
94 CALL mpi_comm_size( local_comm, ntasks, ierr )
95 CALL nl_get_nproc_x ( 1, ntasks_x )
96 CALL nl_get_nproc_y ( 1, ntasks_y )
97 CALL nl_get_reorder_mesh( 1, reorder_mesh )
98
99 ! check whether the user has specified the decomposition in the namelist
100 IF ( ntasks_x .GT. 0 .OR. ntasks_y .GT. 0 ) THEN
101 ! if only nproc_x is specified then derive ntasks_y from the total task count
102 IF ( ntasks_x .GT. 0 .AND. ntasks_y .EQ. -1 ) THEN
103 ntasks_y = ntasks / ntasks_x
104 ! if only nproc_y is specified then derive ntasks_x from the total task count
105 ELSE IF ( ntasks_x .EQ. -1 .AND. ntasks_y .GT. 0 ) THEN
106 ntasks_x = ntasks / ntasks_y
107 ENDIF
108 ! make sure user knows what they're doing
109 IF ( ntasks_x * ntasks_y .NE. ntasks ) THEN
110 WRITE( wrf_err_message , * )'WRF_DM_INITIALIZE (RSL_LITE): nproc_x * nproc_y in namelist ne ',ntasks
111 CALL wrf_error_fatal ( wrf_err_message )
112 ENDIF
113 ELSE
114 ! When neither is specified, work out the mesh with MPASPECT,
115 ! which chooses the factorization so that the number of procs in the
116 ! i-dim (ntasks_x) is less than or equal to the number in the j-dim (ntasks_y).
117 CALL mpaspect ( ntasks, ntasks_x, ntasks_y, 1, 1 )
118 ENDIF
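! For illustration (assuming the usual WRF namelist layout): nproc_x and nproc_y
! are set in namelist.input, typically in the &domains record, e.g.
!    nproc_x = 8,
!    nproc_y = 8,
! for a 64-task run; leaving both at the default of -1 lets MPASPECT pick the mesh.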
119 WRITE( wrf_err_message , * )'Ntasks in X ',ntasks_x,', ntasks in Y ',ntasks_y
120 CALL wrf_message( wrf_err_message )
121
122 CALL mpi_comm_rank( local_comm, mytask, ierr )
123 ! extra code to reorder the communicator 20051212jm
124 IF ( reorder_mesh ) THEN
125 write(0,*)'reordering mesh'
126 ALLOCATE (ranks(ntasks))
127 CALL mpi_comm_dup ( local_comm , local_comm2, ierr )
128 CALL mpi_comm_group ( local_comm2, group, ierr )
129 DO p1=1,ntasks
130 p = p1 - 1
131 ranks(p1) = mod( p , ntasks_x ) * ntasks_y + p / ntasks_x
132 ENDDO
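! For illustration: with ntasks_x = 4 and ntasks_y = 2 the loop above builds
! ranks = (0,2,4,6,1,3,5,7), so consecutive original MPI ranks end up in the
! same column (same X index) of the reordered process mesh created below.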
133 CALL mpi_group_incl( group, ntasks, ranks, newgroup, ierr )
134 DEALLOCATE (ranks)
135 CALL mpi_comm_create( local_comm2, newgroup, new_local_comm , ierr )
136 ELSE
137 new_local_comm = local_comm
138 ENDIF
139 ! end extra code to reorder the communicator 20051212jm
140 dims(1) = ntasks_y ! rows
141 dims(2) = ntasks_x ! columns
142 isperiodic(1) = .false.
143 isperiodic(2) = .false.
144 CALL mpi_cart_create( new_local_comm, 2, dims, isperiodic, .false., local_communicator, ierr )
145 dims(1) = ntasks_y ! rows
146 dims(2) = ntasks_x ! columns
147 isperiodic(1) = .true.
148 isperiodic(2) = .true.
149 CALL mpi_cart_create( new_local_comm, 2, dims, isperiodic, .false., local_communicator_periodic, ierr )
150 ! debug
151 CALL mpi_comm_rank( local_communicator_periodic, mytask, ierr )
152 CALL mpi_cart_coords( local_communicator_periodic, mytask, 2, coords, ierr )
153 #if (DA_CORE != 1)
154 write(0,*)'periodic coords ',mytask, coords
155 #endif
156 CALL mpi_comm_rank( local_communicator, mytask, ierr )
157 CALL mpi_cart_coords( local_communicator, mytask, 2, coords, ierr )
158 #if (DA_CORE != 1)
159 write(0,*)'non periodic coords ',mytask, coords
160 #endif
161 mytask_x = coords(2) ! col task (x)
162 mytask_y = coords(1) ! row task (y)
163 CALL nl_set_nproc_x ( 1, ntasks_x )
164 CALL nl_set_nproc_y ( 1, ntasks_y )
165
166 ! 20061228 set up subcommunicators for processors in X, Y coords of mesh
167 ! note that local_comm_x has all the processors in a row (X=0:nproc_x-1);
168 ! in other words, local_comm_x has all the processes with the same rank in Y
169 CALL MPI_Comm_dup( new_local_comm, comdup, ierr )
170 IF ( ierr .NE. 0 ) CALL wrf_error_fatal('MPI_Comm_dup fails in 20061228 mod')
171 CALL MPI_Comm_split(comdup,mytask_y,mytask,local_communicator_x,ierr)
172 IF ( ierr .NE. 0 ) CALL wrf_error_fatal('MPI_Comm_split fails for x in 20061228 mod')
173 CALL MPI_Comm_split(comdup,mytask_x,mytask,local_communicator_y,ierr)
174 IF ( ierr .NE. 0 ) CALL wrf_error_fatal('MPI_Comm_split fails for y in 20061228 mod')
175 ! end 20061228
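! For illustration: on a 4 x 2 mesh (ntasks_x = 4, ntasks_y = 2) the two
! MPI_Comm_split calls above give each task
!   local_communicator_x : the 4 tasks in its row    (same mytask_y)
!   local_communicator_y : the 2 tasks in its column (same mytask_x)
! These are the dimensional communicators referred to in the transpose
! discussion in compute_memory_dims_rsl_lite below.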
176
177 CALL wrf_set_dm_communicator ( local_communicator )
178 RETURN
179 END SUBROUTINE wrf_dm_initialize
180
181 SUBROUTINE patch_domain_rsl_lite( id , parent, parent_id, &
182 sd1 , ed1 , sp1 , ep1 , sm1 , em1 , &
183 sd2 , ed2 , sp2 , ep2 , sm2 , em2 , &
184 sd3 , ed3 , sp3 , ep3 , sm3 , em3 , &
185 sp1x , ep1x , sm1x , em1x , &
186 sp2x , ep2x , sm2x , em2x , &
187 sp3x , ep3x , sm3x , em3x , &
188 sp1y , ep1y , sm1y , em1y , &
189 sp2y , ep2y , sm2y , em2y , &
190 sp3y , ep3y , sm3y , em3y , &
191 bdx , bdy )
192
193 USE module_domain
194 USE module_machine
195
196 IMPLICIT NONE
197 INTEGER, INTENT(IN) :: sd1 , ed1 , sd2 , ed2 , sd3 , ed3 , bdx , bdy
198 INTEGER, INTENT(OUT) :: sp1 , ep1 , sp2 , ep2 , sp3 , ep3 , &
199 sm1 , em1 , sm2 , em2 , sm3 , em3
200 INTEGER, INTENT(OUT) :: sp1x , ep1x , sp2x , ep2x , sp3x , ep3x , &
201 sm1x , em1x , sm2x , em2x , sm3x , em3x
202 INTEGER, INTENT(OUT) :: sp1y , ep1y , sp2y , ep2y , sp3y , ep3y , &
203 sm1y , em1y , sm2y , em2y , sm3y , em3y
204 INTEGER, INTENT(IN) :: id, parent_id
205 TYPE(domain),POINTER :: parent
206
207 ! Local variables
208 INTEGER :: ids, ide, jds, jde, kds, kde
209 INTEGER :: ims, ime, jms, jme, kms, kme
210 INTEGER :: ips, ipe, jps, jpe, kps, kpe
211 INTEGER :: imsx, imex, jmsx, jmex, kmsx, kmex
212 INTEGER :: ipsx, ipex, jpsx, jpex, kpsx, kpex
213 INTEGER :: imsy, imey, jmsy, jmey, kmsy, kmey
214 INTEGER :: ipsy, ipey, jpsy, jpey, kpsy, kpey
215
216 INTEGER :: c_sd1 , c_ed1 , c_sd2 , c_ed2 , c_sd3 , c_ed3
217 INTEGER :: c_sp1 , c_ep1 , c_sp2 , c_ep2 , c_sp3 , c_ep3 , &
218 c_sm1 , c_em1 , c_sm2 , c_em2 , c_sm3 , c_em3
219 INTEGER :: c_sp1x , c_ep1x , c_sp2x , c_ep2x , c_sp3x , c_ep3x , &
220 c_sm1x , c_em1x , c_sm2x , c_em2x , c_sm3x , c_em3x
221 INTEGER :: c_sp1y , c_ep1y , c_sp2y , c_ep2y , c_sp3y , c_ep3y , &
222 c_sm1y , c_em1y , c_sm2y , c_em2y , c_sm3y , c_em3y
223
224 INTEGER :: c_ids, c_ide, c_jds, c_jde, c_kds, c_kde
225 INTEGER :: c_ims, c_ime, c_jms, c_jme, c_kms, c_kme
226 INTEGER :: c_ips, c_ipe, c_jps, c_jpe, c_kps, c_kpe
227
228 INTEGER :: idim , jdim , kdim , rem , a, b
229 INTEGER :: i, j, ni, nj, Px, Py, P
230
231 INTEGER :: parent_grid_ratio, i_parent_start, j_parent_start
232 INTEGER :: shw
233 INTEGER :: idim_cd, jdim_cd
234
235 TYPE(domain), POINTER :: intermediate_grid
236 TYPE(domain), POINTER :: nest_grid
237
238
239 SELECT CASE ( model_data_order )
240 ! need to finish other cases
241 CASE ( DATA_ORDER_ZXY )
242 ids = sd2 ; ide = ed2
243 jds = sd3 ; jde = ed3
244 kds = sd1 ; kde = ed1
245 CASE ( DATA_ORDER_XYZ )
246 ids = sd1 ; ide = ed1
247 jds = sd2 ; jde = ed2
248 kds = sd3 ; kde = ed3
249 CASE ( DATA_ORDER_XZY )
250 ids = sd1 ; ide = ed1
251 jds = sd3 ; jde = ed3
252 kds = sd2 ; kde = ed2
253 CASE ( DATA_ORDER_YXZ)
254 ids = sd2 ; ide = ed2
255 jds = sd1 ; jde = ed1
256 kds = sd3 ; kde = ed3
257 END SELECT
258
259 CALL compute_memory_dims_rsl_lite ( 0 , bdx, bdy, &
260 ids, ide, jds, jde, kds, kde, &
261 ims, ime, jms, jme, kms, kme, &
262 imsx, imex, jmsx, jmex, kmsx, kmex, &
263 imsy, imey, jmsy, jmey, kmsy, kmey, &
264 ips, ipe, jps, jpe, kps, kpe, &
265 ipsx, ipex, jpsx, jpex, kpsx, kpex, &
266 ipsy, ipey, jpsy, jpey, kpsy, kpey )
267
268 ! ensure that every parent domain point has a full set of nested points under it,
269 ! even at the borders. Do this by making sure the number of nest points is a multiple of
270 ! the nesting ratio. Note that this is important mostly to the intermediate domain, which
271 ! is the subject of the scatter/gather comms with the parent
272
273 IF ( id .GT. 1 ) THEN
274 CALL nl_get_parent_grid_ratio( id, parent_grid_ratio )
275 if ( mod(ime,parent_grid_ratio) .NE. 0 ) ime = ime + parent_grid_ratio - mod(ime,parent_grid_ratio)
276 if ( mod(jme,parent_grid_ratio) .NE. 0 ) jme = jme + parent_grid_ratio - mod(jme,parent_grid_ratio)
277 ENDIF
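! For illustration: with parent_grid_ratio = 3 and ime = 110, the adjustment
! above gives ime = 111 (the next multiple of 3), so every parent point that
! overlaps this patch has a complete block of nest points in memory.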
278
279 SELECT CASE ( model_data_order )
280 CASE ( DATA_ORDER_ZXY )
281 sp2 = ips ; ep2 = ipe ; sm2 = ims ; em2 = ime
282 sp3 = jps ; ep3 = jpe ; sm3 = jms ; em3 = jme
283 sp1 = kps ; ep1 = kpe ; sm1 = kms ; em1 = kme
284 sp2x = ipsx ; ep2x = ipex ; sm2x = imsx ; em2x = imex
285 sp3x = jpsx ; ep3x = jpex ; sm3x = jmsx ; em3x = jmex
286 sp1x = kpsx ; ep1x = kpex ; sm1x = kmsx ; em1x = kmex
287 sp2y = ipsy ; ep2y = ipey ; sm2y = imsy ; em2y = imey
288 sp3y = jpsy ; ep3y = jpey ; sm3y = jmsy ; em3y = jmey
289 sp1y = kpsy ; ep1y = kpey ; sm1y = kmsy ; em1y = kmey
290 CASE ( DATA_ORDER_ZYX )
291 sp3 = ips ; ep3 = ipe ; sm3 = ims ; em3 = ime
292 sp2 = jps ; ep2 = jpe ; sm2 = jms ; em2 = jme
293 sp1 = kps ; ep1 = kpe ; sm1 = kms ; em1 = kme
294 sp3x = ipsx ; ep3x = ipex ; sm3x = imsx ; em3x = imex
295 sp2x = jpsx ; ep2x = jpex ; sm2x = jmsx ; em2x = jmex
296 sp1x = kpsx ; ep1x = kpex ; sm1x = kmsx ; em1x = kmex
297 sp3y = ipsy ; ep3y = ipey ; sm3y = imsy ; em3y = imey
298 sp2y = jpsy ; ep2y = jpey ; sm2y = jmsy ; em2y = jmey
299 sp1y = kpsy ; ep1y = kpey ; sm1y = kmsy ; em1y = kmey
300 CASE ( DATA_ORDER_XYZ )
301 sp1 = ips ; ep1 = ipe ; sm1 = ims ; em1 = ime
302 sp2 = jps ; ep2 = jpe ; sm2 = jms ; em2 = jme
303 sp3 = kps ; ep3 = kpe ; sm3 = kms ; em3 = kme
304 sp1x = ipsx ; ep1x = ipex ; sm1x = imsx ; em1x = imex
305 sp2x = jpsx ; ep2x = jpex ; sm2x = jmsx ; em2x = jmex
306 sp3x = kpsx ; ep3x = kpex ; sm3x = kmsx ; em3x = kmex
307 sp1y = ipsy ; ep1y = ipey ; sm1y = imsy ; em1y = imey
308 sp2y = jpsy ; ep2y = jpey ; sm2y = jmsy ; em2y = jmey
309 sp3y = kpsy ; ep3y = kpey ; sm3y = kmsy ; em3y = kmey
310 CASE ( DATA_ORDER_YXZ)
311 sp2 = ips ; ep2 = ipe ; sm2 = ims ; em2 = ime
312 sp1 = jps ; ep1 = jpe ; sm1 = jms ; em1 = jme
313 sp3 = kps ; ep3 = kpe ; sm3 = kms ; em3 = kme
314 sp2x = ipsx ; ep2x = ipex ; sm2x = imsx ; em2x = imex
315 sp1x = jpsx ; ep1x = jpex ; sm1x = jmsx ; em1x = jmex
316 sp3x = kpsx ; ep3x = kpex ; sm3x = kmsx ; em3x = kmex
317 sp2y = ipsy ; ep2y = ipey ; sm2y = imsy ; em2y = imey
318 sp1y = jpsy ; ep1y = jpey ; sm1y = jmsy ; em1y = jmey
319 sp3y = kpsy ; ep3y = kpey ; sm3y = kmsy ; em3y = kmey
320 CASE ( DATA_ORDER_XZY )
321 sp1 = ips ; ep1 = ipe ; sm1 = ims ; em1 = ime
322 sp3 = jps ; ep3 = jpe ; sm3 = jms ; em3 = jme
323 sp2 = kps ; ep2 = kpe ; sm2 = kms ; em2 = kme
324 sp1x = ipsx ; ep1x = ipex ; sm1x = imsx ; em1x = imex
325 sp3x = jpsx ; ep3x = jpex ; sm3x = jmsx ; em3x = jmex
326 sp2x = kpsx ; ep2x = kpex ; sm2x = kmsx ; em2x = kmex
327 sp1y = ipsy ; ep1y = ipey ; sm1y = imsy ; em1y = imey
328 sp3y = jpsy ; ep3y = jpey ; sm3y = jmsy ; em3y = jmey
329 sp2y = kpsy ; ep2y = kpey ; sm2y = kmsy ; em2y = kmey
330 CASE ( DATA_ORDER_YZX )
331 sp3 = ips ; ep3 = ipe ; sm3 = ims ; em3 = ime
332 sp1 = jps ; ep1 = jpe ; sm1 = jms ; em1 = jme
333 sp2 = kps ; ep2 = kpe ; sm2 = kms ; em2 = kme
334 sp3x = ipsx ; ep3x = ipex ; sm3x = imsx ; em3x = imex
335 sp1x = jpsx ; ep1x = jpex ; sm1x = jmsx ; em1x = jmex
336 sp2x = kpsx ; ep2x = kpex ; sm2x = kmsx ; em2x = kmex
337 sp3y = ipsy ; ep3y = ipey ; sm3y = imsy ; em3y = imey
338 sp1y = jpsy ; ep1y = jpey ; sm1y = jmsy ; em1y = jmey
339 sp2y = kpsy ; ep2y = kpey ; sm2y = kmsy ; em2y = kmey
340 END SELECT
341
342 IF ( id.EQ.1 ) THEN
343 WRITE(wrf_err_message,*)'*************************************'
344 CALL wrf_message( TRIM(wrf_err_message) )
345 WRITE(wrf_err_message,*)'Parent domain'
346 CALL wrf_message( TRIM(wrf_err_message) )
347 WRITE(wrf_err_message,*)'ids,ide,jds,jde ',ids,ide,jds,jde
348 CALL wrf_message( TRIM(wrf_err_message) )
349 WRITE(wrf_err_message,*)'ims,ime,jms,jme ',ims,ime,jms,jme
350 CALL wrf_message( TRIM(wrf_err_message) )
351 WRITE(wrf_err_message,*)'ips,ipe,jps,jpe ',ips,ipe,jps,jpe
352 CALL wrf_message( TRIM(wrf_err_message) )
353 WRITE(wrf_err_message,*)'*************************************'
354 CALL wrf_message( TRIM(wrf_err_message) )
355 ENDIF
356
357 IF ( id .GT. 1 ) THEN
358
359 CALL nl_get_shw( id, shw )
360 CALL nl_get_i_parent_start( id , i_parent_start )
361 CALL nl_get_j_parent_start( id , j_parent_start )
362 CALL nl_get_parent_grid_ratio( id, parent_grid_ratio )
363
364 SELECT CASE ( model_data_order )
365 CASE ( DATA_ORDER_ZXY )
366 idim = ed2-sd2+1
367 jdim = ed3-sd3+1
368 kdim = ed1-sd1+1
369 c_kds = sd1 ; c_kde = ed1
370 CASE ( DATA_ORDER_ZYX )
371 idim = ed3-sd3+1
372 jdim = ed2-sd2+1
373 kdim = ed1-sd1+1
374 c_kds = sd1 ; c_kde = ed1
375 CASE ( DATA_ORDER_XYZ )
376 idim = ed1-sd1+1
377 jdim = ed2-sd2+1
378 kdim = ed3-sd3+1
379 c_kds = sd3 ; c_kde = ed3
380 CASE ( DATA_ORDER_YXZ)
381 idim = ed2-sd2+1
382 jdim = ed1-sd1+1
383 kdim = ed3-sd3+1
384 c_kds = sd3 ; c_kde = ed3
385 CASE ( DATA_ORDER_XZY )
386 idim = ed1-sd1+1
387 jdim = ed3-sd3+1
388 kdim = ed2-sd2+1
389 c_kds = sd2 ; c_kde = ed2
390 CASE ( DATA_ORDER_YZX )
391 idim = ed3-sd3+1
392 jdim = ed1-sd1+1
393 kdim = ed2-sd2+1
394 c_kds = sd2 ; c_kde = ed2
395 END SELECT
396
397 idim_cd = idim / parent_grid_ratio + 1 + 2*shw + 1
398 jdim_cd = jdim / parent_grid_ratio + 1 + 2*shw + 1
399
400 c_ids = i_parent_start-shw ; c_ide = c_ids + idim_cd - 1
401 c_jds = j_parent_start-shw ; c_jde = c_jds + jdim_cd - 1
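! For illustration (the numbers are hypothetical): with shw = 2, parent_grid_ratio = 3,
! i_parent_start = 10 and a nest of idim = 100 points,
!    idim_cd = 100/3 + 1 + 2*2 + 1 = 39,  c_ids = 10 - 2 = 8,  c_ide = 8 + 39 - 1 = 46,
! i.e. the intermediate domain covers the nest's footprint at parent resolution
! plus the stencil half-width shw on each side.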
402
403 ! we want the intermediate domain to be decomposed
404 ! the same as the underlying nest. So try this:
405
406 ! At such time as NMM nesting is able to use RSL_LITE (would require
407 ! a number of other mods to this file for that to happen), this should
408 ! be updated along the lines of what's done in compute_memory_dims_rsl_lite
409 ! below. See note dated 20051020. JM
410
411 c_ips = -1
412 nj = ( c_jds - j_parent_start ) * parent_grid_ratio + 1 + 1 ;
413 DO i = c_ids, c_ide
414 ni = ( i - i_parent_start ) * parent_grid_ratio + 1 + 1 ;
415 CALL task_for_point ( ni, nj, ids, ide, jds, jde, ntasks_x, ntasks_y, Px, Py )
416 IF ( Px .EQ. mytask_x ) THEN
417 c_ipe = i
418 IF ( c_ips .EQ. -1 ) c_ips = i
419 ENDIF
420 ENDDO
421
422 c_jps = -1
423 ni = ( c_ids - i_parent_start ) * parent_grid_ratio + 1 + 1 ;
424 DO j = c_jds, c_jde
425 nj = ( j - j_parent_start ) * parent_grid_ratio + 1 + 1 ;
426 CALL task_for_point ( ni, nj, ids, ide, jds, jde, ntasks_x, ntasks_y, Px, Py )
427 IF ( Py .EQ. mytask_y ) THEN
428 c_jpe = j
429 IF ( c_jps .EQ. -1 ) c_jps = j
430 ENDIF
431 ENDDO
432
433 ! extend the patch dimensions out shw along edges of domain
434 IF ( mytask_x .EQ. 0 ) THEN
435 c_ips = c_ips - shw
436 ENDIF
437 IF ( mytask_x .EQ. ntasks_x-1 ) THEN
438 c_ipe = c_ipe + shw
439 ENDIF
440 c_ims = max( c_ips - max(shw,max_halo_width), c_ids - bdx ) - 1
441 c_ime = min( c_ipe + max(shw,max_halo_width), c_ide + bdx ) + 1
442
443 ! handle j dims
444 ! extend the patch dimensions out shw along edges of domain
445 IF ( mytask_y .EQ. 0 ) THEN
446 c_jps = c_jps - shw
447 ENDIF
448 IF ( mytask_y .EQ. ntasks_y-1 ) THEN
449 c_jpe = c_jpe + shw
450 ENDIF
451 c_jms = max( c_jps - max(shw,max_halo_width), c_jds - bdx ) - 1
452 c_jme = min( c_jpe + max(shw,max_halo_width), c_jde + bdx ) + 1
453 ! handle k dims
454 c_kps = 1
455 c_kpe = c_kde
456 c_kms = 1
457 c_kme = c_kde
458
459 WRITE(wrf_err_message,*)'*************************************'
460 CALL wrf_message( TRIM(wrf_err_message) )
461 WRITE(wrf_err_message,*)'Nesting domain'
462 CALL wrf_message( TRIM(wrf_err_message) )
463 WRITE(wrf_err_message,*)'ids,ide,jds,jde ',ids,ide,jds,jde
464 CALL wrf_message( TRIM(wrf_err_message) )
465 WRITE(wrf_err_message,*)'ims,ime,jms,jme ',ims,ime,jms,jme
466 CALL wrf_message( TRIM(wrf_err_message) )
467 WRITE(wrf_err_message,*)'ips,ipe,jps,jpe ',ips,ipe,jps,jpe
468 CALL wrf_message( TRIM(wrf_err_message) )
469 WRITE(wrf_err_message,*)'INTERMEDIATE domain'
470 CALL wrf_message( TRIM(wrf_err_message) )
471 WRITE(wrf_err_message,*)'ids,ide,jds,jde ',c_ids,c_ide,c_jds,c_jde
472 CALL wrf_message( TRIM(wrf_err_message) )
473 WRITE(wrf_err_message,*)'ims,ime,jms,jme ',c_ims,c_ime,c_jms,c_jme
474 CALL wrf_message( TRIM(wrf_err_message) )
475 WRITE(wrf_err_message,*)'ips,ipe,jps,jpe ',c_ips,c_ipe,c_jps,c_jpe
476 CALL wrf_message( TRIM(wrf_err_message) )
477 WRITE(wrf_err_message,*)'*************************************'
478 CALL wrf_message( TRIM(wrf_err_message) )
479
480 SELECT CASE ( model_data_order )
481 CASE ( DATA_ORDER_ZXY )
482 c_sd2 = c_ids ; c_ed2 = c_ide ; c_sp2 = c_ips ; c_ep2 = c_ipe ; c_sm2 = c_ims ; c_em2 = c_ime
483 c_sd3 = c_jds ; c_ed3 = c_jde ; c_sp3 = c_jps ; c_ep3 = c_jpe ; c_sm3 = c_jms ; c_em3 = c_jme
484 c_sd1 = c_kds ; c_ed1 = c_kde ; c_sp1 = c_kps ; c_ep1 = c_kpe ; c_sm1 = c_kms ; c_em1 = c_kme
485 CASE ( DATA_ORDER_ZYX )
486 c_sd3 = c_ids ; c_ed3 = c_ide ; c_sp3 = c_ips ; c_ep3 = c_ipe ; c_sm3 = c_ims ; c_em3 = c_ime
487 c_sd2 = c_jds ; c_ed2 = c_jde ; c_sp2 = c_jps ; c_ep2 = c_jpe ; c_sm2 = c_jms ; c_em2 = c_jme
488 c_sd1 = c_kds ; c_ed1 = c_kde ; c_sp1 = c_kps ; c_ep1 = c_kpe ; c_sm1 = c_kms ; c_em1 = c_kme
489 CASE ( DATA_ORDER_XYZ )
490 c_sd1 = c_ids ; c_ed1 = c_ide ; c_sp1 = c_ips ; c_ep1 = c_ipe ; c_sm1 = c_ims ; c_em1 = c_ime
491 c_sd2 = c_jds ; c_ed2 = c_jde ; c_sp2 = c_jps ; c_ep2 = c_jpe ; c_sm2 = c_jms ; c_em2 = c_jme
492 c_sd3 = c_kds ; c_ed3 = c_kde ; c_sp3 = c_kps ; c_ep3 = c_kpe ; c_sm3 = c_kms ; c_em3 = c_kme
493 CASE ( DATA_ORDER_YXZ)
494 c_sd2 = c_ids ; c_ed2 = c_ide ; c_sp2 = c_ips ; c_ep2 = c_ipe ; c_sm2 = c_ims ; c_em2 = c_ime
495 c_sd1 = c_jds ; c_ed1 = c_jde ; c_sp1 = c_jps ; c_ep1 = c_jpe ; c_sm1 = c_jms ; c_em1 = c_jme
496 c_sd3 = c_kds ; c_ed3 = c_kde ; c_sp3 = c_kps ; c_ep3 = c_kpe ; c_sm3 = c_kms ; c_em3 = c_kme
497 CASE ( DATA_ORDER_XZY )
498 c_sd1 = c_ids ; c_ed1 = c_ide ; c_sp1 = c_ips ; c_ep1 = c_ipe ; c_sm1 = c_ims ; c_em1 = c_ime
499 c_sd3 = c_jds ; c_ed3 = c_jde ; c_sp3 = c_jps ; c_ep3 = c_jpe ; c_sm3 = c_jms ; c_em3 = c_jme
500 c_sd2 = c_kds ; c_ed2 = c_kde ; c_sp2 = c_kps ; c_ep2 = c_kpe ; c_sm2 = c_kms ; c_em2 = c_kme
501 CASE ( DATA_ORDER_YZX )
502 c_sd3 = c_ids ; c_ed3 = c_ide ; c_sp3 = c_ips ; c_ep3 = c_ipe ; c_sm3 = c_ims ; c_em3 = c_ime
503 c_sd1 = c_jds ; c_ed1 = c_jde ; c_sp1 = c_jps ; c_ep1 = c_jpe ; c_sm1 = c_jms ; c_em1 = c_jme
504 c_sd2 = c_kds ; c_ed2 = c_kde ; c_sp2 = c_kps ; c_ep2 = c_kpe ; c_sm2 = c_kms ; c_em2 = c_kme
505 END SELECT
506
507 ALLOCATE ( intermediate_grid )
508 ALLOCATE ( intermediate_grid%parents( max_parents ) )
509 ALLOCATE ( intermediate_grid%nests( max_nests ) )
510
511 NULLIFY( intermediate_grid%sibling )
512 DO i = 1, max_nests
513 NULLIFY( intermediate_grid%nests(i)%ptr )
514 ENDDO
515 NULLIFY (intermediate_grid%next)
516 NULLIFY (intermediate_grid%same_level)
517 NULLIFY (intermediate_grid%i_start)
518 NULLIFY (intermediate_grid%j_start)
519 NULLIFY (intermediate_grid%i_end)
520 NULLIFY (intermediate_grid%j_end)
521 intermediate_grid%id = id
522 intermediate_grid%num_nests = 0
523 intermediate_grid%num_siblings = 0
524 intermediate_grid%num_parents = 1
525 intermediate_grid%max_tiles = 0
526 intermediate_grid%num_tiles_spec = 0
527 CALL find_grid_by_id ( id, head_grid, nest_grid )
528
529 nest_grid%intermediate_grid => intermediate_grid ! nest grid now has a pointer to this baby
530 intermediate_grid%parents(1)%ptr => nest_grid ! the intermediate grid considers nest its parent
531 intermediate_grid%num_parents = 1
532
533 c_sm1x = 1 ; c_em1x = 1 ; c_sm2x = 1 ; c_em2x = 1 ; c_sm3x = 1 ; c_em3x = 1
534 c_sm1y = 1 ; c_em1y = 1 ; c_sm2y = 1 ; c_em2y = 1 ; c_sm3y = 1 ; c_em3y = 1
535
536 intermediate_grid%sm31x = c_sm1x
537 intermediate_grid%em31x = c_em1x
538 intermediate_grid%sm32x = c_sm2x
539 intermediate_grid%em32x = c_em2x
540 intermediate_grid%sm33x = c_sm3x
541 intermediate_grid%em33x = c_em3x
542 intermediate_grid%sm31y = c_sm1y
543 intermediate_grid%em31y = c_em1y
544 intermediate_grid%sm32y = c_sm2y
545 intermediate_grid%em32y = c_em2y
546 intermediate_grid%sm33y = c_sm3y
547 intermediate_grid%em33y = c_em3y
548
549 #ifdef SGIALTIX
550 ! allocate space for the intermediate domain
551 CALL alloc_space_field ( intermediate_grid, intermediate_grid%id , 1, 2 , .TRUE., & ! use same id as nest
552 c_sd1, c_ed1, c_sd2, c_ed2, c_sd3, c_ed3, &
553 c_sm1, c_em1, c_sm2, c_em2, c_sm3, c_em3, &
554 c_sm1x, c_em1x, c_sm2x, c_em2x, c_sm3x, c_em3x, & ! x-xpose
555 c_sm1y, c_em1y, c_sm2y, c_em2y, c_sm3y, c_em3y ) ! y-xpose
556 #endif
557 intermediate_grid%sd31 = c_sd1
558 intermediate_grid%ed31 = c_ed1
559 intermediate_grid%sp31 = c_sp1
560 intermediate_grid%ep31 = c_ep1
561 intermediate_grid%sm31 = c_sm1
562 intermediate_grid%em31 = c_em1
563 intermediate_grid%sd32 = c_sd2
564 intermediate_grid%ed32 = c_ed2
565 intermediate_grid%sp32 = c_sp2
566 intermediate_grid%ep32 = c_ep2
567 intermediate_grid%sm32 = c_sm2
568 intermediate_grid%em32 = c_em2
569 intermediate_grid%sd33 = c_sd3
570 intermediate_grid%ed33 = c_ed3
571 intermediate_grid%sp33 = c_sp3
572 intermediate_grid%ep33 = c_ep3
573 intermediate_grid%sm33 = c_sm3
574 intermediate_grid%em33 = c_em3
575
576 CALL med_add_config_info_to_grid ( intermediate_grid )
577
578 intermediate_grid%dx = parent%dx
579 intermediate_grid%dy = parent%dy
580 intermediate_grid%dt = parent%dt
581 ENDIF
582
583 RETURN
584 END SUBROUTINE patch_domain_rsl_lite
585
586 SUBROUTINE compute_memory_dims_rsl_lite ( &
587 shw , bdx, bdy , &
588 ids, ide, jds, jde, kds, kde, &
589 ims, ime, jms, jme, kms, kme, &
590 imsx, imex, jmsx, jmex, kmsx, kmex, &
591 imsy, imey, jmsy, jmey, kmsy, kmey, &
592 ips, ipe, jps, jpe, kps, kpe, &
593 ipsx, ipex, jpsx, jpex, kpsx, kpex, &
594 ipsy, ipey, jpsy, jpey, kpsy, kpey )
595
596 USE module_machine
597 IMPLICIT NONE
598 INTEGER, INTENT(IN) :: shw, bdx, bdy
599 INTEGER, INTENT(IN) :: ids, ide, jds, jde, kds, kde
600 INTEGER, INTENT(OUT) :: ims, ime, jms, jme, kms, kme
601 INTEGER, INTENT(OUT) :: imsx, imex, jmsx, jmex, kmsx, kmex
602 INTEGER, INTENT(OUT) :: imsy, imey, jmsy, jmey, kmsy, kmey
603 INTEGER, INTENT(OUT) :: ips, ipe, jps, jpe, kps, kpe
604 INTEGER, INTENT(OUT) :: ipsx, ipex, jpsx, jpex, kpsx, kpex
605 INTEGER, INTENT(OUT) :: ipsy, ipey, jpsy, jpey, kpsy, kpey
606
607 INTEGER Px, Py, P, i, j, k
608
609 #if ( ! NMM_CORE == 1 )
610
611 ! xy decomposition
612
613 ips = -1
614 j = jds ;
615 DO i = ids, ide
616 CALL task_for_point ( i, j, ids, ide, jds, jde, ntasks_x, ntasks_y, Px, Py )
617 IF ( Px .EQ. mytask_x ) THEN
618 ipe = i
619 IF ( ips .EQ. -1 ) ips = i
620 ENDIF
621 ENDDO
622 ! handle setting the memory dimensions where there are no X elements assigned to this proc
623 IF (ips .EQ. -1 ) THEN
624 ipe = -1
625 ips = 0
626 ENDIF
627 jps = -1
628 i = ids ;
629 DO j = jds, jde
630 CALL task_for_point ( i, j, ids, ide, jds, jde, ntasks_x, ntasks_y, Px, Py )
631 IF ( Py .EQ. mytask_y ) THEN
632 jpe = j
633 IF ( jps .EQ. -1 ) jps = j
634 ENDIF
635 ENDDO
636 ! handle setting the memory dimensions where there are no Y elements assigned to this proc
637 IF (jps .EQ. -1 ) THEN
638 jpe = -1
639 jps = 0
640 ENDIF
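! For illustration (assuming task_for_point, which lives outside this module in
! the RSL_LITE layer, hands each task a contiguous, nearly equal span): with
! ids = 1, ide = 100 and ntasks_x = 4, the i loop above would leave the task at
! X index 0 with roughly ips = 1, ipe = 25, the next with ips = 26, ipe = 50,
! and so on; the j loop does the same over ntasks_y.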
641
642 !
643 ! description of transpose decomposition strategy for RSL LITE. 20061231jm
644 !
645 ! Here is the transpose scheme that is implemented for RSL_LITE. Upper-case
646 ! XY corresponds to the dimension of the processor mesh, lower-case xyz
647 ! corresponds to grid dimension.
648 !
649 !      xy       zy        zx
650 !
651 !     XxYy <--> XzYy <--> XzYx  <- note x decomposed over Y procs
652 !      ^                    ^
653 !      |                    |
654 !      +--------------------+  <- this edge is costly; see below
655 !
656 ! The aim is to avoid all-to-all communication over the whole
657 ! communicator. Instead, when possible, use a transpose scheme that requires
658 ! all-to-all only within dimensional communicators; that is, communicators
659 ! defined for the processes in a row or column of the processor mesh. Note,
660 ! however, it is not possible to create a ring of transposes between
661 ! xy-yz-xz decompositions without at least one of the edges in the ring
662 ! being fully all-to-all (in other words, one of the transpose edges must
663 ! rotate and not just transpose a plane of the model grid within the
664 ! processor mesh). The issue is then: where should we put this costly edge
665 ! in the transpose scheme we choose? To avoid being completely arbitrary,
666 ! we chose a scheme most natural for models that use parallel spectral
667 ! transforms, where the costly edge is the one that goes from the xz to
668 ! the xy decomposition. (May be implemented as just a two step transpose
669 ! back through yz).
670 !
671 ! Additional notational convention, below. The 'x' or 'y' appended to the
672 ! dimension start or end variable refers to which grid dimension is all
673 ! on-processor in the given decomposition. That is ipsx and ipex are the
674 ! start and end for the i-dimension in the zy decomposition where x is
675 ! on-processor. ('z' is assumed for xy decomposition and not appended to
676 ! the ips, ipe, etc. variable names).
677 !
678
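! For illustration: on a 4 x 2 mesh with 35 model levels, the XzYy decomposition
! below keeps the full i range on every task (ipsx = ids, ipex = ide, set further
! down) while k is split over the 4 tasks in X (kpsx..kpex, roughly 9 levels each)
! and j over the 2 tasks in Y; the XzYx decomposition then keeps all of j on-task
! and splits i over the Y tasks instead.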
679 ! XzYy decomposition
680
681 kpsx = -1
682 j = jds ;
683 DO k = kds, kde
684 CALL task_for_point ( k, j, kds, kde, jds, jde, ntasks_x, ntasks_y, Px, Py )
685 IF ( Px .EQ. mytask_x ) THEN
686 kpex = k
687 IF ( kpsx .EQ. -1 ) kpsx = k
688 ENDIF
689 ENDDO
690 ! handle the case where no levels are assigned to this process (the loop above
691 ! performed no iterations). The same is done for I and J. Memory alloc is handled below.
692 IF (kpsx .EQ. -1 ) THEN
693 kpex = -1
694 kpsx = 0
695 ENDIF
696
697 jpsx = -1
698 k = kds ;
699 DO j = jds, jde
700 CALL task_for_point ( k, j, kds, kde, jds, jde, ntasks_x, ntasks_y, Px, Py )
701 IF ( Py .EQ. mytask_y ) THEN
702 jpex = j
703 IF ( jpsx .EQ. -1 ) jpsx = j
704 ENDIF
705 ENDDO
706 IF (jpsx .EQ. -1 ) THEN
707 jpex = -1
708 jpsx = 0
709 ENDIF
710
711 ! XzYx decomposition (note, x grid dim is decomposed over Y processor dim)
712
713 kpsy = kpsx ! same as above
714 kpey = kpex ! same as above
715
716 ipsy = -1
717 k = kds ;
718 DO i = ids, ide
719 CALL task_for_point ( i, k, ids, ide, kds, kde, ntasks_y, ntasks_x, Py, Px ) ! x and y for proc mesh reversed
720 IF ( Py .EQ. mytask_y ) THEN
721 ipey = i
722 IF ( ipsy .EQ. -1 ) ipsy = i
723 ENDIF
724 ENDDO
725 IF (ipsy .EQ. -1 ) THEN
726 ipey = -1
727 ipsy = 0
728 ENDIF
729
730
731 #else
732
733 ! In the case of the NMM core, the domain only ever runs from ids..ide-1 and jds..jde-1, so
734 ! adjust the decomposition to reflect this. 20051020 JM
735 ips = -1
736 j = jds ;
737 DO i = ids, ide-1
738 CALL task_for_point ( i, j, ids, ide-1, jds, jde-1, ntasks_x, ntasks_y, Px, Py )
739 IF ( Px .EQ. mytask_x ) THEN
740 ipe = i
741 IF ( Px .EQ. ntasks_x-1 ) ipe = ipe + 1
742 IF ( ips .EQ. -1 ) ips = i
743 ENDIF
744 ENDDO
745 jps = -1
746 i = ids ;
747 DO j = jds, jde-1
748 CALL task_for_point ( i, j, ids, ide-1, jds, jde-1, ntasks_x, ntasks_y, Px, Py )
749 IF ( Py .EQ. mytask_y ) THEN
750 jpe = j
751 IF ( Py .EQ. ntasks_y-1 ) jpe = jpe + 1
752 IF ( jps .EQ. -1 ) jps = j
753 ENDIF
754 ENDDO
755 #endif
756
757 ! extend the patch dimensions out shw along edges of domain
758 IF ( mytask_x .EQ. 0 ) THEN
759 ips = ips - shw
760 ipsy = ipsy - shw
761 ENDIF
762 IF ( mytask_x .EQ. ntasks_x-1 ) THEN
763 ipe = ipe + shw
764 ipey = ipey + shw
765 ENDIF
766 IF ( mytask_y .EQ. 0 ) THEN
767 jps = jps - shw
768 jpsx = jpsx - shw
769 ENDIF
770 IF ( mytask_y .EQ. ntasks_y-1 ) THEN
771 jpe = jpe + shw
772 jpex = jpex + shw
773 ENDIF
774
775 kps = 1
776 kpe = kde-kds+1
777
778 kms = 1
779 kme = kpe
780 kmsx = kpsx
781 kmex = kpex
782 kmsy = kpsy
783 kmey = kpey
784 ! handle setting the memory dimensions where there are no levels assigned to this proc
785 IF ( kpsx .EQ. 0 .AND. kpex .EQ. -1 ) THEN
786 kmsx = 0
787 kmex = 0
788 ENDIF
789 IF ( kpsy .EQ. 0 .AND. kpey .EQ. -1 ) THEN
790 kmsy = 0
791 kmey = 0
792 ENDIF
793
794 IF ( ips .EQ. 0 .AND. ipe .EQ. -1 ) THEN
795 ims = 0
796 ime = 0
797 ELSE
798 ims = max( ips - max(shw,max_halo_width), ids - bdx ) - 1
799 ime = min( ipe + max(shw,max_halo_width), ide + bdx ) + 1
800 ENDIF
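! For illustration (hypothetical numbers): with ips = 26, ipe = 50, shw = 0,
! max_halo_width = 5, ids = 1, ide = 100 and bdx = 5, the formulas above give
!    ims = max(26-5, 1-5) - 1 = 20   and   ime = min(50+5, 100+5) + 1 = 56,
! i.e. the memory range is the patch padded by the halo width plus one point,
! but never more than bdx (plus one) beyond the domain edge.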
801 imsx = ids
802 imex = ide
803 ipsx = imsx
804 ipex = imex
805 ! handle setting the memory dimensions where there are no Y elements assigned to this proc
806 IF ( ipsy .EQ. 0 .AND. ipey .EQ. -1 ) THEN
807 imsy = 0
808 imey = 0
809 ELSE
810 imsy = ipsy
811 imey = ipey
812 ENDIF
813
814 IF ( jps .EQ. 0 .AND. jpe .EQ. -1 ) THEN
815 jms = 0
816 jme = 0
817 ELSE
818 jms = max( jps - max(shw,max_halo_width), jds - bdy ) - 1
819 jme = min( jpe + max(shw,max_halo_width), jde + bdy ) + 1
820 ENDIF
821 jmsx = jpsx
822 jmex = jpex
823 jmsy = jds
824 jmey = jde
825 ! handle setting the memory dimensions where there are no X elements assigned to this proc
826 IF ( jpsx .EQ. 0 .AND. jpex .EQ. -1 ) THEN
827 jmsx = 0
828 jmex = 0
829 ELSE
830 jpsy = jmsy
831 jpey = jmey
832 ENDIF
833
834 END SUBROUTINE compute_memory_dims_rsl_lite
835
836 ! internal, used below for switching the argument to MPI calls
837 ! if reals are being autopromoted to doubles in the build of WRF
838 INTEGER function getrealmpitype()
839 #ifndef STUBMPI
840 IMPLICIT NONE
841 INCLUDE 'mpif.h'
842 INTEGER rtypesize, dtypesize, ierr
843 CALL mpi_type_size ( MPI_REAL, rtypesize, ierr )
844 CALL mpi_type_size ( MPI_DOUBLE_PRECISION, dtypesize, ierr )
845 IF ( RWORDSIZE .EQ. rtypesize ) THEN
846 getrealmpitype = MPI_REAL
847 ELSE IF ( RWORDSIZE .EQ. dtypesize ) THEN
848 getrealmpitype = MPI_DOUBLE_PRECISION
849 ELSE
850 CALL wrf_error_fatal ( 'RWORDSIZE or DWORDSIZE does not match any MPI type' )
851 ENDIF
852 #else
853 ! required dummy initialization for function that is never called
854 getrealmpitype = 1
855 #endif
856 RETURN
857 END FUNCTION getrealmpitype
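! For illustration: in a default (4-byte real) build RWORDSIZE matches the size
! of MPI_REAL, so the reductions below pass MPI_REAL; in a build with reals
! promoted to 8 bytes they pass MPI_DOUBLE_PRECISION instead.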
858
859 REAL FUNCTION wrf_dm_max_real ( inval )
860 IMPLICIT NONE
861 INCLUDE 'mpif.h'
862 REAL inval, retval
863 INTEGER ierr
864 CALL mpi_allreduce ( inval, retval , 1, getrealmpitype(), MPI_MAX, local_communicator, ierr )
865 wrf_dm_max_real = retval
866 END FUNCTION wrf_dm_max_real
867
868 REAL FUNCTION wrf_dm_min_real ( inval )
869 IMPLICIT NONE
870 INCLUDE 'mpif.h'
871 REAL inval, retval
872 INTEGER ierr
873 CALL mpi_allreduce ( inval, retval , 1, getrealmpitype(), MPI_MIN, local_communicator, ierr )
874 wrf_dm_min_real = retval
875 END FUNCTION wrf_dm_min_real
876
877 REAL FUNCTION wrf_dm_sum_real ( inval )
878 IMPLICIT NONE
879 INCLUDE 'mpif.h'
880 REAL inval, retval
881 INTEGER ierr
882 CALL mpi_allreduce ( inval, retval , 1, getrealmpitype(), MPI_SUM, local_communicator, ierr )
883 wrf_dm_sum_real = retval
884 END FUNCTION wrf_dm_sum_real
885
886 SUBROUTINE wrf_dm_sum_reals (inval, retval)
887 IMPLICIT NONE
888 INCLUDE 'mpif.h'
889 REAL, INTENT(IN) :: inval(:)
890 REAL, INTENT(OUT) :: retval(:)
891 INTEGER ierr
892 CALL mpi_allreduce ( inval, retval, SIZE(inval), getrealmpitype(), MPI_SUM, local_communicator, ierr )
893 END SUBROUTINE wrf_dm_sum_reals
894
895 INTEGER FUNCTION wrf_dm_sum_integer ( inval )
896 IMPLICIT NONE
897 INCLUDE 'mpif.h'
898 INTEGER inval, retval
899 INTEGER ierr
900 CALL mpi_allreduce ( inval, retval , 1, MPI_INTEGER, MPI_SUM, local_communicator, ierr )
901 wrf_dm_sum_integer = retval
902 END FUNCTION wrf_dm_sum_integer
903
904 SUBROUTINE wrf_dm_maxval_real ( val, idex, jdex )
905 IMPLICIT NONE
906 INCLUDE 'mpif.h'
907 REAL val, val_all( ntasks )
908 INTEGER idex, jdex, ierr
909 INTEGER dex(2)
910 INTEGER dex_all (2,ntasks)
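! <DESCRIPTION>
! Collective operation. Each processor calls passing a local value and its index; on return
! all processors are passed back the maximum of all values passed and its index.
!
! </DESCRIPTION>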
911 INTEGER i
912
913 dex(1) = idex ; dex(2) = jdex
914 CALL mpi_allgather ( dex, 2, MPI_INTEGER, dex_all , 2, MPI_INTEGER, local_communicator, ierr )
915 CALL mpi_allgather ( val, 1, getrealmpitype(), val_all , 1, getrealmpitype(), local_communicator, ierr )
916 val = val_all(1)
917 idex = dex_all(1,1) ; jdex = dex_all(2,1)
918 DO i = 2, ntasks
919 IF ( val_all(i) .GT. val ) THEN
920 val = val_all(i)
921 idex = dex_all(1,i)
922 jdex = dex_all(2,i)
923 ENDIF
924 ENDDO
925 END SUBROUTINE wrf_dm_maxval_real
926
927 #ifndef PROMOTE_FLOAT
928 SUBROUTINE wrf_dm_maxval_doubleprecision ( val, idex, jdex )
929 IMPLICIT NONE
930 INCLUDE 'mpif.h'
931 DOUBLE PRECISION val, val_all( ntasks )
932 INTEGER idex, jdex, ierr
933 INTEGER dex(2)
934 INTEGER dex_all (2,ntasks)
935 INTEGER i
936
937 dex(1) = idex ; dex(2) = jdex
938 CALL mpi_allgather ( dex, 2, MPI_INTEGER, dex_all , 2, MPI_INTEGER, local_communicator, ierr )
939 CALL mpi_allgather ( val, 1, MPI_DOUBLE_PRECISION, val_all , 1, MPI_DOUBLE_PRECISION, local_communicator, ierr )
940 val = val_all(1)
941 idex = dex_all(1,1) ; jdex = dex_all(2,1)
942 DO i = 2, ntasks
943 IF ( val_all(i) .GT. val ) THEN
944 val = val_all(i)
945 idex = dex_all(1,i)
946 jdex = dex_all(2,i)
947 ENDIF
948 ENDDO
949 END SUBROUTINE wrf_dm_maxval_doubleprecision
950 #endif
951
952 SUBROUTINE wrf_dm_maxval_integer ( val, idex, jdex )
953 IMPLICIT NONE
954 INCLUDE 'mpif.h'
955 INTEGER val, val_all( ntasks )
956 INTEGER idex, jdex, ierr
957 INTEGER dex(2)
958 INTEGER dex_all (2,ntasks)
959 INTEGER i
960
961 dex(1) = idex ; dex(2) = jdex
962 CALL mpi_allgather ( dex, 2, MPI_INTEGER, dex_all , 2, MPI_INTEGER, local_communicator, ierr )
963 CALL mpi_allgather ( val, 1, MPI_INTEGER, val_all , 1, MPI_INTEGER, local_communicator, ierr )
964 val = val_all(1)
965 idex = dex_all(1,1) ; jdex = dex_all(2,1)
966 DO i = 2, ntasks
967 IF ( val_all(i) .GT. val ) THEN
968 val = val_all(i)
969 idex = dex_all(1,i)
970 jdex = dex_all(2,i)
971 ENDIF
972 ENDDO
973 END SUBROUTINE wrf_dm_maxval_integer
974
975 ! For HWRF some additional computation is required. This is gopal's doing
976
977 SUBROUTINE wrf_dm_minval_real ( val, idex, jdex )
978 IMPLICIT NONE
979 REAL val, val_all( ntasks )
980 INTEGER idex, jdex, ierr
981 INTEGER dex(2)
982 INTEGER dex_all (2,ntasks)
983 ! <DESCRIPTION>
984 ! Collective operation. Each processor calls passing a local value and its index; on return
985 ! all processors are passed back the minimum of all values passed and its index.
986 !
987 ! </DESCRIPTION>
988 INTEGER i, comm
989 #ifndef STUBMPI
990 INCLUDE 'mpif.h'
991
992 CALL wrf_get_dm_communicator ( comm )
993 dex(1) = idex ; dex(2) = jdex
994 CALL mpi_allgather ( dex, 2, MPI_INTEGER, dex_all , 2, MPI_INTEGER, comm, ierr )
995 CALL mpi_allgather ( val, 1, MPI_REAL, val_all , 1, MPI_REAL, comm, ierr )
996 val = val_all(1)
997 idex = dex_all(1,1) ; jdex = dex_all(2,1)
998 DO i = 2, ntasks
999 IF ( val_all(i) .LT. val ) THEN
1000 val = val_all(i)
1001 idex = dex_all(1,i)
1002 jdex = dex_all(2,i)
1003 ENDIF
1004 ENDDO
1005 #endif
1006 END SUBROUTINE wrf_dm_minval_real
1007
1008 #ifndef PROMOTE_FLOAT
1009 SUBROUTINE wrf_dm_minval_doubleprecision ( val, idex, jdex )
1010 IMPLICIT NONE
1011 DOUBLE PRECISION val, val_all( ntasks )
1012 INTEGER idex, jdex, ierr
1013 INTEGER dex(2)
1014 INTEGER dex_all (2,ntasks)
1015 ! <DESCRIPTION>
1016 ! Collective operation. Each processor calls passing a local value and its index; on return
1017 ! all processors are passed back the minimum of all values passed and its index.
1018 !
1019 ! </DESCRIPTION>
1020 INTEGER i, comm
1021 #ifndef STUBMPI
1022 INCLUDE 'mpif.h'
1023
1024 CALL wrf_get_dm_communicator ( comm )
1025 dex(1) = idex ; dex(2) = jdex
1026 CALL mpi_allgather ( dex, 2, MPI_INTEGER, dex_all , 2, MPI_INTEGER, comm, ierr )
1027 CALL mpi_allgather ( val, 1, MPI_DOUBLE_PRECISION, val_all , 1, MPI_DOUBLE_PRECISION, comm, ierr )
1028 val = val_all(1)
1029 idex = dex_all(1,1) ; jdex = dex_all(2,1)
1030 DO i = 2, ntasks
1031 IF ( val_all(i) .LT. val ) THEN
1032 val = val_all(i)
1033 idex = dex_all(1,i)
1034 jdex = dex_all(2,i)
1035 ENDIF
1036 ENDDO
1037 #endif
1038 END SUBROUTINE wrf_dm_minval_doubleprecision
1039 #endif
1040
1041 SUBROUTINE wrf_dm_minval_integer ( val, idex, jdex )
1042 IMPLICIT NONE
1043 INTEGER val, val_all( ntasks )
1044 INTEGER idex, jdex, ierr
1045 INTEGER dex(2)
1046 INTEGER dex_all (2,ntasks)
1047 ! <DESCRIPTION>
1048 ! Collective operation. Each processor calls passing a local value and its index; on return
1049 ! all processors are passed back the minimum of all values passed and its index.
1050 !
1051 ! </DESCRIPTION>
1052 INTEGER i, comm
1053 #ifndef STUBMPI
1054 INCLUDE 'mpif.h'
1055
1056 CALL wrf_get_dm_communicator ( comm )
1057 dex(1) = idex ; dex(2) = jdex
1058 CALL mpi_allgather ( dex, 2, MPI_INTEGER, dex_all , 2, MPI_INTEGER, comm, ierr )
1059 CALL mpi_allgather ( val, 1, MPI_INTEGER, val_all , 1, MPI_INTEGER, comm, ierr )
1060 val = val_all(1)
1061 idex = dex_all(1,1) ; jdex = dex_all(2,1)
1062 DO i = 2, ntasks
1063 IF ( val_all(i) .LT. val ) THEN
1064 val = val_all(i)
1065 idex = dex_all(1,i)
1066 jdex = dex_all(2,i)
1067 ENDIF
1068 ENDDO
1069 #endif
1070 END SUBROUTINE wrf_dm_minval_integer ! End of gopal's doing
1071
1072 SUBROUTINE split_communicator
1073 IMPLICIT NONE
1074 INCLUDE 'mpif.h'
1075 LOGICAL mpi_inited
1076 INTEGER mpi_comm_here, mpi_comm_local, comdup, mytask, ntasks, ierr, io_status
1077 INTEGER i, j
1078 INTEGER, ALLOCATABLE :: icolor(:)
1079 INTEGER tasks_per_split
1080 NAMELIST /namelist_split/ tasks_per_split
1081
1082 CALL MPI_INITIALIZED( mpi_inited, ierr )
1083 IF ( .NOT. mpi_inited ) THEN
1084 CALL mpi_init ( ierr )
1085 CALL wrf_set_dm_communicator( MPI_COMM_WORLD )
1086 CALL wrf_termio_dup
1087 ENDIF
1088 CALL wrf_get_dm_communicator( mpi_comm_here )
1089
1090 CALL MPI_Comm_rank ( mpi_comm_here, mytask, ierr ) ;
1091 CALL mpi_comm_size ( mpi_comm_here, ntasks, ierr ) ;
1092 #ifdef WRFNL
1093 CALL mpi_comm_split( MPI_COMM_WORLD , 3 , 1 , mpi_comm_local, ierr )
1094 #else
1095 CALL mpi_comm_split( MPI_COMM_WORLD , 4 , 1 , mpi_comm_local, ierr )
1096 #endif
1097 CALL wrf_set_dm_communicator( mpi_comm_local )
1098 CALL wrf_termio_dup
1099 CALL wrf_get_dm_communicator( mpi_comm_here )
1100
1101 IF ( mytask .EQ. 0 ) THEN
1102 OPEN ( unit=27, file="namelist.input", form="formatted", status="old" )
1103 tasks_per_split = ntasks
1104 READ ( 27 , NML = namelist_split, IOSTAT=io_status )
1105 CLOSE ( 27 )
1106 ENDIF
1107 CALL mpi_bcast( io_status, 1 , MPI_INTEGER , 0 , mpi_comm_here, ierr )
1108 IF ( io_status .NE. 0 ) THEN
1109 RETURN ! just ignore and return
1110 ENDIF
1111 CALL mpi_bcast( tasks_per_split, 1 , MPI_INTEGER , 0 , mpi_comm_here, ierr )
1112 IF ( tasks_per_split .GT. ntasks .OR. tasks_per_split .LE. 0 ) RETURN
1113 IF ( mod( ntasks, tasks_per_split ) .NE. 0 ) THEN
1114 CALL wrf_message( 'WARNING: tasks_per_split does not evenly divide ntasks. Some tasks will be wasted.' )
1115 ENDIF
1116
1117 ALLOCATE( icolor(ntasks) )
1118 j = 0
1119 DO WHILE ( j .LT. ntasks / tasks_per_split )
1120 DO i = 1, tasks_per_split
1121 icolor( i + j * tasks_per_split ) = j
1122 ENDDO
1123 j = j + 1
1124 ENDDO
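! For illustration: with 8 tasks and tasks_per_split = 4 the loop above sets
! icolor = (0,0,0,0,1,1,1,1), so the MPI_Comm_split below produces two
! independent 4-task communicators.  tasks_per_split comes from an optional
! record in namelist.input, e.g.
!    &namelist_split
!     tasks_per_split = 4
!    /
! If the record is absent the READ fails and the split is simply skipped.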
1125
1126 CALL MPI_Comm_dup(mpi_comm_here,comdup,ierr)
1127 CALL MPI_Comm_split(comdup,icolor(mytask+1),mytask,mpi_comm_local,ierr)
1128 CALL wrf_set_dm_communicator( mpi_comm_local )
1129
1130 DEALLOCATE( icolor )
1131
1132 END SUBROUTINE split_communicator
1133
1134 SUBROUTINE init_module_dm
1135 IMPLICIT NONE
1136 INTEGER mpi_comm_local, ierr, mytask, nproc
1137 INCLUDE 'mpif.h'
1138 LOGICAL mpi_inited
1139 CALL mpi_initialized( mpi_inited, ierr )
1140 IF ( .NOT. mpi_inited ) THEN
1141 ! If MPI has not been initialized then initialize it and
1142 ! make MPI_COMM_WORLD the communicator.
1143 ! Otherwise, something else (e.g. quilt-io) has already
1144 ! initialized MPI, so just grab the communicator that
1145 ! should already be stored and use that.
1146 CALL mpi_init ( ierr )
1147 CALL wrf_termio_dup
1148 CALL wrf_set_dm_communicator ( MPI_COMM_WORLD )
1149 ENDIF
1150 CALL wrf_get_dm_communicator( mpi_comm_local )
1151 END SUBROUTINE init_module_dm
1152
1153 ! stub
1154 SUBROUTINE wrf_dm_move_nest ( parent, nest, dx, dy )
1155 USE module_domain
1156 IMPLICIT NONE
1157 TYPE (domain), INTENT(INOUT) :: parent, nest
1158 INTEGER, INTENT(IN) :: dx,dy
1159 RETURN
1160 END SUBROUTINE wrf_dm_move_nest
1161
1162 !------------------------------------------------------------------------------
1163 SUBROUTINE get_full_obs_vector( nsta, nerrf, niobf, &
1164 mp_local_uobmask, &
1165 mp_local_vobmask, &
1166 mp_local_cobmask, errf )
1167
1168 !------------------------------------------------------------------------------
1169 ! PURPOSE: Do MPI allgatherv operation across processors to get the
1170 ! errors at each observation point on all processors.
1171 !
1172 !------------------------------------------------------------------------------
1173 #ifndef STUBMPI
1174 INCLUDE 'mpif.h'
1175
1176 INTEGER, INTENT(IN) :: nsta ! Observation index.
1177 INTEGER, INTENT(IN) :: nerrf ! Number of error fields.
1178 INTEGER, INTENT(IN) :: niobf ! Number of observations.
1179 LOGICAL, INTENT(IN) :: MP_LOCAL_UOBMASK(NIOBF)
1180 LOGICAL, INTENT(IN) :: MP_LOCAL_VOBMASK(NIOBF)
1181 LOGICAL, INTENT(IN) :: MP_LOCAL_COBMASK(NIOBF)
1182 REAL, INTENT(INOUT) :: errf(nerrf, niobf)
1183
1184 ! Local declarations
1185 integer i, n, nlocal_dot, nlocal_crs
1186 REAL UVT_BUFFER(NIOBF) ! Buffer for holding U, V, or T
1187 REAL QRK_BUFFER(NIOBF) ! Buffer for holding Q or RKO
1188 REAL SFP_BUFFER(NIOBF) ! Buffer for holding Surface pressure
1189 INTEGER N_BUFFER(NIOBF)
1190 REAL FULL_BUFFER(NIOBF)
1191 INTEGER IFULL_BUFFER(NIOBF)
1192 INTEGER IDISPLACEMENT(1024) ! HARD CODED MAX NUMBER OF PROCESSORS
1193 INTEGER ICOUNT(1024) ! HARD CODED MAX NUMBER OF PROCESSORS
1194
1195 INTEGER :: MPI_COMM_COMP ! MPI group communicator
1196 INTEGER :: NPROCS ! Number of processors
1197 INTEGER :: IERR ! Error code from MPI routines
1198
1199 ! Get communicator for MPI operations.
1200 CALL WRF_GET_DM_COMMUNICATOR(MPI_COMM_COMP)
1201
1202 ! Get the number of processors in the communicator.
1203 CALL MPI_COMM_SIZE( MPI_COMM_COMP, NPROCS, IERR )
1204
1205 ! DO THE U FIELD
1206 NLOCAL_DOT = 0
1207 DO N = 1, NSTA
1208 IF ( MP_LOCAL_UOBMASK(N) ) THEN ! USE U-POINT MASK
1209 NLOCAL_DOT = NLOCAL_DOT + 1
1210 UVT_BUFFER(NLOCAL_DOT) = ERRF(1,N) ! U WIND COMPONENT
1211 SFP_BUFFER(NLOCAL_DOT) = ERRF(7,N) ! SURFACE PRESSURE
1212 QRK_BUFFER(NLOCAL_DOT) = ERRF(9,N) ! RKO
1213 N_BUFFER(NLOCAL_DOT) = N
1214 ENDIF
1215 ENDDO
1216 CALL MPI_ALLGATHER(NLOCAL_DOT,1,MPI_INTEGER, &
1217 ICOUNT,1,MPI_INTEGER, &
1218 MPI_COMM_COMP,IERR)
1219 I = 1
1220
1221 IDISPLACEMENT(1) = 0
1222 DO I = 2, NPROCS
1223 IDISPLACEMENT(I) = IDISPLACEMENT(I-1) + ICOUNT(I-1)
1224 ENDDO
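! For illustration: if the per-task counts gathered above were ICOUNT = (3,2,4),
! the loop above yields IDISPLACEMENT = (0,3,5), so each task's contribution lands
! immediately after the previous task's in the allgatherv result buffers below.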
1225 CALL MPI_ALLGATHERV( N_BUFFER, NLOCAL_DOT, MPI_INTEGER, &
1226 IFULL_BUFFER, ICOUNT, IDISPLACEMENT, &
1227 MPI_INTEGER, MPI_COMM_COMP, IERR)
1228 ! U
1229 CALL MPI_ALLGATHERV( UVT_BUFFER, NLOCAL_DOT, MPI_REAL, &
1230 FULL_BUFFER, ICOUNT, IDISPLACEMENT, &
1231 MPI_REAL, MPI_COMM_COMP, IERR)
1232 DO N = 1, NSTA
1233 ERRF(1,IFULL_BUFFER(N)) = FULL_BUFFER(N)
1234 ENDDO
1235 ! SURF PRESS AT U-POINTS
1236 CALL MPI_ALLGATHERV( SFP_BUFFER, NLOCAL_DOT, MPI_REAL, &
1237 FULL_BUFFER, ICOUNT, IDISPLACEMENT, &
1238 MPI_REAL, MPI_COMM_COMP, IERR)
1239 DO N = 1, NSTA
1240 ERRF(7,IFULL_BUFFER(N)) = FULL_BUFFER(N)
1241 ENDDO
1242 ! RKO
1243 CALL MPI_ALLGATHERV( QRK_BUFFER, NLOCAL_DOT, MPI_REAL, &
1244 FULL_BUFFER, ICOUNT, IDISPLACEMENT, &
1245 MPI_REAL, MPI_COMM_COMP, IERR)
1246 DO N = 1, NSTA
1247 ERRF(9,IFULL_BUFFER(N)) = FULL_BUFFER(N)
1248 ENDDO
1249
1250 ! DO THE V FIELD
1251 NLOCAL_DOT = 0
1252 DO N = 1, NSTA
1253 IF ( MP_LOCAL_VOBMASK(N) ) THEN ! USE V-POINT MASK
1254 NLOCAL_DOT = NLOCAL_DOT + 1
1255 UVT_BUFFER(NLOCAL_DOT) = ERRF(2,N) ! V WIND COMPONENT
1256 SFP_BUFFER(NLOCAL_DOT) = ERRF(8,N) ! SURFACE PRESSURE
1257 N_BUFFER(NLOCAL_DOT) = N
1258 ENDIF
1259 ENDDO
1260 CALL MPI_ALLGATHER(NLOCAL_DOT,1,MPI_INTEGER, &
1261 ICOUNT,1,MPI_INTEGER, &
1262 MPI_COMM_COMP,IERR)
1263 I = 1
1264
1265 IDISPLACEMENT(1) = 0
1266 DO I = 2, NPROCS
1267 IDISPLACEMENT(I) = IDISPLACEMENT(I-1) + ICOUNT(I-1)
1268 ENDDO
1269 CALL MPI_ALLGATHERV( N_BUFFER, NLOCAL_DOT, MPI_INTEGER, &
1270 IFULL_BUFFER, ICOUNT, IDISPLACEMENT, &
1271 MPI_INTEGER, MPI_COMM_COMP, IERR)
1272 ! V
1273 CALL MPI_ALLGATHERV( UVT_BUFFER, NLOCAL_DOT, MPI_REAL, &
1274 FULL_BUFFER, ICOUNT, IDISPLACEMENT, &
1275 MPI_REAL, MPI_COMM_COMP, IERR)
1276 DO N = 1, NSTA
1277 ERRF(2,IFULL_BUFFER(N)) = FULL_BUFFER(N)
1278 ENDDO
1279 ! SURF PRESS AT V-POINTS
1280 CALL MPI_ALLGATHERV( SFP_BUFFER, NLOCAL_DOT, MPI_REAL, &
1281 FULL_BUFFER, ICOUNT, IDISPLACEMENT, &
1282 MPI_REAL, MPI_COMM_COMP, IERR)
1283 DO N = 1, NSTA
1284 ERRF(8,IFULL_BUFFER(N)) = FULL_BUFFER(N)
1285 ENDDO
1286
1287 ! DO THE CROSS FIELDS, T AND Q
1288 NLOCAL_CRS = 0
1289 DO N = 1, NSTA
1290 IF ( MP_LOCAL_COBMASK(N) ) THEN ! USE MASS-POINT MASK
1291 NLOCAL_CRS = NLOCAL_CRS + 1
1292 UVT_BUFFER(NLOCAL_CRS) = ERRF(3,N) ! TEMPERATURE
1293 QRK_BUFFER(NLOCAL_CRS) = ERRF(4,N) ! MOISTURE
1294 SFP_BUFFER(NLOCAL_CRS) = ERRF(6,N) ! SURFACE PRESSURE
1295 N_BUFFER(NLOCAL_CRS) = N
1296 ENDIF
1297 ENDDO
1298 CALL MPI_ALLGATHER(NLOCAL_CRS,1,MPI_INTEGER, &
1299 ICOUNT,1,MPI_INTEGER, &
1300 MPI_COMM_COMP,IERR)
1301 IDISPLACEMENT(1) = 0
1302 DO I = 2, NPROCS
1303 IDISPLACEMENT(I) = IDISPLACEMENT(I-1) + ICOUNT(I-1)
1304 ENDDO
1305 CALL MPI_ALLGATHERV( N_BUFFER, NLOCAL_CRS, MPI_INTEGER, &
1306 IFULL_BUFFER, ICOUNT, IDISPLACEMENT, &
1307 MPI_INTEGER, MPI_COMM_COMP, IERR)
1308 ! T
1309 CALL MPI_ALLGATHERV( UVT_BUFFER, NLOCAL_CRS, MPI_REAL, &
1310 FULL_BUFFER, ICOUNT, IDISPLACEMENT, &
1311 MPI_REAL, MPI_COMM_COMP, IERR)
1312
1313 DO N = 1, NSTA
1314 ERRF(3,IFULL_BUFFER(N)) = FULL_BUFFER(N)
1315 ENDDO
1316 ! Q
1317 CALL MPI_ALLGATHERV( QRK_BUFFER, NLOCAL_CRS, MPI_REAL, &
1318 FULL_BUFFER, ICOUNT, IDISPLACEMENT, &
1319 MPI_REAL, MPI_COMM_COMP, IERR)
1320 DO N = 1, NSTA
1321 ERRF(4,IFULL_BUFFER(N)) = FULL_BUFFER(N)
1322 ENDDO
1323 ! SURF PRESS AT MASS POINTS
1324 CALL MPI_ALLGATHERV( SFP_BUFFER, NLOCAL_CRS, MPI_REAL, &
1325 FULL_BUFFER, ICOUNT, IDISPLACEMENT, &
1326 MPI_REAL, MPI_COMM_COMP, IERR)
1327 DO N = 1, NSTA
1328 ERRF(6,IFULL_BUFFER(N)) = FULL_BUFFER(N)
1329 ENDDO
1330 #endif
1331 END SUBROUTINE get_full_obs_vector
1332
1333 END MODULE module_dm
1334
1335 !=========================================================================
1336 ! wrf_dm_patch_domain has to be outside the module because it is called
1337 ! by a routine in module_domain but itself depends on module_domain (a circular USE otherwise)
1338
1339 SUBROUTINE wrf_dm_patch_domain ( id , domdesc , parent_id , parent_domdesc , &
1340 sd1 , ed1 , sp1 , ep1 , sm1 , em1 , &
1341 sd2 , ed2 , sp2 , ep2 , sm2 , em2 , &
1342 sd3 , ed3 , sp3 , ep3 , sm3 , em3 , &
1343 sp1x , ep1x , sm1x , em1x , &
1344 sp2x , ep2x , sm2x , em2x , &
1345 sp3x , ep3x , sm3x , em3x , &
1346 sp1y , ep1y , sm1y , em1y , &
1347 sp2y , ep2y , sm2y , em2y , &
1348 sp3y , ep3y , sm3y , em3y , &
1349 bdx , bdy )
1350 USE module_domain
1351 USE module_dm
1352 IMPLICIT NONE
1353
1354 INTEGER, INTENT(IN) :: sd1 , ed1 , sd2 , ed2 , sd3 , ed3 , bdx , bdy
1355 INTEGER, INTENT(OUT) :: sp1 , ep1 , sp2 , ep2 , sp3 , ep3 , &
1356 sm1 , em1 , sm2 , em2 , sm3 , em3
1357 INTEGER :: sp1x , ep1x , sp2x , ep2x , sp3x , ep3x , &
1358 sm1x , em1x , sm2x , em2x , sm3x , em3x
1359 INTEGER :: sp1y , ep1y , sp2y , ep2y , sp3y , ep3y , &
1360 sm1y , em1y , sm2y , em2y , sm3y , em3y
1361 INTEGER, INTENT(INOUT):: id , domdesc , parent_id , parent_domdesc
1362
1363 TYPE(domain), POINTER :: parent
1364 TYPE(domain), POINTER :: grid_ptr
1365
1366 ! this is necessary because we cannot pass parent directly into
1367 ! wrf_dm_patch_domain because creating the correct interface definitions
1368 ! would generate a circular USE reference between module_domain and module_dm
1369 ! see comment this date in module_domain for more information. JM 20020416
1370
1371 NULLIFY( parent )
1372 grid_ptr => head_grid
1373 CALL find_grid_by_id( parent_id , grid_ptr , parent )
1374
1375 CALL patch_domain_rsl_lite ( id , parent, parent_id , &
1376 sd1 , ed1 , sp1 , ep1 , sm1 , em1 , &
1377 sd2 , ed2 , sp2 , ep2 , sm2 , em2 , &
1378 sd3 , ed3 , sp3 , ep3 , sm3 , em3 , &
1379 sp1x , ep1x , sm1x , em1x , &
1380 sp2x , ep2x , sm2x , em2x , &
1381 sp3x , ep3x , sm3x , em3x , &
1382 sp1y , ep1y , sm1y , em1y , &
1383 sp2y , ep2y , sm2y , em2y , &
1384 sp3y , ep3y , sm3y , em3y , &
1385 bdx , bdy )
1386
1387 RETURN
1388 END SUBROUTINE wrf_dm_patch_domain
1389
1390 SUBROUTINE wrf_termio_dup
1391 IMPLICIT NONE
1392 INCLUDE 'mpif.h'
1393 INTEGER mytask, ntasks, ierr
1394 CALL mpi_comm_size(MPI_COMM_WORLD, ntasks, ierr )
1395 CALL mpi_comm_rank(MPI_COMM_WORLD, mytask, ierr )
1396 write(0,*)'starting wrf task ',mytask,' of ',ntasks
1397 CALL rsl_error_dup1( mytask )
1398 END SUBROUTINE wrf_termio_dup
1399
1400 SUBROUTINE wrf_get_myproc( myproc )
1401 USE module_dm
1402 IMPLICIT NONE
1403 INTEGER myproc
1404 myproc = mytask
1405 RETURN
1406 END SUBROUTINE wrf_get_myproc
1407
1408 SUBROUTINE wrf_get_nproc( nproc )
1409 USE module_dm
1410 IMPLICIT NONE
1411 INTEGER nproc
1412 nproc = ntasks
1413 RETURN
1414 END SUBROUTINE wrf_get_nproc
1415
1416 SUBROUTINE wrf_get_nprocx( nprocx )
1417 USE module_dm
1418 IMPLICIT NONE
1419 INTEGER nprocx
1420 nprocx = ntasks_x
1421 RETURN
1422 END SUBROUTINE wrf_get_nprocx
1423
1424 SUBROUTINE wrf_get_nprocy( nprocy )
1425 USE module_dm
1426 IMPLICIT NONE
1427 INTEGER nprocy
1428 nprocy = ntasks_y
1429 RETURN
1430 END SUBROUTINE wrf_get_nprocy
1431
1432 SUBROUTINE wrf_dm_bcast_bytes ( buf , size )
1433 USE module_dm
1434 IMPLICIT NONE
1435 INCLUDE 'mpif.h'
1436 INTEGER size
1437 #ifndef NEC
1438 INTEGER*1 BUF(size)
1439 #else
1440 CHARACTER*1 BUF(size)
1441 #endif
1442 CALL BYTE_BCAST ( buf , size, local_communicator )
1443 RETURN
1444 END SUBROUTINE wrf_dm_bcast_bytes
1445
1446 SUBROUTINE wrf_dm_bcast_string( BUF, N1 )
1447 IMPLICIT NONE
1448 INTEGER n1
1449 ! <DESCRIPTION>
1450 ! Collective operation. Given a string and a size in characters on task zero, broadcast and return that buffer on all tasks.
1451 !
1452 ! </DESCRIPTION>
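! For illustration: broadcasting the string 'wrfout' with n1 = 6 packs the six
! character codes into ibuf(1:6) below, broadcasts them as integers, and
! reassembles the string on every task.  Strings longer than 256 characters
! are silently truncated to 256.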
1453 CHARACTER*(*) buf
1454 INTEGER ibuf(256),i,n
1455 CHARACTER*256 tstr
1456 n = n1
1457 ! Root task is required to have the correct value of N1, other tasks
1458 ! might not have the correct value.
1459 CALL wrf_dm_bcast_integer( n , 1 )
1460 IF (n .GT. 256) n = 256
1461 IF (n .GT. 0 ) then
1462 DO i = 1, n
1463 ibuf(I) = ichar(buf(I:I))
1464 ENDDO
1465 CALL wrf_dm_bcast_integer( ibuf, n )
1466 buf = ''
1467 DO i = 1, n
1468 buf(i:i) = char(ibuf(i))
1469 ENDDO
1470 ENDIF
1471 RETURN
1472 END SUBROUTINE wrf_dm_bcast_string
1473
1474 SUBROUTINE wrf_dm_bcast_integer( BUF, N1 )
1475 IMPLICIT NONE
1476 INTEGER n1
1477 INTEGER buf(*)
1478 CALL wrf_dm_bcast_bytes ( BUF , N1 * IWORDSIZE )
1479 RETURN
1480 END SUBROUTINE wrf_dm_bcast_integer
1481
1482 SUBROUTINE wrf_dm_bcast_double( BUF, N1 )
1483 IMPLICIT NONE
1484 INTEGER n1
1485 ! this next declaration is REAL, not DOUBLE PRECISION because it will be autopromoted
1486 ! to double precision by the compiler when WRF is compiled for 8 byte reals. The only reason
1487 ! for having this separate routine is so we pass the correct MPI type to mpi_scatterv;
1488 ! since we are not indexing the globbuf and Field arrays it does not matter
1489 REAL buf(*)
1490 CALL wrf_dm_bcast_bytes ( BUF , N1 * DWORDSIZE )
1491 RETURN
1492 END SUBROUTINE wrf_dm_bcast_double
1493
1494 SUBROUTINE wrf_dm_bcast_real( BUF, N1 )
1495 IMPLICIT NONE
1496 INTEGER n1
1497 REAL buf(*)
1498 CALL wrf_dm_bcast_bytes ( BUF , N1 * RWORDSIZE )
1499 RETURN
1500 END SUBROUTINE wrf_dm_bcast_real
1501
1502 SUBROUTINE wrf_dm_bcast_logical( BUF, N1 )
1503 IMPLICIT NONE
1504 INTEGER n1
1505 LOGICAL buf(*)
1506 CALL wrf_dm_bcast_bytes ( BUF , N1 * LWORDSIZE )
1507 RETURN
1508 END SUBROUTINE wrf_dm_bcast_logical
1509
1510 SUBROUTINE write_68( grid, v , s , &
1511 ids, ide, jds, jde, kds, kde, &
1512 ims, ime, jms, jme, kms, kme, &
1513 its, ite, jts, jte, kts, kte )
1514 USE module_domain
1515 IMPLICIT NONE
1516 TYPE(domain) , INTENT (INOUT) :: grid
1517 CHARACTER *(*) s
1518 INTEGER ids, ide, jds, jde, kds, kde, &
1519 ims, ime, jms, jme, kms, kme, &
1520 its, ite, jts, jte, kts, kte
1521 REAL, DIMENSION( ims:ime , kms:kme, jms:jme ) :: v
1522
1523 INTEGER i,j,k,ierr
1524
1525 logical, external :: wrf_dm_on_monitor
1526 real globbuf( ids:ide, kds:kde, jds:jde )
1527 character*3 ord, stag
1528
1529 if ( kds == kde ) then
1530 ord = 'xy'
1531 stag = 'xy'
1532 CALL wrf_patch_to_global_real ( v, globbuf, grid%domdesc, stag, ord, &
1533 ids, ide, jds, jde, kds, kde, &
1534 ims, ime, jms, jme, kms, kme, &
1535 its, ite, jts, jte, kts, kte )
1536 else
1537
1538 stag = 'xyz'
1539 ord = 'xzy'
1540 CALL wrf_patch_to_global_real ( v, globbuf, grid%domdesc, stag, ord, &
1541 ids, ide, kds, kde, jds, jde, &
1542 ims, ime, kms, kme, jms, jme, &
1543 its, ite, kts, kte, jts, jte )
1544 endif
1545
1546
1547 if ( wrf_dm_on_monitor() ) THEN
1548 WRITE(68,*) ide-ids+1, jde-jds+1 , s
1549 DO j = jds, jde
1550 DO i = ids, ide
1551 WRITE(68,*) globbuf(i,1,j)
1552 ENDDO
1553 ENDDO
1554 endif
1555
1556 RETURN
1557 END
1558
1559 SUBROUTINE wrf_abort
1560 IMPLICIT NONE
1561 INCLUDE 'mpif.h'
1562 INTEGER ierr
1563 CALL mpi_abort(MPI_COMM_WORLD,1,ierr)
1564 END SUBROUTINE wrf_abort
1565
1566 SUBROUTINE wrf_dm_shutdown
1567 IMPLICIT NONE
1568 INTEGER ierr
1569 CALL MPI_FINALIZE( ierr )
1570 RETURN
1571 END SUBROUTINE wrf_dm_shutdown
1572
1573 LOGICAL FUNCTION wrf_dm_on_monitor()
1574 USE module_dm
1575 IMPLICIT NONE
1576 INCLUDE 'mpif.h'
1577 INTEGER tsk, ierr, mpi_comm_local
1578 CALL wrf_get_dm_communicator( mpi_comm_local )
1579 CALL mpi_comm_rank ( mpi_comm_local, tsk , ierr )
1580 wrf_dm_on_monitor = tsk .EQ. 0
1581 RETURN
1582 END FUNCTION wrf_dm_on_monitor
1583
1584 INTEGER FUNCTION wrf_dm_monitor_rank()
1585 USE module_dm
1586 IMPLICIT NONE
1587 wrf_dm_monitor_rank = 0
1588 RETURN
1589 END FUNCTION wrf_dm_monitor_rank
1590
1591 SUBROUTINE wrf_get_dm_communicator ( communicator )
1592 USE module_dm
1593 IMPLICIT NONE
1594 INTEGER , INTENT(OUT) :: communicator
1595 communicator = local_communicator
1596 RETURN
1597 END SUBROUTINE wrf_get_dm_communicator
1598
1599 SUBROUTINE wrf_get_dm_iocommunicator ( iocommunicator )
1600 USE module_dm
1601 IMPLICIT NONE
1602 INTEGER , INTENT(OUT) :: iocommunicator
1603 iocommunicator = local_iocommunicator
1604 RETURN
1605 END SUBROUTINE wrf_get_dm_iocommunicator
1606
1607 SUBROUTINE wrf_set_dm_communicator ( communicator )
1608 USE module_dm
1609 IMPLICIT NONE
1610 INTEGER , INTENT(IN) :: communicator
1611 local_communicator = communicator
1612 RETURN
1613 END SUBROUTINE wrf_set_dm_communicator
1614
1615 SUBROUTINE wrf_set_dm_iocommunicator ( iocommunicator )
1616 USE module_dm
1617 IMPLICIT NONE
1618 INTEGER , INTENT(IN) :: iocommunicator
1619 local_iocommunicator = iocommunicator
1620 RETURN
1621 END SUBROUTINE wrf_set_dm_iocommunicator
1622
1623
1624 !!!!!!!!!!!!!!!!!!!!!!! PATCH TO GLOBAL !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
1625
1626 SUBROUTINE wrf_patch_to_global_real (buf,globbuf,domdesc,stagger,ordering,&
1627 DS1,DE1,DS2,DE2,DS3,DE3,&
1628 MS1,ME1,MS2,ME2,MS3,ME3,&
1629 PS1,PE1,PS2,PE2,PS3,PE3 )
1630 IMPLICIT NONE
1631 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,&
1632 MS1,ME1,MS2,ME2,MS3,ME3,&
1633 PS1,PE1,PS2,PE2,PS3,PE3
1634 CHARACTER *(*) stagger,ordering
1635 INTEGER fid,domdesc
1636 REAL globbuf(*)
1637 REAL buf(*)
1638
1639 CALL wrf_patch_to_global_generic (buf,globbuf,domdesc,stagger,ordering,RWORDSIZE,&
1640 DS1,DE1,DS2,DE2,DS3,DE3,&
1641 MS1,ME1,MS2,ME2,MS3,ME3,&
1642 PS1,PE1,PS2,PE2,PS3,PE3 )
1643
1644 RETURN
1645 END SUBROUTINE wrf_patch_to_global_real
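! Illustrative (hypothetical) call, following the pattern used by write_68 above: gather a
! 2-d REAL field fld, dimensioned (ims:ime,jms:jme), into a monitor-side buffer
! gbuf(ids:ide,jds:jde). The names fld and gbuf are placeholders, not fields in this module;
! with stagger 'xy' the domain end indices are used exactly as passed (no adjustment).
!
!   CALL wrf_patch_to_global_real ( fld, gbuf, grid%domdesc, 'xy', 'xy', &
!                                   ids, ide, jds, jde, 1, 1,            &
!                                   ims, ime, jms, jme, 1, 1,            &
!                                   ips, ipe, jps, jpe, 1, 1 )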
1646
1647 SUBROUTINE wrf_patch_to_global_double (buf,globbuf,domdesc,stagger,ordering,&
1648 DS1,DE1,DS2,DE2,DS3,DE3,&
1649 MS1,ME1,MS2,ME2,MS3,ME3,&
1650 PS1,PE1,PS2,PE2,PS3,PE3 )
1651 IMPLICIT NONE
1652 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,&
1653 MS1,ME1,MS2,ME2,MS3,ME3,&
1654 PS1,PE1,PS2,PE2,PS3,PE3
1655 CHARACTER *(*) stagger,ordering
1656 INTEGER fid,domdesc
1657 ! this next declaration is REAL, not DOUBLE PRECISION, because it will be autopromoted
1658 ! to double precision by the compiler when WRF is compiled for 8-byte reals. The only reason
1659 ! for having this separate routine is so that we pass the correct MPI type to mpi_scatterv;
1660 ! since we never index the globbuf and Field arrays here, the declared type does not matter
1661 REAL globbuf(*)
1662 REAL buf(*)
1663
1664 CALL wrf_patch_to_global_generic (buf,globbuf,domdesc,stagger,ordering,DWORDSIZE,&
1665 DS1,DE1,DS2,DE2,DS3,DE3,&
1666 MS1,ME1,MS2,ME2,MS3,ME3,&
1667 PS1,PE1,PS2,PE2,PS3,PE3 )
1668
1669 RETURN
1670 END SUBROUTINE wrf_patch_to_global_double
1671
1672
1673 SUBROUTINE wrf_patch_to_global_integer (buf,globbuf,domdesc,stagger,ordering,&
1674 DS1,DE1,DS2,DE2,DS3,DE3,&
1675 MS1,ME1,MS2,ME2,MS3,ME3,&
1676 PS1,PE1,PS2,PE2,PS3,PE3 )
1677 IMPLICIT NONE
1678 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,&
1679 MS1,ME1,MS2,ME2,MS3,ME3,&
1680 PS1,PE1,PS2,PE2,PS3,PE3
1681 CHARACTER *(*) stagger,ordering
1682 INTEGER fid,domdesc
1683 INTEGER globbuf(*)
1684 INTEGER buf(*)
1685
1686 CALL wrf_patch_to_global_generic (buf,globbuf,domdesc,stagger,ordering,IWORDSIZE,&
1687 DS1,DE1,DS2,DE2,DS3,DE3,&
1688 MS1,ME1,MS2,ME2,MS3,ME3,&
1689 PS1,PE1,PS2,PE2,PS3,PE3 )
1690
1691 RETURN
1692 END SUBROUTINE wrf_patch_to_global_integer
1693
1694
1695 SUBROUTINE wrf_patch_to_global_logical (buf,globbuf,domdesc,stagger,ordering,&
1696 DS1,DE1,DS2,DE2,DS3,DE3,&
1697 MS1,ME1,MS2,ME2,MS3,ME3,&
1698 PS1,PE1,PS2,PE2,PS3,PE3 )
1699 IMPLICIT NONE
1700 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,&
1701 MS1,ME1,MS2,ME2,MS3,ME3,&
1702 PS1,PE1,PS2,PE2,PS3,PE3
1703 CHARACTER *(*) stagger,ordering
1704 INTEGER fid,domdesc
1705 LOGICAL globbuf(*)
1706 LOGICAL buf(*)
1707
1708 CALL wrf_patch_to_global_generic (buf,globbuf,domdesc,stagger,ordering,LWORDSIZE,&
1709 DS1,DE1,DS2,DE2,DS3,DE3,&
1710 MS1,ME1,MS2,ME2,MS3,ME3,&
1711 PS1,PE1,PS2,PE2,PS3,PE3 )
1712
1713 RETURN
1714 END SUBROUTINE wrf_patch_to_global_logical
1715
1716 #ifdef DEREF_KLUDGE
1717 # define FRSTELEM (1)
1718 #else
1719 # define FRSTELEM
1720 #endif
1721
1722 SUBROUTINE wrf_patch_to_global_generic (buf,globbuf,domdesc,stagger,ordering,typesize,&
1723 DS1a,DE1a,DS2a,DE2a,DS3a,DE3a,&
1724 MS1a,ME1a,MS2a,ME2a,MS3a,ME3a,&
1725 PS1a,PE1a,PS2a,PE2a,PS3a,PE3a )
1726 USE module_driver_constants
1727 USE module_timing
1728 USE module_wrf_error
1729 USE module_dm
1730 IMPLICIT NONE
1731 INTEGER DS1a,DE1a,DS2a,DE2a,DS3a,DE3a,&
1732 MS1a,ME1a,MS2a,ME2a,MS3a,ME3a,&
1733 PS1a,PE1a,PS2a,PE2a,PS3a,PE3A
1734 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,&
1735 MS1,ME1,MS2,ME2,MS3,ME3,&
1736 PS1,PE1,PS2,PE2,PS3,PE3
1737 INTEGER ids,ide,jds,jde,kds,kde,&
1738 ims,ime,jms,jme,kms,kme,&
1739 ips,ipe,jps,jpe,kps,kpe
1740 CHARACTER *(*) stagger,ordering
1741 LOGICAL, EXTERNAL :: wrf_dm_on_monitor, has_char
1742 INTEGER fid,domdesc,typesize,ierr
1743 REAL globbuf(*)
1744 REAL buf(*)
1745
1746 INTEGER i, j, k, ndim
1747 INTEGER Patch(3,2), Gpatch(3,2,ntasks)
1748 ! allocated further down, after the D indices are potentially recalculated for staggering
1749 REAL, ALLOCATABLE :: tmpbuf( : )
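! locbuf is declared REAL but sized in REAL words: the patch volume is scaled by
! typesize/RWORDSIZE so the buffer can hold elements of any supported word size, with
! 32 extra words of padding.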
1750 REAL locbuf( (PE1a-PS1a+1)*(PE2a-PS2a+1)*(PE3a-PS3a+1)/RWORDSIZE*typesize+32 )
1751
1752 DS1 = DS1a ; DE1 = DE1a ; DS2=DS2a ; DE2 = DE2a ; DS3 = DS3a ; DE3 = DE3a
1753 MS1 = MS1a ; ME1 = ME1a ; MS2=MS2a ; ME2 = ME2a ; MS3 = MS3a ; ME3 = ME3a
1754 PS1 = PS1a ; PE1 = PE1a ; PS2=PS2a ; PE2 = PE2a ; PS3 = PS3a ; PE3 = PE3a
1755
1756 SELECT CASE ( TRIM(ordering) )
1757 CASE ( 'xy', 'yx' )
1758 ndim = 2
1759 CASE DEFAULT
1760 ndim = 3 ! where appropriate
1761 END SELECT
1762
1763 SELECT CASE ( TRIM(ordering) )
1764 CASE ( 'xyz','xy' )
1765 ! the non-staggered variables come in at one-less than
1766 ! domain dimensions, but code wants full domain spec, so
1767 ! adjust if not staggered
1768 IF ( .NOT. has_char( stagger, 'x' ) ) DE1 = DE1+1
1769 IF ( .NOT. has_char( stagger, 'y' ) ) DE2 = DE2+1
1770 IF ( ndim .EQ. 3 .AND. .NOT. has_char( stagger, 'z' ) ) DE3 = DE3+1
1771 CASE ( 'yxz','yx' )
1772 IF ( .NOT. has_char( stagger, 'x' ) ) DE2 = DE2+1
1773 IF ( .NOT. has_char( stagger, 'y' ) ) DE1 = DE1+1
1774 IF ( ndim .EQ. 3 .AND. .NOT. has_char( stagger, 'z' ) ) DE3 = DE3+1
1775 CASE ( 'zxy' )
1776 IF ( .NOT. has_char( stagger, 'x' ) ) DE2 = DE2+1
1777 IF ( .NOT. has_char( stagger, 'y' ) ) DE3 = DE3+1
1778 IF ( ndim .EQ. 3 .AND. .NOT. has_char( stagger, 'z' ) ) DE1 = DE1+1
1779 CASE ( 'xzy' )
1780 IF ( .NOT. has_char( stagger, 'x' ) ) DE1 = DE1+1
1781 IF ( .NOT. has_char( stagger, 'y' ) ) DE3 = DE3+1
1782 IF ( ndim .EQ. 3 .AND. .NOT. has_char( stagger, 'z' ) ) DE2 = DE2+1
1783 CASE DEFAULT
1784 END SELECT
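! Worked example of the adjustment above (illustrative numbers): an unstaggered field on a
! domain whose staggered x-extent is 1..425 arrives with DE1 = 424; bumping DE1 to 425 means
! the monitor-side global buffer below is always allocated with the full domain spec.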
1785
1786 ! moved to here to be after the potential recalculations of D dims
1787 IF ( wrf_dm_on_monitor() ) THEN
1788 ALLOCATE ( tmpbuf ( (DE1-DS1+1)*(DE2-DS2+1)*(DE3-DS3+1)/RWORDSIZE*typesize+32 ), STAT=ierr )
1789 ELSE
1790 ALLOCATE ( tmpbuf ( 1 ), STAT=ierr )
1791 ENDIF
1792 IF ( ierr .ne. 0 ) CALL wrf_error_fatal ('allocating tmpbuf in wrf_patch_to_global_generic')
1793
1794 Patch(1,1) = ps1 ; Patch(1,2) = pe1 ! use patch dims
1795 Patch(2,1) = ps2 ; Patch(2,2) = pe2
1796 Patch(3,1) = ps3 ; Patch(3,2) = pe3
1797
1798 IF ( typesize .EQ. RWORDSIZE ) THEN
1799 CALL just_patch_r ( buf , locbuf , size(locbuf), &
1800 PS1, PE1, PS2, PE2, PS3, PE3 , &
1801 MS1, ME1, MS2, ME2, MS3, ME3 )
1802 ELSE IF ( typesize .EQ. IWORDSIZE ) THEN
1803 CALL just_patch_i ( buf , locbuf , size(locbuf), &
1804 PS1, PE1, PS2, PE2, PS3, PE3 , &
1805 MS1, ME1, MS2, ME2, MS3, ME3 )
1806 ELSE IF ( typesize .EQ. DWORDSIZE ) THEN
1807 CALL just_patch_d ( buf , locbuf , size(locbuf), &
1808 PS1, PE1, PS2, PE2, PS3, PE3 , &
1809 MS1, ME1, MS2, ME2, MS3, ME3 )
1810 ELSE IF ( typesize .EQ. LWORDSIZE ) THEN
1811 CALL just_patch_l ( buf , locbuf , size(locbuf), &
1812 PS1, PE1, PS2, PE2, PS3, PE3 , &
1813 MS1, ME1, MS2, ME2, MS3, ME3 )
1814 ENDIF
1815
1816 ! defined in external/io_quilt
1817 CALL collect_on_comm0 ( local_communicator , IWORDSIZE , &
1818 Patch , 6 , &
1819 GPatch , 6*ntasks )
1820
1821 CALL collect_on_comm0 ( local_communicator , typesize , &
1822 locbuf , (pe1-ps1+1)*(pe2-ps2+1)*(pe3-ps3+1), &
1823 tmpbuf FRSTELEM , (de1-ds1+1)*(de2-ds2+1)*(de3-ds3+1) )
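! After the two collects, the monitor task holds every task's patch extents in GPatch and
! the concatenation of all tasks' packed patches (in task-rank order) in tmpbuf; the
! patch_2_outbuf_* routines below copy tmpbuf into the global array.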
1824
1825 ndim = len(TRIM(ordering))
1826
1827 IF ( wrf_at_debug_level(500) ) THEN
1828 CALL start_timing
1829 ENDIF
1830
1831 IF ( ndim .GE. 2 .AND. wrf_dm_on_monitor() ) THEN
1832
1833 IF ( typesize .EQ. RWORDSIZE ) THEN
1834 CALL patch_2_outbuf_r ( tmpbuf FRSTELEM , globbuf , &
1835 DS1, DE1, DS2, DE2, DS3, DE3 , &
1836 GPATCH )
1837 ELSE IF ( typesize .EQ. IWORDSIZE ) THEN
1838 CALL patch_2_outbuf_i ( tmpbuf FRSTELEM , globbuf , &
1839 DS1, DE1, DS2, DE2, DS3, DE3 , &
1840 GPATCH )
1841 ELSE IF ( typesize .EQ. DWORDSIZE ) THEN
1842 CALL patch_2_outbuf_d ( tmpbuf FRSTELEM , globbuf , &
1843 DS1, DE1, DS2, DE2, DS3, DE3 , &
1844 GPATCH )
1845 ELSE IF ( typesize .EQ. LWORDSIZE ) THEN
1846 CALL patch_2_outbuf_l ( tmpbuf FRSTELEM , globbuf , &
1847 DS1, DE1, DS2, DE2, DS3, DE3 , &
1848 GPATCH )
1849 ENDIF
1850
1851 ENDIF
1852
1853 IF ( wrf_at_debug_level(500) ) THEN
1854 CALL end_timing('wrf_patch_to_global_generic')
1855 ENDIF
1856 DEALLOCATE( tmpbuf )
1857 RETURN
1858 END SUBROUTINE wrf_patch_to_global_generic
1859
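! The just_patch_<type> routines copy the patch region (PS:PE in each dimension) of a
! memory-dimensioned (MS:ME) array into a contiguous 1-d buffer, innermost index fastest,
! ready for the collect_on_comm0 gather in wrf_patch_to_global_generic.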
1860 SUBROUTINE just_patch_i ( inbuf , outbuf, noutbuf, &
1861 PS1,PE1,PS2,PE2,PS3,PE3, &
1862 MS1,ME1,MS2,ME2,MS3,ME3 )
1863 USE module_dm
1864 IMPLICIT NONE
1865 INTEGER , INTENT(IN) :: noutbuf
1866 INTEGER , DIMENSION(noutbuf) , INTENT(OUT) :: outbuf
1867 INTEGER MS1,ME1,MS2,ME2,MS3,ME3
1868 INTEGER PS1,PE1,PS2,PE2,PS3,PE3
1869 INTEGER , DIMENSION( MS1:ME1,MS2:ME2,MS3:ME3 ) , INTENT(IN) :: inbuf
1870 ! Local
1871 INTEGER :: i,j,k,n , icurs
1872 icurs = 1
1873 DO k = PS3, PE3
1874 DO j = PS2, PE2
1875 DO i = PS1, PE1
1876 outbuf( icurs ) = inbuf( i, j, k )
1877 icurs = icurs + 1
1878 ENDDO
1879 ENDDO
1880 ENDDO
1881 RETURN
1882 END SUBROUTINE just_patch_i
1883
1884 SUBROUTINE just_patch_r ( inbuf , outbuf, noutbuf, &
1885 PS1,PE1,PS2,PE2,PS3,PE3, &
1886 MS1,ME1,MS2,ME2,MS3,ME3 )
1887 USE module_dm
1888 IMPLICIT NONE
1889 INTEGER , INTENT(IN) :: noutbuf
1890 REAL , DIMENSION(noutbuf) , INTENT(OUT) :: outbuf
1891 INTEGER MS1,ME1,MS2,ME2,MS3,ME3
1892 INTEGER PS1,PE1,PS2,PE2,PS3,PE3
1893 REAL , DIMENSION( MS1:ME1,MS2:ME2,MS3:ME3 ) , INTENT(in) :: inbuf
1894 ! Local
1895 INTEGER :: i,j,k , icurs
1896 icurs = 1
1897 DO k = PS3, PE3
1898 DO j = PS2, PE2
1899 DO i = PS1, PE1
1900 outbuf( icurs ) = inbuf( i, j, k )
1901 icurs = icurs + 1
1902 ENDDO
1903 ENDDO
1904 ENDDO
1905 RETURN
1906 END SUBROUTINE just_patch_r
1907
1908 SUBROUTINE just_patch_d ( inbuf , outbuf, noutbuf, &
1909 PS1,PE1,PS2,PE2,PS3,PE3, &
1910 MS1,ME1,MS2,ME2,MS3,ME3 )
1911 USE module_dm
1912 IMPLICIT NONE
1913 INTEGER , INTENT(IN) :: noutbuf
1914 DOUBLE PRECISION , DIMENSION(noutbuf) , INTENT(OUT) :: outbuf
1915 INTEGER MS1,ME1,MS2,ME2,MS3,ME3
1916 INTEGER PS1,PE1,PS2,PE2,PS3,PE3
1917 DOUBLE PRECISION , DIMENSION( MS1:ME1,MS2:ME2,MS3:ME3 ) , INTENT(in) :: inbuf
1918 ! Local
1919 INTEGER :: i,j,k,n , icurs
1920 icurs = 1
1921 DO k = PS3, PE3
1922 DO j = PS2, PE2
1923 DO i = PS1, PE1
1924 outbuf( icurs ) = inbuf( i, j, k )
1925 icurs = icurs + 1
1926 ENDDO
1927 ENDDO
1928 ENDDO
1929 RETURN
1930 END SUBROUTINE just_patch_d
1931
1932 SUBROUTINE just_patch_l ( inbuf , outbuf, noutbuf, &
1933 PS1,PE1,PS2,PE2,PS3,PE3, &
1934 MS1,ME1,MS2,ME2,MS3,ME3 )
1935 USE module_dm
1936 IMPLICIT NONE
1937 INTEGER , INTENT(IN) :: noutbuf
1938 LOGICAL , DIMENSION(noutbuf) , INTENT(OUT) :: outbuf
1939 INTEGER MS1,ME1,MS2,ME2,MS3,ME3
1940 INTEGER PS1,PE1,PS2,PE2,PS3,PE3
1941 LOGICAL , DIMENSION( MS1:ME1,MS2:ME2,MS3:ME3 ) , INTENT(in) :: inbuf
1942 ! Local
1943 INTEGER :: i,j,k,n , icurs
1944 icurs = 1
1945 DO k = PS3, PE3
1946 DO j = PS2, PE2
1947 DO i = PS1, PE1
1948 outbuf( icurs ) = inbuf( i, j, k )
1949 icurs = icurs + 1
1950 ENDDO
1951 ENDDO
1952 ENDDO
1953 RETURN
1954 END SUBROUTINE just_patch_l
1955
1956
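! The patch_2_outbuf_<type> routines run on the monitor: they walk the gathered buffer in
! task-rank order and copy each task's patch into its place in the global (DS:DE) array,
! using the extents collected in GPATCH.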
1957 SUBROUTINE patch_2_outbuf_r( inbuf, outbuf, &
1958 DS1,DE1,DS2,DE2,DS3,DE3, &
1959 GPATCH )
1960 USE module_dm
1961 IMPLICIT NONE
1962 REAL , DIMENSION(*) , INTENT(IN) :: inbuf
1963 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,GPATCH(3,2,ntasks)
1964 REAL , DIMENSION( DS1:DE1,DS2:DE2,DS3:DE3 ) , INTENT(out) :: outbuf
1965 ! Local
1966 INTEGER :: i,j,k,n , icurs
1967 icurs = 1
1968 DO n = 1, ntasks
1969 DO k = GPATCH( 3,1,n ), GPATCH( 3,2,n )
1970 DO j = GPATCH( 2,1,n ), GPATCH( 2,2,n )
1971 DO i = GPATCH( 1,1,n ), GPATCH( 1,2,n )
1972 outbuf( i, j, k ) = inbuf( icurs )
1973 icurs = icurs + 1
1974 ENDDO
1975 ENDDO
1976 ENDDO
1977 ENDDO
1978
1979 RETURN
1980 END SUBROUTINE patch_2_outbuf_r
1981
1982 SUBROUTINE patch_2_outbuf_i( inbuf, outbuf, &
1983 DS1,DE1,DS2,DE2,DS3,DE3,&
1984 GPATCH )
1985 USE module_dm
1986 IMPLICIT NONE
1987 INTEGER , DIMENSION(*) , INTENT(IN) :: inbuf
1988 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,GPATCH(3,2,ntasks)
1989 INTEGER , DIMENSION( DS1:DE1,DS2:DE2,DS3:DE3 ) , INTENT(out) :: outbuf
1990 ! Local
1991 INTEGER :: i,j,k,n , icurs
1992 icurs = 1
1993 DO n = 1, ntasks
1994 DO k = GPATCH( 3,1,n ), GPATCH( 3,2,n )
1995 DO j = GPATCH( 2,1,n ), GPATCH( 2,2,n )
1996 DO i = GPATCH( 1,1,n ), GPATCH( 1,2,n )
1997 outbuf( i, j, k ) = inbuf( icurs )
1998 icurs = icurs + 1
1999 ENDDO
2000 ENDDO
2001 ENDDO
2002 ENDDO
2003 RETURN
2004 END SUBROUTINE patch_2_outbuf_i
2005
2006 SUBROUTINE patch_2_outbuf_d( inbuf, outbuf, &
2007 DS1,DE1,DS2,DE2,DS3,DE3,&
2008 GPATCH )
2009 USE module_dm
2010 IMPLICIT NONE
2011 DOUBLE PRECISION , DIMENSION(*) , INTENT(IN) :: inbuf
2012 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,GPATCH(3,2,ntasks)
2013 DOUBLE PRECISION , DIMENSION( DS1:DE1,DS2:DE2,DS3:DE3 ) , INTENT(out) :: outbuf
2014 ! Local
2015 INTEGER :: i,j,k,n , icurs
2016 icurs = 1
2017 DO n = 1, ntasks
2018 DO k = GPATCH( 3,1,n ), GPATCH( 3,2,n )
2019 DO j = GPATCH( 2,1,n ), GPATCH( 2,2,n )
2020 DO i = GPATCH( 1,1,n ), GPATCH( 1,2,n )
2021 outbuf( i, j, k ) = inbuf( icurs )
2022 icurs = icurs + 1
2023 ENDDO
2024 ENDDO
2025 ENDDO
2026 ENDDO
2027 RETURN
2028 END SUBROUTINE patch_2_outbuf_d
2029
2030 SUBROUTINE patch_2_outbuf_l( inbuf, outbuf, &
2031 DS1,DE1,DS2,DE2,DS3,DE3,&
2032 GPATCH )
2033 USE module_dm
2034 IMPLICIT NONE
2035 LOGICAL , DIMENSION(*) , INTENT(IN) :: inbuf
2036 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,GPATCH(3,2,ntasks)
2037 LOGICAL , DIMENSION( DS1:DE1,DS2:DE2,DS3:DE3 ) , INTENT(out) :: outbuf
2038 ! Local
2039 INTEGER :: i,j,k,n , icurs
2040 icurs = 1
2041 DO n = 1, ntasks
2042 DO k = GPATCH( 3,1,n ), GPATCH( 3,2,n )
2043 DO j = GPATCH( 2,1,n ), GPATCH( 2,2,n )
2044 DO i = GPATCH( 1,1,n ), GPATCH( 1,2,n )
2045 outbuf( i, j, k ) = inbuf( icurs )
2046 icurs = icurs + 1
2047 ENDDO
2048 ENDDO
2049 ENDDO
2050 ENDDO
2051 RETURN
2052 END SUBROUTINE patch_2_outbuf_l
2053
2054 !!!!!!!!!!!!!!!!!!!!!!! GLOBAL TO PATCH !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
2055
2056 SUBROUTINE wrf_global_to_patch_real (globbuf,buf,domdesc,stagger,ordering,&
2057 DS1,DE1,DS2,DE2,DS3,DE3,&
2058 MS1,ME1,MS2,ME2,MS3,ME3,&
2059 PS1,PE1,PS2,PE2,PS3,PE3 )
2060 IMPLICIT NONE
2061 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,&
2062 MS1,ME1,MS2,ME2,MS3,ME3,&
2063 PS1,PE1,PS2,PE2,PS3,PE3
2064 CHARACTER *(*) stagger,ordering
2065 INTEGER fid,domdesc
2066 REAL globbuf(*)
2067 REAL buf(*)
2068
2069 CALL wrf_global_to_patch_generic (globbuf,buf,domdesc,stagger,ordering,RWORDSIZE,&
2070 DS1,DE1,DS2,DE2,DS3,DE3,&
2071 MS1,ME1,MS2,ME2,MS3,ME3,&
2072 PS1,PE1,PS2,PE2,PS3,PE3 )
2073 RETURN
2074 END SUBROUTINE wrf_global_to_patch_real
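! Illustrative (hypothetical) call in the opposite direction: distribute a monitor-side
! 2-d buffer gbuf(ids:ide,jds:jde) into each task's patch of fld(ims:ime,jms:jme).
! As above, fld and gbuf are placeholders, not fields in this module.
!
!   CALL wrf_global_to_patch_real ( gbuf, fld, grid%domdesc, 'xy', 'xy', &
!                                   ids, ide, jds, jde, 1, 1,            &
!                                   ims, ime, jms, jme, 1, 1,            &
!                                   ips, ipe, jps, jpe, 1, 1 )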
2075
2076 SUBROUTINE wrf_global_to_patch_double (globbuf,buf,domdesc,stagger,ordering,&
2077 DS1,DE1,DS2,DE2,DS3,DE3,&
2078 MS1,ME1,MS2,ME2,MS3,ME3,&
2079 PS1,PE1,PS2,PE2,PS3,PE3 )
2080 IMPLICIT NONE
2081 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,&
2082 MS1,ME1,MS2,ME2,MS3,ME3,&
2083 PS1,PE1,PS2,PE2,PS3,PE3
2084 CHARACTER *(*) stagger,ordering
2085 INTEGER fid,domdesc
2086 ! this next declaration is REAL, not DOUBLE PRECISION, because it will be autopromoted
2087 ! to double precision by the compiler when WRF is compiled for 8-byte reals. The only reason
2088 ! for having this separate routine is so that we pass the correct MPI type to mpi_scatterv;
2089 ! since we never index the globbuf and Field arrays here, the declared type does not matter
2090 REAL globbuf(*)
2091 REAL buf(*)
2092
2093 CALL wrf_global_to_patch_generic (globbuf,buf,domdesc,stagger,ordering,DWORDSIZE,&
2094 DS1,DE1,DS2,DE2,DS3,DE3,&
2095 MS1,ME1,MS2,ME2,MS3,ME3,&
2096 PS1,PE1,PS2,PE2,PS3,PE3 )
2097 RETURN
2098 END SUBROUTINE wrf_global_to_patch_double
2099
2100
2101 SUBROUTINE wrf_global_to_patch_integer (globbuf,buf,domdesc,stagger,ordering,&
2102 DS1,DE1,DS2,DE2,DS3,DE3,&
2103 MS1,ME1,MS2,ME2,MS3,ME3,&
2104 PS1,PE1,PS2,PE2,PS3,PE3 )
2105 IMPLICIT NONE
2106 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,&
2107 MS1,ME1,MS2,ME2,MS3,ME3,&
2108 PS1,PE1,PS2,PE2,PS3,PE3
2109 CHARACTER *(*) stagger,ordering
2110 INTEGER fid,domdesc
2111 INTEGER globbuf(*)
2112 INTEGER buf(*)
2113
2114 CALL wrf_global_to_patch_generic (globbuf,buf,domdesc,stagger,ordering,IWORDSIZE,&
2115 DS1,DE1,DS2,DE2,DS3,DE3,&
2116 MS1,ME1,MS2,ME2,MS3,ME3,&
2117 PS1,PE1,PS2,PE2,PS3,PE3 )
2118 RETURN
2119 END SUBROUTINE wrf_global_to_patch_integer
2120
2121 SUBROUTINE wrf_global_to_patch_logical (globbuf,buf,domdesc,stagger,ordering,&
2122 DS1,DE1,DS2,DE2,DS3,DE3,&
2123 MS1,ME1,MS2,ME2,MS3,ME3,&
2124 PS1,PE1,PS2,PE2,PS3,PE3 )
2125 IMPLICIT NONE
2126 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,&
2127 MS1,ME1,MS2,ME2,MS3,ME3,&
2128 PS1,PE1,PS2,PE2,PS3,PE3
2129 CHARACTER *(*) stagger,ordering
2130 INTEGER fid,domdesc
2131 LOGICAL globbuf(*)
2132 LOGICAL buf(*)
2133
2134 CALL wrf_global_to_patch_generic (globbuf,buf,domdesc,stagger,ordering,LWORDSIZE,&
2135 DS1,DE1,DS2,DE2,DS3,DE3,&
2136 MS1,ME1,MS2,ME2,MS3,ME3,&
2137 PS1,PE1,PS2,PE2,PS3,PE3 )
2138 RETURN
2139 END SUBROUTINE wrf_global_to_patch_logical
2140
2141 SUBROUTINE wrf_global_to_patch_generic (globbuf,buf,domdesc,stagger,ordering,typesize,&
2142 DS1a,DE1a,DS2a,DE2a,DS3a,DE3a,&
2143 MS1a,ME1a,MS2a,ME2a,MS3a,ME3a,&
2144 PS1a,PE1a,PS2a,PE2a,PS3a,PE3a )
2145 USE module_dm
2146 USE module_driver_constants
2147 IMPLICIT NONE
2148 INTEGER DS1a,DE1a,DS2a,DE2a,DS3a,DE3a,&
2149 MS1a,ME1a,MS2a,ME2a,MS3a,ME3a,&
2150 PS1a,PE1a,PS2a,PE2a,PS3a,PE3A
2151 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,&
2152 MS1,ME1,MS2,ME2,MS3,ME3,&
2153 PS1,PE1,PS2,PE2,PS3,PE3
2154 CHARACTER *(*) stagger,ordering
2155 INTEGER fid,domdesc,typesize,ierr
2156 REAL globbuf(*)
2157 REAL buf(*)
2158 LOGICAL, EXTERNAL :: wrf_dm_on_monitor, has_char
2159
2160 INTEGER i,j,k,ord,ord2d,ndim
2161 INTEGER Patch(3,2), Gpatch(3,2,ntasks)
2162 REAL, ALLOCATABLE :: tmpbuf( : )
2163 REAL locbuf( (PE1a-PS1a+1)*(PE2a-PS2a+1)*(PE3a-PS3a+1)/RWORDSIZE*typesize+32 )
2164
2165 DS1 = DS1a ; DE1 = DE1a ; DS2=DS2a ; DE2 = DE2a ; DS3 = DS3a ; DE3 = DE3a
2166 MS1 = MS1a ; ME1 = ME1a ; MS2=MS2a ; ME2 = ME2a ; MS3 = MS3a ; ME3 = ME3a
2167 PS1 = PS1a ; PE1 = PE1a ; PS2=PS2a ; PE2 = PE2a ; PS3 = PS3a ; PE3 = PE3a
2168
2169 SELECT CASE ( TRIM(ordering) )
2170 CASE ( 'xy', 'yx' )
2171 ndim = 2
2172 CASE DEFAULT
2173 ndim = 3 ! where appropriate
2174 END SELECT
2175
2176 SELECT CASE ( TRIM(ordering) )
2177 CASE ( 'xyz','xy' )
2178 ! the non-staggered variables come in at one-less than
2179 ! domain dimensions, but code wants full domain spec, so
2180 ! adjust if not staggered
2181 IF ( .NOT. has_char( stagger, 'x' ) ) DE1 = DE1+1
2182 IF ( .NOT. has_char( stagger, 'y' ) ) DE2 = DE2+1
2183 IF ( ndim .EQ. 3 .AND. .NOT. has_char( stagger, 'z' ) ) DE3 = DE3+1
2184 CASE ( 'yxz','yx' )
2185 IF ( .NOT. has_char( stagger, 'x' ) ) DE2 = DE2+1
2186 IF ( .NOT. has_char( stagger, 'y' ) ) DE1 = DE1+1
2187 IF ( ndim .EQ. 3 .AND. .NOT. has_char( stagger, 'z' ) ) DE3 = DE3+1
2188 CASE ( 'zxy' )
2189 IF ( .NOT. has_char( stagger, 'x' ) ) DE2 = DE2+1
2190 IF ( .NOT. has_char( stagger, 'y' ) ) DE3 = DE3+1
2191 IF ( ndim .EQ. 3 .AND. .NOT. has_char( stagger, 'z' ) ) DE1 = DE1+1
2192 CASE ( 'xzy' )
2193 IF ( .NOT. has_char( stagger, 'x' ) ) DE1 = DE1+1
2194 IF ( .NOT. has_char( stagger, 'y' ) ) DE3 = DE3+1
2195 IF ( ndim .EQ. 3 .AND. .NOT. has_char( stagger, 'z' ) ) DE2 = DE2+1
2196 CASE DEFAULT
2197 END SELECT
2198
2199 ! moved to here to be after the potential recalculations of D dims
2200 IF ( wrf_dm_on_monitor() ) THEN
2201 ALLOCATE ( tmpbuf ( (DE1-DS1+1)*(DE2-DS2+1)*(DE3-DS3+1)/RWORDSIZE*typesize+32 ), STAT=ierr )
2202 ELSE
2203 ALLOCATE ( tmpbuf ( 1 ), STAT=ierr )
2204 ENDIF
2205 IF ( ierr .ne. 0 ) CALL wrf_error_fatal ('allocating tmpbuf in wrf_global_to_patch_generic')
2206
2207 Patch(1,1) = ps1 ; Patch(1,2) = pe1 ! use patch dims
2208 Patch(2,1) = ps2 ; Patch(2,2) = pe2
2209 Patch(3,1) = ps3 ; Patch(3,2) = pe3
2210
2211 ! defined in external/io_quilt
2212 CALL collect_on_comm0 ( local_communicator , IWORDSIZE , &
2213 Patch , 6 , &
2214 GPatch , 6*ntasks )
2215
2216 ndim = len(TRIM(ordering))
2217
2218 IF ( wrf_dm_on_monitor() .AND. ndim .GE. 2 ) THEN
2219 IF ( typesize .EQ. RWORDSIZE ) THEN
2220 CALL outbuf_2_patch_r ( globbuf , tmpbuf FRSTELEM , &
2221 DS1, DE1, DS2, DE2, DS3, DE3 , &
2222 MS1, ME1, MS2, ME2, MS3, ME3 , &
2223 GPATCH )
2224 ELSE IF ( typesize .EQ. IWORDSIZE ) THEN
2225 CALL outbuf_2_patch_i ( globbuf , tmpbuf FRSTELEM , &
2226 DS1, DE1, DS2, DE2, DS3, DE3 , &
2227 GPATCH )
2228 ELSE IF ( typesize .EQ. DWORDSIZE ) THEN
2229 CALL outbuf_2_patch_d ( globbuf , tmpbuf FRSTELEM , &
2230 DS1, DE1, DS2, DE2, DS3, DE3 , &
2231 GPATCH )
2232 ELSE IF ( typesize .EQ. LWORDSIZE ) THEN
2233 CALL outbuf_2_patch_l ( globbuf , tmpbuf FRSTELEM , &
2234 DS1, DE1, DS2, DE2, DS3, DE3 , &
2235 GPATCH )
2236 ENDIF
2237 ENDIF
2238
2239 CALL dist_on_comm0 ( local_communicator , typesize , &
2240 tmpbuf FRSTELEM , (de1-ds1+1)*(de2-ds2+1)*(de3-ds3+1) , &
2241 locbuf , (pe1-ps1+1)*(pe2-ps2+1)*(pe3-ps3+1) )
2242
2243 IF ( typesize .EQ. RWORDSIZE ) THEN
2244 CALL all_sub_r ( locbuf , buf , &
2245 PS1, PE1, PS2, PE2, PS3, PE3 , &
2246 MS1, ME1, MS2, ME2, MS3, ME3 )
2247
2248 ELSE IF ( typesize .EQ. IWORDSIZE ) THEN
2249 CALL all_sub_i ( locbuf , buf , &
2250 PS1, PE1, PS2, PE2, PS3, PE3 , &
2251 MS1, ME1, MS2, ME2, MS3, ME3 )
2252 ELSE IF ( typesize .EQ. DWORDSIZE ) THEN
2253 CALL all_sub_d ( locbuf , buf , &
2254 PS1, PE1, PS2, PE2, PS3, PE3 , &
2255 MS1, ME1, MS2, ME2, MS3, ME3 )
2256 ELSE IF ( typesize .EQ. LWORDSIZE ) THEN
2257 CALL all_sub_l ( locbuf , buf , &
2258 PS1, PE1, PS2, PE2, PS3, PE3 , &
2259 MS1, ME1, MS2, ME2, MS3, ME3 )
2260 ENDIF
2261
2262
2263 DEALLOCATE ( tmpbuf )
2264 RETURN
2265 END SUBROUTINE wrf_global_to_patch_generic
2266
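! The all_sub_<type> routines are the inverse of just_patch_<type>: they copy the contiguous
! patch buffer received from dist_on_comm0 back into the memory-dimensioned (MS:ME) array
! over the patch region (PS:PE).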
2267 SUBROUTINE all_sub_i ( inbuf , outbuf, &
2268 PS1,PE1,PS2,PE2,PS3,PE3, &
2269 MS1,ME1,MS2,ME2,MS3,ME3 )
2270 USE module_dm
2271 IMPLICIT NONE
2272 INTEGER , DIMENSION(*) , INTENT(IN) :: inbuf
2273 INTEGER MS1,ME1,MS2,ME2,MS3,ME3
2274 INTEGER PS1,PE1,PS2,PE2,PS3,PE3
2275 INTEGER , DIMENSION( MS1:ME1,MS2:ME2,MS3:ME3 ) , INTENT(OUT) :: outbuf
2276 ! Local
2277 INTEGER :: i,j,k,n , icurs
2278 icurs = 1
2279 DO k = PS3, PE3
2280 DO j = PS2, PE2
2281 DO i = PS1, PE1
2282 outbuf( i, j, k ) = inbuf ( icurs )
2283 icurs = icurs + 1
2284 ENDDO
2285 ENDDO
2286 ENDDO
2287 RETURN
2288 END SUBROUTINE all_sub_i
2289
2290 SUBROUTINE all_sub_r ( inbuf , outbuf, &
2291 PS1,PE1,PS2,PE2,PS3,PE3, &
2292 MS1,ME1,MS2,ME2,MS3,ME3 )
2293 USE module_dm
2294 IMPLICIT NONE
2295 REAL , DIMENSION(*) , INTENT(IN) :: inbuf
2296 INTEGER MS1,ME1,MS2,ME2,MS3,ME3
2297 INTEGER PS1,PE1,PS2,PE2,PS3,PE3
2298 REAL , DIMENSION( MS1:ME1,MS2:ME2,MS3:ME3 ) , INTENT(OUT) :: outbuf
2299 ! Local
2300 INTEGER :: i,j,k,n , icurs
2301 icurs = 1
2302 DO k = PS3, PE3
2303 DO j = PS2, PE2
2304 DO i = PS1, PE1
2305 outbuf( i, j, k ) = inbuf ( icurs )
2306 icurs = icurs + 1
2307 ENDDO
2308 ENDDO
2309 ENDDO
2310
2311 RETURN
2312 END SUBROUTINE all_sub_r
2313
2314 SUBROUTINE all_sub_d ( inbuf , outbuf, &
2315 PS1,PE1,PS2,PE2,PS3,PE3, &
2316 MS1,ME1,MS2,ME2,MS3,ME3 )
2317 USE module_dm
2318 IMPLICIT NONE
2319 DOUBLE PRECISION , DIMENSION(*) , INTENT(IN) :: inbuf
2320 INTEGER MS1,ME1,MS2,ME2,MS3,ME3
2321 INTEGER PS1,PE1,PS2,PE2,PS3,PE3
2322 DOUBLE PRECISION , DIMENSION( MS1:ME1,MS2:ME2,MS3:ME3 ) , INTENT(OUT) :: outbuf
2323 ! Local
2324 INTEGER :: i,j,k,n , icurs
2325 icurs = 1
2326 DO k = PS3, PE3
2327 DO j = PS2, PE2
2328 DO i = PS1, PE1
2329 outbuf( i, j, k ) = inbuf ( icurs )
2330 icurs = icurs + 1
2331 ENDDO
2332 ENDDO
2333 ENDDO
2334 RETURN
2335 END SUBROUTINE all_sub_d
2336
2337 SUBROUTINE all_sub_l ( inbuf , outbuf, &
2338 PS1,PE1,PS2,PE2,PS3,PE3, &
2339 MS1,ME1,MS2,ME2,MS3,ME3 )
2340 USE module_dm
2341 IMPLICIT NONE
2342 LOGICAL , DIMENSION(*) , INTENT(IN) :: inbuf
2343 INTEGER MS1,ME1,MS2,ME2,MS3,ME3
2344 INTEGER PS1,PE1,PS2,PE2,PS3,PE3
2345 LOGICAL , DIMENSION( MS1:ME1,MS2:ME2,MS3:ME3 ) , INTENT(OUT) :: outbuf
2346 ! Local
2347 INTEGER :: i,j,k,n , icurs
2348 icurs = 1
2349 DO k = PS3, PE3
2350 DO j = PS2, PE2
2351 DO i = PS1, PE1
2352 outbuf( i, j, k ) = inbuf ( icurs )
2353 icurs = icurs + 1
2354 ENDDO
2355 ENDDO
2356 ENDDO
2357 RETURN
2358 END SUBROUTINE all_sub_l
2359
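! The outbuf_2_patch_<type> routines run on the monitor and mirror patch_2_outbuf_<type>:
! they pack the global (DS:DE) array into per-task contiguous patches, in task-rank order
! per GPATCH, for distribution by dist_on_comm0.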
2360 SUBROUTINE outbuf_2_patch_r( inbuf, outbuf, &
2361 DS1,DE1,DS2,DE2,DS3,DE3, &
2362 MS1, ME1, MS2, ME2, MS3, ME3 , &
2363 GPATCH )
2364 USE module_dm
2365 IMPLICIT NONE
2366 REAL , DIMENSION(*) , INTENT(OUT) :: outbuf
2367 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,GPATCH(3,2,ntasks)
2368 INTEGER MS1,ME1,MS2,ME2,MS3,ME3
2369 REAL , DIMENSION( DS1:DE1,DS2:DE2,DS3:DE3 ) , INTENT(IN) :: inbuf
2370 ! Local
2371 INTEGER :: i,j,k,n , icurs
2372
2373 icurs = 1
2374 DO n = 1, ntasks
2375 DO k = GPATCH( 3,1,n ), GPATCH( 3,2,n )
2376 DO j = GPATCH( 2,1,n ), GPATCH( 2,2,n )
2377 DO i = GPATCH( 1,1,n ), GPATCH( 1,2,n )
2378 outbuf( icurs ) = inbuf( i,j,k )
2379 icurs = icurs + 1
2380 ENDDO
2381 ENDDO
2382 ENDDO
2383 ENDDO
2384 RETURN
2385 END SUBROUTINE outbuf_2_patch_r
2386
2387 SUBROUTINE outbuf_2_patch_i( inbuf, outbuf, &
2388 DS1,DE1,DS2,DE2,DS3,DE3,&
2389 GPATCH )
2390 USE module_dm
2391 IMPLICIT NONE
2392 INTEGER , DIMENSION(*) , INTENT(OUT) :: outbuf
2393 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,GPATCH(3,2,ntasks)
2394 INTEGER , DIMENSION( DS1:DE1,DS2:DE2,DS3:DE3 ) , INTENT(IN) :: inbuf
2395 ! Local
2396 INTEGER :: i,j,k,n , icurs
2397 icurs = 1
2398 DO n = 1, ntasks
2399 DO k = GPATCH( 3,1,n ), GPATCH( 3,2,n )
2400 DO j = GPATCH( 2,1,n ), GPATCH( 2,2,n )
2401 DO i = GPATCH( 1,1,n ), GPATCH( 1,2,n )
2402 outbuf( icurs ) = inbuf( i,j,k )
2403 icurs = icurs + 1
2404 ENDDO
2405 ENDDO
2406 ENDDO
2407 ENDDO
2408 RETURN
2409 END SUBROUTINE outbuf_2_patch_i
2410
2411 SUBROUTINE outbuf_2_patch_d( inbuf, outbuf, &
2412 DS1,DE1,DS2,DE2,DS3,DE3,&
2413 GPATCH )
2414 USE module_dm
2415 IMPLICIT NONE
2416 DOUBLE PRECISION , DIMENSION(*) , INTENT(OUT) :: outbuf
2417 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,GPATCH(3,2,ntasks)
2418 DOUBLE PRECISION , DIMENSION( DS1:DE1,DS2:DE2,DS3:DE3 ) , INTENT(IN) :: inbuf
2419 ! Local
2420 INTEGER :: i,j,k,n , icurs
2421 icurs = 1
2422 DO n = 1, ntasks
2423 DO k = GPATCH( 3,1,n ), GPATCH( 3,2,n )
2424 DO j = GPATCH( 2,1,n ), GPATCH( 2,2,n )
2425 DO i = GPATCH( 1,1,n ), GPATCH( 1,2,n )
2426 outbuf( icurs ) = inbuf( i,j,k )
2427 icurs = icurs + 1
2428 ENDDO
2429 ENDDO
2430 ENDDO
2431 ENDDO
2432 RETURN
2433 END SUBROUTINE outbuf_2_patch_d
2434
2435 SUBROUTINE outbuf_2_patch_l( inbuf, outbuf, &
2436 DS1,DE1,DS2,DE2,DS3,DE3,&
2437 GPATCH )
2438 USE module_dm
2439 IMPLICIT NONE
2440 LOGICAL , DIMENSION(*) , INTENT(OUT) :: outbuf
2441 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,GPATCH(3,2,ntasks)
2442 LOGICAL , DIMENSION( DS1:DE1,DS2:DE2,DS3:DE3 ) , INTENT(IN) :: inbuf
2443 ! Local
2444 INTEGER :: i,j,k,n , icurs
2445 icurs = 1
2446 DO n = 1, ntasks
2447 DO k = GPATCH( 3,1,n ), GPATCH( 3,2,n )
2448 DO j = GPATCH( 2,1,n ), GPATCH( 2,2,n )
2449 DO i = GPATCH( 1,1,n ), GPATCH( 1,2,n )
2450 outbuf( icurs ) = inbuf( i,j,k )
2451 icurs = icurs + 1
2452 ENDDO
2453 ENDDO
2454 ENDDO
2455 ENDDO
2456 RETURN
2457 END SUBROUTINE outbuf_2_patch_l
2458
2459
2460
2461 !------------------------------------------------------------------
2462
2463 #if ( EM_CORE == 1 )
2464
2465 !------------------------------------------------------------------
2466
2467 SUBROUTINE force_domain_em_part2 ( grid, ngrid, config_flags &
2468 !
2469 #include "em_dummy_new_args.inc"
2470 !
2471 )
2472 USE module_domain
2473 USE module_configure
2474 USE module_dm
2475 IMPLICIT NONE
2476 !
2477 TYPE(domain), POINTER :: grid ! name of the grid being dereferenced (must be "grid")
2478 TYPE(domain), POINTER :: ngrid
2479 #include <em_dummy_new_decl.inc>
2480 INTEGER nlev, msize
2481 INTEGER i,j,pig,pjg,cm,cn,nig,njg,retval,k
2482 TYPE (grid_config_rec_type) :: config_flags
2483 REAL xv(500)
2484 INTEGER :: cids, cide, cjds, cjde, ckds, ckde, &
2485 cims, cime, cjms, cjme, ckms, ckme, &
2486 cips, cipe, cjps, cjpe, ckps, ckpe
2487 INTEGER :: nids, nide, njds, njde, nkds, nkde, &
2488 nims, nime, njms, njme, nkms, nkme, &
2489 nips, nipe, njps, njpe, nkps, nkpe
2490 INTEGER :: ids, ide, jds, jde, kds, kde, &
2491 ims, ime, jms, jme, kms, kme, &
2492 ips, ipe, jps, jpe, kps, kpe
2493
2494 CALL get_ijk_from_grid ( grid , &
2495 cids, cide, cjds, cjde, ckds, ckde, &
2496 cims, cime, cjms, cjme, ckms, ckme, &
2497 cips, cipe, cjps, cjpe, ckps, ckpe )
2498 CALL get_ijk_from_grid ( ngrid , &
2499 nids, nide, njds, njde, nkds, nkde, &
2500 nims, nime, njms, njme, nkms, nkme, &
2501 nips, nipe, njps, njpe, nkps, nkpe )
2502
2503 nlev = ckde - ckds + 1
2504
2505 #include "em_nest_interpdown_unpack.inc"
2506
2507 CALL get_ijk_from_grid ( grid , &
2508 ids, ide, jds, jde, kds, kde, &
2509 ims, ime, jms, jme, kms, kme, &
2510 ips, ipe, jps, jpe, kps, kpe )
2511
2512 #include "HALO_EM_FORCE_DOWN.inc"
2513
2514 ! code here to interpolate the data into the nested domain
2515 # include "em_nest_forcedown_interp.inc"
2516
2517 RETURN
2518 END SUBROUTINE force_domain_em_part2
2519
2520 !------------------------------------------------------------------
2521
2522 SUBROUTINE interp_domain_em_part1 ( grid, intermediate_grid, ngrid, config_flags &
2523 !
2524 #include "em_dummy_new_args.inc"
2525 !
2526 )
2527 USE module_domain
2528 USE module_configure
2529 USE module_dm
2530 USE module_timing
2531 IMPLICIT NONE
2532 !
2533 TYPE(domain), POINTER :: grid ! name of the grid being dereferenced (must be "grid")
2534 TYPE(domain), POINTER :: intermediate_grid
2535 TYPE(domain), POINTER :: ngrid
2536 #include <em_dummy_new_decl.inc>
2537 INTEGER nlev, msize
2538 INTEGER i,j,pig,pjg,cm,cn,nig,njg,retval,k
2539 INTEGER iparstrt,jparstrt,sw
2540 TYPE (grid_config_rec_type) :: config_flags
2541 REAL xv(500)
2542 INTEGER :: cids, cide, cjds, cjde, ckds, ckde, &
2543 cims, cime, cjms, cjme, ckms, ckme, &
2544 cips, cipe, cjps, cjpe, ckps, ckpe
2545 INTEGER :: iids, iide, ijds, ijde, ikds, ikde, &
2546 iims, iime, ijms, ijme, ikms, ikme, &
2547 iips, iipe, ijps, ijpe, ikps, ikpe
2548 INTEGER :: nids, nide, njds, njde, nkds, nkde, &
2549 nims, nime, njms, njme, nkms, nkme, &
2550 nips, nipe, njps, njpe, nkps, nkpe
2551
2552 INTEGER icoord, jcoord, idim_cd, jdim_cd, pgr
2553 INTEGER local_comm, myproc, nproc
2554
2555 CALL wrf_get_dm_communicator ( local_comm )
2556 CALL wrf_get_myproc( myproc )
2557 CALL wrf_get_nproc( nproc )
2558
2559 CALL get_ijk_from_grid ( grid , &
2560 cids, cide, cjds, cjde, ckds, ckde, &
2561 cims, cime, cjms, cjme, ckms, ckme, &
2562 cips, cipe, cjps, cjpe, ckps, ckpe )
2563 CALL get_ijk_from_grid ( intermediate_grid , &
2564 iids, iide, ijds, ijde, ikds, ikde, &
2565 iims, iime, ijms, ijme, ikms, ikme, &
2566 iips, iipe, ijps, ijpe, ikps, ikpe )
2567 CALL get_ijk_from_grid ( ngrid , &
2568 nids, nide, njds, njde, nkds, nkde, &
2569 nims, nime, njms, njme, nkms, nkme, &
2570 nips, nipe, njps, njpe, nkps, nkpe )
2571
2572 CALL nl_get_parent_grid_ratio ( ngrid%id, pgr )
2573 CALL nl_get_i_parent_start ( intermediate_grid%id, iparstrt )
2574 CALL nl_get_j_parent_start ( intermediate_grid%id, jparstrt )
2575 CALL nl_get_shw ( intermediate_grid%id, sw )
2576 icoord = iparstrt - sw
2577 jcoord = jparstrt - sw
2578 idim_cd = iide - iids + 1
2579 jdim_cd = ijde - ijds + 1
2580
2581 nlev = ckde - ckds + 1
2582
2583 #include "em_nest_interpdown_pack.inc"
2584
2585 CALL rsl_lite_bcast_msgs( myproc, nproc, local_comm )
2586
2587 RETURN
2588 END SUBROUTINE interp_domain_em_part1
2589
2590 !------------------------------------------------------------------
2591
2592 SUBROUTINE interp_domain_em_part2 ( grid, ngrid, config_flags &
2593 !
2594 #include "em_dummy_new_args.inc"
2595 !
2596 )
2597 USE module_domain
2598 USE module_configure
2599 USE module_dm
2600 IMPLICIT NONE
2601 !
2602 TYPE(domain), POINTER :: grid ! name of the grid being dereferenced (must be "grid")
2603 TYPE(domain), POINTER :: ngrid
2604 #include <em_dummy_new_decl.inc>
2605 INTEGER nlev, msize
2606 INTEGER i,j,pig,pjg,cm,cn,nig,njg,retval,k
2607 TYPE (grid_config_rec_type) :: config_flags
2608 REAL xv(500)
2609 INTEGER :: cids, cide, cjds, cjde, ckds, ckde, &
2610 cims, cime, cjms, cjme, ckms, ckme, &
2611 cips, cipe, cjps, cjpe, ckps, ckpe
2612 INTEGER :: nids, nide, njds, njde, nkds, nkde, &
2613 nims, nime, njms, njme, nkms, nkme, &
2614 nips, nipe, njps, njpe, nkps, nkpe
2615 INTEGER :: ids, ide, jds, jde, kds, kde, &
2616 ims, ime, jms, jme, kms, kme, &
2617 ips, ipe, jps, jpe, kps, kpe
2618 INTEGER myproc
2619 INTEGER ierr
2620
2621 CALL get_ijk_from_grid ( grid , &
2622 cids, cide, cjds, cjde, ckds, ckde, &
2623 cims, cime, cjms, cjme, ckms, ckme, &
2624 cips, cipe, cjps, cjpe, ckps, ckpe )
2625 CALL get_ijk_from_grid ( ngrid , &
2626 nids, nide, njds, njde, nkds, nkde, &
2627 nims, nime, njms, njme, nkms, nkme, &
2628 nips, nipe, njps, njpe, nkps, nkpe )
2629
2630 nlev = ckde - ckds + 1
2631
2632 #include "em_nest_interpdown_unpack.inc"
2633
2634 CALL get_ijk_from_grid ( grid , &
2635 ids, ide, jds, jde, kds, kde, &
2636 ims, ime, jms, jme, kms, kme, &
2637 ips, ipe, jps, jpe, kps, kpe )
2638
2639 #include "HALO_EM_INTERP_DOWN.inc"
2640
2641 # include "em_nest_interpdown_interp.inc"
2642
2643 RETURN
2644 END SUBROUTINE interp_domain_em_part2
2645
2646 !------------------------------------------------------------------
2647
2648 SUBROUTINE feedback_nest_prep ( grid, config_flags &
2649 !
2650 #include "em_dummy_new_args.inc"
2651 !
2652 )
2653 USE module_domain
2654 USE module_configure
2655 USE module_dm
2656 USE module_state_description
2657 IMPLICIT NONE
2658 !
2659 TYPE(domain), TARGET :: grid ! name of the grid being dereferenced (must be "grid")
2660 TYPE (grid_config_rec_type) :: config_flags ! configuration flags; carries the vertical dim of
2661 ! soil temp, moisture, etc., and the vertical dim
2662 ! of soil categories
2663 #include <em_dummy_new_decl.inc>
2664
2665 INTEGER :: ids, ide, jds, jde, kds, kde, &
2666 ims, ime, jms, jme, kms, kme, &
2667 ips, ipe, jps, jpe, kps, kpe
2668
2669 INTEGER :: idum1, idum2
2670
2671
2672 CALL get_ijk_from_grid ( grid , &
2673 ids, ide, jds, jde, kds, kde, &
2674 ims, ime, jms, jme, kms, kme, &
2675 ips, ipe, jps, jpe, kps, kpe )
2676
2677 #ifdef DM_PARALLEL
2678 #include "HALO_EM_INTERP_UP.inc"
2679 #endif
2680
2681 END SUBROUTINE feedback_nest_prep
2682
2683 !------------------------------------------------------------------
2684
2685 SUBROUTINE feedback_domain_em_part1 ( grid, ngrid, config_flags &
2686 !
2687 #include "em_dummy_new_args.inc"
2688 !
2689 )
2690 USE module_domain
2691 USE module_configure
2692 USE module_dm
2693 IMPLICIT NONE
2694 !
2695 TYPE(domain), POINTER :: grid ! name of the grid being dereferenced (must be "grid")
2696 TYPE(domain), POINTER :: ngrid
2697 #include <em_dummy_new_decl.inc>
2698 INTEGER nlev, msize
2699 INTEGER i,j,pig,pjg,cm,cn,nig,njg,retval,k
2700 TYPE(domain), POINTER :: xgrid
2701 TYPE (grid_config_rec_type) :: config_flags, nconfig_flags
2702 REAL xv(500)
2703 INTEGER :: cids, cide, cjds, cjde, ckds, ckde, &
2704 cims, cime, cjms, cjme, ckms, ckme, &
2705 cips, cipe, cjps, cjpe, ckps, ckpe
2706 INTEGER :: nids, nide, njds, njde, nkds, nkde, &
2707 nims, nime, njms, njme, nkms, nkme, &
2708 nips, nipe, njps, njpe, nkps, nkpe
2709 INTEGER local_comm, myproc, nproc, idum1, idum2
2710
2711 INTERFACE
2712 SUBROUTINE feedback_nest_prep ( grid, config_flags &
2713 !
2714 #include "em_dummy_new_args.inc"
2715 !
2716 )
2717 USE module_domain
2718 USE module_configure
2719 USE module_dm
2720 USE module_state_description
2721 !
2722 TYPE (grid_config_rec_type) :: config_flags
2723 TYPE(domain), TARGET :: grid
2724 #include <em_dummy_new_decl.inc>
2725 END SUBROUTINE feedback_nest_prep
2726 END INTERFACE
2727 !
2728
2729 CALL wrf_get_dm_communicator ( local_comm )
2730 CALL wrf_get_myproc( myproc )
2731 CALL wrf_get_nproc( nproc )
2732
2733 !
2734 ! intermediate grid
2735 CALL get_ijk_from_grid ( grid , &
2736 cids, cide, cjds, cjde, ckds, ckde, &
2737 cims, cime, cjms, cjme, ckms, ckme, &
2738 cips, cipe, cjps, cjpe, ckps, ckpe )
2739 ! nest grid
2740 CALL get_ijk_from_grid ( ngrid , &
2741 nids, nide, njds, njde, nkds, nkde, &
2742 nims, nime, njms, njme, nkms, nkme, &
2743 nips, nipe, njps, njpe, nkps, nkpe )
2744
2745 nlev = ckde - ckds + 1
2746
2747 ips_save = ngrid%i_parent_start ! used in feedback_domain_em_part2 below
2748 jps_save = ngrid%j_parent_start
2749 ipe_save = ngrid%i_parent_start + (nide-nids+1) / ngrid%parent_grid_ratio - 1
2750 jpe_save = ngrid%j_parent_start + (njde-njds+1) / ngrid%parent_grid_ratio - 1
2751
2752 ! feedback_nest_prep invokes a halo exchange on the ngrid. It is done this way
2753 ! in a separate routine because the HALOs need the data to be dereferenced from the
2754 ! grid data structure and, in this routine, the dereferenced fields are related to
2755 ! the intermediate domain, not the nest itself. Save the current grid pointer to intermediate
2756 ! domain, switch grid to point to ngrid, invoke feedback_nest_prep, then restore grid
2757 ! to point to intermediate domain.
2758
2759 CALL model_to_grid_config_rec ( ngrid%id , model_config_rec , nconfig_flags )
2760 CALL set_scalar_indices_from_config ( ngrid%id , idum1 , idum2 )
2761 xgrid => grid
2762 grid => ngrid
2763
2764 CALL feedback_nest_prep ( grid, nconfig_flags &
2765 !
2766 #include "em_actual_new_args.inc"
2767 !
2768 )
2769
2770 ! put things back so grid is intermediate grid
2771
2772 grid => xgrid
2773 CALL set_scalar_indices_from_config ( grid%id , idum1 , idum2 )
2774
2775 ! "interp" (basically copy) ngrid onto intermediate grid
2776
2777 #include "em_nest_feedbackup_interp.inc"
2778
2779 RETURN
2780 END SUBROUTINE feedback_domain_em_part1
2781
2782 !------------------------------------------------------------------
2783
2784 SUBROUTINE feedback_domain_em_part2 ( grid, intermediate_grid, ngrid , config_flags &
2785 !
2786 #include "em_dummy_new_args.inc"
2787 !
2788 )
2789 USE module_domain
2790 USE module_configure
2791 USE module_dm
2792 USE module_utility
2793 IMPLICIT NONE
2794
2795 !
2796 TYPE(domain), POINTER :: grid ! name of the grid being dereferenced (must be "grid")
2797 TYPE(domain), POINTER :: intermediate_grid
2798 TYPE(domain), POINTER :: ngrid
2799
2800 #include <em_dummy_new_decl.inc>
2801 INTEGER nlev, msize
2802 INTEGER i,j,pig,pjg,cm,cn,nig,njg,retval,k
2803 TYPE (grid_config_rec_type) :: config_flags
2804 REAL xv(500)
2805 INTEGER :: cids, cide, cjds, cjde, ckds, ckde, &
2806 cims, cime, cjms, cjme, ckms, ckme, &
2807 cips, cipe, cjps, cjpe, ckps, ckpe
2808 INTEGER :: nids, nide, njds, njde, nkds, nkde, &
2809 nims, nime, njms, njme, nkms, nkme, &
2810 nips, nipe, njps, njpe, nkps, nkpe
2811 INTEGER :: ids, ide, jds, jde, kds, kde, &
2812 ims, ime, jms, jme, kms, kme, &
2813 ips, ipe, jps, jpe, kps, kpe
2814 INTEGER icoord, jcoord, idim_cd, jdim_cd
2815 INTEGER local_comm, myproc, nproc
2816 INTEGER iparstrt, jparstrt, sw
2817 REAL nest_influence
2818
2819 character*256 :: timestr
2820 integer ierr
2821
2822 LOGICAL, EXTERNAL :: em_cd_feedback_mask
2823
2824 ! On entry to this routine,
2825 ! "grid" refers to the parent domain
2826 ! "intermediate_grid" refers to local copy of parent domain that overlies this patch of nest
2827 ! "ngrid" refers to the nest, which is only needed for smoothing on the parent because
2828 ! the nest feedback data has already been transferred during em_nest_feedbackup_interp
2829 ! in part1, above.
2830 ! The way the c and n dimensions are set below looks backwards, but from the point
2831 ! of view of the RSL routine rsl_lite_to_parent_info(), the call to which is included by
2832 ! em_nest_feedbackup_pack, the "n" domain represents the parent domain and the "c" domain
2833 ! represents the intermediate domain. The backwards lookingness should be fixed in the gen_comms.c
2834 ! registry routine that accompanies RSL_LITE but, just as it's sometimes easier to put up a road
2835 ! sign that says "DIP" than fix the dip, at this point it was easier just to write this comment. JM
2836 !
2837 nest_influence = 1.
2838
2839 CALL domain_clock_get( grid, current_timestr=timestr )
2840
2841 CALL get_ijk_from_grid ( intermediate_grid , &
2842 cids, cide, cjds, cjde, ckds, ckde, &
2843 cims, cime, cjms, cjme, ckms, ckme, &
2844 cips, cipe, cjps, cjpe, ckps, ckpe )
2845 CALL get_ijk_from_grid ( grid , &
2846 nids, nide, njds, njde, nkds, nkde, &
2847 nims, nime, njms, njme, nkms, nkme, &
2848 nips, nipe, njps, njpe, nkps, nkpe )
2849
2850 CALL nl_get_i_parent_start ( intermediate_grid%id, iparstrt )
2851 CALL nl_get_j_parent_start ( intermediate_grid%id, jparstrt )
2852 CALL nl_get_shw ( intermediate_grid%id, sw )
2853 icoord = iparstrt - sw
2854 jcoord = jparstrt - sw
2855 idim_cd = cide - cids + 1
2856 jdim_cd = cjde - cjds + 1
2857
2858 nlev = ckde - ckds + 1
2859
2860 #include "em_nest_feedbackup_pack.inc"
2861
2862 CALL wrf_get_dm_communicator ( local_comm )
2863 CALL wrf_get_myproc( myproc )
2864 CALL wrf_get_nproc( nproc )
2865
2866 CALL rsl_lite_merge_msgs( myproc, nproc, local_comm )
2867
2868 #define NEST_INFLUENCE(A,B) A = B
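! In the EM core the feedback simply overwrites the parent value (the macro ignores
! nest_influence, which is set to 1. above); contrast the NMM version further below,
! which blends the parent and nest values.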
2869 #include "em_nest_feedbackup_unpack.inc"
2870
2871 ! smooth coarse grid
2872 CALL get_ijk_from_grid ( ngrid, &
2873 nids, nide, njds, njde, nkds, nkde, &
2874 nims, nime, njms, njme, nkms, nkme, &
2875 nips, nipe, njps, njpe, nkps, nkpe )
2876 CALL get_ijk_from_grid ( grid , &
2877 ids, ide, jds, jde, kds, kde, &
2878 ims, ime, jms, jme, kms, kme, &
2879 ips, ipe, jps, jpe, kps, kpe )
2880
2881 #include "HALO_EM_INTERP_UP.inc"
2882
2883 CALL get_ijk_from_grid ( grid , &
2884 cids, cide, cjds, cjde, ckds, ckde, &
2885 cims, cime, cjms, cjme, ckms, ckme, &
2886 cips, cipe, cjps, cjpe, ckps, ckpe )
2887
2888 #include "em_nest_feedbackup_smooth.inc"
2889
2890 RETURN
2891 END SUBROUTINE feedback_domain_em_part2
2892 #endif
2893
2894 #if ( NMM_CORE == 1 && NMM_NEST == 1 )
2895 !==============================================================================
2896 ! NMM nesting infrastructure extended from EM core. This is gopal's doing.
2897 !==============================================================================
2898
2899 SUBROUTINE interp_domain_nmm_part1 ( grid, intermediate_grid, ngrid, config_flags &
2900 !
2901 #include "nmm_dummy_args.inc"
2902 !
2903 )
2904 USE module_domain
2905 USE module_configure
2906 USE module_dm
2907 USE module_timing
2908 IMPLICIT NONE
2909 !
2910 TYPE(domain), POINTER :: grid ! name of the grid being dereferenced (must be "grid")
2911 TYPE(domain), POINTER :: intermediate_grid
2912 TYPE(domain), POINTER :: ngrid
2913 #include <nmm_dummy_decl.inc>
2914 INTEGER nlev, msize
2915 INTEGER i,j,pig,pjg,cm,cn,nig,njg,retval,k
2916 INTEGER iparstrt,jparstrt,sw
2917 TYPE (grid_config_rec_type) :: config_flags
2918 REAL xv(500)
2919 INTEGER :: cids, cide, cjds, cjde, ckds, ckde, &
2920 cims, cime, cjms, cjme, ckms, ckme, &
2921 cips, cipe, cjps, cjpe, ckps, ckpe
2922 INTEGER :: iids, iide, ijds, ijde, ikds, ikde, &
2923 iims, iime, ijms, ijme, ikms, ikme, &
2924 iips, iipe, ijps, ijpe, ikps, ikpe
2925 INTEGER :: nids, nide, njds, njde, nkds, nkde, &
2926 nims, nime, njms, njme, nkms, nkme, &
2927 nips, nipe, njps, njpe, nkps, nkpe
2928
2929 INTEGER icoord, jcoord, idim_cd, jdim_cd, pgr
2930 INTEGER local_comm, myproc, nproc
2931
2932 CALL wrf_get_dm_communicator ( local_comm )
2933 CALL wrf_get_myproc( myproc )
2934 CALL wrf_get_nproc( nproc )
2935
2936 #define COPY_IN
2937 #include <nmm_scalar_derefs.inc>
2938
2939 CALL get_ijk_from_grid ( grid , &
2940 cids, cide, cjds, cjde, ckds, ckde, &
2941 cims, cime, cjms, cjme, ckms, ckme, &
2942 cips, cipe, cjps, cjpe, ckps, ckpe )
2943 CALL get_ijk_from_grid ( intermediate_grid , &
2944 iids, iide, ijds, ijde, ikds, ikde, &
2945 iims, iime, ijms, ijme, ikms, ikme, &
2946 iips, iipe, ijps, ijpe, ikps, ikpe )
2947 CALL get_ijk_from_grid ( ngrid , &
2948 nids, nide, njds, njde, nkds, nkde, &
2949 nims, nime, njms, njme, nkms, nkme, &
2950 nips, nipe, njps, njpe, nkps, nkpe )
2951
2952 CALL nl_get_parent_grid_ratio ( ngrid%id, pgr )
2953 CALL nl_get_i_parent_start ( intermediate_grid%id, iparstrt )
2954 CALL nl_get_j_parent_start ( intermediate_grid%id, jparstrt )
2955 CALL nl_get_shw ( intermediate_grid%id, sw )
2956 icoord = iparstrt - sw
2957 jcoord = jparstrt - sw
2958 idim_cd = iide - iids + 1
2959 jdim_cd = ijde - ijds + 1
2960
2961 nlev = ckde - ckds + 1
2962
2963 #include "nmm_nest_interpdown_pack.inc"
2964
2965 CALL rsl_lite_bcast_msgs( myproc, nproc, local_comm )
2966
2967 #define COPY_OUT
2968 #include <nmm_scalar_derefs.inc>
2969 RETURN
2970 END SUBROUTINE interp_domain_nmm_part1
2971
2972 !------------------------------------------------------------------
2973
2974 SUBROUTINE interp_domain_nmm_part2 ( grid, ngrid, config_flags &
2975 !
2976 #include "nmm_dummy_args.inc"
2977 !
2978 )
2979 USE module_domain
2980 USE module_configure
2981 USE module_dm
2982 IMPLICIT NONE
2983 !
2984 TYPE(domain), POINTER :: grid ! name of the grid being dereferenced (must be "grid")
2985 TYPE(domain), POINTER :: ngrid
2986 #include <nmm_dummy_decl.inc>
2987 INTEGER nlev, msize
2988 INTEGER i,j,pig,pjg,cm,cn,nig,njg,retval,k
2989 TYPE (grid_config_rec_type) :: config_flags
2990 REAL xv(500)
2991 INTEGER :: cids, cide, cjds, cjde, ckds, ckde, &
2992 cims, cime, cjms, cjme, ckms, ckme, &
2993 cips, cipe, cjps, cjpe, ckps, ckpe
2994 INTEGER :: nids, nide, njds, njde, nkds, nkde, &
2995 nims, nime, njms, njme, nkms, nkme, &
2996 nips, nipe, njps, njpe, nkps, nkpe
2997 INTEGER :: ids, ide, jds, jde, kds, kde, &
2998 ims, ime, jms, jme, kms, kme, &
2999 ips, ipe, jps, jpe, kps, kpe
3000 INTEGER myproc
3001 INTEGER ierr
3002
3003 #ifdef DEREF_KLUDGE
3004 ! see http://www.mmm.ucar.edu/wrf/WG2/topics/deref_kludge.htm
3005 INTEGER :: sm31 , em31 , sm32 , em32 , sm33 , em33
3006 INTEGER :: sm31x, em31x, sm32x, em32x, sm33x, em33x
3007 INTEGER :: sm31y, em31y, sm32y, em32y, sm33y, em33y
3008 #endif
3009 #include "deref_kludge.h"
3010
3011 #define COPY_IN
3012 #include <nmm_scalar_derefs.inc>
3013 CALL get_ijk_from_grid ( grid , &
3014 cids, cide, cjds, cjde, ckds, ckde, &
3015 cims, cime, cjms, cjme, ckms, ckme, &
3016 cips, cipe, cjps, cjpe, ckps, ckpe )
3017 CALL get_ijk_from_grid ( ngrid , &
3018 nids, nide, njds, njde, nkds, nkde, &
3019 nims, nime, njms, njme, nkms, nkme, &
3020 nips, nipe, njps, njpe, nkps, nkpe )
3021
3022 nlev = ckde - ckds + 1
3023
3024 #include "nmm_nest_interpdown_unpack.inc"
3025
3026 CALL get_ijk_from_grid ( grid , &
3027 ids, ide, jds, jde, kds, kde, &
3028 ims, ime, jms, jme, kms, kme, &
3029 ips, ipe, jps, jpe, kps, kpe )
3030
3031 #include "HALO_NMM_INTERP_DOWN1.inc"
3032
3033 #include "nmm_nest_interpdown_interp.inc"
3034
3035 #define COPY_OUT
3036 #include <nmm_scalar_derefs.inc>
3037
3038 RETURN
3039 END SUBROUTINE interp_domain_nmm_part2
3040
3041 !------------------------------------------------------------------
3042
3043 SUBROUTINE force_domain_nmm_part1 ( grid, intermediate_grid, config_flags &
3044 !
3045 #include "nmm_dummy_args.inc"
3046 !
3047 )
3048 USE module_domain
3049 USE module_configure
3050 USE module_dm
3051 USE module_timing
3052 !
3053 TYPE(domain), POINTER :: grid ! name of the grid being dereferenced (must be "grid")
3054 TYPE(domain), POINTER :: intermediate_grid
3055 #include <nmm_dummy_decl.inc>
3056 INTEGER nlev, msize
3057 INTEGER i,j,pig,pjg,cm,cn,nig,njg,retval,k
3058 TYPE (grid_config_rec_type) :: config_flags
3059 REAL xv(500)
3060 INTEGER :: cids, cide, cjds, cjde, ckds, ckde, &
3061 cims, cime, cjms, cjme, ckms, ckme, &
3062 cips, cipe, cjps, cjpe, ckps, ckpe
3063 INTEGER :: nids, nide, njds, njde, nkds, nkde, &
3064 nims, nime, njms, njme, nkms, nkme, &
3065 nips, nipe, njps, njpe, nkps, nkpe
3066 #define COPY_IN
3067 #include <nmm_scalar_derefs.inc>
3068 !
3069 CALL get_ijk_from_grid ( grid , &
3070 cids, cide, cjds, cjde, ckds, ckde, &
3071 cims, cime, cjms, cjme, ckms, ckme, &
3072 cips, cipe, cjps, cjpe, ckps, ckpe )
3073
3074 CALL get_ijk_from_grid ( intermediate_grid , &
3075 nids, nide, njds, njde, nkds, nkde, &
3076 nims, nime, njms, njme, nkms, nkme, &
3077 nips, nipe, njps, njpe, nkps, nkpe )
3078
3079 nlev = ckde - ckds + 1
3080
3081 #include "nmm_nest_forcedown_pack.inc"
3082
3083 ! WRITE(0,*)'I have completed PACKING of BCs data successfully'
3084
3085 #define COPY_OUT
3086 #include <nmm_scalar_derefs.inc>
3087 RETURN
3088 END SUBROUTINE force_domain_nmm_part1
3089
3090 !==============================================================================================
3091
3092 SUBROUTINE force_domain_nmm_part2 ( grid, ngrid, config_flags &
3093 !
3094 #include "nmm_dummy_args.inc"
3095 !
3096 )
3097 USE module_domain
3098 USE module_configure
3099 USE module_dm
3100 IMPLICIT NONE
3101 !
3102 TYPE(domain), POINTER :: grid ! name of the grid being dereferenced (must be "grid")
3103 TYPE(domain), POINTER :: ngrid
3104 #include <nmm_dummy_decl.inc>
3105 INTEGER nlev, msize
3106 INTEGER i,j,pig,pjg,cm,cn,nig,njg,retval,k
3107 TYPE (grid_config_rec_type) :: config_flags
3108 REAL xv(500)
3109 INTEGER :: cids, cide, cjds, cjde, ckds, ckde, &
3110 cims, cime, cjms, cjme, ckms, ckme, &
3111 cips, cipe, cjps, cjpe, ckps, ckpe
3112 INTEGER :: nids, nide, njds, njde, nkds, nkde, &
3113 nims, nime, njms, njme, nkms, nkme, &
3114 nips, nipe, njps, njpe, nkps, nkpe
3115 INTEGER :: ids, ide, jds, jde, kds, kde, &
3116 ims, ime, jms, jme, kms, kme, &
3117 ips, ipe, jps, jpe, kps, kpe
3118 integer myproc
3119
3120 #ifdef DEREF_KLUDGE
3121 ! see http://www.mmm.ucar.edu/wrf/WG2/topics/deref_kludge.htm
3122 INTEGER :: sm31 , em31 , sm32 , em32 , sm33 , em33
3123 INTEGER :: sm31x, em31x, sm32x, em32x, sm33x, em33x
3124 INTEGER :: sm31y, em31y, sm32y, em32y, sm33y, em33y
3125 #endif
3126 #include "deref_kludge.h"
3127
3128 #define COPY_IN
3129 #include <nmm_scalar_derefs.inc>
3130
3131 CALL get_ijk_from_grid ( grid , &
3132 cids, cide, cjds, cjde, ckds, ckde, &
3133 cims, cime, cjms, cjme, ckms, ckme, &
3134 cips, cipe, cjps, cjpe, ckps, ckpe )
3135 CALL get_ijk_from_grid ( ngrid , &
3136 nids, nide, njds, njde, nkds, nkde, &
3137 nims, nime, njms, njme, nkms, nkme, &
3138 nips, nipe, njps, njpe, nkps, nkpe )
3139
3140 nlev = ckde - ckds + 1
3141
3142 #include "nmm_nest_interpdown_unpack.inc"
3143
3144 CALL get_ijk_from_grid ( grid , &
3145 ids, ide, jds, jde, kds, kde, &
3146 ims, ime, jms, jme, kms, kme, &
3147 ips, ipe, jps, jpe, kps, kpe )
3148
3149 #include "HALO_NMM_FORCE_DOWN1.inc"
3150
3151 ! code here to interpolate the data into the nested domain
3152 #include "nmm_nest_forcedown_interp.inc"
3153
3154 #define COPY_OUT
3155 #include <nmm_scalar_derefs.inc>
3156
3157 RETURN
3158 END SUBROUTINE force_domain_nmm_part2
3159
3160 !================================================================================
3161 !
3162 ! This routine exists only to call a halo on a domain (the nest). It
3163 ! gets called from feedback_domain_nmm_part1, below. This is needed
3164 ! because the halo code expects the fields being exchanged to have
3165 ! been dereferenced from the grid data structure, but in feedback_domain_nmm_part1
3166 ! the grid data structure points to the coarse domain, not the nest.
3167 ! And we want the halo exchange on the nest, so that the code in
3168 ! nmm_nest_feedbackup_interp.inc will work correctly on multi-p. JM 20040308
3169 !
3170
3171 SUBROUTINE feedback_nest_prep_nmm ( grid, config_flags &
3172 !
3173 #include "nmm_dummy_args.inc"
3174 !
3175 )
3176 USE module_domain
3177 USE module_configure
3178 USE module_dm
3179 USE module_state_description
3180 IMPLICIT NONE
3181 !
3182 TYPE(domain), TARGET :: grid ! name of the grid being dereferenced (must be "grid")
3183 TYPE (grid_config_rec_type) :: config_flags ! configuration flags; carries the vertical dim of
3184 ! soil temp, moisture, etc., and the vertical dim
3185 ! of soil categories
3186 #include <nmm_dummy_decl.inc>
3187
3188 INTEGER :: ids, ide, jds, jde, kds, kde, &
3189 ims, ime, jms, jme, kms, kme, &
3190 ips, ipe, jps, jpe, kps, kpe
3191
3192 INTEGER :: idum1, idum2
3193
3194
3195 #ifdef DEREF_KLUDGE
3196 ! see http://www.mmm.ucar.edu/wrf/WG2/topics/deref_kludge.htm
3197 INTEGER :: sm31 , em31 , sm32 , em32 , sm33 , em33
3198 INTEGER :: sm31x, em31x, sm32x, em32x, sm33x, em33x
3199 INTEGER :: sm31y, em31y, sm32y, em32y, sm33y, em33y
3200 #endif
3201 #include "deref_kludge.h"
3202
3203 #define COPY_IN
3204 #include <nmm_scalar_derefs.inc>
3205
3206 CALL get_ijk_from_grid ( grid , &
3207 ids, ide, jds, jde, kds, kde, &
3208 ims, ime, jms, jme, kms, kme, &
3209 ips, ipe, jps, jpe, kps, kpe )
3210
3211 #ifdef DM_PARALLEL
3212 #include "HALO_NMM_WEIGHTS.inc"
3213 #endif
3214
3215 #define COPY_OUT
3216 #include <nmm_scalar_derefs.inc>
3217
3218 END SUBROUTINE feedback_nest_prep_nmm
3219
3220 !------------------------------------------------------------------
3221
3222 SUBROUTINE feedback_domain_nmm_part1 ( grid, ngrid, config_flags &
3223 !
3224 #include "nmm_dummy_args.inc"
3225 !
3226 )
3227 USE module_domain
3228 USE module_configure
3229 USE module_dm
3230 IMPLICIT NONE
3231 !
3232 TYPE(domain), POINTER :: grid ! name of the grid being dereferenced (must be "grid")
3233 TYPE(domain), POINTER :: ngrid
3234 #include <nmm_dummy_decl.inc>
3235 INTEGER nlev, msize
3236 INTEGER i,j,pig,pjg,cm,cn,nig,njg,retval,k
3237 TYPE(domain), POINTER :: xgrid
3238 TYPE (grid_config_rec_type) :: config_flags, nconfig_flags
3239 REAL xv(500)
3240 INTEGER :: cids, cide, cjds, cjde, ckds, ckde, &
3241 cims, cime, cjms, cjme, ckms, ckme, &
3242 cips, cipe, cjps, cjpe, ckps, ckpe
3243 INTEGER :: nids, nide, njds, njde, nkds, nkde, &
3244 nims, nime, njms, njme, nkms, nkme, &
3245 nips, nipe, njps, njpe, nkps, nkpe
3246 INTEGER local_comm, myproc, nproc, idum1, idum2
3247
3248 #ifdef DEREF_KLUDGE
3249 ! see http://www.mmm.ucar.edu/wrf/WG2/topics/deref_kludge.htm
3250 INTEGER :: sm31 , em31 , sm32 , em32 , sm33 , em33
3251 INTEGER :: sm31x, em31x, sm32x, em32x, sm33x, em33x
3252 INTEGER :: sm31y, em31y, sm32y, em32y, sm33y, em33y
3253 #endif
3254
3255 INTERFACE
3256 SUBROUTINE feedback_nest_prep_nmm ( grid, config_flags &
3257 !
3258 #include "nmm_dummy_args.inc"
3259 !
3260 )
3261 USE module_domain
3262 USE module_configure
3263 USE module_dm
3264 USE module_state_description
3265 !
3266 TYPE (grid_config_rec_type) :: config_flags
3267 TYPE(domain), TARGET :: grid
3268 #include <nmm_dummy_decl.inc>
3269 END SUBROUTINE feedback_nest_prep_nmm
3270 END INTERFACE
3271 !
3272 #define COPY_IN
3273 #include <nmm_scalar_derefs.inc>
3274
3275 CALL wrf_get_dm_communicator ( local_comm )
3276 CALL wrf_get_myproc( myproc )
3277 CALL wrf_get_nproc( nproc )
3278
3279
3280 !
3281 ! intermediate grid
3282 CALL get_ijk_from_grid ( grid , &
3283 cids, cide, cjds, cjde, ckds, ckde, &
3284 cims, cime, cjms, cjme, ckms, ckme, &
3285 cips, cipe, cjps, cjpe, ckps, ckpe )
3286 ! nest grid
3287 CALL get_ijk_from_grid ( ngrid , &
3288 nids, nide, njds, njde, nkds, nkde, &
3289 nims, nime, njms, njme, nkms, nkme, &
3290 nips, nipe, njps, njpe, nkps, nkpe )
3291
3292 nlev = ckde - ckds + 1
3293
3294 ips_save = ngrid%i_parent_start ! +1 not used in ipe_save & jpe_save
3295 jps_save = ngrid%j_parent_start ! because of one extra namelist point
3296 ipe_save = ngrid%i_parent_start + (nide-nids) / ngrid%parent_grid_ratio
3297 jpe_save = ngrid%j_parent_start + (njde-njds) / ngrid%parent_grid_ratio
3298
3299 ! feedback_nest_prep invokes a halo exchange on the ngrid. It is done this way
3300 ! in a separate routine because the HALOs need the data to be dereferenced from the
3301 ! grid data structure and, in this routine, the dereferenced fields are related to
3302 ! the intermediate domain, not the nest itself. Save the current grid pointer to intermediate
3303 ! domain, switch grid to point to ngrid, invoke feedback_nest_prep, then restore grid
3304 ! to point to intermediate domain.
3305
3306 CALL model_to_grid_config_rec ( ngrid%id , model_config_rec , nconfig_flags )
3307 CALL set_scalar_indices_from_config ( ngrid%id , idum1 , idum2 )
3308 xgrid => grid
3309 grid => ngrid
3310 #include "deref_kludge.h"
3311 CALL feedback_nest_prep_nmm ( grid, config_flags &
3312 !
3313 #include "nmm_actual_args.inc"
3314 !
3315 )
3316
3317 ! put things back so grid is intermediate grid
3318
3319 grid => xgrid
3320 CALL set_scalar_indices_from_config ( grid%id , idum1 , idum2 )
3321
3322 ! "interp" (basically copy) ngrid onto intermediate grid
3323
3324 #include "nmm_nest_feedbackup_interp.inc"
3325
3326 #define COPY_OUT
3327 #include <nmm_scalar_derefs.inc>
3328 RETURN
3329 END SUBROUTINE feedback_domain_nmm_part1
3330
3331 !------------------------------------------------------------------
3332
3333 SUBROUTINE feedback_domain_nmm_part2 ( grid, intermediate_grid, ngrid , config_flags &
3334 !
3335 #include "nmm_dummy_args.inc"
3336 !
3337 )
3338 USE module_domain
3339 USE module_configure
3340 USE module_dm
3341 USE module_utility
3342 IMPLICIT NONE
3343
3344 !
3345 TYPE(domain), POINTER :: grid ! name of the grid being dereferenced (must be "grid")
3346 TYPE(domain), POINTER :: intermediate_grid
3347 TYPE(domain), POINTER :: ngrid
3348
3349 #include <nmm_dummy_decl.inc>
3350 INTEGER nlev, msize
3351 INTEGER i,j,pig,pjg,cm,cn,nig,njg,retval,k
3352 TYPE (grid_config_rec_type) :: config_flags
3353 REAL xv(500)
3354 INTEGER :: cids, cide, cjds, cjde, ckds, ckde, &
3355 cims, cime, cjms, cjme, ckms, ckme, &
3356 cips, cipe, cjps, cjpe, ckps, ckpe
3357 INTEGER :: nids, nide, njds, njde, nkds, nkde, &
3358 nims, nime, njms, njme, nkms, nkme, &
3359 nips, nipe, njps, njpe, nkps, nkpe
3360 INTEGER :: ids, ide, jds, jde, kds, kde, &
3361 ims, ime, jms, jme, kms, kme, &
3362 ips, ipe, jps, jpe, kps, kpe
3363 INTEGER icoord, jcoord, idim_cd, jdim_cd
3364 INTEGER local_comm, myproc, nproc
3365 INTEGER iparstrt, jparstrt, sw
3366
3367 character*256 :: timestr
3368 integer ierr
3369
3370 REAL nest_influence
3371 LOGICAL, EXTERNAL :: nmm_cd_feedback_mask
3372 #ifdef DEREF_KLUDGE
3373 ! see http://www.mmm.ucar.edu/wrf/WG2/topics/deref_kludge.htm
3374 INTEGER :: sm31 , em31 , sm32 , em32 , sm33 , em33
3375 INTEGER :: sm31x, em31x, sm32x, em32x, sm33x, em33x
3376 INTEGER :: sm31y, em31y, sm32y, em32y, sm33y, em33y
3377 #endif
3378 #include "deref_kludge.h"
3379
3380 #define COPY_IN
3381 #include <nmm_scalar_derefs.inc>
3382
3383 ! On entry to this routine,
3384 ! "grid" refers to the parent domain
3385 ! "intermediate_grid" refers to local copy of parent domain that overlies this patch of nest
3386 ! "ngrid" refers to the nest, which is only needed for smoothing on the parent because
3387 ! the nest feedback data has already been transferred during em_nest_feedbackup_interp
3388 ! in part1, above.
3389 ! The way these settings c and n dimensions are set, below, looks backwards but from the point
3390 ! of view of the RSL routine rsl_lite_to_parent_info(), call to which is included by
3391 ! em_nest_feedbackup_pack, the "n" domain represents the parent domain and the "c" domain
3392 ! represents the intermediate domain. The backwards lookingness should be fixed in the gen_comms.c
3393 ! registry routine that accompanies RSL_LITE but, just as it's sometimes easier to put up a road
3394 ! sign that says "DIP" than fix the dip, at this point it was easier just to write this comment. JM
3395 !
3396
3397 nest_influence = 0.5
3398 #define NEST_INFLUENCE(A,B) A = nest_influence*(B) + (1.0-nest_influence)*(A)
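! Illustrative expansion of the macro (hypothetical array names): with
! nest_influence = 0.5, NEST_INFLUENCE( parent_val(i,j), fedback_val ) becomes
!   parent_val(i,j) = 0.5*fedback_val + (1.0-0.5)*parent_val(i,j)
! i.e. an equal-weight blend of the fed-back nest value and the existing parent value.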
3399
3400
3401 CALL domain_clock_get( grid, current_timestr=timestr )
3402
3403 CALL get_ijk_from_grid ( intermediate_grid , &
3404 cids, cide, cjds, cjde, ckds, ckde, &
3405 cims, cime, cjms, cjme, ckms, ckme, &
3406 cips, cipe, cjps, cjpe, ckps, ckpe )
3407 CALL get_ijk_from_grid ( grid , &
3408 nids, nide, njds, njde, nkds, nkde, &
3409 nims, nime, njms, njme, nkms, nkme, &
3410 nips, nipe, njps, njpe, nkps, nkpe )
3411
3412 nide = nide - 1 !dusan
3413 njde = njde - 1 !dusan
3414
3415 CALL nl_get_i_parent_start ( intermediate_grid%id, iparstrt )
3416 CALL nl_get_j_parent_start ( intermediate_grid%id, jparstrt )
3417 CALL nl_get_shw ( intermediate_grid%id, sw )
3418 icoord = iparstrt - sw
3419 jcoord = jparstrt - sw
3420 idim_cd = cide - cids + 1
3421 jdim_cd = cjde - cjds + 1
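! For example (hypothetical values): with i_parent_start = 10 and shw = 2, icoord = 8;
! with cids = 1 and cide = 20, idim_cd = 20. These give the feedback pack/unpack
! includes below the origin and extent of the intermediate domain in parent-grid index space.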
3422
3423 nlev = ckde - ckds + 1
3424
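! The feedback exchange proper is a three-step sequence: the pack include below
! gathers the nest-resolution data on each task, rsl_lite_merge_msgs routes those
! messages to the tasks owning the corresponding parent-domain points, and the
! unpack include deposits the data onto the intermediate-domain arrays.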
3425 #include "nmm_nest_feedbackup_pack.inc"
3426
3427 CALL wrf_get_dm_communicator ( local_comm )
3428 CALL wrf_get_myproc( myproc )
3429 CALL wrf_get_nproc( nproc )
3430
3431 CALL rsl_lite_merge_msgs( myproc, nproc, local_comm )
3432
3433 #include "nmm_nest_feedbackup_unpack.inc"
3434
3435
3436 ! smooth coarse grid
3437
3438 CALL get_ijk_from_grid ( ngrid, &
3439 nids, nide, njds, njde, nkds, nkde, &
3440 nims, nime, njms, njme, nkms, nkme, &
3441 nips, nipe, njps, njpe, nkps, nkpe )
3442 CALL get_ijk_from_grid ( grid , &
3443 ids, ide, jds, jde, kds, kde, &
3444 ims, ime, jms, jme, kms, kme, &
3445 ips, ipe, jps, jpe, kps, kpe )
3446
3447 #include "HALO_NMM_INTERP_UP.inc"
3448
3449 CALL get_ijk_from_grid ( grid , &
3450 cids, cide, cjds, cjde, ckds, ckde, &
3451 cims, cime, cjms, cjme, ckms, ckme, &
3452 cips, cipe, cjps, cjpe, ckps, ckpe )
3453
3454 #include "nmm_nest_feedbackup_smooth.inc"
3455
3456 #define COPY_OUT
3457 #include <nmm_scalar_derefs.inc>
3458 RETURN
3459 END SUBROUTINE feedback_domain_nmm_part2
3460
3461 !=================================================================================
3462 ! End of gopal's doing
3463 !=================================================================================
3464 #endif
3465
3466 !------------------------------------------------------------------
3467
3468 SUBROUTINE wrf_gatherv_real (Field, field_ofst, &
3469 my_count , & ! sendcount
3470 globbuf, glob_ofst , & ! recvbuf
3471 counts , & ! recvcounts
3472 displs , & ! displs
3473 root , & ! root
3474 communicator , & ! communicator
3475 ierr )
3476 USE module_dm
3477 IMPLICIT NONE
3478 INCLUDE 'mpif.h'
3479 INTEGER field_ofst, glob_ofst
3480 INTEGER my_count, communicator, root, ierr
3481 INTEGER , DIMENSION(*) :: counts, displs
3482 REAL, DIMENSION(*) :: Field, globbuf
3483
3484 CALL mpi_gatherv( Field( field_ofst ), & ! sendbuf
3485 my_count , & ! sendcount
3486 getrealmpitype() , & ! sendtype
3487 globbuf( glob_ofst ) , & ! recvbuf
3488 counts , & ! recvcounts
3489 displs , & ! displs
3490 getrealmpitype() , & ! recvtype
3491 root , & ! root
3492 communicator , & ! communicator
3493 ierr )
3494
3495 END SUBROUTINE wrf_gatherv_real
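! A minimal usage sketch for the gatherv wrapper (hypothetical sizes and variable
! names, not part of this module): every task contributes npts contiguous reals and
! the root receives them back to back. In real use counts/displs come from the
! patch decomposition rather than being uniform.
!
!   INTEGER :: comm, me, np, npts, i, ierr
!   INTEGER, ALLOCATABLE :: counts(:), displs(:)
!   REAL, ALLOCATABLE :: local(:), global(:)
!   CALL wrf_get_dm_communicator( comm )
!   CALL wrf_get_myproc( me )                 ! 0-based task id
!   CALL wrf_get_nproc( np )
!   npts = 100                                ! hypothetical per-task count
!   ALLOCATE( counts(np), displs(np), local(npts), global(npts*np) )
!   counts = npts
!   DO i = 1, np
!     displs(i) = (i-1)*npts                  ! MPI displacements are 0-based
!   ENDDO
!   local = REAL( me )
!   CALL wrf_gatherv_real( local, 1, npts, global, 1, counts, displs, 0, comm, ierr )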
3496
3497 SUBROUTINE wrf_gatherv_double (Field, field_ofst, &
3498 my_count , & ! sendcount
3499 globbuf, glob_ofst , & ! recvbuf
3500 counts , & ! recvcounts
3501 displs , & ! displs
3502 root , & ! root
3503 communicator , & ! communicator
3504 ierr )
3505 USE module_dm
3506 IMPLICIT NONE
3507 INCLUDE 'mpif.h'
3508 INTEGER field_ofst, glob_ofst
3509 INTEGER my_count, communicator, root, ierr
3510 INTEGER , DIMENSION(*) :: counts, displs
 3511 ! This next declaration is REAL, not DOUBLE PRECISION, because it will be autopromoted
 3512 ! to double precision by the compiler when WRF is compiled for 8-byte reals. The only reason
 3513 ! for having this separate routine is so that we pass the correct MPI type to mpi_gatherv;
 3514 ! if we were not indexing the globbuf and Field arrays it would not even matter.
3515 REAL, DIMENSION(*) :: Field, globbuf
3516
3517 CALL mpi_gatherv( Field( field_ofst ), & ! sendbuf
3518 my_count , & ! sendcount
3519 MPI_DOUBLE_PRECISION , & ! sendtype
3520 globbuf( glob_ofst ) , & ! recvbuf
3521 counts , & ! recvcounts
3522 displs , & ! displs
3523 MPI_DOUBLE_PRECISION , & ! recvtype
3524 root , & ! root
3525 communicator , & ! communicator
3526 ierr )
3527
3528 END SUBROUTINE wrf_gatherv_double
3529
3530 SUBROUTINE wrf_gatherv_integer (Field, field_ofst, &
3531 my_count , & ! sendcount
3532 globbuf, glob_ofst , & ! recvbuf
3533 counts , & ! recvcounts
3534 displs , & ! displs
3535 root , & ! root
3536 communicator , & ! communicator
3537 ierr )
3538 IMPLICIT NONE
3539 INCLUDE 'mpif.h'
3540 INTEGER field_ofst, glob_ofst
3541 INTEGER my_count, communicator, root, ierr
3542 INTEGER , DIMENSION(*) :: counts, displs
3543 INTEGER, DIMENSION(*) :: Field, globbuf
3544
3545 CALL mpi_gatherv( Field( field_ofst ), & ! sendbuf
3546 my_count , & ! sendcount
3547 MPI_INTEGER , & ! sendtype
3548 globbuf( glob_ofst ) , & ! recvbuf
3549 counts , & ! recvcounts
3550 displs , & ! displs
3551 MPI_INTEGER , & ! recvtype
3552 root , & ! root
3553 communicator , & ! communicator
3554 ierr )
3555
3556 END SUBROUTINE wrf_gatherv_integer
3557
 3558 ! Scatterv wrappers, added 20070124
3559 SUBROUTINE wrf_scatterv_real ( &
 3560 globbuf, glob_ofst , & ! sendbuf (global buffer on root)
 3561 counts , & ! sendcounts
 3562 Field, field_ofst, & ! recvbuf
 3563 my_count , & ! recvcount
3564 displs , & ! displs
3565 root , & ! root
3566 communicator , & ! communicator
3567 ierr )
3568 USE module_dm
3569 IMPLICIT NONE
3570 INCLUDE 'mpif.h'
3571 INTEGER field_ofst, glob_ofst
3572 INTEGER my_count, communicator, root, ierr
3573 INTEGER , DIMENSION(*) :: counts, displs
3574 REAL, DIMENSION(*) :: Field, globbuf
3575
3576 CALL mpi_scatterv( &
 3577 globbuf( glob_ofst ) , & ! sendbuf
 3578 counts , & ! sendcounts
 3579 displs , & ! displs
 3580 getrealmpitype() , & ! sendtype
 3581 Field( field_ofst ), & ! recvbuf
 3582 my_count , & ! recvcount
 3583 getrealmpitype() , & ! recvtype
3584 root , & ! root
3585 communicator , & ! communicator
3586 ierr )
3587
3588 END SUBROUTINE wrf_scatterv_real
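! A minimal usage sketch (hypothetical names, mirroring the gatherv sketch above):
! the root scatters contiguous slices of a global buffer back to the tasks. Note
! that, unlike the gatherv wrapper, the global (send-side) buffer and counts come
! first in the argument list.
!
!   CALL wrf_scatterv_real( global, 1, counts, local, 1, npts, displs, 0, comm, ierr )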
3589
3590 SUBROUTINE wrf_scatterv_double ( &
 3591 globbuf, glob_ofst , & ! sendbuf (global buffer on root)
 3592 counts , & ! sendcounts
 3593 Field, field_ofst, & ! recvbuf
 3594 my_count , & ! recvcount
3595 displs , & ! displs
3596 root , & ! root
3597 communicator , & ! communicator
3598 ierr )
3599 USE module_dm
3600 IMPLICIT NONE
3601 INCLUDE 'mpif.h'
3602 INTEGER field_ofst, glob_ofst
3603 INTEGER my_count, communicator, root, ierr
3604 INTEGER , DIMENSION(*) :: counts, displs
 3605 ! This next declaration is REAL, not DOUBLE PRECISION, because it will be autopromoted
 3606 ! to double precision by the compiler when WRF is compiled for 8-byte reals. The only reason
 3607 ! for having this separate routine is so that we pass the correct MPI type to mpi_scatterv;
 3608 ! if we were not indexing the globbuf and Field arrays it would not even matter.
3609 REAL, DIMENSION(*) :: Field, globbuf
3610
3611 CALL mpi_scatterv( &
 3612 globbuf( glob_ofst ) , & ! sendbuf
 3613 counts , & ! sendcounts
 3614 displs , & ! displs
 3615 MPI_DOUBLE_PRECISION , & ! sendtype
 3616 Field( field_ofst ), & ! recvbuf
 3617 my_count , & ! recvcount
 3618 MPI_DOUBLE_PRECISION , & ! recvtype
3619 root , & ! root
3620 communicator , & ! communicator
3621 ierr )
3622
3623 END SUBROUTINE wrf_scatterv_double
3624
3625 SUBROUTINE wrf_scatterv_integer ( &
 3626 globbuf, glob_ofst , & ! sendbuf (global buffer on root)
 3627 counts , & ! sendcounts
 3628 Field, field_ofst, & ! recvbuf
 3629 my_count , & ! recvcount
3630 displs , & ! displs
3631 root , & ! root
3632 communicator , & ! communicator
3633 ierr )
3634 IMPLICIT NONE
3635 INCLUDE 'mpif.h'
3636 INTEGER field_ofst, glob_ofst
3637 INTEGER my_count, communicator, root, ierr
3638 INTEGER , DIMENSION(*) :: counts, displs
3639 INTEGER, DIMENSION(*) :: Field, globbuf
3640
3641 CALL mpi_scatterv( &
 3642 globbuf( glob_ofst ) , & ! sendbuf
 3643 counts , & ! sendcounts
 3644 displs , & ! displs
 3645 MPI_INTEGER , & ! sendtype
 3646 Field( field_ofst ), & ! recvbuf
 3647 my_count , & ! recvcount
 3648 MPI_INTEGER , & ! recvtype
3649 root , & ! root
3650 communicator , & ! communicator
3651 ierr )
3652
3653 END SUBROUTINE wrf_scatterv_integer
 3654 ! end of scatterv wrappers added 20070124
3655
3656 SUBROUTINE wrf_dm_define_comms ( grid )
3657 USE module_domain
3658 IMPLICIT NONE
3659 TYPE(domain) , INTENT (INOUT) :: grid
3660 RETURN
3661 END SUBROUTINE wrf_dm_define_comms
3662
3663 SUBROUTINE set_dm_debug
3664 USE module_dm
3665 IMPLICIT NONE
3666 dm_debug_flag = .TRUE.
3667 END SUBROUTINE set_dm_debug
3668 SUBROUTINE reset_dm_debug
3669 USE module_dm
3670 IMPLICIT NONE
3671 dm_debug_flag = .FALSE.
3672 END SUBROUTINE reset_dm_debug
3673 SUBROUTINE get_dm_debug ( arg )
3674 USE module_dm
3675 IMPLICIT NONE
3676 LOGICAL arg
3677 arg = dm_debug_flag
3678 END SUBROUTINE get_dm_debug
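! A minimal usage sketch (hypothetical call site): code outside this module should
! toggle and query the flag through these wrappers rather than setting
! dm_debug_flag directly.
!
!   LOGICAL :: dbg
!   CALL set_dm_debug            ! enable extra distributed-memory diagnostics
!   CALL get_dm_debug( dbg )     ! dbg is now .TRUE.
!   CALL reset_dm_debug          ! disable them again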