module_dm.F
1 !WRF:PACKAGE:RSL
2 !
3 MODULE module_dm
4
5 USE module_machine
6 USE module_configure
7 USE module_state_description
8 USE module_wrf_error
9 USE module_driver_constants
10 USE module_comm_dm
11 IMPLICIT NONE
12
13 #if ( NMM_CORE == 1 ) || defined( WRF_CHEM )
14 INTEGER, PARAMETER :: max_halo_width = 6
15 #else
16 INTEGER, PARAMETER :: max_halo_width = 5
17 #endif
18
19 INTEGER :: ips_save, ipe_save, jps_save, jpe_save, itrace
20
21 INTEGER ntasks, ntasks_y, ntasks_x, mytask, mytask_x, mytask_y
22 INTEGER local_communicator, local_communicator_periodic, local_iocommunicator
23 INTEGER local_communicator_x, local_communicator_y ! subcommunicators for rows and cols of mesh
24 LOGICAL :: dm_debug_flag = .FALSE.
25
26 INTERFACE wrf_dm_maxval
27 #ifdef PROMOTE_FLOAT
28 MODULE PROCEDURE wrf_dm_maxval_real , wrf_dm_maxval_integer
29 #else
30 MODULE PROCEDURE wrf_dm_maxval_real , wrf_dm_maxval_integer, wrf_dm_maxval_doubleprecision
31 #endif
32 END INTERFACE
33
34 INTERFACE wrf_dm_minval ! gopal's doing
35 #ifdef PROMOTE_FLOAT
36 MODULE PROCEDURE wrf_dm_minval_real , wrf_dm_minval_integer
37 #else
38 MODULE PROCEDURE wrf_dm_minval_real , wrf_dm_minval_integer, wrf_dm_minval_doubleprecision
39 #endif
40 END INTERFACE
41
42 CONTAINS
43
44
45 SUBROUTINE MPASPECT( P, MINM, MINN, PROCMIN_M, PROCMIN_N )
46 IMPLICIT NONE
47 INTEGER P, M, N, MINI, MINM, MINN, PROCMIN_M, PROCMIN_N
48 MINI = 2*P
49 MINM = 1
50 MINN = P
51 DO M = 1, P
52 IF ( MOD( P, M ) .EQ. 0 ) THEN
53 N = P / M
54 IF ( ABS(M-N) .LT. MINI &
55 .AND. M .GE. PROCMIN_M &
56 .AND. N .GE. PROCMIN_N &
57 ) THEN
58 MINI = ABS(M-N)
59 MINM = M
60 MINN = N
61 ENDIF
62 ENDIF
63 ENDDO
64 IF ( MINM .LT. PROCMIN_M .OR. MINN .LT. PROCMIN_N ) THEN
65 WRITE( wrf_err_message , * )'MPASPECT: UNABLE TO GENERATE PROCESSOR MESH. STOPPING.'
66 CALL wrf_message ( TRIM ( wrf_err_message ) )
67 WRITE(0,*)' PROCMIN_M ', PROCMIN_M
68 WRITE( wrf_err_message , * )' PROCMIN_M ', PROCMIN_M
69 CALL wrf_message ( TRIM ( wrf_err_message ) )
70 WRITE( wrf_err_message , * )' PROCMIN_N ', PROCMIN_N
71 CALL wrf_message ( TRIM ( wrf_err_message ) )
72 WRITE( wrf_err_message , * )' P ', P
73 CALL wrf_message ( TRIM ( wrf_err_message ) )
74 WRITE( wrf_err_message , * )' MINM ', MINM
75 CALL wrf_message ( TRIM ( wrf_err_message ) )
76 WRITE( wrf_err_message , * )' MINN ', MINN
77 CALL wrf_message ( TRIM ( wrf_err_message ) )
78 CALL wrf_error_fatal ( 'module_dm: mpaspect' )
79 ENDIF
80 RETURN
81 END SUBROUTINE MPASPECT
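!
! Illustrative note: MPASPECT scans the divisors of P in increasing order and keeps
! the factor pair (M,N), M*N = P, with the smallest |M-N| that still satisfies the
! minimums, so ties favor the smaller M (fewer tasks in the i-dimension).  A few
! hypothetical results, assuming PROCMIN_M = PROCMIN_N = 1:
!
!     CALL MPASPECT( 24, minm, minn, 1, 1 )   ! -> minm = 4, minn = 6
!     CALL MPASPECT( 16, minm, minn, 1, 1 )   ! -> minm = 4, minn = 4
!     CALL MPASPECT( 13, minm, minn, 1, 1 )   ! -> minm = 1, minn = 13 (prime P)
!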
82
83 SUBROUTINE wrf_dm_initialize
84 IMPLICIT NONE
85 INCLUDE 'mpif.h'
86 INTEGER :: local_comm, local_comm2, new_local_comm, group, newgroup, p, p1, ierr
87 INTEGER, ALLOCATABLE, DIMENSION(:) :: ranks
88 INTEGER comdup
89 INTEGER, DIMENSION(2) :: dims, coords
90 LOGICAL, DIMENSION(2) :: isperiodic
91 LOGICAL :: reorder_mesh
92
93 CALL wrf_get_dm_communicator ( local_comm )
94 CALL mpi_comm_size( local_comm, ntasks, ierr )
95 CALL nl_get_nproc_x ( 1, ntasks_x )
96 CALL nl_get_nproc_y ( 1, ntasks_y )
97 CALL nl_get_reorder_mesh( 1, reorder_mesh )
98
99 !   check whether the user has specified the decomposition in the namelist
100 IF ( ntasks_x .GT. 0 .OR. ntasks_y .GT. 0 ) THEN
101 ! if only ntasks_x is specified then make it 1-d decomp in i
102 IF ( ntasks_x .GT. 0 .AND. ntasks_y .EQ. -1 ) THEN
103 ntasks_y = ntasks / ntasks_x
104 ! if only ntasks_y is specified then make it 1-d decomp in j
105 ELSE IF ( ntasks_x .EQ. -1 .AND. ntasks_y .GT. 0 ) THEN
106 ntasks_x = ntasks / ntasks_y
107 ENDIF
108 ! make sure user knows what they're doing
109 IF ( ntasks_x * ntasks_y .NE. ntasks ) THEN
110 WRITE( wrf_err_message , * )'WRF_DM_INITIALIZE (RSL_LITE): nproc_x * nproc_y in namelist ne ',ntasks
111 CALL wrf_error_fatal ( wrf_err_message )
112 ENDIF
113 ELSE
114 ! When neither is specified, work out mesh with MPASPECT
115 !        Pass 1 for both minimum mesh dimensions so that MPASPECT is free to choose
116 !        the most nearly square mesh, with the i-dimension count equal to or smaller than the j-dimension count.
117 CALL mpaspect ( ntasks, ntasks_x, ntasks_y, 1, 1 )
118 ENDIF
119 WRITE( wrf_err_message , * )'Ntasks in X ',ntasks_x,', ntasks in Y ',ntasks_y
120 CALL wrf_message( wrf_err_message )
121
122 CALL mpi_comm_rank( local_comm, mytask, ierr )
123 ! extra code to reorder the communicator 20051212jm
124 IF ( reorder_mesh ) THEN
125 write(0,*)'reordering mesh'
126 ALLOCATE (ranks(ntasks))
127 CALL mpi_comm_dup ( local_comm , local_comm2, ierr )
128 CALL mpi_comm_group ( local_comm2, group, ierr )
129 DO p1=1,ntasks
130 p = p1 - 1
131 ranks(p1) = mod( p , ntasks_x ) * ntasks_y + p / ntasks_x
132 ENDDO
133 CALL mpi_group_incl( group, ntasks, ranks, newgroup, ierr )
134 DEALLOCATE (ranks)
135 CALL mpi_comm_create( local_comm2, newgroup, new_local_comm , ierr )
136 ELSE
137 new_local_comm = local_comm
138 ENDIF
139 ! end extra code to reorder the communicator 20051212jm
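! Worked example (hypothetical values): with ntasks = 6, ntasks_x = 3 and
! ntasks_y = 2, the mapping ranks(p1) = mod(p,ntasks_x)*ntasks_y + p/ntasks_x
! gives ranks = (0, 2, 4, 1, 3, 5), i.e. the reordered communicator numbers
! the mesh column-by-column instead of row-by-row.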
140 dims(1) = ntasks_y ! rows
141 dims(2) = ntasks_x ! columns
142 isperiodic(1) = .false.
143 isperiodic(2) = .false.
144 CALL mpi_cart_create( new_local_comm, 2, dims, isperiodic, .false., local_communicator, ierr )
145 dims(1) = ntasks_y ! rows
146 dims(2) = ntasks_x ! columns
147 isperiodic(1) = .true.
148 isperiodic(2) = .true.
149 CALL mpi_cart_create( new_local_comm, 2, dims, isperiodic, .false., local_communicator_periodic, ierr )
150 ! debug
151 CALL mpi_comm_rank( local_communicator_periodic, mytask, ierr )
152 CALL mpi_cart_coords( local_communicator_periodic, mytask, 2, coords, ierr )
153
154 CALL mpi_comm_rank( local_communicator, mytask, ierr )
155 CALL mpi_cart_coords( local_communicator, mytask, 2, coords, ierr )
156 mytask_x = coords(2) ! col task (x)
157 mytask_y = coords(1) ! row task (y)
158 CALL nl_set_nproc_x ( 1, ntasks_x )
159 CALL nl_set_nproc_y ( 1, ntasks_y )
160
161 ! 20061228 set up subcommunicators for processors in X, Y coords of mesh
162 !  note that local_communicator_x has all the processors in a row (X=0:nproc_x-1);
163 !  in other words, local_communicator_x has all the processes with the same rank in Y
164 CALL MPI_Comm_dup( new_local_comm, comdup, ierr )
165 IF ( ierr .NE. 0 ) CALL wrf_error_fatal('MPI_Comm_dup fails in 20061228 mod')
166 CALL MPI_Comm_split(comdup,mytask_y,mytask,local_communicator_x,ierr)
167 IF ( ierr .NE. 0 ) CALL wrf_error_fatal('MPI_Comm_split fails for x in 20061228 mod')
168 CALL MPI_Comm_split(comdup,mytask_x,mytask,local_communicator_y,ierr)
169 IF ( ierr .NE. 0 ) CALL wrf_error_fatal('MPI_Comm_split fails for y in 20061228 mod')
170 ! end 20061228
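! Illustrative note: the split on mytask_y places all tasks of one mesh row in the
! same local_communicator_x, and the split on mytask_x places all tasks of one mesh
! column in the same local_communicator_y.  For a hypothetical 3x2 mesh
! (ntasks_x=3, ntasks_y=2), tasks at coords (0,0),(1,0),(2,0) share a
! local_communicator_x and tasks at coords (0,0),(0,1) share a local_communicator_y.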
171
172 CALL wrf_set_dm_communicator ( local_communicator )
173 RETURN
174 END SUBROUTINE wrf_dm_initialize
175
176 SUBROUTINE patch_domain_rsl_lite( id , parent, parent_id, &
177 sd1 , ed1 , sp1 , ep1 , sm1 , em1 , &
178 sd2 , ed2 , sp2 , ep2 , sm2 , em2 , &
179 sd3 , ed3 , sp3 , ep3 , sm3 , em3 , &
180 sp1x , ep1x , sm1x , em1x , &
181 sp2x , ep2x , sm2x , em2x , &
182 sp3x , ep3x , sm3x , em3x , &
183 sp1y , ep1y , sm1y , em1y , &
184 sp2y , ep2y , sm2y , em2y , &
185 sp3y , ep3y , sm3y , em3y , &
186 bdx , bdy )
187
188 USE module_domain
189 USE module_machine
190
191 IMPLICIT NONE
192 INTEGER, INTENT(IN) :: sd1 , ed1 , sd2 , ed2 , sd3 , ed3 , bdx , bdy
193 INTEGER, INTENT(OUT) :: sp1 , ep1 , sp2 , ep2 , sp3 , ep3 , &
194 sm1 , em1 , sm2 , em2 , sm3 , em3
195 INTEGER, INTENT(OUT) :: sp1x , ep1x , sp2x , ep2x , sp3x , ep3x , &
196 sm1x , em1x , sm2x , em2x , sm3x , em3x
197 INTEGER, INTENT(OUT) :: sp1y , ep1y , sp2y , ep2y , sp3y , ep3y , &
198 sm1y , em1y , sm2y , em2y , sm3y , em3y
199 INTEGER, INTENT(IN) :: id, parent_id
200 TYPE(domain),POINTER :: parent
201
202 ! Local variables
203 INTEGER :: ids, ide, jds, jde, kds, kde
204 INTEGER :: ims, ime, jms, jme, kms, kme
205 INTEGER :: ips, ipe, jps, jpe, kps, kpe
206 INTEGER :: imsx, imex, jmsx, jmex, kmsx, kmex
207 INTEGER :: ipsx, ipex, jpsx, jpex, kpsx, kpex
208 INTEGER :: imsy, imey, jmsy, jmey, kmsy, kmey
209 INTEGER :: ipsy, ipey, jpsy, jpey, kpsy, kpey
210
211 INTEGER :: c_sd1 , c_ed1 , c_sd2 , c_ed2 , c_sd3 , c_ed3
212 INTEGER :: c_sp1 , c_ep1 , c_sp2 , c_ep2 , c_sp3 , c_ep3 , &
213 c_sm1 , c_em1 , c_sm2 , c_em2 , c_sm3 , c_em3
214 INTEGER :: c_sp1x , c_ep1x , c_sp2x , c_ep2x , c_sp3x , c_ep3x , &
215 c_sm1x , c_em1x , c_sm2x , c_em2x , c_sm3x , c_em3x
216 INTEGER :: c_sp1y , c_ep1y , c_sp2y , c_ep2y , c_sp3y , c_ep3y , &
217 c_sm1y , c_em1y , c_sm2y , c_em2y , c_sm3y , c_em3y
218
219 INTEGER :: c_ids, c_ide, c_jds, c_jde, c_kds, c_kde
220 INTEGER :: c_ims, c_ime, c_jms, c_jme, c_kms, c_kme
221 INTEGER :: c_ips, c_ipe, c_jps, c_jpe, c_kps, c_kpe
222
223 INTEGER :: idim , jdim , kdim , rem , a, b
224 INTEGER :: i, j, ni, nj, Px, Py, P
225
226 INTEGER :: parent_grid_ratio, i_parent_start, j_parent_start
227 INTEGER :: shw
228 INTEGER :: idim_cd, jdim_cd
229
230 TYPE(domain), POINTER :: intermediate_grid
231 TYPE(domain), POINTER :: nest_grid
232
233
234 SELECT CASE ( model_data_order )
235 ! need to finish other cases
236 CASE ( DATA_ORDER_ZXY )
237 ids = sd2 ; ide = ed2
238 jds = sd3 ; jde = ed3
239 kds = sd1 ; kde = ed1
240 CASE ( DATA_ORDER_XYZ )
241 ids = sd1 ; ide = ed1
242 jds = sd2 ; jde = ed2
243 kds = sd3 ; kde = ed3
244 CASE ( DATA_ORDER_XZY )
245 ids = sd1 ; ide = ed1
246 jds = sd3 ; jde = ed3
247 kds = sd2 ; kde = ed2
248 CASE ( DATA_ORDER_YXZ)
249 ids = sd2 ; ide = ed2
250 jds = sd1 ; jde = ed1
251 kds = sd3 ; kde = ed3
252 END SELECT
253
254 CALL compute_memory_dims_rsl_lite ( 0 , bdx, bdy, &
255 ids, ide, jds, jde, kds, kde, &
256 ims, ime, jms, jme, kms, kme, &
257 imsx, imex, jmsx, jmex, kmsx, kmex, &
258 imsy, imey, jmsy, jmey, kmsy, kmey, &
259 ips, ipe, jps, jpe, kps, kpe, &
260 ipsx, ipex, jpsx, jpex, kpsx, kpex, &
261 ipsy, ipey, jpsy, jpey, kpsy, kpey )
262
263    ! ensure that every parent domain point has a full set of nested points under it,
264    ! even at the borders, by making sure the number of nest points is a multiple of
265    ! the nesting ratio. Note that this matters mostly for the intermediate domain, which
266    ! is the subject of the scatter/gather comms with the parent
267
268 IF ( id .GT. 1 ) THEN
269 CALL nl_get_parent_grid_ratio( id, parent_grid_ratio )
270 if ( mod(ime,parent_grid_ratio) .NE. 0 ) ime = ime + parent_grid_ratio - mod(ime,parent_grid_ratio)
271 if ( mod(jme,parent_grid_ratio) .NE. 0 ) jme = jme + parent_grid_ratio - mod(jme,parent_grid_ratio)
272 ENDIF
273
274 SELECT CASE ( model_data_order )
275 CASE ( DATA_ORDER_ZXY )
276 sp2 = ips ; ep2 = ipe ; sm2 = ims ; em2 = ime
277 sp3 = jps ; ep3 = jpe ; sm3 = jms ; em3 = jme
278 sp1 = kps ; ep1 = kpe ; sm1 = kms ; em1 = kme
279 sp2x = ipsx ; ep2x = ipex ; sm2x = imsx ; em2x = imex
280 sp3x = jpsx ; ep3x = jpex ; sm3x = jmsx ; em3x = jmex
281 sp1x = kpsx ; ep1x = kpex ; sm1x = kmsx ; em1x = kmex
282 sp2y = ipsy ; ep2y = ipey ; sm2y = imsy ; em2y = imey
283 sp3y = jpsy ; ep3y = jpey ; sm3y = jmsy ; em3y = jmey
284 sp1y = kpsy ; ep1y = kpey ; sm1y = kmsy ; em1y = kmey
285 CASE ( DATA_ORDER_ZYX )
286 sp3 = ips ; ep3 = ipe ; sm3 = ims ; em3 = ime
287 sp2 = jps ; ep2 = jpe ; sm2 = jms ; em2 = jme
288 sp1 = kps ; ep1 = kpe ; sm1 = kms ; em1 = kme
289 sp3x = ipsx ; ep3x = ipex ; sm3x = imsx ; em3x = imex
290 sp2x = jpsx ; ep2x = jpex ; sm2x = jmsx ; em2x = jmex
291 sp1x = kpsx ; ep1x = kpex ; sm1x = kmsx ; em1x = kmex
292 sp3y = ipsy ; ep3y = ipey ; sm3y = imsy ; em3y = imey
293 sp2y = jpsy ; ep2y = jpey ; sm2y = jmsy ; em2y = jmey
294 sp1y = kpsy ; ep1y = kpey ; sm1y = kmsy ; em1y = kmey
295 CASE ( DATA_ORDER_XYZ )
296 sp1 = ips ; ep1 = ipe ; sm1 = ims ; em1 = ime
297 sp2 = jps ; ep2 = jpe ; sm2 = jms ; em2 = jme
298 sp3 = kps ; ep3 = kpe ; sm3 = kms ; em3 = kme
299 sp1x = ipsx ; ep1x = ipex ; sm1x = imsx ; em1x = imex
300 sp2x = jpsx ; ep2x = jpex ; sm2x = jmsx ; em2x = jmex
301 sp3x = kpsx ; ep3x = kpex ; sm3x = kmsx ; em3x = kmex
302 sp1y = ipsy ; ep1y = ipey ; sm1y = imsy ; em1y = imey
303 sp2y = jpsy ; ep2y = jpey ; sm2y = jmsy ; em2y = jmey
304 sp3y = kpsy ; ep3y = kpey ; sm3y = kmsy ; em3y = kmey
305 CASE ( DATA_ORDER_YXZ)
306 sp2 = ips ; ep2 = ipe ; sm2 = ims ; em2 = ime
307 sp1 = jps ; ep1 = jpe ; sm1 = jms ; em1 = jme
308 sp3 = kps ; ep3 = kpe ; sm3 = kms ; em3 = kme
309 sp2x = ipsx ; ep2x = ipex ; sm2x = imsx ; em2x = imex
310 sp1x = jpsx ; ep1x = jpex ; sm1x = jmsx ; em1x = jmex
311 sp3x = kpsx ; ep3x = kpex ; sm3x = kmsx ; em3x = kmex
312 sp2y = ipsy ; ep2y = ipey ; sm2y = imsy ; em2y = imey
313 sp1y = jpsy ; ep1y = jpey ; sm1y = jmsy ; em1y = jmey
314 sp3y = kpsy ; ep3y = kpey ; sm3y = kmsy ; em3y = kmey
315 CASE ( DATA_ORDER_XZY )
316 sp1 = ips ; ep1 = ipe ; sm1 = ims ; em1 = ime
317 sp3 = jps ; ep3 = jpe ; sm3 = jms ; em3 = jme
318 sp2 = kps ; ep2 = kpe ; sm2 = kms ; em2 = kme
319 sp1x = ipsx ; ep1x = ipex ; sm1x = imsx ; em1x = imex
320 sp3x = jpsx ; ep3x = jpex ; sm3x = jmsx ; em3x = jmex
321 sp2x = kpsx ; ep2x = kpex ; sm2x = kmsx ; em2x = kmex
322 sp1y = ipsy ; ep1y = ipey ; sm1y = imsy ; em1y = imey
323 sp3y = jpsy ; ep3y = jpey ; sm3y = jmsy ; em3y = jmey
324 sp2y = kpsy ; ep2y = kpey ; sm2y = kmsy ; em2y = kmey
325 CASE ( DATA_ORDER_YZX )
326 sp3 = ips ; ep3 = ipe ; sm3 = ims ; em3 = ime
327 sp1 = jps ; ep1 = jpe ; sm1 = jms ; em1 = jme
328 sp2 = kps ; ep2 = kpe ; sm2 = kms ; em2 = kme
329 sp3x = ipsx ; ep3x = ipex ; sm3x = imsx ; em3x = imex
330 sp1x = jpsx ; ep1x = jpex ; sm1x = jmsx ; em1x = jmex
331 sp2x = kpsx ; ep2x = kpex ; sm2x = kmsx ; em2x = kmex
332 sp3y = ipsy ; ep3y = ipey ; sm3y = imsy ; em3y = imey
333 sp1y = jpsy ; ep1y = jpey ; sm1y = jmsy ; em1y = jmey
334 sp2y = kpsy ; ep2y = kpey ; sm2y = kmsy ; em2y = kmey
335 END SELECT
336
337 IF ( id.EQ.1 ) THEN
338 WRITE(wrf_err_message,*)'*************************************'
339 CALL wrf_message( TRIM(wrf_err_message) )
340 WRITE(wrf_err_message,*)'Parent domain'
341 CALL wrf_message( TRIM(wrf_err_message) )
342 WRITE(wrf_err_message,*)'ids,ide,jds,jde ',ids,ide,jds,jde
343 CALL wrf_message( TRIM(wrf_err_message) )
344 WRITE(wrf_err_message,*)'ims,ime,jms,jme ',ims,ime,jms,jme
345 CALL wrf_message( TRIM(wrf_err_message) )
346 WRITE(wrf_err_message,*)'ips,ipe,jps,jpe ',ips,ipe,jps,jpe
347 CALL wrf_message( TRIM(wrf_err_message) )
348 WRITE(wrf_err_message,*)'*************************************'
349 CALL wrf_message( TRIM(wrf_err_message) )
350 ENDIF
351
352 IF ( id .GT. 1 ) THEN
353
354 CALL nl_get_shw( id, shw )
355 CALL nl_get_i_parent_start( id , i_parent_start )
356 CALL nl_get_j_parent_start( id , j_parent_start )
357 CALL nl_get_parent_grid_ratio( id, parent_grid_ratio )
358
359 SELECT CASE ( model_data_order )
360 CASE ( DATA_ORDER_ZXY )
361 idim = ed2-sd2+1
362 jdim = ed3-sd3+1
363 kdim = ed1-sd1+1
364 c_kds = sd1 ; c_kde = ed1
365 CASE ( DATA_ORDER_ZYX )
366 idim = ed3-sd3+1
367 jdim = ed2-sd2+1
368 kdim = ed1-sd1+1
369 c_kds = sd1 ; c_kde = ed1
370 CASE ( DATA_ORDER_XYZ )
371 idim = ed1-sd1+1
372 jdim = ed2-sd2+1
373 kdim = ed3-sd3+1
374 c_kds = sd3 ; c_kde = ed3
375 CASE ( DATA_ORDER_YXZ)
376 idim = ed2-sd2+1
377 jdim = ed1-sd1+1
378 kdim = ed3-sd3+1
379 c_kds = sd3 ; c_kde = ed3
380 CASE ( DATA_ORDER_XZY )
381 idim = ed1-sd1+1
382 jdim = ed3-sd3+1
383 kdim = ed2-sd2+1
384 c_kds = sd2 ; c_kde = ed2
385 CASE ( DATA_ORDER_YZX )
386 idim = ed3-sd3+1
387 jdim = ed1-sd1+1
388 kdim = ed2-sd2+1
389 c_kds = sd2 ; c_kde = ed2
390 END SELECT
391
392 idim_cd = idim / parent_grid_ratio + 1 + 2*shw + 1
393 jdim_cd = jdim / parent_grid_ratio + 1 + 2*shw + 1
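! Worked example (hypothetical values): for a nest with idim = 100 points,
! parent_grid_ratio = 3 and shw = 2,
!     idim_cd = 100/3 + 1 + 2*2 + 1 = 33 + 1 + 4 + 1 = 39
! coarse points (integer division), i.e. enough coarse points to cover the nest
! plus shw stencil points on each side and rounding slack.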
394
395 c_ids = i_parent_start-shw ; c_ide = c_ids + idim_cd - 1
396 c_jds = j_parent_start-shw ; c_jde = c_jds + jdim_cd - 1
397
398 ! we want the intermediate domain to be decomposed the same way
399 ! as the underlying nest. So try this:
400
401 ! At such time as NMM nesting is able to use RSL_LITE (would require
402 ! a number of other mods to this file for that to happen), this should
403 ! be updated along the lines of what's done in compute_memory_dims_rsl_lite
404 ! below. See note dated 20051020. JM
405
406 c_ips = -1
407 nj = ( c_jds - j_parent_start ) * parent_grid_ratio + 1 + 1 ;
408 DO i = c_ids, c_ide
409 ni = ( i - i_parent_start ) * parent_grid_ratio + 1 + 1 ;
410 CALL task_for_point ( ni, nj, ids, ide, jds, jde, ntasks_x, ntasks_y, Px, Py )
411 IF ( Px .EQ. mytask_x ) THEN
412 c_ipe = i
413 IF ( c_ips .EQ. -1 ) c_ips = i
414 ENDIF
415 ENDDO
416
417 c_jps = -1
418 ni = ( c_ids - i_parent_start ) * parent_grid_ratio + 1 + 1 ;
419 DO j = c_jds, c_jde
420 nj = ( j - j_parent_start ) * parent_grid_ratio + 1 + 1 ;
421 CALL task_for_point ( ni, nj, ids, ide, jds, jde, ntasks_x, ntasks_y, Px, Py )
422 IF ( Py .EQ. mytask_y ) THEN
423 c_jpe = j
424 IF ( c_jps .EQ. -1 ) c_jps = j
425 ENDIF
426 ENDDO
427
428 ! extend the patch dimensions out shw along edges of domain
429 IF ( mytask_x .EQ. 0 ) THEN
430 c_ips = c_ips - shw
431 ENDIF
432 IF ( mytask_x .EQ. ntasks_x-1 ) THEN
433 c_ipe = c_ipe + shw
434 ENDIF
435 c_ims = max( c_ips - max(shw,max_halo_width), c_ids - bdx ) - 1
436 c_ime = min( c_ipe + max(shw,max_halo_width), c_ide + bdx ) + 1
437
438 ! handle j dims
439 ! extend the patch dimensions out shw along edges of domain
440 IF ( mytask_y .EQ. 0 ) THEN
441 c_jps = c_jps - shw
442 ENDIF
443 IF ( mytask_y .EQ. ntasks_y-1 ) THEN
444 c_jpe = c_jpe + shw
445 ENDIF
446 c_jms = max( c_jps - max(shw,max_halo_width), c_jds - bdx ) - 1
447 c_jme = min( c_jpe + max(shw,max_halo_width), c_jde + bdx ) + 1
448 ! handle k dims
449 c_kps = 1
450 c_kpe = c_kde
451 c_kms = 1
452 c_kme = c_kde
453
454 WRITE(wrf_err_message,*)'*************************************'
455 CALL wrf_message( TRIM(wrf_err_message) )
456 WRITE(wrf_err_message,*)'Nesting domain'
457 CALL wrf_message( TRIM(wrf_err_message) )
458 WRITE(wrf_err_message,*)'ids,ide,jds,jde ',ids,ide,jds,jde
459 CALL wrf_message( TRIM(wrf_err_message) )
460 WRITE(wrf_err_message,*)'ims,ime,jms,jme ',ims,ime,jms,jme
461 CALL wrf_message( TRIM(wrf_err_message) )
462 WRITE(wrf_err_message,*)'ips,ipe,jps,jpe ',ips,ipe,jps,jpe
463 CALL wrf_message( TRIM(wrf_err_message) )
464 WRITE(wrf_err_message,*)'INTERMEDIATE domain'
465 CALL wrf_message( TRIM(wrf_err_message) )
466 WRITE(wrf_err_message,*)'ids,ide,jds,jde ',c_ids,c_ide,c_jds,c_jde
467 CALL wrf_message( TRIM(wrf_err_message) )
468 WRITE(wrf_err_message,*)'ims,ime,jms,jme ',c_ims,c_ime,c_jms,c_jme
469 CALL wrf_message( TRIM(wrf_err_message) )
470 WRITE(wrf_err_message,*)'ips,ipe,jps,jpe ',c_ips,c_ipe,c_jps,c_jpe
471 CALL wrf_message( TRIM(wrf_err_message) )
472 WRITE(wrf_err_message,*)'*************************************'
473 CALL wrf_message( TRIM(wrf_err_message) )
474
475 SELECT CASE ( model_data_order )
476 CASE ( DATA_ORDER_ZXY )
477 c_sd2 = c_ids ; c_ed2 = c_ide ; c_sp2 = c_ips ; c_ep2 = c_ipe ; c_sm2 = c_ims ; c_em2 = c_ime
478 c_sd3 = c_jds ; c_ed3 = c_jde ; c_sp3 = c_jps ; c_ep3 = c_jpe ; c_sm3 = c_jms ; c_em3 = c_jme
479 c_sd1 = c_kds ; c_ed1 = c_kde ; c_sp1 = c_kps ; c_ep1 = c_kpe ; c_sm1 = c_kms ; c_em1 = c_kme
480 CASE ( DATA_ORDER_ZYX )
481 c_sd3 = c_ids ; c_ed3 = c_ide ; c_sp3 = c_ips ; c_ep3 = c_ipe ; c_sm3 = c_ims ; c_em3 = c_ime
482 c_sd2 = c_jds ; c_ed2 = c_jde ; c_sp2 = c_jps ; c_ep2 = c_jpe ; c_sm2 = c_jms ; c_em2 = c_jme
483 c_sd1 = c_kds ; c_ed1 = c_kde ; c_sp1 = c_kps ; c_ep1 = c_kpe ; c_sm1 = c_kms ; c_em1 = c_kme
484 CASE ( DATA_ORDER_XYZ )
485 c_sd1 = c_ids ; c_ed1 = c_ide ; c_sp1 = c_ips ; c_ep1 = c_ipe ; c_sm1 = c_ims ; c_em1 = c_ime
486 c_sd2 = c_jds ; c_ed2 = c_jde ; c_sp2 = c_jps ; c_ep2 = c_jpe ; c_sm2 = c_jms ; c_em2 = c_jme
487 c_sd3 = c_kds ; c_ed3 = c_kde ; c_sp3 = c_kps ; c_ep3 = c_kpe ; c_sm3 = c_kms ; c_em3 = c_kme
488 CASE ( DATA_ORDER_YXZ)
489 c_sd2 = c_ids ; c_ed2 = c_ide ; c_sp2 = c_ips ; c_ep2 = c_ipe ; c_sm2 = c_ims ; c_em2 = c_ime
490 c_sd1 = c_jds ; c_ed1 = c_jde ; c_sp1 = c_jps ; c_ep1 = c_jpe ; c_sm1 = c_jms ; c_em1 = c_jme
491 c_sd3 = c_kds ; c_ed3 = c_kde ; c_sp3 = c_kps ; c_ep3 = c_kpe ; c_sm3 = c_kms ; c_em3 = c_kme
492 CASE ( DATA_ORDER_XZY )
493 c_sd1 = c_ids ; c_ed1 = c_ide ; c_sp1 = c_ips ; c_ep1 = c_ipe ; c_sm1 = c_ims ; c_em1 = c_ime
494 c_sd3 = c_jds ; c_ed3 = c_jde ; c_sp3 = c_jps ; c_ep3 = c_jpe ; c_sm3 = c_jms ; c_em3 = c_jme
495 c_sd2 = c_kds ; c_ed2 = c_kde ; c_sp2 = c_kps ; c_ep2 = c_kpe ; c_sm2 = c_kms ; c_em2 = c_kme
496 CASE ( DATA_ORDER_YZX )
497 c_sd3 = c_ids ; c_ed3 = c_ide ; c_sp3 = c_ips ; c_ep3 = c_ipe ; c_sm3 = c_ims ; c_em3 = c_ime
498 c_sd1 = c_jds ; c_ed1 = c_jde ; c_sp1 = c_jps ; c_ep1 = c_jpe ; c_sm1 = c_jms ; c_em1 = c_jme
499 c_sd2 = c_kds ; c_ed2 = c_kde ; c_sp2 = c_kps ; c_ep2 = c_kpe ; c_sm2 = c_kms ; c_em2 = c_kme
500 END SELECT
501
502 ALLOCATE ( intermediate_grid )
503 ALLOCATE ( intermediate_grid%parents( max_parents ) )
504 ALLOCATE ( intermediate_grid%nests( max_nests ) )
505
506 NULLIFY( intermediate_grid%sibling )
507 DO i = 1, max_nests
508 NULLIFY( intermediate_grid%nests(i)%ptr )
509 ENDDO
510 NULLIFY (intermediate_grid%next)
511 NULLIFY (intermediate_grid%same_level)
512 NULLIFY (intermediate_grid%i_start)
513 NULLIFY (intermediate_grid%j_start)
514 NULLIFY (intermediate_grid%i_end)
515 NULLIFY (intermediate_grid%j_end)
516 intermediate_grid%id = id
517 intermediate_grid%num_nests = 0
518 intermediate_grid%num_siblings = 0
519 intermediate_grid%num_parents = 1
520 intermediate_grid%max_tiles = 0
521 intermediate_grid%num_tiles_spec = 0
522 CALL find_grid_by_id ( id, head_grid, nest_grid )
523
524 nest_grid%intermediate_grid => intermediate_grid ! nest grid now has a pointer to this baby
525 intermediate_grid%parents(1)%ptr => nest_grid ! the intermediate grid considers nest its parent
526 intermediate_grid%num_parents = 1
527
528 c_sm1x = 1 ; c_em1x = 1 ; c_sm2x = 1 ; c_em2x = 1 ; c_sm3x = 1 ; c_em3x = 1
529 c_sm1y = 1 ; c_em1y = 1 ; c_sm2y = 1 ; c_em2y = 1 ; c_sm3y = 1 ; c_em3y = 1
530
531 intermediate_grid%sm31x = c_sm1x
532 intermediate_grid%em31x = c_em1x
533 intermediate_grid%sm32x = c_sm2x
534 intermediate_grid%em32x = c_em2x
535 intermediate_grid%sm33x = c_sm3x
536 intermediate_grid%em33x = c_em3x
537 intermediate_grid%sm31y = c_sm1y
538 intermediate_grid%em31y = c_em1y
539 intermediate_grid%sm32y = c_sm2y
540 intermediate_grid%em32y = c_em2y
541 intermediate_grid%sm33y = c_sm3y
542 intermediate_grid%em33y = c_em3y
543
544 #ifdef SGIALTIX
545 ! allocate space for the intermediate domain
546 CALL alloc_space_field ( intermediate_grid, intermediate_grid%id , 1, 2 , .TRUE., & ! use same id as nest
547 c_sd1, c_ed1, c_sd2, c_ed2, c_sd3, c_ed3, &
548 c_sm1, c_em1, c_sm2, c_em2, c_sm3, c_em3, &
549 c_sm1x, c_em1x, c_sm2x, c_em2x, c_sm3x, c_em3x, & ! x-xpose
550 c_sm1y, c_em1y, c_sm2y, c_em2y, c_sm3y, c_em3y ) ! y-xpose
551 #endif
552 intermediate_grid%sd31 = c_sd1
553 intermediate_grid%ed31 = c_ed1
554 intermediate_grid%sp31 = c_sp1
555 intermediate_grid%ep31 = c_ep1
556 intermediate_grid%sm31 = c_sm1
557 intermediate_grid%em31 = c_em1
558 intermediate_grid%sd32 = c_sd2
559 intermediate_grid%ed32 = c_ed2
560 intermediate_grid%sp32 = c_sp2
561 intermediate_grid%ep32 = c_ep2
562 intermediate_grid%sm32 = c_sm2
563 intermediate_grid%em32 = c_em2
564 intermediate_grid%sd33 = c_sd3
565 intermediate_grid%ed33 = c_ed3
566 intermediate_grid%sp33 = c_sp3
567 intermediate_grid%ep33 = c_ep3
568 intermediate_grid%sm33 = c_sm3
569 intermediate_grid%em33 = c_em3
570
571 CALL med_add_config_info_to_grid ( intermediate_grid )
572
573 intermediate_grid%dx = parent%dx
574 intermediate_grid%dy = parent%dy
575 intermediate_grid%dt = parent%dt
576 ENDIF
577
578 RETURN
579 END SUBROUTINE patch_domain_rsl_lite
580
581 SUBROUTINE compute_memory_dims_rsl_lite ( &
582 shw , bdx, bdy , &
583 ids, ide, jds, jde, kds, kde, &
584 ims, ime, jms, jme, kms, kme, &
585 imsx, imex, jmsx, jmex, kmsx, kmex, &
586 imsy, imey, jmsy, jmey, kmsy, kmey, &
587 ips, ipe, jps, jpe, kps, kpe, &
588 ipsx, ipex, jpsx, jpex, kpsx, kpex, &
589 ipsy, ipey, jpsy, jpey, kpsy, kpey )
590
591 USE module_machine
592 IMPLICIT NONE
593 INTEGER, INTENT(IN) :: shw, bdx, bdy
594 INTEGER, INTENT(IN) :: ids, ide, jds, jde, kds, kde
595 INTEGER, INTENT(OUT) :: ims, ime, jms, jme, kms, kme
596 INTEGER, INTENT(OUT) :: imsx, imex, jmsx, jmex, kmsx, kmex
597 INTEGER, INTENT(OUT) :: imsy, imey, jmsy, jmey, kmsy, kmey
598 INTEGER, INTENT(OUT) :: ips, ipe, jps, jpe, kps, kpe
599 INTEGER, INTENT(OUT) :: ipsx, ipex, jpsx, jpex, kpsx, kpex
600 INTEGER, INTENT(OUT) :: ipsy, ipey, jpsy, jpey, kpsy, kpey
601
602 INTEGER Px, Py, P, i, j, k
603
604 #if ( ! NMM_CORE == 1 )
605
606 ! xy decomposition
607
608 ips = -1
609 j = jds ;
610 DO i = ids, ide
611 CALL task_for_point ( i, j, ids, ide, jds, jde, ntasks_x, ntasks_y, Px, Py )
612 IF ( Px .EQ. mytask_x ) THEN
613 ipe = i
614 IF ( ips .EQ. -1 ) ips = i
615 ENDIF
616 ENDDO
617 ! handle setting the memory dimensions where there are no X elements assigned to this proc
618 IF (ips .EQ. -1 ) THEN
619 ipe = -1
620 ips = 0
621 ENDIF
622 jps = -1
623 i = ids ;
624 DO j = jds, jde
625 CALL task_for_point ( i, j, ids, ide, jds, jde, ntasks_x, ntasks_y, Px, Py )
626 IF ( Py .EQ. mytask_y ) THEN
627 jpe = j
628 IF ( jps .EQ. -1 ) jps = j
629 ENDIF
630 ENDDO
631 ! handle setting the memory dimensions where there are no Y elements assigned to this proc
632 IF (jps .EQ. -1 ) THEN
633 jpe = -1
634 jps = 0
635 ENDIF
636
637 !
638 ! description of transpose decomposition strategy for RSL LITE. 20061231jm
639 !
640 ! Here is the transpose scheme that is implemented for RSL_LITE. Upper-case
641 ! XY corresponds to the dimension of the processor mesh, lower-case xyz
642 ! corresponds to grid dimension.
643 !
644 ! xy zy zx
645 !
646 ! XxYy <--> XzYy <--> XzYx <- note x decomposed over Y procs
647 ! ^ ^
648 ! | |
649 ! +------------------+ <- this edge is costly; see below
650 !
651 ! The aim is to avoid all-to-all communication over the whole
652 ! communicator. Instead, when possible, use a transpose scheme that requires
653 ! all-to-all within dimensional communicators; that is, communicators
654 ! defined for the processes in a row or column of the processor mesh. Note,
655 ! however, it is not possible to create a ring of transposes between
656 ! xy-yz-xz decompositions without at least one of the edges in the ring
657 ! being fully all-to-all (in other words, one of the transpose edges must
658 ! rotate and not just transpose a plane of the model grid within the
659 ! processor mesh). The issue is then, where should we put this costly edge
660 ! in the transpose scheme we choose? To avoid being completely arbitrary,
661 ! we chose a scheme most natural for models that use parallel spectral
662 ! transforms, where the costly edge is the one that goes from the xz to
663 ! the xy decomposition. (It may be implemented as just a two-step transpose
664 ! back through yz.)
665 !
666 ! Additional notational convention, below. The 'x' or 'y' appended to the
667 ! dimension start or end variable refers to which grid dimension is all
668 ! on-processor in the given decomposition. That is ipsx and ipex are the
669 ! start and end for the i-dimension in the zy decomposition where x is
670 ! on-processor. ('z' is assumed for xy decomposition and not appended to
671 ! the ips, ipe, etc. variable names).
672 !
673
674 ! XzYy decomposition
675
676 kpsx = -1
677 j = jds ;
678 DO k = kds, kde
679 CALL task_for_point ( k, j, kds, kde, jds, jde, ntasks_x, ntasks_y, Px, Py )
680 IF ( Px .EQ. mytask_x ) THEN
681 kpex = k
682 IF ( kpsx .EQ. -1 ) kpsx = k
683 ENDIF
684 ENDDO
685 ! handle the case where no levels are assigned to this process (the loop above
686 ! did no iterations). The same is done for I and J; memory allocation is handled below.
687 IF (kpsx .EQ. -1 ) THEN
688 kpex = -1
689 kpsx = 0
690 ENDIF
691
692 jpsx = -1
693 k = kds ;
694 DO j = jds, jde
695 CALL task_for_point ( k, j, kds, kde, jds, jde, ntasks_x, ntasks_y, Px, Py )
696 IF ( Py .EQ. mytask_y ) THEN
697 jpex = j
698 IF ( jpsx .EQ. -1 ) jpsx = j
699 ENDIF
700 ENDDO
701 IF (jpsx .EQ. -1 ) THEN
702 jpex = -1
703 jpsx = 0
704 ENDIF
705
706 ! XzYx decomposition (note, x grid dim is decomposed over Y processor dim)
707
708 kpsy = kpsx ! same as above
709 kpey = kpex ! same as above
710
711 ipsy = -1
712 k = kds ;
713 DO i = ids, ide
714 CALL task_for_point ( i, k, ids, ide, kds, kde, ntasks_y, ntasks_x, Py, Px ) ! x and y for proc mesh reversed
715 IF ( Py .EQ. mytask_y ) THEN
716 ipey = i
717 IF ( ipsy .EQ. -1 ) ipsy = i
718 ENDIF
719 ENDDO
720 IF (ipsy .EQ. -1 ) THEN
721 ipey = -1
722 ipsy = 0
723 ENDIF
724
725
726 #else
727
728 ! In case of NMM CORE, the domain only ever runs from ids..ide-1 and jds..jde-1 so
729 ! adjust decomposition to reflect. 20051020 JM
730 ips = -1
731 j = jds ;
732 DO i = ids, ide-1
733 CALL task_for_point ( i, j, ids, ide-1, jds, jde-1, ntasks_x, ntasks_y, Px, Py )
734 IF ( Px .EQ. mytask_x ) THEN
735 ipe = i
736 IF ( Px .EQ. ntasks_x-1 ) ipe = ipe + 1
737 IF ( ips .EQ. -1 ) ips = i
738 ENDIF
739 ENDDO
740 jps = -1
741 i = ids ;
742 DO j = jds, jde-1
743 CALL task_for_point ( i, j, ids, ide-1, jds, jde-1, ntasks_x, ntasks_y, Px, Py )
744 IF ( Py .EQ. mytask_y ) THEN
745 jpe = j
746 IF ( Py .EQ. ntasks_y-1 ) jpe = jpe + 1
747 IF ( jps .EQ. -1 ) jps = j
748 ENDIF
749 ENDDO
750 #endif
751
752 ! extend the patch dimensions out shw along edges of domain
753 IF ( mytask_x .EQ. 0 ) THEN
754 ips = ips - shw
755 ipsy = ipsy - shw
756 ENDIF
757 IF ( mytask_x .EQ. ntasks_x-1 ) THEN
758 ipe = ipe + shw
759 ipey = ipey + shw
760 ENDIF
761 IF ( mytask_y .EQ. 0 ) THEN
762 jps = jps - shw
763 jpsx = jpsx - shw
764 ENDIF
765 IF ( mytask_y .EQ. ntasks_y-1 ) THEN
766 jpe = jpe + shw
767 jpex = jpex + shw
768 ENDIF
769
770 kps = 1
771 kpe = kde-kds+1
772
773 kms = 1
774 kme = kpe
775 kmsx = kpsx
776 kmex = kpex
777 kmsy = kpsy
778 kmey = kpey
779 ! handle setting the memory dimensions where there are no levels assigned to this proc
780 IF ( kpsx .EQ. 0 .AND. kpex .EQ. -1 ) THEN
781 kmsx = 0
782 kmex = 0
783 ENDIF
784 IF ( kpsy .EQ. 0 .AND. kpey .EQ. -1 ) THEN
785 kmsy = 0
786 kmey = 0
787 ENDIF
788
789 IF ( ips .EQ. 0 .AND. ipe .EQ. -1 ) THEN
790 ims = 0
791 ime = 0
792 ELSE
793 ims = max( ips - max(shw,max_halo_width), ids - bdx ) - 1
794 ime = min( ipe + max(shw,max_halo_width), ide + bdx ) + 1
795 ENDIF
796 imsx = ids
797 imex = ide
798 ipsx = imsx
799 ipex = imex
800 ! handle setting the memory dimensions where there are no Y elements assigned to this proc
801 IF ( ipsy .EQ. 0 .AND. ipey .EQ. -1 ) THEN
802 imsy = 0
803 imey = 0
804 ELSE
805 imsy = ipsy
806 imey = ipey
807 ENDIF
808
809 IF ( jps .EQ. 0 .AND. jpe .EQ. -1 ) THEN
810 jms = 0
811 jme = 0
812 ELSE
813 jms = max( jps - max(shw,max_halo_width), jds - bdy ) - 1
814 jme = min( jpe + max(shw,max_halo_width), jde + bdy ) + 1
815 ENDIF
816 jmsx = jpsx
817 jmex = jpex
818 jmsy = jds
819 jmey = jde
820 ! handle setting the memory dimensions where there are no X elements assigned to this proc
821 IF ( jpsx .EQ. 0 .AND. jpex .EQ. -1 ) THEN
822 jmsx = 0
823 jmex = 0
824 ELSE
825 jpsy = jmsy
826 jpey = jmey
827 ENDIF
828
829 END SUBROUTINE compute_memory_dims_rsl_lite
830
831 ! internal, used below for switching the argument to MPI calls
832 ! if reals are being autopromoted to doubles in the build of WRF
833 INTEGER function getrealmpitype()
834 #ifndef STUBMPI
835 IMPLICIT NONE
836 INCLUDE 'mpif.h'
837 INTEGER rtypesize, dtypesize, ierr
838 CALL mpi_type_size ( MPI_REAL, rtypesize, ierr )
839 CALL mpi_type_size ( MPI_DOUBLE_PRECISION, dtypesize, ierr )
840 IF ( RWORDSIZE .EQ. rtypesize ) THEN
841 getrealmpitype = MPI_REAL
842 ELSE IF ( RWORDSIZE .EQ. dtypesize ) THEN
843 getrealmpitype = MPI_DOUBLE_PRECISION
844 ELSE
845 CALL wrf_error_fatal ( 'RWORDSIZE or DWORDSIZE does not match any MPI type' )
846 ENDIF
847 #else
848 ! required dummy initialization for function that is never called
849 getrealmpitype = 1
850 #endif
851 RETURN
852 END FUNCTION getrealmpitype
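! Note: getrealmpitype lets the reduction routines below pass the MPI datatype that
! matches the byte size of a WRF REAL.  For example, a build with 8-byte reals
! (RWORDSIZE = 8, e.g. with real autopromotion) resolves to MPI_DOUBLE_PRECISION,
! while a default 4-byte build resolves to MPI_REAL.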
853
854 REAL FUNCTION wrf_dm_max_real ( inval )
855 IMPLICIT NONE
856 INCLUDE 'mpif.h'
857 REAL inval, retval
858 INTEGER ierr
859 CALL mpi_allreduce ( inval, retval , 1, getrealmpitype(), MPI_MAX, local_communicator, ierr )
860 wrf_dm_max_real = retval
861 END FUNCTION wrf_dm_max_real
862
863 REAL FUNCTION wrf_dm_min_real ( inval )
864 IMPLICIT NONE
865 INCLUDE 'mpif.h'
866 REAL inval, retval
867 INTEGER ierr
868 CALL mpi_allreduce ( inval, retval , 1, getrealmpitype(), MPI_MIN, local_communicator, ierr )
869 wrf_dm_min_real = retval
870 END FUNCTION wrf_dm_min_real
871
872 REAL FUNCTION wrf_dm_sum_real ( inval )
873 IMPLICIT NONE
874 INCLUDE 'mpif.h'
875 REAL inval, retval
876 INTEGER ierr
877 CALL mpi_allreduce ( inval, retval , 1, getrealmpitype(), MPI_SUM, local_communicator, ierr )
878 wrf_dm_sum_real = retval
879 END FUNCTION wrf_dm_sum_real
880
881 SUBROUTINE wrf_dm_sum_reals (inval, retval)
882 IMPLICIT NONE
883 INCLUDE 'mpif.h'
884 REAL, INTENT(IN) :: inval(:)
885 REAL, INTENT(OUT) :: retval(:)
886 INTEGER ierr
887 CALL mpi_allreduce ( inval, retval, SIZE(inval), getrealmpitype(), MPI_SUM, local_communicator, ierr )
888 END SUBROUTINE wrf_dm_sum_reals
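! Hypothetical usage sketch (variable names are illustrative only):
!     REAL :: local_sums(3), global_sums(3)
!     local_sums = (/ local_mass, local_energy, local_moisture /)
!     CALL wrf_dm_sum_reals( local_sums, global_sums )
! sums each element across all tasks in a single allreduce, which is cheaper than
! three separate wrf_dm_sum_real calls.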
889
890 INTEGER FUNCTION wrf_dm_sum_integer ( inval )
891 IMPLICIT NONE
892 INCLUDE 'mpif.h'
893 INTEGER inval, retval
894 INTEGER ierr
895 CALL mpi_allreduce ( inval, retval , 1, MPI_INTEGER, MPI_SUM, local_communicator, ierr )
896 wrf_dm_sum_integer = retval
897 END FUNCTION wrf_dm_sum_integer
898
899 SUBROUTINE wrf_dm_maxval_real ( val, idex, jdex )
900 IMPLICIT NONE
901 INCLUDE 'mpif.h'
902 REAL val, val_all( ntasks )
903 INTEGER idex, jdex, ierr
904 INTEGER dex(2)
905 INTEGER dex_all (2,ntasks)
906 INTEGER i
907
908 dex(1) = idex ; dex(2) = jdex
909 CALL mpi_allgather ( dex, 2, MPI_INTEGER, dex_all , 2, MPI_INTEGER, local_communicator, ierr )
910 CALL mpi_allgather ( val, 1, getrealmpitype(), val_all , 1, getrealmpitype(), local_communicator, ierr )
911 val = val_all(1)
912 idex = dex_all(1,1) ; jdex = dex_all(2,1)
913 DO i = 2, ntasks
914 IF ( val_all(i) .GT. val ) THEN
915 val = val_all(i)
916 idex = dex_all(1,i)
917 jdex = dex_all(2,i)
918 ENDIF
919 ENDDO
920 END SUBROUTINE wrf_dm_maxval_real
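! Note: the wrf_dm_maxval_* routines implement a collective "argmax": every task
! contributes (val, idex, jdex) through two allgathers, then each task scans the
! gathered arrays so all ranks return the same global maximum together with the
! (i,j) index supplied by its owner.  This is equivalent in spirit to an
! MPI_ALLREDUCE with MPI_MAXLOC, at the cost of O(ntasks) storage per call.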
921
922 #ifndef PROMOTE_FLOAT
923 SUBROUTINE wrf_dm_maxval_doubleprecision ( val, idex, jdex )
924 IMPLICIT NONE
925 INCLUDE 'mpif.h'
926 DOUBLE PRECISION val, val_all( ntasks )
927 INTEGER idex, jdex, ierr
928 INTEGER dex(2)
929 INTEGER dex_all (2,ntasks)
930 INTEGER i
931
932 dex(1) = idex ; dex(2) = jdex
933 CALL mpi_allgather ( dex, 2, MPI_INTEGER, dex_all , 2, MPI_INTEGER, local_communicator, ierr )
934 CALL mpi_allgather ( val, 1, MPI_DOUBLE_PRECISION, val_all , 1, MPI_DOUBLE_PRECISION, local_communicator, ierr )
935 val = val_all(1)
936 idex = dex_all(1,1) ; jdex = dex_all(2,1)
937 DO i = 2, ntasks
938 IF ( val_all(i) .GT. val ) THEN
939 val = val_all(i)
940 idex = dex_all(1,i)
941 jdex = dex_all(2,i)
942 ENDIF
943 ENDDO
944 END SUBROUTINE wrf_dm_maxval_doubleprecision
945 #endif
946
947 SUBROUTINE wrf_dm_maxval_integer ( val, idex, jdex )
948 IMPLICIT NONE
949 INCLUDE 'mpif.h'
950 INTEGER val, val_all( ntasks )
951 INTEGER idex, jdex, ierr
952 INTEGER dex(2)
953 INTEGER dex_all (2,ntasks)
954 INTEGER i
955
956 dex(1) = idex ; dex(2) = jdex
957 CALL mpi_allgather ( dex, 2, MPI_INTEGER, dex_all , 2, MPI_INTEGER, local_communicator, ierr )
958 CALL mpi_allgather ( val, 1, MPI_INTEGER, val_all , 1, MPI_INTEGER, local_communicator, ierr )
959 val = val_all(1)
960 idex = dex_all(1,1) ; jdex = dex_all(2,1)
961 DO i = 2, ntasks
962 IF ( val_all(i) .GT. val ) THEN
963 val = val_all(i)
964 idex = dex_all(1,i)
965 jdex = dex_all(2,i)
966 ENDIF
967 ENDDO
968 END SUBROUTINE wrf_dm_maxval_integer
969
970 ! For HWRF some additional computation is required. This is gopal's doing
971
972 SUBROUTINE wrf_dm_minval_real ( val, idex, jdex )
973 IMPLICIT NONE
974 REAL val, val_all( ntasks )
975 INTEGER idex, jdex, ierr
976 INTEGER dex(2)
977 INTEGER dex_all (2,ntasks)
978 ! <DESCRIPTION>
979 ! Collective operation. Each processor calls passing a local value and its index; on return
980 ! all processors are passed back the minimum of all values passed and its index.
981 !
982 ! </DESCRIPTION>
983 INTEGER i, comm
984 #ifndef STUBMPI
985 INCLUDE 'mpif.h'
986
987 CALL wrf_get_dm_communicator ( comm )
988 dex(1) = idex ; dex(2) = jdex
989 CALL mpi_allgather ( dex, 2, MPI_INTEGER, dex_all , 2, MPI_INTEGER, comm, ierr )
990 CALL mpi_allgather ( val, 1, MPI_REAL, val_all , 1, MPI_REAL, comm, ierr )
991 val = val_all(1)
992 idex = dex_all(1,1) ; jdex = dex_all(2,1)
993 DO i = 2, ntasks
994 IF ( val_all(i) .LT. val ) THEN
995 val = val_all(i)
996 idex = dex_all(1,i)
997 jdex = dex_all(2,i)
998 ENDIF
999 ENDDO
1000 #endif
1001 END SUBROUTINE wrf_dm_minval_real
1002
1003 #ifndef PROMOTE_FLOAT
1004 SUBROUTINE wrf_dm_minval_doubleprecision ( val, idex, jdex )
1005 IMPLICIT NONE
1006 DOUBLE PRECISION val, val_all( ntasks )
1007 INTEGER idex, jdex, ierr
1008 INTEGER dex(2)
1009 INTEGER dex_all (2,ntasks)
1010 ! <DESCRIPTION>
1011 ! Collective operation. Each processor calls passing a local value and its index; on return
1012 ! all processors are passed back the minimum of all values passed and its index.
1013 !
1014 ! </DESCRIPTION>
1015 INTEGER i, comm
1016 #ifndef STUBMPI
1017 INCLUDE 'mpif.h'
1018
1019 CALL wrf_get_dm_communicator ( comm )
1020 dex(1) = idex ; dex(2) = jdex
1021 CALL mpi_allgather ( dex, 2, MPI_INTEGER, dex_all , 2, MPI_INTEGER, comm, ierr )
1022 CALL mpi_allgather ( val, 1, MPI_DOUBLE_PRECISION, val_all , 1, MPI_DOUBLE_PRECISION, comm, ierr )
1023 val = val_all(1)
1024 idex = dex_all(1,1) ; jdex = dex_all(2,1)
1025 DO i = 2, ntasks
1026 IF ( val_all(i) .LT. val ) THEN
1027 val = val_all(i)
1028 idex = dex_all(1,i)
1029 jdex = dex_all(2,i)
1030 ENDIF
1031 ENDDO
1032 #endif
1033 END SUBROUTINE wrf_dm_minval_doubleprecision
1034 #endif
1035
1036 SUBROUTINE wrf_dm_minval_integer ( val, idex, jdex )
1037 IMPLICIT NONE
1038 INTEGER val, val_all( ntasks )
1039 INTEGER idex, jdex, ierr
1040 INTEGER dex(2)
1041 INTEGER dex_all (2,ntasks)
1042 ! <DESCRIPTION>
1043 ! Collective operation. Each processor calls passing a local value and its index; on return
1044 ! all processors are passed back the minimum of all values passed and its index.
1045 !
1046 ! </DESCRIPTION>
1047 INTEGER i, comm
1048 #ifndef STUBMPI
1049 INCLUDE 'mpif.h'
1050
1051 CALL wrf_get_dm_communicator ( comm )
1052 dex(1) = idex ; dex(2) = jdex
1053 CALL mpi_allgather ( dex, 2, MPI_INTEGER, dex_all , 2, MPI_INTEGER, comm, ierr )
1054 CALL mpi_allgather ( val, 1, MPI_INTEGER, val_all , 1, MPI_INTEGER, comm, ierr )
1055 val = val_all(1)
1056 idex = dex_all(1,1) ; jdex = dex_all(2,1)
1057 DO i = 2, ntasks
1058 IF ( val_all(i) .LT. val ) THEN
1059 val = val_all(i)
1060 idex = dex_all(1,i)
1061 jdex = dex_all(2,i)
1062 ENDIF
1063 ENDDO
1064 #endif
1065 END SUBROUTINE wrf_dm_minval_integer ! End of gopal's doing
1066
1067 SUBROUTINE split_communicator
1068 IMPLICIT NONE
1069 INCLUDE 'mpif.h'
1070 LOGICAL mpi_inited
1071 INTEGER mpi_comm_here, mpi_comm_local, comdup, mytask, ntasks, ierr, io_status
1072 INTEGER i, j
1073 INTEGER, ALLOCATABLE :: icolor(:)
1074 INTEGER tasks_per_split
1075 NAMELIST /namelist_split/ tasks_per_split
1076
1077 CALL MPI_INITIALIZED( mpi_inited, ierr )
1078 IF ( .NOT. mpi_inited ) THEN
1079 CALL mpi_init ( ierr )
1080 CALL wrf_set_dm_communicator( MPI_COMM_WORLD )
1081 CALL wrf_termio_dup
1082 ENDIF
1083 CALL wrf_get_dm_communicator( mpi_comm_here )
1084
1085 CALL MPI_Comm_rank ( mpi_comm_here, mytask, ierr ) ;
1086 CALL mpi_comm_size ( mpi_comm_here, ntasks, ierr ) ;
1087 #ifdef WRF_NL
1088 CALL mpi_comm_split( MPI_COMM_WORLD , 3 , 1 , mpi_comm_local, ierr )
1089 #else
1090 CALL mpi_comm_split( MPI_COMM_WORLD , 4 , 1 , mpi_comm_local, ierr )
1091 #endif
1092 CALL wrf_set_dm_communicator( mpi_comm_local )
1093 CALL wrf_termio_dup
1094 CALL wrf_get_dm_communicator( mpi_comm_here )
1095
1096 IF ( mytask .EQ. 0 ) THEN
1097 OPEN ( unit=27, file="namelist.input", form="formatted", status="old" )
1098 tasks_per_split = ntasks
1099 READ ( 27 , NML = namelist_split, IOSTAT=io_status )
1100 CLOSE ( 27 )
1101 ENDIF
1102 CALL mpi_bcast( io_status, 1 , MPI_INTEGER , 0 , mpi_comm_here, ierr )
1103 IF ( io_status .NE. 0 ) THEN
1104 RETURN ! just ignore and return
1105 ENDIF
1106 CALL mpi_bcast( tasks_per_split, 1 , MPI_INTEGER , 0 , mpi_comm_here, ierr )
1107 IF ( tasks_per_split .GT. ntasks .OR. tasks_per_split .LE. 0 ) RETURN
1108 IF ( mod( ntasks, tasks_per_split ) .NE. 0 ) THEN
1109 CALL wrf_message( 'WARNING: tasks_per_split does not evenly divide ntasks. Some tasks will be wasted.' )
1110 ENDIF
1111
1112 ALLOCATE( icolor(ntasks) )
1113 j = 0
1114 DO WHILE ( j .LT. ntasks / tasks_per_split )
1115 DO i = 1, tasks_per_split
1116 icolor( i + j * tasks_per_split ) = j
1117 ENDDO
1118 j = j + 1
1119 ENDDO
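! Worked example (hypothetical values): with ntasks = 8 and tasks_per_split = 4
! the loop above yields icolor = (0,0,0,0,1,1,1,1), so the MPI_Comm_split below
! produces two 4-task communicators; tasks left over when tasks_per_split does not
! evenly divide ntasks are the "wasted" tasks warned about above.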
1120
1121 CALL MPI_Comm_dup(mpi_comm_here,comdup,ierr)
1122 CALL MPI_Comm_split(comdup,icolor(mytask+1),mytask,mpi_comm_local,ierr)
1123 CALL wrf_set_dm_communicator( mpi_comm_local )
1124
1125 DEALLOCATE( icolor )
1126
1127 END SUBROUTINE split_communicator
1128
1129 SUBROUTINE init_module_dm
1130 IMPLICIT NONE
1131 INTEGER mpi_comm_local, ierr, mytask, nproc
1132 INCLUDE 'mpif.h'
1133 LOGICAL mpi_inited
1134 CALL mpi_initialized( mpi_inited, ierr )
1135 IF ( .NOT. mpi_inited ) THEN
1136 ! If MPI has not been initialized then initialize it and
1137 ! make comm_world the communicator
1138 ! Otherwise, something else (e.g. quilt-io) has already
1139 ! initialized MPI, so just grab the communicator that
1140 ! should already be stored and use that.
1141 CALL mpi_init ( ierr )
1142 CALL wrf_termio_dup
1143 CALL wrf_set_dm_communicator ( MPI_COMM_WORLD )
1144 ENDIF
1145 CALL wrf_get_dm_communicator( mpi_comm_local )
1146 END SUBROUTINE init_module_dm
1147
1148 ! stub
1149 SUBROUTINE wrf_dm_move_nest ( parent, nest, dx, dy )
1150 USE module_domain
1151 IMPLICIT NONE
1152 TYPE (domain), INTENT(INOUT) :: parent, nest
1153 INTEGER, INTENT(IN) :: dx,dy
1154 RETURN
1155 END SUBROUTINE wrf_dm_move_nest
1156
1157 !------------------------------------------------------------------------------
1158 SUBROUTINE get_full_obs_vector( nsta, nerrf, niobf, &
1159 mp_local_uobmask, &
1160 mp_local_vobmask, &
1161 mp_local_cobmask, errf )
1162
1163 !------------------------------------------------------------------------------
1164 ! PURPOSE: Do MPI allgatherv operation across processors to get the
1165 ! errors at each observation point on all processors.
1166 !
1167 !------------------------------------------------------------------------------
1168 #ifndef STUBMPI
1169 INCLUDE 'mpif.h'
1170
1171 INTEGER, INTENT(IN) :: nsta ! Observation index.
1172 INTEGER, INTENT(IN) :: nerrf ! Number of error fields.
1173 INTEGER, INTENT(IN) :: niobf ! Number of observations.
1174 LOGICAL, INTENT(IN) :: MP_LOCAL_UOBMASK(NIOBF)
1175 LOGICAL, INTENT(IN) :: MP_LOCAL_VOBMASK(NIOBF)
1176 LOGICAL, INTENT(IN) :: MP_LOCAL_COBMASK(NIOBF)
1177 REAL, INTENT(INOUT) :: errf(nerrf, niobf)
1178
1179 ! Local declarations
1180 integer i, n, nlocal_dot, nlocal_crs
1181 REAL UVT_BUFFER(NIOBF) ! Buffer for holding U, V, or T
1182 REAL QRK_BUFFER(NIOBF) ! Buffer for holding Q or RKO
1183 REAL SFP_BUFFER(NIOBF) ! Buffer for holding Surface pressure
1184 INTEGER N_BUFFER(NIOBF)
1185 REAL FULL_BUFFER(NIOBF)
1186 INTEGER IFULL_BUFFER(NIOBF)
1187 INTEGER IDISPLACEMENT(1024) ! HARD CODED MAX NUMBER OF PROCESSORS
1188 INTEGER ICOUNT(1024) ! HARD CODED MAX NUMBER OF PROCESSORS
1189
1190 INTEGER :: MPI_COMM_COMP ! MPI group communicator
1191 INTEGER :: NPROCS ! Number of processors
1192 INTEGER :: IERR ! Error code from MPI routines
1193
1194 ! Get communicator for MPI operations.
1195 CALL WRF_GET_DM_COMMUNICATOR(MPI_COMM_COMP)
1196
1197 ! Get the number of processors in the communicator.
1198 CALL MPI_COMM_SIZE( MPI_COMM_COMP, NPROCS, IERR )
1199
1200 ! DO THE U FIELD
1201 NLOCAL_DOT = 0
1202 DO N = 1, NSTA
1203 IF ( MP_LOCAL_UOBMASK(N) ) THEN ! USE U-POINT MASK
1204 NLOCAL_DOT = NLOCAL_DOT + 1
1205 UVT_BUFFER(NLOCAL_DOT) = ERRF(1,N) ! U WIND COMPONENT
1206 SFP_BUFFER(NLOCAL_DOT) = ERRF(7,N) ! SURFACE PRESSURE
1207 QRK_BUFFER(NLOCAL_DOT) = ERRF(9,N) ! RKO
1208 N_BUFFER(NLOCAL_DOT) = N
1209 ENDIF
1210 ENDDO
1211 CALL MPI_ALLGATHER(NLOCAL_DOT,1,MPI_INTEGER, &
1212 ICOUNT,1,MPI_INTEGER, &
1213 MPI_COMM_COMP,IERR)
1214 I = 1
1215
1216 IDISPLACEMENT(1) = 0
1217 DO I = 2, NPROCS
1218 IDISPLACEMENT(I) = IDISPLACEMENT(I-1) + ICOUNT(I-1)
1219 ENDDO
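! Note: ICOUNT holds the per-task contribution counts from the MPI_ALLGATHER above,
! and IDISPLACEMENT is their exclusive prefix sum, e.g. ICOUNT = (3,1,2) gives
! IDISPLACEMENT = (0,3,4); together they place each task's segment at the right
! offset in the MPI_ALLGATHERV calls that follow.  The same pattern is repeated
! for the V and mass-point fields below.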
1220 CALL MPI_ALLGATHERV( N_BUFFER, NLOCAL_DOT, MPI_INTEGER, &
1221 IFULL_BUFFER, ICOUNT, IDISPLACEMENT, &
1222 MPI_INTEGER, MPI_COMM_COMP, IERR)
1223 ! U
1224 CALL MPI_ALLGATHERV( UVT_BUFFER, NLOCAL_DOT, MPI_REAL, &
1225 FULL_BUFFER, ICOUNT, IDISPLACEMENT, &
1226 MPI_REAL, MPI_COMM_COMP, IERR)
1227 DO N = 1, NSTA
1228 ERRF(1,IFULL_BUFFER(N)) = FULL_BUFFER(N)
1229 ENDDO
1230 ! SURF PRESS AT U-POINTS
1231 CALL MPI_ALLGATHERV( SFP_BUFFER, NLOCAL_DOT, MPI_REAL, &
1232 FULL_BUFFER, ICOUNT, IDISPLACEMENT, &
1233 MPI_REAL, MPI_COMM_COMP, IERR)
1234 DO N = 1, NSTA
1235 ERRF(7,IFULL_BUFFER(N)) = FULL_BUFFER(N)
1236 ENDDO
1237 ! RKO
1238 CALL MPI_ALLGATHERV( QRK_BUFFER, NLOCAL_DOT, MPI_REAL, &
1239 FULL_BUFFER, ICOUNT, IDISPLACEMENT, &
1240 MPI_REAL, MPI_COMM_COMP, IERR)
1241 DO N = 1, NSTA
1242 ERRF(9,IFULL_BUFFER(N)) = FULL_BUFFER(N)
1243 ENDDO
1244
1245 ! DO THE V FIELD
1246 NLOCAL_DOT = 0
1247 DO N = 1, NSTA
1248 IF ( MP_LOCAL_VOBMASK(N) ) THEN ! USE V-POINT MASK
1249 NLOCAL_DOT = NLOCAL_DOT + 1
1250 UVT_BUFFER(NLOCAL_DOT) = ERRF(2,N) ! V WIND COMPONENT
1251 SFP_BUFFER(NLOCAL_DOT) = ERRF(8,N) ! SURFACE PRESSURE
1252 N_BUFFER(NLOCAL_DOT) = N
1253 ENDIF
1254 ENDDO
1255 CALL MPI_ALLGATHER(NLOCAL_DOT,1,MPI_INTEGER, &
1256 ICOUNT,1,MPI_INTEGER, &
1257 MPI_COMM_COMP,IERR)
1258 I = 1
1259
1260 IDISPLACEMENT(1) = 0
1261 DO I = 2, NPROCS
1262 IDISPLACEMENT(I) = IDISPLACEMENT(I-1) + ICOUNT(I-1)
1263 ENDDO
1264 CALL MPI_ALLGATHERV( N_BUFFER, NLOCAL_DOT, MPI_INTEGER, &
1265 IFULL_BUFFER, ICOUNT, IDISPLACEMENT, &
1266 MPI_INTEGER, MPI_COMM_COMP, IERR)
1267 ! V
1268 CALL MPI_ALLGATHERV( UVT_BUFFER, NLOCAL_DOT, MPI_REAL, &
1269 FULL_BUFFER, ICOUNT, IDISPLACEMENT, &
1270 MPI_REAL, MPI_COMM_COMP, IERR)
1271 DO N = 1, NSTA
1272 ERRF(2,IFULL_BUFFER(N)) = FULL_BUFFER(N)
1273 ENDDO
1274 ! SURF PRESS AT V-POINTS
1275 CALL MPI_ALLGATHERV( SFP_BUFFER, NLOCAL_DOT, MPI_REAL, &
1276 FULL_BUFFER, ICOUNT, IDISPLACEMENT, &
1277 MPI_REAL, MPI_COMM_COMP, IERR)
1278 DO N = 1, NSTA
1279 ERRF(8,IFULL_BUFFER(N)) = FULL_BUFFER(N)
1280 ENDDO
1281
1282 ! DO THE CROSS FIELDS, T AND Q
1283 NLOCAL_CRS = 0
1284 DO N = 1, NSTA
1285 IF ( MP_LOCAL_COBMASK(N) ) THEN ! USE MASS-POINT MASK
1286 NLOCAL_CRS = NLOCAL_CRS + 1
1287 UVT_BUFFER(NLOCAL_CRS) = ERRF(3,N) ! TEMPERATURE
1288 QRK_BUFFER(NLOCAL_CRS) = ERRF(4,N) ! MOISTURE
1289 SFP_BUFFER(NLOCAL_CRS) = ERRF(6,N) ! SURFACE PRESSURE
1290 N_BUFFER(NLOCAL_CRS) = N
1291 ENDIF
1292 ENDDO
1293 CALL MPI_ALLGATHER(NLOCAL_CRS,1,MPI_INTEGER, &
1294 ICOUNT,1,MPI_INTEGER, &
1295 MPI_COMM_COMP,IERR)
1296 IDISPLACEMENT(1) = 0
1297 DO I = 2, NPROCS
1298 IDISPLACEMENT(I) = IDISPLACEMENT(I-1) + ICOUNT(I-1)
1299 ENDDO
1300 CALL MPI_ALLGATHERV( N_BUFFER, NLOCAL_CRS, MPI_INTEGER, &
1301 IFULL_BUFFER, ICOUNT, IDISPLACEMENT, &
1302 MPI_INTEGER, MPI_COMM_COMP, IERR)
1303 ! T
1304 CALL MPI_ALLGATHERV( UVT_BUFFER, NLOCAL_CRS, MPI_REAL, &
1305 FULL_BUFFER, ICOUNT, IDISPLACEMENT, &
1306 MPI_REAL, MPI_COMM_COMP, IERR)
1307
1308 DO N = 1, NSTA
1309 ERRF(3,IFULL_BUFFER(N)) = FULL_BUFFER(N)
1310 ENDDO
1311 ! Q
1312 CALL MPI_ALLGATHERV( QRK_BUFFER, NLOCAL_CRS, MPI_REAL, &
1313 FULL_BUFFER, ICOUNT, IDISPLACEMENT, &
1314 MPI_REAL, MPI_COMM_COMP, IERR)
1315 DO N = 1, NSTA
1316 ERRF(4,IFULL_BUFFER(N)) = FULL_BUFFER(N)
1317 ENDDO
1318 ! SURF PRESS AT MASS POINTS
1319 CALL MPI_ALLGATHERV( SFP_BUFFER, NLOCAL_CRS, MPI_REAL, &
1320 FULL_BUFFER, ICOUNT, IDISPLACEMENT, &
1321 MPI_REAL, MPI_COMM_COMP, IERR)
1322 DO N = 1, NSTA
1323 ERRF(6,IFULL_BUFFER(N)) = FULL_BUFFER(N)
1324 ENDDO
1325 #endif
1326 END SUBROUTINE get_full_obs_vector
1327
1328 END MODULE module_dm
1329
1330 !=========================================================================
1331 ! wrf_dm_patch_domain has to be outside the module because it is called
1332 ! by a routine in module_domain but itself USEs module_domain (a circular dependency otherwise)
1333
1334 SUBROUTINE wrf_dm_patch_domain ( id , domdesc , parent_id , parent_domdesc , &
1335 sd1 , ed1 , sp1 , ep1 , sm1 , em1 , &
1336 sd2 , ed2 , sp2 , ep2 , sm2 , em2 , &
1337 sd3 , ed3 , sp3 , ep3 , sm3 , em3 , &
1338 sp1x , ep1x , sm1x , em1x , &
1339 sp2x , ep2x , sm2x , em2x , &
1340 sp3x , ep3x , sm3x , em3x , &
1341 sp1y , ep1y , sm1y , em1y , &
1342 sp2y , ep2y , sm2y , em2y , &
1343 sp3y , ep3y , sm3y , em3y , &
1344 bdx , bdy )
1345 USE module_domain
1346 USE module_dm
1347 IMPLICIT NONE
1348
1349 INTEGER, INTENT(IN) :: sd1 , ed1 , sd2 , ed2 , sd3 , ed3 , bdx , bdy
1350 INTEGER, INTENT(OUT) :: sp1 , ep1 , sp2 , ep2 , sp3 , ep3 , &
1351 sm1 , em1 , sm2 , em2 , sm3 , em3
1352 INTEGER :: sp1x , ep1x , sp2x , ep2x , sp3x , ep3x , &
1353 sm1x , em1x , sm2x , em2x , sm3x , em3x
1354 INTEGER :: sp1y , ep1y , sp2y , ep2y , sp3y , ep3y , &
1355 sm1y , em1y , sm2y , em2y , sm3y , em3y
1356 INTEGER, INTENT(INOUT):: id , domdesc , parent_id , parent_domdesc
1357
1358 TYPE(domain), POINTER :: parent
1359 TYPE(domain), POINTER :: grid_ptr
1360
1361 ! this is necessary because we cannot pass parent directly into
1362 ! wrf_dm_patch_domain because creating the correct interface definitions
1363 ! would generate a circular USE reference between module_domain and module_dm
1364 ! see comment this date in module_domain for more information. JM 20020416
1365
1366 NULLIFY( parent )
1367 grid_ptr => head_grid
1368 CALL find_grid_by_id( parent_id , grid_ptr , parent )
1369
1370 CALL patch_domain_rsl_lite ( id , parent, parent_id , &
1371 sd1 , ed1 , sp1 , ep1 , sm1 , em1 , &
1372 sd2 , ed2 , sp2 , ep2 , sm2 , em2 , &
1373 sd3 , ed3 , sp3 , ep3 , sm3 , em3 , &
1374 sp1x , ep1x , sm1x , em1x , &
1375 sp2x , ep2x , sm2x , em2x , &
1376 sp3x , ep3x , sm3x , em3x , &
1377 sp1y , ep1y , sm1y , em1y , &
1378 sp2y , ep2y , sm2y , em2y , &
1379 sp3y , ep3y , sm3y , em3y , &
1380 bdx , bdy )
1381
1382 RETURN
1383 END SUBROUTINE wrf_dm_patch_domain
1384
1385 SUBROUTINE wrf_termio_dup
1386 IMPLICIT NONE
1387 INCLUDE 'mpif.h'
1388 INTEGER mytask, ntasks, ierr
1389 CALL mpi_comm_size(MPI_COMM_WORLD, ntasks, ierr )
1390 CALL mpi_comm_rank(MPI_COMM_WORLD, mytask, ierr )
1391 write(0,*)'starting wrf task ',mytask,' of ',ntasks
1392 CALL rsl_error_dup1( mytask )
1393 END SUBROUTINE wrf_termio_dup
1394
1395 SUBROUTINE wrf_get_myproc( myproc )
1396 USE module_dm
1397 IMPLICIT NONE
1398 INTEGER myproc
1399 myproc = mytask
1400 RETURN
1401 END SUBROUTINE wrf_get_myproc
1402
1403 SUBROUTINE wrf_get_nproc( nproc )
1404 USE module_dm
1405 IMPLICIT NONE
1406 INTEGER nproc
1407 nproc = ntasks
1408 RETURN
1409 END SUBROUTINE wrf_get_nproc
1410
1411 SUBROUTINE wrf_get_nprocx( nprocx )
1412 USE module_dm
1413 IMPLICIT NONE
1414 INTEGER nprocx
1415 nprocx = ntasks_x
1416 RETURN
1417 END SUBROUTINE wrf_get_nprocx
1418
1419 SUBROUTINE wrf_get_nprocy( nprocy )
1420 USE module_dm
1421 IMPLICIT NONE
1422 INTEGER nprocy
1423 nprocy = ntasks_y
1424 RETURN
1425 END SUBROUTINE wrf_get_nprocy
1426
1427 SUBROUTINE wrf_dm_bcast_bytes ( buf , size )
1428 USE module_dm
1429 IMPLICIT NONE
1430 INCLUDE 'mpif.h'
1431 INTEGER size
1432 #ifndef NEC
1433 INTEGER*1 BUF(size)
1434 #else
1435 CHARACTER*1 BUF(size)
1436 #endif
1437 CALL BYTE_BCAST ( buf , size, local_communicator )
1438 RETURN
1439 END SUBROUTINE wrf_dm_bcast_bytes
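! Hypothetical usage sketch (buffer name is illustrative only): the typed wrappers
! below simply scale the element count by the configured word size before calling
! wrf_dm_bcast_bytes, e.g.
!     INTEGER :: nml_flags(10)
!     CALL wrf_dm_bcast_integer( nml_flags, 10 )   ! broadcasts 10*IWORDSIZE bytes
! so the payload is broadcast from task zero of the model communicator to all tasks.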
1440
1441 SUBROUTINE wrf_dm_bcast_string( BUF, N1 )
1442 IMPLICIT NONE
1443 INTEGER n1
1444 ! <DESCRIPTION>
1445 ! Collective operation. Given a string and a size in characters on task zero, broadcast and return that buffer on all tasks.
1446 !
1447 ! </DESCRIPTION>
1448 CHARACTER*(*) buf
1449 INTEGER ibuf(256),i,n
1450 CHARACTER*256 tstr
1451 n = n1
1452 ! Root task is required to have the correct value of N1, other tasks
1453 ! might not have the correct value.
1454 CALL wrf_dm_bcast_integer( n , 1 )
1455 IF (n .GT. 256) n = 256
1456 IF (n .GT. 0 ) then
1457 DO i = 1, n
1458 ibuf(I) = ichar(buf(I:I))
1459 ENDDO
1460 CALL wrf_dm_bcast_integer( ibuf, n )
1461 buf = ''
1462 DO i = 1, n
1463 buf(i:i) = char(ibuf(i))
1464 ENDDO
1465 ENDIF
1466 RETURN
1467 END SUBROUTINE wrf_dm_bcast_string
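! Note: wrf_dm_bcast_string first broadcasts the length, then packs up to 256
! characters into an integer array with ichar(), broadcasts that, and unpacks with
! char() on every task; strings longer than 256 characters are truncated.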
1468
1469 SUBROUTINE wrf_dm_bcast_integer( BUF, N1 )
1470 IMPLICIT NONE
1471 INTEGER n1
1472 INTEGER buf(*)
1473 CALL wrf_dm_bcast_bytes ( BUF , N1 * IWORDSIZE )
1474 RETURN
1475 END SUBROUTINE wrf_dm_bcast_integer
1476
1477 SUBROUTINE wrf_dm_bcast_double( BUF, N1 )
1478 IMPLICIT NONE
1479 INTEGER n1
1480 ! this next declaration is REAL, not DOUBLE PRECISION because it will be autopromoted
1481 ! to double precision by the compiler when WRF is compiled for 8 byte reals. The only reason
1482 ! for having this separate routine is so the correct word size (DWORDSIZE) is passed to
1483 ! the byte-level broadcast; since the buffer is not indexed here its declared type does not matter
1484 REAL buf(*)
1485 CALL wrf_dm_bcast_bytes ( BUF , N1 * DWORDSIZE )
1486 RETURN
1487 END SUBROUTINE wrf_dm_bcast_double
1488
1489 SUBROUTINE wrf_dm_bcast_real( BUF, N1 )
1490 IMPLICIT NONE
1491 INTEGER n1
1492 REAL buf(*)
1493 CALL wrf_dm_bcast_bytes ( BUF , N1 * RWORDSIZE )
1494 RETURN
1495 END SUBROUTINE wrf_dm_bcast_real
1496
1497 SUBROUTINE wrf_dm_bcast_logical( BUF, N1 )
1498 IMPLICIT NONE
1499 INTEGER n1
1500 LOGICAL buf(*)
1501 CALL wrf_dm_bcast_bytes ( BUF , N1 * LWORDSIZE )
1502 RETURN
1503 END SUBROUTINE wrf_dm_bcast_logical
1504
1505 SUBROUTINE write_68( grid, v , s , &
1506 ids, ide, jds, jde, kds, kde, &
1507 ims, ime, jms, jme, kms, kme, &
1508 its, ite, jts, jte, kts, kte )
1509 USE module_domain
1510 IMPLICIT NONE
1511 TYPE(domain) , INTENT (INOUT) :: grid
1512 CHARACTER *(*) s
1513 INTEGER ids, ide, jds, jde, kds, kde, &
1514 ims, ime, jms, jme, kms, kme, &
1515 its, ite, jts, jte, kts, kte
1516 REAL, DIMENSION( ims:ime , kms:kme, jms:jme ) :: v
1517
1518 INTEGER i,j,k,ierr
1519
1520 logical, external :: wrf_dm_on_monitor
1521 real globbuf( ids:ide, kds:kde, jds:jde )
1522 character*3 ord, stag
1523
1524 if ( kds == kde ) then
1525 ord = 'xy'
1526 stag = 'xy'
1527 CALL wrf_patch_to_global_real ( v, globbuf, grid%domdesc, stag, ord, &
1528 ids, ide, jds, jde, kds, kde, &
1529 ims, ime, jms, jme, kms, kme, &
1530 its, ite, jts, jte, kts, kte )
1531 else
1532
1533 stag = 'xyz'
1534 ord = 'xzy'
1535 CALL wrf_patch_to_global_real ( v, globbuf, grid%domdesc, stag, ord, &
1536 ids, ide, kds, kde, jds, jde, &
1537 ims, ime, kms, kme, jms, jme, &
1538 its, ite, kts, kte, jts, jte )
1539 endif
1540
1541
1542 if ( wrf_dm_on_monitor() ) THEN
1543 WRITE(68,*) ide-ids+1, jde-jds+1 , s
1544 DO j = jds, jde
1545 DO i = ids, ide
1546 WRITE(68,*) globbuf(i,1,j)
1547 ENDDO
1548 ENDDO
1549 endif
1550
1551 RETURN
1552 END
1553
1554 SUBROUTINE wrf_abort
1555 IMPLICIT NONE
1556 INCLUDE 'mpif.h'
1557 INTEGER ierr
1558 CALL mpi_abort(MPI_COMM_WORLD,1,ierr)
1559 END SUBROUTINE wrf_abort
1560
1561 SUBROUTINE wrf_dm_shutdown
1562 IMPLICIT NONE
1563 INTEGER ierr
1564 CALL MPI_FINALIZE( ierr )
1565 RETURN
1566 END SUBROUTINE wrf_dm_shutdown
1567
1568 LOGICAL FUNCTION wrf_dm_on_monitor()
1569 USE module_dm
1570 IMPLICIT NONE
1571 INCLUDE 'mpif.h'
1572 INTEGER tsk, ierr, mpi_comm_local
1573 CALL wrf_get_dm_communicator( mpi_comm_local )
1574 CALL mpi_comm_rank ( mpi_comm_local, tsk , ierr )
1575 wrf_dm_on_monitor = tsk .EQ. 0
1576 RETURN
1577 END FUNCTION wrf_dm_on_monitor
1578
1579 INTEGER FUNCTION wrf_dm_monitor_rank()
1580 USE module_dm
1581 IMPLICIT NONE
1582 wrf_dm_monitor_rank = 0
1583 RETURN
1584 END FUNCTION wrf_dm_monitor_rank
1585
1586 SUBROUTINE wrf_get_dm_communicator ( communicator )
1587 USE module_dm
1588 IMPLICIT NONE
1589 INTEGER , INTENT(OUT) :: communicator
1590 communicator = local_communicator
1591 RETURN
1592 END SUBROUTINE wrf_get_dm_communicator
1593
1594 SUBROUTINE wrf_get_dm_iocommunicator ( iocommunicator )
1595 USE module_dm
1596 IMPLICIT NONE
1597 INTEGER , INTENT(OUT) :: iocommunicator
1598 iocommunicator = local_iocommunicator
1599 RETURN
1600 END SUBROUTINE wrf_get_dm_iocommunicator
1601
1602 SUBROUTINE wrf_set_dm_communicator ( communicator )
1603 USE module_dm
1604 IMPLICIT NONE
1605 INTEGER , INTENT(IN) :: communicator
1606 local_communicator = communicator
1607 RETURN
1608 END SUBROUTINE wrf_set_dm_communicator
1609
1610 SUBROUTINE wrf_set_dm_iocommunicator ( iocommunicator )
1611 USE module_dm
1612 IMPLICIT NONE
1613 INTEGER , INTENT(IN) :: iocommunicator
1614 local_iocommunicator = iocommunicator
1615 RETURN
1616 END SUBROUTINE wrf_set_dm_iocommunicator
1617
1618
1619 !!!!!!!!!!!!!!!!!!!!!!! PATCH TO GLOBAL !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
1620
1621 SUBROUTINE wrf_patch_to_global_real (buf,globbuf,domdesc,stagger,ordering,&
1622 DS1,DE1,DS2,DE2,DS3,DE3,&
1623 MS1,ME1,MS2,ME2,MS3,ME3,&
1624 PS1,PE1,PS2,PE2,PS3,PE3 )
1625 IMPLICIT NONE
1626 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,&
1627 MS1,ME1,MS2,ME2,MS3,ME3,&
1628 PS1,PE1,PS2,PE2,PS3,PE3
1629 CHARACTER *(*) stagger,ordering
1630 INTEGER fid,domdesc
1631 REAL globbuf(*)
1632 REAL buf(*)
1633
1634 CALL wrf_patch_to_global_generic (buf,globbuf,domdesc,stagger,ordering,RWORDSIZE,&
1635 DS1,DE1,DS2,DE2,DS3,DE3,&
1636 MS1,ME1,MS2,ME2,MS3,ME3,&
1637 PS1,PE1,PS2,PE2,PS3,PE3 )
1638
1639 RETURN
1640 END SUBROUTINE wrf_patch_to_global_real
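! Illustrative call (a sketch, mirroring the debug dump near the top of this
! file): gather a 2-d, xy-ordered field from every task's patch into the
! monitor's global buffer.
!    CALL wrf_patch_to_global_real ( v, globbuf, grid%domdesc, 'xy', 'xy', &
!                                    ids, ide, jds, jde, kds, kde,         &
!                                    ims, ime, jms, jme, kms, kme,         &
!                                    its, ite, jts, jte, kts, kte )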
1641
1642 SUBROUTINE wrf_patch_to_global_double (buf,globbuf,domdesc,stagger,ordering,&
1643 DS1,DE1,DS2,DE2,DS3,DE3,&
1644 MS1,ME1,MS2,ME2,MS3,ME3,&
1645 PS1,PE1,PS2,PE2,PS3,PE3 )
1646 IMPLICIT NONE
1647 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,&
1648 MS1,ME1,MS2,ME2,MS3,ME3,&
1649 PS1,PE1,PS2,PE2,PS3,PE3
1650 CHARACTER *(*) stagger,ordering
1651 INTEGER fid,domdesc
1652 ! this next declaration is REAL, not DOUBLE PRECISION, because it will be autopromoted
1653 ! to double precision by the compiler when WRF is compiled for 8-byte reals. The only reason
1654 ! for having this separate routine is so we pass the correct MPI type to mpi_scatterv;
1655 ! since we never index into the globbuf and Field arrays here, the declared type does not matter
1656 REAL globbuf(*)
1657 REAL buf(*)
1658
1659 CALL wrf_patch_to_global_generic (buf,globbuf,domdesc,stagger,ordering,DWORDSIZE,&
1660 DS1,DE1,DS2,DE2,DS3,DE3,&
1661 MS1,ME1,MS2,ME2,MS3,ME3,&
1662 PS1,PE1,PS2,PE2,PS3,PE3 )
1663
1664 RETURN
1665 END SUBROUTINE wrf_patch_to_global_double
1666
1667
1668 SUBROUTINE wrf_patch_to_global_integer (buf,globbuf,domdesc,stagger,ordering,&
1669 DS1,DE1,DS2,DE2,DS3,DE3,&
1670 MS1,ME1,MS2,ME2,MS3,ME3,&
1671 PS1,PE1,PS2,PE2,PS3,PE3 )
1672 IMPLICIT NONE
1673 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,&
1674 MS1,ME1,MS2,ME2,MS3,ME3,&
1675 PS1,PE1,PS2,PE2,PS3,PE3
1676 CHARACTER *(*) stagger,ordering
1677 INTEGER fid,domdesc
1678 INTEGER globbuf(*)
1679 INTEGER buf(*)
1680
1681 CALL wrf_patch_to_global_generic (buf,globbuf,domdesc,stagger,ordering,IWORDSIZE,&
1682 DS1,DE1,DS2,DE2,DS3,DE3,&
1683 MS1,ME1,MS2,ME2,MS3,ME3,&
1684 PS1,PE1,PS2,PE2,PS3,PE3 )
1685
1686 RETURN
1687 END SUBROUTINE wrf_patch_to_global_integer
1688
1689
1690 SUBROUTINE wrf_patch_to_global_logical (buf,globbuf,domdesc,stagger,ordering,&
1691 DS1,DE1,DS2,DE2,DS3,DE3,&
1692 MS1,ME1,MS2,ME2,MS3,ME3,&
1693 PS1,PE1,PS2,PE2,PS3,PE3 )
1694 IMPLICIT NONE
1695 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,&
1696 MS1,ME1,MS2,ME2,MS3,ME3,&
1697 PS1,PE1,PS2,PE2,PS3,PE3
1698 CHARACTER *(*) stagger,ordering
1699 INTEGER fid,domdesc
1700 LOGICAL globbuf(*)
1701 LOGICAL buf(*)
1702
1703 CALL wrf_patch_to_global_generic (buf,globbuf,domdesc,stagger,ordering,LWORDSIZE,&
1704 DS1,DE1,DS2,DE2,DS3,DE3,&
1705 MS1,ME1,MS2,ME2,MS3,ME3,&
1706 PS1,PE1,PS2,PE2,PS3,PE3 )
1707
1708 RETURN
1709 END SUBROUTINE wrf_patch_to_global_logical
1710
1711 #ifdef DEREF_KLUDGE
1712 # define FRSTELEM (1)
1713 #else
1714 # define FRSTELEM
1715 #endif
1716
1717 SUBROUTINE wrf_patch_to_global_generic (buf,globbuf,domdesc,stagger,ordering,typesize,&
1718 DS1a,DE1a,DS2a,DE2a,DS3a,DE3a,&
1719 MS1a,ME1a,MS2a,ME2a,MS3a,ME3a,&
1720 PS1a,PE1a,PS2a,PE2a,PS3a,PE3a )
1721 USE module_driver_constants
1722 USE module_timing
1723 USE module_wrf_error
1724 USE module_dm
1725 IMPLICIT NONE
1726 INTEGER DS1a,DE1a,DS2a,DE2a,DS3a,DE3a,&
1727 MS1a,ME1a,MS2a,ME2a,MS3a,ME3a,&
1728 PS1a,PE1a,PS2a,PE2a,PS3a,PE3A
1729 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,&
1730 MS1,ME1,MS2,ME2,MS3,ME3,&
1731 PS1,PE1,PS2,PE2,PS3,PE3
1732 INTEGER ids,ide,jds,jde,kds,kde,&
1733 ims,ime,jms,jme,kms,kme,&
1734 ips,ipe,jps,jpe,kps,kpe
1735 CHARACTER *(*) stagger,ordering
1736 LOGICAL, EXTERNAL :: wrf_dm_on_monitor, has_char
1737 INTEGER fid,domdesc,typesize,ierr
1738 REAL globbuf(*)
1739 REAL buf(*)
1740
1741 INTEGER i, j, k, ndim
1742 INTEGER Patch(3,2), Gpatch(3,2,ntasks)
1743 ! allocated further down, after the D indices are potentially recalculated for staggering
1744 REAL, ALLOCATABLE :: tmpbuf( : )
1745 REAL locbuf( (PE1a-PS1a+1)*(PE2a-PS2a+1)*(PE3a-PS3a+1)/RWORDSIZE*typesize+32 )
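! locbuf is sized in default-REAL words: the number of patch elements scaled
! by typesize/RWORDSIZE (so integer, double, and logical data still fit),
! plus 32 words of slack.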
1746
1747 DS1 = DS1a ; DE1 = DE1a ; DS2=DS2a ; DE2 = DE2a ; DS3 = DS3a ; DE3 = DE3a
1748 MS1 = MS1a ; ME1 = ME1a ; MS2=MS2a ; ME2 = ME2a ; MS3 = MS3a ; ME3 = ME3a
1749 PS1 = PS1a ; PE1 = PE1a ; PS2=PS2a ; PE2 = PE2a ; PS3 = PS3a ; PE3 = PE3a
1750
1751 SELECT CASE ( TRIM(ordering) )
1752 CASE ( 'xy', 'yx' )
1753 ndim = 2
1754 CASE DEFAULT
1755 ndim = 3 ! where appropriate
1756 END SELECT
1757
1758 SELECT CASE ( TRIM(ordering) )
1759 CASE ( 'xyz','xy' )
1760 ! the non-staggered variables come in at one-less than
1761 ! domain dimensions, but code wants full domain spec, so
1762 ! adjust if not staggered
1763 IF ( .NOT. has_char( stagger, 'x' ) ) DE1 = DE1+1
1764 IF ( .NOT. has_char( stagger, 'y' ) ) DE2 = DE2+1
1765 IF ( ndim .EQ. 3 .AND. .NOT. has_char( stagger, 'z' ) ) DE3 = DE3+1
1766 CASE ( 'yxz','yx' )
1767 IF ( .NOT. has_char( stagger, 'x' ) ) DE2 = DE2+1
1768 IF ( .NOT. has_char( stagger, 'y' ) ) DE1 = DE1+1
1769 IF ( ndim .EQ. 3 .AND. .NOT. has_char( stagger, 'z' ) ) DE3 = DE3+1
1770 CASE ( 'zxy' )
1771 IF ( .NOT. has_char( stagger, 'x' ) ) DE2 = DE2+1
1772 IF ( .NOT. has_char( stagger, 'y' ) ) DE3 = DE3+1
1773 IF ( ndim .EQ. 3 .AND. .NOT. has_char( stagger, 'z' ) ) DE1 = DE1+1
1774 CASE ( 'xzy' )
1775 IF ( .NOT. has_char( stagger, 'x' ) ) DE1 = DE1+1
1776 IF ( .NOT. has_char( stagger, 'y' ) ) DE3 = DE3+1
1777 IF ( ndim .EQ. 3 .AND. .NOT. has_char( stagger, 'z' ) ) DE2 = DE2+1
1778 CASE DEFAULT
1779 END SELECT
1780
1781 ! moved to here to be after the potential recalculations of D dims
1782 IF ( wrf_dm_on_monitor() ) THEN
1783 ALLOCATE ( tmpbuf ( (DE1-DS1+1)*(DE2-DS2+1)*(DE3-DS3+1)/RWORDSIZE*typesize+32 ), STAT=ierr )
1784 ELSE
1785 ALLOCATE ( tmpbuf ( 1 ), STAT=ierr )
1786 ENDIF
1787 IF ( ierr .ne. 0 ) CALL wrf_error_fatal ('allocating tmpbuf in wrf_patch_to_global_generic')
1788
1789 Patch(1,1) = ps1 ; Patch(1,2) = pe1 ! use patch dims
1790 Patch(2,1) = ps2 ; Patch(2,2) = pe2
1791 Patch(3,1) = ps3 ; Patch(3,2) = pe3
1792
1793 IF ( typesize .EQ. RWORDSIZE ) THEN
1794 CALL just_patch_r ( buf , locbuf , size(locbuf), &
1795 PS1, PE1, PS2, PE2, PS3, PE3 , &
1796 MS1, ME1, MS2, ME2, MS3, ME3 )
1797 ELSE IF ( typesize .EQ. IWORDSIZE ) THEN
1798 CALL just_patch_i ( buf , locbuf , size(locbuf), &
1799 PS1, PE1, PS2, PE2, PS3, PE3 , &
1800 MS1, ME1, MS2, ME2, MS3, ME3 )
1801 ELSE IF ( typesize .EQ. DWORDSIZE ) THEN
1802 CALL just_patch_d ( buf , locbuf , size(locbuf), &
1803 PS1, PE1, PS2, PE2, PS3, PE3 , &
1804 MS1, ME1, MS2, ME2, MS3, ME3 )
1805 ELSE IF ( typesize .EQ. LWORDSIZE ) THEN
1806 CALL just_patch_l ( buf , locbuf , size(locbuf), &
1807 PS1, PE1, PS2, PE2, PS3, PE3 , &
1808 MS1, ME1, MS2, ME2, MS3, ME3 )
1809 ENDIF
1810
1811 ! defined in external/io_quilt
1812 CALL collect_on_comm0 ( local_communicator , IWORDSIZE , &
1813 Patch , 6 , &
1814 GPatch , 6*ntasks )
1815
1816 CALL collect_on_comm0 ( local_communicator , typesize , &
1817 locbuf , (pe1-ps1+1)*(pe2-ps2+1)*(pe3-ps3+1), &
1818 tmpbuf FRSTELEM , (de1-ds1+1)*(de2-ds2+1)*(de3-ds3+1) )
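! The first collect_on_comm0 gathers each task's patch start/end indices
! (6 integers per task) into GPatch on the monitor; the second gathers the
! packed patch data itself into tmpbuf, which is unpacked into globbuf below.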
1819
1820 ndim = len(TRIM(ordering))
1821
1822 IF ( wrf_at_debug_level(500) ) THEN
1823 CALL start_timing
1824 ENDIF
1825
1826 IF ( ndim .GE. 2 .AND. wrf_dm_on_monitor() ) THEN
1827
1828 IF ( typesize .EQ. RWORDSIZE ) THEN
1829 CALL patch_2_outbuf_r ( tmpbuf FRSTELEM , globbuf , &
1830 DS1, DE1, DS2, DE2, DS3, DE3 , &
1831 GPATCH )
1832 ELSE IF ( typesize .EQ. IWORDSIZE ) THEN
1833 CALL patch_2_outbuf_i ( tmpbuf FRSTELEM , globbuf , &
1834 DS1, DE1, DS2, DE2, DS3, DE3 , &
1835 GPATCH )
1836 ELSE IF ( typesize .EQ. DWORDSIZE ) THEN
1837 CALL patch_2_outbuf_d ( tmpbuf FRSTELEM , globbuf , &
1838 DS1, DE1, DS2, DE2, DS3, DE3 , &
1839 GPATCH )
1840 ELSE IF ( typesize .EQ. LWORDSIZE ) THEN
1841 CALL patch_2_outbuf_l ( tmpbuf FRSTELEM , globbuf , &
1842 DS1, DE1, DS2, DE2, DS3, DE3 , &
1843 GPATCH )
1844 ENDIF
1845
1846 ENDIF
1847
1848 IF ( wrf_at_debug_level(500) ) THEN
1849 CALL end_timing('wrf_patch_to_global_generic')
1850 ENDIF
1851 DEALLOCATE( tmpbuf )
1852 RETURN
1853 END SUBROUTINE wrf_patch_to_global_generic
1854
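! just_patch_i and the _r/_d/_l variants below each copy the patch
! (PS1:PE1, PS2:PE2, PS3:PE3) portion of a memory-dimensioned array into a
! contiguous 1-d buffer, first index varying fastest, ready for the gather
! in wrf_patch_to_global_generic above.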
1855 SUBROUTINE just_patch_i ( inbuf , outbuf, noutbuf, &
1856 PS1,PE1,PS2,PE2,PS3,PE3, &
1857 MS1,ME1,MS2,ME2,MS3,ME3 )
1858 USE module_dm
1859 IMPLICIT NONE
1860 INTEGER , INTENT(IN) :: noutbuf
1861 INTEGER , DIMENSION(noutbuf) , INTENT(OUT) :: outbuf
1862 INTEGER MS1,ME1,MS2,ME2,MS3,ME3
1863 INTEGER PS1,PE1,PS2,PE2,PS3,PE3
1864 INTEGER , DIMENSION( MS1:ME1,MS2:ME2,MS3:ME3 ) , INTENT(IN) :: inbuf
1865 ! Local
1866 INTEGER :: i,j,k,n , icurs
1867 icurs = 1
1868 DO k = PS3, PE3
1869 DO j = PS2, PE2
1870 DO i = PS1, PE1
1871 outbuf( icurs ) = inbuf( i, j, k )
1872 icurs = icurs + 1
1873 ENDDO
1874 ENDDO
1875 ENDDO
1876 RETURN
1877 END SUBROUTINE just_patch_i
1878
1879 SUBROUTINE just_patch_r ( inbuf , outbuf, noutbuf, &
1880 PS1,PE1,PS2,PE2,PS3,PE3, &
1881 MS1,ME1,MS2,ME2,MS3,ME3 )
1882 USE module_dm
1883 IMPLICIT NONE
1884 INTEGER , INTENT(IN) :: noutbuf
1885 REAL , DIMENSION(noutbuf) , INTENT(OUT) :: outbuf
1886 INTEGER MS1,ME1,MS2,ME2,MS3,ME3
1887 INTEGER PS1,PE1,PS2,PE2,PS3,PE3
1888 REAL , DIMENSION( MS1:ME1,MS2:ME2,MS3:ME3 ) , INTENT(in) :: inbuf
1889 ! Local
1890 INTEGER :: i,j,k , icurs
1891 icurs = 1
1892 DO k = PS3, PE3
1893 DO j = PS2, PE2
1894 DO i = PS1, PE1
1895 outbuf( icurs ) = inbuf( i, j, k )
1896 icurs = icurs + 1
1897 ENDDO
1898 ENDDO
1899 ENDDO
1900 RETURN
1901 END SUBROUTINE just_patch_r
1902
1903 SUBROUTINE just_patch_d ( inbuf , outbuf, noutbuf, &
1904 PS1,PE1,PS2,PE2,PS3,PE3, &
1905 MS1,ME1,MS2,ME2,MS3,ME3 )
1906 USE module_dm
1907 IMPLICIT NONE
1908 INTEGER , INTENT(IN) :: noutbuf
1909 DOUBLE PRECISION , DIMENSION(noutbuf) , INTENT(OUT) :: outbuf
1910 INTEGER MS1,ME1,MS2,ME2,MS3,ME3
1911 INTEGER PS1,PE1,PS2,PE2,PS3,PE3
1912 DOUBLE PRECISION , DIMENSION( MS1:ME1,MS2:ME2,MS3:ME3 ) , INTENT(in) :: inbuf
1913 ! Local
1914 INTEGER :: i,j,k,n , icurs
1915 icurs = 1
1916 DO k = PS3, PE3
1917 DO j = PS2, PE2
1918 DO i = PS1, PE1
1919 outbuf( icurs ) = inbuf( i, j, k )
1920 icurs = icurs + 1
1921 ENDDO
1922 ENDDO
1923 ENDDO
1924 RETURN
1925 END SUBROUTINE just_patch_d
1926
1927 SUBROUTINE just_patch_l ( inbuf , outbuf, noutbuf, &
1928 PS1,PE1,PS2,PE2,PS3,PE3, &
1929 MS1,ME1,MS2,ME2,MS3,ME3 )
1930 USE module_dm
1931 IMPLICIT NONE
1932 INTEGER , INTENT(IN) :: noutbuf
1933 LOGICAL , DIMENSION(noutbuf) , INTENT(OUT) :: outbuf
1934 INTEGER MS1,ME1,MS2,ME2,MS3,ME3
1935 INTEGER PS1,PE1,PS2,PE2,PS3,PE3
1936 LOGICAL , DIMENSION( MS1:ME1,MS2:ME2,MS3:ME3 ) , INTENT(in) :: inbuf
1937 ! Local
1938 INTEGER :: i,j,k,n , icurs
1939 icurs = 1
1940 DO k = PS3, PE3
1941 DO j = PS2, PE2
1942 DO i = PS1, PE1
1943 outbuf( icurs ) = inbuf( i, j, k )
1944 icurs = icurs + 1
1945 ENDDO
1946 ENDDO
1947 ENDDO
1948 RETURN
1949 END SUBROUTINE just_patch_l
1950
1951
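! patch_2_outbuf_r and the _i/_d/_l variants below run on the monitor: they
! walk the gathered buffer task by task and copy each task's contiguous
! patch segment into its (i,j,k) position in the global, domain-dimensioned
! output array, using the extents collected in GPATCH.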
1952 SUBROUTINE patch_2_outbuf_r( inbuf, outbuf, &
1953 DS1,DE1,DS2,DE2,DS3,DE3, &
1954 GPATCH )
1955 USE module_dm
1956 IMPLICIT NONE
1957 REAL , DIMENSION(*) , INTENT(IN) :: inbuf
1958 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,GPATCH(3,2,ntasks)
1959 REAL , DIMENSION( DS1:DE1,DS2:DE2,DS3:DE3 ) , INTENT(out) :: outbuf
1960 ! Local
1961 INTEGER :: i,j,k,n , icurs
1962 icurs = 1
1963 DO n = 1, ntasks
1964 DO k = GPATCH( 3,1,n ), GPATCH( 3,2,n )
1965 DO j = GPATCH( 2,1,n ), GPATCH( 2,2,n )
1966 DO i = GPATCH( 1,1,n ), GPATCH( 1,2,n )
1967 outbuf( i, j, k ) = inbuf( icurs )
1968 icurs = icurs + 1
1969 ENDDO
1970 ENDDO
1971 ENDDO
1972 ENDDO
1973
1974 RETURN
1975 END SUBROUTINE patch_2_outbuf_r
1976
1977 SUBROUTINE patch_2_outbuf_i( inbuf, outbuf, &
1978 DS1,DE1,DS2,DE2,DS3,DE3,&
1979 GPATCH )
1980 USE module_dm
1981 IMPLICIT NONE
1982 INTEGER , DIMENSION(*) , INTENT(IN) :: inbuf
1983 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,GPATCH(3,2,ntasks)
1984 INTEGER , DIMENSION( DS1:DE1,DS2:DE2,DS3:DE3 ) , INTENT(out) :: outbuf
1985 ! Local
1986 INTEGER :: i,j,k,n , icurs
1987 icurs = 1
1988 DO n = 1, ntasks
1989 DO k = GPATCH( 3,1,n ), GPATCH( 3,2,n )
1990 DO j = GPATCH( 2,1,n ), GPATCH( 2,2,n )
1991 DO i = GPATCH( 1,1,n ), GPATCH( 1,2,n )
1992 outbuf( i, j, k ) = inbuf( icurs )
1993 icurs = icurs + 1
1994 ENDDO
1995 ENDDO
1996 ENDDO
1997 ENDDO
1998 RETURN
1999 END SUBROUTINE patch_2_outbuf_i
2000
2001 SUBROUTINE patch_2_outbuf_d( inbuf, outbuf, &
2002 DS1,DE1,DS2,DE2,DS3,DE3,&
2003 GPATCH )
2004 USE module_dm
2005 IMPLICIT NONE
2006 DOUBLE PRECISION , DIMENSION(*) , INTENT(IN) :: inbuf
2007 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,GPATCH(3,2,ntasks)
2008 DOUBLE PRECISION , DIMENSION( DS1:DE1,DS2:DE2,DS3:DE3 ) , INTENT(out) :: outbuf
2009 ! Local
2010 INTEGER :: i,j,k,n , icurs
2011 icurs = 1
2012 DO n = 1, ntasks
2013 DO k = GPATCH( 3,1,n ), GPATCH( 3,2,n )
2014 DO j = GPATCH( 2,1,n ), GPATCH( 2,2,n )
2015 DO i = GPATCH( 1,1,n ), GPATCH( 1,2,n )
2016 outbuf( i, j, k ) = inbuf( icurs )
2017 icurs = icurs + 1
2018 ENDDO
2019 ENDDO
2020 ENDDO
2021 ENDDO
2022 RETURN
2023 END SUBROUTINE patch_2_outbuf_d
2024
2025 SUBROUTINE patch_2_outbuf_l( inbuf, outbuf, &
2026 DS1,DE1,DS2,DE2,DS3,DE3,&
2027 GPATCH )
2028 USE module_dm
2029 IMPLICIT NONE
2030 LOGICAL , DIMENSION(*) , INTENT(IN) :: inbuf
2031 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,GPATCH(3,2,ntasks)
2032 LOGICAL , DIMENSION( DS1:DE1,DS2:DE2,DS3:DE3 ) , INTENT(out) :: outbuf
2033 ! Local
2034 INTEGER :: i,j,k,n , icurs
2035 icurs = 1
2036 DO n = 1, ntasks
2037 DO k = GPATCH( 3,1,n ), GPATCH( 3,2,n )
2038 DO j = GPATCH( 2,1,n ), GPATCH( 2,2,n )
2039 DO i = GPATCH( 1,1,n ), GPATCH( 1,2,n )
2040 outbuf( i, j, k ) = inbuf( icurs )
2041 icurs = icurs + 1
2042 ENDDO
2043 ENDDO
2044 ENDDO
2045 ENDDO
2046 RETURN
2047 END SUBROUTINE patch_2_outbuf_l
2048
2049 !!!!!!!!!!!!!!!!!!!!!!! GLOBAL TO PATCH !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
2050
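! The global-to-patch path mirrors patch-to-global in reverse: the monitor
! carves the global array into per-task segments (outbuf_2_patch_*),
! dist_on_comm0 scatters them, and all_sub_* copies each task's segment into
! its memory-dimensioned patch array.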
2051 SUBROUTINE wrf_global_to_patch_real (globbuf,buf,domdesc,stagger,ordering,&
2052 DS1,DE1,DS2,DE2,DS3,DE3,&
2053 MS1,ME1,MS2,ME2,MS3,ME3,&
2054 PS1,PE1,PS2,PE2,PS3,PE3 )
2055 IMPLICIT NONE
2056 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,&
2057 MS1,ME1,MS2,ME2,MS3,ME3,&
2058 PS1,PE1,PS2,PE2,PS3,PE3
2059 CHARACTER *(*) stagger,ordering
2060 INTEGER fid,domdesc
2061 REAL globbuf(*)
2062 REAL buf(*)
2063
2064 CALL wrf_global_to_patch_generic (globbuf,buf,domdesc,stagger,ordering,RWORDSIZE,&
2065 DS1,DE1,DS2,DE2,DS3,DE3,&
2066 MS1,ME1,MS2,ME2,MS3,ME3,&
2067 PS1,PE1,PS2,PE2,PS3,PE3 )
2068 RETURN
2069 END SUBROUTINE wrf_global_to_patch_real
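! Illustrative call (a sketch): scatter a monitor-side 3-d, xzy-ordered field
! back out to every task's patch, the inverse of the gather shown in the
! patch-to-global section above.
!    CALL wrf_global_to_patch_real ( globbuf, v, grid%domdesc, 'xyz', 'xzy', &
!                                    ids, ide, kds, kde, jds, jde,           &
!                                    ims, ime, kms, kme, jms, jme,           &
!                                    its, ite, kts, kte, jts, jte )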
2070
2071 SUBROUTINE wrf_global_to_patch_double (globbuf,buf,domdesc,stagger,ordering,&
2072 DS1,DE1,DS2,DE2,DS3,DE3,&
2073 MS1,ME1,MS2,ME2,MS3,ME3,&
2074 PS1,PE1,PS2,PE2,PS3,PE3 )
2075 IMPLICIT NONE
2076 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,&
2077 MS1,ME1,MS2,ME2,MS3,ME3,&
2078 PS1,PE1,PS2,PE2,PS3,PE3
2079 CHARACTER *(*) stagger,ordering
2080 INTEGER fid,domdesc
2081 ! this next declaration is REAL, not DOUBLE PRECISION, because it will be autopromoted
2082 ! to double precision by the compiler when WRF is compiled for 8-byte reals. The only reason
2083 ! for having this separate routine is so we pass the correct MPI type to mpi_scatterv;
2084 ! since we never index into the globbuf and Field arrays here, the declared type does not matter
2085 REAL globbuf(*)
2086 REAL buf(*)
2087
2088 CALL wrf_global_to_patch_generic (globbuf,buf,domdesc,stagger,ordering,DWORDSIZE,&
2089 DS1,DE1,DS2,DE2,DS3,DE3,&
2090 MS1,ME1,MS2,ME2,MS3,ME3,&
2091 PS1,PE1,PS2,PE2,PS3,PE3 )
2092 RETURN
2093 END SUBROUTINE wrf_global_to_patch_double
2094
2095
2096 SUBROUTINE wrf_global_to_patch_integer (globbuf,buf,domdesc,stagger,ordering,&
2097 DS1,DE1,DS2,DE2,DS3,DE3,&
2098 MS1,ME1,MS2,ME2,MS3,ME3,&
2099 PS1,PE1,PS2,PE2,PS3,PE3 )
2100 IMPLICIT NONE
2101 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,&
2102 MS1,ME1,MS2,ME2,MS3,ME3,&
2103 PS1,PE1,PS2,PE2,PS3,PE3
2104 CHARACTER *(*) stagger,ordering
2105 INTEGER fid,domdesc
2106 INTEGER globbuf(*)
2107 INTEGER buf(*)
2108
2109 CALL wrf_global_to_patch_generic (globbuf,buf,domdesc,stagger,ordering,IWORDSIZE,&
2110 DS1,DE1,DS2,DE2,DS3,DE3,&
2111 MS1,ME1,MS2,ME2,MS3,ME3,&
2112 PS1,PE1,PS2,PE2,PS3,PE3 )
2113 RETURN
2114 END SUBROUTINE wrf_global_to_patch_integer
2115
2116 SUBROUTINE wrf_global_to_patch_logical (globbuf,buf,domdesc,stagger,ordering,&
2117 DS1,DE1,DS2,DE2,DS3,DE3,&
2118 MS1,ME1,MS2,ME2,MS3,ME3,&
2119 PS1,PE1,PS2,PE2,PS3,PE3 )
2120 IMPLICIT NONE
2121 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,&
2122 MS1,ME1,MS2,ME2,MS3,ME3,&
2123 PS1,PE1,PS2,PE2,PS3,PE3
2124 CHARACTER *(*) stagger,ordering
2125 INTEGER fid,domdesc
2126 LOGICAL globbuf(*)
2127 LOGICAL buf(*)
2128
2129 CALL wrf_global_to_patch_generic (globbuf,buf,domdesc,stagger,ordering,LWORDSIZE,&
2130 DS1,DE1,DS2,DE2,DS3,DE3,&
2131 MS1,ME1,MS2,ME2,MS3,ME3,&
2132 PS1,PE1,PS2,PE2,PS3,PE3 )
2133 RETURN
2134 END SUBROUTINE wrf_global_to_patch_logical
2135
2136 SUBROUTINE wrf_global_to_patch_generic (globbuf,buf,domdesc,stagger,ordering,typesize,&
2137 DS1a,DE1a,DS2a,DE2a,DS3a,DE3a,&
2138 MS1a,ME1a,MS2a,ME2a,MS3a,ME3a,&
2139 PS1a,PE1a,PS2a,PE2a,PS3a,PE3a )
2140 USE module_dm
2141 USE module_driver_constants
2142 IMPLICIT NONE
2143 INTEGER DS1a,DE1a,DS2a,DE2a,DS3a,DE3a,&
2144 MS1a,ME1a,MS2a,ME2a,MS3a,ME3a,&
2145 PS1a,PE1a,PS2a,PE2a,PS3a,PE3A
2146 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,&
2147 MS1,ME1,MS2,ME2,MS3,ME3,&
2148 PS1,PE1,PS2,PE2,PS3,PE3
2149 CHARACTER *(*) stagger,ordering
2150 INTEGER fid,domdesc,typesize,ierr
2151 REAL globbuf(*)
2152 REAL buf(*)
2153 LOGICAL, EXTERNAL :: wrf_dm_on_monitor, has_char
2154
2155 INTEGER i,j,k,ord,ord2d,ndim
2156 INTEGER Patch(3,2), Gpatch(3,2,ntasks)
2157 REAL, ALLOCATABLE :: tmpbuf( : )
2158 REAL locbuf( (PE1a-PS1a+1)*(PE2a-PS2a+1)*(PE3a-PS3a+1)/RWORDSIZE*typesize+32 )
2159
2160 DS1 = DS1a ; DE1 = DE1a ; DS2=DS2a ; DE2 = DE2a ; DS3 = DS3a ; DE3 = DE3a
2161 MS1 = MS1a ; ME1 = ME1a ; MS2=MS2a ; ME2 = ME2a ; MS3 = MS3a ; ME3 = ME3a
2162 PS1 = PS1a ; PE1 = PE1a ; PS2=PS2a ; PE2 = PE2a ; PS3 = PS3a ; PE3 = PE3a
2163
2164 SELECT CASE ( TRIM(ordering) )
2165 CASE ( 'xy', 'yx' )
2166 ndim = 2
2167 CASE DEFAULT
2168 ndim = 3 ! where appropriate
2169 END SELECT
2170
2171 SELECT CASE ( TRIM(ordering) )
2172 CASE ( 'xyz','xy' )
2173 ! the non-staggered variables come in at one-less than
2174 ! domain dimensions, but code wants full domain spec, so
2175 ! adjust if not staggered
2176 IF ( .NOT. has_char( stagger, 'x' ) ) DE1 = DE1+1
2177 IF ( .NOT. has_char( stagger, 'y' ) ) DE2 = DE2+1
2178 IF ( ndim .EQ. 3 .AND. .NOT. has_char( stagger, 'z' ) ) DE3 = DE3+1
2179 CASE ( 'yxz','yx' )
2180 IF ( .NOT. has_char( stagger, 'x' ) ) DE2 = DE2+1
2181 IF ( .NOT. has_char( stagger, 'y' ) ) DE1 = DE1+1
2182 IF ( ndim .EQ. 3 .AND. .NOT. has_char( stagger, 'z' ) ) DE3 = DE3+1
2183 CASE ( 'zxy' )
2184 IF ( .NOT. has_char( stagger, 'x' ) ) DE2 = DE2+1
2185 IF ( .NOT. has_char( stagger, 'y' ) ) DE3 = DE3+1
2186 IF ( ndim .EQ. 3 .AND. .NOT. has_char( stagger, 'z' ) ) DE1 = DE1+1
2187 CASE ( 'xzy' )
2188 IF ( .NOT. has_char( stagger, 'x' ) ) DE1 = DE1+1
2189 IF ( .NOT. has_char( stagger, 'y' ) ) DE3 = DE3+1
2190 IF ( ndim .EQ. 3 .AND. .NOT. has_char( stagger, 'z' ) ) DE2 = DE2+1
2191 CASE DEFAULT
2192 END SELECT
2193
2194 ! moved to here to be after the potential recalculations of D dims
2195 IF ( wrf_dm_on_monitor() ) THEN
2196 ALLOCATE ( tmpbuf ( (DE1-DS1+1)*(DE2-DS2+1)*(DE3-DS3+1)/RWORDSIZE*typesize+32 ), STAT=ierr )
2197 ELSE
2198 ALLOCATE ( tmpbuf ( 1 ), STAT=ierr )
2199 ENDIF
2200 IF ( ierr .ne. 0 ) CALL wrf_error_fatal ('allocating tmpbuf in wrf_global_to_patch_generic')
2201
2202 Patch(1,1) = ps1 ; Patch(1,2) = pe1 ! use patch dims
2203 Patch(2,1) = ps2 ; Patch(2,2) = pe2
2204 Patch(3,1) = ps3 ; Patch(3,2) = pe3
2205
2206 ! defined in external/io_quilt
2207 CALL collect_on_comm0 ( local_communicator , IWORDSIZE , &
2208 Patch , 6 , &
2209 GPatch , 6*ntasks )
2210
2211 ndim = len(TRIM(ordering))
2212
2213 IF ( wrf_dm_on_monitor() .AND. ndim .GE. 2 ) THEN
2214 IF ( typesize .EQ. RWORDSIZE ) THEN
2215 CALL outbuf_2_patch_r ( globbuf , tmpbuf FRSTELEM , &
2216 DS1, DE1, DS2, DE2, DS3, DE3 , &
2217 MS1, ME1, MS2, ME2, MS3, ME3 , &
2218 GPATCH )
2219 ELSE IF ( typesize .EQ. IWORDSIZE ) THEN
2220 CALL outbuf_2_patch_i ( globbuf , tmpbuf FRSTELEM , &
2221 DS1, DE1, DS2, DE2, DS3, DE3 , &
2222 GPATCH )
2223 ELSE IF ( typesize .EQ. DWORDSIZE ) THEN
2224 CALL outbuf_2_patch_d ( globbuf , tmpbuf FRSTELEM , &
2225 DS1, DE1, DS2, DE2, DS3, DE3 , &
2226 GPATCH )
2227 ELSE IF ( typesize .EQ. LWORDSIZE ) THEN
2228 CALL outbuf_2_patch_l ( globbuf , tmpbuf FRSTELEM , &
2229 DS1, DE1, DS2, DE2, DS3, DE3 , &
2230 GPATCH )
2231 ENDIF
2232 ENDIF
2233
2234 CALL dist_on_comm0 ( local_communicator , typesize , &
2235 tmpbuf FRSTELEM , (de1-ds1+1)*(de2-ds2+1)*(de3-ds3+1) , &
2236 locbuf , (pe1-ps1+1)*(pe2-ps2+1)*(pe3-ps3+1) )
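! dist_on_comm0 is the counterpart of collect_on_comm0: the monitor's tmpbuf,
! packed per task by outbuf_2_patch_* above, is distributed so each task
! receives its own (pe1-ps1+1)*(pe2-ps2+1)*(pe3-ps3+1)-element segment in
! locbuf, which all_sub_* copies into the patch array below.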
2237
2238 IF ( typesize .EQ. RWORDSIZE ) THEN
2239 CALL all_sub_r ( locbuf , buf , &
2240 PS1, PE1, PS2, PE2, PS3, PE3 , &
2241 MS1, ME1, MS2, ME2, MS3, ME3 )
2242
2243 ELSE IF ( typesize .EQ. IWORDSIZE ) THEN
2244 CALL all_sub_i ( locbuf , buf , &
2245 PS1, PE1, PS2, PE2, PS3, PE3 , &
2246 MS1, ME1, MS2, ME2, MS3, ME3 )
2247 ELSE IF ( typesize .EQ. DWORDSIZE ) THEN
2248 CALL all_sub_d ( locbuf , buf , &
2249 PS1, PE1, PS2, PE2, PS3, PE3 , &
2250 MS1, ME1, MS2, ME2, MS3, ME3 )
2251 ELSE IF ( typesize .EQ. LWORDSIZE ) THEN
2252 CALL all_sub_l ( locbuf , buf , &
2253 PS1, PE1, PS2, PE2, PS3, PE3 , &
2254 MS1, ME1, MS2, ME2, MS3, ME3 )
2255 ENDIF
2256
2257
2258 DEALLOCATE ( tmpbuf )
2259 RETURN
2260 END SUBROUTINE wrf_global_to_patch_generic
2261
2262 SUBROUTINE all_sub_i ( inbuf , outbuf, &
2263 PS1,PE1,PS2,PE2,PS3,PE3, &
2264 MS1,ME1,MS2,ME2,MS3,ME3 )
2265 USE module_dm
2266 IMPLICIT NONE
2267 INTEGER , DIMENSION(*) , INTENT(IN) :: inbuf
2268 INTEGER MS1,ME1,MS2,ME2,MS3,ME3
2269 INTEGER PS1,PE1,PS2,PE2,PS3,PE3
2270 INTEGER , DIMENSION( MS1:ME1,MS2:ME2,MS3:ME3 ) , INTENT(OUT) :: outbuf
2271 ! Local
2272 INTEGER :: i,j,k,n , icurs
2273 icurs = 1
2274 DO k = PS3, PE3
2275 DO j = PS2, PE2
2276 DO i = PS1, PE1
2277 outbuf( i, j, k ) = inbuf ( icurs )
2278 icurs = icurs + 1
2279 ENDDO
2280 ENDDO
2281 ENDDO
2282 RETURN
2283 END SUBROUTINE all_sub_i
2284
2285 SUBROUTINE all_sub_r ( inbuf , outbuf, &
2286 PS1,PE1,PS2,PE2,PS3,PE3, &
2287 MS1,ME1,MS2,ME2,MS3,ME3 )
2288 USE module_dm
2289 IMPLICIT NONE
2290 REAL , DIMENSION(*) , INTENT(IN) :: inbuf
2291 INTEGER MS1,ME1,MS2,ME2,MS3,ME3
2292 INTEGER PS1,PE1,PS2,PE2,PS3,PE3
2293 REAL , DIMENSION( MS1:ME1,MS2:ME2,MS3:ME3 ) , INTENT(OUT) :: outbuf
2294 ! Local
2295 INTEGER :: i,j,k,n , icurs
2296 icurs = 1
2297 DO k = PS3, PE3
2298 DO j = PS2, PE2
2299 DO i = PS1, PE1
2300 outbuf( i, j, k ) = inbuf ( icurs )
2301 icurs = icurs + 1
2302 ENDDO
2303 ENDDO
2304 ENDDO
2305
2306 RETURN
2307 END SUBROUTINE all_sub_r
2308
2309 SUBROUTINE all_sub_d ( inbuf , outbuf, &
2310 PS1,PE1,PS2,PE2,PS3,PE3, &
2311 MS1,ME1,MS2,ME2,MS3,ME3 )
2312 USE module_dm
2313 IMPLICIT NONE
2314 DOUBLE PRECISION , DIMENSION(*) , INTENT(IN) :: inbuf
2315 INTEGER MS1,ME1,MS2,ME2,MS3,ME3
2316 INTEGER PS1,PE1,PS2,PE2,PS3,PE3
2317 DOUBLE PRECISION , DIMENSION( MS1:ME1,MS2:ME2,MS3:ME3 ) , INTENT(OUT) :: outbuf
2318 ! Local
2319 INTEGER :: i,j,k,n , icurs
2320 icurs = 1
2321 DO k = PS3, PE3
2322 DO j = PS2, PE2
2323 DO i = PS1, PE1
2324 outbuf( i, j, k ) = inbuf ( icurs )
2325 icurs = icurs + 1
2326 ENDDO
2327 ENDDO
2328 ENDDO
2329 RETURN
2330 END SUBROUTINE all_sub_d
2331
2332 SUBROUTINE all_sub_l ( inbuf , outbuf, &
2333 PS1,PE1,PS2,PE2,PS3,PE3, &
2334 MS1,ME1,MS2,ME2,MS3,ME3 )
2335 USE module_dm
2336 IMPLICIT NONE
2337 LOGICAL , DIMENSION(*) , INTENT(IN) :: inbuf
2338 INTEGER MS1,ME1,MS2,ME2,MS3,ME3
2339 INTEGER PS1,PE1,PS2,PE2,PS3,PE3
2340 LOGICAL , DIMENSION( MS1:ME1,MS2:ME2,MS3:ME3 ) , INTENT(OUT) :: outbuf
2341 ! Local
2342 INTEGER :: i,j,k,n , icurs
2343 icurs = 1
2344 DO k = PS3, PE3
2345 DO j = PS2, PE2
2346 DO i = PS1, PE1
2347 outbuf( i, j, k ) = inbuf ( icurs )
2348 icurs = icurs + 1
2349 ENDDO
2350 ENDDO
2351 ENDDO
2352 RETURN
2353 END SUBROUTINE all_sub_l
2354
2355 SUBROUTINE outbuf_2_patch_r( inbuf, outbuf, &
2356 DS1,DE1,DS2,DE2,DS3,DE3, &
2357 MS1, ME1, MS2, ME2, MS3, ME3 , &
2358 GPATCH )
2359 USE module_dm
2360 IMPLICIT NONE
2361 REAL , DIMENSION(*) , INTENT(OUT) :: outbuf
2362 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,GPATCH(3,2,ntasks)
2363 INTEGER MS1,ME1,MS2,ME2,MS3,ME3
2364 REAL , DIMENSION( DS1:DE1,DS2:DE2,DS3:DE3 ) , INTENT(IN) :: inbuf
2365 ! Local
2366 INTEGER :: i,j,k,n , icurs
2367
2368 icurs = 1
2369 DO n = 1, ntasks
2370 DO k = GPATCH( 3,1,n ), GPATCH( 3,2,n )
2371 DO j = GPATCH( 2,1,n ), GPATCH( 2,2,n )
2372 DO i = GPATCH( 1,1,n ), GPATCH( 1,2,n )
2373 outbuf( icurs ) = inbuf( i,j,k )
2374 icurs = icurs + 1
2375 ENDDO
2376 ENDDO
2377 ENDDO
2378 ENDDO
2379 RETURN
2380 END SUBROUTINE outbuf_2_patch_r
2381
2382 SUBROUTINE outbuf_2_patch_i( inbuf, outbuf, &
2383 DS1,DE1,DS2,DE2,DS3,DE3,&
2384 GPATCH )
2385 USE module_dm
2386 IMPLICIT NONE
2387 INTEGER , DIMENSION(*) , INTENT(OUT) :: outbuf
2388 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,GPATCH(3,2,ntasks)
2389 INTEGER , DIMENSION( DS1:DE1,DS2:DE2,DS3:DE3 ) , INTENT(IN) :: inbuf
2390 ! Local
2391 INTEGER :: i,j,k,n , icurs
2392 icurs = 1
2393 DO n = 1, ntasks
2394 DO k = GPATCH( 3,1,n ), GPATCH( 3,2,n )
2395 DO j = GPATCH( 2,1,n ), GPATCH( 2,2,n )
2396 DO i = GPATCH( 1,1,n ), GPATCH( 1,2,n )
2397 outbuf( icurs ) = inbuf( i,j,k )
2398 icurs = icurs + 1
2399 ENDDO
2400 ENDDO
2401 ENDDO
2402 ENDDO
2403 RETURN
2404 END SUBROUTINE outbuf_2_patch_i
2405
2406 SUBROUTINE outbuf_2_patch_d( inbuf, outbuf, &
2407 DS1,DE1,DS2,DE2,DS3,DE3,&
2408 GPATCH )
2409 USE module_dm
2410 IMPLICIT NONE
2411 DOUBLE PRECISION , DIMENSION(*) , INTENT(OUT) :: outbuf
2412 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,GPATCH(3,2,ntasks)
2413 DOUBLE PRECISION , DIMENSION( DS1:DE1,DS2:DE2,DS3:DE3 ) , INTENT(IN) :: inbuf
2414 ! Local
2415 INTEGER :: i,j,k,n , icurs
2416 icurs = 1
2417 DO n = 1, ntasks
2418 DO k = GPATCH( 3,1,n ), GPATCH( 3,2,n )
2419 DO j = GPATCH( 2,1,n ), GPATCH( 2,2,n )
2420 DO i = GPATCH( 1,1,n ), GPATCH( 1,2,n )
2421 outbuf( icurs ) = inbuf( i,j,k )
2422 icurs = icurs + 1
2423 ENDDO
2424 ENDDO
2425 ENDDO
2426 ENDDO
2427 RETURN
2428 END SUBROUTINE outbuf_2_patch_d
2429
2430 SUBROUTINE outbuf_2_patch_l( inbuf, outbuf, &
2431 DS1,DE1,DS2,DE2,DS3,DE3,&
2432 GPATCH )
2433 USE module_dm
2434 IMPLICIT NONE
2435 LOGICAL , DIMENSION(*) , INTENT(OUT) :: outbuf
2436 INTEGER DS1,DE1,DS2,DE2,DS3,DE3,GPATCH(3,2,ntasks)
2437 LOGICAL , DIMENSION( DS1:DE1,DS2:DE2,DS3:DE3 ) , INTENT(IN) :: inbuf
2438 ! Local
2439 INTEGER :: i,j,k,n , icurs
2440 icurs = 1
2441 DO n = 1, ntasks
2442 DO k = GPATCH( 3,1,n ), GPATCH( 3,2,n )
2443 DO j = GPATCH( 2,1,n ), GPATCH( 2,2,n )
2444 DO i = GPATCH( 1,1,n ), GPATCH( 1,2,n )
2445 outbuf( icurs ) = inbuf( i,j,k )
2446 icurs = icurs + 1
2447 ENDDO
2448 ENDDO
2449 ENDDO
2450 ENDDO
2451 RETURN
2452 END SUBROUTINE outbuf_2_patch_l
2453
2454
2455
2456 !------------------------------------------------------------------
2457
2458 #if ( EM_CORE == 1 )
2459
2460 !------------------------------------------------------------------
2461
2462 SUBROUTINE force_domain_em_part2 ( grid, ngrid, config_flags &
2463 !
2464 #include "em_dummy_new_args.inc"
2465 !
2466 )
2467 USE module_domain
2468 USE module_configure
2469 USE module_dm
2470 IMPLICIT NONE
2471 !
2472 TYPE(domain), POINTER :: grid ! name of the grid being dereferenced (must be "grid")
2473 TYPE(domain), POINTER :: ngrid
2474 #include <em_dummy_new_decl.inc>
2475 INTEGER nlev, msize
2476 INTEGER i,j,pig,pjg,cm,cn,nig,njg,retval,k
2477 TYPE (grid_config_rec_type) :: config_flags
2478 REAL xv(500)
2479 INTEGER :: cids, cide, cjds, cjde, ckds, ckde, &
2480 cims, cime, cjms, cjme, ckms, ckme, &
2481 cips, cipe, cjps, cjpe, ckps, ckpe
2482 INTEGER :: nids, nide, njds, njde, nkds, nkde, &
2483 nims, nime, njms, njme, nkms, nkme, &
2484 nips, nipe, njps, njpe, nkps, nkpe
2485 INTEGER :: ids, ide, jds, jde, kds, kde, &
2486 ims, ime, jms, jme, kms, kme, &
2487 ips, ipe, jps, jpe, kps, kpe
2488
2489 CALL get_ijk_from_grid ( grid , &
2490 cids, cide, cjds, cjde, ckds, ckde, &
2491 cims, cime, cjms, cjme, ckms, ckme, &
2492 cips, cipe, cjps, cjpe, ckps, ckpe )
2493 CALL get_ijk_from_grid ( ngrid , &
2494 nids, nide, njds, njde, nkds, nkde, &
2495 nims, nime, njms, njme, nkms, nkme, &
2496 nips, nipe, njps, njpe, nkps, nkpe )
2497
2498 nlev = ckde - ckds + 1
2499
2500 #include "em_nest_interpdown_unpack.inc"
2501
2502 CALL get_ijk_from_grid ( grid , &
2503 ids, ide, jds, jde, kds, kde, &
2504 ims, ime, jms, jme, kms, kme, &
2505 ips, ipe, jps, jpe, kps, kpe )
2506
2507 #include "HALO_EM_FORCE_DOWN.inc"
2508
2509 ! code here to interpolate the data into the nested domain
2510 # include "em_nest_forcedown_interp.inc"
2511
2512 RETURN
2513 END SUBROUTINE force_domain_em_part2
2514
2515 !------------------------------------------------------------------
2516
2517 SUBROUTINE interp_domain_em_part1 ( grid, intermediate_grid, ngrid, config_flags &
2518 !
2519 #include "em_dummy_new_args.inc"
2520 !
2521 )
2522 USE module_domain
2523 USE module_configure
2524 USE module_dm
2525 USE module_timing
2526 IMPLICIT NONE
2527 !
2528 TYPE(domain), POINTER :: grid ! name of the grid being dereferenced (must be "grid")
2529 TYPE(domain), POINTER :: intermediate_grid
2530 TYPE(domain), POINTER :: ngrid
2531 #include <em_dummy_new_decl.inc>
2532 INTEGER nlev, msize
2533 INTEGER i,j,pig,pjg,cm,cn,nig,njg,retval,k
2534 INTEGER iparstrt,jparstrt,sw
2535 TYPE (grid_config_rec_type) :: config_flags
2536 REAL xv(500)
2537 INTEGER :: cids, cide, cjds, cjde, ckds, ckde, &
2538 cims, cime, cjms, cjme, ckms, ckme, &
2539 cips, cipe, cjps, cjpe, ckps, ckpe
2540 INTEGER :: iids, iide, ijds, ijde, ikds, ikde, &
2541 iims, iime, ijms, ijme, ikms, ikme, &
2542 iips, iipe, ijps, ijpe, ikps, ikpe
2543 INTEGER :: nids, nide, njds, njde, nkds, nkde, &
2544 nims, nime, njms, njme, nkms, nkme, &
2545 nips, nipe, njps, njpe, nkps, nkpe
2546
2547 INTEGER icoord, jcoord, idim_cd, jdim_cd, pgr
2548 INTEGER local_comm, myproc, nproc
2549
2550 CALL wrf_get_dm_communicator ( local_comm )
2551 CALL wrf_get_myproc( myproc )
2552 CALL wrf_get_nproc( nproc )
2553
2554 CALL get_ijk_from_grid ( grid , &
2555 cids, cide, cjds, cjde, ckds, ckde, &
2556 cims, cime, cjms, cjme, ckms, ckme, &
2557 cips, cipe, cjps, cjpe, ckps, ckpe )
2558 CALL get_ijk_from_grid ( intermediate_grid , &
2559 iids, iide, ijds, ijde, ikds, ikde, &
2560 iims, iime, ijms, ijme, ikms, ikme, &
2561 iips, iipe, ijps, ijpe, ikps, ikpe )
2562 CALL get_ijk_from_grid ( ngrid , &
2563 nids, nide, njds, njde, nkds, nkde, &
2564 nims, nime, njms, njme, nkms, nkme, &
2565 nips, nipe, njps, njpe, nkps, nkpe )
2566
2567 CALL nl_get_parent_grid_ratio ( ngrid%id, pgr )
2568 CALL nl_get_i_parent_start ( intermediate_grid%id, iparstrt )
2569 CALL nl_get_j_parent_start ( intermediate_grid%id, jparstrt )
2570 CALL nl_get_shw ( intermediate_grid%id, sw )
2571 icoord = iparstrt - sw
2572 jcoord = jparstrt - sw
2573 idim_cd = iide - iids + 1
2574 jdim_cd = ijde - ijds + 1
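! icoord/jcoord locate the intermediate (coarse-resolution) domain on the
! parent grid (parent start minus the stencil half-width sw), and
! idim_cd/jdim_cd are its full domain extents; these are set up for the
! generated pack code included below.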
2575
2576 nlev = ckde - ckds + 1
2577
2578 #include "em_nest_interpdown_pack.inc"
2579
2580 CALL rsl_lite_bcast_msgs( myproc, nproc, local_comm )
2581
2582 RETURN
2583 END SUBROUTINE interp_domain_em_part1
2584
2585 !------------------------------------------------------------------
2586
2587 SUBROUTINE interp_domain_em_part2 ( grid, ngrid, config_flags &
2588 !
2589 #include "em_dummy_new_args.inc"
2590 !
2591 )
2592 USE module_domain
2593 USE module_configure
2594 USE module_dm
2595 IMPLICIT NONE
2596 !
2597 TYPE(domain), POINTER :: grid ! name of the grid being dereferenced (must be "grid")
2598 TYPE(domain), POINTER :: ngrid
2599 #include <em_dummy_new_decl.inc>
2600 INTEGER nlev, msize
2601 INTEGER i,j,pig,pjg,cm,cn,nig,njg,retval,k
2602 TYPE (grid_config_rec_type) :: config_flags
2603 REAL xv(500)
2604 INTEGER :: cids, cide, cjds, cjde, ckds, ckde, &
2605 cims, cime, cjms, cjme, ckms, ckme, &
2606 cips, cipe, cjps, cjpe, ckps, ckpe
2607 INTEGER :: nids, nide, njds, njde, nkds, nkde, &
2608 nims, nime, njms, njme, nkms, nkme, &
2609 nips, nipe, njps, njpe, nkps, nkpe
2610 INTEGER :: ids, ide, jds, jde, kds, kde, &
2611 ims, ime, jms, jme, kms, kme, &
2612 ips, ipe, jps, jpe, kps, kpe
2613 INTEGER myproc
2614 INTEGER ierr
2615
2616 CALL get_ijk_from_grid ( grid , &
2617 cids, cide, cjds, cjde, ckds, ckde, &
2618 cims, cime, cjms, cjme, ckms, ckme, &
2619 cips, cipe, cjps, cjpe, ckps, ckpe )
2620 CALL get_ijk_from_grid ( ngrid , &
2621 nids, nide, njds, njde, nkds, nkde, &
2622 nims, nime, njms, njme, nkms, nkme, &
2623 nips, nipe, njps, njpe, nkps, nkpe )
2624
2625 nlev = ckde - ckds + 1
2626
2627 #include "em_nest_interpdown_unpack.inc"
2628
2629 CALL get_ijk_from_grid ( grid , &
2630 ids, ide, jds, jde, kds, kde, &
2631 ims, ime, jms, jme, kms, kme, &
2632 ips, ipe, jps, jpe, kps, kpe )
2633
2634 #include "HALO_EM_INTERP_DOWN.inc"
2635
2636 # include "em_nest_interpdown_interp.inc"
2637
2638 RETURN
2639 END SUBROUTINE interp_domain_em_part2
2640
2641 !------------------------------------------------------------------
2642
2643 SUBROUTINE feedback_nest_prep ( grid, config_flags &
2644 !
2645 #include "em_dummy_new_args.inc"
2646 !
2647 )
2648 USE module_domain
2649 USE module_configure
2650 USE module_dm
2651 USE module_state_description
2652 IMPLICIT NONE
2653 !
2654 TYPE(domain), TARGET :: grid ! name of the grid being dereferenced (must be "grid")
2655 TYPE (grid_config_rec_type) :: config_flags ! configuration flags; has vertical dim of
2656 ! soil temp, moisture, etc., and vertical dim
2657 ! of soil categories
2658 #include <em_dummy_new_decl.inc>
2659
2660 INTEGER :: ids, ide, jds, jde, kds, kde, &
2661 ims, ime, jms, jme, kms, kme, &
2662 ips, ipe, jps, jpe, kps, kpe
2663
2664 INTEGER :: idum1, idum2
2665
2666
2667 CALL get_ijk_from_grid ( grid , &
2668 ids, ide, jds, jde, kds, kde, &
2669 ims, ime, jms, jme, kms, kme, &
2670 ips, ipe, jps, jpe, kps, kpe )
2671
2672 #ifdef DM_PARALLEL
2673 #include "HALO_EM_INTERP_UP.inc"
2674 #endif
2675
2676 END SUBROUTINE feedback_nest_prep
2677
2678 !------------------------------------------------------------------
2679
2680 SUBROUTINE feedback_domain_em_part1 ( grid, ngrid, config_flags &
2681 !
2682 #include "em_dummy_new_args.inc"
2683 !
2684 )
2685 USE module_domain
2686 USE module_configure
2687 USE module_dm
2688 IMPLICIT NONE
2689 !
2690 TYPE(domain), POINTER :: grid ! name of the grid being dereferenced (must be "grid")
2691 TYPE(domain), POINTER :: ngrid
2692 #include <em_dummy_new_decl.inc>
2693 INTEGER nlev, msize
2694 INTEGER i,j,pig,pjg,cm,cn,nig,njg,retval,k
2695 TYPE(domain), POINTER :: xgrid
2696 TYPE (grid_config_rec_type) :: config_flags, nconfig_flags
2697 REAL xv(500)
2698 INTEGER :: cids, cide, cjds, cjde, ckds, ckde, &
2699 cims, cime, cjms, cjme, ckms, ckme, &
2700 cips, cipe, cjps, cjpe, ckps, ckpe
2701 INTEGER :: nids, nide, njds, njde, nkds, nkde, &
2702 nims, nime, njms, njme, nkms, nkme, &
2703 nips, nipe, njps, njpe, nkps, nkpe
2704 INTEGER local_comm, myproc, nproc, idum1, idum2
2705
2706 INTERFACE
2707 SUBROUTINE feedback_nest_prep ( grid, config_flags &
2708 !
2709 #include "em_dummy_new_args.inc"
2710 !
2711 )
2712 USE module_domain
2713 USE module_configure
2714 USE module_dm
2715 USE module_state_description
2716 !
2717 TYPE (grid_config_rec_type) :: config_flags
2718 TYPE(domain), TARGET :: grid
2719 #include <em_dummy_new_decl.inc>
2720 END SUBROUTINE feedback_nest_prep
2721 END INTERFACE
2722 !
2723
2724 CALL wrf_get_dm_communicator ( local_comm )
2725 CALL wrf_get_myproc( myproc )
2726 CALL wrf_get_nproc( nproc )
2727
2728 !
2729 ! intermediate grid
2730 CALL get_ijk_from_grid ( grid , &
2731 cids, cide, cjds, cjde, ckds, ckde, &
2732 cims, cime, cjms, cjme, ckms, ckme, &
2733 cips, cipe, cjps, cjpe, ckps, ckpe )
2734 ! nest grid
2735 CALL get_ijk_from_grid ( ngrid , &
2736 nids, nide, njds, njde, nkds, nkde, &
2737 nims, nime, njms, njme, nkms, nkme, &
2738 nips, nipe, njps, njpe, nkps, nkpe )
2739
2740 nlev = ckde - ckds + 1
2741
2742 ips_save = ngrid%i_parent_start ! used in feedback_domain_em_part2 below
2743 jps_save = ngrid%j_parent_start
2744 ipe_save = ngrid%i_parent_start + (nide-nids+1) / ngrid%parent_grid_ratio - 1
2745 jpe_save = ngrid%j_parent_start + (njde-njds+1) / ngrid%parent_grid_ratio - 1
2746
2747 ! feedback_nest_prep invokes a halo exchange on the ngrid. It is done this way
2748 ! in a separate routine because the HALOs need the data to be dereferenced from the
2749 ! grid data structure and, in this routine, the dereferenced fields are related to
2750 ! the intermediate domain, not the nest itself. Save the current grid pointer to intermediate
2751 ! domain, switch grid to point to ngrid, invoke feedback_nest_prep, then restore grid
2752 ! to point to intermediate domain.
2753
2754 CALL model_to_grid_config_rec ( ngrid%id , model_config_rec , nconfig_flags )
2755 CALL set_scalar_indices_from_config ( ngrid%id , idum1 , idum2 )
2756 xgrid => grid
2757 grid => ngrid
2758
2759 CALL feedback_nest_prep ( grid, nconfig_flags &
2760 !
2761 #include "em_actual_new_args.inc"
2762 !
2763 )
2764
2765 ! put things back so grid is intermediate grid
2766
2767 grid => xgrid
2768 CALL set_scalar_indices_from_config ( grid%id , idum1 , idum2 )
2769
2770 ! "interp" (basically copy) ngrid onto intermediate grid
2771
2772 #include "em_nest_feedbackup_interp.inc"
2773
2774 RETURN
2775 END SUBROUTINE feedback_domain_em_part1
2776
2777 !------------------------------------------------------------------
2778
2779 SUBROUTINE feedback_domain_em_part2 ( grid, intermediate_grid, ngrid , config_flags &
2780 !
2781 #include "em_dummy_new_args.inc"
2782 !
2783 )
2784 USE module_domain
2785 USE module_configure
2786 USE module_dm
2787 USE module_utility
2788 IMPLICIT NONE
2789
2790 !
2791 TYPE(domain), POINTER :: grid ! name of the grid being dereferenced (must be "grid")
2792 TYPE(domain), POINTER :: intermediate_grid
2793 TYPE(domain), POINTER :: ngrid
2794
2795 #include <em_dummy_new_decl.inc>
2796 INTEGER nlev, msize
2797 INTEGER i,j,pig,pjg,cm,cn,nig,njg,retval,k
2798 TYPE (grid_config_rec_type) :: config_flags
2799 REAL xv(500)
2800 INTEGER :: cids, cide, cjds, cjde, ckds, ckde, &
2801 cims, cime, cjms, cjme, ckms, ckme, &
2802 cips, cipe, cjps, cjpe, ckps, ckpe
2803 INTEGER :: nids, nide, njds, njde, nkds, nkde, &
2804 nims, nime, njms, njme, nkms, nkme, &
2805 nips, nipe, njps, njpe, nkps, nkpe
2806 INTEGER :: ids, ide, jds, jde, kds, kde, &
2807 ims, ime, jms, jme, kms, kme, &
2808 ips, ipe, jps, jpe, kps, kpe
2809 INTEGER icoord, jcoord, idim_cd, jdim_cd
2810 INTEGER local_comm, myproc, nproc
2811 INTEGER iparstrt, jparstrt, sw
2812 REAL nest_influence
2813
2814 character*256 :: timestr
2815 integer ierr
2816
2817 LOGICAL, EXTERNAL :: em_cd_feedback_mask
2818
2819 ! On entry to this routine,
2820 ! "grid" refers to the parent domain
2821 ! "intermediate_grid" refers to local copy of parent domain that overlies this patch of nest
2822 ! "ngrid" refers to the nest, which is only needed for smoothing on the parent because
2823 ! the nest feedback data has already been transferred during em_nest_feedbackup_interp
2824 ! in part1, above.
2825 ! The way these c and n dimensions are set, below, looks backwards but from the point
2826 ! of view of the RSL routine rsl_lite_to_parent_info(), the call to which is included by
2827 ! em_nest_feedbackup_pack, the "n" domain represents the parent domain and the "c" domain
2828 ! represents the intermediate domain. The backwards lookingness should be fixed in the gen_comms.c
2829 ! registry routine that accompanies RSL_LITE but, just as it's sometimes easier to put up a road
2830 ! sign that says "DIP" than fix the dip, at this point it was easier just to write this comment. JM
2831 !
2832 nest_influence = 1.
2833
2834 CALL domain_clock_get( grid, current_timestr=timestr )
2835
2836 CALL get_ijk_from_grid ( intermediate_grid , &
2837 cids, cide, cjds, cjde, ckds, ckde, &
2838 cims, cime, cjms, cjme, ckms, ckme, &
2839 cips, cipe, cjps, cjpe, ckps, ckpe )
2840 CALL get_ijk_from_grid ( grid , &
2841 nids, nide, njds, njde, nkds, nkde, &
2842 nims, nime, njms, njme, nkms, nkme, &
2843 nips, nipe, njps, njpe, nkps, nkpe )
2844
2845 CALL nl_get_i_parent_start ( intermediate_grid%id, iparstrt )
2846 CALL nl_get_j_parent_start ( intermediate_grid%id, jparstrt )
2847 CALL nl_get_shw ( intermediate_grid%id, sw )
2848 icoord = iparstrt - sw
2849 jcoord = jparstrt - sw
2850 idim_cd = cide - cids + 1
2851 jdim_cd = cjde - cjds + 1
2852
2853 nlev = ckde - ckds + 1
2854
2855 #include "em_nest_feedbackup_pack.inc"
2856
2857 CALL wrf_get_dm_communicator ( local_comm )
2858 CALL wrf_get_myproc( myproc )
2859 CALL wrf_get_nproc( nproc )
2860
2861 CALL rsl_lite_merge_msgs( myproc, nproc, local_comm )
2862
2863 #define NEST_INFLUENCE(A,B) A = B
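! In the EM path the feedback macro is a straight overwrite, consistent with
! nest_influence = 1. above; contrast the NMM version later in this file,
! which blends the parent and nest values with weight nest_influence.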
2864 #include "em_nest_feedbackup_unpack.inc"
2865
2866 ! smooth coarse grid
2867 CALL get_ijk_from_grid ( ngrid, &
2868 nids, nide, njds, njde, nkds, nkde, &
2869 nims, nime, njms, njme, nkms, nkme, &
2870 nips, nipe, njps, njpe, nkps, nkpe )
2871 CALL get_ijk_from_grid ( grid , &
2872 ids, ide, jds, jde, kds, kde, &
2873 ims, ime, jms, jme, kms, kme, &
2874 ips, ipe, jps, jpe, kps, kpe )
2875
2876 #include "HALO_EM_INTERP_UP.inc"
2877
2878 CALL get_ijk_from_grid ( grid , &
2879 cids, cide, cjds, cjde, ckds, ckde, &
2880 cims, cime, cjms, cjme, ckms, ckme, &
2881 cips, cipe, cjps, cjpe, ckps, ckpe )
2882
2883 #include "em_nest_feedbackup_smooth.inc"
2884
2885 RETURN
2886 END SUBROUTINE feedback_domain_em_part2
2887 #endif
2888
2889 #if ( NMM_CORE == 1 && NMM_NEST == 1 )
2890 !==============================================================================
2891 ! NMM nesting infrastructure extended from EM core. This is gopal's doing.
2892 !==============================================================================
2893
2894 SUBROUTINE interp_domain_nmm_part1 ( grid, intermediate_grid, ngrid, config_flags &
2895 !
2896 #include "nmm_dummy_args.inc"
2897 !
2898 )
2899 USE module_domain
2900 USE module_configure
2901 USE module_dm
2902 USE module_timing
2903 IMPLICIT NONE
2904 !
2905 TYPE(domain), POINTER :: grid ! name of the grid being dereferenced (must be "grid")
2906 TYPE(domain), POINTER :: intermediate_grid
2907 TYPE(domain), POINTER :: ngrid
2908 #include <nmm_dummy_decl.inc>
2909 INTEGER nlev, msize
2910 INTEGER i,j,pig,pjg,cm,cn,nig,njg,retval,k
2911 INTEGER iparstrt,jparstrt,sw
2912 TYPE (grid_config_rec_type) :: config_flags
2913 REAL xv(500)
2914 INTEGER :: cids, cide, cjds, cjde, ckds, ckde, &
2915 cims, cime, cjms, cjme, ckms, ckme, &
2916 cips, cipe, cjps, cjpe, ckps, ckpe
2917 INTEGER :: iids, iide, ijds, ijde, ikds, ikde, &
2918 iims, iime, ijms, ijme, ikms, ikme, &
2919 iips, iipe, ijps, ijpe, ikps, ikpe
2920 INTEGER :: nids, nide, njds, njde, nkds, nkde, &
2921 nims, nime, njms, njme, nkms, nkme, &
2922 nips, nipe, njps, njpe, nkps, nkpe
2923
2924 INTEGER icoord, jcoord, idim_cd, jdim_cd, pgr
2925 INTEGER local_comm, myproc, nproc
2926
2927 CALL wrf_get_dm_communicator ( local_comm )
2928 CALL wrf_get_myproc( myproc )
2929 CALL wrf_get_nproc( nproc )
2930
2931 #define COPY_IN
2932 #include <nmm_scalar_derefs.inc>
2933
2934 CALL get_ijk_from_grid ( grid , &
2935 cids, cide, cjds, cjde, ckds, ckde, &
2936 cims, cime, cjms, cjme, ckms, ckme, &
2937 cips, cipe, cjps, cjpe, ckps, ckpe )
2938 CALL get_ijk_from_grid ( intermediate_grid , &
2939 iids, iide, ijds, ijde, ikds, ikde, &
2940 iims, iime, ijms, ijme, ikms, ikme, &
2941 iips, iipe, ijps, ijpe, ikps, ikpe )
2942 CALL get_ijk_from_grid ( ngrid , &
2943 nids, nide, njds, njde, nkds, nkde, &
2944 nims, nime, njms, njme, nkms, nkme, &
2945 nips, nipe, njps, njpe, nkps, nkpe )
2946
2947 CALL nl_get_parent_grid_ratio ( ngrid%id, pgr )
2948 CALL nl_get_i_parent_start ( intermediate_grid%id, iparstrt )
2949 CALL nl_get_j_parent_start ( intermediate_grid%id, jparstrt )
2950 CALL nl_get_shw ( intermediate_grid%id, sw )
2951 icoord = iparstrt - sw
2952 jcoord = jparstrt - sw
2953 idim_cd = iide - iids + 1
2954 jdim_cd = ijde - ijds + 1
2955
2956 nlev = ckde - ckds + 1
2957
2958 #include "nmm_nest_interpdown_pack.inc"
2959
2960 CALL rsl_lite_bcast_msgs( myproc, nproc, local_comm )
2961
2962 #define COPY_OUT
2963 #include <nmm_scalar_derefs.inc>
2964 RETURN
2965 END SUBROUTINE interp_domain_nmm_part1
2966
2967 !------------------------------------------------------------------
2968
2969 SUBROUTINE interp_domain_nmm_part2 ( grid, ngrid, config_flags &
2970 !
2971 #include "nmm_dummy_args.inc"
2972 !
2973 )
2974 USE module_domain
2975 USE module_configure
2976 USE module_dm
2977 IMPLICIT NONE
2978 !
2979 TYPE(domain), POINTER :: grid ! name of the grid being dereferenced (must be "grid")
2980 TYPE(domain), POINTER :: ngrid
2981 #include <nmm_dummy_decl.inc>
2982 INTEGER nlev, msize
2983 INTEGER i,j,pig,pjg,cm,cn,nig,njg,retval,k
2984 TYPE (grid_config_rec_type) :: config_flags
2985 REAL xv(500)
2986 INTEGER :: cids, cide, cjds, cjde, ckds, ckde, &
2987 cims, cime, cjms, cjme, ckms, ckme, &
2988 cips, cipe, cjps, cjpe, ckps, ckpe
2989 INTEGER :: nids, nide, njds, njde, nkds, nkde, &
2990 nims, nime, njms, njme, nkms, nkme, &
2991 nips, nipe, njps, njpe, nkps, nkpe
2992 INTEGER :: ids, ide, jds, jde, kds, kde, &
2993 ims, ime, jms, jme, kms, kme, &
2994 ips, ipe, jps, jpe, kps, kpe
2995 INTEGER myproc
2996 INTEGER ierr
2997
2998 #ifdef DEREF_KLUDGE
2999 ! see http://www.mmm.ucar.edu/wrf/WG2/topics/deref_kludge.htm
3000 INTEGER :: sm31 , em31 , sm32 , em32 , sm33 , em33
3001 INTEGER :: sm31x, em31x, sm32x, em32x, sm33x, em33x
3002 INTEGER :: sm31y, em31y, sm32y, em32y, sm33y, em33y
3003 #endif
3004 #include "deref_kludge.h"
3005
3006 #define COPY_IN
3007 #include <nmm_scalar_derefs.inc>
3008 CALL get_ijk_from_grid ( grid , &
3009 cids, cide, cjds, cjde, ckds, ckde, &
3010 cims, cime, cjms, cjme, ckms, ckme, &
3011 cips, cipe, cjps, cjpe, ckps, ckpe )
3012 CALL get_ijk_from_grid ( ngrid , &
3013 nids, nide, njds, njde, nkds, nkde, &
3014 nims, nime, njms, njme, nkms, nkme, &
3015 nips, nipe, njps, njpe, nkps, nkpe )
3016
3017 nlev = ckde - ckds + 1
3018
3019 #include "nmm_nest_interpdown_unpack.inc"
3020
3021 CALL get_ijk_from_grid ( grid , &
3022 ids, ide, jds, jde, kds, kde, &
3023 ims, ime, jms, jme, kms, kme, &
3024 ips, ipe, jps, jpe, kps, kpe )
3025
3026 #include "HALO_NMM_INTERP_DOWN1.inc"
3027
3028 #include "nmm_nest_interpdown_interp.inc"
3029
3030 #define COPY_OUT
3031 #include <nmm_scalar_derefs.inc>
3032
3033 RETURN
3034 END SUBROUTINE interp_domain_nmm_part2
3035
3036 !------------------------------------------------------------------
3037
3038 SUBROUTINE force_domain_nmm_part1 ( grid, intermediate_grid, config_flags &
3039 !
3040 #include "nmm_dummy_args.inc"
3041 !
3042 )
3043 USE module_domain
3044 USE module_configure
3045 USE module_dm
3046 USE module_timing
3047 !
3048 TYPE(domain), POINTER :: grid ! name of the grid being dereferenced (must be "grid")
3049 TYPE(domain), POINTER :: intermediate_grid
3050 #include <nmm_dummy_decl.inc>
3051 INTEGER nlev, msize
3052 INTEGER i,j,pig,pjg,cm,cn,nig,njg,retval,k
3053 TYPE (grid_config_rec_type) :: config_flags
3054 REAL xv(500)
3055 INTEGER :: cids, cide, cjds, cjde, ckds, ckde, &
3056 cims, cime, cjms, cjme, ckms, ckme, &
3057 cips, cipe, cjps, cjpe, ckps, ckpe
3058 INTEGER :: nids, nide, njds, njde, nkds, nkde, &
3059 nims, nime, njms, njme, nkms, nkme, &
3060 nips, nipe, njps, njpe, nkps, nkpe
3061 #define COPY_IN
3062 #include <nmm_scalar_derefs.inc>
3063 !
3064 CALL get_ijk_from_grid ( grid , &
3065 cids, cide, cjds, cjde, ckds, ckde, &
3066 cims, cime, cjms, cjme, ckms, ckme, &
3067 cips, cipe, cjps, cjpe, ckps, ckpe )
3068
3069 CALL get_ijk_from_grid ( intermediate_grid , &
3070 nids, nide, njds, njde, nkds, nkde, &
3071 nims, nime, njms, njme, nkms, nkme, &
3072 nips, nipe, njps, njpe, nkps, nkpe )
3073
3074 nlev = ckde - ckds + 1
3075
3076 #include "nmm_nest_forcedown_pack.inc"
3077
3078 ! WRITE(0,*)'I have completed PACKING of BCs data successfully'
3079
3080 #define COPY_OUT
3081 #include <nmm_scalar_derefs.inc>
3082 RETURN
3083 END SUBROUTINE force_domain_nmm_part1
3084
3085 !==============================================================================================
3086
3087 SUBROUTINE force_domain_nmm_part2 ( grid, ngrid, config_flags &
3088 !
3089 #include "nmm_dummy_args.inc"
3090 !
3091 )
3092 USE module_domain
3093 USE module_configure
3094 USE module_dm
3095 IMPLICIT NONE
3096 !
3097 TYPE(domain), POINTER :: grid ! name of the grid being dereferenced (must be "grid")
3098 TYPE(domain), POINTER :: ngrid
3099 #include <nmm_dummy_decl.inc>
3100 INTEGER nlev, msize
3101 INTEGER i,j,pig,pjg,cm,cn,nig,njg,retval,k
3102 TYPE (grid_config_rec_type) :: config_flags
3103 REAL xv(500)
3104 INTEGER :: cids, cide, cjds, cjde, ckds, ckde, &
3105 cims, cime, cjms, cjme, ckms, ckme, &
3106 cips, cipe, cjps, cjpe, ckps, ckpe
3107 INTEGER :: nids, nide, njds, njde, nkds, nkde, &
3108 nims, nime, njms, njme, nkms, nkme, &
3109 nips, nipe, njps, njpe, nkps, nkpe
3110 INTEGER :: ids, ide, jds, jde, kds, kde, &
3111 ims, ime, jms, jme, kms, kme, &
3112 ips, ipe, jps, jpe, kps, kpe
3113 integer myproc
3114
3115 #ifdef DEREF_KLUDGE
3116 ! see http://www.mmm.ucar.edu/wrf/WG2/topics/deref_kludge.htm
3117 INTEGER :: sm31 , em31 , sm32 , em32 , sm33 , em33
3118 INTEGER :: sm31x, em31x, sm32x, em32x, sm33x, em33x
3119 INTEGER :: sm31y, em31y, sm32y, em32y, sm33y, em33y
3120 #endif
3121 #include "deref_kludge.h"
3122
3123 #define COPY_IN
3124 #include <nmm_scalar_derefs.inc>
3125
3126 CALL get_ijk_from_grid ( grid , &
3127 cids, cide, cjds, cjde, ckds, ckde, &
3128 cims, cime, cjms, cjme, ckms, ckme, &
3129 cips, cipe, cjps, cjpe, ckps, ckpe )
3130 CALL get_ijk_from_grid ( ngrid , &
3131 nids, nide, njds, njde, nkds, nkde, &
3132 nims, nime, njms, njme, nkms, nkme, &
3133 nips, nipe, njps, njpe, nkps, nkpe )
3134
3135 nlev = ckde - ckds + 1
3136
3137 #include "nmm_nest_interpdown_unpack.inc"
3138
3139 CALL get_ijk_from_grid ( grid , &
3140 ids, ide, jds, jde, kds, kde, &
3141 ims, ime, jms, jme, kms, kme, &
3142 ips, ipe, jps, jpe, kps, kpe )
3143
3144 #include "HALO_NMM_FORCE_DOWN1.inc"
3145
3146 ! code here to interpolate the data into the nested domain
3147 #include "nmm_nest_forcedown_interp.inc"
3148
3149 #define COPY_OUT
3150 #include <nmm_scalar_derefs.inc>
3151
3152 RETURN
3153 END SUBROUTINE force_domain_nmm_part2
3154
3155 !================================================================================
3156 !
3157 ! This routine exists only to call a halo on a domain (the nest). It
3158 ! gets called from feedback_domain_nmm_part1, below. This is needed
3159 ! because the halo code expects the fields being exchanged to have
3160 ! been dereferenced from the grid data structure, but in feedback_domain_nmm_part1
3161 ! the grid data structure points to the coarse domain, not the nest.
3162 ! And we want the halo exchange on the nest, so that the code in
3163 ! nmm_nest_feedbackup_interp.inc will work correctly on multi-p. JM 20040308
3164 !
3165
3166 SUBROUTINE feedback_nest_prep_nmm ( grid, config_flags &
3167 !
3168 #include "nmm_dummy_args.inc"
3169 !
3170 )
3171 USE module_domain
3172 USE module_configure
3173 USE module_dm
3174 USE module_state_description
3175 IMPLICIT NONE
3176 !
3177 TYPE(domain), TARGET :: grid ! name of the grid being dereferenced (must be "grid")
3178 TYPE (grid_config_rec_type) :: config_flags ! configuration flags; has vertical dim of
3179 ! soil temp, moisture, etc., and vertical dim
3180 ! of soil categories
3181 #include <nmm_dummy_decl.inc>
3182
3183 INTEGER :: ids, ide, jds, jde, kds, kde, &
3184 ims, ime, jms, jme, kms, kme, &
3185 ips, ipe, jps, jpe, kps, kpe
3186
3187 INTEGER :: idum1, idum2
3188
3189
3190 #ifdef DEREF_KLUDGE
3191 ! see http://www.mmm.ucar.edu/wrf/WG2/topics/deref_kludge.htm
3192 INTEGER :: sm31 , em31 , sm32 , em32 , sm33 , em33
3193 INTEGER :: sm31x, em31x, sm32x, em32x, sm33x, em33x
3194 INTEGER :: sm31y, em31y, sm32y, em32y, sm33y, em33y
3195 #endif
3196 #include "deref_kludge.h"
3197
3198 #define COPY_IN
3199 #include <nmm_scalar_derefs.inc>
3200
3201 CALL get_ijk_from_grid ( grid , &
3202 ids, ide, jds, jde, kds, kde, &
3203 ims, ime, jms, jme, kms, kme, &
3204 ips, ipe, jps, jpe, kps, kpe )
3205
3206 #ifdef DM_PARALLEL
3207 #include "HALO_NMM_WEIGHTS.inc"
3208 #endif
3209
3210 #define COPY_OUT
3211 #include <nmm_scalar_derefs.inc>
3212
3213 END SUBROUTINE feedback_nest_prep_nmm
3214
3215 !------------------------------------------------------------------
3216
3217 SUBROUTINE feedback_domain_nmm_part1 ( grid, ngrid, config_flags &
3218 !
3219 #include "nmm_dummy_args.inc"
3220 !
3221 )
3222 USE module_domain
3223 USE module_configure
3224 USE module_dm
3225 IMPLICIT NONE
3226 !
3227 TYPE(domain), POINTER :: grid ! name of the grid being dereferenced (must be "grid")
3228 TYPE(domain), POINTER :: ngrid
3229 #include <nmm_dummy_decl.inc>
3230 INTEGER nlev, msize
3231 INTEGER i,j,pig,pjg,cm,cn,nig,njg,retval,k
3232 TYPE(domain), POINTER :: xgrid
3233 TYPE (grid_config_rec_type) :: config_flags, nconfig_flags
3234 REAL xv(500)
3235 INTEGER :: cids, cide, cjds, cjde, ckds, ckde, &
3236 cims, cime, cjms, cjme, ckms, ckme, &
3237 cips, cipe, cjps, cjpe, ckps, ckpe
3238 INTEGER :: nids, nide, njds, njde, nkds, nkde, &
3239 nims, nime, njms, njme, nkms, nkme, &
3240 nips, nipe, njps, njpe, nkps, nkpe
3241 INTEGER local_comm, myproc, nproc, idum1, idum2
3242
3243 #ifdef DEREF_KLUDGE
3244 ! see http://www.mmm.ucar.edu/wrf/WG2/topics/deref_kludge.htm
3245 INTEGER :: sm31 , em31 , sm32 , em32 , sm33 , em33
3246 INTEGER :: sm31x, em31x, sm32x, em32x, sm33x, em33x
3247 INTEGER :: sm31y, em31y, sm32y, em32y, sm33y, em33y
3248 #endif
3249
3250 INTERFACE
3251 SUBROUTINE feedback_nest_prep_nmm ( grid, config_flags &
3252 !
3253 #include "nmm_dummy_args.inc"
3254 !
3255 )
3256 USE module_domain
3257 USE module_configure
3258 USE module_dm
3259 USE module_state_description
3260 !
3261 TYPE (grid_config_rec_type) :: config_flags
3262 TYPE(domain), TARGET :: grid
3263 #include <nmm_dummy_decl.inc>
3264 END SUBROUTINE feedback_nest_prep_nmm
3265 END INTERFACE
3266 !
3267 #define COPY_IN
3268 #include <nmm_scalar_derefs.inc>
3269
3270 CALL wrf_get_dm_communicator ( local_comm )
3271 CALL wrf_get_myproc( myproc )
3272 CALL wrf_get_nproc( nproc )
3273
3274
3275 !
3276 ! intermediate grid
3277 CALL get_ijk_from_grid ( grid , &
3278 cids, cide, cjds, cjde, ckds, ckde, &
3279 cims, cime, cjms, cjme, ckms, ckme, &
3280 cips, cipe, cjps, cjpe, ckps, ckpe )
3281 ! nest grid
3282 CALL get_ijk_from_grid ( ngrid , &
3283 nids, nide, njds, njde, nkds, nkde, &
3284 nims, nime, njms, njme, nkms, nkme, &
3285 nips, nipe, njps, njpe, nkps, nkpe )
3286
3287 nlev = ckde - ckds + 1
3288
3289 ips_save = ngrid%i_parent_start ! +1 not used in ipe_save & jpe_save
3290 jps_save = ngrid%j_parent_start ! because of one extra namelist point
3291 ipe_save = ngrid%i_parent_start + (nide-nids) / ngrid%parent_grid_ratio
3292 jpe_save = ngrid%j_parent_start + (njde-njds) / ngrid%parent_grid_ratio
3293
3294 ! feedback_nest_prep_nmm invokes a halo exchange on the ngrid. It is done this way
3295 ! in a separate routine because the HALOs need the data to be dereferenced from the
3296 ! grid data structure and, in this routine, the dereferenced fields are related to
3297 ! the intermediate domain, not the nest itself. Save the current grid pointer to intermediate
3298 ! domain, switch grid to point to ngrid, invoke feedback_nest_prep_nmm, then restore grid
3299 ! to point to intermediate domain.
3300
3301 CALL model_to_grid_config_rec ( ngrid%id , model_config_rec , nconfig_flags )
3302 CALL set_scalar_indices_from_config ( ngrid%id , idum1 , idum2 )
3303 xgrid => grid
3304 grid => ngrid
3305 #include "deref_kludge.h"
3306 CALL feedback_nest_prep_nmm ( grid, config_flags &
3307 !
3308 #include "nmm_actual_args.inc"
3309 !
3310 )
3311
3312 ! put things back so grid is intermediate grid
3313
3314 grid => xgrid
3315 CALL set_scalar_indices_from_config ( grid%id , idum1 , idum2 )
3316
3317 ! "interp" (basically copy) ngrid onto intermediate grid
3318
3319 #include "nmm_nest_feedbackup_interp.inc"
3320
3321 #define COPY_OUT
3322 #include <nmm_scalar_derefs.inc>
3323 RETURN
3324 END SUBROUTINE feedback_domain_nmm_part1
3325
3326 !------------------------------------------------------------------
3327
3328 SUBROUTINE feedback_domain_nmm_part2 ( grid, intermediate_grid, ngrid , config_flags &
3329 !
3330 #include "nmm_dummy_args.inc"
3331 !
3332 )
3333 USE module_domain
3334 USE module_configure
3335 USE module_dm
3336 USE module_utility
3337 IMPLICIT NONE
3338
3339 !
3340 TYPE(domain), POINTER :: grid ! name of the grid being dereferenced (must be "grid")
3341 TYPE(domain), POINTER :: intermediate_grid
3342 TYPE(domain), POINTER :: ngrid
3343
3344 #include <nmm_dummy_decl.inc>
3345 INTEGER nlev, msize
3346 INTEGER i,j,pig,pjg,cm,cn,nig,njg,retval,k
3347 TYPE (grid_config_rec_type) :: config_flags
3348 REAL xv(500)
3349 INTEGER :: cids, cide, cjds, cjde, ckds, ckde, &
3350 cims, cime, cjms, cjme, ckms, ckme, &
3351 cips, cipe, cjps, cjpe, ckps, ckpe
3352 INTEGER :: nids, nide, njds, njde, nkds, nkde, &
3353 nims, nime, njms, njme, nkms, nkme, &
3354 nips, nipe, njps, njpe, nkps, nkpe
3355 INTEGER :: ids, ide, jds, jde, kds, kde, &
3356 ims, ime, jms, jme, kms, kme, &
3357 ips, ipe, jps, jpe, kps, kpe
3358 INTEGER icoord, jcoord, idim_cd, jdim_cd
3359 INTEGER local_comm, myproc, nproc
3360 INTEGER iparstrt, jparstrt, sw
3361
3362 character*256 :: timestr
3363 integer ierr
3364
3365 REAL nest_influence
3366 LOGICAL, EXTERNAL :: nmm_cd_feedback_mask
3367 LOGICAL, EXTERNAL :: nmm_cd_feedback_mask_v
3368 #ifdef DEREF_KLUDGE
3369 ! see http://www.mmm.ucar.edu/wrf/WG2/topics/deref_kludge.htm
3370 INTEGER :: sm31 , em31 , sm32 , em32 , sm33 , em33
3371 INTEGER :: sm31x, em31x, sm32x, em32x, sm33x, em33x
3372 INTEGER :: sm31y, em31y, sm32y, em32y, sm33y, em33y
3373 #endif
3374 #include "deref_kludge.h"
3375
3376 #define COPY_IN
3377 #include <nmm_scalar_derefs.inc>
3378
3379 ! On entry to this routine,
3380 ! "grid" refers to the parent domain
3381 ! "intermediate_grid" refers to local copy of parent domain that overlies this patch of nest
3382 ! "ngrid" refers to the nest, which is only needed for smoothing on the parent because
3383 ! the nest feedback data has already been transferred during em_nest_feedbackup_interp
3384 ! in part1, above.
3385 ! The way these settings c and n dimensions are set, below, looks backwards but from the point
3386 ! of view of the RSL routine rsl_lite_to_parent_info(), call to which is included by
3387 ! em_nest_feedbackup_pack, the "n" domain represents the parent domain and the "c" domain
3388 ! represents the intermediate domain. The backwards lookingness should be fixed in the gen_comms.c
3389 ! registry routine that accompanies RSL_LITE but, just as it's sometimes easier to put up a road
3390 ! sign that says "DIP" than fix the dip, at this point it was easier just to write this comment. JM
3391 !
3392
3393 nest_influence = 0.5
3394 #define NEST_INFLUENCE(A,B) A = nest_influence*(B) + (1.0-nest_influence)*(A)
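! A minimal sketch (not part of the original source) of what the macro above does:
! with nest_influence = 0.5, the statement NEST_INFLUENCE(parent_val, nest_val)
! expands to
!   parent_val = 0.5*(nest_val) + (1.0-0.5)*(parent_val)
! i.e. the parent-domain value is replaced by an equally weighted blend of the nest
! feedback value and the existing parent value.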
3395
3396
3397 CALL domain_clock_get( grid, current_timestr=timestr )
3398
3399 CALL get_ijk_from_grid ( intermediate_grid , &
3400 cids, cide, cjds, cjde, ckds, ckde, &
3401 cims, cime, cjms, cjme, ckms, ckme, &
3402 cips, cipe, cjps, cjpe, ckps, ckpe )
3403 CALL get_ijk_from_grid ( grid , &
3404 nids, nide, njds, njde, nkds, nkde, &
3405 nims, nime, njms, njme, nkms, nkme, &
3406 nips, nipe, njps, njpe, nkps, nkpe )
3407
3408 nide = nide - 1 !dusan
3409 njde = njde - 1 !dusan
3410
3411 CALL nl_get_i_parent_start ( intermediate_grid%id, iparstrt )
3412 CALL nl_get_j_parent_start ( intermediate_grid%id, jparstrt )
3413 CALL nl_get_shw ( intermediate_grid%id, sw )
3414 icoord = iparstrt - sw
3415 jcoord = jparstrt - sw
3416 idim_cd = cide - cids + 1
3417 jdim_cd = cjde - cjds + 1
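! Illustrative only (hypothetical values): if the intermediate domain starts at
! iparstrt=10, jparstrt=20 on the parent grid with a stencil half-width sw=2, then
! icoord=8 and jcoord=18 are the parent-grid coordinates of the intermediate
! domain's origin including its halo, while idim_cd and jdim_cd are simply the
! intermediate domain's extents, cide-cids+1 and cjde-cjds+1.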
3418
3419 nlev = ckde - ckds + 1
3420
3421 #include "nmm_nest_feedbackup_pack.inc"
3422
3423 CALL wrf_get_dm_communicator ( local_comm )
3424 CALL wrf_get_myproc( myproc )
3425 CALL wrf_get_nproc( nproc )
3426
3427 CALL rsl_lite_merge_msgs( myproc, nproc, local_comm )
3428
3429 #include "nmm_nest_feedbackup_unpack.inc"
3430
3431
3432 ! smooth coarse grid
3433
3434 CALL get_ijk_from_grid ( ngrid, &
3435 nids, nide, njds, njde, nkds, nkde, &
3436 nims, nime, njms, njme, nkms, nkme, &
3437 nips, nipe, njps, njpe, nkps, nkpe )
3438 CALL get_ijk_from_grid ( grid , &
3439 ids, ide, jds, jde, kds, kde, &
3440 ims, ime, jms, jme, kms, kme, &
3441 ips, ipe, jps, jpe, kps, kpe )
3442
3443 #include "HALO_NMM_INTERP_UP.inc"
3444
3445 CALL get_ijk_from_grid ( grid , &
3446 cids, cide, cjds, cjde, ckds, ckde, &
3447 cims, cime, cjms, cjme, ckms, ckme, &
3448 cips, cipe, cjps, cjpe, ckps, ckpe )
3449
3450 #include "nmm_nest_feedbackup_smooth.inc"
3451
3452 #define COPY_OUT
3453 #include <nmm_scalar_derefs.inc>
3454 RETURN
3455 END SUBROUTINE feedback_domain_nmm_part2
3456
3457 !=================================================================================
3458 ! End of gopal's doing
3459 !=================================================================================
3460 #endif
3461
3462 !------------------------------------------------------------------
3463
3464 SUBROUTINE wrf_gatherv_real (Field, field_ofst, &
3465 my_count , & ! sendcount
3466 globbuf, glob_ofst , & ! recvbuf
3467 counts , & ! recvcounts
3468 displs , & ! displs
3469 root , & ! root
3470 communicator , & ! communicator
3471 ierr )
3472 USE module_dm
3473 IMPLICIT NONE
3474 INCLUDE 'mpif.h'
3475 INTEGER field_ofst, glob_ofst
3476 INTEGER my_count, communicator, root, ierr
3477 INTEGER , DIMENSION(*) :: counts, displs
3478 REAL, DIMENSION(*) :: Field, globbuf
3479
3480 CALL mpi_gatherv( Field( field_ofst ), & ! sendbuf
3481 my_count , & ! sendcount
3482 getrealmpitype() , & ! sendtype
3483 globbuf( glob_ofst ) , & ! recvbuf
3484 counts , & ! recvcounts
3485 displs , & ! displs
3486 getrealmpitype() , & ! recvtype
3487 root , & ! root
3488 communicator , & ! communicator
3489 ierr )
3490
3491 END SUBROUTINE wrf_gatherv_real
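! A minimal usage sketch (hypothetical, not from this file), assuming each task
! contributes a contiguous slab of "patch_size" reals and the result is gathered
! on task 0 of local_communicator; my_slab, globbuf and patch_size are placeholder
! names:
!
!   INTEGER :: p, ierr
!   INTEGER :: counts(ntasks), displs(ntasks)
!   counts(:) = patch_size                    ! recvcounts, one entry per task
!   displs(1) = 0
!   DO p = 2, ntasks
!     displs(p) = displs(p-1) + counts(p-1)   ! running offsets into globbuf
!   ENDDO
!   CALL wrf_gatherv_real( my_slab, 1, patch_size, globbuf, 1, &
!                          counts, displs, 0, local_communicator, ierr )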
3492
3493 SUBROUTINE wrf_gatherv_double (Field, field_ofst, &
3494 my_count , & ! sendcount
3495 globbuf, glob_ofst , & ! recvbuf
3496 counts , & ! recvcounts
3497 displs , & ! displs
3498 root , & ! root
3499 communicator , & ! communicator
3500 ierr )
3501 USE module_dm
3502 IMPLICIT NONE
3503 INCLUDE 'mpif.h'
3504 INTEGER field_ofst, glob_ofst
3505 INTEGER my_count, communicator, root, ierr
3506 INTEGER , DIMENSION(*) :: counts, displs
3507 ! This next declaration is REAL, not DOUBLE PRECISION, because it will be autopromoted
3508 ! to double precision by the compiler when WRF is compiled for 8-byte reals. The only reason
3509 ! for having this separate routine is so we pass the correct MPI type to mpi_gatherv;
3510 ! if we were not indexing the globbuf and Field arrays it would not even matter.
3511 REAL, DIMENSION(*) :: Field, globbuf
3512
3513 CALL mpi_gatherv( Field( field_ofst ), & ! sendbuf
3514 my_count , & ! sendcount
3515 MPI_DOUBLE_PRECISION , & ! sendtype
3516 globbuf( glob_ofst ) , & ! recvbuf
3517 counts , & ! recvcounts
3518 displs , & ! displs
3519 MPI_DOUBLE_PRECISION , & ! recvtype
3520 root , & ! root
3521 communicator , & ! communicator
3522 ierr )
3523
3524 END SUBROUTINE wrf_gatherv_double
3525
3526 SUBROUTINE wrf_gatherv_integer (Field, field_ofst, &
3527 my_count , & ! sendcount
3528 globbuf, glob_ofst , & ! recvbuf
3529 counts , & ! recvcounts
3530 displs , & ! displs
3531 root , & ! root
3532 communicator , & ! communicator
3533 ierr )
3534 IMPLICIT NONE
3535 INCLUDE 'mpif.h'
3536 INTEGER field_ofst, glob_ofst
3537 INTEGER my_count, communicator, root, ierr
3538 INTEGER , DIMENSION(*) :: counts, displs
3539 INTEGER, DIMENSION(*) :: Field, globbuf
3540
3541 CALL mpi_gatherv( Field( field_ofst ), & ! sendbuf
3542 my_count , & ! sendcount
3543 MPI_INTEGER , & ! sendtype
3544 globbuf( glob_ofst ) , & ! recvbuf
3545 counts , & ! recvcounts
3546 displs , & ! displs
3547 MPI_INTEGER , & ! recvtype
3548 root , & ! root
3549 communicator , & ! communicator
3550 ierr )
3551
3552 END SUBROUTINE wrf_gatherv_integer
3553
3554 ! wrf_scatterv_* routines added 20070124
3555 SUBROUTINE wrf_scatterv_real ( &
3556 globbuf, glob_ofst , & ! recvbuf
3557 counts , & ! recvcounts
3558 Field, field_ofst, &
3559 my_count , & ! sendcount
3560 displs , & ! displs
3561 root , & ! root
3562 communicator , & ! communicator
3563 ierr )
3564 USE module_dm
3565 IMPLICIT NONE
3566 INCLUDE 'mpif.h'
3567 INTEGER field_ofst, glob_ofst
3568 INTEGER my_count, communicator, root, ierr
3569 INTEGER , DIMENSION(*) :: counts, displs
3570 REAL, DIMENSION(*) :: Field, globbuf
3571
3572 CALL mpi_scatterv( &
3573 globbuf( glob_ofst ) , & ! recvbuf
3574 counts , & ! recvcounts
3575 displs , & ! displs
3576 getrealmpitype() , & ! recvtype
3577 Field( field_ofst ), & ! sendbuf
3578 my_count , & ! sendcount
3579 getrealmpitype() , & ! sendtype
3580 root , & ! root
3581 communicator , & ! communicator
3582 ierr )
3583
3584 END SUBROUTINE wrf_scatterv_real
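! A minimal usage sketch (hypothetical, not from this file). Note that the argument
! order differs from the gatherv routines above: the global buffer and counts come
! first, then the local field and its count. Scattering "patch_size" reals from task 0
! to every task of local_communicator might look like
!
!   CALL wrf_scatterv_real( globbuf, 1, counts, my_slab, 1, patch_size, &
!                           displs, 0, local_communicator, ierr )
!
! with counts and displs filled exactly as in the gatherv sketch above.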
3585
3586 SUBROUTINE wrf_scatterv_double ( &
3587 globbuf, glob_ofst , & ! recvbuf
3588 counts , & ! recvcounts
3589 Field, field_ofst, &
3590 my_count , & ! sendcount
3591 displs , & ! displs
3592 root , & ! root
3593 communicator , & ! communicator
3594 ierr )
3595 USE module_dm
3596 IMPLICIT NONE
3597 INCLUDE 'mpif.h'
3598 INTEGER field_ofst, glob_ofst
3599 INTEGER my_count, communicator, root, ierr
3600 INTEGER , DIMENSION(*) :: counts, displs
3601 ! This next declaration is REAL, not DOUBLE PRECISION, because it will be autopromoted
3602 ! to double precision by the compiler when WRF is compiled for 8-byte reals. The only reason
3603 ! for having this separate routine is so we pass the correct MPI type to mpi_scatterv;
3604 ! if we were not indexing the globbuf and Field arrays it would not even matter.
3605 REAL, DIMENSION(*) :: Field, globbuf
3606
3607 CALL mpi_scatterv( &
3608 globbuf( glob_ofst ) , & ! recvbuf
3609 counts , & ! recvcounts
3610 displs , & ! displs
3611 MPI_DOUBLE_PRECISION , & ! recvtype
3612 Field( field_ofst ), & ! sendbuf
3613 my_count , & ! sendcount
3614 MPI_DOUBLE_PRECISION , & ! sendtype
3615 root , & ! root
3616 communicator , & ! communicator
3617 ierr )
3618
3619 END SUBROUTINE wrf_scatterv_double
3620
3621 SUBROUTINE wrf_scatterv_integer ( &
3622 globbuf, glob_ofst , & ! recvbuf
3623 counts , & ! recvcounts
3624 Field, field_ofst, &
3625 my_count , & ! sendcount
3626 displs , & ! displs
3627 root , & ! root
3628 communicator , & ! communicator
3629 ierr )
3630 IMPLICIT NONE
3631 INCLUDE 'mpif.h'
3632 INTEGER field_ofst, glob_ofst
3633 INTEGER my_count, communicator, root, ierr
3634 INTEGER , DIMENSION(*) :: counts, displs
3635 INTEGER, DIMENSION(*) :: Field, globbuf
3636
3637 CALL mpi_scatterv( &
3638 globbuf( glob_ofst ) , & ! recvbuf
3639 counts , & ! recvcounts
3640 displs , & ! displs
3641 MPI_INTEGER , & ! recvtype
3642 Field( field_ofst ), & ! sendbuf
3643 my_count , & ! sendcount
3644 MPI_INTEGER , & ! sendtype
3645 root , & ! root
3646 communicator , & ! communicator
3647 ierr )
3648
3649 END SUBROUTINE wrf_scatterv_integer
3650 ! end of wrf_scatterv_* routines added 20070124
3651
3652 SUBROUTINE wrf_dm_define_comms ( grid )
3653 USE module_domain
3654 IMPLICIT NONE
3655 TYPE(domain) , INTENT (INOUT) :: grid
3656 RETURN
3657 END SUBROUTINE wrf_dm_define_comms
3658
3659 SUBROUTINE set_dm_debug
3660 USE module_dm
3661 IMPLICIT NONE
3662 dm_debug_flag = .TRUE.
3663 END SUBROUTINE set_dm_debug
3664 SUBROUTINE reset_dm_debug
3665 USE module_dm
3666 IMPLICIT NONE
3667 dm_debug_flag = .FALSE.
3668 END SUBROUTINE reset_dm_debug
3669 SUBROUTINE get_dm_debug ( arg )
3670 USE module_dm
3671 IMPLICIT NONE
3672 LOGICAL arg
3673 arg = dm_debug_flag
3674 END SUBROUTINE get_dm_debug
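! Minimal usage sketch (hypothetical): code elsewhere in WRF can toggle and query the
! module-level debug flag without touching module_dm data directly, e.g.
!
!   LOGICAL :: dbg
!   CALL set_dm_debug        ! turn distributed-memory debugging on
!   CALL get_dm_debug( dbg ) ! dbg is now .TRUE.
!   CALL reset_dm_debug      ! turn it back off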