solve_em.F
1 !WRF:MEDIATION_LAYER:SOLVER
2
3 SUBROUTINE solve_em ( grid , config_flags &
4 ! Actual arguments generated from Registry
5 #include "em_dummy_new_args.inc"
6 !
7 )
8
9 ! Driver layer modules
10 USE module_domain
11 USE module_configure
12 USE module_driver_constants
13 USE module_machine
14 USE module_tiles
15 USE module_dm
16 ! Mediation layer modules
17 ! Model layer modules
18 USE module_model_constants
19 USE module_small_step_em
20 USE module_em
21 USE module_big_step_utilities_em
22 USE module_bc
23 USE module_bc_em
24 USE module_solvedebug_em
25 USE module_physics_addtendc
26 USE module_diffusion_em
27 ! Registry generated module
28 USE module_state_description
29 USE module_radiation_driver
30 USE module_surface_driver
31 USE module_cumulus_driver
32 USE module_microphysics_driver
33 USE module_microphysics_zero_out
34 USE module_pbl_driver
35 USE module_fddagd_driver
36 USE module_fddaobs_driver
37 USE module_diagnostics
38 #ifdef WRF_CHEM
39 USE module_input_chem_data
40 USE module_chem_utilities
41 #endif
42 #ifdef WRF_PLUS
43 USE module_check
44 #endif
45
46 IMPLICIT NONE
47
48 ! Input data.
49
50 TYPE(domain) , TARGET :: grid
51
52 ! Definitions of dummy arguments to this routine (generated from Registry).
53 #include "em_dummy_new_decl.inc"
54 #include "namelist_defines2.inc"
55
56 ! Structure that contains run-time configuration (namelist) data for domain
57 TYPE (grid_config_rec_type) , INTENT(IN) :: config_flags
58
59 ! Local data
60
61 INTEGER :: k_start , k_end, its, ite, jts, jte
62 INTEGER :: ids , ide , jds , jde , kds , kde , &
63 ims , ime , jms , jme , kms , kme , &
64 ips , ipe , jps , jpe , kps , kpe
65
66 INTEGER :: imsx, imex, jmsx, jmex, kmsx, kmex, &
67 ipsx, ipex, jpsx, jpex, kpsx, kpex, &
68 imsy, imey, jmsy, jmey, kmsy, kmey, &
69 ipsy, ipey, jpsy, jpey, kpsy, kpey
70
71 INTEGER :: ij , iteration
72 INTEGER :: im , num_3d_m , ic , num_3d_c , is , num_3d_s
73 INTEGER :: loop
74 INTEGER :: sz
75
76 LOGICAL :: specified_bdy, channel_bdy
77
78 ! storage for tendencies and decoupled state (generated from Registry)
79
80 #include <em_i1_decl.inc>
81 ! Previous time level of tracer arrays now defined as i1 variables;
82 ! the state 4d arrays now redefined as 1-time level arrays in Registry.
83 ! Benefit: save memory in nested runs, since only 1 domain is active at a
84 ! time. Potential problem on stack-limited architectures: increases
85 ! amount of data on program stack by making these automatic arrays.
86
87 INTEGER :: rc
88 INTEGER :: number_of_small_timesteps, rk_step
89 INTEGER :: klevel,ijm,ijp,i,j,k,size1,size2 ! for prints/plots only
90 INTEGER :: idum1, idum2, dynamics_option
91
92 INTEGER :: rk_order, iwmax, jwmax, kwmax
93 REAL :: dt_rk, dts_rk, dtm, wmax
94 INTEGER :: l,kte,kk
95
96 ! urban related variables
97 INTEGER :: NUM_ROOF_LAYERS, NUM_WALL_LAYERS, NUM_ROAD_LAYERS ! urban
98
99 ! Define benchmarking timers if -DBENCH is compiled
100 #include <bench_solve_em_def.h>
101
102 !----------------------
103 ! Executable statements
104 !----------------------
105
106 ! Needed by some comm layers, e.g. RSL. If needed, em_data_calls.inc is
107 ! generated from the registry. The definition of REGISTER_I1 allows
108 ! I1 data to be communicated in this routine if necessary.
109 #ifdef DM_PARALLEL
110 # define REGISTER_I1
111 # include "em_data_calls.inc"
112 #endif
113
114 !<DESCRIPTION>
115 !<pre>
116 ! solve_em is the main driver for advancing a grid a single timestep.
117 ! It is a mediation-layer routine -> DM and SM calls are made where
118 ! needed for parallel processing.
119 !
120 ! solve_em can integrate the equations using 2 time-integration methods
121 !
122 ! - 3rd order Runge-Kutta time integration (recommended)
123 !
124 ! - 2nd order Runge-Kutta time integration
125 !
126 ! The main sections of solve_em are
127 !
128 ! (1) Runge-Kutta (RK) loop
129 !
130 ! (2) Non-timesplit physics (i.e., tendencies computed for updating
131 ! model state variables during the first RK sub-step (loop))
132 !
133 ! (3) Small (acoustic, sound) timestep loop - within the RK sub-steps
134 !
135 ! (4) scalar advance for moist and chem scalar variables (and TKE)
136 ! within the RK sub-steps.
137 !
138 ! (5) time-split physics (after the RK step), currently this includes
139 ! only microphysics
140 !
141 ! A more detailed description of these sections follows.
142 !</pre>
143 !</DESCRIPTION>
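!
! Illustrative outline only (not actual code; the real calls below carry the
! full argument lists): the nesting of the sections listed above is roughly
!
!   Runge_Kutta_loop: DO rk_step = 1, rk_order                   ! section (1)
!      IF ( rk_step == 1 ) non-timesplit physics drivers         ! section (2)
!      small_steps: DO iteration = 1, number_of_small_timesteps  ! section (3)
!         advance u,v ; advance mu,theta ; advance w,geopotential
!      END DO small_steps
!      advance moist, chem, scalar and TKE variables             ! section (4)
!   END DO Runge_Kutta_loop
!   time-split microphysics                                      ! section (5)
!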
144
145 ! Initialize timers if compiled with -DBENCH
146 #include <bench_solve_em_init.h>
147
148 ! set runge-kutta solver (2nd or 3rd order)
149
150 dynamics_option = config_flags%rk_ord
151
152 ! Obtain dimension information stored in the grid data structure.
153
154 CALL get_ijk_from_grid ( grid , &
155 ids, ide, jds, jde, kds, kde, &
156 ims, ime, jms, jme, kms, kme, &
157 ips, ipe, jps, jpe, kps, kpe, &
158 imsx, imex, jmsx, jmex, kmsx, kmex, &
159 ipsx, ipex, jpsx, jpex, kpsx, kpex, &
160 imsy, imey, jmsy, jmey, kmsy, kmey, &
161 ipsy, ipey, jpsy, jpey, kpsy, kpey )
162
163 k_start = kps
164 k_end = kpe
165
166 num_3d_m = num_moist
167 num_3d_c = num_chem
168 num_3d_s = num_scalar
169
170 ! Compute the starting and stopping locations for each tile, and the number of tiles.
171 ! See: http://www.mmm.ucar.edu/wrf/WG2/topics/settiles
172 CALL set_tiles ( grid , ids , ide , jds , jde , ips , ipe , jps , jpe )
173 !print *,grid%julian,grid%julday,' grid%julian,grid%julday in solve'
174
175 grid%itimestep = grid%itimestep + 1
176
177 !**********************************************************************
178 !
179 ! LET US BEGIN.......
180 !
181 !<DESCRIPTION>
182 !<pre>
183 ! (1) RK integration loop is named the "Runge_Kutta_loop:"
184 !
185 ! Predictor-corrector type time integration.
186 ! Advection terms are evaluated at time t for the predictor step,
187 ! and advection is re-evaluated with the latest predicted value for
188 ! each succeeding time corrector step
189 !
190 ! 2nd order Runge Kutta (rk_order = 2):
191 ! Step 1 is taken to the midpoint predictor, step 2 is the full step.
192 !
193 ! 3rd order Runge Kutta (rk_order = 3):
194 ! Step 1 is taken from t to dt/3, step 2 is from t to dt/2,
195 ! and step 3 is from t to dt.
196 !
197 ! non-timesplit physics are evaluated during the first RK step and
198 ! these physics tendencies are stored for use in each RK pass.
199 !</pre>
200 !</DESCRIPTION>
201 !**********************************************************************
202
203 #ifdef WRF_CHEM
204 !
205 ! prepare chem aerosols for advection before communication
206 !
207
208 kte=min(k_end,kde-1)
209 # ifdef DM_PARALLEL
210 if ( num_chem >= PARAM_FIRST_SCALAR ) then
211 !-----------------------------------------------------------------------
212 ! see matching halo calls below for stencils
213 !--------------------------------------------------------------
214 CALL wrf_debug ( 200 , ' call HALO_RK_CHEM' )
215 IF ( config_flags%h_mom_adv_order <= 4 ) THEN
216 # include "HALO_EM_CHEM_E_3.inc"
217 if( config_flags%progn > 0 ) then
218 # include "HALO_EM_SCALAR_E_3.inc"
219 end if
220 ELSE IF ( config_flags%h_mom_adv_order <= 6 ) THEN
221 # include "HALO_EM_CHEM_E_5.inc"
222 if( config_flags%progn > 0 ) then
223 # include "HALO_EM_SCALAR_E_5.inc"
224 end if
225 ELSE
226 WRITE(wrf_err_message,*)'solve_em: invalid h_mom_adv_order = ',config_flags%h_mom_adv_order
227 CALL wrf_error_fatal(TRIM(wrf_err_message))
228 ENDIF
229 endif
230 # endif
231 !--------------------------------------------------------------
232 #endif
233
234 rk_order = config_flags%rk_ord
235 IF (grid%time_step_sound == 0) THEN
236 ! auto-set option
237 ! This formula gives 4 for dt = 6*dx and 6 for dt = 10*dx (dt in seconds, dx in km), and returns even numbers only
238 grid%time_step_sound = max ( 2 * ( INT (300.*grid%dt/grid%dx-0.01) + 1 ), 4 )
239 WRITE(wrf_err_message,*)'dx, dt, time_step_sound=',grid%dx,grid%dt,grid%time_step_sound
240 CALL wrf_debug ( 50 , wrf_err_message )
241 ENDIF
242
243 grid%dts = grid%dt/float(grid%time_step_sound)
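!
! Worked example (illustrative values only, not taken from any namelist):
! with dx = 10000 m and dt = 60 s the auto-set formula above gives
!   2 * ( INT( 300.*60./10000. - 0.01 ) + 1 ) = 2 * ( 1 + 1 ) = 4
! so grid%time_step_sound = max(4,4) = 4 and grid%dts = 60./4. = 15 s.
!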
244
245 Runge_Kutta_loop: DO rk_step = 1, rk_order
246
247 ! Set the step size and number of small timesteps for
248 ! each part of the timestep
249
250 dtm = grid%dt
251 IF ( rk_order == 1 ) THEN
252
253 write(wrf_err_message,*)' leapfrog removed, error exit for dynamics_option = ',dynamics_option
254 CALL wrf_error_fatal( wrf_err_message )
255
256 ELSE IF ( rk_order == 2 ) THEN ! 2nd order Runge-Kutta timestep
257
258 IF ( rk_step == 1) THEN
259 dt_rk = 0.5*grid%dt
260 dts_rk = grid%dts
261 number_of_small_timesteps = grid%time_step_sound/2
262 ELSE
263 dt_rk = grid%dt
264 dts_rk = grid%dts
265 number_of_small_timesteps = grid%time_step_sound
266 ENDIF
267
268 ELSE IF ( rk_order == 3 ) THEN ! third order Runge-Kutta
269
270 IF ( rk_step == 1) THEN
271 dt_rk = grid%dt/3.
272 dts_rk = dt_rk
273 number_of_small_timesteps = 1
274 ELSE IF (rk_step == 2) THEN
275 dt_rk = 0.5*grid%dt
276 dts_rk = grid%dts
277 number_of_small_timesteps = grid%time_step_sound/2
278 ELSE
279 dt_rk = grid%dt
280 dts_rk = grid%dts
281 number_of_small_timesteps = grid%time_step_sound
282 ENDIF
283
284 ELSE
285
286 write(wrf_err_message,*)' unknown solver, error exit for dynamics_option = ',dynamics_option
287 CALL wrf_error_fatal( wrf_err_message )
288
289 END IF
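!
! Illustrative example (assumed values, not from the code): for rk_order = 3,
! dt = 60 s and time_step_sound = 4 (so grid%dts = 15 s), the three RK passes use
!   rk_step 1:  dt_rk = 20 s, dts_rk = 20 s, 1 acoustic step
!   rk_step 2:  dt_rk = 30 s, dts_rk = 15 s, 2 acoustic steps
!   rk_step 3:  dt_rk = 60 s, dts_rk = 15 s, 4 acoustic steps
!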
290
291 !
292 ! Time level t is in the *_2 variable in the first part
293 ! of the step, and in the *_1 variable after the predictor.
294 ! The latest predicted values are stored in the *_2 variables.
295 !
296 CALL wrf_debug ( 200 , ' call rk_step_prep ' )
297
298 BENCH_START(step_prep_tim)
299 !$OMP PARALLEL DO &
300 !$OMP PRIVATE ( ij )
301
302 DO ij = 1 , grid%num_tiles
303
304 CALL rk_step_prep ( config_flags, rk_step, &
305 grid%em_u_2, grid%em_v_2, grid%em_w_2, grid%em_t_2, grid%em_ph_2, grid%em_mu_2, &
306 moist, &
307 grid%em_ru, grid%em_rv, grid%em_rw, grid%em_ww, grid%em_php, grid%em_alt, grid%em_muu, grid%em_muv, &
308 grid%em_mub, grid%em_mut, grid%em_phb, grid%em_pb, grid%em_p, grid%em_al, grid%em_alb, &
309 cqu, cqv, cqw, &
310 grid%msfu, grid%msfv, grid%msft, &
311 grid%em_fnm, grid%em_fnp, grid%em_dnw, grid%rdx, grid%rdy, &
312 num_3d_m, &
313 ids, ide, jds, jde, kds, kde, &
314 ims, ime, jms, jme, kms, kme, &
315 grid%i_start(ij), grid%i_end(ij), &
316 grid%j_start(ij), grid%j_end(ij), &
317 k_start, k_end )
318
319 END DO
320 !$OMP END PARALLEL DO
321 BENCH_END(step_prep_tim)
322
323 #ifdef DM_PARALLEL
324 !-----------------------------------------------------------------------
325 ! Stencils for patch communications (WCS, 29 June 2001)
326 ! Note: the small size of this halo exchange reflects the
327 ! fact that we are carrying the uncoupled variables
328 ! as state variables in the mass coordinate model, as
329 ! opposed to the coupled variables as in the height
330 ! coordinate model.
331 !
332 ! * * * * *
333 ! * * * * * * * * *
334 ! * + * * + * * * + * *
335 ! * * * * * * * * *
336 ! * * * * *
337 !
338 ! 3D variables - note staggering! grid%em_ru(X), grid%em_rv(Y), grid%em_ww(Z), grid%em_php(Z)
339 !
340 !j grid%em_ru x
341 !j grid%em_rv x
342 !j grid%em_ww x
343 !j grid%em_php x
344 !j grid%em_alt x
345 !j grid%em_ph_2 x
346 !j grid%em_phb x
347 !
348 ! the following are 2D (xy) variables
349 !
350 !j grid%em_muu x
351 !j grid%em_muv x
352 !j grid%em_mut x
353 !--------------------------------------------------------------
354 # include "HALO_EM_A.inc"
355 #endif
356
357 ! set boundary conditions on variables
358 ! from big_step_prep for use in big_step_proc
359
360 #ifdef DM_PARALLEL
361 # include "PERIOD_BDY_EM_A.inc"
362 #endif
363
364 ! CALL set_tiles ( grid , ids , ide , jds , jde , ips-1 , ipe+1 , jps-1 , jpe+1 )
365
366 BENCH_START(set_phys_bc_tim)
367 !$OMP PARALLEL DO &
368 !$OMP PRIVATE ( ij )
369
370 DO ij = 1 , grid%num_tiles
371
372 CALL wrf_debug ( 200 , ' call rk_phys_bc_dry_1' )
373
374 CALL rk_phys_bc_dry_1( config_flags, grid%em_ru, grid%em_rv, grid%em_rw, grid%em_ww, &
375 grid%em_muu, grid%em_muv, grid%em_mut, grid%em_php, grid%em_alt, grid%em_p, &
376 ids, ide, jds, jde, kds, kde, &
377 ims, ime, jms, jme, kms, kme, &
378 ips, ipe, jps, jpe, kps, kpe, &
379 grid%i_start(ij), grid%i_end(ij), &
380 grid%j_start(ij), grid%j_end(ij), &
381 k_start, k_end )
382 !TBH: need this 2nd timestep and later
383 CALL set_physical_bc3d( grid%em_al, 'p', config_flags, &
384 ids, ide, jds, jde, kds, kde, &
385 ims, ime, jms, jme, kms, kme, &
386 ips, ipe, jps, jpe, kps, kpe, &
387 grid%i_start(ij), grid%i_end(ij), &
388 grid%j_start(ij), grid%j_end(ij), &
389 k_start , k_end )
390 CALL set_physical_bc3d( grid%em_ph_2, 'w', config_flags, &
391 ids, ide, jds, jde, kds, kde, &
392 ims, ime, jms, jme, kms, kme, &
393 ips, ipe, jps, jpe, kps, kpe, &
394 grid%i_start(ij), grid%i_end(ij), &
395 grid%j_start(ij), grid%j_end(ij), &
396 k_start, k_end )
397
398 END DO
399 !$OMP END PARALLEL DO
400 BENCH_END(set_phys_bc_tim)
401
402 rk_step_is_one : IF (rk_step == 1) THEN ! only need to initialize diffusion tendencies
403
404 ! initialize all tendencies to zero in order to update physics
405 ! tendencies first (separate from dry dynamics).
406
407 BENCH_START(init_zero_tend_tim)
408 !$OMP PARALLEL DO &
409 !$OMP PRIVATE ( ij )
410
411 DO ij = 1 , grid%num_tiles
412
413 #ifdef WRF_PLUS
414 if(TEST_init_zero_tendency) THEN
415 CALL t_init_zero_tendency ( ru_tendf, rv_tendf, rw_tendf, &
416 ph_tendf, t_tendf, tke_tend, &
417 mu_tendf, &
418 moist_tend,chem_tend,scalar_tend, &
419 num_3d_m,num_3d_c,num_3d_s, &
420 rk_step, &
421 ids, ide, jds, jde, kds, kde, &
422 ims, ime, jms, jme, kms, kme, &
423 grid%i_start(ij), grid%i_end(ij), &
424 grid%j_start(ij), grid%j_end(ij), &
425 k_start, k_end )
426 END IF
427 #endif
428 CALL wrf_debug ( 200 , ' call init_zero_tendency' )
429 CALL init_zero_tendency ( ru_tendf, rv_tendf, rw_tendf, &
430 ph_tendf, t_tendf, tke_tend, &
431 mu_tendf, &
432 moist_tend,chem_tend,scalar_tend, &
433 num_3d_m,num_3d_c,num_3d_s, &
434 rk_step, &
435 ids, ide, jds, jde, kds, kde, &
436 ims, ime, jms, jme, kms, kme, &
437 grid%i_start(ij), grid%i_end(ij), &
438 grid%j_start(ij), grid%j_end(ij), &
439 k_start, k_end )
440
441 END DO
442 !$OMP END PARALLEL DO
443 BENCH_END(init_zero_tend_tim)
444
445 #ifdef DM_PARALLEL
446 # include "HALO_EM_PHYS_A.inc"
447 #endif
448
449 !<DESCRIPTION>
450 !<pre>
451 !(2) The non-timesplit physics begins with a call to "phy_prep"
452 ! (which computes some diagnostic variables such as temperature,
453 ! pressure, u and v at p points, etc.). This is followed by
454 ! calls to the physics drivers:
455 !
456 ! radiation,
457 ! surface,
458 ! pbl,
459 ! cumulus,
460 ! 3D TKE and mixing.
461 !</pre>
462 !</DESCRIPTION>
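!
! Schematic only (the real coupling is done later by update_phy_ten, not here):
! each driver fills an uncoupled tendency array such as grid%rthblten, and on
! every RK pass these stored tendencies are folded into the dynamics
! tendencies, roughly as
!   t_tendf(i,k,j) = t_tendf(i,k,j) + grid%em_mut(i,j)*grid%rthblten(i,k,j)
! so the physics is computed once per model timestep but applied on each RK pass.
!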
463
464
465 BENCH_START(phy_prep_tim)
466 !$OMP PARALLEL DO &
467 !$OMP PRIVATE ( ij )
468 DO ij = 1 , grid%num_tiles
469
470 #ifdef WRF_PLUS
471 if(TEST_phy_prep) THEN
472 CALL t_phy_prep ( config_flags, &
473 grid%em_mut, grid%em_u_2, grid%em_v_2, grid%em_p, grid%em_pb, grid%em_alt, &
474 grid%em_ph_2, grid%em_phb, grid%em_t_2, grid%tsk, moist, num_3d_m, &
475 mu_3d, rho, &
476 th_phy, p_phy, pi_phy, u_phy, v_phy, &
477 p8w, t_phy, t8w, grid%em_z, z_at_w, &
478 dz8w, grid%em_fnm, grid%em_fnp, &
479 grid%RTHRATEN, &
480 grid%RTHBLTEN, grid%RUBLTEN, grid%RVBLTEN, &
481 grid%RQVBLTEN, grid%RQCBLTEN, grid%RQIBLTEN, &
482 grid%RTHCUTEN, grid%RQVCUTEN, grid%RQCCUTEN, &
483 grid%RQRCUTEN, grid%RQICUTEN, grid%RQSCUTEN, &
484 grid%RTHFTEN, grid%RQVFTEN, &
485 ids, ide, jds, jde, kds, kde, &
486 ims, ime, jms, jme, kms, kme, &
487 grid%i_start(ij), grid%i_end(ij), &
488 grid%j_start(ij), grid%j_end(ij), &
489 k_start, k_end )
490 END IF
491 #endif
492 CALL wrf_debug ( 200 , ' call phy_prep' )
493 CALL phy_prep ( config_flags, &
494 grid%em_mut, grid%em_muu, grid%em_muv, grid%em_u_2, &
495 grid%em_v_2, grid%em_p, grid%em_pb, grid%em_alt, &
496 grid%em_ph_2, grid%em_phb, grid%em_t_2, grid%tsk, moist, num_3d_m, &
497 mu_3d, rho, &
498 th_phy, p_phy, pi_phy, u_phy, v_phy, &
499 p8w, t_phy, t8w, grid%em_z, z_at_w, &
500 dz8w, grid%em_fnm, grid%em_fnp, &
501 grid%rthraten, &
502 grid%rthblten, grid%rublten, grid%rvblten, &
503 grid%rqvblten, grid%rqcblten, grid%rqiblten, &
504 grid%rthcuten, grid%rqvcuten, grid%rqccuten, &
505 grid%rqrcuten, grid%rqicuten, grid%rqscuten, &
506 grid%rthften, grid%rqvften, &
507 grid%RUNDGDTEN, grid%RVNDGDTEN, grid%RTHNDGDTEN, &
508 grid%RQVNDGDTEN, grid%RMUNDGDTEN, &
509 ids, ide, jds, jde, kds, kde, &
510 ims, ime, jms, jme, kms, kme, &
511 grid%i_start(ij), grid%i_end(ij), &
512 grid%j_start(ij), grid%j_end(ij), &
513 k_start, k_end )
514 ENDDO
515 !$OMP END PARALLEL DO
516
517 BENCH_END(phy_prep_tim)
518
519 ! physics to implement
520
521 ! CALL set_tiles ( grid , ids , ide-1 , jds , jde-1 ips , ipe , jps , jpe )
522
523 ! Open MP loops are in physics drivers
524 ! radiation
525
526 !-----------------------------------------------------------------
527 ! urban-related variables are added to the arguments of radiation_driver
528 !-----------------------------------------------------------------
529
530 CALL wrf_debug ( 200 , ' call radiation_driver' )
531 BENCH_START(rad_driver_tim)
532
533 CALL radiation_driver( &
534 & ACFRCV=grid%acfrcv ,ACFRST=grid%acfrst ,ALBEDO=grid%albedo &
535 & ,CFRACH=grid%cfrach ,CFRACL=grid%cfracl ,CFRACM=grid%cfracm &
536 & ,CUPPT=grid%cuppt ,CZMEAN=grid%czmean ,DT=grid%dt &
537 & ,DZ8W=dz8w ,EMISS=grid%emiss ,GLW=grid%glw &
538 & ,GMT=grid%gmt ,GSW=grid%gsw ,HBOT=grid%hbot &
539 & ,HTOP=grid%htop ,HBOTR=grid%hbotr, HTOPR=grid%htopr ,ICLOUD=config_flags%icloud &
540 & ,ITIMESTEP=grid%itimestep,JULDAY=grid%julday, JULIAN=grid%julian &
541 & ,JULYR=grid%julyr ,LW_PHYSICS=config_flags%ra_lw_physics &
542 & ,NCFRCV=grid%ncfrcv ,NCFRST=grid%ncfrst ,NPHS=1 &
543 & ,P8W=p8w ,P=p_phy ,PI=pi_phy &
544 & ,RADT=grid%radt ,RA_CALL_OFFSET=grid%ra_call_offset &
545 & ,RHO=rho ,RLWTOA=grid%rlwtoa &
546 & ,RSWTOA=grid%rswtoa ,RTHRATEN=grid%rthraten &
547 & ,RTHRATENLW=grid%rthratenlw &
548 & ,RTHRATENSW=grid%rthratensw ,SNOW=grid%snow &
549 & ,STEPRA=grid%stepra ,SWDOWN=grid%swdown ,SWDOWNC=grid%swdownc &
550 & ,SW_PHYSICS=config_flags%ra_sw_physics ,T8W=t8w &
551 & ,T=t_phy ,TAUCLDC=grid%taucldc ,TAUCLDI=grid%taucldi &
552 & ,TSK=grid%tsk ,VEGFRA=grid%vegfra ,WARM_RAIN=grid%warm_rain &
553 & ,XICE=grid%xice &
554 & ,XLAND=grid%xland ,XLAT=grid%xlat ,XLONG=grid%xlong &
555 !Optional urban
556 & ,DECLIN_URB=grid%declin_urb ,COSZ_URB2D=grid%cosz_urb2d &
557 & ,OMG_URB2D=grid%omg_urb2d &
558 !
559 & ,Z=grid%em_z &
560 & ,LEVSIZ=grid%levsiz, N_OZMIXM=num_ozmixm &
561 & ,N_AEROSOLC=num_aerosolc &
562 & ,PAERLEV=grid%paerlev &
563 & ,CAM_ABS_DIM1=grid%cam_abs_dim1, CAM_ABS_DIM2=grid%cam_abs_dim2 &
564 & ,CAM_ABS_FREQ_S=grid%cam_abs_freq_s &
565 & ,XTIME=grid%xtime &
566 ! indexes
567 & ,IDS=ids,IDE=ide, JDS=jds,JDE=jde, KDS=kds,KDE=kde &
568 & ,IMS=ims,IME=ime, JMS=jms,JME=jme, KMS=kms,KME=kme &
569 & ,i_start=grid%i_start,i_end=min(grid%i_end, ide-1) &
570 & ,j_start=grid%j_start,j_end=min(grid%j_end, jde-1) &
571 & ,kts=k_start, kte=min(k_end,kde-1) &
572 & ,num_tiles=grid%num_tiles &
573 ! Optional
574 & , CLDFRA=grid%cldfra &
575 & , PB=grid%em_pb &
576 & , F_ICE_PHY=grid%f_ice_phy,F_RAIN_PHY=grid%f_rain_phy &
577 & , QV=moist(ims,kms,jms,P_QV), F_QV=F_QV &
578 & , QC=moist(ims,kms,jms,P_QC), F_QC=F_QC &
579 & , QR=moist(ims,kms,jms,P_QR), F_QR=F_QR &
580 & , QI=moist(ims,kms,jms,P_QI), F_QI=F_QI &
581 & , QS=moist(ims,kms,jms,P_QS), F_QS=F_QS &
582 & , QG=moist(ims,kms,jms,P_QG), F_QG=F_QG &
583 & , QNDROP=scalar(ims,kms,jms,P_QNDROP), F_QNDROP=F_QNDROP &
584 #ifdef ACFLUX
585 & ,ACSWUPT=acswupt ,ACSWUPTC=acswuptc &
586 & ,ACSWDNT=acswdnt ,ACSWDNTC=acswdntc &
587 & ,ACSWUPB=acswupb ,ACSWUPBC=acswupbc &
588 & ,ACSWDNB=acswdnb ,ACSWDNBC=acswdnbc &
589 & ,ACLWUPT=aclwupt ,ACLWUPTC=aclwuptc &
590 & ,ACLWDNT=aclwdnt ,ACLWDNTC=aclwdntc &
591 & ,ACLWUPB=aclwupb ,ACLWUPBC=aclwupbc &
592 & ,ACLWDNB=aclwdnb ,ACLWDNBC=aclwdnbc &
593 & ,SWUPT=swupt ,SWUPTC=swuptc &
594 & ,SWDNT=swdnt ,SWDNTC=swdntc &
595 & ,SWUPB=swupb ,SWUPBC=swupbc &
596 & ,SWDNB=swdnb ,SWDNBC=swdnbc &
597 & ,LWUPT=lwupt ,LWUPTC=lwuptc &
598 & ,LWDNT=lwdnt ,LWDNTC=lwdntc &
599 & ,LWUPB=lwupb ,LWUPBC=lwupbc &
600 & ,LWDNB=lwdnb ,LWDNBC=lwdnbc &
601 #endif
602 & ,LWCF=grid%lwcf &
603 & ,SWCF=grid%swcf &
604 & ,OLR=grid%olr &
605 & ,OZMIXM=grid%ozmixm, PIN=grid%pin &
606 & ,M_PS_1=grid%m_ps_1, M_PS_2=grid%m_ps_2, AEROSOLC_1=grid%aerosolc_1 &
607 & ,AEROSOLC_2=grid%aerosolc_2, M_HYBI0=grid%m_hybi &
608 & ,ABSTOT=grid%abstot, ABSNXT=grid%absnxt, EMSTOT=grid%emstot &
609 #ifdef WRF_CHEM
610 & ,CU_RAD_FEEDBACK=config_flags%cu_rad_feedback &
611 & ,AER_RA_FEEDBACK=config_flags%aer_ra_feedback &
612 & ,QC_ADJUST=grid%GD_CLOUD_B , QI_ADJUST=grid%GD_CLOUD2_B &
613 & ,PM2_5_DRY=grid%pm2_5_dry, PM2_5_WATER=grid%pm2_5_water &
614 & ,PM2_5_DRY_EC=grid%pm2_5_dry_ec &
615 & ,TAUAER300=grid%tauaer1, TAUAER400=grid%tauaer2 & ! jcb
616 & ,TAUAER600=grid%tauaer3, TAUAER999=grid%tauaer4 & ! jcb
617 & ,GAER300=grid%gaer1, GAER400=grid%gaer2, GAER600=grid%gaer3, GAER999=grid%gaer4 & ! jcb
618 & ,WAER300=grid%waer1, WAER400=grid%waer2, WAER600=grid%waer3, WAER999=grid%waer4 & ! jcb
619 #endif
620 & )
621
622 BENCH_END(rad_driver_tim)
623
624 !********* Surface driver
625 ! surface
626
627 BENCH_START(surf_driver_tim)
628
629 !-----------------------------------------------------------------
630 ! urban-related variables are added to the arguments of surface_driver
631 !-----------------------------------------------------------------
632 NUM_ROOF_LAYERS = grid%num_soil_layers !urban
633 NUM_WALL_LAYERS = grid%num_soil_layers !urban
634 NUM_ROAD_LAYERS = grid%num_soil_layers !urban
635
636 CALL wrf_debug ( 200 , ' call surface_driver' )
637 CALL surface_driver( &
638 & ACSNOM=grid%acsnom ,ACSNOW=grid%acsnow ,AKHS=grid%akhs &
639 & ,AKMS=grid%akms ,ALBBCK=grid%albbck ,ALBEDO=grid%albedo &
640 & ,BR=br ,CANWAT=grid%canwat ,CHKLOWQ=chklowq &
641 & ,CT=grid%ct ,DT=grid%dt ,DX=grid%dx &
642 & ,DZ8W=dz8w ,DZS=grid%dzs ,FLHC=grid%flhc &
643 & ,FLQC=grid%flqc ,GLW=grid%glw ,GRDFLX=grid%grdflx &
644 & ,GSW=grid%gsw ,SWDOWN=grid%swdown ,GZ1OZ0=gz1oz0 ,HFX=grid%hfx &
645 & ,HT=grid%ht ,IFSNOW=config_flags%ifsnow ,ISFFLX=config_flags%isfflx &
646 & ,ISLTYP=grid%isltyp ,ITIMESTEP=grid%itimestep &
647 & ,IVGTYP=grid%ivgtyp ,LH=grid%lh ,LOWLYR=grid%lowlyr &
648 & ,MAVAIL=grid%mavail ,NUM_SOIL_LAYERS=config_flags%num_soil_layers &
649 & ,P8W=p8w ,PBLH=grid%pblh ,PI_PHY=pi_phy &
650 & ,PSFC=grid%psfc ,PSHLTR=pshltr ,PSIH=psih &
651 & ,PSIM=psim ,P_PHY=p_phy ,Q10=q10 &
652 & ,Q2=grid%q2 ,QFX=grid%qfx ,QSFC=grid%qsfc &
653 & ,QSHLTR=qshltr ,QZ0=grid%qz0 ,RAINCV=grid%raincv &
654 & ,RA_LW_PHYSICS=config_flags%ra_lw_physics ,RHO=rho &
655 & ,RMOL=grid%rmol ,SFCEVP=grid%sfcevp ,SFCEXC=grid%sfcexc &
656 & ,SFCRUNOFF=grid%sfcrunoff &
657 & ,SF_SFCLAY_PHYSICS=config_flags%sf_sfclay_physics &
658 & ,SF_SURFACE_PHYSICS=config_flags%sf_surface_physics ,SH2O=grid%sh2o &
659 & ,SHDMAX=grid%shdmax ,SHDMIN=grid%shdmin ,SMOIS=grid%smois &
660 & ,SMSTAV=grid%smstav ,SMSTOT=grid%smstot ,SNOALB=grid%snoalb &
661 & ,SNOW=grid%snow ,SNOWC=grid%snowc ,SNOWH=grid%snowh &
662 & ,SST=grid%sst ,SST_UPDATE=grid%sst_update &
663 & ,STEPBL=grid%stepbl ,TH10=th10 ,TH2=grid%th2 &
664 & ,THZ0=grid%thz0 ,TH_PHY=th_phy ,TKE_MYJ=grid%tke_myj &
665 & ,TMN=grid%tmn ,TSHLTR=tshltr ,TSK=grid%tsk &
666 & ,TSLB=grid%tslb ,T_PHY=t_phy ,U10=grid%u10 &
667 & ,URATX=grid%uratx ,VRATX=grid%vratx ,TRATX=grid%tratx &
668 & ,UDRUNOFF=grid%udrunoff ,UST=grid%ust ,UZ0=grid%uz0 &
669 & ,U_FRAME=grid%u_frame ,U_PHY=u_phy ,V10=grid%v10 &
670 & ,VEGFRA=grid%vegfra ,VZ0=grid%vz0 ,V_FRAME=grid%v_frame &
671 & ,V_PHY=v_phy ,WARM_RAIN=grid%warm_rain &
672 & ,WSPD=wspd ,XICE=grid%xice ,XLAND=grid%xland &
673 & ,Z0=grid%z0 ,Z=grid%em_z ,ZNT=grid%znt &
674 & ,ZS=grid%zs &
675 & ,DECLIN_URB=grid%declin_urb ,COSZ_URB2D=grid%cosz_urb2d & !I urban
676 & ,OMG_URB2D=grid%omg_urb2d ,xlat_urb2d=grid%XLAT & !I urban
677 & ,NUM_ROOF_LAYERS=num_roof_layers & !I urban
678 & ,NUM_WALL_LAYERS=num_wall_layers & !I urban
679 & ,NUM_ROAD_LAYERS=num_road_layers &
680 & ,DZR=grid%dzr ,DZB=grid%dzb ,DZG=grid%dzg & !I urban
681 & ,TR_URB2D=grid%tr_urb2d ,TB_URB2D=grid%tb_urb2d &
682 & ,TG_URB2D=grid%tg_urb2d & !H urban
683 & ,TC_URB2D=grid%tc_urb2d ,QC_URB2D=grid%qc_urb2d & !H urban
684 & ,UC_URB2D=grid%uc_urb2d & !H urban
685 & ,XXXR_URB2D=grid%xxxr_urb2d &
686 & ,XXXB_URB2D=grid%xxxb_urb2d & !H urban
687 & ,XXXG_URB2D=grid%xxxg_urb2d &
688 & ,XXXC_URB2D=grid%xxxc_urb2d & !H urban
689 & ,TRL_URB3D=grid%trl_urb3d ,TBL_URB3D=grid%tbl_urb3d & !H urban
690 & ,TGL_URB3D=grid%tgl_urb3d & !H urban
691 & ,SH_URB2D=grid%sh_urb2d ,LH_URB2D=grid%lh_urb2d &
692 & ,G_URB2D=grid%g_urb2d & !H urban
693 & ,RN_URB2D=grid%rn_urb2d , TS_URB2D=grid%ts_urb2d & !H urban
694 & ,FRC_URB2D=grid%frc_urb2d & !H urban
695 & ,UTYPE_URB2D=grid%utype_urb2d & !H urban
696 & ,ucmcall=grid%ucmcall & !H urban
697 ! Indexes
698 & ,IDS=ids,IDE=ide, JDS=jds,JDE=jde, KDS=kds,KDE=kde &
699 & ,IMS=ims,IME=ime, JMS=jms,JME=jme, KMS=kms,KME=kme &
700 & , I_START=grid%i_start,I_END=min(grid%i_end, ide-1) &
701 & , J_START=grid%j_start,J_END=min(grid%j_end, jde-1) &
702 & , KTS=k_start, KTE=min(k_end,kde-1) &
703 & , NUM_TILES=grid%num_tiles &
704 ! Optional
705 & ,QV_CURR=moist(ims,kms,jms,P_QV), F_QV=F_QV &
706 & ,QC_CURR=moist(ims,kms,jms,P_QC), F_QC=F_QC &
707 & ,QR_CURR=moist(ims,kms,jms,P_QR), F_QR=F_QR &
708 & ,QI_CURR=moist(ims,kms,jms,P_QI), F_QI=F_QI &
709 & ,QS_CURR=moist(ims,kms,jms,P_QS), F_QS=F_QS &
710 & ,QG_CURR=moist(ims,kms,jms,P_QG), F_QG=F_QG &
711 & ,CAPG=grid%capg, EMISS=grid%emiss, HOL=hol,MOL=grid%mol &
712 & ,RAINBL=grid%rainbl,SR=grid%em_sr &
713 & ,RAINNCV=grid%rainncv,REGIME=regime,T2=grid%t2,THC=grid%thc &
714 & ,QSG=grid%qsg,QVG=grid%qvg,QCG=grid%qcg,SOILT1=grid%soilt1,TSNAV=grid%tsnav & ! ruc lsm
715 & ,SMFR3D=grid%smfr3d,KEEPFR3DFLAG=grid%keepfr3dflag & ! ruc lsm
716 & ,POTEVP=grid%em_POTEVP, SNOPCX=grid%em_SNOPCX, SOILTB=grid%em_SOILTB & ! ruc lsm
717 & )
718 BENCH_END(surf_driver_tim)
719
720 !*********
721 ! pbl
722
723 CALL wrf_debug ( 200 , ' call pbl_driver' )
724 BENCH_START(pbl_driver_tim)
725 CALL pbl_driver( &
726 & AKHS=grid%akhs ,AKMS=grid%akms &
727 & ,BL_PBL_PHYSICS=config_flags%bl_pbl_physics &
728 & ,BR=br ,CHKLOWQ=chklowq ,CT=grid%ct &
729 & ,DT=grid%dt ,DX=grid%dx ,DZ8W=dz8w &
730 & ,EL_MYJ=grid%el_myj ,EXCH_H=grid%exch_h ,GRDFLX=grid%grdflx &
731 & ,GZ1OZ0=gz1oz0 ,HFX=grid%hfx ,HT=grid%ht &
732 & ,ITIMESTEP=grid%itimestep ,KPBL=grid%kpbl &
733 & ,LH=grid%lh ,LOWLYR=grid%lowlyr ,P8W=p8w &
734 & ,PBLH=grid%pblh ,PI_PHY=pi_phy ,PSIH=psih &
735 & ,PSIM=psim ,P_PHY=p_phy ,QFX=grid%qfx &
736 & ,QSFC=grid%qsfc ,QZ0=grid%qz0 &
737 & ,RA_LW_PHYSICS=config_flags%ra_lw_physics &
738 & ,RHO=rho ,RQCBLTEN=grid%rqcblten ,RQIBLTEN=grid%rqiblten &
739 & ,RQVBLTEN=grid%rqvblten ,RTHBLTEN=grid%rthblten ,RUBLTEN=grid%rublten &
740 & ,RVBLTEN=grid%rvblten ,SNOW=grid%snow ,STEPBL=grid%stepbl &
741 & ,THZ0=grid%thz0 ,TH_PHY=th_phy ,TKE_MYJ=grid%tke_myj &
742 & ,TSK=grid%tsk ,T_PHY=t_phy ,UST=grid%ust &
743 & ,UZ0=grid%uz0 ,U_FRAME=grid%u_frame ,U_PHY=u_phy &
744 & ,VZ0=grid%vz0 ,V_FRAME=grid%v_frame ,V_PHY=v_phy &
745 & ,WARM_RAIN=grid%warm_rain ,WSPD=wspd &
746 & ,XICE=grid%xice ,XLAND=grid%xland ,Z=grid%em_z &
747 & ,ZNT=grid%znt &
748 & ,IDS=ids,IDE=ide, JDS=jds,JDE=jde, KDS=kds,KDE=kde &
749 & ,IMS=ims,IME=ime, JMS=jms,JME=jme, KMS=kms,KME=kme &
750 & ,I_START=grid%i_start,I_END=min(grid%i_end, ide-1) &
751 & ,J_START=grid%j_start,J_END=min(grid%j_end, jde-1) &
752 & ,KTS=k_start, KTE=min(k_end,kde-1) &
753 & ,NUM_TILES=grid%num_tiles &
754 ! optional
755 & ,QV_CURR=moist(ims,kms,jms,P_QV), F_QV=F_QV &
756 & ,QC_CURR=moist(ims,kms,jms,P_QC), F_QC=F_QC &
757 & ,QR_CURR=moist(ims,kms,jms,P_QR), F_QR=F_QR &
758 & ,QI_CURR=moist(ims,kms,jms,P_QI), F_QI=F_QI &
759 & ,QS_CURR=moist(ims,kms,jms,P_QS), F_QS=F_QS &
760 & ,QG_CURR=moist(ims,kms,jms,P_QG), F_QG=F_QG &
761 & ,HOL=HOL, MOL=grid%mol, REGIME=REGIME &
762 & )
763
764 BENCH_END(pbl_driver_tim)
765
766 ! cumulus para.
767
768 CALL wrf_debug ( 200 , ' call cumulus_driver' )
769
770 BENCH_START(cu_driver_tim)
771 CALL cumulus_driver( &
772 ! Prognostic variables
773 & U=u_phy ,V=v_phy ,TH=th_phy ,T=t_phy &
774 & ,W=grid%em_w_2 ,P=p_phy ,PI=pi_phy ,RHO=rho &
775 ! Other arguments
776 & ,ITIMESTEP=grid%itimestep ,DT=grid%dt ,DX=grid%dx &
777 & ,RAINC=grid%rainc ,RAINCV=grid%raincv ,NCA=grid%nca &
778 & ,HTOP=grid%cutop ,HBOT=grid%cubot ,KPBL=grid%kpbl &
779 & ,DZ8W=dz8w ,P8W=p8w &
780 & ,W0AVG=grid%w0avg ,STEPCU=grid%stepcu &
781 & ,CLDEFI=grid%cldefi ,LOWLYR=grid%lowlyr ,XLAND=grid%xland &
782 & ,APR_GR=grid%apr_gr ,APR_W=grid%apr_w ,APR_MC=grid%apr_mc &
783 & ,APR_ST=grid%apr_st ,APR_AS=grid%apr_as ,APR_CAPMA=grid%apr_capma &
784 & ,APR_CAPME=grid%apr_capme ,APR_CAPMI=grid%apr_capmi &
785 & ,MASS_FLUX=grid%mass_flux ,XF_ENS=grid%xf_ens &
786 & ,PR_ENS=grid%pr_ens ,HT=grid%ht &
787 & ,ENSDIM=config_flags%ensdim ,MAXIENS=config_flags%maxiens ,MAXENS=config_flags%maxens &
788 & ,MAXENS2=config_flags%maxens2 ,MAXENS3=config_flags%maxens3 &
789 & ,CU_ACT_FLAG=cu_act_flag ,WARM_RAIN=grid%warm_rain &
790 & ,GSW=grid%gsw &
791 ! Selection flag
792 & ,CU_PHYSICS=config_flags%cu_physics &
793 ! Dimension arguments
794 & ,IDS=ids,IDE=ide, JDS=jds,JDE=jde, KDS=kds,KDE=kde &
795 & ,IMS=ims,IME=ime, JMS=jms,JME=jme, KMS=kms,KME=kme &
796 & ,I_START=grid%i_start,I_END=min(grid%i_end, ide-1) &
797 & ,J_START=grid%j_start,J_END=min(grid%j_end, jde-1) &
798 & ,KTS=k_start, KTE=min(k_end,kde-1) &
799 & ,NUM_TILES=grid%num_tiles &
800 ! Moisture tendency arguments
801 & ,RQVCUTEN=grid%rqvcuten , RQCCUTEN=grid%rqccuten &
802 & ,RQSCUTEN=grid%rqscuten , RQICUTEN=grid%rqicuten &
803 & ,RQRCUTEN=grid%rqrcuten , RQVBLTEN=grid%rqvblten &
804 & ,RQVFTEN=grid%rqvften &
805 ! Other tendency arguments
806 & ,RTHRATEN=grid%rthraten , RTHBLTEN=grid%rthblten &
807 & ,RTHCUTEN=grid%rthcuten , RTHFTEN=grid%rthften &
808 ! Moisture tracer arguments
809 & ,QV_CURR=moist(ims,kms,jms,P_QV), F_QV=F_QV &
810 & ,QC_CURR=moist(ims,kms,jms,P_QC), F_QC=F_QC &
811 & ,QR_CURR=moist(ims,kms,jms,P_QR), F_QR=F_QR &
812 & ,QI_CURR=moist(ims,kms,jms,P_QI), F_QI=F_QI &
813 & ,QS_CURR=moist(ims,kms,jms,P_QS), F_QS=F_QS &
814 & ,QG_CURR=moist(ims,kms,jms,P_QG), F_QG=F_QG &
815 #ifdef WRF_CHEM
816 & ,GD_CLOUD=grid%GD_CLOUD,GD_CLOUD2=grid%GD_CLOUD2 &
817 #endif
818 & )
819 BENCH_END(cu_driver_tim)
820
821 ! fdda
822
823 CALL wrf_debug ( 200 , ' call fddagd_driver' )
824
825 BENCH_START(fdda_driver_tim)
826 CALL fddagd_driver(itimestep=grid%itimestep,dt=grid%dt,xtime=grid%XTIME, &
827 id=grid%id, &
828 RUNDGDTEN=grid%rundgdten,RVNDGDTEN=grid%rvndgdten, &
829 RTHNDGDTEN=grid%rthndgdten,RQVNDGDTEN=grid%rqvndgdten, &
830 RMUNDGDTEN=grid%rmundgdten, &
831 u_ndg_old=fdda3d(ims,kms,jms,P_u_ndg_old), &
832 v_ndg_old=fdda3d(ims,kms,jms,P_v_ndg_old), &
833 t_ndg_old=fdda3d(ims,kms,jms,P_t_ndg_old), &
834 q_ndg_old=fdda3d(ims,kms,jms,P_q_ndg_old), &
835 mu_ndg_old=fdda2d(ims,1,jms,P_mu_ndg_old), &
836 u_ndg_new=fdda3d(ims,kms,jms,P_u_ndg_new), &
837 v_ndg_new=fdda3d(ims,kms,jms,P_v_ndg_new), &
838 t_ndg_new=fdda3d(ims,kms,jms,P_t_ndg_new), &
839 q_ndg_new=fdda3d(ims,kms,jms,P_q_ndg_new), &
840 mu_ndg_new=fdda2d(ims,1,jms,P_mu_ndg_new), &
841 u3d=grid%em_u_2,v3d=grid%em_v_2,th_phy=th_phy,rho=rho,moist=moist, &
842 p_phy=p_phy,pi_phy=pi_phy,p8w=p8w,t_phy=t_phy, &
843 dz8w=dz8w,z=grid%em_z,z_at_w=z_at_w, &
844 config_flags=config_flags,dx=grid%DX,n_moist=num_3d_m, &
845 STEPFG=grid%STEPFG, &
846 pblh=grid%pblh,ht=grid%ht, &
847 IDS=ids,IDE=ide, JDS=jds,JDE=jde, KDS=kds,KDE=kde &
848 ,IMS=ims,IME=ime, JMS=jms,JME=jme, KMS=kms,KME=kme &
849 ,I_START=grid%i_start,I_END=min(grid%i_end, ide-1) &
850 ,J_START=grid%j_start,J_END=min(grid%j_end, jde-1) &
851 ,KTS=k_start, KTE=min(k_end,kde-1) &
852 , num_tiles=grid%num_tiles )
853 BENCH_END(fdda_driver_tim)
854
855 ! calculate_phy_tend
856
857 BENCH_START(cal_phy_tend)
858 !$OMP PARALLEL DO &
859 !$OMP PRIVATE ( ij )
860
861 DO ij = 1 , grid%num_tiles
862
863 CALL wrf_debug ( 200 , ' call calculate_phy_tend' )
864 CALL calculate_phy_tend (config_flags,grid%em_mut,grid%em_muu,grid%em_muv,pi_phy, &
865 grid%rthraten, &
866 grid%rublten,grid%rvblten,grid%rthblten, &
867 grid%rqvblten,grid%rqcblten,grid%rqiblten, &
868 grid%rthcuten,grid%rqvcuten,grid%rqccuten,grid%rqrcuten, &
869 grid%rqicuten,grid%rqscuten, &
870 grid%RUNDGDTEN,grid%RVNDGDTEN,grid%RTHNDGDTEN,grid%RQVNDGDTEN, &
871 grid%RMUNDGDTEN, &
872 ids,ide, jds,jde, kds,kde, &
873 ims,ime, jms,jme, kms,kme, &
874 grid%i_start(ij), min(grid%i_end(ij),ide-1), &
875 grid%j_start(ij), min(grid%j_end(ij),jde-1), &
876 k_start , min(k_end,kde-1) )
877
878 ENDDO
879 !$OMP END PARALLEL DO
880 BENCH_END(cal_phy_tend)
881
882 ! tke diffusion
883
884 IF(config_flags%diff_opt .eq. 2 .OR. config_flags%diff_opt .eq. 1) THEN
885
886 BENCH_START(comp_diff_metrics_tim)
887 !$OMP PARALLEL DO &
888 !$OMP PRIVATE ( ij )
889
890 DO ij = 1 , grid%num_tiles
891
892 CALL wrf_debug ( 200 , ' call compute_diff_metrics ' )
893 CALL compute_diff_metrics ( config_flags, grid%em_ph_2, grid%em_phb, grid%em_z, grid%em_rdz, grid%em_rdzw, &
894 grid%em_zx, grid%em_zy, grid%rdx, grid%rdy, &
895 ids, ide, jds, jde, kds, kde, &
896 ims, ime, jms, jme, kms, kme, &
897 grid%i_start(ij), grid%i_end(ij), &
898 grid%j_start(ij), grid%j_end(ij), &
899 k_start , k_end )
900 ENDDO
901 !$OMP END PARALLEL DO
902 BENCH_END(comp_diff_metrics_tim)
903
904 #ifdef DM_PARALLEL
905 # include "PERIOD_BDY_EM_A1.inc"
906 #endif
907
908 BENCH_START(tke_diff_bc_tim)
909 DO ij = 1 , grid%num_tiles
910
911 CALL wrf_debug ( 200 , ' call bc for diffusion_metrics ' )
912 CALL set_physical_bc3d( grid%em_rdzw , 'w', config_flags, &
913 ids, ide, jds, jde, kds, kde, &
914 ims, ime, jms, jme, kms, kme, &
915 ips, ipe, jps, jpe, kps, kpe, &
916 grid%i_start(ij), grid%i_end(ij), &
917 grid%j_start(ij), grid%j_end(ij), &
918 k_start , k_end )
919 CALL set_physical_bc3d( grid%em_rdz , 'w', config_flags, &
920 ids, ide, jds, jde, kds, kde, &
921 ims, ime, jms, jme, kms, kme, &
922 ips, ipe, jps, jpe, kps, kpe, &
923 grid%i_start(ij), grid%i_end(ij), &
924 grid%j_start(ij), grid%j_end(ij), &
925 k_start , k_end )
926 CALL set_physical_bc3d( grid%em_z , 'w', config_flags, &
927 ids, ide, jds, jde, kds, kde, &
928 ims, ime, jms, jme, kms, kme, &
929 ips, ipe, jps, jpe, kps, kpe, &
930 grid%i_start(ij), grid%i_end(ij), &
931 grid%j_start(ij), grid%j_end(ij), &
932 k_start , k_end )
933 CALL set_physical_bc3d( grid%em_zx , 'w', config_flags, &
934 ids, ide, jds, jde, kds, kde, &
935 ims, ime, jms, jme, kms, kme, &
936 ips, ipe, jps, jpe, kps, kpe, &
937 grid%i_start(ij), grid%i_end(ij), &
938 grid%j_start(ij), grid%j_end(ij), &
939 k_start , k_end )
940 CALL set_physical_bc3d( grid%em_zy , 'w', config_flags, &
941 ids, ide, jds, jde, kds, kde, &
942 ims, ime, jms, jme, kms, kme, &
943 ips, ipe, jps, jpe, kps, kpe, &
944 grid%i_start(ij), grid%i_end(ij), &
945 grid%j_start(ij), grid%j_end(ij), &
946 k_start , k_end )
947
948 ENDDO
949 BENCH_END(tke_diff_bc_tim)
950
951 #ifdef DM_PARALLEL
952 # include "HALO_EM_TKE_C.inc"
953 #endif
954
955 BENCH_START(deform_div_tim)
956
957 !$OMP PARALLEL DO &
958 !$OMP PRIVATE ( ij )
959
960 DO ij = 1 , grid%num_tiles
961
962 CALL wrf_debug ( 200 , ' call cal_deform_and_div' )
963 CALL cal_deform_and_div ( config_flags,grid%em_u_2,grid%em_v_2,grid%em_w_2,grid%div, &
964 grid%defor11,grid%defor22,grid%defor33,grid%defor12, &
965 grid%defor13,grid%defor23, &
966 grid%u_base, grid%v_base,grid%msfu,grid%msfv,grid%msft, &
967 grid%rdx, grid%rdy, grid%em_dn, grid%em_dnw, grid%em_rdz, grid%em_rdzw, &
968 grid%em_fnm,grid%em_fnp,grid%cf1,grid%cf2,grid%cf3,grid%em_zx,grid%em_zy, &
969 ids, ide, jds, jde, kds, kde, &
970 ims, ime, jms, jme, kms, kme, &
971 grid%i_start(ij), grid%i_end(ij), &
972 grid%j_start(ij), grid%j_end(ij), &
973 k_start , k_end )
974 ENDDO
975 !$OMP END PARALLEL DO
976 BENCH_END(deform_div_tim)
977
978
979 #ifdef DM_PARALLEL
980 # include "HALO_EM_TKE_D.inc"
981 #endif
982
983
984 ! calculate tke, kmh, and kmv
985
986 BENCH_START(calc_tke_tim)
987 !$OMP PARALLEL DO &
988 !$OMP PRIVATE ( ij )
989
990 DO ij = 1 , grid%num_tiles
991
992 #ifdef WRF_PLUS
993 if(TEST_calculate_km_kh) THEN
994 CALL t_calculate_km_kh( config_flags,grid%dt,grid%dampcoef,grid%zdamp,config_flags%damp_opt, &
995 grid%xkmh,grid%xkmhd,grid%xkmv,grid%xkhh,grid%xkhv,grid%BN2, &
996 grid%khdif,grid%kvdif,grid%div, &
997 grid%defor11,grid%defor22,grid%defor33,grid%defor12, &
998 grid%defor13,grid%defor23, &
999 grid%em_tke_2(ims,kms,jms),p8w,t8w,th_phy, &
1000 t_phy,p_phy,moist,grid%em_dn,grid%em_dnw, &
1001 grid%dx,grid%dy,grid%em_rdz,grid%em_rdzw,config_flags%mix_cr_len,num_3d_m, &
1002 grid%cf1, grid%cf2, grid%cf3, grid%warm_rain, &
1003 grid%kh_tke_upper_bound, grid%kv_tke_upper_bound, &
1004 ids,ide, jds,jde, kds,kde, &
1005 ims,ime, jms,jme, kms,kme, &
1006 grid%i_start(ij), grid%i_end(ij), &
1007 grid%j_start(ij), grid%j_end(ij), &
1008 k_start , k_end )
1009 END IF
1010 #endif
1011 CALL wrf_debug ( 200 , ' call calculate_km_kh' )
1012 CALL calculate_km_kh( config_flags,grid%dt,grid%dampcoef,grid%zdamp,config_flags%damp_opt, &
1013 grid%xkmh,grid%xkmhd,grid%xkmv,grid%xkhh,grid%xkhv,grid%bn2, &
1014 grid%khdif,grid%kvdif,grid%div, &
1015 grid%defor11,grid%defor22,grid%defor33,grid%defor12, &
1016 grid%defor13,grid%defor23, &
1017 grid%em_tke_2,p8w,t8w,th_phy, &
1018 t_phy,p_phy,moist,grid%em_dn,grid%em_dnw, &
1019 grid%dx,grid%dy,grid%em_rdz,grid%em_rdzw,config_flags%mix_cr_len,num_3d_m, &
1020 grid%cf1, grid%cf2, grid%cf3, grid%warm_rain, &
1021 grid%kh_tke_upper_bound, grid%kv_tke_upper_bound, &
1022 ids,ide, jds,jde, kds,kde, &
1023 ims,ime, jms,jme, kms,kme, &
1024 grid%i_start(ij), grid%i_end(ij), &
1025 grid%j_start(ij), grid%j_end(ij), &
1026 k_start , k_end )
1027 ENDDO
1028 !$OMP END PARALLEL DO
1029 BENCH_END(calc_tke_tim)
1030
1031 #ifdef DM_PARALLEL
1032 # include "HALO_EM_TKE_E.inc"
1033 #endif
1034
1035 ENDIF
1036
1037 #ifdef DM_PARALLEL
1038 # include "PERIOD_BDY_EM_PHY_BC.inc"
1039 IF ( config_flags%grid_fdda .eq. 1) THEN
1040 # include "PERIOD_BDY_EM_FDDA_BC.inc"
1041 ENDIF
1042 # include "PERIOD_BDY_EM_CHEM.inc"
1043 #endif
1044
1045 BENCH_START(phy_bc_tim)
1046 !$OMP PARALLEL DO &
1047 !$OMP PRIVATE ( ij )
1048
1049 DO ij = 1 , grid%num_tiles
1050
1051 CALL wrf_debug ( 200 , ' call phy_bc' )
1052 CALL phy_bc (config_flags,grid%div,grid%defor11,grid%defor22,grid%defor33, &
1053 grid%defor12,grid%defor13,grid%defor23, &
1054 grid%xkmh,grid%xkmhd,grid%xkmv,grid%xkhh,grid%xkhv, &
1055 grid%em_tke_2, &
1056 grid%rublten, grid%rvblten, &
1057 ids, ide, jds, jde, kds, kde, &
1058 ims, ime, jms, jme, kms, kme, &
1059 ips, ipe, jps, jpe, kps, kpe, &
1060 grid%i_start(ij), grid%i_end(ij), &
1061 grid%j_start(ij), grid%j_end(ij), &
1062 k_start , k_end )
1063 ENDDO
1064 !$OMP END PARALLEL DO
1065 BENCH_END(phy_bc_tim)
1066
1067 #ifdef DM_PARALLEL
1068 !-----------------------------------------------------------------------
1069 !
1070 ! MPP for some physics tendencies, km, kh, deformation, and divergence
1071 !
1072 ! * *
1073 ! * + * * + * +
1074 ! * *
1075 !
1076 ! (for PBL)
1077 ! grid%rublten x
1078 ! grid%rvblten x
1079 !
1080 ! (for diff_opt >= 1)
1081 ! grid%defor11 x
1082 ! grid%defor22 x
1083 ! grid%defor12 x
1084 ! grid%defor13 x
1085 ! grid%defor23 x
1086 ! grid%div x
1087 ! grid%xkmv x
1088 ! grid%xkmh x
1089 ! grid%xkmhd x
1090 ! grid%xkhv x
1091 ! grid%xkhh x
1092 ! tke x
1093 !
1094 !-----------------------------------------------------------------------
1095 IF ( config_flags%bl_pbl_physics .ge. 1 ) THEN
1096 # include "HALO_EM_PHYS_PBL.inc"
1097 ENDIF
1098 IF ( config_flags%grid_fdda .eq. 1) THEN
1099 # include "HALO_EM_FDDA.inc"
1100 ENDIF
1101 IF ( config_flags%diff_opt .ge. 1 ) THEN
1102 # include "HALO_EM_PHYS_DIFFUSION.inc"
1103 ENDIF
1104
1105 IF ( config_flags%h_mom_adv_order <= 4 ) THEN
1106 # include "HALO_EM_TKE_3.inc"
1107 ELSE IF ( config_flags%h_mom_adv_order <= 6 ) THEN
1108 # include "HALO_EM_TKE_5.inc"
1109 ELSE
1110 WRITE(wrf_err_message,*)'solve_em: invalid h_mom_adv_order = ',config_flags%h_mom_adv_order
1111 CALL wrf_error_fatal(TRIM(wrf_err_message))
1112 ENDIF
1113 #endif
1114
1115 BENCH_START(update_phy_ten_tim)
1116 !$OMP PARALLEL DO &
1117 !$OMP PRIVATE ( ij )
1118
1119 DO ij = 1 , grid%num_tiles
1120
1121 CALL wrf_debug ( 200 , ' call update_phy_ten' )
1122 CALL update_phy_ten(t_tendf, ru_tendf, rv_tendf,moist_tend, &
1123 scalar_tend, mu_tendf, &
1124 grid%rthraten,grid%rthblten,grid%rthcuten,grid%rublten,grid%rvblten, &
1125 grid%rqvblten,grid%rqcblten,grid%rqiblten, &
1126 grid%rqvcuten,grid%rqccuten,grid%rqrcuten,grid%rqicuten,grid%rqscuten, &
1127 grid%RUNDGDTEN,grid%RVNDGDTEN,grid%RTHNDGDTEN,grid%RQVNDGDTEN, &
1128 grid%RMUNDGDTEN, &
1129 num_3d_m,num_3d_s,config_flags,rk_step,grid%adv_moist_cond, &
1130 ids, ide, jds, jde, kds, kde, &
1131 ims, ime, jms, jme, kms, kme, &
1132 grid%i_start(ij), grid%i_end(ij), &
1133 grid%j_start(ij), grid%j_end(ij), &
1134 k_start, k_end )
1135
1136 END DO
1137 !$OMP END PARALLEL DO
1138 BENCH_END(update_phy_ten_tim)
1139
1140 IF( config_flags%diff_opt .eq. 2 .and. config_flags%km_opt .eq. 2 ) THEN
1141
1142 BENCH_START(tke_rhs_tim)
1143 !$OMP PARALLEL DO &
1144 !$OMP PRIVATE ( ij )
1145
1146 DO ij = 1 , grid%num_tiles
1147
1148 CALL tke_rhs ( tke_tend,grid%bn2, &
1149 config_flags,grid%defor11,grid%defor22,grid%defor33, &
1150 grid%defor12,grid%defor13,grid%defor23,grid%em_u_2,grid%em_v_2,grid%em_w_2,grid%div, &
1151 grid%em_tke_2,grid%em_mut, &
1152 th_phy,p_phy,p8w,t8w,grid%em_z,grid%em_fnm,grid%em_fnp, &
1153 grid%cf1,grid%cf2,grid%cf3,grid%msft,grid%xkmh,grid%xkmv,grid%xkhv,grid%rdx,grid%rdy, &
1154 grid%dx,grid%dy,grid%dt,grid%em_zx,grid%em_zy,grid%em_rdz,grid%em_rdzw,grid%em_dn, &
1155 grid%em_dnw,config_flags%mix_cr_len, &
1156 ids, ide, jds, jde, kds, kde, &
1157 ims, ime, jms, jme, kms, kme, &
1158 grid%i_start(ij), grid%i_end(ij), &
1159 grid%j_start(ij), grid%j_end(ij), &
1160 k_start , k_end )
1161
1162 ENDDO
1163 !$OMP END PARALLEL DO
1164 BENCH_END(tke_rhs_tim)
1165
1166 ENDIF
1167
1168 ! calculate vertical diffusion first and then horizontal
1169 ! (keep this order)
1170
1171 IF(config_flags%diff_opt .eq. 2) THEN
1172
1173 IF (config_flags%bl_pbl_physics .eq. 0) THEN
1174
1175 BENCH_START(vert_diff_tim)
1176 !$OMP PARALLEL DO &
1177 !$OMP PRIVATE ( ij )
1178 DO ij = 1 , grid%num_tiles
1179
1180 CALL wrf_debug ( 200 , ' call vertical_diffusion_2 ' )
1181 CALL vertical_diffusion_2( ru_tendf, rv_tendf, rw_tendf, &
1182 t_tendf, tke_tend, &
1183 moist_tend, num_3d_m, &
1184 chem_tend, num_3d_c, &
1185 scalar_tend, num_3d_s, &
1186 grid%em_u_2, grid%em_v_2, &
1187 grid%em_t_2,grid%u_base,grid%v_base,grid%em_t_base,grid%qv_base, &
1188 grid%em_mut,grid%em_tke_2,config_flags, &
1189 grid%defor13,grid%defor23,grid%defor33, &
1190 grid%div, moist, chem, scalar, &
1191 grid%xkmv, grid%xkhv, config_flags%km_opt, &
1192 grid%em_fnm, grid%em_fnp, grid%em_dn, grid%em_dnw, grid%em_rdz, grid%em_rdzw, &
1193 ids, ide, jds, jde, kds, kde, &
1194 ims, ime, jms, jme, kms, kme, &
1195 grid%i_start(ij), grid%i_end(ij), &
1196 grid%j_start(ij), grid%j_end(ij), &
1197 k_start , k_end )
1198
1199 ENDDO
1200 !$OMP END PARALLEL DO
1201 BENCH_END(vert_diff_tim)
1202
1203 ENDIF
1204 !
1205 BENCH_START(hor_diff_tim)
1206 !$OMP PARALLEL DO &
1207 !$OMP PRIVATE ( ij )
1208 DO ij = 1 , grid%num_tiles
1209
1210 CALL wrf_debug ( 200 , ' call horizontal_diffusion_2' )
1211 CALL horizontal_diffusion_2( t_tendf, ru_tendf, rv_tendf, rw_tendf, &
1212 tke_tend, &
1213 moist_tend, num_3d_m, &
1214 chem_tend, num_3d_c, &
1215 scalar_tend, num_3d_s, &
1216 grid%em_t_2, th_phy, &
1217 grid%em_mut, grid%em_tke_2, config_flags, &
1218 grid%defor11, grid%defor22, grid%defor12, &
1219 grid%defor13, grid%defor23, grid%div, &
1220 moist, chem, scalar, &
1221 grid%msfu, grid%msfv, grid%msft, grid%xkmhd, grid%xkhh, config_flags%km_opt, &
1222 grid%rdx, grid%rdy, grid%em_rdz, grid%em_rdzw, &
1223 grid%em_fnm, grid%em_fnp, grid%cf1, grid%cf2, grid%cf3, &
1224 grid%em_zx, grid%em_zy, grid%em_dn, grid%em_dnw, &
1225 ids, ide, jds, jde, kds, kde, &
1226 ims, ime, jms, jme, kms, kme, &
1227 grid%i_start(ij), grid%i_end(ij), &
1228 grid%j_start(ij), grid%j_end(ij), &
1229 k_start , k_end )
1230 ENDDO
1231 !$OMP END PARALLEL DO
1232 BENCH_END(hor_diff_tim)
1233
1234 ENDIF
1235
1236 # ifdef DM_PARALLEL
1237 # include "HALO_OBS_NUDGE.inc"
1238 #endif
1239 !***********************************************************************
1240 ! This section for obs nudging
1241 !$OMP PARALLEL DO &
1242 !$OMP PRIVATE ( ij )
1243
1244 DO ij = 1 , grid%num_tiles
1245
1246 CALL fddaobs_driver (grid%grid_id, model_config_rec%grid_id, &
1247 model_config_rec%parent_id, config_flags%restart, &
1248 grid%obs_nudge_opt, &
1249 grid%obs_ipf_errob, &
1250 grid%obs_ipf_nudob, &
1251 grid%fdda_start, &
1252 grid%fdda_end, &
1253 grid%obs_nudge_wind, &
1254 grid%obs_nudge_temp, &
1255 grid%obs_nudge_mois, &
1256 grid%obs_nudge_pstr, &
1257 grid%obs_coef_wind, &
1258 grid%obs_coef_temp, &
1259 grid%obs_coef_mois, &
1260 grid%obs_coef_pstr, &
1261 grid%obs_rinxy, &
1262 grid%obs_rinsig, &
1263 grid%obs_twindo, &
1264 grid%obs_npfi, &
1265 grid%obs_ionf, &
1266 grid%obs_idynin, &
1267 grid%obs_dtramp, &
1268 model_config_rec%cen_lat(1), &
1269 model_config_rec%cen_lon(1), &
1270 config_flags%truelat1, &
1271 config_flags%truelat2, &
1272 config_flags%map_proj, &
1273 model_config_rec%i_parent_start, &
1274 model_config_rec%j_parent_start, &
1275 grid%parent_grid_ratio, &
1276 grid%max_dom, grid%itimestep, &
1277 grid%dt, grid%gmt, grid%julday, grid%fdob, &
1278 grid%max_obs, &
1279 model_config_rec%nobs_ndg_vars, &
1280 model_config_rec%nobs_err_flds, &
1281 grid%fdob%nstat, grid%fdob%varobs, grid%fdob%errf, &
1282 grid%dx, grid%KPBL,grid%HT, &
1283 grid%em_mut, grid%em_muu, grid%em_muv, &
1284 grid%msft, grid%msfu, grid%msfv, &
1285 p_phy, t_tendf, t0, &
1286 grid%em_u_2, grid%em_v_2, grid%em_t_2, &
1287 moist(:,:,:,P_QV), &
1288 grid%em_pb, grid%p_top, grid%em_p, &
1289 grid%uratx, grid%vratx, grid%tratx, &
1290 ru_tendf, rv_tendf, &
1291 moist_tend(:,:,:,P_QV), grid%em_obs_savwt, &
1292 ids,ide, jds,jde, kds,kde, &
1293 ims,ime, jms,jme, kms,kme, &
1294 grid%i_start(ij), min(grid%i_end(ij),ide-1), &
1295 grid%j_start(ij), min(grid%j_end(ij),jde-1), &
1296 k_start , min(k_end,kde-1) )
1297
1298 ENDDO
1299
1300 !$OMP END PARALLEL DO
1301 !
1302 !***********************************************************************
1303
1304 END IF rk_step_is_one
1305
1306 BENCH_START(rk_tend_tim)
1307 !$OMP PARALLEL DO &
1308 !$OMP PRIVATE ( ij )
1309 DO ij = 1 , grid%num_tiles
1310
1311 #ifdef WRF_PLUS
1312 if(TEST_rk_tendency) THEN
1313 CALL t_rk_tendency ( config_flags, rk_step, &
1314 grid%em_ru_tend, grid%em_rv_tend, rw_tend, ph_tend, t_tend, &
1315 ru_tendf, rv_tendf, rw_tendf, ph_tendf, t_tendf, &
1316 mu_tend, grid%em_u_save, grid%em_v_save, w_save, ph_save, &
1317 grid%em_t_save, mu_save, grid%RTHFTEN, &
1318 grid%em_ru, grid%em_rv, grid%em_rw, grid%em_ww, &
1319 grid%em_u_2, grid%em_v_2, grid%em_w_2, grid%em_t_2, grid%em_ph_2, &
1320 grid%em_u_1, grid%em_v_1, grid%em_w_1, grid%em_t_1, grid%em_ph_1, &
1321 grid%h_diabatic, grid%em_phb, grid%em_t_init, &
1322 grid%em_mu_2, grid%em_mut, grid%em_muu, grid%em_muv, grid%em_mub, &
1323 grid%em_al, grid%em_alt, grid%em_p, grid%em_pb, grid%em_php, cqu, cqv, cqw, &
1324 grid%u_base, grid%v_base, grid%em_t_base, grid%qv_base, grid%z_base, &
1325 grid%msfu, grid%msfv, grid%msft, grid%f, grid%e, grid%sina, grid%cosa, &
1326 grid%em_fnm, grid%em_fnp, grid%em_rdn, grid%em_rdnw, &
1327 grid%dt, grid%rdx, grid%rdy, grid%khdif, grid%kvdif, grid%xkmhd, &
1328 grid%diff_6th_opt, grid%diff_6th_rate, &
1329 grid%dampcoef,grid%zdamp,config_flags%damp_opt, &
1330 grid%cf1, grid%cf2, grid%cf3, grid%cfn, grid%cfn1, num_3d_m, &
1331 config_flags%non_hydrostatic, &
1332 ids, ide, jds, jde, kds, kde, &
1333 ims, ime, jms, jme, kms, kme, &
1334 grid%i_start(ij), grid%i_end(ij), &
1335 grid%j_start(ij), grid%j_end(ij), &
1336 k_start, k_end )
1337 END IF
1338 #endif
1339 CALL wrf_debug ( 200 , ' call rk_tendency' )
1340 CALL rk_tendency ( config_flags, rk_step, &
1341 grid%em_ru_tend, grid%em_rv_tend, rw_tend, ph_tend, t_tend, &
1342 ru_tendf, rv_tendf, rw_tendf, ph_tendf, t_tendf, &
1343 mu_tend, grid%em_u_save, grid%em_v_save, w_save, ph_save, &
1344 grid%em_t_save, mu_save, grid%rthften, &
1345 grid%em_ru, grid%em_rv, grid%em_rw, grid%em_ww, &
1346 grid%em_u_2, grid%em_v_2, grid%em_w_2, grid%em_t_2, grid%em_ph_2, &
1347 grid%em_u_1, grid%em_v_1, grid%em_w_1, grid%em_t_1, grid%em_ph_1, &
1348 grid%h_diabatic, grid%em_phb, grid%em_t_init, &
1349 grid%em_mu_2, grid%em_mut, grid%em_muu, grid%em_muv, grid%em_mub, &
1350 grid%em_al, grid%em_alt, grid%em_p, grid%em_pb, grid%em_php, cqu, cqv, cqw, &
1351 grid%u_base, grid%v_base, grid%em_t_base, grid%qv_base, grid%z_base, &
1352 grid%msfu, grid%msfv, grid%msft, grid%f, grid%e, grid%sina, grid%cosa, &
1353 grid%em_fnm, grid%em_fnp, grid%em_rdn, grid%em_rdnw, &
1354 grid%dt, grid%rdx, grid%rdy, grid%khdif, grid%kvdif, grid%xkmhd, &
1355 grid%diff_6th_opt, grid%diff_6th_factor, &
1356 grid%dampcoef,grid%zdamp,config_flags%damp_opt, &
1357 grid%cf1, grid%cf2, grid%cf3, grid%cfn, grid%cfn1, num_3d_m, &
1358 config_flags%non_hydrostatic, &
1359 ids, ide, jds, jde, kds, kde, &
1360 ims, ime, jms, jme, kms, kme, &
1361 grid%i_start(ij), grid%i_end(ij), &
1362 grid%j_start(ij), grid%j_end(ij), &
1363 k_start, k_end )
1364
1365 #ifdef WRF_PLUS
1366 if(TEST_advect_scalar) THEN
1367 CALL t_advect_scalar ( grid%em_t_2, grid%em_t_2, t_tend, grid%em_ru, grid%em_rv, grid%em_ww, &
1368 grid%em_mut, config_flags, &
1369 grid%msfu, grid%msfv, grid%msft, grid%em_fnm, grid%em_fnp, &
1370 grid%rdx, grid%rdy, grid%em_rdnw, &
1371 ids, ide, jds, jde, kds, kde, &
1372 ims, ime, jms, jme, kms, kme, &
1373 grid%i_start(ij), grid%i_end(ij), &
1374 grid%j_start(ij), grid%j_end(ij), &
1375 k_start, k_end )
1376 END IF
1377 #endif
1378 END DO
1379 !$OMP END PARALLEL DO
1380 BENCH_END(rk_tend_tim)
1381
1382 BENCH_START(relax_bdy_dry_tim)
1383 !$OMP PARALLEL DO &
1384 !$OMP PRIVATE ( ij )
1385 DO ij = 1 , grid%num_tiles
1386
1387 IF( (config_flags%specified .or. config_flags%nested) .and. rk_step == 1 ) THEN
1388
1389 #ifdef WRF_PLUS
1390 if(TEST_relax_bdy_dry) THEN
1391 CALL t_relax_bdy_dry ( config_flags, &
1392 grid%em_u_save, grid%em_v_save, ph_save, grid%em_t_save, &
1393 w_save, mu_tend, &
1394 grid%em_ru, grid%em_rv, grid%em_ph_2, grid%em_t_2, &
1395 grid%em_w_2, grid%em_mu_2, grid%em_mut, &
1396 grid%em_u_b, grid%em_v_b, grid%em_ph_b, grid%em_t_b, grid%em_w_b, &
1397 grid%em_mu_b, &
1398 grid%em_u_bt, grid%em_v_bt, grid%em_ph_bt, grid%em_t_bt, &
1399 grid%em_w_bt, grid%em_mu_bt, &
1400 config_flags%spec_bdy_width, grid%spec_zone, grid%relax_zone, &
1401 grid%dtbc, grid%fcx, grid%gcx, &
1402 ijds, ijde, &
1403 ids,ide, jds,jde, kds,kde, &
1404 ims,ime, jms,jme, kms,kme, &
1405 ips,ipe, jps,jpe, kps,kpe, &
1406 grid%i_start(ij), grid%i_end(ij), &
1407 grid%j_start(ij), grid%j_end(ij), &
1408 k_start, k_end )
1409 END IF
1410 #endif
1411
1412 CALL relax_bdy_dry ( config_flags, &
1413 grid%em_u_save, grid%em_v_save, ph_save, grid%em_t_save, &
1414 w_save, mu_tend, &
1415 grid%em_ru, grid%em_rv, grid%em_ph_2, grid%em_t_2, &
1416 grid%em_w_2, grid%em_mu_2, grid%em_mut, &
1417 grid%em_u_bxs,grid%em_u_bxe,grid%em_u_bys,grid%em_u_bye, &
1418 grid%em_v_bxs,grid%em_v_bxe,grid%em_v_bys,grid%em_v_bye, &
1419 grid%em_ph_bxs,grid%em_ph_bxe,grid%em_ph_bys,grid%em_ph_bye, &
1420 grid%em_t_bxs,grid%em_t_bxe,grid%em_t_bys,grid%em_t_bye, &
1421 grid%em_w_bxs,grid%em_w_bxe,grid%em_w_bys,grid%em_w_bye, &
1422 grid%em_mu_bxs,grid%em_mu_bxe,grid%em_mu_bys,grid%em_mu_bye, &
1423 grid%em_u_btxs,grid%em_u_btxe,grid%em_u_btys,grid%em_u_btye, &
1424 grid%em_v_btxs,grid%em_v_btxe,grid%em_v_btys,grid%em_v_btye, &
1425 grid%em_ph_btxs,grid%em_ph_btxe,grid%em_ph_btys,grid%em_ph_btye, &
1426 grid%em_t_btxs,grid%em_t_btxe,grid%em_t_btys,grid%em_t_btye, &
1427 grid%em_w_btxs,grid%em_w_btxe,grid%em_w_btys,grid%em_w_btye, &
1428 grid%em_mu_btxs,grid%em_mu_btxe,grid%em_mu_btys,grid%em_mu_btye, &
1429 config_flags%spec_bdy_width, grid%spec_zone, grid%relax_zone, &
1430 grid%dtbc, grid%fcx, grid%gcx, &
1431 ids,ide, jds,jde, kds,kde, &
1432 ims,ime, jms,jme, kms,kme, &
1433 ips,ipe, jps,jpe, kps,kpe, &
1434 grid%i_start(ij), grid%i_end(ij), &
1435 grid%j_start(ij), grid%j_end(ij), &
1436 k_start, k_end )
1437
1438
1439 ENDIF
1440 #ifdef WRF_PLUS
1441 if(TEST_rk_addtend_dry) THEN
1442 CALL t_rk_addtend_dry( grid%em_ru_tend, grid%em_rv_tend, rw_tend, ph_tend, t_tend, &
1443 ru_tendf, rv_tendf, rw_tendf, ph_tendf, t_tendf, &
1444 grid%em_u_save, grid%em_v_save, w_save, ph_save, grid%em_t_save, &
1445 mu_tend, mu_tendf, rk_step, &
1446 grid%h_diabatic, grid%em_mut, grid%msft, grid%msfu, grid%msfv, &
1447 ids,ide, jds,jde, kds,kde, &
1448 ims,ime, jms,jme, kms,kme, &
1449 ips,ipe, jps,jpe, kps,kpe, &
1450 grid%i_start(ij), grid%i_end(ij), &
1451 grid%j_start(ij), grid%j_end(ij), &
1452 k_start, k_end )
1453 END IF
1454 #endif
1455
1456 CALL rk_addtend_dry( grid%em_ru_tend, grid%em_rv_tend, rw_tend, ph_tend, t_tend, &
1457 ru_tendf, rv_tendf, rw_tendf, ph_tendf, t_tendf, &
1458 grid%em_u_save, grid%em_v_save, w_save, ph_save, grid%em_t_save, &
1459 mu_tend, mu_tendf, rk_step, &
1460 grid%h_diabatic, grid%em_mut, grid%msft, grid%msfu, grid%msfv, &
1461 ids,ide, jds,jde, kds,kde, &
1462 ims,ime, jms,jme, kms,kme, &
1463 ips,ipe, jps,jpe, kps,kpe, &
1464 grid%i_start(ij), grid%i_end(ij), &
1465 grid%j_start(ij), grid%j_end(ij), &
1466 k_start, k_end )
1467
1468 IF( config_flags%specified .or. config_flags%nested ) THEN
1469 #ifdef WRFVAR_TESTING
1470 if(TEST_spec_bdy_dry) THEN
1471 CALL t_spec_bdy_dry ( config_flags, &
1472 grid%em_ru_tend, grid%em_rv_tend, ph_tend, t_tend, &
1473 rw_tend, mu_tend, &
1474 grid%em_u_b, grid%em_v_b, grid%em_ph_b, grid%em_t_b, &
1475 grid%em_w_b, grid%em_mu_b, &
1476 grid%em_u_bt, grid%em_v_bt, grid%em_ph_bt, grid%em_t_bt, &
1477 grid%em_w_bt, grid%em_mu_bt, &
1478 config_flags%spec_bdy_width, grid%spec_zone, &
1479 ijds, ijde, & ! min/max(id,jd)
1480 ids,ide, jds,jde, kds,kde, & ! domain dims
1481 ims,ime, jms,jme, kms,kme, & ! memory dims
1482 ips,ipe, jps,jpe, kps,kpe, & ! patch dims
1483 grid%i_start(ij), grid%i_end(ij), &
1484 grid%j_start(ij), grid%j_end(ij), &
1485 k_start, k_end )
1486 END IF
1487 #endif
1488 CALL spec_bdy_dry ( config_flags, &
1489 grid%em_ru_tend, grid%em_rv_tend, ph_tend, t_tend, &
1490 rw_tend, mu_tend, &
1491 grid%em_u_bxs,grid%em_u_bxe,grid%em_u_bys,grid%em_u_bye, &
1492 grid%em_v_bxs,grid%em_v_bxe,grid%em_v_bys,grid%em_v_bye, &
1493 grid%em_ph_bxs,grid%em_ph_bxe,grid%em_ph_bys,grid%em_ph_bye, &
1494 grid%em_t_bxs,grid%em_t_bxe,grid%em_t_bys,grid%em_t_bye, &
1495 grid%em_w_bxs,grid%em_w_bxe,grid%em_w_bys,grid%em_w_bye, &
1496 grid%em_mu_bxs,grid%em_mu_bxe,grid%em_mu_bys,grid%em_mu_bye, &
1497 grid%em_u_btxs,grid%em_u_btxe,grid%em_u_btys,grid%em_u_btye, &
1498 grid%em_v_btxs,grid%em_v_btxe,grid%em_v_btys,grid%em_v_btye, &
1499 grid%em_ph_btxs,grid%em_ph_btxe,grid%em_ph_btys,grid%em_ph_btye, &
1500 grid%em_t_btxs,grid%em_t_btxe,grid%em_t_btys,grid%em_t_btye, &
1501 grid%em_w_btxs,grid%em_w_btxe,grid%em_w_btys,grid%em_w_btye, &
1502 grid%em_mu_btxs,grid%em_mu_btxe,grid%em_mu_btys,grid%em_mu_btye, &
1503 config_flags%spec_bdy_width, grid%spec_zone, &
1504 ids,ide, jds,jde, kds,kde, & ! domain dims
1505 ims,ime, jms,jme, kms,kme, & ! memory dims
1506 ips,ipe, jps,jpe, kps,kpe, & ! patch dims
1507 grid%i_start(ij), grid%i_end(ij), &
1508 grid%j_start(ij), grid%j_end(ij), &
1509 k_start, k_end )
1510
1511 ENDIF
1512
1513 END DO
1514 !$OMP END PARALLEL DO
1515 BENCH_END(relax_bdy_dry_tim)
1516
1517 !<DESCRIPTION>
1518 !<pre>
1519 ! (3) Small (acoustic,sound) steps.
1520 !
1521 ! Several acoustic steps are taken each RK pass. A small step
1522 ! sequence begins with calculating perturbation variables
1523 ! and coupling them to the column dry-air-mass mu
1524 ! (call to small_step_prep). This is followed by computing
1525 ! coefficients for the vertically implicit part of the
1526 ! small timestep (call to calc_coef_w).
1527 !
1528 ! The small steps are taken
1529 ! in the named loop "small_steps:". In the small_steps loop, first
1530 ! the horizontal momentum (u and v) are advanced (call to advance_uv),
1531 ! next mu and theta are advanced (call to advance_mu_t) followed by
1532 ! advancing w and the geopotential (call to advance_w). Diagnostic
1533 ! values for pressure and inverse density are updated at the end of
1534 ! each small_step.
1535 !
1536 ! The small-step section ends with the change of the perturbation variables
1537 ! back to full variables (call to small_step_finish).
1538 !</pre>
1539 !</DESCRIPTION>
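!
! As a rough, illustrative sketch only (the primed symbols below are
! simplified stand-ins for the perturbation fields actually passed to
! the routines named above, not the real argument lists), one acoustic
! step amounts to a forward-backward update:
!
!   u', v'      <- explicit update using the current p' and alpha'      (advance_uv)
!   mu', theta' <- explicit update using the new horizontal mass fluxes (advance_mu_t)
!   w', phi'    <- vertically implicit (tridiagonal) update             (advance_w)
!   p', 1/rho   <- re-diagnosed from the updated perturbation state     (calc_p_rho)
!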
1540
1541 BENCH_START(small_step_prep_tim)
1542 !$OMP PARALLEL DO &
1543 !$OMP PRIVATE ( ij )
1544
1545 DO ij = 1 , grid%num_tiles
1546
1547 ! Calculate coefficients for the vertically implicit acoustic/gravity wave
1548 ! integration. We only need to calculate these for the first pass through -
1549 ! the predictor step; they are reused unchanged for the corrector step.
1550 ! For third-order RK, we need to recompute these after the first
1551 ! predictor because we may have changed the small timestep -> grid%dts.
1552
1553 CALL wrf_debug ( 200 , ' call calc_coef_w' )
1554
1555 #ifdef WRFVAR_TESTING
1556 if(TEST_small_step_prep) THEN
1557 CALL t_small_step_prep( grid%em_u_1,grid%em_u_2,grid%em_v_1,grid%em_v_2,grid%em_w_1,grid%em_w_2, &
1558 grid%em_t_1,grid%em_t_2,grid%em_ph_1,grid%em_ph_2, &
1559 grid%em_mub, grid%em_mu_1, grid%em_mu_2, &
1560 grid%em_muu, muus, grid%em_muv, muvs, &
1561 grid%em_mut, grid%em_muts, grid%em_mudf, &
1562 grid%em_u_save, grid%em_v_save, w_save, &
1563 grid%em_t_save, ph_save, mu_save, &
1564 grid%em_ww, ww1, &
1565 grid%em_dnw, c2a, grid%em_pb, grid%em_p, grid%em_alt, &
1566 grid%msfu, grid%msfv, grid%msft, &
1567 rk_step, &
1568 ids, ide, jds, jde, kds, kde, &
1569 ims, ime, jms, jme, kms, kme, &
1570 grid%i_start(ij), grid%i_end(ij), &
1571 grid%j_start(ij), grid%j_end(ij), &
1572 k_start , k_end )
1573 END IF
1574 #endif
1575
1576 CALL small_step_prep( grid%em_u_1,grid%em_u_2,grid%em_v_1,grid%em_v_2,grid%em_w_1,grid%em_w_2, &
1577 grid%em_t_1,grid%em_t_2,grid%em_ph_1,grid%em_ph_2, &
1578 grid%em_mub, grid%em_mu_1, grid%em_mu_2, &
1579 grid%em_muu, muus, grid%em_muv, muvs, &
1580 grid%em_mut, grid%em_muts, grid%em_mudf, &
1581 grid%em_u_save, grid%em_v_save, w_save, &
1582 grid%em_t_save, ph_save, mu_save, &
1583 grid%em_ww, ww1, &
1584 grid%em_dnw, c2a, grid%em_pb, grid%em_p, grid%em_alt, &
1585 grid%msfu, grid%msfv, grid%msft, &
1586 rk_step, &
1587 ids, ide, jds, jde, kds, kde, &
1588 ims, ime, jms, jme, kms, kme, &
1589 grid%i_start(ij), grid%i_end(ij), &
1590 grid%j_start(ij), grid%j_end(ij), &
1591 k_start , k_end )
1592 #ifdef WRFVAR_TESTING
1593 if(TEST_calc_p_rho) THEN
1594 CALL t_calc_p_rho( grid%em_al, grid%em_p, grid%em_ph_2, &
1595 grid%em_alt, grid%em_t_2, grid%em_t_save, c2a, pm1, &
1596 grid%em_mu_2, grid%em_muts, grid%em_znu, t0, &
1597 grid%em_rdnw, grid%em_dnw, grid%smdiv, &
1598 config_flags%non_hydrostatic, 0, &
1599 ids, ide, jds, jde, kds, kde, &
1600 ims, ime, jms, jme, kms, kme, &
1601 grid%i_start(ij), grid%i_end(ij), &
1602 grid%j_start(ij), grid%j_end(ij), &
1603 k_start , k_end )
1604 END IF
1605 #endif
1606 CALL calc_p_rho( grid%em_al, grid%em_p, grid%em_ph_2, &
1607 grid%em_alt, grid%em_t_2, grid%em_t_save, c2a, pm1, &
1608 grid%em_mu_2, grid%em_muts, grid%em_znu, t0, &
1609 grid%em_rdnw, grid%em_dnw, grid%smdiv, &
1610 config_flags%non_hydrostatic, 0, &
1611 ids, ide, jds, jde, kds, kde, &
1612 ims, ime, jms, jme, kms, kme, &
1613 grid%i_start(ij), grid%i_end(ij), &
1614 grid%j_start(ij), grid%j_end(ij), &
1615 k_start , k_end )
1616
1617 IF (config_flags%non_hydrostatic) THEN
1618 #ifdef WRFVAR_TESTING
1619 if(TEST_calc_coef_w) THEN
1620 CALL t_calc_coef_w( a,alpha,gamma, &
1621 grid%em_mut, cqw, &
1622 grid%em_rdn, grid%em_rdnw, c2a, &
1623 dts_rk, g, grid%epssm, &
1624 ids, ide, jds, jde, kds, kde, &
1625 ims, ime, jms, jme, kms, kme, &
1626 grid%i_start(ij), grid%i_end(ij), &
1627 grid%j_start(ij), grid%j_end(ij), &
1628 k_start , k_end )
1629 END IF
1630 #endif
1631 CALL calc_coef_w( a,alpha,gamma, &
1632 grid%em_mut, cqw, &
1633 grid%em_rdn, grid%em_rdnw, c2a, &
1634 dts_rk, g, grid%epssm, &
1635 ids, ide, jds, jde, kds, kde, &
1636 ims, ime, jms, jme, kms, kme, &
1637 grid%i_start(ij), grid%i_end(ij), &
1638 grid%j_start(ij), grid%j_end(ij), &
1639 k_start , k_end )
1640
1641 END IF
1642
1643
1644 ENDDO
1645 !$OMP END PARALLEL DO
1646 BENCH_END(small_step_prep_tim)
1647
1648
1649 #ifdef DM_PARALLEL
1650 !-----------------------------------------------------------------------
1651 ! Stencils for patch communications (WCS, 29 June 2001)
1652 ! Note: the small size of this halo exchange reflects the
1653 ! fact that we are carrying the uncoupled variables
1654 ! as state variables in the mass coordinate model, as
1655 ! opposed to the coupled variables as in the height
1656 ! coordinate model.
1657 !
1658 ! * * * * *
1659 ! * * * * * * * * *
1660 ! * + * * + * * * + * *
1661 ! * * * * * * * * *
1662 ! * * * * *
1663 !
1664 ! 3D variables - note staggering! grid%em_ph_2 (Z), grid%em_u_save (X), grid%em_v_save (Y)
1665 !
1666 !j grid%em_ph_2 x
1667 !j grid%em_al x
1668 !j grid%em_p x
1669 !j grid%em_t_1 x
1670 !j grid%em_t_save x
1671 !j grid%em_u_save x
1672 !j grid%em_v_save x
1673 !
1674 ! the following are 2D (xy) variables
1675 !
1676 !j grid%em_mu_1 x
1677 !j grid%em_mu_2 x
1678 !j grid%em_mudf x
1679 !--------------------------------------------------------------
1680 # include "HALO_EM_B.inc"
1681 # include "PERIOD_BDY_EM_B.inc"
1682 #endif
1683
1684 BENCH_START(set_phys_bc2_tim)
1685 !$OMP PARALLEL DO &
1686 !$OMP PRIVATE ( ij )
1687
1688 DO ij = 1 , grid%num_tiles
1689
1690 CALL set_physical_bc3d( grid%em_ru_tend, 'u', config_flags, &
1691 ids, ide, jds, jde, kds, kde, &
1692 ims, ime, jms, jme, kms, kme, &
1693 ips, ipe, jps, jpe, kps, kpe, &
1694 grid%i_start(ij), grid%i_end(ij), &
1695 grid%j_start(ij), grid%j_end(ij), &
1696 k_start , k_end )
1697
1698 CALL set_physical_bc3d( grid%em_rv_tend, 'v', config_flags, &
1699 ids, ide, jds, jde, kds, kde, &
1700 ims, ime, jms, jme, kms, kme, &
1701 ips, ipe, jps, jpe, kps, kpe, &
1702 grid%i_start(ij), grid%i_end(ij), &
1703 grid%j_start(ij), grid%j_end(ij), &
1704 k_start , k_end )
1705
1706 CALL set_physical_bc3d( grid%em_ph_2, 'w', config_flags, &
1707 ids, ide, jds, jde, kds, kde, &
1708 ims, ime, jms, jme, kms, kme, &
1709 ips, ipe, jps, jpe, kps, kpe, &
1710 grid%i_start(ij), grid%i_end(ij), &
1711 grid%j_start(ij), grid%j_end(ij), &
1712 k_start , k_end )
1713
1714 CALL set_physical_bc3d( grid%em_al, 'p', config_flags, &
1715 ids, ide, jds, jde, kds, kde, &
1716 ims, ime, jms, jme, kms, kme, &
1717 ips, ipe, jps, jpe, kps, kpe, &
1718 grid%i_start(ij), grid%i_end(ij), &
1719 grid%j_start(ij), grid%j_end(ij), &
1720 k_start , k_end )
1721
1722 CALL set_physical_bc3d( grid%em_p, 'p', config_flags, &
1723 ids, ide, jds, jde, kds, kde, &
1724 ims, ime, jms, jme, kms, kme, &
1725 ips, ipe, jps, jpe, kps, kpe, &
1726 grid%i_start(ij), grid%i_end(ij), &
1727 grid%j_start(ij), grid%j_end(ij), &
1728 k_start , k_end )
1729
1730 CALL set_physical_bc3d( grid%em_t_1, 'p', config_flags, &
1731 ids, ide, jds, jde, kds, kde, &
1732 ims, ime, jms, jme, kms, kme, &
1733 ips, ipe, jps, jpe, kps, kpe, &
1734 grid%i_start(ij), grid%i_end(ij), &
1735 grid%j_start(ij), grid%j_end(ij), &
1736 k_start , k_end )
1737
1738 CALL set_physical_bc3d( grid%em_t_save, 't', config_flags, &
1739 ids, ide, jds, jde, kds, kde, &
1740 ims, ime, jms, jme, kms, kme, &
1741 ips, ipe, jps, jpe, kps, kpe, &
1742 grid%i_start(ij), grid%i_end(ij), &
1743 grid%j_start(ij), grid%j_end(ij), &
1744 k_start , k_end )
1745
1746 CALL set_physical_bc2d( grid%em_mu_1, 't', config_flags, &
1747 ids, ide, jds, jde, &
1748 ims, ime, jms, jme, &
1749 ips, ipe, jps, jpe, &
1750 grid%i_start(ij), grid%i_end(ij), &
1751 grid%j_start(ij), grid%j_end(ij) )
1752
1753 CALL set_physical_bc2d( grid%em_mu_2, 't', config_flags, &
1754 ids, ide, jds, jde, &
1755 ims, ime, jms, jme, &
1756 ips, ipe, jps, jpe, &
1757 grid%i_start(ij), grid%i_end(ij), &
1758 grid%j_start(ij), grid%j_end(ij) )
1759
1760 CALL set_physical_bc2d( grid%em_mudf, 't', config_flags, &
1761 ids, ide, jds, jde, &
1762 ims, ime, jms, jme, &
1763 ips, ipe, jps, jpe, &
1764 grid%i_start(ij), grid%i_end(ij), &
1765 grid%j_start(ij), grid%j_end(ij) )
1766
1767 END DO
1768 !$OMP END PARALLEL DO
1769 BENCH_END(set_phys_bc2_tim)
1770
1771 small_steps : DO iteration = 1 , number_of_small_timesteps
1772
1773 ! Boundary condition time (or communication time).
1774
1775 #ifdef DM_PARALLEL
1776 # include "PERIOD_BDY_EM_B.inc"
1777 #endif
1778
1779
1780 !$OMP PARALLEL DO &
1781 !$OMP PRIVATE ( ij )
1782
1783 DO ij = 1 , grid%num_tiles
1784
1785 BENCH_START(advance_uv_tim)
1786 #ifdef WRFVAR_TESTING
1787 if(TEST_advance_uv) THEN
1788 CALL t_advance_uv ( grid%em_u_2, grid%em_ru_tend, grid%em_v_2, grid%em_rv_tend, &
1789 grid%em_p, grid%em_pb, &
1790 grid%em_ph_2, grid%em_php, grid%em_alt, grid%em_al, grid%em_mu_2, &
1791 grid%em_muu, cqu, grid%em_muv, cqv, grid%em_mudf, &
1792 grid%rdx, grid%rdy, dts_rk, &
1793 grid%cf1, grid%cf2, grid%cf3, grid%em_fnm, grid%em_fnp, &
1794 grid%emdiv, &
1795 grid%em_rdnw, config_flags,grid%spec_zone, &
1796 config_flags%non_hydrostatic, &
1797 ids, ide, jds, jde, kds, kde, &
1798 ims, ime, jms, jme, kms, kme, &
1799 grid%i_start(ij), grid%i_end(ij), &
1800 grid%j_start(ij), grid%j_end(ij), &
1801 k_start , k_end )
1802 END IF
1803 #endif
1804 CALL advance_uv ( grid%em_u_2, grid%em_ru_tend, grid%em_v_2, grid%em_rv_tend, &
1805 grid%em_p, grid%em_pb, &
1806 grid%em_ph_2, grid%em_php, grid%em_alt, grid%em_al, grid%em_mu_2, &
1807 grid%em_muu, cqu, grid%em_muv, cqv, grid%em_mudf, &
1808 grid%rdx, grid%rdy, dts_rk, &
1809 grid%cf1, grid%cf2, grid%cf3, grid%em_fnm, grid%em_fnp, &
1810 grid%emdiv, &
1811 grid%em_rdnw, config_flags,grid%spec_zone, &
1812 config_flags%non_hydrostatic, &
1813 ids, ide, jds, jde, kds, kde, &
1814 ims, ime, jms, jme, kms, kme, &
1815 grid%i_start(ij), grid%i_end(ij), &
1816 grid%j_start(ij), grid%j_end(ij), &
1817 k_start , k_end )
1818 BENCH_END(advance_uv_tim)
1819
1820 BENCH_START(spec_bdy_uv_tim)
1821 IF( config_flags%specified .or. config_flags%nested ) THEN
1822 #ifdef WRFVAR_TESTING
1823 if(TEST_spec_bdyupdate) THEN
1824 CALL t_spec_bdyupdate(grid%em_u_2, grid%em_ru_tend, dts_rk, &
1825 'u' , config_flags, &
1826 grid%spec_zone, &
1827 ids,ide, jds,jde, kds,kde, & ! domain dims
1828 ims,ime, jms,jme, kms,kme, & ! memory dims
1829 ips,ipe, jps,jpe, kps,kpe, & ! patch dims
1830 grid%i_start(ij), grid%i_end(ij), &
1831 grid%j_start(ij), grid%j_end(ij), &
1832 k_start , k_end )
1833 END IF
1834 #endif
1835 CALL spec_bdyupdate(grid%em_u_2, grid%em_ru_tend, dts_rk, &
1836 'u' , config_flags, &
1837 grid%spec_zone, &
1838 ids,ide, jds,jde, kds,kde, & ! domain dims
1839 ims,ime, jms,jme, kms,kme, & ! memory dims
1840 ips,ipe, jps,jpe, kps,kpe, & ! patch dims
1841 grid%i_start(ij), grid%i_end(ij), &
1842 grid%j_start(ij), grid%j_end(ij), &
1843 k_start , k_end )
1844
1845 CALL spec_bdyupdate(grid%em_v_2, grid%em_rv_tend, dts_rk, &
1846 'v' , config_flags, &
1847 grid%spec_zone, &
1848 ids,ide, jds,jde, kds,kde, & ! domain dims
1849 ims,ime, jms,jme, kms,kme, & ! memory dims
1850 ips,ipe, jps,jpe, kps,kpe, & ! patch dims
1851 grid%i_start(ij), grid%i_end(ij), &
1852 grid%j_start(ij), grid%j_end(ij), &
1853 k_start , k_end )
1854
1855 ENDIF
1856 BENCH_END(spec_bdy_uv_tim)
1857
1858 END DO
1859 !$OMP END PARALLEL DO
1860
1861 #ifdef DM_PARALLEL
1862 !
1863 ! Stencils for patch communications (WCS, 29 June 2001)
1864 !
1865 ! * *
1866 ! * + * * + * +
1867 ! * *
1868 !
1869 ! grid%em_u_2 x
1870 ! grid%em_v_2 x
1871 !
1872 # include "HALO_EM_C.inc"
1873 #endif
1874
1875 !$OMP PARALLEL DO &
1876 !$OMP PRIVATE ( ij )
1877
1878 DO ij = 1 , grid%num_tiles
1879
1880 ! advance the mass in the column, theta, and calculate grid%em_ww
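! Schematically (an illustrative sketch only, with simplified symbols,
! not the actual finite differences coded in advance_mu_t), the column
! mass and omega follow from the vertically integrated continuity
! equation,
!
!   d(mu)/dt = - SUM_k [ d(mu*u)/dx + d(mu*v)/dy ] * dnw(k)
!   ww(k)    = - SUM_{l<k} [ d(mu)/dt + d(mu*u)/dx + d(mu*v)/dy ] * dnw(l)
!
! and theta is advanced with the flux divergence computed from the
! updated winds.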
1881
1882 #ifdef WRFVAR_TESTING
1883 if(TEST_advance_mu_t) THEN
1884 CALL t_advance_mu_t( grid%em_ww, ww1, grid%em_u_2, grid%em_u_save, grid%em_v_2, grid%em_v_save, &
1885 grid%em_mu_2, grid%em_mut, muave, grid%em_muts, grid%em_muu, grid%em_muv, &
1886 grid%em_mudf, grid%em_ru_m, grid%em_rv_m, grid%em_ww_m, &
1887 grid%em_t_2, grid%em_t_save, t_2save, t_tend, &
1888 mu_tend, &
1889 grid%rdx, grid%rdy, dts_rk, grid%epssm, &
1890 grid%em_dnw, grid%em_fnm, grid%em_fnp, grid%em_rdnw, &
1891 grid%msfu, grid%msfv, grid%msft, &
1892 iteration, config_flags, &
1893 ids, ide, jds, jde, kds, kde, &
1894 ims, ime, jms, jme, kms, kme, &
1895 grid%i_start(ij), grid%i_end(ij), &
1896 grid%j_start(ij), grid%j_end(ij), &
1897 k_start , k_end )
1898 END IF
1899 #endif
1900 BENCH_START(advance_mu_t_tim)
1901 CALL advance_mu_t( grid%em_ww, ww1, grid%em_u_2, grid%em_u_save, grid%em_v_2, grid%em_v_save, &
1902 grid%em_mu_2, grid%em_mut, muave, grid%em_muts, grid%em_muu, grid%em_muv, &
1903 grid%em_mudf, grid%em_ru_m, grid%em_rv_m, grid%em_ww_m, &
1904 grid%em_t_2, grid%em_t_save, t_2save, t_tend, &
1905 mu_tend, &
1906 grid%rdx, grid%rdy, dts_rk, grid%epssm, &
1907 grid%em_dnw, grid%em_fnm, grid%em_fnp, grid%em_rdnw, &
1908 grid%msfu, grid%msfv, grid%msft, &
1909 iteration, config_flags, &
1910 ids, ide, jds, jde, kds, kde, &
1911 ims, ime, jms, jme, kms, kme, &
1912 grid%i_start(ij), grid%i_end(ij), &
1913 grid%j_start(ij), grid%j_end(ij), &
1914 k_start , k_end )
1915 BENCH_END(advance_mu_t_tim)
1916
1917 BENCH_START(spec_bdy_t_tim)
1918 IF( config_flags%specified .or. config_flags%nested ) THEN
1919
1920 CALL spec_bdyupdate(grid%em_t_2, t_tend, dts_rk, &
1921 't' , config_flags, &
1922 grid%spec_zone, &
1923 ids,ide, jds,jde, kds,kde, & ! domain dims
1924 ims,ime, jms,jme, kms,kme, & ! memory dims
1925 ips,ipe, jps,jpe, kps,kpe, & ! patch dims
1926 grid%i_start(ij), grid%i_end(ij), &
1927 grid%j_start(ij), grid%j_end(ij), &
1928 k_start , k_end )
1929
1930 CALL spec_bdyupdate(grid%em_mu_2, mu_tend, dts_rk, &
1931 'm' , config_flags, &
1932 grid%spec_zone, &
1933 ids,ide, jds,jde, 1 ,1 , & ! domain dims
1934 ims,ime, jms,jme, 1 ,1 , & ! memory dims
1935 ips,ipe, jps,jpe, 1 ,1 , & ! patch dims
1936 grid%i_start(ij), grid%i_end(ij), &
1937 grid%j_start(ij), grid%j_end(ij), &
1938 1 , 1 )
1939
1940 CALL spec_bdyupdate(grid%em_muts, mu_tend, dts_rk, &
1941 'm' , config_flags, &
1942 grid%spec_zone, &
1943 ids,ide, jds,jde, 1 ,1 , & ! domain dims
1944 ims,ime, jms,jme, 1 ,1 , & ! memory dims
1945 ips,ipe, jps,jpe, 1 ,1 , & ! patch dims
1946 grid%i_start(ij), grid%i_end(ij), &
1947 grid%j_start(ij), grid%j_end(ij), &
1948 1 , 1 )
1949 ENDIF
1950 BENCH_END(spec_bdy_t_tim)
1951
1952 ! sumflux accumulates the time-averaged mass flux
1953 ! (averaged over the acoustic steps) for use
1954 ! in the scalar advection (flux divergence). Using
1955 ! time-averaged values gives us exact scalar conservation.
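! As a schematic illustration only (the actual routine also applies the
! time off-centering weight grid%epssm and the map-scale factors, and
! nsmall is shorthand for number_of_small_timesteps), the accumulation
! looks like
!
!   ru_m = ru_m + muu*u / nsmall
!   rv_m = rv_m + muv*v / nsmall
!   ww_m = ww_m + ww    / nsmall
!
! so that ru_m, rv_m and ww_m carry the acoustic-step-averaged, coupled
! mass fluxes used below in the scalar flux-divergence calculations.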
1956
1957 #ifdef WRF_PLUS
1958 if(TEST_sumflux) THEN
1959 CALL t_sumflux ( grid%em_u_2, grid%em_v_2, grid%em_ww, &
1960 grid%em_u_save, grid%em_v_save, ww1, &
1961 grid%em_muu, grid%em_muv, &
1962 grid%em_ru_m, grid%em_rv_m, grid%em_ww_m, grid%epssm, &
1963 grid%msfu, grid%msfv, &
1964 iteration, number_of_small_timesteps, &
1965 ids, ide, jds, jde, kds, kde, &
1966 ims, ime, jms, jme, kms, kme, &
1967 grid%i_start(ij), grid%i_end(ij), &
1968 grid%j_start(ij), grid%j_end(ij), &
1969 k_start , k_end )
1970 END IF
1971 #endif
1972 BENCH_START(sumflux_tim)
1973 CALL sumflux ( grid%em_u_2, grid%em_v_2, grid%em_ww, &
1974 grid%em_u_save, grid%em_v_save, ww1, &
1975 grid%em_muu, grid%em_muv, &
1976 grid%em_ru_m, grid%em_rv_m, grid%em_ww_m, grid%epssm, &
1977 grid%msfu, grid%msfv, &
1978 iteration, number_of_small_timesteps, &
1979 ids, ide, jds, jde, kds, kde, &
1980 ims, ime, jms, jme, kms, kme, &
1981 grid%i_start(ij), grid%i_end(ij), &
1982 grid%j_start(ij), grid%j_end(ij), &
1983 k_start , k_end )
1984 BENCH_END(sumflux_tim)
1985
1986 ! small (acoustic) step for the vertical momentum,
1987 ! density and coupled potential temperature.
1988
1989
1990 BENCH_START(advance_w_tim)
1991 IF ( config_flags%non_hydrostatic ) THEN
1992 #ifdef WRF_PLUS
1993 if(TEST_advance_w) THEN
1994 CALL t_advance_w( grid%em_w_2, rw_tend, grid%em_ww, grid%em_u_2, grid%em_v_2, &
1995 grid%em_mu_2, grid%em_mut, muave, grid%em_muts, &
1996 t_2save, grid%em_t_2, grid%em_t_save, &
1997 grid%em_ph_2, ph_save, grid%em_phb, ph_tend, &
1998 grid%ht, c2a, cqw, grid%em_alt, grid%em_alb, &
1999 a, alpha, gamma, &
2000 grid%rdx, grid%rdy, dts_rk, t0, grid%epssm, &
2001 grid%em_dnw, grid%em_fnm, grid%em_fnp, grid%em_rdnw, grid%em_rdn, &
2002 grid%cf1, grid%cf2, grid%cf3, grid%msft, &
2003 config_flags, &
2004 ids,ide, jds,jde, kds,kde, & ! domain dims
2005 ims,ime, jms,jme, kms,kme, & ! memory dims
2006 grid%i_start(ij), grid%i_end(ij), &
2007 grid%j_start(ij), grid%j_end(ij), &
2008 k_start , k_end )
2009 END IF
2010 #endif
2011 CALL advance_w( grid%em_w_2, rw_tend, grid%em_ww, grid%em_u_2, grid%em_v_2, &
2012 grid%em_mu_2, grid%em_mut, muave, grid%em_muts, &
2013 t_2save, grid%em_t_2, grid%em_t_save, &
2014 grid%em_ph_2, ph_save, grid%em_phb, ph_tend, &
2015 grid%ht, c2a, cqw, grid%em_alt, grid%em_alb, &
2016 a, alpha, gamma, &
2017 grid%rdx, grid%rdy, dts_rk, t0, grid%epssm, &
2018 grid%em_dnw, grid%em_fnm, grid%em_fnp, grid%em_rdnw, grid%em_rdn, &
2019 grid%cf1, grid%cf2, grid%cf3, grid%msft, &
2020 config_flags, &
2021 ids,ide, jds,jde, kds,kde, & ! domain dims
2022 ims,ime, jms,jme, kms,kme, & ! memory dims
2023 grid%i_start(ij), grid%i_end(ij), &
2024 grid%j_start(ij), grid%j_end(ij), &
2025 k_start , k_end )
2026 ENDIF
2027 BENCH_END(advance_w_tim)
2028
2029 IF( config_flags%specified .or. config_flags%nested ) THEN
2030
2031 BENCH_START(spec_bdynhyd_tim)
2032 IF (config_flags%non_hydrostatic) THEN
2033 #ifdef WRF_PLUS
2034 if(TEST_spec_bdyupdate_ph) THEN
2035 CALL t_spec_bdyupdate_ph( ph_save, grid%em_ph_2, ph_tend, mu_tend, grid%em_muts, dts_rk, &
2036 'h' , config_flags, &
2037 grid%spec_zone, &
2038 ids,ide, jds,jde, kds,kde, & ! domain dims
2039 ims,ime, jms,jme, kms,kme, & ! memory dims
2040 ips,ipe, jps,jpe, kps,kpe, & ! patch dims
2041 grid%i_start(ij), grid%i_end(ij), &
2042 grid%j_start(ij), grid%j_end(ij), &
2043 k_start , k_end )
2044 END IF
2045 #endif
2046 CALL spec_bdyupdate_ph( ph_save, grid%em_ph_2, ph_tend, mu_tend, grid%em_muts, dts_rk, &
2047 'h' , config_flags, &
2048 grid%spec_zone, &
2049 ids,ide, jds,jde, kds,kde, & ! domain dims
2050 ims,ime, jms,jme, kms,kme, & ! memory dims
2051 ips,ipe, jps,jpe, kps,kpe, & ! patch dims
2052 grid%i_start(ij), grid%i_end(ij), &
2053 grid%j_start(ij), grid%j_end(ij), &
2054 k_start , k_end )
2055 IF( config_flags%specified ) THEN
2056 CALL zero_grad_bdy ( grid%em_w_2, &
2057 'w' , config_flags, &
2058 grid%spec_zone, &
2059 ids,ide, jds,jde, kds,kde, & ! domain dims
2060 ims,ime, jms,jme, kms,kme, & ! memory dims
2061 ips,ipe, jps,jpe, kps,kpe, & ! patch dims
2062 grid%i_start(ij), grid%i_end(ij), &
2063 grid%j_start(ij), grid%j_end(ij), &
2064 k_start , k_end )
2065 ELSE
2066 CALL spec_bdyupdate ( grid%em_w_2, rw_tend, dts_rk, &
2067 'h' , config_flags, &
2068 grid%spec_zone, &
2069 ids,ide, jds,jde, kds,kde, & ! domain dims
2070 ims,ime, jms,jme, kms,kme, & ! memory dims
2071 ips,ipe, jps,jpe, kps,kpe, & ! patch dims
2072 grid%i_start(ij), grid%i_end(ij), &
2073 grid%j_start(ij), grid%j_end(ij), &
2074 k_start , k_end )
2075 ENDIF
2076 ENDIF
2077 BENCH_END(spec_bdynhyd_tim)
2078 ENDIF
2079
2080 BENCH_START(cald_p_rho_tim)
2081 CALL calc_p_rho( grid%em_al, grid%em_p, grid%em_ph_2, &
2082 grid%em_alt, grid%em_t_2, grid%em_t_save, c2a, pm1, &
2083 grid%em_mu_2, grid%em_muts, grid%em_znu, t0, &
2084 grid%em_rdnw, grid%em_dnw, grid%smdiv, &
2085 config_flags%non_hydrostatic, iteration, &
2086 ids, ide, jds, jde, kds, kde, &
2087 ims, ime, jms, jme, kms, kme, &
2088 grid%i_start(ij), grid%i_end(ij), &
2089 grid%j_start(ij), grid%j_end(ij), &
2090 k_start , k_end )
2091 BENCH_END(cald_p_rho_tim)
2092
2093 ENDDO
2094 !$OMP END PARALLEL DO
2095
2096 #ifdef DM_PARALLEL
2097 !
2098 ! Stencils for patch communications (WCS, 29 June 2001)
2099 !
2100 ! * *
2101 ! * + * * + * +
2102 ! * *
2103 !
2104 ! grid%em_ph_2 x
2105 ! grid%em_al x
2106 ! grid%em_p x
2107 !
2108 ! 2D variables (x,y)
2109 !
2110 ! grid%em_mu_2 x
2111 ! grid%em_muts x
2112 ! grid%em_mudf x
2113
2114 # include "HALO_EM_C2.inc"
2115 # include "PERIOD_BDY_EM_B3.inc"
2116 #endif
2117
2118 BENCH_START(phys_bc_tim)
2119 !$OMP PARALLEL DO &
2120 !$OMP PRIVATE ( ij )
2121
2122 DO ij = 1 , grid%num_tiles
2123
2124 ! boundary condition set for next small timestep
2125
2126 CALL set_physical_bc3d( grid%em_ph_2, 'w', config_flags, &
2127 ids, ide, jds, jde, kds, kde, &
2128 ims, ime, jms, jme, kms, kme, &
2129 ips, ipe, jps, jpe, kps, kpe, &
2130 grid%i_start(ij), grid%i_end(ij), &
2131 grid%j_start(ij), grid%j_end(ij), &
2132 k_start , k_end )
2133
2134 CALL set_physical_bc3d( grid%em_al, 'p', config_flags, &
2135 ids, ide, jds, jde, kds, kde, &
2136 ims, ime, jms, jme, kms, kme, &
2137 ips, ipe, jps, jpe, kps, kpe, &
2138 grid%i_start(ij), grid%i_end(ij), &
2139 grid%j_start(ij), grid%j_end(ij), &
2140 k_start , k_end )
2141
2142 CALL set_physical_bc3d( grid%em_p, 'p', config_flags, &
2143 ids, ide, jds, jde, kds, kde, &
2144 ims, ime, jms, jme, kms, kme, &
2145 ips, ipe, jps, jpe, kps, kpe, &
2146 grid%i_start(ij), grid%i_end(ij), &
2147 grid%j_start(ij), grid%j_end(ij), &
2148 k_start , k_end )
2149
2150 CALL set_physical_bc2d( grid%em_muts, 't', config_flags, &
2151 ids, ide, jds, jde, &
2152 ims, ime, jms, jme, &
2153 ips, ipe, jps, jpe, &
2154 grid%i_start(ij), grid%i_end(ij), &
2155 grid%j_start(ij), grid%j_end(ij) )
2156
2157 CALL set_physical_bc2d( grid%em_mu_2, 't', config_flags, &
2158 ids, ide, jds, jde, &
2159 ims, ime, jms, jme, &
2160 ips, ipe, jps, jpe, &
2161 grid%i_start(ij), grid%i_end(ij), &
2162 grid%j_start(ij), grid%j_end(ij) )
2163
2164 CALL set_physical_bc2d( grid%em_mudf, 't', config_flags, &
2165 ids, ide, jds, jde, &
2166 ims, ime, jms, jme, &
2167 ips, ipe, jps, jpe, &
2168 grid%i_start(ij), grid%i_end(ij), &
2169 grid%j_start(ij), grid%j_end(ij) )
2170
2171 END DO
2172 !$OMP END PARALLEL DO
2173 BENCH_END(phys_bc_tim)
2174
2175 END DO small_steps
2176
2177 !$OMP PARALLEL DO &
2178 !$OMP PRIVATE ( ij )
2179
2180 DO ij = 1 , grid%num_tiles
2181
2182 CALL wrf_debug ( 200 , ' call rk_small_finish' )
2183
2184 ! change time-perturbation variables back to
2185 ! full perturbation variables.
2186 ! first get updated mu at u and v points
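! Schematically (an illustrative sketch only; small_step_finish also
! handles the map-scale-factor and column-mass coupling), the acoustic
! loop integrates deviations from the saved RK state, so the finish
! step adds the saved values back in, e.g.
!
!   u_2 = u_save + u_2 ,   v_2  = v_save  + v_2 ,   w_2  = w_save  + w_2
!   t_2 = t_save + t_2 ,   ph_2 = ph_save + ph_2 ,  mu_2 = mu_save + mu_2
!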
2187
2188 #ifdef WRF_PLUS
2189 if(TEST_calc_mu_uv_1) THEN
2190 CALL t_calc_mu_uv_1 ( config_flags, &
2191 grid%em_muts, muus, muvs, &
2192 ids, ide, jds, jde, kds, kde, &
2193 ims, ime, jms, jme, kms, kme, &
2194 grid%i_start(ij), grid%i_end(ij), &
2195 grid%j_start(ij), grid%j_end(ij), &
2196 k_start , k_end )
2197 END IF
2198 #endif
2199
2200
2201 BENCH_START(calc_mu_uv_tim)
2202 CALL calc_mu_uv_1 ( config_flags, &
2203 grid%em_muts, muus, muvs, &
2204 ids, ide, jds, jde, kds, kde, &
2205 ims, ime, jms, jme, kms, kme, &
2206 grid%i_start(ij), grid%i_end(ij), &
2207 grid%j_start(ij), grid%j_end(ij), &
2208 k_start , k_end )
2209 BENCH_END(calc_mu_uv_tim)
2210 #ifdef WRF_PLUS
2211 if(TEST_small_step_finish) THEN
2212 CALL t_small_step_finish( grid%em_u_2, grid%em_u_1, grid%em_v_2, grid%em_v_1, grid%em_w_2, grid%em_w_1, &
2213 grid%em_t_2, grid%em_t_1, grid%em_ph_2, grid%em_ph_1, grid%em_ww, ww1, &
2214 grid%em_mu_2, grid%em_mu_1, &
2215 grid%em_mut, grid%em_muts, grid%em_muu, muus, grid%em_muv, muvs, &
2216 grid%em_u_save, grid%em_v_save, w_save, &
2217 grid%em_t_save, ph_save, mu_save, &
2218 grid%msfu, grid%msfv, grid%msft, &
2219 grid%h_diabatic, &
2220 number_of_small_timesteps,dts_rk, &
2221 rk_step, rk_order, &
2222 ids, ide, jds, jde, kds, kde, &
2223 ims, ime, jms, jme, kms, kme, &
2224 grid%i_start(ij), grid%i_end(ij), &
2225 grid%j_start(ij), grid%j_end(ij), &
2226 k_start , k_end )
2227 END IF
2228 #endif
2229 BENCH_START(small_step_finish_tim)
2230 CALL small_step_finish( grid%em_u_2, grid%em_u_1, grid%em_v_2, grid%em_v_1, grid%em_w_2, grid%em_w_1, &
2231 grid%em_t_2, grid%em_t_1, grid%em_ph_2, grid%em_ph_1, grid%em_ww, ww1, &
2232 grid%em_mu_2, grid%em_mu_1, &
2233 grid%em_mut, grid%em_muts, grid%em_muu, muus, grid%em_muv, muvs, &
2234 grid%em_u_save, grid%em_v_save, w_save, &
2235 grid%em_t_save, ph_save, mu_save, &
2236 grid%msfu, grid%msfv, grid%msft, &
2237 grid%h_diabatic, &
2238 number_of_small_timesteps,dts_rk, &
2239 rk_step, rk_order, &
2240 ids, ide, jds, jde, kds, kde, &
2241 ims, ime, jms, jme, kms, kme, &
2242 grid%i_start(ij), grid%i_end(ij), &
2243 grid%j_start(ij), grid%j_end(ij), &
2244 k_start , k_end )
2245 ! call to set ru_m, rv_m and ww_m b.c's for PD advection
2246
2247 IF (rk_step == 3) THEN
2248
2249 CALL set_physical_bc3d( grid%em_ru_m, 'u', config_flags, &
2250 ids, ide, jds, jde, kds, kde, &
2251 ims, ime, jms, jme, kms, kme, &
2252 ips, ipe, jps, jpe, kps, kpe, &
2253 grid%i_start(ij), grid%i_end(ij), &
2254 grid%j_start(ij), grid%j_end(ij), &
2255 k_start , k_end )
2256
2257 CALL set_physical_bc3d( grid%em_rv_m, 'v', config_flags, &
2258 ids, ide, jds, jde, kds, kde, &
2259 ims, ime, jms, jme, kms, kme, &
2260 ips, ipe, jps, jpe, kps, kpe, &
2261 grid%i_start(ij), grid%i_end(ij), &
2262 grid%j_start(ij), grid%j_end(ij), &
2263 k_start , k_end )
2264
2265 CALL set_physical_bc3d( grid%em_ww_m, 'w', config_flags, &
2266 ids, ide, jds, jde, kds, kde, &
2267 ims, ime, jms, jme, kms, kme, &
2268 ips, ipe, jps, jpe, kps, kpe, &
2269 grid%i_start(ij), grid%i_end(ij), &
2270 grid%j_start(ij), grid%j_end(ij), &
2271 k_start , k_end )
2272
2273 CALL set_physical_bc2d( grid%em_mut, 't', config_flags, &
2274 ids, ide, jds, jde, &
2275 ims, ime, jms, jme, &
2276 ips, ipe, jps, jpe, &
2277 grid%i_start(ij), grid%i_end(ij), &
2278 grid%j_start(ij), grid%j_end(ij) )
2279
2280 END IF
2281
2282 BENCH_END(small_step_finish_tim)
2283
2284 END DO
2285 !$OMP END PARALLEL DO
2286
2287 !-----------------------------------------------------------------------
2288 ! add in the physics tendency first if positive-definite (PD) advection is used;
2289 ! PD advection applies an advective flux limiter on the last Runge-Kutta step
2290 !-----------------------------------------------------------------------
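! In schematic terms only (q, q_old, q_tend_physics and mu are
! simplified stand-ins, not the code inside rk_update_scalar_pd): the
! non-advective (physics) part of the tendency is folded into the "old"
! scalar first, roughly
!
!   q_old = q_old + dt * q_tend_physics / mu ,
!
! so that the final-step advective update can limit (renormalize) the
! advective fluxes alone and keep the updated scalar non-negative.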
2291 ! first moisture
2292
2293 IF (config_flags%pd_moist .and. (rk_step == rk_order)) THEN
2294
2295 !$OMP PARALLEL DO &
2296 !$OMP PRIVATE ( ij )
2297 DO ij = 1 , grid%num_tiles
2298 CALL wrf_debug ( 200 , ' call rk_update_scalar_pd' )
2299 do im = PARAM_FIRST_SCALAR, num_3d_m
2300 CALL rk_update_scalar_pd( im, im, &
2301 moist_old(ims,kms,jms,im), &
2302 moist_tend(ims,kms,jms,im), &
2303 grid%msft, &
2304 grid%em_mu_1, grid%em_mu_1, grid%em_mub, &
2305 rk_step, dt_rk, grid%spec_zone, &
2306 config_flags, &
2307 ids, ide, jds, jde, kds, kde, &
2308 ims, ime, jms, jme, kms, kme, &
2309 grid%i_start(ij), grid%i_end(ij), &
2310 grid%j_start(ij), grid%j_end(ij), &
2311 k_start , k_end )
2312 ENDDO
2313 END DO
2314 !$OMP END PARALLEL DO
2315
2316 !---------------------- positive definite bc call
2317
2318 !$OMP PARALLEL DO &
2319 !$OMP PRIVATE ( ij )
2320 DO ij = 1 , grid%num_tiles
2321 IF (num_3d_m >= PARAM_FIRST_SCALAR) THEN
2322 DO im = PARAM_FIRST_SCALAR , num_3d_m
2323 CALL set_physical_bc3d( moist_old(ims,kms,jms,im), 'p', config_flags, &
2324 ids, ide, jds, jde, kds, kde, &
2325 ims, ime, jms, jme, kms, kme, &
2326 ips, ipe, jps, jpe, kps, kpe, &
2327 grid%i_start(ij), grid%i_end(ij), &
2328 grid%j_start(ij), grid%j_end(ij), &
2329 k_start , k_end )
2330 END DO
2331 ENDIF
2332 END DO
2333 !$OMP END PARALLEL DO
2334
2335 #ifdef DM_PARALLEL
2336 if(config_flags%pd_moist) then
2337 #ifndef RSL
2338 IF ( config_flags%h_sca_adv_order <= 4 ) THEN
2339 # include "HALO_EM_MOIST_OLD_E_5.inc"
2340 ELSE IF ( config_flags%h_sca_adv_order <= 6 ) THEN
2341 # include "HALO_EM_MOIST_OLD_E_7.inc"
2342 ELSE
2343 WRITE(wrf_err_message,*)'solve_em: invalid h_sca_adv_order = ',config_flags%h_sca_adv_order
2344 CALL wrf_error_fatal(TRIM(wrf_err_message))
2345 ENDIF
2346 #else
2347 WRITE(wrf_err_message,*)'cannot use pd scheme with RSL - use RSL-LITE'
2348 CALL wrf_error_fatal(TRIM(wrf_err_message))
2349 #endif
2350 endif
2351 #endif
2352
2353 END IF ! end if for pd_moist
2354
2355 ! scalars
2356
2357 IF (config_flags%pd_scalar .and. (rk_step == rk_order)) THEN
2358
2359 !$OMP PARALLEL DO &
2360 !$OMP PRIVATE ( ij )
2361
2362 DO ij = 1 , grid%num_tiles
2363 CALL wrf_debug ( 200 , ' call rk_update_scalar_pd' )
2364 do im = PARAM_FIRST_SCALAR, num_3d_s
2365 CALL rk_update_scalar_pd( im, im, &
2366 scalar_old(ims,kms,jms,im), &
2367 scalar_tend(ims,kms,jms,im), &
2368 grid%msft, &
2369 grid%em_mu_1, grid%em_mu_1, grid%em_mub, &
2370 rk_step, dt_rk, grid%spec_zone, &
2371 config_flags, &
2372 ids, ide, jds, jde, kds, kde, &
2373 ims, ime, jms, jme, kms, kme, &
2374 grid%i_start(ij), grid%i_end(ij), &
2375 grid%j_start(ij), grid%j_end(ij), &
2376 k_start , k_end )
2377 ENDDO
2378 ENDDO
2379 !$OMP END PARALLEL DO
2380
2381 !---------------------- positive definite bc call
2382
2383 !$OMP PARALLEL DO &
2384 !$OMP PRIVATE ( ij )
2385
2386 DO ij = 1 , grid%num_tiles
2387 IF (num_3d_m >= PARAM_FIRST_SCALAR) THEN
2388 DO im = PARAM_FIRST_SCALAR , num_3d_s
2389 CALL set_physical_bc3d( scalar_old(ims,kms,jms,im), 'p', config_flags, &
2390 ids, ide, jds, jde, kds, kde, &
2391 ims, ime, jms, jme, kms, kme, &
2392 ips, ipe, jps, jpe, kps, kpe, &
2393 grid%i_start(ij), grid%i_end(ij), &
2394 grid%j_start(ij), grid%j_end(ij), &
2395 k_start , k_end )
2396 END DO
2397 ENDIF
2398 END DO
2399 !$OMP END PARALLEL DO
2400
2401 #ifdef DM_PARALLEL
2402 if(config_flags%pd_scalar) then
2403 #ifndef RSL
2404 IF ( config_flags%h_sca_adv_order <= 4 ) THEN
2405 # include "HALO_EM_SCALAR_OLD_E_5.inc"
2406 ELSE IF ( config_flags%h_sca_adv_order <= 6 ) THEN
2407 # include "HALO_EM_SCALAR_OLD_E_7.inc"
2408 ELSE
2409 WRITE(wrf_err_message,*)'solve_em: invalid h_sca_adv_order = ',config_flags%h_sca_adv_order
2410 CALL wrf_error_fatal(TRIM(wrf_err_message))
2411 ENDIF
2412 #else
2413 WRITE(wrf_err_message,*)'cannot use pd scheme with RSL - use RSL-LITE'
2414 CALL wrf_error_fatal(TRIM(wrf_err_message))
2415 #endif
2416 endif
2417 #endif
2418
2419 END IF ! end if for pd_scalar
2420
2421 ! chem
2422
2423 IF (config_flags%pd_chem .and. (rk_step == rk_order)) THEN
2424
2425 ! write(6,*) ' pd advection for chem '
2426
2427 !$OMP PARALLEL DO &
2428 !$OMP PRIVATE ( ij )
2429 DO ij = 1 , grid%num_tiles
2430 CALL wrf_debug ( 200 , ' call rk_update_scalar_pd' )
2431 do im = PARAM_FIRST_SCALAR, num_3d_c
2432 CALL rk_update_scalar_pd( im, im, &
2433 chem_old(ims,kms,jms,im), &
2434 chem_tend(ims,kms,jms,im), &
2435 grid%msft, &
2436 grid%em_mu_1, grid%em_mu_1, grid%em_mub, &
2437 rk_step, dt_rk, grid%spec_zone, &
2438 config_flags, &
2439 ids, ide, jds, jde, kds, kde, &
2440 ims, ime, jms, jme, kms, kme, &
2441 grid%i_start(ij), grid%i_end(ij), &
2442 grid%j_start(ij), grid%j_end(ij), &
2443 k_start , k_end )
2444 ENDDO
2445 END DO
2446 !$OMP END PARALLEL DO
2447
2448 !---------------------- positive definite bc call
2449
2450 !$OMP PARALLEL DO &
2451 !$OMP PRIVATE ( ij )
2452 DO ij = 1 , grid%num_tiles
2453 IF (num_3d_m >= PARAM_FIRST_SCALAR) THEN
2454 DO im = PARAM_FIRST_SCALAR , num_3d_c
2455 CALL set_physical_bc3d( chem_old(ims,kms,jms,im), 'p', config_flags, &
2456 ids, ide, jds, jde, kds, kde, &
2457 ims, ime, jms, jme, kms, kme, &
2458 ips, ipe, jps, jpe, kps, kpe, &
2459 grid%i_start(ij), grid%i_end(ij), &
2460 grid%j_start(ij), grid%j_end(ij), &
2461 k_start , k_end )
2462 END DO
2463 ENDIF
2464 END DO
2465 !$OMP END PARALLEL DO
2466
2467
2468 #ifdef DM_PARALLEL
2469 if(config_flags%pd_chem) then
2470 #ifndef RSL
2471 IF ( config_flags%h_sca_adv_order <= 4 ) THEN
2472 # include "HALO_EM_CHEM_OLD_E_5.inc"
2473 ELSE IF ( config_flags%h_sca_adv_order <= 6 ) THEN
2474 # include "HALO_EM_CHEM_OLD_E_7.inc"
2475 ELSE
2476 WRITE(wrf_err_message,*)'solve_em: invalid h_sca_adv_order = ',config_flags%h_sca_adv_order
2477 CALL wrf_error_fatal(TRIM(wrf_err_message))
2478 ENDIF
2479 #else
2480 WRITE(wrf_err_message,*)'cannot use pd scheme with RSL - use RSL-LITE'
2481 CALL wrf_error_fatal(TRIM(wrf_err_message))
2482 #endif
2483 endif
2484 #endif
2485
2486 END IF ! end if for pd_chem
2487
2488 ! tke
2489
2490 IF (config_flags%pd_tke .and. (rk_step == rk_order) &
2491 .and. (config_flags%km_opt .eq. 2) ) THEN
2492
2493 ! write(6,*) ' pd advection for tke '
2494
2495 !$OMP PARALLEL DO &
2496 !$OMP PRIVATE ( ij )
2497
2498 DO ij = 1 , grid%num_tiles
2499 CALL wrf_debug ( 200 , ' call rk_update_scalar_pd' )
2500 CALL rk_update_scalar_pd( 1, 1, &
2501 grid%em_tke_1, &
2502 tke_tend(ims,kms,jms), &
2503 grid%msft, &
2504 grid%em_mu_1, grid%em_mu_1, grid%em_mub, &
2505 rk_step, dt_rk, grid%spec_zone, &
2506 config_flags, &
2507 ids, ide, jds, jde, kds, kde, &
2508 ims, ime, jms, jme, kms, kme, &
2509 grid%i_start(ij), grid%i_end(ij), &
2510 grid%j_start(ij), grid%j_end(ij), &
2511 k_start , k_end )
2512 END DO
2513 !$OMP END PARALLEL DO
2514
2515 !---------------------- positive definite bc call
2516
2517 !$OMP PARALLEL DO &
2518 !$OMP PRIVATE ( ij )
2519 DO ij = 1 , grid%num_tiles
2520 CALL set_physical_bc3d( grid%em_tke_1, 'p', config_flags, &
2521 ids, ide, jds, jde, kds, kde, &
2522 ims, ime, jms, jme, kms, kme, &
2523 ips, ipe, jps, jpe, kps, kpe, &
2524 grid%i_start(ij), grid%i_end(ij), &
2525 grid%j_start(ij), grid%j_end(ij), &
2526 k_start , k_end )
2527 END DO
2528 !$OMP END PARALLEL DO
2529
2530 !--- end of positive definite physics tendency update
2531
2532 #ifdef DM_PARALLEL
2533 IF ( config_flags%h_sca_adv_order <= 4 ) THEN
2534 # include "HALO_EM_TKE_OLD_E_5.inc"
2535 ELSE IF ( config_flags%h_sca_adv_order <= 6 ) THEN
2536 # include "HALO_EM_TKE_OLD_E_7.inc"
2537 ELSE
2538 WRITE(wrf_err_message,*)'solve_em: invalid h_sca_adv_order = ',config_flags%h_sca_adv_order
2539 CALL wrf_error_fatal(TRIM(wrf_err_message))
2540 ENDIF
2541 #endif
2542
2543 END IF ! end if for pd_tke
2544
2545 #ifdef DM_PARALLEL
2546 !
2547 ! Stencils for patch communications (WCS, 29 June 2001)
2548 !
2549 !
2550 ! grid%em_ru_m x
2551 ! grid%em_rv_m x
2552 ! grid%em_ww_m x
2553 ! grid%em_mut x
2554 !
2555 !--------------------------------------------------------------
2556
2557 # include "HALO_EM_D.inc"
2558 #endif
2559
2560 !<DESCRIPTION>
2561 !<pre>
2562 ! (4) Still within the RK loop, the scalar variables are advanced.
2563 !
2564 ! For the moist and chem variables, each one is advanced
2565 ! individually, using named loops "moist_variable_loop:"
2566 ! and "chem_variable_loop:". Each RK substep begins by
2567 ! calculating the advective tendency (and, on the first RK step, the
2568 ! 3D mixing) in the call to rk_scalar_tend, followed by an update
2569 ! of the scalar in the call to rk_update_scalar.
2570 !</pre>
2571 !</DESCRIPTION>
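!
! Illustrative outline only (the actual calls below carry the full
! argument lists, boundary handling and benchmarking timers):
!
!   do im = PARAM_FIRST_SCALAR, num_3d_m        ! moist_variable_loop
!      call rk_scalar_tend   ( ... )            ! advective tendency
!                                               ! (+ 3D mixing on rk_step 1)
!      call rk_update_scalar ( ... )            ! advance the scalar
!   end do
!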
2572
2573
2574 moist_scalar_advance: IF (num_3d_m >= PARAM_FIRST_SCALAR ) THEN
2575
2576 moist_variable_loop: do im = PARAM_FIRST_SCALAR, num_3d_m
2577
2578 if (grid%adv_moist_cond .or. im==p_qv ) then
2579
2580 !$OMP PARALLEL DO &
2581 !$OMP PRIVATE ( ij )
2582
2583 moist_tile_loop_1: DO ij = 1 , grid%num_tiles
2584
2585 CALL wrf_debug ( 200 , ' call rk_scalar_tend' )
2586
2587 #ifdef WRF_PLUS
2588 if(TEST_rk_scalar_tend) THEN
2589 CALL t_rk_scalar_tend ( im, im, config_flags, &
2590 rk_step, dt_rk, &
2591 grid%em_ru_m, grid%em_rv_m, grid%em_ww_m, &
2592 grid%em_mut, grid%em_alt, &
2593 moist(ims,kms,jms,im), &
2594 moist_tend(ims,kms,jms,im), &
2595 advect_tend,grid%rqvften, &
2596 grid%qv_base, .true., grid%em_fnm, grid%em_fnp, &
2597 grid%msfu, grid%msfv, grid%msft, &
2598 grid%rdx, grid%rdy, grid%em_rdn, grid%em_rdnw, grid%khdif, &
2599 grid%kvdif, grid%xkmhd, &
2600 grid%diff_6th_opt, grid%diff_6th_rate, &
2601 ids, ide, jds, jde, kds, kde, &
2602 ims, ime, jms, jme, kms, kme, &
2603 grid%i_start(ij), grid%i_end(ij), &
2604 grid%j_start(ij), grid%j_end(ij), &
2605 k_start , k_end )
2606 END IF
2607 #endif
2608
2609 BENCH_START(rk_scalar_tend_tim)
2610
2611 CALL rk_scalar_tend ( im, im, config_flags, &
2612 rk_step, dt_rk, &
2613 grid%em_ru_m, grid%em_rv_m, grid%em_ww_m, &
2614 grid%em_mut, grid%em_mub, grid%em_mu_1, &
2615 grid%em_alt, &
2616 moist_old(ims,kms,jms,im), &
2617 moist(ims,kms,jms,im), &
2618 moist_tend(ims,kms,jms,im), &
2619 advect_tend,grid%rqvften, &
2620 grid%qv_base, .true., grid%em_fnm, grid%em_fnp, &
2621 grid%msfu, grid%msfv, grid%msft, &
2622 grid%rdx, grid%rdy, grid%em_rdn, grid%em_rdnw, grid%khdif, &
2623 grid%kvdif, grid%xkmhd, &
2624 grid%diff_6th_opt, grid%diff_6th_factor, &
2625 config_flags%pd_moist, &
2626 ids, ide, jds, jde, kds, kde, &
2627 ims, ime, jms, jme, kms, kme, &
2628 grid%i_start(ij), grid%i_end(ij), &
2629 grid%j_start(ij), grid%j_end(ij), &
2630 k_start , k_end )
2631
2632 BENCH_END(rk_scalar_tend_tim)
2633
2634 BENCH_START(rlx_bdy_scalar_tim)
2635 IF( ( config_flags%specified .or. config_flags%nested ) .and. rk_step == 1 ) THEN
2636 IF ( im .EQ. P_QV .OR. config_flags%nested ) THEN
2637 CALL relax_bdy_scalar ( moist_tend(ims,kms,jms,im), &
2638 moist(ims,kms,jms,im), grid%em_mut, &
2639 moist_bxs(jms,kms,1,im),moist_bxe(jms,kms,1,im),moist_bys(ims,kms,1,im),moist_bye(ims,kms,1,im), &
2640 moist_btxs(jms,kms,1,im),moist_btxe(jms,kms,1,im),moist_btys(ims,kms,1,im),moist_btye(ims,kms,1,im), &
2641 config_flags%spec_bdy_width, grid%spec_zone, grid%relax_zone, &
2642 grid%dtbc, grid%fcx, grid%gcx, &
2643 config_flags, &
2644 ids,ide, jds,jde, kds,kde, & ! domain dims
2645 ims,ime, jms,jme, kms,kme, & ! memory dims
2646 ips,ipe, jps,jpe, kps,kpe, & ! patch dims
2647 grid%i_start(ij), grid%i_end(ij), &
2648 grid%j_start(ij), grid%j_end(ij), &
2649 k_start, k_end )
2650 #ifdef WRF_PLUS
2651 if(TEST_spec_bdy_scalar) THEN
2652 CALL t_spec_bdy_scalar ( moist_tend(ims,kms,jms,im), &
2653 moist_b(1,1,1,1,im), &
2654 moist_bt(1,1,1,1,im), &
2655 config_flags%spec_bdy_width, grid%spec_zone, &
2656 config_flags, &
2657 ijds, ijde, & ! min/max(id,jd)
2658 ids,ide, jds,jde, kds,kde, & ! domain dims
2659 ims,ime, jms,jme, kms,kme, & ! memory dims
2660 ips,ipe, jps,jpe, kps,kpe, & ! patch dims
2661 grid%i_start(ij), grid%i_end(ij), &
2662 grid%j_start(ij), grid%j_end(ij), &
2663 k_start, k_end )
2664 END IF
2665 #endif
2666 CALL spec_bdy_scalar ( moist_tend(ims,kms,jms,im), &
2667 moist_bxs(jms,kms,1,im),moist_bxe(jms,kms,1,im),moist_bys(ims,kms,1,im),moist_bye(ims,kms,1,im), &
2668 moist_btxs(jms,kms,1,im),moist_btxe(jms,kms,1,im),moist_btys(ims,kms,1,im),moist_btye(ims,kms,1,im), &
2669 config_flags%spec_bdy_width, grid%spec_zone, &
2670 config_flags, &
2671 ids,ide, jds,jde, kds,kde, & ! domain dims
2672 ims,ime, jms,jme, kms,kme, & ! memory dims
2673 ips,ipe, jps,jpe, kps,kpe, & ! patch dims
2674 grid%i_start(ij), grid%i_end(ij), &
2675 grid%j_start(ij), grid%j_end(ij), &
2676 k_start, k_end )
2677 ENDIF
2678 ENDIF
2679 BENCH_END(rlx_bdy_scalar_tim)
2680
2681 ENDDO moist_tile_loop_1
2682 !$OMP END PARALLEL DO
2683
2684 !$OMP PARALLEL DO &
2685 !$OMP PRIVATE ( ij )
2686
2687 moist_tile_loop_2: DO ij = 1 , grid%num_tiles
2688
2689 CALL wrf_debug ( 200 , ' call rk_update_scalar' )
2690
2691 #ifdef WRF_PLUS
2692 if(TEST_rk_update_scalar) THEN
2693 CALL t_rk_update_scalar( im, im, &
2694 moist_old(ims,kms,jms,im), &
2695 moist(ims,kms,jms,im), &
2696 moist_tend(ims,kms,jms,im), &
2697 advect_tend, grid%msft, &
2698 grid%em_mu_1, grid%em_mu_2, grid%em_mub, &
2699 rk_step, dt_rk, grid%spec_zone, &
2700 config_flags, &
2701 ids, ide, jds, jde, kds, kde, &
2702 ims, ime, jms, jme, kms, kme, &
2703 grid%i_start(ij), grid%i_end(ij), &
2704 grid%j_start(ij), grid%j_end(ij), &
2705 k_start , k_end )
2706 END IF
2707 #endif
2708 BENCH_START(update_scal_tim)
2709 CALL rk_update_scalar( im, im, &
2710 moist_old(ims,kms,jms,im), &
2711 moist(ims,kms,jms,im), &
2712 moist_tend(ims,kms,jms,im), &
2713 advect_tend, grid%msft, &
2714 grid%em_mu_1, grid%em_mu_2, grid%em_mub, &
2715 rk_step, dt_rk, grid%spec_zone, &
2716 config_flags, &
2717 ids, ide, jds, jde, kds, kde, &
2718 ims, ime, jms, jme, kms, kme, &
2719 grid%i_start(ij), grid%i_end(ij), &
2720 grid%j_start(ij), grid%j_end(ij), &
2721 k_start , k_end )
2722 BENCH_END(update_scal_tim)
2723
2724 BENCH_START(flow_depbdy_tim)
2725 IF( config_flags%specified ) THEN
2726 IF(im .ne. P_QV)THEN
2727 CALL flow_dep_bdy ( moist(ims,kms,jms,im), &
2728 grid%em_ru_m, grid%em_rv_m, config_flags, &
2729 grid%spec_zone, &
2730 ids,ide, jds,jde, kds,kde, & ! domain dims
2731 ims,ime, jms,jme, kms,kme, & ! memory dims
2732 ips,ipe, jps,jpe, kps,kpe, & ! patch dims
2733 grid%i_start(ij), grid%i_end(ij), &
2734 grid%j_start(ij), grid%j_end(ij), &
2735 k_start, k_end )
2736 ENDIF
2737 ENDIF
2738 BENCH_END(flow_depbdy_tim)
2739
2740 ENDDO moist_tile_loop_2
2741 !$OMP END PARALLEL DO
2742
2743 ENDIF !-- if (grid%adv_moist_cond .or. im==p_qv ) then
2744
2745 ENDDO moist_variable_loop
2746
2747 ENDIF moist_scalar_advance
2748
2749 BENCH_START(tke_adv_tim)
2750 TKE_advance: IF (config_flags%km_opt .eq. 2) then
2751
2752 #ifdef DM_PARALLEL
2753 IF ( config_flags%h_mom_adv_order <= 4 ) THEN
2754 # include "HALO_EM_TKE_ADVECT_3.inc"
2755 ELSE IF ( config_flags%h_mom_adv_order <= 6 ) THEN
2756 # include "HALO_EM_TKE_ADVECT_5.inc"
2757 ELSE
2758 WRITE(wrf_err_message,*)'solve_em: invalid h_mom_adv_order = ',config_flags%h_mom_adv_order
2759 CALL wrf_error_fatal(TRIM(wrf_err_message))
2760 ENDIF
2761 #endif
2762
2763 !$OMP PARALLEL DO &
2764 !$OMP PRIVATE ( ij )
2765
2766 tke_tile_loop_1: DO ij = 1 , grid%num_tiles
2767
2768 CALL wrf_debug ( 200 , ' call rk_scalar_tend for tke' )
2769 CALL rk_scalar_tend ( 1, 1, config_flags, &
2770 rk_step, dt_rk, &
2771 grid%em_ru_m, grid%em_rv_m, grid%em_ww_m, &
2772 grid%em_mut, grid%em_mub, grid%em_mu_1, &
2773 grid%em_alt, &
2774 grid%em_tke_1, &
2775 grid%em_tke_2, &
2776 tke_tend(ims,kms,jms), &
2777 advect_tend,grid%rqvften, &
2778 grid%qv_base, .false., grid%em_fnm, grid%em_fnp, &
2779 grid%msfu, grid%msfv, grid%msft, &
2780 grid%rdx, grid%rdy, grid%em_rdn, grid%em_rdnw, grid%khdif, &
2781 grid%kvdif, grid%xkmhd, &
2782 grid%diff_6th_opt, grid%diff_6th_factor, &
2783 config_flags%pd_tke, &
2784 ids, ide, jds, jde, kds, kde, &
2785 ims, ime, jms, jme, kms, kme, &
2786 grid%i_start(ij), grid%i_end(ij), &
2787 grid%j_start(ij), grid%j_end(ij), &
2788 k_start , k_end )
2789
2790 ENDDO tke_tile_loop_1
2791 !$OMP END PARALLEL DO
2792
2793 !$OMP PARALLEL DO &
2794 !$OMP PRIVATE ( ij )
2795
2796 tke_tile_loop_2: DO ij = 1 , grid%num_tiles
2797
2798 CALL wrf_debug ( 200 , ' call rk_update_scalar' )
2799 CALL rk_update_scalar( 1, 1, &
2800 grid%em_tke_1, &
2801 grid%em_tke_2, &
2802 tke_tend(ims,kms,jms), &
2803 advect_tend,grid%msft, &
2804 grid%em_mu_1, grid%em_mu_2, grid%em_mub, &
2805 rk_step, dt_rk, grid%spec_zone, &
2806 config_flags, &
2807 ids, ide, jds, jde, kds, kde, &
2808 ims, ime, jms, jme, kms, kme, &
2809 grid%i_start(ij), grid%i_end(ij), &
2810 grid%j_start(ij), grid%j_end(ij), &
2811 k_start , k_end )
2812
2813 ! bound the tke (greater than 0, less than tke_upper_bound)
2814
2815 CALL bound_tke( grid%em_tke_2, grid%tke_upper_bound, &
2816 ids, ide, jds, jde, kds, kde, &
2817 ims, ime, jms, jme, kms, kme, &
2818 grid%i_start(ij), grid%i_end(ij), &
2819 grid%j_start(ij), grid%j_end(ij), &
2820 k_start , k_end )
2821
2822 IF( config_flags%specified .or. config_flags%nested ) THEN
2823 CALL flow_dep_bdy ( grid%em_tke_2, &
2824 grid%em_ru_m, grid%em_rv_m, config_flags, &
2825 grid%spec_zone, &
2826 ids,ide, jds,jde, kds,kde, & ! domain dims
2827 ims,ime, jms,jme, kms,kme, & ! memory dims
2828 ips,ipe, jps,jpe, kps,kpe, & ! patch dims
2829 grid%i_start(ij), grid%i_end(ij), &
2830 grid%j_start(ij), grid%j_end(ij), &
2831 k_start, k_end )
2832 ENDIF
2833 ENDDO tke_tile_loop_2
2834 !$OMP END PARALLEL DO
2835
2836 END IF TKE_advance
2837 BENCH_END(tke_adv_tim)
2838
2839 #ifdef WRF_CHEM
2840 ! next the chemical species
2841 BENCH_START(chem_adv_tim)
2842 chem_scalar_advance: IF (num_3d_c >= PARAM_FIRST_SCALAR) THEN
2843
2844 chem_variable_loop: do ic = PARAM_FIRST_SCALAR, num_3d_c
2845
2846 !$OMP PARALLEL DO &
2847 !$OMP PRIVATE ( ij )
2848
2849 chem_tile_loop_1: DO ij = 1 , grid%num_tiles
2850
2851 CALL wrf_debug ( 200 , ' call rk_scalar_tend in chem_tile_loop_1' )
2852 CALL rk_scalar_tend ( ic, ic, config_flags, &
2853 rk_step, dt_rk, &
2854 grid%em_ru_m, grid%em_rv_m, grid%em_ww_m, &
2855 grid%em_mut, grid%em_mub, grid%em_mu_1, &
2856 grid%em_alt, &
2857 chem_old(ims,kms,jms,ic), &
2858 chem(ims,kms,jms,ic), &
2859 chem_tend(ims,kms,jms,ic), &
2860 advect_tend,grid%rqvften, &
2861 grid%qv_base, .false., grid%em_fnm, grid%em_fnp, &
2862 grid%msfu, grid%msfv, grid%msft, &
2863 grid%rdx, grid%rdy, grid%em_rdn, grid%em_rdnw, &
2864 grid%khdif, grid%kvdif, grid%xkmhd, &
2865 grid%diff_6th_opt, grid%diff_6th_factor, &
2866 config_flags%pd_chem, &
2867 ids, ide, jds, jde, kds, kde, &
2868 ims, ime, jms, jme, kms, kme, &
2869 grid%i_start(ij), grid%i_end(ij), &
2870 grid%j_start(ij), grid%j_end(ij), &
2871 k_start , k_end )
2872 !
2873 ! Currently, chemistry species with specified boundaries (i.e. on the mother
2874 ! domain) are overwritten by flow_dep_bdy_chem, so relax_bdy and
2875 ! spec_bdy are only called for nests. For boundary conditions from a global model or a larger domain,
2876 ! chem is uncoupled and is only used for one row/column on inflow (if have_bcs_chem = .true.).
2877 !
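! As a schematic reminder only (q and q_bdy are simplified stand-ins;
! the exact weighting lives in relax_bdy_scalar and spec_bdy_scalar),
! the relaxation-zone update nudges the nest toward the boundary-file
! values, roughly
!
!   q_tend = q_tend + fcx*( q_bdy - q ) - gcx*del2( q_bdy - q ) ,
!
! while spec_bdy_scalar overwrites the tendency in the outermost
! spec_zone rows/columns with the time-interpolated boundary tendency.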
2878 IF( ( config_flags%nested ) .and. rk_step == 1 ) THEN
2879 if(ic.eq.1)CALL wrf_debug ( 10 , ' have_bcs_chem' )
2880
2881 CALL relax_bdy_scalar ( chem_tend(ims,kms,jms,ic), &
2882 chem(ims,kms,jms,ic), grid%em_mut, &
2883 chem_bxs(jms,kms,1,ic),chem_bxe(jms,kms,1,ic),chem_bys(ims,kms,1,ic),chem_bye(ims,kms,1,ic), &
2884 chem_btxs(jms,kms,1,ic),chem_btxe(jms,kms,1,ic),chem_btys(ims,kms,1,ic),chem_btye(ims,kms,1,ic), &
2885 config_flags%spec_bdy_width, grid%spec_zone, grid%relax_zone, &
2886 grid%dtbc, grid%fcx, grid%gcx, &
2887 config_flags, &
2888 ids,ide, jds,jde, kds,kde, & ! domain dims
2889 ims,ime, jms,jme, kms,kme, & ! memory dims
2890 ips,ipe, jps,jpe, kps,kpe, & ! patch dims
2891 grid%i_start(ij), grid%i_end(ij), &
2892 grid%j_start(ij), grid%j_end(ij), &
2893 k_start, k_end )
2894 CALL spec_bdy_scalar ( chem_tend(ims,kms,jms,ic), &
2895 chem_bxs(jms,kms,1,ic),chem_bxe(jms,kms,1,ic),chem_bys(ims,kms,1,ic),chem_bye(ims,kms,1,ic), &
2896 chem_btxs(jms,kms,1,ic),chem_btxe(jms,kms,1,ic),chem_btys(ims,kms,1,ic),chem_btye(ims,kms,1,ic), &
2897 config_flags%spec_bdy_width, grid%spec_zone, &
2898 config_flags, &
2899 ids,ide, jds,jde, kds,kde, & ! domain dims
2900 ims,ime, jms,jme, kms,kme, & ! memory dims
2901 ips,ipe, jps,jpe, kps,kpe, & ! patch dims
2902 grid%i_start(ij), grid%i_end(ij), &
2903 grid%j_start(ij), grid%j_end(ij), &
2904 k_start, k_end )
2905 ENDIF
2906
2907 ENDDO chem_tile_loop_1
2908
2909 !$OMP END PARALLEL DO
2910
2911
2912 !$OMP PARALLEL DO &
2913 !$OMP PRIVATE ( ij )
2914
2915 chem_tile_loop_2: DO ij = 1 , grid%num_tiles
2916
2917 CALL wrf_debug ( 200 , ' call rk_update_scalar' )
2918 CALL rk_update_scalar( ic, ic, &
2919 chem_old(ims,kms,jms,ic), & ! was chem_1
2920 chem(ims,kms,jms,ic), &
2921 chem_tend(ims,kms,jms,ic), &
2922 advect_tend, grid%msft, &
2923 grid%em_mu_1, grid%em_mu_2, grid%em_mub, &
2924 rk_step, dt_rk, grid%spec_zone, &
2925 config_flags, &
2926 ids, ide, jds, jde, kds, kde, &
2927 ims, ime, jms, jme, kms, kme, &
2928 grid%i_start(ij), grid%i_end(ij), &
2929 grid%j_start(ij), grid%j_end(ij), &
2930 k_start , k_end )
2931
2932
2933 IF( config_flags%specified ) THEN
2934 ! come back to this and figure out why two different routines are needed. JM 20041203
2935 !#ifndef WRF_CHEM
2936 !!$ CALL flow_dep_bdy ( chem(ims,kms,jms,ic), &
2937 !!$ grid%em_ru_m, grid%em_rv_m, config_flags, &
2938 !!$ grid%spec_zone, &
2939 !!$ ids,ide, jds,jde, kds,kde, & ! domain dims
2940 !!$ ims,ime, jms,jme, kms,kme, & ! memory dims
2941 !!$ ips,ipe, jps,jpe, kps,kpe, & ! patch dims
2942 !!$ grid%i_start(ij), grid%i_end(ij), &
2943 !!$ grid%j_start(ij), grid%j_end(ij), &
2944 !!$ k_start, k_end )
2945 !#else
2946 CALL flow_dep_bdy_chem( chem(ims,kms,jms,ic), &
2947 chem_bxs(jms,kms,1,ic),chem_bxe(jms,kms,1,ic),chem_bys(ims,kms,1,ic),chem_bye(ims,kms,1,ic), &
2948 chem_btxs(jms,kms,1,ic),chem_btxe(jms,kms,1,ic),chem_btys(ims,kms,1,ic),chem_btye(ims,kms,1,ic), &
2949 dt_rk, &
2950 config_flags%spec_bdy_width,grid%em_z, &
2951 grid%have_bcs_chem, &
2952 grid%em_ru_m, grid%em_rv_m, config_flags,grid%em_alt, &
2953 grid%em_t_1,grid%em_pb,grid%em_p,t0,p1000mb,rcp,grid%em_ph_2,grid%em_phb,g, &
2954 grid%spec_zone,ic, &
2955 ids,ide, jds,jde, kds,kde, & ! domain dims
2956 ims,ime, jms,jme, kms,kme, & ! memory dims
2957 ips,ipe, jps,jpe, kps,kpe, & ! patch dims
2958 grid%i_start(ij), grid%i_end(ij), &
2959 grid%j_start(ij), grid%j_end(ij), &
2960 k_start, k_end )
2961 !#endif
2962 ENDIF
2963
2964
2965 ENDDO chem_tile_loop_2
2966 !$OMP END PARALLEL DO
2967
2968 ENDDO chem_variable_loop
2969
2970 ENDIF chem_scalar_advance
2971 BENCH_END(chem_adv_tim)
2972 #endif
2973
2974 ! next the other scalar species
2975 other_scalar_advance: IF (num_3d_s >= PARAM_FIRST_SCALAR) THEN
2976
2977 scalar_variable_loop: do is = PARAM_FIRST_SCALAR, num_3d_s
2978 !$OMP PARALLEL DO &
2979 !$OMP PRIVATE ( ij )
2980
2981 scalar_tile_loop_1: DO ij = 1 , grid%num_tiles
2982
2983 CALL wrf_debug ( 200 , ' call rk_scalar_tend' )
2984 CALL rk_scalar_tend ( is, is, config_flags, &
2985 rk_step, dt_rk, &
2986 grid%em_ru_m, grid%em_rv_m, grid%em_ww_m, &
2987 grid%em_mut, grid%em_mub, grid%em_mu_1, &
2988 grid%em_alt, &
2989 scalar_old(ims,kms,jms,is), &
2990 scalar(ims,kms,jms,is), &
2991 scalar_tend(ims,kms,jms,is), &
2992 advect_tend,grid%rqvften, &
2993 grid%qv_base, .false., grid%em_fnm, grid%em_fnp, &
2994 grid%msfu, grid%msfv, grid%msft, &
2995 grid%rdx, grid%rdy, grid%em_rdn, grid%em_rdnw, &
2996 grid%khdif, grid%kvdif, grid%xkmhd, &
2997 grid%diff_6th_opt, grid%diff_6th_factor, &
2998 config_flags%pd_scalar, &
2999 ids, ide, jds, jde, kds, kde, &
3000 ims, ime, jms, jme, kms, kme, &
3001 grid%i_start(ij), grid%i_end(ij), &
3002 grid%j_start(ij), grid%j_end(ij), &
3003 k_start , k_end )
3004
3005 IF( config_flags%nested .and. (rk_step == 1) ) THEN
3006
3007 IF (is .eq. P_QNDROP .OR. is .eq. P_QNI) THEN
3008
3009 CALL relax_bdy_scalar ( scalar_tend(ims,kms,jms,is), &
3010 scalar(ims,kms,jms,is), grid%em_mut, &
3011 scalar_bxs(jms,kms,1,is),scalar_bxe(jms,kms,1,is),scalar_bys(ims,kms,1,is),scalar_bye(ims,kms,1,is), &
3012 scalar_btxs(jms,kms,1,is),scalar_btxe(jms,kms,1,is),scalar_btys(ims,kms,1,is),scalar_btye(ims,kms,1,is), &
3013 config_flags%spec_bdy_width, grid%spec_zone, grid%relax_zone, &
3014 grid%dtbc, grid%fcx, grid%gcx, &
3015 config_flags, &
3016 ids,ide, jds,jde, kds,kde, & ! domain dims
3017 ims,ime, jms,jme, kms,kme, & ! memory dims
3018 ips,ipe, jps,jpe, kps,kpe, & ! patch dims
3019 grid%i_start(ij), grid%i_end(ij), &
3020 grid%j_start(ij), grid%j_end(ij), &
3021 k_start, k_end )
3022
3023 CALL spec_bdy_scalar ( scalar_tend(ims,kms,jms,is), &
3024 scalar_bxs(jms,kms,1,is),scalar_bxe(jms,kms,1,is),scalar_bys(ims,kms,1,is),scalar_bye(ims,kms,1,is), &
3025 scalar_btxs(jms,kms,1,is),scalar_btxe(jms,kms,1,is),scalar_btys(ims,kms,1,is),scalar_btye(ims,kms,1,is), &
3026 config_flags%spec_bdy_width, grid%spec_zone, &
3027 config_flags, &
3028 ids,ide, jds,jde, kds,kde, & ! domain dims
3029 ims,ime, jms,jme, kms,kme, & ! memory dims
3030 ips,ipe, jps,jpe, kps,kpe, & ! patch dims
3031 grid%i_start(ij), grid%i_end(ij), &
3032 grid%j_start(ij), grid%j_end(ij), &
3033 k_start, k_end )
3034
3035 ENDIF
3036
3037       ENDIF ! b.c. test for scalar nested boundary condition
3038
3039 ENDDO scalar_tile_loop_1
3040 !$OMP END PARALLEL DO
3041
3042
3043 !$OMP PARALLEL DO &
3044 !$OMP PRIVATE ( ij )
3045
3046 scalar_tile_loop_2: DO ij = 1 , grid%num_tiles
3047
3048 CALL wrf_debug ( 200 , ' call rk_update_scalar' )
3049 CALL rk_update_scalar( is, is, &
3050 scalar_old(ims,kms,jms,is), & ! was scalar_1
3051 scalar(ims,kms,jms,is), &
3052 scalar_tend(ims,kms,jms,is), &
3053 advect_tend, grid%msft, &
3054 grid%em_mu_1, grid%em_mu_2, grid%em_mub, &
3055 rk_step, dt_rk, grid%spec_zone, &
3056 config_flags, &
3057 ids, ide, jds, jde, kds, kde, &
3058 ims, ime, jms, jme, kms, kme, &
3059 grid%i_start(ij), grid%i_end(ij), &
3060 grid%j_start(ij), grid%j_end(ij), &
3061 k_start , k_end )
3062
3063
3064 IF( config_flags%specified ) THEN
3065 CALL flow_dep_bdy ( scalar(ims,kms,jms,is), &
3066 grid%em_ru_m, grid%em_rv_m, config_flags, &
3067 grid%spec_zone, &
3068 ids,ide, jds,jde, kds,kde, & ! domain dims
3069 ims,ime, jms,jme, kms,kme, & ! memory dims
3070 ips,ipe, jps,jpe, kps,kpe, & ! patch dims
3071 grid%i_start(ij), grid%i_end(ij), &
3072 grid%j_start(ij), grid%j_end(ij), &
3073 k_start, k_end )
3074 ENDIF
3075
3076
3077 ENDDO scalar_tile_loop_2
3078 !$OMP END PARALLEL DO
3079
3080 ENDDO scalar_variable_loop
3081
3082 ENDIF other_scalar_advance
3083
3084 ! update the pressure and density at the new time level
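! Schematically (an illustrative sketch with simplified symbols; see
! calc_p_rho_phi for the exact moisture coupling and the
! hydrostatic/non-hydrostatic branches), the diagnostics follow the
! hydrostatic relation and the equation of state:
!
!   alpha = - ( d(phi)/d(eta) ) / mu
!   p     = p0 * ( R_d * theta_m / ( p0 * alpha ) )**(cp/cv)
!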
3085
3086 !$OMP PARALLEL DO &
3087 !$OMP PRIVATE ( ij )
3088 DO ij = 1 , grid%num_tiles
3089
3090
3091 #ifdef WRF_PLUS
3092 if(TEST_calc_p_rho_phi) THEN
3093 CALL t_calc_p_rho_phi( moist, num_3d_m, &
3094 grid%em_al, grid%em_alb, grid%em_mu_2, grid%em_muts, &
3095 grid%em_ph_2, grid%em_p, grid%em_pb, grid%em_t_2, &
3096 p0, t0, grid%em_znu, grid%em_dnw, grid%em_rdnw, &
3097 grid%em_rdn, config_flags%non_hydrostatic, &
3098 ids, ide, jds, jde, kds, kde, &
3099 ims, ime, jms, jme, kms, kme, &
3100 grid%i_start(ij), grid%i_end(ij), &
3101 grid%j_start(ij), grid%j_end(ij), &
3102 k_start , k_end )
3103 END IF
3104 #endif
3105 BENCH_START(calc_p_rho_tim)
3106
3107 CALL calc_p_rho_phi( moist, num_3d_m, &
3108 grid%em_al, grid%em_alb, grid%em_mu_2, grid%em_muts, &
3109 grid%em_ph_2, grid%em_p, grid%em_pb, grid%em_t_2, &
3110 p0, t0, grid%em_znu, grid%em_dnw, grid%em_rdnw, &
3111 grid%em_rdn, config_flags%non_hydrostatic, &
3112 ids, ide, jds, jde, kds, kde, &
3113 ims, ime, jms, jme, kms, kme, &
3114 grid%i_start(ij), grid%i_end(ij), &
3115 grid%j_start(ij), grid%j_end(ij), &
3116 k_start , k_end )
3117
3118 BENCH_END(calc_p_rho_tim)
3119
3120 ENDDO
3121 !$OMP END PARALLEL DO
3122
3123 ! Reset the boundary conditions if there is another corrector step
3124 ! (rk_step < rk_order); otherwise this is handled at the end of the
3125 ! timestep (after the split physics, before exiting the timestep).
3126
3127 rk_step_1_check: IF ( rk_step < rk_order ) THEN
3128
3129 !-----------------------------------------------------------
3130 ! Stencils for patch communications (WCS, 29 June 2001)
3131 !
3132 ! here's where we need a wide comm stencil - these are the
3133 ! uncoupled variables, so they are used for the high-order calculations
3134 ! in the advection and mixing routines.
3135 !
3136 ! * * * * *
3137 ! * * * * * * * * *
3138 ! * + * * + * * * + * *
3139 ! * * * * * * * * *
3140 ! * * * * *
3141 !
3142 !
3143 ! grid%em_u_2 x
3144 ! grid%em_v_2 x
3145 ! grid%em_w_2 x
3146 ! grid%em_t_2 x
3147 ! grid%em_ph_2 x
3148 ! grid%em_al x
3149 !
3150 ! 2D variable
3151 ! grid%em_mu_2 x
3152 !
3153 ! 4D variable
3154 ! moist x
3155 ! chem x
3156 ! scalar                       x
3157
3158 #ifdef DM_PARALLEL
3159 IF ( config_flags%h_mom_adv_order <= 4 ) THEN
3160 # include "HALO_EM_D2_3.inc"
3161 ELSE IF ( config_flags%h_mom_adv_order <= 6 ) THEN
3162 # include "HALO_EM_D2_5.inc"
3163 ELSE
3164 WRITE(wrf_err_message,*)'solve_em: invalid h_mom_adv_order = ',config_flags%h_mom_adv_order
3165 CALL wrf_error_fatal(TRIM(wrf_err_message))
3166 ENDIF
3167 # include "PERIOD_BDY_EM_D.inc"
3168 # include "PERIOD_BDY_EM_MOIST2.inc"
3169 # include "PERIOD_BDY_EM_CHEM2.inc"
3170 # include "PERIOD_BDY_EM_SCALAR2.inc"
3171 #endif
3172
3173 BENCH_START(bc_end_tim)
3174 !$OMP PARALLEL DO &
3175 !$OMP PRIVATE ( ij )
3176
3177 tile_bc_loop_1: DO ij = 1 , grid%num_tiles
3178
3179
3180 CALL wrf_debug ( 200 , ' call rk_phys_bc_dry_2' )
3181
3182 CALL rk_phys_bc_dry_2( config_flags, &
3183 grid%em_u_2, grid%em_v_2, grid%em_w_2, &
3184 grid%em_t_2, grid%em_ph_2, grid%em_mu_2, &
3185 ids, ide, jds, jde, kds, kde, &
3186 ims, ime, jms, jme, kms, kme, &
3187 ips, ipe, jps, jpe, kps, kpe, &
3188 grid%i_start(ij), grid%i_end(ij), &
3189 grid%j_start(ij), grid%j_end(ij), &
3190 k_start , k_end )
3191
3192 BENCH_START(diag_w_tim)
3193 IF (.not. config_flags%non_hydrostatic) THEN
3194 #ifdef WRF_PLUS
3195 if(TEST_diagnose_w) THEN
3196 CALL t_diagnose_w( ph_tend, grid%em_ph_2, grid%em_ph_1, grid%em_w_2, grid%em_muts, dt_rk, &
3197 grid%em_u_2, grid%em_v_2, grid%ht, &
3198 grid%cf1, grid%cf2, grid%cf3, grid%rdx, grid%rdy, grid%msft, &
3199 ids, ide, jds, jde, kds, kde, &
3200 ims, ime, jms, jme, kms, kme, &
3201 grid%i_start(ij), grid%i_end(ij), &
3202 grid%j_start(ij), grid%j_end(ij), &
3203 k_start , k_end )
3204 END IF
3205 #endif
3206 CALL diagnose_w( ph_tend, grid%em_ph_2, grid%em_ph_1, grid%em_w_2, grid%em_muts, dt_rk, &
3207 grid%em_u_2, grid%em_v_2, grid%ht, &
3208 grid%cf1, grid%cf2, grid%cf3, grid%rdx, grid%rdy, grid%msft, &
3209 ids, ide, jds, jde, kds, kde, &
3210 ims, ime, jms, jme, kms, kme, &
3211 grid%i_start(ij), grid%i_end(ij), &
3212 grid%j_start(ij), grid%j_end(ij), &
3213 k_start , k_end )
3214 ENDIF
3215 BENCH_END(diag_w_tim)
3216
3217 IF (num_3d_m >= PARAM_FIRST_SCALAR) THEN
3218
3219 moisture_loop_bdy_1 : DO im = PARAM_FIRST_SCALAR , num_3d_m
3220
3221 CALL set_physical_bc3d( moist(ims,kms,jms,im), 'p', config_flags, &
3222 ids, ide, jds, jde, kds, kde, &
3223 ims, ime, jms, jme, kms, kme, &
3224 ips, ipe, jps, jpe, kps, kpe, &
3225 grid%i_start(ij), grid%i_end(ij), &
3226 grid%j_start(ij), grid%j_end(ij), &
3227 k_start , k_end )
3228 END DO moisture_loop_bdy_1
3229
3230 ENDIF
3231
3232 IF (num_3d_c >= PARAM_FIRST_SCALAR) THEN
3233
3234 chem_species_bdy_loop_1 : DO ic = PARAM_FIRST_SCALAR , num_3d_c
3235
3236 CALL set_physical_bc3d( chem(ims,kms,jms,ic), 'p', config_flags, &
3237 ids, ide, jds, jde, kds, kde, &
3238 ims, ime, jms, jme, kms, kme, &
3239 ips, ipe, jps, jpe, kps, kpe, &
3240 grid%i_start(ij), grid%i_end(ij), &
3241 grid%j_start(ij), grid%j_end(ij), &
3242 k_start , k_end-1 )
3243
3244 END DO chem_species_bdy_loop_1
3245
3246 END IF
3247
3248 IF (num_3d_s >= PARAM_FIRST_SCALAR) THEN
3249
3250 scalar_species_bdy_loop_1 : DO is = PARAM_FIRST_SCALAR , num_3d_s
3251
3252 CALL set_physical_bc3d( scalar(ims,kms,jms,is), 'p', config_flags, &
3253 ids, ide, jds, jde, kds, kde, &
3254 ims, ime, jms, jme, kms, kme, &
3255 ips, ipe, jps, jpe, kps, kpe, &
3256 grid%i_start(ij), grid%i_end(ij), &
3257 grid%j_start(ij), grid%j_end(ij), &
3258 k_start , k_end-1 )
3259
3260 END DO scalar_species_bdy_loop_1
3261
3262 END IF
3263
3264 IF (config_flags%km_opt .eq. 2) THEN
3265
3266 CALL set_physical_bc3d( grid%em_tke_2 , 'p', config_flags, &
3267 ids, ide, jds, jde, kds, kde, &
3268 ims, ime, jms, jme, kms, kme, &
3269 ips, ipe, jps, jpe, kps, kpe, &
3270 grid%i_start(ij), grid%i_end(ij), &
3271 grid%j_start(ij), grid%j_end(ij), &
3272 k_start , k_end )
3273 END IF
3274
3275 END DO tile_bc_loop_1
3276 !$OMP END PARALLEL DO
3277 BENCH_END(bc_end_tim)
3278
3279
3280 #ifdef DM_PARALLEL
3281
3282 ! * * * * *
3283 ! * * * * * * * * *
3284 ! * + * * + * * * + * *
3285 ! * * * * * * * * *
3286 ! * * * * *
3287
3288 ! moist, chem, scalar, tke x
3289
3290
3291 IF ( config_flags%h_mom_adv_order <= 4 ) THEN
3292 IF ( (config_flags%pd_tke) .and. (rk_step == rk_order-1) ) THEN
3293 # include "HALO_EM_TKE_5.inc"
3294 ELSE
3295 # include "HALO_EM_TKE_3.inc"
3296 ENDIF
3297 ELSE IF ( config_flags%h_mom_adv_order <= 6 ) THEN
3298 IF ( (config_flags%pd_tke) .and. (rk_step == rk_order-1) ) THEN
3299 # include "HALO_EM_TKE_7.inc"
3300 ELSE
3301 # include "HALO_EM_TKE_5.inc"
3302 ENDIF
3303 ELSE
3304      WRITE(wrf_err_message,*)'solve_em: invalid h_mom_adv_order = ',config_flags%h_mom_adv_order
3305 CALL wrf_error_fatal(TRIM(wrf_err_message))
3306 ENDIF
3307
3308 #if 0
3309 IF (config_flags%km_opt .eq. 2) THEN
3310 # include "HALO_EM_TKE_F.inc"
3311 ENDIF
3312 #endif
3313
3314 if ( num_moist .ge. PARAM_FIRST_SCALAR ) then
3315 IF ( config_flags%h_sca_adv_order <= 4 ) THEN
3316 IF ( (config_flags%pd_moist) .and. (rk_step == rk_order-1) ) THEN
3317 # include "HALO_EM_MOIST_E_5.inc"
3318 ELSE
3319 # include "HALO_EM_MOIST_E_3.inc"
3320 END IF
3321 ELSE IF ( config_flags%h_sca_adv_order <= 6 ) THEN
3322 IF ( (config_flags%pd_moist) .and. (rk_step == rk_order-1) ) THEN
3323 # include "HALO_EM_MOIST_E_7.inc"
3324 ELSE
3325 # include "HALO_EM_MOIST_E_5.inc"
3326 END IF
3327 ELSE
3328 WRITE(wrf_err_message,*)'solve_em: invalid h_sca_adv_order = ',config_flags%h_sca_adv_order
3329 CALL wrf_error_fatal(TRIM(wrf_err_message))
3330 ENDIF
3331 endif
3332 if ( num_chem >= PARAM_FIRST_SCALAR ) then
3333 IF ( config_flags%h_sca_adv_order <= 4 ) THEN
3334 IF ( (config_flags%pd_chem) .and. (rk_step == rk_order-1) ) THEN
3335 # include "HALO_EM_CHEM_E_5.inc"
3336 ELSE
3337 # include "HALO_EM_CHEM_E_3.inc"
3338 END IF
3339 ELSE IF ( config_flags%h_sca_adv_order <= 6 ) THEN
3340 IF ( (config_flags%pd_chem) .and. (rk_step == rk_order-1) ) THEN
3341 # include "HALO_EM_CHEM_E_7.inc"
3342 ELSE
3343 # include "HALO_EM_CHEM_E_5.inc"
3344 END IF
3345 ELSE
3346 WRITE(wrf_err_message,*)'solve_em: invalid h_sca_adv_order = ',config_flags%h_sca_adv_order
3347 CALL wrf_error_fatal(TRIM(wrf_err_message))
3348 ENDIF
3349 endif
3350 if ( num_scalar >= PARAM_FIRST_SCALAR ) then
3351 IF ( config_flags%h_sca_adv_order <= 4 ) THEN
3352 IF ( (config_flags%pd_scalar) .and. (rk_step == rk_order-1) ) THEN
3353 # include "HALO_EM_SCALAR_E_5.inc"
3354 ELSE
3355 # include "HALO_EM_SCALAR_E_3.inc"
3356 END IF
3357 ELSE IF ( config_flags%h_sca_adv_order <= 6 ) THEN
3358 IF ( (config_flags%pd_scalar) .and. (rk_step == rk_order-1) ) THEN
3359 # include "HALO_EM_SCALAR_E_7.inc"
3360 ELSE
3361 # include "HALO_EM_SCALAR_E_5.inc"
3362 END IF
3363 ELSE
3364 WRITE(wrf_err_message,*)'solve_em: invalid h_sca_adv_order = ',config_flags%h_sca_adv_order
3365 CALL wrf_error_fatal(TRIM(wrf_err_message))
3366 ENDIF
3367 endif
3368 #endif
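! Note that on the last corrector sub-step (rk_step == rk_order-1) with
! positive-definite advection enabled (pd_tke/pd_moist/pd_chem/pd_scalar),
! a wider halo (e.g. the _5 or _7 include instead of _3 or _5) is exchanged
! above to provide the extra points used by the positive-definite limiter.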
3369
3370 ENDIF rk_step_1_check
3371
3372
3373 !**********************************************************
3374 !
3375 ! end of RK predictor-corrector loop
3376 !
3377 !**********************************************************
3378
3379 END DO Runge_Kutta_loop
3380
3381 !$OMP PARALLEL DO &
3382 !$OMP PRIVATE ( ij )
3383
3384 DO ij = 1 , grid%num_tiles
3385
3386 BENCH_START(advance_ppt_tim)
3387 CALL wrf_debug ( 200 , ' call advance_ppt' )
3388 CALL advance_ppt(grid%rthcuten,grid%rqvcuten,grid%rqccuten,grid%rqrcuten, &
3389 grid%rqicuten,grid%rqscuten,grid%rainc,grid%raincv,grid%nca, &
3390 grid%htop,grid%hbot,grid%cutop,grid%cubot, &
3391 grid%cuppt, config_flags, &
3392 ids,ide, jds,jde, kds,kde, &
3393 ims,ime, jms,jme, kms,kme, &
3394 grid%i_start(ij), grid%i_end(ij), &
3395 grid%j_start(ij), grid%j_end(ij), &
3396 k_start , k_end )
3397 BENCH_END(advance_ppt_tim)
3398
3399 ENDDO
3400 !$OMP END PARALLEL DO
3401
3402 !<DESCRIPTION>
3403 !<pre>
3404 ! (5) time-split physics.
3405 !
3406 ! Microphysics is the only time-split physics in the WRF model
3407 ! at this time. Split physics begins with the calculation of the
3408 ! needed diagnostic quantities (pressure, temperature, etc.),
3409 ! followed by a call to the microphysics driver,
3410 ! and finishes with a clean-up that stores the diabatic tendency
3411 ! from the moist physics and re-calculates the diagnostic
3412 ! quantities pressure and density.
3413 !</pre>
3414 !</DESCRIPTION>
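! The sequence below: moist_physics_prep_em diagnoses p, T, rho, and heights;
! microphysics_driver updates the moisture fields; microphysics_zero_out and
! moist_physics_finish_em clean up and store the diabatic heating in
! grid%h_diabatic; calc_p_rho_phi then re-diagnoses pressure and density.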
3415
3416 IF (config_flags%mp_physics /= 0) then
3417
3418 IF( config_flags%specified .or. config_flags%nested ) THEN
3419 sz = grid%spec_zone
3420 ELSE
3421 sz = 0
3422 ENDIF
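! sz trims the tile limits below so that the moist physics is not run inside
! the specified (or nested) lateral boundary zone; with periodic_x the
! x-direction limits are left untrimmed.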
3423
3424 !$OMP PARALLEL DO &
3425 !$OMP PRIVATE ( ij, its, ite, jts, jte )
3426
3427 scalar_tile_loop_1a: DO ij = 1 , grid%num_tiles
3428
3429 IF ( config_flags%periodic_x ) THEN
3430 its = max(grid%i_start(ij),ids)
3431 ite = min(grid%i_end(ij),ide-1)
3432 ELSE
3433 its = max(grid%i_start(ij),ids+sz)
3434 ite = min(grid%i_end(ij),ide-1-sz)
3435 ENDIF
3436 jts = max(grid%j_start(ij),jds+sz)
3437 jte = min(grid%j_end(ij),jde-1-sz)
3438
3439 CALL wrf_debug ( 200 , ' call moist_physics_prep' )
3440 BENCH_START(moist_physics_prep_tim)
3441 CALL moist_physics_prep_em( grid%em_t_2, grid%em_t_1, t0, rho, &
3442 grid%em_al, grid%em_alb, grid%em_p, p8w, p0, grid%em_pb, &
3443 grid%em_ph_2, grid%em_phb, th_phy, pi_phy, p_phy, &
3444 grid%em_z, z_at_w, dz8w, &
3445 dtm, grid%h_diabatic, &
3446 config_flags,grid%em_fnm, grid%em_fnp, &
3447 ids, ide, jds, jde, kds, kde, &
3448 ims, ime, jms, jme, kms, kme, &
3449 its, ite, jts, jte, &
3450 k_start , k_end )
3451 BENCH_END(moist_physics_prep_tim)
3452 END DO scalar_tile_loop_1a
3453 !$OMP END PARALLEL DO
3454
3455 CALL wrf_debug ( 200 , ' call microphysics_driver' )
3456
3457 grid%em_sr = 0.
3458 specified_bdy = config_flags%specified .OR. config_flags%nested
3459 channel_bdy = config_flags%specified .AND. config_flags%periodic_x
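! specified_bdy and channel_bdy are handed to microphysics_driver as the
! SPECIFIED and CHANNEL_SWITCH arguments so the driver can treat the
! lateral boundary zone consistently with the settings above.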
3460
3461 #if 0
3462 BENCH_START(microswap_1)
3463 ! for load balancing; communication to redistribute the points
3464 IF ( config_flags%mp_physics .EQ. ETAMPNEW ) THEN
3465 #include "SWAP_ETAMP_NEW.inc"
3466 ELSE IF ( config_flags%mp_physics .EQ. WSM3SCHEME ) THEN
3467 #include "SWAP_WSM3.inc"
3468 ENDIF
3469 BENCH_END(microswap_1)
3470 #endif
3471
3472 BENCH_START(micro_driver_tim)
3473
3474 CALL microphysics_driver( &
3475 & DT=dtm ,DX=grid%dx ,DY=grid%dy &
3476 & ,DZ8W=dz8w ,F_ICE_PHY=grid%f_ice_phy &
3477 & ,ITIMESTEP=grid%itimestep ,LOWLYR=grid%lowlyr &
3478 & ,P8W=p8w ,P=p_phy ,PI_PHY=pi_phy &
3479 & ,RHO=rho ,SPEC_ZONE=grid%spec_zone &
3480 & ,SR=grid%em_sr ,TH=th_phy &
3481 & ,WARM_RAIN=grid%warm_rain &
3482 & ,T8W=t8w &
3483 & ,CLDFRA=grid%cldfra, EXCH_H=grid%exch_h &
3484 & ,NSOURCE=grid%qndropsource &
3485 #ifdef WRF_CHEM
3486 & ,QLSINK=grid%qlsink,CLDFRA_OLD=grid%cldfra_old &
3487 & ,PRECR=grid%precr, PRECI=grid%preci, PRECS=grid%precs, PRECG=grid%precg &
3488 #endif
3489 & ,XLAND=grid%xland &
3490 & ,SPECIFIED=specified_bdy, CHANNEL_SWITCH=channel_bdy &
3491 & ,F_RAIN_PHY=grid%f_rain_phy &
3492 & ,F_RIMEF_PHY=grid%f_rimef_phy &
3493 & ,MP_PHYSICS=config_flags%mp_physics &
3494 & ,ID=grid%id &
3495 & ,IDS=ids,IDE=ide, JDS=jds,JDE=jde, KDS=kds,KDE=kde &
3496 & ,IMS=ims,IME=ime, JMS=jms,JME=jme, KMS=kms,KME=kme &
3497 & ,I_START=grid%i_start,I_END=min(grid%i_end, ide-1) &
3498 & ,J_START=grid%j_start,J_END=min(grid%j_end, jde-1) &
3499 & ,KTS=k_start, KTE=min(k_end,kde-1) &
3500 & ,NUM_TILES=grid%num_tiles &
3501 & ,NAER=grid%naer &
3502 ! Optional
3503 & , RAINNC=grid%rainnc, RAINNCV=grid%rainncv &
3504 & , SNOWNC=grid%snownc, SNOWNCV=grid%snowncv &
3505 & , GRAUPELNC=grid%graupelnc, GRAUPELNCV=grid%graupelncv &
3506 & , W=grid%em_w_2, Z=grid%em_z, HT=grid%ht &
3507 & , MP_RESTART_STATE=grid%mp_restart_state &
3508 & , TBPVS_STATE=grid%tbpvs_state & ! etampnew
3509 & , TBPVS0_STATE=grid%tbpvs0_state & ! etampnew
3510 & , QV_CURR=moist(ims,kms,jms,P_QV), F_QV=F_QV &
3511 & , QC_CURR=moist(ims,kms,jms,P_QC), F_QC=F_QC &
3512 & , QR_CURR=moist(ims,kms,jms,P_QR), F_QR=F_QR &
3513 & , QI_CURR=moist(ims,kms,jms,P_QI), F_QI=F_QI &
3514 & , QS_CURR=moist(ims,kms,jms,P_QS), F_QS=F_QS &
3515 & , QG_CURR=moist(ims,kms,jms,P_QG), F_QG=F_QG &
3516 & , QNDROP_CURR=scalar(ims,kms,jms,P_QNDROP), F_QNDROP=F_QNDROP &
3517 & , QNI_CURR=scalar(ims,kms,jms,P_QNI), F_QNI=F_QNI &
3518 & , QT_CURR=scalar(ims,kms,jms,P_QT), F_QT=F_QT &
3519 )
3520 BENCH_END(micro_driver_tim)
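! The F_Q* logicals passed above indicate which moisture and number-
! concentration species are allocated for the selected mp_physics scheme,
! so the driver only touches arrays that actually exist.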
3521
3522 #if 0
3523 BENCH_START(microswap_2)
3524 ! for load balancing; communication to redistribute the points
3525 IF ( config_flags%mp_physics .EQ. ETAMPNEW ) THEN
3526 #include "SWAP_ETAMP_NEW.inc"
3527 ELSE IF ( config_flags%mp_physics .EQ. WSM3SCHEME ) THEN
3528 #include "SWAP_WSM3.inc"
3529 ENDIF
3530 BENCH_END(microswap_2)
3531 #endif
3532
3533 CALL wrf_debug ( 200 , ' call moist_physics_finish' )
3534 BENCH_START(moist_phys_end_tim)
3535 !$OMP PARALLEL DO &
3536 !$OMP PRIVATE ( ij, its, ite, jts, jte )
3537
3538 scalar_tile_loop_1b: DO ij = 1 , grid%num_tiles
3539
3540 IF ( config_flags%periodic_x ) THEN
3541 its = max(grid%i_start(ij),ids)
3542 ite = min(grid%i_end(ij),ide-1)
3543 ELSE
3544 its = max(grid%i_start(ij),ids+sz)
3545 ite = min(grid%i_end(ij),ide-1-sz)
3546 ENDIF
3547 jts = max(grid%j_start(ij),jds+sz)
3548 jte = min(grid%j_end(ij),jde-1-sz)
3549
3550 CALL microphysics_zero_out ( &
3551 moist , num_moist , config_flags , &
3552 ids, ide, jds, jde, kds, kde, &
3553 ims, ime, jms, jme, kms, kme, &
3554 its, ite, jts, jte, &
3555 k_start , k_end )
3556
3557 CALL moist_physics_finish_em( grid%em_t_2, grid%em_t_1, t0, grid%em_muts, th_phy, &
3558 grid%h_diabatic, dtm, config_flags, &
3559 ids, ide, jds, jde, kds, kde, &
3560 ims, ime, jms, jme, kms, kme, &
3561 its, ite, jts, jte, &
3562 k_start , k_end )
3563
3564 CALL calc_p_rho_phi( moist, num_3d_m, &
3565 grid%em_al, grid%em_alb, grid%em_mu_2, grid%em_muts, &
3566 grid%em_ph_2, grid%em_p, grid%em_pb, grid%em_t_2, &
3567 p0, t0, grid%em_znu, grid%em_dnw, grid%em_rdnw, &
3568 grid%em_rdn, config_flags%non_hydrostatic, &
3569 ids, ide, jds, jde, kds, kde, &
3570 ims, ime, jms, jme, kms, kme, &
3571 its, ite, jts, jte, &
3572 k_start , k_end )
3573
3574 END DO scalar_tile_loop_1b
3575 !$OMP END PARALLEL DO
3576 BENCH_END(moist_phys_end_tim)
3577
3578 ENDIF
3579
3580 IF (.not. config_flags%non_hydrostatic) THEN
3581 !$OMP PARALLEL DO &
3582 !$OMP PRIVATE ( ij )
3583 DO ij = 1 , grid%num_tiles
3584 CALL diagnose_w( ph_tend, grid%em_ph_2, grid%em_ph_1, grid%em_w_2, grid%em_muts, dt_rk, &
3585 grid%em_u_2, grid%em_v_2, grid%ht, &
3586 grid%cf1, grid%cf2, grid%cf3, grid%rdx, grid%rdy, grid%msft, &
3587 ids, ide, jds, jde, kds, kde, &
3588 ims, ime, jms, jme, kms, kme, &
3589 grid%i_start(ij), grid%i_end(ij), &
3590 grid%j_start(ij), grid%j_end(ij), &
3591 k_start , k_end )
3592
3593 END DO
3594 !$OMP END PARALLEL DO
3595 ENDIF
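! In hydrostatic runs w is not prognosed, so it is re-diagnosed here from the
! updated geopotential tendency and winds after the moist physics step.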
3596
3597 chem_tile_loop_3: DO ij = 1 , grid%num_tiles
3598
3599    CALL wrf_debug ( 200 , ' call chem_tile_loop_3' )
3600
3601 IF ( num_3d_c >= PARAM_FIRST_SCALAR ) then
3602
3603 !
3604 ! tiled chemistry not here, it is called from solve_interface, and found in chem_driver
3605 !
3606
3607 END IF
3608
3609 END DO chem_tile_loop_3
3610
3611
3612 ! We're finished except for boundary condition (and patch) update
3613
3614 ! Boundary condition time (or communication time). At this time, we have
3615 ! implemented periodic and symmetric physical boundary conditions.
3616
3617 ! b.c. routine for data within patch.
3618
3619 ! We need to update both time levels of the data because
3620 ! the time filter only works in the physical solution space.
3621
3622 ! First, do patch communications for boundary conditions (periodicity)
3623
3624 !-----------------------------------------------------------
3625 ! Stencils for patch communications (WCS, 29 June 2001)
3626 !
3627 !  here's where we need a wide comm stencil - these are the
3628 !  uncoupled variables, so they are used for the high-order calculations in
3629 !  the advection and mixing routines.
3630 !
3631 ! * * * * *
3632 ! * * * * * * * * *
3633 ! * + * * + * * * + * *
3634 ! * * * * * * * * *
3635 ! * * * * *
3636 !
3637 ! grid%em_u_1 x
3638 ! grid%em_u_2 x
3639 ! grid%em_v_1 x
3640 ! grid%em_v_2 x
3641 ! grid%em_w_1 x
3642 ! grid%em_w_2 x
3643 ! grid%em_t_1 x
3644 ! grid%em_t_2 x
3645 ! grid%em_ph_1 x
3646 ! grid%em_ph_2 x
3647 ! grid%em_tke_1 x
3648 ! grid%em_tke_2 x
3649 !
3650 ! 2D variables
3651 ! grid%em_mu_1 x
3652 ! grid%em_mu_2 x
3653 !
3654 ! 4D variables
3655 ! moist x
3656 ! chem x
3657 ! scalar x
3658 !----------------------------------------------------------
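! HALO_EM_D3_* and the PERIOD_BDY_* includes below exchange both time levels
! (_1 and _2) of the prognostic fields listed above, together with the 4D
! moist, chem, and scalar arrays, ahead of the final physical b.c. pass.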
3659
3660
3661 #ifdef DM_PARALLEL
3662 IF ( config_flags%h_mom_adv_order <= 4 ) THEN
3663 # include "HALO_EM_D3_3.inc"
3664 ELSE IF ( config_flags%h_mom_adv_order <= 6 ) THEN
3665 # include "HALO_EM_D3_5.inc"
3666 ELSE
3667 WRITE(wrf_err_message,*)'solve_em: invalid h_mom_adv_order = ',config_flags%h_mom_adv_order
3668 CALL wrf_error_fatal(TRIM(wrf_err_message))
3669 ENDIF
3670 # include "PERIOD_BDY_EM_D3.inc"
3671 # include "PERIOD_BDY_EM_MOIST.inc"
3672 # include "PERIOD_BDY_EM_CHEM.inc"
3673 # include "PERIOD_BDY_EM_SCALAR.inc"
3674 #endif
3675
3676 ! now set physical b.c on a patch
3677
3678 BENCH_START(bc_2d_tim)
3679 !$OMP PARALLEL DO &
3680 !$OMP PRIVATE ( ij )
3681
3682 tile_bc_loop_2: DO ij = 1 , grid%num_tiles
3683
3684
3685 CALL wrf_debug ( 200 , ' call set_phys_bc_dry_2' )
3686
3687 CALL set_phys_bc_dry_2( config_flags, &
3688 grid%em_u_1, grid%em_u_2, grid%em_v_1, grid%em_v_2, grid%em_w_1, grid%em_w_2, &
3689 grid%em_t_1, grid%em_t_2, grid%em_ph_1, grid%em_ph_2, grid%em_mu_1, grid%em_mu_2, &
3690 ids, ide, jds, jde, kds, kde, &
3691 ims, ime, jms, jme, kms, kme, &
3692 ips, ipe, jps, jpe, kps, kpe, &
3693 grid%i_start(ij), grid%i_end(ij), &
3694 grid%j_start(ij), grid%j_end(ij), &
3695 k_start , k_end )
3696
3697 CALL set_physical_bc3d( grid%em_tke_1, 'p', config_flags, &
3698 ids, ide, jds, jde, kds, kde, &
3699 ims, ime, jms, jme, kms, kme, &
3700 ips, ipe, jps, jpe, kps, kpe, &
3701 grid%i_start(ij), grid%i_end(ij), &
3702 grid%j_start(ij), grid%j_end(ij), &
3703 k_start , k_end-1 )
3704 CALL set_physical_bc3d( grid%em_tke_2 , 'p', config_flags, &
3705 ids, ide, jds, jde, kds, kde, &
3706 ims, ime, jms, jme, kms, kme, &
3707 ips, ipe, jps, jpe, kps, kpe, &
3708 grid%i_start(ij), grid%i_end(ij), &
3709 grid%j_start(ij), grid%j_end(ij), &
3710 k_start , k_end )
3711
3712 moisture_loop_bdy_2 : DO im = PARAM_FIRST_SCALAR , num_3d_m
3713
3714 CALL set_physical_bc3d( moist(ims,kms,jms,im), 'p', &
3715 config_flags, &
3716 ids, ide, jds, jde, kds, kde, &
3717 ims, ime, jms, jme, kms, kme, &
3718 ips, ipe, jps, jpe, kps, kpe, &
3719 grid%i_start(ij), grid%i_end(ij), &
3720 grid%j_start(ij), grid%j_end(ij), &
3721 k_start , k_end )
3722
3723 END DO moisture_loop_bdy_2
3724
3725 chem_species_bdy_loop_2 : DO ic = PARAM_FIRST_SCALAR , num_3d_c
3726
3727 CALL set_physical_bc3d( chem(ims,kms,jms,ic) , 'p', config_flags, &
3728 ids, ide, jds, jde, kds, kde, &
3729 ims, ime, jms, jme, kms, kme, &
3730 ips, ipe, jps, jpe, kps, kpe, &
3731 grid%i_start(ij), grid%i_end(ij), &
3732 grid%j_start(ij), grid%j_end(ij), &
3733 k_start , k_end )
3734
3735 END DO chem_species_bdy_loop_2
3736
3737 scalar_species_bdy_loop_2 : DO is = PARAM_FIRST_SCALAR , num_3d_s
3738
3739 CALL set_physical_bc3d( scalar(ims,kms,jms,is) , 'p', config_flags, &
3740 ids, ide, jds, jde, kds, kde, &
3741 ims, ime, jms, jme, kms, kme, &
3742 ips, ipe, jps, jpe, kps, kpe, &
3743 grid%i_start(ij), grid%i_end(ij), &
3744 grid%j_start(ij), grid%j_end(ij), &
3745 k_start , k_end )
3746
3747 END DO scalar_species_bdy_loop_2
3748
3749 END DO tile_bc_loop_2
3750 !$OMP END PARALLEL DO
3751 BENCH_END(bc_2d_tim)
3752
3753 IF( config_flags%specified .or. config_flags%nested ) THEN
3754 grid%dtbc = grid%dtbc + grid%dt
3755 ENDIF
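! grid%dtbc accumulates the time elapsed since the lateral boundary arrays
! were last updated; it is used when time-interpolating the specified
! boundary tendencies.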
3756
3757 ! calculate some model diagnostics.
3758
3759 CALL wrf_debug ( 200 , ' call diagnostic_driver' )
3760
3761 CALL diagnostic_output_calc( &
3762 & DPSDT=grid%dpsdt ,DMUDT=grid%dmudt &
3763 & ,P_PHY=p_phy ,PK1M=grid%pk1m &
3764 & ,MU_2=grid%em_mu_2 ,MU_2M=grid%mu_2m &
3765 & ,U=grid%em_u_2 ,V=grid%em_v_2 &
3766 & ,RAINCV=grid%raincv ,RAINNCV=grid%rainncv &
3767 & ,RAINC=grid%rainc ,RAINNC=grid%rainnc &
3768 & ,HFX=grid%hfx ,SFCEVP=grid%sfcevp ,LH=grid%lh &
3769 & ,DT=grid%dt ,SBW=config_flags%spec_bdy_width &
3770 & ,XTIME=grid%xtime &
3771 ! Selection flag
3772 & ,DIAG_PRINT=config_flags%diag_print &
3773 ! Dimension arguments
3774 & ,IDS=ids,IDE=ide, JDS=jds,JDE=jde, KDS=kds,KDE=kde &
3775 & ,IMS=ims,IME=ime, JMS=jms,JME=jme, KMS=kms,KME=kme &
3776 & ,IPS=ips,IPE=ipe, JPS=jps,JPE=jpe, KPS=kps,KPE=kpe &
3777 & ,I_START=grid%i_start,I_END=min(grid%i_end, ide-1) &
3778 & ,J_START=grid%j_start,J_END=min(grid%j_end, jde-1) &
3779 & ,KTS=k_start, KTE=min(k_end,kde-1) &
3780 & ,NUM_TILES=grid%num_tiles &
3781 & )
3782
3783 #ifdef DM_PARALLEL
3784 !-----------------------------------------------------------------------
3785 ! see above
3786 !--------------------------------------------------------------
3787 CALL wrf_debug ( 200 , ' call HALO_RK_E' )
3788 IF ( config_flags%h_mom_adv_order <= 4 ) THEN
3789 # include "HALO_EM_E_3.inc"
3790 ELSE IF ( config_flags%h_mom_adv_order <= 6 ) THEN
3791 # include "HALO_EM_E_5.inc"
3792 ELSE
3793 WRITE(wrf_err_message,*)'solve_em: invalid h_mom_adv_order = ',config_flags%h_mom_adv_order
3794 CALL wrf_error_fatal(TRIM(wrf_err_message))
3795 ENDIF
3796 #endif
3797
3798 #ifdef DM_PARALLEL
3799 if ( num_moist >= PARAM_FIRST_SCALAR ) then
3800 !-----------------------------------------------------------------------
3801 ! see above
3802 !--------------------------------------------------------------
3803 CALL wrf_debug ( 200 , ' call HALO_RK_MOIST' )
3804 IF ( config_flags%h_mom_adv_order <= 4 ) THEN
3805 # include "HALO_EM_MOIST_E_3.inc"
3806 ELSE IF ( config_flags%h_mom_adv_order <= 6 ) THEN
3807 # include "HALO_EM_MOIST_E_5.inc"
3808 ELSE
3809 WRITE(wrf_err_message,*)'solve_em: invalid h_mom_adv_order = ',config_flags%h_mom_adv_order
3810 CALL wrf_error_fatal(TRIM(wrf_err_message))
3811 ENDIF
3812 endif
3813 if ( num_chem >= PARAM_FIRST_SCALAR ) then
3814 !-----------------------------------------------------------------------
3815 ! see above
3816 !--------------------------------------------------------------
3817 CALL wrf_debug ( 200 , ' call HALO_RK_CHEM' )
3818 IF ( config_flags%h_mom_adv_order <= 4 ) THEN
3819 # include "HALO_EM_CHEM_E_3.inc"
3820 ELSE IF ( config_flags%h_mom_adv_order <= 6 ) THEN
3821 # include "HALO_EM_CHEM_E_5.inc"
3822 ELSE
3823 WRITE(wrf_err_message,*)'solve_em: invalid h_mom_adv_order = ',config_flags%h_mom_adv_order
3824 CALL wrf_error_fatal(TRIM(wrf_err_message))
3825 ENDIF
3826 endif
3827 if ( num_scalar >= PARAM_FIRST_SCALAR ) then
3828 !-----------------------------------------------------------------------
3829 ! see above
3830 !--------------------------------------------------------------
3831 CALL wrf_debug ( 200 , ' call HALO_RK_SCALAR' )
3832 IF ( config_flags%h_mom_adv_order <= 4 ) THEN
3833 # include "HALO_EM_SCALAR_E_3.inc"
3834 ELSE IF ( config_flags%h_mom_adv_order <= 6 ) THEN
3835 # include "HALO_EM_SCALAR_E_5.inc"
3836 ELSE
3837 WRITE(wrf_err_message,*)'solve_em: invalid h_mom_adv_order = ',config_flags%h_mom_adv_order
3838 CALL wrf_error_fatal(TRIM(wrf_err_message))
3839 ENDIF
3840 endif
3841 #endif
3842
3843 CALL wrf_debug ( 200 , ' call end of solve_em' )
3844
3845 ! Finish timers if compiled with -DBENCH.
3846 #include <bench_solve_em_end.h>
3847
3848 RETURN
3849
3850 END SUBROUTINE solve_em
3851