! solve_em.F
! References to this file elsewhere.
1 !WRF:MEDIATION_LAYER:SOLVER
2
3 SUBROUTINE solve_em ( grid , config_flags &
4 ! Actual arguments generated from Registry
5 #include "em_dummy_new_args.inc"
6 !
7 )
8
9 ! Driver layer modules
10 USE module_domain
11 USE module_configure
12 USE module_driver_constants
13 USE module_machine
14 USE module_tiles
15 USE module_dm
16 ! Mediation layer modules
17 ! Model layer modules
18 USE module_model_constants
19 USE module_small_step_em
20 USE module_em
21 USE module_big_step_utilities_em
22 USE module_bc
23 USE module_bc_em
24 USE module_solvedebug_em
25 USE module_physics_addtendc
26 USE module_diffusion_em
27 ! Registry generated module
28 USE module_state_description
29 USE module_radiation_driver
30 USE module_surface_driver
31 USE module_cumulus_driver
32 USE module_microphysics_driver
33 USE module_microphysics_zero_out
34 USE module_pbl_driver
35 USE module_fddagd_driver
36 USE module_fddaobs_driver
37 USE module_diagnostics
38 #ifdef WRF_CHEM
39 USE module_input_chem_data
40 USE module_chem_utilities
41 #endif
42 #ifdef WRF_PLUS
43 USE module_check
44 #endif
45
46 IMPLICIT NONE
47
48 ! Input data.
49
50 TYPE(domain) , TARGET :: grid
51
52 ! Definitions of dummy arguments to this routine (generated from Registry).
53 #include "em_dummy_new_decl.inc"
54 #include "namelist_defines2.inc"
55
56 ! Structure that contains run-time configuration (namelist) data for domain
57 TYPE (grid_config_rec_type) , INTENT(IN) :: config_flags
58
59 ! Local data
60
61 INTEGER :: k_start , k_end, its, ite, jts, jte
62 INTEGER :: ids , ide , jds , jde , kds , kde , &
63 ims , ime , jms , jme , kms , kme , &
64 ips , ipe , jps , jpe , kps , kpe
65
66 INTEGER :: imsx, imex, jmsx, jmex, kmsx, kmex, &
67 ipsx, ipex, jpsx, jpex, kpsx, kpex, &
68 imsy, imey, jmsy, jmey, kmsy, kmey, &
69 ipsy, ipey, jpsy, jpey, kpsy, kpey
70
71 INTEGER :: ij , iteration
72 INTEGER :: im , num_3d_m , ic , num_3d_c , is , num_3d_s
73 INTEGER :: loop
74 INTEGER :: ijds, ijde
75 INTEGER :: sz
76
77 LOGICAL :: specified_bdy, channel_bdy
78
79
80 ! storage for tendencies and decoupled state (generated from Registry)
81
82 #include <em_i1_decl.inc>
83 ! Previous time level of tracer arrays now defined as i1 variables;
84 ! the state 4d arrays now redefined as 1-time level arrays in Registry.
85 ! Benefit: save memory in nested runs, since only 1 domain is active at a
86 ! time. Potential problem on stack-limited architectures: increases
87 ! amount of data on program stack by making these automatic arrays.
88
89 INTEGER :: rc
90 INTEGER :: number_of_small_timesteps, rk_step
91 INTEGER :: klevel,ijm,ijp,i,j,k,size1,size2 ! for prints/plots only
92 INTEGER :: idum1, idum2, dynamics_option
93
94 INTEGER :: rk_order, iwmax, jwmax, kwmax
95 REAL :: dt_rk, dts_rk, dtm, wmax
96 INTEGER :: l,kte,kk
97
98 ! urban related variables
99 INTEGER :: NUM_ROOF_LAYERS, NUM_WALL_LAYERS, NUM_ROAD_LAYERS ! urban
100
101 ! Define benchmarking timers if -DBENCH is compiled
102 #include <bench_solve_em_def.h>
103
104 !----------------------
105 ! Executable statements
106 !----------------------
107
108 ! Needed by some comm layers, e.g. RSL. If needed, nmm_data_calls.inc is
109 ! generated from the registry. The definition of REGISTER_I1 allows
110 ! I1 data to be communicated in this routine if necessary.
111 #ifdef DM_PARALLEL
112 # define REGISTER_I1
113 # include "em_data_calls.inc"
114 #endif
115
116 !<DESCRIPTION>
117 !<pre>
118 ! solve_em is the main driver for advancing a grid a single timestep.
119 ! It is a mediation-layer routine -> DM and SM calls are made where
120 ! needed for parallel processing.
121 !
122 ! solve_em can integrate the equations using 2 time-integration methods
123 !
124 ! - 3rd order Runge-Kutta time integration (recommended)
125 !
126 ! - 2nd order Runge-Kutta time integration
127 !
128 ! The main sections of solve_em are
129 !
130 ! (1) Runge-Kutta (RK) loop
131 !
132 ! (2) Non-timesplit physics (i.e., tendencies computed for updating
133 ! model state variables during the first RK sub-step (loop)
134 !
135 ! (3) Small (acoustic, sound) timestep loop - within the RK sub-steps
136 !
137 ! (4) scalar advance for moist and chem scalar variables (and TKE)
138 ! within the RK sub-steps.
139 !
140 ! (5) time-split physics (after the RK step), currently this includes
141 ! only microphysics
142 !
143 ! A more detailed description of these sections follows.
144 !</pre>
145 !</DESCRIPTION>
146
147 ! Initialize timers if compiled with -DBENCH
148 #include <bench_solve_em_init.h>
149
150 ! set runge-kutta solver (2nd or 3rd order)
151
152 dynamics_option = config_flags%rk_ord
153
154 ! Obtain dimension information stored in the grid data structure.
155
156 CALL get_ijk_from_grid ( grid , &
157 ids, ide, jds, jde, kds, kde, &
158 ims, ime, jms, jme, kms, kme, &
159 ips, ipe, jps, jpe, kps, kpe, &
160 imsx, imex, jmsx, jmex, kmsx, kmex, &
161 ipsx, ipex, jpsx, jpex, kpsx, kpex, &
162 imsy, imey, jmsy, jmey, kmsy, kmey, &
163 ipsy, ipey, jpsy, jpey, kpsy, kpey )
164
165 k_start = kps
166 k_end = kpe
167
168 ijds = min(ids, jds)
169 ijde = max(ide, jde)
170
171 num_3d_m = num_moist
172 num_3d_c = num_chem
173 num_3d_s = num_scalar
174
175 ! Compute these starting and stopping locations for each tile and number of tiles.
176 ! See: http://www.mmm.ucar.edu/wrf/WG2/topics/settiles
177 CALL set_tiles ( grid , ids , ide , jds , jde , ips , ipe , jps , jpe )
178 !print *,grid%julian,grid%julday,' grid%julian,grid%julday in solve'
179
180 grid%itimestep = grid%itimestep + 1
181
182 !**********************************************************************
183 !
184 ! LET US BEGIN.......
185 !
186 !<DESCRIPTION>
187 !<pre>
188 ! (1) RK integration loop is named the "Runge_Kutta_loop:"
189 !
190 ! Predictor-corrector type time integration.
191 ! Advection terms are evaluated at time t for the predictor step,
192 ! and advection is re-evaluated with the latest predicted value for
193 ! each succeeding time corrector step
194 !
195 ! 2nd order Runge Kutta (rk_order = 2):
196 ! Step 1 is taken to the midpoint predictor, step 2 is the full step.
197 !
198 ! 3rd order Runge Kutta (rk_order = 3):
199 ! Step 1 is taken from t to dt/3, step 2 is from t to dt/2,
200 ! and step 3 is from t to dt.
201 !
202 ! non-timesplit physics are evaluated during first RK step and
203 ! these physics tendencies are stored for use in each RK pass.
204 !</pre>
205 !</DESCRIPTION>
206 !**********************************************************************
207
208 #ifdef WRF_CHEM
209 !
210 ! prepare chem aerosols for advection before communication
211 !
212
213 kte=min(k_end,kde-1)
214 # ifdef DM_PARALLEL
215 if ( num_chem >= PARAM_FIRST_SCALAR ) then
216 !-----------------------------------------------------------------------
217 ! see above
218 !--------------------------------------------------------------
219 CALL wrf_debug ( 200 , ' call HALO_RK_CHEM' )
220 IF ( config_flags%h_mom_adv_order <= 4 ) THEN
221 # include "HALO_EM_CHEM_E_3.inc"
222 ELSE IF ( config_flags%h_mom_adv_order <= 6 ) THEN
223 # include "HALO_EM_CHEM_E_5.inc"
224 ELSE
225 WRITE(wrf_err_message,*)'solve_em: invalid h_mom_adv_order = ',config_flags%h_mom_adv_order
226 CALL wrf_error_fatal(TRIM(wrf_err_message))
227 ENDIF
228 endif
229 # endif
230 !--------------------------------------------------------------
231 #endif
232
233 rk_order = config_flags%rk_ord
234 IF (grid%time_step_sound == 0) THEN
235 ! auto-set option
236 ! This function will give 4 for 6*dx and 6 for 10*dx and returns even numbers only
237 grid%time_step_sound = max ( 2 * ( INT (300.*grid%dt/grid%dx-0.01) + 1 ), 4 )
238 WRITE(wrf_err_message,*)'dx, dt, time_step_sound=',grid%dx,grid%dt,grid%time_step_sound
239 CALL wrf_debug ( 50 , wrf_err_message )
240 ENDIF
241
242 grid%dts = grid%dt/float(grid%time_step_sound)
243
244 Runge_Kutta_loop: DO rk_step = 1, rk_order
245
246 ! Set the step size and number of small timesteps for
247 ! each part of the timestep
248
249 dtm = grid%dt
250 IF ( rk_order == 1 ) THEN
251
252 write(wrf_err_message,*)' leapfrog removed, error exit for dynamics_option = ',dynamics_option
253 CALL wrf_error_fatal( wrf_err_message )
254
255 ELSE IF ( rk_order == 2 ) THEN ! 2nd order Runge-Kutta timestep
256
257 IF ( rk_step == 1) THEN
258 dt_rk = 0.5*grid%dt
259 dts_rk = grid%dts
260 number_of_small_timesteps = grid%time_step_sound/2
261 ELSE
262 dt_rk = grid%dt
263 dts_rk = grid%dts
264 number_of_small_timesteps = grid%time_step_sound
265 ENDIF
266
267 ELSE IF ( rk_order == 3 ) THEN ! third order Runge-Kutta
268
269 IF ( rk_step == 1) THEN
270 dt_rk = grid%dt/3.
271 dts_rk = dt_rk
272 number_of_small_timesteps = 1
273 ELSE IF (rk_step == 2) THEN
274 dt_rk = 0.5*grid%dt
275 dts_rk = grid%dts
276 number_of_small_timesteps = grid%time_step_sound/2
277 ELSE
278 dt_rk = grid%dt
279 dts_rk = grid%dts
280 number_of_small_timesteps = grid%time_step_sound
281 ENDIF
282
283 ELSE
284
285 write(wrf_err_message,*)' unknown solver, error exit for dynamics_option = ',dynamics_option
286 CALL wrf_error_fatal( wrf_err_message )
287
288 END IF
289
290 !
291 ! Time level t is in the *_2 variable in the first part
292 ! of the step, and in the *_1 variable after the predictor.
293 ! the latest predicted values are stored in the *_2 variables.
294 !
295 CALL wrf_debug ( 200 , ' call rk_step_prep ' )
296
297 BENCH_START(step_prep_tim)
298 !$OMP PARALLEL DO &
299 !$OMP PRIVATE ( ij )
300
301 DO ij = 1 , grid%num_tiles
302
303 CALL rk_step_prep ( config_flags, rk_step, &
304 grid%em_u_2, grid%em_v_2, grid%em_w_2, grid%em_t_2, grid%em_ph_2, grid%em_mu_2, &
305 moist, &
306 grid%em_ru, grid%em_rv, grid%em_rw, grid%em_ww, grid%em_php, grid%em_alt, grid%em_muu, grid%em_muv, &
307 grid%em_mub, grid%em_mut, grid%em_phb, grid%em_pb, grid%em_p, grid%em_al, grid%em_alb, &
308 cqu, cqv, cqw, &
309 grid%msfu, grid%msfv, grid%msft, &
310 grid%em_fnm, grid%em_fnp, grid%em_dnw, grid%rdx, grid%rdy, &
311 num_3d_m, &
312 ids, ide, jds, jde, kds, kde, &
313 ims, ime, jms, jme, kms, kme, &
314 grid%i_start(ij), grid%i_end(ij), &
315 grid%j_start(ij), grid%j_end(ij), &
316 k_start, k_end )
317
318 END DO
319 !$OMP END PARALLEL DO
320 BENCH_END(step_prep_tim)
321
322 #ifdef DM_PARALLEL
323 !-----------------------------------------------------------------------
324 ! Stencils for patch communications (WCS, 29 June 2001)
325 ! Note: the small size of this halo exchange reflects the
326 ! fact that we are carrying the uncoupled variables
327 ! as state variables in the mass coordinate model, as
328 ! opposed to the coupled variables as in the height
329 ! coordinate model.
330 !
331 ! * * * * *
332 ! * * * * * * * * *
333 ! * + * * + * * * + * *
334 ! * * * * * * * * *
335 ! * * * * *
336 !
337 ! 3D variables - note staggering! grid%em_ru(X), grid%em_rv(Y), grid%em_ww(z), grid%em_php(z)
338 !
339 !j grid%em_ru x
340 !j grid%em_rv x
341 !j grid%em_ww x
342 !j grid%em_php x
343 !j grid%em_alt x
344 !j grid%em_ph_2 x
345 !j grid%em_phb x
346 !
347 ! the following are 2D (xy) variables
348 !
349 !j grid%em_muu x
350 !j grid%em_muv x
351 !j grid%em_mut x
352 !--------------------------------------------------------------
353 # include "HALO_EM_A.inc"
354 #endif
355
356 ! set boundary conditions on variables
357 ! from big_step_prep for use in big_step_proc
358
359 #ifdef DM_PARALLEL
360 # include "PERIOD_BDY_EM_A.inc"
361 #endif
362
363 ! CALL set_tiles ( grid , ids , ide , jds , jde , ips-1 , ipe+1 , jps-1 , jpe+1 )
364
365 BENCH_START(set_phys_bc_tim)
366 !$OMP PARALLEL DO &
367 !$OMP PRIVATE ( ij )
368
369 DO ij = 1 , grid%num_tiles
370
371 CALL wrf_debug ( 200 , ' call rk_phys_bc_dry_1' )
372
373 CALL rk_phys_bc_dry_1( config_flags, grid%em_ru, grid%em_rv, grid%em_rw, grid%em_ww, &
374 grid%em_muu, grid%em_muv, grid%em_mut, grid%em_php, grid%em_alt, grid%em_p, &
375 ids, ide, jds, jde, kds, kde, &
376 ims, ime, jms, jme, kms, kme, &
377 ips, ipe, jps, jpe, kps, kpe, &
378 grid%i_start(ij), grid%i_end(ij), &
379 grid%j_start(ij), grid%j_end(ij), &
380 k_start, k_end )
381 !TBH: need this 2nd timestep and later
382 CALL set_physical_bc3d( grid%em_al, 'grid%em_p', config_flags, &
383 ids, ide, jds, jde, kds, kde, &
384 ims, ime, jms, jme, kms, kme, &
385 ips, ipe, jps, jpe, kps, kpe, &
386 grid%i_start(ij), grid%i_end(ij), &
387 grid%j_start(ij), grid%j_end(ij), &
388 k_start , k_end )
389 CALL set_physical_bc3d( grid%em_ph_2, 'w', config_flags, &
390 ids, ide, jds, jde, kds, kde, &
391 ims, ime, jms, jme, kms, kme, &
392 ips, ipe, jps, jpe, kps, kpe, &
393 grid%i_start(ij), grid%i_end(ij), &
394 grid%j_start(ij), grid%j_end(ij), &
395 k_start, k_end )
396
397 END DO
398 !$OMP END PARALLEL DO
399 BENCH_END(set_phys_bc_tim)
400
401 rk_step_is_one : IF (rk_step == 1) THEN ! only need to initialize diffusion tendencies
402
403 ! initialize all tendencies to zero in order to update physics
404 ! tendencies first (separate from dry dynamics).
405
406 BENCH_START(init_zero_tend_tim)
407 !$OMP PARALLEL DO &
408 !$OMP PRIVATE ( ij )
409
410 DO ij = 1 , grid%num_tiles
411
412 #ifdef WRF_PLUS
413 if(TEST_init_zero_tendency) THEN
414 CALL t_init_zero_tendency ( ru_tendf, rv_tendf, rw_tendf, &
415 ph_tendf, t_tendf, tke_tend, &
416 mu_tendf, &
417 moist_tend,chem_tend,scalar_tend, &
418 num_3d_m,num_3d_c,num_3d_s, &
419 rk_step, &
420 ids, ide, jds, jde, kds, kde, &
421 ims, ime, jms, jme, kms, kme, &
422 grid%i_start(ij), grid%i_end(ij), &
423 grid%j_start(ij), grid%j_end(ij), &
424 k_start, k_end )
425 END IF
426 #endif
427 CALL wrf_debug ( 200 , ' call init_zero_tendency' )
428 CALL init_zero_tendency ( ru_tendf, rv_tendf, rw_tendf, &
429 ph_tendf, t_tendf, tke_tend, &
430 mu_tendf, &
431 moist_tend,chem_tend,scalar_tend, &
432 num_3d_m,num_3d_c,num_3d_s, &
433 rk_step, &
434 ids, ide, jds, jde, kds, kde, &
435 ims, ime, jms, jme, kms, kme, &
436 grid%i_start(ij), grid%i_end(ij), &
437 grid%j_start(ij), grid%j_end(ij), &
438 k_start, k_end )
439
440 END DO
441 !$OMP END PARALLEL DO
442 BENCH_END(init_zero_tend_tim)
443
444 #ifdef DM_PARALLEL
445 # include "HALO_EM_PHYS_A.inc"
446 #endif
447
448 !<DESCRIPTION>
449 !<pre>
450 !(2) The non-timesplit physics begins with a call to "phy_prep"
451 ! (which computes some diagnostic variables such as temperature,
452 ! pressure, u and v at p points, etc). This is followed by
453 ! calls to the physics drivers:
454 !
455 ! radiation,
456 ! surface,
457 ! pbl,
458 ! cumulus,
459 ! 3D TKE and mixing.
460 !</pre>
461 !</DESCRIPTION>
462
463
464 BENCH_START(phy_prep_tim)
465 !$OMP PARALLEL DO &
466 !$OMP PRIVATE ( ij )
467 DO ij = 1 , grid%num_tiles
468
469 #ifdef WRF_PLUS
470 if(TEST_phy_prep) THEN
471 CALL t_phy_prep ( config_flags, &
472 grid%em_mut, grid%em_u_2, grid%em_v_2, grid%em_p, grid%em_pb, grid%em_alt, &
473 grid%em_ph_2, grid%em_phb, grid%em_t_2, grid%tsk, moist, num_3d_m, &
474 mu_3d, rho, &
475 th_phy, p_phy, pi_phy, u_phy, v_phy, &
476 p8w, t_phy, t8w, grid%em_z, z_at_w, &
477 dz8w, grid%em_fnm, grid%em_fnp, &
478 grid%RTHRATEN, &
479 grid%RTHBLTEN, grid%RUBLTEN, grid%RVBLTEN, &
480 grid%RQVBLTEN, grid%RQCBLTEN, grid%RQIBLTEN, &
481 grid%RTHCUTEN, grid%RQVCUTEN, grid%RQCCUTEN, &
482 grid%RQRCUTEN, grid%RQICUTEN, grid%RQSCUTEN, &
483 grid%RTHFTEN, grid%RQVFTEN, &
484 ids, ide, jds, jde, kds, kde, &
485 ims, ime, jms, jme, kms, kme, &
486 grid%i_start(ij), grid%i_end(ij), &
487 grid%j_start(ij), grid%j_end(ij), &
488 k_start, k_end )
489 END IF
490 #endif
491 CALL wrf_debug ( 200 , ' call phy_prep' )
492 CALL phy_prep ( config_flags, &
493 grid%em_mut, grid%em_muu, grid%em_muv, grid%em_u_2, &
494 grid%em_v_2, grid%em_p, grid%em_pb, grid%em_alt, &
495 grid%em_ph_2, grid%em_phb, grid%em_t_2, grid%tsk, moist, num_3d_m, &
496 mu_3d, rho, &
497 th_phy, p_phy, pi_phy, u_phy, v_phy, &
498 p8w, t_phy, t8w, grid%em_z, z_at_w, &
499 dz8w, grid%em_fnm, grid%em_fnp, &
500 grid%rthraten, &
501 grid%rthblten, grid%rublten, grid%rvblten, &
502 grid%rqvblten, grid%rqcblten, grid%rqiblten, &
503 grid%rthcuten, grid%rqvcuten, grid%rqccuten, &
504 grid%rqrcuten, grid%rqicuten, grid%rqscuten, &
505 grid%rthften, grid%rqvften, &
506 grid%RUNDGDTEN, grid%RVNDGDTEN, grid%RTHNDGDTEN, &
507 grid%RQVNDGDTEN, grid%RMUNDGDTEN, &
508 ids, ide, jds, jde, kds, kde, &
509 ims, ime, jms, jme, kms, kme, &
510 grid%i_start(ij), grid%i_end(ij), &
511 grid%j_start(ij), grid%j_end(ij), &
512 k_start, k_end )
513 ENDDO
514 !$OMP END PARALLEL DO
515
516 BENCH_END(phy_prep_tim)
517
518 ! physics to implement
519
520 ! CALL set_tiles ( grid , ids , ide-1 , jds , jde-1 ips , ipe , jps , jpe )
521
522 ! Open MP loops are in physics drivers
523 ! radiation
524
525 !-----------------------------------------------------------------
526 ! urban related variable are added to arguments of radiation_driver
527 !-----------------------------------------------------------------
528
529 CALL wrf_debug ( 200 , ' call radiation_driver' )
530 BENCH_START(rad_driver_tim)
531
532 CALL radiation_driver( &
533 & ACFRCV=grid%acfrcv ,ACFRST=grid%acfrst ,ALBEDO=grid%albedo &
534 & ,CFRACH=grid%cfrach ,CFRACL=grid%cfracl ,CFRACM=grid%cfracm &
535 & ,CUPPT=grid%cuppt ,CZMEAN=grid%czmean ,DT=grid%dt &
536 & ,DZ8W=dz8w ,EMISS=grid%emiss ,GLW=grid%glw &
537 & ,GMT=grid%gmt ,GSW=grid%gsw ,HBOT=grid%hbot &
538 & ,HTOP=grid%htop ,HBOTR=grid%hbotr, HTOPR=grid%htopr ,ICLOUD=config_flags%icloud &
539 & ,ITIMESTEP=grid%itimestep,JULDAY=grid%julday, JULIAN=grid%julian &
540 & ,JULYR=grid%julyr ,LW_PHYSICS=config_flags%ra_lw_physics &
541 & ,NCFRCV=grid%ncfrcv ,NCFRST=grid%ncfrst ,NPHS=1 &
542 & ,P8W=p8w ,P=p_phy ,PI=pi_phy &
543 & ,RADT=grid%radt ,RA_CALL_OFFSET=grid%ra_call_offset &
544 & ,RHO=rho ,RLWTOA=grid%rlwtoa &
545 & ,RSWTOA=grid%rswtoa ,RTHRATEN=grid%rthraten &
546 & ,RTHRATENLW=grid%rthratenlw &
547 & ,RTHRATENSW=grid%rthratensw ,SNOW=grid%snow &
548 & ,STEPRA=grid%stepra ,SWDOWN=grid%swdown ,SWDOWNC=grid%swdownc &
549 & ,SW_PHYSICS=config_flags%ra_sw_physics ,T8W=t8w &
550 & ,T=t_phy ,TAUCLDC=grid%taucldc ,TAUCLDI=grid%taucldi &
551 & ,TSK=grid%tsk ,VEGFRA=grid%vegfra ,WARM_RAIN=grid%warm_rain &
552 & ,XICE=grid%xice &
553 & ,XLAND=grid%xland ,XLAT=grid%xlat ,XLONG=grid%xlong &
554 !Optional urban
555 & ,DECLIN_URB=grid%declin_urb ,COSZ_URB2D=grid%cosz_urb2d &
556 & ,OMG_URB2D=grid%omg_urb2d &
557 !
558 & ,Z=grid%em_z &
559 & ,LEVSIZ=grid%levsiz, N_OZMIXM=num_ozmixm &
560 & ,N_AEROSOLC=num_aerosolc &
561 & ,PAERLEV=grid%paerlev &
562 & ,CAM_ABS_DIM1=grid%cam_abs_dim1, CAM_ABS_DIM2=grid%cam_abs_dim2 &
563 & ,CAM_ABS_FREQ_S=grid%cam_abs_freq_s &
564 & ,XTIME=grid%xtime &
565 ! indexes
566 & ,IDS=ids,IDE=ide, JDS=jds,JDE=jde, KDS=kds,KDE=kde &
567 & ,IMS=ims,IME=ime, JMS=jms,JME=jme, KMS=kms,KME=kme &
568 & ,i_start=grid%i_start,i_end=min(grid%i_end, ide-1) &
569 & ,j_start=grid%j_start,j_end=min(grid%j_end, jde-1) &
570 & ,kts=k_start, kte=min(k_end,kde-1) &
571 & ,num_tiles=grid%num_tiles &
572 ! Optional
573 & , CLDFRA=grid%cldfra &
574 & , PB=grid%em_pb &
575 & , F_ICE_PHY=grid%f_ice_phy,F_RAIN_PHY=grid%f_rain_phy &
576 & , QV=moist(ims,kms,jms,P_QV), F_QV=F_QV &
577 & , QC=moist(ims,kms,jms,P_QC), F_QC=F_QC &
578 & , QR=moist(ims,kms,jms,P_QR), F_QR=F_QR &
579 & , QI=moist(ims,kms,jms,P_QI), F_QI=F_QI &
580 & , QS=moist(ims,kms,jms,P_QS), F_QS=F_QS &
581 & , QG=moist(ims,kms,jms,P_QG), F_QG=F_QG &
582 #ifdef ACFLUX
583 & ,ACSWUPT=acswupt ,ACSWUPTC=acswuptc &
584 & ,ACSWDNT=acswdnt ,ACSWDNTC=acswdntc &
585 & ,ACSWUPB=acswupb ,ACSWUPBC=acswupbc &
586 & ,ACSWDNB=acswdnb ,ACSWDNBC=acswdnbc &
587 & ,ACLWUPT=aclwupt ,ACLWUPTC=aclwuptc &
588 & ,ACLWDNT=aclwdnt ,ACLWDNTC=aclwdntc &
589 & ,ACLWUPB=aclwupb ,ACLWUPBC=aclwupbc &
590 & ,ACLWDNB=aclwdnb ,ACLWDNBC=aclwdnbc &
591 & ,SWUPT=swupt ,SWUPTC=swuptc &
592 & ,SWDNT=swdnt ,SWDNTC=swdntc &
593 & ,SWUPB=swupb ,SWUPBC=swupbc &
594 & ,SWDNB=swdnb ,SWDNBC=swdnbc &
595 & ,LWUPT=lwupt ,LWUPTC=lwuptc &
596 & ,LWDNT=lwdnt ,LWDNTC=lwdntc &
597 & ,LWUPB=lwupb ,LWUPBC=lwupbc &
598 & ,LWDNB=lwdnb ,LWDNBC=lwdnbc &
599 #endif
600 & ,LWCF=grid%lwcf &
601 & ,SWCF=grid%swcf &
602 & ,OLR=grid%olr &
603 & ,OZMIXM=grid%ozmixm, PIN=grid%pin &
604 & ,M_PS_1=grid%m_ps_1, M_PS_2=grid%m_ps_2, AEROSOLC_1=grid%aerosolc_1 &
605 & ,AEROSOLC_2=grid%aerosolc_2, M_HYBI0=grid%m_hybi &
606 & ,ABSTOT=grid%abstot, ABSNXT=grid%absnxt, EMSTOT=grid%emstot &
607 #ifdef WRF_CHEM
608 & ,QC_ADJUST=grid%GD_CLOUD , QI_ADJUST=grid%GD_CLOUD2 &
609 & ,PM2_5_DRY=grid%pm2_5_dry, PM2_5_WATER=grid%pm2_5_water &
610 & ,PM2_5_DRY_EC=grid%pm2_5_dry_ec &
611 & ,TAUAER300=grid%tauaer1, TAUAER400=grid%tauaer2 & ! jcb
612 & ,TAUAER600=grid%tauaer3, TAUAER999=grid%tauaer4 & ! jcb
613 & ,GAER300=grid%gaer1, GAER400=grid%gaer2, GAER600=grid%gaer3, GAER999=grid%gaer4 & ! jcb
614 & ,WAER300=grid%waer1, WAER400=grid%waer2, WAER600=grid%waer3, WAER999=grid%waer4 & ! jcb
615 #endif
616 & )
617
618 BENCH_END(rad_driver_tim)
619
620 !********* Surface driver
621 ! surface
622
623 BENCH_START(surf_driver_tim)
624
625 !-----------------------------------------------------------------
626 ! urban related variable are added to arguments of surface_driver
627 !-----------------------------------------------------------------
628 NUM_ROOF_LAYERS = grid%num_soil_layers !urban
629 NUM_WALL_LAYERS = grid%num_soil_layers !urban
630 NUM_ROAD_LAYERS = grid%num_soil_layers !urban
631
632 CALL wrf_debug ( 200 , ' call surface_driver' )
633 CALL surface_driver( &
634 & ACSNOM=grid%acsnom ,ACSNOW=grid%acsnow ,AKHS=grid%akhs &
635 & ,AKMS=grid%akms ,ALBBCK=grid%albbck ,ALBEDO=grid%albedo &
636 & ,BR=br ,CANWAT=grid%canwat ,CHKLOWQ=chklowq &
637 & ,CT=grid%ct ,DT=grid%dt ,DX=grid%dx &
638 & ,DZ8W=dz8w ,DZS=grid%dzs ,FLHC=grid%flhc &
639 & ,FLQC=grid%flqc ,GLW=grid%glw ,GRDFLX=grid%grdflx &
640 & ,GSW=grid%gsw ,SWDOWN=grid%swdown ,GZ1OZ0=gz1oz0 ,HFX=grid%hfx &
641 & ,HT=grid%ht ,IFSNOW=config_flags%ifsnow ,ISFFLX=config_flags%isfflx &
642 & ,ISLTYP=grid%isltyp ,ITIMESTEP=grid%itimestep &
643 & ,IVGTYP=grid%ivgtyp ,LH=grid%lh ,LOWLYR=grid%lowlyr &
644 & ,MAVAIL=grid%mavail ,NUM_SOIL_LAYERS=config_flags%num_soil_layers &
645 & ,P8W=p8w ,PBLH=grid%pblh ,PI_PHY=pi_phy &
646 & ,PSFC=grid%psfc ,PSHLTR=pshltr ,PSIH=psih &
647 & ,PSIM=psim ,P_PHY=p_phy ,Q10=q10 &
648 & ,Q2=grid%q2 ,QFX=grid%qfx ,QSFC=grid%qsfc &
649 & ,QSHLTR=qshltr ,QZ0=grid%qz0 ,RAINCV=grid%raincv &
650 & ,RA_LW_PHYSICS=config_flags%ra_lw_physics ,RHO=rho &
651 & ,RMOL=grid%rmol ,SFCEVP=grid%sfcevp ,SFCEXC=grid%sfcexc &
652 & ,SFCRUNOFF=grid%sfcrunoff &
653 & ,SF_SFCLAY_PHYSICS=config_flags%sf_sfclay_physics &
654 & ,SF_SURFACE_PHYSICS=config_flags%sf_surface_physics ,SH2O=grid%sh2o &
655 & ,SHDMAX=grid%shdmax ,SHDMIN=grid%shdmin ,SMOIS=grid%smois &
656 & ,SMSTAV=grid%smstav ,SMSTOT=grid%smstot ,SNOALB=grid%snoalb &
657 & ,SNOW=grid%snow ,SNOWC=grid%snowc ,SNOWH=grid%snowh &
658 & ,SST=grid%sst ,SST_UPDATE=grid%sst_update &
659 & ,STEPBL=grid%stepbl ,TH10=th10 ,TH2=grid%th2 &
660 & ,THZ0=grid%thz0 ,TH_PHY=th_phy ,TKE_MYJ=grid%tke_myj &
661 & ,TMN=grid%tmn ,TSHLTR=tshltr ,TSK=grid%tsk &
662 & ,TSLB=grid%tslb ,T_PHY=t_phy ,U10=grid%u10 &
663 & ,URATX=grid%uratx ,VRATX=grid%vratx ,TRATX=grid%tratx &
664 & ,UDRUNOFF=grid%udrunoff ,UST=grid%ust ,UZ0=grid%uz0 &
665 & ,U_FRAME=grid%u_frame ,U_PHY=u_phy ,V10=grid%v10 &
666 & ,VEGFRA=grid%vegfra ,VZ0=grid%vz0 ,V_FRAME=grid%v_frame &
667 & ,V_PHY=v_phy ,WARM_RAIN=grid%warm_rain &
668 & ,WSPD=wspd ,XICE=grid%xice ,XLAND=grid%xland &
669 & ,Z0=grid%z0 ,Z=grid%em_z ,ZNT=grid%znt &
670 & ,ZS=grid%zs &
671 & ,DECLIN_URB=grid%declin_urb ,COSZ_URB2D=grid%cosz_urb2d & !I urban
672 & ,OMG_URB2D=grid%omg_urb2d ,xlat_urb2d=grid%XLAT & !I urban
673 & ,NUM_ROOF_LAYERS=num_roof_layers & !I urban
674 & ,NUM_WALL_LAYERS=num_wall_layers & !I urban
675 & ,NUM_ROAD_LAYERS=num_road_layers &
676 & ,DZR=grid%dzr ,DZB=grid%dzb ,DZG=grid%dzg & !I urban
677 & ,TR_URB2D=grid%tr_urb2d ,TB_URB2D=grid%tb_urb2d &
678 & ,TG_URB2D=grid%tg_urb2d & !H urban
679 & ,TC_URB2D=grid%tc_urb2d ,QC_URB2D=grid%qc_urb2d & !H urban
680 & ,UC_URB2D=grid%uc_urb2d & !H urban
681 & ,XXXR_URB2D=grid%xxxr_urb2d &
682 & ,XXXB_URB2D=grid%xxxb_urb2d & !H urban
683 & ,XXXG_URB2D=grid%xxxg_urb2d &
684 & ,XXXC_URB2D=grid%xxxc_urb2d & !H urban
685 & ,TRL_URB3D=grid%trl_urb3d ,TBL_URB3D=grid%tbl_urb3d & !H urban
686 & ,TGL_URB3D=grid%tgl_urb3d & !H urban
687 & ,SH_URB2D=grid%sh_urb2d ,LH_URB2D=grid%lh_urb2d &
688 & ,G_URB2D=grid%g_urb2d & !H urban
689 & ,RN_URB2D=grid%rn_urb2d , TS_URB2D=grid%ts_urb2d & !H urban
690 & ,FRC_URB2D=grid%frc_urb2d & !H urban
691 & ,UTYPE_URB2D=grid%utype_urb2d & !H urban
692 & ,ucmcall=grid%ucmcall & !H urban
693 ! Indexes
694 & ,IDS=ids,IDE=ide, JDS=jds,JDE=jde, KDS=kds,KDE=kde &
695 & ,IMS=ims,IME=ime, JMS=jms,JME=jme, KMS=kms,KME=kme &
696 & , I_START=grid%i_start,I_END=min(grid%i_end, ide-1) &
697 & , J_START=grid%j_start,J_END=min(grid%j_end, jde-1) &
698 & , KTS=k_start, KTE=min(k_end,kde-1) &
699 & , NUM_TILES=grid%num_tiles &
700 ! Optional
701 & ,QV_CURR=moist(ims,kms,jms,P_QV), F_QV=F_QV &
702 & ,QC_CURR=moist(ims,kms,jms,P_QC), F_QC=F_QC &
703 & ,QR_CURR=moist(ims,kms,jms,P_QR), F_QR=F_QR &
704 & ,QI_CURR=moist(ims,kms,jms,P_QI), F_QI=F_QI &
705 & ,QS_CURR=moist(ims,kms,jms,P_QS), F_QS=F_QS &
706 & ,QG_CURR=moist(ims,kms,jms,P_QG), F_QG=F_QG &
707 & ,CAPG=grid%capg, EMISS=grid%emiss, HOL=hol,MOL=grid%mol &
708 & ,RAINBL=grid%rainbl,SR=grid%em_sr &
709 & ,RAINNCV=grid%rainncv,REGIME=regime,T2=grid%t2,THC=grid%thc &
710 & ,QSG=grid%qsg,QVG=grid%qvg,QCG=grid%qcg,SOILT1=grid%soilt1,TSNAV=grid%tsnav & ! ruc lsm
711 & ,SMFR3D=grid%smfr3d,KEEPFR3DFLAG=grid%keepfr3dflag & ! ruc lsm
712 & ,POTEVP=grid%em_POTEVP, SNOPCX=grid%em_SNOPCX, SOILTB=grid%em_SOILTB & ! ruc lsm
713 & )
714 BENCH_END(surf_driver_tim)
715
716 !*********
717 ! pbl
718
719 CALL wrf_debug ( 200 , ' call pbl_driver' )
720 BENCH_START(pbl_driver_tim)
721 CALL pbl_driver( &
722 & AKHS=grid%akhs ,AKMS=grid%akms &
723 & ,BL_PBL_PHYSICS=config_flags%bl_pbl_physics &
724 & ,BR=br ,CHKLOWQ=chklowq ,CT=grid%ct &
725 & ,DT=grid%dt ,DX=grid%dx ,DZ8W=dz8w &
726 & ,EL_MYJ=grid%el_myj ,EXCH_H=grid%exch_h ,GRDFLX=grid%grdflx &
727 & ,GZ1OZ0=gz1oz0 ,HFX=grid%hfx ,HT=grid%ht &
728 & ,ITIMESTEP=grid%itimestep ,KPBL=grid%kpbl &
729 & ,LH=grid%lh ,LOWLYR=grid%lowlyr ,P8W=p8w &
730 & ,PBLH=grid%pblh ,PI_PHY=pi_phy ,PSIH=psih &
731 & ,PSIM=psim ,P_PHY=p_phy ,QFX=grid%qfx &
732 & ,QSFC=grid%qsfc ,QZ0=grid%qz0 &
733 & ,RA_LW_PHYSICS=config_flags%ra_lw_physics &
734 & ,RHO=rho ,RQCBLTEN=grid%rqcblten ,RQIBLTEN=grid%rqiblten &
735 & ,RQVBLTEN=grid%rqvblten ,RTHBLTEN=grid%rthblten ,RUBLTEN=grid%rublten &
736 & ,RVBLTEN=grid%rvblten ,SNOW=grid%snow ,STEPBL=grid%stepbl &
737 & ,THZ0=grid%thz0 ,TH_PHY=th_phy ,TKE_MYJ=grid%tke_myj &
738 & ,TSK=grid%tsk ,T_PHY=t_phy ,UST=grid%ust &
739 & ,UZ0=grid%uz0 ,U_FRAME=grid%u_frame ,U_PHY=u_phy &
740 & ,VZ0=grid%vz0 ,V_FRAME=grid%v_frame ,V_PHY=v_phy &
741 & ,WARM_RAIN=grid%warm_rain ,WSPD=wspd &
742 & ,XICE=grid%xice ,XLAND=grid%xland ,Z=grid%em_z &
743 & ,ZNT=grid%znt &
744 & ,IDS=ids,IDE=ide, JDS=jds,JDE=jde, KDS=kds,KDE=kde &
745 & ,IMS=ims,IME=ime, JMS=jms,JME=jme, KMS=kms,KME=kme &
746 & ,I_START=grid%i_start,I_END=min(grid%i_end, ide-1) &
747 & ,J_START=grid%j_start,J_END=min(grid%j_end, jde-1) &
748 & ,KTS=k_start, KTE=min(k_end,kde-1) &
749 & ,NUM_TILES=grid%num_tiles &
750 ! optional
751 & ,QV_CURR=moist(ims,kms,jms,P_QV), F_QV=F_QV &
752 & ,QC_CURR=moist(ims,kms,jms,P_QC), F_QC=F_QC &
753 & ,QR_CURR=moist(ims,kms,jms,P_QR), F_QR=F_QR &
754 & ,QI_CURR=moist(ims,kms,jms,P_QI), F_QI=F_QI &
755 & ,QS_CURR=moist(ims,kms,jms,P_QS), F_QS=F_QS &
756 & ,QG_CURR=moist(ims,kms,jms,P_QG), F_QG=F_QG &
757 & ,HOL=HOL, MOL=grid%mol, REGIME=REGIME &
758 & )
759
760 BENCH_END(pbl_driver_tim)
761
762 ! cumulus para.
763
764 CALL wrf_debug ( 200 , ' call cumulus_driver' )
765
766 BENCH_START(cu_driver_tim)
767 CALL cumulus_driver( &
768 ! Prognostic variables
769 & U=u_phy ,V=v_phy ,TH=th_phy ,T=t_phy &
770 & ,W=grid%em_w_2 ,P=p_phy ,PI=pi_phy ,RHO=rho &
771 ! Other arguments
772 & ,ITIMESTEP=grid%itimestep ,DT=grid%dt ,DX=grid%dx &
773 & ,RAINC=grid%rainc ,RAINCV=grid%raincv ,NCA=grid%nca &
774 & ,HTOP=grid%cutop ,HBOT=grid%cubot ,KPBL=grid%kpbl &
775 & ,DZ8W=dz8w ,P8W=p8w &
776 & ,W0AVG=grid%w0avg ,STEPCU=grid%stepcu &
777 & ,CLDEFI=grid%cldefi ,LOWLYR=grid%lowlyr ,XLAND=grid%xland &
778 & ,APR_GR=grid%apr_gr ,APR_W=grid%apr_w ,APR_MC=grid%apr_mc &
779 & ,APR_ST=grid%apr_st ,APR_AS=grid%apr_as ,APR_CAPMA=grid%apr_capma &
780 & ,APR_CAPME=grid%apr_capme ,APR_CAPMI=grid%apr_capmi &
781 & ,MASS_FLUX=grid%mass_flux ,XF_ENS=grid%xf_ens &
782 & ,PR_ENS=grid%pr_ens ,HT=grid%ht &
783 & ,ENSDIM=config_flags%ensdim ,MAXIENS=config_flags%maxiens ,MAXENS=config_flags%maxens &
784 & ,MAXENS2=config_flags%maxens2 ,MAXENS3=config_flags%maxens3 &
785 & ,CU_ACT_FLAG=cu_act_flag ,WARM_RAIN=grid%warm_rain &
786 & ,GSW=grid%gsw &
787 ! Selection flag
788 & ,CU_PHYSICS=config_flags%cu_physics &
789 ! Dimension arguments
790 & ,IDS=ids,IDE=ide, JDS=jds,JDE=jde, KDS=kds,KDE=kde &
791 & ,IMS=ims,IME=ime, JMS=jms,JME=jme, KMS=kms,KME=kme &
792 & ,I_START=grid%i_start,I_END=min(grid%i_end, ide-1) &
793 & ,J_START=grid%j_start,J_END=min(grid%j_end, jde-1) &
794 & ,KTS=k_start, KTE=min(k_end,kde-1) &
795 & ,NUM_TILES=grid%num_tiles &
796 ! Moisture tendency arguments
797 & ,RQVCUTEN=grid%rqvcuten , RQCCUTEN=grid%rqccuten &
798 & ,RQSCUTEN=grid%rqscuten , RQICUTEN=grid%rqicuten &
799 & ,RQRCUTEN=grid%rqrcuten , RQVBLTEN=grid%rqvblten &
800 & ,RQVFTEN=grid%rqvften &
801 ! Other tendency arguments
802 & ,RTHRATEN=grid%rthraten , RTHBLTEN=grid%rthblten &
803 & ,RTHCUTEN=grid%rthcuten , RTHFTEN=grid%rthften &
804 ! Moisture tracer arguments
805 & ,QV_CURR=moist(ims,kms,jms,P_QV), F_QV=F_QV &
806 & ,QC_CURR=moist(ims,kms,jms,P_QC), F_QC=F_QC &
807 & ,QR_CURR=moist(ims,kms,jms,P_QR), F_QR=F_QR &
808 & ,QI_CURR=moist(ims,kms,jms,P_QI), F_QI=F_QI &
809 & ,QS_CURR=moist(ims,kms,jms,P_QS), F_QS=F_QS &
810 & ,QG_CURR=moist(ims,kms,jms,P_QG), F_QG=F_QG &
811 #ifdef WRF_CHEM
812 & ,GD_CLOUD=grid%GD_CLOUD,GD_CLOUD2=grid%GD_CLOUD2 &
813 #endif
814 & )
815 BENCH_END(cu_driver_tim)
816
817 ! fdda
818
819 CALL wrf_debug ( 200 , ' call fddagd_driver' )
820
821 BENCH_START(fdda_driver_tim)
822 CALL fddagd_driver(itimestep=grid%itimestep,dt=grid%dt,xtime=grid%XTIME, &
823 id=grid%id, &
824 RUNDGDTEN=grid%rundgdten,RVNDGDTEN=grid%rvndgdten, &
825 RTHNDGDTEN=grid%rthndgdten,RQVNDGDTEN=grid%rqvndgdten, &
826 RMUNDGDTEN=grid%rmundgdten, &
827 u_ndg_old=fdda3d(ims,kms,jms,P_u_ndg_old), &
828 v_ndg_old=fdda3d(ims,kms,jms,P_v_ndg_old), &
829 t_ndg_old=fdda3d(ims,kms,jms,P_t_ndg_old), &
830 q_ndg_old=fdda3d(ims,kms,jms,P_q_ndg_old), &
831 mu_ndg_old=fdda2d(ims,1,jms,P_mu_ndg_old), &
832 u_ndg_new=fdda3d(ims,kms,jms,P_u_ndg_new), &
833 v_ndg_new=fdda3d(ims,kms,jms,P_v_ndg_new), &
834 t_ndg_new=fdda3d(ims,kms,jms,P_t_ndg_new), &
835 q_ndg_new=fdda3d(ims,kms,jms,P_q_ndg_new), &
836 mu_ndg_new=fdda2d(ims,1,jms,P_mu_ndg_new), &
837 u3d=grid%em_u_2,v3d=grid%em_v_2,th_phy=th_phy,rho=rho,moist=moist, &
838 p_phy=p_phy,pi_phy=pi_phy,p8w=p8w,t_phy=t_phy, &
839 dz8w=dz8w,z=grid%em_z,z_at_w=z_at_w, &
840 config_flags=config_flags,dx=grid%DX,n_moist=num_3d_m, &
841 STEPFG=grid%STEPFG, &
842 pblh=grid%pblh,ht=grid%ht, &
843 IDS=ids,IDE=ide, JDS=jds,JDE=jde, KDS=kds,KDE=kde &
844 ,IMS=ims,IME=ime, JMS=jms,JME=jme, KMS=kms,KME=kme &
845 ,I_START=grid%i_start,I_END=min(grid%i_end, ide-1) &
846 ,J_START=grid%j_start,J_END=min(grid%j_end, jde-1) &
847 ,KTS=k_start, KTE=min(k_end,kde-1) &
848 , num_tiles=grid%num_tiles )
849 BENCH_END(fdda_driver_tim)
850
851 ! calculate_phy_tend
852
853 BENCH_START(cal_phy_tend)
854 !$OMP PARALLEL DO &
855 !$OMP PRIVATE ( ij )
856
857 DO ij = 1 , grid%num_tiles
858
859 CALL wrf_debug ( 200 , ' call calculate_phy_tend' )
860 CALL calculate_phy_tend (config_flags,grid%em_mut,grid%em_muu,grid%em_muv,pi_phy, &
861 grid%rthraten, &
862 grid%rublten,grid%rvblten,grid%rthblten, &
863 grid%rqvblten,grid%rqcblten,grid%rqiblten, &
864 grid%rthcuten,grid%rqvcuten,grid%rqccuten,grid%rqrcuten, &
865 grid%rqicuten,grid%rqscuten, &
866 grid%RUNDGDTEN,grid%RVNDGDTEN,grid%RTHNDGDTEN,grid%RQVNDGDTEN, &
867 grid%RMUNDGDTEN, &
868 ids,ide, jds,jde, kds,kde, &
869 ims,ime, jms,jme, kms,kme, &
870 grid%i_start(ij), min(grid%i_end(ij),ide-1), &
871 grid%j_start(ij), min(grid%j_end(ij),jde-1), &
872 k_start , min(k_end,kde-1) )
873
874 ENDDO
875 !$OMP END PARALLEL DO
876 BENCH_END(cal_phy_tend)
877
878 ! tke diffusion: diffusion metrics, deformation/divergence, and eddy coefficients (only for diff_opt = 1 or 2)
879
880 IF(config_flags%diff_opt .eq. 2 .OR. config_flags%diff_opt .eq. 1) THEN
881
882 BENCH_START(comp_diff_metrics_tim)
883 !$OMP PARALLEL DO &
884 !$OMP PRIVATE ( ij )
885
886 DO ij = 1 , grid%num_tiles
887
888 CALL wrf_debug ( 200 , ' call compute_diff_metrics ' )
889 CALL compute_diff_metrics ( config_flags, grid%em_ph_2, grid%em_phb, grid%em_z, grid%em_rdz, grid%em_rdzw, &
890 grid%em_zx, grid%em_zy, grid%rdx, grid%rdy, &
891 ids, ide, jds, jde, kds, kde, &
892 ims, ime, jms, jme, kms, kme, &
893 grid%i_start(ij), grid%i_end(ij), &
894 grid%j_start(ij), grid%j_end(ij), &
895 k_start , k_end )
896 ENDDO
897 !$OMP END PARALLEL DO
898 BENCH_END(comp_diff_metrics_tim)
899
900 #ifdef DM_PARALLEL
901 # include "PERIOD_BDY_EM_A1.inc"
902 #endif
903
904 BENCH_START(tke_diff_bc_tim)
905 DO ij = 1 , grid%num_tiles
906
907 CALL wrf_debug ( 200 , ' call bc for diffusion_metrics ' )
908 CALL set_physical_bc3d( grid%em_rdzw , 'w', config_flags, &
909 ids, ide, jds, jde, kds, kde, &
910 ims, ime, jms, jme, kms, kme, &
911 ips, ipe, jps, jpe, kps, kpe, &
912 grid%i_start(ij), grid%i_end(ij), &
913 grid%j_start(ij), grid%j_end(ij), &
914 k_start , k_end )
915 CALL set_physical_bc3d( grid%em_rdz , 'w', config_flags, &
916 ids, ide, jds, jde, kds, kde, &
917 ims, ime, jms, jme, kms, kme, &
918 ips, ipe, jps, jpe, kps, kpe, &
919 grid%i_start(ij), grid%i_end(ij), &
920 grid%j_start(ij), grid%j_end(ij), &
921 k_start , k_end )
922 CALL set_physical_bc3d( grid%em_z , 'w', config_flags, &
923 ids, ide, jds, jde, kds, kde, &
924 ims, ime, jms, jme, kms, kme, &
925 ips, ipe, jps, jpe, kps, kpe, &
926 grid%i_start(ij), grid%i_end(ij), &
927 grid%j_start(ij), grid%j_end(ij), &
928 k_start , k_end )
929 CALL set_physical_bc3d( grid%em_zx , 'w', config_flags, &
930 ids, ide, jds, jde, kds, kde, &
931 ims, ime, jms, jme, kms, kme, &
932 ips, ipe, jps, jpe, kps, kpe, &
933 grid%i_start(ij), grid%i_end(ij), &
934 grid%j_start(ij), grid%j_end(ij), &
935 k_start , k_end )
936 CALL set_physical_bc3d( grid%em_zy , 'w', config_flags, &
937 ids, ide, jds, jde, kds, kde, &
938 ims, ime, jms, jme, kms, kme, &
939 ips, ipe, jps, jpe, kps, kpe, &
940 grid%i_start(ij), grid%i_end(ij), &
941 grid%j_start(ij), grid%j_end(ij), &
942 k_start , k_end )
943
944 ENDDO
945 BENCH_END(tke_diff_bc_tim)
946
947 #ifdef DM_PARALLEL
948 # include "HALO_EM_TKE_C.inc"
949 #endif
950
951 BENCH_START(deform_div_tim)
952
953 !$OMP PARALLEL DO &
954 !$OMP PRIVATE ( ij )
955
956 DO ij = 1 , grid%num_tiles
957
958 CALL wrf_debug ( 200 , ' call cal_deform_and_div' )
959 CALL cal_deform_and_div ( config_flags,grid%em_u_2,grid%em_v_2,grid%em_w_2,grid%div, &
960 grid%defor11,grid%defor22,grid%defor33,grid%defor12, &
961 grid%defor13,grid%defor23, &
962 grid%u_base, grid%v_base,grid%msfu,grid%msfv,grid%msft, &
963 grid%rdx, grid%rdy, grid%em_dn, grid%em_dnw, grid%em_rdz, grid%em_rdzw, &
964 grid%em_fnm,grid%em_fnp,grid%cf1,grid%cf2,grid%cf3,grid%em_zx,grid%em_zy, &
965 ids, ide, jds, jde, kds, kde, &
966 ims, ime, jms, jme, kms, kme, &
967 grid%i_start(ij), grid%i_end(ij), &
968 grid%j_start(ij), grid%j_end(ij), &
969 k_start , k_end )
970 ENDDO
971 !$OMP END PARALLEL DO
972 BENCH_END(deform_div_tim)
973
974
975 #ifdef DM_PARALLEL
976 # include "HALO_EM_TKE_D.inc"
977 #endif
978
979
980 ! calculate tke, kmh, and kmv
981
982 BENCH_START(calc_tke_tim)
983 !$OMP PARALLEL DO &
984 !$OMP PRIVATE ( ij )
985
986 DO ij = 1 , grid%num_tiles
987
988 #ifdef WRF_PLUS
989 if(TEST_calculate_km_kh) THEN
990 CALL t_calculate_km_kh( config_flags,grid%dt,grid%dampcoef,grid%zdamp,config_flags%damp_opt, &
991 grid%xkmh,grid%xkmhd,grid%xkmv,grid%xkhh,grid%xkhv,grid%BN2, &
992 grid%khdif,grid%kvdif,grid%div, &
993 grid%defor11,grid%defor22,grid%defor33,grid%defor12, &
994 grid%defor13,grid%defor23, &
995 grid%em_tke_2(ims,kms,jms),p8w,t8w,th_phy, &
996 t_phy,p_phy,moist,grid%em_dn,grid%em_dnw, &
997 grid%dx,grid%dy,grid%em_rdz,grid%em_rdzw,config_flags%mix_cr_len,num_3d_m, &
998 grid%cf1, grid%cf2, grid%cf3, grid%warm_rain, &
999 grid%kh_tke_upper_bound, grid%kv_tke_upper_bound, &
1000 ids,ide, jds,jde, kds,kde, &
1001 ims,ime, jms,jme, kms,kme, &
1002 grid%i_start(ij), grid%i_end(ij), &
1003 grid%j_start(ij), grid%j_end(ij), &
1004 k_start , k_end )
1005 END IF
1006 #endif
1007 CALL wrf_debug ( 200 , ' call calculate_km_kh' )
1008 CALL calculate_km_kh( config_flags,grid%dt,grid%dampcoef,grid%zdamp,config_flags%damp_opt, &
1009 grid%xkmh,grid%xkmhd,grid%xkmv,grid%xkhh,grid%xkhv,grid%bn2, &
1010 grid%khdif,grid%kvdif,grid%div, &
1011 grid%defor11,grid%defor22,grid%defor33,grid%defor12, &
1012 grid%defor13,grid%defor23, &
1013 grid%em_tke_2,p8w,t8w,th_phy, &
1014 t_phy,p_phy,moist,grid%em_dn,grid%em_dnw, &
1015 grid%dx,grid%dy,grid%em_rdz,grid%em_rdzw,config_flags%mix_cr_len,num_3d_m, &
1016 grid%cf1, grid%cf2, grid%cf3, grid%warm_rain, &
1017 grid%kh_tke_upper_bound, grid%kv_tke_upper_bound, &
1018 ids,ide, jds,jde, kds,kde, &
1019 ims,ime, jms,jme, kms,kme, &
1020 grid%i_start(ij), grid%i_end(ij), &
1021 grid%j_start(ij), grid%j_end(ij), &
1022 k_start , k_end )
1023 ENDDO
1024 !$OMP END PARALLEL DO
1025 BENCH_END(calc_tke_tim)
1026
1027 #ifdef DM_PARALLEL
1028 # include "HALO_EM_TKE_E.inc"
1029 #endif
1030
1031 ENDIF
1032
1033 #ifdef DM_PARALLEL
1034 # include "PERIOD_BDY_EM_PHY_BC.inc"
1035 IF ( config_flags%grid_fdda .eq. 1) THEN
1036 # include "PERIOD_BDY_EM_FDDA_BC.inc"
1037 ENDIF
1038 # include "PERIOD_BDY_EM_CHEM.inc"
1039 #endif
1040
1041 BENCH_START(phy_bc_tim)
1042 !$OMP PARALLEL DO &
1043 !$OMP PRIVATE ( ij )
1044
1045 DO ij = 1 , grid%num_tiles
1046
1047 CALL wrf_debug ( 200 , ' call phy_bc' )
1048 CALL phy_bc (config_flags,grid%div,grid%defor11,grid%defor22,grid%defor33, &
1049 grid%defor12,grid%defor13,grid%defor23, &
1050 grid%xkmh,grid%xkmhd,grid%xkmv,grid%xkhh,grid%xkhv, &
1051 grid%em_tke_2, &
1052 grid%rublten, grid%rvblten, &
1053 ids, ide, jds, jde, kds, kde, &
1054 ims, ime, jms, jme, kms, kme, &
1055 ips, ipe, jps, jpe, kps, kpe, &
1056 grid%i_start(ij), grid%i_end(ij), &
1057 grid%j_start(ij), grid%j_end(ij), &
1058 k_start , k_end )
1059 ENDDO
1060 !$OMP END PARALLEL DO
1061 BENCH_END(phy_bc_tim)
1062
1063 #ifdef DM_PARALLEL
1064 !-----------------------------------------------------------------------
1065 !
1066 ! MPP for some physics tendency, km, kh, deformation, and divergence
1067 !
1068 ! * *
1069 ! * + * * + * +
1070 ! * *
1071 !
1072 ! (for PBL)
1073 ! grid%rublten x
1074 ! grid%rvblten x
1075 !
1076 ! (for diff_opt >= 1)
1077 ! grid%defor11 x
1078 ! grid%defor22 x
1079 ! grid%defor12 x
1080 ! grid%defor13 x
1081 ! grid%defor23 x
1082 ! grid%div x
1083 ! grid%xkmv x
1084 ! grid%xkmh x
1085 ! grid%xkmhd x
1086 ! grid%xkhv x
1087 ! grid%xkhh x
1088 ! tke x
1089 !
1090 !-----------------------------------------------------------------------
1091 IF ( config_flags%bl_pbl_physics .ge. 1 ) THEN
1092 # include "HALO_EM_PHYS_PBL.inc"
1093 ENDIF
1094 IF ( config_flags%grid_fdda .eq. 1) THEN
1095 # include "HALO_EM_FDDA.inc"
1096 ENDIF
1097 IF ( config_flags%diff_opt .ge. 1 ) THEN
1098 # include "HALO_EM_PHYS_DIFFUSION.inc"
1099 ENDIF
1100
1101 IF ( config_flags%h_mom_adv_order <= 4 ) THEN
1102 # include "HALO_EM_TKE_3.inc"
1103 ELSE IF ( config_flags%h_mom_adv_order <= 6 ) THEN
1104 # include "HALO_EM_TKE_5.inc"
1105 ELSE
1106 WRITE(wrf_err_message,*)'solve_em: invalid h_mom_adv_order = ',config_flags%h_mom_adv_order
1107 CALL wrf_error_fatal(TRIM(wrf_err_message))
1108 ENDIF
1109 #endif
1110
1111 BENCH_START(update_phy_ten_tim)
1112 !$OMP PARALLEL DO &
1113 !$OMP PRIVATE ( ij )
1114
1115 DO ij = 1 , grid%num_tiles
1116
1117 CALL wrf_debug ( 200 , ' call update_phy_ten' )
1118 CALL update_phy_ten(t_tendf, ru_tendf, rv_tendf,moist_tend, &
1119 mu_tendf, &
1120 grid%rthraten,grid%rthblten,grid%rthcuten,grid%rublten,grid%rvblten, &
1121 grid%rqvblten,grid%rqcblten,grid%rqiblten, &
1122 grid%rqvcuten,grid%rqccuten,grid%rqrcuten,grid%rqicuten,grid%rqscuten, &
1123 grid%RUNDGDTEN,grid%RVNDGDTEN,grid%RTHNDGDTEN,grid%RQVNDGDTEN, &
1124 grid%RMUNDGDTEN, &
1125 num_3d_m,config_flags,rk_step, &
1126 ids, ide, jds, jde, kds, kde, &
1127 ims, ime, jms, jme, kms, kme, &
1128 grid%i_start(ij), grid%i_end(ij), &
1129 grid%j_start(ij), grid%j_end(ij), &
1130 k_start, k_end )
1131
1132 END DO
1133 !$OMP END PARALLEL DO
1134 BENCH_END(update_phy_ten_tim)
1135
1136 IF( config_flags%diff_opt .eq. 2 .and. config_flags%km_opt .eq. 2 ) THEN
1137
1138 BENCH_START(tke_rhs_tim)
1139 !$OMP PARALLEL DO &
1140 !$OMP PRIVATE ( ij )
1141
1142 DO ij = 1 , grid%num_tiles
1143
1144 CALL tke_rhs ( tke_tend,grid%bn2, &
1145 config_flags,grid%defor11,grid%defor22,grid%defor33, &
1146 grid%defor12,grid%defor13,grid%defor23,grid%em_u_2,grid%em_v_2,grid%em_w_2,grid%div, &
1147 grid%em_tke_2,grid%em_mut, &
1148 th_phy,p_phy,p8w,t8w,grid%em_z,grid%em_fnm,grid%em_fnp, &
1149 grid%cf1,grid%cf2,grid%cf3,grid%msft,grid%xkmh,grid%xkmv,grid%xkhv,grid%rdx,grid%rdy, &
1150 grid%dx,grid%dy,grid%dt,grid%em_zx,grid%em_zy,grid%em_rdz,grid%em_rdzw,grid%em_dn, &
1151 grid%em_dnw,config_flags%mix_cr_len, &
1152 ids, ide, jds, jde, kds, kde, &
1153 ims, ime, jms, jme, kms, kme, &
1154 grid%i_start(ij), grid%i_end(ij), &
1155 grid%j_start(ij), grid%j_end(ij), &
1156 k_start , k_end )
1157
1158 ENDDO
1159 !$OMP END PARALLEL DO
1160 BENCH_END(tke_rhs_tim)
1161
1162 ENDIF
1163
1164 ! calculate vertical diffusion first and then horizontal
1165 ! (keep this order)
1166
1167 IF(config_flags%diff_opt .eq. 2) THEN
1168
1169 IF (config_flags%bl_pbl_physics .eq. 0) THEN
1170
1171 BENCH_START(vert_diff_tim)
1172 !$OMP PARALLEL DO &
1173 !$OMP PRIVATE ( ij )
1174 DO ij = 1 , grid%num_tiles
1175
1176 CALL wrf_debug ( 200 , ' call vertical_diffusion_2 ' )
1177 CALL vertical_diffusion_2( ru_tendf, rv_tendf, rw_tendf, &
1178 t_tendf, tke_tend, &
1179 moist_tend, num_3d_m, &
1180 chem_tend, num_3d_c, &
1181 scalar_tend, num_3d_s, &
1182 grid%em_u_2, grid%em_v_2, &
1183 grid%em_t_2,grid%u_base,grid%v_base,grid%em_t_base,grid%qv_base, &
1184 grid%em_mut,grid%em_tke_2,config_flags, &
1185 grid%defor13,grid%defor23,grid%defor33, &
1186 grid%div, moist, chem, scalar, &
1187 grid%xkmv, grid%xkhv, config_flags%km_opt, &
1188 grid%em_fnm, grid%em_fnp, grid%em_dn, grid%em_dnw, grid%em_rdz, grid%em_rdzw, &
1189 ids, ide, jds, jde, kds, kde, &
1190 ims, ime, jms, jme, kms, kme, &
1191 grid%i_start(ij), grid%i_end(ij), &
1192 grid%j_start(ij), grid%j_end(ij), &
1193 k_start , k_end )
1194
1195 ENDDO
1196 !$OMP END PARALLEL DO
1197 BENCH_END(vert_diff_tim)
1198
1199 ENDIF
1200 !
1201 BENCH_START(hor_diff_tim)
1202 !$OMP PARALLEL DO &
1203 !$OMP PRIVATE ( ij )
1204 DO ij = 1 , grid%num_tiles
1205
1206 CALL wrf_debug ( 200 , ' call horizontal_diffusion_2' )
1207 CALL horizontal_diffusion_2( t_tendf, ru_tendf, rv_tendf, rw_tendf, &
1208 tke_tend, &
1209 moist_tend, num_3d_m, &
1210 chem_tend, num_3d_c, &
1211 scalar_tend, num_3d_s, &
1212 grid%em_t_2, th_phy, &
1213 grid%em_mut, grid%em_tke_2, config_flags, &
1214 grid%defor11, grid%defor22, grid%defor12, &
1215 grid%defor13, grid%defor23, grid%div, &
1216 moist, chem, scalar, &
1217 grid%msfu, grid%msfv, grid%msft, grid%xkmhd, grid%xkhh, config_flags%km_opt, &
1218 grid%rdx, grid%rdy, grid%em_rdz, grid%em_rdzw, &
1219 grid%em_fnm, grid%em_fnp, grid%cf1, grid%cf2, grid%cf3, &
1220 grid%em_zx, grid%em_zy, grid%em_dn, grid%em_dnw, &
1221 ids, ide, jds, jde, kds, kde, &
1222 ims, ime, jms, jme, kms, kme, &
1223 grid%i_start(ij), grid%i_end(ij), &
1224 grid%j_start(ij), grid%j_end(ij), &
1225 k_start , k_end )
1226 ENDDO
1227 !$OMP END PARALLEL DO
1228 BENCH_END(hor_diff_tim)
1229
1230 ENDIF
1231
1232 # ifdef DM_PARALLEL
1233 # include "HALO_OBS_NUDGE.inc"
1234 #endif
1235 !***********************************************************************
1236 ! This section applies observation (obs) nudging via fddaobs_driver.
1237 !$OMP PARALLEL DO &
1238 !$OMP PRIVATE ( ij )
1239
1240 DO ij = 1 , grid%num_tiles
1241
1242 CALL fddaobs_driver (grid%grid_id, model_config_rec%grid_id, &
1243 model_config_rec%parent_id, config_flags%restart, &
1244 grid%obs_nudge_opt, &
1245 grid%obs_ipf_errob, &
1246 grid%obs_ipf_nudob, &
1247 grid%fdda_start, &
1248 grid%fdda_end, &
1249 grid%obs_nudge_wind, &
1250 grid%obs_nudge_temp, &
1251 grid%obs_nudge_mois, &
1252 grid%obs_nudge_pstr, &
1253 grid%obs_coef_wind, &
1254 grid%obs_coef_temp, &
1255 grid%obs_coef_mois, &
1256 grid%obs_coef_pstr, &
1257 grid%obs_rinxy, &
1258 grid%obs_rinsig, &
1259 grid%obs_twindo, &
1260 grid%obs_npfi, &
1261 grid%obs_ionf, &
1262 grid%obs_idynin, &
1263 grid%obs_dtramp, &
1264 model_config_rec%cen_lat(1), &
1265 model_config_rec%cen_lon(1), &
1266 config_flags%truelat1, &
1267 config_flags%truelat2, &
1268 config_flags%map_proj, &
1269 model_config_rec%i_parent_start, &
1270 model_config_rec%j_parent_start, &
1271 grid%parent_grid_ratio, &
1272 grid%max_dom, grid%itimestep, &
1273 grid%dt, grid%gmt, grid%julday, grid%fdob, &
1274 grid%max_obs, &
1275 model_config_rec%nobs_ndg_vars, &
1276 model_config_rec%nobs_err_flds, &
1277 grid%fdob%nstat, grid%fdob%varobs, grid%fdob%errf, &
1278 grid%dx, grid%KPBL,grid%HT, &
1279 grid%em_mut, grid%em_muu, grid%em_muv, &
1280 grid%msft, grid%msfu, grid%msfv, &
1281 p_phy, t_tendf, t0, &
1282 grid%em_u_2, grid%em_v_2, grid%em_t_2, &
1283 moist(:,:,:,P_QV), &
1284 grid%em_pb, grid%p_top, grid%em_p, &
1285 grid%uratx, grid%vratx, grid%tratx, &
1286 ru_tendf, rv_tendf, &
1287 moist_tend(:,:,:,P_QV), grid%em_obs_savwt, &
1288 ids,ide, jds,jde, kds,kde, &
1289 ims,ime, jms,jme, kms,kme, &
1290 grid%i_start(ij), min(grid%i_end(ij),ide-1), &
1291 grid%j_start(ij), min(grid%j_end(ij),jde-1), &
1292 k_start , min(k_end,kde-1) )
1293
1294 ENDDO
1295
1296 !$OMP END PARALLEL DO
1297 !
1298 !***********************************************************************
1299
1300 END IF rk_step_is_one
1301
1302 BENCH_START(rk_tend_tim)
1303 !$OMP PARALLEL DO &
1304 !$OMP PRIVATE ( ij )
1305 DO ij = 1 , grid%num_tiles
1306
1307 #ifdef WRF_PLUS
1308 if(TEST_rk_tendency) THEN
1309 CALL t_rk_tendency ( config_flags, rk_step, &
1310 grid%em_ru_tend, grid%em_rv_tend, rw_tend, ph_tend, t_tend, &
1311 ru_tendf, rv_tendf, rw_tendf, ph_tendf, t_tendf, &
1312 mu_tend, grid%em_u_save, grid%em_v_save, w_save, ph_save, &
1313 grid%em_t_save, mu_save, grid%RTHFTEN, &
1314 grid%em_ru, grid%em_rv, grid%em_rw, grid%em_ww, &
1315 grid%em_u_2, grid%em_v_2, grid%em_w_2, grid%em_t_2, grid%em_ph_2, &
1316 grid%em_u_1, grid%em_v_1, grid%em_w_1, grid%em_t_1, grid%em_ph_1, &
1317 grid%h_diabatic, grid%em_phb, grid%em_t_init, &
1318 grid%em_mu_2, grid%em_mut, grid%em_muu, grid%em_muv, grid%em_mub, &
1319 grid%em_al, grid%em_alt, grid%em_p, grid%em_pb, grid%em_php, cqu, cqv, cqw, &
1320 grid%u_base, grid%v_base, grid%em_t_base, grid%qv_base, grid%z_base, &
1321 grid%msfu, grid%msfv, grid%msft, grid%f, grid%e, grid%sina, grid%cosa, &
1322 grid%em_fnm, grid%em_fnp, grid%em_rdn, grid%em_rdnw, &
1323 grid%dt, grid%rdx, grid%rdy, grid%khdif, grid%kvdif, grid%xkmhd, &
1324                          grid%diff_6th_opt, grid%diff_6th_rate,                    &
!                         NOTE(review): this WRF_PLUS-only test call passes grid%diff_6th_rate,
!                         while the production rk_tendency call below passes grid%diff_6th_factor
!                         in the same argument position -- confirm which registry field
!                         t_rk_tendency actually expects.
1325 grid%dampcoef,grid%zdamp,config_flags%damp_opt, &
1326 grid%cf1, grid%cf2, grid%cf3, grid%cfn, grid%cfn1, num_3d_m, &
1327 config_flags%non_hydrostatic, &
1328 ids, ide, jds, jde, kds, kde, &
1329 ims, ime, jms, jme, kms, kme, &
1330 grid%i_start(ij), grid%i_end(ij), &
1331 grid%j_start(ij), grid%j_end(ij), &
1332 k_start, k_end )
1333 END IF
1334 #endif
1335 CALL wrf_debug ( 200 , ' call rk_tendency' )
1336 CALL rk_tendency ( config_flags, rk_step, &
1337 grid%em_ru_tend, grid%em_rv_tend, rw_tend, ph_tend, t_tend, &
1338 ru_tendf, rv_tendf, rw_tendf, ph_tendf, t_tendf, &
1339 mu_tend, grid%em_u_save, grid%em_v_save, w_save, ph_save, &
1340 grid%em_t_save, mu_save, grid%rthften, &
1341 grid%em_ru, grid%em_rv, grid%em_rw, grid%em_ww, &
1342 grid%em_u_2, grid%em_v_2, grid%em_w_2, grid%em_t_2, grid%em_ph_2, &
1343 grid%em_u_1, grid%em_v_1, grid%em_w_1, grid%em_t_1, grid%em_ph_1, &
1344 grid%h_diabatic, grid%em_phb, grid%em_t_init, &
1345 grid%em_mu_2, grid%em_mut, grid%em_muu, grid%em_muv, grid%em_mub, &
1346 grid%em_al, grid%em_alt, grid%em_p, grid%em_pb, grid%em_php, cqu, cqv, cqw, &
1347 grid%u_base, grid%v_base, grid%em_t_base, grid%qv_base, grid%z_base, &
1348 grid%msfu, grid%msfv, grid%msft, grid%f, grid%e, grid%sina, grid%cosa, &
1349 grid%em_fnm, grid%em_fnp, grid%em_rdn, grid%em_rdnw, &
1350 grid%dt, grid%rdx, grid%rdy, grid%khdif, grid%kvdif, grid%xkmhd, &
1351 grid%diff_6th_opt, grid%diff_6th_factor, &
1352 grid%dampcoef,grid%zdamp,config_flags%damp_opt, &
1353 grid%cf1, grid%cf2, grid%cf3, grid%cfn, grid%cfn1, num_3d_m, &
1354 config_flags%non_hydrostatic, &
1355 ids, ide, jds, jde, kds, kde, &
1356 ims, ime, jms, jme, kms, kme, &
1357 grid%i_start(ij), grid%i_end(ij), &
1358 grid%j_start(ij), grid%j_end(ij), &
1359 k_start, k_end )
1360
1361 #ifdef WRF_PLUS
1362 if(TEST_advect_scalar) THEN
1363 CALL t_advect_scalar ( grid%em_t_2, grid%em_t_2, t_tend, grid%em_ru, grid%em_rv, grid%em_ww, &
1364 grid%em_mut, config_flags, &
1365 grid%msfu, grid%msfv, grid%msft, grid%em_fnm, grid%em_fnp, &
1366 grid%rdx, grid%rdy, grid%em_rdnw, &
1367 ids, ide, jds, jde, kds, kde, &
1368 ims, ime, jms, jme, kms, kme, &
1369 grid%i_start(ij), grid%i_end(ij), &
1370 grid%j_start(ij), grid%j_end(ij), &
1371 k_start, k_end )
1372 END IF
1373 #endif
1374 END DO
1375 !$OMP END PARALLEL DO
1376 BENCH_END(rk_tend_tim)
1377
1378 BENCH_START(relax_bdy_dry_tim)
1379 !$OMP PARALLEL DO &
1380 !$OMP PRIVATE ( ij )
1381 DO ij = 1 , grid%num_tiles
1382
1383 IF( (config_flags%specified .or. config_flags%nested) .and. rk_step == 1 ) THEN
1384
1385 #ifdef WRF_PLUS
1386 if(TEST_relax_bdy_dry) THEN
1387 CALL t_relax_bdy_dry ( config_flags, &
1388 grid%em_u_save, grid%em_v_save, ph_save, grid%em_t_save, &
1389 w_save, mu_tend, &
1390 grid%em_ru, grid%em_rv, grid%em_ph_2, grid%em_t_2, &
1391 grid%em_w_2, grid%em_mu_2, grid%em_mut, &
1392 grid%em_u_b, grid%em_v_b, grid%em_ph_b, grid%em_t_b, grid%em_w_b, &
1393 grid%em_mu_b, &
1394 grid%em_u_bt, grid%em_v_bt, grid%em_ph_bt, grid%em_t_bt, &
1395 grid%em_w_bt, grid%em_mu_bt, &
1396 config_flags%spec_bdy_width, grid%spec_zone, grid%relax_zone, &
1397 grid%dtbc, grid%fcx, grid%gcx, &
1398 ijds, ijde, &
1399 ids,ide, jds,jde, kds,kde, &
1400 ims,ime, jms,jme, kms,kme, &
1401 ips,ipe, jps,jpe, kps,kpe, &
1402 grid%i_start(ij), grid%i_end(ij), &
1403 grid%j_start(ij), grid%j_end(ij), &
1404 k_start, k_end )
1405 END IF
1406 #endif
1407
1408 CALL relax_bdy_dry ( config_flags, &
1409 grid%em_u_save, grid%em_v_save, ph_save, grid%em_t_save, &
1410 w_save, mu_tend, &
1411 grid%em_ru, grid%em_rv, grid%em_ph_2, grid%em_t_2, &
1412 grid%em_w_2, grid%em_mu_2, grid%em_mut, &
1413 grid%em_u_b, grid%em_v_b, grid%em_ph_b, grid%em_t_b, grid%em_w_b, &
1414 grid%em_mu_b, &
1415 grid%em_u_bt, grid%em_v_bt, grid%em_ph_bt, grid%em_t_bt, &
1416 grid%em_w_bt, grid%em_mu_bt, &
1417 config_flags%spec_bdy_width, grid%spec_zone, grid%relax_zone, &
1418 grid%dtbc, grid%fcx, grid%gcx, &
1419 ijds, ijde, &
1420 ids,ide, jds,jde, kds,kde, &
1421 ims,ime, jms,jme, kms,kme, &
1422 ips,ipe, jps,jpe, kps,kpe, &
1423 grid%i_start(ij), grid%i_end(ij), &
1424 grid%j_start(ij), grid%j_end(ij), &
1425 k_start, k_end )
1426
1427
1428 ENDIF
1429 #ifdef WRF_PLUS
1430 if(TEST_rk_addtend_dry) THEN
1431 CALL t_rk_addtend_dry( grid%em_ru_tend, grid%em_rv_tend, rw_tend, ph_tend, t_tend, &
1432 ru_tendf, rv_tendf, rw_tendf, ph_tendf, t_tendf, &
1433 grid%em_u_save, grid%em_v_save, w_save, ph_save, grid%em_t_save, &
1434 mu_tend, mu_tendf, rk_step, &
1435 grid%h_diabatic, grid%em_mut, grid%msft, grid%msfu, grid%msfv, &
1436 ids,ide, jds,jde, kds,kde, &
1437 ims,ime, jms,jme, kms,kme, &
1438 ips,ipe, jps,jpe, kps,kpe, &
1439 grid%i_start(ij), grid%i_end(ij), &
1440 grid%j_start(ij), grid%j_end(ij), &
1441 k_start, k_end )
1442 END IF
1443 #endif
1444
1445 CALL rk_addtend_dry( grid%em_ru_tend, grid%em_rv_tend, rw_tend, ph_tend, t_tend, &
1446 ru_tendf, rv_tendf, rw_tendf, ph_tendf, t_tendf, &
1447 grid%em_u_save, grid%em_v_save, w_save, ph_save, grid%em_t_save, &
1448 mu_tend, mu_tendf, rk_step, &
1449 grid%h_diabatic, grid%em_mut, grid%msft, grid%msfu, grid%msfv, &
1450 ids,ide, jds,jde, kds,kde, &
1451 ims,ime, jms,jme, kms,kme, &
1452 ips,ipe, jps,jpe, kps,kpe, &
1453 grid%i_start(ij), grid%i_end(ij), &
1454 grid%j_start(ij), grid%j_end(ij), &
1455 k_start, k_end )
1456
1457       IF( config_flags%specified .or. config_flags%nested ) THEN
!      NOTE(review): earlier test-call wrappers in this routine are guarded by
!      #ifdef WRF_PLUS, but this one (and the small-step test wrappers below)
!      use #ifdef WRFVAR_TESTING -- confirm the intended macro, otherwise the
!      TEST_spec_bdy_dry branch may never be compiled.
1458 #ifdef WRFVAR_TESTING
1459 if(TEST_spec_bdy_dry) THEN
1460 CALL t_spec_bdy_dry ( config_flags, &
1461 grid%em_ru_tend, grid%em_rv_tend, ph_tend, t_tend, &
1462 rw_tend, mu_tend, &
1463 grid%em_u_b, grid%em_v_b, grid%em_ph_b, grid%em_t_b, &
1464 grid%em_w_b, grid%em_mu_b, &
1465 grid%em_u_bt, grid%em_v_bt, grid%em_ph_bt, grid%em_t_bt, &
1466 grid%em_w_bt, grid%em_mu_bt, &
1467 config_flags%spec_bdy_width, grid%spec_zone, &
1468 ijds, ijde, & ! min/max(id,jd)
1469 ids,ide, jds,jde, kds,kde, & ! domain dims
1470 ims,ime, jms,jme, kms,kme, & ! memory dims
1471 ips,ipe, jps,jpe, kps,kpe, & ! patch dims
1472 grid%i_start(ij), grid%i_end(ij), &
1473 grid%j_start(ij), grid%j_end(ij), &
1474 k_start, k_end )
1475 END IF
1476 #endif
1477 CALL spec_bdy_dry ( config_flags, &
1478 grid%em_ru_tend, grid%em_rv_tend, ph_tend, t_tend, &
1479 rw_tend, mu_tend, &
1480 grid%em_u_b, grid%em_v_b, grid%em_ph_b, grid%em_t_b, &
1481 grid%em_w_b, grid%em_mu_b, &
1482 grid%em_u_bt, grid%em_v_bt, grid%em_ph_bt, grid%em_t_bt, &
1483 grid%em_w_bt, grid%em_mu_bt, &
1484 config_flags%spec_bdy_width, grid%spec_zone, &
1485 ijds, ijde, & ! min/max(id,jd)
1486 ids,ide, jds,jde, kds,kde, & ! domain dims
1487 ims,ime, jms,jme, kms,kme, & ! memory dims
1488 ips,ipe, jps,jpe, kps,kpe, & ! patch dims
1489 grid%i_start(ij), grid%i_end(ij), &
1490 grid%j_start(ij), grid%j_end(ij), &
1491 k_start, k_end )
1492
1493 ENDIF
1494
1495 END DO
1496 !$OMP END PARALLEL DO
1497 BENCH_END(relax_bdy_dry_tim)
1498
1499 !<DESCRIPTION>
1500 !<pre>
1501 ! (3) Small (acoustic, sound) steps.
1502 !
1503 ! Several acoustic steps are taken each RK pass. A small step
1504 ! sequence begins with calculating perturbation variables
1505 ! and coupling them to the column dry-air-mass mu
1506 ! (call to small_step_prep). This is followed by computing
1507 ! coefficients for the vertically implicit part of the
1508 ! small timestep (call to calc_coef_w).
1509 !
1510 ! The small steps are taken
1511 ! in the named loop "small_steps:". In the small_steps loop, first
1512 ! the horizontal momentum (u and v) are advanced (call to advance_uv),
1513 ! next mu and theta are advanced (call to advance_mu_t) followed by
1514 ! advancing w and the geopotential (call to advance_w). Diagnostic
1515 ! values for pressure and inverse density are updated at the end of
1516 ! each small_step.
1517 !
1518 ! The small-step section ends with the change of the perturbation variables
1519 ! back to full variables (call to small_step_finish).
1520 !</pre>
1521 !</DESCRIPTION>
1522
1523 BENCH_START(small_step_prep_tim)
1524 !$OMP PARALLEL DO &
1525 !$OMP PRIVATE ( ij )
1526
1527 DO ij = 1 , grid%num_tiles
1528
1529 ! Calculate coefficients for the vertically implicit acoustic/gravity wave
1530 ! integration. We only need calculate these for the first pass through -
1531 ! the predictor step. They are reused as is for the corrector step.
1532 ! For third-order RK, we need to recompute these after the first
1533 ! predictor because we may have changed the small timestep -> grid%dts.
1534
1535 CALL wrf_debug ( 200 , ' call calc_coef_w' )
1536
1537 #ifdef WRFVAR_TESTING
1538 if(TEST_small_step_prep) THEN
1539 CALL t_small_step_prep( grid%em_u_1,grid%em_u_2,grid%em_v_1,grid%em_v_2,grid%em_w_1,grid%em_w_2, &
1540 grid%em_t_1,grid%em_t_2,grid%em_ph_1,grid%em_ph_2, &
1541 grid%em_mub, grid%em_mu_1, grid%em_mu_2, &
1542 grid%em_muu, muus, grid%em_muv, muvs, &
1543 grid%em_mut, grid%em_muts, grid%em_mudf, &
1544 grid%em_u_save, grid%em_v_save, w_save, &
1545 grid%em_t_save, ph_save, mu_save, &
1546 grid%em_ww, ww1, &
1547 grid%em_dnw, c2a, grid%em_pb, grid%em_p, grid%em_alt, &
1548 grid%msfu, grid%msfv, grid%msft, &
1549 rk_step, &
1550 ids, ide, jds, jde, kds, kde, &
1551 ims, ime, jms, jme, kms, kme, &
1552 grid%i_start(ij), grid%i_end(ij), &
1553 grid%j_start(ij), grid%j_end(ij), &
1554 k_start , k_end )
1555 END IF
1556 #endif
1557
1558 CALL small_step_prep( grid%em_u_1,grid%em_u_2,grid%em_v_1,grid%em_v_2,grid%em_w_1,grid%em_w_2, &
1559 grid%em_t_1,grid%em_t_2,grid%em_ph_1,grid%em_ph_2, &
1560 grid%em_mub, grid%em_mu_1, grid%em_mu_2, &
1561 grid%em_muu, muus, grid%em_muv, muvs, &
1562 grid%em_mut, grid%em_muts, grid%em_mudf, &
1563 grid%em_u_save, grid%em_v_save, w_save, &
1564 grid%em_t_save, ph_save, mu_save, &
1565 grid%em_ww, ww1, &
1566 grid%em_dnw, c2a, grid%em_pb, grid%em_p, grid%em_alt, &
1567 grid%msfu, grid%msfv, grid%msft, &
1568 rk_step, &
1569 ids, ide, jds, jde, kds, kde, &
1570 ims, ime, jms, jme, kms, kme, &
1571 grid%i_start(ij), grid%i_end(ij), &
1572 grid%j_start(ij), grid%j_end(ij), &
1573 k_start , k_end )
1574 #ifdef WRFVAR_TESTING
1575 if(TEST_calc_p_rho) THEN
1576 CALL t_calc_p_rho( grid%em_al, grid%em_p, grid%em_ph_2, &
1577 grid%em_alt, grid%em_t_2, grid%em_t_save, c2a, pm1, &
1578 grid%em_mu_2, grid%em_muts, grid%em_znu, t0, &
1579 grid%em_rdnw, grid%em_dnw, grid%smdiv, &
1580 config_flags%non_hydrostatic, 0, &
1581 ids, ide, jds, jde, kds, kde, &
1582 ims, ime, jms, jme, kms, kme, &
1583 grid%i_start(ij), grid%i_end(ij), &
1584 grid%j_start(ij), grid%j_end(ij), &
1585 k_start , k_end )
1586 END IF
1587 #endif
1588 CALL calc_p_rho( grid%em_al, grid%em_p, grid%em_ph_2, &
1589 grid%em_alt, grid%em_t_2, grid%em_t_save, c2a, pm1, &
1590 grid%em_mu_2, grid%em_muts, grid%em_znu, t0, &
1591 grid%em_rdnw, grid%em_dnw, grid%smdiv, &
1592 config_flags%non_hydrostatic, 0, &
1593 ids, ide, jds, jde, kds, kde, &
1594 ims, ime, jms, jme, kms, kme, &
1595 grid%i_start(ij), grid%i_end(ij), &
1596 grid%j_start(ij), grid%j_end(ij), &
1597 k_start , k_end )
1598
1599 IF (config_flags%non_hydrostatic) THEN
1600 #ifdef WRFVAR_TESTING
1601 if(TEST_calc_coef_w) THEN
1602 CALL t_calc_coef_w( a,alpha,gamma, &
1603 grid%em_mut, cqw, &
1604 grid%em_rdn, grid%em_rdnw, c2a, &
1605 dts_rk, g, grid%epssm, &
1606 ids, ide, jds, jde, kds, kde, &
1607 ims, ime, jms, jme, kms, kme, &
1608 grid%i_start(ij), grid%i_end(ij), &
1609 grid%j_start(ij), grid%j_end(ij), &
1610 k_start , k_end )
1611 END IF
1612 #endif
1613 CALL calc_coef_w( a,alpha,gamma, &
1614 grid%em_mut, cqw, &
1615 grid%em_rdn, grid%em_rdnw, c2a, &
1616 dts_rk, g, grid%epssm, &
1617 ids, ide, jds, jde, kds, kde, &
1618 ims, ime, jms, jme, kms, kme, &
1619 grid%i_start(ij), grid%i_end(ij), &
1620 grid%j_start(ij), grid%j_end(ij), &
1621 k_start , k_end )
1622
1623 END IF
1624
1625
1626 ENDDO
1627 !$OMP END PARALLEL DO
1628 BENCH_END(small_step_prep_tim)
1629
1630
1631 #ifdef DM_PARALLEL
1632 !-----------------------------------------------------------------------
1633 ! Stencils for patch communications (WCS, 29 June 2001)
1634 ! Note: the small size of this halo exchange reflects the
1635 ! fact that we are carrying the uncoupled variables
1636 ! as state variables in the mass coordinate model, as
1637 ! opposed to the coupled variables as in the height
1638 ! coordinate model.
1639 !
1640 ! * * * * *
1641 ! * * * * * * * * *
1642 ! * + * * + * * * + * *
1643 ! * * * * * * * * *
1644 ! * * * * *
1645 !
1646 ! 3D variables - note staggering! grid%em_ph_2(grid%em_z), grid%em_u_save(X), grid%em_v_save(Y)
1647 !
1648 !j grid%em_ph_2 x
1649 !j grid%em_al x
1650 !j grid%em_p x
1651 !j grid%em_t_1 x
1652 !j grid%em_t_save x
1653 !j grid%em_u_save x
1654 !j grid%em_v_save x
1655 !
1656 ! the following are 2D (xy) variables
1657 !
1658 !j grid%em_mu_1 x
1659 !j grid%em_mu_2 x
1660 !j grid%em_mudf x
1661 !--------------------------------------------------------------
1662 # include "HALO_EM_B.inc"
1663 # include "PERIOD_BDY_EM_B.inc"
1664 #endif
1665
1666 BENCH_START(set_phys_bc2_tim)
1667 !$OMP PARALLEL DO &
1668 !$OMP PRIVATE ( ij )
1669
1670 DO ij = 1 , grid%num_tiles
1671
1672 CALL set_physical_bc3d( grid%em_ru_tend, 'u', config_flags, &
1673 ids, ide, jds, jde, kds, kde, &
1674 ims, ime, jms, jme, kms, kme, &
1675 ips, ipe, jps, jpe, kps, kpe, &
1676 grid%i_start(ij), grid%i_end(ij), &
1677 grid%j_start(ij), grid%j_end(ij), &
1678 k_start , k_end )
1679
1680 CALL set_physical_bc3d( grid%em_rv_tend, 'v', config_flags, &
1681 ids, ide, jds, jde, kds, kde, &
1682 ims, ime, jms, jme, kms, kme, &
1683 ips, ipe, jps, jpe, kps, kpe, &
1684 grid%i_start(ij), grid%i_end(ij), &
1685 grid%j_start(ij), grid%j_end(ij), &
1686 k_start , k_end )
1687
1688 CALL set_physical_bc3d( grid%em_ph_2, 'w', config_flags, &
1689 ids, ide, jds, jde, kds, kde, &
1690 ims, ime, jms, jme, kms, kme, &
1691 ips, ipe, jps, jpe, kps, kpe, &
1692 grid%i_start(ij), grid%i_end(ij), &
1693 grid%j_start(ij), grid%j_end(ij), &
1694 k_start , k_end )
1695
!      NOTE(review): the variable-type flag 'grid%em_p' in this call and the two
!      calls below (for grid%em_p and grid%em_t_1) looks like an over-eager
!      "grid%em_" text substitution applied to the character literal 'p' --
!      verify against the variable codes set_physical_bc3d accepts before
!      relying on these boundary updates.
1696       CALL set_physical_bc3d( grid%em_al, 'grid%em_p', config_flags,            &
1697 ids, ide, jds, jde, kds, kde, &
1698 ims, ime, jms, jme, kms, kme, &
1699 ips, ipe, jps, jpe, kps, kpe, &
1700 grid%i_start(ij), grid%i_end(ij), &
1701 grid%j_start(ij), grid%j_end(ij), &
1702 k_start , k_end )
1703
1704 CALL set_physical_bc3d( grid%em_p, 'grid%em_p', config_flags, &
1705 ids, ide, jds, jde, kds, kde, &
1706 ims, ime, jms, jme, kms, kme, &
1707 ips, ipe, jps, jpe, kps, kpe, &
1708 grid%i_start(ij), grid%i_end(ij), &
1709 grid%j_start(ij), grid%j_end(ij), &
1710 k_start , k_end )
1711
1712 CALL set_physical_bc3d( grid%em_t_1, 'grid%em_p', config_flags, &
1713 ids, ide, jds, jde, kds, kde, &
1714 ims, ime, jms, jme, kms, kme, &
1715 ips, ipe, jps, jpe, kps, kpe, &
1716 grid%i_start(ij), grid%i_end(ij), &
1717 grid%j_start(ij), grid%j_end(ij), &
1718 k_start , k_end )
1719
1720 CALL set_physical_bc3d( grid%em_t_save, 't', config_flags, &
1721 ids, ide, jds, jde, kds, kde, &
1722 ims, ime, jms, jme, kms, kme, &
1723 ips, ipe, jps, jpe, kps, kpe, &
1724 grid%i_start(ij), grid%i_end(ij), &
1725 grid%j_start(ij), grid%j_end(ij), &
1726 k_start , k_end )
1727
1728 CALL set_physical_bc2d( grid%em_mu_1, 't', config_flags, &
1729 ids, ide, jds, jde, &
1730 ims, ime, jms, jme, &
1731 ips, ipe, jps, jpe, &
1732 grid%i_start(ij), grid%i_end(ij), &
1733 grid%j_start(ij), grid%j_end(ij) )
1734
1735 CALL set_physical_bc2d( grid%em_mu_2, 't', config_flags, &
1736 ids, ide, jds, jde, &
1737 ims, ime, jms, jme, &
1738 ips, ipe, jps, jpe, &
1739 grid%i_start(ij), grid%i_end(ij), &
1740 grid%j_start(ij), grid%j_end(ij) )
1741
1742 CALL set_physical_bc2d( grid%em_mudf, 't', config_flags, &
1743 ids, ide, jds, jde, &
1744 ims, ime, jms, jme, &
1745 ips, ipe, jps, jpe, &
1746 grid%i_start(ij), grid%i_end(ij), &
1747 grid%j_start(ij), grid%j_end(ij) )
1748
1749 END DO
1750 !$OMP END PARALLEL DO
1751 BENCH_END(set_phys_bc2_tim)
1752
1753 small_steps : DO iteration = 1 , number_of_small_timesteps
1754
1755 ! Boundary condition time (or communication time).
1756
1757 #ifdef DM_PARALLEL
1758 # include "PERIOD_BDY_EM_B.inc"
1759 #endif
1760
1761
1762 !$OMP PARALLEL DO &
1763 !$OMP PRIVATE ( ij )
1764
1765 DO ij = 1 , grid%num_tiles
1766
1767 BENCH_START(advance_uv_tim)
1768 #ifdef WRFVAR_TESTING
1769 if(TEST_advance_uv) THEN
1770 CALL t_advance_uv ( grid%em_u_2, grid%em_ru_tend, grid%em_v_2, grid%em_rv_tend, &
1771 grid%em_p, grid%em_pb, &
1772 grid%em_ph_2, grid%em_php, grid%em_alt, grid%em_al, grid%em_mu_2, &
1773 grid%em_muu, cqu, grid%em_muv, cqv, grid%em_mudf, &
1774 grid%rdx, grid%rdy, dts_rk, &
1775 grid%cf1, grid%cf2, grid%cf3, grid%em_fnm, grid%em_fnp, &
1776 grid%emdiv, &
1777 grid%em_rdnw, config_flags,grid%spec_zone, &
1778 config_flags%non_hydrostatic, &
1779 ids, ide, jds, jde, kds, kde, &
1780 ims, ime, jms, jme, kms, kme, &
1781 grid%i_start(ij), grid%i_end(ij), &
1782 grid%j_start(ij), grid%j_end(ij), &
1783 k_start , k_end )
1784 END IF
1785 #endif
1786 CALL advance_uv ( grid%em_u_2, grid%em_ru_tend, grid%em_v_2, grid%em_rv_tend, &
1787 grid%em_p, grid%em_pb, &
1788 grid%em_ph_2, grid%em_php, grid%em_alt, grid%em_al, grid%em_mu_2, &
1789 grid%em_muu, cqu, grid%em_muv, cqv, grid%em_mudf, &
1790 grid%rdx, grid%rdy, dts_rk, &
1791 grid%cf1, grid%cf2, grid%cf3, grid%em_fnm, grid%em_fnp, &
1792 grid%emdiv, &
1793 grid%em_rdnw, config_flags,grid%spec_zone, &
1794 config_flags%non_hydrostatic, &
1795 ids, ide, jds, jde, kds, kde, &
1796 ims, ime, jms, jme, kms, kme, &
1797 grid%i_start(ij), grid%i_end(ij), &
1798 grid%j_start(ij), grid%j_end(ij), &
1799 k_start , k_end )
1800 BENCH_END(advance_uv_tim)
1801
1802 BENCH_START(spec_bdy_uv_tim)
1803 IF( config_flags%specified .or. config_flags%nested ) THEN
1804 #ifdef WRFVAR_TESTING
1805 if(TEST_spec_bdyupdate) THEN
1806 CALL t_spec_bdyupdate(grid%em_u_2, grid%em_ru_tend, dts_rk, &
1807 'u' , config_flags, &
1808 grid%spec_zone, &
1809 ids,ide, jds,jde, kds,kde, & ! domain dims
1810 ims,ime, jms,jme, kms,kme, & ! memory dims
1811 ips,ipe, jps,jpe, kps,kpe, & ! patch dims
1812 grid%i_start(ij), grid%i_end(ij), &
1813 grid%j_start(ij), grid%j_end(ij), &
1814 k_start , k_end )
1815 END IF
1816 #endif
1817 CALL spec_bdyupdate(grid%em_u_2, grid%em_ru_tend, dts_rk, &
1818 'u' , config_flags, &
1819 grid%spec_zone, &
1820 ids,ide, jds,jde, kds,kde, & ! domain dims
1821 ims,ime, jms,jme, kms,kme, & ! memory dims
1822 ips,ipe, jps,jpe, kps,kpe, & ! patch dims
1823 grid%i_start(ij), grid%i_end(ij), &
1824 grid%j_start(ij), grid%j_end(ij), &
1825 k_start , k_end )
1826
1827 CALL spec_bdyupdate(grid%em_v_2, grid%em_rv_tend, dts_rk, &
1828 'v' , config_flags, &
1829 grid%spec_zone, &
1830 ids,ide, jds,jde, kds,kde, & ! domain dims
1831 ims,ime, jms,jme, kms,kme, & ! memory dims
1832 ips,ipe, jps,jpe, kps,kpe, & ! patch dims
1833 grid%i_start(ij), grid%i_end(ij), &
1834 grid%j_start(ij), grid%j_end(ij), &
1835 k_start , k_end )
1836
1837 ENDIF
1838 BENCH_END(spec_bdy_uv_tim)
1839
1840 END DO
1841 !$OMP END PARALLEL DO
1842
1843 #ifdef DM_PARALLEL
1844 !
1845 ! Stencils for patch communications (WCS, 29 June 2001)
1846 !
1847 ! * *
1848 ! * + * * + * +
1849 ! * *
1850 !
1851 ! grid%em_u_2 x
1852 ! grid%em_v_2 x
1853 !
1854 # include "HALO_EM_C.inc"
1855 #endif
1856
1857 !$OMP PARALLEL DO &
1858 !$OMP PRIVATE ( ij )
1859
1860 DO ij = 1 , grid%num_tiles
1861
1862 ! advance the mass in the column, theta, and calculate grid%em_ww
1863
1864 #ifdef WRFVAR_TESTING
1865 if(TEST_advance_mu_t) THEN
1866 CALL t_advance_mu_t( grid%em_ww, ww1, grid%em_u_2, grid%em_u_save, grid%em_v_2, grid%em_v_save, &
1867 grid%em_mu_2, grid%em_mut, muave, grid%em_muts, grid%em_muu, grid%em_muv, &
1868 grid%em_mudf, grid%em_ru_m, grid%em_rv_m, grid%em_ww_m, &
1869 grid%em_t_2, grid%em_t_save, t_2save, t_tend, &
1870 mu_tend, &
1871 grid%rdx, grid%rdy, dts_rk, grid%epssm, &
1872 grid%em_dnw, grid%em_fnm, grid%em_fnp, grid%em_rdnw, &
1873 grid%msfu, grid%msfv, grid%msft, &
1874 iteration, config_flags, &
1875 ids, ide, jds, jde, kds, kde, &
1876 ims, ime, jms, jme, kms, kme, &
1877 grid%i_start(ij), grid%i_end(ij), &
1878 grid%j_start(ij), grid%j_end(ij), &
1879 k_start , k_end )
1880 END IF
1881 #endif
1882 BENCH_START(advance_mu_t_tim)
1883 CALL advance_mu_t( grid%em_ww, ww1, grid%em_u_2, grid%em_u_save, grid%em_v_2, grid%em_v_save, &
1884 grid%em_mu_2, grid%em_mut, muave, grid%em_muts, grid%em_muu, grid%em_muv, &
1885 grid%em_mudf, grid%em_ru_m, grid%em_rv_m, grid%em_ww_m, &
1886 grid%em_t_2, grid%em_t_save, t_2save, t_tend, &
1887 mu_tend, &
1888 grid%rdx, grid%rdy, dts_rk, grid%epssm, &
1889 grid%em_dnw, grid%em_fnm, grid%em_fnp, grid%em_rdnw, &
1890 grid%msfu, grid%msfv, grid%msft, &
1891 iteration, config_flags, &
1892 ids, ide, jds, jde, kds, kde, &
1893 ims, ime, jms, jme, kms, kme, &
1894 grid%i_start(ij), grid%i_end(ij), &
1895 grid%j_start(ij), grid%j_end(ij), &
1896 k_start , k_end )
1897 BENCH_END(advance_mu_t_tim)
1898
1899 BENCH_START(spec_bdy_t_tim)
1900 IF( config_flags%specified .or. config_flags%nested ) THEN
1901
1902 CALL spec_bdyupdate(grid%em_t_2, t_tend, dts_rk, &
1903 't' , config_flags, &
1904 grid%spec_zone, &
1905 ids,ide, jds,jde, kds,kde, & ! domain dims
1906 ims,ime, jms,jme, kms,kme, & ! memory dims
1907 ips,ipe, jps,jpe, kps,kpe, & ! patch dims
1908 grid%i_start(ij), grid%i_end(ij), &
1909 grid%j_start(ij), grid%j_end(ij), &
1910 k_start , k_end )
1911
1912 CALL spec_bdyupdate(grid%em_mu_2, mu_tend, dts_rk, &
1913 'm' , config_flags, &
1914 grid%spec_zone, &
1915 ids,ide, jds,jde, 1 ,1 , & ! domain dims
1916 ims,ime, jms,jme, 1 ,1 , & ! memory dims
1917 ips,ipe, jps,jpe, 1 ,1 , & ! patch dims
1918 grid%i_start(ij), grid%i_end(ij), &
1919 grid%j_start(ij), grid%j_end(ij), &
1920 1 , 1 )
1921
1922 CALL spec_bdyupdate(grid%em_muts, mu_tend, dts_rk, &
1923 'm' , config_flags, &
1924 grid%spec_zone, &
1925 ids,ide, jds,jde, 1 ,1 , & ! domain dims
1926 ims,ime, jms,jme, 1 ,1 , & ! memory dims
1927 ips,ipe, jps,jpe, 1 ,1 , & ! patch dims
1928 grid%i_start(ij), grid%i_end(ij), &
1929 grid%j_start(ij), grid%j_end(ij), &
1930 1 , 1 )
1931 ENDIF
1932 BENCH_END(spec_bdy_t_tim)
1933
 1934 ! sumflux accumulates the time-averaged mass flux
 1935 ! (time averaged over the acoustic steps) for use
 1936 ! in the scalar advection (flux divergence). Using
 1937 ! time averaged values gives us exact scalar conservation.
1938
1939 #ifdef WRF_PLUS
1940 if(TEST_sumflux) THEN
1941 CALL t_sumflux ( grid%em_u_2, grid%em_v_2, grid%em_ww, &
1942 grid%em_u_save, grid%em_v_save, ww1, &
1943 grid%em_muu, grid%em_muv, &
1944 grid%em_ru_m, grid%em_rv_m, grid%em_ww_m, grid%epssm, &
1945 grid%msfu, grid%msfv, &
1946 iteration, number_of_small_timesteps, &
1947 ids, ide, jds, jde, kds, kde, &
1948 ims, ime, jms, jme, kms, kme, &
1949 grid%i_start(ij), grid%i_end(ij), &
1950 grid%j_start(ij), grid%j_end(ij), &
1951 k_start , k_end )
1952 END IF
1953 #endif
1954 BENCH_START(sumflux_tim)
1955 CALL sumflux ( grid%em_u_2, grid%em_v_2, grid%em_ww, &
1956 grid%em_u_save, grid%em_v_save, ww1, &
1957 grid%em_muu, grid%em_muv, &
1958 grid%em_ru_m, grid%em_rv_m, grid%em_ww_m, grid%epssm, &
1959 grid%msfu, grid%msfv, &
1960 iteration, number_of_small_timesteps, &
1961 ids, ide, jds, jde, kds, kde, &
1962 ims, ime, jms, jme, kms, kme, &
1963 grid%i_start(ij), grid%i_end(ij), &
1964 grid%j_start(ij), grid%j_end(ij), &
1965 k_start , k_end )
1966 BENCH_END(sumflux_tim)
1967
1968 ! small (acoustic) step for the vertical momentum,
1969 ! density and coupled potential temperature.
1970
1971
1972 BENCH_START(advance_w_tim)
1973 IF ( config_flags%non_hydrostatic ) THEN
1974 #ifdef WRF_PLUS
1975 if(TEST_advance_w) THEN
1976 CALL t_advance_w( grid%em_w_2, rw_tend, grid%em_ww, grid%em_u_2, grid%em_v_2, &
1977 grid%em_mu_2, grid%em_mut, muave, grid%em_muts, &
1978 t_2save, grid%em_t_2, grid%em_t_save, &
1979 grid%em_ph_2, ph_save, grid%em_phb, ph_tend, &
1980 grid%ht, c2a, cqw, grid%em_alt, grid%em_alb, &
1981 a, alpha, gamma, &
1982 grid%rdx, grid%rdy, dts_rk, t0, grid%epssm, &
1983 grid%em_dnw, grid%em_fnm, grid%em_fnp, grid%em_rdnw, grid%em_rdn, &
1984 grid%cf1, grid%cf2, grid%cf3, grid%msft, &
1985 config_flags, &
1986 ids,ide, jds,jde, kds,kde, & ! domain dims
1987 ims,ime, jms,jme, kms,kme, & ! memory dims
1988 grid%i_start(ij), grid%i_end(ij), &
1989 grid%j_start(ij), grid%j_end(ij), &
1990 k_start , k_end )
1991 END IF
1992 #endif
1993 CALL advance_w( grid%em_w_2, rw_tend, grid%em_ww, grid%em_u_2, grid%em_v_2, &
1994 grid%em_mu_2, grid%em_mut, muave, grid%em_muts, &
1995 t_2save, grid%em_t_2, grid%em_t_save, &
1996 grid%em_ph_2, ph_save, grid%em_phb, ph_tend, &
1997 grid%ht, c2a, cqw, grid%em_alt, grid%em_alb, &
1998 a, alpha, gamma, &
1999 grid%rdx, grid%rdy, dts_rk, t0, grid%epssm, &
2000 grid%em_dnw, grid%em_fnm, grid%em_fnp, grid%em_rdnw, grid%em_rdn, &
2001 grid%cf1, grid%cf2, grid%cf3, grid%msft, &
2002 config_flags, &
2003 ids,ide, jds,jde, kds,kde, & ! domain dims
2004 ims,ime, jms,jme, kms,kme, & ! memory dims
2005 grid%i_start(ij), grid%i_end(ij), &
2006 grid%j_start(ij), grid%j_end(ij), &
2007 k_start , k_end )
2008 ENDIF
2009 BENCH_END(advance_w_tim)
2010
2011 IF( config_flags%specified .or. config_flags%nested ) THEN
2012
2013 BENCH_START(spec_bdynhyd_tim)
2014 IF (config_flags%non_hydrostatic) THEN
2015 #ifdef WRF_PLUS
2016 if(TEST_spec_bdyupdate_ph) THEN
2017 CALL t_spec_bdyupdate_ph( ph_save, grid%em_ph_2, ph_tend, mu_tend, grid%em_muts, dts_rk, &
2018 'h' , config_flags, &
2019 grid%spec_zone, &
2020 ids,ide, jds,jde, kds,kde, & ! domain dims
2021 ims,ime, jms,jme, kms,kme, & ! memory dims
2022 ips,ipe, jps,jpe, kps,kpe, & ! patch dims
2023 grid%i_start(ij), grid%i_end(ij), &
2024 grid%j_start(ij), grid%j_end(ij), &
2025 k_start , k_end )
2026 END IF
2027 #endif
2028 CALL spec_bdyupdate_ph( ph_save, grid%em_ph_2, ph_tend, mu_tend, grid%em_muts, dts_rk, &
2029 'h' , config_flags, &
2030 grid%spec_zone, &
2031 ids,ide, jds,jde, kds,kde, & ! domain dims
2032 ims,ime, jms,jme, kms,kme, & ! memory dims
2033 ips,ipe, jps,jpe, kps,kpe, & ! patch dims
2034 grid%i_start(ij), grid%i_end(ij), &
2035 grid%j_start(ij), grid%j_end(ij), &
2036 k_start , k_end )
2037 IF( config_flags%specified ) THEN
2038 CALL zero_grad_bdy ( grid%em_w_2, &
2039 'w' , config_flags, &
2040 grid%spec_zone, &
2041 ids,ide, jds,jde, kds,kde, & ! domain dims
2042 ims,ime, jms,jme, kms,kme, & ! memory dims
2043 ips,ipe, jps,jpe, kps,kpe, & ! patch dims
2044 grid%i_start(ij), grid%i_end(ij), &
2045 grid%j_start(ij), grid%j_end(ij), &
2046 k_start , k_end )
2047 ELSE
2048 CALL spec_bdyupdate ( grid%em_w_2, rw_tend, dts_rk, &
2049 'h' , config_flags, &
2050 grid%spec_zone, &
2051 ids,ide, jds,jde, kds,kde, & ! domain dims
2052 ims,ime, jms,jme, kms,kme, & ! memory dims
2053 ips,ipe, jps,jpe, kps,kpe, & ! patch dims
2054 grid%i_start(ij), grid%i_end(ij), &
2055 grid%j_start(ij), grid%j_end(ij), &
2056 k_start , k_end )
2057 ENDIF
2058 ENDIF
2059 BENCH_END(spec_bdynhyd_tim)
2060 ENDIF
2061
2062 BENCH_START(cald_p_rho_tim)
2063 CALL calc_p_rho( grid%em_al, grid%em_p, grid%em_ph_2, &
2064 grid%em_alt, grid%em_t_2, grid%em_t_save, c2a, pm1, &
2065 grid%em_mu_2, grid%em_muts, grid%em_znu, t0, &
2066 grid%em_rdnw, grid%em_dnw, grid%smdiv, &
2067 config_flags%non_hydrostatic, iteration, &
2068 ids, ide, jds, jde, kds, kde, &
2069 ims, ime, jms, jme, kms, kme, &
2070 grid%i_start(ij), grid%i_end(ij), &
2071 grid%j_start(ij), grid%j_end(ij), &
2072 k_start , k_end )
2073 BENCH_END(cald_p_rho_tim)
2074
2075 ENDDO
2076 !$OMP END PARALLEL DO
2077
2078 #ifdef DM_PARALLEL
2079 !
2080 ! Stencils for patch communications (WCS, 29 June 2001)
2081 !
2082 ! * *
2083 ! * + * * + * +
2084 ! * *
2085 !
2086 ! grid%em_ph_2 x
2087 ! grid%em_al x
2088 ! grid%em_p x
2089 !
2090 ! 2D variables (x,y)
2091 !
2092 ! grid%em_mu_2 x
2093 ! grid%em_muts x
2094 ! grid%em_mudf x
2095
2096 # include "HALO_EM_C2.inc"
2097 # include "PERIOD_BDY_EM_B3.inc"
2098 #endif
2099
2100 BENCH_START(phys_bc_tim)
2101 !$OMP PARALLEL DO &
2102 !$OMP PRIVATE ( ij )
2103
2104 DO ij = 1 , grid%num_tiles
2105
2106 ! boundary condition set for next small timestep
2107
2108 CALL set_physical_bc3d( grid%em_ph_2, 'w', config_flags, &
2109 ids, ide, jds, jde, kds, kde, &
2110 ims, ime, jms, jme, kms, kme, &
2111 ips, ipe, jps, jpe, kps, kpe, &
2112 grid%i_start(ij), grid%i_end(ij), &
2113 grid%j_start(ij), grid%j_end(ij), &
2114 k_start , k_end )
2115
2116 CALL set_physical_bc3d( grid%em_al, 'grid%em_p', config_flags, &
2117 ids, ide, jds, jde, kds, kde, &
2118 ims, ime, jms, jme, kms, kme, &
2119 ips, ipe, jps, jpe, kps, kpe, &
2120 grid%i_start(ij), grid%i_end(ij), &
2121 grid%j_start(ij), grid%j_end(ij), &
2122 k_start , k_end )
2123
2124 CALL set_physical_bc3d( grid%em_p, 'grid%em_p', config_flags, &
2125 ids, ide, jds, jde, kds, kde, &
2126 ims, ime, jms, jme, kms, kme, &
2127 ips, ipe, jps, jpe, kps, kpe, &
2128 grid%i_start(ij), grid%i_end(ij), &
2129 grid%j_start(ij), grid%j_end(ij), &
2130 k_start , k_end )
2131
2132 CALL set_physical_bc2d( grid%em_muts, 't', config_flags, &
2133 ids, ide, jds, jde, &
2134 ims, ime, jms, jme, &
2135 ips, ipe, jps, jpe, &
2136 grid%i_start(ij), grid%i_end(ij), &
2137 grid%j_start(ij), grid%j_end(ij) )
2138
2139 CALL set_physical_bc2d( grid%em_mu_2, 't', config_flags, &
2140 ids, ide, jds, jde, &
2141 ims, ime, jms, jme, &
2142 ips, ipe, jps, jpe, &
2143 grid%i_start(ij), grid%i_end(ij), &
2144 grid%j_start(ij), grid%j_end(ij) )
2145
2146 CALL set_physical_bc2d( grid%em_mudf, 't', config_flags, &
2147 ids, ide, jds, jde, &
2148 ims, ime, jms, jme, &
2149 ips, ipe, jps, jpe, &
2150 grid%i_start(ij), grid%i_end(ij), &
2151 grid%j_start(ij), grid%j_end(ij) )
2152
2153 END DO
2154 !$OMP END PARALLEL DO
2155 BENCH_END(phys_bc_tim)
2156
2157 END DO small_steps
2158
2159 !$OMP PARALLEL DO &
2160 !$OMP PRIVATE ( ij )
2161
2162 DO ij = 1 , grid%num_tiles
2163
2164 CALL wrf_debug ( 200 , ' call rk_small_finish' )
2165
2166 ! change time-perturbation variables back to
2167 ! full perturbation variables.
2168 ! first get updated mu at u and v points
2169
2170 #ifdef WRF_PLUS
2171 if(TEST_calc_mu_uv_1) THEN
2172 CALL t_calc_mu_uv_1 ( config_flags, &
2173 grid%em_muts, muus, muvs, &
2174 ids, ide, jds, jde, kds, kde, &
2175 ims, ime, jms, jme, kms, kme, &
2176 grid%i_start(ij), grid%i_end(ij), &
2177 grid%j_start(ij), grid%j_end(ij), &
2178 k_start , k_end )
2179 END IF
2180 #endif
2181
2182
2183 BENCH_START(calc_mu_uv_tim)
2184 CALL calc_mu_uv_1 ( config_flags, &
2185 grid%em_muts, muus, muvs, &
2186 ids, ide, jds, jde, kds, kde, &
2187 ims, ime, jms, jme, kms, kme, &
2188 grid%i_start(ij), grid%i_end(ij), &
2189 grid%j_start(ij), grid%j_end(ij), &
2190 k_start , k_end )
2191 BENCH_END(calc_mu_uv_tim)
2192 #ifdef WRF_PLUS
2193 if(TEST_small_step_finish) THEN
2194 CALL t_small_step_finish( grid%em_u_2, grid%em_u_1, grid%em_v_2, grid%em_v_1, grid%em_w_2, grid%em_w_1, &
2195 grid%em_t_2, grid%em_t_1, grid%em_ph_2, grid%em_ph_1, grid%em_ww, ww1, &
2196 grid%em_mu_2, grid%em_mu_1, &
2197 grid%em_mut, grid%em_muts, grid%em_muu, muus, grid%em_muv, muvs, &
2198 grid%em_u_save, grid%em_v_save, w_save, &
2199 grid%em_t_save, ph_save, mu_save, &
2200 grid%msfu, grid%msfv, grid%msft, &
2201 grid%h_diabatic, &
2202 number_of_small_timesteps,dts_rk, &
2203 rk_step, rk_order, &
2204 ids, ide, jds, jde, kds, kde, &
2205 ims, ime, jms, jme, kms, kme, &
2206 grid%i_start(ij), grid%i_end(ij), &
2207 grid%j_start(ij), grid%j_end(ij), &
2208 k_start , k_end )
2209 END IF
2210 #endif
2211 BENCH_START(small_step_finish_tim)
2212 CALL small_step_finish( grid%em_u_2, grid%em_u_1, grid%em_v_2, grid%em_v_1, grid%em_w_2, grid%em_w_1, &
2213 grid%em_t_2, grid%em_t_1, grid%em_ph_2, grid%em_ph_1, grid%em_ww, ww1, &
2214 grid%em_mu_2, grid%em_mu_1, &
2215 grid%em_mut, grid%em_muts, grid%em_muu, muus, grid%em_muv, muvs, &
2216 grid%em_u_save, grid%em_v_save, w_save, &
2217 grid%em_t_save, ph_save, mu_save, &
2218 grid%msfu, grid%msfv, grid%msft, &
2219 grid%h_diabatic, &
2220 number_of_small_timesteps,dts_rk, &
2221 rk_step, rk_order, &
2222 ids, ide, jds, jde, kds, kde, &
2223 ims, ime, jms, jme, kms, kme, &
2224 grid%i_start(ij), grid%i_end(ij), &
2225 grid%j_start(ij), grid%j_end(ij), &
2226 k_start , k_end )
2227 ! call to set ru_m, rv_m and ww_m b.c's for PD advection
2228
2229 IF (rk_step == 3) THEN
2230
2231 CALL set_physical_bc3d( grid%em_ru_m, 'u', config_flags, &
2232 ids, ide, jds, jde, kds, kde, &
2233 ims, ime, jms, jme, kms, kme, &
2234 ips, ipe, jps, jpe, kps, kpe, &
2235 grid%i_start(ij), grid%i_end(ij), &
2236 grid%j_start(ij), grid%j_end(ij), &
2237 k_start , k_end )
2238
2239 CALL set_physical_bc3d( grid%em_rv_m, 'v', config_flags, &
2240 ids, ide, jds, jde, kds, kde, &
2241 ims, ime, jms, jme, kms, kme, &
2242 ips, ipe, jps, jpe, kps, kpe, &
2243 grid%i_start(ij), grid%i_end(ij), &
2244 grid%j_start(ij), grid%j_end(ij), &
2245 k_start , k_end )
2246
2247 CALL set_physical_bc3d( grid%em_ww_m, 'w', config_flags, &
2248 ids, ide, jds, jde, kds, kde, &
2249 ims, ime, jms, jme, kms, kme, &
2250 ips, ipe, jps, jpe, kps, kpe, &
2251 grid%i_start(ij), grid%i_end(ij), &
2252 grid%j_start(ij), grid%j_end(ij), &
2253 k_start , k_end )
2254
2255 CALL set_physical_bc2d( grid%em_mut, 't', config_flags, &
2256 ids, ide, jds, jde, &
2257 ims, ime, jms, jme, &
2258 ips, ipe, jps, jpe, &
2259 grid%i_start(ij), grid%i_end(ij), &
2260 grid%j_start(ij), grid%j_end(ij) )
2261
2262 END IF
2263
2264 BENCH_END(small_step_finish_tim)
2265
2266 END DO
2267 !$OMP END PARALLEL DO
2268
2269 !-----------------------------------------------------------------------
2270 ! add in physics tendency first if positive definite advection is used.
2271 ! pd advection applies advective flux limiter on last runge-kutta step
2272 !-----------------------------------------------------------------------
2273 ! first moisture
2274
2275 IF (config_flags%pd_moist .and. (rk_step == rk_order)) THEN
2276
2277 !$OMP PARALLEL DO &
2278 !$OMP PRIVATE ( ij )
2279 DO ij = 1 , grid%num_tiles
2280 CALL wrf_debug ( 200 , ' call rk_update_scalar_pd' )
2281 do im = PARAM_FIRST_SCALAR, num_3d_m
2282 CALL rk_update_scalar_pd( im, im, &
2283 moist_old(ims,kms,jms,im), &
2284 moist_tend(ims,kms,jms,im), &
2285 grid%msft, &
2286 grid%em_mu_1, grid%em_mu_1, grid%em_mub, &
2287 rk_step, dt_rk, grid%spec_zone, &
2288 config_flags, &
2289 ids, ide, jds, jde, kds, kde, &
2290 ims, ime, jms, jme, kms, kme, &
2291 grid%i_start(ij), grid%i_end(ij), &
2292 grid%j_start(ij), grid%j_end(ij), &
2293 k_start , k_end )
2294 ENDDO
2295 END DO
2296 !$OMP END PARALLEL DO
2297
2298 !---------------------- positive definite bc call
2299
2300 !$OMP PARALLEL DO &
2301 !$OMP PRIVATE ( ij )
2302 DO ij = 1 , grid%num_tiles
2303 IF (num_3d_m >= PARAM_FIRST_SCALAR) THEN
2304 DO im = PARAM_FIRST_SCALAR , num_3d_m
2305 CALL set_physical_bc3d( moist_old(ims,kms,jms,im), 'p', config_flags, &
2306 ids, ide, jds, jde, kds, kde, &
2307 ims, ime, jms, jme, kms, kme, &
2308 ips, ipe, jps, jpe, kps, kpe, &
2309 grid%i_start(ij), grid%i_end(ij), &
2310 grid%j_start(ij), grid%j_end(ij), &
2311 k_start , k_end )
2312 END DO
2313 ENDIF
2314 END DO
2315 !$OMP END PARALLEL DO
2316
2317 #ifdef DM_PARALLEL
2318 if(config_flags%pd_moist) then
2319 #ifndef RSL
2320 IF ( config_flags%h_sca_adv_order <= 4 ) THEN
2321 # include "HALO_EM_MOIST_OLD_E_5.inc"
2322 ELSE IF ( config_flags%h_sca_adv_order <= 6 ) THEN
2323 # include "HALO_EM_MOIST_OLD_E_7.inc"
2324 ELSE
2325 WRITE(wrf_err_message,*)'solve_em: invalid h_sca_adv_order = ',config_flags%h_sca_adv_order
2326 CALL wrf_error_fatal(TRIM(wrf_err_message))
2327 ENDIF
2328 #else
2329 WRITE(wrf_err_message,*)'cannot use pd scheme with RSL - use RSL-LITE'
2330 CALL wrf_error_fatal(TRIM(wrf_err_message))
2331 #endif
2332 endif
2333 #endif
2334
2335 END IF ! end if for pd_moist
2336
2337 ! scalars
2338
2339 IF (config_flags%pd_scalar .and. (rk_step == rk_order)) THEN
2340
2341 !$OMP PARALLEL DO &
2342 !$OMP PRIVATE ( ij )
2343
2344 DO ij = 1 , grid%num_tiles
2345 CALL wrf_debug ( 200 , ' call rk_update_scalar_pd' )
2346 do im = PARAM_FIRST_SCALAR, num_3d_s
2347 CALL rk_update_scalar_pd( im, im, &
2348 scalar_old(ims,kms,jms,im), &
2349 scalar_tend(ims,kms,jms,im), &
2350 grid%msft, &
2351 grid%em_mu_1, grid%em_mu_1, grid%em_mub, &
2352 rk_step, dt_rk, grid%spec_zone, &
2353 config_flags, &
2354 ids, ide, jds, jde, kds, kde, &
2355 ims, ime, jms, jme, kms, kme, &
2356 grid%i_start(ij), grid%i_end(ij), &
2357 grid%j_start(ij), grid%j_end(ij), &
2358 k_start , k_end )
2359 ENDDO
2360 ENDDO
2361 !$OMP END PARALLEL DO
2362
2363 !---------------------- positive definite bc call
2364
2365 !$OMP PARALLEL DO &
2366 !$OMP PRIVATE ( ij )
2367
2368 DO ij = 1 , grid%num_tiles
2369 IF (num_3d_m >= PARAM_FIRST_SCALAR) THEN
2370 DO im = PARAM_FIRST_SCALAR , num_3d_s
2371 CALL set_physical_bc3d( scalar_old(ims,kms,jms,im), 'p', config_flags, &
2372 ids, ide, jds, jde, kds, kde, &
2373 ims, ime, jms, jme, kms, kme, &
2374 ips, ipe, jps, jpe, kps, kpe, &
2375 grid%i_start(ij), grid%i_end(ij), &
2376 grid%j_start(ij), grid%j_end(ij), &
2377 k_start , k_end )
2378 END DO
2379 ENDIF
2380 END DO
2381 !$OMP END PARALLEL DO
2382
2383 #ifdef DM_PARALLEL
2384 if(config_flags%pd_scalar) then
2385 #ifndef RSL
2386 IF ( config_flags%h_sca_adv_order <= 4 ) THEN
2387 # include "HALO_EM_SCALAR_OLD_E_5.inc"
2388 ELSE IF ( config_flags%h_sca_adv_order <= 6 ) THEN
2389 # include "HALO_EM_SCALAR_OLD_E_7.inc"
2390 ELSE
2391 WRITE(wrf_err_message,*)'solve_em: invalid h_sca_adv_order = ',config_flags%h_sca_adv_order
2392 CALL wrf_error_fatal(TRIM(wrf_err_message))
2393 ENDIF
2394 #else
2395 WRITE(wrf_err_message,*)'cannot use pd scheme with RSL - use RSL-LITE'
2396 CALL wrf_error_fatal(TRIM(wrf_err_message))
2397 #endif
2398 endif
2399 #endif
2400
2401 END IF ! end if for pd_scalar
2402
2403 ! chem
2404
2405 IF (config_flags%pd_chem .and. (rk_step == rk_order)) THEN
2406
2407 write(6,*) ' pd advection for chem '
2408
2409 !$OMP PARALLEL DO &
2410 !$OMP PRIVATE ( ij )
2411 DO ij = 1 , grid%num_tiles
2412 CALL wrf_debug ( 200 , ' call rk_update_scalar_pd' )
2413 do im = PARAM_FIRST_SCALAR, num_3d_c
2414 CALL rk_update_scalar_pd( im, im, &
2415 chem_old(ims,kms,jms,im), &
2416 chem_tend(ims,kms,jms,im), &
2417 grid%msft, &
2418 grid%em_mu_1, grid%em_mu_1, grid%em_mub, &
2419 rk_step, dt_rk, grid%spec_zone, &
2420 config_flags, &
2421 ids, ide, jds, jde, kds, kde, &
2422 ims, ime, jms, jme, kms, kme, &
2423 grid%i_start(ij), grid%i_end(ij), &
2424 grid%j_start(ij), grid%j_end(ij), &
2425 k_start , k_end )
2426 ENDDO
2427 END DO
2428 !$OMP END PARALLEL DO
2429
2430 !---------------------- positive definite bc call
2431
2432 !$OMP PARALLEL DO &
2433 !$OMP PRIVATE ( ij )
2434 DO ij = 1 , grid%num_tiles
2435 IF (num_3d_m >= PARAM_FIRST_SCALAR) THEN
2436 DO im = PARAM_FIRST_SCALAR , num_3d_c
2437 CALL set_physical_bc3d( chem_old(ims,kms,jms,im), 'p', config_flags, &
2438 ids, ide, jds, jde, kds, kde, &
2439 ims, ime, jms, jme, kms, kme, &
2440 ips, ipe, jps, jpe, kps, kpe, &
2441 grid%i_start(ij), grid%i_end(ij), &
2442 grid%j_start(ij), grid%j_end(ij), &
2443 k_start , k_end )
2444 END DO
2445 ENDIF
2446 END DO
2447 !$OMP END PARALLEL DO
2448
2449
2450 #ifdef DM_PARALLEL
2451 if(config_flags%pd_chem) then
2452 #ifndef RSL
2453 IF ( config_flags%h_sca_adv_order <= 4 ) THEN
2454 # include "HALO_EM_CHEM_OLD_E_5.inc"
2455 ELSE IF ( config_flags%h_sca_adv_order <= 6 ) THEN
2456 # include "HALO_EM_CHEM_OLD_E_7.inc"
2457 ELSE
2458 WRITE(wrf_err_message,*)'solve_em: invalid h_sca_adv_order = ',config_flags%h_sca_adv_order
2459 CALL wrf_error_fatal(TRIM(wrf_err_message))
2460 ENDIF
2461 #else
2462 WRITE(wrf_err_message,*)'cannot use pd scheme with RSL - use RSL-LITE'
2463 CALL wrf_error_fatal(TRIM(wrf_err_message))
2464 #endif
2465 endif
2466 #endif
2467
2468 END IF ! end if for pd_chem
2469
2470 ! tke
2471
2472 IF (config_flags%pd_tke .and. (rk_step == rk_order) &
2473 .and. (config_flags%km_opt .eq. 2) ) THEN
2474
2475 write(6,*) ' pd advection for tke '
2476
2477 !$OMP PARALLEL DO &
2478 !$OMP PRIVATE ( ij )
2479
2480 DO ij = 1 , grid%num_tiles
2481 CALL wrf_debug ( 200 , ' call rk_update_scalar_pd' )
2482 CALL rk_update_scalar_pd( 1, 1, &
2483 grid%em_tke_1, &
2484 tke_tend(ims,kms,jms), &
2485 grid%msft, &
2486 grid%em_mu_1, grid%em_mu_1, grid%em_mub, &
2487 rk_step, dt_rk, grid%spec_zone, &
2488 config_flags, &
2489 ids, ide, jds, jde, kds, kde, &
2490 ims, ime, jms, jme, kms, kme, &
2491 grid%i_start(ij), grid%i_end(ij), &
2492 grid%j_start(ij), grid%j_end(ij), &
2493 k_start , k_end )
2494 END DO
2495 !$OMP END PARALLEL DO
2496
2497 !---------------------- positive definite bc call
2498
2499 !$OMP PARALLEL DO &
2500 !$OMP PRIVATE ( ij )
2501 DO ij = 1 , grid%num_tiles
2502 CALL set_physical_bc3d( grid%em_tke_1, 'p', config_flags, &
2503 ids, ide, jds, jde, kds, kde, &
2504 ims, ime, jms, jme, kms, kme, &
2505 ips, ipe, jps, jpe, kps, kpe, &
2506 grid%i_start(ij), grid%i_end(ij), &
2507 grid%j_start(ij), grid%j_end(ij), &
2508 k_start , k_end )
2509 END DO
2510 !$OMP END PARALLEL DO
2511
2512 !--- end of positive definite physics tendency update
2513
2514 #ifdef DM_PARALLEL
2515 IF ( config_flags%h_sca_adv_order <= 4 ) THEN
2516 # include "HALO_EM_TKE_OLD_E_5.inc"
2517 ELSE IF ( config_flags%h_sca_adv_order <= 6 ) THEN
2518 # include "HALO_EM_TKE_OLD_E_7.inc"
2519 ELSE
2520 WRITE(wrf_err_message,*)'solve_em: invalid h_sca_adv_order = ',config_flags%h_sca_adv_order
2521 CALL wrf_error_fatal(TRIM(wrf_err_message))
2522 ENDIF
2523 #endif
2524
2525 END IF ! end if for pd_tke
2526
2527 #ifdef DM_PARALLEL
2528 !
2529 ! Stencils for patch communications (WCS, 29 June 2001)
2530 !
2531 !
2532 ! grid%em_ru_m x
2533 ! grid%em_rv_m x
2534 ! grid%em_ww_m x
2535 ! grid%em_mut x
2536 !
2537 !--------------------------------------------------------------
2538
2539 # include "HALO_EM_D.inc"
2540 #endif
2541
2542 !<DESCRIPTION>
2543 !<pre>
2544 ! (4) Still within the RK loop, the scalar variables are advanced.
2545 !
2546 ! For the moist and chem variables, each one is advanced
2547 ! individually, using named loops "moist_variable_loop:"
2548 ! and "chem_variable_loop:". Each RK substep begins by
2549 ! calculating the advective tendency, and, for the first RK step,
2550 ! 3D mixing (calling rk_scalar_tend) followed by an update
2551 ! of the scalar (calling rk_scalar_update).
2552 !</pre>
2553 !</DESCRIPTION>
2554
2555
2556 moist_scalar_advance: IF (num_3d_m >= PARAM_FIRST_SCALAR ) THEN
2557
2558 moist_variable_loop: do im = PARAM_FIRST_SCALAR, num_3d_m
2559
2560 if (grid%adv_moist_cond .or. im==p_qv ) then
2561
2562 !$OMP PARALLEL DO &
2563 !$OMP PRIVATE ( ij )
2564
2565 moist_tile_loop_1: DO ij = 1 , grid%num_tiles
2566
2567 CALL wrf_debug ( 200 , ' call rk_scalar_tend' )
2568
2569 #ifdef WRF_PLUS
2570 if(TEST_rk_scalar_tend) THEN
2571 CALL t_rk_scalar_tend ( im, im, config_flags, &
2572 rk_step, dt_rk, &
2573 grid%em_ru_m, grid%em_rv_m, grid%em_ww_m, &
2574 grid%em_mut, grid%em_alt, &
2575 moist(ims,kms,jms,im), &
2576 moist_tend(ims,kms,jms,im), &
2577 advect_tend,grid%rqvften, &
2578 grid%qv_base, .true., grid%em_fnm, grid%em_fnp, &
2579 grid%msfu, grid%msfv, grid%msft, &
2580 grid%rdx, grid%rdy, grid%em_rdn, grid%em_rdnw, grid%khdif, &
2581 grid%kvdif, grid%xkmhd, &
2582 grid%diff_6th_opt, grid%diff_6th_rate, &
2583 ids, ide, jds, jde, kds, kde, &
2584 ims, ime, jms, jme, kms, kme, &
2585 grid%i_start(ij), grid%i_end(ij), &
2586 grid%j_start(ij), grid%j_end(ij), &
2587 k_start , k_end )
2588 END IF
2589 #endif
2590
2591 BENCH_START(rk_scalar_tend_tim)
2592
2593 CALL rk_scalar_tend ( im, im, config_flags, &
2594 rk_step, dt_rk, &
2595 grid%em_ru_m, grid%em_rv_m, grid%em_ww_m, &
2596 grid%em_mut, grid%em_mub, grid%em_mu_1, &
2597 grid%em_alt, &
2598 moist_old(ims,kms,jms,im), &
2599 moist(ims,kms,jms,im), &
2600 moist_tend(ims,kms,jms,im), &
2601 advect_tend,grid%rqvften, &
2602 grid%qv_base, .true., grid%em_fnm, grid%em_fnp, &
2603 grid%msfu, grid%msfv, grid%msft, &
2604 grid%rdx, grid%rdy, grid%em_rdn, grid%em_rdnw, grid%khdif, &
2605 grid%kvdif, grid%xkmhd, &
2606 grid%diff_6th_opt, grid%diff_6th_factor, &
2607 config_flags%pd_moist, &
2608 ids, ide, jds, jde, kds, kde, &
2609 ims, ime, jms, jme, kms, kme, &
2610 grid%i_start(ij), grid%i_end(ij), &
2611 grid%j_start(ij), grid%j_end(ij), &
2612 k_start , k_end )
2613
2614 BENCH_END(rk_scalar_tend_tim)
2615
2616 BENCH_START(rlx_bdy_scalar_tim)
2617 IF( ( config_flags%specified .or. config_flags%nested ) .and. rk_step == 1 ) THEN
2618 IF ( im .EQ. P_QV .OR. config_flags%nested ) THEN
2619 CALL relax_bdy_scalar ( moist_tend(ims,kms,jms,im), &
2620 moist(ims,kms,jms,im), grid%em_mut, &
2621 moist_b(1,1,1,1,im), &
2622 moist_bt(1,1,1,1,im), &
2623 config_flags%spec_bdy_width, grid%spec_zone, grid%relax_zone, &
2624 grid%dtbc, grid%fcx, grid%gcx, &
2625 config_flags, &
2626 ijds, ijde, & ! min/max(id,jd)
2627 ids,ide, jds,jde, kds,kde, & ! domain dims
2628 ims,ime, jms,jme, kms,kme, & ! memory dims
2629 ips,ipe, jps,jpe, kps,kpe, & ! patch dims
2630 grid%i_start(ij), grid%i_end(ij), &
2631 grid%j_start(ij), grid%j_end(ij), &
2632 k_start, k_end )
2633 #ifdef WRF_PLUS
2634 if(TEST_spec_bdy_scalar) THEN
2635 CALL t_spec_bdy_scalar ( moist_tend(ims,kms,jms,im), &
2636 moist_b(1,1,1,1,im), &
2637 moist_bt(1,1,1,1,im), &
2638 config_flags%spec_bdy_width, grid%spec_zone, &
2639 config_flags, &
2640 ijds, ijde, & ! min/max(id,jd)
2641 ids,ide, jds,jde, kds,kde, & ! domain dims
2642 ims,ime, jms,jme, kms,kme, & ! memory dims
2643 ips,ipe, jps,jpe, kps,kpe, & ! patch dims
2644 grid%i_start(ij), grid%i_end(ij), &
2645 grid%j_start(ij), grid%j_end(ij), &
2646 k_start, k_end )
2647 END IF
2648 #endif
2649 CALL spec_bdy_scalar ( moist_tend(ims,kms,jms,im), &
2650 moist_b(1,1,1,1,im), &
2651 moist_bt(1,1,1,1,im), &
2652 config_flags%spec_bdy_width, grid%spec_zone, &
2653 config_flags, &
2654 ijds, ijde, & ! min/max(id,jd)
2655 ids,ide, jds,jde, kds,kde, & ! domain dims
2656 ims,ime, jms,jme, kms,kme, & ! memory dims
2657 ips,ipe, jps,jpe, kps,kpe, & ! patch dims
2658 grid%i_start(ij), grid%i_end(ij), &
2659 grid%j_start(ij), grid%j_end(ij), &
2660 k_start, k_end )
2661 ENDIF
2662 ENDIF
2663 BENCH_END(rlx_bdy_scalar_tim)
2664
2665 ENDDO moist_tile_loop_1
2666 !$OMP END PARALLEL DO
2667
2668 !$OMP PARALLEL DO &
2669 !$OMP PRIVATE ( ij )
2670
2671 moist_tile_loop_2: DO ij = 1 , grid%num_tiles
2672
2673 CALL wrf_debug ( 200 , ' call rk_update_scalar' )
2674
2675 #ifdef WRF_PLUS
2676 if(TEST_rk_update_scalar) THEN
2677 CALL t_rk_update_scalar( im, im, &
2678 moist_old(ims,kms,jms,im), &
2679 moist(ims,kms,jms,im), &
2680 moist_tend(ims,kms,jms,im), &
2681 advect_tend, grid%msft, &
2682 grid%em_mu_1, grid%em_mu_2, grid%em_mub, &
2683 rk_step, dt_rk, grid%spec_zone, &
2684 config_flags, &
2685 ids, ide, jds, jde, kds, kde, &
2686 ims, ime, jms, jme, kms, kme, &
2687 grid%i_start(ij), grid%i_end(ij), &
2688 grid%j_start(ij), grid%j_end(ij), &
2689 k_start , k_end )
2690 END IF
2691 #endif
2692 BENCH_START(update_scal_tim)
2693 CALL rk_update_scalar( im, im, &
2694 moist_old(ims,kms,jms,im), &
2695 moist(ims,kms,jms,im), &
2696 moist_tend(ims,kms,jms,im), &
2697 advect_tend, grid%msft, &
2698 grid%em_mu_1, grid%em_mu_2, grid%em_mub, &
2699 rk_step, dt_rk, grid%spec_zone, &
2700 config_flags, &
2701 ids, ide, jds, jde, kds, kde, &
2702 ims, ime, jms, jme, kms, kme, &
2703 grid%i_start(ij), grid%i_end(ij), &
2704 grid%j_start(ij), grid%j_end(ij), &
2705 k_start , k_end )
2706 BENCH_END(update_scal_tim)
2707
2708 BENCH_START(flow_depbdy_tim)
2709 IF( config_flags%specified ) THEN
2710 IF(im .ne. P_QV)THEN
2711 CALL flow_dep_bdy ( moist(ims,kms,jms,im), &
2712 grid%em_ru_m, grid%em_rv_m, config_flags, &
2713 grid%spec_zone, &
2714 ids,ide, jds,jde, kds,kde, & ! domain dims
2715 ims,ime, jms,jme, kms,kme, & ! memory dims
2716 ips,ipe, jps,jpe, kps,kpe, & ! patch dims
2717 grid%i_start(ij), grid%i_end(ij), &
2718 grid%j_start(ij), grid%j_end(ij), &
2719 k_start, k_end )
2720 ENDIF
2721 ENDIF
2722 BENCH_END(flow_depbdy_tim)
2723
2724 ENDDO moist_tile_loop_2
2725 !$OMP END PARALLEL DO
2726
2727 ENDIF !-- if (grid%adv_moist_cond .or. im==p_qv ) then
2728
2729 ENDDO moist_variable_loop
2730
2731 ENDIF moist_scalar_advance
2732
2733 BENCH_START(tke_adv_tim)
2734 TKE_advance: IF (config_flags%km_opt .eq. 2) then
2735
2736 #ifdef DM_PARALLEL
2737 IF ( config_flags%h_mom_adv_order <= 4 ) THEN
2738 # include "HALO_EM_TKE_ADVECT_3.inc"
2739 ELSE IF ( config_flags%h_mom_adv_order <= 6 ) THEN
2740 # include "HALO_EM_TKE_ADVECT_5.inc"
2741 ELSE
2742 WRITE(wrf_err_message,*)'solve_em: invalid h_mom_adv_order = ',config_flags%h_mom_adv_order
2743 CALL wrf_error_fatal(TRIM(wrf_err_message))
2744 ENDIF
2745 #endif
2746
2747 !$OMP PARALLEL DO &
2748 !$OMP PRIVATE ( ij )
2749
2750 tke_tile_loop_1: DO ij = 1 , grid%num_tiles
2751
2752 CALL wrf_debug ( 200 , ' call rk_scalar_tend for tke' )
2753 CALL rk_scalar_tend ( 1, 1, config_flags, &
2754 rk_step, dt_rk, &
2755 grid%em_ru_m, grid%em_rv_m, grid%em_ww_m, &
2756 grid%em_mut, grid%em_mub, grid%em_mu_1, &
2757 grid%em_alt, &
2758 grid%em_tke_1, &
2759 grid%em_tke_2, &
2760 tke_tend(ims,kms,jms), &
2761 advect_tend,grid%rqvften, &
2762 grid%qv_base, .false., grid%em_fnm, grid%em_fnp, &
2763 grid%msfu, grid%msfv, grid%msft, &
2764 grid%rdx, grid%rdy, grid%em_rdn, grid%em_rdnw, grid%khdif, &
2765 grid%kvdif, grid%xkmhd, &
2766 grid%diff_6th_opt, grid%diff_6th_factor, &
2767 config_flags%pd_tke, &
2768 ids, ide, jds, jde, kds, kde, &
2769 ims, ime, jms, jme, kms, kme, &
2770 grid%i_start(ij), grid%i_end(ij), &
2771 grid%j_start(ij), grid%j_end(ij), &
2772 k_start , k_end )
2773
2774 ENDDO tke_tile_loop_1
2775 !$OMP END PARALLEL DO
2776
2777 !$OMP PARALLEL DO &
2778 !$OMP PRIVATE ( ij )
2779
2780 tke_tile_loop_2: DO ij = 1 , grid%num_tiles
2781
2782 CALL wrf_debug ( 200 , ' call rk_update_scalar' )
2783 CALL rk_update_scalar( 1, 1, &
2784 grid%em_tke_1, &
2785 grid%em_tke_2, &
2786 tke_tend(ims,kms,jms), &
2787 advect_tend,grid%msft, &
2788 grid%em_mu_1, grid%em_mu_2, grid%em_mub, &
2789 rk_step, dt_rk, grid%spec_zone, &
2790 config_flags, &
2791 ids, ide, jds, jde, kds, kde, &
2792 ims, ime, jms, jme, kms, kme, &
2793 grid%i_start(ij), grid%i_end(ij), &
2794 grid%j_start(ij), grid%j_end(ij), &
2795 k_start , k_end )
2796
2797 ! bound the tke (greater than 0, less than tke_upper_bound)
2798
2799 CALL bound_tke( grid%em_tke_2, grid%tke_upper_bound, &
2800 ids, ide, jds, jde, kds, kde, &
2801 ims, ime, jms, jme, kms, kme, &
2802 grid%i_start(ij), grid%i_end(ij), &
2803 grid%j_start(ij), grid%j_end(ij), &
2804 k_start , k_end )
2805
2806 IF( config_flags%specified .or. config_flags%nested ) THEN
2807 CALL flow_dep_bdy ( grid%em_tke_2, &
2808 grid%em_ru_m, grid%em_rv_m, config_flags, &
2809 grid%spec_zone, &
2810 ids,ide, jds,jde, kds,kde, & ! domain dims
2811 ims,ime, jms,jme, kms,kme, & ! memory dims
2812 ips,ipe, jps,jpe, kps,kpe, & ! patch dims
2813 grid%i_start(ij), grid%i_end(ij), &
2814 grid%j_start(ij), grid%j_end(ij), &
2815 k_start, k_end )
2816 ENDIF
2817 ENDDO tke_tile_loop_2
2818 !$OMP END PARALLEL DO
2819
2820 END IF TKE_advance
2821 BENCH_END(tke_adv_tim)
2822
2823 #ifdef WRF_CHEM
2824 ! next the chemical species
2825 BENCH_START(chem_adv_tim)
2826 chem_scalar_advance: IF (num_3d_c >= PARAM_FIRST_SCALAR) THEN
2827
2828 chem_variable_loop: do ic = PARAM_FIRST_SCALAR, num_3d_c
2829
2830 !$OMP PARALLEL DO &
2831 !$OMP PRIVATE ( ij )
2832
2833 chem_tile_loop_1: DO ij = 1 , grid%num_tiles
2834
2835 CALL wrf_debug ( 200 , ' call rk_scalar_tend in chem_tile_loop_1' )
2836 CALL rk_scalar_tend ( ic, ic, config_flags, &
2837 rk_step, dt_rk, &
2838 grid%em_ru_m, grid%em_rv_m, grid%em_ww_m, &
2839 grid%em_mut, grid%em_mub, grid%em_mu_1, &
2840 grid%em_alt, &
2841 chem_old(ims,kms,jms,ic), &
2842 chem(ims,kms,jms,ic), &
2843 chem_tend(ims,kms,jms,ic), &
2844 advect_tend,grid%rqvften, &
2845 grid%qv_base, .false., grid%em_fnm, grid%em_fnp, &
2846 grid%msfu, grid%msfv, grid%msft, &
2847 grid%rdx, grid%rdy, grid%em_rdn, grid%em_rdnw, &
2848 grid%khdif, grid%kvdif, grid%xkmhd, &
2849 grid%diff_6th_opt, grid%diff_6th_factor, &
2850 config_flags%pd_chem, &
2851 ids, ide, jds, jde, kds, kde, &
2852 ims, ime, jms, jme, kms, kme, &
2853 grid%i_start(ij), grid%i_end(ij), &
2854 grid%j_start(ij), grid%j_end(ij), &
2855 k_start , k_end )
2856 !
2857 ! Currently, chemistry species with specified boundaries (i.e. the mother
2858 ! domain) are being over written by flow_dep_bdy_chem. So, relax_bdy and
2859 ! spec_bdy are only called for nests. For boundary conditions from global model or larger domain,
2860 ! chem is uncoupled, and only used for one row/column on inflow (if have_bcs_chem=.true.)
2861 !
2862 IF( ( config_flags%nested ) .and. rk_step == 1 ) THEN
2863 if(ic.eq.1)CALL wrf_debug ( 10 , ' have_bcs_chem' )
2864
2865 CALL relax_bdy_scalar ( chem_tend(ims,kms,jms,ic), &
2866 chem(ims,kms,jms,ic), grid%em_mut, &
2867 chem_b(1,1,1,1,ic), &
2868 chem_bt(1,1,1,1,ic), &
2869 config_flags%spec_bdy_width, grid%spec_zone, grid%relax_zone, &
2870 grid%dtbc, grid%fcx, grid%gcx, &
2871 config_flags, &
2872 ijds, ijde, & ! min/max(id,jd)
2873 ids,ide, jds,jde, kds,kde, & ! domain dims
2874 ims,ime, jms,jme, kms,kme, & ! memory dims
2875 ips,ipe, jps,jpe, kps,kpe, & ! patch dims
2876 grid%i_start(ij), grid%i_end(ij), &
2877 grid%j_start(ij), grid%j_end(ij), &
2878 k_start, k_end )
2879 CALL spec_bdy_scalar ( chem_tend(ims,kms,jms,ic), &
2880 chem_b(1,1,1,1,ic), &
2881 chem_bt(1,1,1,1,ic), &
2882 config_flags%spec_bdy_width, grid%spec_zone, &
2883 config_flags, &
2884 ijds, ijde, & ! min/max(id,jd)
2885 ids,ide, jds,jde, kds,kde, & ! domain dims
2886 ims,ime, jms,jme, kms,kme, & ! memory dims
2887 ips,ipe, jps,jpe, kps,kpe, & ! patch dims
2888 grid%i_start(ij), grid%i_end(ij), &
2889 grid%j_start(ij), grid%j_end(ij), &
2890 k_start, k_end )
2891 ENDIF
2892
2893 ENDDO chem_tile_loop_1
2894
2895 !$OMP END PARALLEL DO
2896
2897
2898 !$OMP PARALLEL DO &
2899 !$OMP PRIVATE ( ij )
2900
2901 chem_tile_loop_2: DO ij = 1 , grid%num_tiles
2902
2903 CALL wrf_debug ( 200 , ' call rk_update_scalar' )
2904 CALL rk_update_scalar( ic, ic, &
2905 chem_old(ims,kms,jms,ic), & ! was chem_1
2906 chem(ims,kms,jms,ic), &
2907 chem_tend(ims,kms,jms,ic), &
2908 advect_tend, grid%msft, &
2909 grid%em_mu_1, grid%em_mu_2, grid%em_mub, &
2910 rk_step, dt_rk, grid%spec_zone, &
2911 config_flags, &
2912 ids, ide, jds, jde, kds, kde, &
2913 ims, ime, jms, jme, kms, kme, &
2914 grid%i_start(ij), grid%i_end(ij), &
2915 grid%j_start(ij), grid%j_end(ij), &
2916 k_start , k_end )
2917
2918
2919 IF( config_flags%specified ) THEN
2920 ! come back to this and figure out why two different routines are needed. JM 20041203
2921 !#ifndef WRF_CHEM
2922 !!$ CALL flow_dep_bdy ( chem(ims,kms,jms,ic), &
2923 !!$ grid%em_ru_m, grid%em_rv_m, config_flags, &
2924 !!$ grid%spec_zone, &
2925 !!$ ids,ide, jds,jde, kds,kde, & ! domain dims
2926 !!$ ims,ime, jms,jme, kms,kme, & ! memory dims
2927 !!$ ips,ipe, jps,jpe, kps,kpe, & ! patch dims
2928 !!$ grid%i_start(ij), grid%i_end(ij), &
2929 !!$ grid%j_start(ij), grid%j_end(ij), &
2930 !!$ k_start, k_end )
2931 !#else
2932 CALL flow_dep_bdy_chem( chem(ims,kms,jms,ic), &
2933 chem_b(1,1,1,1,ic), &
2934 chem_bt(1,1,1,1,ic), dt_rk, &
2935 config_flags%spec_bdy_width,grid%em_z, &
2936 ijds, ijde,grid%have_bcs_chem, &
2937 grid%em_ru_m, grid%em_rv_m, config_flags,grid%em_alt, &
2938 grid%em_t_1,grid%em_pb,grid%em_p,t0,p1000mb,rcp,grid%em_ph_2,grid%em_phb,g, &
2939 grid%spec_zone,ic, &
2940 ids,ide, jds,jde, kds,kde, & ! domain dims
2941 ims,ime, jms,jme, kms,kme, & ! memory dims
2942 ips,ipe, jps,jpe, kps,kpe, & ! patch dims
2943 grid%i_start(ij), grid%i_end(ij), &
2944 grid%j_start(ij), grid%j_end(ij), &
2945 k_start, k_end )
2946 !#endif
2947 ENDIF
2948
2949
2950 ENDDO chem_tile_loop_2
2951 !$OMP END PARALLEL DO
2952
2953 ENDDO chem_variable_loop
2954
2955 ENDIF chem_scalar_advance
2956 BENCH_END(chem_adv_tim)
2957 #endif
2958
2959 ! next the other scalar species
2960 other_scalar_advance: IF (num_3d_s >= PARAM_FIRST_SCALAR) THEN
2961
2962 scalar_variable_loop: do is = PARAM_FIRST_SCALAR, num_3d_s
2963 !$OMP PARALLEL DO &
2964 !$OMP PRIVATE ( ij )
2965
2966 scalar_tile_loop_1: DO ij = 1 , grid%num_tiles
2967
2968 CALL wrf_debug ( 200 , ' call rk_scalar_tend' )
2969 CALL rk_scalar_tend ( is, is, config_flags, &
2970 rk_step, dt_rk, &
2971 grid%em_ru_m, grid%em_rv_m, grid%em_ww_m, &
2972 grid%em_mut, grid%em_mub, grid%em_mu_1, &
2973 grid%em_alt, &
2974 scalar_old(ims,kms,jms,is), &
2975 scalar(ims,kms,jms,is), &
2976 scalar_tend(ims,kms,jms,is), &
2977 advect_tend,grid%rqvften, &
2978 grid%qv_base, .false., grid%em_fnm, grid%em_fnp, &
2979 grid%msfu, grid%msfv, grid%msft, &
2980 grid%rdx, grid%rdy, grid%em_rdn, grid%em_rdnw, &
2981 grid%khdif, grid%kvdif, grid%xkmhd, &
2982 grid%diff_6th_opt, grid%diff_6th_factor, &
2983 config_flags%pd_scalar, &
2984 ids, ide, jds, jde, kds, kde, &
2985 ims, ime, jms, jme, kms, kme, &
2986 grid%i_start(ij), grid%i_end(ij), &
2987 grid%j_start(ij), grid%j_end(ij), &
2988 k_start , k_end )
2989
2990 IF( config_flags%nested .and. (rk_step == 1) ) THEN
2991
2992 IF (is .eq. P_QNI) THEN
2993
2994 CALL relax_bdy_scalar ( scalar_tend(ims,kms,jms,is), &
2995 scalar(ims,kms,jms,is), grid%em_mut, &
2996 scalar_b(1,1,1,1,is), &
2997 scalar_bt(1,1,1,1,is), &
2998 config_flags%spec_bdy_width, grid%spec_zone, grid%relax_zone, &
2999 grid%dtbc, grid%fcx, grid%gcx, &
3000 config_flags, &
3001 ijds, ijde, & ! min/max(id,jd)
3002 ids,ide, jds,jde, kds,kde, & ! domain dims
3003 ims,ime, jms,jme, kms,kme, & ! memory dims
3004 ips,ipe, jps,jpe, kps,kpe, & ! patch dims
3005 grid%i_start(ij), grid%i_end(ij), &
3006 grid%j_start(ij), grid%j_end(ij), &
3007 k_start, k_end )
3008
3009 CALL spec_bdy_scalar ( scalar_tend(ims,kms,jms,is), &
3010 scalar_b(1,1,1,1,is), &
3011 scalar_bt(1,1,1,1,is), &
3012 config_flags%spec_bdy_width, grid%spec_zone, &
3013 config_flags, &
3014 ijds, ijde, & ! min/max(id,jd)
3015 ids,ide, jds,jde, kds,kde, & ! domain dims
3016 ims,ime, jms,jme, kms,kme, & ! memory dims
3017 ips,ipe, jps,jpe, kps,kpe, & ! patch dims
3018 grid%i_start(ij), grid%i_end(ij), &
3019 grid%j_start(ij), grid%j_end(ij), &
3020 k_start, k_end )
3021
3022 ENDIF
3023
3024 ENDIF ! b.c test for scalar (P_QNI) nested boundary condition
3025
3026 ENDDO scalar_tile_loop_1
3027 !$OMP END PARALLEL DO
3028
3029
3030 !$OMP PARALLEL DO &
3031 !$OMP PRIVATE ( ij )
3032
3033 scalar_tile_loop_2: DO ij = 1 , grid%num_tiles
3034
3035 CALL wrf_debug ( 200 , ' call rk_update_scalar' )
3036 CALL rk_update_scalar( is, is, &
3037 scalar_old(ims,kms,jms,is), & ! was scalar_1
3038 scalar(ims,kms,jms,is), &
3039 scalar_tend(ims,kms,jms,is), &
3040 advect_tend, grid%msft, &
3041 grid%em_mu_1, grid%em_mu_2, grid%em_mub, &
3042 rk_step, dt_rk, grid%spec_zone, &
3043 config_flags, &
3044 ids, ide, jds, jde, kds, kde, &
3045 ims, ime, jms, jme, kms, kme, &
3046 grid%i_start(ij), grid%i_end(ij), &
3047 grid%j_start(ij), grid%j_end(ij), &
3048 k_start , k_end )
3049
3050
3051 IF( config_flags%specified ) THEN
3052 CALL flow_dep_bdy ( scalar(ims,kms,jms,is), &
3053 grid%em_ru_m, grid%em_rv_m, config_flags, &
3054 grid%spec_zone, &
3055 ids,ide, jds,jde, kds,kde, & ! domain dims
3056 ims,ime, jms,jme, kms,kme, & ! memory dims
3057 ips,ipe, jps,jpe, kps,kpe, & ! patch dims
3058 grid%i_start(ij), grid%i_end(ij), &
3059 grid%j_start(ij), grid%j_end(ij), &
3060 k_start, k_end )
3061 ENDIF
3062
3063
3064 ENDDO scalar_tile_loop_2
3065 !$OMP END PARALLEL DO
3066
3067 ENDDO scalar_variable_loop
3068
3069 ENDIF other_scalar_advance
3070
3071 ! update the pressure and density at the new time level
3072
3073 !$OMP PARALLEL DO &
3074 !$OMP PRIVATE ( ij )
3075 DO ij = 1 , grid%num_tiles
3076
3077
3078 #ifdef WRF_PLUS
3079 if(TEST_calc_p_rho_phi) THEN
3080 CALL t_calc_p_rho_phi( moist, num_3d_m, &
3081 grid%em_al, grid%em_alb, grid%em_mu_2, grid%em_muts, &
3082 grid%em_ph_2, grid%em_p, grid%em_pb, grid%em_t_2, &
3083 p0, t0, grid%em_znu, grid%em_dnw, grid%em_rdnw, &
3084 grid%em_rdn, config_flags%non_hydrostatic, &
3085 ids, ide, jds, jde, kds, kde, &
3086 ims, ime, jms, jme, kms, kme, &
3087 grid%i_start(ij), grid%i_end(ij), &
3088 grid%j_start(ij), grid%j_end(ij), &
3089 k_start , k_end )
3090 END IF
3091 #endif
3092 BENCH_START(calc_p_rho_tim)
3093
3094 CALL calc_p_rho_phi( moist, num_3d_m, &
3095 grid%em_al, grid%em_alb, grid%em_mu_2, grid%em_muts, &
3096 grid%em_ph_2, grid%em_p, grid%em_pb, grid%em_t_2, &
3097 p0, t0, grid%em_znu, grid%em_dnw, grid%em_rdnw, &
3098 grid%em_rdn, config_flags%non_hydrostatic, &
3099 ids, ide, jds, jde, kds, kde, &
3100 ims, ime, jms, jme, kms, kme, &
3101 grid%i_start(ij), grid%i_end(ij), &
3102 grid%j_start(ij), grid%j_end(ij), &
3103 k_start , k_end )
3104
3105 BENCH_END(calc_p_rho_tim)
3106
3107 ENDDO
3108 !$OMP END PARALLEL DO
3109
3110 ! Reset the boundary conditions if there is another corrector step.
3111 ! (rk_step < rk_order), else we'll handle it at the end of everything
3112 ! (after the split physics, before exiting the timestep).
3113
3114 rk_step_1_check: IF ( rk_step < rk_order ) THEN
3115
3116 !-----------------------------------------------------------
3117 ! Stencils for patch communications (WCS, 29 June 2001)
3118 !
3119 ! here's where we need a wide comm stencil - these are the
3120 ! uncoupled variables so are used for high order calc in
3121 ! advection and mixing routines.
3122 !
3123 ! * * * * *
3124 ! * * * * * * * * *
3125 ! * + * * + * * * + * *
3126 ! * * * * * * * * *
3127 ! * * * * *
3128 !
3129 !
3130 ! grid%em_u_2 x
3131 ! grid%em_v_2 x
3132 ! grid%em_w_2 x
3133 ! grid%em_t_2 x
3134 ! grid%em_ph_2 x
3135 ! grid%em_al x
3136 !
3137 ! 2D variable
3138 ! grid%em_mu_2 x
3139 !
3140 ! 4D variable
3141 ! moist x
3142 ! chem x
3143 ! scalar x
3144
3145 #ifdef DM_PARALLEL
3146 IF ( config_flags%h_mom_adv_order <= 4 ) THEN
3147 # include "HALO_EM_D2_3.inc"
3148 ELSE IF ( config_flags%h_mom_adv_order <= 6 ) THEN
3149 # include "HALO_EM_D2_5.inc"
3150 ELSE
3151 WRITE(wrf_err_message,*)'solve_em: invalid h_mom_adv_order = ',config_flags%h_mom_adv_order
3152 CALL wrf_error_fatal(TRIM(wrf_err_message))
3153 ENDIF
3154 # include "PERIOD_BDY_EM_D.inc"
3155 # include "PERIOD_BDY_EM_MOIST2.inc"
3156 # include "PERIOD_BDY_EM_CHEM2.inc"
3157 # include "PERIOD_BDY_EM_SCALAR2.inc"
3158 #endif
3159
3160 BENCH_START(bc_end_tim)
3161 !$OMP PARALLEL DO &
3162 !$OMP PRIVATE ( ij )
3163
3164 tile_bc_loop_1: DO ij = 1 , grid%num_tiles
3165
3166
3167 CALL wrf_debug ( 200 , ' call rk_phys_bc_dry_2' )
3168
3169 CALL rk_phys_bc_dry_2( config_flags, &
3170 grid%em_u_2, grid%em_v_2, grid%em_w_2, &
3171 grid%em_t_2, grid%em_ph_2, grid%em_mu_2, &
3172 ids, ide, jds, jde, kds, kde, &
3173 ims, ime, jms, jme, kms, kme, &
3174 ips, ipe, jps, jpe, kps, kpe, &
3175 grid%i_start(ij), grid%i_end(ij), &
3176 grid%j_start(ij), grid%j_end(ij), &
3177 k_start , k_end )
3178
3179 BENCH_START(diag_w_tim)
3180 IF (.not. config_flags%non_hydrostatic) THEN
3181 #ifdef WRF_PLUS
3182 if(TEST_diagnose_w) THEN
3183 CALL t_diagnose_w( ph_tend, grid%em_ph_2, grid%em_ph_1, grid%em_w_2, grid%em_muts, dt_rk, &
3184 grid%em_u_2, grid%em_v_2, grid%ht, &
3185 grid%cf1, grid%cf2, grid%cf3, grid%rdx, grid%rdy, grid%msft, &
3186 ids, ide, jds, jde, kds, kde, &
3187 ims, ime, jms, jme, kms, kme, &
3188 grid%i_start(ij), grid%i_end(ij), &
3189 grid%j_start(ij), grid%j_end(ij), &
3190 k_start , k_end )
3191 END IF
3192 #endif
3193 CALL diagnose_w( ph_tend, grid%em_ph_2, grid%em_ph_1, grid%em_w_2, grid%em_muts, dt_rk, &
3194 grid%em_u_2, grid%em_v_2, grid%ht, &
3195 grid%cf1, grid%cf2, grid%cf3, grid%rdx, grid%rdy, grid%msft, &
3196 ids, ide, jds, jde, kds, kde, &
3197 ims, ime, jms, jme, kms, kme, &
3198 grid%i_start(ij), grid%i_end(ij), &
3199 grid%j_start(ij), grid%j_end(ij), &
3200 k_start , k_end )
3201 ENDIF
3202 BENCH_END(diag_w_tim)
3203
3204 IF (num_3d_m >= PARAM_FIRST_SCALAR) THEN
3205
3206 moisture_loop_bdy_1 : DO im = PARAM_FIRST_SCALAR , num_3d_m
3207
3208 CALL set_physical_bc3d( moist(ims,kms,jms,im), 'grid%em_p', config_flags, &
3209 ids, ide, jds, jde, kds, kde, &
3210 ims, ime, jms, jme, kms, kme, &
3211 ips, ipe, jps, jpe, kps, kpe, &
3212 grid%i_start(ij), grid%i_end(ij), &
3213 grid%j_start(ij), grid%j_end(ij), &
3214 k_start , k_end )
3215 END DO moisture_loop_bdy_1
3216
3217 ENDIF
3218
3219 IF (num_3d_c >= PARAM_FIRST_SCALAR) THEN
3220
3221 chem_species_bdy_loop_1 : DO ic = PARAM_FIRST_SCALAR , num_3d_c
3222
3223 CALL set_physical_bc3d( chem(ims,kms,jms,ic), 'grid%em_p', config_flags, &
3224 ids, ide, jds, jde, kds, kde, &
3225 ims, ime, jms, jme, kms, kme, &
3226 ips, ipe, jps, jpe, kps, kpe, &
3227 grid%i_start(ij), grid%i_end(ij), &
3228 grid%j_start(ij), grid%j_end(ij), &
3229 k_start , k_end-1 )
3230
3231 END DO chem_species_bdy_loop_1
3232
3233 END IF
3234
3235 IF (num_3d_s >= PARAM_FIRST_SCALAR) THEN
3236
3237 scalar_species_bdy_loop_1 : DO is = PARAM_FIRST_SCALAR , num_3d_s
3238
3239 CALL set_physical_bc3d( scalar(ims,kms,jms,is), 'grid%em_p', config_flags, &
3240 ids, ide, jds, jde, kds, kde, &
3241 ims, ime, jms, jme, kms, kme, &
3242 ips, ipe, jps, jpe, kps, kpe, &
3243 grid%i_start(ij), grid%i_end(ij), &
3244 grid%j_start(ij), grid%j_end(ij), &
3245 k_start , k_end-1 )
3246
3247 END DO scalar_species_bdy_loop_1
3248
3249 END IF
3250
3251 IF (config_flags%km_opt .eq. 2) THEN
3252
3253 CALL set_physical_bc3d( grid%em_tke_2 , 'grid%em_p', config_flags, &
3254 ids, ide, jds, jde, kds, kde, &
3255 ims, ime, jms, jme, kms, kme, &
3256 ips, ipe, jps, jpe, kps, kpe, &
3257 grid%i_start(ij), grid%i_end(ij), &
3258 grid%j_start(ij), grid%j_end(ij), &
3259 k_start , k_end )
3260 END IF
3261
3262 END DO tile_bc_loop_1
3263 !$OMP END PARALLEL DO
3264 BENCH_END(bc_end_tim)
3265
3266
3267 #ifdef DM_PARALLEL
3268
3269 ! * * * * *
3270 ! * * * * * * * * *
3271 ! * + * * + * * * + * *
3272 ! * * * * * * * * *
3273 ! * * * * *
3274
3275 ! moist, chem, scalar, tke x
3276
3277
3278 IF ( config_flags%h_mom_adv_order <= 4 ) THEN
3279 IF ( (config_flags%pd_tke) .and. (rk_step == rk_order-1) ) THEN
3280 # include "HALO_EM_TKE_5.inc"
3281 ELSE
3282 # include "HALO_EM_TKE_3.inc"
3283 ENDIF
3284 ELSE IF ( config_flags%h_mom_adv_order <= 6 ) THEN
3285 IF ( (config_flags%pd_tke) .and. (rk_step == rk_order-1) ) THEN
3286 # include "HALO_EM_TKE_7.inc"
3287 ELSE
3288 # include "HALO_EM_TKE_5.inc"
3289 ENDIF
3290 ELSE
3291 WRITE(wrf_err_message,*)'solve_em: invalid h_sca_adv_order = ',config_flags%h_sca_adv_order
3292 CALL wrf_error_fatal(TRIM(wrf_err_message))
3293 ENDIF
3294
3295 #if 0
3296 IF (config_flags%km_opt .eq. 2) THEN
3297 # include "HALO_EM_TKE_F.inc"
3298 ENDIF
3299 #endif
3300
3301 if ( num_moist .ge. PARAM_FIRST_SCALAR ) then
3302 IF ( config_flags%h_sca_adv_order <= 4 ) THEN
3303 IF ( (config_flags%pd_moist) .and. (rk_step == rk_order-1) ) THEN
3304 # include "HALO_EM_MOIST_E_5.inc"
3305 ELSE
3306 # include "HALO_EM_MOIST_E_3.inc"
3307 END IF
3308 ELSE IF ( config_flags%h_sca_adv_order <= 6 ) THEN
3309 IF ( (config_flags%pd_moist) .and. (rk_step == rk_order-1) ) THEN
3310 # include "HALO_EM_MOIST_E_7.inc"
3311 ELSE
3312 # include "HALO_EM_MOIST_E_5.inc"
3313 END IF
3314 ELSE
3315 WRITE(wrf_err_message,*)'solve_em: invalid h_sca_adv_order = ',config_flags%h_sca_adv_order
3316 CALL wrf_error_fatal(TRIM(wrf_err_message))
3317 ENDIF
3318 endif
3319 if ( num_chem >= PARAM_FIRST_SCALAR ) then
3320 IF ( config_flags%h_sca_adv_order <= 4 ) THEN
3321 IF ( (config_flags%pd_chem) .and. (rk_step == rk_order-1) ) THEN
3322 # include "HALO_EM_CHEM_E_5.inc"
3323 ELSE
3324 # include "HALO_EM_CHEM_E_3.inc"
3325 END IF
3326 ELSE IF ( config_flags%h_sca_adv_order <= 6 ) THEN
3327 IF ( (config_flags%pd_chem) .and. (rk_step == rk_order-1) ) THEN
3328 # include "HALO_EM_CHEM_E_7.inc"
3329 ELSE
3330 # include "HALO_EM_CHEM_E_5.inc"
3331 END IF
3332 ELSE
3333 WRITE(wrf_err_message,*)'solve_em: invalid h_sca_adv_order = ',config_flags%h_sca_adv_order
3334 CALL wrf_error_fatal(TRIM(wrf_err_message))
3335 ENDIF
3336 endif
3337 if ( num_scalar >= PARAM_FIRST_SCALAR ) then
3338 IF ( config_flags%h_sca_adv_order <= 4 ) THEN
3339 IF ( (config_flags%pd_scalar) .and. (rk_step == rk_order-1) ) THEN
3340 # include "HALO_EM_SCALAR_E_5.inc"
3341 ELSE
3342 # include "HALO_EM_SCALAR_E_3.inc"
3343 END IF
3344 ELSE IF ( config_flags%h_sca_adv_order <= 6 ) THEN
3345 IF ( (config_flags%pd_scalar) .and. (rk_step == rk_order-1) ) THEN
3346 # include "HALO_EM_SCALAR_E_7.inc"
3347 ELSE
3348 # include "HALO_EM_SCALAR_E_5.inc"
3349 END IF
3350 ELSE
3351 WRITE(wrf_err_message,*)'solve_em: invalid h_sca_adv_order = ',config_flags%h_sca_adv_order
3352 CALL wrf_error_fatal(TRIM(wrf_err_message))
3353 ENDIF
3354 endif
3355 #endif
3356
3357 ENDIF rk_step_1_check
3358
3359
3360 !**********************************************************
3361 !
3362 ! end of RK predictor-corrector loop
3363 !
3364 !**********************************************************
3365
3366 END DO Runge_Kutta_loop
3367
3368 !$OMP PARALLEL DO &
3369 !$OMP PRIVATE ( ij )
3370
3371 DO ij = 1 , grid%num_tiles
3372
3373 BENCH_START(advance_ppt_tim)
3374 CALL wrf_debug ( 200 , ' call advance_ppt' )
3375 CALL advance_ppt(grid%rthcuten,grid%rqvcuten,grid%rqccuten,grid%rqrcuten, &
3376 grid%rqicuten,grid%rqscuten,grid%rainc,grid%raincv,grid%nca, &
3377 grid%htop,grid%hbot,grid%cutop,grid%cubot, &
3378 grid%cuppt, config_flags, &
3379 ids,ide, jds,jde, kds,kde, &
3380 ims,ime, jms,jme, kms,kme, &
3381 grid%i_start(ij), grid%i_end(ij), &
3382 grid%j_start(ij), grid%j_end(ij), &
3383 k_start , k_end )
3384 BENCH_END(advance_ppt_tim)
3385
3386 ENDDO
3387 !$OMP END PARALLEL DO
3388
3389 !<DESCRIPTION>
3390 !<pre>
3391 ! (5) time-split physics.
3392 !
3393 ! Microphysics are the only time split physics in the WRF model
3394 ! at this time. Split-physics begins with the calculation of
3395 ! needed diagnostic quantities (pressure, temperature, etc.)
3396 ! followed by a call to the microphysics driver,
3397 ! and finishes with a clean-up, storing off of a diabatic tendency
3398 ! from the moist physics, and a re-calculation of the diagnostic
3399 ! quantities pressure and density.
3400 !</pre>
3401 !</DESCRIPTION>
3402
3403 IF (config_flags%mp_physics /= 0) then
3404
3405 IF( config_flags%specified .or. config_flags%nested ) THEN
3406 sz = grid%spec_zone
3407 ELSE
3408 sz = 0
3409 ENDIF
3410
3411 !$OMP PARALLEL DO &
3412 !$OMP PRIVATE ( ij, its, ite, jts, jte )
3413
3414 scalar_tile_loop_1a: DO ij = 1 , grid%num_tiles
3415
3416 IF ( config_flags%periodic_x ) THEN
3417 its = max(grid%i_start(ij),ids)
3418 ite = min(grid%i_end(ij),ide-1)
3419 ELSE
3420 its = max(grid%i_start(ij),ids+sz)
3421 ite = min(grid%i_end(ij),ide-1-sz)
3422 ENDIF
3423 jts = max(grid%j_start(ij),jds+sz)
3424 jte = min(grid%j_end(ij),jde-1-sz)
3425
3426 CALL wrf_debug ( 200 , ' call moist_physics_prep' )
3427 BENCH_START(moist_physics_prep_tim)
3428 CALL moist_physics_prep_em( grid%em_t_2, grid%em_t_1, t0, rho, &
3429 grid%em_al, grid%em_alb, grid%em_p, p8w, p0, grid%em_pb, &
3430 grid%em_ph_2, grid%em_phb, th_phy, pi_phy, p_phy, &
3431 grid%em_z, z_at_w, dz8w, &
3432 dtm, grid%h_diabatic, &
3433 config_flags,grid%em_fnm, grid%em_fnp, &
3434 ids, ide, jds, jde, kds, kde, &
3435 ims, ime, jms, jme, kms, kme, &
3436 its, ite, jts, jte, &
3437 k_start , k_end )
3438 BENCH_END(moist_physics_prep_tim)
3439 END DO scalar_tile_loop_1a
3440 !$OMP END PARALLEL DO
3441
3442 CALL wrf_debug ( 200 , ' call microphysics_driver' )
3443
3444 grid%em_sr = 0.
3445 specified_bdy = config_flags%specified .OR. config_flags%nested
3446 channel_bdy = config_flags%specified .AND. config_flags%periodic_x
3447
3448 #if 0
3449 BENCH_START(microswap_1)
3450 ! for load balancing; communication to redistribute the points
3451 IF ( config_flags%mp_physics .EQ. ETAMPNEW ) THEN
3452 #include "SWAP_ETAMP_NEW.inc"
3453 ELSE IF ( config_flags%mp_physics .EQ. WSM3SCHEME ) THEN
3454 #include "SWAP_WSM3.inc"
3455 ENDIF
3456 BENCH_END(microswap_1)
3457 #endif
3458
3459 BENCH_START(micro_driver_tim)
3460
3461 CALL microphysics_driver( &
3462 & DT=dtm ,DX=grid%dx ,DY=grid%dy &
3463 & ,DZ8W=dz8w ,F_ICE_PHY=grid%f_ice_phy &
3464 & ,ITIMESTEP=grid%itimestep ,LOWLYR=grid%lowlyr &
3465 & ,P8W=p8w ,P=p_phy ,PI_PHY=pi_phy &
3466 & ,RHO=rho ,SPEC_ZONE=grid%spec_zone &
3467 & ,SR=grid%em_sr ,TH=th_phy &
3468 & ,WARM_RAIN=grid%warm_rain ,XLAND=grid%xland &
3469 & ,SPECIFIED=specified_bdy, CHANNEL_SWITCH=channel_bdy &
3470 & ,F_RAIN_PHY=grid%f_rain_phy &
3471 & ,F_RIMEF_PHY=grid%f_rimef_phy &
3472 & ,MP_PHYSICS=config_flags%mp_physics &
3473 & ,IDS=ids,IDE=ide, JDS=jds,JDE=jde, KDS=kds,KDE=kde &
3474 & ,IMS=ims,IME=ime, JMS=jms,JME=jme, KMS=kms,KME=kme &
3475 & ,I_START=grid%i_start,I_END=min(grid%i_end, ide-1) &
3476 & ,J_START=grid%j_start,J_END=min(grid%j_end, jde-1) &
3477 & ,KTS=k_start, KTE=min(k_end,kde-1) &
3478 & ,NUM_TILES=grid%num_tiles &
3479 ! Optional
3480 & , RAINNC=grid%rainnc, RAINNCV=grid%rainncv &
3481 & , SNOWNC=grid%snownc, SNOWNCV=grid%snowncv &
3482 & , GRAUPELNC=grid%graupelnc, GRAUPELNCV=grid%graupelncv &
3483 & , W=grid%em_w_2, Z=grid%em_z, HT=grid%ht &
3484 & , MP_RESTART_STATE=grid%mp_restart_state &
3485 & , TBPVS_STATE=grid%tbpvs_state & ! etampnew
3486 & , TBPVS0_STATE=grid%tbpvs0_state & ! etampnew
3487 & , QV_CURR=moist(ims,kms,jms,P_QV), F_QV=F_QV &
3488 & , QC_CURR=moist(ims,kms,jms,P_QC), F_QC=F_QC &
3489 & , QR_CURR=moist(ims,kms,jms,P_QR), F_QR=F_QR &
3490 & , QI_CURR=moist(ims,kms,jms,P_QI), F_QI=F_QI &
3491 & , QS_CURR=moist(ims,kms,jms,P_QS), F_QS=F_QS &
3492 & , QG_CURR=moist(ims,kms,jms,P_QG), F_QG=F_QG &
3493 & , QNI_CURR=scalar(ims,kms,jms,P_QNI), F_QNI=F_QNI &
3494 & , QT_CURR=scalar(ims,kms,jms,P_QT), F_QT=F_QT &
3495 )
3496 BENCH_END(micro_driver_tim)
3497
3498 #if 0
3499 BENCH_START(microswap_2)
3500 ! for load balancing; communication to redistribute the points
3501 IF ( config_flags%mp_physics .EQ. ETAMPNEW ) THEN
3502 #include "SWAP_ETAMP_NEW.inc"
3503 ELSE IF ( config_flags%mp_physics .EQ. WSM3SCHEME ) THEN
3504 #include "SWAP_WSM3.inc"
3505 ENDIF
3506 BENCH_END(microswap_2)
3507 #endif
3508
3509 CALL wrf_debug ( 200 , ' call moist_physics_finish' )
3510 BENCH_START(moist_phys_end_tim)
3511 !$OMP PARALLEL DO &
3512 !$OMP PRIVATE ( ij, its, ite, jts, jte )
3513
3514 scalar_tile_loop_1b: DO ij = 1 , grid%num_tiles
3515
3516 IF ( config_flags%periodic_x ) THEN
3517 its = max(grid%i_start(ij),ids)
3518 ite = min(grid%i_end(ij),ide-1)
3519 ELSE
3520 its = max(grid%i_start(ij),ids+sz)
3521 ite = min(grid%i_end(ij),ide-1-sz)
3522 ENDIF
3523 jts = max(grid%j_start(ij),jds+sz)
3524 jte = min(grid%j_end(ij),jde-1-sz)
3525
3526 CALL microphysics_zero_out ( &
3527 moist , num_moist , config_flags , &
3528 ids, ide, jds, jde, kds, kde, &
3529 ims, ime, jms, jme, kms, kme, &
3530 its, ite, jts, jte, &
3531 k_start , k_end )
3532
3533 CALL moist_physics_finish_em( grid%em_t_2, grid%em_t_1, t0, grid%em_muts, th_phy, &
3534 grid%h_diabatic, dtm, config_flags, &
3535 ids, ide, jds, jde, kds, kde, &
3536 ims, ime, jms, jme, kms, kme, &
3537 its, ite, jts, jte, &
3538 k_start , k_end )
3539
3540 CALL calc_p_rho_phi( moist, num_3d_m, &
3541 grid%em_al, grid%em_alb, grid%em_mu_2, grid%em_muts, &
3542 grid%em_ph_2, grid%em_p, grid%em_pb, grid%em_t_2, &
3543 p0, t0, grid%em_znu, grid%em_dnw, grid%em_rdnw, &
3544 grid%em_rdn, config_flags%non_hydrostatic, &
3545 ids, ide, jds, jde, kds, kde, &
3546 ims, ime, jms, jme, kms, kme, &
3547 its, ite, jts, jte, &
3548 k_start , k_end )
3549
3550 END DO scalar_tile_loop_1b
3551 !$OMP END PARALLEL DO
3552 BENCH_END(moist_phys_end_tim)
3553
3554 ENDIF
3555
3556 IF (.not. config_flags%non_hydrostatic) THEN
3557 !$OMP PARALLEL DO &
3558 !$OMP PRIVATE ( ij )
3559 DO ij = 1 , grid%num_tiles
3560 CALL diagnose_w( ph_tend, grid%em_ph_2, grid%em_ph_1, grid%em_w_2, grid%em_muts, dt_rk, &
3561 grid%em_u_2, grid%em_v_2, grid%ht, &
3562 grid%cf1, grid%cf2, grid%cf3, grid%rdx, grid%rdy, grid%msft, &
3563 ids, ide, jds, jde, kds, kde, &
3564 ims, ime, jms, jme, kms, kme, &
3565 grid%i_start(ij), grid%i_end(ij), &
3566 grid%j_start(ij), grid%j_end(ij), &
3567 k_start , k_end )
3568
3569 END DO
3570 !$OMP END PARALLEL DO
3571 ENDIF
3572
3573 chem_tile_loop_3: DO ij = 1 , grid%num_tiles
3574
3575 CALL wrf_debug ( 200 , ' call scalar_tile_loop_2' )
3576
3577 IF ( num_3d_c >= PARAM_FIRST_SCALAR ) then
3578
3579 !
3580 ! tiled chemistry not here, it is called from solve_interface, and found in chem_driver
3581 !
3582
3583 END IF
3584
3585 END DO chem_tile_loop_3
3586
3587
3588 ! We're finished except for boundary condition (and patch) update
3589
3590 ! Boundary condition time (or communication time). At this time, we have
3591 ! implemented periodic and symmetric physical boundary conditions.
3592
3593 ! b.c. routine for data within patch.
3594
3595 ! we need to do both time levels of
3596 ! data because the time filter only works in the physical solution space.
3597
3598 ! First, do patch communications for boundary conditions (periodicity)
3599
3600 !-----------------------------------------------------------
3601 ! Stencils for patch communications (WCS, 29 June 2001)
3602 !
3603 ! here's where we need a wide comm stencil - these are the
3604 ! uncoupled variables so are used for high order calc in
3605 ! advection and mixing routines.
3606 !
3607 ! * * * * *
3608 ! * * * * * * * * *
3609 ! * + * * + * * * + * *
3610 ! * * * * * * * * *
3611 ! * * * * *
3612 !
3613 ! grid%em_u_1 x
3614 ! grid%em_u_2 x
3615 ! grid%em_v_1 x
3616 ! grid%em_v_2 x
3617 ! grid%em_w_1 x
3618 ! grid%em_w_2 x
3619 ! grid%em_t_1 x
3620 ! grid%em_t_2 x
3621 ! grid%em_ph_1 x
3622 ! grid%em_ph_2 x
3623 ! grid%em_tke_1 x
3624 ! grid%em_tke_2 x
3625 !
3626 ! 2D variables
3627 ! grid%em_mu_1 x
3628 ! grid%em_mu_2 x
3629 !
3630 ! 4D variables
3631 ! moist x
3632 ! chem x
3633 ! scalar x
3634 !----------------------------------------------------------
3635
3636
3637 #ifdef DM_PARALLEL
3638 IF ( config_flags%h_mom_adv_order <= 4 ) THEN
3639 # include "HALO_EM_D3_3.inc"
3640 ELSE IF ( config_flags%h_mom_adv_order <= 6 ) THEN
3641 # include "HALO_EM_D3_5.inc"
3642 ELSE
3643 WRITE(wrf_err_message,*)'solve_em: invalid h_mom_adv_order = ',config_flags%h_mom_adv_order
3644 CALL wrf_error_fatal(TRIM(wrf_err_message))
3645 ENDIF
3646 # include "PERIOD_BDY_EM_D3.inc"
3647 # include "PERIOD_BDY_EM_MOIST.inc"
3648 # include "PERIOD_BDY_EM_CHEM.inc"
3649 # include "PERIOD_BDY_EM_SCALAR.inc"
3650 #endif
3651
3652 ! now set physical b.c on a patch
3653
3654 BENCH_START(bc_2d_tim)
3655 !$OMP PARALLEL DO &
3656 !$OMP PRIVATE ( ij )
3657
3658 tile_bc_loop_2: DO ij = 1 , grid%num_tiles
3659
3660
3661 CALL wrf_debug ( 200 , ' call set_phys_bc_dry_2' )
3662
!-----------------------------------------------------------------------
! End-of-timestep section of solve_em (fragment).  This code executes
! inside the per-tile loop "tile_bc_loop_2" opened earlier in the
! routine (ij is that loop's tile index).  Per tile it (re)applies
! physical/lateral boundary conditions to the prognostic dry fields,
! TKE, and the moist/chem/scalar tracer arrays.  After the tile loop it
! accumulates the boundary-tendency clock (dtbc), computes optional
! surface-pressure-tendency diagnostics, and performs the final
! distributed-memory halo exchanges before returning.
!-----------------------------------------------------------------------

! Apply dry-dynamics boundary conditions to BOTH time levels of the
! wind components (u,v,w), potential temperature (t), geopotential
! (ph), and column mass (mu) on this tile.
3663   CALL set_phys_bc_dry_2( config_flags, &
3664 grid%em_u_1, grid%em_u_2, grid%em_v_1, grid%em_v_2, grid%em_w_1, grid%em_w_2, &
3665 grid%em_t_1, grid%em_t_2, grid%em_ph_1, grid%em_ph_2, grid%em_mu_1, grid%em_mu_2, &
3666 ids, ide, jds, jde, kds, kde, &
3667 ims, ime, jms, jme, kms, kme, &
3668 ips, ipe, jps, jpe, kps, kpe, &
3669 grid%i_start(ij), grid%i_end(ij), &
3670 grid%j_start(ij), grid%j_end(ij), &
3671 k_start , k_end )
3672

! Boundary conditions for both time levels of TKE.
! NOTE(review): the second argument 'grid%em_p' is the staggering-flag
! string of set_physical_bc3d; elsewhere in WRF this flag is a single
! character such as 'p'.  The 'grid%em_' prefix looks like an accidental
! mechanical rename — if the callee keys off the first character it will
! see 'g', not 'p'.  Confirm against module_bc before changing.
! NOTE(review): em_tke_1 is filled only to k_end-1 while em_tke_2 goes
! to k_end — verify this asymmetry is intentional.
3673   CALL set_physical_bc3d( grid%em_tke_1, 'grid%em_p', config_flags, &
3674 ids, ide, jds, jde, kds, kde, &
3675 ims, ime, jms, jme, kms, kme, &
3676 ips, ipe, jps, jpe, kps, kpe, &
3677 grid%i_start(ij), grid%i_end(ij), &
3678 grid%j_start(ij), grid%j_end(ij), &
3679 k_start , k_end-1 )
3680   CALL set_physical_bc3d( grid%em_tke_2 , 'grid%em_p', config_flags, &
3681 ids, ide, jds, jde, kds, kde, &
3682 ims, ime, jms, jme, kms, kme, &
3683 ips, ipe, jps, jpe, kps, kpe, &
3684 grid%i_start(ij), grid%i_end(ij), &
3685 grid%j_start(ij), grid%j_end(ij), &
3686 k_start , k_end )
3687

! Boundary conditions for each active moisture species (4th array
! index im selects the species; moist(ims,kms,jms,im) passes the base
! address of that species' 3-D slab).
3688 moisture_loop_bdy_2 : DO im = PARAM_FIRST_SCALAR , num_3d_m
3689
3690   CALL set_physical_bc3d( moist(ims,kms,jms,im), 'grid%em_p', &
3691 config_flags, &
3692 ids, ide, jds, jde, kds, kde, &
3693 ims, ime, jms, jme, kms, kme, &
3694 ips, ipe, jps, jpe, kps, kpe, &
3695 grid%i_start(ij), grid%i_end(ij), &
3696 grid%j_start(ij), grid%j_end(ij), &
3697 k_start , k_end )
3698
3699 END DO moisture_loop_bdy_2
3700

! Same treatment for each active chemistry species.
3701 chem_species_bdy_loop_2 : DO ic = PARAM_FIRST_SCALAR , num_3d_c
3702
3703   CALL set_physical_bc3d( chem(ims,kms,jms,ic) , 'grid%em_p', config_flags, &
3704 ids, ide, jds, jde, kds, kde, &
3705 ims, ime, jms, jme, kms, kme, &
3706 ips, ipe, jps, jpe, kps, kpe, &
3707 grid%i_start(ij), grid%i_end(ij), &
3708 grid%j_start(ij), grid%j_end(ij), &
3709 k_start , k_end )
3710
3711 END DO chem_species_bdy_loop_2
3712

! Same treatment for each active generic scalar.
3713 scalar_species_bdy_loop_2 : DO is = PARAM_FIRST_SCALAR , num_3d_s
3714
3715   CALL set_physical_bc3d( scalar(ims,kms,jms,is) , 'grid%em_p', config_flags, &
3716 ids, ide, jds, jde, kds, kde, &
3717 ims, ime, jms, jme, kms, kme, &
3718 ips, ipe, jps, jpe, kps, kpe, &
3719 grid%i_start(ij), grid%i_end(ij), &
3720 grid%j_start(ij), grid%j_end(ij), &
3721 k_start , k_end )
3722
3723 END DO scalar_species_bdy_loop_2
3724

! End of the OpenMP-parallel tile loop opened earlier in the routine.
3725 END DO tile_bc_loop_2
3726 !$OMP END PARALLEL DO
3727 BENCH_END(bc_2d_tim)
3728

! Advance the boundary-tendency clock when lateral boundaries come from
! specified (real-data) or nested forcing.
3729 IF( config_flags%specified .or. config_flags%nested ) THEN
3730 grid%dtbc = grid%dtbc + grid%dt
3731 ENDIF
3732

! calculate some model diagnostics.
! (dpsdt/dmudt surface-pressure and mu tendencies, precipitation and
! surface-flux accumulations; enabled via config_flags%diag_print.
! Note the tile i/j/k end indices are clipped to the unstaggered
! domain, ide-1/jde-1/kde-1.)
3733
3734
3735 CALL wrf_debug ( 200 , ' call diagnostic_driver' )
3736
3737 CALL diagnostic_output_calc( &
3738 & DPSDT=grid%dpsdt ,DMUDT=grid%dmudt &
3739 & ,P_PHY=p_phy ,PK1M=grid%pk1m &
3740 & ,MU_2=grid%em_mu_2 ,MU_2M=grid%mu_2m &
3741 & ,U=grid%em_u_2 ,V=grid%em_v_2 &
3742 & ,RAINCV=grid%raincv ,RAINNCV=grid%rainncv &
3743 & ,RAINC=grid%rainc ,RAINNC=grid%rainnc &
3744 & ,HFX=grid%hfx ,SFCEVP=grid%sfcevp ,LH=grid%lh &
3745 & ,DT=grid%dt ,SBW=config_flags%spec_bdy_width &
3746 & ,XTIME=grid%xtime &
3747 ! Selection flag
3748 & ,DIAG_PRINT=config_flags%diag_print &
3749 ! Dimension arguments
3750 & ,IDS=ids,IDE=ide, JDS=jds,JDE=jde, KDS=kds,KDE=kde &
3751 & ,IMS=ims,IME=ime, JMS=jms,JME=jme, KMS=kms,KME=kme &
3752 & ,IPS=ips,IPE=ipe, JPS=jps,JPE=jpe, KPS=kps,KPE=kpe &
3753 & ,I_START=grid%i_start,I_END=min(grid%i_end, ide-1) &
3754 & ,J_START=grid%j_start,J_END=min(grid%j_end, jde-1) &
3755 & ,KTS=k_start, KTE=min(k_end,kde-1) &
3756 & ,NUM_TILES=grid%num_tiles &
3757 & )
3758

! Final halo exchanges: the stencil width (registry-generated include
! file _3 vs _5) is chosen from the horizontal momentum advection
! order; any other order is a fatal configuration error.
3759 #ifdef DM_PARALLEL
3760 !-----------------------------------------------------------------------
3761 ! see above
3762 !--------------------------------------------------------------
3763 CALL wrf_debug ( 200 , ' call HALO_RK_E' )
3764 IF ( config_flags%h_mom_adv_order <= 4 ) THEN
3765 # include "HALO_EM_E_3.inc"
3766 ELSE IF ( config_flags%h_mom_adv_order <= 6 ) THEN
3767 # include "HALO_EM_E_5.inc"
3768 ELSE
3769 WRITE(wrf_err_message,*)'solve_em: invalid h_mom_adv_order = ',config_flags%h_mom_adv_order
3770 CALL wrf_error_fatal(TRIM(wrf_err_message))
3771 ENDIF
3772 #endif
3773

! Halo exchanges for the tracer families, guarded so an empty family
! (no species registered beyond PARAM_FIRST_SCALAR) exchanges nothing.
3774 #ifdef DM_PARALLEL
3775 if ( num_moist >= PARAM_FIRST_SCALAR ) then
3776 !-----------------------------------------------------------------------
3777 ! see above
3778 !--------------------------------------------------------------
3779 CALL wrf_debug ( 200 , ' call HALO_RK_MOIST' )
3780 IF ( config_flags%h_mom_adv_order <= 4 ) THEN
3781 # include "HALO_EM_MOIST_E_3.inc"
3782 ELSE IF ( config_flags%h_mom_adv_order <= 6 ) THEN
3783 # include "HALO_EM_MOIST_E_5.inc"
3784 ELSE
3785 WRITE(wrf_err_message,*)'solve_em: invalid h_mom_adv_order = ',config_flags%h_mom_adv_order
3786 CALL wrf_error_fatal(TRIM(wrf_err_message))
3787 ENDIF
3788 endif
3789 if ( num_chem >= PARAM_FIRST_SCALAR ) then
3790 !-----------------------------------------------------------------------
3791 ! see above
3792 !--------------------------------------------------------------
3793 CALL wrf_debug ( 200 , ' call HALO_RK_CHEM' )
3794 IF ( config_flags%h_mom_adv_order <= 4 ) THEN
3795 # include "HALO_EM_CHEM_E_3.inc"
3796 ELSE IF ( config_flags%h_mom_adv_order <= 6 ) THEN
3797 # include "HALO_EM_CHEM_E_5.inc"
3798 ELSE
3799 WRITE(wrf_err_message,*)'solve_em: invalid h_mom_adv_order = ',config_flags%h_mom_adv_order
3800 CALL wrf_error_fatal(TRIM(wrf_err_message))
3801 ENDIF
3802 endif
3803 if ( num_scalar >= PARAM_FIRST_SCALAR ) then
3804 !-----------------------------------------------------------------------
3805 ! see above
3806 !--------------------------------------------------------------
3807 CALL wrf_debug ( 200 , ' call HALO_RK_SCALAR' )
3808 IF ( config_flags%h_mom_adv_order <= 4 ) THEN
3809 # include "HALO_EM_SCALAR_E_3.inc"
3810 ELSE IF ( config_flags%h_mom_adv_order <= 6 ) THEN
3811 # include "HALO_EM_SCALAR_E_5.inc"
3812 ELSE
3813 WRITE(wrf_err_message,*)'solve_em: invalid h_mom_adv_order = ',config_flags%h_mom_adv_order
3814 CALL wrf_error_fatal(TRIM(wrf_err_message))
3815 ENDIF
3816 endif
3817 #endif
3818
3819 CALL wrf_debug ( 200 , ' call end of solve_em' )
3820
3821 ! Finish timers if compiled with -DBENCH.
3822 #include <bench_solve_em_end.h>
3823
3824 RETURN
3825
3826 END SUBROUTINE solve_em