solve_em-merge.F
References to this file elsewhere.
1 !WRF:MEDIATION_LAYER:SOLVER
2
3 SUBROUTINE solve_em ( grid , config_flags , &
4 ! Actual arguments generated from Registry
5 #include "em_dummy_new_args.inc"
6 !
7 )
8
9 ! Driver layer modules
10 USE module_domain
11 USE module_configure
12 USE module_driver_constants
13 USE module_machine
14 USE module_tiles
15 USE module_dm
16 ! Mediation layer modules
17 ! Model layer modules
18 USE module_model_constants
19 USE module_small_step_em
20 USE module_em
21 USE module_big_step_utilities_em
22 USE module_bc
23 USE module_bc_em
24 USE module_solvedebug_em
25 USE module_physics_addtendc
26 USE module_diffusion_em
27 ! Registry generated module
28 USE module_state_description
29 USE module_radiation_driver
30 USE module_surface_driver
31 USE module_cumulus_driver
32 USE module_microphysics_driver
33 USE module_microphysics_zero_out
34 USE module_pbl_driver
35 #ifdef WRF_CHEM
36 USE module_input_chem_data
37 USE module_chem_utilities
38 #endif
39 #ifdef WRF_PLUS
40 USE module_check
41 #endif
42
43 IMPLICIT NONE
44
45 ! Input data.
46
47 TYPE(domain) , TARGET :: grid
48
49 ! Definitions of dummy arguments to this routine (generated from Registry).
50 #include <em_dummy_new_decl.inc>
51 #include "namelist_defines2.inc"
52
53 ! Structure that contains run-time configuration (namelist) data for domain
54 TYPE (grid_config_rec_type) , INTENT(IN) :: config_flags
55
56 ! Local data
57
58 INTEGER :: k_start , k_end, its, ite, jts, jte
59 INTEGER :: ids , ide , jds , jde , kds , kde , &
60 ims , ime , jms , jme , kms , kme , &
61 ips , ipe , jps , jpe , kps , kpe
62 INTEGER :: ij , iteration
63 INTEGER :: im , num_3d_m , ic , num_3d_c , is , num_3d_s
64 INTEGER :: loop
65 INTEGER :: ijds, ijde
66 INTEGER :: sz
67
68 LOGICAL :: specified_bdy, channel_bdy
69
70
71 ! storage for tendencies and decoupled state (generated from Registry)
72 #include <em_i1_decl.inc>
73
74 INTEGER :: rc
75 INTEGER :: number_of_small_timesteps, rk_step
76 INTEGER :: klevel,ijm,ijp,i,j,k,size1,size2 ! for prints/plots only
77 INTEGER :: idum1, idum2, dynamics_option
78
79 INTEGER :: rk_order, iwmax, jwmax, kwmax
80 REAL :: dt_rk, dts_rk, dtm, wmax
81 INTEGER :: l,kte,kk
82
83
84 ! These are used if -DDEREF_KLUDGE is compiled
85 ! see http://www.mmm.ucar.edu/wrf/WG2/topics/deref_kludge.htm
86 INTEGER :: sm31 , em31 , sm32 , em32 , sm33 , em33
87 INTEGER :: sm31x , em31x , sm32x , em32x , sm33x , em33x
88 INTEGER :: sm31y , em31y , sm32y , em32y , sm33y , em33y
89
90 ! Define benchmarking timers if -DBENCH is compiled
91 #include <bench_solve_em_def.h>
92
93 !----------------------
94 ! Executable statements
95 !----------------------
96
97 ! Needed by some comm layers, e.g. RSL. If needed, em_data_calls.inc is
98 ! generated from the registry. The definition of REGISTER_I1 allows
99 ! I1 data to be communicated in this routine if necessary.
100 #ifdef DM_PARALLEL
101 # define REGISTER_I1
102 # include <em_data_calls.inc>
103 #endif
104
105 !<DESCRIPTION>
106 !<pre>
107 ! solve_em is the main driver for advancing a grid a single timestep.
108 ! It is a mediation-layer routine -> DM and SM calls are made where
109 ! needed for parallel processing.
110 !
111 ! solve_em can integrate the equations using 2 time-integration methods
112 !
113 ! - 3rd order Runge-Kutta time integration (recommended)
114 !
115 ! - 2nd order Runge-Kutta time integration
116 !
117 ! The main sections of solve_em are
118 !
119 ! (1) Runge-Kutta (RK) loop
120 !
121 !(2) Non-timesplit physics (i.e., tendencies computed for updating
122 ! model state variables during the first RK sub-step (loop)
123 !
124 ! (3) Small (acoustic, sound) timestep loop - within the RK sub-steps
125 !
126 ! (4) scalar advance for moist and chem scalar variables (and TKE)
127 ! within the RK sub-steps.
128 !
129 ! (5) time-split physics (after the RK step), currently this includes
130 ! only microphysics
131 !
132 ! A more detailed description of these sections follows.
133 !</pre>
134 !</DESCRIPTION>
135
136 ! Initialize timers if compiled with -DBENCH
137 #include <bench_solve_em_init.h>
138
139 ! set runge-kutta solver (2nd or 3rd order)
140
141 dynamics_option = config_flags%rk_ord
142
143 ! Obtain dimension information stored in the grid data structure.
144 CALL get_ijk_from_grid ( grid , &
145 ids, ide, jds, jde, kds, kde, &
146 ims, ime, jms, jme, kms, kme, &
147 ips, ipe, jps, jpe, kps, kpe )
148
149 k_start = kps
150 k_end = kpe
151
152 ijds = min(ids, jds)
153 ijde = max(ide, jde)
154
155 num_3d_m = num_moist
156 num_3d_c = num_chem
157 num_3d_s = num_scalar
158
159 ! Compute these starting and stopping locations for each tile and number of tiles.
160 ! See: http://www.mmm.ucar.edu/wrf/WG2/topics/settiles
161 CALL set_tiles ( grid , ids , ide , jds , jde , ips , ipe , jps , jpe )
162 !print *,grid%julian,grid%julday,' grid%julian,grid%julday in solve'
163
164 grid%itimestep = grid%itimestep + 1
165
166 !**********************************************************************
167 !
168 ! LET US BEGIN.......
169 !
170 !<DESCRIPTION>
171 !<pre>
172 ! (1) RK integration loop is named the "Runge_Kutta_loop:"
173 !
174 ! Predictor-corrector type time integration.
175 ! Advection terms are evaluated at time t for the predictor step,
176 ! and advection is re-evaluated with the latest predicted value for
177 ! each succeeding time corrector step
178 !
179 ! 2nd order Runge Kutta (rk_order = 2):
180 ! Step 1 is taken to the midpoint predictor, step 2 is the full step.
181 !
182 ! 3rd order Runge Kutta (rk_order = 3):
183 ! Step 1 is taken from t to dt/3, step 2 is from t to dt/2,
184 ! and step 3 is from t to dt.
185 !
186 ! non-timesplit physics are evaluated during first RK step and
187 ! these physics tendencies are stored for use in each RK pass.
188 !</pre>
189 !</DESCRIPTION>
190 !**********************************************************************
191
192 #ifdef WRF_CHEM
193 !
194 ! prepare chem aerosols for advection before communication
195 !
196
197 kte=min(k_end,kde-1)
198 # ifdef DM_PARALLEL
199 if ( num_chem >= PARAM_FIRST_SCALAR ) then
200 !-----------------------------------------------------------------------
201 ! see above
202 !--------------------------------------------------------------
203 CALL wrf_debug ( 200 , ' call HALO_RK_CHEM' )
204 IF ( config_flags%h_mom_adv_order <= 4 ) THEN
205 # include "HALO_EM_CHEM_E_3.inc"
206 ELSE IF ( config_flags%h_mom_adv_order <= 6 ) THEN
207 # include "HALO_EM_CHEM_E_5.inc"
208 ELSE
209 WRITE(wrf_err_message,*)'solve_em: invalid h_mom_adv_order = ',config_flags%h_mom_adv_order
210 CALL wrf_error_fatal(TRIM(wrf_err_message))
211 ENDIF
212 endif
213 # endif
214 !--------------------------------------------------------------
215 #endif
216
217 rk_order = config_flags%rk_ord
218 IF (grid%time_step_sound == 0) THEN
219 ! auto-set option
220 ! This function will give 4 for 6*dx and 6 for 10*dx and returns even numbers only
221 grid%time_step_sound = max ( 2 * ( INT (300.*grid%dt/grid%dx-0.01) + 1 ), 4 )
222 WRITE(wrf_err_message,*)'dx, dt, time_step_sound=',grid%dx,grid%dt,grid%time_step_sound
223 CALL wrf_debug ( 50 , wrf_err_message )
224 ENDIF
225
226 grid%dts = grid%dt/float(grid%time_step_sound)
227
228 Runge_Kutta_loop: DO rk_step = 1, rk_order
229
230 ! Set the step size and number of small timesteps for
231 ! each part of the timestep
232
233 dtm = grid%dt
234 IF ( rk_order == 2 ) THEN ! 2nd order Runge-Kutta timestep
235
236 IF ( rk_step == 1) THEN
237 dt_rk = 0.5*grid%dt
238 dts_rk = grid%dts
239 number_of_small_timesteps = grid%time_step_sound/2
240 ELSE
241 dt_rk = grid%dt
242 dts_rk = grid%dts
243 number_of_small_timesteps = grid%time_step_sound
244 ENDIF
245
246 ELSE IF ( rk_order == 3 ) THEN ! third order Runge-Kutta
247
248 IF ( rk_step == 1) THEN
249 dt_rk = grid%dt/3.
250 dts_rk = dt_rk
251 number_of_small_timesteps = 1
252 ELSE IF (rk_step == 2) THEN
253 dt_rk = 0.5*grid%dt
254 dts_rk = grid%dts
255 number_of_small_timesteps = grid%time_step_sound/2
256 ELSE
257 dt_rk = grid%dt
258 dts_rk = grid%dts
259 number_of_small_timesteps = grid%time_step_sound
260 ENDIF
261
262 ELSE
263
264 write(wrf_err_message,*)' unknown solver, error exit for dynamics_option = ',dynamics_option
265 CALL wrf_error_fatal( wrf_err_message )
266
267 END IF
268
269 !
270 ! Time level t is in the *_2 variable in the first part
271 ! of the step, and in the *_1 variable after the predictor.
272 ! the latest predicted values are stored in the *_2 variables.
273 !
274 CALL wrf_debug ( 200 , ' call rk_step_prep ' )
275
276 BENCH_START(step_prep_tim)
277 !$OMP PARALLEL DO &
278 !$OMP PRIVATE ( ij )
279
280 DO ij = 1 , grid%num_tiles
281
282 CALL rk_step_prep ( config_flags, rk_step, &
283 grid%em_u_2, grid%em_v_2, grid%em_w_2, grid%em_t_2, grid%em_ph_2, grid%em_mu_2, &
284 moist, &
285 grid%em_ru, grid%em_rv, grid%em_rw, grid%em_ww, grid%em_php, grid%em_alt, grid%em_muu, grid%em_muv, &
286 grid%em_mub, grid%em_mut, grid%em_phb, grid%em_pb, grid%em_p, grid%em_al, grid%em_alb, &
287 cqu, cqv, cqw, &
288 grid%msfu, grid%msfv, grid%msft, &
289 grid%em_fnm, grid%em_fnp, grid%em_dnw, grid%rdx, grid%rdy, &
290 num_3d_m, &
291 ids, ide, jds, jde, kds, kde, &
292 ims, ime, jms, jme, kms, kme, &
293 grid%i_start(ij), grid%i_end(ij), &
294 grid%j_start(ij), grid%j_end(ij), &
295 k_start, k_end )
296
297 END DO
298 !$OMP END PARALLEL DO
299 BENCH_END(step_prep_tim)
300
301 #ifdef DM_PARALLEL
302 !-----------------------------------------------------------------------
303 ! Stencils for patch communications (WCS, 29 June 2001)
304 ! Note: the small size of this halo exchange reflects the
305 ! fact that we are carrying the uncoupled variables
306 ! as state variables in the mass coordinate model, as
307 ! opposed to the coupled variables as in the height
308 ! coordinate model.
309 !
310 ! * * * * *
311 ! * * * * * * * * *
312 ! * + * * + * * * + * *
313 ! * * * * * * * * *
314 ! * * * * *
315 !
316 ! 3D variables - note staggering! grid%em_ru(X), grid%em_rv(Y), grid%em_ww(Z), grid%em_php(Z)
317 !
318 !j grid%em_ru x
319 !j grid%em_rv x
320 !j grid%em_ww x
321 !j grid%em_php x
322 !j grid%em_alt x
323 !j grid%em_ph_2 x
324 !j grid%em_phb x
325 !
326 ! the following are 2D (xy) variables
327 !
328 !j grid%em_muu x
329 !j grid%em_muv x
330 !j grid%em_mut x
331 !--------------------------------------------------------------
332 # include "HALO_EM_A.inc"
333 #endif
334
335 ! set boundary conditions on variables
336 ! from big_step_prep for use in big_step_proc
337
338 #ifdef DM_PARALLEL
339 # include "PERIOD_BDY_EM_A.inc"
340 #endif
341
342 ! CALL set_tiles ( grid , ids , ide , jds , jde , ips-1 , ipe+1 , jps-1 , jpe+1 )
343
344 BENCH_START(set_phys_bc_tim)
345 !$OMP PARALLEL DO &
346 !$OMP PRIVATE ( ij )
347
348 DO ij = 1 , grid%num_tiles
349
350 CALL wrf_debug ( 200 , ' call rk_phys_bc_dry_1' )
351
352 CALL rk_phys_bc_dry_1( config_flags, grid%em_ru, grid%em_rv, grid%em_rw, grid%em_ww, &
353 grid%em_muu, grid%em_muv, grid%em_mut, grid%em_php, grid%em_alt, grid%em_p, &
354 ids, ide, jds, jde, kds, kde, &
355 ims, ime, jms, jme, kms, kme, &
356 ips, ipe, jps, jpe, kps, kpe, &
357 grid%i_start(ij), grid%i_end(ij), &
358 grid%j_start(ij), grid%j_end(ij), &
359 k_start, k_end )
360 !TBH: need this 2nd timestep and later
361 CALL set_physical_bc3d( grid%em_al, 'grid%em_p', config_flags, &
362 ids, ide, jds, jde, kds, kde, &
363 ims, ime, jms, jme, kms, kme, &
364 ips, ipe, jps, jpe, kps, kpe, &
365 grid%i_start(ij), grid%i_end(ij), &
366 grid%j_start(ij), grid%j_end(ij), &
367 k_start , k_end )
368 CALL set_physical_bc3d( grid%em_ph_2, 'w', config_flags, &
369 ids, ide, jds, jde, kds, kde, &
370 ims, ime, jms, jme, kms, kme, &
371 ips, ipe, jps, jpe, kps, kpe, &
372 grid%i_start(ij), grid%i_end(ij), &
373 grid%j_start(ij), grid%j_end(ij), &
374 k_start, k_end )
375
376 END DO
377 !$OMP END PARALLEL DO
378 BENCH_END(set_phys_bc_tim)
379
380 rk_step_is_one : IF (rk_step == 1) THEN ! only need to initialize diffusion tendencies
381
382 ! initialize all tendencies to zero in order to update physics
383 ! tendencies first (separate from dry dynamics).
384
385 BENCH_START(init_zero_tend_tim)
386 !$OMP PARALLEL DO &
387 !$OMP PRIVATE ( ij )
388
389 DO ij = 1 , grid%num_tiles
390
391 CALL wrf_debug ( 200 , ' call init_zero_tendency' )
392 #ifdef WRF_PLUS
393 if(TEST_init_zero_tendency) THEN
394 CALL t_init_zero_tendency ( ru_tendf, rv_tendf, rw_tendf, &
395 ph_tendf, t_tendf, tke_tend, &
396 moist_tend,chem_tend,scalar_tend, &
397 num_3d_m,num_3d_c,num_3d_s,rk_step, &
398 ids, ide, jds, jde, kds, kde, &
399 ims, ime, jms, jme, kms, kme, &
400 grid%i_start(ij), grid%i_end(ij), &
401 grid%j_start(ij), grid%j_end(ij), &
402 k_start, k_end )
403 END IF
404 #endif
405 CALL init_zero_tendency ( ru_tendf, rv_tendf, rw_tendf, &
406 ph_tendf, t_tendf, tke_tend, &
407 moist_tend,chem_tend,scalar_tend, &
408 num_3d_m,num_3d_c,num_3d_s, &
409 rk_step, &
410 ids, ide, jds, jde, kds, kde, &
411 ims, ime, jms, jme, kms, kme, &
412 grid%i_start(ij), grid%i_end(ij), &
413 grid%j_start(ij), grid%j_end(ij), &
414 k_start, k_end )
415
416 END DO
417 !$OMP END PARALLEL DO
418 BENCH_END(init_zero_tend_tim)
419
420 #ifdef DM_PARALLEL
421 # include "HALO_EM_PHYS_A.inc"
422 #endif
423
424 !<DESCRIPTION>
425 !<pre>
426 !(2) The non-timesplit physics begins with a call to "phy_prep"
427 ! (which computes some diagnostic variables such as temperature,
428 ! pressure, u and v at p points, etc). This is followed by
429 ! calls to the physics drivers:
430 !
431 ! radiation,
432 ! surface,
433 ! pbl,
434 ! cumulus,
435 ! 3D TKE and mixing.
436 !</pre>
437 !</DESCRIPTION>
438
439
440 BENCH_START(phy_prep_tim)
441 !$OMP PARALLEL DO &
442 !$OMP PRIVATE ( ij )
443 DO ij = 1 , grid%num_tiles
444
445 CALL wrf_debug ( 200 , ' call phy_prep' )
446 #ifdef WRF_PLUS
447 if(TEST_phy_prep) THEN
448 CALL t_phy_prep ( config_flags, &
449 grid%em_mut, grid%em_u_2, grid%em_v_2, grid%em_p, grid%em_pb, grid%em_alt, &
450 grid%em_ph_2, grid%em_phb, grid%em_t_2, grid%tsk, moist, num_3d_m, &
451 mu_3d, rho, &
452 th_phy, p_phy, pi_phy, u_phy, v_phy, &
453 p8w, t_phy, t8w, grid%em_z, z_at_w, &
454 dz8w, grid%em_fnm, grid%em_fnp, &
455 grid%rthraten, &
456 grid%rthblten, grid%rublten, grid%rvblten, &
457 grid%rqvblten, grid%rqcblten, grid%rqiblten, &
458 grid%rthcuten, grid%rqvcuten, grid%rqccuten, &
459 grid%rqrcuten, grid%rqicuten, grid%rqscuten, &
460 grid%rthften, grid%rqvften, &
461 ids, ide, jds, jde, kds, kde, &
462 ims, ime, jms, jme, kms, kme, &
463 grid%i_start(ij), grid%i_end(ij), &
464 grid%j_start(ij), grid%j_end(ij), &
465 k_start, k_end )
466 END IF
467 #endif
468 CALL phy_prep ( config_flags, &
469 grid%em_mut, grid%em_u_2, grid%em_v_2, grid%em_p, grid%em_pb, grid%em_alt, &
470 grid%em_ph_2, grid%em_phb, grid%em_t_2, grid%tsk, moist, num_3d_m, &
471 mu_3d, rho, &
472 th_phy, p_phy, pi_phy, u_phy, v_phy, &
473 p8w, t_phy, t8w, grid%em_z, z_at_w, &
474 dz8w, grid%em_fnm, grid%em_fnp, &
475 grid%rthraten, &
476 grid%rthblten, grid%rublten, grid%rvblten, &
477 grid%rqvblten, grid%rqcblten, grid%rqiblten, &
478 grid%rthcuten, grid%rqvcuten, grid%rqccuten, &
479 grid%rqrcuten, grid%rqicuten, grid%rqscuten, &
480 grid%rthften, grid%rqvften, &
481 ids, ide, jds, jde, kds, kde, &
482 ims, ime, jms, jme, kms, kme, &
483 grid%i_start(ij), grid%i_end(ij), &
484 grid%j_start(ij), grid%j_end(ij), &
485 k_start, k_end )
486 ENDDO
487 !$OMP END PARALLEL DO
488
489 BENCH_END(phy_prep_tim)
490
491 ! physics to implement
492
493 ! CALL set_tiles ( grid , ids , ide-1 , jds , jde-1 ips , ipe , jps , jpe )
494
495 ! Open MP loops are in physics drivers
496 ! radiation
497
498 CALL wrf_debug ( 200 , ' call radiation_driver' )
499 BENCH_START(rad_driver_tim)
500 CALL radiation_driver( &
501 & ACFRCV=grid%acfrcv ,ACFRST=grid%acfrst ,ALBEDO=grid%albedo &
502 & ,CFRACH=grid%cfrach ,CFRACL=grid%cfracl ,CFRACM=grid%cfracm &
503 & ,CUPPT=grid%cuppt ,CZMEAN=grid%czmean ,DT=grid%dt &
504 & ,DZ8W=dz8w ,EMISS=grid%emiss ,GLW=grid%glw &
505 & ,GMT=grid%gmt ,GSW=grid%gsw ,HBOT=grid%hbot &
506 & ,HTOP=grid%htop ,HBOTR=grid%hbotr, HTOPR=grid%htopr ,ICLOUD=config_flags%icloud &
507 & ,ITIMESTEP=grid%itimestep,JULDAY=grid%julday, JULIAN=grid%julian &
508 & ,JULYR=grid%julyr ,LW_PHYSICS=config_flags%ra_lw_physics &
509 & ,NCFRCV=grid%ncfrcv ,NCFRST=grid%ncfrst ,NPHS=1 &
510 & ,P8W=p8w ,P=p_phy ,PI=pi_phy &
511 & ,RADT=grid%radt ,RA_CALL_OFFSET=grid%ra_call_offset &
512 & ,RHO=rho ,RLWTOA=grid%rlwtoa &
513 & ,RSWTOA=grid%rswtoa ,RTHRATEN=grid%rthraten &
514 & ,RTHRATENLW=grid%rthratenlw &
515 & ,RTHRATENSW=grid%rthratensw ,SNOW=grid%snow &
516 & ,STEPRA=grid%stepra ,SWDOWN=grid%swdown ,SWDOWNC=grid%swdownc &
517 & ,SW_PHYSICS=config_flags%ra_sw_physics ,T8W=t8w &
518 & ,T=t_phy ,TAUCLDC=grid%taucldc ,TAUCLDI=grid%taucldi &
519 & ,TSK=grid%tsk ,VEGFRA=grid%vegfra ,WARM_RAIN=grid%warm_rain &
520 & ,XICE=grid%xice &
521 & ,XLAND=grid%xland ,XLAT=grid%xlat ,XLONG=grid%xlong &
522 & ,Z=grid%em_z &
523 & ,LEVSIZ=grid%levsiz, N_OZMIXM=num_ozmixm &
524 & ,N_AEROSOLC=num_aerosolc &
525 & ,PAERLEV=grid%paerlev &
526 & ,CAM_ABS_DIM1=grid%cam_abs_dim1, CAM_ABS_DIM2=grid%cam_abs_dim2 &
527 & ,CAM_ABS_FREQ_S=grid%cam_abs_freq_s &
528 & ,XTIME=grid%xtime &
529 ! indexes
530 & ,IDS=ids,IDE=ide, JDS=jds,JDE=jde, KDS=kds,KDE=kde &
531 & ,IMS=ims,IME=ime, JMS=jms,JME=jme, KMS=kms,KME=kme &
532 & ,i_start=grid%i_start,i_end=min(grid%i_end, ide-1) &
533 & ,j_start=grid%j_start,j_end=min(grid%j_end, jde-1) &
534 & ,kts=k_start, kte=min(k_end,kde-1) &
535 & ,num_tiles=grid%num_tiles &
536 ! Optional
537 & , CLDFRA=grid%cldfra &
538 & , PB=grid%em_pb &
539 & , F_ICE_PHY=grid%f_ice_phy,F_RAIN_PHY=grid%f_rain_phy &
540 & , QV=moist(ims,kms,jms,P_QV), F_QV=F_QV &
541 & , QC=moist(ims,kms,jms,P_QC), F_QC=F_QC &
542 & , QR=moist(ims,kms,jms,P_QR), F_QR=F_QR &
543 & , QI=moist(ims,kms,jms,P_QI), F_QI=F_QI &
544 & , QS=moist(ims,kms,jms,P_QS), F_QS=F_QS &
545 & , QG=moist(ims,kms,jms,P_QG), F_QG=F_QG &
546 #ifdef ACFLUX
547 & ,ACSWUPT=acswupt ,ACSWUPTC=acswuptc &
548 & ,ACSWDNT=acswdnt ,ACSWDNTC=acswdntc &
549 & ,ACSWUPB=acswupb ,ACSWUPBC=acswupbc &
550 & ,ACSWDNB=acswdnb ,ACSWDNBC=acswdnbc &
551 & ,ACLWUPT=aclwupt ,ACLWUPTC=aclwuptc &
552 & ,ACLWDNT=aclwdnt ,ACLWDNTC=aclwdntc &
553 & ,ACLWUPB=aclwupb ,ACLWUPBC=aclwupbc &
554 & ,ACLWDNB=aclwdnb ,ACLWDNBC=aclwdnbc &
555 & ,SWUPT=swupt ,SWUPTC=swuptc &
556 & ,SWDNT=swdnt ,SWDNTC=swdntc &
557 & ,SWUPB=swupb ,SWUPBC=swupbc &
558 & ,SWDNB=swdnb ,SWDNBC=swdnbc &
559 & ,LWUPT=lwupt ,LWUPTC=lwuptc &
560 & ,LWDNT=lwdnt ,LWDNTC=lwdntc &
561 & ,LWUPB=lwupb ,LWUPBC=lwupbc &
562 & ,LWDNB=lwdnb ,LWDNBC=lwdnbc &
563 #endif
564 & ,LWCF=grid%lwcf &
565 & ,SWCF=grid%swcf &
566 & ,OLR=grid%olr &
567 & ,OZMIXM=grid%ozmixm, PIN=grid%pin &
568 & ,M_PS_1=grid%m_ps_1, M_PS_2=grid%m_ps_2, AEROSOLC_1=grid%aerosolc_1 &
569 & ,AEROSOLC_2=grid%aerosolc_2, M_HYBI0=grid%m_hybi &
570 & ,ABSTOT=grid%abstot, ABSNXT=grid%absnxt, EMSTOT=grid%emstot &
571 #ifdef WRF_CHEM
572 & ,PM2_5_DRY=pm2_5_dry, PM2_5_WATER=pm2_5_water &
573 & ,PM2_5_DRY_EC=pm2_5_dry_ec &
574 & ,TAUAER300=tauaer1, TAUAER400=tauaer2, TAUAER600=tauaer3, TAUAER999=tauaer4 & ! jcb
575 & ,GAER300=gaer1, GAER400=gaer2, GAER600=gaer3, GAER999=gaer4 & ! jcb
576 & ,WAER300=waer1, WAER400=waer2, WAER600=waer3, WAER999=waer4 & ! jcb
577 #endif
578 & )
579
580 BENCH_END(rad_driver_tim)
581
582 !********* Surface driver
583 ! surface
584
585 BENCH_START(surf_driver_tim)
586
587 CALL wrf_debug ( 200 , ' call surface_driver' )
588 CALL surface_driver( &
589 & ACSNOM=grid%acsnom ,ACSNOW=grid%acsnow ,AKHS=grid%akhs &
590 & ,AKMS=grid%akms ,ALBBCK=grid%albbck ,ALBEDO=grid%albedo &
591 & ,BR=br ,CANWAT=grid%canwat ,CHKLOWQ=chklowq &
592 & ,CT=grid%ct ,DT=grid%dt ,DX=grid%dx &
593 & ,DZ8W=dz8w ,DZS=grid%dzs ,FLHC=grid%flhc &
594 & ,FLQC=grid%flqc ,GLW=grid%glw ,GRDFLX=grid%grdflx &
595 & ,GSW=grid%gsw ,GZ1OZ0=gz1oz0 ,HFX=grid%hfx &
596 & ,HT=grid%ht ,IFSNOW=config_flags%ifsnow ,ISFFLX=config_flags%isfflx &
597 & ,ISLTYP=grid%isltyp ,ITIMESTEP=grid%itimestep &
598 & ,IVGTYP=grid%ivgtyp ,LH=grid%lh ,LOWLYR=grid%lowlyr &
599 & ,MAVAIL=grid%mavail ,NUM_SOIL_LAYERS=config_flags%num_soil_layers &
600 & ,P8W=p8w ,PBLH=grid%pblh ,PI_PHY=pi_phy &
601 & ,PSFC=grid%psfc ,PSHLTR=pshltr ,PSIH=psih &
602 & ,PSIM=psim ,P_PHY=p_phy ,Q10=q10 &
603 & ,Q2=grid%q2 ,QFX=grid%qfx ,QSFC=grid%qsfc &
604 & ,QSHLTR=qshltr ,QZ0=grid%qz0 ,RAINCV=grid%raincv &
605 & ,RA_LW_PHYSICS=config_flags%ra_lw_physics ,RHO=rho &
606 & ,RMOL=grid%rmol ,SFCEVP=grid%sfcevp ,SFCEXC=grid%sfcexc &
607 & ,SFCRUNOFF=grid%sfcrunoff &
608 & ,SF_SFCLAY_PHYSICS=config_flags%sf_sfclay_physics &
609 & ,SF_SURFACE_PHYSICS=config_flags%sf_surface_physics ,SH2O=grid%sh2o &
610 & ,SHDMAX=grid%shdmax ,SHDMIN=grid%shdmin ,SMOIS=grid%smois &
611 & ,SMSTAV=grid%smstav ,SMSTOT=grid%smstot ,SNOALB=grid%snoalb &
612 & ,SNOW=grid%snow ,SNOWC=grid%snowc ,SNOWH=grid%snowh &
613 & ,SST=grid%sst ,SST_UPDATE=grid%sst_update &
614 & ,STEPBL=grid%stepbl ,TH10=th10 ,TH2=grid%th2 &
615 & ,THZ0=grid%thz0 ,TH_PHY=th_phy ,TKE_MYJ=grid%tke_myj &
616 & ,TMN=grid%tmn ,TSHLTR=tshltr ,TSK=grid%tsk &
617 & ,TSLB=grid%tslb ,T_PHY=t_phy ,U10=grid%u10 &
618 & ,UDRUNOFF=grid%udrunoff ,UST=grid%ust ,UZ0=grid%uz0 &
619 & ,U_FRAME=grid%u_frame ,U_PHY=u_phy ,V10=grid%v10 &
620 & ,VEGFRA=grid%vegfra ,VZ0=grid%vz0 ,V_FRAME=grid%v_frame &
621 & ,V_PHY=v_phy ,WARM_RAIN=grid%warm_rain &
622 & ,WSPD=wspd ,XICE=grid%xice ,XLAND=grid%xland &
623 & ,Z0=grid%z0 ,Z=grid%em_z ,ZNT=grid%znt &
624 & ,ZS=grid%zs &
625 ! Indexes
626 & ,IDS=ids,IDE=ide, JDS=jds,JDE=jde, KDS=kds,KDE=kde &
627 & ,IMS=ims,IME=ime, JMS=jms,JME=jme, KMS=kms,KME=kme &
628 & , I_START=grid%i_start,I_END=min(grid%i_end, ide-1) &
629 & , J_START=grid%j_start,J_END=min(grid%j_end, jde-1) &
630 & , KTS=k_start, KTE=min(k_end,kde-1) &
631 & , NUM_TILES=grid%num_tiles &
632 ! Optional
633 & ,QV_CURR=moist(ims,kms,jms,P_QV), F_QV=F_QV &
634 & ,QC_CURR=moist(ims,kms,jms,P_QC), F_QC=F_QC &
635 & ,QR_CURR=moist(ims,kms,jms,P_QR), F_QR=F_QR &
636 & ,QI_CURR=moist(ims,kms,jms,P_QI), F_QI=F_QI &
637 & ,QS_CURR=moist(ims,kms,jms,P_QS), F_QS=F_QS &
638 & ,QG_CURR=moist(ims,kms,jms,P_QG), F_QG=F_QG &
639 & ,CAPG=grid%capg, EMISS=grid%emiss, HOL=hol,MOL=grid%mol &
640 & ,RAINBL=grid%rainbl &
641 & ,RAINNCV=grid%rainncv,REGIME=regime,T2=grid%t2,THC=grid%thc &
642 & ,QSG=grid%qsg,QVG=grid%qvg,QCG=grid%qcg,SOILT1=grid%soilt1,TSNAV=grid%tsnav & ! ruc lsm
643 & ,SMFR3D=grid%smfr3d,KEEPFR3DFLAG=grid%keepfr3dflag & ! ruc lsm
644 & )
645 BENCH_END(surf_driver_tim)
646
647 !*********
648 ! pbl
649
650 CALL wrf_debug ( 200 , ' call pbl_driver' )
651 BENCH_START(pbl_driver_tim)
652 CALL pbl_driver( &
653 & AKHS=grid%akhs ,AKMS=grid%akms &
654 & ,BL_PBL_PHYSICS=config_flags%bl_pbl_physics &
655 & ,BR=br ,CHKLOWQ=chklowq ,CT=grid%ct &
656 & ,DT=grid%dt ,DX=grid%dx ,DZ8W=dz8w &
657 & ,EL_MYJ=grid%el_myj ,EXCH_H=grid%exch_h ,GRDFLX=grid%grdflx &
658 & ,GZ1OZ0=gz1oz0 ,HFX=grid%hfx ,HT=grid%ht &
659 & ,ITIMESTEP=grid%itimestep ,KPBL=grid%kpbl &
660 & ,LH=grid%lh ,LOWLYR=grid%lowlyr ,P8W=p8w &
661 & ,PBLH=grid%pblh ,PI_PHY=pi_phy ,PSIH=psih &
662 & ,PSIM=psim ,P_PHY=p_phy ,QFX=grid%qfx &
663 & ,QSFC=grid%qsfc ,QZ0=grid%qz0 &
664 & ,RA_LW_PHYSICS=config_flags%ra_lw_physics &
665 & ,RHO=rho ,RQCBLTEN=grid%rqcblten ,RQIBLTEN=grid%rqiblten &
666 & ,RQVBLTEN=grid%rqvblten ,RTHBLTEN=grid%rthblten ,RUBLTEN=grid%rublten &
667 & ,RVBLTEN=grid%rvblten ,SNOW=grid%snow ,STEPBL=grid%stepbl &
668 & ,THZ0=grid%thz0 ,TH_PHY=th_phy ,TKE_MYJ=grid%tke_myj &
669 & ,TSK=grid%tsk ,T_PHY=t_phy ,UST=grid%ust &
670 & ,UZ0=grid%uz0 ,U_FRAME=grid%u_frame ,U_PHY=u_phy &
671 & ,VZ0=grid%vz0 ,V_FRAME=grid%v_frame ,V_PHY=v_phy &
672 & ,WARM_RAIN=grid%warm_rain ,WSPD=wspd &
673 & ,XICE=grid%xice ,XLAND=grid%xland ,Z=grid%em_z &
674 & ,ZNT=grid%znt &
675 & ,IDS=ids,IDE=ide, JDS=jds,JDE=jde, KDS=kds,KDE=kde &
676 & ,IMS=ims,IME=ime, JMS=jms,JME=jme, KMS=kms,KME=kme &
677 & ,I_START=grid%i_start,I_END=min(grid%i_end, ide-1) &
678 & ,J_START=grid%j_start,J_END=min(grid%j_end, jde-1) &
679 & ,KTS=k_start, KTE=min(k_end,kde-1) &
680 & ,NUM_TILES=grid%num_tiles &
681 ! optional
682 & ,QV_CURR=moist(ims,kms,jms,P_QV), F_QV=F_QV &
683 & ,QC_CURR=moist(ims,kms,jms,P_QC), F_QC=F_QC &
684 & ,QR_CURR=moist(ims,kms,jms,P_QR), F_QR=F_QR &
685 & ,QI_CURR=moist(ims,kms,jms,P_QI), F_QI=F_QI &
686 & ,QS_CURR=moist(ims,kms,jms,P_QS), F_QS=F_QS &
687 & ,QG_CURR=moist(ims,kms,jms,P_QG), F_QG=F_QG &
688 & ,HOL=HOL, MOL=grid%mol, REGIME=REGIME &
689 & )
690
691 BENCH_END(pbl_driver_tim)
692
693 ! cumulus para.
694
695 CALL wrf_debug ( 200 , ' call cumulus_driver' )
696
697 BENCH_START(cu_driver_tim)
698 CALL cumulus_driver( &
699 ! Prognostic variables
700 & U=u_phy ,V=v_phy ,TH=th_phy ,T=t_phy &
701 & ,W=grid%em_w_2 ,P=p_phy ,PI=pi_phy ,RHO=rho &
702 ! Other arguments
703 & ,ITIMESTEP=grid%itimestep ,DT=grid%dt ,DX=grid%dx &
704 & ,RAINC=grid%rainc ,RAINCV=grid%raincv ,NCA=grid%nca &
705 & ,HTOP=grid%cutop ,HBOT=grid%cubot ,KPBL=grid%kpbl &
706 & ,DZ8W=dz8w ,P8W=p8w &
707 & ,W0AVG=grid%w0avg ,STEPCU=grid%stepcu &
708 & ,CLDEFI=grid%cldefi ,LOWLYR=grid%lowlyr ,XLAND=grid%xland &
709 & ,APR_GR=grid%apr_gr ,APR_W=grid%apr_w ,APR_MC=grid%apr_mc &
710 & ,APR_ST=grid%apr_st ,APR_AS=grid%apr_as ,APR_CAPMA=grid%apr_capma &
711 & ,APR_CAPME=grid%apr_capme ,APR_CAPMI=grid%apr_capmi &
712 & ,MASS_FLUX=grid%mass_flux ,XF_ENS=grid%xf_ens &
713 & ,PR_ENS=grid%pr_ens ,HT=grid%ht &
714 & ,ENSDIM=config_flags%ensdim ,MAXIENS=config_flags%maxiens ,MAXENS=config_flags%maxens &
715 & ,MAXENS2=config_flags%maxens2 ,MAXENS3=config_flags%maxens3 &
716 & ,CU_ACT_FLAG=cu_act_flag ,WARM_RAIN=grid%warm_rain &
717 ! Selection flag
718 & ,CU_PHYSICS=config_flags%cu_physics &
719 ! Dimension arguments
720 & ,IDS=ids,IDE=ide, JDS=jds,JDE=jde, KDS=kds,KDE=kde &
721 & ,IMS=ims,IME=ime, JMS=jms,JME=jme, KMS=kms,KME=kme &
722 & ,I_START=grid%i_start,I_END=min(grid%i_end, ide-1) &
723 & ,J_START=grid%j_start,J_END=min(grid%j_end, jde-1) &
724 & ,KTS=k_start, KTE=min(k_end,kde-1) &
725 & ,NUM_TILES=grid%num_tiles &
726 ! Moisture tendency arguments
727 & ,RQVCUTEN=grid%rqvcuten , RQCCUTEN=grid%rqccuten &
728 & ,RQSCUTEN=grid%rqscuten , RQICUTEN=grid%rqicuten &
729 & ,RQRCUTEN=grid%rqrcuten , RQVBLTEN=grid%rqvblten &
730 & ,RQVFTEN=grid%rqvften &
731 ! Other tendency arguments
732 & ,RTHRATEN=grid%rthraten , RTHBLTEN=grid%rthblten &
733 & ,RTHCUTEN=grid%rthcuten , RTHFTEN=grid%rthften &
734 ! Moisture tracer arguments
735 & ,QV_CURR=moist(ims,kms,jms,P_QV), F_QV=F_QV &
736 & ,QC_CURR=moist(ims,kms,jms,P_QC), F_QC=F_QC &
737 & ,QR_CURR=moist(ims,kms,jms,P_QR), F_QR=F_QR &
738 & ,QI_CURR=moist(ims,kms,jms,P_QI), F_QI=F_QI &
739 & ,QS_CURR=moist(ims,kms,jms,P_QS), F_QS=F_QS &
740 & ,QG_CURR=moist(ims,kms,jms,P_QG), F_QG=F_QG &
741 & )
742 BENCH_END(cu_driver_tim)
743
744 ! calculate_phy_tend
745
746 BENCH_START(cal_phy_tend)
747 !$OMP PARALLEL DO &
748 !$OMP PRIVATE ( ij )
749
750 DO ij = 1 , grid%num_tiles
751
752 CALL wrf_debug ( 200 , ' call calculate_phy_tend' )
753 CALL calculate_phy_tend (config_flags,grid%em_mut,pi_phy, &
754 grid%rthraten, &
755 grid%rublten,grid%rvblten,grid%rthblten, &
756 grid%rqvblten,grid%rqcblten,grid%rqiblten, &
757 grid%rthcuten,grid%rqvcuten,grid%rqccuten,grid%rqrcuten, &
758 grid%rqicuten,grid%rqscuten, &
759 ids,ide, jds,jde, kds,kde, &
760 ims,ime, jms,jme, kms,kme, &
761 grid%i_start(ij), min(grid%i_end(ij),ide-1), &
762 grid%j_start(ij), min(grid%j_end(ij),jde-1), &
763 k_start , min(k_end,kde-1) )
764
765 ENDDO
766 !$OMP END PARALLEL DO
767 BENCH_END(cal_phy_tend)
768
769 ! tke diffusion
770
771 IF(config_flags%diff_opt .eq. 2 .OR. config_flags%diff_opt .eq. 1) THEN
772
773 BENCH_START(comp_diff_metrics_tim)
774 !$OMP PARALLEL DO &
775 !$OMP PRIVATE ( ij )
776
777 DO ij = 1 , grid%num_tiles
778
779 CALL wrf_debug ( 200 , ' call compute_diff_metrics ' )
780 CALL compute_diff_metrics ( config_flags, grid%em_ph_2, grid%em_phb, grid%em_z, grid%em_rdz, grid%em_rdzw, &
781 grid%em_zx, grid%em_zy, grid%rdx, grid%rdy, &
782 ids, ide, jds, jde, kds, kde, &
783 ims, ime, jms, jme, kms, kme, &
784 grid%i_start(ij), grid%i_end(ij), &
785 grid%j_start(ij), grid%j_end(ij), &
786 k_start , k_end )
787 ENDDO
788 !$OMP END PARALLEL DO
789 BENCH_END(comp_diff_metrics_tim)
790
791 #ifdef DM_PARALLEL
792 # include "PERIOD_BDY_EM_A1.inc"
793 #endif
794
795 BENCH_START(tke_diff_bc_tim)
796 DO ij = 1 , grid%num_tiles
797
798 CALL wrf_debug ( 200 , ' call bc for diffusion_metrics ' )
799 CALL set_physical_bc3d( grid%em_rdzw , 'w', config_flags, &
800 ids, ide, jds, jde, kds, kde, &
801 ims, ime, jms, jme, kms, kme, &
802 ips, ipe, jps, jpe, kps, kpe, &
803 grid%i_start(ij), grid%i_end(ij), &
804 grid%j_start(ij), grid%j_end(ij), &
805 k_start , k_end )
806 CALL set_physical_bc3d( grid%em_rdz , 'w', config_flags, &
807 ids, ide, jds, jde, kds, kde, &
808 ims, ime, jms, jme, kms, kme, &
809 ips, ipe, jps, jpe, kps, kpe, &
810 grid%i_start(ij), grid%i_end(ij), &
811 grid%j_start(ij), grid%j_end(ij), &
812 k_start , k_end )
813 CALL set_physical_bc3d( grid%em_z , 'w', config_flags, &
814 ids, ide, jds, jde, kds, kde, &
815 ims, ime, jms, jme, kms, kme, &
816 ips, ipe, jps, jpe, kps, kpe, &
817 grid%i_start(ij), grid%i_end(ij), &
818 grid%j_start(ij), grid%j_end(ij), &
819 k_start , k_end )
820 CALL set_physical_bc3d( grid%em_zx , 'w', config_flags, &
821 ids, ide, jds, jde, kds, kde, &
822 ims, ime, jms, jme, kms, kme, &
823 ips, ipe, jps, jpe, kps, kpe, &
824 grid%i_start(ij), grid%i_end(ij), &
825 grid%j_start(ij), grid%j_end(ij), &
826 k_start , k_end )
827 CALL set_physical_bc3d( grid%em_zy , 'w', config_flags, &
828 ids, ide, jds, jde, kds, kde, &
829 ims, ime, jms, jme, kms, kme, &
830 ips, ipe, jps, jpe, kps, kpe, &
831 grid%i_start(ij), grid%i_end(ij), &
832 grid%j_start(ij), grid%j_end(ij), &
833 k_start , k_end )
834
835 ENDDO
836 BENCH_END(tke_diff_bc_tim)
837
838 #ifdef DM_PARALLEL
839 # include "HALO_EM_TKE_C.inc"
840 #endif
841
842 BENCH_START(deform_div_tim)
843
844 !$OMP PARALLEL DO &
845 !$OMP PRIVATE ( ij )
846
847 DO ij = 1 , grid%num_tiles
848
849 CALL wrf_debug ( 200 , ' call cal_deform_and_div' )
850 CALL cal_deform_and_div ( config_flags,grid%em_u_2,grid%em_v_2,grid%em_w_2,grid%div, &
851 grid%defor11,grid%defor22,grid%defor33,grid%defor12, &
852 grid%defor13,grid%defor23, &
853 grid%u_base, grid%v_base,grid%msfu,grid%msfv,grid%msft, &
854 grid%rdx, grid%rdy, grid%em_dn, grid%em_dnw, grid%em_rdz, grid%em_rdzw, &
855 grid%em_fnm,grid%em_fnp,grid%cf1,grid%cf2,grid%cf3,grid%em_zx,grid%em_zy, &
856 ids, ide, jds, jde, kds, kde, &
857 ims, ime, jms, jme, kms, kme, &
858 grid%i_start(ij), grid%i_end(ij), &
859 grid%j_start(ij), grid%j_end(ij), &
860 k_start , k_end )
861 ENDDO
862 !$OMP END PARALLEL DO
863 BENCH_END(deform_div_tim)
864
865
866 #ifdef DM_PARALLEL
867 # include "HALO_EM_TKE_D.inc"
868 #endif
869
870
871 ! calculate tke, kmh, and kmv
872
873 BENCH_START(calc_tke_tim)
874 !$OMP PARALLEL DO &
875 !$OMP PRIVATE ( ij )
876
877 DO ij = 1 , grid%num_tiles
878
879 CALL wrf_debug ( 200 , ' call calculate_km_kh' )
880 #ifdef WRF_PLUS
881 if(TEST_calculate_km_kh) THEN
882 CALL t_calculate_km_kh( config_flags,grid%dt,grid%dampcoef,grid%zdamp,config_flags%damp_opt, &
883 grid%xkmh,grid%xkmhd,grid%xkmv,grid%xkhh,grid%xkhv,grid%bn2, &
884 grid%khdif,grid%kvdif,grid%div, &
885 grid%defor11,grid%defor22,grid%defor33,grid%defor12, &
886 grid%defor13,grid%defor23, &
887 grid%em_tke_2,p8w,t8w,th_phy, &
888 t_phy,p_phy,moist,grid%em_dn,grid%em_dnw, &
889 grid%dx,grid%dy,grid%em_rdz,grid%em_rdzw,config_flags%mix_cr_len,num_3d_m, &
890 grid%cf1, grid%cf2, grid%cf3, grid%warm_rain, &
891 grid%kh_tke_upper_bound, grid%kv_tke_upper_bound, &
892 ids,ide, jds,jde, kds,kde, &
893 ims,ime, jms,jme, kms,kme, &
894 grid%i_start(ij), grid%i_end(ij), &
895 grid%j_start(ij), grid%j_end(ij), &
896 k_start , k_end )
897 END IF
898 #endif
899 CALL calculate_km_kh( config_flags,grid%dt,grid%dampcoef,grid%zdamp,config_flags%damp_opt, &
900 grid%xkmh,grid%xkmhd,grid%xkmv,grid%xkhh,grid%xkhv,grid%bn2, &
901 grid%khdif,grid%kvdif,grid%div, &
902 grid%defor11,grid%defor22,grid%defor33,grid%defor12, &
903 grid%defor13,grid%defor23, &
904 grid%em_tke_2,p8w,t8w,th_phy, &
905 t_phy,p_phy,moist,grid%em_dn,grid%em_dnw, &
906 grid%dx,grid%dy,grid%em_rdz,grid%em_rdzw,config_flags%mix_cr_len,num_3d_m, &
907 grid%cf1, grid%cf2, grid%cf3, grid%warm_rain, &
908 grid%kh_tke_upper_bound, grid%kv_tke_upper_bound, &
909 ids,ide, jds,jde, kds,kde, &
910 ims,ime, jms,jme, kms,kme, &
911 grid%i_start(ij), grid%i_end(ij), &
912 grid%j_start(ij), grid%j_end(ij), &
913 k_start , k_end )
914 ENDDO
915 !$OMP END PARALLEL DO
916 BENCH_END(calc_tke_tim)
917
918 #ifdef DM_PARALLEL
919 # include "HALO_EM_TKE_E.inc"
920 #endif
921
922 ENDIF
923
924 #ifdef DM_PARALLEL
925 # include "PERIOD_BDY_EM_PHY_BC.inc"
926 # include "PERIOD_BDY_EM_CHEM.inc"
927 #endif
928
929 BENCH_START(phy_bc_tim)
930 !$OMP PARALLEL DO &
931 !$OMP PRIVATE ( ij )
932
933 DO ij = 1 , grid%num_tiles
934
935 CALL wrf_debug ( 200 , ' call phy_bc' )
936 CALL phy_bc (config_flags,grid%div,grid%defor11,grid%defor22,grid%defor33, &
937 grid%defor12,grid%defor13,grid%defor23, &
938 grid%xkmh,grid%xkmhd,grid%xkmv,grid%xkhh,grid%xkhv, &
939 grid%em_tke_2, &
940 grid%rublten, grid%rvblten, &
941 ids, ide, jds, jde, kds, kde, &
942 ims, ime, jms, jme, kms, kme, &
943 ips, ipe, jps, jpe, kps, kpe, &
944 grid%i_start(ij), grid%i_end(ij), &
945 grid%j_start(ij), grid%j_end(ij), &
946 k_start , k_end )
947 ENDDO
948 !$OMP END PARALLEL DO
949 BENCH_END(phy_bc_tim)
950
951 #ifdef DM_PARALLEL
952 !-----------------------------------------------------------------------
953 !
954 ! MPP for some physics tendency, km, kh, deformation, and divergence
955 !
956 ! * *
957 ! * + * * + * +
958 ! * *
959 !
960 ! (for PBL)
961 ! grid%rublten x
962 ! grid%rvblten x
963 !
964 ! (for diff_opt >= 1)
965 ! grid%defor11 x
966 ! grid%defor22 x
967 ! grid%defor12 x
968 ! grid%defor13 x
969 ! grid%defor23 x
970 ! grid%div x
971 ! grid%xkmv x
972 ! grid%xkmh x
973 ! grid%xkmhd x
974 ! grid%xkhv x
975 ! grid%xkhh x
976 ! tke x
977 !
978 !-----------------------------------------------------------------------
979 IF ( config_flags%bl_pbl_physics .ge. 1 ) THEN
980 # include "HALO_EM_PHYS_PBL.inc"
981 ENDIF
982 IF ( config_flags%diff_opt .ge. 1 ) THEN
983 # include "HALO_EM_PHYS_DIFFUSION.inc"
984 ENDIF
985
986 IF ( config_flags%h_mom_adv_order <= 4 ) THEN
987 # include "HALO_EM_TKE_3.inc"
988 ELSE IF ( config_flags%h_mom_adv_order <= 6 ) THEN
989 # include "HALO_EM_TKE_5.inc"
990 ELSE
991 WRITE(wrf_err_message,*)'solve_em: invalid h_mom_adv_order = ',config_flags%h_mom_adv_order
992 CALL wrf_error_fatal(TRIM(wrf_err_message))
993 ENDIF
994 #endif
995
996 BENCH_START(update_phy_ten_tim)
997 !$OMP PARALLEL DO &
998 !$OMP PRIVATE ( ij )
999
1000 DO ij = 1 , grid%num_tiles
1001
1002 CALL wrf_debug ( 200 , ' call update_phy_ten' )
1003 CALL update_phy_ten(t_tendf, ru_tendf, rv_tendf,moist_tend, &
1004 grid%rthraten,grid%rthblten,grid%rthcuten,grid%rublten,grid%rvblten, &
1005 grid%rqvblten,grid%rqcblten,grid%rqiblten, &
1006 grid%rqvcuten,grid%rqccuten,grid%rqrcuten,grid%rqicuten,grid%rqscuten, &
1007 num_3d_m,config_flags,rk_step, &
1008 ids, ide, jds, jde, kds, kde, &
1009 ims, ime, jms, jme, kms, kme, &
1010 grid%i_start(ij), grid%i_end(ij), &
1011 grid%j_start(ij), grid%j_end(ij), &
1012 k_start, k_end )
1013
1014 END DO
1015 !$OMP END PARALLEL DO
1016 BENCH_END(update_phy_ten_tim)
1017
1018 IF( config_flags%diff_opt .eq. 2 .and. config_flags%km_opt .eq. 2 ) THEN
1019
1020 BENCH_START(tke_rhs_tim)
1021 !$OMP PARALLEL DO &
1022 !$OMP PRIVATE ( ij )
1023
1024 DO ij = 1 , grid%num_tiles
1025
1026 CALL tke_rhs ( tke_tend,grid%bn2, &
1027 config_flags,grid%defor11,grid%defor22,grid%defor33, &
1028 grid%defor12,grid%defor13,grid%defor23,grid%em_u_2,grid%em_v_2,grid%em_w_2,grid%div, &
1029 grid%em_tke_2,grid%em_mut, &
1030 th_phy,p_phy,p8w,t8w,grid%em_z,grid%em_fnm,grid%em_fnp, &
1031 grid%cf1,grid%cf2,grid%cf3,grid%msft,grid%xkmh,grid%xkmv,grid%xkhv,grid%rdx,grid%rdy, &
1032 grid%dx,grid%dy,grid%dt,grid%em_zx,grid%em_zy,grid%em_rdz,grid%em_rdzw,grid%em_dn,grid%em_dnw,config_flags%mix_cr_len, &
1033 ids, ide, jds, jde, kds, kde, &
1034 ims, ime, jms, jme, kms, kme, &
1035 grid%i_start(ij), grid%i_end(ij), &
1036 grid%j_start(ij), grid%j_end(ij), &
1037 k_start , k_end )
1038
1039 ENDDO
1040 !$OMP END PARALLEL DO
1041 BENCH_END(tke_rhs_tim)
1042
1043 ENDIF
1044
1045 ! calculate vertical diffusion first and then horizontal
1046 ! (keep this order)
1047
1048 IF(config_flags%diff_opt .eq. 2) THEN
1049
1050 IF (config_flags%bl_pbl_physics .eq. 0) THEN
1051
1052 BENCH_START(vert_diff_tim)
1053 !$OMP PARALLEL DO &
1054 !$OMP PRIVATE ( ij )
1055 DO ij = 1 , grid%num_tiles
1056
1057 CALL wrf_debug ( 200 , ' call vertical_diffusion_2 ' )
1058 CALL vertical_diffusion_2( ru_tendf, rv_tendf, rw_tendf, &
1059 t_tendf, tke_tend, &
1060 moist_tend, num_3d_m, &
1061 chem_tend, num_3d_c, &
1062 scalar_tend, num_3d_s, &
1063 grid%em_u_2, grid%em_v_2, &
1064 grid%em_t_2,grid%u_base,grid%v_base,grid%em_t_base,grid%qv_base, &
1065 grid%em_mut,grid%em_tke_2,config_flags, &
1066 grid%defor13,grid%defor23,grid%defor33, &
1067 grid%div, moist, chem, scalar, &
1068 grid%xkmv, grid%xkhv, config_flags%km_opt, &
1069 grid%em_fnm, grid%em_fnp, grid%em_dn, grid%em_dnw, grid%em_rdz, grid%em_rdzw, &
1070 ids, ide, jds, jde, kds, kde, &
1071 ims, ime, jms, jme, kms, kme, &
1072 grid%i_start(ij), grid%i_end(ij), &
1073 grid%j_start(ij), grid%j_end(ij), &
1074 k_start , k_end )
1075
1076 ENDDO
1077 !$OMP END PARALLEL DO
1078 BENCH_END(vert_diff_tim)
1079
1080 ENDIF
1081 !
1082 BENCH_START(hor_diff_tim)
1083 !$OMP PARALLEL DO &
1084 !$OMP PRIVATE ( ij )
1085 DO ij = 1 , grid%num_tiles
1086
1087 CALL wrf_debug ( 200 , ' call horizontal_diffusion_2' )
1088 CALL horizontal_diffusion_2( t_tendf, ru_tendf, rv_tendf, rw_tendf, &
1089 tke_tend, &
1090 moist_tend, num_3d_m, &
1091 chem_tend, num_3d_c, &
1092 scalar_tend, num_3d_s, &
1093 grid%em_t_2, th_phy, &
1094 grid%em_mut, grid%em_tke_2, config_flags, &
1095 grid%defor11, grid%defor22, grid%defor12, &
1096 grid%defor13, grid%defor23, grid%div, &
1097 moist, chem, scalar, &
1098 grid%msfu, grid%msfv, grid%msft, grid%xkmhd, grid%xkhh, config_flags%km_opt, &
1099 grid%rdx, grid%rdy, grid%em_rdz, grid%em_rdzw, &
1100 grid%em_fnm, grid%em_fnp, grid%cf1, grid%cf2, grid%cf3, &
1101 grid%em_zx, grid%em_zy, grid%em_dn, grid%em_dnw, &
1102 ids, ide, jds, jde, kds, kde, &
1103 ims, ime, jms, jme, kms, kme, &
1104 grid%i_start(ij), grid%i_end(ij), &
1105 grid%j_start(ij), grid%j_end(ij), &
1106 k_start , k_end )
1107 ENDDO
1108 !$OMP END PARALLEL DO
1109 BENCH_END(hor_diff_tim)
1110
1111 ENDIF
1112
1113 END IF rk_step_is_one
1114
1115 BENCH_START(rk_tend_tim)
1116 !$OMP PARALLEL DO &
1117 !$OMP PRIVATE ( ij )
1118 DO ij = 1 , grid%num_tiles
1119
1120 CALL wrf_debug ( 200 , ' call rk_tendency' )
1121 #ifdef WRF_PLUS
1122 if(TEST_rk_tendency) THEN
1123 CALL t_rk_tendency ( config_flags, rk_step, &
1124 grid%em_ru_tend, grid%em_rv_tend, rw_tend, ph_tend, t_tend, &
1125 ru_tendf, rv_tendf, rw_tendf, ph_tendf, t_tendf, &
1126 mu_tend, grid%em_u_save, grid%em_v_save, w_save, ph_save, &
1127 grid%em_t_save, mu_save, grid%rthften, &
1128 grid%em_ru, grid%em_rv, grid%em_rw, grid%em_ww, &
1129 grid%em_u_2, grid%em_v_2, grid%em_w_2, grid%em_t_2, grid%em_ph_2, &
1130 grid%em_u_1, grid%em_v_1, grid%em_w_1, grid%em_t_1, grid%em_ph_1, &
1131 grid%h_diabatic, grid%em_phb, grid%em_t_init, &
1132 grid%em_mu_2, grid%em_mut, grid%em_muu, grid%em_muv, grid%em_mub, &
1133 grid%em_al, grid%em_alt, grid%em_p, grid%em_pb, grid%em_php, cqu, cqv, cqw, &
1134 grid%u_base, grid%v_base, grid%em_t_base, grid%qv_base, grid%z_base, &
1135 grid%msfu, grid%msfv, grid%msft, grid%f, grid%e, grid%sina, grid%cosa, &
1136 grid%em_fnm, grid%em_fnp, grid%em_rdn, grid%em_rdnw, &
1137 grid%dt, grid%rdx, grid%rdy, grid%khdif, grid%kvdif, grid%xkmhd, &
1138 grid%diff_6th_opt, grid%diff_6th_rate, &
1139 grid%dampcoef,grid%zdamp,config_flags%damp_opt, &
1140 grid%cf1, grid%cf2, grid%cf3, grid%cfn, grid%cfn1, num_3d_m, &
1141 config_flags%non_hydrostatic, &
1142 ids, ide, jds, jde, kds, kde, &
1143 ims, ime, jms, jme, kms, kme, &
1144 grid%i_start(ij), grid%i_end(ij), &
1145 grid%j_start(ij), grid%j_end(ij), &
1146 k_start, k_end )
1147 END IF
1148 #endif
1149 CALL rk_tendency ( config_flags, rk_step, &
1150 grid%em_ru_tend, grid%em_rv_tend, rw_tend, ph_tend, t_tend, &
1151 ru_tendf, rv_tendf, rw_tendf, ph_tendf, t_tendf, &
1152 mu_tend, grid%em_u_save, grid%em_v_save, w_save, ph_save, &
1153 grid%em_t_save, mu_save, grid%rthften, &
1154 grid%em_ru, grid%em_rv, grid%em_rw, grid%em_ww, &
1155 grid%em_u_2, grid%em_v_2, grid%em_w_2, grid%em_t_2, grid%em_ph_2, &
1156 grid%em_u_1, grid%em_v_1, grid%em_w_1, grid%em_t_1, grid%em_ph_1, &
1157 grid%h_diabatic, grid%em_phb, grid%em_t_init, &
1158 grid%em_mu_2, grid%em_mut, grid%em_muu, grid%em_muv, grid%em_mub, &
1159 grid%em_al, grid%em_alt, grid%em_p, grid%em_pb, grid%em_php, cqu, cqv, cqw, &
1160 grid%u_base, grid%v_base, grid%em_t_base, grid%qv_base, grid%z_base, &
1161 grid%msfu, grid%msfv, grid%msft, grid%f, grid%e, grid%sina, grid%cosa, &
1162 grid%em_fnm, grid%em_fnp, grid%em_rdn, grid%em_rdnw, &
1163 grid%dt, grid%rdx, grid%rdy, grid%khdif, grid%kvdif, grid%xkmhd, &
1164 grid%diff_6th_opt, grid%diff_6th_rate, &
1165 grid%dampcoef,grid%zdamp,config_flags%damp_opt, &
1166 grid%cf1, grid%cf2, grid%cf3, grid%cfn, grid%cfn1, num_3d_m, &
1167 config_flags%non_hydrostatic, &
1168 ids, ide, jds, jde, kds, kde, &
1169 ims, ime, jms, jme, kms, kme, &
1170 grid%i_start(ij), grid%i_end(ij), &
1171 grid%j_start(ij), grid%j_end(ij), &
1172 k_start, k_end )
1173
#ifdef WRF_PLUS
      if(TEST_advect_scalar) THEN
        ! NOTE(review): argument names normalized to the conventions used by the
        ! rest of this merged routine -- grid%mut, grid%em_t_tend, grid%em_msfu
        ! and grid%em_rdx do not follow the grid%/i1 naming used elsewhere here
        ! (cf. the rk_tendency call above: grid%em_mut, t_tend, grid%msfu,
        ! grid%rdx). Confirm against the t_advect_scalar interface.
        CALL t_advect_scalar ( grid%em_t_2, grid%em_t_2, t_tend, grid%em_ru, grid%em_rv, grid%em_ww, &
                               grid%em_mut, config_flags, &
                               grid%msfu, grid%msfv, grid%msft, grid%em_fnm, grid%em_fnp, &
                               grid%rdx, grid%rdy, grid%em_rdnw, &
                               ids, ide, jds, jde, kds, kde, &
                               ims, ime, jms, jme, kms, kme, &
                               grid%i_start(ij), grid%i_end(ij), &
                               grid%j_start(ij), grid%j_end(ij), &
                               k_start, k_end )
      END IF
#endif
1187 END DO
1188 !$OMP END PARALLEL DO
1189 BENCH_END(rk_tend_tim)
1190
1191 BENCH_START(relax_bdy_dry_tim)
1192 !$OMP PARALLEL DO &
1193 !$OMP PRIVATE ( ij )
1194 DO ij = 1 , grid%num_tiles
1195
1196 IF( (config_flags%specified .or. config_flags%nested) .and. rk_step == 1 ) THEN
1197
1198 #ifdef WRF_PLUS
1199 if(TEST_relax_bdy_dry) THEN
1200 CALL t_relax_bdy_dry ( config_flags, &
1201 grid%em_u_save, grid%em_v_save, ph_save, grid%em_t_save, &
1202 w_save, mu_tend, &
1203 grid%em_ru, grid%em_rv, grid%em_ph_2, grid%em_t_2, &
1204 grid%em_w_2, grid%em_mu_2, grid%em_mut, &
1205 grid%em_u_b, grid%em_v_b, grid%em_ph_b, grid%em_t_b, grid%em_w_b, &
1206 grid%em_mu_b, &
1207 grid%em_u_bt, grid%em_v_bt, grid%em_ph_bt, grid%em_t_bt, &
1208 grid%em_w_bt, grid%em_mu_bt, &
1209 config_flags%spec_bdy_width, grid%spec_zone, grid%relax_zone, &
1210 grid%dtbc, grid%fcx, grid%gcx, &
1211 ijds, ijde, &
1212 ids,ide, jds,jde, kds,kde, &
1213 ims,ime, jms,jme, kms,kme, &
1214 ips,ipe, jps,jpe, kps,kpe, &
1215 grid%i_start(ij), grid%i_end(ij), &
1216 grid%j_start(ij), grid%j_end(ij), &
1217 k_start, k_end )
1218 END IF
1219 #endif
1220
1221 CALL relax_bdy_dry ( config_flags, &
1222 grid%em_u_save, grid%em_v_save, ph_save, grid%em_t_save, &
1223 w_save, mu_tend, &
1224 grid%em_ru, grid%em_rv, grid%em_ph_2, grid%em_t_2, &
1225 grid%em_w_2, grid%em_mu_2, grid%em_mut, &
1226 grid%em_u_b, grid%em_v_b, grid%em_ph_b, grid%em_t_b, grid%em_w_b, &
1227 grid%em_mu_b, &
1228 grid%em_u_bt, grid%em_v_bt, grid%em_ph_bt, grid%em_t_bt, &
1229 grid%em_w_bt, grid%em_mu_bt, &
1230 config_flags%spec_bdy_width, grid%spec_zone, grid%relax_zone, &
1231 grid%dtbc, grid%fcx, grid%gcx, &
1232 ijds, ijde, &
1233 ids,ide, jds,jde, kds,kde, &
1234 ims,ime, jms,jme, kms,kme, &
1235 ips,ipe, jps,jpe, kps,kpe, &
1236 grid%i_start(ij), grid%i_end(ij), &
1237 grid%j_start(ij), grid%j_end(ij), &
1238 k_start, k_end )
1239
1240
1241 ENDIF
1242 #ifdef WRF_PLUS
1243 if(TEST_rk_addtend_dry) THEN
1244 CALL t_rk_addtend_dry( grid%em_ru_tend, grid%em_rv_tend, rw_tend, ph_tend, t_tend, &
1245 ru_tendf, rv_tendf, rw_tendf, ph_tendf, t_tendf, &
1246 grid%em_u_save, grid%em_v_save, w_save, ph_save, grid%em_t_save, rk_step,&
1247 grid%h_diabatic, grid%em_mut, grid%msft, grid%msfu, grid%msfv, &
1248 ids,ide, jds,jde, kds,kde, &
1249 ims,ime, jms,jme, kms,kme, &
1250 ips,ipe, jps,jpe, kps,kpe, &
1251 grid%i_start(ij), grid%i_end(ij), &
1252 grid%j_start(ij), grid%j_end(ij), &
1253 k_start, k_end )
1254 END IF
1255 #endif
1256
1257 CALL rk_addtend_dry( grid%em_ru_tend, grid%em_rv_tend, rw_tend, ph_tend, t_tend, &
1258 ru_tendf, rv_tendf, rw_tendf, ph_tendf, t_tendf, &
1259 grid%em_u_save, grid%em_v_save, w_save, ph_save, grid%em_t_save, rk_step,&
1260 grid%h_diabatic, grid%em_mut, grid%msft, grid%msfu, grid%msfv, &
1261 ids,ide, jds,jde, kds,kde, &
1262 ims,ime, jms,jme, kms,kme, &
1263 ips,ipe, jps,jpe, kps,kpe, &
1264 grid%i_start(ij), grid%i_end(ij), &
1265 grid%j_start(ij), grid%j_end(ij), &
1266 k_start, k_end )
1267
1268 IF( config_flags%specified .or. config_flags%nested ) THEN
1269 #ifdef WRFVAR_TESTING
1270 if(TEST_spec_bdy_dry) THEN
1271 CALL t_spec_bdy_dry ( config_flags, &
1272 grid%em_ru_tend, grid%em_rv_tend, ph_tend, t_tend, &
1273 rw_tend, mu_tend, &
1274 grid%em_u_b, grid%em_v_b, grid%em_ph_b, grid%em_t_b, &
1275 grid%em_w_b, grid%em_mu_b, &
1276 grid%em_u_bt, grid%em_v_bt, grid%em_ph_bt, grid%em_t_bt, &
1277 grid%em_w_bt, grid%em_mu_bt, &
1278 config_flags%spec_bdy_width, grid%spec_zone, &
1279 ijds, ijde, & ! min/max(id,jd)
1280 ids,ide, jds,jde, kds,kde, & ! domain dims
1281 ims,ime, jms,jme, kms,kme, & ! memory dims
1282 ips,ipe, jps,jpe, kps,kpe, & ! patch dims
1283 grid%i_start(ij), grid%i_end(ij), &
1284 grid%j_start(ij), grid%j_end(ij), &
1285 k_start, k_end )
1286 END IF
1287 #endif
1288 CALL spec_bdy_dry ( config_flags, &
1289 grid%em_ru_tend, grid%em_rv_tend, ph_tend, t_tend, &
1290 rw_tend, mu_tend, &
1291 grid%em_u_b, grid%em_v_b, grid%em_ph_b, grid%em_t_b, &
1292 grid%em_w_b, grid%em_mu_b, &
1293 grid%em_u_bt, grid%em_v_bt, grid%em_ph_bt, grid%em_t_bt, &
1294 grid%em_w_bt, grid%em_mu_bt, &
1295 config_flags%spec_bdy_width, grid%spec_zone, &
1296 ijds, ijde, & ! min/max(id,jd)
1297 ids,ide, jds,jde, kds,kde, & ! domain dims
1298 ims,ime, jms,jme, kms,kme, & ! memory dims
1299 ips,ipe, jps,jpe, kps,kpe, & ! patch dims
1300 grid%i_start(ij), grid%i_end(ij), &
1301 grid%j_start(ij), grid%j_end(ij), &
1302 k_start, k_end )
1303
1304 ENDIF
1305
1306 END DO
1307 !$OMP END PARALLEL DO
1308 BENCH_END(relax_bdy_dry_tim)
1309
1310 !<DESCRIPTION>
1311 !<pre>
1312 ! (3) Small (acoustic,sound) steps.
1313 !
1314 ! Several acoustic steps are taken each RK pass. A small step
1315 ! sequence begins with calculating perturbation variables
1316 ! and coupling them to the column dry-air-mass mu
1317 ! (call to small_step_prep). This is followed by computing
1318 ! coefficients for the vertically implicit part of the
1319 ! small timestep (call to calc_coef_w).
1320 !
1321 ! The small steps are taken
1322 ! in the named loop "small_steps:". In the small_steps loop, first
1323 ! the horizontal momentum (u and v) are advanced (call to advance_uv),
1324 ! next mu and theta are advanced (call to advance_mu_t) followed by
1325 ! advancing w and the geopotential (call to advance_w). Diagnostic
1326 ! values for pressure and inverse density are updated at the end of
1327 ! each small_step.
1328 !
1329 ! The small-step section ends with the change of the perturbation variables
1330 ! back to full variables (call to small_step_finish).
1331 !</pre>
1332 !</DESCRIPTION>
1333
1334 BENCH_START(small_step_prep_tim)
1335 !$OMP PARALLEL DO &
1336 !$OMP PRIVATE ( ij )
1337
1338 DO ij = 1 , grid%num_tiles
1339
1340 ! Calculate coefficients for the vertically implicit acoustic/gravity wave
1341 ! integration. We only need calculate these for the first pass through -
1342 ! the predictor step. They are reused as is for the corrector step.
1343 ! For third-order RK, we need to recompute these after the first
1344 ! predictor because we may have changed the small timestep -> grid%dts.
1345
1346 CALL wrf_debug ( 200 , ' call calc_coef_w' )
1347
#ifdef WRFVAR_TESTING
      if(TEST_small_step_prep) THEN
        ! NOTE(review): merge damage repaired -- the t_small_step_prep call had a
        ! second CALL statement spliced into its continuation lines, truncating
        ! its argument list mid-statement. Both calls now carry the full grid%
        ! argument list used consistently elsewhere in this routine.
        CALL t_small_step_prep( grid%em_u_1,grid%em_u_2,grid%em_v_1,grid%em_v_2,grid%em_w_1,grid%em_w_2, &
                                grid%em_t_1,grid%em_t_2,grid%em_ph_1,grid%em_ph_2, &
                                grid%em_mub, grid%em_mu_1, grid%em_mu_2, &
                                grid%em_muu, muus, grid%em_muv, muvs, &
                                grid%em_mut, grid%em_muts, grid%em_mudf, &
                                grid%em_u_save, grid%em_v_save, w_save, &
                                grid%em_t_save, ph_save, mu_save, &
                                grid%em_ww, ww1, &
                                grid%em_dnw, c2a, grid%em_pb, grid%em_p, grid%em_alt, &
                                grid%msfu, grid%msfv, grid%msft, &
                                rk_step, &
                                ids, ide, jds, jde, kds, kde, &
                                ims, ime, jms, jme, kms, kme, &
                                grid%i_start(ij), grid%i_end(ij), &
                                grid%j_start(ij), grid%j_end(ij), &
                                k_start , k_end )
      END IF
#endif

      ! Compute perturbation variables and couple them to the column
      ! dry-air mass mu in preparation for the acoustic (small) steps.
      CALL small_step_prep( grid%em_u_1,grid%em_u_2,grid%em_v_1,grid%em_v_2,grid%em_w_1,grid%em_w_2, &
                            grid%em_t_1,grid%em_t_2,grid%em_ph_1,grid%em_ph_2, &
                            grid%em_mub, grid%em_mu_1, grid%em_mu_2, &
                            grid%em_muu, muus, grid%em_muv, muvs, &
                            grid%em_mut, grid%em_muts, grid%em_mudf, &
                            grid%em_u_save, grid%em_v_save, w_save, &
                            grid%em_t_save, ph_save, mu_save, &
                            grid%em_ww, ww1, &
                            grid%em_dnw, c2a, grid%em_pb, grid%em_p, grid%em_alt, &
                            grid%msfu, grid%msfv, grid%msft, &
                            rk_step, &
                            ids, ide, jds, jde, kds, kde, &
                            ims, ime, jms, jme, kms, kme, &
                            grid%i_start(ij), grid%i_end(ij), &
                            grid%j_start(ij), grid%j_end(ij), &
                            k_start , k_end )
#ifdef WRFVAR_TESTING
      if(TEST_calc_p_rho) THEN
        CALL t_calc_p_rho( grid%em_al, grid%em_p, grid%em_ph_2, &
                           grid%em_alt, grid%em_t_2, grid%em_t_save, c2a, pm1, &
                           grid%em_mu_2, grid%em_muts, grid%em_znu, t0, &
                           grid%em_rdnw, grid%em_dnw, grid%smdiv, &
                           config_flags%non_hydrostatic, 0, &
                           ids, ide, jds, jde, kds, kde, &
                           ims, ime, jms, jme, kms, kme, &
                           grid%i_start(ij), grid%i_end(ij), &
                           grid%j_start(ij), grid%j_end(ij), &
                           k_start , k_end )
      END IF
#endif
      ! Diagnose pressure and inverse density from the updated prognostic state.
      ! NOTE(review): a duplicated, truncated "CALL calc_p_rho( al, p, ph_2, ..."
      ! (pre-merge bare names) spliced into this call's continuation lines was
      ! removed; only the grid% form compiles in this merged file.
      CALL calc_p_rho( grid%em_al, grid%em_p, grid%em_ph_2, &
                       grid%em_alt, grid%em_t_2, grid%em_t_save, c2a, pm1, &
                       grid%em_mu_2, grid%em_muts, grid%em_znu, t0, &
                       grid%em_rdnw, grid%em_dnw, grid%smdiv, &
                       config_flags%non_hydrostatic, 0, &
                       ids, ide, jds, jde, kds, kde, &
                       ims, ime, jms, jme, kms, kme, &
                       grid%i_start(ij), grid%i_end(ij), &
                       grid%j_start(ij), grid%j_end(ij), &
                       k_start , k_end )
1424
1425 IF (config_flags%non_hydrostatic) THEN
1426 #ifdef WRFVAR_TESTING
1427 if(TEST_calc_coef_w) THEN
1428 CALL t_calc_coef_w( a,alpha,gamma, &
1429 mut, cqw, &
1430 rdn, rdnw, c2a, &
1431 dts_rk, g, epssm, &
1432 ids, ide, jds, jde, kds, kde, &
1433 ims, ime, jms, jme, kms, kme, &
1434 grid%i_start(ij), grid%i_end(ij), &
1435 grid%j_start(ij), grid%j_end(ij), &
1436 k_start , k_end )
1437 END IF
1438 #endif
1439 IF (config_flags%non_hydrostatic) &
1440 CALL calc_coef_w( a,alpha,gamma, &
1441 grid%em_mut, cqw, &
1442 grid%em_rdn, grid%em_rdnw, c2a, &
1443 dts_rk, g, grid%epssm, &
1444 ids, ide, jds, jde, kds, kde, &
1445 ims, ime, jms, jme, kms, kme, &
1446 grid%i_start(ij), grid%i_end(ij), &
1447 grid%j_start(ij), grid%j_end(ij), &
1448 k_start , k_end )
1449
1450
1451 ENDDO
1452 !$OMP END PARALLEL DO
1453 BENCH_END(small_step_prep_tim)
1454
1455
1456 #ifdef DM_PARALLEL
1457 !-----------------------------------------------------------------------
1458 ! Stencils for patch communications (WCS, 29 June 2001)
1459 ! Note: the small size of this halo exchange reflects the
1460 ! fact that we are carrying the uncoupled variables
1461 ! as state variables in the mass coordinate model, as
1462 ! opposed to the coupled variables as in the height
1463 ! coordinate model.
1464 !
1465 ! * * * * *
1466 ! * * * * * * * * *
1467 ! * + * * + * * * + * *
1468 ! * * * * * * * * *
1469 ! * * * * *
1470 !
1471 ! 3D variables - note staggering! grid%em_ph_2(grid%em_z), grid%em_u_save(X), grid%em_v_save(Y)
1472 !
1473 !j grid%em_ph_2 x
1474 !j grid%em_al x
1475 !j grid%em_p x
1476 !j grid%em_t_1 x
1477 !j grid%em_t_save x
1478 !j grid%em_u_save x
1479 !j grid%em_v_save x
1480 !
1481 ! the following are 2D (xy) variables
1482 !
1483 !j grid%em_mu_1 x
1484 !j grid%em_mu_2 x
1485 !j grid%em_mudf x
1486 !--------------------------------------------------------------
1487 # include "HALO_EM_B.inc"
1488 # include "PERIOD_BDY_EM_B.inc"
1489 #endif
1490
1491 BENCH_START(set_phys_bc2_tim)
1492 !$OMP PARALLEL DO &
1493 !$OMP PRIVATE ( ij )
1494
1495 DO ij = 1 , grid%num_tiles
1496
1497 CALL set_physical_bc3d( grid%em_ru_tend, 'u', config_flags, &
1498 ids, ide, jds, jde, kds, kde, &
1499 ims, ime, jms, jme, kms, kme, &
1500 ips, ipe, jps, jpe, kps, kpe, &
1501 grid%i_start(ij), grid%i_end(ij), &
1502 grid%j_start(ij), grid%j_end(ij), &
1503 k_start , k_end )
1504
1505 CALL set_physical_bc3d( grid%em_rv_tend, 'v', config_flags, &
1506 ids, ide, jds, jde, kds, kde, &
1507 ims, ime, jms, jme, kms, kme, &
1508 ips, ipe, jps, jpe, kps, kpe, &
1509 grid%i_start(ij), grid%i_end(ij), &
1510 grid%j_start(ij), grid%j_end(ij), &
1511 k_start , k_end )
1512
1513 CALL set_physical_bc3d( grid%em_ph_2, 'w', config_flags, &
1514 ids, ide, jds, jde, kds, kde, &
1515 ims, ime, jms, jme, kms, kme, &
1516 ips, ipe, jps, jpe, kps, kpe, &
1517 grid%i_start(ij), grid%i_end(ij), &
1518 grid%j_start(ij), grid%j_end(ij), &
1519 k_start , k_end )
1520
1521 CALL set_physical_bc3d( grid%em_al, 'grid%em_p', config_flags, &
1522 ids, ide, jds, jde, kds, kde, &
1523 ims, ime, jms, jme, kms, kme, &
1524 ips, ipe, jps, jpe, kps, kpe, &
1525 grid%i_start(ij), grid%i_end(ij), &
1526 grid%j_start(ij), grid%j_end(ij), &
1527 k_start , k_end )
1528
1529 CALL set_physical_bc3d( grid%em_p, 'grid%em_p', config_flags, &
1530 ids, ide, jds, jde, kds, kde, &
1531 ims, ime, jms, jme, kms, kme, &
1532 ips, ipe, jps, jpe, kps, kpe, &
1533 grid%i_start(ij), grid%i_end(ij), &
1534 grid%j_start(ij), grid%j_end(ij), &
1535 k_start , k_end )
1536
1537 CALL set_physical_bc3d( grid%em_t_1, 'grid%em_p', config_flags, &
1538 ids, ide, jds, jde, kds, kde, &
1539 ims, ime, jms, jme, kms, kme, &
1540 ips, ipe, jps, jpe, kps, kpe, &
1541 grid%i_start(ij), grid%i_end(ij), &
1542 grid%j_start(ij), grid%j_end(ij), &
1543 k_start , k_end )
1544
1545 CALL set_physical_bc3d( grid%em_t_save, 't', config_flags, &
1546 ids, ide, jds, jde, kds, kde, &
1547 ims, ime, jms, jme, kms, kme, &
1548 ips, ipe, jps, jpe, kps, kpe, &
1549 grid%i_start(ij), grid%i_end(ij), &
1550 grid%j_start(ij), grid%j_end(ij), &
1551 k_start , k_end )
1552
1553 CALL set_physical_bc2d( grid%em_mu_1, 't', config_flags, &
1554 ids, ide, jds, jde, &
1555 ims, ime, jms, jme, &
1556 ips, ipe, jps, jpe, &
1557 grid%i_start(ij), grid%i_end(ij), &
1558 grid%j_start(ij), grid%j_end(ij) )
1559
1560 CALL set_physical_bc2d( grid%em_mu_2, 't', config_flags, &
1561 ids, ide, jds, jde, &
1562 ims, ime, jms, jme, &
1563 ips, ipe, jps, jpe, &
1564 grid%i_start(ij), grid%i_end(ij), &
1565 grid%j_start(ij), grid%j_end(ij) )
1566
1567 CALL set_physical_bc2d( grid%em_mudf, 't', config_flags, &
1568 ids, ide, jds, jde, &
1569 ims, ime, jms, jme, &
1570 ips, ipe, jps, jpe, &
1571 grid%i_start(ij), grid%i_end(ij), &
1572 grid%j_start(ij), grid%j_end(ij) )
1573
1574 END DO
1575 !$OMP END PARALLEL DO
1576 BENCH_END(set_phys_bc2_tim)
1577
1578 small_steps : DO iteration = 1 , number_of_small_timesteps
1579
1580 ! Boundary condition time (or communication time).
1581
1582 #ifdef DM_PARALLEL
1583 # include "PERIOD_BDY_EM_B.inc"
1584 #endif
1585
1586
1587 !$OMP PARALLEL DO &
1588 !$OMP PRIVATE ( ij )
1589
1590 DO ij = 1 , grid%num_tiles
1591
1592 BENCH_START(advance_uv_tim)
#ifdef WRFVAR_TESTING
      if(TEST_advance_uv) THEN
        CALL t_advance_uv ( grid%em_u_2, grid%em_ru_tend, grid%em_v_2, grid%em_rv_tend, &
                            grid%em_p, grid%em_pb, &
                            grid%em_ph_2, grid%em_php, grid%em_alt, grid%em_al, grid%em_mu_2, &
                            grid%em_muu, cqu, grid%em_muv, cqv, grid%em_mudf, &
                            grid%rdx, grid%rdy, dts_rk, &
                            grid%cf1, grid%cf2, grid%cf3, grid%em_fnm, grid%em_fnp, &
                            grid%emdiv, &
                            grid%em_rdnw, config_flags,grid%spec_zone, &
                            config_flags%non_hydrostatic, &
                            ids, ide, jds, jde, kds, kde, &
                            ims, ime, jms, jme, kms, kme, &
                            grid%i_start(ij), grid%i_end(ij), &
                            grid%j_start(ij), grid%j_end(ij), &
                            k_start , k_end )
      END IF
#endif
      ! Acoustic-step advance of the horizontal momentum (u and v).
      ! NOTE(review): a duplicated, truncated "CALL advance_uv ( u_2, ru_tend..."
      ! (pre-merge bare names) spliced into this call's continuation lines was
      ! removed; the grid% argument list below is the merged form.
      CALL advance_uv ( grid%em_u_2, grid%em_ru_tend, grid%em_v_2, grid%em_rv_tend, &
                        grid%em_p, grid%em_pb, &
                        grid%em_ph_2, grid%em_php, grid%em_alt, grid%em_al, grid%em_mu_2, &
                        grid%em_muu, cqu, grid%em_muv, cqv, grid%em_mudf, &
                        grid%rdx, grid%rdy, dts_rk, &
                        grid%cf1, grid%cf2, grid%cf3, grid%em_fnm, grid%em_fnp, &
                        grid%emdiv, &
                        grid%em_rdnw, config_flags,grid%spec_zone, &
                        config_flags%non_hydrostatic, &
                        ids, ide, jds, jde, kds, kde, &
                        ims, ime, jms, jme, kms, kme, &
                        grid%i_start(ij), grid%i_end(ij), &
                        grid%j_start(ij), grid%j_end(ij), &
                        k_start , k_end )
1631 BENCH_END(advance_uv_tim)
1632
1633 BENCH_START(spec_bdy_uv_tim)
1634 IF( config_flags%specified .or. config_flags%nested ) THEN
#ifdef WRFVAR_TESTING
        if(TEST_spec_bdyupdate) THEN
          ! NOTE(review): merge damage repaired -- a second CALL statement had
          ! been spliced into the continuation lines of t_spec_bdyupdate; both
          ! calls now use the grid% names, matching the sibling 'v' update below.
          CALL t_spec_bdyupdate(grid%em_u_2, grid%em_ru_tend, dts_rk, &
                                'u' , config_flags, &
                                grid%spec_zone, &
                                ids,ide, jds,jde, kds,kde, & ! domain dims
                                ims,ime, jms,jme, kms,kme, & ! memory dims
                                ips,ipe, jps,jpe, kps,kpe, & ! patch dims
                                grid%i_start(ij), grid%i_end(ij), &
                                grid%j_start(ij), grid%j_end(ij), &
                                k_start , k_end )
        END IF
#endif
        ! Specified-boundary update of u on the acoustic step.
        CALL spec_bdyupdate(grid%em_u_2, grid%em_ru_tend, dts_rk, &
                            'u' , config_flags, &
                            grid%spec_zone, &
                            ids,ide, jds,jde, kds,kde, & ! domain dims
                            ims,ime, jms,jme, kms,kme, & ! memory dims
                            ips,ipe, jps,jpe, kps,kpe, & ! patch dims
                            grid%i_start(ij), grid%i_end(ij), &
                            grid%j_start(ij), grid%j_end(ij), &
                            k_start , k_end )
1658
1659 CALL spec_bdyupdate(grid%em_v_2, grid%em_rv_tend, dts_rk, &
1660 'v' , config_flags, &
1661 grid%spec_zone, &
1662 ids,ide, jds,jde, kds,kde, & ! domain dims
1663 ims,ime, jms,jme, kms,kme, & ! memory dims
1664 ips,ipe, jps,jpe, kps,kpe, & ! patch dims
1665 grid%i_start(ij), grid%i_end(ij), &
1666 grid%j_start(ij), grid%j_end(ij), &
1667 k_start , k_end )
1668
1669 ENDIF
1670 BENCH_END(spec_bdy_uv_tim)
1671
1672 END DO
1673 !$OMP END PARALLEL DO
1674
1675 #ifdef DM_PARALLEL
1676 !
1677 ! Stencils for patch communications (WCS, 29 June 2001)
1678 !
1679 ! * *
1680 ! * + * * + * +
1681 ! * *
1682 !
1683 ! grid%em_u_2 x
1684 ! grid%em_v_2 x
1685 !
1686 # include "HALO_EM_C.inc"
1687 #endif
1688
1689 !$OMP PARALLEL DO &
1690 !$OMP PRIVATE ( ij )
1691
1692 DO ij = 1 , grid%num_tiles
1693
1694 ! advance the mass in the column, theta, and calculate grid%em_ww
1695
1696 #ifdef WRFVAR_TESTING
1697 if(TEST_advance_mu_t) THEN
1698 CALL t_advance_mu_t( ww, ww1, u_2, u_save, v_2, v_save, &
1699 mu_2, mut, muave, muts, muu, muv, &
1700 mudf, ru_m, rv_m, ww_m, &
1701 t_2, t_save, t_2save, t_tend, &
1702 mu_tend, &
1703 rdx, rdy, dts_rk, epssm, &
1704 dnw, fnm, fnp, rdnw, &
1705 msfu, msfv, msft, &
1706 iteration, config_flags, &
1707 ids, ide, jds, jde, kds, kde, &
1708 ims, ime, jms, jme, kms, kme, &
1709 grid%i_start(ij), grid%i_end(ij), &
1710 grid%j_start(ij), grid%j_end(ij), &
1711 k_start , k_end )
1712 END IF
1713 #endif
1714 BENCH_START(advance_mu_t_tim)
1715 CALL advance_mu_t( grid%em_ww, ww1, grid%em_u_2, grid%em_u_save, grid%em_v_2, grid%em_v_save, &
1716 grid%em_mu_2, grid%em_mut, muave, grid%em_muts, grid%em_muu, grid%em_muv, &
1717 grid%em_mudf, grid%em_ru_m, grid%em_rv_m, ww_m, &
1718 grid%em_t_2, grid%em_t_save, t_2save, t_tend, &
1719 mu_tend, &
1720 grid%rdx, grid%rdy, dts_rk, grid%epssm, &
1721 grid%em_dnw, grid%em_fnm, grid%em_fnp, grid%em_rdnw, &
1722 grid%msfu, grid%msfv, grid%msft, &
1723 iteration, config_flags, &
1724 ids, ide, jds, jde, kds, kde, &
1725 ims, ime, jms, jme, kms, kme, &
1726 grid%i_start(ij), grid%i_end(ij), &
1727 grid%j_start(ij), grid%j_end(ij), &
1728 k_start , k_end )
1729 BENCH_END(advance_mu_t_tim)
1730
1731 BENCH_START(spec_bdy_t_tim)
1732 IF( config_flags%specified .or. config_flags%nested ) THEN
1733
1734 CALL spec_bdyupdate(grid%em_t_2, t_tend, dts_rk, &
1735 't' , config_flags, &
1736 grid%spec_zone, &
1737 ids,ide, jds,jde, kds,kde, & ! domain dims
1738 ims,ime, jms,jme, kms,kme, & ! memory dims
1739 ips,ipe, jps,jpe, kps,kpe, & ! patch dims
1740 grid%i_start(ij), grid%i_end(ij), &
1741 grid%j_start(ij), grid%j_end(ij), &
1742 k_start , k_end )
1743
1744 CALL spec_bdyupdate(grid%em_mu_2, mu_tend, dts_rk, &
1745 'm' , config_flags, &
1746 grid%spec_zone, &
1747 ids,ide, jds,jde, 1 ,1 , & ! domain dims
1748 ims,ime, jms,jme, 1 ,1 , & ! memory dims
1749 ips,ipe, jps,jpe, 1 ,1 , & ! patch dims
1750 grid%i_start(ij), grid%i_end(ij), &
1751 grid%j_start(ij), grid%j_end(ij), &
1752 1 , 1 )
1753
1754 CALL spec_bdyupdate(grid%em_muts, mu_tend, dts_rk, &
1755 'm' , config_flags, &
1756 grid%spec_zone, &
1757 ids,ide, jds,jde, 1 ,1 , & ! domain dims
1758 ims,ime, jms,jme, 1 ,1 , & ! memory dims
1759 ips,ipe, jps,jpe, 1 ,1 , & ! patch dims
1760 grid%i_start(ij), grid%i_end(ij), &
1761 grid%j_start(ij), grid%j_end(ij), &
1762 1 , 1 )
1763 ENDIF
1764 BENCH_END(spec_bdy_t_tim)
1765
1766 ! sumflux accumulates the time-averged mass flux
1767 ! (time averaged over the acoustic steps) for use
1768 ! in the scalar advection (flux divergence). Using
1769 ! time averaged values gives us exact scalar conservation.
1770
1771 #ifdef WRF_PLUS
1772 if(TEST_sumflux) THEN
1773 CALL t_sumflux ( grid%em_u_2, grid%em_v_2, grid%em_ww, &
1774 grid%em_u_save, grid%em_v_save, ww1, &
1775 grid%em_muu, grid%em_muv, &
1776 grid%em_ru_m, grid%em_rv_m, ww_m, grid%epssm, &
1777 grid%msfu, grid%msfv, &
1778 iteration, number_of_small_timesteps, &
1779 ids, ide, jds, jde, kds, kde, &
1780 ims, ime, jms, jme, kms, kme, &
1781 grid%i_start(ij), grid%i_end(ij), &
1782 grid%j_start(ij), grid%j_end(ij), &
1783 k_start , k_end )
1784 END IF
1785 #endif
1786 BENCH_START(sumflux_tim)
1787 CALL sumflux ( grid%em_u_2, grid%em_v_2, grid%em_ww, &
1788 grid%em_u_save, grid%em_v_save, ww1, &
1789 grid%em_muu, grid%em_muv, &
1790 grid%em_ru_m, grid%em_rv_m, ww_m, grid%epssm, &
1791 grid%msfu, grid%msfv, &
1792 iteration, number_of_small_timesteps, &
1793 ids, ide, jds, jde, kds, kde, &
1794 ims, ime, jms, jme, kms, kme, &
1795 grid%i_start(ij), grid%i_end(ij), &
1796 grid%j_start(ij), grid%j_end(ij), &
1797 k_start , k_end )
1798 BENCH_END(sumflux_tim)
1799
1800 ! small (acoustic) step for the vertical momentum,
1801 ! density and coupled potential temperature.
1802
1803
1804 BENCH_START(advance_w_tim)
1805 IF ( config_flags%non_hydrostatic ) THEN
#ifdef WRF_PLUS
        if(TEST_advance_w) THEN
          CALL t_advance_w( grid%em_w_2, rw_tend, grid%em_ww, grid%em_u_2, grid%em_v_2, &
                            grid%em_mu_2, grid%em_mut, muave, grid%em_muts, &
                            t_2save, grid%em_t_2, grid%em_t_save, &
                            grid%em_ph_2, ph_save, grid%em_phb, ph_tend, &
                            grid%ht, c2a, cqw, grid%em_alt, grid%em_alb, &
                            a, alpha, gamma, &
                            grid%rdx, grid%rdy, dts_rk, t0, grid%epssm, &
                            grid%em_dnw, grid%em_fnm, grid%em_fnp, grid%em_rdnw, grid%em_rdn, &
                            grid%cf1, grid%cf2, grid%cf3, grid%msft, &
                            config_flags, &
                            ids,ide, jds,jde, kds,kde, & ! domain dims
                            ims,ime, jms,jme, kms,kme, & ! memory dims
                            grid%i_start(ij), grid%i_end(ij), &
                            grid%j_start(ij), grid%j_end(ij), &
                            k_start , k_end )
        END IF
#endif
        ! Vertically implicit acoustic-step update of w and the geopotential.
        ! NOTE(review): bare pre-merge names (w_2, ww, mut, muts, alt, alb,
        ! rdx, ...) converted to the grid%/i1 forms used consistently by the
        ! sibling calls in this merged routine (note grid%epssm was already
        ! converted, marking this call as partially merged).
        CALL advance_w( grid%em_w_2, rw_tend, grid%em_ww, grid%em_u_2, grid%em_v_2, &
                        grid%em_mu_2, grid%em_mut, muave, grid%em_muts, &
                        t_2save, grid%em_t_2, grid%em_t_save, &
                        grid%em_ph_2, ph_save, grid%em_phb, ph_tend, &
                        grid%ht, c2a, cqw, grid%em_alt, grid%em_alb, &
                        a, alpha, gamma, &
                        grid%rdx, grid%rdy, dts_rk, t0, grid%epssm, &
                        grid%em_dnw, grid%em_fnm, grid%em_fnp, grid%em_rdnw, grid%em_rdn, &
                        grid%cf1, grid%cf2, grid%cf3, grid%msft, &
                        config_flags, &
                        ids,ide, jds,jde, kds,kde, & ! domain dims
                        ims,ime, jms,jme, kms,kme, & ! memory dims
                        grid%i_start(ij), grid%i_end(ij), &
                        grid%j_start(ij), grid%j_end(ij), &
                        k_start , k_end )
1840 ENDIF
1841 BENCH_END(advance_w_tim)
1842
1843 IF( config_flags%specified .or. config_flags%nested ) THEN
1844
1845 BENCH_START(spec_bdynhyd_tim)
1846 IF (config_flags%non_hydrostatic) THEN
1847 #ifdef WRF_PLUS
1848 if(TEST_spec_bdyupdate_ph) THEN
1849 CALL t_spec_bdyupdate_ph( ph_save, ph_2, ph_tend, mu_tend, muts, dts_rk, &
1850 'h' , config_flags, &
1851 grid%spec_zone, &
1852 ids,ide, jds,jde, kds,kde, & ! domain dims
1853 ims,ime, jms,jme, kms,kme, & ! memory dims
1854 ips,ipe, jps,jpe, kps,kpe, & ! patch dims
1855 grid%i_start(ij), grid%i_end(ij), &
1856 grid%j_start(ij), grid%j_end(ij), &
1857 k_start , k_end )
1858 END IF
1859 #endif
1860 CALL spec_bdyupdate_ph( ph_save, ph_2, ph_tend, mu_tend, muts, dts_rk, &
1861 'h' , config_flags, &
1862 grid%spec_zone, &
1863 ids,ide, jds,jde, kds,kde, & ! domain dims
1864 ims,ime, jms,jme, kms,kme, & ! memory dims
1865 ips,ipe, jps,jpe, kps,kpe, & ! patch dims
1866 grid%i_start(ij), grid%i_end(ij), &
1867 grid%j_start(ij), grid%j_end(ij), &
1868 k_start , k_end )
1869 IF( config_flags%specified ) THEN
1870 CALL zero_grad_bdy ( grid%em_w_2, &
1871 'w' , config_flags, &
1872 grid%spec_zone, &
1873 ids,ide, jds,jde, kds,kde, & ! domain dims
1874 ims,ime, jms,jme, kms,kme, & ! memory dims
1875 ips,ipe, jps,jpe, kps,kpe, & ! patch dims
1876 grid%i_start(ij), grid%i_end(ij), &
1877 grid%j_start(ij), grid%j_end(ij), &
1878 k_start , k_end )
1879 ELSE
1880 CALL spec_bdyupdate ( grid%em_w_2, rw_tend, dts_rk, &
1881 'h' , config_flags, &
1882 grid%spec_zone, &
1883 ids,ide, jds,jde, kds,kde, & ! domain dims
1884 ims,ime, jms,jme, kms,kme, & ! memory dims
1885 ips,ipe, jps,jpe, kps,kpe, & ! patch dims
1886 grid%i_start(ij), grid%i_end(ij), &
1887 grid%j_start(ij), grid%j_end(ij), &
1888 k_start , k_end )
1889 ENDIF
1890 ENDIF
1891 BENCH_END(spec_bdynhyd_tim)
1892 ENDIF
1893
1894 BENCH_START(cald_p_rho_tim)
1895 CALL calc_p_rho( grid%em_al, grid%em_p, grid%em_ph_2, &
1896 grid%em_alt, grid%em_t_2, grid%em_t_save, c2a, pm1, &
1897 grid%em_mu_2, grid%em_muts, grid%em_znu, t0, &
1898 grid%em_rdnw, grid%em_dnw, grid%smdiv, &
1899 config_flags%non_hydrostatic, iteration, &
1900 ids, ide, jds, jde, kds, kde, &
1901 ims, ime, jms, jme, kms, kme, &
1902 grid%i_start(ij), grid%i_end(ij), &
1903 grid%j_start(ij), grid%j_end(ij), &
1904 k_start , k_end )
1905 BENCH_END(cald_p_rho_tim)
1906
1907 ENDDO
1908 !$OMP END PARALLEL DO
1909
1910 #ifdef DM_PARALLEL
1911 !
1912 ! Stencils for patch communications (WCS, 29 June 2001)
1913 !
1914 ! * *
1915 ! * + * * + * +
1916 ! * *
1917 !
1918 ! grid%em_ph_2 x
1919 ! grid%em_al x
1920 ! grid%em_p x
1921 !
1922 ! 2D variables (x,y)
1923 !
1924 ! grid%em_mu_2 x
1925 ! grid%em_muts x
1926 ! grid%em_mudf x
1927
1928 # include "HALO_EM_C2.inc"
1929 # include "PERIOD_BDY_EM_B3.inc"
1930 #endif
1931
1932 BENCH_START(phys_bc_tim)
1933 !$OMP PARALLEL DO &
1934 !$OMP PRIVATE ( ij )
1935
1936 DO ij = 1 , grid%num_tiles
1937
1938 ! boundary condition set for next small timestep
1939
1940 CALL set_physical_bc3d( grid%em_ph_2, 'w', config_flags, &
1941 ids, ide, jds, jde, kds, kde, &
1942 ims, ime, jms, jme, kms, kme, &
1943 ips, ipe, jps, jpe, kps, kpe, &
1944 grid%i_start(ij), grid%i_end(ij), &
1945 grid%j_start(ij), grid%j_end(ij), &
1946 k_start , k_end )
1947
1948 CALL set_physical_bc3d( grid%em_al, 'grid%em_p', config_flags, &
1949 ids, ide, jds, jde, kds, kde, &
1950 ims, ime, jms, jme, kms, kme, &
1951 ips, ipe, jps, jpe, kps, kpe, &
1952 grid%i_start(ij), grid%i_end(ij), &
1953 grid%j_start(ij), grid%j_end(ij), &
1954 k_start , k_end )
1955
1956 CALL set_physical_bc3d( grid%em_p, 'grid%em_p', config_flags, &
1957 ids, ide, jds, jde, kds, kde, &
1958 ims, ime, jms, jme, kms, kme, &
1959 ips, ipe, jps, jpe, kps, kpe, &
1960 grid%i_start(ij), grid%i_end(ij), &
1961 grid%j_start(ij), grid%j_end(ij), &
1962 k_start , k_end )
1963
1964 CALL set_physical_bc2d( grid%em_muts, 't', config_flags, &
1965 ids, ide, jds, jde, &
1966 ims, ime, jms, jme, &
1967 ips, ipe, jps, jpe, &
1968 grid%i_start(ij), grid%i_end(ij), &
1969 grid%j_start(ij), grid%j_end(ij) )
1970
1971 CALL set_physical_bc2d( grid%em_mu_2, 't', config_flags, &
1972 ids, ide, jds, jde, &
1973 ims, ime, jms, jme, &
1974 ips, ipe, jps, jpe, &
1975 grid%i_start(ij), grid%i_end(ij), &
1976 grid%j_start(ij), grid%j_end(ij) )
1977
1978 CALL set_physical_bc2d( grid%em_mudf, 't', config_flags, &
1979 ids, ide, jds, jde, &
1980 ims, ime, jms, jme, &
1981 ips, ipe, jps, jpe, &
1982 grid%i_start(ij), grid%i_end(ij), &
1983 grid%j_start(ij), grid%j_end(ij) )
1984
1985 END DO
1986 !$OMP END PARALLEL DO
1987 BENCH_END(phys_bc_tim)
1988
1989 END DO small_steps
1990
1991 !$OMP PARALLEL DO &
1992 !$OMP PRIVATE ( ij )
1993
1994 DO ij = 1 , grid%num_tiles
1995
1996 CALL wrf_debug ( 200 , ' call rk_small_finish' )
1997
1998 ! change time-perturbation variables back to
1999 ! full perturbation variables.
2000 ! first get updated mu at u and v points
2001
2002 #ifdef WRF_PLUS
2003 if(TEST_calc_mu_uv_1) THEN
2004 CALL t_calc_mu_uv_1 ( config_flags, &
2005 grid%em_muts, muus, muvs, &
2006 ids, ide, jds, jde, kds, kde, &
2007 ims, ime, jms, jme, kms, kme, &
2008 grid%i_start(ij), grid%i_end(ij), &
2009 grid%j_start(ij), grid%j_end(ij), &
2010 k_start , k_end )
2011 END IF
2012 #endif
2013
2014
2015 BENCH_START(calc_mu_uv_tim)
2016 CALL calc_mu_uv_1 ( config_flags, &
2017 grid%em_muts, muus, muvs, &
2018 ids, ide, jds, jde, kds, kde, &
2019 ims, ime, jms, jme, kms, kme, &
2020 grid%i_start(ij), grid%i_end(ij), &
2021 grid%j_start(ij), grid%j_end(ij), &
2022 k_start , k_end )
2023 BENCH_END(calc_mu_uv_tim)
2024 #ifdef WRF_PLUS
2025 if(TEST_small_step_finish) THEN
2026 CALL t_small_step_finish( grid%em_u_2, grid%em_u_1, grid%em_v_2, grid%em_v_1, grid%em_w_2, grid%em_w_1, &
2027 grid%em_t_2, grid%em_t_1, grid%em_ph_2, grid%em_ph_1, grid%em_ww, ww1, &
2028 grid%em_mu_2, grid%em_mu_1, &
2029 grid%em_mut, grid%em_muts, grid%em_muu, muus, grid%em_muv, muvs, &
2030 grid%em_u_save, grid%em_v_save, w_save, &
2031 grid%em_t_save, ph_save, mu_save, &
2032 grid%msfu, grid%msfv, grid%msft, &
2033 grid%h_diabatic, &
2034 number_of_small_timesteps,dts_rk, &
2035 rk_step, rk_order, &
2036 ids, ide, jds, jde, kds, kde, &
2037 ims, ime, jms, jme, kms, kme, &
2038 grid%i_start(ij), grid%i_end(ij), &
2039 grid%j_start(ij), grid%j_end(ij), &
2040 k_start , k_end )
2041 END IF
2042 #endif
2043 BENCH_START(small_step_finish_tim)
2044 CALL small_step_finish( grid%em_u_2, grid%em_u_1, grid%em_v_2, grid%em_v_1, grid%em_w_2, grid%em_w_1, &
2045 grid%em_t_2, grid%em_t_1, grid%em_ph_2, grid%em_ph_1, grid%em_ww, ww1, &
2046 grid%em_mu_2, grid%em_mu_1, &
2047 grid%em_mut, grid%em_muts, grid%em_muu, muus, grid%em_muv, muvs, &
2048 grid%em_u_save, grid%em_v_save, w_save, &
2049 grid%em_t_save, ph_save, mu_save, &
2050 grid%msfu, grid%msfv, grid%msft, &
2051 grid%h_diabatic, &
2052 number_of_small_timesteps,dts_rk, &
2053 rk_step, rk_order, &
2054 ids, ide, jds, jde, kds, kde, &
2055 ims, ime, jms, jme, kms, kme, &
2056 grid%i_start(ij), grid%i_end(ij), &
2057 grid%j_start(ij), grid%j_end(ij), &
2058 k_start , k_end )
2059 BENCH_END(small_step_finish_tim)
2060
2061 END DO
2062 !$OMP END PARALLEL DO
2063
2064 #ifdef DM_PARALLEL
2065 !
2066 ! Stencils for patch communications (WCS, 29 June 2001)
2067 !
2068 !
2069 ! grid%em_ru_m x
2070 ! grid%em_rv_m x
2071 !
2072 !--------------------------------------------------------------
2073
2074 # include "HALO_EM_D.inc"
2075 #endif
2076
2077 !<DESCRIPTION>
2078 !<pre>
2079 ! (4) Still within the RK loop, the scalar variables are advanced.
2080 !
2081 ! For the moist and chem variables, each one is advanced
2082 ! individually, using named loops "moist_variable_loop:"
2083 ! and "chem_variable_loop:". Each RK substep begins by
2084 ! calculating the advective tendency, and, for the first RK step,
2085 ! 3D mixing (calling rk_scalar_tend) followed by an update
2086 ! of the scalar (calling rk_scalar_update).
2087 !</pre>
2088 !</DESCRIPTION>
2089
2090
2091 moist_scalar_advance: IF (num_3d_m >= PARAM_FIRST_SCALAR ) THEN
2092
2093 moist_variable_loop: do im = PARAM_FIRST_SCALAR, num_3d_m
2094
2095 if (grid%adv_moist_cond .or. (im==p_qv) ) then
2096
2097 !$OMP PARALLEL DO &
2098 !$OMP PRIVATE ( ij )
2099
2100 moist_tile_loop_1: DO ij = 1 , grid%num_tiles
2101
2102 CALL wrf_debug ( 200 , ' call rk_scalar_tend' )
2103
2104 #ifdef WRF_PLUS
2105 if(TEST_rk_scalar_tend) THEN
2106 CALL t_rk_scalar_tend ( im, im, config_flags, &
2107 rk_step, dt_rk, &
2108 grid%em_ru_m, grid%em_rv_m, ww_m, &
2109 grid%em_mut, grid%em_alt, &
2110 moist(ims,kms,jms,im), &
2111 moist_tend(ims,kms,jms,im), &
2112 advect_tend,grid%rqvften, &
2113 grid%qv_base, .true., grid%em_fnm, grid%em_fnp, &
2114 grid%msfu, grid%msfv, grid%msft, &
2115 grid%rdx, grid%rdy, grid%em_rdn, grid%em_rdnw, grid%khdif, &
2116 grid%kvdif, grid%xkmhd, &
2117 grid%diff_6th_opt, grid%diff_6th_rate, &
2118 ids, ide, jds, jde, kds, kde, &
2119 ims, ime, jms, jme, kms, kme, &
2120 grid%i_start(ij), grid%i_end(ij), &
2121 grid%j_start(ij), grid%j_end(ij), &
2122 k_start , k_end )
2123 END IF
2124 #endif
2125
2126 BENCH_START(rk_scalar_tend_tim)
2127 CALL rk_scalar_tend ( im, im, config_flags, &
2128 rk_step, dt_rk, &
2129 grid%em_ru_m, grid%em_rv_m, ww_m, &
2130 grid%em_mut, grid%em_alt, &
2131 moist(ims,kms,jms,im), &
2132 moist_tend(ims,kms,jms,im), &
2133 advect_tend,grid%rqvften, &
2134 grid%qv_base, .true., grid%em_fnm, grid%em_fnp, &
2135 grid%msfu, grid%msfv, grid%msft, &
2136 grid%rdx, grid%rdy, grid%em_rdn, grid%em_rdnw, grid%khdif, &
2137 grid%kvdif, grid%xkmhd, &
2138 grid%diff_6th_opt, grid%diff_6th_rate, &
2139 ids, ide, jds, jde, kds, kde, &
2140 ims, ime, jms, jme, kms, kme, &
2141 grid%i_start(ij), grid%i_end(ij), &
2142 grid%j_start(ij), grid%j_end(ij), &
2143 k_start , k_end )
2144
2145 BENCH_END(rk_scalar_tend_tim)
2146
2147 BENCH_START(rlx_bdy_scalar_tim)
2148 IF( ( config_flags%specified .or. config_flags%nested ) .and. rk_step == 1 ) THEN
2149 IF ( im .EQ. P_QV .OR. config_flags%nested ) THEN
2150 CALL relax_bdy_scalar ( moist_tend(ims,kms,jms,im), &
2151 moist(ims,kms,jms,im), grid%em_mut, &
2152 moist_b(1,1,1,1,im), &
2153 moist_bt(1,1,1,1,im), &
2154 config_flags%spec_bdy_width, grid%spec_zone, grid%relax_zone, &
2155 grid%dtbc, grid%fcx, grid%gcx, &
2156 config_flags, &
2157 ijds, ijde, & ! min/max(id,jd)
2158 ids,ide, jds,jde, kds,kde, & ! domain dims
2159 ims,ime, jms,jme, kms,kme, & ! memory dims
2160 ips,ipe, jps,jpe, kps,kpe, & ! patch dims
2161 grid%i_start(ij), grid%i_end(ij), &
2162 grid%j_start(ij), grid%j_end(ij), &
2163 k_start, k_end )
2164 #ifdef WRF_PLUS
2165 if(TEST_spec_bdy_scalar) THEN
2166 CALL t_spec_bdy_scalar ( moist_tend(ims,kms,jms,im), &
2167 moist_b(1,1,1,1,im), &
2168 moist_bt(1,1,1,1,im), &
2169 config_flags%spec_bdy_width, grid%spec_zone, &
2170 config_flags, &
2171 ijds, ijde, & ! min/max(id,jd)
2172 ids,ide, jds,jde, kds,kde, & ! domain dims
2173 ims,ime, jms,jme, kms,kme, & ! memory dims
2174 ips,ipe, jps,jpe, kps,kpe, & ! patch dims
2175 grid%i_start(ij), grid%i_end(ij), &
2176 grid%j_start(ij), grid%j_end(ij), &
2177 k_start, k_end )
2178 END IF
2179 #endif
2180
2181 CALL spec_bdy_scalar ( moist_tend(ims,kms,jms,im), &
2182 moist_b(1,1,1,1,im), &
2183 moist_bt(1,1,1,1,im), &
2184 config_flags%spec_bdy_width, grid%spec_zone, &
2185 config_flags, &
2186 ijds, ijde, & ! min/max(id,jd)
2187 ids,ide, jds,jde, kds,kde, & ! domain dims
2188 ims,ime, jms,jme, kms,kme, & ! memory dims
2189 ips,ipe, jps,jpe, kps,kpe, & ! patch dims
2190 grid%i_start(ij), grid%i_end(ij), &
2191 grid%j_start(ij), grid%j_end(ij), &
2192 k_start, k_end )
2193 ENDIF
2194 ENDIF
2195 BENCH_END(rlx_bdy_scalar_tim)
2196
2197 ENDDO moist_tile_loop_1
2198 !$OMP END PARALLEL DO
2199
2200 !$OMP PARALLEL DO &
2201 !$OMP PRIVATE ( ij )
2202
2203 moist_tile_loop_2: DO ij = 1 , grid%num_tiles
2204
2205 CALL wrf_debug ( 200 , ' call rk_update_scalar' )
2206
2207 #ifdef WRF_PLUS
2208 if(TEST_rk_update_scalar) THEN
2209 CALL t_rk_update_scalar( im, im, &
2210 moist_old(ims,kms,jms,im), &
2211 moist(ims,kms,jms,im), &
2212 moist_tend(ims,kms,jms,im), &
2213 advect_tend, grid%msft, &
2214 grid%em_mu_1, grid%em_mu_2, grid%em_mub, &
2215 rk_step, dt_rk, grid%spec_zone, &
2216 config_flags, &
2217 ids, ide, jds, jde, kds, kde, &
2218 ims, ime, jms, jme, kms, kme, &
2219 grid%i_start(ij), grid%i_end(ij), &
2220 grid%j_start(ij), grid%j_end(ij), &
2221 k_start , k_end )
2222 END IF
2223 #endif
2224 BENCH_START(update_scal_tim)
2225 CALL rk_update_scalar( im, im, &
2226 moist_old(ims,kms,jms,im), &
2227 moist(ims,kms,jms,im), &
2228 moist_tend(ims,kms,jms,im), &
2229 advect_tend, grid%msft, &
2230 grid%em_mu_1, grid%em_mu_2, grid%em_mub, &
2231 rk_step, dt_rk, grid%spec_zone, &
2232 config_flags, &
2233 ids, ide, jds, jde, kds, kde, &
2234 ims, ime, jms, jme, kms, kme, &
2235 grid%i_start(ij), grid%i_end(ij), &
2236 grid%j_start(ij), grid%j_end(ij), &
2237 k_start , k_end )
2238 BENCH_END(update_scal_tim)
2239
2240 BENCH_START(flow_depbdy_tim)
2241 IF( config_flags%specified ) THEN
2242 IF(im .ne. P_QV)THEN
2243 CALL flow_dep_bdy ( moist(ims,kms,jms,im), &
2244 grid%em_ru_m, grid%em_rv_m, config_flags, &
2245 grid%spec_zone, &
2246 ids,ide, jds,jde, kds,kde, & ! domain dims
2247 ims,ime, jms,jme, kms,kme, & ! memory dims
2248 ips,ipe, jps,jpe, kps,kpe, & ! patch dims
2249 grid%i_start(ij), grid%i_end(ij), &
2250 grid%j_start(ij), grid%j_end(ij), &
2251 k_start, k_end )
2252 ENDIF
2253 ENDIF
2254 BENCH_END(flow_depbdy_tim)
2255
2256 ENDDO moist_tile_loop_2
2257 !$OMP END PARALLEL DO
2258
2259 ENDIF !-- if (grid%adv_moist_cond .or. (im==p_qv) ) then
2260
2261 ENDDO moist_variable_loop
2262
2263 ENDIF moist_scalar_advance
2264
2265 BENCH_START(tke_adv_tim)
2266 TKE_advance: IF (config_flags%km_opt .eq. 2) then
2267
2268 #ifdef DM_PARALLEL
2269 IF ( config_flags%h_mom_adv_order <= 4 ) THEN
2270 # include "HALO_EM_TKE_ADVECT_3.inc"
2271 ELSE IF ( config_flags%h_mom_adv_order <= 6 ) THEN
2272 # include "HALO_EM_TKE_ADVECT_5.inc"
2273 ELSE
2274 WRITE(wrf_err_message,*)'solve_em: invalid h_mom_adv_order = ',config_flags%h_mom_adv_order
2275 CALL wrf_error_fatal(TRIM(wrf_err_message))
2276 ENDIF
2277 #endif
2278
2279 !$OMP PARALLEL DO &
2280 !$OMP PRIVATE ( ij )
2281
2282 tke_tile_loop_1: DO ij = 1 , grid%num_tiles
2283
2284 CALL wrf_debug ( 200 , ' call rk_scalar_tend for tke' )
2285 CALL rk_scalar_tend ( 1, 1, config_flags, &
2286 rk_step, dt_rk, &
2287 grid%em_ru_m, grid%em_rv_m, ww_m, &
2288 grid%em_mut, grid%em_alt, &
2289 grid%em_tke_2, &
2290 tke_tend(ims,kms,jms), &
2291 advect_tend,grid%rqvften, &
2292 grid%qv_base, .false., grid%em_fnm, grid%em_fnp, &
2293 grid%msfu, grid%msfv, grid%msft, &
2294 grid%rdx, grid%rdy, grid%em_rdn, grid%em_rdnw, grid%khdif, &
2295 grid%kvdif, grid%xkmhd, &
2296 grid%diff_6th_opt, grid%diff_6th_rate, &
2297 ids, ide, jds, jde, kds, kde, &
2298 ims, ime, jms, jme, kms, kme, &
2299 grid%i_start(ij), grid%i_end(ij), &
2300 grid%j_start(ij), grid%j_end(ij), &
2301 k_start , k_end )
2302
2303 ENDDO tke_tile_loop_1
2304 !$OMP END PARALLEL DO
2305
2306 !$OMP PARALLEL DO &
2307 !$OMP PRIVATE ( ij )
2308
2309 tke_tile_loop_2: DO ij = 1 , grid%num_tiles
2310
2311 CALL wrf_debug ( 200 , ' call rk_update_scalar' )
2312 CALL rk_update_scalar( 1, 1, &
2313 grid%em_tke_1, &
2314 grid%em_tke_2, &
2315 tke_tend(ims,kms,jms), &
2316 advect_tend,grid%msft, &
2317 grid%em_mu_1, grid%em_mu_2, grid%em_mub, &
2318 rk_step, dt_rk, grid%spec_zone, &
2319 config_flags, &
2320 ids, ide, jds, jde, kds, kde, &
2321 ims, ime, jms, jme, kms, kme, &
2322 grid%i_start(ij), grid%i_end(ij), &
2323 grid%j_start(ij), grid%j_end(ij), &
2324 k_start , k_end )
2325
2326 ! bound the tke (greater than 0, less than tke_upper_bound)
2327
2328 CALL bound_tke( grid%em_tke_2, grid%tke_upper_bound, &
2329 ids, ide, jds, jde, kds, kde, &
2330 ims, ime, jms, jme, kms, kme, &
2331 grid%i_start(ij), grid%i_end(ij), &
2332 grid%j_start(ij), grid%j_end(ij), &
2333 k_start , k_end )
2334
2335 IF( config_flags%specified .or. config_flags%nested ) THEN
2336 CALL flow_dep_bdy ( grid%em_tke_2, &
2337 grid%em_ru_m, grid%em_rv_m, config_flags, &
2338 grid%spec_zone, &
2339 ids,ide, jds,jde, kds,kde, & ! domain dims
2340 ims,ime, jms,jme, kms,kme, & ! memory dims
2341 ips,ipe, jps,jpe, kps,kpe, & ! patch dims
2342 grid%i_start(ij), grid%i_end(ij), &
2343 grid%j_start(ij), grid%j_end(ij), &
2344 k_start, k_end )
2345 ENDIF
2346 ENDDO tke_tile_loop_2
2347 !$OMP END PARALLEL DO
2348
2349 END IF TKE_advance
2350 BENCH_END(tke_adv_tim)
2351
2352 #ifdef WRF_CHEM
2353 ! next the chemical species
2354 BENCH_START(chem_adv_tim)
2355 chem_scalar_advance: IF (num_3d_c >= PARAM_FIRST_SCALAR) THEN
2356
2357 chem_variable_loop: do ic = PARAM_FIRST_SCALAR, num_3d_c
2358
2359 !$OMP PARALLEL DO &
2360 !$OMP PRIVATE ( ij )
2361
2362 chem_tile_loop_1: DO ij = 1 , grid%num_tiles
2363
2364 CALL wrf_debug ( 200 , ' call rk_scalar_tend in chem_tile_loop_1' )
2365 CALL rk_scalar_tend ( ic, ic, config_flags, &
2366 rk_step, dt_rk, &
2367 grid%em_ru_m, grid%em_rv_m, ww_m, &
2368 grid%em_mut, grid%em_alt, &
2369 chem(ims,kms,jms,ic), &
2370 chem_tend(ims,kms,jms,ic), &
2371 advect_tend,grid%rqvften, &
2372 grid%qv_base, .false., grid%em_fnm, grid%em_fnp, &
2373 grid%msfu, grid%msfv, grid%msft, &
2374 grid%rdx, grid%rdy, grid%em_rdn, grid%em_rdnw, &
2375 grid%khdif, grid%kvdif, grid%xkmhd, &
2376 grid%diff_6th_opt, grid%diff_6th_rate, &
2377 ids, ide, jds, jde, kds, kde, &
2378 ims, ime, jms, jme, kms, kme, &
2379 grid%i_start(ij), grid%i_end(ij), &
2380 grid%j_start(ij), grid%j_end(ij), &
2381 k_start , k_end )
2382 !
2383 ! Currently, chemistry species with specified boundaries (i.e. the mother
2384 ! domain) are being overwritten by flow_dep_bdy_chem. So, relax_bdy and
2385 ! spec_bdy are only called for nests. This will need to be modified for
2386 ! cycling of chem data between runs or if certain species are sent in
2387 ! from a GCM. William.Gustafson@pnl.gov
2388 !
2389 IF( ( config_flags%nested ) .and. rk_step == 1 ) THEN
2390 CALL relax_bdy_scalar ( chem_tend(ims,kms,jms,ic), &
2391 chem(ims,kms,jms,ic), grid%em_mut, &
2392 chem_b(1,1,1,1,ic), &
2393 chem_bt(1,1,1,1,ic), &
2394 config_flags%spec_bdy_width, grid%spec_zone, grid%relax_zone, &
2395 grid%dtbc, grid%fcx, grid%gcx, &
2396 config_flags, &
2397 ijds, ijde, & ! min/max(id,jd)
2398 ids,ide, jds,jde, kds,kde, & ! domain dims
2399 ims,ime, jms,jme, kms,kme, & ! memory dims
2400 ips,ipe, jps,jpe, kps,kpe, & ! patch dims
2401 grid%i_start(ij), grid%i_end(ij), &
2402 grid%j_start(ij), grid%j_end(ij), &
2403 k_start, k_end )
2404 CALL spec_bdy_scalar ( chem_tend(ims,kms,jms,ic), &
2405 chem_b(1,1,1,1,ic), &
2406 chem_bt(1,1,1,1,ic), &
2407 config_flags%spec_bdy_width, grid%spec_zone, &
2408 config_flags, &
2409 ijds, ijde, & ! min/max(id,jd)
2410 ids,ide, jds,jde, kds,kde, & ! domain dims
2411 ims,ime, jms,jme, kms,kme, & ! memory dims
2412 ips,ipe, jps,jpe, kps,kpe, & ! patch dims
2413 grid%i_start(ij), grid%i_end(ij), &
2414 grid%j_start(ij), grid%j_end(ij), &
2415 k_start, k_end )
2416 ENDIF
2417
2418 ENDDO chem_tile_loop_1
2419
2420 !$OMP END PARALLEL DO
2421
2422
2423 !$OMP PARALLEL DO &
2424 !$OMP PRIVATE ( ij )
2425
2426 chem_tile_loop_2: DO ij = 1 , grid%num_tiles
2427
2428 CALL wrf_debug ( 200 , ' call rk_update_scalar' )
2429 CALL rk_update_scalar( ic, ic, &
2430 chem_old(ims,kms,jms,ic), & ! was chem_1
2431 chem(ims,kms,jms,ic), &
2432 chem_tend(ims,kms,jms,ic), &
2433 advect_tend, grid%msft, &
2434 grid%em_mu_1, grid%em_mu_2, grid%em_mub, &
2435 rk_step, dt_rk, grid%spec_zone, &
2436 config_flags, &
2437 ids, ide, jds, jde, kds, kde, &
2438 ims, ime, jms, jme, kms, kme, &
2439 grid%i_start(ij), grid%i_end(ij), &
2440 grid%j_start(ij), grid%j_end(ij), &
2441 k_start , k_end )
2442
2443
2444 IF( config_flags%specified ) THEN
2445 ! come back to this and figure out why two different routines are needed. JM 20041203
2446 !#ifndef WRF_CHEM
2447 !!$ CALL flow_dep_bdy ( chem(ims,kms,jms,ic), &
2448 !!$ grid%em_ru_m, grid%em_rv_m, config_flags, &
2449 !!$ grid%spec_zone, &
2450 !!$ ids,ide, jds,jde, kds,kde, & ! domain dims
2451 !!$ ims,ime, jms,jme, kms,kme, & ! memory dims
2452 !!$ ips,ipe, jps,jpe, kps,kpe, & ! patch dims
2453 !!$ grid%i_start(ij), grid%i_end(ij), &
2454 !!$ grid%j_start(ij), grid%j_end(ij), &
2455 !!$ k_start, k_end )
2456 !#else
2457 CALL flow_dep_bdy_chem( chem(ims,kms,jms,ic),grid%em_z, &
2458 grid%em_ru_m, grid%em_rv_m, config_flags,grid%em_alt, &
2459 grid%em_t_1,grid%em_pb,grid%em_p,t0,p1000mb,rcp,grid%em_ph_2,grid%em_phb,g, &
2460 grid%spec_zone,ic, &
2461 ids,ide, jds,jde, kds,kde, & ! domain dims
2462 ims,ime, jms,jme, kms,kme, & ! memory dims
2463 ips,ipe, jps,jpe, kps,kpe, & ! patch dims
2464 grid%i_start(ij), grid%i_end(ij), &
2465 grid%j_start(ij), grid%j_end(ij), &
2466 k_start, k_end )
2467 !#endif
2468 ENDIF
2469
2470
2471 ENDDO chem_tile_loop_2
2472 !$OMP END PARALLEL DO
2473
2474 ENDDO chem_variable_loop
2475
2476 ENDIF chem_scalar_advance
2477 BENCH_END(chem_adv_tim)
2478 #endif
2479
2480 ! next the other scalar species
2481 other_scalar_advance: IF (num_3d_s >= PARAM_FIRST_SCALAR) THEN
2482
2483 scalar_variable_loop: do is = PARAM_FIRST_SCALAR, num_3d_s
2484 !$OMP PARALLEL DO &
2485 !$OMP PRIVATE ( ij )
2486
2487 scalar_tile_loop_1: DO ij = 1 , grid%num_tiles
2488
2489 CALL wrf_debug ( 200 , ' call rk_scalar_tend' )
2490 CALL rk_scalar_tend ( is, is, config_flags, &
2491 rk_step, dt_rk, &
2492 grid%em_ru_m, grid%em_rv_m, ww_m, &
2493 grid%em_mut, grid%em_alt, &
2494 scalar(ims,kms,jms,is), &
2495 scalar_tend(ims,kms,jms,is), &
2496 advect_tend,grid%rqvften, &
2497 grid%qv_base, .false., grid%em_fnm, grid%em_fnp, &
2498 grid%msfu, grid%msfv, grid%msft, &
2499 grid%rdx, grid%rdy, grid%em_rdn, grid%em_rdnw, &
2500 grid%khdif, grid%kvdif, grid%xkmhd, &
2501 grid%diff_6th_opt, grid%diff_6th_rate, &
2502 ids, ide, jds, jde, kds, kde, &
2503 ims, ime, jms, jme, kms, kme, &
2504 grid%i_start(ij), grid%i_end(ij), &
2505 grid%j_start(ij), grid%j_end(ij), &
2506 k_start , k_end )
2507
2508 IF( config_flags%nested .and. (rk_step == 1) ) THEN
2509
2510 IF (is .eq. P_QNI) THEN
2511
2512 CALL relax_bdy_scalar ( scalar_tend(ims,kms,jms,is), &
2513 scalar(ims,kms,jms,is), grid%em_mut, &
2514 scalar_b(1,1,1,1,is), &
2515 scalar_bt(1,1,1,1,is), &
2516 config_flags%spec_bdy_width, grid%spec_zone, grid%relax_zone, &
2517 grid%dtbc, grid%fcx, grid%gcx, &
2518 config_flags, &
2519 ijds, ijde, & ! min/max(id,jd)
2520 ids,ide, jds,jde, kds,kde, & ! domain dims
2521 ims,ime, jms,jme, kms,kme, & ! memory dims
2522 ips,ipe, jps,jpe, kps,kpe, & ! patch dims
2523 grid%i_start(ij), grid%i_end(ij), &
2524 grid%j_start(ij), grid%j_end(ij), &
2525 k_start, k_end )
2526
2527 CALL spec_bdy_scalar ( scalar_tend(ims,kms,jms,is), &
2528 scalar_b(1,1,1,1,is), &
2529 scalar_bt(1,1,1,1,is), &
2530 config_flags%spec_bdy_width, grid%spec_zone, &
2531 config_flags, &
2532 ijds, ijde, & ! min/max(id,jd)
2533 ids,ide, jds,jde, kds,kde, & ! domain dims
2534 ims,ime, jms,jme, kms,kme, & ! memory dims
2535 ips,ipe, jps,jpe, kps,kpe, & ! patch dims
2536 grid%i_start(ij), grid%i_end(ij), &
2537 grid%j_start(ij), grid%j_end(ij), &
2538 k_start, k_end )
2539
2540 ENDIF
2541
2542 ENDIF ! b.c test for chem nested boundary condition
2543
2544 ENDDO scalar_tile_loop_1
2545 !$OMP END PARALLEL DO
2546
2547
2548 !$OMP PARALLEL DO &
2549 !$OMP PRIVATE ( ij )
2550
2551 scalar_tile_loop_2: DO ij = 1 , grid%num_tiles
2552
2553 CALL wrf_debug ( 200 , ' call rk_update_scalar' )
2554 CALL rk_update_scalar( is, is, &
2555 scalar_old(ims,kms,jms,is), & ! was scalar_1
2556 scalar(ims,kms,jms,is), &
2557 scalar_tend(ims,kms,jms,is), &
2558 advect_tend, grid%msft, &
2559 grid%em_mu_1, grid%em_mu_2, grid%em_mub, &
2560 rk_step, dt_rk, grid%spec_zone, &
2561 config_flags, &
2562 ids, ide, jds, jde, kds, kde, &
2563 ims, ime, jms, jme, kms, kme, &
2564 grid%i_start(ij), grid%i_end(ij), &
2565 grid%j_start(ij), grid%j_end(ij), &
2566 k_start , k_end )
2567
2568
2569 IF( config_flags%specified ) THEN
2570 CALL flow_dep_bdy ( scalar(ims,kms,jms,is), &
2571 grid%em_ru_m, grid%em_rv_m, config_flags, &
2572 grid%spec_zone, &
2573 ids,ide, jds,jde, kds,kde, & ! domain dims
2574 ims,ime, jms,jme, kms,kme, & ! memory dims
2575 ips,ipe, jps,jpe, kps,kpe, & ! patch dims
2576 grid%i_start(ij), grid%i_end(ij), &
2577 grid%j_start(ij), grid%j_end(ij), &
2578 k_start, k_end )
2579 ENDIF
2580
2581
2582 ENDDO scalar_tile_loop_2
2583 !$OMP END PARALLEL DO
2584
2585 ENDDO scalar_variable_loop
2586
2587 ENDIF other_scalar_advance
2588
2589 ! update the pressure and density at the new time level
2590
2591 !$OMP PARALLEL DO &
2592 !$OMP PRIVATE ( ij )
2593 DO ij = 1 , grid%num_tiles
2594
2595
2596 #ifdef WRF_PLUS
2597 if(TEST_calc_p_rho_phi) THEN
2598 CALL t_calc_p_rho_phi( moist, num_3d_m, &
2599 grid%em_al, grid%em_alb, grid%em_mu_2, grid%em_muts, &
2600 grid%em_ph_2, grid%em_p, grid%em_pb, grid%em_t_2, &
2601 p0, t0, grid%em_znu, grid%em_dnw, grid%em_rdnw, &
2602 grid%em_rdn, config_flags%non_hydrostatic, &
2603 ids, ide, jds, jde, kds, kde, &
2604 ims, ime, jms, jme, kms, kme, &
2605 grid%i_start(ij), grid%i_end(ij), &
2606 grid%j_start(ij), grid%j_end(ij), &
2607 k_start , k_end )
2608 END IF
2609 #endif
2610 BENCH_START(calc_p_rho_tim)
2611
2612 CALL calc_p_rho_phi( moist, num_3d_m, &
2613 grid%em_al, grid%em_alb, grid%em_mu_2, grid%em_muts, &
2614 grid%em_ph_2, grid%em_p, grid%em_pb, grid%em_t_2, &
2615 p0, t0, grid%em_znu, grid%em_dnw, grid%em_rdnw, &
2616 grid%em_rdn, config_flags%non_hydrostatic, &
2617 ids, ide, jds, jde, kds, kde, &
2618 ims, ime, jms, jme, kms, kme, &
2619 grid%i_start(ij), grid%i_end(ij), &
2620 grid%j_start(ij), grid%j_end(ij), &
2621 k_start , k_end )
2622
2623 BENCH_END(calc_p_rho_tim)
2624
2625 ENDDO
2626 !$OMP END PARALLEL DO
2627
2628 ! Reset the boundary conditions if there is another corrector step.
2629 ! (rk_step < rk_order), else we'll handle it at the end of everything
2630 ! (after the split physics, before exiting the timestep).
2631
2632 rk_step_1_check: IF ( rk_step < rk_order ) THEN
2633
2634 !-----------------------------------------------------------
2635 ! Stencils for patch communications (WCS, 29 June 2001)
2636 !
2637 ! here's where we need a wide comm stencil - these are the
2638 ! uncoupled variables so are used for high order calc in
2639 ! advection and mixing routines.
2640 !
2641 ! * * * * *
2642 ! * * * * * * * * *
2643 ! * + * * + * * * + * *
2644 ! * * * * * * * * *
2645 ! * * * * *
2646 !
2647 !
2648 ! grid%em_u_2 x
2649 ! grid%em_v_2 x
2650 ! grid%em_w_2 x
2651 ! grid%em_t_2 x
2652 ! grid%em_ph_2 x
2653 ! grid%em_al x
2654 !
2655 ! 2D variable
2656 ! grid%em_mu_2 x
2657 !
2658 ! 4D variable
2659 ! moist x
2660 ! chem x
2661 ! scalar x
2662
2663 #ifdef DM_PARALLEL
2664 IF ( config_flags%h_mom_adv_order <= 4 ) THEN
2665 # include "HALO_EM_D2_3.inc"
2666 ELSE IF ( config_flags%h_mom_adv_order <= 6 ) THEN
2667 # include "HALO_EM_D2_5.inc"
2668 ELSE
2669 WRITE(wrf_err_message,*)'solve_em: invalid h_mom_adv_order = ',config_flags%h_mom_adv_order
2670 CALL wrf_error_fatal(TRIM(wrf_err_message))
2671 ENDIF
2672 # include "PERIOD_BDY_EM_D.inc"
2673 # include "PERIOD_BDY_EM_MOIST.inc"
2674 # include "PERIOD_BDY_EM_CHEM.inc"
2675 # include "PERIOD_BDY_EM_SCALAR.inc"
2676 #endif
2677
2678 BENCH_START(bc_end_tim)
2679 !$OMP PARALLEL DO &
2680 !$OMP PRIVATE ( ij )
2681
2682 tile_bc_loop_1: DO ij = 1 , grid%num_tiles
2683
2684
2685 CALL wrf_debug ( 200 , ' call rk_phys_bc_dry_2' )
2686
2687 CALL rk_phys_bc_dry_2( config_flags, &
2688 grid%em_u_2, grid%em_v_2, grid%em_w_2, &
2689 grid%em_t_2, grid%em_ph_2, grid%em_mu_2, &
2690 ids, ide, jds, jde, kds, kde, &
2691 ims, ime, jms, jme, kms, kme, &
2692 ips, ipe, jps, jpe, kps, kpe, &
2693 grid%i_start(ij), grid%i_end(ij), &
2694 grid%j_start(ij), grid%j_end(ij), &
2695 k_start , k_end )
2696
2697 BENCH_START(diag_w_tim)
2698 IF (.not. config_flags%non_hydrostatic) THEN
2699 #ifdef WRF_PLUS
2700 if(TEST_diagnose_w) THEN
2701 CALL t_diagnose_w( ph_tend, grid%em_ph_2, grid%em_ph_1, grid%em_w_2, grid%em_muts, dt_rk, &
2702 grid%em_u_2, grid%em_v_2, grid%ht, &
2703 grid%cf1, grid%cf2, grid%cf3, grid%rdx, grid%rdy, grid%msft, &
2704 ids, ide, jds, jde, kds, kde, &
2705 ims, ime, jms, jme, kms, kme, &
2706 grid%i_start(ij), grid%i_end(ij), &
2707 grid%j_start(ij), grid%j_end(ij), &
2708 k_start , k_end )
2709 END IF
2710 #endif
2711 CALL diagnose_w( ph_tend, grid%em_ph_2, grid%em_ph_1, grid%em_w_2, grid%em_muts, dt_rk, &
2712 grid%em_u_2, grid%em_v_2, grid%ht, &
2713 grid%cf1, grid%cf2, grid%cf3, grid%rdx, grid%rdy, grid%msft, &
2714 ids, ide, jds, jde, kds, kde, &
2715 ims, ime, jms, jme, kms, kme, &
2716 grid%i_start(ij), grid%i_end(ij), &
2717 grid%j_start(ij), grid%j_end(ij), &
2718 k_start , k_end )
2719 ENDIF
2720 BENCH_END(diag_w_tim)
2721
2722 IF (num_3d_m >= PARAM_FIRST_SCALAR) THEN
2723
2724 moisture_loop_bdy_1 : DO im = PARAM_FIRST_SCALAR , num_3d_m
2725
2726 CALL set_physical_bc3d( moist(ims,kms,jms,im), 'grid%em_p', config_flags, &
2727 ids, ide, jds, jde, kds, kde, &
2728 ims, ime, jms, jme, kms, kme, &
2729 ips, ipe, jps, jpe, kps, kpe, &
2730 grid%i_start(ij), grid%i_end(ij), &
2731 grid%j_start(ij), grid%j_end(ij), &
2732 k_start , k_end )
2733 END DO moisture_loop_bdy_1
2734
2735 ENDIF
2736
2737 IF (num_3d_c >= PARAM_FIRST_SCALAR) THEN
2738
2739 chem_species_bdy_loop_1 : DO ic = PARAM_FIRST_SCALAR , num_3d_c
2740
2741 CALL set_physical_bc3d( chem(ims,kms,jms,ic), 'grid%em_p', config_flags, &
2742 ids, ide, jds, jde, kds, kde, &
2743 ims, ime, jms, jme, kms, kme, &
2744 ips, ipe, jps, jpe, kps, kpe, &
2745 grid%i_start(ij), grid%i_end(ij), &
2746 grid%j_start(ij), grid%j_end(ij), &
2747 k_start , k_end-1 )
2748
2749 END DO chem_species_bdy_loop_1
2750
2751 END IF
2752
2753 IF (num_3d_s >= PARAM_FIRST_SCALAR) THEN
2754
2755 scalar_species_bdy_loop_1 : DO is = PARAM_FIRST_SCALAR , num_3d_s
2756
2757 CALL set_physical_bc3d( scalar(ims,kms,jms,is), 'grid%em_p', config_flags, &
2758 ids, ide, jds, jde, kds, kde, &
2759 ims, ime, jms, jme, kms, kme, &
2760 ips, ipe, jps, jpe, kps, kpe, &
2761 grid%i_start(ij), grid%i_end(ij), &
2762 grid%j_start(ij), grid%j_end(ij), &
2763 k_start , k_end-1 )
2764
2765 END DO scalar_species_bdy_loop_1
2766
2767 END IF
2768
2769 IF (config_flags%km_opt .eq. 2) THEN
2770
2771 CALL set_physical_bc3d( grid%em_tke_2 , 'grid%em_p', config_flags, &
2772 ids, ide, jds, jde, kds, kde, &
2773 ims, ime, jms, jme, kms, kme, &
2774 ips, ipe, jps, jpe, kps, kpe, &
2775 grid%i_start(ij), grid%i_end(ij), &
2776 grid%j_start(ij), grid%j_end(ij), &
2777 k_start , k_end )
2778 END IF
2779
2780 END DO tile_bc_loop_1
2781 !$OMP END PARALLEL DO
2782 BENCH_END(bc_end_tim)
2783
2784
2785 #ifdef DM_PARALLEL
2786
2787 IF ( config_flags%h_mom_adv_order <= 4 ) THEN
2788 # include "HALO_EM_TKE_3.inc"
2789 ELSE IF ( config_flags%h_mom_adv_order <= 6 ) THEN
2790 # include "HALO_EM_TKE_5.inc"
2791 ELSE
2792 WRITE(wrf_err_message,*)'solve_em: invalid h_mom_adv_order = ',config_flags%h_mom_adv_order
2793 CALL wrf_error_fatal(TRIM(wrf_err_message))
2794 ENDIF
2795
2796 #if 0
2797 IF (config_flags%km_opt .eq. 2) THEN
2798 # include "HALO_EM_TKE_F.inc"
2799 ENDIF
2800 #endif
2801
2802 if ( num_moist .ge. PARAM_FIRST_SCALAR ) then
2803
2804 ! * * * * *
2805 ! * * * * * * * * *
2806 ! * + * * + * * * + * *
2807 ! * * * * * * * * *
2808 ! * * * * *
2809
2810 ! moist x
2811
2812 IF ( config_flags%h_mom_adv_order <= 4 ) THEN
2813 # include "HALO_EM_MOIST_E_3.inc"
2814 ELSE IF ( config_flags%h_mom_adv_order <= 6 ) THEN
2815 # include "HALO_EM_MOIST_E_5.inc"
2816 ELSE
2817 WRITE(wrf_err_message,*)'solve_em: invalid h_mom_adv_order = ',config_flags%h_mom_adv_order
2818 CALL wrf_error_fatal(TRIM(wrf_err_message))
2819 ENDIF
2820 endif
2821 if ( num_chem >= PARAM_FIRST_SCALAR ) then
2822
2823 ! * * * * *
2824 ! * * * * * * * * *
2825 ! * + * * + * * * + * *
2826 ! * * * * * * * * *
2827 ! * * * * *
2828
2829 ! chem x
2830
2831 IF ( config_flags%h_mom_adv_order <= 4 ) THEN
2832 # include "HALO_EM_CHEM_E_3.inc"
2833 ELSE IF ( config_flags%h_mom_adv_order <= 6 ) THEN
2834 # include "HALO_EM_CHEM_E_5.inc"
2835 ELSE
2836 WRITE(wrf_err_message,*)'solve_em: invalid h_mom_adv_order = ',config_flags%h_mom_adv_order
2837 CALL wrf_error_fatal(TRIM(wrf_err_message))
2838 ENDIF
2839 endif
2840 if ( num_scalar >= PARAM_FIRST_SCALAR ) then
2841
2842 ! * * * * *
2843 ! * * * * * * * * *
2844 ! * + * * + * * * + * *
2845 ! * * * * * * * * *
2846 ! * * * * *
2847
2848 ! scalar x
2849
2850 IF ( config_flags%h_mom_adv_order <= 4 ) THEN
2851 # include "HALO_EM_SCALAR_E_3.inc"
2852 ELSE IF ( config_flags%h_mom_adv_order <= 6 ) THEN
2853 # include "HALO_EM_SCALAR_E_5.inc"
2854 ELSE
2855 WRITE(wrf_err_message,*)'solve_em: invalid h_mom_adv_order = ',config_flags%h_mom_adv_order
2856 CALL wrf_error_fatal(TRIM(wrf_err_message))
2857 ENDIF
2858 endif
2859 #endif
2860
2861 ENDIF rk_step_1_check
2862
2863
2864 !**********************************************************
2865 !
2866 ! end of RK predictor-corrector loop
2867 !
2868 !**********************************************************
2869
2870 END DO Runge_Kutta_loop
2871
2872 !$OMP PARALLEL DO &
2873 !$OMP PRIVATE ( ij )
2874
2875 DO ij = 1 , grid%num_tiles
2876
2877 BENCH_START(advance_ppt_tim)
2878 CALL wrf_debug ( 200 , ' call advance_ppt' )
2879 CALL advance_ppt(grid%rthcuten,grid%rqvcuten,grid%rqccuten,grid%rqrcuten, &
2880 grid%rqicuten,grid%rqscuten,grid%rainc,grid%raincv,grid%nca, &
2881 grid%htop,grid%hbot,grid%cutop,grid%cubot, &
2882 grid%cuppt, config_flags, &
2883 ids,ide, jds,jde, kds,kde, &
2884 ims,ime, jms,jme, kms,kme, &
2885 grid%i_start(ij), grid%i_end(ij), &
2886 grid%j_start(ij), grid%j_end(ij), &
2887 k_start , k_end )
2888 BENCH_END(advance_ppt_tim)
2889
2890 ENDDO
2891 !$OMP END PARALLEL DO
2892
2893 !<DESCRIPTION>
2894 !<pre>
2895 ! (5) time-split physics.
2896 !
2897 ! Microphysics are the only time split physics in the WRF model
2898 ! at this time. Split-physics begins with the calculation of
2899 ! needed diagnostic quantities (pressure, temperature, etc.)
2900 ! followed by a call to the microphysics driver,
2901 ! and finishes with a clean-up, storing off of a diabatic tendency
2902 ! from the moist physics, and a re-calculation of the diagnostic
2903 ! quantities pressure and density.
2904 !</pre>
2905 !</DESCRIPTION>
2906
! Time-split microphysics: run only when a microphysics scheme is
! selected (mp_physics /= 0).
2907 IF (config_flags%mp_physics /= 0) then
2908
! For specified/nested lateral boundaries, sz is the width of the
! specified zone; the tile extents below are shrunk by sz so the
! split physics does not operate inside the specified boundary rows.
2909 IF( config_flags%specified .or. config_flags%nested ) THEN
2910 sz = grid%spec_zone
2911 ELSE
2912 sz = 0
2913 ENDIF
2914
2915 !$OMP PARALLEL DO &
2916 !$OMP PRIVATE ( ij, its, ite, jts, jte )
2917
2918 scalar_tile_loop_1a: DO ij = 1 , grid%num_tiles
2919
2920 IF ( config_flags%periodic_x ) THEN
2921 its = max(grid%i_start(ij),ids)
2922 ite = min(grid%i_end(ij),ide-1)
2923 ELSE
2924 its = max(grid%i_start(ij),ids+sz)
2925 ite = min(grid%i_end(ij),ide-1-sz)
2926 ENDIF
2927 jts = max(grid%j_start(ij),jds+sz)
2928 jte = min(grid%j_end(ij),jde-1-sz)
2929
2930 CALL wrf_debug ( 200 , ' call moist_physics_prep' )
2931 BENCH_START(moist_physics_prep_tim)
2932 CALL moist_physics_prep_em( grid%em_t_2, grid%em_t_1, t0, rho, &
2933 grid%em_al, grid%em_alb, grid%em_p, p8w, p0, grid%em_pb, &
2934 grid%em_ph_2, grid%em_phb, th_phy, pi_phy, p_phy, &
2935 grid%em_z, z_at_w, dz8w, &
2936 dtm, grid%h_diabatic, &
2937 config_flags,grid%em_fnm, grid%em_fnp, &
2938 ids, ide, jds, jde, kds, kde, &
2939 ims, ime, jms, jme, kms, kme, &
2940 its, ite, jts, jte, &
2941 k_start , k_end )
2942 BENCH_END(moist_physics_prep_tim)
2943 END DO scalar_tile_loop_1a
2944 !$OMP END PARALLEL DO
2945
2946 CALL wrf_debug ( 200 , ' call microphysics_driver' )
2947
! Zero grid%sr each step before the driver fills it (presumably the
! frozen-precipitation fraction diagnostic -- confirm in the Registry),
! then derive the boundary-mode flags passed to the driver:
! specified_bdy - any specified or nested lateral boundary,
! channel_bdy   - specified in y combined with periodicity in x.
2948 grid%sr = 0.
2949 specified_bdy = config_flags%specified .OR. config_flags%nested
2950 channel_bdy = config_flags%specified .AND. config_flags%periodic_x
2951
2952 #if 0
2953 BENCH_START(microswap_1)
2954 ! for load balancing; communication to redistribute the points
2955 IF ( config_flags%mp_physics .EQ. ETAMPNEW ) THEN
2956 #include "SWAP_ETAMP_NEW.inc"
2957 ELSE IF ( config_flags%mp_physics .EQ. WSM3SCHEME ) THEN
2958 #include "SWAP_WSM3.inc"
2959 ENDIF
2960 BENCH_END(microswap_1)
2961 #endif
2962
2963 BENCH_START(micro_driver_tim)
2964
2965 CALL microphysics_driver( &
2966 & DT=dtm ,DX=grid%dx ,DY=grid%dy &
2967 & ,DZ8W=dz8w ,F_ICE_PHY=grid%f_ice_phy &
2968 & ,ITIMESTEP=grid%itimestep ,LOWLYR=grid%lowlyr &
2969 & ,P8W=p8w ,P=p_phy ,PI_PHY=pi_phy &
2970 & ,RHO=rho ,SPEC_ZONE=grid%spec_zone &
2971 & ,SR=grid%sr ,TH=th_phy &
2972 & ,WARM_RAIN=grid%warm_rain ,XLAND=grid%xland &
2973 & ,SPECIFIED=specified_bdy, CHANNEL_SWITCH=channel_bdy &
2974 & ,F_RAIN_PHY=grid%f_rain_phy &
2975 & ,F_RIMEF_PHY=grid%f_rimef_phy &
2976 & ,MP_PHYSICS=config_flags%mp_physics &
2977 & ,IDS=ids,IDE=ide, JDS=jds,JDE=jde, KDS=kds,KDE=kde &
2978 & ,IMS=ims,IME=ime, JMS=jms,JME=jme, KMS=kms,KME=kme &
2979 & ,I_START=grid%i_start,I_END=min(grid%i_end, ide-1) &
2980 & ,J_START=grid%j_start,J_END=min(grid%j_end, jde-1) &
2981 & ,KTS=k_start, KTE=min(k_end,kde-1) &
2982 & ,NUM_TILES=grid%num_tiles &
2983 ! Optional
2984 & , RAINNC=grid%rainnc, RAINNCV=grid%rainncv &
2985 & , SNOWNC=grid%snownc, SNOWNCV=grid%snowncv &
2986 & , GRAUPELNC=grid%graupelnc, GRAUPELNCV=grid%graupelncv &
2987 & , W=grid%em_w_2, Z=grid%em_z, HT=grid%ht &
2988 & , MP_RESTART_STATE=grid%mp_restart_state &
2989 & , TBPVS_STATE=grid%tbpvs_state & ! etampnew
2990 & , TBPVS0_STATE=grid%tbpvs0_state & ! etampnew
2991 & , QV_CURR=moist(ims,kms,jms,P_QV), F_QV=F_QV &
2992 & , QC_CURR=moist(ims,kms,jms,P_QC), F_QC=F_QC &
2993 & , QR_CURR=moist(ims,kms,jms,P_QR), F_QR=F_QR &
2994 & , QI_CURR=moist(ims,kms,jms,P_QI), F_QI=F_QI &
2995 & , QS_CURR=moist(ims,kms,jms,P_QS), F_QS=F_QS &
2996 & , QG_CURR=moist(ims,kms,jms,P_QG), F_QG=F_QG &
2997 & , QNI_CURR=scalar(ims,kms,jms,P_QNI), F_QNI=F_QNI &
2998 & , QT_CURR=scalar(ims,kms,jms,P_QT), F_QT=F_QT &
2999 )
3000 BENCH_END(micro_driver_tim)
3001
3002 #if 0
3003 BENCH_START(microswap_2)
3004 ! for load balancing; communication to redistribute the points
3005 IF ( config_flags%mp_physics .EQ. ETAMPNEW ) THEN
3006 #include "SWAP_ETAMP_NEW.inc"
3007 ELSE IF ( config_flags%mp_physics .EQ. WSM3SCHEME ) THEN
3008 #include "SWAP_WSM3.inc"
3009 ENDIF
3010 BENCH_END(microswap_2)
3011 #endif
3012
3013 CALL wrf_debug ( 200 , ' call moist_physics_finish' )
3014 BENCH_START(moist_phys_end_tim)
3015 !$OMP PARALLEL DO &
3016 !$OMP PRIVATE ( ij, its, ite, jts, jte )
3017
3018 scalar_tile_loop_1b: DO ij = 1 , grid%num_tiles
3019
3020 IF ( config_flags%periodic_x ) THEN
3021 its = max(grid%i_start(ij),ids)
3022 ite = min(grid%i_end(ij),ide-1)
3023 ELSE
3024 its = max(grid%i_start(ij),ids+sz)
3025 ite = min(grid%i_end(ij),ide-1-sz)
3026 ENDIF
3027 jts = max(grid%j_start(ij),jds+sz)
3028 jte = min(grid%j_end(ij),jde-1-sz)
3029
3030 CALL microphysics_zero_out ( &
3031 moist , num_moist , config_flags , &
3032 ids, ide, jds, jde, kds, kde, &
3033 ims, ime, jms, jme, kms, kme, &
3034 its, ite, jts, jte, &
3035 k_start , k_end )
3036
3037 CALL moist_physics_finish_em( grid%em_t_2, grid%em_t_1, t0, grid%em_muts, th_phy, &
3038 grid%h_diabatic, dtm, config_flags, &
3039 ids, ide, jds, jde, kds, kde, &
3040 ims, ime, jms, jme, kms, kme, &
3041 its, ite, jts, jte, &
3042 k_start , k_end )
3043
3044 CALL calc_p_rho_phi( moist, num_3d_m, &
3045 grid%em_al, grid%em_alb, grid%em_mu_2, grid%em_muts, &
3046 grid%em_ph_2, grid%em_p, grid%em_pb, grid%em_t_2, &
3047 p0, t0, grid%em_znu, grid%em_dnw, grid%em_rdnw, &
3048 grid%em_rdn, config_flags%non_hydrostatic, &
3049 ids, ide, jds, jde, kds, kde, &
3050 ims, ime, jms, jme, kms, kme, &
3051 its, ite, jts, jte, &
3052 k_start , k_end )
3053
3054 END DO scalar_tile_loop_1b
3055 !$OMP END PARALLEL DO
3056 BENCH_END(moist_phys_end_tim)
3057
3058 ENDIF
3059
! Hydrostatic option: w is not integrated prognostically, so after the
! physics it is diagnosed from the geopotential tendency and terrain
! (diagnose_w), tile by tile.
3060 IF (.not. config_flags%non_hydrostatic) THEN
3061 !$OMP PARALLEL DO &
3062 !$OMP PRIVATE ( ij )
3063 DO ij = 1 , grid%num_tiles
3064 CALL diagnose_w( ph_tend, grid%em_ph_2, grid%em_ph_1, grid%em_w_2, grid%em_muts, dt_rk, &
3065 grid%em_u_2, grid%em_v_2, grid%ht, &
3066 grid%cf1, grid%cf2, grid%cf3, grid%rdx, grid%rdy, grid%msft, &
3067 ids, ide, jds, jde, kds, kde, &
3068 ims, ime, jms, jme, kms, kme, &
3069 grid%i_start(ij), grid%i_end(ij), &
3070 grid%j_start(ij), grid%j_end(ij), &
3071 k_start , k_end )
3072
3073 END DO
3074 !$OMP END PARALLEL DO
3075 ENDIF
3076
! Vestigial placeholder loop: the body is intentionally empty (only a
! debug message is emitted). Per the in-line note, tiled chemistry is
! invoked from solve_interface / chem_driver, not from here.
3077 chem_tile_loop_3: DO ij = 1 , grid%num_tiles
3078
3079 CALL wrf_debug ( 200 , ' call scalar_tile_loop_2' )
3080
3081 IF ( num_3d_c >= PARAM_FIRST_SCALAR ) then
3082
3083 !
3084 ! tiled chemistry not here, it is called from solve_interface, and found in chem_driver
3085 !
3086
3087 END IF
3088
3089 END DO chem_tile_loop_3
3090
3091 ! We're finished except for boundary condition (and patch) update
3092
3093 ! Boundary condition time (or communication time). At this time, we have
3094 ! implemented periodic and symmetric physical boundary conditions.
3095
3096 ! b.c. routine for data within patch.
3097
3098 ! we need to do both time levels of
3099 ! data because the time filter only works in the physical solution space.
3100
3101 ! First, do patch communications for boundary conditions (periodicity)
3102
3103 !-----------------------------------------------------------
3104 ! Stencils for patch communications (WCS, 29 June 2001)
3105 !
3106 ! here's where we need a wide comm stencil - these are the
3107 ! uncoupled variables so are used for high order calc in
3108 ! advection and mixing routines.
3109 !
3110 ! * * * * *
3111 ! * * * * * * * * *
3112 ! * + * * + * * * + * *
3113 ! * * * * * * * * *
3114 ! * * * * *
3115 !
3116 ! grid%em_u_1 x
3117 ! grid%em_u_2 x
3118 ! grid%em_v_1 x
3119 ! grid%em_v_2 x
3120 ! grid%em_w_1 x
3121 ! grid%em_w_2 x
3122 ! grid%em_t_1 x
3123 ! grid%em_t_2 x
3124 ! grid%em_ph_1 x
3125 ! grid%em_ph_2 x
3126 ! grid%em_tke_1 x
3127 ! grid%em_tke_2 x
3128 !
3129 ! 2D variables
3130 ! grid%em_mu_1 x
3131 ! grid%em_mu_2 x
3132 !
3133 ! 4D variables
3134 ! moist x
3135 ! chem x
3136 ! scalar x
3137 !----------------------------------------------------------
3138
3139
3140 #ifdef DM_PARALLEL
3141 IF ( config_flags%h_mom_adv_order <= 4 ) THEN
3142 # include "HALO_EM_D3_3.inc"
3143 ELSE IF ( config_flags%h_mom_adv_order <= 6 ) THEN
3144 # include "HALO_EM_D3_5.inc"
3145 ELSE
3146 WRITE(wrf_err_message,*)'solve_em: invalid h_mom_adv_order = ',config_flags%h_mom_adv_order
3147 CALL wrf_error_fatal(TRIM(wrf_err_message))
3148 ENDIF
3149 # include "PERIOD_BDY_EM_D3.inc"
3150 # include "PERIOD_BDY_EM_MOIST.inc"
3151 # include "PERIOD_BDY_EM_CHEM.inc"
3152 # include "PERIOD_BDY_EM_SCALAR.inc"
3153 #endif
3154
3155 ! now set physical b.c on a patch
3156
3157 BENCH_START(bc_2d_tim)
3158 !$OMP PARALLEL DO &
3159 !$OMP PRIVATE ( ij )
3160
3161 tile_bc_loop_2: DO ij = 1 , grid%num_tiles
3162
3163
3164 CALL wrf_debug ( 200 , ' call set_phys_bc_dry_2' )
3165
3166 CALL set_phys_bc_dry_2( config_flags, &
3167 grid%em_u_1, grid%em_u_2, grid%em_v_1, grid%em_v_2, grid%em_w_1, grid%em_w_2, &
3168 grid%em_t_1, grid%em_t_2, grid%em_ph_1, grid%em_ph_2, grid%em_mu_1, grid%em_mu_2, &
3169 ids, ide, jds, jde, kds, kde, &
3170 ims, ime, jms, jme, kms, kme, &
3171 ips, ipe, jps, jpe, kps, kpe, &
3172 grid%i_start(ij), grid%i_end(ij), &
3173 grid%j_start(ij), grid%j_end(ij), &
3174 k_start , k_end )
3175
3176 CALL set_physical_bc3d( grid%em_tke_1, 'grid%em_p', config_flags, &
3177 ids, ide, jds, jde, kds, kde, &
3178 ims, ime, jms, jme, kms, kme, &
3179 ips, ipe, jps, jpe, kps, kpe, &
3180 grid%i_start(ij), grid%i_end(ij), &
3181 grid%j_start(ij), grid%j_end(ij), &
3182 k_start , k_end-1 )
3183 CALL set_physical_bc3d( grid%em_tke_2 , 'grid%em_p', config_flags, &
3184 ids, ide, jds, jde, kds, kde, &
3185 ims, ime, jms, jme, kms, kme, &
3186 ips, ipe, jps, jpe, kps, kpe, &
3187 grid%i_start(ij), grid%i_end(ij), &
3188 grid%j_start(ij), grid%j_end(ij), &
3189 k_start , k_end )
3190
3191 moisture_loop_bdy_2 : DO im = PARAM_FIRST_SCALAR , num_3d_m
3192
3193 CALL set_physical_bc3d( moist(ims,kms,jms,im), 'grid%em_p', &
3194 config_flags, &
3195 ids, ide, jds, jde, kds, kde, &
3196 ims, ime, jms, jme, kms, kme, &
3197 ips, ipe, jps, jpe, kps, kpe, &
3198 grid%i_start(ij), grid%i_end(ij), &
3199 grid%j_start(ij), grid%j_end(ij), &
3200 k_start , k_end )
3201
3202 END DO moisture_loop_bdy_2
3203
3204 chem_species_bdy_loop_2 : DO ic = PARAM_FIRST_SCALAR , num_3d_c
3205
3206 CALL set_physical_bc3d( chem(ims,kms,jms,ic) , 'grid%em_p', config_flags, &
3207 ids, ide, jds, jde, kds, kde, &
3208 ims, ime, jms, jme, kms, kme, &
3209 ips, ipe, jps, jpe, kps, kpe, &
3210 grid%i_start(ij), grid%i_end(ij), &
3211 grid%j_start(ij), grid%j_end(ij), &
3212 k_start , k_end )
3213
3214 END DO chem_species_bdy_loop_2
3215
3216 scalar_species_bdy_loop_2 : DO is = PARAM_FIRST_SCALAR , num_3d_s
3217
3218 CALL set_physical_bc3d( scalar(ims,kms,jms,is) , 'grid%em_p', config_flags, &
3219 ids, ide, jds, jde, kds, kde, &
3220 ims, ime, jms, jme, kms, kme, &
3221 ips, ipe, jps, jpe, kps, kpe, &
3222 grid%i_start(ij), grid%i_end(ij), &
3223 grid%j_start(ij), grid%j_end(ij), &
3224 k_start , k_end )
3225
3226 END DO scalar_species_bdy_loop_2
3227
3228 END DO tile_bc_loop_2
3229 !$OMP END PARALLEL DO
3230 BENCH_END(bc_2d_tim)
3231
! Advance the boundary-condition clock: dtbc accumulates model time
! since the last lateral boundary-data update (used when specified or
! nested boundary tendencies are time-interpolated -- confirm in the
! boundary-input routines).
3232 IF( config_flags%specified .or. config_flags%nested ) THEN
3233 grid%dtbc = grid%dtbc + grid%dt
3234 ENDIF
3235
3236 #ifdef DM_PARALLEL
3237 !-----------------------------------------------------------------------
3238 ! see above
3239 !--------------------------------------------------------------
3240 CALL wrf_debug ( 200 , ' call HALO_RK_E' )
3241 IF ( config_flags%h_mom_adv_order <= 4 ) THEN
3242 # include "HALO_EM_E_3.inc"
3243 ELSE IF ( config_flags%h_mom_adv_order <= 6 ) THEN
3244 # include "HALO_EM_E_5.inc"
3245 ELSE
3246 WRITE(wrf_err_message,*)'solve_em: invalid h_mom_adv_order = ',config_flags%h_mom_adv_order
3247 CALL wrf_error_fatal(TRIM(wrf_err_message))
3248 ENDIF
3249 #endif
3250
3251 #ifdef DM_PARALLEL
3252 if ( num_moist >= PARAM_FIRST_SCALAR ) then
3253 !-----------------------------------------------------------------------
3254 ! see above
3255 !--------------------------------------------------------------
3256 CALL wrf_debug ( 200 , ' call HALO_RK_MOIST' )
3257 IF ( config_flags%h_mom_adv_order <= 4 ) THEN
3258 # include "HALO_EM_MOIST_E_3.inc"
3259 ELSE IF ( config_flags%h_mom_adv_order <= 6 ) THEN
3260 # include "HALO_EM_MOIST_E_5.inc"
3261 ELSE
3262 WRITE(wrf_err_message,*)'solve_em: invalid h_mom_adv_order = ',config_flags%h_mom_adv_order
3263 CALL wrf_error_fatal(TRIM(wrf_err_message))
3264 ENDIF
3265 endif
3266 if ( num_chem >= PARAM_FIRST_SCALAR ) then
3267 !-----------------------------------------------------------------------
3268 ! see above
3269 !--------------------------------------------------------------
3270 CALL wrf_debug ( 200 , ' call HALO_RK_CHEM' )
3271 IF ( config_flags%h_mom_adv_order <= 4 ) THEN
3272 # include "HALO_EM_CHEM_E_3.inc"
3273 ELSE IF ( config_flags%h_mom_adv_order <= 6 ) THEN
3274 # include "HALO_EM_CHEM_E_5.inc"
3275 ELSE
3276 WRITE(wrf_err_message,*)'solve_em: invalid h_mom_adv_order = ',config_flags%h_mom_adv_order
3277 CALL wrf_error_fatal(TRIM(wrf_err_message))
3278 ENDIF
3279 endif
3280 if ( num_scalar >= PARAM_FIRST_SCALAR ) then
3281 !-----------------------------------------------------------------------
3282 ! see above
3283 !--------------------------------------------------------------
3284 CALL wrf_debug ( 200 , ' call HALO_RK_SCALAR' )
3285 IF ( config_flags%h_mom_adv_order <= 4 ) THEN
3286 # include "HALO_EM_SCALAR_E_3.inc"
3287 ELSE IF ( config_flags%h_mom_adv_order <= 6 ) THEN
3288 # include "HALO_EM_SCALAR_E_5.inc"
3289 ELSE
3290 WRITE(wrf_err_message,*)'solve_em: invalid h_mom_adv_order = ',config_flags%h_mom_adv_order
3291 CALL wrf_error_fatal(TRIM(wrf_err_message))
3292 ENDIF
3293 endif
3294 #endif
3295
3296 CALL wrf_debug ( 200 , ' call end of solve_em' )
3297
3298 ! Finish timers if compiled with -DBENCH.
3299 #include <bench_solve_em_end.h>
3300
3301 ! See comment before earlier #include of this file.
3302 #define COPY_OUT
3303 #include <em_scalar_derefs.inc>
3304
3305 RETURN
3306
3307 END SUBROUTINE solve_em