! solve_em.F
! (References to this file elsewhere.)
1 !WRF:MEDIATION_LAYER:SOLVER
2
3 SUBROUTINE solve_em ( grid , config_flags &
4 ! Actual arguments generated from Registry
5 #include "em_dummy_new_args.inc"
6 !
7 )
8
9 ! Driver layer modules
10 USE module_domain
11 USE module_configure
12 USE module_driver_constants
13 USE module_machine
14 USE module_tiles
15 USE module_dm
16 ! Mediation layer modules
17 ! Model layer modules
18 USE module_model_constants
19 USE module_small_step_em
20 USE module_em
21 USE module_big_step_utilities_em
22 USE module_bc
23 USE module_bc_em
24 USE module_solvedebug_em
25 USE module_physics_addtendc
26 USE module_diffusion_em
27 ! Registry generated module
28 USE module_state_description
29 USE module_radiation_driver
30 USE module_surface_driver
31 USE module_cumulus_driver
32 USE module_microphysics_driver
33 USE module_microphysics_zero_out
34 USE module_pbl_driver
35 USE module_fddagd_driver
36 USE module_fddaobs_driver
37 USE module_diagnostics
38 #ifdef WRF_CHEM
39 USE module_input_chem_data
40 USE module_chem_utilities
41 #endif
42
43 IMPLICIT NONE
44
45 ! Input data.
46
47 TYPE(domain) , TARGET :: grid
48
49 ! Definitions of dummy arguments to this routine (generated from Registry).
50 #include "em_dummy_new_decl.inc"
51
52 ! Structure that contains run-time configuration (namelist) data for domain
53 TYPE (grid_config_rec_type) , INTENT(IN) :: config_flags
54
55 ! Local data
56
57 INTEGER :: k_start , k_end, its, ite, jts, jte
58 INTEGER :: ids , ide , jds , jde , kds , kde , &
59 ims , ime , jms , jme , kms , kme , &
60 ips , ipe , jps , jpe , kps , kpe
61
62 INTEGER :: sids , side , sjds , sjde , skds , skde , &
63 sims , sime , sjms , sjme , skms , skme , &
64 sips , sipe , sjps , sjpe , skps , skpe
65
66
67 INTEGER :: imsx, imex, jmsx, jmex, kmsx, kmex, &
68 ipsx, ipex, jpsx, jpex, kpsx, kpex, &
69 imsy, imey, jmsy, jmey, kmsy, kmey, &
70 ipsy, ipey, jpsy, jpey, kpsy, kpey
71
72 INTEGER :: ij , iteration
73 INTEGER :: im , num_3d_m , ic , num_3d_c , is , num_3d_s
74 INTEGER :: loop
75 INTEGER :: sz
76
77 LOGICAL :: specified_bdy, channel_bdy
78
79 ! storage for tendencies and decoupled state (generated from Registry)
80
81 #include <em_i1_decl.inc>
82 ! Previous time level of tracer arrays now defined as i1 variables;
83 ! the state 4d arrays now redefined as 1-time level arrays in Registry.
84 ! Benefit: save memory in nested runs, since only 1 domain is active at a
85 ! time. Potential problem on stack-limited architectures: increases
86 ! amount of data on program stack by making these automatic arrays.
87
88 INTEGER :: rc
89 INTEGER :: number_of_small_timesteps, rk_step
90 INTEGER :: klevel,ijm,ijp,i,j,k,size1,size2 ! for prints/plots only
91 INTEGER :: idum1, idum2, dynamics_option
92
93 INTEGER :: rk_order, iwmax, jwmax, kwmax
94 REAL :: dt_rk, dts_rk, dtm, wmax
95 INTEGER :: l,kte,kk
96
97 ! urban related variables
98 INTEGER :: NUM_ROOF_LAYERS, NUM_WALL_LAYERS, NUM_ROAD_LAYERS ! urban
99
100 ! Define benchmarking timers if -DBENCH is compiled
101 #include <bench_solve_em_def.h>
102
103 !----------------------
104 ! Executable statements
105 !----------------------
106
107 ! Needed by some comm layers, e.g. RSL. If needed, nmm_data_calls.inc is
108 ! generated from the registry. The definition of REGISTER_I1 allows
109 ! I1 data to be communicated in this routine if necessary.
110 #ifdef DM_PARALLEL
111 # define REGISTER_I1
112 # include "em_data_calls.inc"
113 #endif
114
115 !<DESCRIPTION>
116 !<pre>
117 ! solve_em is the main driver for advancing a grid a single timestep.
118 ! It is a mediation-layer routine -> DM and SM calls are made where
119 ! needed for parallel processing.
120 !
121 ! solve_em can integrate the equations using 3 time-integration methods
122 !
123 ! - 3rd order Runge-Kutta time integration (recommended)
124 !
125 ! - 2nd order Runge-Kutta time integration
126 !
127 ! The main sections of solve_em are
128 !
129 ! (1) Runge-Kutta (RK) loop
130 !
131 ! (2) Non-timesplit physics (i.e., tendencies computed for updating
132 ! model state variables during the first RK sub-step (loop)
133 !
134 ! (3) Small (acoustic, sound) timestep loop - within the RK sub-steps
135 !
136 ! (4) scalar advance for moist and chem scalar variables (and TKE)
137 ! within the RK sub-steps.
138 !
139 ! (5) time-split physics (after the RK step), currently this includes
140 ! only microphysics
141 !
142 ! A more detailed description of these sections follows.
143 !</pre>
144 !</DESCRIPTION>
145
146 ! Initialize timers if compiled with -DBENCH
147 #include <bench_solve_em_init.h>
148
149 ! set runge-kutta solver (2nd or 3rd order)
150
151 dynamics_option = config_flags%rk_ord
152
153 ! Obtain dimension information stored in the grid data structure.
154
155 CALL get_ijk_from_grid ( grid , &
156 ids, ide, jds, jde, kds, kde, &
157 ims, ime, jms, jme, kms, kme, &
158 ips, ipe, jps, jpe, kps, kpe, &
159 imsx, imex, jmsx, jmex, kmsx, kmex, &
160 ipsx, ipex, jpsx, jpex, kpsx, kpex, &
161 imsy, imey, jmsy, jmey, kmsy, kmey, &
162 ipsy, ipey, jpsy, jpey, kpsy, kpey )
163
164 CALL get_ijk_from_subgrid ( grid , &
165 sids, side, sjds, sjde, skds, skde, &
166 sims, sime, sjms, sjme, skms, skme, &
167 sips, sipe, sjps, sjpe, skps, skpe )
168
169 k_start = kps
170 k_end = kpe
171
172 num_3d_m = num_moist
173 num_3d_c = num_chem
174 num_3d_s = num_scalar
175
176 ! Compute these starting and stopping locations for each tile and number of tiles.
177 ! See: http://www.mmm.ucar.edu/wrf/WG2/topics/settiles
178 CALL set_tiles ( grid , ids , ide , jds , jde , ips , ipe , jps , jpe )
179 !print *,grid%julian,grid%julday,' grid%julian,grid%julday in solve'
180
181 grid%itimestep = grid%itimestep + 1
182
183 !**********************************************************************
184 !
185 ! LET US BEGIN.......
186 !
187 !<DESCRIPTION>
188 !<pre>
189 ! (1) RK integration loop is named the "Runge_Kutta_loop:"
190 !
191 ! Predictor-corrector type time integration.
192 ! Advection terms are evaluated at time t for the predictor step,
193 ! and advection is re-evaluated with the latest predicted value for
194 ! each succeeding time corrector step
195 !
196 ! 2nd order Runge Kutta (rk_order = 2):
197 ! Step 1 is taken to the midpoint predictor, step 2 is the full step.
198 !
199 ! 3rd order Runge Kutta (rk_order = 3):
200 ! Step 1 is taken from t to dt/3, step 2 is from t to dt/2,
201 ! and step 3 is from t to dt.
202 !
203 ! non-timesplit physics are evaluated during first RK step and
204 ! these physics tendencies are stored for use in each RK pass.
205 !</pre>
206 !</DESCRIPTION>
207 !**********************************************************************
208
209 #ifdef WRF_CHEM
210 !
211 ! prepare chem aerosols for advection before communication
212 !
213
214 kte=min(k_end,kde-1)
215 # ifdef DM_PARALLEL
216 if ( num_chem >= PARAM_FIRST_SCALAR ) then
217 !-----------------------------------------------------------------------
218 ! see matching halo calls below for stencils
219 !--------------------------------------------------------------
220 CALL wrf_debug ( 200 , ' call HALO_RK_CHEM' )
221 IF ( config_flags%h_mom_adv_order <= 4 ) THEN
222 # include "HALO_EM_CHEM_E_3.inc"
223 if( config_flags%progn > 0 ) then
224 # include "HALO_EM_SCALAR_E_3.inc"
225 end if
226 ELSE IF ( config_flags%h_mom_adv_order <= 6 ) THEN
227 # include "HALO_EM_CHEM_E_5.inc"
228 if( config_flags%progn > 0 ) then
229 # include "HALO_EM_SCALAR_E_5.inc"
230 end if
231 ELSE
232 WRITE(wrf_err_message,*)'solve_em: invalid h_mom_adv_order = ',config_flags%h_mom_adv_order
233 CALL wrf_error_fatal(TRIM(wrf_err_message))
234 ENDIF
235 endif
236 # endif
237 !--------------------------------------------------------------
238 #endif
239
240 rk_order = config_flags%rk_ord
241 IF (grid%time_step_sound == 0) THEN
242 ! auto-set option
243 ! This function will give 4 for 6*dx and 6 for 10*dx and returns even numbers only
244 grid%time_step_sound = max ( 2 * ( INT (300.*grid%dt/grid%dx-0.01) + 1 ), 4 )
245 WRITE(wrf_err_message,*)'dx, dt, time_step_sound=',grid%dx,grid%dt,grid%time_step_sound
246 CALL wrf_debug ( 50 , wrf_err_message )
247 ENDIF
248
249 grid%dts = grid%dt/float(grid%time_step_sound)
250
251 Runge_Kutta_loop: DO rk_step = 1, rk_order
252
253 ! Set the step size and number of small timesteps for
254 ! each part of the timestep
255
256 dtm = grid%dt
257 IF ( rk_order == 1 ) THEN
258
259 write(wrf_err_message,*)' leapfrog removed, error exit for dynamics_option = ',dynamics_option
260 CALL wrf_error_fatal( wrf_err_message )
261
262 ELSE IF ( rk_order == 2 ) THEN ! 2nd order Runge-Kutta timestep
263
264 IF ( rk_step == 1) THEN
265 dt_rk = 0.5*grid%dt
266 dts_rk = grid%dts
267 number_of_small_timesteps = grid%time_step_sound/2
268 ELSE
269 dt_rk = grid%dt
270 dts_rk = grid%dts
271 number_of_small_timesteps = grid%time_step_sound
272 ENDIF
273
274 ELSE IF ( rk_order == 3 ) THEN ! third order Runge-Kutta
275
276 IF ( rk_step == 1) THEN
277 dt_rk = grid%dt/3.
278 dts_rk = dt_rk
279 number_of_small_timesteps = 1
280 ELSE IF (rk_step == 2) THEN
281 dt_rk = 0.5*grid%dt
282 dts_rk = grid%dts
283 number_of_small_timesteps = grid%time_step_sound/2
284 ELSE
285 dt_rk = grid%dt
286 dts_rk = grid%dts
287 number_of_small_timesteps = grid%time_step_sound
288 ENDIF
289
290 ELSE
291
292 write(wrf_err_message,*)' unknown solver, error exit for dynamics_option = ',dynamics_option
293 CALL wrf_error_fatal( wrf_err_message )
294
295 END IF
296
297 !
298 ! Time level t is in the *_2 variable in the first part
299 ! of the step, and in the *_1 variable after the predictor.
300 ! the latest predicted values are stored in the *_2 variables.
301 !
302 CALL wrf_debug ( 200 , ' call rk_step_prep ' )
303
304 BENCH_START(step_prep_tim)
305 !$OMP PARALLEL DO &
306 !$OMP PRIVATE ( ij )
307
308 DO ij = 1 , grid%num_tiles
309
310 CALL rk_step_prep ( config_flags, rk_step, &
311 grid%em_u_2, grid%em_v_2, grid%em_w_2, grid%em_t_2, grid%em_ph_2, grid%em_mu_2, &
312 moist, &
313 grid%em_ru, grid%em_rv, grid%em_rw, grid%em_ww, grid%em_php, grid%em_alt, grid%em_muu, grid%em_muv, &
314 grid%em_mub, grid%em_mut, grid%em_phb, grid%em_pb, grid%em_p, grid%em_al, grid%em_alb, &
315 cqu, cqv, cqw, &
316 grid%msfu, grid%msfv, grid%msft, &
317 grid%em_fnm, grid%em_fnp, grid%em_dnw, grid%rdx, grid%rdy, &
318 num_3d_m, &
319 ids, ide, jds, jde, kds, kde, &
320 ims, ime, jms, jme, kms, kme, &
321 grid%i_start(ij), grid%i_end(ij), &
322 grid%j_start(ij), grid%j_end(ij), &
323 k_start, k_end )
324
325 END DO
326 !$OMP END PARALLEL DO
327 BENCH_END(step_prep_tim)
328
329 #ifdef DM_PARALLEL
330 !-----------------------------------------------------------------------
331 ! Stencils for patch communications (WCS, 29 June 2001)
332 ! Note: the small size of this halo exchange reflects the
333 ! fact that we are carrying the uncoupled variables
334 ! as state variables in the mass coordinate model, as
335 ! opposed to the coupled variables as in the height
336 ! coordinate model.
337 !
338 ! * * * * *
339 ! * * * * * * * * *
340 ! * + * * + * * * + * *
341 ! * * * * * * * * *
342 ! * * * * *
343 !
344 ! 3D variables - note staggering! grid%em_ru(X), grid%em_rv(Y), grid%em_ww(Z), grid%em_php(Z)
345 !
346 !j grid%em_ru x
347 !j grid%em_rv x
348 !j grid%em_ww x
349 !j grid%em_php x
350 !j grid%em_alt x
351 !j grid%em_ph_2 x
352 !j grid%em_phb x
353 !
354 ! the following are 2D (xy) variables
355 !
356 !j grid%em_muu x
357 !j grid%em_muv x
358 !j grid%em_mut x
359 !--------------------------------------------------------------
360 # include "HALO_EM_A.inc"
361 #endif
362
363 ! set boundary conditions on variables
364 ! from big_step_prep for use in big_step_proc
365
366 #ifdef DM_PARALLEL
367 # include "PERIOD_BDY_EM_A.inc"
368 #endif
369
370 ! CALL set_tiles ( grid , ids , ide , jds , jde , ips-1 , ipe+1 , jps-1 , jpe+1 )
371
372 BENCH_START(set_phys_bc_tim)
373 !$OMP PARALLEL DO &
374 !$OMP PRIVATE ( ij )
375
376 DO ij = 1 , grid%num_tiles
377
378 CALL wrf_debug ( 200 , ' call rk_phys_bc_dry_1' )
379
380 CALL rk_phys_bc_dry_1( config_flags, grid%em_ru, grid%em_rv, grid%em_rw, grid%em_ww, &
381 grid%em_muu, grid%em_muv, grid%em_mut, grid%em_php, grid%em_alt, grid%em_p, &
382 ids, ide, jds, jde, kds, kde, &
383 ims, ime, jms, jme, kms, kme, &
384 ips, ipe, jps, jpe, kps, kpe, &
385 grid%i_start(ij), grid%i_end(ij), &
386 grid%j_start(ij), grid%j_end(ij), &
387 k_start, k_end )
388 !TBH: need this 2nd timestep and later
389 CALL set_physical_bc3d( grid%em_al, 'p', config_flags, &
390 ids, ide, jds, jde, kds, kde, &
391 ims, ime, jms, jme, kms, kme, &
392 ips, ipe, jps, jpe, kps, kpe, &
393 grid%i_start(ij), grid%i_end(ij), &
394 grid%j_start(ij), grid%j_end(ij), &
395 k_start , k_end )
396 CALL set_physical_bc3d( grid%em_ph_2, 'w', config_flags, &
397 ids, ide, jds, jde, kds, kde, &
398 ims, ime, jms, jme, kms, kme, &
399 ips, ipe, jps, jpe, kps, kpe, &
400 grid%i_start(ij), grid%i_end(ij), &
401 grid%j_start(ij), grid%j_end(ij), &
402 k_start, k_end )
403
404 END DO
405 !$OMP END PARALLEL DO
406 BENCH_END(set_phys_bc_tim)
407
408 rk_step_is_one : IF (rk_step == 1) THEN ! only need to initialize diffusion tendencies
409
410 ! initialize all tendencies to zero in order to update physics
411 ! tendencies first (separate from dry dynamics).
412
413 BENCH_START(init_zero_tend_tim)
414 !$OMP PARALLEL DO &
415 !$OMP PRIVATE ( ij )
416
417 DO ij = 1 , grid%num_tiles
418
419 CALL wrf_debug ( 200 , ' call init_zero_tendency' )
420 CALL init_zero_tendency ( ru_tendf, rv_tendf, rw_tendf, &
421 ph_tendf, t_tendf, tke_tend, &
422 mu_tendf, &
423 moist_tend,chem_tend,scalar_tend, &
424 num_3d_m,num_3d_c,num_3d_s, &
425 rk_step, &
426 ids, ide, jds, jde, kds, kde, &
427 ims, ime, jms, jme, kms, kme, &
428 grid%i_start(ij), grid%i_end(ij), &
429 grid%j_start(ij), grid%j_end(ij), &
430 k_start, k_end )
431
432 END DO
433 !$OMP END PARALLEL DO
434 BENCH_END(init_zero_tend_tim)
435
436 #ifdef DM_PARALLEL
437 # include "HALO_EM_PHYS_A.inc"
438 #endif
439
440 !<DESCRIPTION>
441 !<pre>
442 !(2) The non-timesplit physics begins with a call to "phy_prep"
443 ! (which computes some diagnostic variables such as temperature,
444 ! pressure, u and v at p points, etc). This is followed by
445 ! calls to the physics drivers:
446 !
447 ! radiation,
448 ! surface,
449 ! pbl,
450 ! cumulus,
451 ! 3D TKE and mixing.
452 !</pre>
453 !</DESCRIPTION>
454
455
456 BENCH_START(phy_prep_tim)
457 !$OMP PARALLEL DO &
458 !$OMP PRIVATE ( ij )
459 DO ij = 1 , grid%num_tiles
460
461 CALL wrf_debug ( 200 , ' call phy_prep' )
462 CALL phy_prep ( config_flags, &
463 grid%em_mut, grid%em_muu, grid%em_muv, grid%em_u_2, &
464 grid%em_v_2, grid%em_p, grid%em_pb, grid%em_alt, &
465 grid%em_ph_2, grid%em_phb, grid%em_t_2, grid%tsk, moist, num_3d_m, &
466 mu_3d, rho, &
467 th_phy, p_phy, pi_phy, u_phy, v_phy, &
468 p8w, t_phy, t8w, grid%em_z, z_at_w, &
469 dz8w, grid%em_fnm, grid%em_fnp, &
470 grid%rthraten, &
471 grid%rthblten, grid%rublten, grid%rvblten, &
472 grid%rqvblten, grid%rqcblten, grid%rqiblten, &
473 grid%rthcuten, grid%rqvcuten, grid%rqccuten, &
474 grid%rqrcuten, grid%rqicuten, grid%rqscuten, &
475 grid%rthften, grid%rqvften, &
476 grid%RUNDGDTEN, grid%RVNDGDTEN, grid%RTHNDGDTEN, &
477 grid%RQVNDGDTEN, grid%RMUNDGDTEN, &
478 ids, ide, jds, jde, kds, kde, &
479 ims, ime, jms, jme, kms, kme, &
480 grid%i_start(ij), grid%i_end(ij), &
481 grid%j_start(ij), grid%j_end(ij), &
482 k_start, k_end )
483 ENDDO
484 !$OMP END PARALLEL DO
485
486 BENCH_END(phy_prep_tim)
487
488 ! physics to implement
489
490 ! CALL set_tiles ( grid , ids , ide-1 , jds , jde-1 ips , ipe , jps , jpe )
491
492 ! Open MP loops are in physics drivers
493 ! radiation
494
495 !-----------------------------------------------------------------
496 ! urban related variable are added to arguments of radiation_driver
497 !-----------------------------------------------------------------
498
499 CALL wrf_debug ( 200 , ' call radiation_driver' )
500 BENCH_START(rad_driver_tim)
501
502 CALL radiation_driver( &
503 & ACFRCV=grid%acfrcv ,ACFRST=grid%acfrst ,ALBEDO=grid%albedo &
504 & ,CFRACH=grid%cfrach ,CFRACL=grid%cfracl ,CFRACM=grid%cfracm &
505 & ,CUPPT=grid%cuppt ,CZMEAN=grid%czmean ,DT=grid%dt &
506 & ,DZ8W=dz8w ,EMISS=grid%emiss ,GLW=grid%glw &
507 & ,GMT=grid%gmt ,GSW=grid%gsw ,HBOT=grid%hbot &
508 & ,HTOP=grid%htop ,HBOTR=grid%hbotr, HTOPR=grid%htopr ,ICLOUD=config_flags%icloud &
509 & ,ITIMESTEP=grid%itimestep,JULDAY=grid%julday, JULIAN=grid%julian &
510 & ,JULYR=grid%julyr ,LW_PHYSICS=config_flags%ra_lw_physics &
511 & ,NCFRCV=grid%ncfrcv ,NCFRST=grid%ncfrst ,NPHS=1 &
512 & ,P8W=p8w ,P=p_phy ,PI=pi_phy &
513 & ,RADT=grid%radt ,RA_CALL_OFFSET=grid%ra_call_offset &
514 & ,RHO=rho ,RLWTOA=grid%rlwtoa &
515 & ,RSWTOA=grid%rswtoa ,RTHRATEN=grid%rthraten &
516 & ,RTHRATENLW=grid%rthratenlw &
517 & ,RTHRATENSW=grid%rthratensw ,SNOW=grid%snow &
518 & ,STEPRA=grid%stepra ,SWDOWN=grid%swdown ,SWDOWNC=grid%swdownc &
519 & ,SW_PHYSICS=config_flags%ra_sw_physics ,T8W=t8w &
520 & ,T=t_phy ,TAUCLDC=grid%taucldc ,TAUCLDI=grid%taucldi &
521 & ,TSK=grid%tsk ,VEGFRA=grid%vegfra ,WARM_RAIN=grid%warm_rain &
522 & ,XICE=grid%xice &
523 & ,XLAND=grid%xland ,XLAT=grid%xlat ,XLONG=grid%xlong &
524 !Optional urban
525 & ,DECLIN_URB=grid%declin_urb ,COSZ_URB2D=grid%cosz_urb2d &
526 & ,OMG_URB2D=grid%omg_urb2d &
527 !
528 & ,Z=grid%em_z &
529 & ,LEVSIZ=grid%levsiz, N_OZMIXM=num_ozmixm &
530 & ,N_AEROSOLC=num_aerosolc &
531 & ,PAERLEV=grid%paerlev &
532 & ,CAM_ABS_DIM1=grid%cam_abs_dim1, CAM_ABS_DIM2=grid%cam_abs_dim2 &
533 & ,CAM_ABS_FREQ_S=grid%cam_abs_freq_s &
534 & ,XTIME=grid%xtime &
535 ! indexes
536 & ,IDS=ids,IDE=ide, JDS=jds,JDE=jde, KDS=kds,KDE=kde &
537 & ,IMS=ims,IME=ime, JMS=jms,JME=jme, KMS=kms,KME=kme &
538 & ,i_start=grid%i_start,i_end=min(grid%i_end, ide-1) &
539 & ,j_start=grid%j_start,j_end=min(grid%j_end, jde-1) &
540 & ,kts=k_start, kte=min(k_end,kde-1) &
541 & ,num_tiles=grid%num_tiles &
542 ! Optional
543 & , CLDFRA=grid%cldfra &
544 & , PB=grid%em_pb &
545 & , F_ICE_PHY=grid%f_ice_phy,F_RAIN_PHY=grid%f_rain_phy &
546 & , QV=moist(ims,kms,jms,P_QV), F_QV=F_QV &
547 & , QC=moist(ims,kms,jms,P_QC), F_QC=F_QC &
548 & , QR=moist(ims,kms,jms,P_QR), F_QR=F_QR &
549 & , QI=moist(ims,kms,jms,P_QI), F_QI=F_QI &
550 & , QS=moist(ims,kms,jms,P_QS), F_QS=F_QS &
551 & , QG=moist(ims,kms,jms,P_QG), F_QG=F_QG &
552 & , QNDROP=scalar(ims,kms,jms,P_QNDROP), F_QNDROP=F_QNDROP &
553 #ifdef ACFLUX
554 & ,ACSWUPT=acswupt ,ACSWUPTC=acswuptc &
555 & ,ACSWDNT=acswdnt ,ACSWDNTC=acswdntc &
556 & ,ACSWUPB=acswupb ,ACSWUPBC=acswupbc &
557 & ,ACSWDNB=acswdnb ,ACSWDNBC=acswdnbc &
558 & ,ACLWUPT=aclwupt ,ACLWUPTC=aclwuptc &
559 & ,ACLWDNT=aclwdnt ,ACLWDNTC=aclwdntc &
560 & ,ACLWUPB=aclwupb ,ACLWUPBC=aclwupbc &
561 & ,ACLWDNB=aclwdnb ,ACLWDNBC=aclwdnbc &
562 & ,SWUPT=swupt ,SWUPTC=swuptc &
563 & ,SWDNT=swdnt ,SWDNTC=swdntc &
564 & ,SWUPB=swupb ,SWUPBC=swupbc &
565 & ,SWDNB=swdnb ,SWDNBC=swdnbc &
566 & ,LWUPT=lwupt ,LWUPTC=lwuptc &
567 & ,LWDNT=lwdnt ,LWDNTC=lwdntc &
568 & ,LWUPB=lwupb ,LWUPBC=lwupbc &
569 & ,LWDNB=lwdnb ,LWDNBC=lwdnbc &
570 #endif
571 & ,LWCF=grid%lwcf &
572 & ,SWCF=grid%swcf &
573 & ,OLR=grid%olr &
574 & ,OZMIXM=grid%ozmixm, PIN=grid%pin &
575 & ,M_PS_1=grid%m_ps_1, M_PS_2=grid%m_ps_2, AEROSOLC_1=grid%aerosolc_1 &
576 & ,AEROSOLC_2=grid%aerosolc_2, M_HYBI0=grid%m_hybi &
577 & ,ABSTOT=grid%abstot, ABSNXT=grid%absnxt, EMSTOT=grid%emstot &
578 #ifdef WRF_CHEM
579 & ,CU_RAD_FEEDBACK=config_flags%cu_rad_feedback &
580 & ,AER_RA_FEEDBACK=config_flags%aer_ra_feedback &
581 & ,QC_ADJUST=grid%GD_CLOUD_B , QI_ADJUST=grid%GD_CLOUD2_B &
582 & ,PM2_5_DRY=grid%pm2_5_dry, PM2_5_WATER=grid%pm2_5_water &
583 & ,PM2_5_DRY_EC=grid%pm2_5_dry_ec &
584 & ,TAUAER300=grid%tauaer1, TAUAER400=grid%tauaer2 & ! jcb
585 & ,TAUAER600=grid%tauaer3, TAUAER999=grid%tauaer4 & ! jcb
586 & ,GAER300=grid%gaer1, GAER400=grid%gaer2, GAER600=grid%gaer3, GAER999=grid%gaer4 & ! jcb
587 & ,WAER300=grid%waer1, WAER400=grid%waer2, WAER600=grid%waer3, WAER999=grid%waer4 & ! jcb
588 #endif
589 & )
590
591 BENCH_END(rad_driver_tim)
592
593 !********* Surface driver
594 ! surface
595
596 BENCH_START(surf_driver_tim)
597
598 !-----------------------------------------------------------------
599 ! urban related variable are added to arguments of surface_driver
600 !-----------------------------------------------------------------
601 NUM_ROOF_LAYERS = grid%num_soil_layers !urban
602 NUM_WALL_LAYERS = grid%num_soil_layers !urban
603 NUM_ROAD_LAYERS = grid%num_soil_layers !urban
604
605 CALL wrf_debug ( 200 , ' call surface_driver' )
606 CALL surface_driver( &
607 & ACSNOM=grid%acsnom ,ACSNOW=grid%acsnow ,AKHS=grid%akhs &
608 & ,AKMS=grid%akms ,ALBBCK=grid%albbck ,ALBEDO=grid%albedo &
609 & ,BR=br ,CANWAT=grid%canwat ,CHKLOWQ=chklowq &
610 & ,CT=grid%ct ,DT=grid%dt ,DX=grid%dx &
611 & ,DZ8W=dz8w ,DZS=grid%dzs ,FLHC=grid%flhc &
612 & ,FLQC=grid%flqc ,GLW=grid%glw ,GRDFLX=grid%grdflx &
613 & ,GSW=grid%gsw ,SWDOWN=grid%swdown ,GZ1OZ0=gz1oz0 ,HFX=grid%hfx &
614 & ,HT=grid%ht ,IFSNOW=config_flags%ifsnow ,ISFFLX=config_flags%isfflx &
615 & ,ISLTYP=grid%isltyp ,ITIMESTEP=grid%itimestep &
616 & ,IVGTYP=grid%ivgtyp ,LH=grid%lh ,LOWLYR=grid%lowlyr &
617 & ,MAVAIL=grid%mavail ,NUM_SOIL_LAYERS=config_flags%num_soil_layers &
618 & ,P8W=p8w ,PBLH=grid%pblh ,PI_PHY=pi_phy &
619 & ,PSFC=grid%psfc ,PSHLTR=grid%pshltr ,PSIH=psih &
620 & ,PSIM=psim ,P_PHY=p_phy ,Q10=grid%q10 &
621 & ,Q2=grid%q2 ,QFX=grid%qfx ,QSFC=grid%qsfc &
622 & ,QSHLTR=grid%qshltr ,QZ0=grid%qz0 ,RAINCV=grid%raincv &
623 & ,RA_LW_PHYSICS=config_flags%ra_lw_physics ,RHO=rho &
624 & ,RMOL=grid%rmol ,SFCEVP=grid%sfcevp ,SFCEXC=grid%sfcexc &
625 & ,SFCRUNOFF=grid%sfcrunoff &
626 & ,SF_SFCLAY_PHYSICS=config_flags%sf_sfclay_physics &
627 & ,SF_SURFACE_PHYSICS=config_flags%sf_surface_physics ,SH2O=grid%sh2o &
628 & ,SHDMAX=grid%shdmax ,SHDMIN=grid%shdmin ,SMOIS=grid%smois &
629 & ,SMSTAV=grid%smstav ,SMSTOT=grid%smstot ,SNOALB=grid%snoalb &
630 & ,SNOW=grid%snow ,SNOWC=grid%snowc ,SNOWH=grid%snowh &
631 & ,SST=grid%sst ,SST_UPDATE=grid%sst_update &
632 & ,STEPBL=grid%stepbl ,TH10=grid%th10 ,TH2=grid%th2 &
633 & ,THZ0=grid%thz0 ,TH_PHY=th_phy ,TKE_MYJ=grid%tke_myj &
634 & ,TMN=grid%tmn ,TSHLTR=grid%tshltr ,TSK=grid%tsk &
635 & ,TSLB=grid%tslb ,T_PHY=t_phy ,U10=grid%u10 &
636 & ,URATX=grid%uratx ,VRATX=grid%vratx ,TRATX=grid%tratx &
637 & ,UDRUNOFF=grid%udrunoff ,UST=grid%ust ,UZ0=grid%uz0 &
638 & ,U_FRAME=grid%u_frame ,U_PHY=u_phy ,V10=grid%v10 &
639 & ,VEGFRA=grid%vegfra ,VZ0=grid%vz0 ,V_FRAME=grid%v_frame &
640 & ,V_PHY=v_phy ,WARM_RAIN=grid%warm_rain &
641 & ,WSPD=wspd ,XICE=grid%xice ,XLAND=grid%xland &
642 & ,Z0=grid%z0 ,Z=grid%em_z ,ZNT=grid%znt &
643 & ,ZS=grid%zs &
644 & ,DECLIN_URB=grid%declin_urb ,COSZ_URB2D=grid%cosz_urb2d & !I urban
645 & ,OMG_URB2D=grid%omg_urb2d ,xlat_urb2d=grid%XLAT & !I urban
646 & ,NUM_ROOF_LAYERS=num_roof_layers & !I urban
647 & ,NUM_WALL_LAYERS=num_wall_layers & !I urban
648 & ,NUM_ROAD_LAYERS=num_road_layers &
649 & ,DZR=grid%dzr ,DZB=grid%dzb ,DZG=grid%dzg & !I urban
650 & ,TR_URB2D=grid%tr_urb2d ,TB_URB2D=grid%tb_urb2d &
651 & ,TG_URB2D=grid%tg_urb2d & !H urban
652 & ,TC_URB2D=grid%tc_urb2d ,QC_URB2D=grid%qc_urb2d & !H urban
653 & ,UC_URB2D=grid%uc_urb2d & !H urban
654 & ,XXXR_URB2D=grid%xxxr_urb2d &
655 & ,XXXB_URB2D=grid%xxxb_urb2d & !H urban
656 & ,XXXG_URB2D=grid%xxxg_urb2d &
657 & ,XXXC_URB2D=grid%xxxc_urb2d & !H urban
658 & ,TRL_URB3D=grid%trl_urb3d ,TBL_URB3D=grid%tbl_urb3d & !H urban
659 & ,TGL_URB3D=grid%tgl_urb3d & !H urban
660 & ,SH_URB2D=grid%sh_urb2d ,LH_URB2D=grid%lh_urb2d &
661 & ,G_URB2D=grid%g_urb2d & !H urban
662 & ,RN_URB2D=grid%rn_urb2d , TS_URB2D=grid%ts_urb2d & !H urban
663 & ,FRC_URB2D=grid%frc_urb2d & !H urban
664 & ,UTYPE_URB2D=grid%utype_urb2d & !H urban
665 & ,ucmcall=grid%ucmcall & !H urban
666 ! Indexes
667 & ,IDS=ids,IDE=ide, JDS=jds,JDE=jde, KDS=kds,KDE=kde &
668 & ,IMS=ims,IME=ime, JMS=jms,JME=jme, KMS=kms,KME=kme &
669 & , I_START=grid%i_start,I_END=min(grid%i_end, ide-1) &
670 & , J_START=grid%j_start,J_END=min(grid%j_end, jde-1) &
671 & , KTS=k_start, KTE=min(k_end,kde-1) &
672 & , NUM_TILES=grid%num_tiles &
673 ! Optional
674 & ,QV_CURR=moist(ims,kms,jms,P_QV), F_QV=F_QV &
675 & ,QC_CURR=moist(ims,kms,jms,P_QC), F_QC=F_QC &
676 & ,QR_CURR=moist(ims,kms,jms,P_QR), F_QR=F_QR &
677 & ,QI_CURR=moist(ims,kms,jms,P_QI), F_QI=F_QI &
678 & ,QS_CURR=moist(ims,kms,jms,P_QS), F_QS=F_QS &
679 & ,QG_CURR=moist(ims,kms,jms,P_QG), F_QG=F_QG &
680 & ,CAPG=grid%capg, EMISS=grid%emiss, HOL=hol,MOL=grid%mol &
681 & ,RAINBL=grid%rainbl,SR=grid%em_sr &
682 & ,RAINNCV=grid%rainncv,REGIME=regime,T2=grid%t2,THC=grid%thc &
683 & ,QSG=grid%qsg,QVG=grid%qvg,QCG=grid%qcg,SOILT1=grid%soilt1,TSNAV=grid%tsnav & ! ruc lsm
684 & ,SMFR3D=grid%smfr3d,KEEPFR3DFLAG=grid%keepfr3dflag & ! ruc lsm
685 & ,POTEVP=grid%em_POTEVP, SNOPCX=grid%em_SNOPCX, SOILTB=grid%em_SOILTB & ! ruc lsm
686 & )
687 BENCH_END(surf_driver_tim)
688
689 !*********
690 ! pbl
691
692 CALL wrf_debug ( 200 , ' call pbl_driver' )
693 BENCH_START(pbl_driver_tim)
694 CALL pbl_driver( &
695 & AKHS=grid%akhs ,AKMS=grid%akms &
696 & ,BL_PBL_PHYSICS=config_flags%bl_pbl_physics &
697 & ,BR=br ,CHKLOWQ=chklowq ,CT=grid%ct &
698 & ,DT=grid%dt ,DX=grid%dx ,DZ8W=dz8w &
699 & ,EL_MYJ=grid%el_myj ,EXCH_H=grid%exch_h ,GRDFLX=grid%grdflx &
700 & ,GZ1OZ0=gz1oz0 ,HFX=grid%hfx ,HT=grid%ht &
701 & ,ITIMESTEP=grid%itimestep ,KPBL=grid%kpbl &
702 & ,LH=grid%lh ,LOWLYR=grid%lowlyr ,P8W=p8w &
703 & ,PBLH=grid%pblh ,PI_PHY=pi_phy ,PSIH=psih &
704 & ,PSIM=psim ,P_PHY=p_phy ,QFX=grid%qfx &
705 & ,QSFC=grid%qsfc ,QZ0=grid%qz0 &
706 & ,RA_LW_PHYSICS=config_flags%ra_lw_physics &
707 & ,RHO=rho ,RQCBLTEN=grid%rqcblten ,RQIBLTEN=grid%rqiblten &
708 & ,RQVBLTEN=grid%rqvblten ,RTHBLTEN=grid%rthblten ,RUBLTEN=grid%rublten &
709 & ,RVBLTEN=grid%rvblten ,SNOW=grid%snow ,STEPBL=grid%stepbl &
710 & ,THZ0=grid%thz0 ,TH_PHY=th_phy ,TKE_MYJ=grid%tke_myj &
711 & ,TSK=grid%tsk ,T_PHY=t_phy ,UST=grid%ust &
712 & ,UZ0=grid%uz0 ,U_FRAME=grid%u_frame ,U_PHY=u_phy &
713 & ,VZ0=grid%vz0 ,V_FRAME=grid%v_frame ,V_PHY=v_phy &
714 & ,WARM_RAIN=grid%warm_rain ,WSPD=wspd &
715 & ,XICE=grid%xice ,XLAND=grid%xland ,Z=grid%em_z &
716 & ,ZNT=grid%znt &
717 & ,IDS=ids,IDE=ide, JDS=jds,JDE=jde, KDS=kds,KDE=kde &
718 & ,IMS=ims,IME=ime, JMS=jms,JME=jme, KMS=kms,KME=kme &
719 & ,I_START=grid%i_start,I_END=min(grid%i_end, ide-1) &
720 & ,J_START=grid%j_start,J_END=min(grid%j_end, jde-1) &
721 & ,KTS=k_start, KTE=min(k_end,kde-1) &
722 & ,NUM_TILES=grid%num_tiles &
723 ! optional
724 & ,QV_CURR=moist(ims,kms,jms,P_QV), F_QV=F_QV &
725 & ,QC_CURR=moist(ims,kms,jms,P_QC), F_QC=F_QC &
726 & ,QR_CURR=moist(ims,kms,jms,P_QR), F_QR=F_QR &
727 & ,QI_CURR=moist(ims,kms,jms,P_QI), F_QI=F_QI &
728 & ,QS_CURR=moist(ims,kms,jms,P_QS), F_QS=F_QS &
729 & ,QG_CURR=moist(ims,kms,jms,P_QG), F_QG=F_QG &
730 & ,HOL=HOL, MOL=grid%mol, REGIME=REGIME &
731 & )
732
733 BENCH_END(pbl_driver_tim)
734
735 ! cumulus para.
736
737 CALL wrf_debug ( 200 , ' call cumulus_driver' )
738
739 BENCH_START(cu_driver_tim)
740 CALL cumulus_driver( &
741 ! Prognostic variables
742 & U=u_phy ,V=v_phy ,TH=th_phy ,T=t_phy &
743 & ,W=grid%em_w_2 ,P=p_phy ,PI=pi_phy ,RHO=rho &
744 ! Other arguments
745 & ,ITIMESTEP=grid%itimestep ,DT=grid%dt ,DX=grid%dx &
746 & ,RAINC=grid%rainc ,RAINCV=grid%raincv ,NCA=grid%nca &
747 & ,HTOP=grid%cutop ,HBOT=grid%cubot ,KPBL=grid%kpbl &
748 & ,DZ8W=dz8w ,P8W=p8w &
749 & ,W0AVG=grid%w0avg ,STEPCU=grid%stepcu &
750 & ,CLDEFI=grid%cldefi ,LOWLYR=grid%lowlyr ,XLAND=grid%xland &
751 & ,APR_GR=grid%apr_gr ,APR_W=grid%apr_w ,APR_MC=grid%apr_mc &
752 & ,APR_ST=grid%apr_st ,APR_AS=grid%apr_as ,APR_CAPMA=grid%apr_capma &
753 & ,APR_CAPME=grid%apr_capme ,APR_CAPMI=grid%apr_capmi &
754 & ,MASS_FLUX=grid%mass_flux ,XF_ENS=grid%xf_ens &
755 & ,PR_ENS=grid%pr_ens ,HT=grid%ht &
756 & ,ENSDIM=config_flags%ensdim ,MAXIENS=config_flags%maxiens ,MAXENS=config_flags%maxens &
757 & ,MAXENS2=config_flags%maxens2 ,MAXENS3=config_flags%maxens3 &
758 & ,CU_ACT_FLAG=cu_act_flag ,WARM_RAIN=grid%warm_rain &
759 & ,GSW=grid%gsw &
760 ! Selection flag
761 & ,CU_PHYSICS=config_flags%cu_physics &
762 ! Dimension arguments
763 & ,IDS=ids,IDE=ide, JDS=jds,JDE=jde, KDS=kds,KDE=kde &
764 & ,IMS=ims,IME=ime, JMS=jms,JME=jme, KMS=kms,KME=kme &
765 & ,I_START=grid%i_start,I_END=min(grid%i_end, ide-1) &
766 & ,J_START=grid%j_start,J_END=min(grid%j_end, jde-1) &
767 & ,KTS=k_start, KTE=min(k_end,kde-1) &
768 & ,NUM_TILES=grid%num_tiles &
769 ! Moisture tendency arguments
770 & ,RQVCUTEN=grid%rqvcuten , RQCCUTEN=grid%rqccuten &
771 & ,RQSCUTEN=grid%rqscuten , RQICUTEN=grid%rqicuten &
772 & ,RQRCUTEN=grid%rqrcuten , RQVBLTEN=grid%rqvblten &
773 & ,RQVFTEN=grid%rqvften &
774 ! Other tendency arguments
775 & ,RTHRATEN=grid%rthraten , RTHBLTEN=grid%rthblten &
776 & ,RTHCUTEN=grid%rthcuten , RTHFTEN=grid%rthften &
777 ! Moisture tracer arguments
778 & ,QV_CURR=moist(ims,kms,jms,P_QV), F_QV=F_QV &
779 & ,QC_CURR=moist(ims,kms,jms,P_QC), F_QC=F_QC &
780 & ,QR_CURR=moist(ims,kms,jms,P_QR), F_QR=F_QR &
781 & ,QI_CURR=moist(ims,kms,jms,P_QI), F_QI=F_QI &
782 & ,QS_CURR=moist(ims,kms,jms,P_QS), F_QS=F_QS &
783 & ,QG_CURR=moist(ims,kms,jms,P_QG), F_QG=F_QG &
784 #ifdef WRF_CHEM
785 & ,GD_CLOUD=grid%GD_CLOUD,GD_CLOUD2=grid%GD_CLOUD2 &
786 #endif
787 & )
788 BENCH_END(cu_driver_tim)
789
790 ! fdda
791
792 CALL wrf_debug ( 200 , ' call fddagd_driver' )
793
794 BENCH_START(fdda_driver_tim)
795 CALL fddagd_driver(itimestep=grid%itimestep,dt=grid%dt,xtime=grid%XTIME, &
796 id=grid%id, &
797 RUNDGDTEN=grid%rundgdten,RVNDGDTEN=grid%rvndgdten, &
798 RTHNDGDTEN=grid%rthndgdten,RQVNDGDTEN=grid%rqvndgdten, &
799 RMUNDGDTEN=grid%rmundgdten, &
800 u_ndg_old=fdda3d(ims,kms,jms,P_u_ndg_old), &
801 v_ndg_old=fdda3d(ims,kms,jms,P_v_ndg_old), &
802 t_ndg_old=fdda3d(ims,kms,jms,P_t_ndg_old), &
803 q_ndg_old=fdda3d(ims,kms,jms,P_q_ndg_old), &
804 mu_ndg_old=fdda2d(ims,1,jms,P_mu_ndg_old), &
805 u_ndg_new=fdda3d(ims,kms,jms,P_u_ndg_new), &
806 v_ndg_new=fdda3d(ims,kms,jms,P_v_ndg_new), &
807 t_ndg_new=fdda3d(ims,kms,jms,P_t_ndg_new), &
808 q_ndg_new=fdda3d(ims,kms,jms,P_q_ndg_new), &
809 mu_ndg_new=fdda2d(ims,1,jms,P_mu_ndg_new), &
810 u3d=grid%em_u_2,v3d=grid%em_v_2,th_phy=th_phy,rho=rho,moist=moist, &
811 p_phy=p_phy,pi_phy=pi_phy,p8w=p8w,t_phy=t_phy, &
812 dz8w=dz8w,z=grid%em_z,z_at_w=z_at_w, &
813 config_flags=config_flags,dx=grid%DX,n_moist=num_3d_m, &
814 STEPFG=grid%STEPFG, &
815 pblh=grid%pblh,ht=grid%ht, &
816 IDS=ids,IDE=ide, JDS=jds,JDE=jde, KDS=kds,KDE=kde &
817 ,IMS=ims,IME=ime, JMS=jms,JME=jme, KMS=kms,KME=kme &
818 ,I_START=grid%i_start,I_END=min(grid%i_end, ide-1) &
819 ,J_START=grid%j_start,J_END=min(grid%j_end, jde-1) &
820 ,KTS=k_start, KTE=min(k_end,kde-1) &
821 , num_tiles=grid%num_tiles )
822 BENCH_END(fdda_driver_tim)
823
824 ! calculate_phy_tend
825
826 BENCH_START(cal_phy_tend)
827 !$OMP PARALLEL DO &
828 !$OMP PRIVATE ( ij )
829
830 DO ij = 1 , grid%num_tiles
831
832 CALL wrf_debug ( 200 , ' call calculate_phy_tend' )
833 CALL calculate_phy_tend (config_flags,grid%em_mut,grid%em_muu,grid%em_muv,pi_phy, &
834 grid%rthraten, &
835 grid%rublten,grid%rvblten,grid%rthblten, &
836 grid%rqvblten,grid%rqcblten,grid%rqiblten, &
837 grid%rthcuten,grid%rqvcuten,grid%rqccuten,grid%rqrcuten, &
838 grid%rqicuten,grid%rqscuten, &
839 grid%RUNDGDTEN,grid%RVNDGDTEN,grid%RTHNDGDTEN,grid%RQVNDGDTEN, &
840 grid%RMUNDGDTEN, &
841 ids,ide, jds,jde, kds,kde, &
842 ims,ime, jms,jme, kms,kme, &
843 grid%i_start(ij), min(grid%i_end(ij),ide-1), &
844 grid%j_start(ij), min(grid%j_end(ij),jde-1), &
845 k_start , min(k_end,kde-1) )
846
847 ENDDO
848 !$OMP END PARALLEL DO
849 BENCH_END(cal_phy_tend)
850
851 ! tke diffusion
852
853 IF(config_flags%diff_opt .eq. 2 .OR. config_flags%diff_opt .eq. 1) THEN
854
855 BENCH_START(comp_diff_metrics_tim)
856 !$OMP PARALLEL DO &
857 !$OMP PRIVATE ( ij )
858
859 DO ij = 1 , grid%num_tiles
860
861 CALL wrf_debug ( 200 , ' call compute_diff_metrics ' )
862 CALL compute_diff_metrics ( config_flags, grid%em_ph_2, grid%em_phb, grid%em_z, grid%em_rdz, grid%em_rdzw, &
863 grid%em_zx, grid%em_zy, grid%rdx, grid%rdy, &
864 ids, ide, jds, jde, kds, kde, &
865 ims, ime, jms, jme, kms, kme, &
866 grid%i_start(ij), grid%i_end(ij), &
867 grid%j_start(ij), grid%j_end(ij), &
868 k_start , k_end )
869 ENDDO
870 !$OMP END PARALLEL DO
871 BENCH_END(comp_diff_metrics_tim)
872
873 #ifdef DM_PARALLEL
874 # include "PERIOD_BDY_EM_A1.inc"
875 #endif
876
877 BENCH_START(tke_diff_bc_tim)
878 DO ij = 1 , grid%num_tiles
879
880 CALL wrf_debug ( 200 , ' call bc for diffusion_metrics ' )
881 CALL set_physical_bc3d( grid%em_rdzw , 'w', config_flags, &
882 ids, ide, jds, jde, kds, kde, &
883 ims, ime, jms, jme, kms, kme, &
884 ips, ipe, jps, jpe, kps, kpe, &
885 grid%i_start(ij), grid%i_end(ij), &
886 grid%j_start(ij), grid%j_end(ij), &
887 k_start , k_end )
888 CALL set_physical_bc3d( grid%em_rdz , 'w', config_flags, &
889 ids, ide, jds, jde, kds, kde, &
890 ims, ime, jms, jme, kms, kme, &
891 ips, ipe, jps, jpe, kps, kpe, &
892 grid%i_start(ij), grid%i_end(ij), &
893 grid%j_start(ij), grid%j_end(ij), &
894 k_start , k_end )
895 CALL set_physical_bc3d( grid%em_z , 'w', config_flags, &
896 ids, ide, jds, jde, kds, kde, &
897 ims, ime, jms, jme, kms, kme, &
898 ips, ipe, jps, jpe, kps, kpe, &
899 grid%i_start(ij), grid%i_end(ij), &
900 grid%j_start(ij), grid%j_end(ij), &
901 k_start , k_end )
902 CALL set_physical_bc3d( grid%em_zx , 'w', config_flags, &
903 ids, ide, jds, jde, kds, kde, &
904 ims, ime, jms, jme, kms, kme, &
905 ips, ipe, jps, jpe, kps, kpe, &
906 grid%i_start(ij), grid%i_end(ij), &
907 grid%j_start(ij), grid%j_end(ij), &
908 k_start , k_end )
909 CALL set_physical_bc3d( grid%em_zy , 'w', config_flags, &
910 ids, ide, jds, jde, kds, kde, &
911 ims, ime, jms, jme, kms, kme, &
912 ips, ipe, jps, jpe, kps, kpe, &
913 grid%i_start(ij), grid%i_end(ij), &
914 grid%j_start(ij), grid%j_end(ij), &
915 k_start , k_end )
916
917 ENDDO
918 BENCH_END(tke_diff_bc_tim)
919
920 #ifdef DM_PARALLEL
921 # include "HALO_EM_TKE_C.inc"
922 #endif
923
924 BENCH_START(deform_div_tim)
925
926 !$OMP PARALLEL DO &
927 !$OMP PRIVATE ( ij )
928
929 DO ij = 1 , grid%num_tiles
930
931 CALL wrf_debug ( 200 , ' call cal_deform_and_div' )
932 CALL cal_deform_and_div ( config_flags,grid%em_u_2,grid%em_v_2,grid%em_w_2,grid%div, &
933 grid%defor11,grid%defor22,grid%defor33,grid%defor12, &
934 grid%defor13,grid%defor23, &
935 grid%u_base, grid%v_base,grid%msfu,grid%msfv,grid%msft, &
936 grid%rdx, grid%rdy, grid%em_dn, grid%em_dnw, grid%em_rdz, grid%em_rdzw, &
937 grid%em_fnm,grid%em_fnp,grid%cf1,grid%cf2,grid%cf3,grid%em_zx,grid%em_zy, &
938 ids, ide, jds, jde, kds, kde, &
939 ims, ime, jms, jme, kms, kme, &
940 grid%i_start(ij), grid%i_end(ij), &
941 grid%j_start(ij), grid%j_end(ij), &
942 k_start , k_end )
943 ENDDO
944 !$OMP END PARALLEL DO
945 BENCH_END(deform_div_tim)
946
947
948 #ifdef DM_PARALLEL
949 # include "HALO_EM_TKE_D.inc"
950 #endif
951
952
953 ! calculate tke, kmh, and kmv
954
955 BENCH_START(calc_tke_tim)
956 !$OMP PARALLEL DO &
957 !$OMP PRIVATE ( ij )
958
959 DO ij = 1 , grid%num_tiles
960
961 CALL wrf_debug ( 200 , ' call calculate_km_kh' )
962 CALL calculate_km_kh( config_flags,grid%dt,grid%dampcoef,grid%zdamp,config_flags%damp_opt, &
963 grid%xkmh,grid%xkmhd,grid%xkmv,grid%xkhh,grid%xkhv,grid%bn2, &
964 grid%khdif,grid%kvdif,grid%div, &
965 grid%defor11,grid%defor22,grid%defor33,grid%defor12, &
966 grid%defor13,grid%defor23, &
967 grid%em_tke_2,p8w,t8w,th_phy, &
968 t_phy,p_phy,moist,grid%em_dn,grid%em_dnw, &
969 grid%dx,grid%dy,grid%em_rdz,grid%em_rdzw,config_flags%mix_cr_len,num_3d_m, &
970 grid%cf1, grid%cf2, grid%cf3, grid%warm_rain, &
971 grid%kh_tke_upper_bound, grid%kv_tke_upper_bound, &
972 ids,ide, jds,jde, kds,kde, &
973 ims,ime, jms,jme, kms,kme, &
974 grid%i_start(ij), grid%i_end(ij), &
975 grid%j_start(ij), grid%j_end(ij), &
976 k_start , k_end )
977 ENDDO
978 !$OMP END PARALLEL DO
979 BENCH_END(calc_tke_tim)
980
981 #ifdef DM_PARALLEL
982 # include "HALO_EM_TKE_E.inc"
983 #endif
984
985 ENDIF
986
987 #ifdef DM_PARALLEL
988 # include "PERIOD_BDY_EM_PHY_BC.inc"
989 IF ( config_flags%grid_fdda .eq. 1) THEN
990 # include "PERIOD_BDY_EM_FDDA_BC.inc"
991 ENDIF
992 # include "PERIOD_BDY_EM_CHEM.inc"
993 #endif
994
995 BENCH_START(phy_bc_tim)
996 !$OMP PARALLEL DO &
997 !$OMP PRIVATE ( ij )
998
999 DO ij = 1 , grid%num_tiles
1000
1001 CALL wrf_debug ( 200 , ' call phy_bc' )
1002 CALL phy_bc (config_flags,grid%div,grid%defor11,grid%defor22,grid%defor33, &
1003 grid%defor12,grid%defor13,grid%defor23, &
1004 grid%xkmh,grid%xkmhd,grid%xkmv,grid%xkhh,grid%xkhv, &
1005 grid%em_tke_2, &
1006 grid%rublten, grid%rvblten, &
1007 ids, ide, jds, jde, kds, kde, &
1008 ims, ime, jms, jme, kms, kme, &
1009 ips, ipe, jps, jpe, kps, kpe, &
1010 grid%i_start(ij), grid%i_end(ij), &
1011 grid%j_start(ij), grid%j_end(ij), &
1012 k_start , k_end )
1013 ENDDO
1014 !$OMP END PARALLEL DO
1015 BENCH_END(phy_bc_tim)
1016
1017 #ifdef DM_PARALLEL
1018 !-----------------------------------------------------------------------
1019 !
1020 ! MPP for some physics tendency, km, kh, deformation, and divergence
1021 !
1022 ! * *
1023 ! * + * * + * +
1024 ! * *
1025 !
1026 ! (for PBL)
1027 ! grid%rublten x
1028 ! grid%rvblten x
1029 !
1030 ! (for diff_opt >= 1)
1031 ! grid%defor11 x
1032 ! grid%defor22 x
1033 ! grid%defor12 x
1034 ! grid%defor13 x
1035 ! grid%defor23 x
1036 ! grid%div x
1037 ! grid%xkmv x
1038 ! grid%xkmh x
1039 ! grid%xkmhd x
1040 ! grid%xkhv x
1041 ! grid%xkhh x
1042 ! tke x
1043 !
1044 !-----------------------------------------------------------------------
1045 IF ( config_flags%bl_pbl_physics .ge. 1 ) THEN
1046 # include "HALO_EM_PHYS_PBL.inc"
1047 ENDIF
1048 IF ( config_flags%grid_fdda .eq. 1) THEN
1049 # include "HALO_EM_FDDA.inc"
1050 ENDIF
1051 IF ( config_flags%diff_opt .ge. 1 ) THEN
1052 # include "HALO_EM_PHYS_DIFFUSION.inc"
1053 ENDIF
1054
1055 IF ( config_flags%h_mom_adv_order <= 4 ) THEN
1056 # include "HALO_EM_TKE_3.inc"
1057 ELSE IF ( config_flags%h_mom_adv_order <= 6 ) THEN
1058 # include "HALO_EM_TKE_5.inc"
1059 ELSE
1060 WRITE(wrf_err_message,*)'solve_em: invalid h_mom_adv_order = ',config_flags%h_mom_adv_order
1061 CALL wrf_error_fatal(TRIM(wrf_err_message))
1062 ENDIF
1063 #endif
1064
1065 BENCH_START(update_phy_ten_tim)
1066 !$OMP PARALLEL DO &
1067 !$OMP PRIVATE ( ij )
1068
1069 DO ij = 1 , grid%num_tiles
1070
1071 CALL wrf_debug ( 200 , ' call update_phy_ten' )
1072 CALL update_phy_ten(t_tendf, ru_tendf, rv_tendf,moist_tend, &
1073 scalar_tend, mu_tendf, &
1074 grid%rthraten,grid%rthblten,grid%rthcuten,grid%rublten,grid%rvblten, &
1075 grid%rqvblten,grid%rqcblten,grid%rqiblten, &
1076 grid%rqvcuten,grid%rqccuten,grid%rqrcuten,grid%rqicuten,grid%rqscuten, &
1077 grid%RUNDGDTEN,grid%RVNDGDTEN,grid%RTHNDGDTEN,grid%RQVNDGDTEN, &
1078 grid%RMUNDGDTEN, &
1079 num_3d_m,num_3d_s,config_flags,rk_step,grid%adv_moist_cond, &
1080 ids, ide, jds, jde, kds, kde, &
1081 ims, ime, jms, jme, kms, kme, &
1082 grid%i_start(ij), grid%i_end(ij), &
1083 grid%j_start(ij), grid%j_end(ij), &
1084 k_start, k_end )
1085
1086 END DO
1087 !$OMP END PARALLEL DO
1088 BENCH_END(update_phy_ten_tim)
1089
1090 IF( config_flags%diff_opt .eq. 2 .and. config_flags%km_opt .eq. 2 ) THEN
1091
1092 BENCH_START(tke_rhs_tim)
1093 !$OMP PARALLEL DO &
1094 !$OMP PRIVATE ( ij )
1095
1096 DO ij = 1 , grid%num_tiles
1097
1098 CALL tke_rhs ( tke_tend,grid%bn2, &
1099 config_flags,grid%defor11,grid%defor22,grid%defor33, &
1100 grid%defor12,grid%defor13,grid%defor23,grid%em_u_2,grid%em_v_2,grid%em_w_2,grid%div, &
1101 grid%em_tke_2,grid%em_mut, &
1102 th_phy,p_phy,p8w,t8w,grid%em_z,grid%em_fnm,grid%em_fnp, &
1103 grid%cf1,grid%cf2,grid%cf3,grid%msft,grid%xkmh,grid%xkmv,grid%xkhv,grid%rdx,grid%rdy, &
1104 grid%dx,grid%dy,grid%dt,grid%em_zx,grid%em_zy,grid%em_rdz,grid%em_rdzw,grid%em_dn, &
1105 grid%em_dnw,config_flags%mix_cr_len, &
1106 ids, ide, jds, jde, kds, kde, &
1107 ims, ime, jms, jme, kms, kme, &
1108 grid%i_start(ij), grid%i_end(ij), &
1109 grid%j_start(ij), grid%j_end(ij), &
1110 k_start , k_end )
1111
1112 ENDDO
1113 !$OMP END PARALLEL DO
1114 BENCH_END(tke_rhs_tim)
1115
1116 ENDIF
1117
1118 ! calculate vertical diffusion first and then horizontal
1119 ! (keep this order)
1120
1121 IF(config_flags%diff_opt .eq. 2) THEN
1122
1123 IF (config_flags%bl_pbl_physics .eq. 0) THEN
1124
1125 BENCH_START(vert_diff_tim)
1126 !$OMP PARALLEL DO &
1127 !$OMP PRIVATE ( ij )
1128 DO ij = 1 , grid%num_tiles
1129
1130 CALL wrf_debug ( 200 , ' call vertical_diffusion_2 ' )
1131 CALL vertical_diffusion_2( ru_tendf, rv_tendf, rw_tendf, &
1132 t_tendf, tke_tend, &
1133 moist_tend, num_3d_m, &
1134 chem_tend, num_3d_c, &
1135 scalar_tend, num_3d_s, &
1136 grid%em_u_2, grid%em_v_2, &
1137 grid%em_t_2,grid%u_base,grid%v_base,grid%em_t_base,grid%qv_base, &
1138 grid%em_mut,grid%em_tke_2,config_flags, &
1139 grid%defor13,grid%defor23,grid%defor33, &
1140 grid%div, moist, chem, scalar, &
1141 grid%xkmv, grid%xkhv, config_flags%km_opt, &
1142 grid%em_fnm, grid%em_fnp, grid%em_dn, grid%em_dnw, grid%em_rdz, grid%em_rdzw, &
1143 ids, ide, jds, jde, kds, kde, &
1144 ims, ime, jms, jme, kms, kme, &
1145 grid%i_start(ij), grid%i_end(ij), &
1146 grid%j_start(ij), grid%j_end(ij), &
1147 k_start , k_end )
1148
1149 ENDDO
1150 !$OMP END PARALLEL DO
1151 BENCH_END(vert_diff_tim)
1152
1153 ENDIF
1154 !
1155 BENCH_START(hor_diff_tim)
1156 !$OMP PARALLEL DO &
1157 !$OMP PRIVATE ( ij )
1158 DO ij = 1 , grid%num_tiles
1159
1160 CALL wrf_debug ( 200 , ' call horizontal_diffusion_2' )
1161 CALL horizontal_diffusion_2( t_tendf, ru_tendf, rv_tendf, rw_tendf, &
1162 tke_tend, &
1163 moist_tend, num_3d_m, &
1164 chem_tend, num_3d_c, &
1165 scalar_tend, num_3d_s, &
1166 grid%em_t_2, th_phy, &
1167 grid%em_mut, grid%em_tke_2, config_flags, &
1168 grid%defor11, grid%defor22, grid%defor12, &
1169 grid%defor13, grid%defor23, grid%div, &
1170 moist, chem, scalar, &
1171 grid%msfu, grid%msfv, grid%msft, grid%xkmhd, grid%xkhh, config_flags%km_opt, &
1172 grid%rdx, grid%rdy, grid%em_rdz, grid%em_rdzw, &
1173 grid%em_fnm, grid%em_fnp, grid%cf1, grid%cf2, grid%cf3, &
1174 grid%em_zx, grid%em_zy, grid%em_dn, grid%em_dnw, &
1175 ids, ide, jds, jde, kds, kde, &
1176 ims, ime, jms, jme, kms, kme, &
1177 grid%i_start(ij), grid%i_end(ij), &
1178 grid%j_start(ij), grid%j_end(ij), &
1179 k_start , k_end )
1180 ENDDO
1181 !$OMP END PARALLEL DO
1182 BENCH_END(hor_diff_tim)
1183
1184 ENDIF
1185
1186 # ifdef DM_PARALLEL
1187 # include "HALO_OBS_NUDGE.inc"
1188 #endif
1189 !***********************************************************************
1190 ! This section for obs nudging
1191 !$OMP PARALLEL DO &
1192 !$OMP PRIVATE ( ij )
1193
1194 DO ij = 1 , grid%num_tiles
1195
1196 CALL fddaobs_driver (grid%grid_id, model_config_rec%grid_id, &
1197 model_config_rec%parent_id, config_flags%restart, &
1198 grid%obs_nudge_opt, &
1199 grid%obs_ipf_errob, &
1200 grid%obs_ipf_nudob, &
1201 grid%fdda_start, &
1202 grid%fdda_end, &
1203 grid%obs_nudge_wind, &
1204 grid%obs_nudge_temp, &
1205 grid%obs_nudge_mois, &
1206 grid%obs_nudge_pstr, &
1207 grid%obs_coef_wind, &
1208 grid%obs_coef_temp, &
1209 grid%obs_coef_mois, &
1210 grid%obs_coef_pstr, &
1211 grid%obs_rinxy, &
1212 grid%obs_rinsig, &
1213 grid%obs_twindo, &
1214 grid%obs_npfi, &
1215 grid%obs_ionf, &
1216 grid%obs_idynin, &
1217 grid%obs_dtramp, &
1218 model_config_rec%cen_lat(1), &
1219 model_config_rec%cen_lon(1), &
1220 config_flags%truelat1, &
1221 config_flags%truelat2, &
1222 config_flags%map_proj, &
1223 model_config_rec%i_parent_start, &
1224 model_config_rec%j_parent_start, &
1225 grid%parent_grid_ratio, &
1226 grid%max_dom, grid%itimestep, &
1227 grid%dt, grid%gmt, grid%julday, grid%fdob, &
1228 grid%max_obs, &
1229 model_config_rec%nobs_ndg_vars, &
1230 model_config_rec%nobs_err_flds, &
1231 grid%fdob%nstat, grid%fdob%varobs, grid%fdob%errf, &
1232 grid%dx, grid%KPBL,grid%HT, &
1233 grid%em_mut, grid%em_muu, grid%em_muv, &
1234 grid%msft, grid%msfu, grid%msfv, &
1235 p_phy, t_tendf, t0, &
1236 grid%em_u_2, grid%em_v_2, grid%em_t_2, &
1237 moist(:,:,:,P_QV), &
1238 grid%em_pb, grid%p_top, grid%em_p, &
1239 grid%uratx, grid%vratx, grid%tratx, &
1240 ru_tendf, rv_tendf, &
1241 moist_tend(:,:,:,P_QV), grid%em_obs_savwt, &
1242 ids,ide, jds,jde, kds,kde, &
1243 ims,ime, jms,jme, kms,kme, &
1244 grid%i_start(ij), min(grid%i_end(ij),ide-1), &
1245 grid%j_start(ij), min(grid%j_end(ij),jde-1), &
1246 k_start , min(k_end,kde-1) )
1247
1248 ENDDO
1249
1250 !$OMP END PARALLEL DO
1251 !
1252 !***********************************************************************
1253
1254 END IF rk_step_is_one
1255
1256 BENCH_START(rk_tend_tim)
1257 !$OMP PARALLEL DO &
1258 !$OMP PRIVATE ( ij )
1259 DO ij = 1 , grid%num_tiles
1260
1261 CALL wrf_debug ( 200 , ' call rk_tendency' )
1262 CALL rk_tendency ( config_flags, rk_step, &
1263 grid%em_ru_tend, grid%em_rv_tend, rw_tend, ph_tend, t_tend, &
1264 ru_tendf, rv_tendf, rw_tendf, ph_tendf, t_tendf, &
1265 mu_tend, grid%em_u_save, grid%em_v_save, w_save, ph_save, &
1266 grid%em_t_save, mu_save, grid%rthften, &
1267 grid%em_ru, grid%em_rv, grid%em_rw, grid%em_ww, &
1268 grid%em_u_2, grid%em_v_2, grid%em_w_2, grid%em_t_2, grid%em_ph_2, &
1269 grid%em_u_1, grid%em_v_1, grid%em_w_1, grid%em_t_1, grid%em_ph_1, &
1270 grid%h_diabatic, grid%em_phb, grid%em_t_init, &
1271 grid%em_mu_2, grid%em_mut, grid%em_muu, grid%em_muv, grid%em_mub, &
1272 grid%em_al, grid%em_alt, grid%em_p, grid%em_pb, grid%em_php, cqu, cqv, cqw, &
1273 grid%u_base, grid%v_base, grid%em_t_base, grid%qv_base, grid%z_base, &
1274 grid%msfu, grid%msfv, grid%msft, grid%f, grid%e, grid%sina, grid%cosa, &
1275 grid%em_fnm, grid%em_fnp, grid%em_rdn, grid%em_rdnw, &
1276 grid%dt, grid%rdx, grid%rdy, grid%khdif, grid%kvdif, grid%xkmhd, &
1277 grid%diff_6th_opt, grid%diff_6th_factor, &
1278 grid%dampcoef,grid%zdamp,config_flags%damp_opt, &
1279 grid%cf1, grid%cf2, grid%cf3, grid%cfn, grid%cfn1, num_3d_m, &
1280 config_flags%non_hydrostatic, &
1281 ids, ide, jds, jde, kds, kde, &
1282 ims, ime, jms, jme, kms, kme, &
1283 grid%i_start(ij), grid%i_end(ij), &
1284 grid%j_start(ij), grid%j_end(ij), &
1285 k_start, k_end )
1286 END DO
1287 !$OMP END PARALLEL DO
1288 BENCH_END(rk_tend_tim)
1289
1290 BENCH_START(relax_bdy_dry_tim)
1291 !$OMP PARALLEL DO &
1292 !$OMP PRIVATE ( ij )
1293 DO ij = 1 , grid%num_tiles
1294
1295 IF( (config_flags%specified .or. config_flags%nested) .and. rk_step == 1 ) THEN
1296
1297 CALL relax_bdy_dry ( config_flags, &
1298 grid%em_u_save, grid%em_v_save, ph_save, grid%em_t_save, &
1299 w_save, mu_tend, &
1300 grid%em_ru, grid%em_rv, grid%em_ph_2, grid%em_t_2, &
1301 grid%em_w_2, grid%em_mu_2, grid%em_mut, &
1302 grid%em_u_bxs,grid%em_u_bxe,grid%em_u_bys,grid%em_u_bye, &
1303 grid%em_v_bxs,grid%em_v_bxe,grid%em_v_bys,grid%em_v_bye, &
1304 grid%em_ph_bxs,grid%em_ph_bxe,grid%em_ph_bys,grid%em_ph_bye, &
1305 grid%em_t_bxs,grid%em_t_bxe,grid%em_t_bys,grid%em_t_bye, &
1306 grid%em_w_bxs,grid%em_w_bxe,grid%em_w_bys,grid%em_w_bye, &
1307 grid%em_mu_bxs,grid%em_mu_bxe,grid%em_mu_bys,grid%em_mu_bye, &
1308 grid%em_u_btxs,grid%em_u_btxe,grid%em_u_btys,grid%em_u_btye, &
1309 grid%em_v_btxs,grid%em_v_btxe,grid%em_v_btys,grid%em_v_btye, &
1310 grid%em_ph_btxs,grid%em_ph_btxe,grid%em_ph_btys,grid%em_ph_btye, &
1311 grid%em_t_btxs,grid%em_t_btxe,grid%em_t_btys,grid%em_t_btye, &
1312 grid%em_w_btxs,grid%em_w_btxe,grid%em_w_btys,grid%em_w_btye, &
1313 grid%em_mu_btxs,grid%em_mu_btxe,grid%em_mu_btys,grid%em_mu_btye, &
1314 config_flags%spec_bdy_width, grid%spec_zone, grid%relax_zone, &
1315 grid%dtbc, grid%fcx, grid%gcx, &
1316 ids,ide, jds,jde, kds,kde, &
1317 ims,ime, jms,jme, kms,kme, &
1318 ips,ipe, jps,jpe, kps,kpe, &
1319 grid%i_start(ij), grid%i_end(ij), &
1320 grid%j_start(ij), grid%j_end(ij), &
1321 k_start, k_end )
1322
1323
1324 ENDIF
1325
1326 CALL rk_addtend_dry( grid%em_ru_tend, grid%em_rv_tend, rw_tend, ph_tend, t_tend, &
1327 ru_tendf, rv_tendf, rw_tendf, ph_tendf, t_tendf, &
1328 grid%em_u_save, grid%em_v_save, w_save, ph_save, grid%em_t_save, &
1329 mu_tend, mu_tendf, rk_step, &
1330 grid%h_diabatic, grid%em_mut, grid%msft, grid%msfu, grid%msfv, &
1331 ids,ide, jds,jde, kds,kde, &
1332 ims,ime, jms,jme, kms,kme, &
1333 ips,ipe, jps,jpe, kps,kpe, &
1334 grid%i_start(ij), grid%i_end(ij), &
1335 grid%j_start(ij), grid%j_end(ij), &
1336 k_start, k_end )
1337
1338 IF( config_flags%specified .or. config_flags%nested ) THEN
1339 CALL spec_bdy_dry ( config_flags, &
1340 grid%em_ru_tend, grid%em_rv_tend, ph_tend, t_tend, &
1341 rw_tend, mu_tend, &
1342 grid%em_u_bxs,grid%em_u_bxe,grid%em_u_bys,grid%em_u_bye, &
1343 grid%em_v_bxs,grid%em_v_bxe,grid%em_v_bys,grid%em_v_bye, &
1344 grid%em_ph_bxs,grid%em_ph_bxe,grid%em_ph_bys,grid%em_ph_bye, &
1345 grid%em_t_bxs,grid%em_t_bxe,grid%em_t_bys,grid%em_t_bye, &
1346 grid%em_w_bxs,grid%em_w_bxe,grid%em_w_bys,grid%em_w_bye, &
1347 grid%em_mu_bxs,grid%em_mu_bxe,grid%em_mu_bys,grid%em_mu_bye, &
1348 grid%em_u_btxs,grid%em_u_btxe,grid%em_u_btys,grid%em_u_btye, &
1349 grid%em_v_btxs,grid%em_v_btxe,grid%em_v_btys,grid%em_v_btye, &
1350 grid%em_ph_btxs,grid%em_ph_btxe,grid%em_ph_btys,grid%em_ph_btye, &
1351 grid%em_t_btxs,grid%em_t_btxe,grid%em_t_btys,grid%em_t_btye, &
1352 grid%em_w_btxs,grid%em_w_btxe,grid%em_w_btys,grid%em_w_btye, &
1353 grid%em_mu_btxs,grid%em_mu_btxe,grid%em_mu_btys,grid%em_mu_btye, &
1354 config_flags%spec_bdy_width, grid%spec_zone, &
1355 ids,ide, jds,jde, kds,kde, & ! domain dims
1356 ims,ime, jms,jme, kms,kme, & ! memory dims
1357 ips,ipe, jps,jpe, kps,kpe, & ! patch dims
1358 grid%i_start(ij), grid%i_end(ij), &
1359 grid%j_start(ij), grid%j_end(ij), &
1360 k_start, k_end )
1361
1362 ENDIF
1363
1364 END DO
1365 !$OMP END PARALLEL DO
1366 BENCH_END(relax_bdy_dry_tim)
1367
1368 !<DESCRIPTION>
1369 !<pre>
1370 ! (3) Small (acoustic,sound) steps.
1371 !
1372 ! Several acoustic steps are taken each RK pass. A small step
1373 ! sequence begins with calculating perturbation variables
1374 ! and coupling them to the column dry-air-mass mu
1375 ! (call to small_step_prep). This is followed by computing
1376 ! coefficients for the vertically implicit part of the
1377 ! small timestep (call to calc_coef_w).
1378 !
1379 ! The small steps are taken
1380 ! in the named loop "small_steps:". In the small_steps loop, first
1381 ! the horizontal momentum (u and v) are advanced (call to advance_uv),
1382 ! next mu and theta are advanced (call to advance_mu_t) followed by
1383 ! advancing w and the geopotential (call to advance_w). Diagnostic
1384 ! values for pressure and inverse density are updated at the end of
1385 ! each small_step.
1386 !
1387 ! The small-step section ends with the change of the perturbation variables
1388 ! back to full variables (call to small_step_finish).
1389 !</pre>
1390 !</DESCRIPTION>
1391
1392 BENCH_START(small_step_prep_tim)
1393 !$OMP PARALLEL DO &
1394 !$OMP PRIVATE ( ij )
1395
1396 DO ij = 1 , grid%num_tiles
1397
1398 ! Calculate coefficients for the vertically implicit acoustic/gravity wave
1399 ! integration. We only need to calculate these for the first pass through -
1400 ! the predictor step. They are reused as is for the corrector step.
1401 ! For third-order RK, we need to recompute these after the first
1402 ! predictor because we may have changed the small timestep -> grid%dts.
1403
1404 CALL wrf_debug ( 200 , ' call calc_coef_w' )
1405
1406 CALL small_step_prep( grid%em_u_1,grid%em_u_2,grid%em_v_1,grid%em_v_2,grid%em_w_1,grid%em_w_2, &
1407 grid%em_t_1,grid%em_t_2,grid%em_ph_1,grid%em_ph_2, &
1408 grid%em_mub, grid%em_mu_1, grid%em_mu_2, &
1409 grid%em_muu, muus, grid%em_muv, muvs, &
1410 grid%em_mut, grid%em_muts, grid%em_mudf, &
1411 grid%em_u_save, grid%em_v_save, w_save, &
1412 grid%em_t_save, ph_save, mu_save, &
1413 grid%em_ww, ww1, &
1414 grid%em_dnw, c2a, grid%em_pb, grid%em_p, grid%em_alt, &
1415 grid%msfu, grid%msfv, grid%msft, &
1416 rk_step, &
1417 ids, ide, jds, jde, kds, kde, &
1418 ims, ime, jms, jme, kms, kme, &
1419 grid%i_start(ij), grid%i_end(ij), &
1420 grid%j_start(ij), grid%j_end(ij), &
1421 k_start , k_end )
1422 CALL calc_p_rho( grid%em_al, grid%em_p, grid%em_ph_2, &
1423 grid%em_alt, grid%em_t_2, grid%em_t_save, c2a, pm1, &
1424 grid%em_mu_2, grid%em_muts, grid%em_znu, t0, &
1425 grid%em_rdnw, grid%em_dnw, grid%smdiv, &
1426 config_flags%non_hydrostatic, 0, &
1427 ids, ide, jds, jde, kds, kde, &
1428 ims, ime, jms, jme, kms, kme, &
1429 grid%i_start(ij), grid%i_end(ij), &
1430 grid%j_start(ij), grid%j_end(ij), &
1431 k_start , k_end )
1432
1433 IF (config_flags%non_hydrostatic) &
1434 CALL calc_coef_w( a,alpha,gamma, &
1435 grid%em_mut, cqw, &
1436 grid%em_rdn, grid%em_rdnw, c2a, &
1437 dts_rk, g, grid%epssm, &
1438 ids, ide, jds, jde, kds, kde, &
1439 ims, ime, jms, jme, kms, kme, &
1440 grid%i_start(ij), grid%i_end(ij), &
1441 grid%j_start(ij), grid%j_end(ij), &
1442 k_start , k_end )
1443
1444
1445 ENDDO
1446 !$OMP END PARALLEL DO
1447 BENCH_END(small_step_prep_tim)
1448
1449
1450 #ifdef DM_PARALLEL
1451 !-----------------------------------------------------------------------
1452 ! Stencils for patch communications (WCS, 29 June 2001)
1453 ! Note: the small size of this halo exchange reflects the
1454 ! fact that we are carrying the uncoupled variables
1455 ! as state variables in the mass coordinate model, as
1456 ! opposed to the coupled variables as in the height
1457 ! coordinate model.
1458 !
1459 ! * * * * *
1460 ! * * * * * * * * *
1461 ! * + * * + * * * + * *
1462 ! * * * * * * * * *
1463 ! * * * * *
1464 !
1465 ! 3D variables - note staggering! grid%em_ph_2(grid%em_z), grid%em_u_save(X), grid%em_v_save(Y)
1466 !
1467 !j grid%em_ph_2 x
1468 !j grid%em_al x
1469 !j grid%em_p x
1470 !j grid%em_t_1 x
1471 !j grid%em_t_save x
1472 !j grid%em_u_save x
1473 !j grid%em_v_save x
1474 !
1475 ! the following are 2D (xy) variables
1476 !
1477 !j grid%em_mu_1 x
1478 !j grid%em_mu_2 x
1479 !j grid%em_mudf x
1480 !--------------------------------------------------------------
1481 # include "HALO_EM_B.inc"
1482 # include "PERIOD_BDY_EM_B.inc"
1483 #endif
1484
1485 BENCH_START(set_phys_bc2_tim)
1486 !$OMP PARALLEL DO &
1487 !$OMP PRIVATE ( ij )
1488
1489 DO ij = 1 , grid%num_tiles
1490
1491 CALL set_physical_bc3d( grid%em_ru_tend, 'u', config_flags, &
1492 ids, ide, jds, jde, kds, kde, &
1493 ims, ime, jms, jme, kms, kme, &
1494 ips, ipe, jps, jpe, kps, kpe, &
1495 grid%i_start(ij), grid%i_end(ij), &
1496 grid%j_start(ij), grid%j_end(ij), &
1497 k_start , k_end )
1498
1499 CALL set_physical_bc3d( grid%em_rv_tend, 'v', config_flags, &
1500 ids, ide, jds, jde, kds, kde, &
1501 ims, ime, jms, jme, kms, kme, &
1502 ips, ipe, jps, jpe, kps, kpe, &
1503 grid%i_start(ij), grid%i_end(ij), &
1504 grid%j_start(ij), grid%j_end(ij), &
1505 k_start , k_end )
1506
1507 CALL set_physical_bc3d( grid%em_ph_2, 'w', config_flags, &
1508 ids, ide, jds, jde, kds, kde, &
1509 ims, ime, jms, jme, kms, kme, &
1510 ips, ipe, jps, jpe, kps, kpe, &
1511 grid%i_start(ij), grid%i_end(ij), &
1512 grid%j_start(ij), grid%j_end(ij), &
1513 k_start , k_end )
1514
1515 CALL set_physical_bc3d( grid%em_al, 'p', config_flags, &
1516 ids, ide, jds, jde, kds, kde, &
1517 ims, ime, jms, jme, kms, kme, &
1518 ips, ipe, jps, jpe, kps, kpe, &
1519 grid%i_start(ij), grid%i_end(ij), &
1520 grid%j_start(ij), grid%j_end(ij), &
1521 k_start , k_end )
1522
1523 CALL set_physical_bc3d( grid%em_p, 'p', config_flags, &
1524 ids, ide, jds, jde, kds, kde, &
1525 ims, ime, jms, jme, kms, kme, &
1526 ips, ipe, jps, jpe, kps, kpe, &
1527 grid%i_start(ij), grid%i_end(ij), &
1528 grid%j_start(ij), grid%j_end(ij), &
1529 k_start , k_end )
1530
1531 CALL set_physical_bc3d( grid%em_t_1, 'p', config_flags, &
1532 ids, ide, jds, jde, kds, kde, &
1533 ims, ime, jms, jme, kms, kme, &
1534 ips, ipe, jps, jpe, kps, kpe, &
1535 grid%i_start(ij), grid%i_end(ij), &
1536 grid%j_start(ij), grid%j_end(ij), &
1537 k_start , k_end )
1538
1539 CALL set_physical_bc3d( grid%em_t_save, 't', config_flags, &
1540 ids, ide, jds, jde, kds, kde, &
1541 ims, ime, jms, jme, kms, kme, &
1542 ips, ipe, jps, jpe, kps, kpe, &
1543 grid%i_start(ij), grid%i_end(ij), &
1544 grid%j_start(ij), grid%j_end(ij), &
1545 k_start , k_end )
1546
1547 CALL set_physical_bc2d( grid%em_mu_1, 't', config_flags, &
1548 ids, ide, jds, jde, &
1549 ims, ime, jms, jme, &
1550 ips, ipe, jps, jpe, &
1551 grid%i_start(ij), grid%i_end(ij), &
1552 grid%j_start(ij), grid%j_end(ij) )
1553
1554 CALL set_physical_bc2d( grid%em_mu_2, 't', config_flags, &
1555 ids, ide, jds, jde, &
1556 ims, ime, jms, jme, &
1557 ips, ipe, jps, jpe, &
1558 grid%i_start(ij), grid%i_end(ij), &
1559 grid%j_start(ij), grid%j_end(ij) )
1560
1561 CALL set_physical_bc2d( grid%em_mudf, 't', config_flags, &
1562 ids, ide, jds, jde, &
1563 ims, ime, jms, jme, &
1564 ips, ipe, jps, jpe, &
1565 grid%i_start(ij), grid%i_end(ij), &
1566 grid%j_start(ij), grid%j_end(ij) )
1567
1568 END DO
1569 !$OMP END PARALLEL DO
1570 BENCH_END(set_phys_bc2_tim)
1571
1572 small_steps : DO iteration = 1 , number_of_small_timesteps
1573
1574 ! Boundary condition time (or communication time).
1575
1576 #ifdef DM_PARALLEL
1577 # include "PERIOD_BDY_EM_B.inc"
1578 #endif
1579
1580
1581 !$OMP PARALLEL DO &
1582 !$OMP PRIVATE ( ij )
1583
1584 DO ij = 1 , grid%num_tiles
1585
1586 BENCH_START(advance_uv_tim)
1587 CALL advance_uv ( grid%em_u_2, grid%em_ru_tend, grid%em_v_2, grid%em_rv_tend, &
1588 grid%em_p, grid%em_pb, &
1589 grid%em_ph_2, grid%em_php, grid%em_alt, grid%em_al, grid%em_mu_2, &
1590 grid%em_muu, cqu, grid%em_muv, cqv, grid%em_mudf, &
1591 grid%rdx, grid%rdy, dts_rk, &
1592 grid%cf1, grid%cf2, grid%cf3, grid%em_fnm, grid%em_fnp, &
1593 grid%emdiv, &
1594 grid%em_rdnw, config_flags,grid%spec_zone, &
1595 config_flags%non_hydrostatic, &
1596 ids, ide, jds, jde, kds, kde, &
1597 ims, ime, jms, jme, kms, kme, &
1598 grid%i_start(ij), grid%i_end(ij), &
1599 grid%j_start(ij), grid%j_end(ij), &
1600 k_start , k_end )
1601 BENCH_END(advance_uv_tim)
1602
1603 BENCH_START(spec_bdy_uv_tim)
1604 IF( config_flags%specified .or. config_flags%nested ) THEN
1605 CALL spec_bdyupdate(grid%em_u_2, grid%em_ru_tend, dts_rk, &
1606 'u' , config_flags, &
1607 grid%spec_zone, &
1608 ids,ide, jds,jde, kds,kde, & ! domain dims
1609 ims,ime, jms,jme, kms,kme, & ! memory dims
1610 ips,ipe, jps,jpe, kps,kpe, & ! patch dims
1611 grid%i_start(ij), grid%i_end(ij), &
1612 grid%j_start(ij), grid%j_end(ij), &
1613 k_start , k_end )
1614
1615 CALL spec_bdyupdate(grid%em_v_2, grid%em_rv_tend, dts_rk, &
1616 'v' , config_flags, &
1617 grid%spec_zone, &
1618 ids,ide, jds,jde, kds,kde, & ! domain dims
1619 ims,ime, jms,jme, kms,kme, & ! memory dims
1620 ips,ipe, jps,jpe, kps,kpe, & ! patch dims
1621 grid%i_start(ij), grid%i_end(ij), &
1622 grid%j_start(ij), grid%j_end(ij), &
1623 k_start , k_end )
1624
1625 ENDIF
1626 BENCH_END(spec_bdy_uv_tim)
1627
1628 END DO
1629 !$OMP END PARALLEL DO
1630
1631 #ifdef DM_PARALLEL
1632 !
1633 ! Stencils for patch communications (WCS, 29 June 2001)
1634 !
1635 ! * *
1636 ! * + * * + * +
1637 ! * *
1638 !
1639 ! grid%em_u_2 x
1640 ! grid%em_v_2 x
1641 !
1642 # include "HALO_EM_C.inc"
1643 #endif
1644
1645 !$OMP PARALLEL DO &
1646 !$OMP PRIVATE ( ij )
1647
1648 DO ij = 1 , grid%num_tiles
1649
1650 ! advance the mass in the column, theta, and calculate grid%em_ww
1651
1652 BENCH_START(advance_mu_t_tim)
1653 CALL advance_mu_t( grid%em_ww, ww1, grid%em_u_2, grid%em_u_save, grid%em_v_2, grid%em_v_save, &
1654 grid%em_mu_2, grid%em_mut, muave, grid%em_muts, grid%em_muu, grid%em_muv, &
1655 grid%em_mudf, grid%em_ru_m, grid%em_rv_m, grid%em_ww_m, &
1656 grid%em_t_2, grid%em_t_save, t_2save, t_tend, &
1657 mu_tend, &
1658 grid%rdx, grid%rdy, dts_rk, grid%epssm, &
1659 grid%em_dnw, grid%em_fnm, grid%em_fnp, grid%em_rdnw, &
1660 grid%msfu, grid%msfv, grid%msft, &
1661 iteration, config_flags, &
1662 ids, ide, jds, jde, kds, kde, &
1663 ims, ime, jms, jme, kms, kme, &
1664 grid%i_start(ij), grid%i_end(ij), &
1665 grid%j_start(ij), grid%j_end(ij), &
1666 k_start , k_end )
1667 BENCH_END(advance_mu_t_tim)
1668
1669 BENCH_START(spec_bdy_t_tim)
1670 IF( config_flags%specified .or. config_flags%nested ) THEN
1671
1672 CALL spec_bdyupdate(grid%em_t_2, t_tend, dts_rk, &
1673 't' , config_flags, &
1674 grid%spec_zone, &
1675 ids,ide, jds,jde, kds,kde, & ! domain dims
1676 ims,ime, jms,jme, kms,kme, & ! memory dims
1677 ips,ipe, jps,jpe, kps,kpe, & ! patch dims
1678 grid%i_start(ij), grid%i_end(ij), &
1679 grid%j_start(ij), grid%j_end(ij), &
1680 k_start , k_end )
1681
1682 CALL spec_bdyupdate(grid%em_mu_2, mu_tend, dts_rk, &
1683 'm' , config_flags, &
1684 grid%spec_zone, &
1685 ids,ide, jds,jde, 1 ,1 , & ! domain dims
1686 ims,ime, jms,jme, 1 ,1 , & ! memory dims
1687 ips,ipe, jps,jpe, 1 ,1 , & ! patch dims
1688 grid%i_start(ij), grid%i_end(ij), &
1689 grid%j_start(ij), grid%j_end(ij), &
1690 1 , 1 )
1691
1692 CALL spec_bdyupdate(grid%em_muts, mu_tend, dts_rk, &
1693 'm' , config_flags, &
1694 grid%spec_zone, &
1695 ids,ide, jds,jde, 1 ,1 , & ! domain dims
1696 ims,ime, jms,jme, 1 ,1 , & ! memory dims
1697 ips,ipe, jps,jpe, 1 ,1 , & ! patch dims
1698 grid%i_start(ij), grid%i_end(ij), &
1699 grid%j_start(ij), grid%j_end(ij), &
1700 1 , 1 )
1701 ENDIF
1702 BENCH_END(spec_bdy_t_tim)
1703
1704 ! sumflux accumulates the time-averaged mass flux
1705 ! (time averaged over the acoustic steps) for use
1706 ! in the scalar advection (flux divergence). Using
1707 ! time averaged values gives us exact scalar conservation.
1708
1709 BENCH_START(sumflux_tim)
1710 CALL sumflux ( grid%em_u_2, grid%em_v_2, grid%em_ww, &
1711 grid%em_u_save, grid%em_v_save, ww1, &
1712 grid%em_muu, grid%em_muv, &
1713 grid%em_ru_m, grid%em_rv_m, grid%em_ww_m, grid%epssm, &
1714 grid%msfu, grid%msfv, &
1715 iteration, number_of_small_timesteps, &
1716 ids, ide, jds, jde, kds, kde, &
1717 ims, ime, jms, jme, kms, kme, &
1718 grid%i_start(ij), grid%i_end(ij), &
1719 grid%j_start(ij), grid%j_end(ij), &
1720 k_start , k_end )
1721 BENCH_END(sumflux_tim)
1722 
1723 ! small (acoustic) step for the vertical momentum,
1724 ! density and coupled potential temperature.
1725 
1726 
! The vertically implicit acoustic-step update of w, the geopotential
! and coupled theta is only performed for the non-hydrostatic solver;
! otherwise this sub-step is skipped entirely.
1727 BENCH_START(advance_w_tim)
1728 IF ( config_flags%non_hydrostatic ) THEN
1729 CALL advance_w( grid%em_w_2, rw_tend, grid%em_ww, w_save, grid%em_u_2, grid%em_v_2, &
1730 grid%em_mu_2, grid%em_mut, muave, grid%em_muts, &
1731 t_2save, grid%em_t_2, grid%em_t_save, &
1732 grid%em_ph_2, ph_save, grid%em_phb, ph_tend, &
1733 grid%ht, c2a, cqw, grid%em_alt, grid%em_alb, &
1734 a, alpha, gamma, &
1735 grid%rdx, grid%rdy, dts_rk, t0, grid%epssm, &
1736 grid%em_dnw, grid%em_fnm, grid%em_fnp, grid%em_rdnw, grid%em_rdn, &
1737 grid%cf1, grid%cf2, grid%cf3, grid%msft, &
1738 config_flags, &
1739 ids,ide, jds,jde, kds,kde, & ! domain dims
1740 ims,ime, jms,jme, kms,kme, & ! memory dims
1741 grid%i_start(ij), grid%i_end(ij), &
1742 grid%j_start(ij), grid%j_end(ij), &
1743 k_start , k_end )
1744 ENDIF
1745 BENCH_END(advance_w_tim)
1746 
! Lateral-boundary treatment for the fields just advanced; applies only
! to specified (real-data) and nested domains.
1747 IF( config_flags%specified .or. config_flags%nested ) THEN
1748 
1749 BENCH_START(spec_bdynhyd_tim)
1750 IF (config_flags%non_hydrostatic) THEN
1751 CALL spec_bdyupdate_ph( ph_save, grid%em_ph_2, ph_tend, mu_tend, grid%em_muts, dts_rk, &
1752 'h' , config_flags, &
1753 grid%spec_zone, &
1754 ids,ide, jds,jde, kds,kde, & ! domain dims
1755 ims,ime, jms,jme, kms,kme, & ! memory dims
1756 ips,ipe, jps,jpe, kps,kpe, & ! patch dims
1757 grid%i_start(ij), grid%i_end(ij), &
1758 grid%j_start(ij), grid%j_end(ij), &
1759 k_start , k_end )
! w in the specified zone: specified domains use a zero-gradient
! condition; nested domains advance w there with its tendency instead.
1760 IF( config_flags%specified ) THEN
1761 CALL zero_grad_bdy ( grid%em_w_2, &
1762 'w' , config_flags, &
1763 grid%spec_zone, &
1764 ids,ide, jds,jde, kds,kde, & ! domain dims
1765 ims,ime, jms,jme, kms,kme, & ! memory dims
1766 ips,ipe, jps,jpe, kps,kpe, & ! patch dims
1767 grid%i_start(ij), grid%i_end(ij), &
1768 grid%j_start(ij), grid%j_end(ij), &
1769 k_start , k_end )
1770 ELSE
1771 CALL spec_bdyupdate ( grid%em_w_2, rw_tend, dts_rk, &
1772 'h' , config_flags, &
1773 grid%spec_zone, &
1774 ids,ide, jds,jde, kds,kde, & ! domain dims
1775 ims,ime, jms,jme, kms,kme, & ! memory dims
1776 ips,ipe, jps,jpe, kps,kpe, & ! patch dims
1777 grid%i_start(ij), grid%i_end(ij), &
1778 grid%j_start(ij), grid%j_end(ij), &
1779 k_start , k_end )
1780 ENDIF
1781 ENDIF
1782 BENCH_END(spec_bdynhyd_tim)
1783 ENDIF
1784 
! Diagnose pressure and inverse density (al) on this tile from the
! fields just advanced by the acoustic step.
1785 BENCH_START(cald_p_rho_tim)
1786 CALL calc_p_rho( grid%em_al, grid%em_p, grid%em_ph_2, &
1787 grid%em_alt, grid%em_t_2, grid%em_t_save, c2a, pm1, &
1788 grid%em_mu_2, grid%em_muts, grid%em_znu, t0, &
1789 grid%em_rdnw, grid%em_dnw, grid%smdiv, &
1790 config_flags%non_hydrostatic, iteration, &
1791 ids, ide, jds, jde, kds, kde, &
1792 ims, ime, jms, jme, kms, kme, &
1793 grid%i_start(ij), grid%i_end(ij), &
1794 grid%j_start(ij), grid%j_end(ij), &
1795 k_start , k_end )
1796 BENCH_END(cald_p_rho_tim)
1797 
1798 ENDDO
1799 !$OMP END PARALLEL DO
1800 
1801 #ifdef DM_PARALLEL
1802 !
1803 ! Stencils for patch communications (WCS, 29 June 2001)
1804 !
1805 ! * *
1806 ! * + * * + * +
1807 ! * *
1808 !
1809 ! grid%em_ph_2 x
1810 ! grid%em_al x
1811 ! grid%em_p x
1812 !
1813 ! 2D variables (x,y)
1814 !
1815 ! grid%em_mu_2 x
1816 ! grid%em_muts x
1817 ! grid%em_mudf x
1818 
1819 # include "HALO_EM_C2.inc"
1820 # include "PERIOD_BDY_EM_B3.inc"
1821 #endif
1822 
! Re-impose physical boundary conditions on the acoustic-step fields so
! the next small timestep starts from consistent halo/boundary values.
1823 BENCH_START(phys_bc_tim)
1824 !$OMP PARALLEL DO &
1825 !$OMP PRIVATE ( ij )
1826 
1827 DO ij = 1 , grid%num_tiles
1828 
1829 ! boundary condition set for next small timestep
1830 
1831 CALL set_physical_bc3d( grid%em_ph_2, 'w', config_flags, &
1832 ids, ide, jds, jde, kds, kde, &
1833 ims, ime, jms, jme, kms, kme, &
1834 ips, ipe, jps, jpe, kps, kpe, &
1835 grid%i_start(ij), grid%i_end(ij), &
1836 grid%j_start(ij), grid%j_end(ij), &
1837 k_start , k_end )
1838 
1839 CALL set_physical_bc3d( grid%em_al, 'p', config_flags, &
1840 ids, ide, jds, jde, kds, kde, &
1841 ims, ime, jms, jme, kms, kme, &
1842 ips, ipe, jps, jpe, kps, kpe, &
1843 grid%i_start(ij), grid%i_end(ij), &
1844 grid%j_start(ij), grid%j_end(ij), &
1845 k_start , k_end )
1846 
1847 CALL set_physical_bc3d( grid%em_p, 'p', config_flags, &
1848 ids, ide, jds, jde, kds, kde, &
1849 ims, ime, jms, jme, kms, kme, &
1850 ips, ipe, jps, jpe, kps, kpe, &
1851 grid%i_start(ij), grid%i_end(ij), &
1852 grid%j_start(ij), grid%j_end(ij), &
1853 k_start , k_end )
1854 
1855 CALL set_physical_bc2d( grid%em_muts, 't', config_flags, &
1856 ids, ide, jds, jde, &
1857 ims, ime, jms, jme, &
1858 ips, ipe, jps, jpe, &
1859 grid%i_start(ij), grid%i_end(ij), &
1860 grid%j_start(ij), grid%j_end(ij) )
1861 
1862 CALL set_physical_bc2d( grid%em_mu_2, 't', config_flags, &
1863 ids, ide, jds, jde, &
1864 ims, ime, jms, jme, &
1865 ips, ipe, jps, jpe, &
1866 grid%i_start(ij), grid%i_end(ij), &
1867 grid%j_start(ij), grid%j_end(ij) )
1868 
1869 CALL set_physical_bc2d( grid%em_mudf, 't', config_flags, &
1870 ids, ide, jds, jde, &
1871 ims, ime, jms, jme, &
1872 ips, ipe, jps, jpe, &
1873 grid%i_start(ij), grid%i_end(ij), &
1874 grid%j_start(ij), grid%j_end(ij) )
1875 
1876 END DO
1877 !$OMP END PARALLEL DO
1878 BENCH_END(phys_bc_tim)
1879 
1880 END DO small_steps
1881
1882 !$OMP PARALLEL DO &
1883 !$OMP PRIVATE ( ij )
1884 
1885 DO ij = 1 , grid%num_tiles
1886 
1887 CALL wrf_debug ( 200 , ' call rk_small_finish' )
1888 
1889 ! change time-perturbation variables back to
1890 ! full perturbation variables.
1891 ! first get updated mu at u and v points
1892 
1893 BENCH_START(calc_mu_uv_tim)
1894 CALL calc_mu_uv_1 ( config_flags, &
1895 grid%em_muts, muus, muvs, &
1896 ids, ide, jds, jde, kds, kde, &
1897 ims, ime, jms, jme, kms, kme, &
1898 grid%i_start(ij), grid%i_end(ij), &
1899 grid%j_start(ij), grid%j_end(ij), &
1900 k_start , k_end )
1901 BENCH_END(calc_mu_uv_tim)
! Fold the accumulated acoustic-step increments back into the full
! perturbation state (u,v,w,t,ph,mu) using the saved reference values.
1902 BENCH_START(small_step_finish_tim)
1903 CALL small_step_finish( grid%em_u_2, grid%em_u_1, grid%em_v_2, grid%em_v_1, grid%em_w_2, grid%em_w_1, &
1904 grid%em_t_2, grid%em_t_1, grid%em_ph_2, grid%em_ph_1, grid%em_ww, ww1, &
1905 grid%em_mu_2, grid%em_mu_1, &
1906 grid%em_mut, grid%em_muts, grid%em_muu, muus, grid%em_muv, muvs, &
1907 grid%em_u_save, grid%em_v_save, w_save, &
1908 grid%em_t_save, ph_save, mu_save, &
1909 grid%msfu, grid%msfv, grid%msft, &
1910 grid%h_diabatic, &
1911 number_of_small_timesteps,dts_rk, &
1912 rk_step, rk_order, &
1913 ids, ide, jds, jde, kds, kde, &
1914 ims, ime, jms, jme, kms, kme, &
1915 grid%i_start(ij), grid%i_end(ij), &
1916 grid%j_start(ij), grid%j_end(ij), &
1917 k_start , k_end )
1918 ! call to set ru_m, rv_m and ww_m b.c's for PD advection
1919 
! These time-averaged mass fluxes are consumed later by rk_scalar_tend
! for the positive-definite scalar advection.
! NOTE(review): the test hard-codes 3 rather than rk_order, which the
! rest of this file uses for "last RK substep"; for rk_order /= 3 these
! BCs would never be set -- confirm this is intentional.
1920 IF (rk_step == 3) THEN
1921 
1922 CALL set_physical_bc3d( grid%em_ru_m, 'u', config_flags, &
1923 ids, ide, jds, jde, kds, kde, &
1924 ims, ime, jms, jme, kms, kme, &
1925 ips, ipe, jps, jpe, kps, kpe, &
1926 grid%i_start(ij), grid%i_end(ij), &
1927 grid%j_start(ij), grid%j_end(ij), &
1928 k_start , k_end )
1929 
1930 CALL set_physical_bc3d( grid%em_rv_m, 'v', config_flags, &
1931 ids, ide, jds, jde, kds, kde, &
1932 ims, ime, jms, jme, kms, kme, &
1933 ips, ipe, jps, jpe, kps, kpe, &
1934 grid%i_start(ij), grid%i_end(ij), &
1935 grid%j_start(ij), grid%j_end(ij), &
1936 k_start , k_end )
1937 
1938 CALL set_physical_bc3d( grid%em_ww_m, 'w', config_flags, &
1939 ids, ide, jds, jde, kds, kde, &
1940 ims, ime, jms, jme, kms, kme, &
1941 ips, ipe, jps, jpe, kps, kpe, &
1942 grid%i_start(ij), grid%i_end(ij), &
1943 grid%j_start(ij), grid%j_end(ij), &
1944 k_start , k_end )
1945 
1946 CALL set_physical_bc2d( grid%em_mut, 't', config_flags, &
1947 ids, ide, jds, jde, &
1948 ims, ime, jms, jme, &
1949 ips, ipe, jps, jpe, &
1950 grid%i_start(ij), grid%i_end(ij), &
1951 grid%j_start(ij), grid%j_end(ij) )
1952 
1953 END IF
1954 
1955 BENCH_END(small_step_finish_tim)
1956 
1957 END DO
1958 !$OMP END PARALLEL DO
1959
1960 !-----------------------------------------------------------------------
1961 ! add in physics tendency first if positive definite advection is used.
1962 ! pd advection applies advective flux limiter on last runge-kutta step
1963 !-----------------------------------------------------------------------
1964 ! first moisture
1965 
1966 IF (config_flags%pd_moist .and. (rk_step == rk_order)) THEN
1967 
1968 !$OMP PARALLEL DO &
1969 !$OMP PRIVATE ( ij )
1970 DO ij = 1 , grid%num_tiles
1971 CALL wrf_debug ( 200 , ' call rk_update_scalar_pd' )
! Pre-apply the physics tendency to moist_old before the PD flux
! limiter runs in the advection step below.
! NOTE(review): grid%em_mu_1 is passed for both the old- and new-time
! mu arguments here, whereas rk_update_scalar below receives mu_1/mu_2
! -- presumably intentional for this old-time tendency add; confirm
! against rk_update_scalar_pd's dummy argument list.
1972 do im = PARAM_FIRST_SCALAR, num_3d_m
1973 CALL rk_update_scalar_pd( im, im, &
1974 moist_old(ims,kms,jms,im), &
1975 moist_tend(ims,kms,jms,im), &
1976 grid%msft, &
1977 grid%em_mu_1, grid%em_mu_1, grid%em_mub, &
1978 rk_step, dt_rk, grid%spec_zone, &
1979 config_flags, &
1980 ids, ide, jds, jde, kds, kde, &
1981 ims, ime, jms, jme, kms, kme, &
1982 grid%i_start(ij), grid%i_end(ij), &
1983 grid%j_start(ij), grid%j_end(ij), &
1984 k_start , k_end )
1985 ENDDO
1986 END DO
1987 !$OMP END PARALLEL DO
1988 
! Exchange moist_old halos wide enough for the chosen advection order;
! the PD scheme is unavailable under plain RSL (requires RSL-LITE).
1989 !---------------------- positive definite bc call
1990 #ifdef DM_PARALLEL
1991 if(config_flags%pd_moist) then
1992 #ifndef RSL
1993 IF ( config_flags%h_sca_adv_order <= 4 ) THEN
1994 # include "HALO_EM_MOIST_OLD_E_5.inc"
1995 ELSE IF ( config_flags%h_sca_adv_order <= 6 ) THEN
1996 # include "HALO_EM_MOIST_OLD_E_7.inc"
1997 ELSE
1998 WRITE(wrf_err_message,*)'solve_em: invalid h_sca_adv_order = ',config_flags%h_sca_adv_order
1999 CALL wrf_error_fatal(TRIM(wrf_err_message))
2000 ENDIF
2001 #else
2002 WRITE(wrf_err_message,*)'cannot use pd scheme with RSL - use RSL-LITE'
2003 CALL wrf_error_fatal(TRIM(wrf_err_message))
2004 #endif
2005 endif
2006 #endif
2007 
2008 #ifdef DM_PARALLEL
2009 # include "PERIOD_BDY_EM_MOIST_OLD.inc"
2010 #endif
2011 
! Reset physical BCs on the updated moist_old arrays (guard and loop
! both use the moist species count num_3d_m).
2012 !$OMP PARALLEL DO &
2013 !$OMP PRIVATE ( ij )
2014 DO ij = 1 , grid%num_tiles
2015 IF (num_3d_m >= PARAM_FIRST_SCALAR) THEN
2016 DO im = PARAM_FIRST_SCALAR , num_3d_m
2017 CALL set_physical_bc3d( moist_old(ims,kms,jms,im), 'p', config_flags, &
2018 ids, ide, jds, jde, kds, kde, &
2019 ims, ime, jms, jme, kms, kme, &
2020 ips, ipe, jps, jpe, kps, kpe, &
2021 grid%i_start(ij), grid%i_end(ij), &
2022 grid%j_start(ij), grid%j_end(ij), &
2023 k_start , k_end )
2024 END DO
2025 ENDIF
2026 END DO
2027 !$OMP END PARALLEL DO
2028 
2029 END IF ! end if for pd_moist
2030
2031 ! scalars
2032 
! Same pre-limiter physics-tendency update as the moist section above,
! but for the generic 3D scalar arrays (scalar_old/scalar_tend).
2033 IF (config_flags%pd_scalar .and. (rk_step == rk_order)) THEN
2034 
2035 !$OMP PARALLEL DO &
2036 !$OMP PRIVATE ( ij )
2037 
2038 DO ij = 1 , grid%num_tiles
2039 CALL wrf_debug ( 200 , ' call rk_update_scalar_pd' )
2040 do im = PARAM_FIRST_SCALAR, num_3d_s
2041 CALL rk_update_scalar_pd( im, im, &
2042 scalar_old(ims,kms,jms,im), &
2043 scalar_tend(ims,kms,jms,im), &
2044 grid%msft, &
2045 grid%em_mu_1, grid%em_mu_1, grid%em_mub, &
2046 rk_step, dt_rk, grid%spec_zone, &
2047 config_flags, &
2048 ids, ide, jds, jde, kds, kde, &
2049 ims, ime, jms, jme, kms, kme, &
2050 grid%i_start(ij), grid%i_end(ij), &
2051 grid%j_start(ij), grid%j_end(ij), &
2052 k_start , k_end )
2053 ENDDO
2054 ENDDO
2055 !$OMP END PARALLEL DO
2056 
! Halo exchange for scalar_old sized by the scalar advection order;
! PD scheme is fatal under plain RSL (RSL-LITE required).
2057 !---------------------- positive definite bc call
2058 #ifdef DM_PARALLEL
2059 if(config_flags%pd_scalar) then
2060 #ifndef RSL
2061 IF ( config_flags%h_sca_adv_order <= 4 ) THEN
2062 # include "HALO_EM_SCALAR_OLD_E_5.inc"
2063 ELSE IF ( config_flags%h_sca_adv_order <= 6 ) THEN
2064 # include "HALO_EM_SCALAR_OLD_E_7.inc"
2065 ELSE
2066 WRITE(wrf_err_message,*)'solve_em: invalid h_sca_adv_order = ',config_flags%h_sca_adv_order
2067 CALL wrf_error_fatal(TRIM(wrf_err_message))
2068 ENDIF
2069 #else
2070 WRITE(wrf_err_message,*)'cannot use pd scheme with RSL - use RSL-LITE'
2071 CALL wrf_error_fatal(TRIM(wrf_err_message))
2072 #endif
2073 endif
2074 #endif
2075 
2076 #ifdef DM_PARALLEL
2077 # include "PERIOD_BDY_EM_SCALAR_OLD.inc"
2078 #endif
2079 !$OMP PARALLEL DO &
2080 !$OMP PRIVATE ( ij )
2081 
2082 DO ij = 1 , grid%num_tiles
! Reset physical BCs on the PD-updated scalar_old arrays.
! BUGFIX: this guard previously tested num_3d_m (the moist species
! count) while the loop below runs over num_3d_s (the scalar count);
! when the two counts differ the loop could be wrongly skipped or
! wrongly entered.  Test the scalar count, matching the update loop
! in the pd_scalar section above.
2083 IF (num_3d_s >= PARAM_FIRST_SCALAR) THEN
2084 DO im = PARAM_FIRST_SCALAR , num_3d_s
2085 CALL set_physical_bc3d( scalar_old(ims,kms,jms,im), 'p', config_flags, &
2086 ids, ide, jds, jde, kds, kde, &
2087 ims, ime, jms, jme, kms, kme, &
2088 ips, ipe, jps, jpe, kps, kpe, &
2089 grid%i_start(ij), grid%i_end(ij), &
2090 grid%j_start(ij), grid%j_end(ij), &
2091 k_start , k_end )
2092 END DO
2093 ENDIF
2094 END DO
2095 !$OMP END PARALLEL DO
2096 
2097 END IF ! end if for pd_scalar
2098
2099 ! chem
2100 
! Same pre-limiter physics-tendency update for the chemistry species.
2101 IF (config_flags%pd_chem .and. (rk_step == rk_order)) THEN
2102 
2103 ! write(6,*) ' pd advection for chem '
2104 
2105 !$OMP PARALLEL DO &
2106 !$OMP PRIVATE ( ij )
2107 DO ij = 1 , grid%num_tiles
2108 CALL wrf_debug ( 200 , ' call rk_update_scalar_pd' )
2109 do im = PARAM_FIRST_SCALAR, num_3d_c
2110 CALL rk_update_scalar_pd( im, im, &
2111 chem_old(ims,kms,jms,im), &
2112 chem_tend(ims,kms,jms,im), &
2113 grid%msft, &
2114 grid%em_mu_1, grid%em_mu_1, grid%em_mub, &
2115 rk_step, dt_rk, grid%spec_zone, &
2116 config_flags, &
2117 ids, ide, jds, jde, kds, kde, &
2118 ims, ime, jms, jme, kms, kme, &
2119 grid%i_start(ij), grid%i_end(ij), &
2120 grid%j_start(ij), grid%j_end(ij), &
2121 k_start , k_end )
2122 ENDDO
2123 END DO
2124 !$OMP END PARALLEL DO
2125 
! Halo exchange for chem_old sized by the scalar advection order;
! PD scheme is fatal under plain RSL (RSL-LITE required).
2126 !---------------------- positive definite bc call
2127 #ifdef DM_PARALLEL
2128 if(config_flags%pd_chem) then
2129 #ifndef RSL
2130 IF ( config_flags%h_sca_adv_order <= 4 ) THEN
2131 # include "HALO_EM_CHEM_OLD_E_5.inc"
2132 ELSE IF ( config_flags%h_sca_adv_order <= 6 ) THEN
2133 # include "HALO_EM_CHEM_OLD_E_7.inc"
2134 ELSE
2135 WRITE(wrf_err_message,*)'solve_em: invalid h_sca_adv_order = ',config_flags%h_sca_adv_order
2136 CALL wrf_error_fatal(TRIM(wrf_err_message))
2137 ENDIF
2138 #else
2139 WRITE(wrf_err_message,*)'cannot use pd scheme with RSL - use RSL-LITE'
2140 CALL wrf_error_fatal(TRIM(wrf_err_message))
2141 #endif
2142 endif
2143 #endif
2144 
2145 #ifdef DM_PARALLEL
2146 # include "PERIOD_BDY_EM_CHEM_OLD.inc"
2147 #endif
2148
2149 !$OMP PARALLEL DO &
2150 !$OMP PRIVATE ( ij )
2151 DO ij = 1 , grid%num_tiles
! Reset physical BCs on the PD-updated chem_old arrays.
! BUGFIX: this guard previously tested num_3d_m (the moist species
! count) while the loop below runs over num_3d_c (the chem count);
! when the counts differ the loop could be wrongly skipped or wrongly
! entered.  Test the chem count, matching the update loop in the
! pd_chem section above.
2152 IF (num_3d_c >= PARAM_FIRST_SCALAR) THEN
2153 DO im = PARAM_FIRST_SCALAR , num_3d_c
2154 !print*,"~before set_physical_bc3d, im, grid id=",im,grid%id
2155 CALL set_physical_bc3d( chem_old(ims,kms,jms,im), 'p', config_flags, &
2156 ids, ide, jds, jde, kds, kde, &
2157 ims, ime, jms, jme, kms, kme, &
2158 ips, ipe, jps, jpe, kps, kpe, &
2159 grid%i_start(ij), grid%i_end(ij), &
2160 grid%j_start(ij), grid%j_end(ij), &
2161 k_start , k_end )
2162 END DO
2163 ENDIF
2164 END DO
2165 !$OMP END PARALLEL DO
2166 
2167 END IF ! end if for pd_chem
2168
2169 ! tke
2170 
! Same pre-limiter physics-tendency update for TKE; only active with
! the prognostic-TKE closure (km_opt == 2).
2171 IF (config_flags%pd_tke .and. (rk_step == rk_order) &
2172 .and. (config_flags%km_opt .eq. 2) ) THEN
2173 
2174 ! write(6,*) ' pd advection for tke '
2175 
2176 !$OMP PARALLEL DO &
2177 !$OMP PRIVATE ( ij )
2178 
2179 DO ij = 1 , grid%num_tiles
2180 CALL wrf_debug ( 200 , ' call rk_update_scalar_pd' )
2181 CALL rk_update_scalar_pd( 1, 1, &
2182 grid%em_tke_1, &
2183 tke_tend(ims,kms,jms), &
2184 grid%msft, &
2185 grid%em_mu_1, grid%em_mu_1, grid%em_mub, &
2186 rk_step, dt_rk, grid%spec_zone, &
2187 config_flags, &
2188 ids, ide, jds, jde, kds, kde, &
2189 ims, ime, jms, jme, kms, kme, &
2190 grid%i_start(ij), grid%i_end(ij), &
2191 grid%j_start(ij), grid%j_end(ij), &
2192 k_start , k_end )
2193 END DO
2194 !$OMP END PARALLEL DO
2195 
! Halo exchange for tke_1 sized by the scalar advection order;
! PD scheme is fatal under plain RSL (RSL-LITE required).
2196 !---------------------- positive definite bc call
2197 #ifdef DM_PARALLEL
2198 if(config_flags%pd_tke) then
2199 #ifndef RSL
2200 IF ( config_flags%h_sca_adv_order <= 4 ) THEN
2201 # include "HALO_EM_TKE_OLD_E_5.inc"
2202 ELSE IF ( config_flags%h_sca_adv_order <= 6 ) THEN
2203 # include "HALO_EM_TKE_OLD_E_7.inc"
2204 ELSE
2205 WRITE(wrf_err_message,*)'solve_em: invalid h_sca_adv_order = ',config_flags%h_sca_adv_order
2206 CALL wrf_error_fatal(TRIM(wrf_err_message))
2207 ENDIF
2208 #else
2209 WRITE(wrf_err_message,*)'cannot use pd scheme with RSL - use RSL-LITE'
2210 CALL wrf_error_fatal(TRIM(wrf_err_message))
2211 #endif
2212 endif
2213 #endif
2214 
2215 #ifdef DM_PARALLEL
2216 # include "PERIOD_BDY_EM_TKE_OLD.inc"
2217 #endif
2218 
! Reset physical BCs on the updated tke_1 field.
2219 !$OMP PARALLEL DO &
2220 !$OMP PRIVATE ( ij )
2221 DO ij = 1 , grid%num_tiles
2222 CALL set_physical_bc3d( grid%em_tke_1, 'p', config_flags, &
2223 ids, ide, jds, jde, kds, kde, &
2224 ims, ime, jms, jme, kms, kme, &
2225 ips, ipe, jps, jpe, kps, kpe, &
2226 grid%i_start(ij), grid%i_end(ij), &
2227 grid%j_start(ij), grid%j_end(ij), &
2228 k_start , k_end )
2229 END DO
2230 !$OMP END PARALLEL DO
2231 
2232 !--- end of positive definite physics tendency update
2233 
2234 END IF ! end if for pd_tke
2235 
2236 #ifdef DM_PARALLEL
2237 !
2238 ! Stencils for patch communications (WCS, 29 June 2001)
2239 !
2240 !
2241 ! grid%em_ru_m x
2242 ! grid%em_rv_m x
2243 ! grid%em_ww_m x
2244 ! grid%em_mut x
2245 !
2246 !--------------------------------------------------------------
2247 
2248 # include "HALO_EM_D.inc"
2249 #endif
2250
2251 !<DESCRIPTION>
2252 !<pre>
2253 ! (4) Still within the RK loop, the scalar variables are advanced.
2254 !
2255 ! For the moist and chem variables, each one is advanced
2256 ! individually, using named loops "moist_variable_loop:"
2257 ! and "chem_variable_loop:". Each RK substep begins by
2258 ! calculating the advective tendency, and, for the first RK step,
2259 ! 3D mixing (calling rk_scalar_tend) followed by an update
2260 ! of the scalar (calling rk_scalar_update).
2261 !</pre>
2262 !</DESCRIPTION>
2263
2264
! Advance the moist species for this RK substep: advective (+ mixing on
! step 1) tendency, lateral-boundary relaxation, then the update.
2265 moist_scalar_advance: IF (num_3d_m >= PARAM_FIRST_SCALAR ) THEN
2266 
2267 moist_variable_loop: do im = PARAM_FIRST_SCALAR, num_3d_m
2268 
! Only qv is always advected; other moist species are advected only
! when adv_moist_cond is set.
2269 if (grid%adv_moist_cond .or. im==p_qv ) then
2270 
2271 !$OMP PARALLEL DO &
2272 !$OMP PRIVATE ( ij )
2273 
2274 moist_tile_loop_1: DO ij = 1 , grid%num_tiles
2275 
2276 CALL wrf_debug ( 200 , ' call rk_scalar_tend' )
2277 
2278 BENCH_START(rk_scalar_tend_tim)
2279 
2280 CALL rk_scalar_tend ( im, im, config_flags, &
2281 rk_step, dt_rk, &
2282 grid%em_ru_m, grid%em_rv_m, grid%em_ww_m, &
2283 grid%em_mut, grid%em_mub, grid%em_mu_1, &
2284 grid%em_alt, &
2285 moist_old(ims,kms,jms,im), &
2286 moist(ims,kms,jms,im), &
2287 moist_tend(ims,kms,jms,im), &
2288 advect_tend,grid%rqvften, &
2289 grid%qv_base, .true., grid%em_fnm, grid%em_fnp, &
2290 grid%msfu, grid%msfv, grid%msft, &
2291 grid%rdx, grid%rdy, grid%em_rdn, grid%em_rdnw, grid%khdif, &
2292 grid%kvdif, grid%xkmhd, &
2293 grid%diff_6th_opt, grid%diff_6th_factor, &
2294 config_flags%pd_moist, &
2295 ids, ide, jds, jde, kds, kde, &
2296 ims, ime, jms, jme, kms, kme, &
2297 grid%i_start(ij), grid%i_end(ij), &
2298 grid%j_start(ij), grid%j_end(ij), &
2299 k_start , k_end )
2300 
2301 BENCH_END(rk_scalar_tend_tim)
2302 
! Lateral-boundary relaxation/specification is applied to the tendency
! on the first RK substep only; for specified (non-nested) domains it
! is applied only to qv.
2303 BENCH_START(rlx_bdy_scalar_tim)
2304 IF( ( config_flags%specified .or. config_flags%nested ) .and. rk_step == 1 ) THEN
2305 IF ( im .EQ. P_QV .OR. config_flags%nested ) THEN
2306 CALL relax_bdy_scalar ( moist_tend(ims,kms,jms,im), &
2307 moist(ims,kms,jms,im), grid%em_mut, &
2308 moist_bxs(jms,kms,1,im),moist_bxe(jms,kms,1,im), &
2309 moist_bys(ims,kms,1,im),moist_bye(ims,kms,1,im), &
2310 moist_btxs(jms,kms,1,im),moist_btxe(jms,kms,1,im), &
2311 moist_btys(ims,kms,1,im),moist_btye(ims,kms,1,im), &
2312 config_flags%spec_bdy_width, grid%spec_zone, grid%relax_zone, &
2313 grid%dtbc, grid%fcx, grid%gcx, &
2314 config_flags, &
2315 ids,ide, jds,jde, kds,kde, & ! domain dims
2316 ims,ime, jms,jme, kms,kme, & ! memory dims
2317 ips,ipe, jps,jpe, kps,kpe, & ! patch dims
2318 grid%i_start(ij), grid%i_end(ij), &
2319 grid%j_start(ij), grid%j_end(ij), &
2320 k_start, k_end )
2321 
2322 CALL spec_bdy_scalar ( moist_tend(ims,kms,jms,im), &
2323 moist_bxs(jms,kms,1,im),moist_bxe(jms,kms,1,im), &
2324 moist_bys(ims,kms,1,im),moist_bye(ims,kms,1,im), &
2325 moist_btxs(jms,kms,1,im),moist_btxe(jms,kms,1,im), &
2326 moist_btys(ims,kms,1,im),moist_btye(ims,kms,1,im), &
2327 config_flags%spec_bdy_width, grid%spec_zone, &
2328 config_flags, &
2329 ids,ide, jds,jde, kds,kde, & ! domain dims
2330 ims,ime, jms,jme, kms,kme, & ! memory dims
2331 ips,ipe, jps,jpe, kps,kpe, & ! patch dims
2332 grid%i_start(ij), grid%i_end(ij), &
2333 grid%j_start(ij), grid%j_end(ij), &
2334 k_start, k_end )
2335 ENDIF
2336 ENDIF
2337 BENCH_END(rlx_bdy_scalar_tim)
2338 
2339 ENDDO moist_tile_loop_1
2340 !$OMP END PARALLEL DO
2341 
2342 !$OMP PARALLEL DO &
2343 !$OMP PRIVATE ( ij )
2344 
2345 moist_tile_loop_2: DO ij = 1 , grid%num_tiles
2346 
2347 CALL wrf_debug ( 200 , ' call rk_update_scalar' )
2348 
! Apply the accumulated tendency to produce the substep-updated moist
! field (mu_1/mu_2 provide the old/new column masses).
2349 BENCH_START(update_scal_tim)
2350 CALL rk_update_scalar( im, im, &
2351 moist_old(ims,kms,jms,im), &
2352 moist(ims,kms,jms,im), &
2353 moist_tend(ims,kms,jms,im), &
2354 advect_tend, grid%msft, &
2355 grid%em_mu_1, grid%em_mu_2, grid%em_mub, &
2356 rk_step, dt_rk, grid%spec_zone, &
2357 config_flags, &
2358 ids, ide, jds, jde, kds, kde, &
2359 ims, ime, jms, jme, kms, kme, &
2360 grid%i_start(ij), grid%i_end(ij), &
2361 grid%j_start(ij), grid%j_end(ij), &
2362 k_start , k_end )
2363 BENCH_END(update_scal_tim)
2364 
! Flow-dependent outflow BC in the specified zone for every moist
! species except qv (which got the relax/spec treatment above).
2365 BENCH_START(flow_depbdy_tim)
2366 IF( config_flags%specified ) THEN
2367 IF(im .ne. P_QV)THEN
2368 CALL flow_dep_bdy ( moist(ims,kms,jms,im), &
2369 grid%em_ru_m, grid%em_rv_m, config_flags, &
2370 grid%spec_zone, &
2371 ids,ide, jds,jde, kds,kde, & ! domain dims
2372 ims,ime, jms,jme, kms,kme, & ! memory dims
2373 ips,ipe, jps,jpe, kps,kpe, & ! patch dims
2374 grid%i_start(ij), grid%i_end(ij), &
2375 grid%j_start(ij), grid%j_end(ij), &
2376 k_start, k_end )
2377 ENDIF
2378 ENDIF
2379 BENCH_END(flow_depbdy_tim)
2380 
2381 ENDDO moist_tile_loop_2
2382 !$OMP END PARALLEL DO
2383 
2384 ENDIF !-- if (grid%adv_moist_cond .or. im==p_qv ) then
2385 
2386 ENDDO moist_variable_loop
2387 
2388 ENDIF moist_scalar_advance
2389
2390 BENCH_START(tke_adv_tim)
! Advect prognostic TKE (only the km_opt == 2 closure carries TKE as a
! prognostic variable); halo width follows the momentum advection order.
2391 TKE_advance: IF (config_flags%km_opt .eq. 2) then
2392 
2393 #ifdef DM_PARALLEL
2394 IF ( config_flags%h_mom_adv_order <= 4 ) THEN
2395 # include "HALO_EM_TKE_ADVECT_3.inc"
2396 ELSE IF ( config_flags%h_mom_adv_order <= 6 ) THEN
2397 # include "HALO_EM_TKE_ADVECT_5.inc"
2398 ELSE
2399 WRITE(wrf_err_message,*)'solve_em: invalid h_mom_adv_order = ',config_flags%h_mom_adv_order
2400 CALL wrf_error_fatal(TRIM(wrf_err_message))
2401 ENDIF
2402 #endif
2403 
2404 !$OMP PARALLEL DO &
2405 !$OMP PRIVATE ( ij )
2406 
2407 tke_tile_loop_1: DO ij = 1 , grid%num_tiles
2408 
2409 CALL wrf_debug ( 200 , ' call rk_scalar_tend for tke' )
2410 CALL rk_scalar_tend ( 1, 1, config_flags, &
2411 rk_step, dt_rk, &
2412 grid%em_ru_m, grid%em_rv_m, grid%em_ww_m, &
2413 grid%em_mut, grid%em_mub, grid%em_mu_1, &
2414 grid%em_alt, &
2415 grid%em_tke_1, &
2416 grid%em_tke_2, &
2417 tke_tend(ims,kms,jms), &
2418 advect_tend,grid%rqvften, &
2419 grid%qv_base, .false., grid%em_fnm, grid%em_fnp, &
2420 grid%msfu, grid%msfv, grid%msft, &
2421 grid%rdx, grid%rdy, grid%em_rdn, grid%em_rdnw, grid%khdif, &
2422 grid%kvdif, grid%xkmhd, &
2423 grid%diff_6th_opt, grid%diff_6th_factor, &
2424 config_flags%pd_tke, &
2425 ids, ide, jds, jde, kds, kde, &
2426 ims, ime, jms, jme, kms, kme, &
2427 grid%i_start(ij), grid%i_end(ij), &
2428 grid%j_start(ij), grid%j_end(ij), &
2429 k_start , k_end )
2430 
2431 ENDDO tke_tile_loop_1
2432 !$OMP END PARALLEL DO
2433 
2434 !$OMP PARALLEL DO &
2435 !$OMP PRIVATE ( ij )
2436 
2437 tke_tile_loop_2: DO ij = 1 , grid%num_tiles
2438 
2438 
2439 CALL wrf_debug ( 200 , ' call rk_update_scalar' )
2440 CALL rk_update_scalar( 1, 1, &
2441 grid%em_tke_1, &
2442 grid%em_tke_2, &
2443 tke_tend(ims,kms,jms), &
2444 advect_tend,grid%msft, &
2445 grid%em_mu_1, grid%em_mu_2, grid%em_mub, &
2446 rk_step, dt_rk, grid%spec_zone, &
2447 config_flags, &
2448 ids, ide, jds, jde, kds, kde, &
2449 ims, ime, jms, jme, kms, kme, &
2450 grid%i_start(ij), grid%i_end(ij), &
2451 grid%j_start(ij), grid%j_end(ij), &
2452 k_start , k_end )
2453 
2454 ! bound the tke (greater than 0, less than tke_upper_bound)
2455 
2456 CALL bound_tke( grid%em_tke_2, grid%tke_upper_bound, &
2457 ids, ide, jds, jde, kds, kde, &
2458 ims, ime, jms, jme, kms, kme, &
2459 grid%i_start(ij), grid%i_end(ij), &
2460 grid%j_start(ij), grid%j_end(ij), &
2461 k_start , k_end )
2462 
! Flow-dependent outflow BC for TKE in the specified zone.
2463 IF( config_flags%specified .or. config_flags%nested ) THEN
2464 CALL flow_dep_bdy ( grid%em_tke_2, &
2465 grid%em_ru_m, grid%em_rv_m, config_flags, &
2466 grid%spec_zone, &
2467 ids,ide, jds,jde, kds,kde, & ! domain dims
2468 ims,ime, jms,jme, kms,kme, & ! memory dims
2469 ips,ipe, jps,jpe, kps,kpe, & ! patch dims
2470 grid%i_start(ij), grid%i_end(ij), &
2471 grid%j_start(ij), grid%j_end(ij), &
2472 k_start, k_end )
2473 ENDIF
2474 ENDDO tke_tile_loop_2
2475 !$OMP END PARALLEL DO
2476 
2477 END IF TKE_advance
2478 BENCH_END(tke_adv_tim)
2479
2480 #ifdef WRF_CHEM
2481 ! next the chemical species
! Advance the chemistry species for this RK substep (WRF-Chem builds
! only): tendency, nested-boundary relaxation, update, specified-zone BC.
2482 BENCH_START(chem_adv_tim)
2483 chem_scalar_advance: IF (num_3d_c >= PARAM_FIRST_SCALAR) THEN
2484 
2485 chem_variable_loop: do ic = PARAM_FIRST_SCALAR, num_3d_c
2486 
2487 !$OMP PARALLEL DO &
2488 !$OMP PRIVATE ( ij )
2489 
2490 chem_tile_loop_1: DO ij = 1 , grid%num_tiles
2491 
2492 CALL wrf_debug ( 200 , ' call rk_scalar_tend in chem_tile_loop_1' )
2493 CALL rk_scalar_tend ( ic, ic, config_flags, &
2494 rk_step, dt_rk, &
2495 grid%em_ru_m, grid%em_rv_m, grid%em_ww_m, &
2496 grid%em_mut, grid%em_mub, grid%em_mu_1, &
2497 grid%em_alt, &
2498 chem_old(ims,kms,jms,ic), &
2499 chem(ims,kms,jms,ic), &
2500 chem_tend(ims,kms,jms,ic), &
2501 advect_tend,grid%rqvften, &
2502 grid%qv_base, .false., grid%em_fnm, grid%em_fnp, &
2503 grid%msfu, grid%msfv, grid%msft, &
2504 grid%rdx, grid%rdy, grid%em_rdn, grid%em_rdnw, &
2505 grid%khdif, grid%kvdif, grid%xkmhd, &
2506 grid%diff_6th_opt, grid%diff_6th_factor, &
2507 config_flags%pd_chem, &
2508 ids, ide, jds, jde, kds, kde, &
2509 ims, ime, jms, jme, kms, kme, &
2510 grid%i_start(ij), grid%i_end(ij), &
2511 grid%j_start(ij), grid%j_end(ij), &
2512 k_start , k_end )
2513 !
2514 ! Currently, chemistry species with specified boundaries (i.e. the mother
2515 ! domain) are being over written by flow_dep_bdy_chem. So, relax_bdy and
2516 ! spec_bdy are only called for nests. For boundary conditions from global model or larger domain,
2517 ! chem is uncoupled, and only used for one row/column on inflow (if have_bcs_chem=.true.)
2518 !
2519 IF( ( config_flags%nested ) .and. rk_step == 1 ) THEN
2520 if(ic.eq.1)CALL wrf_debug ( 10 , ' have_bcs_chem' )
2521 
2522 CALL relax_bdy_scalar ( chem_tend(ims,kms,jms,ic), &
2523 chem(ims,kms,jms,ic), grid%em_mut, &
2524 chem_bxs(jms,kms,1,ic),chem_bxe(jms,kms,1,ic),chem_bys(ims,kms,1,ic),chem_bye(ims,kms,1,ic), &
2525 chem_btxs(jms,kms,1,ic),chem_btxe(jms,kms,1,ic),chem_btys(ims,kms,1,ic),chem_btye(ims,kms,1,ic), &
2526 config_flags%spec_bdy_width, grid%spec_zone, grid%relax_zone, &
2527 grid%dtbc, grid%fcx, grid%gcx, &
2528 config_flags, &
2529 ids,ide, jds,jde, kds,kde, & ! domain dims
2530 ims,ime, jms,jme, kms,kme, & ! memory dims
2531 ips,ipe, jps,jpe, kps,kpe, & ! patch dims
2532 grid%i_start(ij), grid%i_end(ij), &
2533 grid%j_start(ij), grid%j_end(ij), &
2534 k_start, k_end )
2535 CALL spec_bdy_scalar ( chem_tend(ims,kms,jms,ic), &
2536 chem_bxs(jms,kms,1,ic),chem_bxe(jms,kms,1,ic),chem_bys(ims,kms,1,ic),chem_bye(ims,kms,1,ic), &
2537 chem_btxs(jms,kms,1,ic),chem_btxe(jms,kms,1,ic),chem_btys(ims,kms,1,ic),chem_btye(ims,kms,1,ic), &
2538 config_flags%spec_bdy_width, grid%spec_zone, &
2539 config_flags, &
2540 ids,ide, jds,jde, kds,kde, & ! domain dims
2541 ims,ime, jms,jme, kms,kme, & ! memory dims
2542 ips,ipe, jps,jpe, kps,kpe, & ! patch dims
2543 grid%i_start(ij), grid%i_end(ij), &
2544 grid%j_start(ij), grid%j_end(ij), &
2545 k_start, k_end )
2546 ENDIF
2547 
2548 ENDDO chem_tile_loop_1
2549 
2550 !$OMP END PARALLEL DO
2551 
2552 
2553 !$OMP PARALLEL DO &
2554 !$OMP PRIVATE ( ij )
2555 
2556 chem_tile_loop_2: DO ij = 1 , grid%num_tiles
2557 
2558 CALL wrf_debug ( 200 , ' call rk_update_scalar' )
2559 CALL rk_update_scalar( ic, ic, &
2560 chem_old(ims,kms,jms,ic), & ! was chem_1
2561 chem(ims,kms,jms,ic), &
2562 chem_tend(ims,kms,jms,ic), &
2563 advect_tend, grid%msft, &
2564 grid%em_mu_1, grid%em_mu_2, grid%em_mub, &
2565 rk_step, dt_rk, grid%spec_zone, &
2566 config_flags, &
2567 ids, ide, jds, jde, kds, kde, &
2568 ims, ime, jms, jme, kms, kme, &
2569 grid%i_start(ij), grid%i_end(ij), &
2570 grid%j_start(ij), grid%j_end(ij), &
2571 k_start , k_end )
2572 
2573 
! Chemistry uses its own flow-dependent BC routine (not the generic
! flow_dep_bdy), which also handles inflow values from the boundary
! arrays when have_bcs_chem is set.
2574 IF( config_flags%specified ) THEN
2575 ! come back to this and figure out why two different routines are needed. JM 20041203
2576 !#ifndef WRF_CHEM
2577 !!$ CALL flow_dep_bdy ( chem(ims,kms,jms,ic), &
2578 !!$ grid%em_ru_m, grid%em_rv_m, config_flags, &
2579 !!$ grid%spec_zone, &
2580 !!$ ids,ide, jds,jde, kds,kde, & ! domain dims
2581 !!$ ims,ime, jms,jme, kms,kme, & ! memory dims
2582 !!$ ips,ipe, jps,jpe, kps,kpe, & ! patch dims
2583 !!$ grid%i_start(ij), grid%i_end(ij), &
2584 !!$ grid%j_start(ij), grid%j_end(ij), &
2585 !!$ k_start, k_end )
2586 !#else
2587 CALL flow_dep_bdy_chem( chem(ims,kms,jms,ic), &
2588 chem_bxs(jms,kms,1,ic), chem_btxs(jms,kms,1,ic), &
2589 chem_bxe(jms,kms,1,ic), chem_btxe(jms,kms,1,ic), &
2590 chem_bys(ims,kms,1,ic), chem_btys(ims,kms,1,ic), &
2591 chem_bye(ims,kms,1,ic), chem_btye(ims,kms,1,ic), &
2592 dt_rk+grid%dtbc, &
2593 config_flags%spec_bdy_width,grid%em_z, &
2594 grid%have_bcs_chem, &
2595 grid%em_ru_m, grid%em_rv_m, config_flags,grid%em_alt, &
2596 grid%em_t_1,grid%em_pb,grid%em_p,t0,p1000mb,rcp,grid%em_ph_2,grid%em_phb,g, &
2597 grid%spec_zone,ic, &
2598 ids,ide, jds,jde, kds,kde, & ! domain dims
2599 ims,ime, jms,jme, kms,kme, & ! memory dims
2600 ips,ipe, jps,jpe, kps,kpe, & ! patch dims
2601 grid%i_start(ij), grid%i_end(ij), &
2602 grid%j_start(ij), grid%j_end(ij), &
2603 k_start, k_end )
2604 !#endif
2605 ENDIF
2606 
2607 
2608 ENDDO chem_tile_loop_2
2609 !$OMP END PARALLEL DO
2610 
2611 ENDDO chem_variable_loop
2612 
2613 ENDIF chem_scalar_advance
2614 BENCH_END(chem_adv_tim)
2615 #endif
2616
2617 ! next the other scalar species
! Advance the generic 3D scalar species for this RK substep; nested
! boundary relaxation is applied only to qndrop/qni on the first substep.
2618 other_scalar_advance: IF (num_3d_s >= PARAM_FIRST_SCALAR) THEN
2619 
2620 scalar_variable_loop: do is = PARAM_FIRST_SCALAR, num_3d_s
2621 !$OMP PARALLEL DO &
2622 !$OMP PRIVATE ( ij )
2623 
2624 scalar_tile_loop_1: DO ij = 1 , grid%num_tiles
2625 
2626 CALL wrf_debug ( 200 , ' call rk_scalar_tend' )
2627 CALL rk_scalar_tend ( is, is, config_flags, &
2628 rk_step, dt_rk, &
2629 grid%em_ru_m, grid%em_rv_m, grid%em_ww_m, &
2630 grid%em_mut, grid%em_mub, grid%em_mu_1, &
2631 grid%em_alt, &
2632 scalar_old(ims,kms,jms,is), &
2633 scalar(ims,kms,jms,is), &
2634 scalar_tend(ims,kms,jms,is), &
2635 advect_tend,grid%rqvften, &
2636 grid%qv_base, .false., grid%em_fnm, grid%em_fnp, &
2637 grid%msfu, grid%msfv, grid%msft, &
2638 grid%rdx, grid%rdy, grid%em_rdn, grid%em_rdnw, &
2639 grid%khdif, grid%kvdif, grid%xkmhd, &
2640 grid%diff_6th_opt, grid%diff_6th_factor, &
2641 config_flags%pd_scalar, &
2642 ids, ide, jds, jde, kds, kde, &
2643 ims, ime, jms, jme, kms, kme, &
2644 grid%i_start(ij), grid%i_end(ij), &
2645 grid%j_start(ij), grid%j_end(ij), &
2646 k_start , k_end )
2647 
2648 IF( config_flags%nested .and. (rk_step == 1) ) THEN
2649 
! Only the number-concentration scalars carried in boundary files get
! the relax/spec treatment on nests.
2650 IF (is .eq. P_QNDROP .OR. is .eq. P_QNI) THEN
2651 
2652 CALL relax_bdy_scalar ( scalar_tend(ims,kms,jms,is), &
2653 scalar(ims,kms,jms,is), grid%em_mut, &
2654 scalar_bxs(jms,kms,1,is),scalar_bxe(jms,kms,1,is), &
2655 scalar_bys(ims,kms,1,is),scalar_bye(ims,kms,1,is), &
2656 scalar_btxs(jms,kms,1,is),scalar_btxe(jms,kms,1,is), &
2657 scalar_btys(ims,kms,1,is),scalar_btye(ims,kms,1,is), &
2658 config_flags%spec_bdy_width, grid%spec_zone, grid%relax_zone, &
2659 grid%dtbc, grid%fcx, grid%gcx, &
2660 config_flags, &
2661 ids,ide, jds,jde, kds,kde, & ! domain dims
2662 ims,ime, jms,jme, kms,kme, & ! memory dims
2663 ips,ipe, jps,jpe, kps,kpe, & ! patch dims
2664 grid%i_start(ij), grid%i_end(ij), &
2665 grid%j_start(ij), grid%j_end(ij), &
2666 k_start, k_end )
2667 
2668 CALL spec_bdy_scalar ( scalar_tend(ims,kms,jms,is), &
2669 scalar_bxs(jms,kms,1,is),scalar_bxe(jms,kms,1,is), &
2670 scalar_bys(ims,kms,1,is),scalar_bye(ims,kms,1,is), &
2671 scalar_btxs(jms,kms,1,is),scalar_btxe(jms,kms,1,is), &
2672 scalar_btys(ims,kms,1,is),scalar_btye(ims,kms,1,is), &
2673 config_flags%spec_bdy_width, grid%spec_zone, &
2674 config_flags, &
2675 ids,ide, jds,jde, kds,kde, & ! domain dims
2676 ims,ime, jms,jme, kms,kme, & ! memory dims
2677 ips,ipe, jps,jpe, kps,kpe, & ! patch dims
2678 grid%i_start(ij), grid%i_end(ij), &
2679 grid%j_start(ij), grid%j_end(ij), &
2680 k_start, k_end )
2681 
2682 ENDIF
2683 
2684 ENDIF ! b.c test for chem nested boundary condition
2685 
2686 ENDDO scalar_tile_loop_1
2687 !$OMP END PARALLEL DO
2688 
2689 
2690 !$OMP PARALLEL DO &
2691 !$OMP PRIVATE ( ij )
2692 
2693 scalar_tile_loop_2: DO ij = 1 , grid%num_tiles
2694 
2695 CALL wrf_debug ( 200 , ' call rk_update_scalar' )
2696 CALL rk_update_scalar( is, is, &
2697 scalar_old(ims,kms,jms,is), & ! was scalar_1
2698 scalar(ims,kms,jms,is), &
2699 scalar_tend(ims,kms,jms,is), &
2700 advect_tend, grid%msft, &
2701 grid%em_mu_1, grid%em_mu_2, grid%em_mub, &
2702 rk_step, dt_rk, grid%spec_zone, &
2703 config_flags, &
2704 ids, ide, jds, jde, kds, kde, &
2705 ims, ime, jms, jme, kms, kme, &
2706 grid%i_start(ij), grid%i_end(ij), &
2707 grid%j_start(ij), grid%j_end(ij), &
2708 k_start , k_end )
2709 
2710 
! Flow-dependent outflow BC for scalars in the specified zone.
2711 IF( config_flags%specified ) THEN
2712 CALL flow_dep_bdy ( scalar(ims,kms,jms,is), &
2713 grid%em_ru_m, grid%em_rv_m, config_flags, &
2714 grid%spec_zone, &
2715 ids,ide, jds,jde, kds,kde, & ! domain dims
2716 ims,ime, jms,jme, kms,kme, & ! memory dims
2717 ips,ipe, jps,jpe, kps,kpe, & ! patch dims
2718 grid%i_start(ij), grid%i_end(ij), &
2719 grid%j_start(ij), grid%j_end(ij), &
2720 k_start, k_end )
2721 ENDIF
2722 
2723 
2724 ENDDO scalar_tile_loop_2
2725 !$OMP END PARALLEL DO
2726 
2727 ENDDO scalar_variable_loop
2728 
2729 ENDIF other_scalar_advance
2730
2731 ! update the pressure and density at the new time level
2732 
!  After the RK sub-step has advanced the prognostic variables, recompute
!  the diagnostic pressure / inverse-density / geopotential fields so they
!  are consistent with the new state (see calc_p_rho_phi).  One tile per
!  OpenMP thread; only the tile index ij is thread-private.
2733 !$OMP PARALLEL DO   &
2734 !$OMP PRIVATE ( ij )
2735    DO ij = 1 , grid%num_tiles
2736 
2737 BENCH_START(calc_p_rho_tim)
2738 
2739      CALL calc_p_rho_phi( moist, num_3d_m,                &
2740                           grid%em_al, grid%em_alb, grid%em_mu_2, grid%em_muts, &
2741                           grid%em_ph_2, grid%em_p, grid%em_pb, grid%em_t_2,    &
2742                           p0, t0, grid%em_znu, grid%em_dnw, grid%em_rdnw,      &
2743                           grid%em_rdn, config_flags%non_hydrostatic,           &
2744                           ids, ide, jds, jde, kds, kde,     &
2745                           ims, ime, jms, jme, kms, kme,     &
2746                           grid%i_start(ij), grid%i_end(ij), &
2747                           grid%j_start(ij), grid%j_end(ij), &
2748                           k_start    , k_end               )
2749 
2750 BENCH_END(calc_p_rho_tim)
2751 
2752    ENDDO
2753 !$OMP END PARALLEL DO
2754
2755 ! Reset the boundary conditions if there is another corrector step.
2756 ! (rk_step < rk_order), else we'll handle it at the end of everything
2757 ! (after the split physics, before exiting the timestep).
2758
2759 rk_step_1_check: IF ( rk_step < rk_order ) THEN
2760
2761 !-----------------------------------------------------------
2762 ! Stencils for patch communications (WCS, 29 June 2001)
2763 !
2764 ! here's where we need a wide comm stencil - these are the
2765 ! uncoupled variables so are used for high order calc in
2766 !  advection and mixing routines.
2767 !
2768 ! * * * * *
2769 ! * * * * * * * * *
2770 ! * + * * + * * * + * *
2771 ! * * * * * * * * *
2772 ! * * * * *
2773 !
2774 !
2775 ! grid%em_u_2 x
2776 ! grid%em_v_2 x
2777 ! grid%em_w_2 x
2778 ! grid%em_t_2 x
2779 ! grid%em_ph_2 x
2780 ! grid%em_al x
2781 !
2782 ! 2D variable
2783 ! grid%em_mu_2 x
2784 !
2785 ! 4D variable
2786 ! moist x
2787 ! chem x
2788 ! scalar              x
2789
2790 #ifdef DM_PARALLEL
2791 IF ( config_flags%h_mom_adv_order <= 4 ) THEN
2792 # include "HALO_EM_D2_3.inc"
2793 ELSE IF ( config_flags%h_mom_adv_order <= 6 ) THEN
2794 # include "HALO_EM_D2_5.inc"
2795 ELSE
2796 WRITE(wrf_err_message,*)'solve_em: invalid h_mom_adv_order = ',config_flags%h_mom_adv_order
2797 CALL wrf_error_fatal(TRIM(wrf_err_message))
2798 ENDIF
2799 # include "PERIOD_BDY_EM_D.inc"
2800 # include "PERIOD_BDY_EM_MOIST2.inc"
2801 # include "PERIOD_BDY_EM_CHEM2.inc"
2802 # include "PERIOD_BDY_EM_SCALAR2.inc"
2803 #endif
2804
2805 BENCH_START(bc_end_tim)
2806 !$OMP PARALLEL DO &
2807 !$OMP PRIVATE ( ij )
2808
2809 tile_bc_loop_1: DO ij = 1 , grid%num_tiles
2810
2811
2812 CALL wrf_debug ( 200 , ' call rk_phys_bc_dry_2' )
2813
2814 CALL rk_phys_bc_dry_2( config_flags, &
2815 grid%em_u_2, grid%em_v_2, grid%em_w_2, &
2816 grid%em_t_2, grid%em_ph_2, grid%em_mu_2, &
2817 ids, ide, jds, jde, kds, kde, &
2818 ims, ime, jms, jme, kms, kme, &
2819 ips, ipe, jps, jpe, kps, kpe, &
2820 grid%i_start(ij), grid%i_end(ij), &
2821 grid%j_start(ij), grid%j_end(ij), &
2822 k_start , k_end )
2823
2824 BENCH_START(diag_w_tim)
2825 IF (.not. config_flags%non_hydrostatic) THEN
2826 CALL diagnose_w( ph_tend, grid%em_ph_2, grid%em_ph_1, grid%em_w_2, grid%em_muts, dt_rk, &
2827 grid%em_u_2, grid%em_v_2, grid%ht, &
2828 grid%cf1, grid%cf2, grid%cf3, grid%rdx, grid%rdy, grid%msft, &
2829 ids, ide, jds, jde, kds, kde, &
2830 ims, ime, jms, jme, kms, kme, &
2831 grid%i_start(ij), grid%i_end(ij), &
2832 grid%j_start(ij), grid%j_end(ij), &
2833 k_start , k_end )
2834 ENDIF
2835 BENCH_END(diag_w_tim)
2836
2837 IF (num_3d_m >= PARAM_FIRST_SCALAR) THEN
2838
2839 moisture_loop_bdy_1 : DO im = PARAM_FIRST_SCALAR , num_3d_m
2840
2841 CALL set_physical_bc3d( moist(ims,kms,jms,im), 'p', config_flags, &
2842 ids, ide, jds, jde, kds, kde, &
2843 ims, ime, jms, jme, kms, kme, &
2844 ips, ipe, jps, jpe, kps, kpe, &
2845 grid%i_start(ij), grid%i_end(ij), &
2846 grid%j_start(ij), grid%j_end(ij), &
2847 k_start , k_end )
2848 END DO moisture_loop_bdy_1
2849
2850 ENDIF
2851
2852 IF (num_3d_c >= PARAM_FIRST_SCALAR) THEN
2853
2854 chem_species_bdy_loop_1 : DO ic = PARAM_FIRST_SCALAR , num_3d_c
2855
2856 CALL set_physical_bc3d( chem(ims,kms,jms,ic), 'p', config_flags, &
2857 ids, ide, jds, jde, kds, kde, &
2858 ims, ime, jms, jme, kms, kme, &
2859 ips, ipe, jps, jpe, kps, kpe, &
2860 grid%i_start(ij), grid%i_end(ij), &
2861 grid%j_start(ij), grid%j_end(ij), &
2862 k_start , k_end-1 )
2863
2864 END DO chem_species_bdy_loop_1
2865
2866 END IF
2867
2868 IF (num_3d_s >= PARAM_FIRST_SCALAR) THEN
2869
2870 scalar_species_bdy_loop_1 : DO is = PARAM_FIRST_SCALAR , num_3d_s
2871
2872 CALL set_physical_bc3d( scalar(ims,kms,jms,is), 'p', config_flags, &
2873 ids, ide, jds, jde, kds, kde, &
2874 ims, ime, jms, jme, kms, kme, &
2875 ips, ipe, jps, jpe, kps, kpe, &
2876 grid%i_start(ij), grid%i_end(ij), &
2877 grid%j_start(ij), grid%j_end(ij), &
2878 k_start , k_end-1 )
2879
2880 END DO scalar_species_bdy_loop_1
2881
2882 END IF
2883
2884 IF (config_flags%km_opt .eq. 2) THEN
2885
2886 CALL set_physical_bc3d( grid%em_tke_2 , 'p', config_flags, &
2887 ids, ide, jds, jde, kds, kde, &
2888 ims, ime, jms, jme, kms, kme, &
2889 ips, ipe, jps, jpe, kps, kpe, &
2890 grid%i_start(ij), grid%i_end(ij), &
2891 grid%j_start(ij), grid%j_end(ij), &
2892 k_start , k_end )
2893 END IF
2894
2895 END DO tile_bc_loop_1
2896 !$OMP END PARALLEL DO
2897 BENCH_END(bc_end_tim)
2898
2899
2900 #ifdef DM_PARALLEL
2901
2902 ! * * * * *
2903 ! * * * * * * * * *
2904 ! * + * * + * * * + * *
2905 ! * * * * * * * * *
2906 ! * * * * *
2907
2908 ! moist, chem, scalar, tke x
2909
2910
!  Select the TKE halo-exchange width from the momentum advection order
!  (wider stencils need wider halos; positive-definite advection on the
!  last RK sub-step needs one extra row/column).
!  NOTE(review): the test below is on h_mom_adv_order, but the error
!  message used to report h_sca_adv_order — fixed so the fatal diagnostic
!  names the option actually being validated.
2911           IF      ( config_flags%h_mom_adv_order <= 4 ) THEN
2912             IF ( (config_flags%pd_tke) .and. (rk_step == rk_order-1) ) THEN
2913 #      include "HALO_EM_TKE_5.inc"
2914             ELSE
2915 #      include "HALO_EM_TKE_3.inc"
2916             ENDIF
2917           ELSE IF ( config_flags%h_mom_adv_order <= 6 ) THEN
2918             IF ( (config_flags%pd_tke) .and. (rk_step == rk_order-1) ) THEN
2919 #      include "HALO_EM_TKE_7.inc"
2920             ELSE
2921 #      include "HALO_EM_TKE_5.inc"
2922             ENDIF
2923           ELSE
2924             WRITE(wrf_err_message,*)'solve_em: invalid h_mom_adv_order = ',config_flags%h_mom_adv_order
2925             CALL wrf_error_fatal(TRIM(wrf_err_message))
2926           ENDIF
2927
2928 #if 0
2929 IF (config_flags%km_opt .eq. 2) THEN
2930 # include "HALO_EM_TKE_F.inc"
2931 ENDIF
2932 #endif
2933
2934 if ( num_moist .ge. PARAM_FIRST_SCALAR ) then
2935 IF ( config_flags%h_sca_adv_order <= 4 ) THEN
2936 IF ( (config_flags%pd_moist) .and. (rk_step == rk_order-1) ) THEN
2937 # include "HALO_EM_MOIST_E_5.inc"
2938 ELSE
2939 # include "HALO_EM_MOIST_E_3.inc"
2940 END IF
2941 ELSE IF ( config_flags%h_sca_adv_order <= 6 ) THEN
2942 IF ( (config_flags%pd_moist) .and. (rk_step == rk_order-1) ) THEN
2943 # include "HALO_EM_MOIST_E_7.inc"
2944 ELSE
2945 # include "HALO_EM_MOIST_E_5.inc"
2946 END IF
2947 ELSE
2948 WRITE(wrf_err_message,*)'solve_em: invalid h_sca_adv_order = ',config_flags%h_sca_adv_order
2949 CALL wrf_error_fatal(TRIM(wrf_err_message))
2950 ENDIF
2951 endif
2952 if ( num_chem >= PARAM_FIRST_SCALAR ) then
2953 IF ( config_flags%h_sca_adv_order <= 4 ) THEN
2954 IF ( (config_flags%pd_chem) .and. (rk_step == rk_order-1) ) THEN
2955 # include "HALO_EM_CHEM_E_5.inc"
2956 ELSE
2957 # include "HALO_EM_CHEM_E_3.inc"
2958 END IF
2959 ELSE IF ( config_flags%h_sca_adv_order <= 6 ) THEN
2960 IF ( (config_flags%pd_chem) .and. (rk_step == rk_order-1) ) THEN
2961 # include "HALO_EM_CHEM_E_7.inc"
2962 ELSE
2963 # include "HALO_EM_CHEM_E_5.inc"
2964 END IF
2965 ELSE
2966 WRITE(wrf_err_message,*)'solve_em: invalid h_sca_adv_order = ',config_flags%h_sca_adv_order
2967 CALL wrf_error_fatal(TRIM(wrf_err_message))
2968 ENDIF
2969 endif
2970 if ( num_scalar >= PARAM_FIRST_SCALAR ) then
2971 IF ( config_flags%h_sca_adv_order <= 4 ) THEN
2972 IF ( (config_flags%pd_scalar) .and. (rk_step == rk_order-1) ) THEN
2973 # include "HALO_EM_SCALAR_E_5.inc"
2974 ELSE
2975 # include "HALO_EM_SCALAR_E_3.inc"
2976 END IF
2977 ELSE IF ( config_flags%h_sca_adv_order <= 6 ) THEN
2978 IF ( (config_flags%pd_scalar) .and. (rk_step == rk_order-1) ) THEN
2979 # include "HALO_EM_SCALAR_E_7.inc"
2980 ELSE
2981 # include "HALO_EM_SCALAR_E_5.inc"
2982 END IF
2983 ELSE
2984 WRITE(wrf_err_message,*)'solve_em: invalid h_sca_adv_order = ',config_flags%h_sca_adv_order
2985 CALL wrf_error_fatal(TRIM(wrf_err_message))
2986 ENDIF
2987 endif
2988 #endif
2989
2990 ENDIF rk_step_1_check
2991
2992
2993 !**********************************************************
2994 !
2995 ! end of RK predictor-corrector loop
2996 !
2997 !**********************************************************
2998
2999 END DO Runge_Kutta_loop
3000
!  Advance the cumulus-scheme precipitation bookkeeping (RAINC accumulation
!  from RAINCV, convective counter NCA, cloud top/bottom indices) on each
!  tile, using the cumulus tendencies computed earlier in the step.
3001 !$OMP PARALLEL DO   &
3002 !$OMP PRIVATE ( ij )
3003 
3004    DO ij = 1 , grid%num_tiles
3005 
3006 BENCH_START(advance_ppt_tim)
3007      CALL wrf_debug ( 200 , ' call advance_ppt' )
3008      CALL advance_ppt(grid%rthcuten,grid%rqvcuten,grid%rqccuten,grid%rqrcuten, &
3009                       grid%rqicuten,grid%rqscuten,grid%rainc,grid%raincv,grid%nca, &
3010                       grid%htop,grid%hbot,grid%cutop,grid%cubot, &
3011                       grid%cuppt, config_flags, &
3012                       ids,ide, jds,jde, kds,kde, &
3013                       ims,ime, jms,jme, kms,kme, &
3014                       grid%i_start(ij), grid%i_end(ij), &
3015                       grid%j_start(ij), grid%j_end(ij), &
3016                       k_start , k_end )
3017 BENCH_END(advance_ppt_tim)
3018 
3019    ENDDO
3020 !$OMP END PARALLEL DO
3021
3022 !<DESCRIPTION>
3023 !<pre>
3024 ! (5) time-split physics.
3025 !
3026 ! Microphysics are the only time split physics in the WRF model
3027 ! at this time. Split-physics begins with the calculation of
3028 ! needed diagnostic quantities (pressure, temperature, etc.)
3029 ! followed by a call to the microphysics driver,
3030 ! and finishes with a clean-up, storing off of a diabatic tendency
3031 ! from the moist physics, and a re-calculation of the diagnostic
3032 ! quantities pressure and density.
3033 !</pre>
3034 !</DESCRIPTION>
3035
3036 IF (config_flags%mp_physics /= 0) then
3037
3038 IF( config_flags%specified .or. config_flags%nested ) THEN
3039 sz = grid%spec_zone
3040 ELSE
3041 sz = 0
3042 ENDIF
3043
3044 !$OMP PARALLEL DO &
3045 !$OMP PRIVATE ( ij, its, ite, jts, jte )
3046
3047 scalar_tile_loop_1a: DO ij = 1 , grid%num_tiles
3048
3049 IF ( config_flags%periodic_x ) THEN
3050 its = max(grid%i_start(ij),ids)
3051 ite = min(grid%i_end(ij),ide-1)
3052 ELSE
3053 its = max(grid%i_start(ij),ids+sz)
3054 ite = min(grid%i_end(ij),ide-1-sz)
3055 ENDIF
3056 jts = max(grid%j_start(ij),jds+sz)
3057 jte = min(grid%j_end(ij),jde-1-sz)
3058
3059 CALL wrf_debug ( 200 , ' call moist_physics_prep' )
3060 BENCH_START(moist_physics_prep_tim)
3061 CALL moist_physics_prep_em( grid%em_t_2, grid%em_t_1, t0, rho, &
3062 grid%em_al, grid%em_alb, grid%em_p, p8w, p0, grid%em_pb, &
3063 grid%em_ph_2, grid%em_phb, th_phy, pi_phy, p_phy, &
3064 grid%em_z, z_at_w, dz8w, &
3065 dtm, grid%h_diabatic, &
3066 config_flags,grid%em_fnm, grid%em_fnp, &
3067 ids, ide, jds, jde, kds, kde, &
3068 ims, ime, jms, jme, kms, kme, &
3069 its, ite, jts, jte, &
3070 k_start , k_end )
3071 BENCH_END(moist_physics_prep_tim)
3072 END DO scalar_tile_loop_1a
3073 !$OMP END PARALLEL DO
3074
3075 CALL wrf_debug ( 200 , ' call microphysics_driver' )
3076
3077 grid%em_sr = 0.
3078 specified_bdy = config_flags%specified .OR. config_flags%nested
3079 channel_bdy = config_flags%specified .AND. config_flags%periodic_x
3080
3081 #if 0
3082 BENCH_START(microswap_1)
3083 ! for load balancing; communication to redistribute the points
3084 IF ( config_flags%mp_physics .EQ. ETAMPNEW ) THEN
3085 #include "SWAP_ETAMP_NEW.inc"
3086 ELSE IF ( config_flags%mp_physics .EQ. WSM3SCHEME ) THEN
3087 #include "SWAP_WSM3.inc"
3088 ENDIF
3089 BENCH_END(microswap_1)
3090 #endif
3091
3092 BENCH_START(micro_driver_tim)
3093
3094 CALL microphysics_driver( &
3095 & DT=dtm ,DX=grid%dx ,DY=grid%dy &
3096 & ,DZ8W=dz8w ,F_ICE_PHY=grid%f_ice_phy &
3097 & ,ITIMESTEP=grid%itimestep ,LOWLYR=grid%lowlyr &
3098 & ,P8W=p8w ,P=p_phy ,PI_PHY=pi_phy &
3099 & ,RHO=rho ,SPEC_ZONE=grid%spec_zone &
3100 & ,SR=grid%em_sr ,TH=th_phy &
3101 & ,WARM_RAIN=grid%warm_rain &
3102 & ,T8W=t8w &
3103 & ,CLDFRA=grid%cldfra, EXCH_H=grid%exch_h &
3104 & ,NSOURCE=grid%qndropsource &
3105 #ifdef WRF_CHEM
3106 & ,QLSINK=grid%qlsink,CLDFRA_OLD=grid%cldfra_old &
3107 & ,PRECR=grid%precr, PRECI=grid%preci, PRECS=grid%precs, PRECG=grid%precg &
3108 #endif
3109 & ,XLAND=grid%xland &
3110 & ,SPECIFIED=specified_bdy, CHANNEL_SWITCH=channel_bdy &
3111 & ,F_RAIN_PHY=grid%f_rain_phy &
3112 & ,F_RIMEF_PHY=grid%f_rimef_phy &
3113 & ,MP_PHYSICS=config_flags%mp_physics &
3114 & ,ID=grid%id &
3115 & ,IDS=ids,IDE=ide, JDS=jds,JDE=jde, KDS=kds,KDE=kde &
3116 & ,IMS=ims,IME=ime, JMS=jms,JME=jme, KMS=kms,KME=kme &
3117 & ,I_START=grid%i_start,I_END=min(grid%i_end, ide-1) &
3118 & ,J_START=grid%j_start,J_END=min(grid%j_end, jde-1) &
3119 & ,KTS=k_start, KTE=min(k_end,kde-1) &
3120 & ,NUM_TILES=grid%num_tiles &
3121 & ,NAER=grid%naer &
3122 ! Optional
3123 & , RAINNC=grid%rainnc, RAINNCV=grid%rainncv &
3124 & , SNOWNC=grid%snownc, SNOWNCV=grid%snowncv &
3125 & , GRAUPELNC=grid%graupelnc, GRAUPELNCV=grid%graupelncv &
3126 & , W=grid%em_w_2, Z=grid%em_z, HT=grid%ht &
3127 & , MP_RESTART_STATE=grid%mp_restart_state &
3128 & , TBPVS_STATE=grid%tbpvs_state & ! etampnew
3129 & , TBPVS0_STATE=grid%tbpvs0_state & ! etampnew
3130 & , QV_CURR=moist(ims,kms,jms,P_QV), F_QV=F_QV &
3131 & , QC_CURR=moist(ims,kms,jms,P_QC), F_QC=F_QC &
3132 & , QR_CURR=moist(ims,kms,jms,P_QR), F_QR=F_QR &
3133 & , QI_CURR=moist(ims,kms,jms,P_QI), F_QI=F_QI &
3134 & , QS_CURR=moist(ims,kms,jms,P_QS), F_QS=F_QS &
3135 & , QG_CURR=moist(ims,kms,jms,P_QG), F_QG=F_QG &
3136 & , QNDROP_CURR=scalar(ims,kms,jms,P_QNDROP), F_QNDROP=F_QNDROP &
3137 & , QNI_CURR=scalar(ims,kms,jms,P_QNI), F_QNI=F_QNI &
3138 & , QT_CURR=scalar(ims,kms,jms,P_QT), F_QT=F_QT &
3139 )
3140 BENCH_END(micro_driver_tim)
3141
3142 #if 0
3143 BENCH_START(microswap_2)
3144 ! for load balancing; communication to redistribute the points
3145 IF ( config_flags%mp_physics .EQ. ETAMPNEW ) THEN
3146 #include "SWAP_ETAMP_NEW.inc"
3147 ELSE IF ( config_flags%mp_physics .EQ. WSM3SCHEME ) THEN
3148 #include "SWAP_WSM3.inc"
3149 ENDIF
3150 BENCH_END(microswap_2)
3151 #endif
3152
3153 CALL wrf_debug ( 200 , ' call moist_physics_finish' )
3154 BENCH_START(moist_phys_end_tim)
3155 !$OMP PARALLEL DO &
3156 !$OMP PRIVATE ( ij, its, ite, jts, jte )
3157
3158 scalar_tile_loop_1b: DO ij = 1 , grid%num_tiles
3159
3160 IF ( config_flags%periodic_x ) THEN
3161 its = max(grid%i_start(ij),ids)
3162 ite = min(grid%i_end(ij),ide-1)
3163 ELSE
3164 its = max(grid%i_start(ij),ids+sz)
3165 ite = min(grid%i_end(ij),ide-1-sz)
3166 ENDIF
3167 jts = max(grid%j_start(ij),jds+sz)
3168 jte = min(grid%j_end(ij),jde-1-sz)
3169
3170 CALL microphysics_zero_out ( &
3171 moist , num_moist , config_flags , &
3172 ids, ide, jds, jde, kds, kde, &
3173 ims, ime, jms, jme, kms, kme, &
3174 its, ite, jts, jte, &
3175 k_start , k_end )
3176
3177 CALL moist_physics_finish_em( grid%em_t_2, grid%em_t_1, t0, grid%em_muts, th_phy, &
3178 grid%h_diabatic, dtm, config_flags, &
3179 ids, ide, jds, jde, kds, kde, &
3180 ims, ime, jms, jme, kms, kme, &
3181 its, ite, jts, jte, &
3182 k_start , k_end )
3183
3184 CALL calc_p_rho_phi( moist, num_3d_m, &
3185 grid%em_al, grid%em_alb, grid%em_mu_2, grid%em_muts, &
3186 grid%em_ph_2, grid%em_p, grid%em_pb, grid%em_t_2, &
3187 p0, t0, grid%em_znu, grid%em_dnw, grid%em_rdnw, &
3188 grid%em_rdn, config_flags%non_hydrostatic, &
3189 ids, ide, jds, jde, kds, kde, &
3190 ims, ime, jms, jme, kms, kme, &
3191 its, ite, jts, jte, &
3192 k_start , k_end )
3193
3194 END DO scalar_tile_loop_1b
3195 !$OMP END PARALLEL DO
3196 BENCH_END(moist_phys_end_tim)
3197
3198 ENDIF
3199
!  Hydrostatic option only: w is not a prognostic variable here, so
!  diagnose it from the geopotential tendency and the horizontal winds
!  (diagnose_w).  The halo/period exchanges are done first so that the
!  u,v fields read on tile edges are valid across patch boundaries
!  (NOTE(review): inferred from the HALO_EM_HYDRO_UV include names —
!  confirm against the generated halo stubs).
3200    IF (.not. config_flags%non_hydrostatic) THEN
3201 #ifdef DM_PARALLEL
3202 #    include "HALO_EM_HYDRO_UV.inc"
3203 #    include "PERIOD_EM_HYDRO_UV.inc"
3204 #endif
3205 !$OMP PARALLEL DO   &
3206 !$OMP PRIVATE ( ij )
3207      DO ij = 1 , grid%num_tiles
3208        CALL diagnose_w( ph_tend, grid%em_ph_2, grid%em_ph_1, grid%em_w_2, grid%em_muts, dt_rk, &
3209                         grid%em_u_2, grid%em_v_2, grid%ht,                 &
3210                         grid%cf1, grid%cf2, grid%cf3, grid%rdx, grid%rdy, grid%msft, &
3211                         ids, ide, jds, jde, kds, kde,     &
3212                         ims, ime, jms, jme, kms, kme,     &
3213                         grid%i_start(ij), grid%i_end(ij), &
3214                         grid%j_start(ij), grid%j_end(ij), &
3215                         k_start    , k_end               )
3216 
3217      END DO
3218 !$OMP END PARALLEL DO
3219    ENDIF
3220
!  Placeholder tile loop: the body is intentionally empty — tiled
!  chemistry is invoked from solve_interface via chem_driver, not here.
!  NOTE(review): the debug string says 'scalar_tile_loop_2' although this
!  is chem_tile_loop_3; the runtime literal is left untouched.
3221    chem_tile_loop_3: DO ij = 1 , grid%num_tiles
3222 
3223      CALL wrf_debug ( 200 , ' call scalar_tile_loop_2' )
3224 
3225      IF ( num_3d_c >= PARAM_FIRST_SCALAR ) then
3226 
3227 !
3228 !  tiled chemistry not here, it is called from solve_interface, and found in chem_driver
3229 !
3230 
3231      END IF
3232 
3233    END DO chem_tile_loop_3
3234
3235
3236 ! We're finished except for boundary condition (and patch) update
3237
3238 ! Boundary condition time (or communication time). At this time, we have
3239 ! implemented periodic and symmetric physical boundary conditions.
3240
3241 ! b.c. routine for data within patch.
3242
3243 ! we need to do both time levels of
3244 ! data because the time filter only works in the physical solution space.
3245
3246 ! First, do patch communications for boundary conditions (periodicity)
3247
3248 !-----------------------------------------------------------
3249 ! Stencils for patch communications (WCS, 29 June 2001)
3250 !
3251 ! here's where we need a wide comm stencil - these are the
3252 ! uncoupled variables so are used for high order calc in
3253 !  advection and mixing routines.
3254 !
3255 ! * * * * *
3256 ! * * * * * * * * *
3257 ! * + * * + * * * + * *
3258 ! * * * * * * * * *
3259 ! * * * * *
3260 !
3261 ! grid%em_u_1 x
3262 ! grid%em_u_2 x
3263 ! grid%em_v_1 x
3264 ! grid%em_v_2 x
3265 ! grid%em_w_1 x
3266 ! grid%em_w_2 x
3267 ! grid%em_t_1 x
3268 ! grid%em_t_2 x
3269 ! grid%em_ph_1 x
3270 ! grid%em_ph_2 x
3271 ! grid%em_tke_1 x
3272 ! grid%em_tke_2 x
3273 !
3274 ! 2D variables
3275 ! grid%em_mu_1 x
3276 ! grid%em_mu_2 x
3277 !
3278 ! 4D variables
3279 ! moist x
3280 ! chem x
3281 ! scalar x
3282 !----------------------------------------------------------
3283
3284
3285 #ifdef DM_PARALLEL
3286 IF ( config_flags%h_mom_adv_order <= 4 ) THEN
3287 # include "HALO_EM_D3_3.inc"
3288 ELSE IF ( config_flags%h_mom_adv_order <= 6 ) THEN
3289 # include "HALO_EM_D3_5.inc"
3290 ELSE
3291 WRITE(wrf_err_message,*)'solve_em: invalid h_mom_adv_order = ',config_flags%h_mom_adv_order
3292 CALL wrf_error_fatal(TRIM(wrf_err_message))
3293 ENDIF
3294 # include "PERIOD_BDY_EM_D3.inc"
3295 # include "PERIOD_BDY_EM_MOIST.inc"
3296 # include "PERIOD_BDY_EM_CHEM.inc"
3297 # include "PERIOD_BDY_EM_SCALAR.inc"
3298 #endif
3299
3300 ! now set physical b.c on a patch
3301
3302 BENCH_START(bc_2d_tim)
3303 !$OMP PARALLEL DO &
3304 !$OMP PRIVATE ( ij )
3305
3306 tile_bc_loop_2: DO ij = 1 , grid%num_tiles
3307
3308
3309 CALL wrf_debug ( 200 , ' call set_phys_bc_dry_2' )
3310
3311 CALL set_phys_bc_dry_2( config_flags, &
3312 grid%em_u_1, grid%em_u_2, grid%em_v_1, grid%em_v_2, grid%em_w_1, grid%em_w_2, &
3313 grid%em_t_1, grid%em_t_2, grid%em_ph_1, grid%em_ph_2, grid%em_mu_1, grid%em_mu_2, &
3314 ids, ide, jds, jde, kds, kde, &
3315 ims, ime, jms, jme, kms, kme, &
3316 ips, ipe, jps, jpe, kps, kpe, &
3317 grid%i_start(ij), grid%i_end(ij), &
3318 grid%j_start(ij), grid%j_end(ij), &
3319 k_start , k_end )
3320
3321 CALL set_physical_bc3d( grid%em_tke_1, 'p', config_flags, &
3322 ids, ide, jds, jde, kds, kde, &
3323 ims, ime, jms, jme, kms, kme, &
3324 ips, ipe, jps, jpe, kps, kpe, &
3325 grid%i_start(ij), grid%i_end(ij), &
3326 grid%j_start(ij), grid%j_end(ij), &
3327 k_start , k_end-1 )
3328 CALL set_physical_bc3d( grid%em_tke_2 , 'p', config_flags, &
3329 ids, ide, jds, jde, kds, kde, &
3330 ims, ime, jms, jme, kms, kme, &
3331 ips, ipe, jps, jpe, kps, kpe, &
3332 grid%i_start(ij), grid%i_end(ij), &
3333 grid%j_start(ij), grid%j_end(ij), &
3334 k_start , k_end )
3335
3336 moisture_loop_bdy_2 : DO im = PARAM_FIRST_SCALAR , num_3d_m
3337
3338 CALL set_physical_bc3d( moist(ims,kms,jms,im), 'p', &
3339 config_flags, &
3340 ids, ide, jds, jde, kds, kde, &
3341 ims, ime, jms, jme, kms, kme, &
3342 ips, ipe, jps, jpe, kps, kpe, &
3343 grid%i_start(ij), grid%i_end(ij), &
3344 grid%j_start(ij), grid%j_end(ij), &
3345 k_start , k_end )
3346
3347 END DO moisture_loop_bdy_2
3348
3349 chem_species_bdy_loop_2 : DO ic = PARAM_FIRST_SCALAR , num_3d_c
3350
3351 CALL set_physical_bc3d( chem(ims,kms,jms,ic) , 'p', config_flags, &
3352 ids, ide, jds, jde, kds, kde, &
3353 ims, ime, jms, jme, kms, kme, &
3354 ips, ipe, jps, jpe, kps, kpe, &
3355 grid%i_start(ij), grid%i_end(ij), &
3356 grid%j_start(ij), grid%j_end(ij), &
3357 k_start , k_end )
3358
3359 END DO chem_species_bdy_loop_2
3360
3361 scalar_species_bdy_loop_2 : DO is = PARAM_FIRST_SCALAR , num_3d_s
3362
3363 CALL set_physical_bc3d( scalar(ims,kms,jms,is) , 'p', config_flags, &
3364 ids, ide, jds, jde, kds, kde, &
3365 ims, ime, jms, jme, kms, kme, &
3366 ips, ipe, jps, jpe, kps, kpe, &
3367 grid%i_start(ij), grid%i_end(ij), &
3368 grid%j_start(ij), grid%j_end(ij), &
3369 k_start , k_end )
3370
3371 END DO scalar_species_bdy_loop_2
3372
3373 END DO tile_bc_loop_2
3374 !$OMP END PARALLEL DO
3375 BENCH_END(bc_2d_tim)
3376
3377 IF( config_flags%specified .or. config_flags%nested ) THEN
3378 grid%dtbc = grid%dtbc + grid%dt
3379 ENDIF
3380
3381 ! calculate some model diagnostics.
3382
3383 CALL wrf_debug ( 200 , ' call diagnostic_driver' )
3384
3385 CALL diagnostic_output_calc( &
3386 & DPSDT=grid%dpsdt ,DMUDT=grid%dmudt &
3387 & ,P_PHY=p_phy ,PK1M=grid%pk1m &
3388 & ,MU_2=grid%em_mu_2 ,MU_2M=grid%mu_2m &
3389 & ,U=grid%em_u_2 ,V=grid%em_v_2 &
3390 & ,RAINCV=grid%raincv ,RAINNCV=grid%rainncv &
3391 & ,RAINC=grid%rainc ,RAINNC=grid%rainnc &
3392 & ,HFX=grid%hfx ,SFCEVP=grid%sfcevp ,LH=grid%lh &
3393 & ,DT=grid%dt ,SBW=config_flags%spec_bdy_width &
3394 & ,XTIME=grid%xtime &
3395 ! Selection flag
3396 & ,DIAG_PRINT=config_flags%diag_print &
3397 ! Dimension arguments
3398 & ,IDS=ids,IDE=ide, JDS=jds,JDE=jde, KDS=kds,KDE=kde &
3399 & ,IMS=ims,IME=ime, JMS=jms,JME=jme, KMS=kms,KME=kme &
3400 & ,IPS=ips,IPE=ipe, JPS=jps,JPE=jpe, KPS=kps,KPE=kpe &
3401 & ,I_START=grid%i_start,I_END=min(grid%i_end, ide-1) &
3402 & ,J_START=grid%j_start,J_END=min(grid%j_end, jde-1) &
3403 & ,KTS=k_start, KTE=min(k_end,kde-1) &
3404 & ,NUM_TILES=grid%num_tiles &
3405 & )
3406
3407 #ifdef DM_PARALLEL
3408 !-----------------------------------------------------------------------
3409 ! see above
3410 !--------------------------------------------------------------
3411 CALL wrf_debug ( 200 , ' call HALO_RK_E' )
3412 IF ( config_flags%h_mom_adv_order <= 4 ) THEN
3413 # include "HALO_EM_E_3.inc"
3414 ELSE IF ( config_flags%h_mom_adv_order <= 6 ) THEN
3415 # include "HALO_EM_E_5.inc"
3416 ELSE
3417 WRITE(wrf_err_message,*)'solve_em: invalid h_mom_adv_order = ',config_flags%h_mom_adv_order
3418 CALL wrf_error_fatal(TRIM(wrf_err_message))
3419 ENDIF
3420 #endif
3421
3422 #ifdef DM_PARALLEL
3423 if ( num_moist >= PARAM_FIRST_SCALAR ) then
3424 !-----------------------------------------------------------------------
3425 ! see above
3426 !--------------------------------------------------------------
3427 CALL wrf_debug ( 200 , ' call HALO_RK_MOIST' )
3428 IF ( config_flags%h_mom_adv_order <= 4 ) THEN
3429 # include "HALO_EM_MOIST_E_3.inc"
3430 ELSE IF ( config_flags%h_mom_adv_order <= 6 ) THEN
3431 # include "HALO_EM_MOIST_E_5.inc"
3432 ELSE
3433 WRITE(wrf_err_message,*)'solve_em: invalid h_mom_adv_order = ',config_flags%h_mom_adv_order
3434 CALL wrf_error_fatal(TRIM(wrf_err_message))
3435 ENDIF
3436 endif
3437 if ( num_chem >= PARAM_FIRST_SCALAR ) then
3438 !-----------------------------------------------------------------------
3439 ! see above
3440 !--------------------------------------------------------------
3441 CALL wrf_debug ( 200 , ' call HALO_RK_CHEM' )
3442 IF ( config_flags%h_mom_adv_order <= 4 ) THEN
3443 # include "HALO_EM_CHEM_E_3.inc"
3444 ELSE IF ( config_flags%h_mom_adv_order <= 6 ) THEN
3445 # include "HALO_EM_CHEM_E_5.inc"
3446 ELSE
3447 WRITE(wrf_err_message,*)'solve_em: invalid h_mom_adv_order = ',config_flags%h_mom_adv_order
3448 CALL wrf_error_fatal(TRIM(wrf_err_message))
3449 ENDIF
3450 endif
3451 if ( num_scalar >= PARAM_FIRST_SCALAR ) then
3452 !-----------------------------------------------------------------------
3453 ! see above
3454 !--------------------------------------------------------------
3455 CALL wrf_debug ( 200 , ' call HALO_RK_SCALAR' )
3456 IF ( config_flags%h_mom_adv_order <= 4 ) THEN
3457 # include "HALO_EM_SCALAR_E_3.inc"
3458 ELSE IF ( config_flags%h_mom_adv_order <= 6 ) THEN
3459 # include "HALO_EM_SCALAR_E_5.inc"
3460 ELSE
3461 WRITE(wrf_err_message,*)'solve_em: invalid h_mom_adv_order = ',config_flags%h_mom_adv_order
3462 CALL wrf_error_fatal(TRIM(wrf_err_message))
3463 ENDIF
3464 endif
3465 #endif
3466
3467 CALL wrf_debug ( 200 , ' call end of solve_em' )
3468
3469 ! Finish timers if compiled with -DBENCH.
3470 #include <bench_solve_em_end.h>
3471
3472 RETURN
3473
3474 END SUBROUTINE solve_em
3475