This source file includes the following definitions.
- _compute_minimum_sleep
- sim_idle_ms_sleep
- sim_os_set_thread_priority
- sim_os_set_thread_priority
- sim_os_msec
- sim_os_sleep
- sim_timer_exit
- sim_os_ms_sleep_init
- sim_os_ms_sleep
- sim_os_msec
- sim_os_sleep
- sim_os_ms_sleep_init
- sim_os_ms_sleep
- sim_timespec_diff
- sim_rtcn_init_all
- sim_rtcn_init
- sim_rtcn_init_unit
- sim_rtcn_calb
- sim_timer_init
- sim_show_timers
- sim_show_clock_queues
- sim_timer_clr_catchup
- sim_timer_set_catchup
- sim_timer_show_catchup
- sim_timer_tick_svc
- win32_usleep
- sim_usleep
- _timespec_to_double
- _double_to_timespec
- sim_timer_clock_tick_svc
- _rtcn_configure_calibrated_clock
- sim_timer_clock_reset
- sim_start_timer_services
- sim_stop_timer_services
- sim_timer_inst_per_sec
- sim_timer_activate
- sim_timer_activate_after
- sim_register_clock_unit_tmr
- _sim_coschedule_cancel
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53 #include "sim_defs.h"
54 #include <ctype.h>
55 #include <math.h>
56
57 #if defined(__MACH__) && defined(__APPLE__) && \
58 ( defined(__PPC__) || defined(_ARCH_PPC) )
59 # include <mach/clock.h>
60 # include <mach/mach.h>
61 # ifdef MACOSXPPC
62 # undef MACOSXPPC
63 # endif
64 # define MACOSXPPC 1
65 #endif
66
67
68 #define SIM_INTERNAL_CLK (SIM_NTIMERS+(1<<30))
69 #define SIM_INTERNAL_UNIT sim_internal_timer_unit
70 #ifndef MIN
71 # define MIN(a,b) (((a) < (b)) ? (a) : (b))
72 #endif
73 #ifndef MAX
74 # define MAX(a,b) (((a) > (b)) ? (a) : (b))
75 #endif
76
77 uint32 sim_idle_ms_sleep (unsigned int msec);
78
79 static int32 sim_calb_tmr = -1;
80 static int32 sim_calb_tmr_last = -1;
81 static double sim_inst_per_sec_last = 0;
82
83 static uint32 sim_idle_rate_ms = 0;
84 static uint32 sim_os_sleep_min_ms = 0;
85 static uint32 sim_os_sleep_inc_ms = 0;
86 static uint32 sim_os_clock_resoluton_ms = 0;
87 static uint32 sim_os_tick_hz = 0;
88 static uint32 sim_idle_calib_pct = 0;
89 static UNIT *sim_clock_unit[SIM_NTIMERS+1] = {NULL};
90 UNIT * volatile sim_clock_cosched_queue[SIM_NTIMERS+1] = {NULL};
91 static int32 sim_cosched_interval[SIM_NTIMERS+1];
92 static t_bool sim_catchup_ticks = FALSE;
93
94 #define sleep1Samples 10
95
96 static uint32 _compute_minimum_sleep (void)
97 {
98 uint32 i, tot, tim;
99
100 sim_os_set_thread_priority (PRIORITY_ABOVE_NORMAL);
101 sim_idle_ms_sleep (1);
102 for (i = 0, tot = 0; i < sleep1Samples; i++)
103 tot += sim_idle_ms_sleep (1);
104 tim = tot / sleep1Samples;
105 sim_os_sleep_min_ms = tim;
106 sim_idle_ms_sleep (1);
107 for (i = 0, tot = 0; i < sleep1Samples; i++)
108 tot += sim_idle_ms_sleep (sim_os_sleep_min_ms + 1);
109 tim = tot / sleep1Samples;
110 sim_os_sleep_inc_ms = tim - sim_os_sleep_min_ms;
111 sim_os_set_thread_priority (PRIORITY_NORMAL);
112 return sim_os_sleep_min_ms;
113 }
114
/* Sleep for msec milliseconds while the simulator is idle.
   Thin wrapper over the OS-specific sleep primitive.
   Returns the actual elapsed time in milliseconds as measured by the OS. */
uint32 sim_idle_ms_sleep (unsigned int msec)
{
return sim_os_ms_sleep (msec);
}
119
#if defined(_WIN32)

/* Adjust the calling thread's scheduling priority.
   below_normal_above: -1 = below normal, 0 = normal, +1 = above normal.
   Returns SCPE_ARG for an out-of-range argument, otherwise SCPE_OK. */
t_stat sim_os_set_thread_priority (int below_normal_above)
{
/* Index 0/1/2 corresponds to argument -1/0/+1 */
const static int val[3] = {THREAD_PRIORITY_BELOW_NORMAL, THREAD_PRIORITY_NORMAL, THREAD_PRIORITY_ABOVE_NORMAL};

if ((below_normal_above < -1) || (below_normal_above > 1))
    return SCPE_ARG;
SetThreadPriority (GetCurrentThread(), val[1 + below_normal_above]);
return SCPE_OK;
}
#else

/* POSIX flavor: map -1/0/+1 onto the minimum/middle/maximum static
   priority for the thread's current scheduling policy.
   On GNU/Hurd this is a no-op (always SCPE_OK); pthread priorities
   are not usable there. */
t_stat sim_os_set_thread_priority (int below_normal_above)
{
int sched_policy, min_prio, max_prio;
struct sched_param sched_priority;

# ifndef __gnu_hurd__
if ((below_normal_above < -1) || (below_normal_above > 1))
    return SCPE_ARG;

pthread_getschedparam (pthread_self(), &sched_policy, &sched_priority);
min_prio = sched_get_priority_min(sched_policy);
max_prio = sched_get_priority_max(sched_policy);
switch (below_normal_above) {
    case PRIORITY_BELOW_NORMAL:
        sched_priority.sched_priority = min_prio;
        break;
    case PRIORITY_NORMAL:
        sched_priority.sched_priority = (max_prio + min_prio) / 2;
        break;
    case PRIORITY_ABOVE_NORMAL:
        sched_priority.sched_priority = max_prio;
        break;
    }
/* NOTE(review): setschedparam result is ignored; failures (e.g. no
   privilege for the policy) silently leave the priority unchanged. */
pthread_setschedparam (pthread_self(), sched_policy, &sched_priority);
# endif
return SCPE_OK;
}
#endif
164
165
166
#if defined (_WIN32)

/* Windows timer support */

const t_bool rtc_avail = TRUE;

/* Millisecond timestamp from the OS (timeGetTime wraps ~every 49.7 days). */
uint32 sim_os_msec (void)
{
return timeGetTime ();
}

/* Sleep for a whole number of seconds. */
void sim_os_sleep (unsigned int sec)
{
Sleep (sec * 1000);
return;
}

/* atexit() handler: restore the multimedia timer resolution that
   sim_os_ms_sleep_init() raised with timeBeginPeriod(). */
void sim_timer_exit (void)
{
timeEndPeriod (sim_idle_rate_ms);
return;
}

/* Raise the system timer resolution to its minimum supported period,
   register cleanup, and measure the minimum achievable sleep time.
   Returns the minimum sleep time in milliseconds, or 0 on failure. */
uint32 sim_os_ms_sleep_init (void)
{
TIMECAPS timers;

if (timeGetDevCaps (&timers, sizeof (timers)) != TIMERR_NOERROR)
    return 0;
if (timers.wPeriodMin == 0)
    return 0;
if (timeBeginPeriod (timers.wPeriodMin) != TIMERR_NOERROR)
    return 0;
atexit (sim_timer_exit);

return _compute_minimum_sleep ();
}

/* Sleep for msec milliseconds; returns the measured elapsed time. */
uint32 sim_os_ms_sleep (unsigned int msec)
{
uint32 stime = sim_os_msec();

Sleep (msec);
return sim_os_msec () - stime;
}

#else

/* POSIX timer support */

# include <time.h>
# include <sys/time.h>
# include <signal.h>
# include <unistd.h>
# define NANOS_PER_MILLI 1000000
# define MILLIS_PER_SEC 1000

const t_bool rtc_avail = TRUE;

/* Millisecond timestamp derived from gettimeofday().
   A gettimeofday() failure is treated as fatal: optionally raises
   SIGUSR2 (to trigger a backtrace handler) and then aborts. */
uint32 sim_os_msec (void)
{
struct timeval cur;
struct timezone foo;
int st1ret;
uint32 msec;

st1ret = gettimeofday (&cur, &foo);
if (st1ret != 0)
    {
    fprintf (stderr, "\rFATAL: gettimeofday failure! Aborting at %s[%s:%d]\r\n",
             __func__, __FILE__, __LINE__);
# if defined(USE_BACKTRACE)
#  ifdef SIGUSR2
    (void)raise(SIGUSR2);   /* let a backtrace handler run before abort */
#  endif
# endif
    abort();
    }
msec = (((uint32) cur.tv_sec) * 1000UL) + (((uint32) cur.tv_usec) / 1000UL);
return msec;
}

/* Sleep for a whole number of seconds. */
void sim_os_sleep (unsigned int sec)
{
sleep (sec);
return;
}

/* No resolution setup needed on POSIX; just measure the minimum sleep. */
uint32 sim_os_ms_sleep_init (void)
{
return _compute_minimum_sleep ();
}

/* Sleep via nanosleep(); returns the measured elapsed milliseconds. */
uint32 sim_os_ms_sleep (unsigned int milliseconds)
{
uint32 stime = sim_os_msec ();
struct timespec treq;

treq.tv_sec = milliseconds / MILLIS_PER_SEC;
treq.tv_nsec = (milliseconds % MILLIS_PER_SEC) * NANOS_PER_MILLI;
(void) nanosleep (&treq, NULL);
return sim_os_msec () - stime;
}

#endif
273
274
/* Compute *diff = *min - *sub for struct timespec values.
   The caller is expected to pass min >= sub; the result is normalized
   so that diff->tv_nsec ends up in [0, 999999999]. */
void
sim_timespec_diff (struct timespec *diff, const struct timespec *min, struct timespec *sub)
{
/* Start with the minuend */
*diff = *min;

/* Borrow whole seconds until the nanosecond subtraction can't underflow */
while (sub->tv_nsec > diff->tv_nsec) {
    --diff->tv_sec;
    diff->tv_nsec += 1000000000;
    }
diff->tv_nsec -= sub->tv_nsec;
diff->tv_sec -= sub->tv_sec;

/* Normalize: tv_nsec must be strictly less than one second.
   BUGFIX: the condition was '> 1000000000', which left a result of
   exactly 1000000000 nanoseconds unnormalized. */
while (diff->tv_nsec >= 1000000000) {
    ++diff->tv_sec;
    diff->tv_nsec -= 1000000000;
    }
}
293
294
295
296 static double _timespec_to_double (struct timespec *time);
297 static void _double_to_timespec (struct timespec *time, double dtime);
298 static void _rtcn_configure_calibrated_clock (int32 newtmr);
299 static void _sim_coschedule_cancel(UNIT *uptr);
300
301
302
303 static int32 rtc_ticks[SIM_NTIMERS+1] = { 0 };
304 static uint32 rtc_hz[SIM_NTIMERS+1] = { 0 };
305 static uint32 rtc_rtime[SIM_NTIMERS+1] = { 0 };
306 static uint32 rtc_vtime[SIM_NTIMERS+1] = { 0 };
307 static double rtc_gtime[SIM_NTIMERS+1] = { 0 };
308 static uint32 rtc_nxintv[SIM_NTIMERS+1] = { 0 };
309 static int32 rtc_based[SIM_NTIMERS+1] = { 0 };
310 static int32 rtc_currd[SIM_NTIMERS+1] = { 0 };
311 static int32 rtc_initd[SIM_NTIMERS+1] = { 0 };
312 static uint32 rtc_elapsed[SIM_NTIMERS+1] = { 0 };
313 static uint32 rtc_calibrations[SIM_NTIMERS+1] = { 0 };
314 static double rtc_clock_skew_max[SIM_NTIMERS+1] = { 0 };
315 static double rtc_clock_start_gtime[SIM_NTIMERS+1] = { 0 };
316 static double rtc_clock_tick_size[SIM_NTIMERS+1] = { 0 };
317 static uint32 rtc_calib_initializations[SIM_NTIMERS+1] = { 0 };
318 static double rtc_calib_tick_time[SIM_NTIMERS+1] = { 0 };
319 static double rtc_calib_tick_time_tot[SIM_NTIMERS+1] = { 0 };
320 static uint32 rtc_calib_ticks_acked[SIM_NTIMERS+1] = { 0 };
321 static uint32 rtc_calib_ticks_acked_tot[SIM_NTIMERS+1] = { 0 };
322 static uint32 rtc_clock_ticks[SIM_NTIMERS+1] = { 0 };
323 static uint32 rtc_clock_ticks_tot[SIM_NTIMERS+1] = { 0 };
324 static double rtc_clock_catchup_base_time[SIM_NTIMERS+1] = { 0 };
325 static uint32 rtc_clock_catchup_ticks[SIM_NTIMERS+1] = { 0 };
326 static uint32 rtc_clock_catchup_ticks_tot[SIM_NTIMERS+1] = { 0 };
327 static t_bool rtc_clock_catchup_pending[SIM_NTIMERS+1] = { 0 };
328 static t_bool rtc_clock_catchup_eligible[SIM_NTIMERS+1] = { 0 };
329 static uint32 rtc_clock_time_idled[SIM_NTIMERS+1] = { 0 };
330 static uint32 rtc_clock_calib_skip_idle[SIM_NTIMERS+1] = { 0 };
331 static uint32 rtc_clock_calib_gap2big[SIM_NTIMERS+1] = { 0 };
332 static uint32 rtc_clock_calib_backwards[SIM_NTIMERS+1] = { 0 };
333
334 UNIT sim_timer_units[SIM_NTIMERS+1];
335
336 UNIT sim_internal_timer_unit;
337 UNIT sim_throttle_unit;
338
339 t_stat sim_throt_svc (UNIT *uptr);
340 t_stat sim_timer_tick_svc (UNIT *uptr);
341
342 #define DBG_TRC 0x008
343 #define DBG_CAL 0x010
344 #define DBG_TIM 0x020
345 #define DBG_ACK 0x080
346 DEBTAB sim_timer_debug[] = {
347 {"TRACE", DBG_TRC, "Trace routine calls"},
348 {"IACK", DBG_ACK, "interrupt acknowledgement activities"},
349 {"CALIB", DBG_CAL, "Calibration activities"},
350 {"TIME", DBG_TIM, "Activation and scheduling activities"},
351 {0}
352 };
353
354
355 extern DEVICE sim_timer_dev;
356 extern DEVICE sim_throttle_dev;
357
358 void sim_rtcn_init_all (void)
359 {
360 int32 tmr;
361
362 for (tmr = 0; tmr <= SIM_NTIMERS; tmr++)
363 if (rtc_initd[tmr] != 0)
364 sim_rtcn_init (rtc_initd[tmr], tmr);
365 return;
366 }
367
/* Initialize calibrated timer tmr with an initial tick interval of
   time instructions, without naming a specific clock unit.
   Returns the instructions-per-tick value to use for scheduling. */
int32 sim_rtcn_init (int32 time, int32 tmr)
{
return sim_rtcn_init_unit (NULL, time, tmr);
}
372
/* Initialize (or reinitialize) calibrated timer tmr.
   uptr - clock unit to associate with the timer (NULL uses whatever
          unit is already registered for tmr)
   time - caller's initial instructions-per-tick estimate (0 -> 1)
   tmr  - timer index, or SIM_INTERNAL_CLK for the internal timer
   Returns the instructions-per-tick value callers should schedule with. */
int32 sim_rtcn_init_unit (UNIT *uptr, int32 time, int32 tmr)
{
if (time == 0)
    time = 1;
if (tmr == SIM_INTERNAL_CLK)
    tmr = SIM_NTIMERS;
else {
    if ((tmr < 0) || (tmr >= SIM_NTIMERS))
        return time;                /* invalid timer: hand the estimate back */
    }

/* If this timer has already been calibrated, prefer the previously
   determined tick interval over the caller's estimate. */
if (rtc_currd[tmr])
    time = rtc_currd[tmr];
if (!uptr)
    uptr = sim_clock_unit[tmr];
sim_debug (DBG_CAL, &sim_timer_dev, "_sim_rtcn_init_unit(unit=%s, time=%d, tmr=%d)\n", sim_uname(uptr), time, tmr);
if (uptr) {
    if (!sim_clock_unit[tmr])
        sim_register_clock_unit_tmr (uptr, tmr);
    }
/* Reset all per-run calibration state, folding the per-run counters
   into the lifetime totals before clearing them. */
rtc_clock_start_gtime[tmr] = sim_gtime();
rtc_rtime[tmr] = sim_os_msec ();
rtc_vtime[tmr] = rtc_rtime[tmr];
rtc_nxintv[tmr] = 1000;
rtc_ticks[tmr] = 0;
rtc_hz[tmr] = 0;
rtc_based[tmr] = time;
rtc_currd[tmr] = time;
rtc_initd[tmr] = time;
rtc_elapsed[tmr] = 0;
rtc_calibrations[tmr] = 0;
rtc_clock_ticks_tot[tmr] += rtc_clock_ticks[tmr];
rtc_clock_ticks[tmr] = 0;
rtc_calib_tick_time_tot[tmr] += rtc_calib_tick_time[tmr];
rtc_calib_tick_time[tmr] = 0;
rtc_clock_catchup_pending[tmr] = FALSE;
rtc_clock_catchup_eligible[tmr] = FALSE;
rtc_clock_catchup_ticks_tot[tmr] += rtc_clock_catchup_ticks[tmr];
rtc_clock_catchup_ticks[tmr] = 0;
rtc_calib_ticks_acked_tot[tmr] += rtc_calib_ticks_acked[tmr];
rtc_calib_ticks_acked[tmr] = 0;
++rtc_calib_initializations[tmr];
_rtcn_configure_calibrated_clock (tmr);
return time;
}
422
/* Per-tick calibration entry point for timer tmr running at ticksper Hz.
   Handles tick-rate changes, accounts tick time for timers without a
   registered clock unit, and acknowledges pending catchup ticks.
   Returns the current instructions-per-tick value for rescheduling. */
int32 sim_rtcn_calb (uint32 ticksper, int32 tmr)
{
if (tmr == SIM_INTERNAL_CLK)
    tmr = SIM_NTIMERS;
else {
    if ((tmr < 0) || (tmr >= SIM_NTIMERS))
        return 10000;               /* invalid timer: arbitrary safe interval */
    }
if (rtc_hz[tmr] != ticksper) {      /* tick rate changed? reconfigure */
    rtc_hz[tmr] = ticksper;
    rtc_clock_tick_size[tmr] = 1.0/ticksper;
    _rtcn_configure_calibrated_clock (tmr);
    rtc_currd[tmr] = (int32)(sim_timer_inst_per_sec()/ticksper);
    }
if (sim_clock_unit[tmr] == NULL) {  /* no clock unit: count the tick here */
    rtc_clock_ticks[tmr] += 1;
    rtc_calib_tick_time[tmr] += rtc_clock_tick_size[tmr];
    }
if (rtc_clock_catchup_pending[tmr]) {   /* a catchup tick was scheduled */
    ++rtc_clock_catchup_ticks[tmr];
    rtc_clock_catchup_pending[tmr] = FALSE;
    }
return rtc_currd[tmr];
}
448
449
450
/* One-time timer subsystem initialization: set up the hidden per-timer
   tick units, register the TIMER device and internal clock unit, and
   measure the host clock's sleep rate and reporting resolution.
   Returns TRUE when the host provides a usable (non-zero) idle sleep rate. */
t_bool sim_timer_init (void)
{
int tmr;
uint32 clock_start, clock_last, clock_now;

sim_debug (DBG_TRC, &sim_timer_dev, "sim_timer_init()\n");
for (tmr=0; tmr<=SIM_NTIMERS; tmr++) {
    sim_timer_units[tmr].action = &sim_timer_tick_svc;
    sim_timer_units[tmr].flags = UNIT_DIS | UNIT_IDLE;
    }
SIM_INTERNAL_UNIT.flags = UNIT_DIS | UNIT_IDLE;
sim_register_internal_device (&sim_timer_dev);
sim_register_clock_unit_tmr (&SIM_INTERNAL_UNIT, SIM_INTERNAL_CLK);
sim_idle_rate_ms = sim_os_ms_sleep_init ();

/* Sample sim_os_msec() for ~100ms to find the host clock's actual
   reporting resolution (the smallest observed non-zero step). */
clock_last = clock_start = sim_os_msec ();
sim_os_clock_resoluton_ms = 1000;
do {
    uint32 clock_diff;

    clock_now = sim_os_msec ();
    clock_diff = clock_now - clock_last;
    if ((clock_diff > 0) && (clock_diff < sim_os_clock_resoluton_ms))
        sim_os_clock_resoluton_ms = clock_diff;
    clock_last = clock_now;
    } while (clock_now < clock_start + 100);
/* NOTE(review): if sim_idle_rate_ms < sim_os_clock_resoluton_ms the
   inner quotient is 0 and this expression divides by zero -- confirm
   the sleep rate can never be smaller than the clock resolution. */
sim_os_tick_hz = 1000/(sim_os_clock_resoluton_ms * (sim_idle_rate_ms/sim_os_clock_resoluton_ms));
return (sim_idle_rate_ms != 0);
}
480
481
/* SHOW TIMERS command processor: for every initialized timer, display
   its rate, tick counts, calibration statistics, catchup state and the
   current wall clock time.  Warns when no clock device is registered. */
t_stat sim_show_timers (FILE* st, DEVICE *dptr, UNIT* uptr, int32 val, CONST char* desc)
{
int tmr, clocks;
struct timespec now;
time_t time_t_now;
/* If calibration is stopped, show stats for the last calibrated timer */
int32 calb_tmr = (sim_calb_tmr == -1) ? sim_calb_tmr_last : sim_calb_tmr;

for (tmr=clocks=0; tmr<=SIM_NTIMERS; ++tmr) {
    if (0 == rtc_initd[tmr])        /* never initialized: nothing to show */
        continue;

    if (sim_clock_unit[tmr]) {
        ++clocks;
        fprintf (st, "%s clock device is %s%s%s\n", sim_name,
                 (tmr == SIM_NTIMERS) ? "Internal Calibrated Timer(" : "",
                 sim_uname(sim_clock_unit[tmr]),
                 (tmr == SIM_NTIMERS) ? ")" : "");
        }

    fprintf (st, "%s%sTimer %d:\n", "", rtc_hz[tmr] ? "Calibrated " : "Uncalibrated ", tmr);
    if (rtc_hz[tmr]) {
        fprintf (st, " Running at: %lu Hz\n", (unsigned long)rtc_hz[tmr]);
        fprintf (st, " Tick Size: %s\n", sim_fmt_secs (rtc_clock_tick_size[tmr]));
        fprintf (st, " Ticks in current second: %lu\n", (unsigned long)rtc_ticks[tmr]);
        }
    fprintf (st, " Seconds Running: %lu (%s)\n", (unsigned long)rtc_elapsed[tmr], sim_fmt_secs ((double)rtc_elapsed[tmr]));
    if (tmr == calb_tmr) {          /* calibration stats only for the calibrating timer */
        fprintf (st, " Calibration Opportunities: %lu\n", (unsigned long)rtc_calibrations[tmr]);
        if (sim_idle_calib_pct)
            fprintf (st, " Calib Skip Idle Thresh %%: %lu\n", (unsigned long)sim_idle_calib_pct);
        if (rtc_clock_calib_skip_idle[tmr])
            fprintf (st, " Calibs Skip While Idle: %lu\n", (unsigned long)rtc_clock_calib_skip_idle[tmr]);
        if (rtc_clock_calib_backwards[tmr])
            fprintf (st, " Calibs Skip Backwards: %lu\n", (unsigned long)rtc_clock_calib_backwards[tmr]);
        if (rtc_clock_calib_gap2big[tmr])
            fprintf (st, " Calibs Skip Gap Too Big: %lu\n", (unsigned long)rtc_clock_calib_gap2big[tmr]);
        }
    if (rtc_gtime[tmr])
        fprintf (st, " Instruction Time: %.0f\n", rtc_gtime[tmr]);
    fprintf (st, " Current Insts Per Tick: %lu\n", (unsigned long)rtc_currd[tmr]);
    fprintf (st, " Initializations: %lu\n", (unsigned long)rtc_calib_initializations[tmr]);
    fprintf (st, " Total Ticks: %lu\n", (unsigned long)rtc_clock_ticks_tot[tmr]+(unsigned long)rtc_clock_ticks[tmr]);
    if (rtc_clock_skew_max[tmr] != 0.0)
        fprintf (st, " Peak Clock Skew: %s%s\n", sim_fmt_secs (fabs(rtc_clock_skew_max[tmr])), (rtc_clock_skew_max[tmr] < 0) ? " fast" : " slow");
    if (rtc_calib_ticks_acked[tmr])
        fprintf (st, " Ticks Acked: %lu\n", (unsigned long)rtc_calib_ticks_acked[tmr]);
    if (rtc_calib_ticks_acked_tot[tmr]+rtc_calib_ticks_acked[tmr] != rtc_calib_ticks_acked[tmr])
        fprintf (st, " Total Ticks Acked: %lu\n", (unsigned long)rtc_calib_ticks_acked_tot[tmr]+(unsigned long)rtc_calib_ticks_acked[tmr]);
    if (rtc_calib_tick_time[tmr])
        fprintf (st, " Tick Time: %s\n", sim_fmt_secs (rtc_calib_tick_time[tmr]));
    if (rtc_calib_tick_time_tot[tmr]+rtc_calib_tick_time[tmr] != rtc_calib_tick_time[tmr])
        fprintf (st, " Total Tick Time: %s\n", sim_fmt_secs (rtc_calib_tick_time_tot[tmr]+rtc_calib_tick_time[tmr]));
    if (rtc_clock_catchup_ticks[tmr])
        fprintf (st, " Catchup Ticks Sched: %lu\n", (unsigned long)rtc_clock_catchup_ticks[tmr]);
    if (rtc_clock_catchup_ticks_tot[tmr]+rtc_clock_catchup_ticks[tmr] != rtc_clock_catchup_ticks[tmr])
        fprintf (st, " Total Catchup Ticks Sched: %lu\n", (unsigned long)rtc_clock_catchup_ticks_tot[tmr]+(unsigned long)rtc_clock_catchup_ticks[tmr]);
#ifdef MACOSXPPC
    /* Old macOS (PPC) lacks clock_gettime(); use the Mach clock service */
    clock_serv_t cclock;
    mach_timespec_t mts;
    host_get_clock_service(mach_host_self(), CALENDAR_CLOCK, &cclock);
    clock_get_time(cclock, &mts);
    mach_port_deallocate(mach_task_self(), cclock);
    now.tv_sec = mts.tv_sec;
    now.tv_nsec = mts.tv_nsec;
#else
    clock_gettime (CLOCK_REALTIME, &now);
#endif
    time_t_now = (time_t)now.tv_sec;
    /* 11+ctime() skips the "Www Mmm dd " prefix; %8.8s keeps "HH:MM:SS" */
    fprintf (st, " Wall Clock Time Now: %8.8s.%03d\n", 11+ctime(&time_t_now), (int)(now.tv_nsec/1000000));
    if (rtc_clock_catchup_eligible[tmr]) {
        _double_to_timespec (&now, rtc_clock_catchup_base_time[tmr]+rtc_calib_tick_time[tmr]);
        time_t_now = (time_t)now.tv_sec;
        fprintf (st, " Catchup Tick Time: %8.8s.%03d\n", 11+ctime(&time_t_now), (int)(now.tv_nsec/1000000));
        _double_to_timespec (&now, rtc_clock_catchup_base_time[tmr]);
        time_t_now = (time_t)now.tv_sec;
        fprintf (st, " Catchup Base Time: %8.8s.%03d\n", 11+ctime(&time_t_now), (int)(now.tv_nsec/1000000));
        }
    if (rtc_clock_time_idled[tmr])
        fprintf (st, " Total Time Idled: %s\n", sim_fmt_secs (rtc_clock_time_idled[tmr]/1000.0));
    }
if (clocks == 0)
    fprintf (st, "%s clock device is not specified, co-scheduling is unavailable\n", sim_name);
return SCPE_OK;
}
566
/* SHOW CLOCKS command processor: list the units waiting on each
   registered clock's co-schedule queue, with the accumulated number
   of ticks each unit still has to wait. */
t_stat sim_show_clock_queues (FILE *st, DEVICE *dptr, UNIT *uptr, int32 flag, CONST char *cptr)
{
int tmr;

for (tmr=0; tmr<=SIM_NTIMERS; ++tmr) {
    if (sim_clock_unit[tmr] == NULL)    /* no clock registered: skip */
        continue;
    if (sim_clock_cosched_queue[tmr] != QUEUE_LIST_END) {
        int32 accum;                    /* running tick delay down the queue */

        fprintf (st, "%s clock (%s) co-schedule event queue status\n",
                 sim_name, sim_uname(sim_clock_unit[tmr]));
        accum = 0;
        for (uptr = sim_clock_cosched_queue[tmr]; uptr != QUEUE_LIST_END; uptr = uptr->next) {
            if ((dptr = find_dev_from_unit (uptr)) != NULL) {
                fprintf (st, " %s", sim_dname (dptr));
                if (dptr->numunits > 1)
                    fprintf (st, " unit %d", (int32) (uptr - dptr->units));
                }
            else
                fprintf (st, " Unknown");
            if (accum > 0)
                fprintf (st, " after %d ticks", accum);
            fprintf (st, "\n");
            accum = accum + uptr->time; /* queue stores relative delays */
            }
        }
    }
return SCPE_OK;
}
597
598 REG sim_timer_reg[] = {
599 { NULL }
600 };
601
602
603
604 t_stat sim_timer_clr_catchup (UNIT *uptr, int32 val, CONST char *cptr, void *desc)
605 {
606 if (sim_catchup_ticks)
607 sim_catchup_ticks = FALSE;
608 return SCPE_OK;
609 }
610
611 t_stat sim_timer_set_catchup (UNIT *uptr, int32 val, CONST char *cptr, void *desc)
612 {
613 if (!sim_catchup_ticks)
614 sim_catchup_ticks = TRUE;
615 return SCPE_OK;
616 }
617
618 t_stat sim_timer_show_catchup (FILE *st, UNIT *uptr, int32 val, CONST void *desc)
619 {
620 fprintf (st, "Calibrated Ticks%s", sim_catchup_ticks ? " with Catchup Ticks" : "");
621 return SCPE_OK;
622 }
623
624 MTAB sim_timer_mod[] = {
625 { MTAB_VDV, MTAB_VDV, "CATCHUP", "CATCHUP", &sim_timer_set_catchup, &sim_timer_show_catchup, NULL, "Enables/Displays Clock Tick catchup mode" },
626 { MTAB_VDV, 0, NULL, "NOCATCHUP", &sim_timer_clr_catchup, NULL, NULL, "Disables Clock Tick catchup mode" },
627 { 0 },
628 };
629
630 static t_stat sim_timer_clock_reset (DEVICE *dptr);
631
632 DEVICE sim_timer_dev = {
633 "TIMER", sim_timer_units, sim_timer_reg, sim_timer_mod,
634 SIM_NTIMERS+1, 0, 0, 0, 0, 0,
635 NULL, NULL, &sim_timer_clock_reset, NULL, NULL, NULL,
636 NULL, DEV_DEBUG | DEV_NOSAVE, 0, sim_timer_debug};
637
638
/* Service routine for the hidden per-timer tick units (sim_timer_units[]).
   Counts the tick, runs the registered clock unit's own service routine,
   tracks wall-clock skew when catchup is eligible, releases co-scheduled
   units whose interval has expired, and reschedules itself one tick ahead. */
t_stat sim_timer_tick_svc (UNIT *uptr)
{
int tmr = (int)(uptr-sim_timer_units);  /* which timer this unit belongs to */
t_stat stat;

rtc_clock_ticks[tmr] += 1;
rtc_calib_tick_time[tmr] += rtc_clock_tick_size[tmr];

/* NOTE(review): sim_clock_unit[tmr] is dereferenced without a NULL
   check -- presumably this service only fires for timers with a
   registered clock unit; confirm against the schedulers. */
if (sim_clock_unit[tmr]->action == NULL)
    return SCPE_IERR;
stat = sim_clock_unit[tmr]->action (sim_clock_unit[tmr]);
--sim_cosched_interval[tmr];
if (stat == SCPE_OK) {
    if (rtc_clock_catchup_eligible[tmr]) {
        struct timespec now;
        double skew;

#ifdef MACOSXPPC
        /* Old macOS (PPC) lacks clock_gettime(); use the Mach clock */
        clock_serv_t cclock;
        mach_timespec_t mts;
        host_get_clock_service(mach_host_self(), CALENDAR_CLOCK, &cclock);
        clock_get_time(cclock, &mts);
        mach_port_deallocate(mach_task_self(), cclock);
        now.tv_sec = mts.tv_sec;
        now.tv_nsec = mts.tv_nsec;
#else
        clock_gettime(CLOCK_REALTIME, &now);
#endif
        /* Track the worst observed difference between wall time and
           simulated tick time (negative => simulated clock is fast) */
        skew = (_timespec_to_double(&now) - (rtc_calib_tick_time[tmr]+rtc_clock_catchup_base_time[tmr]));

        if (fabs(skew) > fabs(rtc_clock_skew_max[tmr]))
            rtc_clock_skew_max[tmr] = skew;
        }
    /* Release co-scheduled units whose wait has elapsed */
    while ((sim_clock_cosched_queue[tmr] != QUEUE_LIST_END) &&
           (sim_cosched_interval[tmr] < sim_clock_cosched_queue[tmr]->time)) {
        UNIT *cptr = sim_clock_cosched_queue[tmr];
        sim_clock_cosched_queue[tmr] = cptr->next;
        cptr->next = NULL;
        cptr->cancel = NULL;
        _sim_activate (cptr, 0);
        }
    if (sim_clock_cosched_queue[tmr] != QUEUE_LIST_END)
        sim_cosched_interval[tmr] = sim_clock_cosched_queue[tmr]->time;
    else
        sim_cosched_interval[tmr] = 0;
    }
/* NOTE(review): divides by rtc_hz[tmr]; a zero tick rate here would
   fault -- confirm the timer always has a rate when this service runs. */
sim_timer_activate_after (uptr, 1000000/rtc_hz[tmr]);
return stat;
}
695
#if !defined(__CYGWIN__) && \
    ( defined(_WIN32) || defined(__MINGW32__) || defined(__MINGW64__) || \
      defined(CROSS_MINGW32) || defined(CROSS_MINGW64) )
/* High-resolution microsecond sleep for Windows, using a waitable timer.
   usec - number of microseconds to sleep. */
void win32_usleep(__int64 usec)
{
HANDLE timer;
LARGE_INTEGER ft;

/* Negative due time => relative interval, in 100-nanosecond units */
ft.QuadPart = -(10*usec);

timer = CreateWaitableTimer(NULL, TRUE, NULL);
/* BUGFIX: the timer handle and SetWaitableTimer result were never
   checked; a NULL handle would previously be handed to
   SetWaitableTimer()/WaitForSingleObject(). Fall back to Sleep()
   (millisecond granularity, rounded up) when creation fails. */
if (timer == NULL) {
    Sleep((DWORD)((usec + 999) / 1000));
    return;
    }
if (SetWaitableTimer(timer, &ft, 0, NULL, NULL, 0))
    WaitForSingleObject(timer, INFINITE);
CloseHandle(timer);
}
#endif
712
713
714
/* Portable microsecond sleep.
   Platform dispatch:
   - Windows/MinGW (non-Cygwin): win32_usleep() waitable timer
   - other non-Apple, non-OpenBSD systems: clock_nanosleep(CLOCK_MONOTONIC)
   - macOS (except PowerPC): nanosleep()
   - everything else (OpenBSD, macOS PPC): usleep()
   Returns 0 on success, otherwise the underlying primitive's result. */
int
sim_usleep(useconds_t tusleep)
{
#if ( !defined(__APPLE__) && !defined(__OpenBSD__) )
# if !defined(__CYGWIN__) && \
    ( defined(_WIN32) || defined(__MINGW32__) || defined(__MINGW64__) || \
      defined(CROSS_MINGW32) || defined(CROSS_MINGW64) )
win32_usleep(tusleep);

return 0;
# else
struct timespec rqt;
rqt.tv_sec = tusleep / 1000000;
rqt.tv_nsec = (tusleep % 1000000) * 1000;   /* microseconds -> nanoseconds */

return clock_nanosleep(CLOCK_MONOTONIC, 0, &rqt, NULL);
# endif

#else
# if defined(__APPLE__) && !defined(MACOSXPPC)
struct timespec rqt;
rqt.tv_sec = tusleep / 1000000;
rqt.tv_nsec = (tusleep % 1000000) * 1000;
return nanosleep(&rqt, NULL);
# else
return usleep(tusleep);
# endif
#endif
}
745
/* Convert a struct timespec into a floating point count of seconds. */
static double _timespec_to_double (struct timespec *time)
{
double seconds = (double)time->tv_sec;

seconds += ((double)time->tv_nsec) / 1000000000.0;
return seconds;
}
750
751 static void _double_to_timespec (struct timespec *time, double dtime)
752 {
753 time->tv_sec = (time_t)floor(dtime);
754 time->tv_nsec = (long)((dtime-floor(dtime))*1000000000.0);
755 }
756
757 #define CLK_TPS 10
758 #define CLK_INIT (SIM_INITIAL_IPS/CLK_TPS)
759 static int32 sim_int_clk_tps;
760
/* Service routine for the internal calibrated clock unit:
   run a calibration pass at sim_int_clk_tps and reschedule one
   tick (in wall time) into the future. */
static t_stat sim_timer_clock_tick_svc (UNIT *uptr)
{
sim_rtcn_calb (sim_int_clk_tps, SIM_INTERNAL_CLK);
sim_activate_after (uptr, 1000000/sim_int_clk_tps);
return SCPE_OK;
}
767
768 static void _rtcn_configure_calibrated_clock (int32 newtmr)
769 {
770 int32 tmr;
771
772
773 sim_int_clk_tps = MIN(CLK_TPS, sim_os_tick_hz);
774 for (tmr=0; tmr<SIM_NTIMERS; tmr++) {
775 if ((rtc_hz[tmr]) &&
776 (rtc_hz[tmr] <= (uint32)sim_os_tick_hz))
777 break;
778 }
779 if (tmr == SIM_NTIMERS) {
780 if ((tmr != newtmr) && (!sim_is_active (&SIM_INTERNAL_UNIT))) {
781
782 sim_calb_tmr = SIM_NTIMERS;
783 sim_debug (DBG_CAL, &sim_timer_dev, "_rtcn_configure_calibrated_clock() - Starting Internal Calibrated Timer at %dHz\n", sim_int_clk_tps);
784 SIM_INTERNAL_UNIT.action = &sim_timer_clock_tick_svc;
785 SIM_INTERNAL_UNIT.flags = UNIT_DIS | UNIT_IDLE;
786 sim_activate_abs (&SIM_INTERNAL_UNIT, 0);
787 sim_rtcn_init_unit (&SIM_INTERNAL_UNIT, (CLK_INIT*CLK_TPS)/sim_int_clk_tps, SIM_INTERNAL_CLK);
788 }
789 return;
790 }
791 if ((tmr == newtmr) &&
792 (sim_calb_tmr == newtmr))
793 return;
794 if (sim_calb_tmr == SIM_NTIMERS) {
795 sim_debug (DBG_CAL, &sim_timer_dev, "_rtcn_configure_calibrated_clock() - Stopping Internal Calibrated Timer, New Timer = %d (%dHz)\n", tmr, rtc_hz[tmr]);
796 rtc_initd[SIM_NTIMERS] = 0;
797 rtc_hz[SIM_NTIMERS] = 0;
798 sim_cancel (&SIM_INTERNAL_UNIT);
799
800 while (sim_clock_cosched_queue[SIM_NTIMERS] != QUEUE_LIST_END) {
801 UNIT *uptr = sim_clock_cosched_queue[SIM_NTIMERS];
802 _sim_coschedule_cancel (uptr);
803 _sim_activate (uptr, 1);
804 }
805 }
806 else {
807 sim_debug (DBG_CAL, &sim_timer_dev, "_rtcn_configure_calibrated_clock() - Changing Calibrated Timer from %d (%dHz) to %d (%dHz)\n", sim_calb_tmr, rtc_hz[sim_calb_tmr], tmr, rtc_hz[tmr]);
808 sim_calb_tmr = tmr;
809 }
810 sim_calb_tmr = tmr;
811 }
812
/* Reset routine for the TIMER device.
   Reconfigures the calibrated clock; on a power-up reset (-P switch)
   the internal timer is cancelled and the calibration source cleared. */
static t_stat sim_timer_clock_reset (DEVICE *dptr)
{
sim_debug (DBG_TRC, &sim_timer_dev, "sim_timer_clock_reset()\n");
_rtcn_configure_calibrated_clock (sim_calb_tmr);
if (sim_switches & SWMASK ('P')) {
    sim_cancel (&SIM_INTERNAL_UNIT);
    sim_calb_tmr = -1;
    }
return SCPE_OK;
}
823
/* Start (or restart) timer services when simulated execution begins:
   just (re)select the calibrated clock source. */
void sim_start_timer_services (void)
{
sim_debug (DBG_TRC, &sim_timer_dev, "sim_start_timer_services()\n");
_rtcn_configure_calibrated_clock (sim_calb_tmr);
}
829
/* Shut down timer services when simulated execution stops.
   Moves each registered clock unit off its hidden timer unit back onto
   the ordinary event queue, drains every co-schedule queue (preserving
   relative order via accumulated tick offsets), and records the last
   calibration state for later display. */
void sim_stop_timer_services (void)
{
int tmr;

sim_debug (DBG_TRC, &sim_timer_dev, "sim_stop_timer_services()\n");

for (tmr=0; tmr<=SIM_NTIMERS; tmr++) {
    int32 accum;

    if (sim_clock_unit[tmr]) {
        /* Replace the timer-unit driven tick with a conventional
           activation of the clock unit itself */
        sim_cancel (&sim_timer_units[tmr]);
        if (rtc_hz[tmr])
            sim_activate (sim_clock_unit[tmr], rtc_currd[tmr]);

        accum = 1;
        while (sim_clock_cosched_queue[tmr] != QUEUE_LIST_END) {
            UNIT *cptr = sim_clock_cosched_queue[tmr];

            sim_clock_cosched_queue[tmr] = cptr->next;
            cptr->next = NULL;
            cptr->cancel = NULL;

            accum += cptr->time;    /* queue stores relative tick delays */
            _sim_activate (cptr, accum*rtc_currd[tmr]);
            }
        }
    }
sim_cancel (&SIM_INTERNAL_UNIT);
sim_calb_tmr_last = sim_calb_tmr;           /* remember for status displays */
sim_inst_per_sec_last = sim_timer_inst_per_sec ();
sim_calb_tmr = -1;
}
863
864
865
866 double sim_timer_inst_per_sec (void)
867 {
868 double inst_per_sec = SIM_INITIAL_IPS;
869
870 if (sim_calb_tmr == -1)
871 return inst_per_sec;
872 inst_per_sec = ((double)rtc_currd[sim_calb_tmr])*rtc_hz[sim_calb_tmr];
873 if (0 == inst_per_sec)
874 inst_per_sec = ((double)rtc_currd[sim_calb_tmr])*sim_int_clk_tps;
875 return inst_per_sec;
876 }
877
/* Activate uptr after interval instructions, converting the instruction
   count to microseconds using the current estimated execution rate. */
t_stat sim_timer_activate (UNIT *uptr, int32 interval)
{
return sim_timer_activate_after (uptr, (uint32)((interval * 1000000.0) / sim_timer_inst_per_sec ()));
}
882
/* Schedule uptr to be activated usec_delay microseconds into the future
   on the instruction-based event queue.  If uptr is a registered clock
   unit, the activation is redirected to its hidden timer unit.  Units
   that are already active are left untouched (returns SCPE_OK). */
t_stat sim_timer_activate_after (UNIT *uptr, uint32 usec_delay)
{
int inst_delay, tmr;
double inst_delay_d, inst_per_sec;

/* Clock units tick via their corresponding hidden timer unit */
for (tmr=0; tmr<=SIM_NTIMERS; tmr++)
    if (sim_clock_unit[tmr] == uptr) {
        uptr = &sim_timer_units[tmr];
        break;
        }
if (sim_is_active (uptr))               /* already pending: leave it alone */
    return SCPE_OK;
inst_per_sec = sim_timer_inst_per_sec ();
inst_delay_d = ((inst_per_sec*usec_delay)/1000000.0);

/* Clamp to the largest schedulable interval */
if (inst_delay_d > (double)0x7fffffff)
    inst_delay_d = (double)0x7fffffff;
inst_delay = (int32)inst_delay_d;
if ((inst_delay == 0) && (usec_delay != 0))
    inst_delay = 1;                     /* nonzero delays round up to 1 instruction */
sim_debug (DBG_TIM, &sim_timer_dev, "sim_timer_activate_after() - queue addition %s at %d (%d usecs)\n",
           sim_uname(uptr), inst_delay, usec_delay);
return _sim_activate (uptr, inst_delay);
}
909
/* Register uptr as the clock unit for timer tmr, or deregister the
   current unit when uptr is NULL.  Deregistration drains the timer's
   co-schedule queue by activating each waiting unit after one
   instruction.  Returns SCPE_IERR for an invalid timer number. */
t_stat sim_register_clock_unit_tmr (UNIT *uptr, int32 tmr)
{
if (tmr == SIM_INTERNAL_CLK)
    tmr = SIM_NTIMERS;
else {
    if ((tmr < 0) || (tmr >= SIM_NTIMERS))
        return SCPE_IERR;
    }
if (NULL == uptr) {                     /* deregistering this timer's clock */
    while (sim_clock_cosched_queue[tmr] != QUEUE_LIST_END) {
        UNIT *uptr = sim_clock_cosched_queue[tmr];

        _sim_coschedule_cancel (uptr);
        _sim_activate (uptr, 1);
        }
    sim_clock_unit[tmr] = NULL;
    return SCPE_OK;
    }
if (NULL == sim_clock_unit[tmr])        /* first registration: empty queue */
    sim_clock_cosched_queue[tmr] = QUEUE_LIST_END;
sim_clock_unit[tmr] = uptr;
uptr->dynflags |= UNIT_TMR_UNIT;
sim_timer_units[tmr].flags = UNIT_DIS | (sim_clock_unit[tmr] ? UNIT_IDLE : 0);
return SCPE_OK;
}
935
936
937 static void _sim_coschedule_cancel (UNIT *uptr)
938 {
939 if (uptr->next) {
940 int tmr;
941
942 for (tmr=0; tmr<SIM_NTIMERS; tmr++) {
943 if (uptr == sim_clock_cosched_queue[tmr]) {
944 sim_clock_cosched_queue[tmr] = uptr->next;
945 uptr->next = NULL;
946 }
947 else {
948 UNIT *cptr;
949 for (cptr = sim_clock_cosched_queue[tmr];
950 (cptr != QUEUE_LIST_END);
951 cptr = cptr->next)
952 if (cptr->next == (uptr)) {
953 cptr->next = (uptr)->next;
954 uptr->next = NULL;
955 break;
956 }
957 }
958 if (uptr->next == NULL) {
959 uptr->cancel = NULL;
960 sim_debug (SIM_DBG_EVENT, &sim_timer_dev, "Canceled Clock Coscheduled Event for %s\n", sim_uname(uptr));
961 return;
962 }
963 }
964 }
965 }
966