This source file includes the following definitions:
- cpu_show_config
- cpu_set_config
- cpu_show_nunits
- cpu_set_nunits
- cpu_show_kips
- cpu_set_kips
- cpu_show_stall
- cpu_set_stall
- setCPUConfigL68
- setCPUConfigDPS8M
- cycle_str
- set_cpu_cycle
- set_cpu_idx
- cpu_reset_unit_idx
- simh_cpu_reset_and_clear_unit
- simh_cpu_reset_unit
- str_SDW0
- cpu_boot
- setup_scbank_map
- lookup_cpu_mem_map
- get_serial_number
- do_stats
- ev_poll_cb
- cpu_init
- cpu_reset
- sim_cpu_reset
- cpu_ex
- cpu_dep
- get_highest_intr
- sample_interrupts
- simh_hooks
- panel_process_event
- sim_instr
- cpu_thread_main
- do_LUF_fault
- set_temporary_absolute_mode
- clear_temporary_absolute_mode
- becomeClockMaster
- giveupClockMaster
- threadz_sim_instr
- operand_size
- readOperandRead
- readOperandRMW
- write_operand
- set_mem_watch
- nem_check
- core_read
- core_read_lock
- core_write
- core_write_unlock
- core_unlock_all
- core_write_zone
- core_read2
- core_write2
- decode_instruction
- is_priv_mode
- get_bar_mode
- get_addr_mode
- set_addr_mode
- get_BAR_address
- add_history
- add_history_force
- add_dps8m_CU_history
- add_dps8m_DU_OU_history
- add_dps8m_APU_history
- add_dps8m_EAPU_history
- add_l68_CU_history
- add_l68_DU_history
- add_l68_OU_history
- add_l68_APU_history
- get_dbg_verb
- dps8_sim_debug
- setupPROM
- cpuStats
- perfTest
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32 #include <stdio.h>
33 #include <unistd.h>
34 #include <ctype.h>
35
36 #if !defined(__MINGW64__) && !defined(__MINGW32__) && !defined(CROSS_MINGW64) && !defined(CROSS_MINGW32)
37 # include <sys/mman.h>
38 #endif
39
40 #include "dps8.h"
41 #include "dps8_sys.h"
42 #include "dps8_iom.h"
43 #include "dps8_cable.h"
44 #include "dps8_cpu.h"
45 #include "dps8_rt.h"
46 #include "dps8_priv.h"
47 #include "dps8_addrmods.h"
48 #include "dps8_faults.h"
49 #include "dps8_scu.h"
50 #include "dps8_append.h"
51 #include "dps8_ins.h"
52 #include "dps8_state.h"
53 #include "dps8_math.h"
54 #include "dps8_iefp.h"
55 #include "dps8_console.h"
56 #include "dps8_fnp2.h"
57 #include "dps8_socket_dev.h"
58 #include "dps8_crdrdr.h"
59 #include "dps8_absi.h"
60 #include "dps8_mgp.h"
61 #include "dps8_utils.h"
62 #include "dps8_memalign.h"
63
64 #if defined(M_SHARED)
65 # include "shm.h"
66 #endif
67
68 #include "dps8_opcodetable.h"
69 #include "../simh/sim_defs.h"
70 #include "../simh/sim_os_mem.h"
71
72 #if defined(THREADZ) || defined(LOCKLESS)
73 # include "threadz.h"
74 __thread uint current_running_cpu_idx;
75 #endif
76
77 #include "ver.h"
78
79 #if defined(_AIX) && !defined(__PASE__)
80 # include <pthread.h>
81 # include <sys/resource.h>
82 #endif
83
84 #if defined(NO_LOCALE)
85 # define xstrerror_l strerror
86 #endif
87
88 #define DBG_CTR cpu.cycleCnt
89
90 #define ASSUME0 0
91
92 #define FREE(p) do \
93 { \
94 free((p)); \
95 (p) = NULL; \
96 } while(0)
97
98
99
100 static UNIT cpu_unit [N_CPU_UNITS_MAX] = {
101 #if defined(NO_C_ELLIPSIS)
102 { UDATA (NULL, UNIT_FIX|UNIT_BINK, MEMSIZE), 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL },
103 { UDATA (NULL, UNIT_FIX|UNIT_BINK, MEMSIZE), 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL },
104 { UDATA (NULL, UNIT_FIX|UNIT_BINK, MEMSIZE), 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL },
105 { UDATA (NULL, UNIT_FIX|UNIT_BINK, MEMSIZE), 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL },
106 { UDATA (NULL, UNIT_FIX|UNIT_BINK, MEMSIZE), 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL },
107 { UDATA (NULL, UNIT_FIX|UNIT_BINK, MEMSIZE), 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL },
108 { UDATA (NULL, UNIT_FIX|UNIT_BINK, MEMSIZE), 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL },
109 { UDATA (NULL, UNIT_FIX|UNIT_BINK, MEMSIZE), 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL }
110 #else
111 [0 ... N_CPU_UNITS_MAX - 1] = {
112 UDATA (NULL, UNIT_FIX|UNIT_BINK, MEMSIZE), 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL
113 }
114 #endif
115 };
116
117 #define UNIT_IDX(uptr) ((uptr) - cpu_unit)
118
119
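// Lockup-fault (LUF) limits, expressed as instruction counts at the configured
// KIPS rate; 2000*KIPS/1000 is the number of instructions executed in 2 ms, so
// the five entries correspond to lockup timeouts of 2, 4, 8, 16 and 32 ms.
// cpu_set_kips rescales these whenever the KIPS setting is changed.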
120 #define LOCKUP_KIPS 1000
121 static uint64 kips = LOCKUP_KIPS;
122 static uint64 luf_limits[] =
123 {
124 2000*LOCKUP_KIPS/1000,
125 4000*LOCKUP_KIPS/1000,
126 8000*LOCKUP_KIPS/1000,
127 16000*LOCKUP_KIPS/1000,
128 32000*LOCKUP_KIPS/1000
129 };
130
131 struct stall_point_s stall_points [N_STALL_POINTS];
132 bool stall_point_active = false;
133
134 #if defined(PANEL68)
135 static void panel_process_event (void);
136 #endif
137
138 static t_stat simh_cpu_reset_and_clear_unit (UNIT * uptr,
139 UNUSED int32 value,
140 UNUSED const char * cptr,
141 UNUSED void * desc);
142 char * cycle_str (cycles_e cycle);
143
144 static t_stat cpu_show_config (UNUSED FILE * st, UNIT * uptr,
145 UNUSED int val, UNUSED const void * desc)
146 {
147 long cpu_unit_idx = UNIT_IDX (uptr);
148 if (cpu_unit_idx < 0 || cpu_unit_idx >= N_CPU_UNITS_MAX)
149 {
150 sim_warn ("error: Invalid unit number %ld\r\n", (long) cpu_unit_idx);
151 return SCPE_ARG;
152 }
153
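// Binary-display helpers for the switch settings below: PFC_INTn expands to a
// printf format of n "%c" conversions, and PBI_n expands an n-bit value into
// the matching list of '0'/'1' character arguments.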
154 #define PFC_INT8 "%c%c%c%c%c%c%c%c"
155
156 #define PBI_8(i) \
157 ( ((i) & 0x80ll) ? '1' : '0' ), \
158 ( ((i) & 0x40ll) ? '1' : '0' ), \
159 ( ((i) & 0x20ll) ? '1' : '0' ), \
160 ( ((i) & 0x10ll) ? '1' : '0' ), \
161 ( ((i) & 0x08ll) ? '1' : '0' ), \
162 ( ((i) & 0x04ll) ? '1' : '0' ), \
163 ( ((i) & 0x02ll) ? '1' : '0' ), \
164 ( ((i) & 0x01ll) ? '1' : '0' )
165
166 #define PFC_INT16 PFC_INT8 PFC_INT8
167 #define PFC_INT32 PFC_INT16 PFC_INT16
168 #define PFC_INT64 PFC_INT32 PFC_INT32
169
170 #define PBI_16(i) PBI_8((i) >> 8), PBI_8(i)
171 #define PBI_32(i) PBI_16((i) >> 16), PBI_16(i)
172 #define PBI_64(i) PBI_32((i) >> 32), PBI_32(i)
173
174 char dsbin[66], adbin[34];
175
176 sim_msg ("CPU unit number %ld\r\n", (long) cpu_unit_idx);
177
178 sim_msg ("Fault base: %03o(8)\r\n",
179 cpus[cpu_unit_idx].switches.FLT_BASE);
180 sim_msg ("CPU number: %01o(8)\r\n",
181 cpus[cpu_unit_idx].switches.cpu_num);
182 sim_msg ("Data switches: %012llo(8)\r\n",
183 (unsigned long long)cpus[cpu_unit_idx].switches.data_switches);
184 (void)snprintf (dsbin, 65, PFC_INT64,
185 PBI_64((unsigned long long)cpus[cpu_unit_idx].switches.data_switches));
186 sim_msg (" %36s(2)\r\n",
187 dsbin + strlen(dsbin) - 36);
188 sim_msg ("Address switches: %06o(8)\r\n",
189 cpus[cpu_unit_idx].switches.addr_switches);
190 (void)snprintf (adbin, 33, PFC_INT32,
191 PBI_32(cpus[cpu_unit_idx].switches.addr_switches));
192 sim_msg (" %18s(2)\r\n",
193 adbin + strlen(adbin) - 18);
194 for (int i = 0; i < (cpus[cpu_unit_idx].tweaks.l68_mode ? N_L68_CPU_PORTS : N_DPS8M_CPU_PORTS); i ++)
195 {
196 sim_msg ("Port%c enable: %01o(8)\r\n",
197 'A' + i, cpus[cpu_unit_idx].switches.enable [i]);
198 sim_msg ("Port%c init enable: %01o(8)\r\n",
199 'A' + i, cpus[cpu_unit_idx].switches.init_enable [i]);
200 sim_msg ("Port%c assignment: %01o(8)\r\n",
201 'A' + i, cpus[cpu_unit_idx].switches.assignment [i]);
202 sim_msg ("Port%c interlace: %01o(8)\r\n",
203 'A' + i, cpus[cpu_unit_idx].switches.interlace [i]);
204 sim_msg ("Port%c store size: %01o(8)\r\n",
205 'A' + i, cpus[cpu_unit_idx].switches.store_size [i]);
206 }
207 sim_msg ("Processor mode: %s [%o]\r\n",
208 cpus[cpu_unit_idx].switches.procMode == \
209 procModeMultics ? "Multics" : cpus[cpu_unit_idx].switches.procMode == procModeGCOS ? "GCOS" : "???",
210 cpus[cpu_unit_idx].switches.procMode);
211 sim_msg ("8K Cache: %s\r\n",
212 cpus[cpu_unit_idx].switches.enable_cache ? "Enabled" : "Disabled");
213 sim_msg ("SDWAM: %s\r\n",
214 cpus[cpu_unit_idx].switches.sdwam_enable ? "Enabled" : "Disabled");
215 sim_msg ("PTWAM: %s\r\n",
216 cpus[cpu_unit_idx].switches.ptwam_enable ? "Enabled" : "Disabled");
217
218 sim_msg ("Processor speed: %02o(8)\r\n",
219 cpus[cpu_unit_idx].options.proc_speed);
220 sim_msg ("DIS enable: %01o(8)\r\n",
221 cpus[cpu_unit_idx].tweaks.dis_enable);
222 sim_msg ("Steady clock: %01o(8)\r\n",
223 scu [0].steady_clock);
224 sim_msg ("Halt on unimplemented: %01o(8)\r\n",
225 cpus[cpu_unit_idx].tweaks.halt_on_unimp);
226 sim_msg ("Enable simulated SDWAM/PTWAM: %01o(8)\r\n",
227 cpus[cpu_unit_idx].tweaks.enable_wam);
228 sim_msg ("Report faults: %01o(8)\r\n",
229 cpus[cpu_unit_idx].tweaks.report_faults);
230 sim_msg ("TRO faults enabled: %01o(8)\r\n",
231 cpus[cpu_unit_idx].tweaks.tro_enable);
232 sim_msg ("drl fatal enabled: %01o(8)\r\n",
233 cpus[cpu_unit_idx].tweaks.drl_fatal);
234 sim_msg ("useMap: %d\r\n",
235 cpus[cpu_unit_idx].tweaks.useMap);
236 sim_msg ("PROM installed: %01o(8)\r\n",
237 cpus[cpu_unit_idx].options.prom_installed);
238 sim_msg ("Hex mode installed: %01o(8)\r\n",
239 cpus[cpu_unit_idx].options.hex_mode_installed);
240 sim_msg ("8K cache installed: %01o(8)\r\n",
241 cpus[cpu_unit_idx].options.cache_installed);
242 sim_msg ("Clock slave installed: %01o(8)\r\n",
243 cpus[cpu_unit_idx].options.clock_slave_installed);
244 #if defined(AFFINITY)
245 if (cpus[cpu_unit_idx].set_affinity)
246 sim_msg ("CPU affinity: %d\r\n", cpus[cpu_unit_idx].affinity);
247 else
248 sim_msg ("CPU affinity: not set\r\n");
249 #endif
250 sim_msg ("ISOLTS mode: %01o(8)\r\n", cpus[cpu_unit_idx].tweaks.isolts_mode);
251 sim_msg ("NODIS mode: %01o(8)\r\n", cpus[cpu_unit_idx].tweaks.nodis);
252 sim_msg ("6180 mode: %01o(8) [%s]\r\n",
253 cpus[cpu_unit_idx].tweaks.l68_mode, cpus[cpu_unit_idx].tweaks.l68_mode ? "6180" : "DPS8/M");
254 return SCPE_OK;
255 }
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281 static config_value_list_t cfg_multics_fault_base [] =
282 {
283 { "multics", 2 },
284 { NULL, 0 }
285 };
286
287 static config_value_list_t cfg_on_off [] =
288 {
289 { "off", 0 },
290 { "on", 1 },
291 { "disable", 0 },
292 { "enable", 1 },
293 { NULL, 0 }
294 };
295
296 static config_value_list_t cfg_l68_mode [] = {
297 { "dps8/m", 0 },
298 { "dps8m", 0 },
299 { "dps8", 0 },
300 { "l68", 1 },
301 { "l6180", 1 },
302     { "6180",   1 },
        { NULL,     0 }
303 };
304
305 static config_value_list_t cfg_cpu_mode [] =
306 {
307 { "gcos", 0 },
308 { "multics", 1 },
309 { NULL, 0 }
310 };
311
312 static config_value_list_t cfg_port_letter [] =
313 {
314 { "a", 0 },
315 { "b", 1 },
316 { "c", 2 },
317 { "d", 3 },
318 { "e", 4 },
319 { "f", 5 },
320 { "g", 6 },
321 { "h", 7 },
322 { NULL, 0 }
323 };
324
325 static config_value_list_t cfg_interlace [] =
326 {
327 { "off", 0 },
328 { "2", 2 },
329 { "4", 4 },
330 { NULL, 0 }
331 };
332
333 #if defined(AFFINITY)
334 static config_value_list_t cfg_affinity [] =
335 {
336 { "off", -1 },
337 { NULL, 0 }
338 };
339 #endif
340
341 static config_value_list_t cfg_size_list [] =
342 {
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405 { "32", 8 },
406 { "32K", 8 },
407 { "64", 9 },
408 { "64K", 9 },
409 { "128", 10 },
410 { "128K", 10 },
411 { "256", 11 },
412 { "256K", 11 },
413 { "512", 12 },
414 { "512K", 12 },
415 { "1024", 13 },
416 { "1024K", 13 },
417 { "1M", 13 },
418 { "2048", 14 },
419 { "2048K", 14 },
420 { "2M", 14 },
421 { "4096", 15 },
422 { "4096K", 15 },
423 { "4M", 15 },
424 { NULL, 0 }
425 };
426
427 static config_list_t cpu_config_list [] =
428 {
429 { "faultbase", 0, 0177, cfg_multics_fault_base },
430 { "num", 0, 07, NULL },
431 { "data", 0, 0777777777777, NULL },
432 { "stopnum", 0, 999999, NULL },
433 { "mode", 0, 01, cfg_cpu_mode },
434 { "speed", 0, 017, NULL },
435 { "port", 0, N_CPU_PORTS - 1, cfg_port_letter },
436 { "assignment", 0, 7, NULL },
437 { "interlace", 0, 1, cfg_interlace },
438 { "enable", 0, 1, cfg_on_off },
439 { "init_enable", 0, 1, cfg_on_off },
440 { "store_size", 0, 7, cfg_size_list },
441 { "enable_cache", 0, 1, cfg_on_off },
442 { "sdwam", 0, 1, cfg_on_off },
443 { "ptwam", 0, 1, cfg_on_off },
444
445
446 { "dis_enable", 0, 1, cfg_on_off },
447
448 { "steady_clock", 0, 1, cfg_on_off },
449 { "halt_on_unimplemented", 0, 1, cfg_on_off },
450 { "enable_wam", 0, 1, cfg_on_off },
451 { "report_faults", 0, 1, cfg_on_off },
452 { "tro_enable", 0, 1, cfg_on_off },
453 { "drl_fatal", 0, 1, cfg_on_off },
454 { "useMap", 0, 1, cfg_on_off },
455 { "address", 0, 0777777, NULL },
456 { "prom_installed", 0, 1, cfg_on_off },
457 { "hex_mode_installed", 0, 1, cfg_on_off },
458 { "cache_installed", 0, 1, cfg_on_off },
459 { "clock_slave_installed", 0, 1, cfg_on_off },
460 { "enable_emcall", 0, 1, cfg_on_off },
461
462
463 #if defined(AFFINITY)
464 { "affinity", -1, 32767, cfg_affinity },
465 #endif
466 { "isolts_mode", 0, 1, cfg_on_off },
467 { "nodis", 0, 1, cfg_on_off },
468 { "l68_mode", 0, 1, cfg_l68_mode },
469 { NULL, 0, 0, NULL }
470 };
471
472 static t_stat cpu_set_config (UNIT * uptr, UNUSED int32 value,
473 const char * cptr, UNUSED void * desc)
474 {
475 long cpu_unit_idx = UNIT_IDX (uptr);
476 if (cpu_unit_idx < 0 || cpu_unit_idx >= N_CPU_UNITS_MAX)
477 {
478 sim_warn ("error: cpu_set_config: Invalid unit number %ld\r\n",
479 (long) cpu_unit_idx);
480 return SCPE_ARG;
481 }
482
483 static int port_num = 0;
484
485 config_state_t cfg_state = { NULL, NULL };
486
487 for (;;)
488 {
489 int64_t v;
490 int rc = cfg_parse (__func__, cptr, cpu_config_list,
491 & cfg_state, & v);
492 if (rc == -1)
493 {
494 break;
495 }
496 if (rc == -2)
497 {
498 cfg_parse_done (& cfg_state);
499 return SCPE_ARG;
500 }
501
502 const char * p = cpu_config_list [rc] . name;
503 if (strcmp (p, "faultbase") == 0)
504 cpus[cpu_unit_idx].switches.FLT_BASE = (uint) v;
505 else if (strcmp (p, "num") == 0)
506 cpus[cpu_unit_idx].switches.cpu_num = (uint) v;
507 else if (strcmp (p, "data") == 0)
508 cpus[cpu_unit_idx].switches.data_switches = (word36) v;
509 else if (strcmp (p, "stopnum") == 0)
510 {
511
512
513 int64_t d1 = (v / 1000) % 10;
514 int64_t d2 = (v / 100) % 10;
515 int64_t d3 = (v / 10) % 10;
516 int64_t d4 = (v / 1) % 10;
517 word36 d = 0123000000000;
518 putbits36_6 (& d, 9, (word4) d1);
519 putbits36_6 (& d, 15, (word4) d2);
520 putbits36_6 (& d, 21, (word4) d3);
521 putbits36_6 (& d, 27, (word4) d4);
522 cpus[cpu_unit_idx].switches.data_switches = d;
523 }
524 else if (strcmp (p, "address") == 0)
525 cpus[cpu_unit_idx].switches.addr_switches = (word18) v;
526 else if (strcmp (p, "mode") == 0)
527 cpus[cpu_unit_idx].switches.procMode = v ? procModeMultics : procModeGCOS;
528 else if (strcmp (p, "speed") == 0)
529 cpus[cpu_unit_idx].options.proc_speed = (uint) v;
530 else if (strcmp (p, "port") == 0) {
531 if ((! cpus[cpu_unit_idx].tweaks.l68_mode) && (int) v > 3) {
532 cfg_parse_done (& cfg_state);
533 return SCPE_ARG;
534 }
535 port_num = (int) v;
536 }
537 else if (strcmp (p, "assignment") == 0)
538 cpus[cpu_unit_idx].switches.assignment [port_num] = (uint) v;
539 else if (strcmp (p, "interlace") == 0)
540 cpus[cpu_unit_idx].switches.interlace [port_num] = (uint) v;
541 else if (strcmp (p, "enable") == 0)
542 cpus[cpu_unit_idx].switches.enable [port_num] = (uint) v;
543 else if (strcmp (p, "init_enable") == 0)
544 cpus[cpu_unit_idx].switches.init_enable [port_num] = (uint) v;
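        // Store sizes given symbolically via cfg_size_list ("32K" .. "4M" map
        // to 8..15) are translated here into the hardware store-size switch
        // encoding, which differs between the L68 and the DPS8/M.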
545 else if (strcmp (p, "store_size") == 0) {
546 if (v > 7) {
547 if (cpus[cpu_unit_idx].tweaks.l68_mode) {
548 switch (v) {
549 case 8: v = 0; break;
550 case 9: v = 1; break;
551 case 10: v = 3; break;
552 case 11: v = 7; break;
553 case 12: v = 4; break;
554 case 13: v = 5; break;
555 case 14: v = 6; break;
556 case 15: v = 2; break;
557 }
558 } else {
559 switch (v) {
560 case 8: v = 0; break;
561 case 9: v = 1; break;
562 case 10: v = 2; break;
563 case 11: v = 3; break;
564 case 12: v = 4; break;
565 case 13: v = 5; break;
566 case 14: v = 6; break;
567 case 15: v = 7; break;
568 }
569 }
570 }
571 cpus[cpu_unit_idx].switches.store_size [port_num] = (uint) v;
572 }
573 else if (strcmp (p, "enable_cache") == 0)
574 cpus[cpu_unit_idx].switches.enable_cache = (uint) v ? true : false;
575 else if (strcmp (p, "sdwam") == 0)
576 cpus[cpu_unit_idx].switches.sdwam_enable = (uint) v ? true : false;
577 else if (strcmp (p, "ptwam") == 0)
578 cpus[cpu_unit_idx].switches.ptwam_enable = (uint) v ? true : false;
579 else if (strcmp (p, "dis_enable") == 0)
580 cpus[cpu_unit_idx].tweaks.dis_enable = (uint) v;
581 else if (strcmp (p, "steady_clock") == 0)
582 scu [0].steady_clock = (uint) v;
583 else if (strcmp (p, "halt_on_unimplemented") == 0)
584 cpus[cpu_unit_idx].tweaks.halt_on_unimp = (uint) v;
585 else if (strcmp (p, "enable_wam") == 0)
586 cpus[cpu_unit_idx].tweaks.enable_wam = (uint) v;
587 else if (strcmp (p, "report_faults") == 0)
588 cpus[cpu_unit_idx].tweaks.report_faults = (uint) v;
589 else if (strcmp (p, "tro_enable") == 0)
590 cpus[cpu_unit_idx].tweaks.tro_enable = (uint) v;
591 else if (strcmp (p, "drl_fatal") == 0)
592 cpus[cpu_unit_idx].tweaks.drl_fatal = (uint) v;
593 else if (strcmp (p, "useMap") == 0)
594 cpus[cpu_unit_idx].tweaks.useMap = v;
595 else if (strcmp (p, "prom_installed") == 0)
596 cpus[cpu_unit_idx].options.prom_installed = v;
597 else if (strcmp (p, "hex_mode_installed") == 0)
598 cpus[cpu_unit_idx].options.hex_mode_installed = v;
599 else if (strcmp (p, "cache_installed") == 0)
600 cpus[cpu_unit_idx].options.cache_installed = v;
601 else if (strcmp (p, "clock_slave_installed") == 0)
602 cpus[cpu_unit_idx].options.clock_slave_installed = v;
603 else if (strcmp (p, "enable_emcall") == 0)
604 cpus[cpu_unit_idx].tweaks.enable_emcall = v;
605 #if defined(AFFINITY)
606 else if (strcmp (p, "affinity") == 0)
607 if (v < 0)
608 {
609 cpus[cpu_unit_idx].set_affinity = false;
610 }
611 else
612 {
613 cpus[cpu_unit_idx].set_affinity = true;
614 cpus[cpu_unit_idx].affinity = (uint) v;
615 }
616 #endif
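        // Entering ISOLTS mode saves the current switch settings and installs
        // the fixed ISOLTS test configuration (only port B enabled, ISOLTS
        // store sizes, test address/data switch values); leaving it restores
        // the saved switches. In either direction the CPU is reset, or, if it
        // is currently executing, flagged to force a restart.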
617 else if (strcmp (p, "isolts_mode") == 0)
618 {
619 bool was = cpus[cpu_unit_idx].tweaks.isolts_mode;
620 cpus[cpu_unit_idx].tweaks.isolts_mode = v;
621 if (v && ! was) {
622 uint store_sz;
623 if (cpus[cpu_unit_idx].tweaks.l68_mode)
624 store_sz = 3;
625 else
626 store_sz = 2;
627 cpus[cpu_unit_idx].isolts_switches_save = cpus[cpu_unit_idx].switches;
628
629 cpus[cpu_unit_idx].switches.data_switches = 00000030714000;
630 cpus[cpu_unit_idx].switches.addr_switches = 0100150;
631 cpus[cpu_unit_idx].tweaks.useMap = true;
632 cpus[cpu_unit_idx].tweaks.enable_wam = true;
633 cpus[cpu_unit_idx].switches.assignment [0] = 0;
634 cpus[cpu_unit_idx].switches.interlace [0] = false;
635 cpus[cpu_unit_idx].switches.enable [0] = false;
636 cpus[cpu_unit_idx].switches.init_enable [0] = false;
637 cpus[cpu_unit_idx].switches.store_size [0] = store_sz;
638
639 cpus[cpu_unit_idx].switches.assignment [1] = 0;
640 cpus[cpu_unit_idx].switches.interlace [1] = false;
641 cpus[cpu_unit_idx].switches.enable [1] = true;
642 cpus[cpu_unit_idx].switches.init_enable [1] = false;
643 cpus[cpu_unit_idx].switches.store_size [1] = store_sz;
644
645 cpus[cpu_unit_idx].switches.assignment [2] = 0;
646 cpus[cpu_unit_idx].switches.interlace [2] = false;
647 cpus[cpu_unit_idx].switches.enable [2] = false;
648 cpus[cpu_unit_idx].switches.init_enable [2] = false;
649 cpus[cpu_unit_idx].switches.store_size [2] = store_sz;
650
651 cpus[cpu_unit_idx].switches.assignment [3] = 0;
652 cpus[cpu_unit_idx].switches.interlace [3] = false;
653 cpus[cpu_unit_idx].switches.enable [3] = false;
654 cpus[cpu_unit_idx].switches.init_enable [3] = false;
655 cpus[cpu_unit_idx].switches.store_size [3] = store_sz;
656
657 if (cpus[cpu_unit_idx].tweaks.l68_mode) {
658 cpus[cpu_unit_idx].switches.assignment [4] = 0;
659 cpus[cpu_unit_idx].switches.interlace [4] = false;
660 cpus[cpu_unit_idx].switches.enable [4] = false;
661 cpus[cpu_unit_idx].switches.init_enable [4] = false;
662 cpus[cpu_unit_idx].switches.store_size [4] = 3;
663
664 cpus[cpu_unit_idx].switches.assignment [5] = 0;
665 cpus[cpu_unit_idx].switches.interlace [5] = false;
666 cpus[cpu_unit_idx].switches.enable [5] = false;
667 cpus[cpu_unit_idx].switches.init_enable [5] = false;
668 cpus[cpu_unit_idx].switches.store_size [5] = 3;
669
670 cpus[cpu_unit_idx].switches.assignment [6] = 0;
671 cpus[cpu_unit_idx].switches.interlace [6] = false;
672 cpus[cpu_unit_idx].switches.enable [6] = false;
673 cpus[cpu_unit_idx].switches.init_enable [6] = false;
674 cpus[cpu_unit_idx].switches.store_size [6] = 3;
675
676 cpus[cpu_unit_idx].switches.assignment [7] = 0;
677 cpus[cpu_unit_idx].switches.interlace [7] = false;
678 cpus[cpu_unit_idx].switches.enable [7] = false;
679 cpus[cpu_unit_idx].switches.init_enable [7] = false;
680 cpus[cpu_unit_idx].switches.store_size [7] = 3;
681 }
682 cpus[cpu_unit_idx].switches.enable [1] = true;
683
684 #if defined(THREADZ) || defined(LOCKLESS)
685 if (cpus[cpu_unit_idx].executing) {
686 cpus[cpu_unit_idx].forceRestart = true;
687 wakeCPU (cpu_unit_idx);
688 } else {
689 cpu_reset_unit_idx ((uint) cpu_unit_idx, false);
690
691 }
692 #else
693 cpu_reset_unit_idx ((uint) cpu_unit_idx, false);
694 simh_cpu_reset_and_clear_unit (cpu_unit + cpu_unit_idx, 0, NULL, NULL);
695 #endif
696
697 } else if (was && !v) {
698 cpus[cpu_unit_idx].switches = cpus[cpu_unit_idx].isolts_switches_save;
699
700 #if defined(THREADZ) || defined(LOCKLESS)
701 if (cpus[cpu_unit_idx].executing) {
702 cpus[cpu_unit_idx].forceRestart = true;
703 wakeCPU (cpu_unit_idx);
704 } else {
705 cpu_reset_unit_idx ((uint) cpu_unit_idx, false);
706
707 }
708 #else
709 cpu_reset_unit_idx ((uint) cpu_unit_idx, false);
710 simh_cpu_reset_and_clear_unit (cpu_unit + cpu_unit_idx, 0, NULL, NULL);
711 #endif
712
713 }
714 }
715 else if (strcmp (p, "nodis") == 0)
716 cpus[cpu_unit_idx].tweaks.nodis = v;
717 else if (strcmp (p, "l68_mode") == 0)
718 cpus[cpu_unit_idx].tweaks.l68_mode = v;
719 else
720 {
721 sim_warn ("error: cpu_set_config: Invalid cfg_parse rc <%ld>\r\n",
722 (long) rc);
723 cfg_parse_done (& cfg_state);
724 return SCPE_ARG;
725 }
726 }
727 cfg_parse_done (& cfg_state);
728
729 return SCPE_OK;
730 }
731
732 static t_stat cpu_show_nunits (UNUSED FILE * st, UNUSED UNIT * uptr,
733 UNUSED int val, UNUSED const void * desc)
734 {
735 sim_msg ("Number of CPUs in system is %d\r\n", cpu_dev.numunits);
736 return SCPE_OK;
737 }
738
739 static t_stat cpu_set_nunits (UNUSED UNIT * uptr, UNUSED int32 value,
740 const char * cptr, UNUSED void * desc)
741 {
742 if (! cptr)
743 return SCPE_ARG;
744 int n = atoi (cptr);
745 if (n < 1 || n > N_CPU_UNITS_MAX)
746 return SCPE_ARG;
747 cpu_dev.numunits = (uint32) n;
748 return SCPE_OK;
749 }
750
751 static t_stat cpu_show_kips (UNUSED FILE * st, UNUSED UNIT * uptr,
752 UNUSED int val, UNUSED const void * desc)
753 {
754 sim_msg ("CPU KIPS %lu\r\n", (unsigned long)kips);
755 return SCPE_OK;
756 }
757
758 static t_stat cpu_set_kips (UNUSED UNIT * uptr, UNUSED int32 value,
759 const char * cptr, UNUSED void * desc)
760 {
761 if (! cptr)
762 return SCPE_ARG;
763 long n = atol (cptr);
764 if (n < 1 || n > 4000000)
765 return SCPE_ARG;
766 kips = (uint64) n;
767 luf_limits[0] = 2000*kips/1000;
768 luf_limits[1] = 4000*kips/1000;
769 luf_limits[2] = 8000*kips/1000;
770 luf_limits[3] = 16000*kips/1000;
771 luf_limits[4] = 32000*kips/1000;
772 return SCPE_OK;
773 }
774
775 static t_stat cpu_show_stall (UNUSED FILE * st, UNUSED UNIT * uptr,
776 UNUSED int val, UNUSED const void * desc)
777 {
778 if (! stall_point_active)
779 {
780 sim_printf ("No stall points\r\n");
781 return SCPE_OK;
782 }
783
784 sim_printf ("Stall points\r\n");
785 for (int i = 0; i < N_STALL_POINTS; i ++)
786 if (stall_points[i].segno || stall_points[i].offset)
787 {
788 #if defined(WIN_STDIO)
789 sim_printf ("%2ld %05o:%06o %10lu\r\n",
790 #else
791 sim_printf ("%2ld %05o:%06o %'10lu\r\n",
792 #endif
793 (long)i, stall_points[i].segno, stall_points[i].offset,
794 (unsigned long)stall_points[i].time);
795 }
796 return SCPE_OK;
797 }
798
799
800
801
802
803
804
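// Set a stall point. The argument has the form "<n>=<segno>:<offset>=<time>",
// where n is a decimal stall point index, segno and offset are octal, and the
// stall time is a decimal count of at most 30000000 (presumably microseconds).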
805 static t_stat cpu_set_stall (UNUSED UNIT * uptr, UNUSED int32 value,
806 const char * cptr, UNUSED void * desc)
807 {
808 if (! cptr)
809 return SCPE_ARG;
810
811 long n, s, o, t;
812
813 char * end;
814 n = strtol (cptr, & end, 0);
815 if (* end != '=')
816 return SCPE_ARG;
817 if (n < 0 || n >= N_STALL_POINTS)
818 return SCPE_ARG;
819
820 s = strtol (end + 1, & end, 8);
821 if (* end != ':')
822 return SCPE_ARG;
823 if (s < 0 || s > MASK15)
824 return SCPE_ARG;
825
826 o = strtol (end + 1, & end, 8);
827 if (* end != '=')
828 return SCPE_ARG;
829 if (o < 0 || o > MASK18)
830 return SCPE_ARG;
831
832 t = strtol (end + 1, & end, 0);
833 if (* end != 0)
834 return SCPE_ARG;
835 if (t < 0 || t > 30000000)
836 return SCPE_ARG;
837
838 stall_points[n].segno = (word15) s;
839 stall_points[n].offset = (word18) o;
840 stall_points[n].time = (unsigned int) t;
841 stall_point_active = false;
842
843 for (int i = 0; i < N_STALL_POINTS; i ++)
844         if (stall_points[i].segno && stall_points[i].offset)
845 stall_point_active = true;
846
847 return SCPE_OK;
848 }
849
850 static t_stat setCPUConfigL68 (UNIT * uptr, UNUSED int32 value, UNUSED const char * cptr, UNUSED void * desc) {
851 long cpuUnitIdx = UNIT_IDX (uptr);
852 if (cpuUnitIdx < 0 || cpuUnitIdx >= N_CPU_UNITS_MAX)
853 return SCPE_ARG;
854 cpu_state_t * cpun = cpus + cpuUnitIdx;
855
856 cpun->tweaks.l68_mode = 1;
857 cpun->options.hex_mode_installed = 0;
858 for (uint port_num = 0; port_num < N_DPS8M_CPU_PORTS; port_num ++) {
859 cpun->switches.assignment[port_num] = port_num;
860 cpun->switches.interlace[port_num] = 0;
861 cpun->switches.store_size[port_num] = 2;
862 cpun->switches.enable[port_num] = 1;
863 cpun->switches.init_enable[port_num] = 1;
864 }
865 for (uint port_num = N_DPS8M_CPU_PORTS; port_num < N_L68_CPU_PORTS; port_num ++) {
866 cpun->switches.assignment[port_num] = 0;
867 cpun->switches.interlace[port_num] = 0;
868 cpun->switches.store_size[port_num] = 0;
869 cpun->switches.enable[port_num] = 0;
870 cpun->switches.init_enable[port_num] = 0;
871 }
872 return SCPE_OK;
873 }
874
875 static t_stat setCPUConfigDPS8M (UNIT * uptr, UNUSED int32 value, UNUSED const char * cptr, UNUSED void * desc) {
876 long cpuUnitIdx = UNIT_IDX (uptr);
877 if (cpuUnitIdx < 0 || cpuUnitIdx >= N_CPU_UNITS_MAX)
878 return SCPE_ARG;
879 cpu_state_t * cpun = cpus + cpuUnitIdx;
880
881 cpun->tweaks.l68_mode = 0;
882 cpun->options.hex_mode_installed = 0;
883 for (uint port_num = 0; port_num < N_DPS8M_CPU_PORTS; port_num ++) {
884 cpun->switches.assignment[port_num] = port_num;
885 cpun->switches.interlace[port_num] = 0;
886 cpun->switches.store_size[port_num] = 7;
887 cpun->switches.enable[port_num] = 1;
888 cpun->switches.init_enable[port_num] = 1;
889 }
890 for (uint port_num = N_DPS8M_CPU_PORTS; port_num < N_L68_CPU_PORTS; port_num ++) {
891 cpun->switches.assignment[port_num] = 0;
892 cpun->switches.interlace[port_num] = 0;
893 cpun->switches.store_size[port_num] = 0;
894 cpun->switches.enable[port_num] = 0;
895 cpun->switches.init_enable[port_num] = 0;
896 }
897 return SCPE_OK;
898 }
899
900 char * cycle_str (cycles_e cycle)
901 {
902 switch (cycle)
903 {
904
905
906 case FAULT_cycle:
907 return "FAULT_cycle";
908 case EXEC_cycle:
909 return "EXEC_cycle";
910 case FAULT_EXEC_cycle:
911 return "FAULT_EXEC_cycle";
912 case INTERRUPT_cycle:
913 return "INTERRUPT_cycle";
914 case INTERRUPT_EXEC_cycle:
915 return "INTERRUPT_EXEC_cycle";
916 case FETCH_cycle:
917 return "FETCH_cycle";
918 case PSEUDO_FETCH_cycle:
919 return "PSEUDO_FETCH_cycle";
920 case SYNC_FAULT_RTN_cycle:
921 return "SYNC_FAULT_RTN_cycle";
922 default:
923 return "unknown cycle";
924 }
925 }
926
927 static void set_cpu_cycle (cpu_state_t * cpup, cycles_e cycle)
928 {
929 sim_debug (DBG_CYCLE, & cpu_dev, "Setting cycle to %s\r\n",
930 cycle_str (cycle));
931 cpu.cycle = cycle;
932 }
933
934
935
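// Memory words are kept in 64-bit cells; the bits above the 36 data bits are
// used for bookkeeping, and MEM_UNINITIALIZED (bit 62) marks a word that has
// never been written since the last clear.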
936 #define MEM_UNINITIALIZED (1LLU<<62)
937
938 uint set_cpu_idx (UNUSED uint cpu_idx)
939 {
940 uint prev = current_running_cpu_idx;
941 #if defined(THREADZ) || defined(LOCKLESS)
942 current_running_cpu_idx = cpu_idx;
943 #endif
944 _cpup = & cpus [current_running_cpu_idx];
945 return prev;
946 }
947
948 void cpu_reset_unit_idx (UNUSED uint cpun, bool clear_mem)
949 {
950 uint save = set_cpu_idx (cpun);
951 cpu_state_t * cpup = _cpup;
952 if (clear_mem)
953 {
954 for (uint i = 0; i < MEMSIZE; i ++)
955 {
956
957 #if defined(LOCKLESS)
958 M[i] = (M[i] & ~(MASK36 | MEM_LOCKED)) | MEM_UNINITIALIZED;
959 #else
960 M[i] = (M[i] & ~(MASK36)) | MEM_UNINITIALIZED;
961 #endif
962 }
963 }
964 cpu.rA = 0;
965 cpu.rQ = 0;
966
967 cpu.PPR.IC = 0;
968 cpu.PPR.PRR = 0;
969 cpu.PPR.PSR = 0;
970 cpu.PPR.P = 1;
971 cpu.RSDWH_R1 = 0;
972 cpu.rTR = MASK27;
973
974 if (cpu.tweaks.isolts_mode)
975 {
976 cpu.shadowTR = 0;
977 cpu.rTRlsb = 0;
978 }
979 cpu.rTRticks = 0;
980
981 set_addr_mode (cpup, ABSOLUTE_mode);
982 SET_I_NBAR;
983
984 cpu.CMR.luf = 3;
985 cpu.cu.SD_ON = cpu.switches.sdwam_enable ? 1 : 0;
986 cpu.cu.PT_ON = cpu.switches.ptwam_enable ? 1 : 0;
987
988 if (cpu.tweaks.nodis) {
989 set_cpu_cycle (cpup, FETCH_cycle);
990 } else {
991 set_cpu_cycle (cpup, EXEC_cycle);
992 cpu.cu.IWB = 0000000616200;
993 }
994 #if defined(PERF_STRIP)
995 set_cpu_cycle (cpup, FETCH_cycle);
996 #endif
997 cpu.wasXfer = false;
998 cpu.wasInhibited = false;
999
1000 cpu.interrupt_flag = false;
1001 cpu.g7_flag = false;
1002
1003 cpu.faultRegister [0] = 0;
1004 cpu.faultRegister [1] = 0;
1005
1006 #if defined(RAPRx)
1007 cpu.apu.lastCycle = UNKNOWN_CYCLE;
1008 #endif
1009
1010 (void)memset (& cpu.PPR, 0, sizeof (struct ppr_s));
1011
1012 setup_scbank_map (cpup);
1013
1014 tidy_cu (cpup);
1015 set_cpu_idx (save);
1016 }
1017
1018 static t_stat simh_cpu_reset_and_clear_unit (UNIT * uptr,
1019 UNUSED int32 value,
1020 UNUSED const char * cptr,
1021 UNUSED void * desc)
1022 {
1023 long cpu_unit_idx = UNIT_IDX (uptr);
1024 cpu_state_t * cpun = cpus + cpu_unit_idx;
1025 if (cpun->tweaks.isolts_mode)
1026 {
1027
1028 if (cpun->tweaks.useMap)
1029 {
1030 for (uint pgnum = 0; pgnum < N_SCBANKS; pgnum ++)
1031 {
1032 int base = cpun->sc_addr_map [pgnum];
1033 if (base < 0)
1034 continue;
1035 for (uint addr = 0; addr < SCBANK_SZ; addr ++)
1036 M [addr + (uint) base] = MEM_UNINITIALIZED;
1037 }
1038 }
1039 }
1040
1041 cpu_reset_unit_idx ((uint) cpu_unit_idx, false);
1042 return SCPE_OK;
1043 }
1044
1045 static t_stat simh_cpu_reset_unit (UNIT * uptr,
1046 UNUSED int32 value,
1047 UNUSED const char * cptr,
1048 UNUSED void * desc)
1049 {
1050 long cpu_unit_idx = UNIT_IDX (uptr);
1051 cpu_reset_unit_idx ((uint) cpu_unit_idx, false);
1052 return SCPE_OK;
1053 }
1054
1055 #if !defined(PERF_STRIP)
1056 static uv_loop_t * ev_poll_loop;
1057 static uv_timer_t ev_poll_handle;
1058 #endif
1059
1060 static MTAB cpu_mod[] =
1061 {
1062 {
1063 MTAB_unit_value,
1064 0,
1065 "CONFIG",
1066 "CONFIG",
1067 cpu_set_config,
1068 cpu_show_config,
1069 NULL,
1070 NULL
1071 },
1072
1073
1074
1075 {
1076 MTAB_unit_value,
1077 0,
1078 "RESET",
1079 "RESET",
1080 simh_cpu_reset_unit,
1081 NULL,
1082 NULL,
1083 NULL
1084 },
1085
1086 {
1087 MTAB_unit_value,
1088 0,
1089 "INITIALIZE",
1090 "INITIALIZE",
1091 simh_cpu_reset_unit,
1092 NULL,
1093 NULL,
1094 NULL
1095 },
1096
1097
1098
1099 {
1100 MTAB_unit_value,
1101 0,
1102 "INITIALIZEANDCLEAR",
1103 "INITIALIZEANDCLEAR",
1104 simh_cpu_reset_and_clear_unit,
1105 NULL,
1106 NULL,
1107 NULL
1108 },
1109
1110 {
1111 MTAB_unit_value,
1112 0,
1113 "IAC",
1114 "IAC",
1115 simh_cpu_reset_and_clear_unit,
1116 NULL,
1117 NULL,
1118 NULL
1119 },
1120
1121 {
1122 MTAB_dev_value,
1123 0,
1124 "NUNITS",
1125 "NUNITS",
1126 cpu_set_nunits,
1127 cpu_show_nunits,
1128 NULL,
1129 NULL
1130 },
1131
1132 {
1133 MTAB_dev_value,
1134 0,
1135 "KIPS",
1136 "KIPS",
1137 cpu_set_kips,
1138 cpu_show_kips,
1139 NULL,
1140 NULL
1141 },
1142
1143 {
1144 MTAB_dev_value,
1145 0,
1146 "STALL",
1147 "STALL",
1148 cpu_set_stall,
1149 cpu_show_stall,
1150 NULL,
1151 NULL
1152 },
1153
1154 {
1155 MTAB_unit_value,
1156 0,
1157 "DPS8M",
1158 "DPS8M",
1159 setCPUConfigDPS8M,
1160 NULL,
1161 NULL,
1162 NULL
1163 },
1164
1165 {
1166 MTAB_unit_value,
1167 0,
1168 "L68",
1169 "L68",
1170 setCPUConfigL68,
1171 NULL,
1172 NULL,
1173 NULL
1174 },
1175
1176 { 0, 0, NULL, NULL, NULL, NULL, NULL, NULL }
1177 };
1178
1179 static DEBTAB cpu_dt[] =
1180 {
1181 { "TRACE", DBG_TRACE, NULL },
1182 { "TRACEEXT", DBG_TRACEEXT, NULL },
1183 { "MESSAGES", DBG_MSG, NULL },
1184
1185 { "REGDUMPAQI", DBG_REGDUMPAQI, NULL },
1186 { "REGDUMPIDX", DBG_REGDUMPIDX, NULL },
1187 { "REGDUMPPR", DBG_REGDUMPPR, NULL },
1188 { "REGDUMPPPR", DBG_REGDUMPPPR, NULL },
1189 { "REGDUMPDSBR", DBG_REGDUMPDSBR, NULL },
1190 { "REGDUMPFLT", DBG_REGDUMPFLT, NULL },
1191 { "REGDUMP", DBG_REGDUMP, NULL },
1192
1193 { "ADDRMOD", DBG_ADDRMOD, NULL },
1194 { "APPENDING", DBG_APPENDING, NULL },
1195
1196 { "NOTIFY", DBG_NOTIFY, NULL },
1197 { "INFO", DBG_INFO, NULL },
1198 { "ERR", DBG_ERR, NULL },
1199 { "WARN", DBG_WARN, NULL },
1200 { "DEBUG", DBG_DEBUG, NULL },
1201 { "ALL", DBG_ALL, NULL },
1202
1203 { "FAULT", DBG_FAULT, NULL },
1204 { "INTR", DBG_INTR, NULL },
1205 { "CORE", DBG_CORE, NULL },
1206 { "CYCLE", DBG_CYCLE, NULL },
1207 { "CAC", DBG_CAC, NULL },
1208 { "FINAL", DBG_FINAL, NULL },
1209 { "AVC", DBG_AVC, NULL },
1210 { NULL, 0, NULL }
1211 };
1212
1213
1214 const char *sim_stop_messages[] =
1215 {
1216 "Unknown error",
1217 "Simulation stop",
1218 "Breakpoint",
1219 };
1220
1221
1222
1223
1224
1225
1226
1227
1228
1229
1230
1231
1232
1233
1234
1235
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246
1247
1248
1249 #if !defined(SPEED)
1250 static bool watch_bits [MEMSIZE];
1251 #endif
1252
1253 char * str_SDW0 (char * buf, sdw0_s * SDW)
1254 {
1255 (void)sprintf (buf, "ADDR=%06o R1=%o R2=%o R3=%o F=%o FC=%o BOUND=%o R=%o "
1256 "E=%o W=%o P=%o U=%o G=%o C=%o EB=%o",
1257 SDW->ADDR, SDW->R1, SDW->R2, SDW->R3, SDW->DF,
1258 SDW->FC, SDW->BOUND, SDW->R, SDW->E, SDW->W,
1259 SDW->P, SDW->U, SDW->G, SDW->C, SDW->EB);
1260 return buf;
1261 }
1262
1263 static t_stat cpu_boot (UNUSED int32 cpu_unit_idx, UNUSED DEVICE * dptr)
1264 {
1265 sim_warn ("Try 'BOOT IOMn'\r\n");
1266 return SCPE_ARG;
1267 }
1268
1269
1270
1271
1272
1273
1274
1275
1276
1277
1278
1279
1280
1281
1282
1283
1284
1285
1286
1287
1288
1289
1290
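// Each CPU port (SCU) is modeled in its own ZONE_SZ-word region of simulated
// memory; setup_scbank_map places bank pg of port port_num at the offset
// port_num * ZONE_SZ + pg * SCBANK_SZ.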
1291 #define ZONE_SZ (MEM_SIZE_MAX / 4)
1292
1293
1294
1295
1296
1297
1298
1299
1300
1301
1302
1303
1304
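// Build this CPU's memory map from its port switches: for every enabled and
// cabled port, the store-size switch selects a bank count (the encoding
// differs between DPS8/M and L68, and ISOLTS uses its own L68 table) and the
// assignment switch selects the base address. The results are recorded in
// sc_addr_map (bank number -> offset within M) and sc_scu_map (bank number ->
// port number); overlapping or out-of-range banks are reported and skipped.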
1305 void setup_scbank_map (cpu_state_t * cpup)
1306 {
1307
1308 for (uint pg = 0; pg < N_SCBANKS; pg ++)
1309 {
1310 cpu.sc_addr_map [pg] = -1;
1311 cpu.sc_scu_map [pg] = -1;
1312 }
1313 for (uint u = 0; u < N_SCU_UNITS_MAX; u ++)
1314 cpu.sc_num_banks[u] = 0;
1315
1316
1317 for (int port_num = 0; port_num < (cpu.tweaks.l68_mode ? N_L68_CPU_PORTS : N_DPS8M_CPU_PORTS); port_num ++)
1318 {
1319
1320 if (! cpu.switches.enable [port_num])
1321 continue;
1322
1323
1324
1325
1326 if (! cables->cpu_to_scu[current_running_cpu_idx][port_num].in_use)
1327 {
1328 continue;
1329 }
1330
1331
1332 uint store_size = cpu.switches.store_size [port_num];
1333 uint dps8m_store_table [8] =
1334 { 32768, 65536, 131072, 262144, 524288, 1048576, 2097152, 4194304 };
1335
1336
1337
1338
1339
1340
1341
1342
1343
1344
1345
1346
1347
1348
1349 uint l68_store_table [8] =
1350 { 32768, 65536, 4194304, 131072, 524288, 1048576, 2097152, 262144 };
1351 uint l68_isolts_store_table [8] =
1352 { 32768, 65536, 4194304, 65536, 524288, 1048576, 2097152, 262144 };
1353
1354 uint sz_wds =
1355 cpu.tweaks.l68_mode ?
1356 cpu.tweaks.isolts_mode ?
1357 l68_isolts_store_table [store_size] :
1358 l68_store_table [store_size] :
1359 dps8m_store_table [store_size];
1360
1361
1362 uint base_addr_wds = sz_wds * cpu.switches.assignment[port_num];
1363
1364
1365 uint num_banks = sz_wds / SCBANK_SZ;
1366 cpu.sc_num_banks[port_num] = num_banks;
1367 uint base_addr_bks = base_addr_wds / SCBANK_SZ;
1368
1369
1370 for (uint pg = 0; pg < num_banks; pg ++)
1371 {
1372
1373 uint addr_bks = base_addr_bks + pg;
1374
1375 if (addr_bks < N_SCBANKS)
1376 {
1377
1378 if (cpu.sc_addr_map [addr_bks] != -1)
1379 {
1380 sim_warn ("scbank overlap addr_bks %d (%o) old port %d "
1381 "newport %d\r\n",
1382 addr_bks, addr_bks, cpu.sc_addr_map [addr_bks], port_num);
1383 }
1384 else
1385 {
1386
1387 cpu.sc_addr_map[addr_bks] = (int)((int)port_num * (int)ZONE_SZ + (int)pg * (int)SCBANK_SZ);
1388 cpu.sc_scu_map[addr_bks] = port_num;
1389 }
1390 }
1391 else
1392 {
1393 sim_warn ("addr_bks too big port %d addr_bks %d (%o), "
1394 "limit %d (%o)\r\n",
1395 port_num, addr_bks, addr_bks, N_SCBANKS, N_SCBANKS);
1396 }
1397 }
1398
1399 }
1400
1401
1402
1403 }
1404
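// Map a 24-bit absolute address to the CPU port (SCU) serving it, or -1 if the
// address lies beyond the configured banks.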
1405 int lookup_cpu_mem_map (cpu_state_t * cpup, word24 addr)
1406 {
1407 uint scpg = addr / SCBANK_SZ;
1408 if (scpg < N_SCBANKS)
1409 {
1410 return cpu.sc_scu_map[scpg];
1411 }
1412 return -1;
1413 }
1414
1415
1416
1417
1418
1419
1420
1421
1422
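// Read the optional "./serial.txt" file to set CPU serial numbers: a line of
// the form "sn: N" sets the serial number of the current CPU, and "sn<i>: N"
// sets the serial number of CPU i.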
1423 #if !defined(PERF_STRIP)
1424 static void get_serial_number (cpu_state_t * cpup)
1425 {
1426 bool havesn = false;
1427 FILE * fp = fopen ("./serial.txt", "r");
1428 while (fp && ! feof (fp))
1429 {
1430 char buffer [81] = "";
1431 # if !defined(__clang_analyzer__)
1432 char * checksn = fgets (buffer, sizeof (buffer), fp);
1433 (void)checksn;
1434 # endif
1435 uint cpun, sn;
1436 if (sscanf (buffer, "sn: %u", & sn) == 1)
1437 {
1438 if (cpu.switches.serno)
1439 sim_msg ("\r\nReplacing CPU serial number:\r\n");
1440 cpu.switches.serno = sn;
1441 if (!sim_quiet)
1442 {
1443 sim_msg ("%s CPU serial number: %u\r\n", sim_name, cpu.switches.serno);
1444 }
1445 havesn = true;
1446 }
1447 else if (sscanf (buffer, "sn%u: %u", & cpun, & sn) == 2)
1448 {
1449 if (cpun < N_CPU_UNITS_MAX)
1450 {
1451 if (cpus[cpun].switches.serno)
1452 sim_msg ("\r\nReplacing CPU %u serial number:\r\n", cpun);
1453 cpus[cpun].switches.serno = sn;
1454 if (!sim_quiet)
1455 {
1456 sim_msg ("%s CPU %u serial number: %u\r\n",
1457 sim_name, cpun, cpus[cpun].switches.serno);
1458 }
1459 havesn = true;
1460 }
1461 }
1462 }
1463 if (!havesn)
1464 {
1465 if (!sim_quiet)
1466 {
1467 sim_msg ("\r\nPlease register your system at "
1468 "https://ringzero.wikidot.com/wiki:register\r\n");
1469 sim_msg ("or create the file 'serial.txt' containing the line "
1470 "'sn: 0'.\r\n\r\n");
1471 }
1472 }
1473 if (fp)
1474 fclose (fp);
1475 }
1476 #endif
1477
1478 #if defined(STATS)
1479 static void do_stats (void)
1480 {
1481 static struct timespec stats_time;
1482 static bool first = true;
1483 if (first)
1484 {
1485 first = false;
1486 clock_gettime (CLOCK_BOOTTIME, & stats_time);
1487 sim_msg ("stats started\r\n");
1488 }
1489 else
1490 {
1491 struct timespec now, delta;
1492 clock_gettime (CLOCK_BOOTTIME, & now);
1493 timespec_diff (& stats_time, & now, & delta);
1494 stats_time = now;
1495 sim_msg ("stats %6ld.%02ld\r\n", delta.tv_sec,
1496 delta.tv_nsec / 10000000);
1497
1498 sim_msg ("Instruction counts\r\n");
1499 for (uint i = 0; i < 8; i ++)
1500 {
1501 # if defined(WIN_STDIO)
1502 sim_msg (" %9lld\r\n", (long long int) cpus[i].instrCnt);
1503 # else
1504 sim_msg (" %'9lld\r\n", (long long int) cpus[i].instrCnt);
1505 # endif
1506 cpus[i].instrCnt = 0;
1507 }
1508 sim_msg ("\r\n");
1509 }
1510 }
1511 #endif
1512
1513
1514
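// libuv timer callback, run every sys_poll_interval: it polls the FNP, socket,
// console, IOM payload and ABSI/MGP devices (as configured), and every
// sys_slow_poll_interval ticks it also processes card reader events, optional
// statistics, and samples the instruction count.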
1515 #if !defined(PERF_STRIP)
1516 static void ev_poll_cb (UNUSED uv_timer_t * handle)
1517 {
1518 cpu_state_t * cpup = _cpup;
1519
1520
1521 static uint oneHz = 0;
1522 if (oneHz ++ >= sys_opts.sys_slow_poll_interval)
1523 {
1524 oneHz = 0;
1525 rdrProcessEvent ();
1526 # if defined(STATS)
1527 do_stats ();
1528 # endif
1529 cpu.instrCntT0 = cpu.instrCntT1;
1530 cpu.instrCntT1 = cpu.instrCnt;
1531 }
1532 fnpProcessEvent ();
1533 # if defined(WITH_SOCKET_DEV)
1534 # if !defined(__MINGW64__) && !defined(__MINGW32__) && !defined(CROSS_MINGW32) && !defined(CROSS_MINGW64)
1535 sk_process_event ();
1536 # endif
1537 # endif
1538 consoleProcess ();
1539 # if defined(IO_ASYNC_PAYLOAD_CHAN)
1540 iomProcess ();
1541 # endif
1542 # if defined(WITH_ABSI_DEV)
1543 # if !defined(__MINGW32__) && !defined(__MINGW64__) && !defined(CROSS_MINGW32) && !defined(CROSS_MINGW64)
1544 absi_process_event ();
1545 # endif
1546 # endif
1547 # if defined(WITH_MGP_DEV)
1548 # if !defined(__MINGW32__) && !defined(__MINGW64__) && !defined(CROSS_MINGW32) && !defined(CROSS_MINGW64)
1549 mgp_process_event ();
1550 # endif
1551 # endif
1552 PNL (panel_process_event ());
1553 }
1554 #endif
1555
1556
1557
1558 void cpu_init (void)
1559 {
1560
1561
1562
1563 M = system_state->M;
1564 #if defined(M_SHARED)
1565 cpus = system_state->cpus;
1566 #endif
1567
1568 #if !defined(SPEED)
1569 (void)memset (& watch_bits, 0, sizeof (watch_bits));
1570 #endif
1571
1572 set_cpu_idx (0);
1573
1574 (void)memset (cpus, 0, sizeof (cpu_state_t) * N_CPU_UNITS_MAX);
1575
1576 #if !defined(PERF_STRIP)
1577 get_serial_number (_cpup);
1578
1579 ev_poll_loop = uv_default_loop ();
1580 uv_timer_init (ev_poll_loop, & ev_poll_handle);
1581
1582 uv_timer_start (& ev_poll_handle, ev_poll_cb, sys_opts.sys_poll_interval, sys_opts.sys_poll_interval);
1583 #endif
1584
1585
1586 cpu_state_t * cpup = _cpup;
1587
1588 cpu.instrCnt = 0;
1589 cpu.cycleCnt = 0;
1590 for (int i = 0; i < N_FAULTS; i ++)
1591 cpu.faultCnt [i] = 0;
1592
1593 #if defined(MATRIX)
1594 initializeTheMatrix ();
1595 #endif
1596 }
1597
1598 static void cpu_reset (void)
1599 {
1600 for (uint i = 0; i < N_CPU_UNITS_MAX; i ++)
1601 {
1602 cpu_reset_unit_idx (i, true);
1603 }
1604
1605 set_cpu_idx (0);
1606
1607 #if defined(TESTING)
1608 cpu_state_t * cpup = _cpup;
1609 sim_debug (DBG_INFO, & cpu_dev, "CPU reset: Running\r\n");
1610 #endif
1611 }
1612
1613 static t_stat sim_cpu_reset (UNUSED DEVICE *dptr)
1614 {
1615
1616
1617
1618
1619
1620 cpu_reset ();
1621 return SCPE_OK;
1622 }
1623
1624
1625
1626
1627
1628
1629
1630
1631
1632 static t_stat cpu_ex (t_value *vptr, t_addr addr, UNUSED UNIT * uptr,
1633 UNUSED int32 sw)
1634 {
1635     if (addr >= MEMSIZE)
1636 return SCPE_NXM;
1637 if (vptr != NULL)
1638 {
1639 *vptr = M[addr] & DMASK;
1640 }
1641 return SCPE_OK;
1642 }
1643
1644
1645
1646 static t_stat cpu_dep (t_value val, t_addr addr, UNUSED UNIT * uptr,
1647 UNUSED int32 sw)
1648 {
1649 if (addr >= MEMSIZE) return SCPE_NXM;
1650 M[addr] = val & DMASK;
1651 return SCPE_OK;
1652 }
1653
1654
1655
1656
1657
1658 #if defined(M_SHARED)
1659
1660 static word18 dummy_IC;
1661 #endif
1662
1663 static REG cpu_reg[] =
1664 {
1665
1666 #if defined(M_SHARED)
1667 { ORDATA (IC, dummy_IC, VASIZE), 0, 0, 0 },
1668 #else
1669 { ORDATA (IC, cpus[0].PPR.IC, VASIZE), 0, 0, 0 },
1670 #endif
1671 { NULL, NULL, 0, 0, 0, 0, NULL, NULL, 0, 0, 0 }
1672 };
1673
1674
1675
1676
1677
1678 REG *sim_PC = & cpu_reg[0];
1679
1680
1681
1682 DEVICE cpu_dev =
1683 {
1684 "CPU",
1685 cpu_unit,
1686 cpu_reg,
1687 cpu_mod,
1688 N_CPU_UNITS,
1689 8,
1690 PASIZE,
1691 1,
1692 8,
1693 36,
1694 & cpu_ex,
1695 & cpu_dep,
1696 & sim_cpu_reset,
1697 & cpu_boot,
1698 NULL,
1699 NULL,
1700 NULL,
1701 DEV_DEBUG,
1702 0,
1703 cpu_dt,
1704 NULL,
1705 NULL,
1706 NULL,
1707 NULL,
1708 NULL,
1709 NULL,
1710 NULL
1711 };
1712
1713 #if defined(M_SHARED)
1714 cpu_state_t * cpus = NULL;
1715 #else
1716 cpu_state_t cpus [N_CPU_UNITS_MAX];
1717 #endif
1718 #if defined(THREADZ) || defined(LOCKLESS)
1719 __thread cpu_state_t * restrict _cpup;
1720 #else
1721 cpu_state_t * restrict _cpup;
1722 #endif
1723
1724
1725
1726
1727
1728
1729
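// Scan the SCUs that have an interrupt pending (XIP) and return the interrupt
// pair address of the highest priority pending interrupt, or 1 if none found.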
1730 static uint get_highest_intr (cpu_state_t *cpup)
1731 {
1732 uint fp = 1;
1733 for (uint scu_unit_idx = 0; scu_unit_idx < N_SCU_UNITS_MAX; scu_unit_idx ++)
1734 {
1735 if (cpu.events.XIP [scu_unit_idx])
1736 {
1737 fp = scu_get_highest_intr (scu_unit_idx);
1738 if (fp != 1)
1739 break;
1740 }
1741 }
1742 return fp;
1743 }
1744
1745 bool sample_interrupts (cpu_state_t * cpup)
1746 {
1747 cpu.lufCounter = 0;
1748 for (uint scu_unit_idx = 0; scu_unit_idx < N_SCU_UNITS_MAX; scu_unit_idx ++)
1749 {
1750 if (cpu.events.XIP [scu_unit_idx])
1751 {
1752 return true;
1753 }
1754 }
1755 return false;
1756 }
1757
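// Per-cycle simulator housekeeping: honor a console stop request, run any due
// scheduled events (unless in ISOLTS mode), decrement sim_interval, and check
// execution breakpoints and the optional cycle-count debug break.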
1758 t_stat simh_hooks (cpu_state_t * cpup)
1759 {
1760 int reason = 0;
1761
1762 if (breakEnable && stop_cpu)
1763 return STOP_STOP;
1764
1765 if (cpu.tweaks.isolts_mode == 0)
1766 {
1767
1768 if (sim_interval <= 0)
1769 {
1770 reason = sim_process_event ();
1771 if ((! breakEnable) && reason == SCPE_STOP)
1772 reason = SCPE_OK;
1773 if (reason)
1774 return reason;
1775 }
1776 }
1777
1778 sim_interval --;
1779
1780 #if !defined(THREADZ) && !defined(LOCKLESS)
1781
1782
1783
1784 if (sim_brk_summ &&
1785 sim_brk_test ((cpu.PPR.IC & 0777777) |
1786 ((((t_addr) cpu.PPR.PSR) & 037777) << 18),
1787 SWMASK ('E')))
1788 return STOP_BKPT;
1789 # if !defined(SPEED)
1790 if (sim_deb_break && cpu.cycleCnt >= sim_deb_break)
1791 return STOP_BKPT;
1792 # endif
1793 #endif
1794
1795 return reason;
1796 }
1797
1798 #if defined(PANEL68)
1799 static void panel_process_event (void)
1800 {
1801 cpu_state_t * cpup = _cpup;
1802
1803 if (cpu.panelInitialize && cpu.DATA_panel_s_trig_sw == 0)
1804 {
1805
1806 while (cpu.panelInitialize)
1807 ;
1808 if (cpu.DATA_panel_init_sw)
1809 cpu_reset_unit_idx (ASSUME0, true);
1810 else
1811 cpu_reset_unit_idx (ASSUME0, false);
1812
1813 do_boot ();
1814 }
1815
1816 if (cpu.DATA_panel_s_trig_sw == 0 &&
1817 cpu.DATA_panel_execute_sw &&
1818 cpu.DATA_panel_scope_sw &&
1819 cpu.DATA_panel_exec_sw == 0)
1820
1821 {
1822
1823 while (cpu.DATA_panel_execute_sw)
1824 ;
1825
1826 if (cpu.DATA_panel_exec_sw)
1827 {
1828 cpu_reset_unit_idx (ASSUME0, false);
1829 cpu.cu.IWB = cpu.switches.data_switches;
1830 set_cpu_cycle (cpup, EXEC_cycle);
1831 }
1832 else
1833 {
1834 setG7fault (current_running_cpu_idx, FAULT_EXF);
1835 }
1836 }
1837 }
1838 #endif
1839
1840 #if defined(THREADZ) || defined(LOCKLESS)
1841 bool bce_dis_called = false;
1842
1843
1844 t_stat sim_instr (void)
1845 {
1846 cpu_state_t * cpup = _cpup;
1847 t_stat reason = 0;
1848
1849
1850
1851
1852
1853
1854
1855
1856
1857
1858
1859
1860
1861
1862
1863
1864
1865
1866
1867
1868
1869
1870
1871
1872
1873
1874
1875
1876
1877
1878
1879
1880
1881
1882
1883
1884
1885
1886
1887
1888
1889
1890
1891
1892
1893 if (cpuThreadz[0].run == false)
1894 createCPUThread (0);
1895 do
1896 {
1897
1898 reason = simh_hooks (cpup);
1899 if (reason)
1900 {
1901 break;
1902 }
1903
1904
1905
1906
1907
1908
1909
1910
1911
1912
1913
1914
1915
1916
1917
1918
1919
1920
1921
1922
1923
1924
1925
1926
1927
1928
1929
1930 if (bce_dis_called) {
1931
1932 reason = STOP_STOP;
1933 break;
1934 }
1935
1936 # if !defined(PERF_STRIP)
1937
1938
1939 # if defined(LOCKLESS)
1940 lock_iom();
1941 # endif
1942 lock_libuv ();
1943 uv_run (ev_poll_loop, UV_RUN_NOWAIT);
1944 unlock_libuv ();
1945 # if defined(LOCKLESS)
1946 unlock_iom();
1947 # endif
1948 PNL (panel_process_event ());
1949
1950 int con_unit_idx = check_attn_key ();
1951 if (con_unit_idx != -1)
1952 console_attn_idx (con_unit_idx);
1953 # endif
1954
1955 # if defined(IO_ASYNC_PAYLOAD_CHAN_THREAD)
1956 struct timespec next_time;
1957 clock_gettime (CLOCK_REALTIME, & next_time);
1958 next_time.tv_nsec += 1000l * 1000l;
1959 if (next_time.tv_nsec >= 1000l * 1000l *1000l)
1960 {
1961 next_time.tv_nsec -= 1000l * 1000l *1000l;
1962 next_time.tv_sec += (time_t) 1;
1963 }
1964 struct timespec new_time;
1965 do
1966 {
1967 pthread_mutex_lock (& iom_start_lock);
1968 pthread_cond_timedwait (& iomCond,
1969 & iom_start_lock,
1970 & next_time);
1971 pthread_mutex_unlock (& iom_start_lock);
1972 lock_iom();
1973 lock_libuv ();
1974
1975 iomProcess ();
1976
1977 unlock_libuv ();
1978 unlock_iom ();
1979
1980 clock_gettime (CLOCK_REALTIME, & new_time);
1981 }
1982 while ((next_time.tv_sec == new_time.tv_sec) ? (next_time.tv_nsec > new_time.tv_nsec) : \
1983 (next_time.tv_sec > new_time.tv_sec));
1984 # else
1985 sim_usleep (1000);
1986 # endif
1987 }
1988 while (reason == 0);
1989
1990 for (uint cpuNo = 0; cpuNo < N_CPU_UNITS_MAX; cpuNo ++) {
1991 cpuStats (cpuNo);
1992 }
1993
1994 # if defined(TESTING)
1995 HDBGPrint ();
1996 # endif
1997 return reason;
1998 }
1999 #endif
2000
2001 #if !defined(THREADZ) && !defined(LOCKLESS)
2002 static uint fast_queue_subsample = 0;
2003 #endif
2004
2005
2006
2007
2008
2009
2010
2011
2012
2013
2014
2015
2016
2017
2018
2019
2020
2021
2022
2023
2024
2025
2026
2027
2028
2029
2030
2031
2032
2033
2034
2035
2036
2037
2038
2039
2040
2041
2042
2043
2044
2045
2046
2047
2048
2049
2050
2051
2052
2053 #if defined(THREADZ) || defined(LOCKLESS)
2054 void * cpu_thread_main (void * arg)
2055 {
2056 int myid = * (int *) arg;
2057 set_cpu_idx ((uint) myid);
2058 unsigned char umyid = (unsigned char)toupper('a' + (int)myid);
2059 char thread_name[SIR_MAXPID] = {0};
2060 char temp_thread_name[SIR_MAXPID] = {0};
2061
2062 _cpup->thread_id = pthread_self();
2063
2064 if (realtime_ok) {
2065 set_realtime_priority (pthread_self(), realtime_max_priority() - 1);
2066 check_realtime_priority (pthread_self(), realtime_max_priority() - 1);
2067 } else {
2068 # if !defined(__QNX__)
2069 (void)sim_os_set_thread_priority (PRIORITY_ABOVE_NORMAL);
2070 # endif
2071 }
2072 _sir_snprintf_trunc(thread_name, SIR_MAXPID, "CPU %c", (unsigned int)umyid);
2073 if (!_sir_setthreadname(thread_name) || !_sir_getthreadname(temp_thread_name))
2074 (void)sir_info ("%s thread created (TID " SIR_TIDFORMAT ")",
2075 thread_name, PID_CAST _sir_gettid());
2076 else
2077 (void)sir_info ("Thread created (TID " SIR_TIDFORMAT ")",
2078 PID_CAST _sir_gettid());
2079 # if defined(TESTING) && defined(__APPLE__) && defined(__MACH__)
2080 (void)sir_info ("Mach thread ID: 0x%x", pthread_mach_thread_np(pthread_self()));
2081 # endif
2082 bool warned = false;
2083 if (realtime_ok) {
2084 if (myid + 2 > nprocs) {
2085 (void)sir_warn ("Total number of supervisor and CPU threads (%lu) exceeds available host parallelism (%lu)!",
2086 (unsigned long)(myid) + 2, (unsigned long)nprocs);
2087 warned = true;
2088 }
2089 if (!warned && nprocs >= 2 && ncores >= 1 && nprocs >= ncores && myid + 2 > ncores) {
2090 (void)sir_warn ("Total number of supervisor and CPU threads (%lu) exceeds physical host core count (%lu)!",
2091 (unsigned long)(myid) + 2, (unsigned long)ncores);
2092 }
2093 } else {
2094 if (myid + 1 > nprocs) {
2095 (void)sir_warn ("Total number of CPU threads (%lu) exceeds available host parallelism (%lu)!",
2096 (unsigned long)(myid) + 1, (unsigned long)nprocs);
2097 warned = true;
2098 }
2099 if (!warned && ncores >= 1 && nprocs >= ncores && myid + 1 > ncores) {
2100 (void)sir_warn ("Total number of CPU threads (%lu) exceeds physical host core count (%lu)!",
2101 (unsigned long)(myid) + 1, (unsigned long)ncores);
2102 }
2103 }
2104 setSignals ();
2105 threadz_sim_instr ();
2106 return NULL;
2107 }
2108 #endif
2109
2110 NO_RETURN
2111 static void do_LUF_fault (cpu_state_t * cpup)
2112 {
2113 CPT (cpt1U, 16);
2114 cpu.lufCounter = 0;
2115 cpu.lufOccurred = false;
2116
2117
2118
2119
2120
2121
2122
2123
2124
2125
2126
2127
2128
2129
2130
2131
2132
2133
2134 if (cpu.tweaks.isolts_mode)
2135 cpu.shadowTR = (word27) cpu.TR0 - (1024u << (is_priv_mode (cpup) ? 4 : cpu.CMR.luf));
2136
2137
2138
2139
2140
2141
2142
2143
2144
2145
2146
2147
2148 doFault (FAULT_LUF, fst_zero, "instruction cycle lockup");
2149 }
2150
2151 #if !defined(THREADZ) && !defined(LOCKLESS)
2152 # define threadz_sim_instr sim_instr
2153 #endif
2154
2155
2156
2157
2158
2159
2160
2161
2162
2163
2164
2165 static void set_temporary_absolute_mode (cpu_state_t * cpup)
2166 {
2167 CPT (cpt1L, 20);
2168 cpu.secret_addressing_mode = true;
2169 cpu.cu.XSF = false;
2170 sim_debug (DBG_TRACEEXT, & cpu_dev, "set_temporary_absolute_mode bit 29 sets XSF to 0\r\n");
2171
2172 }
2173
2174 static bool clear_temporary_absolute_mode (cpu_state_t * cpup)
2175 {
2176 CPT (cpt1L, 21);
2177 cpu.secret_addressing_mode = false;
2178 return cpu.cu.XSF;
2179
2180 }
2181
2182 #if defined(THREADZ) || defined(LOCKLESS)
2183 static const int workAllocationQuantum = 64;
2184 static const int syncClockModePollRate = 64;
2185 static const int masterCycleCntlimit = 2048;
2186
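// Enter synchronous clock mode with CPU cpuNum as master. If another CPU has
// already claimed mastership this is a no-op; otherwise the master zeroes the
// other CPUs' work allocations and asks every CPU that is running Multics and
// not already a slave to become one. The fences order the flag updates as seen
// by the other CPU threads.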
2187 void becomeClockMaster (uint cpuNum) {
2188
2189 # ifdef SYNCTEST
2190 sim_printf ("CPU%c %s entry\r\n", cpuNum + 'A', __func__);
2191 allocCount = 0;
2192 # endif
2193
2194
2195
2196
2197 if (syncClockMode) {
2198
2199
2200
2201 return;
2202 }
2203
2204 syncClockModeMasterIdx = cpuNum;
2205 cpu_state_t * cpup = & cpus[cpuNum];
2206 cpu.syncClockModeMaster = true;
2207 cpu.masterCycleCnt = 0;
2208 cpu.syncClockModeCache = true;
2209 for (int i = 0; i < N_CPU_UNITS_MAX; i ++) {
2210 if (i != cpuNum) {
2211 cpus[i].workAllocation = 0;
2212 __asm volatile ("");
2213 atomic_thread_fence (memory_order_seq_cst);
2214 if (cpus[i].inMultics && ! cpus[i].isSlave) {
2215 cpus[i].syncClockModePoll = 0;
2216 __asm volatile ("");
2217 atomic_thread_fence (memory_order_seq_cst);
2218 cpus[i].becomeSlave = true;
2219 __asm volatile ("");
2220 atomic_thread_fence (memory_order_seq_cst);
2221 }
2222 }
2223 }
2224
2225 __asm volatile ("");
2226 atomic_thread_fence (memory_order_seq_cst);
2227 syncClockMode = true;
2228
2229 __asm volatile ("");
2230 atomic_thread_fence (memory_order_seq_cst);
2231
2232 }
2233
2234 void giveupClockMaster (cpu_state_t * cpup) {
2235
2236 # ifdef SYNCTEST
2237
2238 sim_printf ("CPU%c %s entry\r\n", cpu.cpuIdx + 'A', __func__);
2239 sim_printf ("CPU%c Alloc count %d\r\n", cpu.cpuIdx + 'A', allocCount);
2240 # endif
2241 __asm volatile ("");
2242 cpu.syncClockModeMaster = false;
2243 __asm volatile ("");
2244 syncClockMode = false;
2245 __asm volatile ("");
2246 for (int i = 0; i < N_CPU_UNITS_MAX; i ++) {
2247 cpus[i].syncClockModeCache = false;
2248 }
2249 __asm volatile ("");
2250 atomic_thread_fence (memory_order_seq_cst);
2251
2252 }
2253 #endif
2254
2255 t_stat threadz_sim_instr (void)
2256 {
2257 cpu_state_t * cpup = _cpup;
2258
2259
2260 #if !defined(SCHED_NEVER_YIELD)
2261 unsigned long long lockYieldAll = 0;
2262 #endif
2263 unsigned long long lockWaitMaxAll = 0;
2264 unsigned long long lockWaitAll = 0;
2265 unsigned long long lockImmediateAll = 0;
2266 unsigned long long lockCntAll = 0;
2267 unsigned long long instrCntAll = 0;
2268 unsigned long long cycleCntAll = 0;
2269
2270 t_stat reason = 0;
2271
2272 #if !defined(THREADZ) && !defined(LOCKLESS)
2273 set_cpu_idx (0);
2274 # if defined(M_SHARED)
2275
2276
2277
2278
2279 cpus [0].PPR.IC = dummy_IC;
2280 # endif
2281
2282 #endif
2283
2284
2285 int val = setjmp (cpu.jmpMain);
2286
2287 switch (val)
2288 {
2289 case JMP_ENTRY:
2290 case JMP_REENTRY:
2291 reason = 0;
2292 break;
2293 case JMP_SYNC_FAULT_RETURN:
2294 set_cpu_cycle (cpup, SYNC_FAULT_RTN_cycle);
2295 break;
2296 case JMP_STOP:
2297 reason = STOP_STOP;
2298 goto leave;
2299 case JMP_REFETCH:
2300
2301
2302
2303
2304
2305
2306
2307 cpu.wasXfer = false;
2308
2309 set_cpu_cycle (cpup, FETCH_cycle);
2310 break;
2311 case JMP_RESTART:
2312 set_cpu_cycle (cpup, EXEC_cycle);
2313 break;
2314 case JMP_FORCE_RESTART:
2315
2316
2317
2318
2319
2320
2321 cpu_reset_unit_idx (current_running_cpu_idx, false);
2322 #if defined(THREADZ) || defined(LOCKLESS)
2323
2324 if (syncClockMode && syncClockModeMasterIdx == current_running_cpu_idx)
2325 giveupClockMaster (cpup);
2326 #endif
2327 break;
2328 default:
2329 sim_warn ("longjmp value of %d unhandled\r\n", val);
2330 goto leave;
2331 }
2332
2333
2334
2335 DCDstruct * ci = & cpu.currentInstruction;
2336
2337 if (cpu.restart)
2338 {
2339 set_cpu_cycle (cpup, FAULT_cycle);
2340 }
2341
2342 #if defined(THREADZ) || defined(LOCKLESS)
2343
2344
2345
2346
2347 __asm volatile ("");
2348 cpu.executing = true;
2349 if (cpu.tweaks.isolts_mode) {
2350 ;
2351 } else {
2352 cpu.inMultics = true;
2353 }
2354 #endif
2355
2356 do
2357 {
2358
2359 reason = 0;
2360
2361 #if !defined(THREADZ) && !defined(LOCKLESS)
2362
2363 reason = simh_hooks (cpup);
2364 if (reason)
2365 {
2366 break;
2367 }
2368
2369
2370
2371
2372
2373
2374
2375
2376 if (fast_queue_subsample ++ > sys_opts.sys_poll_check_rate)
2377 {
2378 fast_queue_subsample = 0;
2379 # if defined(CONSOLE_FIX)
2380 # if defined(THREADZ) || defined(LOCKLESS)
2381 lock_libuv ();
2382 # endif
2383 # endif
2384 uv_run (ev_poll_loop, UV_RUN_NOWAIT);
2385 # if defined(CONSOLE_FIX)
2386 # if defined(THREADZ) || defined(LOCKLESS)
2387 unlock_libuv ();
2388 # endif
2389 # endif
2390 PNL (panel_process_event ());
2391 }
2392 #endif
2393
2394 cpu.cycleCnt ++;
2395
2396 #if defined(THREADZ)
2397
2398 unlock_mem_force ();
2399
2400
2401 cpuRunningWait ();
2402 #endif
2403 #if defined(LOCKLESS)
2404 core_unlock_all (cpup);
2405 #endif
2406
2407 #if !defined(LOCKLESS)
2408 int con_unit_idx = check_attn_key ();
2409 if (con_unit_idx != -1)
2410 console_attn_idx (con_unit_idx);
2411 #endif
2412
2413 #if !defined(THREADZ) && !defined(LOCKLESS)
2414 if (cpu.tweaks.isolts_mode)
2415 {
2416 if (cpu.cycle != FETCH_cycle)
2417 {
2418
2419 cpu.rTRlsb ++;
2420 if (cpu.rTRlsb >= 4)
2421 {
2422 cpu.rTRlsb = 0;
2423 cpu.shadowTR = (cpu.shadowTR - 1) & MASK27;
2424 if (cpu.shadowTR == 0)
2425 {
2426 if (cpu.tweaks.tro_enable)
2427 setG7fault (current_running_cpu_idx, FAULT_TRO);
2428 }
2429 }
2430 }
2431 }
2432 #endif
2433
2434
2435
2436
2437
2438
2439
2440
2441
2442
2443
2444
2445
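          // Advance the Timer Register: accumulated rTRticks are converted to
          // TR counts at one count per TR_RATE ticks; a borrow out of the
          // 27-bit register (checked below) raises a Timer Runout group-7
          // fault when tro_enable is set.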
2446 # define TR_RATE 2
2447
2448
2449
2450 cpu.rTR = (word27) (((word27s) cpu.rTR) - (word27s) (cpu.rTRticks / TR_RATE));
2451 cpu.rTRticks %= TR_RATE;
2452
2453
2454
2455 if (cpu.rTR & ~MASK27)
2456 {
2457 cpu.rTR &= MASK27;
2458 if (cpu.tweaks.tro_enable) {
2459 setG7fault (current_running_cpu_idx, FAULT_TRO);
2460 }
2461 }
2462
2463 sim_debug (DBG_CYCLE, & cpu_dev, "Cycle is %s\r\n",
2464 cycle_str (cpu.cycle));
2465
2466 switch (cpu.cycle)
2467 {
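                // INTERRUPT_cycle: the highest-priority pending interrupt
                // pair address is obtained (get_highest_intr), control-unit
                // state is safe-stored, and the CPU drops into temporary
                // absolute mode with PRR/TRR forced to 0.  The interrupt pair
                // is then loaded into IWB/IRODD and executed as an XED pair
                // in INTERRUPT_EXEC_cycle.  If no interrupt is actually
                // pending (interrupt_flag clear, or pair address 1), the CU
                // state is restored and the CPU returns to FETCH_cycle.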
2468 case INTERRUPT_cycle:
2469 {
2470 CPT (cpt1U, 0);
2471
2472
2473
2474
2475
2476
2477
2478
2479
2480
2481
2482 uint intr_pair_addr = get_highest_intr (cpup);
2483 #if defined(TESTING)
2484 HDBGIntr (intr_pair_addr, "");
2485 #endif
2486 cpu.cu.FI_ADDR = (word5) (intr_pair_addr / 2);
2487 cu_safe_store (cpup);
2488
2489
2490
2491 CPT (cpt1U, 1);
2492
2493 set_temporary_absolute_mode (cpup);
2494
2495
2496 cpu.PPR.PRR = 0;
2497 cpu.TPR.TRR = 0;
2498
2499 sim_debug (DBG_INTR, & cpu_dev, "intr_pair_addr %u flag %d\r\n",
2500 intr_pair_addr, cpu.interrupt_flag);
2501 #if !defined(SPEED)
2502 if_sim_debug (DBG_INTR, & cpu_dev)
2503 traceInstruction (DBG_INTR);
2504 #endif
2505
2506 if (cpu.interrupt_flag)
2507 {
2508 CPT (cpt1U, 2);
2509
2510
2511
2512
2513
2514
2515 if (intr_pair_addr != 1)
2516 {
2517 CPT (cpt1U, 3);
2518
2519
2520 core_read2 (cpup, intr_pair_addr,
2521 & cpu.cu.IWB, & cpu.cu.IRODD, __func__);
2522 #if defined(TESTING)
2523 HDBGMRead (intr_pair_addr, cpu.cu.IWB, "intr even");
2524 HDBGMRead (intr_pair_addr + 1, cpu.cu.IRODD, "intr odd");
2525 #endif
2526 cpu.cu.xde = 1;
2527 cpu.cu.xdo = 1;
2528 cpu.isExec = true;
2529 cpu.isXED = true;
2530
2531 CPT (cpt1U, 4);
2532 cpu.interrupt_flag = false;
2533 set_cpu_cycle (cpup, INTERRUPT_EXEC_cycle);
2534 break;
2535 }
2536 }
2537
2538
2539
2540 CPT (cpt1U, 5);
2541 cpu.interrupt_flag = false;
2542 clear_temporary_absolute_mode (cpup);
2543
2544 cu_safe_restore (cpup);
2545
2546
2547 cpu.wasXfer = false;
2548
2549
2550 set_cpu_cycle (cpup, FETCH_cycle);
2551 }
2552 break;
2553
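                // FETCH_cycle: interrupts and group 7 faults are sampled only
                // at an even IC, outside XEC/XED and repeat sequences, and
                // not immediately after a transfer or an inhibited
                // instruction; DIS samples them unconditionally.  A pending
                // group 7 fault is serviced first (doG7Fault), then a pending
                // interrupt switches to INTERRUPT_cycle before anything is
                // fetched.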
2554 case FETCH_cycle:
2555 #if defined(PANEL68)
2556 (void)memset (cpu.cpt, 0, sizeof (cpu.cpt));
2557 #endif
2558 CPT (cpt1U, 13);
2559
2560 PNL (L68_ (cpu.INS_FETCH = false;))
2561
2562
2563
2564
2565
2566
2567
2568
2569
2570
2571
2572
2573
2574
2575
2576
2577
2578
2579
2580
2581
2582
2583
2584
2585
2586
2587
2588
2589
2590
2591
2592
2593
2594
2595
2596
2597 if (get_bar_mode (cpup))
2598 get_BAR_address (cpup, cpu.PPR.IC);
2599
2600
2601
2602
2603 bool tmp_priv_mode = is_priv_mode (cpup);
2604 bool is_dis = cpu.currentInstruction.opcode == 0616 &&
2605 cpu.currentInstruction.opcodeX == 0;
2606 bool noCheckTR = tmp_priv_mode &&
2607 !(is_dis && GET_I (cpu.cu.IWB) == 0);
2608
2609 if (is_dis)
2610 {
2611
2612
2613 cpu.interrupt_flag = sample_interrupts (cpup);
2614 cpu.g7_flag =
2615 noCheckTR ? bG7PendingNoTRO (cpup) : bG7Pending (cpup);
2616 }
2617 else if (! (cpu.cu.xde | cpu.cu.xdo |
2618 cpu.cu.rpt | cpu.cu.rd | cpu.cu.rl))
2619 {
2620 if ((!cpu.wasInhibited) &&
2621 (cpu.PPR.IC & 1) == 0 &&
2622 (! cpu.wasXfer))
2623 {
2624 CPT (cpt1U, 14);
2625 cpu.interrupt_flag = sample_interrupts (cpup);
2626 cpu.g7_flag =
2627 noCheckTR ? bG7PendingNoTRO (cpup) : bG7Pending (cpup);
2628 }
2629 cpu.wasInhibited = false;
2630 }
2631 else
2632 {
2633
2634
2635
2636
2637
2638 if ((cpu.PPR.IC & 1) == 1)
2639 {
2640 cpu.wasInhibited = true;
2641 }
2642 }
2643
2644
2645
2646
2647
2648
2649
2650
2651
2652
2653
2654
2655
2656
2657
2658
2659
2660
2661
2662
2663
2664
2665
2666
2667
2668
2669
2670
2671
2672
2673
2674
2675
2676
2677
2678 if (cpu.g7_flag)
2679 {
2680 cpu.g7_flag = false;
2681 cpu.interrupt_flag = false;
2682 sim_debug (DBG_CYCLE, & cpu_dev,
2683 "call doG7Fault (%d)\r\n", !noCheckTR);
2684 doG7Fault (cpup, !noCheckTR);
2685 }
2686 if (cpu.interrupt_flag)
2687 {
2688
2689
2690
2691 CPT (cpt1U, 15);
2692 set_cpu_cycle (cpup, INTERRUPT_cycle);
2693 break;
2694 }
2695
2696
2697
2698
2699
2700
2701
2702
2703
2704 case PSEUDO_FETCH_cycle:
2705
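                  // Lockup (LUF) check, shared by FETCH_cycle and
                  // PSEUDO_FETCH_cycle via fall-through: lufCounter is bumped
                  // unless luf_flag and privileged mode both hold.  Past
                  // luf_limits[CMR.luf] a LUF fault is taken immediately in
                  // unprivileged code or deferred (lufOccurred) until
                  // privilege is dropped; luf_limits[4] is the hard ceiling
                  // that faults regardless.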
2706 tmp_priv_mode = is_priv_mode (cpup);
2707 if (! (luf_flag && tmp_priv_mode))
2708 cpu.lufCounter ++;
2709
2710 if (cpu.lufCounter > luf_limits[cpu.CMR.luf])
2711 {
2712 if (tmp_priv_mode)
2713 {
2714
2715 cpu.lufOccurred = true;
2716 }
2717 else
2718 {
2719 do_LUF_fault (cpup);
2720 }
2721 }
2722
2723
2724 if (cpu.lufCounter > luf_limits[4])
2725 {
2726 do_LUF_fault (cpup);
2727 }
2728
2729
2730
2731 if (! tmp_priv_mode && cpu.lufOccurred)
2732 {
2733 do_LUF_fault (cpup);
2734 }
2735
2736
2737
2738
2739
2740
2741
2742
2743
2744
2745
2746
2747
2748
2749
2750
2751
2752
2753
2754
2755
2756
2757
2758
2759
2760
2761
2762
2763
2764
2765
2766
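                  // In PSEUDO_FETCH_cycle the next instruction is already in
                  // IWB (set, e.g., by CONT_TRA or the even/odd pair shortcut
                  // in the EXEC path below), so only the APU/TPR state is
                  // re-primed; a real FETCH_cycle additionally calls
                  // fetchInstruction on PPR.IC.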
2767 if (cpu.cycle == PSEUDO_FETCH_cycle)
2768 {
2769 cpu.apu.lastCycle = INSTRUCTION_FETCH;
2770 cpu.cu.XSF = 0;
2771 cpu.cu.TSN_VALID [0] = 0;
2772 cpu.TPR.TSR = cpu.PPR.PSR;
2773 cpu.TPR.TRR = cpu.PPR.PRR;
2774 cpu.wasInhibited = false;
2775 }
2776 else
2777 {
2778 CPT (cpt1U, 20);
2779 cpu.isExec = false;
2780 cpu.isXED = false;
2781
2782
2783
2784 cpu.cu.XSF = 0;
2785 sim_debug (DBG_TRACEEXT, & cpu_dev, "fetchCycle bit 29 sets XSF to 0\r\n");
2786 cpu.cu.TSN_VALID [0] = 0;
2787 cpu.TPR.TSR = cpu.PPR.PSR;
2788 cpu.TPR.TRR = cpu.PPR.PRR;
2789 PNL (cpu.prepare_state = ps_PIA);
2790 PNL (L68_ (cpu.INS_FETCH = true;))
2791 fetchInstruction (cpup, cpu.PPR.IC);
2792 }
2793
2794 CPT (cpt1U, 21);
2795 advanceG7Faults (cpup);
2796 set_cpu_cycle (cpup, EXEC_cycle);
2797 break;
2798
2799 case EXEC_cycle:
2800 case FAULT_EXEC_cycle:
2801 case INTERRUPT_EXEC_cycle:
2802 {
2803 #if defined(THREADZ) || defined(LOCKLESS)
2804
2805
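                  // Sync-clock coordination (THREADZ/LOCKLESS builds): while
                  // syncClockMode is on, one CPU acts as clock master and
                  // parcels out workAllocation quanta; the others run as
                  // slaves, sleeping until they have allocation.  The master
                  // waits for every participating CPU to exhaust its quantum
                  // before issuing more, and concedes mastery if that wait
                  // times out or its own cycle budget is exceeded.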
2806 if (UNLIKELY (cpu.becomeSlave)) {
2807 cpu.becomeSlave = false;
2808
2809 while (! syncClockMode) {
2810 sim_usleep (1);
2811 }
2812
2813 cpu.syncClockModePoll = 0;
2814 }
2815
2816
2817 if (cpu.syncClockModeCache || --cpu.syncClockModePoll <= 0) {
2818
2819 cpu.syncClockModePoll = cpu.tweaks.isolts_mode ? 1 : syncClockModePollRate;
2820
2821
2822 if (syncClockMode) {
2823
2824
2825 cpu.syncClockModeCache = true;
2826
2827
2828 if (syncClockModeMasterIdx == current_running_cpu_idx) {
2829
2830
2831 cpu.masterCycleCnt ++;
2832 if (cpu.masterCycleCnt > masterCycleCntlimit) {
2833 # ifdef SYNCTEST
2834 sim_printf ("too many cycles\r\n");
2835 # endif
2836 giveupClockMaster (cpup);
2837 goto bail;
2838 }
2839
2840
2841 if (cpu.workAllocation <= 0) {
2842 # ifdef SYNCTEST
2843 allocCount ++;
2844 # endif
2845
2846
2847
2848
2849
2850 int64_t waitTimeout = 100000;
2851
2852
2853 while (1) {
2854 bool alldone = true;
2855 for (int i = 0; i < N_CPU_UNITS_MAX; i ++) {
2856 if (cpus[i].inMultics && cpus[i].workAllocation > 0) {
2857 wakeCPU (i);
2858 alldone = false;
2859
2860 }
2861 }
2862 if (alldone) {
2863
2864 for (int i = 0; i < N_CPU_UNITS_MAX; i ++) {
2865 if (cpus[i].inMultics) {
2866 cpus[i].workAllocation += cpu.tweaks.isolts_mode ? 1 : workAllocationQuantum;
2867 wakeCPU (i);
2868 }
2869 }
2870 break;
2871 }
2872 if (waitTimeout-- < 0) {
2873
2874
2875 sim_printf ("Clock master CPU %c timed out\r\n", "ABCDEFGH"[current_running_cpu_idx]);
2876 for (int i = 0; i < N_CPU_UNITS_MAX; i ++) {
2877 if (cpus[i].inMultics && cpus[i].workAllocation > 0) {
2878 sim_printf ("CPU %c remaining allocation: %ld\r\n", "ABCDEFGH"[i], cpus[i].workAllocation);
2879 }
2880 }
2881 sim_printf ("Conceding clock mastery...\r\n");
2882 cpu.syncClockModeCache = false;
2883 giveupClockMaster (cpup);
2884 goto bail;
2885 }
2886 sim_usleep (1);
2887 }
2888 }
2889
2890
2891
2892 } else {
2893
2894
2895
2896
2897 if (! cpu.isSlave) {
2898
2899 # ifdef SYNCTEST
2900 sim_printf ("CPU%c becoming slave\r\n", cpu.cpuIdx + 'A');
2901 # endif
2902 }
2903 cpu.isSlave = true;
2904
2905
2906 while (syncClockMode && cpu.workAllocation <= 0)
2907 sim_usleep (1);
2908
2909
2910
2911
2912 }
2913
2914 } else {
2915
2916 cpu.syncClockModeCache = false;
2917 if (cpu.isSlave) {
2918
2919 # ifdef SYNCTEST
2920 sim_printf ("CPU%c free; free at last\r\n", cpu.cpuIdx + 'A');
2921 # endif
2922 cpu.isSlave = false;
2923 }
2924 }
2925 }
2926 bail:
2927
2928 #endif
2929
2930 #if defined(THREADZ) || defined(LOCKLESS)
2931 if (LIKELY (! cpu.tweaks.isolts_mode) &&
2932 UNLIKELY (! cpu.inMultics)) {
2933 cpu.inMultics = true;
2934 }
2935 #endif
2936
2937 CPT (cpt1U, 22);
2938
2939 #if defined(LOCKLESS)
2940 if (stall_point_active)
2941 {
2942 for (int i = 0; i < N_STALL_POINTS; i ++)
2943 if (stall_points[i].segno && stall_points[i].segno == cpu.PPR.PSR &&
2944 stall_points[i].offset && stall_points[i].offset == cpu.PPR.IC)
2945 {
2946 # if defined(CTRACE)
2947 (void)fprintf (stderr, "%10lu %s stall %d\r\n", seqno (), cpunstr[current_running_cpu_idx], i);
2948 # endif
2949
2950 sim_usleep(stall_points[i].time);
2951 break;
2952 }
2953 }
2954 #endif
2955
2956
2957
2958
2959 if (GET_I (cpu.cu.IWB))
2960 cpu.wasInhibited = true;
2961
2962 t_stat ret = executeInstruction (cpup);
2963 DO_WORK_EXEC;
2964 CPT (cpt1U, 23);
2965
2966 if (cpu.tweaks.l68_mode)
2967 add_l68_CU_history (cpup);
2968 else
2969 add_dps8m_CU_history (cpup);
2970
2971 if (ret > 0)
2972 {
2973 reason = ret;
2974 break;
2975 }
2976
2977 if (ret == CONT_XEC)
2978 {
2979 CPT (cpt1U, 27);
2980 cpu.wasXfer = false;
2981 cpu.isExec = true;
2982 if (cpu.cu.xdo)
2983 cpu.isXED = true;
2984
2985 cpu.cu.XSF = 0;
2986 cpu.cu.TSN_VALID [0] = 0;
2987 cpu.TPR.TSR = cpu.PPR.PSR;
2988 cpu.TPR.TRR = cpu.PPR.PRR;
2989 break;
2990 }
2991
2992 if (ret == CONT_TRA || ret == CONT_RET)
2993 {
2994 CPT (cpt1U, 24);
2995 cpu.cu.xde = cpu.cu.xdo = 0;
2996 cpu.isExec = false;
2997 cpu.isXED = false;
2998
2999 cpu.wasXfer = true;
3000
3001 if (cpu.cycle != EXEC_cycle)
3002 {
3003 clearFaultCycle (cpup);
3004
3005
3006
3007
3008
3009 if (! (cpu.currentInstruction.opcode == 0715 &&
3010 cpu.currentInstruction.opcodeX == 0))
3011 {
3012 CPT (cpt1U, 9);
3013 SET_I_NBAR;
3014 }
3015
3016 if (!clear_temporary_absolute_mode (cpup))
3017 {
3018
3019 sim_debug (DBG_TRACEEXT, & cpu_dev,
3020 "setting ABS mode\r\n");
3021 CPT (cpt1U, 10);
3022 set_addr_mode (cpup, ABSOLUTE_mode);
3023 }
3024 else
3025 {
3026
3027 sim_debug (DBG_TRACEEXT, & cpu_dev,
3028 "not setting ABS mode\r\n");
3029 }
3030
3031 }
3032
3033
3034 if (TST_I_ABS && cpu.cu.XSF)
3035 {
3036 set_addr_mode (cpup, APPEND_mode);
3037 }
3038
3039 if (ret == CONT_TRA)
3040 {
3041
3042 cpu.wasXfer = false;
3043 set_cpu_cycle (cpup, PSEUDO_FETCH_cycle);
3044 }
3045 else
3046 set_cpu_cycle (cpup, FETCH_cycle);
3047 break;
3048 }
3049
3050 if (ret == CONT_DIS)
3051 {
3052 CPT (cpt1U, 25);
3053
3054 #if defined(THREADZ) || defined(LOCKLESS)
3055
3056 if (cpu.syncClockModeCache) {
3057 break;
3058 }
3059 #endif
3060
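                  // DIS (delay until interrupt signal): instead of spinning,
                  // the emulator sleeps.  Threaded builds either nanosleep
                  // for one poll interval (NO_TIMEWAIT) or call sleepCPU for
                  // the time remaining on the Timer Register; the
                  // single-threaded path sleeps one poll interval and runs
                  // the libuv event loop.  Either way rTR is charged at 512
                  // ticks per millisecond (e.g. a 10 ms poll interval
                  // consumes 5120 timer counts), and Timer Runout is raised
                  // if it would underflow.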
3061
3062
3063
3064
3065
3066
3067
3068
3069
3070
3071
3072
3073
3074
3075
3076
3077
3078
3079
3080
3081
3082
3083
3084
3085
3086
3087
3088
3089
3090
3091
3092
3093
3094
3095
3096 #if defined(THREADZ) || defined(LOCKLESS)
3097
3098
3099
3100
3101
3102
3103
3104
3105
3106 # if defined(NO_TIMEWAIT)
3107
3108 struct timespec req, rem;
3109 uint ms = sys_opts.sys_poll_interval;
3110 long int nsec = (long int) ms * 1000L * 1000L;
3111 req.tv_nsec = nsec;
3112                      req.tv_sec = req.tv_nsec / 1000000000L;
3113 req.tv_nsec %= 1000000000L;
3114 int rc = nanosleep (& req, & rem);
3115
3116 if (rc == -1)
3117 {
3118 ms = (uint) (rem.tv_nsec / 1000 + req.tv_sec * 1000);
3119 }
3120 word27 ticks = ms * 512;
3121 if (cpu.rTR <= ticks)
3122 {
3123 if (cpu.tweaks.tro_enable) {
3124 setG7fault (current_running_cpu_idx, FAULT_TRO);
3125 }
3126 cpu.rTR = (cpu.rTR - ticks) & MASK27;
3127 }
3128 else
3129 cpu.rTR = (cpu.rTR - ticks) & MASK27;
3130
3131 if (cpu.rTR == 0)
3132 cpu.rTR = MASK27;
3133 # else
3134
3135
3136 unsigned long left = (unsigned long) ((uint64) (cpu.rTR) * 125u / 64u);
3137
3138
3139
3140
3141
3142
3143
3144
3145 unsigned long nowLeft = left;
3146 if (!sample_interrupts (cpup))
3147 {
3148 nowLeft = sleepCPU (left);
3149 }
3150 if (nowLeft)
3151 {
3152
3153
3154 if (nowLeft <= left) {
3155 cpu.rTR = (word27) (left * 64 / 125);
3156 }
3157 }
3158 else
3159 {
3160
3161 if (cpu.tweaks.tro_enable)
3162 {
3163 lock_scu ();
3164 setG7fault (current_running_cpu_idx, FAULT_TRO);
3165 unlock_scu ();
3166 }
3167 cpu.rTR = MASK27;
3168 }
3169 # endif
3170 cpu.rTRticks = 0;
3171 break;
3172 #else
3173
3174 sim_usleep (sys_opts.sys_poll_interval * 1000);
3175
3176 # if defined(CONSOLE_FIX)
3177 # if defined(THREADZ) || defined(LOCKLESS)
3178 lock_libuv ();
3179 # endif
3180 # endif
3181 uv_run (ev_poll_loop, UV_RUN_NOWAIT);
3182 # if defined(CONSOLE_FIX)
3183 # if defined(THREADZ) || defined(LOCKLESS)
3184 unlock_libuv ();
3185 # endif
3186 # endif
3187 fast_queue_subsample = 0;
3188
3189 sim_interval = 0;
3190
3191
3192
3193
3194 cpu.rTRticks = 0;
3195
3196
3197
3198
3199
3200
3201 if (cpu.rTR <= sys_opts.sys_poll_interval * 512)
3202 {
3203 if (cpu.tweaks.tro_enable) {
3204 setG7fault (current_running_cpu_idx, FAULT_TRO);
3205 }
3206 cpu.rTR = (cpu.rTR - sys_opts.sys_poll_interval * 512) & MASK27;
3207 }
3208 else
3209 cpu.rTR = (cpu.rTR - sys_opts.sys_poll_interval * 512) & MASK27;
3210 if (cpu.rTR == 0)
3211 cpu.rTR = MASK27;
3212 #endif
3213
3214 break;
3215 }
3216
3217 cpu.wasXfer = false;
3218
3219 if (ret < 0)
3220 {
3221 sim_warn ("executeInstruction returned %d?\r\n", ret);
3222 break;
3223 }
3224
3225 if ((! cpu.cu.repeat_first) &&
3226 (cpu.cu.rpt ||
3227 (cpu.cu.rd && (cpu.PPR.IC & 1)) ||
3228 cpu.cu.rl))
3229 {
3230 CPT (cpt1U, 26);
3231 if (cpu.cu.rd)
3232 -- cpu.PPR.IC;
3233 cpu.wasXfer = false;
3234 set_cpu_cycle (cpup, FETCH_cycle);
3235 break;
3236 }
3237
3238
3239 if (cpu.cycle == FAULT_EXEC_cycle &&
3240 !cpu.cu.xde && cpu.cu.xdo)
3241 {
3242 clear_temporary_absolute_mode (cpup);
3243 cu_safe_restore (cpup);
3244 CPT (cpt1U, 12);
3245 clearFaultCycle (cpup);
3246
3247
3248
3249 cpu.wasXfer = false;
3250 cpu.isExec = false;
3251 cpu.isXED = false;
3252
3253 cpu.PPR.IC += ci->info->ndes;
3254 cpu.PPR.IC ++;
3255
3256 set_cpu_cycle (cpup, FETCH_cycle);
3257 break;
3258 }
3259
3260
3261 if (cpu.cycle == INTERRUPT_EXEC_cycle &&
3262 !cpu.cu.xde && cpu.cu.xdo)
3263 {
3264 clear_temporary_absolute_mode (cpup);
3265 cu_safe_restore (cpup);
3266
3267
3268
3269 CPT (cpt1U, 12);
3270 cpu.wasXfer = false;
3271 cpu.isExec = false;
3272 cpu.isXED = false;
3273
3274 set_cpu_cycle (cpup, FETCH_cycle);
3275 break;
3276 }
3277
3278
3279 if (cpu.cu.xde && cpu.cu.xdo)
3280 {
3281
3282 cpu.cu.IWB = cpu.cu.IRODD;
3283 cpu.cu.xde = 0;
3284 cpu.isExec = true;
3285 cpu.isXED = true;
3286 cpu.cu.XSF = 0;
3287 cpu.cu.TSN_VALID [0] = 0;
3288 cpu.TPR.TSR = cpu.PPR.PSR;
3289 cpu.TPR.TRR = cpu.PPR.PRR;
3290 break;
3291 }
3292
3293 if (cpu.cu.xde || cpu.cu.xdo)
3294 {
3295 cpu.cu.xde = cpu.cu.xdo = 0;
3296 cpu.isExec = false;
3297 cpu.isXED = false;
3298 CPT (cpt1U, 27);
3299 cpu.wasXfer = false;
3300 cpu.PPR.IC ++;
3301 if (ci->info->ndes > 0)
3302 cpu.PPR.IC += ci->info->ndes;
3303 cpu.wasInhibited = true;
3304 set_cpu_cycle (cpup, FETCH_cycle);
3305 break;
3306 }
3307
3308
3309 if (cpu.cycle != EXEC_cycle)
3310 sim_warn ("expected EXEC_cycle (%d)\r\n", cpu.cycle);
3311
3312 cpu.cu.xde = cpu.cu.xdo = 0;
3313 cpu.isExec = false;
3314 cpu.isXED = false;
3315
3316
3317
3318
3319
3320
3321
3322
3323
3324
3325 if ((cpu.PPR.IC & 1) == 0 &&
3326 ci->info->ndes == 0 &&
3327 !cpu.cu.repeat_first && !cpu.cu.rpt && !cpu.cu.rd && !cpu.cu.rl &&
3328 !(cpu.currentInstruction.opcode == 0616 && cpu.currentInstruction.opcodeX == 0) &&
3329 (cpu.PPR.IC & ~3u) != (cpu.last_write & ~3u))
3330 {
3331 cpu.PPR.IC ++;
3332 cpu.wasXfer = false;
3333 cpu.cu.IWB = cpu.cu.IRODD;
3334 set_cpu_cycle (cpup, PSEUDO_FETCH_cycle);
3335 break;
3336 }
3337
3338 cpu.PPR.IC ++;
3339 if (ci->info->ndes > 0)
3340 cpu.PPR.IC += ci->info->ndes;
3341
3342 CPT (cpt1U, 28);
3343 cpu.wasXfer = false;
3344 set_cpu_cycle (cpup, FETCH_cycle);
3345 }
3346 break;
3347
3348 case SYNC_FAULT_RTN_cycle:
3349 {
3350 CPT (cpt1U, 29);
3351
3352
3353
3354
3355 cpu.PPR.IC += ci->info->ndes;
3356 cpu.PPR.IC ++;
3357 cpu.wasXfer = false;
3358 set_cpu_cycle (cpup, FETCH_cycle);
3359 }
3360 break;
3361
3362 case FAULT_cycle:
3363 {
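                // FAULT_cycle: control-unit state is safe-stored (the
                // previously stored IRODD word in scu_data[7] is preserved
                // across cu_safe_store when a Trouble fault arrives with xde
                // set), the CPU enters temporary absolute mode with PRR/TRR
                // cleared, and the fault vector address is formed as
                // (switches.FLT_BASE << 5) + 2 * faultNumber, an MR.FFV-based
                // address on the L68 fast-fault path, or cpu.restart_address
                // when a restart was requested.  The word pair at that
                // address is loaded into IWB/IRODD and executed as an XED
                // pair in FAULT_EXEC_cycle.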
3364 CPT (cpt1U, 30);
3365
3366
3367
3368
3369
3370
3371
3372
3373
3374
3375
3376
3377
3378
3379
3380
3381
3382
3383
3384
3385
3386 if ((cpu.cu.APUCycleBits & 060) || cpu.secret_addressing_mode)
3387 set_apu_status (cpup, apuStatus_FABS);
3388
3389
3390
3391
3392
3393
3394
3395
3396
3397
3398
3399 if (cpu.faultNumber != FAULT_TRB || cpu.cu.xde == 0)
3400 {
3401 cu_safe_store (cpup);
3402 }
3403 else
3404 {
3405 word36 tmpIRODD = cpu.scu_data[7];
3406 cu_safe_store (cpup);
3407 cpu.scu_data[7] = tmpIRODD;
3408 }
3409 CPT (cpt1U, 31);
3410
3411
3412 set_temporary_absolute_mode (cpup);
3413
3414
3415 cpu.PPR.PRR = 0;
3416 cpu.TPR.TRR = 0;
3417
3418
3419 uint fltAddress = (cpu.switches.FLT_BASE << 5) & 07740;
3420 L68_ (
3421 if (cpu.is_FFV)
3422 {
3423 cpu.is_FFV = false;
3424 CPTUR (cptUseMR);
3425
3426 fltAddress = (cpu.MR.FFV & MASK15) << 3;
3427 }
3428 )
3429
3430
3431 word24 addr = fltAddress + 2 * cpu.faultNumber;
3432
3433 if (cpu.restart)
3434 {
3435 cpu.restart = false;
3436 addr = cpu.restart_address;
3437 }
3438
3439 core_read2 (cpup, addr, & cpu.cu.IWB, & cpu.cu.IRODD, __func__);
3440 #if defined(TESTING)
3441 HDBGMRead (addr, cpu.cu.IWB, "fault even");
3442 HDBGMRead (addr + 1, cpu.cu.IRODD, "fault odd");
3443 #endif
3444 cpu.cu.xde = 1;
3445 cpu.cu.xdo = 1;
3446 cpu.isExec = true;
3447 cpu.isXED = true;
3448
3449 CPT (cpt1U, 33);
3450 set_cpu_cycle (cpup, FAULT_EXEC_cycle);
3451
3452 break;
3453 }
3454
3455 }
3456 }
3457 while (reason == 0);
3458
3459 leave:
3460 #if defined(THREADZ) || defined(LOCKLESS)
3461 cpu.executing = false;
3462 cpu.inMultics = false;
3463 #endif
3464 #if defined(TESTING)
3465 HDBGPrint ();
3466 #endif
3467
3468 for (unsigned short n = 0; n < N_CPU_UNITS_MAX; n++)
3469 {
3470 #if !defined(SCHED_NEVER_YIELD)
3471 lockYieldAll = lockYieldAll + (unsigned long long)cpus[n].coreLockState.lockYield;
3472 #endif
3473 lockWaitMaxAll = lockWaitMaxAll + (unsigned long long)cpus[n].coreLockState.lockWaitMax;
3474 lockWaitAll = lockWaitAll + (unsigned long long)cpus[n].coreLockState.lockWait;
3475 lockImmediateAll = lockImmediateAll + (unsigned long long)cpus[n].coreLockState.lockImmediate;
3476 lockCntAll = lockCntAll + (unsigned long long)cpus[n].coreLockState.lockCnt;
3477 instrCntAll = instrCntAll + (unsigned long long)cpus[n].instrCnt;
3478 cycleCntAll = cycleCntAll + (unsigned long long)cpus[n].cycleCnt;
3479 }
3480
3481 (void)fflush(stderr);
3482 (void)fflush(stdout);
3483
3484 # if !defined(PERF_STRIP)
3485 if (cycleCntAll > (unsigned long long)cpu.cycleCnt)
3486 {
3487 # endif
3488 sim_msg ("\r\n");
3489 sim_msg ("\r+---------------------------------+\r\n");
3490 sim_msg ("\r| Aggregate CPU Statistics |\r\n");
3491 sim_msg ("\r+---------------------------------+\r\n");
3492 (void)fflush(stderr);
3493 (void)fflush(stdout);
3494 # if defined(WIN_STDIO)
3495 sim_msg ("\r| cycles %15llu |\r\n", cycleCntAll);
3496 sim_msg ("\r| instructions %15llu |\r\n", instrCntAll);
3497 (void)fflush(stderr);
3498 (void)fflush(stdout);
3499 sim_msg ("\r+---------------------------------+\r\n");
3500 sim_msg ("\r| lockCnt %15llu |\r\n", lockCntAll);
3501 sim_msg ("\r| lockImmediate %15llu |\r\n", lockImmediateAll);
3502 (void)fflush(stderr);
3503 (void)fflush(stdout);
3504 sim_msg ("\r+---------------------------------+\r\n");
3505 sim_msg ("\r| lockWait %15llu |\r\n", lockWaitAll);
3506 sim_msg ("\r| lockWaitMax %15llu |\r\n", lockWaitMaxAll);
3507 (void)fflush(stderr);
3508 (void)fflush(stdout);
3509 # if !defined(SCHED_NEVER_YIELD)
3510 sim_msg ("\r| lockYield %15llu |\r\n", lockYieldAll);
3511 # else
3512 sim_msg ("\r| lockYield ---- |\r\n");
3513 # endif
3514 sim_msg ("\r+---------------------------------+\r\n");
3515 (void)fflush(stderr);
3516 (void)fflush(stdout);
3517 # else
3518 sim_msg ("\r| cycles %'15llu |\r\n", cycleCntAll);
3519 sim_msg ("\r| instructions %'15llu |\r\n", instrCntAll);
3520 (void)fflush(stderr);
3521 (void)fflush(stdout);
3522 sim_msg ("\r+---------------------------------+\r\n");
3523 sim_msg ("\r| lockCnt %'15llu |\r\n", lockCntAll);
3524 sim_msg ("\r| lockImmediate %'15llu |\r\n", lockImmediateAll);
3525 (void)fflush(stderr);
3526 (void)fflush(stdout);
3527 sim_msg ("\r+---------------------------------+\r\n");
3528 sim_msg ("\r| lockWait %'15llu |\r\n", lockWaitAll);
3529 sim_msg ("\r| lockWaitMax %'15llu |\r\n", lockWaitMaxAll);
3530 (void)fflush(stderr);
3531 (void)fflush(stdout);
3532 # if !defined(SCHED_NEVER_YIELD)
3533 sim_msg ("\r| lockYield %'15llu |\r\n", lockYieldAll);
3534 # else
3535 sim_msg ("\r| lockYield ---- |\r\n");
3536 # endif
3537 sim_msg ("\r+---------------------------------+\r\n");
3538 (void)fflush(stderr);
3539 (void)fflush(stdout);
3540 # endif
3541 # if !defined(PERF_STRIP)
3542 }
3543 # else
3544 sim_msg("\r\n");
3545 # endif
3546
3547
3548
3549
3550
3551
3552
3553
3554
3555
3556
3557 #if defined(THREADZ) || defined(LOCKLESS)
3558 if (running_perf_test == false)
3559 sim_usleep(2000000);
3560 stopCPUThread();
3561 #endif
3562
3563 #if defined(M_SHARED)
3564
3565
3566
3567
3568 set_cpu_idx (0);
3569 dummy_IC = cpu.PPR.IC;
3570 #endif
3571
3572 return reason;
3573 }
3574
3575
3576
3577
3578
3579
3580
3581
3582
3583
3584
3585
3586
3587
3588
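// operand_size: number of 36-bit words the current instruction moves, taken
// from its opcode-table flags: 1 for single-word operands, 2 for Y-pairs, and
// 8/16/32 for the block transfer forms; 0 means no memory operand.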
3589 int operand_size (cpu_state_t * cpup)
3590 {
3591 DCDstruct * i = & cpu.currentInstruction;
3592 if (i->info->flags & (READ_OPERAND | STORE_OPERAND))
3593 return 1;
3594 else if (i->info->flags & (READ_YPAIR | STORE_YPAIR))
3595 return 2;
3596 else if (i->info->flags & (READ_YBLOCK8 | STORE_YBLOCK8))
3597 return 8;
3598 else if (i->info->flags & (READ_YBLOCK16 | STORE_YBLOCK16))
3599 return 16;
3600 else if (i->info->flags & (READ_YBLOCK32 | STORE_YBLOCK32))
3601 return 32;
3602 return 0;
3603 }
3604
3605
3606
3607 void readOperandRead (cpu_state_t * cpup, word18 addr) {
3608 CPT (cpt1L, 6);
3609
3610 #if defined(THREADZ)
3611 DCDstruct * i = & cpu.currentInstruction;
3612 if (RMWOP (i))
3613 lock_rmw ();
3614 #endif
3615
3616 switch (operand_size (cpup)) {
3617 case 1:
3618 CPT (cpt1L, 7);
3619 ReadOperandRead (cpup, addr, & cpu.CY);
3620 break;
3621 case 2:
3622 CPT (cpt1L, 8);
3623 addr &= 0777776;
3624 Read2OperandRead (cpup, addr, cpu.Ypair);
3625 break;
3626 case 8:
3627 CPT (cpt1L, 9);
3628 addr &= 0777770;
3629 Read8 (cpup, addr, cpu.Yblock8, cpu.currentInstruction.b29);
3630 break;
3631 case 16:
3632 CPT (cpt1L, 10);
3633 addr &= 0777770;
3634 Read16 (cpup, addr, cpu.Yblock16);
3635 break;
3636 case 32:
3637 CPT (cpt1L, 11);
3638 addr &= 0777740;
3639 for (uint j = 0 ; j < 32 ; j += 1)
3640 ReadOperandRead (cpup, addr + j, cpu.Yblock32 + j);
3641 break;
3642 }
3643 }
3644
3645 void readOperandRMW (cpu_state_t * cpup, word18 addr) {
3646 CPT (cpt1L, 6);
3647 switch (operand_size (cpup)) {
3648 case 1:
3649 CPT (cpt1L, 7);
3650 ReadOperandRMW (cpup, addr, & cpu.CY);
3651 break;
3652 case 2:
3653 CPT (cpt1L, 8);
3654 addr &= 0777776;
3655 Read2OperandRead (cpup, addr, cpu.Ypair);
3656 break;
3657 case 8:
3658 CPT (cpt1L, 9);
3659 addr &= 0777770;
3660 Read8 (cpup, addr, cpu.Yblock8, cpu.currentInstruction.b29);
3661 break;
3662 case 16:
3663 CPT (cpt1L, 10);
3664 addr &= 0777770;
3665 Read16 (cpup, addr, cpu.Yblock16);
3666 break;
3667 case 32:
3668 CPT (cpt1L, 11);
3669 addr &= 0777740;
3670 for (uint j = 0 ; j < 32 ; j += 1)
3671 ReadOperandRMW (cpup, addr + j, cpu.Yblock32 + j);
3672 break;
3673 }
3674 }
3675
3676
3677
3678 t_stat write_operand (cpu_state_t * cpup, word18 addr, UNUSED processor_cycle_type cyctyp)
3679 {
3680 switch (operand_size (cpup))
3681 {
3682 case 1:
3683 CPT (cpt1L, 12);
3684 WriteOperandStore (cpup, addr, cpu.CY);
3685 break;
3686 case 2:
3687 CPT (cpt1L, 13);
3688 addr &= 0777776;
3689 Write2OperandStore (cpup, addr + 0, cpu.Ypair);
3690 break;
3691 case 8:
3692 CPT (cpt1L, 14);
3693 addr &= 0777770;
3694 Write8 (cpup, addr, cpu.Yblock8, cpu.currentInstruction.b29);
3695 break;
3696 case 16:
3697 CPT (cpt1L, 15);
3698 addr &= 0777770;
3699 Write16 (cpup, addr, cpu.Yblock16);
3700 break;
3701 case 32:
3702 CPT (cpt1L, 16);
3703 addr &= 0777740;
3704
3705
3706 Write32 (cpup, addr, cpu.Yblock32);
3707 break;
3708 }
3709
3710 #if defined(THREADZ)
3711 if (cyctyp == OPERAND_STORE)
3712 {
3713 DCDstruct * i = & cpu.currentInstruction;
3714 if (RMWOP (i))
3715 unlock_mem ();
3716 }
3717 #endif
3718 return SCPE_OK;
3719
3720 }
3721
3722 #if !defined(SPEED)
3723 t_stat set_mem_watch (int32 arg, const char * buf)
3724 {
3725 if (strlen (buf) == 0)
3726 {
3727 if (arg)
3728 {
3729 sim_warn ("no argument to watch?\r\n");
3730 return SCPE_ARG;
3731 }
3732 sim_msg ("Clearing all watch points\r\n");
3733 (void)memset (& watch_bits, 0, sizeof (watch_bits));
3734 return SCPE_OK;
3735 }
3736 char * end;
3737 long int n = strtol (buf, & end, 0);
3738 if (* end || n < 0 || n >= MEMSIZE)
3739 {
3740 sim_warn ("Invalid argument to watch? %ld\r\n", (long) n);
3741 return SCPE_ARG;
3742 }
3743 watch_bits [n] = arg != 0;
3744 return SCPE_OK;
3745 }
3746 #endif
3747
3748
3749
3750
3751
3752 #if !defined(SPEED)
3753 static void nem_check (word24 addr, const char * context)
3754 {
3755 cpu_state_t * cpup = _cpup;
3756 if (lookup_cpu_mem_map (cpup, addr) < 0)
3757 {
3758 doFault (FAULT_STR, fst_str_nea, context);
3759 }
3760 }
3761 #endif
3762
3763
3764
3765
3766
3767
3768
3769
3770
3771
3772
3773
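// core_* accessors: main-store traffic funnels through these helpers.
// SC_MAP_ADDR applies the SC bank address mapping, watch_bits triggers the
// WATCH trace, and DBG_CORE logs every access.  In LOCKLESS builds each word
// is read with LOAD_ACQ_CORE_WORD and written with STORE_REL_CORE_WORD, and
// core_read_lock / core_write_unlock bracket read-modify-write sequences
// using the per-CPU coreLockState.locked_addr word.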
3774 #if !defined(SPEED) || !defined(INLINE_CORE)
3775 int core_read (cpu_state_t * cpup, word24 addr, word36 *data, const char * ctx)
3776 {
3777 PNL (cpu.portBusy = true;)
3778 SC_MAP_ADDR (addr, addr);
3779 # if !defined(LOCKLESS)
3780 if (M[addr] & MEM_UNINITIALIZED)
3781 {
3782 sim_debug (DBG_WARN, & cpu_dev,
3783 "Uninitialized memory accessed at address %08o; "
3784                        "IC is 0%06o:0%06o (%s)\r\n",
3785 addr, cpu.PPR.PSR, cpu.PPR.IC, ctx);
3786 }
3787 # endif
3788 # if !defined(SPEED)
3789 if (watch_bits [addr])
3790 {
3791 sim_msg ("WATCH [%llu] %05o:%06o read %08o %012llo (%s)\r\n",
3792 (long long unsigned int)cpu.cycleCnt, cpu.PPR.PSR, cpu.PPR.IC, addr,
3793 (long long unsigned int)M [addr], ctx);
3794 traceInstruction (0);
3795 }
3796 # endif
3797 # if defined(LOCKLESS)
3798 # if !defined(SUNLINT)
3799 word36 v;
3800 LOAD_ACQ_CORE_WORD(v, addr);
3801 *data = v & DMASK;
3802 # endif
3803 # else
3804 *data = M[addr] & DMASK;
3805 # endif
3806
3807 DO_WORK_MEM;
3808 sim_debug (DBG_CORE, & cpu_dev,
3809 "core_read %08o %012"PRIo64" (%s)\r\n",
3810 addr, * data, ctx);
3811 PNL (trackport (addr, * data));
3812 return 0;
3813 }
3814 #endif
3815
3816 #if defined(LOCKLESS)
3817 int core_read_lock (cpu_state_t * cpup, word24 addr, word36 *data, UNUSED const char * ctx)
3818 {
3819 SC_MAP_ADDR (addr, addr);
3820 LOCK_CORE_WORD(addr, & cpu.coreLockState);
3821 if (cpu.coreLockState.locked_addr != 0) {
3822 sim_warn ("core_read_lock: locked %08o locked_addr %08o %c %05o:%06o\r\n",
3823 addr, cpu.coreLockState.locked_addr, current_running_cpu_idx + 'A',
3824 cpu.PPR.PSR, cpu.PPR.IC);
3825 core_unlock_all (cpup);
3826 }
3827 cpu.coreLockState.locked_addr = addr;
3828 # if !defined(SUNLINT)
3829 word36 v;
3830 LOAD_ACQ_CORE_WORD(v, addr);
3831 * data = v & DMASK;
3832 # endif
3833 return 0;
3834 }
3835 #endif
3836
3837 #if !defined(SPEED) || !defined(INLINE_CORE)
3838 int core_write (cpu_state_t * cpup, word24 addr, word36 data, const char * ctx)
3839 {
3840 PNL (cpu.portBusy = true;)
3841 SC_MAP_ADDR (addr, addr);
3842 if (cpu.tweaks.isolts_mode)
3843 {
3844 if (cpu.MR.sdpap)
3845 {
3846 sim_warn ("failing to implement sdpap\r\n");
3847 cpu.MR.sdpap = 0;
3848 }
3849 if (cpu.MR.separ)
3850 {
3851 sim_warn ("failing to implement separ\r\n");
3852 cpu.MR.separ = 0;
3853 }
3854 }
3855 # if defined(LOCKLESS)
3856 LOCK_CORE_WORD(addr, & cpu.coreLockState);
3857 # if !defined(SUNLINT)
3858 STORE_REL_CORE_WORD(addr, data);
3859 # endif
3860 # else
3861 M[addr] = data & DMASK;
3862 # endif
3863 # if !defined(SPEED)
3864 if (watch_bits [addr])
3865 {
3866 sim_msg ("WATCH [%llu] %05o:%06o write %08llo %012llo (%s)\r\n",
3867 (long long unsigned int)cpu.cycleCnt, cpu.PPR.PSR, cpu.PPR.IC,
3868 (long long unsigned int)addr, (unsigned long long int)M [addr], ctx);
3869 traceInstruction (0);
3870 }
3871 # endif
3872 DO_WORK_MEM;
3873 sim_debug (DBG_CORE, & cpu_dev,
3874 "core_write %08o %012"PRIo64" (%s)\r\n",
3875 addr, data, ctx);
3876 PNL (trackport (addr, data));
3877 return 0;
3878 }
3879 #endif
3880
3881 #if defined(LOCKLESS)
3882 int core_write_unlock (cpu_state_t * cpup, word24 addr, word36 data, UNUSED const char * ctx)
3883 {
3884 SC_MAP_ADDR (addr, addr);
3885 if (cpu.coreLockState.locked_addr != addr)
3886 {
3887 sim_warn ("core_write_unlock: locked %08o locked_addr %08o %c %05o:%06o\r\n",
3888 addr, cpu.coreLockState.locked_addr, current_running_cpu_idx + 'A',
3889 cpu.PPR.PSR, cpu.PPR.IC);
3890 core_unlock_all (cpup);
3891 }
3892
3893 # if !defined(SUNLINT)
3894 STORE_REL_CORE_WORD(addr, data);
3895 # endif
3896 cpu.coreLockState.locked_addr = 0;
3897 return 0;
3898 }
3899
3900 int core_unlock_all (cpu_state_t * cpup)
3901 {
3902 if (cpu.coreLockState.locked_addr != 0) {
3903 sim_warn ("core_unlock_all: locked %08o %c %05o:%06o\r\n",
3904 cpu.coreLockState.locked_addr, current_running_cpu_idx + 'A',
3905 cpu.PPR.PSR, cpu.PPR.IC);
3906 # if !defined(SUNLINT)
3907 STORE_REL_CORE_WORD(cpu.coreLockState.locked_addr, M[cpu.coreLockState.locked_addr]);
3908 # endif
3909 cpu.coreLockState.locked_addr = 0;
3910 }
3911 return 0;
3912 }
3913 #endif
3914
3915 #if !defined(SPEED) || !defined(INLINE_CORE)
3916 int core_write_zone (cpu_state_t * cpup, word24 addr, word36 data, const char * ctx)
3917 {
3918 PNL (cpu.portBusy = true;)
3919 if (cpu.tweaks.isolts_mode)
3920 {
3921 if (cpu.MR.sdpap)
3922 {
3923 sim_warn ("failing to implement sdpap\r\n");
3924 cpu.MR.sdpap = 0;
3925 }
3926 if (cpu.MR.separ)
3927 {
3928 sim_warn ("failing to implement separ\r\n");
3929 cpu.MR.separ = 0;
3930 }
3931 }
3932 word24 mapAddr = 0;
3933 SC_MAP_ADDR (addr, mapAddr);
3934 # if defined(LOCKLESS)
3935 word36 v;
3936 core_read_lock(cpup, addr, &v, ctx);
3937 v = (v & ~cpu.zone) | (data & cpu.zone);
3938 core_write_unlock(cpup, addr, v, ctx);
3939 # else
3940 M[mapAddr] = (M[mapAddr] & ~cpu.zone) | (data & cpu.zone);
3941 # endif
3942 cpu.useZone = false;
3943 # if !defined(SPEED)
3944 if (watch_bits [mapAddr])
3945 {
3946 sim_msg ("WATCH [%llu] %05o:%06o writez %08llo %012llo (%s)\r\n",
3947 (unsigned long long int)cpu.cycleCnt, cpu.PPR.PSR, cpu.PPR.IC,
3948 (unsigned long long int)mapAddr, (unsigned long long int)M [mapAddr], ctx);
3949 traceInstruction (0);
3950 }
3951 # endif
3952 DO_WORK_MEM;
3953 sim_debug (DBG_CORE, & cpu_dev,
3954 "core_write_zone %08o %012"PRIo64" (%s)\r\n",
3955 mapAddr, data, ctx);
3956 PNL (trackport (mapAddr, data));
3957 return 0;
3958 }
3959 #endif
3960
3961 #if !defined(SPEED) || !defined(INLINE_CORE)
3962 int core_read2 (cpu_state_t * cpup, word24 addr, word36 *even, word36 *odd, const char * ctx)
3963 {
3964 PNL (cpu.portBusy = true;)
3965 # if defined(LOCKLESS)
3966
3967 word36 v;
3968 # endif
3969 if (addr & 1)
3970 {
3971 sim_debug (DBG_MSG, & cpu_dev,
3972 "warning: subtracting 1 from pair at %o in "
3973 "core_read2 (%s)\r\n", addr, ctx);
3974 addr &= (word24)~1;
3975 }
3976 SC_MAP_ADDR (addr, addr);
3977 # if !defined(LOCKLESS)
3978 if (M[addr] & MEM_UNINITIALIZED)
3979 {
3980 sim_debug (DBG_WARN, & cpu_dev,
3981 "Uninitialized memory accessed at address %08o; "
3982 "IC is 0%06o:0%06o (%s)\r\n",
3983 addr, cpu.PPR.PSR, cpu.PPR.IC, ctx);
3984 }
3985 # endif
3986 # if !defined(SPEED)
3987 if (watch_bits [addr])
3988 {
3989 sim_msg ("WATCH [%llu] %05o:%06o read2 %08llo %012llo (%s)\r\n",
3990 (unsigned long long int)cpu.cycleCnt, cpu.PPR.PSR, cpu.PPR.IC,
3991 (unsigned long long int)addr, (unsigned long long int)M [addr], ctx);
3992 traceInstruction (0);
3993 }
3994 # endif
3995 # if defined(LOCKLESS)
3996 # if !defined(SUNLINT)
3997 LOAD_ACQ_CORE_WORD(v, addr);
3998 if (v & MEM_LOCKED)
3999 sim_warn ("core_read2: even locked %08o locked_addr %08o %c %05o:%06o\r\n",
4000 addr, cpu.coreLockState.locked_addr, current_running_cpu_idx + 'A',
4001 cpu.PPR.PSR, cpu.PPR.IC);
4002 *even = v & DMASK;
4003 addr++;
4004 # endif
4005 # else
4006 *even = M[addr++] & DMASK;
4007 # endif
4008 sim_debug (DBG_CORE, & cpu_dev,
4009 "core_read2 %08o %012"PRIo64" (%s)\r\n",
4010 addr - 1, * even, ctx);
4011
4012
4013
4014 # if !defined(LOCKLESS)
4015 if (M[addr] & MEM_UNINITIALIZED)
4016 {
4017 sim_debug (DBG_WARN, & cpu_dev,
4018 "Uninitialized memory accessed at address %08o; "
4019 "IC is 0%06o:0%06o (%s)\r\n",
4020 addr, cpu.PPR.PSR, cpu.PPR.IC, ctx);
4021 }
4022 # endif
4023 # if !defined(SPEED)
4024 if (watch_bits [addr])
4025 {
4026 sim_msg ("WATCH [%llu] %05o:%06o read2 %08llo %012llo (%s)\r\n",
4027 (unsigned long long int)cpu.cycleCnt, cpu.PPR.PSR, cpu.PPR.IC,
4028 (unsigned long long int)addr, (unsigned long long int)M [addr], ctx);
4029 traceInstruction (0);
4030 }
4031 # endif
4032 # if defined(LOCKLESS)
4033 # if !defined(SUNLINT)
4034 LOAD_ACQ_CORE_WORD(v, addr);
4035 if (v & MEM_LOCKED)
4036 sim_warn ("core_read2: odd locked %08o locked_addr %08o %c %05o:%06o\r\n",
4037 addr, cpu.coreLockState.locked_addr, current_running_cpu_idx + 'A',
4038 cpu.PPR.PSR, cpu.PPR.IC);
4039 *odd = v & DMASK;
4040 # endif
4041 # else
4042 *odd = M[addr] & DMASK;
4043 # endif
4044 sim_debug (DBG_CORE, & cpu_dev,
4045 "core_read2 %08o %012"PRIo64" (%s)\r\n",
4046 addr, * odd, ctx);
4047 DO_WORK_MEM;
4048 PNL (trackport (addr - 1, * even));
4049 return 0;
4050 }
4051 #endif
4052
4053 #if !defined(SPEED) || !defined(INLINE_CORE)
4054 int core_write2 (cpu_state_t * cpup, word24 addr, word36 even, word36 odd, const char * ctx) {
4055 PNL (cpu.portBusy = true;)
4056 if (addr & 1) {
4057 sim_debug (DBG_MSG, & cpu_dev,
4058 "warning: subtracting 1 from pair at %o in core_write2 " "(%s)\r\n",
4059 addr, ctx);
4060 addr &= (word24)~1;
4061 }
4062 SC_MAP_ADDR (addr, addr);
4063 if (cpu.tweaks.isolts_mode) {
4064 if (cpu.MR.sdpap) {
4065 sim_warn ("failing to implement sdpap\r\n");
4066 cpu.MR.sdpap = 0;
4067 }
4068 if (cpu.MR.separ) {
4069 sim_warn ("failing to implement separ\r\n");
4070 cpu.MR.separ = 0;
4071 }
4072 }
4073
4074 # if !defined(SPEED)
4075 if (watch_bits [addr]) {
4076 sim_msg ("WATCH [%llu] %05o:%06o write2 %08llo %012llo (%s)\r\n",
4077 (unsigned long long int)cpu.cycleCnt, cpu.PPR.PSR, cpu.PPR.IC,
4078 (unsigned long long int)addr, (unsigned long long int)even, ctx);
4079 traceInstruction (0);
4080 }
4081 # endif
4082 # if defined(LOCKLESS)
4083 LOCK_CORE_WORD(addr, & cpu.coreLockState);
4084 # if !defined(SUNLINT)
4085 STORE_REL_CORE_WORD(addr, even);
4086 # endif
4087 addr++;
4088 # else
4089 M[addr++] = even & DMASK;
4090 # endif
4091 sim_debug (DBG_CORE, & cpu_dev, "core_write2 %08o %012llo (%s)\r\n", addr - 1,
4092 (long long unsigned int)even, ctx);
4093
4094
4095
4096
4097 # if !defined(SPEED)
4098 if (watch_bits [addr]) {
4099 sim_msg ("WATCH [%llu] %05o:%06o write2 %08llo %012llo (%s)\r\n",
4100 (long long unsigned int)cpu.cycleCnt, cpu.PPR.PSR, cpu.PPR.IC,
4101 (long long unsigned int)addr, (long long unsigned int)odd, ctx);
4102 traceInstruction (0);
4103 }
4104 # endif
4105 # if defined(LOCKLESS)
4106 LOCK_CORE_WORD(addr, & cpu.coreLockState);
4107 # if !defined(SUNLINT)
4108 STORE_REL_CORE_WORD(addr, odd);
4109 # endif
4110 # else
4111 M[addr] = odd & DMASK;
4112 # endif
4113 DO_WORK_MEM;
4114 PNL (trackport (addr - 1, even));
4115 sim_debug (DBG_CORE, & cpu_dev, "core_write2 %08o %012"PRIo64" (%s)\r\n", addr, odd, ctx);
4116 return 0;
4117 }
4118 #endif
4119
4120
4121
4122
4123
4124
4125
4126
4127
4128
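// decode_instruction: crack a 36-bit instruction word into the DCDstruct
// fields (address bits 0-17, opcode 18-26, opcode-extension bit 27, inhibit
// bit 28, bit 29, tag 30-35, per the GET_* macros) and attach the
// opcode-table entry.  Instructions flagged IGN_B29 drop bit 29; EIS
// multi-word instructions clear b29 and tag and reset the EIS decode
// scratchpad.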
4129 void decode_instruction (cpu_state_t * cpup, word36 inst, DCDstruct * p)
4130 {
4131 CPT (cpt1L, 17);
4132 (void)memset (p, 0, sizeof (DCDstruct));
4133
4134 p->opcode = GET_OP (inst);
4135 p->opcodeX = GET_OPX(inst);
4136 p->opcode10 = p->opcode | (p->opcodeX ? 01000 : 0);
4137 p->address = GET_ADDR (inst);
4138 p->b29 = GET_A (inst);
4139 p->i = GET_I (inst);
4140 p->tag = GET_TAG (inst);
4141
4142 p->info = get_iwb_info (p);
4143
4144 if (p->info->flags & IGN_B29)
4145 p->b29 = 0;
4146
4147 if (p->info->ndes > 0)
4148 {
4149 p->b29 = 0;
4150 p->tag = 0;
4151 if (p->info->ndes > 1)
4152 {
4153 (void)memset (& cpu.currentEISinstruction, 0,
4154 sizeof (cpu.currentEISinstruction));
4155 }
4156 }
4157 }
4158
4159
4160
4161
4162
4163
4164
4165
4166
4167
4168
4169
4170
4171
4172
4173
4174
4175
4176
4177
4178 int is_priv_mode (cpu_state_t * cpup)
4179 {
4180
4181
4182
4183 if (get_bar_mode (cpup))
4184 return 0;
4185
4186
4187 if (get_addr_mode (cpup) == ABSOLUTE_mode)
4188 return 1;
4189 else if (cpu.PPR.P)
4190 return 1;
4191
4192 return 0;
4193 }
4194
4195
4196
4197
4198
4199
4200
4201
4202 bool get_bar_mode (cpu_state_t * cpup)
4203 {
4204 return ! (cpu.secret_addressing_mode || TST_I_NBAR);
4205 }
4206
4207 addr_modes_e get_addr_mode (cpu_state_t * cpup)
4208 {
4209 if (cpu.secret_addressing_mode)
4210 return ABSOLUTE_mode;
4211
4212
4213
4214
4215
4216
4217
4218 if (TST_I_ABS)
4219 {
4220 return ABSOLUTE_mode;
4221 }
4222 else
4223 {
4224 return APPEND_mode;
4225 }
4226 }
4227
4228
4229
4230
4231
4232
4233
4234
4235 void set_addr_mode (cpu_state_t * cpup, addr_modes_e mode)
4236 {
4237
4238
4239
4240
4241
4242
4243
4244
4245
4246
4247 cpu.secret_addressing_mode = false;
4248 if (mode == ABSOLUTE_mode)
4249 {
4250 CPT (cpt1L, 22);
4251 sim_debug (DBG_DEBUG, & cpu_dev, "APU: Setting absolute mode.\r\n");
4252
4253 SET_I_ABS;
4254 cpu.PPR.P = 1;
4255 }
4256 else if (mode == APPEND_mode)
4257 {
4258 CPT (cpt1L, 23);
4259 if (! TST_I_ABS && TST_I_NBAR)
4260 sim_debug (DBG_DEBUG, & cpu_dev, "APU: Keeping append mode.\r\n");
4261 else
4262 sim_debug (DBG_DEBUG, & cpu_dev, "APU: Setting append mode.\r\n");
4263
4264 CLR_I_ABS;
4265 }
4266 else
4267 {
4268 sim_debug (DBG_ERR, & cpu_dev,
4269 "APU: Unable to determine address mode.\r\n");
4270 sim_warn ("APU: Unable to determine address mode. Can't happen!\r\n");
4271 }
4272 }
4273
4274
4275
4276
4277
4278
4279
4280
4281
4282
4283
4284
4285
4286
4287
4288
4289
4290
4291
4292
4293
4294
4295
4296
4297
4298
4299
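// get_BAR_address: BAR-mode relocation.  BASE and BOUND are in units of 512
// (1000 octal) words, so the valid window is [0, BOUND << 9) and the final
// address is addr + (BASE << 9), truncated to 18 bits.  For example, BOUND = 2
// allows offsets 0..01777; anything at or above 02000 takes a store fault, as
// does a BOUND of zero.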
4300 word18 get_BAR_address (cpu_state_t * cpup, word18 addr)
4301 {
4302 if (cpu . BAR.BOUND == 0)
4303
4304 doFault (FAULT_STR, fst_str_oob, "BAR store fault; out of bounds");
4305
4306
4307
4308
4309
4310
4311
4312
4313
4314
4315 if (addr >= (((word18) cpu . BAR.BOUND) << 9))
4316
4317 doFault (FAULT_STR, fst_str_oob, "BAR store fault; out of bounds");
4318
4319 word18 barAddr = (addr + (((word18) cpu . BAR.BASE) << 9)) & 0777777;
4320 return barAddr;
4321 }
4322
4323
4324
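// History registers: each set is a small ring of word pairs; add_history
// advances history_cyclic[hset] modulo N_MODEL_HIST_SIZE.  The CU/DU/OU/APU
// helpers below pack model-specific fields into the two words before storing
// them.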
4325 static void add_history (cpu_state_t * cpup, uint hset, word36 w0, word36 w1)
4326 {
4327
4328 {
4329 cpu.history [hset] [cpu.history_cyclic[hset]] [0] = w0;
4330 cpu.history [hset] [cpu.history_cyclic[hset]] [1] = w1;
4331 cpu.history_cyclic[hset] = (cpu.history_cyclic[hset] + 1) % N_MODEL_HIST_SIZE;
4332 }
4333 }
4334
4335 void add_history_force (cpu_state_t * cpup, uint hset, word36 w0, word36 w1)
4336 {
4337 cpu.history [hset] [cpu.history_cyclic[hset]] [0] = w0;
4338 cpu.history [hset] [cpu.history_cyclic[hset]] [1] = w1;
4339 cpu.history_cyclic[hset] = (cpu.history_cyclic[hset] + 1) % N_MODEL_HIST_SIZE;
4340 }
4341
4342 void add_dps8m_CU_history (cpu_state_t * cpup)
4343 {
4344 if (cpu.skip_cu_hist)
4345 return;
4346 if (! cpu.MR_cache.emr)
4347 return;
4348 if (! cpu.MR_cache.ihr)
4349 return;
4350 if (cpu.MR_cache.hrxfr && ! cpu.wasXfer)
4351 return;
4352
4353 word36 flags = 0;
4354 word5 proccmd = 0;
4355 word7 flags2 = 0;
4356 word36 w0 = 0, w1 = 0;
4357 w0 |= flags & 0777777000000;
4358 w0 |= IWB_IRODD & MASK18;
4359 w1 |= ((word36)(cpu.iefpFinalAddress & MASK24) << 12);
4360 w1 |= (proccmd & MASK5) << 7;
4361 w1 |= flags2 & 0176;
4362 add_history (cpup, CU_HIST_REG, w0, w1);
4363 }
4364
4365 #if !defined(QUIET_UNUSED)
4366 void add_dps8m_DU_OU_history (cpu_state_t * cpup, word36 flags, word18 ICT, word9 RS_REG, word9 flags2)
4367 {
4368 word36 w0 = flags, w1 = 0;
4369 w1 |= (ICT & MASK18) << 18;
4370 w1 |= (RS_REG & MASK9) << 9;
4371 w1 |= flags2 & MASK9;
4372 add_history (cpup, DPS8M_DU_OU_HIST_REG, w0, w1);
4373 }
4374
4375 void add_dps8m_APU_history (cpu_state_t * cpup, word15 ESN, word21 flags, word24 RMA, word3 RTRR, word9 flags2)
4376 {
4377 word36 w0 = 0, w1 = 0;
4378 w0 |= (ESN & MASK15) << 21;
4379 w0 |= flags & MASK21;
4380 w1 |= (RMA & MASK24) << 12;
4381 w1 |= (RTRR & MASK3) << 9;
4382 w1 |= flags2 & MASK9;
4383      add_history (cpup, cpu.tweaks.l68_mode ? L68_APU_HIST_REG : DPS8M_APU_HIST_REG, w0, w1);
4384 }
4385
4386 void add_dps8m_EAPU_history (word18 ZCA, word18 opcode)
4387 {
4388 word36 w0 = 0;
4389 w0 |= (ZCA & MASK18) << 18;
4390 w0 |= opcode & MASK18;
4391    add_history (_cpup, DPS8M_EAPU_HIST_REG, w0, 0);
4392
4393
4394
4395
4396 }
4397 #endif
4398
4399
4400
4401
4402
4403
4404
4405
4406
4407
4408
4409
4410
4411
4412
4413
4414
4415
4416
4417
4418
4419
4420
4421
4422
4423
4424
4425
4426
4427
4428
4429
4430
4431
4432
4433 void add_l68_CU_history (cpu_state_t * cpup)
4434 {
4435 CPT (cpt1L, 24);
4436
4437 if (cpu.skip_cu_hist)
4438 return;
4439 if (! cpu.MR_cache.emr)
4440 return;
4441 if (! cpu.MR_cache.ihr)
4442 return;
4443
4444 word36 w0 = 0, w1 = 0;
4445
4446
4447
4448
4449
4450
4451
4452
4453
4454 PNL (putbits36_8 (& w0, 0, cpu.prepare_state);)
4455
4456 putbits36_1 (& w0, 8, cpu.wasXfer);
4457
4458 putbits36_1 (& w0, 9, cpu.cu.xde);
4459
4460 putbits36_1 (& w0, 10, cpu.cu.xdo);
4461
4462 putbits36_1 (& w0, 11, USE_IRODD?1:0);
4463
4464 putbits36_1 (& w0, 12, cpu.cu.rpt);
4465
4466
4467 PNL (putbits36_1 (& w0, 14, cpu.AR_F_E);)
4468
4469 putbits36_1 (& w0, 15, cpu.cycle != INTERRUPT_cycle?1:0);
4470
4471 putbits36_1 (& w0, 16, cpu.cycle != FAULT_cycle?1:0);
4472
4473 putbits36_1 (& w0, 17, TSTF (cpu.cu.IR, I_NBAR)?1:0);
4474
4475 putbits36_18 (& w0, 18, (word18) (IWB_IRODD & MASK18));
4476
4477
4478 putbits36_18 (& w1, 0, cpu.TPR.CA);
4479
4480
4481 PNL (putbits36_1 (& w1, 59-36, (cpu.portSelect == 0)?1:0);)
4482 PNL (putbits36_1 (& w1, 60-36, (cpu.portSelect == 1)?1:0);)
4483 PNL (putbits36_1 (& w1, 61-36, (cpu.portSelect == 2)?1:0);)
4484 PNL (putbits36_1 (& w1, 62-36, (cpu.portSelect == 3)?1:0);)
4485
4486 putbits36_1 (& w1, 63-36, cpu.interrupt_flag?1:0);
4487
4488 PNL (putbits36_1 (& w1, 64-36, cpu.INS_FETCH?1:0);)
4489
4490
4491
4492
4493
4494
4495
4496
4497 add_history (cpup, CU_HIST_REG, w0, w1);
4498
4499
4500 CPTUR (cptUseMR);
4501 if (cpu.MR.hrhlt && cpu.history_cyclic[CU_HIST_REG] == 0)
4502 {
4503
4504 if (cpu.MR.ihrrs)
4505 {
4506 cpu.MR.ihr = 0;
4507 }
4508 set_FFV_fault (cpup, 4);
4509 return;
4510 }
4511 }
4512
4513
4514
4515
4516
4517
4518
4519
4520
4521
4522
4523
4524
4525
4526
4527
4528
4529
4530
4531
4532
4533
4534
4535
4536
4537
4538
4539
4540
4541
4542
4543
4544
4545
4546
4547
4548
4549
4550
4551 void add_l68_DU_history (cpu_state_t * cpup)
4552 {
4553 CPT (cpt1L, 25);
4554 PNL (add_history (cpup, L68_DU_HIST_REG, cpu.du.cycle1, cpu.du.cycle2);)
4555 }
4556
4557 void add_l68_OU_history (cpu_state_t * cpup)
4558 {
4559 CPT (cpt1L, 26);
4560 word36 w0 = 0, w1 = 0;
4561
4562
4563
4564 PNL (putbits36_9 (& w0, 0, cpu.ou.RS);)
4565
4566
4567 putbits36_1 (& w0, 9, cpu.ou.characterOperandSize ? 1 : 0);
4568
4569
4570 putbits36_3 (& w0, 10, cpu.ou.characterOperandOffset);
4571
4572
4573 putbits36_1 (& w0, 13, cpu.ou.crflag);
4574
4575
4576 putbits36_1 (& w0, 14, cpu.ou.directOperandFlag ? 1 : 0);
4577
4578
4579 putbits36_2 (& w0, 15, cpu.ou.eac);
4580
4581
4582
4583 PNL (putbits36_9 (& w0, 18, cpu.ou.RS);)
4584
4585
4586 putbits36_1 (& w0, 27, cpu.ou.RB1_FULL);
4587
4588
4589 putbits36_1 (& w0, 28, cpu.ou.RP_FULL);
4590
4591
4592 putbits36_1 (& w0, 29, cpu.ou.RS_FULL);
4593
4594
4595 putbits36_6 (& w0, 30, (word6) (cpu.ou.cycle >> 3));
4596
4597
4598 putbits36_3 (& w1, 36-36, (word3) cpu.ou.cycle);
4599
4600
4601 putbits36_1 (& w1, 39-36, cpu.ou.STR_OP);
4602
4603
4604
4605
4606 PNL (putbits36_10 (& w1, 41-36,
4607 (word10) ~opcodes10 [cpu.ou.RS].reg_use);)
4608
4609
4610
4611
4612 putbits36_18 (& w1, 54 - 36, cpu.PPR.IC);
4613
4614 add_history (cpup, L68_OU_HIST_REG, w0, w1);
4615 }
4616
4617
4618
4619
4620
4621
4622
4623
4624
4625
4626
4627
4628
4629
4630
4631
4632
4633
4634
4635
4636
4637
4638
4639
4640
4641
4642
4643
4644
4645
4646
4647
4648
4649
4650
4651
4652
4653
4654
4655
4656
4657
4658
4659
4660
4661
4662
4663
4664
4665
4666 void add_l68_APU_history (cpu_state_t * cpup, enum APUH_e op)
4667 {
4668 CPT (cpt1L, 28);
4669 word36 w0 = 0, w1 = 0;
4670
4671 w0 = op;
4672
4673
4674 putbits36_15 (& w0, 0, cpu.TPR.TSR);
4675
4676 PNL (putbits36_1 (& w0, 15, (cpu.apu.state & apu_ESN_SNR) ? 1 : 0);)
4677 PNL (putbits36_1 (& w0, 16, (cpu.apu.state & apu_ESN_TSR) ? 1 : 0);)
4678
4679 putbits36_1 (& w0, 25, cpu.cu.SDWAMM);
4680
4681 putbits36_4 (& w0, 26, (word4) cpu.SDWAMR);
4682
4683 putbits36_1 (& w0, 30, cpu.cu.PTWAMM);
4684
4685 putbits36_4 (& w0, 31, (word4) cpu.PTWAMR);
4686
4687 PNL (putbits36_1 (& w0, 35, (cpu.apu.state & apu_FLT) ? 1 : 0);)
4688
4689
4690 PNL (putbits36_24 (& w1, 0, cpu.APUMemAddr);)
4691
4692 putbits36_3 (& w1, 24, cpu.TPR.TRR);
4693
4694
4695 putbits36_1 (& w1, 34, cpu.SDW0.C);
4696
4697
4698 add_history (cpup, L68_APU_HIST_REG, w0, w1);
4699 }
4700
4701 #if defined(THREADZ) || defined(LOCKLESS)
4702
4703
4704 static const char * get_dbg_verb (uint32 dbits, DEVICE * dptr)
4705 {
4706 static const char * debtab_none = "DEBTAB_ISNULL";
4707 static const char * debtab_nomatch = "DEBTAB_NOMATCH";
4708 const char * some_match = NULL;
4709 int32 offset = 0;
4710
4711 if (dptr->debflags == 0)
4712 return debtab_none;
4713
4714 dbits &= dptr->dctrl;
4715
4716
4717 while ((offset < 32) && dptr->debflags[offset].name)
4718 {
4719 if (dptr->debflags[offset].mask == dbits)
4720 return dptr->debflags[offset].name;
4721 if (dptr->debflags[offset].mask & dbits)
4722 some_match = dptr->debflags[offset].name;
4723 offset ++;
4724 }
4725 return some_match ? some_match : debtab_nomatch;
4726 }
4727
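// dps8_sim_debug: sim_debug-style helper for threaded builds.  It formats
// into a stack buffer (growing via malloc if needed), then writes each
// newline-terminated piece to sim_deb as one line prefixed with a
// CLOCK_REALTIME timestamp, the message count, and the running CPU index, so
// interleaved output from multiple CPU threads stays readable.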
4728 void dps8_sim_debug (uint32 dbits, DEVICE * dptr, unsigned long long cnt, const char* fmt, ...)
4729 {
4730
4731 if (sim_deb && dptr && (dptr->dctrl & dbits))
4732 {
4733 const char * debug_type = get_dbg_verb (dbits, dptr);
4734 char stackbuf[STACKBUFSIZE];
4735 int32 bufsize = sizeof (stackbuf);
4736 char * buf = stackbuf;
4737 va_list arglist;
4738 int32 i, j, len;
4739 struct timespec t;
4740 clock_gettime(CLOCK_REALTIME, &t);
4741
4742 buf [bufsize-1] = '\0';
4743
4744 while (1)
4745 {
4746 va_start (arglist, fmt);
4747 len = vsnprintf (buf, (int)((unsigned long)(bufsize)-1), fmt, arglist);
4748 va_end (arglist);
4749
4750
4751
4752 if ((len < 0) || (len >= bufsize-1))
4753 {
4754 if (buf != stackbuf)
4755 FREE (buf);
4756 if (bufsize >= (INT_MAX / 2))
4757 return;
4758 bufsize = bufsize * 2;
4759 if (bufsize < len + 2)
4760 bufsize = len + 2;
4761 buf = (char *) malloc ((unsigned long) bufsize);
4762 if (buf == NULL)
4763 return;
4764 buf[bufsize-1] = '\0';
4765 continue;
4766 }
4767 break;
4768 }
4769
4770
4771
4772 for (i = j = 0; i < len; ++i)
4773 {
4774 if ('\n' == buf[i])
4775 {
4776 if (i >= j)
4777 {
4778 if ((i != j) || (i == 0))
4779 {
4780 (void)fprintf (sim_deb, "%lld.%06ld: DBG(%lld) %o: %s %s %.*s\r\n",
4781 (long long)t.tv_sec, t.tv_nsec/1000, cnt,
4782 current_running_cpu_idx, dptr->name, debug_type, i-j, &buf[j]);
4783 }
4784 }
4785 j = i + 1;
4786 }
4787 }
4788
4789
4790 if (buf != stackbuf)
4791 FREE (buf);
4792 }
4793
4794 }
4795 #endif
4796
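// setupPROM: builds the 1024-byte CPU configuration PROM image.  The first
// bytes carry the model string, serial number, and ship date, followed by the
// RSW2 switch word and its extension; offsets 60 onward hold simulator build
// metadata (release date, version fields, build number) drawn from ver.h,
// with the long preprocessor chains below choosing human-readable
// target-architecture and target-OS strings.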
4797 void setupPROM (uint cpuNo, unsigned char * PROM) {
4798
4799
4800
4801
4802
4803
4804
4805
4806
4807
4808
4809
4810
4811
4812
4813
4814
4815
4816
4817
4818
4819
4820
4821
4822
4823
4824
4825
4826
4827
4828
4829
4830
4831
4832
4833
4834
4835
4836
4837 word36 rsw2 = 0;
4838
4839
4840
4841
4842
4843
4844
4845 putbits36_4 (& rsw2, 0, 0);
4846
4847 putbits36_2 (& rsw2, 4, 001);
4848
4849 putbits36_7 (& rsw2, 6, 2);
4850
4851 putbits36_1 (& rsw2, 13, 1);
4852
4853 putbits36_5 (& rsw2, 14, 0);
4854
4855 putbits36_1 (& rsw2, 19, 1);
4856
4857 putbits36_1 (& rsw2, 20, cpus[cpuNo].options.cache_installed ? 1 : 0);
4858
4859 putbits36_2 (& rsw2, 21, 0);
4860
4861 putbits36_1 (& rsw2, 23, 1);
4862
4863 putbits36_1 (& rsw2, 24, 0);
4864
4865 putbits36_4 (& rsw2, 25, 0);
4866
4867 putbits36_4 (& rsw2, 29, cpus[cpuNo].options.proc_speed & 017LL);
4868
4869 putbits36_3 (& rsw2, 33, cpus[cpuNo].switches.cpu_num & 07LL);
4870
4871 word4 rsw2Ext = 0;
4872 if (cpus[cpuNo].options.hex_mode_installed)
4873 rsw2Ext |= 010;
4874 if (cpus[cpuNo].options.clock_slave_installed)
4875 rsw2Ext |= 004;
4876
4877
4878 char serial[12];
4879 (void)sprintf (serial, "%-11u", cpus[cpuNo].switches.serno);
4880
4881 #if defined(VER_H_PROM_SHIP)
4882 char * ship = VER_H_PROM_SHIP;
4883 #else
4884 char * ship = "200101";
4885 #endif
4886
4887 #if !defined(VER_H_PROM_MAJOR_VER)
4888 # define VER_H_PROM_MAJOR_VER "999"
4889 #endif
4890
4891 #if !defined(VER_H_PROM_MINOR_VER)
4892 # define VER_H_PROM_MINOR_VER "999"
4893 #endif
4894
4895 #if !defined(VER_H_PROM_PATCH_VER)
4896 # define VER_H_PROM_PATCH_VER "999"
4897 #endif
4898
4899 #if !defined(VER_H_PROM_OTHER_VER)
4900 # define VER_H_PROM_OTHER_VER "999"
4901 #endif
4902
4903 #if !defined(VER_H_GIT_RELT)
4904 # define VER_H_GIT_RELT "X"
4905 #endif
4906
4907 #if !defined(VER_H_PROM_VER_TEXT)
4908 # define VER_H_PROM_VER_TEXT "Unknown "
4909 #endif
4910
4911 #if defined(BUILD_PROM_OSA_TEXT)
4912 # define BURN_PROM_OSA_TEXT BUILD_PROM_OSA_TEXT
4913 #else
4914 # if !defined(VER_H_PROM_OSA_TEXT)
4915 # define BURN_PROM_OSA_TEXT "Unknown Build Op Sys"
4916 # else
4917 # define BURN_PROM_OSA_TEXT VER_H_PROM_OSA_TEXT
4918 # endif
4919 #endif
4920
4921 #if defined(BUILD_PROM_OSV_TEXT)
4922 # define BURN_PROM_OSV_TEXT BUILD_PROM_OSV_TEXT
4923 #else
4924 # if !defined(VER_H_PROM_OSV_TEXT)
4925 # define BURN_PROM_OSV_TEXT "Unknown Build Arch. "
4926 # else
4927 # define BURN_PROM_OSV_TEXT VER_H_PROM_OSV_TEXT
4928 # endif
4929 #endif
4930
4931 #if defined(BUILD_PROM_TSA_TEXT)
4932 # define BURN_PROM_TSA_TEXT BUILD_PROM_TSA_TEXT
4933 #else
4934 # if defined(_M_X64) || defined(_M_AMD64) || defined(__amd64__) || defined(__x86_64__) || defined(__AMD64)
4935 # define VER_H_PROM_TSA_TEXT "Intel x86_64 (AMD64)"
4936 # elif defined(_M_IX86) || defined(__i386) || defined(__i486) || defined(__i586) || defined(__i686) || defined(__ix86)
4937 # define VER_H_PROM_TSA_TEXT "Intel ix86 (32-bit) "
4938 # elif defined(_M_ARM64) || defined(__aarch64__) || defined(__arm64__)
4939 # define VER_H_PROM_TSA_TEXT "AArch64/ARM64/64-bit"
4940 # elif defined(_M_ARM) || defined(__arm__)
4941 # define VER_H_PROM_TSA_TEXT "AArch32/ARM32/32-bit"
4942 # elif defined(__ia64__) || defined(_M_IA64) || defined(__itanium__)
4943 # define VER_H_PROM_TSA_TEXT "Intel Itanium (IA64)"
4944 # elif defined(__ppc64__) || defined(__PPC64__) || defined(__ppc64le__) || defined(__PPC64LE__) || defined(__powerpc64__) || \
4945 defined(__POWERPC64__) || \
4946 defined(_M_PPC64) || \
4947 defined(__PPC64) || \
4948 defined(_ARCH_PPC64)
4949 # define VER_H_PROM_TSA_TEXT "Power ISA (64-bit) "
4950 # elif defined(__ppc__) || defined(__PPC__) || defined(__powerpc__) || defined(__POWERPC__) || defined(_M_PPC) || \
4951 defined(__PPC) || \
4952 defined(__ppc32__) || \
4953 defined(__PPC32__) || \
4954 defined(__powerpc32__) || \
4955 defined(__POWERPC32__) || \
4956 defined(_M_PPC32) || \
4957 defined(__PPC32)
4958 # define VER_H_PROM_TSA_TEXT "PowerPC ISA (32-bit)"
4959 # elif defined(__s390x__)
4960 # define VER_H_PROM_TSA_TEXT "IBM z/Architecture "
4961 # elif defined(__s390__)
4962 # define VER_H_PROM_TSA_TEXT "IBM ESA System/390 "
4963 # elif defined(__J2__) || defined(__J2P__) || defined(__j2__) || defined(__j2p__)
4964 # define VER_H_PROM_TSA_TEXT "J-Core J2 Open CPU "
4965 # elif defined(__SH4__) || defined(__sh4__) || defined(__SH4) || defined(__sh4)
4966 # define VER_H_PROM_TSA_TEXT "Hitachi/Renesas SH-4"
4967 # elif defined(__SH2__) || defined(__sh2__) || defined(__SH2) || defined(__sh2)
4968 # define VER_H_PROM_TSA_TEXT "Hitachi/Renesas SH-2"
4969 # elif defined(__alpha__)
4970 # define VER_H_PROM_TSA_TEXT "Alpha AXP "
4971 # elif defined(__hppa__) || defined(__HPPA__) || defined(__PARISC__) || defined(__parisc__)
4972 # define VER_H_PROM_TSA_TEXT "HP PA-RISC "
4973 # elif defined(__ICE9__) || defined(__ice9__) || defined(__ICE9) || defined(__ice9)
4974 # define VER_H_PROM_TSA_TEXT "SiCortex ICE-9 "
4975 # elif defined(mips64) || defined(__mips64__) || defined(MIPS64) || defined(_MIPS64_) || defined(__mips64)
4976 # define VER_H_PROM_TSA_TEXT "MIPS64 "
4977 # elif defined(mips) || defined(__mips__) || defined(MIPS) || defined(_MIPS_) || defined(__mips)
4978 # define VER_H_PROM_TSA_TEXT "MIPS "
4979 # elif defined(__OpenRISC__) || defined(__OPENRISC__) || defined(__openrisc__) || defined(__OR1K__) || defined(__OPENRISC1K__)
4980 # define VER_H_PROM_TSA_TEXT "OpenRISC "
4981 # elif defined(__sparc64) || defined(__SPARC64) || defined(__SPARC64__) || defined(__sparc64__)
4982 # define VER_H_PROM_TSA_TEXT "SPARC64 "
4983 # elif defined(__sparc) || defined(__SPARC) || defined(__SPARC__) || defined(__sparc__)
4984 # define VER_H_PROM_TSA_TEXT "SPARC "
4985 # elif defined(__riscv) || defined(__riscv__)
4986 # define VER_H_PROM_TSA_TEXT "RISC-V "
4987 # elif defined(__e2k__) || defined(__E2K__) || defined(__elbrus64__) || defined(__elbrus__) || defined(__ELBRUS__) || defined(__e2k64__)
4988 # if defined(__iset__)
4989 # if __iset__ > 0
4990 # if __iset__ == 1
4991 # define VER_H_PROM_TSA_TEXT "MCST Elbrus v1 "
4992 # elif __iset__ == 2
4993 # define VER_H_PROM_TSA_TEXT "MCST Elbrus v2 "
4994 # elif __iset__ == 3
4995 # define VER_H_PROM_TSA_TEXT "MCST Elbrus v3 "
4996 # elif __iset__ == 4
4997 # define VER_H_PROM_TSA_TEXT "MCST Elbrus v4 "
4998 # elif __iset__ == 5
4999 # define VER_H_PROM_TSA_TEXT "MCST Elbrus v5 "
5000 # elif __iset__ == 6
5001 # define VER_H_PROM_TSA_TEXT "MCST Elbrus v6 "
5002 # elif __iset__ == 7
5003 # define VER_H_PROM_TSA_TEXT "MCST Elbrus v7 "
5004 # elif __iset__ == 8
5005 # define VER_H_PROM_TSA_TEXT "MCST Elbrus v8 "
5006 # elif __iset__ == 9
5007 # define VER_H_PROM_TSA_TEXT "MCST Elbrus v9 "
5008 # elif __iset__ == 10
5009 # define VER_H_PROM_TSA_TEXT "MCST Elbrus v10 "
5010 # else
5011 # define VER_H_PROM_TSA_TEXT "MCST Elbrus "
5012 # endif
5013 # else
5014 # define VER_H_PROM_TSA_TEXT "MCST Elbrus "
5015 # endif
5016 # else
5017 # define VER_H_PROM_TSA_TEXT "MCST Elbrus "
5018 # endif
5019 # elif defined(__myriad2__)
5020 # define VER_H_PROM_TSA_TEXT "Myriad2 "
5021 # elif defined(__loongarch64) || defined(__loongarch__)
5022 # define VER_H_PROM_TSA_TEXT "LoongArch "
5023 # elif defined(_m68851) || defined(__m68k__) || defined(__m68000__) || defined(__M68K)
5024 # define VER_H_PROM_TSA_TEXT "Motorola m68k "
5025 # elif defined(__m88k__) || defined(__m88000__) || defined(__M88K)
5026 # define VER_H_PROM_TSA_TEXT "Motorola m88k "
5027 # elif defined(__VAX__) || defined(__vax__)
5028 # define VER_H_PROM_TSA_TEXT "VAX "
5029 # elif defined(__NIOS2__) || defined(__nios2__)
5030 # define VER_H_PROM_TSA_TEXT "Altera Nios II "
5031 # elif defined(__MICROBLAZE__) || defined(__microblaze__)
5032 # define VER_H_PROM_TSA_TEXT "Xilinx MicroBlaze "
5033 # elif defined(__kvx__) || defined(__KVX__) || defined(__KVX_64__)
5034 # define VER_H_PROM_TSA_TEXT "Kalray KVX "
5035 # endif
5036 # if !defined(VER_H_PROM_TSA_TEXT)
5037 # define BURN_PROM_TSA_TEXT "Unknown Target Arch."
5038 # else
5039 # define BURN_PROM_TSA_TEXT VER_H_PROM_TSA_TEXT
5040 # endif
5041 #endif
5042
5043 #if (defined(__WIN__) || defined(_WIN32) || defined(IS_WINDOWS) || defined(_MSC_VER) || defined(__MINGW32__) || \
5044 defined(__MINGW64__) || defined(CROSS_MINGW32) || defined(CROSS_MINGW64)) && !defined(__CYGWIN__)
5045 # define DC_IS_WINDOWS 1
5046 #else
5047 # define DC_IS_WINDOWS 0
5048 #endif
5049
5050 #if defined(BUILD_PROM_TSV_TEXT)
5051 # define BURN_PROM_TSV_TEXT BUILD_PROM_TSV_TEXT
5052 #else
5053 # if DC_IS_WINDOWS
5054 # define VER_H_PROM_TSV_TEXT "Microsoft Windows "
5055 # elif defined(__CYGWIN__)
5056 # define VER_H_PROM_TSV_TEXT "Windows/Cygwin "
5057 # elif (defined(__sunos) || defined(__sun) || defined(__sun__)) && (defined(SYSV) || defined(__SVR4) || defined(__SVR4__) || \
5058 defined(__svr4__))
5059 # if defined(__illumos__)
5060 # define VER_H_PROM_TSV_TEXT "illumos "
5061 # else
5062 # define VER_H_PROM_TSV_TEXT "Solaris "
5063 # endif
5064 # elif defined(__APPLE__) && defined(__MACH__)
5065 # define VER_H_PROM_TSV_TEXT "Apple macOS "
5066 # elif defined(__GNU__) && !defined(__linux__)
5067 # define VER_H_PROM_TSV_TEXT "GNU/Hurd "
5068 # elif defined(__ANDROID__) && defined(__ANDROID_API__)
5069 # if defined(__linux__)
5070 # define VER_H_PROM_TSV_TEXT "Android/Linux "
5071 # else
5072 # define VER_H_PROM_TSV_TEXT "Android "
5073 # endif
5074 # elif defined(__lynxOS__) || defined(__LYNXOS__) || defined(LynxOS) || defined(LYNXOS)
5075 # define VER_H_PROM_TSV_TEXT "LynxOS "
5076 # elif defined(__HELENOS__)
5077 # define VER_H_PROM_TSV_TEXT "HelenOS "
5078 # elif defined(__linux__)
5079 # if defined(__BIONIC__)
5080 # define VER_H_PROM_TSV_TEXT "Linux/Bionic-libc "
5081 # elif defined(__UCLIBC__) || defined(UCLIBC)
5082 # define VER_H_PROM_TSV_TEXT "Linux/uClibc "
5083 # elif defined(__NEWLIB__)
5084 # define VER_H_PROM_TSV_TEXT "Linux/Newlib "
5085 # elif defined(__dietlibc__)
5086 # define VER_H_PROM_TSV_TEXT "Linux/Diet-libc "
5087 # elif defined(__GLIBC__)
5088 # define VER_H_PROM_TSV_TEXT "GNU/Linux "
5089 # else
5090 # define VER_H_PROM_TSV_TEXT "Linux "
5091 # endif
5092 # elif defined(__HAIKU__)
5093 # define VER_H_PROM_TSV_TEXT "Haiku "
5094 # elif defined(__serenity__)
5095 # define VER_H_PROM_TSV_TEXT "SerenityOS "
5096 # elif defined(__FreeBSD__)
5097 # define VER_H_PROM_TSV_TEXT "FreeBSD "
5098 # elif defined(__NetBSD__)
5099 # define VER_H_PROM_TSV_TEXT "NetBSD "
5100 # elif defined(__OpenBSD__)
5101 # define VER_H_PROM_TSV_TEXT "OpenBSD "
5102 # elif defined(__DragonFly__)
5103 # define VER_H_PROM_TSV_TEXT "DragonFly BSD "
5104 # elif defined(_AIX)
5105 # if !defined(__PASE__)
5106 # define VER_H_PROM_TSV_TEXT "IBM AIX "
5107 # else
5108 # define VER_H_PROM_TSV_TEXT "IBM OS/400 (PASE) "
5109 # endif
5110 # elif defined(__VXWORKS__) || defined(__VXWORKS) || defined(__vxworks) || defined(__vxworks__) || defined(_VxWorks)
5111 # if !defined(__RTP__)
5112 # define VER_H_PROM_TSV_TEXT "VxWorks "
5113 # else
5114 # define VER_H_PROM_TSV_TEXT "VxWorks RTP "
5115 # endif
5116 # elif defined(__rtems__)
5117 # if defined(__FreeBSD_version)
5118 # define VER_H_PROM_TSV_TEXT "RTEMS/LibBSD "
5119 # else
5120 # define VER_H_PROM_TSV_TEXT "RTEMS "
5121 # endif
5122 # elif defined(__ZEPHYR__)
5123 # define VER_H_PROM_TSV_TEXT "Zephyr "
5124 # elif defined(ti_sysbios_BIOS___VERS) || defined(ti_sysbios_BIOS__top__)
5125 # define VER_H_PROM_TSV_TEXT "TI-RTOS (SYS/BIOS) "
5126 # elif defined(__OSV__)
5127 # define VER_H_PROM_TSV_TEXT "OSv "
5128 # elif defined(MINIX) || defined(MINIX3) || defined(MINIX315) || defined(__minix__) || defined(__minix3__) || defined(__minix315__)
5129 # define VER_H_PROM_TSV_TEXT "Minix "
5130 # elif defined(__QNX__)
5131 # if defined(__QNXNTO__)
5132 # define VER_H_PROM_TSV_TEXT "QNX Neutrino "
5133 # else
5134 # define VER_H_PROM_TSV_TEXT "QNX "
5135 # endif
5136 # elif defined(__managarm__)
5137 # define VER_H_PROM_TSV_TEXT "Managarm "
5138 # endif
5139 # if !defined(VER_H_PROM_TSV_TEXT)
5140 # define BURN_PROM_TSV_TEXT "Unknown Target OpSys"
5141 # else
5142 # define BURN_PROM_TSV_TEXT VER_H_PROM_TSV_TEXT
5143 # endif
5144 #endif
5145
5146 #if !defined(VER_H_GIT_DATE_SHORT)
5147 # define VER_H_GIT_DATE_SHORT "2021-01-01"
5148 #endif
5149
5150 #if !defined(BURN_PROM_BUILD_NUM)
5151 # define BURN_PROM_BUILD_NUM " "
5152 #endif
5153
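// BURN copies a fixed-length field into the 1 KiB PROM image at the given byte offset; BURN1 stores a single byte.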
5154 #define BURN(offset, length, string) memcpy ((char *) PROM + (offset), (string), (length))
5155 #define BURN1(offset, byte) PROM[(offset)] = (char) (byte)
5156
5157 (void)memset (PROM, 255, 1024);
5158
5159
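// Octal offsets: model string (000), CPU serial number (013), ship date (026), the four RSW2 bytes (034-037), and the top nibble of RSW2 packed with rsw2Ext (040).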
5160 BURN ( 00, 11, "DPS 8/SIM M");
5161 BURN (013, 11, serial);
5162 BURN (026, 6, ship);
5163 BURN1 (034, getbits36_8 (rsw2, 0));
5164 BURN1 (035, getbits36_8 (rsw2, 8));
5165 BURN1 (036, getbits36_8 (rsw2, 16));
5166 BURN1 (037, getbits36_8 (rsw2, 24));
5167 BURN1 (040, ((getbits36_4 (rsw2, 32) << 4)
5168                | rsw2Ext));
5169
5170
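// Decimal offsets from 60: marker "2", git build date, version fields, build number, release type, version text, and the architecture / operating-system identification strings prepared above.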
5171 BURN ( 60, 1, "2");
5172 BURN ( 70, 10, VER_H_GIT_DATE_SHORT);
5173 BURN ( 80, 3, VER_H_PROM_MAJOR_VER);
5174 BURN ( 83, 3, VER_H_PROM_MINOR_VER);
5175 BURN ( 86, 3, VER_H_PROM_PATCH_VER);
5176 BURN ( 89, 3, VER_H_PROM_OTHER_VER);
5177 BURN ( 92, 8, BURN_PROM_BUILD_NUM);
5178 BURN (100, 1, VER_H_GIT_RELT);
5179 BURN (101, 29, VER_H_PROM_VER_TEXT);
5180 BURN (130, 20, BURN_PROM_OSA_TEXT);
5181 BURN (150, 20, BURN_PROM_OSV_TEXT);
5182 BURN (170, 20, BURN_PROM_TSA_TEXT);
5183 BURN (190, 20, BURN_PROM_TSV_TEXT);
5184 }
5185
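// Report per-CPU statistics: thread CPU time (where measurable), cycle and instruction counts, and core-lock contention counters.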
5186 void cpuStats (uint cpuNo) {
5187 if (! cpus[cpuNo].cycleCnt)
5188 return;
5189
5190
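  // Compile-time probes: Haiku builds disable per-thread CPU clock use when the private clock-info header is absent, and Solaris builds are re-identified as illumos when its vendor macros are present.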
5191 #if defined(__HAIKU__)
5192 # if HAS_INCLUDE(<syscall_clock_info.h>)
5193 # include <syscall_clock_info.h>
5194 # endif
5195 # if !defined(_SYSTEM_SYSCALL_CLOCK_INFO_H)
5196 # if !defined(HAIKU_NO_PTHREAD_GETCPUCLOCKID)
5197 # define HAIKU_NO_PTHREAD_GETCPUCLOCKID
5198 # endif
5199 # endif
5200 #endif
5201
5202
5203
5204 #if defined(__sun) || defined(__sun__)
5205 # if !defined(__illumos__)
5206 # if HAS_INCLUDE(<sys/sysevent.h>)
5207 # include <sys/sysevent.h>
5208 # endif
5209 # if defined(ILLUMOS_VENDOR) || defined(ILLUMOS_KERN_PUB)
5210 # define __illumos__
5211 # endif
5212 # endif
5213 #endif
5214
5215 double cpu_seconds = 0;
5216 int cpu_millis = 0;
5217 char cpu_ftime[64] = {0};
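  // Where the platform exposes per-thread CPU clocks, sample this CPU thread's accumulated CPU time via pthread_getcpuclockid()/clock_gettime().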
5218 #if (defined(THREADZ) || defined(LOCKLESS))
5219 # if !defined(HAIKU_NO_PTHREAD_GETCPUCLOCKID) && !defined(__illumos__) && \
5220 !defined(__APPLE__) && !defined(__PASE__) && !defined(__serenity__)
5221 struct timespec cpu_time;
5222 clockid_t clock_id;
5223 if (pthread_getcpuclockid (cpus[cpuNo].thread_id, &clock_id) == 0) {
5224 if (clock_gettime (clock_id, &cpu_time) == 0) {
5225 cpu_seconds = (double)cpu_time.tv_sec + cpu_time.tv_nsec / 1e9;
5226 }
5227 }
5228 # endif
5229 #endif
5230
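  // Break the measured CPU time into HH:MM:SS plus a millisecond remainder for the summary below.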
5231 if (cpu_seconds > 0 && cpus[cpuNo].instrCnt > 0) {
5232 int cpu_hours = (int)(cpu_seconds / 3600);
5233 int cpu_minutes = (int)((cpu_seconds - cpu_hours * 3600) / 60);
5234 int cpu_secs = (int)(cpu_seconds - (cpu_hours * 3600) - (cpu_minutes * 60));
5235 struct tm cpu_tm = {0};
5236 cpu_tm.tm_hour = cpu_hours;
5237 cpu_tm.tm_min = cpu_minutes;
5238 cpu_tm.tm_sec = cpu_secs;
5239 strftime(cpu_ftime, sizeof(cpu_ftime), "%H:%M:%S", &cpu_tm);
5240 cpu_millis = (int)((cpu_seconds - (cpu_hours * 3600) - (cpu_minutes * 60) - cpu_secs) * 1000);
5241 }
5242
5243 (void)fflush(stderr);
5244 (void)fflush(stdout);
5245 sim_msg ("\r\n");
5246 (void)fflush(stdout);
5247 (void)fflush(stderr);
5248 sim_msg ("\r+---------------------------------+\r\n");
5249 sim_msg ("\r| CPU %c Statistics |\r\n", 'A' + cpuNo);
5250 sim_msg ("\r+---------------------------------+\r\n");
5251 if (cpu_seconds > 0 && cpus[cpuNo].instrCnt > 0) {
5252 sim_msg ("\r| CPU Time Used %11s.%03d |\r\n", cpu_ftime, cpu_millis);
5253 sim_msg ("\r+---------------------------------+\r\n");
5254 }
5255 (void)fflush(stdout);
5256 (void)fflush(stderr);
5257 #if defined(_AIX) && !defined(__PASE__)
5258 struct rusage rusage;
5259 if (!pthread_getrusage_np(cpus[cpuNo].thread_id, &rusage, PTHRDSINFO_RUSAGE_COLLECT)) {
5260 sim_msg ("\r| Volun. CtxtSw %'15llu |\r\n", (unsigned long long)rusage.ru_nvcsw);
5261 sim_msg ("\r| Invol. CtxtSw %'15llu |\r\n", (unsigned long long)rusage.ru_nivcsw);
5262 sim_msg ("\r+---------------------------------+\r\n");
5263 }
5264 #endif
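// WIN_STDIO builds print the same report without the %' grouping flag, a POSIX printf extension the Windows C runtime does not support.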
5265 #if defined(WIN_STDIO)
5266 sim_msg ("\r| cycles %15llu |\r\n", (unsigned long long)cpus[cpuNo].cycleCnt);
5267 sim_msg ("\r| instructions %15llu |\r\n", (unsigned long long)cpus[cpuNo].instrCnt);
5268 (void)fflush(stdout);
5269 (void)fflush(stderr);
5270 sim_msg ("\r+---------------------------------+\r\n");
5271 sim_msg ("\r| lockCnt %15llu |\r\n", (unsigned long long)cpus[cpuNo].coreLockState.lockCnt);
5272 sim_msg ("\r| lockImmediate %15llu |\r\n", (unsigned long long)cpus[cpuNo].coreLockState.lockImmediate);
5273 (void)fflush(stdout);
5274 (void)fflush(stderr);
5275 sim_msg ("\r+---------------------------------+\r\n");
5276 sim_msg ("\r| lockWait %15llu |\r\n", (unsigned long long)cpus[cpuNo].coreLockState.lockWait);
5277 sim_msg ("\r| lockWaitMax %15llu |\r\n", (unsigned long long)cpus[cpuNo].coreLockState.lockWaitMax);
5278 (void)fflush(stdout);
5279 (void)fflush(stderr);
5280 # if !defined(SCHED_NEVER_YIELD)
5281 sim_msg ("\r| lockYield %15llu |\r\n", (unsigned long long)cpus[cpuNo].coreLockState.lockYield);
5282 (void)fflush(stdout);
5283 (void)fflush(stderr);
5284 # else
5285 sim_msg ("\r| lockYield ---- |\r\n");
5286 (void)fflush(stdout);
5287 (void)fflush(stderr);
5288 # endif
5289 sim_msg ("\r+---------------------------------+");
5290 (void)fflush(stdout);
5291 (void)fflush(stderr);
5292 # if !defined(UCACHE)
5293 # if !defined(UCACHE_STATS)
5294 sim_msg ("\r\n");
5295 # endif
5296 # endif
5297 (void)fflush(stdout);
5298 (void)fflush(stderr);
5299 #else
5300 sim_msg ("\r| cycles %'15llu |\r\n", (unsigned long long)cpus[cpuNo].cycleCnt);
5301 sim_msg ("\r| instructions %'15llu |\r\n", (unsigned long long)cpus[cpuNo].instrCnt);
5302 (void)fflush(stdout);
5303 (void)fflush(stderr);
5304 sim_msg ("\r+---------------------------------+\r\n");
5305 sim_msg ("\r| lockCnt %'15llu |\r\n", (unsigned long long)cpus[cpuNo].coreLockState.lockCnt);
5306 sim_msg ("\r| lockImmediate %'15llu |\r\n", (unsigned long long)cpus[cpuNo].coreLockState.lockImmediate);
5307 (void)fflush(stdout);
5308 (void)fflush(stderr);
5309 sim_msg ("\r+---------------------------------+\r\n");
5310 sim_msg ("\r| lockWait %'15llu |\r\n", (unsigned long long)cpus[cpuNo].coreLockState.lockWait);
5311 sim_msg ("\r| lockWaitMax %'15llu |\r\n", (unsigned long long)cpus[cpuNo].coreLockState.lockWaitMax);
5312 (void)fflush(stdout);
5313 (void)fflush(stderr);
5314 # if !defined(SCHED_NEVER_YIELD)
5315 sim_msg ("\r| lockYield %'15llu |\r\n", (unsigned long long)cpus[cpuNo].coreLockState.lockYield);
5316 (void)fflush(stdout);
5317 (void)fflush(stderr);
5318 # else
5319 sim_msg ("\r| lockYield ---- |\r\n");
5320 (void)fflush(stdout);
5321 (void)fflush(stderr);
5322 # endif
5323 sim_msg ("\r+---------------------------------+");
5324 (void)fflush(stdout);
5325 (void)fflush(stderr);
5326 # if !defined(UCACHE)
5327 # if !defined(UCACHE_STATS)
5328 sim_msg ("\r\n");
5329 # endif
5330 # endif
5331 (void)fflush(stderr);
5332 (void)fflush(stdout);
5333 #endif
5334
5335 #if defined(UCACHE_STATS)
5336 ucacheStats (cpuNo);
5337 #endif
5338
5339
5340
5341
5342
5343
5344
5345 }
5346
5347 bool running_perf_test;
5348
5349 #if defined(THREADZ) || defined(LOCKLESS)
5350 # include <locale.h>
5351 # include "segldr.h"
5352
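// Stand-alone performance harness: bring up a single CPU, restore the memory image named by testName (default "strip.mem"), and run the instruction loop on the calling thread.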
5353 void perfTest (char * testName) {
5354 running_perf_test = true;
5355
5356 if (testName == NULL)
5357 testName = "strip.mem";
5358
5359 # if !defined(NO_LOCALE)
5360 (void) setlocale(LC_NUMERIC, "");
5361 # endif
5362
5363
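  // Allocate a dedicated system_state for the test run (aligned allocation everywhere except AIX); abort if the allocation fails.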
5364 # if !defined(_AIX)
5365 system_state = aligned_malloc (sizeof (struct system_state_s));
5366 # else
5367 system_state = malloc (sizeof (struct system_state_s));
5368 # endif
5369 if (!system_state)
5370 {
5371 (void)fprintf (stderr, "\rFATAL: Out of memory! Aborting at %s[%s:%d]\r\n",
5372 __func__, __FILE__, __LINE__);
5373 # if defined(USE_BACKTRACE)
5374 # if defined(SIGUSR2)
5375 (void)raise(SIGUSR2);
5376
5377 # endif
5378 # endif
5379 abort();
5380 }
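  // If free-memory information is unavailable (0) or at least ~192 MB is reported free, try to lock the state image in RAM; a failed mlock() only sets mlock_failure.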
5381 # if !defined(__MINGW64__) && !defined(__MINGW32__) && !defined(CROSS_MINGW64) && !defined(CROSS_MINGW32) && !defined(__PASE__)
5382 if (0 == sim_free_memory || sim_free_memory >= 192000000) {
5383 if (mlock(system_state, sizeof(struct system_state_s)) == -1) {
5384 mlock_failure = true;
5385 }
5386 } else {
5387 # if defined(TESTING)
5388 sim_warn ("Low memory - no memory locking attempted.\r\n");
5389 # else
5390 (void)system_state;
5391 # endif
5392 }
5393 # endif
5394 M = system_state->M;
5395 # if defined(M_SHARED)
5396 cpus = system_state->cpus;
5397 # endif
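  // Clear every CPU's state block and set the default fault base before the image is restored.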
5398 (void) memset (cpus, 0, sizeof (cpu_state_t) * N_CPU_UNITS_MAX);
5399 for (int i = 0; i < N_CPU_UNITS_MAX; i ++) {
5400 cpus[i].switches.FLT_BASE = 2;
5401 cpus[i].instrCnt = 0;
5402 cpus[i].cycleCnt = 0;
5403 for (int j = 0; j < N_FAULTS; j ++)
5404 cpus[i].faultCnt [j] = 0;
5405 }
5406
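  // Enable emcall on CPU 0, expose a single OPC unit, reset the CPU into the FETCH cycle, load the saved image, and enter the instruction loop directly.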
5407 cpus[0].tweaks.enable_emcall = 1;
5408 opc_dev.numunits = 1;
5409 cpu_reset_unit_idx (0, false);
5410 set_cpu_cycle (& cpus[0], FETCH_cycle);
5411 mrestore (testName);
5412 _cpup = & cpus[0];
5413 threadz_sim_instr ();
5414 }
5415 #endif
5416