This source file includes the following definitions:
- cpu_show_config
- cpu_set_config
- cpu_show_nunits
- cpu_set_nunits
- cpu_show_kips
- cpu_set_kips
- cpu_show_stall
- cpu_set_stall
- setCPUConfigL68
- setCPUConfigDPS8M
- cycle_str
- set_cpu_cycle
- set_cpu_idx
- cpu_reset_unit_idx
- simh_cpu_reset_and_clear_unit
- simh_cpu_reset_unit
- str_SDW0
- cpu_boot
- setup_scbank_map
- lookup_cpu_mem_map
- get_serial_number
- do_stats
- ev_poll_cb
- cpu_init
- cpu_reset
- sim_cpu_reset
- cpu_ex
- cpu_dep
- get_highest_intr
- sample_interrupts
- simh_hooks
- panel_process_event
- sim_instr
- cpu_thread_main
- do_LUF_fault
- set_temporary_absolute_mode
- clear_temporary_absolute_mode
- becomeClockMaster
- giveupClockMaster
- threadz_sim_instr
- operand_size
- readOperandRead
- readOperandRMW
- write_operand
- set_mem_watch
- nem_check
- core_read
- core_read_lock
- core_write
- core_write_unlock
- core_unlock_all
- core_write_zone
- core_read2
- core_write2
- decode_instruction
- is_priv_mode
- get_bar_mode
- get_addr_mode
- set_addr_mode
- get_BAR_address
- add_history
- add_history_force
- add_dps8m_CU_history
- add_dps8m_DU_OU_history
- add_dps8m_APU_history
- add_dps8m_EAPU_history
- add_l68_CU_history
- add_l68_DU_history
- add_l68_OU_history
- add_l68_APU_history
- get_dbg_verb
- dps8_sim_debug
- setupPROM
- cpuStats
- perfTest
32 #include <stdio.h>
33 #include <unistd.h>
34 #include <ctype.h>
35
36 #if !defined(__MINGW64__) && !defined(__MINGW32__) && !defined(CROSS_MINGW64) && !defined(CROSS_MINGW32)
37 # include <sys/mman.h>
38 #endif
39
40 #include "dps8.h"
41 #include "dps8_sys.h"
42 #include "dps8_iom.h"
43 #include "dps8_cable.h"
44 #include "dps8_cpu.h"
45 #include "dps8_rt.h"
46 #include "dps8_priv.h"
47 #include "dps8_addrmods.h"
48 #include "dps8_faults.h"
49 #include "dps8_scu.h"
50 #include "dps8_append.h"
51 #include "dps8_ins.h"
52 #include "dps8_state.h"
53 #include "dps8_math.h"
54 #include "dps8_iefp.h"
55 #include "dps8_console.h"
56 #include "dps8_fnp2.h"
57 #include "dps8_socket_dev.h"
58 #include "dps8_crdrdr.h"
59 #include "dps8_absi.h"
60 #include "dps8_mgp.h"
61 #include "dps8_utils.h"
62 #include "dps8_memalign.h"
63
64 #if defined(M_SHARED)
65 # include "shm.h"
66 #endif
67
68 #include "dps8_opcodetable.h"
69 #include "../simh/sim_defs.h"
70 #include "../simh/sim_os_mem.h"
71
72 #if defined(THREADZ) || defined(LOCKLESS)
73 # include "threadz.h"
74 __thread uint current_running_cpu_idx;
75 #endif
76
77 #include "ver.h"
78
79 #if defined(_AIX) && !defined(__PASE__)
80 # include <pthread.h>
81 # include <sys/resource.h>
82 #endif
83
84 #if defined(NO_LOCALE)
85 # define xstrerror_l strerror
86 #endif
87
88 #define DBG_CTR cpu.cycleCnt
89
90 #define ASSUME0 0
91
92 #define FREE(p) do \
93 { \
94 free((p)); \
95 (p) = NULL; \
96 } while(0)
97
98
99
100 static UNIT cpu_unit [N_CPU_UNITS_MAX] = {
101 #if defined(NO_C_ELLIPSIS)
102 { UDATA (NULL, UNIT_FIX|UNIT_BINK, MEMSIZE), 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL },
103 { UDATA (NULL, UNIT_FIX|UNIT_BINK, MEMSIZE), 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL },
104 { UDATA (NULL, UNIT_FIX|UNIT_BINK, MEMSIZE), 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL },
105 { UDATA (NULL, UNIT_FIX|UNIT_BINK, MEMSIZE), 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL },
106 { UDATA (NULL, UNIT_FIX|UNIT_BINK, MEMSIZE), 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL },
107 { UDATA (NULL, UNIT_FIX|UNIT_BINK, MEMSIZE), 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL },
108 { UDATA (NULL, UNIT_FIX|UNIT_BINK, MEMSIZE), 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL },
109 { UDATA (NULL, UNIT_FIX|UNIT_BINK, MEMSIZE), 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL }
110 #else
111 [0 ... N_CPU_UNITS_MAX - 1] = {
112 UDATA (NULL, UNIT_FIX|UNIT_BINK, MEMSIZE), 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL
113 }
114 #endif
115 };
116
117 #define UNIT_IDX(uptr) ((uptr) - cpu_unit)
118
119
120 #define LOCKUP_KIPS 1000
121 static uint64 kips = LOCKUP_KIPS;
122 static uint64 luf_limits[] =
123 {
124 2000*LOCKUP_KIPS/1000,
125 4000*LOCKUP_KIPS/1000,
126 8000*LOCKUP_KIPS/1000,
127 16000*LOCKUP_KIPS/1000,
128 32000*LOCKUP_KIPS/1000
129 };
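/* kips is the simulated execution rate in thousands of instructions per
   second.  luf_limits[] holds the lockup-fault thresholds, expressed in
   instructions and scaled from the KIPS rate; they are presumably indexed
   by the CMR.luf setting (compare do_LUF_fault(), which clears
   cpu.lufCounter when the fault is finally taken).  cpu_set_kips()
   rescales this table whenever the rate is changed. */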
130
131 struct stall_point_s stall_points [N_STALL_POINTS];
132 bool stall_point_active = false;
133
134 #if defined(PANEL68)
135 static void panel_process_event (void);
136 #endif
137
138 static t_stat simh_cpu_reset_and_clear_unit (UNIT * uptr,
139 UNUSED int32 value,
140 UNUSED const char * cptr,
141 UNUSED void * desc);
142 char * cycle_str (cycles_e cycle);
143
144 static t_stat cpu_show_config (UNUSED FILE * st, UNIT * uptr,
145 UNUSED int val, UNUSED const void * desc)
146 {
147 long cpu_unit_idx = UNIT_IDX (uptr);
148 if (cpu_unit_idx < 0 || cpu_unit_idx >= N_CPU_UNITS_MAX)
149 {
150 sim_warn ("error: Invalid unit number %ld\r\n", (long) cpu_unit_idx);
151 return SCPE_ARG;
152 }
153
154 #define PFC_INT8 "%c%c%c%c%c%c%c%c"
155
156 #define PBI_8(i) \
157 ( ((i) & 0x80ll) ? '1' : '0' ), \
158 ( ((i) & 0x40ll) ? '1' : '0' ), \
159 ( ((i) & 0x20ll) ? '1' : '0' ), \
160 ( ((i) & 0x10ll) ? '1' : '0' ), \
161 ( ((i) & 0x08ll) ? '1' : '0' ), \
162 ( ((i) & 0x04ll) ? '1' : '0' ), \
163 ( ((i) & 0x02ll) ? '1' : '0' ), \
164 ( ((i) & 0x01ll) ? '1' : '0' )
165
166 #define PFC_INT16 PFC_INT8 PFC_INT8
167 #define PFC_INT32 PFC_INT16 PFC_INT16
168 #define PFC_INT64 PFC_INT32 PFC_INT32
169
170 #define PBI_16(i) PBI_8((i) >> 8), PBI_8(i)
171 #define PBI_32(i) PBI_16((i) >> 16), PBI_16(i)
172 #define PBI_64(i) PBI_32((i) >> 32), PBI_32(i)
173
174 char dsbin[66], adbin[34];
175
176 sim_msg ("CPU unit number %ld\r\n", (long) cpu_unit_idx);
177
178 sim_msg ("Fault base: %03o(8)\r\n",
179 cpus[cpu_unit_idx].switches.FLT_BASE);
180 sim_msg ("CPU number: %01o(8)\r\n",
181 cpus[cpu_unit_idx].switches.cpu_num);
182 sim_msg ("Data switches: %012llo(8)\r\n",
183 (unsigned long long)cpus[cpu_unit_idx].switches.data_switches);
184 (void)snprintf (dsbin, 65, PFC_INT64,
185 PBI_64((unsigned long long)cpus[cpu_unit_idx].switches.data_switches));
186 sim_msg (" %36s(2)\r\n",
187 dsbin + strlen(dsbin) - 36);
188 sim_msg ("Address switches: %06o(8)\r\n",
189 cpus[cpu_unit_idx].switches.addr_switches);
190 (void)snprintf (adbin, 33, PFC_INT32,
191 PBI_32(cpus[cpu_unit_idx].switches.addr_switches));
192 sim_msg (" %18s(2)\r\n",
193 adbin + strlen(adbin) - 18);
194 for (int i = 0; i < (cpus[cpu_unit_idx].tweaks.l68_mode ? N_L68_CPU_PORTS : N_DPS8M_CPU_PORTS); i ++)
195 {
196 sim_msg ("Port%c enable: %01o(8)\r\n",
197 'A' + i, cpus[cpu_unit_idx].switches.enable [i]);
198 sim_msg ("Port%c init enable: %01o(8)\r\n",
199 'A' + i, cpus[cpu_unit_idx].switches.init_enable [i]);
200 sim_msg ("Port%c assignment: %01o(8)\r\n",
201 'A' + i, cpus[cpu_unit_idx].switches.assignment [i]);
202 sim_msg ("Port%c interlace: %01o(8)\r\n",
203 'A' + i, cpus[cpu_unit_idx].switches.interlace [i]);
204 sim_msg ("Port%c store size: %01o(8)\r\n",
205 'A' + i, cpus[cpu_unit_idx].switches.store_size [i]);
206 }
207 sim_msg ("Processor mode: %s [%o]\r\n",
208 cpus[cpu_unit_idx].switches.procMode == \
209 procModeMultics ? "Multics" : cpus[cpu_unit_idx].switches.procMode == procModeGCOS ? "GCOS" : "???",
210 cpus[cpu_unit_idx].switches.procMode);
211 sim_msg ("8K Cache: %s\r\n",
212 cpus[cpu_unit_idx].switches.enable_cache ? "Enabled" : "Disabled");
213 sim_msg ("SDWAM: %s\r\n",
214 cpus[cpu_unit_idx].switches.sdwam_enable ? "Enabled" : "Disabled");
215 sim_msg ("PTWAM: %s\r\n",
216 cpus[cpu_unit_idx].switches.ptwam_enable ? "Enabled" : "Disabled");
217
218 sim_msg ("Processor speed: %02o(8)\r\n",
219 cpus[cpu_unit_idx].options.proc_speed);
220 sim_msg ("DIS enable: %01o(8)\r\n",
221 cpus[cpu_unit_idx].tweaks.dis_enable);
222 sim_msg ("Steady clock: %01o(8)\r\n",
223 scu [0].steady_clock);
224 sim_msg ("Halt on unimplemented: %01o(8)\r\n",
225 cpus[cpu_unit_idx].tweaks.halt_on_unimp);
226 sim_msg ("Enable simulated SDWAM/PTWAM: %01o(8)\r\n",
227 cpus[cpu_unit_idx].tweaks.enable_wam);
228 sim_msg ("Report faults: %01o(8)\r\n",
229 cpus[cpu_unit_idx].tweaks.report_faults);
230 sim_msg ("TRO faults enabled: %01o(8)\r\n",
231 cpus[cpu_unit_idx].tweaks.tro_enable);
232 sim_msg ("drl fatal enabled: %01o(8)\r\n",
233 cpus[cpu_unit_idx].tweaks.drl_fatal);
234 sim_msg ("useMap: %d\r\n",
235 cpus[cpu_unit_idx].tweaks.useMap);
236 sim_msg ("PROM installed: %01o(8)\r\n",
237 cpus[cpu_unit_idx].options.prom_installed);
238 sim_msg ("Hex mode installed: %01o(8)\r\n",
239 cpus[cpu_unit_idx].options.hex_mode_installed);
240 sim_msg ("8K cache installed: %01o(8)\r\n",
241 cpus[cpu_unit_idx].options.cache_installed);
242 sim_msg ("Clock slave installed: %01o(8)\r\n",
243 cpus[cpu_unit_idx].options.clock_slave_installed);
244 #if defined(AFFINITY)
245 if (cpus[cpu_unit_idx].set_affinity)
246 sim_msg ("CPU affinity: %d\r\n", cpus[cpu_unit_idx].affinity);
247 else
248 sim_msg ("CPU affinity: not set\r\n");
249 #endif
250 sim_msg ("ISOLTS mode: %01o(8)\r\n", cpus[cpu_unit_idx].tweaks.isolts_mode);
251 sim_msg ("NODIS mode: %01o(8)\r\n", cpus[cpu_unit_idx].tweaks.nodis);
252 sim_msg ("6180 mode: %01o(8) [%s]\r\n",
253 cpus[cpu_unit_idx].tweaks.l68_mode, cpus[cpu_unit_idx].tweaks.l68_mode ? "6180" : "DPS8/M");
254 return SCPE_OK;
255 }
281 static config_value_list_t cfg_multics_fault_base [] =
282 {
283 { "multics", 2 },
284 { NULL, 0 }
285 };
286
287 static config_value_list_t cfg_on_off [] =
288 {
289 { "off", 0 },
290 { "on", 1 },
291 { "disable", 0 },
292 { "enable", 1 },
293 { NULL, 0 }
294 };
295
296 static config_value_list_t cfg_l68_mode [] = {
297 { "dps8/m", 0 },
298 { "dps8m", 0 },
299 { "dps8", 0 },
300 { "l68", 1 },
301 { "l6180", 1 },
302 { "6180", 1 },
    { NULL,     0 }   /* list terminator, as in the other cfg_ value lists */
303 };
304
305 static config_value_list_t cfg_cpu_mode [] =
306 {
307 { "gcos", 0 },
308 { "multics", 1 },
309 { NULL, 0 }
310 };
311
312 static config_value_list_t cfg_port_letter [] =
313 {
314 { "a", 0 },
315 { "b", 1 },
316 { "c", 2 },
317 { "d", 3 },
318 { "e", 4 },
319 { "f", 5 },
320 { "g", 6 },
321 { "h", 7 },
322 { NULL, 0 }
323 };
324
325 static config_value_list_t cfg_interlace [] =
326 {
327 { "off", 0 },
328 { "2", 2 },
329 { "4", 4 },
330 { NULL, 0 }
331 };
332
333 #if defined(AFFINITY)
334 static config_value_list_t cfg_affinity [] =
335 {
336 { "off", -1 },
337 { NULL, 0 }
338 };
339 #endif
340
341 static config_value_list_t cfg_size_list [] =
342 {
405 { "32", 8 },
406 { "32K", 8 },
407 { "64", 9 },
408 { "64K", 9 },
409 { "128", 10 },
410 { "128K", 10 },
411 { "256", 11 },
412 { "256K", 11 },
413 { "512", 12 },
414 { "512K", 12 },
415 { "1024", 13 },
416 { "1024K", 13 },
417 { "1M", 13 },
418 { "2048", 14 },
419 { "2048K", 14 },
420 { "2M", 14 },
421 { "4096", 15 },
422 { "4096K", 15 },
423 { "4M", 15 },
424 { NULL, 0 }
425 };
426
427 static config_list_t cpu_config_list [] =
428 {
429 { "faultbase", 0, 0177, cfg_multics_fault_base },
430 { "num", 0, 07, NULL },
431 { "data", 0, 0777777777777, NULL },
432 { "stopnum", 0, 999999, NULL },
433 { "mode", 0, 01, cfg_cpu_mode },
434 { "speed", 0, 017, NULL },
435 { "port", 0, N_CPU_PORTS - 1, cfg_port_letter },
436 { "assignment", 0, 7, NULL },
437 { "interlace", 0, 1, cfg_interlace },
438 { "enable", 0, 1, cfg_on_off },
439 { "init_enable", 0, 1, cfg_on_off },
440 { "store_size", 0, 7, cfg_size_list },
441 { "enable_cache", 0, 1, cfg_on_off },
442 { "sdwam", 0, 1, cfg_on_off },
443 { "ptwam", 0, 1, cfg_on_off },
444
445
446 { "dis_enable", 0, 1, cfg_on_off },
447
448 { "steady_clock", 0, 1, cfg_on_off },
449 { "halt_on_unimplemented", 0, 1, cfg_on_off },
450 { "enable_wam", 0, 1, cfg_on_off },
451 { "report_faults", 0, 1, cfg_on_off },
452 { "tro_enable", 0, 1, cfg_on_off },
453 { "drl_fatal", 0, 1, cfg_on_off },
454 { "useMap", 0, 1, cfg_on_off },
455 { "address", 0, 0777777, NULL },
456 { "prom_installed", 0, 1, cfg_on_off },
457 { "hex_mode_installed", 0, 1, cfg_on_off },
458 { "cache_installed", 0, 1, cfg_on_off },
459 { "clock_slave_installed", 0, 1, cfg_on_off },
460 { "enable_emcall", 0, 1, cfg_on_off },
461
462
463 #if defined(AFFINITY)
464 { "affinity", -1, 32767, cfg_affinity },
465 #endif
466 { "isolts_mode", 0, 1, cfg_on_off },
467 { "nodis", 0, 1, cfg_on_off },
468 { "l68_mode", 0, 1, cfg_l68_mode },
469 { NULL, 0, 0, NULL }
470 };
471
472 static t_stat cpu_set_config (UNIT * uptr, UNUSED int32 value,
473 const char * cptr, UNUSED void * desc)
474 {
475 long cpu_unit_idx = UNIT_IDX (uptr);
476 if (cpu_unit_idx < 0 || cpu_unit_idx >= N_CPU_UNITS_MAX)
477 {
478 sim_warn ("error: cpu_set_config: Invalid unit number %ld\r\n",
479 (long) cpu_unit_idx);
480 return SCPE_ARG;
481 }
482
483 static int port_num = 0;
484
485 config_state_t cfg_state = { NULL, NULL };
486
487 for (;;)
488 {
489 int64_t v;
490 int rc = cfg_parse (__func__, cptr, cpu_config_list,
491 & cfg_state, & v);
492 if (rc == -1)
493 {
494 break;
495 }
496 if (rc == -2)
497 {
498 cfg_parse_done (& cfg_state);
499 return SCPE_ARG;
500 }
501
502 const char * p = cpu_config_list [rc] . name;
503 if (strcmp (p, "faultbase") == 0)
504 cpus[cpu_unit_idx].switches.FLT_BASE = (uint) v;
505 else if (strcmp (p, "num") == 0)
506 cpus[cpu_unit_idx].switches.cpu_num = (uint) v;
507 else if (strcmp (p, "data") == 0)
508 cpus[cpu_unit_idx].switches.data_switches = (word36) v;
509 else if (strcmp (p, "stopnum") == 0)
510 {
511
512
513 int64_t d1 = (v / 1000) % 10;
514 int64_t d2 = (v / 100) % 10;
515 int64_t d3 = (v / 10) % 10;
516 int64_t d4 = (v / 1) % 10;
517 word36 d = 0123000000000;
518 putbits36_6 (& d, 9, (word4) d1);
519 putbits36_6 (& d, 15, (word4) d2);
520 putbits36_6 (& d, 21, (word4) d3);
521 putbits36_6 (& d, 27, (word4) d4);
522 cpus[cpu_unit_idx].switches.data_switches = d;
523 }
524 else if (strcmp (p, "address") == 0)
525 cpus[cpu_unit_idx].switches.addr_switches = (word18) v;
526 else if (strcmp (p, "mode") == 0)
527 cpus[cpu_unit_idx].switches.procMode = v ? procModeMultics : procModeGCOS;
528 else if (strcmp (p, "speed") == 0)
529 cpus[cpu_unit_idx].options.proc_speed = (uint) v;
530 else if (strcmp (p, "port") == 0) {
531 if ((! cpus[cpu_unit_idx].tweaks.l68_mode) && (int) v > 3) {
532 cfg_parse_done (& cfg_state);
533 return SCPE_ARG;
534 }
535 port_num = (int) v;
536 }
537 else if (strcmp (p, "assignment") == 0)
538 cpus[cpu_unit_idx].switches.assignment [port_num] = (uint) v;
539 else if (strcmp (p, "interlace") == 0)
540 cpus[cpu_unit_idx].switches.interlace [port_num] = (uint) v;
541 else if (strcmp (p, "enable") == 0)
542 cpus[cpu_unit_idx].switches.enable [port_num] = (uint) v;
543 else if (strcmp (p, "init_enable") == 0)
544 cpus[cpu_unit_idx].switches.init_enable [port_num] = (uint) v;
545 else if (strcmp (p, "store_size") == 0) {
546 if (v > 7) {
547 if (cpus[cpu_unit_idx].tweaks.l68_mode) {
548 switch (v) {
549 case 8: v = 0; break;
550 case 9: v = 1; break;
551 case 10: v = 3; break;
552 case 11: v = 7; break;
553 case 12: v = 4; break;
554 case 13: v = 5; break;
555 case 14: v = 6; break;
556 case 15: v = 2; break;
557 }
558 } else {
559 switch (v) {
560 case 8: v = 0; break;
561 case 9: v = 1; break;
562 case 10: v = 2; break;
563 case 11: v = 3; break;
564 case 12: v = 4; break;
565 case 13: v = 5; break;
566 case 14: v = 6; break;
567 case 15: v = 7; break;
568 }
569 }
570 }
571 cpus[cpu_unit_idx].switches.store_size [port_num] = (uint) v;
572 }
573 else if (strcmp (p, "enable_cache") == 0)
574 cpus[cpu_unit_idx].switches.enable_cache = (uint) v ? true : false;
575 else if (strcmp (p, "sdwam") == 0)
576 cpus[cpu_unit_idx].switches.sdwam_enable = (uint) v ? true : false;
577 else if (strcmp (p, "ptwam") == 0)
578 cpus[cpu_unit_idx].switches.ptwam_enable = (uint) v ? true : false;
579 else if (strcmp (p, "dis_enable") == 0)
580 cpus[cpu_unit_idx].tweaks.dis_enable = (uint) v;
581 else if (strcmp (p, "steady_clock") == 0)
582 scu [0].steady_clock = (uint) v;
583 else if (strcmp (p, "halt_on_unimplemented") == 0)
584 cpus[cpu_unit_idx].tweaks.halt_on_unimp = (uint) v;
585 else if (strcmp (p, "enable_wam") == 0)
586 cpus[cpu_unit_idx].tweaks.enable_wam = (uint) v;
587 else if (strcmp (p, "report_faults") == 0)
588 cpus[cpu_unit_idx].tweaks.report_faults = (uint) v;
589 else if (strcmp (p, "tro_enable") == 0)
590 cpus[cpu_unit_idx].tweaks.tro_enable = (uint) v;
591 else if (strcmp (p, "drl_fatal") == 0)
592 cpus[cpu_unit_idx].tweaks.drl_fatal = (uint) v;
593 else if (strcmp (p, "useMap") == 0)
594 cpus[cpu_unit_idx].tweaks.useMap = v;
595 else if (strcmp (p, "prom_installed") == 0)
596 cpus[cpu_unit_idx].options.prom_installed = v;
597 else if (strcmp (p, "hex_mode_installed") == 0)
598 cpus[cpu_unit_idx].options.hex_mode_installed = v;
599 else if (strcmp (p, "cache_installed") == 0)
600 cpus[cpu_unit_idx].options.cache_installed = v;
601 else if (strcmp (p, "clock_slave_installed") == 0)
602 cpus[cpu_unit_idx].options.clock_slave_installed = v;
603 else if (strcmp (p, "enable_emcall") == 0)
604 cpus[cpu_unit_idx].tweaks.enable_emcall = v;
605 #if defined(AFFINITY)
606 else if (strcmp (p, "affinity") == 0)
607 if (v < 0)
608 {
609 cpus[cpu_unit_idx].set_affinity = false;
610 }
611 else
612 {
613 cpus[cpu_unit_idx].set_affinity = true;
614 cpus[cpu_unit_idx].affinity = (uint) v;
615 }
616 #endif
617 else if (strcmp (p, "isolts_mode") == 0)
618 {
619 bool was = cpus[cpu_unit_idx].tweaks.isolts_mode;
620 cpus[cpu_unit_idx].tweaks.isolts_mode = v;
621 if (v && ! was) {
622 uint store_sz;
623 if (cpus[cpu_unit_idx].tweaks.l68_mode)
624 store_sz = 3;
625 else
626 store_sz = 2;
627 cpus[cpu_unit_idx].isolts_switches_save = cpus[cpu_unit_idx].switches;
628
629 cpus[cpu_unit_idx].switches.data_switches = 00000030714000;
630 cpus[cpu_unit_idx].switches.addr_switches = 0100150;
631 cpus[cpu_unit_idx].tweaks.useMap = true;
632 cpus[cpu_unit_idx].tweaks.enable_wam = true;
633 cpus[cpu_unit_idx].switches.assignment [0] = 0;
634 cpus[cpu_unit_idx].switches.interlace [0] = false;
635 cpus[cpu_unit_idx].switches.enable [0] = false;
636 cpus[cpu_unit_idx].switches.init_enable [0] = false;
637 cpus[cpu_unit_idx].switches.store_size [0] = store_sz;
638
639 cpus[cpu_unit_idx].switches.assignment [1] = 0;
640 cpus[cpu_unit_idx].switches.interlace [1] = false;
641 cpus[cpu_unit_idx].switches.enable [1] = true;
642 cpus[cpu_unit_idx].switches.init_enable [1] = false;
643 cpus[cpu_unit_idx].switches.store_size [1] = store_sz;
644
645 cpus[cpu_unit_idx].switches.assignment [2] = 0;
646 cpus[cpu_unit_idx].switches.interlace [2] = false;
647 cpus[cpu_unit_idx].switches.enable [2] = false;
648 cpus[cpu_unit_idx].switches.init_enable [2] = false;
649 cpus[cpu_unit_idx].switches.store_size [2] = store_sz;
650
651 cpus[cpu_unit_idx].switches.assignment [3] = 0;
652 cpus[cpu_unit_idx].switches.interlace [3] = false;
653 cpus[cpu_unit_idx].switches.enable [3] = false;
654 cpus[cpu_unit_idx].switches.init_enable [3] = false;
655 cpus[cpu_unit_idx].switches.store_size [3] = store_sz;
656
657 if (cpus[cpu_unit_idx].tweaks.l68_mode) {
658 cpus[cpu_unit_idx].switches.assignment [4] = 0;
659 cpus[cpu_unit_idx].switches.interlace [4] = false;
660 cpus[cpu_unit_idx].switches.enable [4] = false;
661 cpus[cpu_unit_idx].switches.init_enable [4] = false;
662 cpus[cpu_unit_idx].switches.store_size [4] = 3;
663
664 cpus[cpu_unit_idx].switches.assignment [5] = 0;
665 cpus[cpu_unit_idx].switches.interlace [5] = false;
666 cpus[cpu_unit_idx].switches.enable [5] = false;
667 cpus[cpu_unit_idx].switches.init_enable [5] = false;
668 cpus[cpu_unit_idx].switches.store_size [5] = 3;
669
670 cpus[cpu_unit_idx].switches.assignment [6] = 0;
671 cpus[cpu_unit_idx].switches.interlace [6] = false;
672 cpus[cpu_unit_idx].switches.enable [6] = false;
673 cpus[cpu_unit_idx].switches.init_enable [6] = false;
674 cpus[cpu_unit_idx].switches.store_size [6] = 3;
675
676 cpus[cpu_unit_idx].switches.assignment [7] = 0;
677 cpus[cpu_unit_idx].switches.interlace [7] = false;
678 cpus[cpu_unit_idx].switches.enable [7] = false;
679 cpus[cpu_unit_idx].switches.init_enable [7] = false;
680 cpus[cpu_unit_idx].switches.store_size [7] = 3;
681 }
682 cpus[cpu_unit_idx].switches.enable [1] = true;
683
684 #if defined(THREADZ) || defined(LOCKLESS)
685 if (cpus[cpu_unit_idx].executing) {
686 cpus[cpu_unit_idx].forceRestart = true;
687 wakeCPU (cpu_unit_idx);
688 } else {
689 cpu_reset_unit_idx ((uint) cpu_unit_idx, false);
690
691 }
692 #else
693 cpu_reset_unit_idx ((uint) cpu_unit_idx, false);
694 simh_cpu_reset_and_clear_unit (cpu_unit + cpu_unit_idx, 0, NULL, NULL);
695 #endif
696
697 } else if (was && !v) {
698 cpus[cpu_unit_idx].switches = cpus[cpu_unit_idx].isolts_switches_save;
699
700 #if defined(THREADZ) || defined(LOCKLESS)
701 if (cpus[cpu_unit_idx].executing) {
702 cpus[cpu_unit_idx].forceRestart = true;
703 wakeCPU (cpu_unit_idx);
704 } else {
705 cpu_reset_unit_idx ((uint) cpu_unit_idx, false);
706
707 }
708 #else
709 cpu_reset_unit_idx ((uint) cpu_unit_idx, false);
710 simh_cpu_reset_and_clear_unit (cpu_unit + cpu_unit_idx, 0, NULL, NULL);
711 #endif
712
713 }
714 }
715 else if (strcmp (p, "nodis") == 0)
716 cpus[cpu_unit_idx].tweaks.nodis = v;
717 else if (strcmp (p, "l68_mode") == 0)
718 cpus[cpu_unit_idx].tweaks.l68_mode = v;
719 else
720 {
721 sim_warn ("error: cpu_set_config: Invalid cfg_parse rc <%ld>\r\n",
722 (long) rc);
723 cfg_parse_done (& cfg_state);
724 return SCPE_ARG;
725 }
726 }
727 cfg_parse_done (& cfg_state);
728
729 return SCPE_OK;
730 }
731
732 static t_stat cpu_show_nunits (UNUSED FILE * st, UNUSED UNIT * uptr,
733 UNUSED int val, UNUSED const void * desc)
734 {
735 sim_msg ("Number of CPUs in system is %d\r\n", cpu_dev.numunits);
736 return SCPE_OK;
737 }
738
739 static t_stat cpu_set_nunits (UNUSED UNIT * uptr, UNUSED int32 value,
740 const char * cptr, UNUSED void * desc)
741 {
742 if (! cptr)
743 return SCPE_ARG;
744 int n = atoi (cptr);
745 if (n < 1 || n > N_CPU_UNITS_MAX)
746 return SCPE_ARG;
747 cpu_dev.numunits = (uint32) n;
748 return SCPE_OK;
749 }
750
751 static t_stat cpu_show_kips (UNUSED FILE * st, UNUSED UNIT * uptr,
752 UNUSED int val, UNUSED const void * desc)
753 {
754 sim_msg ("CPU KIPS %lu\r\n", (unsigned long)kips);
755 return SCPE_OK;
756 }
757
758 static t_stat cpu_set_kips (UNUSED UNIT * uptr, UNUSED int32 value,
759 const char * cptr, UNUSED void * desc)
760 {
761 if (! cptr)
762 return SCPE_ARG;
763 long n = atol (cptr);
764 if (n < 1 || n > 4000000)
765 return SCPE_ARG;
766 kips = (uint64) n;
767 luf_limits[0] = 2000*kips/1000;
768 luf_limits[1] = 4000*kips/1000;
769 luf_limits[2] = 8000*kips/1000;
770 luf_limits[3] = 16000*kips/1000;
771 luf_limits[4] = 32000*kips/1000;
772 return SCPE_OK;
773 }
774
775 static t_stat cpu_show_stall (UNUSED FILE * st, UNUSED UNIT * uptr,
776 UNUSED int val, UNUSED const void * desc)
777 {
778 if (! stall_point_active)
779 {
780 sim_printf ("No stall points\r\n");
781 return SCPE_OK;
782 }
783
784 sim_printf ("Stall points\r\n");
785 for (int i = 0; i < N_STALL_POINTS; i ++)
786 if (stall_points[i].segno || stall_points[i].offset)
787 {
788 #if defined(WIN_STDIO)
789 sim_printf ("%2ld %05o:%06o %10lu\r\n",
790 #else
791 sim_printf ("%2ld %05o:%06o %'10lu\r\n",
792 #endif
793 (long)i, stall_points[i].segno, stall_points[i].offset,
794 (unsigned long)stall_points[i].time);
795 }
796 return SCPE_OK;
797 }
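/* Set a stall point.  The parser below accepts the form
 *     <slot>=<segno>:<offset>=<time>
 * where <slot> selects an entry in stall_points[] (0..N_STALL_POINTS-1),
 * <segno> and <offset> are octal, and <time> is a delay value between
 * 0 and 30000000.  A sketch of the intended console usage, with
 * hypothetical values (the command form follows from the MTAB "STALL"
 * entry further below):
 *     SET CPU STALL=0=123:4567=1000
 */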
805 static t_stat cpu_set_stall (UNUSED UNIT * uptr, UNUSED int32 value,
806 const char * cptr, UNUSED void * desc)
807 {
808 if (! cptr)
809 return SCPE_ARG;
810
811 long n, s, o, t;
812
813 char * end;
814 n = strtol (cptr, & end, 0);
815 if (* end != '=')
816 return SCPE_ARG;
817 if (n < 0 || n >= N_STALL_POINTS)
818 return SCPE_ARG;
819
820 s = strtol (end + 1, & end, 8);
821 if (* end != ':')
822 return SCPE_ARG;
823 if (s < 0 || s > MASK15)
824 return SCPE_ARG;
825
826 o = strtol (end + 1, & end, 8);
827 if (* end != '=')
828 return SCPE_ARG;
829 if (o < 0 || o > MASK18)
830 return SCPE_ARG;
831
832 t = strtol (end + 1, & end, 0);
833 if (* end != 0)
834 return SCPE_ARG;
835 if (t < 0 || t > 30000000)
836 return SCPE_ARG;
837
838 stall_points[n].segno = (word15) s;
839 stall_points[n].offset = (word18) o;
840 stall_points[n].time = (unsigned int) t;
841 stall_point_active = false;
842
843 for (int i = 0; i < N_STALL_POINTS; i ++)
844       if (stall_points[i].segno && stall_points[i].offset)
845 stall_point_active = true;
846
847 return SCPE_OK;
848 }
849
850 static t_stat setCPUConfigL68 (UNIT * uptr, UNUSED int32 value, UNUSED const char * cptr, UNUSED void * desc) {
851 long cpuUnitIdx = UNIT_IDX (uptr);
852 if (cpuUnitIdx < 0 || cpuUnitIdx >= N_CPU_UNITS_MAX)
853 return SCPE_ARG;
854 cpu_state_t * cpun = cpus + cpuUnitIdx;
855
856 cpun->tweaks.l68_mode = 1;
857 cpun->options.hex_mode_installed = 0;
858 for (uint port_num = 0; port_num < N_DPS8M_CPU_PORTS; port_num ++) {
859 cpun->switches.assignment[port_num] = port_num;
860 cpun->switches.interlace[port_num] = 0;
861 cpun->switches.store_size[port_num] = 2;
862 cpun->switches.enable[port_num] = 1;
863 cpun->switches.init_enable[port_num] = 1;
864 }
865 for (uint port_num = N_DPS8M_CPU_PORTS; port_num < N_L68_CPU_PORTS; port_num ++) {
866 cpun->switches.assignment[port_num] = 0;
867 cpun->switches.interlace[port_num] = 0;
868 cpun->switches.store_size[port_num] = 0;
869 cpun->switches.enable[port_num] = 0;
870 cpun->switches.init_enable[port_num] = 0;
871 }
872 return SCPE_OK;
873 }
874
875 static t_stat setCPUConfigDPS8M (UNIT * uptr, UNUSED int32 value, UNUSED const char * cptr, UNUSED void * desc) {
876 long cpuUnitIdx = UNIT_IDX (uptr);
877 if (cpuUnitIdx < 0 || cpuUnitIdx >= N_CPU_UNITS_MAX)
878 return SCPE_ARG;
879 cpu_state_t * cpun = cpus + cpuUnitIdx;
880
881 cpun->tweaks.l68_mode = 0;
882 cpun->options.hex_mode_installed = 0;
883 for (uint port_num = 0; port_num < N_DPS8M_CPU_PORTS; port_num ++) {
884 cpun->switches.assignment[port_num] = port_num;
885 cpun->switches.interlace[port_num] = 0;
886 cpun->switches.store_size[port_num] = 7;
887 cpun->switches.enable[port_num] = 1;
888 cpun->switches.init_enable[port_num] = 1;
889 }
890 for (uint port_num = N_DPS8M_CPU_PORTS; port_num < N_L68_CPU_PORTS; port_num ++) {
891 cpun->switches.assignment[port_num] = 0;
892 cpun->switches.interlace[port_num] = 0;
893 cpun->switches.store_size[port_num] = 0;
894 cpun->switches.enable[port_num] = 0;
895 cpun->switches.init_enable[port_num] = 0;
896 }
897 return SCPE_OK;
898 }
899
900 char * cycle_str (cycles_e cycle)
901 {
902 switch (cycle)
903 {
904
905
906 case FAULT_cycle:
907 return "FAULT_cycle";
908 case EXEC_cycle:
909 return "EXEC_cycle";
910 case FAULT_EXEC_cycle:
911 return "FAULT_EXEC_cycle";
912 case INTERRUPT_cycle:
913 return "INTERRUPT_cycle";
914 case INTERRUPT_EXEC_cycle:
915 return "INTERRUPT_EXEC_cycle";
916 case FETCH_cycle:
917 return "FETCH_cycle";
918 case PSEUDO_FETCH_cycle:
919 return "PSEUDO_FETCH_cycle";
920 case SYNC_FAULT_RTN_cycle:
921 return "SYNC_FAULT_RTN_cycle";
922 default:
923 return "unknown cycle";
924 }
925 }
926
927 static void set_cpu_cycle (cpu_state_t * cpup, cycles_e cycle)
928 {
929 sim_debug (DBG_CYCLE, & cpu_dev, "Setting cycle to %s\r\n",
930 cycle_str (cycle));
931 cpu.cycle = cycle;
932 }
933
934
935
936 #define MEM_UNINITIALIZED (1LLU<<62)
937
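/* set_cpu_idx() selects which simulated CPU the calling context operates
   on: in THREADZ/LOCKLESS builds it updates the thread-local
   current_running_cpu_idx, and in all builds it points the convenience
   pointer _cpup at cpus[current_running_cpu_idx].  It returns the
   previous index so the caller can restore it, as cpu_reset_unit_idx()
   does. */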
938 uint set_cpu_idx (UNUSED uint cpu_idx)
939 {
940 uint prev = current_running_cpu_idx;
941 #if defined(THREADZ) || defined(LOCKLESS)
942 current_running_cpu_idx = cpu_idx;
943 #endif
944 _cpup = & cpus [current_running_cpu_idx];
945 return prev;
946 }
947
948 void cpu_reset_unit_idx (UNUSED uint cpun, bool clear_mem)
949 {
950 uint save = set_cpu_idx (cpun);
951 cpu_state_t * cpup = _cpup;
952 if (clear_mem)
953 {
954 for (uint i = 0; i < MEMSIZE; i ++)
955 {
956
957 #if defined(LOCKLESS)
958 M[i] = (M[i] & ~(MASK36 | MEM_LOCKED)) | MEM_UNINITIALIZED;
959 #else
960 M[i] = (M[i] & ~(MASK36)) | MEM_UNINITIALIZED;
961 #endif
962 }
963 }
964 cpu.rA = 0;
965 cpu.rQ = 0;
966
967 cpu.PPR.IC = 0;
968 cpu.PPR.PRR = 0;
969 cpu.PPR.PSR = 0;
970 cpu.PPR.P = 1;
971 cpu.RSDWH_R1 = 0;
972 cpu.rTR = MASK27;
973
974 if (cpu.tweaks.isolts_mode)
975 {
976 cpu.shadowTR = 0;
977 cpu.rTRlsb = 0;
978 }
979 cpu.rTRticks = 0;
980
981 set_addr_mode (cpup, ABSOLUTE_mode);
982 SET_I_NBAR;
983
984 cpu.CMR.luf = 3;
985 cpu.cu.SD_ON = cpu.switches.sdwam_enable ? 1 : 0;
986 cpu.cu.PT_ON = cpu.switches.ptwam_enable ? 1 : 0;
987
988 if (cpu.tweaks.nodis) {
989 set_cpu_cycle (cpup, FETCH_cycle);
990 } else {
991 set_cpu_cycle (cpup, EXEC_cycle);
992 cpu.cu.IWB = 0000000616200;
993 }
994 #if defined(PERF_STRIP)
995 set_cpu_cycle (cpup, FETCH_cycle);
996 #endif
997 cpu.wasXfer = false;
998 cpu.wasInhibited = false;
999
1000 cpu.interrupt_flag = false;
1001 cpu.g7_flag = false;
1002
1003 cpu.faultRegister [0] = 0;
1004 cpu.faultRegister [1] = 0;
1005
1006 #if defined(RAPRx)
1007 cpu.apu.lastCycle = UNKNOWN_CYCLE;
1008 #endif
1009
1010 (void)memset (& cpu.PPR, 0, sizeof (struct ppr_s));
1011
1012 setup_scbank_map (cpup);
1013
1014 tidy_cu (cpup);
1015 set_cpu_idx (save);
1016 }
1017
1018 static t_stat simh_cpu_reset_and_clear_unit (UNIT * uptr,
1019 UNUSED int32 value,
1020 UNUSED const char * cptr,
1021 UNUSED void * desc)
1022 {
1023 long cpu_unit_idx = UNIT_IDX (uptr);
1024 cpu_state_t * cpun = cpus + cpu_unit_idx;
1025 if (cpun->tweaks.isolts_mode)
1026 {
1027
1028 if (cpun->tweaks.useMap)
1029 {
1030 for (uint pgnum = 0; pgnum < N_SCBANKS; pgnum ++)
1031 {
1032 int base = cpun->sc_addr_map [pgnum];
1033 if (base < 0)
1034 continue;
1035 for (uint addr = 0; addr < SCBANK_SZ; addr ++)
1036 M [addr + (uint) base] = MEM_UNINITIALIZED;
1037 }
1038 }
1039 }
1040
1041 cpu_reset_unit_idx ((uint) cpu_unit_idx, false);
1042 return SCPE_OK;
1043 }
1044
1045 static t_stat simh_cpu_reset_unit (UNIT * uptr,
1046 UNUSED int32 value,
1047 UNUSED const char * cptr,
1048 UNUSED void * desc)
1049 {
1050 long cpu_unit_idx = UNIT_IDX (uptr);
1051 cpu_reset_unit_idx ((uint) cpu_unit_idx, false);
1052 return SCPE_OK;
1053 }
1054
1055 #if !defined(PERF_STRIP)
1056 static uv_loop_t * ev_poll_loop;
1057 static uv_timer_t ev_poll_handle;
1058 #endif
1059
1060 static MTAB cpu_mod[] =
1061 {
1062 {
1063 MTAB_unit_value,
1064 0,
1065 "CONFIG",
1066 "CONFIG",
1067 cpu_set_config,
1068 cpu_show_config,
1069 NULL,
1070 NULL
1071 },
1072
1073
1074
1075 {
1076 MTAB_unit_value,
1077 0,
1078 "RESET",
1079 "RESET",
1080 simh_cpu_reset_unit,
1081 NULL,
1082 NULL,
1083 NULL
1084 },
1085
1086 {
1087 MTAB_unit_value,
1088 0,
1089 "INITIALIZE",
1090 "INITIALIZE",
1091 simh_cpu_reset_unit,
1092 NULL,
1093 NULL,
1094 NULL
1095 },
1096
1097
1098
1099 {
1100 MTAB_unit_value,
1101 0,
1102 "INITIALIZEANDCLEAR",
1103 "INITIALIZEANDCLEAR",
1104 simh_cpu_reset_and_clear_unit,
1105 NULL,
1106 NULL,
1107 NULL
1108 },
1109
1110 {
1111 MTAB_unit_value,
1112 0,
1113 "IAC",
1114 "IAC",
1115 simh_cpu_reset_and_clear_unit,
1116 NULL,
1117 NULL,
1118 NULL
1119 },
1120
1121 {
1122 MTAB_dev_value,
1123 0,
1124 "NUNITS",
1125 "NUNITS",
1126 cpu_set_nunits,
1127 cpu_show_nunits,
1128 NULL,
1129 NULL
1130 },
1131
1132 {
1133 MTAB_dev_value,
1134 0,
1135 "KIPS",
1136 "KIPS",
1137 cpu_set_kips,
1138 cpu_show_kips,
1139 NULL,
1140 NULL
1141 },
1142
1143 {
1144 MTAB_dev_value,
1145 0,
1146 "STALL",
1147 "STALL",
1148 cpu_set_stall,
1149 cpu_show_stall,
1150 NULL,
1151 NULL
1152 },
1153
1154 {
1155 MTAB_unit_value,
1156 0,
1157 "DPS8M",
1158 "DPS8M",
1159 setCPUConfigDPS8M,
1160 NULL,
1161 NULL,
1162 NULL
1163 },
1164
1165 {
1166 MTAB_unit_value,
1167 0,
1168 "L68",
1169 "L68",
1170 setCPUConfigL68,
1171 NULL,
1172 NULL,
1173 NULL
1174 },
1175
1176 { 0, 0, NULL, NULL, NULL, NULL, NULL, NULL }
1177 };
1178
1179 static DEBTAB cpu_dt[] =
1180 {
1181 { "TRACE", DBG_TRACE, NULL },
1182 { "TRACEEXT", DBG_TRACEEXT, NULL },
1183 { "MESSAGES", DBG_MSG, NULL },
1184
1185 { "REGDUMPAQI", DBG_REGDUMPAQI, NULL },
1186 { "REGDUMPIDX", DBG_REGDUMPIDX, NULL },
1187 { "REGDUMPPR", DBG_REGDUMPPR, NULL },
1188 { "REGDUMPPPR", DBG_REGDUMPPPR, NULL },
1189 { "REGDUMPDSBR", DBG_REGDUMPDSBR, NULL },
1190 { "REGDUMPFLT", DBG_REGDUMPFLT, NULL },
1191 { "REGDUMP", DBG_REGDUMP, NULL },
1192
1193 { "ADDRMOD", DBG_ADDRMOD, NULL },
1194 { "APPENDING", DBG_APPENDING, NULL },
1195
1196 { "NOTIFY", DBG_NOTIFY, NULL },
1197 { "INFO", DBG_INFO, NULL },
1198 { "ERR", DBG_ERR, NULL },
1199 { "WARN", DBG_WARN, NULL },
1200 { "DEBUG", DBG_DEBUG, NULL },
1201 { "ALL", DBG_ALL, NULL },
1202
1203 { "FAULT", DBG_FAULT, NULL },
1204 { "INTR", DBG_INTR, NULL },
1205 { "CORE", DBG_CORE, NULL },
1206 { "CYCLE", DBG_CYCLE, NULL },
1207 { "CAC", DBG_CAC, NULL },
1208 { "FINAL", DBG_FINAL, NULL },
1209 { "AVC", DBG_AVC, NULL },
1210 { NULL, 0, NULL }
1211 };
1212
1213
1214 const char *sim_stop_messages[] =
1215 {
1216 "Unknown error",
1217 "Simulation stop",
1218 "Breakpoint",
1219 };
1249 #if !defined(SPEED)
1250 static bool watch_bits [MEMSIZE];
1251 #endif
1252
1253 char * str_SDW0 (char * buf, sdw0_s * SDW)
1254 {
1255 (void)sprintf (buf, "ADDR=%06o R1=%o R2=%o R3=%o F=%o FC=%o BOUND=%o R=%o "
1256 "E=%o W=%o P=%o U=%o G=%o C=%o EB=%o",
1257 SDW->ADDR, SDW->R1, SDW->R2, SDW->R3, SDW->DF,
1258 SDW->FC, SDW->BOUND, SDW->R, SDW->E, SDW->W,
1259 SDW->P, SDW->U, SDW->G, SDW->C, SDW->EB);
1260 return buf;
1261 }
1262
1263 static t_stat cpu_boot (UNUSED int32 cpu_unit_idx, UNUSED DEVICE * dptr)
1264 {
1265 sim_warn ("Try 'BOOT IOMn'\r\n");
1266 return SCPE_ARG;
1267 }
1291 #define ZONE_SZ (MEM_SIZE_MAX / 4)
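/* setup_scbank_map() rebuilds the per-CPU memory map.  For every enabled
   and cabled CPU port it derives the attached store size from the port's
   store_size switch, then records, for each SCBANK_SZ-word bank covered
   by that store, the simulated memory offset (sc_addr_map) and the
   serving port number (sc_scu_map).  Each port's memory occupies its own
   ZONE_SZ region of M[], and overlapping bank assignments are reported
   with sim_warn rather than silently merged.  lookup_cpu_mem_map() below
   returns the sc_scu_map entry for a 24-bit address, or -1 if the
   address lies beyond the mapped banks. */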
1305 void setup_scbank_map (cpu_state_t * cpup)
1306 {
1307
1308 for (uint pg = 0; pg < N_SCBANKS; pg ++)
1309 {
1310 cpu.sc_addr_map [pg] = -1;
1311 cpu.sc_scu_map [pg] = -1;
1312 }
1313 for (uint u = 0; u < N_SCU_UNITS_MAX; u ++)
1314 cpu.sc_num_banks[u] = 0;
1315
1316
1317 for (int port_num = 0; port_num < (cpu.tweaks.l68_mode ? N_L68_CPU_PORTS : N_DPS8M_CPU_PORTS); port_num ++)
1318 {
1319
1320 if (! cpu.switches.enable [port_num])
1321 continue;
1322
1323
1324
1325
1326 if (! cables->cpu_to_scu[current_running_cpu_idx][port_num].in_use)
1327 {
1328 continue;
1329 }
1330
1331
1332 uint store_size = cpu.switches.store_size [port_num];
1333 uint dps8m_store_table [8] =
1334 { 32768, 65536, 131072, 262144, 524288, 1048576, 2097152, 4194304 };
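        /* These tables translate the 3-bit STORE SIZE switch setting into a
           memory size in words.  The DPS8/M encoding is a simple
           power-of-two progression; the L68 encoding (and its ISOLTS
           variant, which differs only in entry 3) uses a different
           ordering, mirroring the value remapping done in cpu_set_config()
           for "store_size" values 8..15. */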
1349 uint l68_store_table [8] =
1350 { 32768, 65536, 4194304, 131072, 524288, 1048576, 2097152, 262144 };
1351 uint l68_isolts_store_table [8] =
1352 { 32768, 65536, 4194304, 65536, 524288, 1048576, 2097152, 262144 };
1353
1354 uint sz_wds =
1355 cpu.tweaks.l68_mode ?
1356 cpu.tweaks.isolts_mode ?
1357 l68_isolts_store_table [store_size] :
1358 l68_store_table [store_size] :
1359 dps8m_store_table [store_size];
1360
1361
1362 uint base_addr_wds = sz_wds * cpu.switches.assignment[port_num];
1363
1364
1365 uint num_banks = sz_wds / SCBANK_SZ;
1366 cpu.sc_num_banks[port_num] = num_banks;
1367 uint base_addr_bks = base_addr_wds / SCBANK_SZ;
1368
1369
1370 for (uint pg = 0; pg < num_banks; pg ++)
1371 {
1372
1373 uint addr_bks = base_addr_bks + pg;
1374
1375 if (addr_bks < N_SCBANKS)
1376 {
1377
1378 if (cpu.sc_addr_map [addr_bks] != -1)
1379 {
1380 sim_warn ("scbank overlap addr_bks %d (%o) old port %d "
1381 "newport %d\r\n",
1382 addr_bks, addr_bks, cpu.sc_addr_map [addr_bks], port_num);
1383 }
1384 else
1385 {
1386
1387 cpu.sc_addr_map[addr_bks] = (int)((int)port_num * (int)ZONE_SZ + (int)pg * (int)SCBANK_SZ);
1388 cpu.sc_scu_map[addr_bks] = port_num;
1389 }
1390 }
1391 else
1392 {
1393 sim_warn ("addr_bks too big port %d addr_bks %d (%o), "
1394 "limit %d (%o)\r\n",
1395 port_num, addr_bks, addr_bks, N_SCBANKS, N_SCBANKS);
1396 }
1397 }
1398
1399 }
1400
1401
1402
1403 }
1404
1405 int lookup_cpu_mem_map (cpu_state_t * cpup, word24 addr)
1406 {
1407 uint scpg = addr / SCBANK_SZ;
1408 if (scpg < N_SCBANKS)
1409 {
1410 return cpu.sc_scu_map[scpg];
1411 }
1412 return -1;
1413 }
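/* get_serial_number() reads an optional ./serial.txt file.  Each line is
   either "sn: <number>" (serial number for the current CPU) or
   "sn<cpu>: <number>" (serial number for a specific CPU unit); anything
   else is ignored.  If no serial number is found, a registration
   reminder is printed unless the simulator is running quietly. */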
1423 #if !defined(PERF_STRIP)
1424 static void get_serial_number (cpu_state_t * cpup)
1425 {
1426 bool havesn = false;
1427 FILE * fp = fopen ("./serial.txt", "r");
1428 while (fp && ! feof (fp))
1429 {
1430 char buffer [81] = "";
1431 # if !defined(__clang_analyzer__)
1432 char * checksn = fgets (buffer, sizeof (buffer), fp);
1433 (void)checksn;
1434 # endif
1435 uint cpun, sn;
1436 if (sscanf (buffer, "sn: %u", & sn) == 1)
1437 {
1438 if (cpu.switches.serno)
1439 sim_msg ("\r\nReplacing CPU serial number:\r\n");
1440 cpu.switches.serno = sn;
1441 if (!sim_quiet)
1442 {
1443 sim_msg ("%s CPU serial number: %u\r\n", sim_name, cpu.switches.serno);
1444 }
1445 havesn = true;
1446 }
1447 else if (sscanf (buffer, "sn%u: %u", & cpun, & sn) == 2)
1448 {
1449 if (cpun < N_CPU_UNITS_MAX)
1450 {
1451 if (cpus[cpun].switches.serno)
1452 sim_msg ("\r\nReplacing CPU %u serial number:\r\n", cpun);
1453 cpus[cpun].switches.serno = sn;
1454 if (!sim_quiet)
1455 {
1456 sim_msg ("%s CPU %u serial number: %u\r\n",
1457 sim_name, cpun, cpus[cpun].switches.serno);
1458 }
1459 havesn = true;
1460 }
1461 }
1462 }
1463 if (!havesn)
1464 {
1465 if (!sim_quiet)
1466 {
1467 sim_msg ("\r\nPlease register your system at https://dps8m.gitlab.io/register/\r\n");
1468 sim_msg ("or create the file 'serial.txt' containing the line 'sn: 0'.\r\n\r\n");
1469 }
1470 }
1471 if (fp)
1472 fclose (fp);
1473 }
1474 #endif
1475
1476 #if defined(STATS)
1477 static void do_stats (void)
1478 {
1479 static struct timespec stats_time;
1480 static bool first = true;
1481 if (first)
1482 {
1483 first = false;
1484 clock_gettime (CLOCK_BOOTTIME, & stats_time);
1485 sim_msg ("stats started\r\n");
1486 }
1487 else
1488 {
1489 struct timespec now, delta;
1490 clock_gettime (CLOCK_BOOTTIME, & now);
1491 timespec_diff (& stats_time, & now, & delta);
1492 stats_time = now;
1493 sim_msg ("stats %6ld.%02ld\r\n", delta.tv_sec,
1494 delta.tv_nsec / 10000000);
1495
1496 sim_msg ("Instruction counts\r\n");
1497 for (uint i = 0; i < 8; i ++)
1498 {
1499 # if defined(WIN_STDIO)
1500 sim_msg (" %9lld\r\n", (long long int) cpus[i].instrCnt);
1501 # else
1502 sim_msg (" %'9lld\r\n", (long long int) cpus[i].instrCnt);
1503 # endif
1504 cpus[i].instrCnt = 0;
1505 }
1506 sim_msg ("\r\n");
1507 }
1508 }
1509 #endif
1510
1511
1512
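/* ev_poll_cb() is the libuv timer callback started by cpu_init(); it runs
   every sys_poll_interval and services the "fast" devices that are
   compiled in (FNP, socket device, console, IOM payload channel, ABSI,
   MGP, panel).  A counter gates the slower work -- card reader polling,
   optional statistics, and the instruction-count snapshot -- to once
   every sys_slow_poll_interval ticks. */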
1513 #if !defined(PERF_STRIP)
1514 static void ev_poll_cb (UNUSED uv_timer_t * handle)
1515 {
1516 cpu_state_t * cpup = _cpup;
1517
1518
1519 static uint oneHz = 0;
1520 if (oneHz ++ >= sys_opts.sys_slow_poll_interval)
1521 {
1522 oneHz = 0;
1523 rdrProcessEvent ();
1524 # if defined(STATS)
1525 do_stats ();
1526 # endif
1527 cpu.instrCntT0 = cpu.instrCntT1;
1528 cpu.instrCntT1 = cpu.instrCnt;
1529 }
1530 fnpProcessEvent ();
1531 # if defined(WITH_SOCKET_DEV)
1532 # if !defined(__MINGW64__) && !defined(__MINGW32__) && !defined(CROSS_MINGW32) && !defined(CROSS_MINGW64)
1533 sk_process_event ();
1534 # endif
1535 # endif
1536 consoleProcess ();
1537 # if defined(IO_ASYNC_PAYLOAD_CHAN)
1538 iomProcess ();
1539 # endif
1540 # if defined(WITH_ABSI_DEV)
1541 # if !defined(__MINGW32__) && !defined(__MINGW64__) && !defined(CROSS_MINGW32) && !defined(CROSS_MINGW64)
1542 absi_process_event ();
1543 # endif
1544 # endif
1545 # if defined(WITH_MGP_DEV)
1546 # if !defined(__MINGW32__) && !defined(__MINGW64__) && !defined(CROSS_MINGW32) && !defined(CROSS_MINGW64)
1547 mgp_process_event ();
1548 # endif
1549 # endif
1550 PNL (panel_process_event ());
1551 }
1552 #endif
1553
1554
1555
1556 void cpu_init (void)
1557 {
1558
1559
1560
1561 M = system_state->M;
1562 #if defined(M_SHARED)
1563 cpus = system_state->cpus;
1564 #endif
1565
1566 #if !defined(SPEED)
1567 (void)memset (& watch_bits, 0, sizeof (watch_bits));
1568 #endif
1569
1570 set_cpu_idx (0);
1571
1572 (void)memset (cpus, 0, sizeof (cpu_state_t) * N_CPU_UNITS_MAX);
1573
1574 #if !defined(PERF_STRIP)
1575 get_serial_number (_cpup);
1576
1577 ev_poll_loop = uv_default_loop ();
1578 uv_timer_init (ev_poll_loop, & ev_poll_handle);
1579
1580 uv_timer_start (& ev_poll_handle, ev_poll_cb, sys_opts.sys_poll_interval, sys_opts.sys_poll_interval);
1581 #endif
1582
1583
1584 cpu_state_t * cpup = _cpup;
1585
1586 cpu.instrCnt = 0;
1587 cpu.cycleCnt = 0;
1588 for (int i = 0; i < N_FAULTS; i ++)
1589 cpu.faultCnt [i] = 0;
1590
1591 #if defined(MATRIX)
1592 initializeTheMatrix ();
1593 #endif
1594 }
1595
1596 static void cpu_reset (void)
1597 {
1598 for (uint i = 0; i < N_CPU_UNITS_MAX; i ++)
1599 {
1600 cpu_reset_unit_idx (i, true);
1601 }
1602
1603 set_cpu_idx (0);
1604
1605 #if defined(TESTING)
1606 cpu_state_t * cpup = _cpup;
1607 sim_debug (DBG_INFO, & cpu_dev, "CPU reset: Running\r\n");
1608 #endif
1609 }
1610
1611 static t_stat sim_cpu_reset (UNUSED DEVICE *dptr)
1612 {
1613
1614
1615
1616
1617
1618 cpu_reset ();
1619 return SCPE_OK;
1620 }
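/* cpu_ex() and cpu_dep() are the SCP examine/deposit hooks wired into
   cpu_dev below: they bounds-check the address against MEMSIZE and mask
   the word to 36 bits (DMASK) on the way in or out. */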
1630 static t_stat cpu_ex (t_value *vptr, t_addr addr, UNUSED UNIT * uptr,
1631 UNUSED int32 sw)
1632 {
1633     if (addr >= MEMSIZE)
1634 return SCPE_NXM;
1635 if (vptr != NULL)
1636 {
1637 *vptr = M[addr] & DMASK;
1638 }
1639 return SCPE_OK;
1640 }
1641
1642
1643
1644 static t_stat cpu_dep (t_value val, t_addr addr, UNUSED UNIT * uptr,
1645 UNUSED int32 sw)
1646 {
1647 if (addr >= MEMSIZE) return SCPE_NXM;
1648 M[addr] = val & DMASK;
1649 return SCPE_OK;
1650 }
1651
1652
1653
1654
1655
1656 #if defined(M_SHARED)
1657
1658 static word18 dummy_IC;
1659 #endif
1660
1661 static REG cpu_reg[] =
1662 {
1663
1664 #if defined(M_SHARED)
1665 { ORDATA (IC, dummy_IC, VASIZE), 0, 0, 0 },
1666 #else
1667 { ORDATA (IC, cpus[0].PPR.IC, VASIZE), 0, 0, 0 },
1668 #endif
1669 { NULL, NULL, 0, 0, 0, 0, NULL, NULL, 0, 0, 0 }
1670 };
1671
1672
1673
1674
1675
1676 REG *sim_PC = & cpu_reg[0];
1677
1678
1679
1680 DEVICE cpu_dev =
1681 {
1682 "CPU",
1683 cpu_unit,
1684 cpu_reg,
1685 cpu_mod,
1686 N_CPU_UNITS,
1687 8,
1688 PASIZE,
1689 1,
1690 8,
1691 36,
1692 & cpu_ex,
1693 & cpu_dep,
1694 & sim_cpu_reset,
1695 & cpu_boot,
1696 NULL,
1697 NULL,
1698 NULL,
1699 DEV_DEBUG,
1700 0,
1701 cpu_dt,
1702 NULL,
1703 NULL,
1704 NULL,
1705 NULL,
1706 NULL,
1707 NULL,
1708 NULL
1709 };
1710
1711 #if defined(M_SHARED)
1712 cpu_state_t * cpus = NULL;
1713 #else
1714 cpu_state_t cpus [N_CPU_UNITS_MAX];
1715 #endif
1716 #if defined(THREADZ) || defined(LOCKLESS)
1717 __thread cpu_state_t * restrict _cpup;
1718 #else
1719 cpu_state_t * restrict _cpup;
1720 #endif
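/* get_highest_intr() scans the SCUs whose execute-interrupt-present (XIP)
   flag is set and returns the interrupt pair address reported by
   scu_get_highest_intr(); a return value of 1 means no interrupt was
   found.  sample_interrupts() simply reports whether any SCU has XIP
   set, resetting the lockup-fault counter as a side effect. */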
1728 static uint get_highest_intr (cpu_state_t *cpup)
1729 {
1730 uint fp = 1;
1731 for (uint scu_unit_idx = 0; scu_unit_idx < N_SCU_UNITS_MAX; scu_unit_idx ++)
1732 {
1733 if (cpu.events.XIP [scu_unit_idx])
1734 {
1735 fp = scu_get_highest_intr (scu_unit_idx);
1736 if (fp != 1)
1737 break;
1738 }
1739 }
1740 return fp;
1741 }
1742
1743 bool sample_interrupts (cpu_state_t * cpup)
1744 {
1745 cpu.lufCounter = 0;
1746 for (uint scu_unit_idx = 0; scu_unit_idx < N_SCU_UNITS_MAX; scu_unit_idx ++)
1747 {
1748 if (cpu.events.XIP [scu_unit_idx])
1749 {
1750 return true;
1751 }
1752 }
1753 return false;
1754 }
1755
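/* simh_hooks() is the per-cycle SCP housekeeping: it honors a console
   stop request when breakEnable is set, runs queued SCP events once
   sim_interval has expired (skipped in ISOLTS mode, and an SCPE_STOP
   from event processing is ignored when breakEnable is off), and, in
   the single-threaded build, tests execution breakpoints against the
   current location packed as (PPR.PSR << 18) | PPR.IC. */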
1756 t_stat simh_hooks (cpu_state_t * cpup)
1757 {
1758 int reason = 0;
1759
1760 if (breakEnable && stop_cpu)
1761 return STOP_STOP;
1762
1763 if (cpu.tweaks.isolts_mode == 0)
1764 {
1765
1766 if (sim_interval <= 0)
1767 {
1768 reason = sim_process_event ();
1769 if ((! breakEnable) && reason == SCPE_STOP)
1770 reason = SCPE_OK;
1771 if (reason)
1772 return reason;
1773 }
1774 }
1775
1776 sim_interval --;
1777
1778 #if !defined(THREADZ) && !defined(LOCKLESS)
1779
1780
1781
1782 if (sim_brk_summ &&
1783 sim_brk_test ((cpu.PPR.IC & 0777777) |
1784 ((((t_addr) cpu.PPR.PSR) & 037777) << 18),
1785 SWMASK ('E')))
1786 return STOP_BKPT;
1787 # if !defined(SPEED)
1788 if (sim_deb_break && cpu.cycleCnt >= sim_deb_break)
1789 return STOP_BKPT;
1790 # endif
1791 #endif
1792
1793 return reason;
1794 }
1795
1796 #if defined(PANEL68)
1797 static void panel_process_event (void)
1798 {
1799 cpu_state_t * cpup = _cpup;
1800
1801 if (cpu.panelInitialize && cpu.DATA_panel_s_trig_sw == 0)
1802 {
1803
1804 while (cpu.panelInitialize)
1805 ;
1806 if (cpu.DATA_panel_init_sw)
1807 cpu_reset_unit_idx (ASSUME0, true);
1808 else
1809 cpu_reset_unit_idx (ASSUME0, false);
1810
1811 do_boot ();
1812 }
1813
1814 if (cpu.DATA_panel_s_trig_sw == 0 &&
1815 cpu.DATA_panel_execute_sw &&
1816 cpu.DATA_panel_scope_sw &&
1817 cpu.DATA_panel_exec_sw == 0)
1818
1819 {
1820
1821 while (cpu.DATA_panel_execute_sw)
1822 ;
1823
1824 if (cpu.DATA_panel_exec_sw)
1825 {
1826 cpu_reset_unit_idx (ASSUME0, false);
1827 cpu.cu.IWB = cpu.switches.data_switches;
1828 set_cpu_cycle (cpup, EXEC_cycle);
1829 }
1830 else
1831 {
1832 setG7fault (current_running_cpu_idx, FAULT_EXF);
1833 }
1834 }
1835 }
1836 #endif
1837
1838 #if defined(THREADZ) || defined(LOCKLESS)
1839 bool bce_dis_called = false;
1840
1841
1842 t_stat sim_instr (void)
1843 {
1844 cpu_state_t * cpup = _cpup;
1845 t_stat reason = 0;
1891 if (cpuThreadz[0].run == false)
1892 createCPUThread (0);
1893 do
1894 {
1895
1896 reason = simh_hooks (cpup);
1897 if (reason)
1898 {
1899 break;
1900 }
1928 if (bce_dis_called) {
1929
1930 reason = STOP_STOP;
1931 break;
1932 }
1933
1934 # if !defined(PERF_STRIP)
1935
1936
1937 # if defined(LOCKLESS)
1938 lock_iom();
1939 # endif
1940 lock_libuv ();
1941 uv_run (ev_poll_loop, UV_RUN_NOWAIT);
1942 unlock_libuv ();
1943 # if defined(LOCKLESS)
1944 unlock_iom();
1945 # endif
1946 PNL (panel_process_event ());
1947
1948 int con_unit_idx = check_attn_key ();
1949 if (con_unit_idx != -1)
1950 console_attn_idx (con_unit_idx);
1951 # endif
1952
1953 # if defined(IO_ASYNC_PAYLOAD_CHAN_THREAD)
1954 struct timespec next_time;
1955 clock_gettime (CLOCK_REALTIME, & next_time);
1956 next_time.tv_nsec += 1000l * 1000l;
1957 if (next_time.tv_nsec >= 1000l * 1000l *1000l)
1958 {
1959 next_time.tv_nsec -= 1000l * 1000l *1000l;
1960 next_time.tv_sec += (time_t) 1;
1961 }
1962 struct timespec new_time;
1963 do
1964 {
1965 pthread_mutex_lock (& iom_start_lock);
1966 pthread_cond_timedwait (& iomCond,
1967 & iom_start_lock,
1968 & next_time);
1969 pthread_mutex_unlock (& iom_start_lock);
1970 lock_iom();
1971 lock_libuv ();
1972
1973 iomProcess ();
1974
1975 unlock_libuv ();
1976 unlock_iom ();
1977
1978 clock_gettime (CLOCK_REALTIME, & new_time);
1979 }
1980 while ((next_time.tv_sec == new_time.tv_sec) ? (next_time.tv_nsec > new_time.tv_nsec) : \
1981 (next_time.tv_sec > new_time.tv_sec));
1982 # else
1983 sim_usleep (1000);
1984 # endif
1985 }
1986 while (reason == 0);
1987
1988 for (uint cpuNo = 0; cpuNo < N_CPU_UNITS_MAX; cpuNo ++) {
1989 cpuStats (cpuNo);
1990 }
1991
1992 # if defined(TESTING)
1993 HDBGPrint ();
1994 # endif
1995 return reason;
1996 }
1997 #endif
1998
1999 #if !defined(THREADZ) && !defined(LOCKLESS)
2000 static uint fast_queue_subsample = 0;
2001 #endif
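/* cpu_thread_main() is the entry point for each simulated CPU's worker
   thread (THREADZ/LOCKLESS builds).  It records the thread id, raises
   the thread priority (real-time when available, otherwise "above
   normal"), names the thread "CPU A".."CPU H" for logging, warns when
   the combined supervisor and CPU thread count exceeds the host's
   available parallelism or physical core count, installs the signal
   handlers, and then runs threadz_sim_instr() until the CPU stops. */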
2051 #if defined(THREADZ) || defined(LOCKLESS)
2052 void * cpu_thread_main (void * arg)
2053 {
2054 int myid = * (int *) arg;
2055 set_cpu_idx ((uint) myid);
2056 unsigned char umyid = (unsigned char)toupper('a' + (int)myid);
2057 char thread_name[SIR_MAXPID] = {0};
2058 char temp_thread_name[SIR_MAXPID] = {0};
2059
2060 _cpup->thread_id = pthread_self();
2061
2062 if (realtime_ok) {
2063 set_realtime_priority (pthread_self(), realtime_max_priority() - 1);
2064 check_realtime_priority (pthread_self(), realtime_max_priority() - 1);
2065 } else {
2066 # if !defined(__QNX__)
2067 (void)sim_os_set_thread_priority (PRIORITY_ABOVE_NORMAL);
2068 # endif
2069 }
2070 _sir_snprintf_trunc(thread_name, SIR_MAXPID, "CPU %c", (unsigned int)umyid);
2071 if (!_sir_setthreadname(thread_name) || !_sir_getthreadname(temp_thread_name))
2072 (void)sir_info ("%s thread created (TID " SIR_TIDFORMAT ")",
2073 thread_name, PID_CAST _sir_gettid());
2074 else
2075 (void)sir_info ("Thread created (TID " SIR_TIDFORMAT ")",
2076 PID_CAST _sir_gettid());
2077 # if defined(TESTING) && defined(__APPLE__) && defined(__MACH__)
2078 (void)sir_info ("Mach thread ID: 0x%x", pthread_mach_thread_np(pthread_self()));
2079 # endif
2080 bool warned = false;
2081 if (realtime_ok) {
2082 if (myid + 2 > nprocs) {
2083 (void)sir_warn ("Total number of supervisor and CPU threads (%lu) exceeds available host parallelism (%lu)!",
2084 (unsigned long)(myid) + 2, (unsigned long)nprocs);
2085 warned = true;
2086 }
2087 if (!warned && nprocs >= 2 && ncores >= 1 && nprocs >= ncores && myid + 2 > ncores) {
2088 (void)sir_warn ("Total number of supervisor and CPU threads (%lu) exceeds physical host core count (%lu)!",
2089 (unsigned long)(myid) + 2, (unsigned long)ncores);
2090 }
2091 } else {
2092 if (myid + 1 > nprocs) {
2093 (void)sir_warn ("Total number of CPU threads (%lu) exceeds available host parallelism (%lu)!",
2094 (unsigned long)(myid) + 1, (unsigned long)nprocs);
2095 warned = true;
2096 }
2097 if (!warned && ncores >= 1 && nprocs >= ncores && myid + 1 > ncores) {
2098 (void)sir_warn ("Total number of CPU threads (%lu) exceeds physical host core count (%lu)!",
2099 (unsigned long)(myid) + 1, (unsigned long)ncores);
2100 }
2101 }
2102 setSignals ();
2103 threadz_sim_instr ();
2104 return NULL;
2105 }
2106 #endif
2107
2108 NO_RETURN
2109 static void do_LUF_fault (cpu_state_t * cpup)
2110 {
2111 CPT (cpt1U, 16);
2112 cpu.lufCounter = 0;
2113 cpu.lufOccurred = false;
2132 if (cpu.tweaks.isolts_mode)
2133 cpu.shadowTR = (word27) cpu.TR0 - (1024u << (is_priv_mode (cpup) ? 4 : cpu.CMR.luf));
2146 doFault (FAULT_LUF, fst_zero, "instruction cycle lockup");
2147 }
2148
2149 #if !defined(THREADZ) && !defined(LOCKLESS)
2150 # define threadz_sim_instr sim_instr
2151 #endif
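/* Temporary absolute mode is used while the CPU sequences an interrupt
   pair (see the INTERRUPT_cycle case below): set_temporary_absolute_mode()
   raises the secret_addressing_mode flag and clears cu.XSF, and
   clear_temporary_absolute_mode() drops the flag and returns the current
   value of cu.XSF. */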
2163 static void set_temporary_absolute_mode (cpu_state_t * cpup)
2164 {
2165 CPT (cpt1L, 20);
2166 cpu.secret_addressing_mode = true;
2167 cpu.cu.XSF = false;
2168 sim_debug (DBG_TRACEEXT, & cpu_dev, "set_temporary_absolute_mode bit 29 sets XSF to 0\r\n");
2169
2170 }
2171
2172 static bool clear_temporary_absolute_mode (cpu_state_t * cpup)
2173 {
2174 CPT (cpt1L, 21);
2175 cpu.secret_addressing_mode = false;
2176 return cpu.cu.XSF;
2177
2178 }
2179
2180 #if defined(THREADZ) || defined(LOCKLESS)
2181 static const int workAllocationQuantum = 64;
2182 static const int syncClockModePollRate = 64;
2183 static const int masterCycleCntlimit = 2048;
2184
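/* Sync-clock master handoff (THREADZ/LOCKLESS builds).  becomeClockMaster()
   makes the given CPU the clock master unless sync-clock mode is already
   active: it records syncClockModeMasterIdx, clears the other CPUs' work
   allocations, and asks every other CPU that is running Multics (and is
   not already a slave) to become one, using memory fences to order the
   published flags.  giveupClockMaster() tears the mode back down for all
   CPUs.  The quantum and poll-rate constants above parameterize the
   work-allocation scheme. */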
2185 void becomeClockMaster (uint cpuNum) {
2186
2187 # ifdef SYNCTEST
2188 sim_printf ("CPU%c %s entry\r\n", cpuNum + 'A', __func__);
2189 allocCount = 0;
2190 # endif
2191
2192
2193
2194
2195 if (syncClockMode) {
2196
2197
2198
2199 return;
2200 }
2201
2202 syncClockModeMasterIdx = cpuNum;
2203 cpu_state_t * cpup = & cpus[cpuNum];
2204 cpu.syncClockModeMaster = true;
2205 cpu.masterCycleCnt = 0;
2206 cpu.syncClockModeCache = true;
2207 for (int i = 0; i < N_CPU_UNITS_MAX; i ++) {
2208 if (i != cpuNum) {
2209 cpus[i].workAllocation = 0;
2210 __asm volatile ("");
2211 atomic_thread_fence (memory_order_seq_cst);
2212 if (cpus[i].inMultics && ! cpus[i].isSlave) {
2213 cpus[i].syncClockModePoll = 0;
2214 __asm volatile ("");
2215 atomic_thread_fence (memory_order_seq_cst);
2216 cpus[i].becomeSlave = true;
2217 __asm volatile ("");
2218 atomic_thread_fence (memory_order_seq_cst);
2219 }
2220 }
2221 }
2222
2223 __asm volatile ("");
2224 atomic_thread_fence (memory_order_seq_cst);
2225 syncClockMode = true;
2226
2227 __asm volatile ("");
2228 atomic_thread_fence (memory_order_seq_cst);
2229
2230 }
2231
2232 void giveupClockMaster (cpu_state_t * cpup) {
2233
2234 # ifdef SYNCTEST
2235
2236 sim_printf ("CPU%c %s entry\r\n", cpu.cpuIdx + 'A', __func__);
2237 sim_printf ("CPU%c Alloc count %d\r\n", cpu.cpuIdx + 'A', allocCount);
2238 # endif
2239 __asm volatile ("");
2240 cpu.syncClockModeMaster = false;
2241 __asm volatile ("");
2242 syncClockMode = false;
2243 __asm volatile ("");
2244 for (int i = 0; i < N_CPU_UNITS_MAX; i ++) {
2245 cpus[i].syncClockModeCache = false;
2246 }
2247 __asm volatile ("");
2248 atomic_thread_fence (memory_order_seq_cst);
2249
2250 }
2251 #endif
2252
2253 t_stat threadz_sim_instr (void)
2254 {
2255 cpu_state_t * cpup = _cpup;
2256
2257
2258 #if !defined(SCHED_NEVER_YIELD)
2259 unsigned long long lockYieldAll = 0;
2260 #endif
2261 unsigned long long lockWaitMaxAll = 0;
2262 unsigned long long lockWaitAll = 0;
2263 unsigned long long lockImmediateAll = 0;
2264 unsigned long long lockCntAll = 0;
2265 unsigned long long instrCntAll = 0;
2266 unsigned long long cycleCntAll = 0;
2267
2268 t_stat reason = 0;
2269
2270 #if !defined(THREADZ) && !defined(LOCKLESS)
2271 set_cpu_idx (0);
2272 # if defined(M_SHARED)
2273
2274
2275
2276
2277 cpus [0].PPR.IC = dummy_IC;
2278 # endif
2279
2280 #endif
2281
2282
2283 int val = setjmp (cpu.jmpMain);
2284
2285 switch (val)
2286 {
2287 case JMP_ENTRY:
2288 case JMP_REENTRY:
2289 reason = 0;
2290 break;
2291 case JMP_SYNC_FAULT_RETURN:
2292 set_cpu_cycle (cpup, SYNC_FAULT_RTN_cycle);
2293 break;
2294 case JMP_STOP:
2295 reason = STOP_STOP;
2296 goto leave;
2297 case JMP_REFETCH:
2305 cpu.wasXfer = false;
2306
2307 set_cpu_cycle (cpup, FETCH_cycle);
2308 break;
2309 case JMP_RESTART:
2310 set_cpu_cycle (cpup, EXEC_cycle);
2311 break;
2312 case JMP_FORCE_RESTART:
2313
2314
2315
2316
2317
2318
2319 cpu_reset_unit_idx (current_running_cpu_idx, false);
2320 #if defined(THREADZ) || defined(LOCKLESS)
2321
2322 if (syncClockMode && syncClockModeMasterIdx == current_running_cpu_idx)
2323 giveupClockMaster (cpup);
2324 #endif
2325 break;
2326 default:
2327 sim_warn ("longjmp value of %d unhandled\r\n", val);
2328 goto leave;
2329 }
2330
2331
2332
2333 DCDstruct * ci = & cpu.currentInstruction;
2334
2335 if (cpu.restart)
2336 {
2337 set_cpu_cycle (cpup, FAULT_cycle);
2338 }
2339
2340 #if defined(THREADZ) || defined(LOCKLESS)
2341
2342
2343
2344
2345 __asm volatile ("");
2346 cpu.executing = true;
2347 if (cpu.tweaks.isolts_mode) {
2348 ;
2349 } else {
2350 cpu.inMultics = true;
2351 }
2352 #endif
2353
2354 do
2355 {
2356
2357 reason = 0;
2358
2359 #if !defined(THREADZ) && !defined(LOCKLESS)
2360
2361 reason = simh_hooks (cpup);
2362 if (reason)
2363 {
2364 break;
2365 }
2374 if (fast_queue_subsample ++ > sys_opts.sys_poll_check_rate)
2375 {
2376 fast_queue_subsample = 0;
2377 # if defined(CONSOLE_FIX)
2378 # if defined(THREADZ) || defined(LOCKLESS)
2379 lock_libuv ();
2380 # endif
2381 # endif
2382 uv_run (ev_poll_loop, UV_RUN_NOWAIT);
2383 # if defined(CONSOLE_FIX)
2384 # if defined(THREADZ) || defined(LOCKLESS)
2385 unlock_libuv ();
2386 # endif
2387 # endif
2388 PNL (panel_process_event ());
2389 }
2390 #endif
2391
2392 cpu.cycleCnt ++;
2393
2394 #if defined(THREADZ)
2395
2396 unlock_mem_force ();
2397
2398
2399 cpuRunningWait ();
2400 #endif
2401 #if defined(LOCKLESS)
2402 core_unlock_all (cpup);
2403 #endif
2404
2405 #if !defined(LOCKLESS)
2406 int con_unit_idx = check_attn_key ();
2407 if (con_unit_idx != -1)
2408 console_attn_idx (con_unit_idx);
2409 #endif
2410
2411 #if !defined(THREADZ) && !defined(LOCKLESS)
2412 if (cpu.tweaks.isolts_mode)
2413 {
2414 if (cpu.cycle != FETCH_cycle)
2415 {
2416
2417 cpu.rTRlsb ++;
2418 if (cpu.rTRlsb >= 4)
2419 {
2420 cpu.rTRlsb = 0;
2421 cpu.shadowTR = (cpu.shadowTR - 1) & MASK27;
2422 if (cpu.shadowTR == 0)
2423 {
2424 if (cpu.tweaks.tro_enable)
2425 setG7fault (current_running_cpu_idx, FAULT_TRO);
2426 }
2427 }
2428 }
2429 }
2430 #endif
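        /* Timer register (rTR) countdown: rTRticks accumulates simulated
           ticks and is converted to TR decrements at TR_RATE ticks per TR
           unit.  When the subtraction underflows past 27 bits the register
           is masked back to MASK27 and, if TRO faults are enabled, a
           timer-runout group-7 fault is posted via setG7fault(). */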
2444 # define TR_RATE 2
2445
2446
2447
2448 cpu.rTR = (word27) (((word27s) cpu.rTR) - (word27s) (cpu.rTRticks / TR_RATE));
2449 cpu.rTRticks %= TR_RATE;
2450
2451
2452
2453 if (cpu.rTR & ~MASK27)
2454 {
2455 cpu.rTR &= MASK27;
2456 if (cpu.tweaks.tro_enable) {
2457 setG7fault (current_running_cpu_idx, FAULT_TRO);
2458 }
2459 }
2460
2461 sim_debug (DBG_CYCLE, & cpu_dev, "Cycle is %s\r\n",
2462 cycle_str (cpu.cycle));
2463
2464 switch (cpu.cycle)
2465 {
2466 case INTERRUPT_cycle:
2467 {
2468 CPT (cpt1U, 0);
2469
2470
2471
2472
2473
2474
2475
2476
2477
2478
2479
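// INTERRUPT_cycle: ask the SCUs for the highest-priority pending interrupt,
// safe-store the control unit state, and execute the interrupt pair in
// temporary absolute mode as an XED; if nothing is pending, restore the
// saved state and return to the fetch cycle.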
2480 uint intr_pair_addr = get_highest_intr (cpup);
2481 #if defined(TESTING)
2482 HDBGIntr (intr_pair_addr, "");
2483 #endif
2484 cpu.cu.FI_ADDR = (word5) (intr_pair_addr / 2);
2485 cu_safe_store (cpup);
2486
2487
2488
2489 CPT (cpt1U, 1);
2490
2491 set_temporary_absolute_mode (cpup);
2492
2493
2494 cpu.PPR.PRR = 0;
2495 cpu.TPR.TRR = 0;
2496
2497 sim_debug (DBG_INTR, & cpu_dev, "intr_pair_addr %u flag %d\r\n",
2498 intr_pair_addr, cpu.interrupt_flag);
2499 #if !defined(SPEED)
2500 if_sim_debug (DBG_INTR, & cpu_dev)
2501 traceInstruction (DBG_INTR);
2502 #endif
2503
2504 if (cpu.interrupt_flag)
2505 {
2506 CPT (cpt1U, 2);
2507
2508
2509
2510
2511
2512
2513 if (intr_pair_addr != 1)
2514 {
2515 CPT (cpt1U, 3);
2516
2517
2518 core_read2 (cpup, intr_pair_addr,
2519 & cpu.cu.IWB, & cpu.cu.IRODD, __func__);
2520 #if defined(TESTING)
2521 HDBGMRead (intr_pair_addr, cpu.cu.IWB, "intr even");
2522 HDBGMRead (intr_pair_addr + 1, cpu.cu.IRODD, "intr odd");
2523 #endif
2524 cpu.cu.xde = 1;
2525 cpu.cu.xdo = 1;
2526 cpu.isExec = true;
2527 cpu.isXED = true;
2528
2529 CPT (cpt1U, 4);
2530 cpu.interrupt_flag = false;
2531 set_cpu_cycle (cpup, INTERRUPT_EXEC_cycle);
2532 break;
2533 }
2534 }
2535
2536
2537
2538 CPT (cpt1U, 5);
2539 cpu.interrupt_flag = false;
2540 clear_temporary_absolute_mode (cpup);
2541
2542 cu_safe_restore (cpup);
2543
2544
2545 cpu.wasXfer = false;
2546
2547
2548 set_cpu_cycle (cpup, FETCH_cycle);
2549 }
2550 break;
2551
2552 case FETCH_cycle:
2553 #if defined(PANEL68)
2554 (void)memset (cpu.cpt, 0, sizeof (cpu.cpt));
2555 #endif
2556 CPT (cpt1U, 13);
2557
2558 PNL (L68_ (cpu.INS_FETCH = false;))
2559
2560
2561
2562
2563
2564
2565
2566
2567
2568
2569
2570
2571
2572
2573
2574
2575
2576
2577
2578
2579
2580
2581
2582
2583
2584
2585
2586
2587
2588
2589
2590
2591
2592
2593
2594
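// Interrupts and group-7 faults are sampled only at an even instruction
// address that was not reached by a transfer, was not inhibited, and is not
// in the middle of an XEC/XED or repeat sequence; DIS always samples so a
// parked processor can wake up.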
2595 if (get_bar_mode (cpup))
2596 get_BAR_address (cpup, cpu.PPR.IC);
2597
2598
2599
2600
2601 bool tmp_priv_mode = is_priv_mode (cpup);
2602 bool is_dis = cpu.currentInstruction.opcode == 0616 &&
2603 cpu.currentInstruction.opcodeX == 0;
2604 bool noCheckTR = tmp_priv_mode &&
2605 !(is_dis && GET_I (cpu.cu.IWB) == 0);
2606
2607 if (is_dis)
2608 {
2609
2610
2611 cpu.interrupt_flag = sample_interrupts (cpup);
2612 cpu.g7_flag =
2613 noCheckTR ? bG7PendingNoTRO (cpup) : bG7Pending (cpup);
2614 }
2615 else if (! (cpu.cu.xde | cpu.cu.xdo |
2616 cpu.cu.rpt | cpu.cu.rd | cpu.cu.rl))
2617 {
2618 if ((!cpu.wasInhibited) &&
2619 (cpu.PPR.IC & 1) == 0 &&
2620 (! cpu.wasXfer))
2621 {
2622 CPT (cpt1U, 14);
2623 cpu.interrupt_flag = sample_interrupts (cpup);
2624 cpu.g7_flag =
2625 noCheckTR ? bG7PendingNoTRO (cpup) : bG7Pending (cpup);
2626 }
2627 cpu.wasInhibited = false;
2628 }
2629 else
2630 {
2631
2632
2633
2634
2635
2636 if ((cpu.PPR.IC & 1) == 1)
2637 {
2638 cpu.wasInhibited = true;
2639 }
2640 }
2641
2642
2643
2644
2645
2646
2647
2648
2649
2650
2651
2652
2653
2654
2655
2656
2657
2658
2659
2660
2661
2662
2663
2664
2665
2666
2667
2668
2669
2670
2671
2672
2673
2674
2675
2676 if (cpu.g7_flag)
2677 {
2678 cpu.g7_flag = false;
2679 cpu.interrupt_flag = false;
2680 sim_debug (DBG_CYCLE, & cpu_dev,
2681 "call doG7Fault (%d)\r\n", !noCheckTR);
2682 doG7Fault (cpup, !noCheckTR);
2683 }
2684 if (cpu.interrupt_flag)
2685 {
2686
2687
2688
2689 CPT (cpt1U, 15);
2690 set_cpu_cycle (cpup, INTERRUPT_cycle);
2691 break;
2692 }
2693
2694
2695
2696
2697
2698
2699
2700
2701
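// Lockup fault (LUF) accounting (FETCH falls through to here): count cycles
// since the last interruptible point and take a lockup fault once the
// CMR.luf limit is exceeded; privileged code is given until the hard upper
// limit (luf_limits[4]) or until it drops back to unprivileged mode.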
2702 case PSEUDO_FETCH_cycle:
2703
2704 tmp_priv_mode = is_priv_mode (cpup);
2705 if (! (luf_flag && tmp_priv_mode))
2706 cpu.lufCounter ++;
2707
2708 if (cpu.lufCounter > luf_limits[cpu.CMR.luf])
2709 {
2710 if (tmp_priv_mode)
2711 {
2712
2713 cpu.lufOccurred = true;
2714 }
2715 else
2716 {
2717 do_LUF_fault (cpup);
2718 }
2719 }
2720
2721
2722 if (cpu.lufCounter > luf_limits[4])
2723 {
2724 do_LUF_fault (cpup);
2725 }
2726
2727
2728
2729 if (! tmp_priv_mode && cpu.lufOccurred)
2730 {
2731 do_LUF_fault (cpup);
2732 }
2733
2734
2735
2736
2737
2738
2739
2740
2741
2742
2743
2744
2745
2746
2747
2748
2749
2750
2751
2752
2753
2754
2755
2756
2757
2758
2759
2760
2761
2762
2763
2764
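// PSEUDO_FETCH reuses the instruction word already sitting in IWB (set up
// by a transfer or by the even/odd fast path later in the exec cycle); a
// real FETCH reads the next instruction from memory at PPR.IC.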
2765 if (cpu.cycle == PSEUDO_FETCH_cycle)
2766 {
2767 cpu.apu.lastCycle = INSTRUCTION_FETCH;
2768 cpu.cu.XSF = 0;
2769 cpu.cu.TSN_VALID [0] = 0;
2770 cpu.TPR.TSR = cpu.PPR.PSR;
2771 cpu.TPR.TRR = cpu.PPR.PRR;
2772 cpu.wasInhibited = false;
2773 }
2774 else
2775 {
2776 CPT (cpt1U, 20);
2777 cpu.isExec = false;
2778 cpu.isXED = false;
2779
2780
2781
2782 cpu.cu.XSF = 0;
2783 sim_debug (DBG_TRACEEXT, & cpu_dev, "fetchCycle bit 29 sets XSF to 0\r\n");
2784 cpu.cu.TSN_VALID [0] = 0;
2785 cpu.TPR.TSR = cpu.PPR.PSR;
2786 cpu.TPR.TRR = cpu.PPR.PRR;
2787 PNL (cpu.prepare_state = ps_PIA);
2788 PNL (L68_ (cpu.INS_FETCH = true;))
2789 fetchInstruction (cpup, cpu.PPR.IC);
2790 }
2791
2792 CPT (cpt1U, 21);
2793 advanceG7Faults (cpup);
2794 set_cpu_cycle (cpup, EXEC_cycle);
2795 break;
2796
2797 case EXEC_cycle:
2798 case FAULT_EXEC_cycle:
2799 case INTERRUPT_EXEC_cycle:
2800 {
2801 #if defined(THREADZ) || defined(LOCKLESS)
2802
2803
2804 if (UNLIKELY (cpu.becomeSlave)) {
2805 cpu.becomeSlave = false;
2806
2807 while (! syncClockMode) {
2808 sim_usleep (1);
2809 }
2810
2811 cpu.syncClockModePoll = 0;
2812 }
2813
2814
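// Synchronous clock mode: poll the shared sync-clock state only every
// syncClockModePollRate cycles (every cycle under ISOLTS) so the common,
// unsynchronized path stays cheap; the master hands out work allocations
// and the slaves wait for theirs.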
2815 if (cpu.syncClockModeCache || --cpu.syncClockModePoll <= 0) {
2816
2817 cpu.syncClockModePoll = cpu.tweaks.isolts_mode ? 1 : syncClockModePollRate;
2818
2819
2820 if (syncClockMode) {
2821
2822
2823 cpu.syncClockModeCache = true;
2824
2825
2826 if (syncClockModeMasterIdx == current_running_cpu_idx) {
2827
2828
2829 cpu.masterCycleCnt ++;
2830 if (cpu.masterCycleCnt > masterCycleCntlimit) {
2831 # ifdef SYNCTEST
2832 sim_printf ("too many cycles\r\n");
2833 # endif
2834 giveupClockMaster (cpup);
2835 goto bail;
2836 }
2837
2838
2839 if (cpu.workAllocation <= 0) {
2840 # ifdef SYNCTEST
2841 allocCount ++;
2842 # endif
2843
2844
2845
2846
2847
2848 int64_t waitTimeout = 100000;
2849
2850
2851 while (1) {
2852 bool alldone = true;
2853 for (int i = 0; i < N_CPU_UNITS_MAX; i ++) {
2854 if (cpus[i].inMultics && cpus[i].workAllocation > 0) {
2855 wakeCPU (i);
2856 alldone = false;
2857
2858 }
2859 }
2860 if (alldone) {
2861
2862 for (int i = 0; i < N_CPU_UNITS_MAX; i ++) {
2863 if (cpus[i].inMultics) {
2864 cpus[i].workAllocation += cpu.tweaks.isolts_mode ? 1 : workAllocationQuantum;
2865 wakeCPU (i);
2866 }
2867 }
2868 break;
2869 }
2870 if (waitTimeout-- < 0) {
2871
2872
2873 sim_printf ("Clock master CPU %c timed out\r\n", "ABCDEFGH"[current_running_cpu_idx]);
2874 for (int i = 0; i < N_CPU_UNITS_MAX; i ++) {
2875 if (cpus[i].inMultics && cpus[i].workAllocation > 0) {
2876 sim_printf ("CPU %c remaining allocation: %ld\r\n", "ABCDEFGH"[i], cpus[i].workAllocation);
2877 }
2878 }
2879 sim_printf ("Conceding clock mastery...\r\n");
2880 cpu.syncClockModeCache = false;
2881 giveupClockMaster (cpup);
2882 goto bail;
2883 }
2884 sim_usleep (1);
2885 }
2886 }
2887
2888
2889
2890 } else {
2891
2892
2893
2894
2895 if (! cpu.isSlave) {
2896
2897 # ifdef SYNCTEST
2898 sim_printf ("CPU%c becoming slave\r\n", cpu.cpuIdx + 'A');
2899 # endif
2900 }
2901 cpu.isSlave = true;
2902
2903
2904 while (syncClockMode && cpu.workAllocation <= 0)
2905 sim_usleep (1);
2906
2907
2908
2909
2910 }
2911
2912 } else {
2913
2914 cpu.syncClockModeCache = false;
2915 if (cpu.isSlave) {
2916
2917 # ifdef SYNCTEST
2918 sim_printf ("CPU%c free; free at last\r\n", cpu.cpuIdx + 'A');
2919 # endif
2920 cpu.isSlave = false;
2921 }
2922 }
2923 }
2924 bail:
2925
2926 #endif
2927
2928 #if defined(THREADZ) || defined(LOCKLESS)
2929 if (LIKELY (! cpu.tweaks.isolts_mode) &&
2930 UNLIKELY (! cpu.inMultics)) {
2931 cpu.inMultics = true;
2932 }
2933 #endif
2934
2935 CPT (cpt1U, 22);
2936
2937 #if defined(LOCKLESS)
2938 if (stall_point_active)
2939 {
2940 for (int i = 0; i < N_STALL_POINTS; i ++)
2941 if (stall_points[i].segno && stall_points[i].segno == cpu.PPR.PSR &&
2942 stall_points[i].offset && stall_points[i].offset == cpu.PPR.IC)
2943 {
2944 # if defined(CTRACE)
2945 (void)fprintf (stderr, "%10lu %s stall %d\r\n", seqno (), cpunstr[current_running_cpu_idx], i);
2946 # endif
2947
2948 sim_usleep(stall_points[i].time);
2949 break;
2950 }
2951 }
2952 #endif
2953
2954
2955
2956
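// Remember the inhibit (I) bit so that interrupt sampling is suppressed at
// the next instruction-pair boundary.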
2957 if (GET_I (cpu.cu.IWB))
2958 cpu.wasInhibited = true;
2959
2960 t_stat ret = executeInstruction (cpup);
2961 DO_WORK_EXEC;
2962 CPT (cpt1U, 23);
2963
2964 if (cpu.tweaks.l68_mode)
2965 add_l68_CU_history (cpup);
2966 else
2967 add_dps8m_CU_history (cpup);
2968
2969 if (ret > 0)
2970 {
2971 reason = ret;
2972 break;
2973 }
2974
2975 if (ret == CONT_XEC)
2976 {
2977 CPT (cpt1U, 27);
2978 cpu.wasXfer = false;
2979 cpu.isExec = true;
2980 if (cpu.cu.xdo)
2981 cpu.isXED = true;
2982
2983 cpu.cu.XSF = 0;
2984 cpu.cu.TSN_VALID [0] = 0;
2985 cpu.TPR.TSR = cpu.PPR.PSR;
2986 cpu.TPR.TRR = cpu.PPR.PRR;
2987 break;
2988 }
2989
2990 if (ret == CONT_TRA || ret == CONT_RET)
2991 {
2992 CPT (cpt1U, 24);
2993 cpu.cu.xde = cpu.cu.xdo = 0;
2994 cpu.isExec = false;
2995 cpu.isXED = false;
2996
2997 cpu.wasXfer = true;
2998
2999 if (cpu.cycle != EXEC_cycle)
3000 {
3001 clearFaultCycle (cpup);
3002
3003
3004
3005
3006
3007 if (! (cpu.currentInstruction.opcode == 0715 &&
3008 cpu.currentInstruction.opcodeX == 0))
3009 {
3010 CPT (cpt1U, 9);
3011 SET_I_NBAR;
3012 }
3013
3014 if (!clear_temporary_absolute_mode (cpup))
3015 {
3016
3017 sim_debug (DBG_TRACEEXT, & cpu_dev,
3018 "setting ABS mode\r\n");
3019 CPT (cpt1U, 10);
3020 set_addr_mode (cpup, ABSOLUTE_mode);
3021 }
3022 else
3023 {
3024
3025 sim_debug (DBG_TRACEEXT, & cpu_dev,
3026 "not setting ABS mode\r\n");
3027 }
3028
3029 }
3030
3031
3032 if (TST_I_ABS && cpu.cu.XSF)
3033 {
3034 set_addr_mode (cpup, APPEND_mode);
3035 }
3036
3037 if (ret == CONT_TRA)
3038 {
3039
3040 cpu.wasXfer = false;
3041 set_cpu_cycle (cpup, PSEUDO_FETCH_cycle);
3042 }
3043 else
3044 set_cpu_cycle (cpup, FETCH_cycle);
3045 break;
3046 }
3047
3048 if (ret == CONT_DIS)
3049 {
3050 CPT (cpt1U, 25);
3051
3052 #if defined(THREADZ) || defined(LOCKLESS)
3053
3054 if (cpu.syncClockModeCache) {
3055 break;
3056 }
3057 #endif
3058
3059
3060
3061
3062
3063
3064
3065
3066
3067
3068
3069
3070
3071
3072
3073
3074
3075
3076
3077
3078
3079
3080
3081
3082
3083
3084
3085
3086
3087
3088
3089
3090
3091
3092
3093
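// CONT_DIS: the processor executed DIS and is waiting for an interrupt.
// Rather than spin, sleep for at most the time left on the Timer Register
// (one TR tick is 125/64 microseconds); if the timer expires first, post a
// Timer Runout fault when enabled, otherwise scale the unslept remainder
// back into rTR and resume.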
3094 #if defined(THREADZ) || defined(LOCKLESS)
3095
3096
3097
3098
3099
3100
3101
3102
3103
3104 # if defined(NO_TIMEWAIT)
3105
3106 struct timespec req, rem;
3107 uint ms = sys_opts.sys_poll_interval;
3108 long int nsec = (long int) ms * 1000L * 1000L;
3109 req.tv_nsec = nsec;
3110 req.tv_sec = req.tv_nsec / 1000000000L;
3111 req.tv_nsec %= 1000000000L;
3112 int rc = nanosleep (& req, & rem);
3113
3114 if (rc == -1)
3115 {
3116 ms = (uint) (rem.tv_nsec / 1000 + req.tv_sec * 1000);
3117 }
3118 word27 ticks = ms * 512;
3119 if (cpu.rTR <= ticks)
3120 {
3121 if (cpu.tweaks.tro_enable) {
3122 setG7fault (current_running_cpu_idx, FAULT_TRO);
3123 }
3124 cpu.rTR = (cpu.rTR - ticks) & MASK27;
3125 }
3126 else
3127 cpu.rTR = (cpu.rTR - ticks) & MASK27;
3128
3129 if (cpu.rTR == 0)
3130 cpu.rTR = MASK27;
3131 # else
3132
3133
3134 unsigned long left = (unsigned long) ((uint64) (cpu.rTR) * 125u / 64u);
3135
3136
3137
3138
3139
3140
3141
3142
3143 unsigned long nowLeft = left;
3144 if (!sample_interrupts (cpup))
3145 {
3146 nowLeft = sleepCPU (left);
3147 }
3148 if (nowLeft)
3149 {
3150
3151
3152 if (nowLeft <= left) {
3153 cpu.rTR = (word27) (nowLeft * 64 / 125);
3154 }
3155 }
3156 else
3157 {
3158
3159 if (cpu.tweaks.tro_enable)
3160 {
3161 lock_scu ();
3162 setG7fault (current_running_cpu_idx, FAULT_TRO);
3163 unlock_scu ();
3164 }
3165 cpu.rTR = MASK27;
3166 }
3167 # endif
3168 cpu.rTRticks = 0;
3169 break;
3170 #else
3171
3172 sim_usleep (sys_opts.sys_poll_interval * 1000);
3173
3174 # if defined(CONSOLE_FIX)
3175 # if defined(THREADZ) || defined(LOCKLESS)
3176 lock_libuv ();
3177 # endif
3178 # endif
3179 uv_run (ev_poll_loop, UV_RUN_NOWAIT);
3180 # if defined(CONSOLE_FIX)
3181 # if defined(THREADZ) || defined(LOCKLESS)
3182 unlock_libuv ();
3183 # endif
3184 # endif
3185 fast_queue_subsample = 0;
3186
3187 sim_interval = 0;
3188
3189
3190
3191
3192 cpu.rTRticks = 0;
3193
3194
3195
3196
3197
3198
3199 if (cpu.rTR <= sys_opts.sys_poll_interval * 512)
3200 {
3201 if (cpu.tweaks.tro_enable) {
3202 setG7fault (current_running_cpu_idx, FAULT_TRO);
3203 }
3204 cpu.rTR = (cpu.rTR - sys_opts.sys_poll_interval * 512) & MASK27;
3205 }
3206 else
3207 cpu.rTR = (cpu.rTR - sys_opts.sys_poll_interval * 512) & MASK27;
3208 if (cpu.rTR == 0)
3209 cpu.rTR = MASK27;
3210 #endif
3211
3212 break;
3213 }
3214
3215 cpu.wasXfer = false;
3216
3217 if (ret < 0)
3218 {
3219 sim_warn ("executeInstruction returned %d?\r\n", ret);
3220 break;
3221 }
3222
3223 if ((! cpu.cu.repeat_first) &&
3224 (cpu.cu.rpt ||
3225 (cpu.cu.rd && (cpu.PPR.IC & 1)) ||
3226 cpu.cu.rl))
3227 {
3228 CPT (cpt1U, 26);
3229 if (cpu.cu.rd)
3230 -- cpu.PPR.IC;
3231 cpu.wasXfer = false;
3232 set_cpu_cycle (cpup, FETCH_cycle);
3233 break;
3234 }
3235
3236
3237 if (cpu.cycle == FAULT_EXEC_cycle &&
3238 !cpu.cu.xde && cpu.cu.xdo)
3239 {
3240 clear_temporary_absolute_mode (cpup);
3241 cu_safe_restore (cpup);
3242 CPT (cpt1U, 12);
3243 clearFaultCycle (cpup);
3244
3245
3246
3247 cpu.wasXfer = false;
3248 cpu.isExec = false;
3249 cpu.isXED = false;
3250
3251 cpu.PPR.IC += ci->info->ndes;
3252 cpu.PPR.IC ++;
3253
3254 set_cpu_cycle (cpup, FETCH_cycle);
3255 break;
3256 }
3257
3258
3259 if (cpu.cycle == INTERRUPT_EXEC_cycle &&
3260 !cpu.cu.xde && cpu.cu.xdo)
3261 {
3262 clear_temporary_absolute_mode (cpup);
3263 cu_safe_restore (cpup);
3264
3265
3266
3267 CPT (cpt1U, 12);
3268 cpu.wasXfer = false;
3269 cpu.isExec = false;
3270 cpu.isXED = false;
3271
3272 set_cpu_cycle (cpup, FETCH_cycle);
3273 break;
3274 }
3275
3276
3277 if (cpu.cu.xde && cpu.cu.xdo)
3278 {
3279
3280 cpu.cu.IWB = cpu.cu.IRODD;
3281 cpu.cu.xde = 0;
3282 cpu.isExec = true;
3283 cpu.isXED = true;
3284 cpu.cu.XSF = 0;
3285 cpu.cu.TSN_VALID [0] = 0;
3286 cpu.TPR.TSR = cpu.PPR.PSR;
3287 cpu.TPR.TRR = cpu.PPR.PRR;
3288 break;
3289 }
3290
3291 if (cpu.cu.xde || cpu.cu.xdo)
3292 {
3293 cpu.cu.xde = cpu.cu.xdo = 0;
3294 cpu.isExec = false;
3295 cpu.isXED = false;
3296 CPT (cpt1U, 27);
3297 cpu.wasXfer = false;
3298 cpu.PPR.IC ++;
3299 if (ci->info->ndes > 0)
3300 cpu.PPR.IC += ci->info->ndes;
3301 cpu.wasInhibited = true;
3302 set_cpu_cycle (cpup, FETCH_cycle);
3303 break;
3304 }
3305
3306
3307 if (cpu.cycle != EXEC_cycle)
3308 sim_warn ("expected EXEC_cycle (%d)\r\n", cpu.cycle);
3309
3310 cpu.cu.xde = cpu.cu.xdo = 0;
3311 cpu.isExec = false;
3312 cpu.isXED = false;
3313
3314
3315
3316
3317
3318
3319
3320
3321
3322
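// Fast path: if the next instruction is the odd half of the pair already
// held in IRODD (even IC, no EIS descriptors, no repeats, not DIS, and the
// last store did not land in the same block of words), skip the memory
// fetch, move IRODD into IWB, and take the PSEUDO_FETCH path.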
3323 if ((cpu.PPR.IC & 1) == 0 &&
3324 ci->info->ndes == 0 &&
3325 !cpu.cu.repeat_first && !cpu.cu.rpt && !cpu.cu.rd && !cpu.cu.rl &&
3326 !(cpu.currentInstruction.opcode == 0616 && cpu.currentInstruction.opcodeX == 0) &&
3327 (cpu.PPR.IC & ~3u) != (cpu.last_write & ~3u))
3328 {
3329 cpu.PPR.IC ++;
3330 cpu.wasXfer = false;
3331 cpu.cu.IWB = cpu.cu.IRODD;
3332 set_cpu_cycle (cpup, PSEUDO_FETCH_cycle);
3333 break;
3334 }
3335
3336 cpu.PPR.IC ++;
3337 if (ci->info->ndes > 0)
3338 cpu.PPR.IC += ci->info->ndes;
3339
3340 CPT (cpt1U, 28);
3341 cpu.wasXfer = false;
3342 set_cpu_cycle (cpup, FETCH_cycle);
3343 }
3344 break;
3345
3346 case SYNC_FAULT_RTN_cycle:
3347 {
3348 CPT (cpt1U, 29);
3349
3350
3351
3352
3353 cpu.PPR.IC += ci->info->ndes;
3354 cpu.PPR.IC ++;
3355 cpu.wasXfer = false;
3356 set_cpu_cycle (cpup, FETCH_cycle);
3357 }
3358 break;
3359
3360 case FAULT_cycle:
3361 {
3362 CPT (cpt1U, 30);
3363
3364
3365
3366
3367
3368
3369
3370
3371
3372
3373
3374
3375
3376
3377
3378
3379
3380
3381
3382
3383
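// FAULT_cycle: safe-store the control unit state (preserving the stored
// IRODD word for a Trouble fault taken with XEC pending), enter temporary
// absolute mode, and execute the fault pair at the FLT_BASE switch address
// (or at the MR.FFV vector on the L68).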
3384 if ((cpu.cu.APUCycleBits & 060) || cpu.secret_addressing_mode)
3385 set_apu_status (cpup, apuStatus_FABS);
3386
3387
3388
3389
3390
3391
3392
3393
3394
3395
3396
3397 if (cpu.faultNumber != FAULT_TRB || cpu.cu.xde == 0)
3398 {
3399 cu_safe_store (cpup);
3400 }
3401 else
3402 {
3403 word36 tmpIRODD = cpu.scu_data[7];
3404 cu_safe_store (cpup);
3405 cpu.scu_data[7] = tmpIRODD;
3406 }
3407 CPT (cpt1U, 31);
3408
3409
3410 set_temporary_absolute_mode (cpup);
3411
3412
3413 cpu.PPR.PRR = 0;
3414 cpu.TPR.TRR = 0;
3415
3416
3417 uint fltAddress = (cpu.switches.FLT_BASE << 5) & 07740;
3418 L68_ (
3419 if (cpu.is_FFV)
3420 {
3421 cpu.is_FFV = false;
3422 CPTUR (cptUseMR);
3423
3424 fltAddress = (cpu.MR.FFV & MASK15) << 3;
3425 }
3426 )
3427
3428
3429 word24 addr = fltAddress + 2 * cpu.faultNumber;
3430
3431 if (cpu.restart)
3432 {
3433 cpu.restart = false;
3434 addr = cpu.restart_address;
3435 }
3436
3437 core_read2 (cpup, addr, & cpu.cu.IWB, & cpu.cu.IRODD, __func__);
3438 #if defined(TESTING)
3439 HDBGMRead (addr, cpu.cu.IWB, "fault even");
3440 HDBGMRead (addr + 1, cpu.cu.IRODD, "fault odd");
3441 #endif
3442 cpu.cu.xde = 1;
3443 cpu.cu.xdo = 1;
3444 cpu.isExec = true;
3445 cpu.isXED = true;
3446
3447 CPT (cpt1U, 33);
3448 set_cpu_cycle (cpup, FAULT_EXEC_cycle);
3449
3450 break;
3451 }
3452
3453 }
3454 }
3455 while (reason == 0);
3456
3457 leave:
3458 #if defined(THREADZ) || defined(LOCKLESS)
3459 cpu.executing = false;
3460 cpu.inMultics = false;
3461 #endif
3462 #if defined(TESTING)
3463 HDBGPrint ();
3464 #endif
3465
3466 for (unsigned short n = 0; n < N_CPU_UNITS_MAX; n++)
3467 {
3468 #if !defined(SCHED_NEVER_YIELD)
3469 lockYieldAll = lockYieldAll + (unsigned long long)cpus[n].coreLockState.lockYield;
3470 #endif
3471 lockWaitMaxAll = lockWaitMaxAll + (unsigned long long)cpus[n].coreLockState.lockWaitMax;
3472 lockWaitAll = lockWaitAll + (unsigned long long)cpus[n].coreLockState.lockWait;
3473 lockImmediateAll = lockImmediateAll + (unsigned long long)cpus[n].coreLockState.lockImmediate;
3474 lockCntAll = lockCntAll + (unsigned long long)cpus[n].coreLockState.lockCnt;
3475 instrCntAll = instrCntAll + (unsigned long long)cpus[n].instrCnt;
3476 cycleCntAll = cycleCntAll + (unsigned long long)cpus[n].cycleCnt;
3477 }
3478
3479 (void)fflush(stderr);
3480 (void)fflush(stdout);
3481
3482 # if !defined(PERF_STRIP)
3483 if (cycleCntAll > (unsigned long long)cpu.cycleCnt)
3484 {
3485 # endif
3486 sim_msg ("\r\n");
3487 sim_msg ("\r+---------------------------------+\r\n");
3488 sim_msg ("\r| Aggregate CPU Statistics |\r\n");
3489 sim_msg ("\r+---------------------------------+\r\n");
3490 (void)fflush(stderr);
3491 (void)fflush(stdout);
3492 # if defined(WIN_STDIO)
3493 sim_msg ("\r| cycles %15llu |\r\n", cycleCntAll);
3494 sim_msg ("\r| instructions %15llu |\r\n", instrCntAll);
3495 (void)fflush(stderr);
3496 (void)fflush(stdout);
3497 sim_msg ("\r+---------------------------------+\r\n");
3498 sim_msg ("\r| lockCnt %15llu |\r\n", lockCntAll);
3499 sim_msg ("\r| lockImmediate %15llu |\r\n", lockImmediateAll);
3500 (void)fflush(stderr);
3501 (void)fflush(stdout);
3502 sim_msg ("\r+---------------------------------+\r\n");
3503 sim_msg ("\r| lockWait %15llu |\r\n", lockWaitAll);
3504 sim_msg ("\r| lockWaitMax %15llu |\r\n", lockWaitMaxAll);
3505 (void)fflush(stderr);
3506 (void)fflush(stdout);
3507 # if !defined(SCHED_NEVER_YIELD)
3508 sim_msg ("\r| lockYield %15llu |\r\n", lockYieldAll);
3509 # else
3510 sim_msg ("\r| lockYield ---- |\r\n");
3511 # endif
3512 sim_msg ("\r+---------------------------------+\r\n");
3513 (void)fflush(stderr);
3514 (void)fflush(stdout);
3515 # else
3516 sim_msg ("\r| cycles %'15llu |\r\n", cycleCntAll);
3517 sim_msg ("\r| instructions %'15llu |\r\n", instrCntAll);
3518 (void)fflush(stderr);
3519 (void)fflush(stdout);
3520 sim_msg ("\r+---------------------------------+\r\n");
3521 sim_msg ("\r| lockCnt %'15llu |\r\n", lockCntAll);
3522 sim_msg ("\r| lockImmediate %'15llu |\r\n", lockImmediateAll);
3523 (void)fflush(stderr);
3524 (void)fflush(stdout);
3525 sim_msg ("\r+---------------------------------+\r\n");
3526 sim_msg ("\r| lockWait %'15llu |\r\n", lockWaitAll);
3527 sim_msg ("\r| lockWaitMax %'15llu |\r\n", lockWaitMaxAll);
3528 (void)fflush(stderr);
3529 (void)fflush(stdout);
3530 # if !defined(SCHED_NEVER_YIELD)
3531 sim_msg ("\r| lockYield %'15llu |\r\n", lockYieldAll);
3532 # else
3533 sim_msg ("\r| lockYield ---- |\r\n");
3534 # endif
3535 sim_msg ("\r+---------------------------------+\r\n");
3536 (void)fflush(stderr);
3537 (void)fflush(stdout);
3538 # endif
3539 # if !defined(PERF_STRIP)
3540 }
3541 # else
3542 sim_msg("\r\n");
3543 # endif
3544
3545
3546
3547
3548
3549
3550
3551
3552
3553
3554
3555 #if defined(THREADZ) || defined(LOCKLESS)
3556 if (running_perf_test == false)
3557 sim_usleep(2000000);
3558 stopCPUThread();
3559 #endif
3560
3561 #if defined(M_SHARED)
3562
3563
3564
3565
3566 set_cpu_idx (0);
3567 dummy_IC = cpu.PPR.IC;
3568 #endif
3569
3570 return reason;
3571 }
3572
3573
3574
3575
3576
3577
3578
3579
3580
3581
3582
3583
3584
3585
3586
3587 int operand_size (cpu_state_t * cpup)
3588 {
3589 DCDstruct * i = & cpu.currentInstruction;
3590 if (i->info->flags & (READ_OPERAND | STORE_OPERAND))
3591 return 1;
3592 else if (i->info->flags & (READ_YPAIR | STORE_YPAIR))
3593 return 2;
3594 else if (i->info->flags & (READ_YBLOCK8 | STORE_YBLOCK8))
3595 return 8;
3596 else if (i->info->flags & (READ_YBLOCK16 | STORE_YBLOCK16))
3597 return 16;
3598 else if (i->info->flags & (READ_YBLOCK32 | STORE_YBLOCK32))
3599 return 32;
3600 return 0;
3601 }
3602
3603
3604
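// Operand readers/writers: operand_size() selects 1-, 2-, 8-, 16- or
// 32-word transfers, and the effective address is aligned down to the
// natural block boundary before the access.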
3605 void readOperandRead (cpu_state_t * cpup, word18 addr) {
3606 CPT (cpt1L, 6);
3607
3608 #if defined(THREADZ)
3609 DCDstruct * i = & cpu.currentInstruction;
3610 if (RMWOP (i))
3611 lock_rmw ();
3612 #endif
3613
3614 switch (operand_size (cpup)) {
3615 case 1:
3616 CPT (cpt1L, 7);
3617 ReadOperandRead (cpup, addr, & cpu.CY);
3618 break;
3619 case 2:
3620 CPT (cpt1L, 8);
3621 addr &= 0777776;
3622 Read2OperandRead (cpup, addr, cpu.Ypair);
3623 break;
3624 case 8:
3625 CPT (cpt1L, 9);
3626 addr &= 0777770;
3627 Read8 (cpup, addr, cpu.Yblock8, cpu.currentInstruction.b29);
3628 break;
3629 case 16:
3630 CPT (cpt1L, 10);
3631 addr &= 0777770;
3632 Read16 (cpup, addr, cpu.Yblock16);
3633 break;
3634 case 32:
3635 CPT (cpt1L, 11);
3636 addr &= 0777740;
3637 for (uint j = 0 ; j < 32 ; j += 1)
3638 ReadOperandRead (cpup, addr + j, cpu.Yblock32 + j);
3639 break;
3640 }
3641 }
3642
3643 void readOperandRMW (cpu_state_t * cpup, word18 addr) {
3644 CPT (cpt1L, 6);
3645 switch (operand_size (cpup)) {
3646 case 1:
3647 CPT (cpt1L, 7);
3648 ReadOperandRMW (cpup, addr, & cpu.CY);
3649 break;
3650 case 2:
3651 CPT (cpt1L, 8);
3652 addr &= 0777776;
3653 Read2OperandRead (cpup, addr, cpu.Ypair);
3654 break;
3655 case 8:
3656 CPT (cpt1L, 9);
3657 addr &= 0777770;
3658 Read8 (cpup, addr, cpu.Yblock8, cpu.currentInstruction.b29);
3659 break;
3660 case 16:
3661 CPT (cpt1L, 10);
3662 addr &= 0777770;
3663 Read16 (cpup, addr, cpu.Yblock16);
3664 break;
3665 case 32:
3666 CPT (cpt1L, 11);
3667 addr &= 0777740;
3668 for (uint j = 0 ; j < 32 ; j += 1)
3669 ReadOperandRMW (cpup, addr + j, cpu.Yblock32 + j);
3670 break;
3671 }
3672 }
3673
3674
3675
3676 t_stat write_operand (cpu_state_t * cpup, word18 addr, UNUSED processor_cycle_type cyctyp)
3677 {
3678 switch (operand_size (cpup))
3679 {
3680 case 1:
3681 CPT (cpt1L, 12);
3682 WriteOperandStore (cpup, addr, cpu.CY);
3683 break;
3684 case 2:
3685 CPT (cpt1L, 13);
3686 addr &= 0777776;
3687 Write2OperandStore (cpup, addr + 0, cpu.Ypair);
3688 break;
3689 case 8:
3690 CPT (cpt1L, 14);
3691 addr &= 0777770;
3692 Write8 (cpup, addr, cpu.Yblock8, cpu.currentInstruction.b29);
3693 break;
3694 case 16:
3695 CPT (cpt1L, 15);
3696 addr &= 0777770;
3697 Write16 (cpup, addr, cpu.Yblock16);
3698 break;
3699 case 32:
3700 CPT (cpt1L, 16);
3701 addr &= 0777740;
3702
3703
3704 Write32 (cpup, addr, cpu.Yblock32);
3705 break;
3706 }
3707
3708 #if defined(THREADZ)
3709 if (cyctyp == OPERAND_STORE)
3710 {
3711 DCDstruct * i = & cpu.currentInstruction;
3712 if (RMWOP (i))
3713 unlock_mem ();
3714 }
3715 #endif
3716 return SCPE_OK;
3717
3718 }
3719
3720 #if !defined(SPEED)
3721 t_stat set_mem_watch (int32 arg, const char * buf)
3722 {
3723 if (strlen (buf) == 0)
3724 {
3725 if (arg)
3726 {
3727 sim_warn ("no argument to watch?\r\n");
3728 return SCPE_ARG;
3729 }
3730 sim_msg ("Clearing all watch points\r\n");
3731 (void)memset (& watch_bits, 0, sizeof (watch_bits));
3732 return SCPE_OK;
3733 }
3734 char * end;
3735 long int n = strtol (buf, & end, 0);
3736 if (* end || n < 0 || n >= MEMSIZE)
3737 {
3738 sim_warn ("Invalid argument to watch? %ld\r\n", (long) n);
3739 return SCPE_ARG;
3740 }
3741 watch_bits [n] = arg != 0;
3742 return SCPE_OK;
3743 }
3744 #endif
3745
3746
3747
3748
3749
3750 #if !defined(SPEED)
3751 static void nem_check (word24 addr, const char * context)
3752 {
3753 cpu_state_t * cpup = _cpup;
3754 if (lookup_cpu_mem_map (cpup, addr) < 0)
3755 {
3756 doFault (FAULT_STR, fst_str_nea, context);
3757 }
3758 }
3759 #endif
3760
3761
3762
3763
3764
3765
3766
3767
3768
3769
3770
3771
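// core_read/core_write and friends access simulated main memory. LOCKLESS
// builds use acquire loads and a per-word lock with release stores; other
// builds access M[] directly. Both flavors honor SC_MAP_ADDR port mapping
// and the memory watch bits.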
3772 #if !defined(SPEED) || !defined(INLINE_CORE)
3773 int core_read (cpu_state_t * cpup, word24 addr, word36 *data, const char * ctx)
3774 {
3775 PNL (cpu.portBusy = true;)
3776 SC_MAP_ADDR (addr, addr);
3777 # if !defined(LOCKLESS)
3778 if (M[addr] & MEM_UNINITIALIZED)
3779 {
3780 sim_debug (DBG_WARN, & cpu_dev,
3781 "Uninitialized memory accessed at address %08o; "
3782 "IC is 0%06o:0%06o (%s(\r\n",
3783 addr, cpu.PPR.PSR, cpu.PPR.IC, ctx);
3784 }
3785 # endif
3786 # if !defined(SPEED)
3787 if (watch_bits [addr])
3788 {
3789 sim_msg ("WATCH [%llu] %05o:%06o read %08o %012llo (%s)\r\n",
3790 (long long unsigned int)cpu.cycleCnt, cpu.PPR.PSR, cpu.PPR.IC, addr,
3791 (long long unsigned int)M [addr], ctx);
3792 traceInstruction (0);
3793 }
3794 # endif
3795 # if defined(LOCKLESS)
3796 # if !defined(SUNLINT)
3797 word36 v;
3798 LOAD_ACQ_CORE_WORD(v, addr);
3799 *data = v & DMASK;
3800 # endif
3801 # else
3802 *data = M[addr] & DMASK;
3803 # endif
3804
3805 DO_WORK_MEM;
3806 sim_debug (DBG_CORE, & cpu_dev,
3807 "core_read %08o %012"PRIo64" (%s)\r\n",
3808 addr, * data, ctx);
3809 PNL (trackport (addr, * data));
3810 return 0;
3811 }
3812 #endif
3813
3814 #if defined(LOCKLESS)
3815 int core_read_lock (cpu_state_t * cpup, word24 addr, word36 *data, UNUSED const char * ctx)
3816 {
3817 SC_MAP_ADDR (addr, addr);
3818 LOCK_CORE_WORD(addr, & cpu.coreLockState);
3819 if (cpu.coreLockState.locked_addr != 0) {
3820 sim_warn ("core_read_lock: locked %08o locked_addr %08o %c %05o:%06o\r\n",
3821 addr, cpu.coreLockState.locked_addr, current_running_cpu_idx + 'A',
3822 cpu.PPR.PSR, cpu.PPR.IC);
3823 core_unlock_all (cpup);
3824 }
3825 cpu.coreLockState.locked_addr = addr;
3826 # if !defined(SUNLINT)
3827 word36 v;
3828 LOAD_ACQ_CORE_WORD(v, addr);
3829 * data = v & DMASK;
3830 # endif
3831 return 0;
3832 }
3833 #endif
3834
3835 #if !defined(SPEED) || !defined(INLINE_CORE)
3836 int core_write (cpu_state_t * cpup, word24 addr, word36 data, const char * ctx)
3837 {
3838 PNL (cpu.portBusy = true;)
3839 SC_MAP_ADDR (addr, addr);
3840 if (cpu.tweaks.isolts_mode)
3841 {
3842 if (cpu.MR.sdpap)
3843 {
3844 sim_warn ("failing to implement sdpap\r\n");
3845 cpu.MR.sdpap = 0;
3846 }
3847 if (cpu.MR.separ)
3848 {
3849 sim_warn ("failing to implement separ\r\n");
3850 cpu.MR.separ = 0;
3851 }
3852 }
3853 # if defined(LOCKLESS)
3854 LOCK_CORE_WORD(addr, & cpu.coreLockState);
3855 # if !defined(SUNLINT)
3856 STORE_REL_CORE_WORD(addr, data);
3857 # endif
3858 # else
3859 M[addr] = data & DMASK;
3860 # endif
3861 # if !defined(SPEED)
3862 if (watch_bits [addr])
3863 {
3864 sim_msg ("WATCH [%llu] %05o:%06o write %08llo %012llo (%s)\r\n",
3865 (long long unsigned int)cpu.cycleCnt, cpu.PPR.PSR, cpu.PPR.IC,
3866 (long long unsigned int)addr, (unsigned long long int)M [addr], ctx);
3867 traceInstruction (0);
3868 }
3869 # endif
3870 DO_WORK_MEM;
3871 sim_debug (DBG_CORE, & cpu_dev,
3872 "core_write %08o %012"PRIo64" (%s)\r\n",
3873 addr, data, ctx);
3874 PNL (trackport (addr, data));
3875 return 0;
3876 }
3877 #endif
3878
3879 #if defined(LOCKLESS)
3880 int core_write_unlock (cpu_state_t * cpup, word24 addr, word36 data, UNUSED const char * ctx)
3881 {
3882 SC_MAP_ADDR (addr, addr);
3883 if (cpu.coreLockState.locked_addr != addr)
3884 {
3885 sim_warn ("core_write_unlock: locked %08o locked_addr %08o %c %05o:%06o\r\n",
3886 addr, cpu.coreLockState.locked_addr, current_running_cpu_idx + 'A',
3887 cpu.PPR.PSR, cpu.PPR.IC);
3888 core_unlock_all (cpup);
3889 }
3890
3891 # if !defined(SUNLINT)
3892 STORE_REL_CORE_WORD(addr, data);
3893 # endif
3894 cpu.coreLockState.locked_addr = 0;
3895 return 0;
3896 }
3897
3898 int core_unlock_all (cpu_state_t * cpup)
3899 {
3900 if (cpu.coreLockState.locked_addr != 0) {
3901 sim_warn ("core_unlock_all: locked %08o %c %05o:%06o\r\n",
3902 cpu.coreLockState.locked_addr, current_running_cpu_idx + 'A',
3903 cpu.PPR.PSR, cpu.PPR.IC);
3904 # if !defined(SUNLINT)
3905 STORE_REL_CORE_WORD(cpu.coreLockState.locked_addr, M[cpu.coreLockState.locked_addr]);
3906 # endif
3907 cpu.coreLockState.locked_addr = 0;
3908 }
3909 return 0;
3910 }
3911 #endif
3912
3913 #if !defined(SPEED) || !defined(INLINE_CORE)
3914 int core_write_zone (cpu_state_t * cpup, word24 addr, word36 data, const char * ctx)
3915 {
3916 PNL (cpu.portBusy = true;)
3917 if (cpu.tweaks.isolts_mode)
3918 {
3919 if (cpu.MR.sdpap)
3920 {
3921 sim_warn ("failing to implement sdpap\r\n");
3922 cpu.MR.sdpap = 0;
3923 }
3924 if (cpu.MR.separ)
3925 {
3926 sim_warn ("failing to implement separ\r\n");
3927 cpu.MR.separ = 0;
3928 }
3929 }
3930 word24 mapAddr = 0;
3931 SC_MAP_ADDR (addr, mapAddr);
3932 # if defined(LOCKLESS)
3933 word36 v;
3934 core_read_lock(cpup, addr, &v, ctx);
3935 v = (v & ~cpu.zone) | (data & cpu.zone);
3936 core_write_unlock(cpup, addr, v, ctx);
3937 # else
3938 M[mapAddr] = (M[mapAddr] & ~cpu.zone) | (data & cpu.zone);
3939 # endif
3940 cpu.useZone = false;
3941 # if !defined(SPEED)
3942 if (watch_bits [mapAddr])
3943 {
3944 sim_msg ("WATCH [%llu] %05o:%06o writez %08llo %012llo (%s)\r\n",
3945 (unsigned long long int)cpu.cycleCnt, cpu.PPR.PSR, cpu.PPR.IC,
3946 (unsigned long long int)mapAddr, (unsigned long long int)M [mapAddr], ctx);
3947 traceInstruction (0);
3948 }
3949 # endif
3950 DO_WORK_MEM;
3951 sim_debug (DBG_CORE, & cpu_dev,
3952 "core_write_zone %08o %012"PRIo64" (%s)\r\n",
3953 mapAddr, data, ctx);
3954 PNL (trackport (mapAddr, data));
3955 return 0;
3956 }
3957 #endif
3958
3959 #if !defined(SPEED) || !defined(INLINE_CORE)
3960 int core_read2 (cpu_state_t * cpup, word24 addr, word36 *even, word36 *odd, const char * ctx)
3961 {
3962 PNL (cpu.portBusy = true;)
3963 # if defined(LOCKLESS)
3964
3965 word36 v;
3966 # endif
3967 if (addr & 1)
3968 {
3969 sim_debug (DBG_MSG, & cpu_dev,
3970 "warning: subtracting 1 from pair at %o in "
3971 "core_read2 (%s)\r\n", addr, ctx);
3972 addr &= (word24)~1;
3973 }
3974 SC_MAP_ADDR (addr, addr);
3975 # if !defined(LOCKLESS)
3976 if (M[addr] & MEM_UNINITIALIZED)
3977 {
3978 sim_debug (DBG_WARN, & cpu_dev,
3979 "Uninitialized memory accessed at address %08o; "
3980 "IC is 0%06o:0%06o (%s)\r\n",
3981 addr, cpu.PPR.PSR, cpu.PPR.IC, ctx);
3982 }
3983 # endif
3984 # if !defined(SPEED)
3985 if (watch_bits [addr])
3986 {
3987 sim_msg ("WATCH [%llu] %05o:%06o read2 %08llo %012llo (%s)\r\n",
3988 (unsigned long long int)cpu.cycleCnt, cpu.PPR.PSR, cpu.PPR.IC,
3989 (unsigned long long int)addr, (unsigned long long int)M [addr], ctx);
3990 traceInstruction (0);
3991 }
3992 # endif
3993 # if defined(LOCKLESS)
3994 # if !defined(SUNLINT)
3995 LOAD_ACQ_CORE_WORD(v, addr);
3996 if (v & MEM_LOCKED)
3997 sim_warn ("core_read2: even locked %08o locked_addr %08o %c %05o:%06o\r\n",
3998 addr, cpu.coreLockState.locked_addr, current_running_cpu_idx + 'A',
3999 cpu.PPR.PSR, cpu.PPR.IC);
4000 *even = v & DMASK;
4001 addr++;
4002 # endif
4003 # else
4004 *even = M[addr++] & DMASK;
4005 # endif
4006 sim_debug (DBG_CORE, & cpu_dev,
4007 "core_read2 %08o %012"PRIo64" (%s)\r\n",
4008 addr - 1, * even, ctx);
4009
4010
4011
4012 # if !defined(LOCKLESS)
4013 if (M[addr] & MEM_UNINITIALIZED)
4014 {
4015 sim_debug (DBG_WARN, & cpu_dev,
4016 "Uninitialized memory accessed at address %08o; "
4017 "IC is 0%06o:0%06o (%s)\r\n",
4018 addr, cpu.PPR.PSR, cpu.PPR.IC, ctx);
4019 }
4020 # endif
4021 # if !defined(SPEED)
4022 if (watch_bits [addr])
4023 {
4024 sim_msg ("WATCH [%llu] %05o:%06o read2 %08llo %012llo (%s)\r\n",
4025 (unsigned long long int)cpu.cycleCnt, cpu.PPR.PSR, cpu.PPR.IC,
4026 (unsigned long long int)addr, (unsigned long long int)M [addr], ctx);
4027 traceInstruction (0);
4028 }
4029 # endif
4030 # if defined(LOCKLESS)
4031 # if !defined(SUNLINT)
4032 LOAD_ACQ_CORE_WORD(v, addr);
4033 if (v & MEM_LOCKED)
4034 sim_warn ("core_read2: odd locked %08o locked_addr %08o %c %05o:%06o\r\n",
4035 addr, cpu.coreLockState.locked_addr, current_running_cpu_idx + 'A',
4036 cpu.PPR.PSR, cpu.PPR.IC);
4037 *odd = v & DMASK;
4038 # endif
4039 # else
4040 *odd = M[addr] & DMASK;
4041 # endif
4042 sim_debug (DBG_CORE, & cpu_dev,
4043 "core_read2 %08o %012"PRIo64" (%s)\r\n",
4044 addr, * odd, ctx);
4045 DO_WORK_MEM;
4046 PNL (trackport (addr - 1, * even));
4047 return 0;
4048 }
4049 #endif
4050
4051 #if !defined(SPEED) || !defined(INLINE_CORE)
4052 int core_write2 (cpu_state_t * cpup, word24 addr, word36 even, word36 odd, const char * ctx) {
4053 PNL (cpu.portBusy = true;)
4054 if (addr & 1) {
4055 sim_debug (DBG_MSG, & cpu_dev,
4056 "warning: subtracting 1 from pair at %o in core_write2 " "(%s)\r\n",
4057 addr, ctx);
4058 addr &= (word24)~1;
4059 }
4060 SC_MAP_ADDR (addr, addr);
4061 if (cpu.tweaks.isolts_mode) {
4062 if (cpu.MR.sdpap) {
4063 sim_warn ("failing to implement sdpap\r\n");
4064 cpu.MR.sdpap = 0;
4065 }
4066 if (cpu.MR.separ) {
4067 sim_warn ("failing to implement separ\r\n");
4068 cpu.MR.separ = 0;
4069 }
4070 }
4071
4072 # if !defined(SPEED)
4073 if (watch_bits [addr]) {
4074 sim_msg ("WATCH [%llu] %05o:%06o write2 %08llo %012llo (%s)\r\n",
4075 (unsigned long long int)cpu.cycleCnt, cpu.PPR.PSR, cpu.PPR.IC,
4076 (unsigned long long int)addr, (unsigned long long int)even, ctx);
4077 traceInstruction (0);
4078 }
4079 # endif
4080 # if defined(LOCKLESS)
4081 LOCK_CORE_WORD(addr, & cpu.coreLockState);
4082 # if !defined(SUNLINT)
4083 STORE_REL_CORE_WORD(addr, even);
4084 # endif
4085 addr++;
4086 # else
4087 M[addr++] = even & DMASK;
4088 # endif
4089 sim_debug (DBG_CORE, & cpu_dev, "core_write2 %08o %012llo (%s)\r\n", addr - 1,
4090 (long long unsigned int)even, ctx);
4091
4092
4093
4094
4095 # if !defined(SPEED)
4096 if (watch_bits [addr]) {
4097 sim_msg ("WATCH [%llu] %05o:%06o write2 %08llo %012llo (%s)\r\n",
4098 (long long unsigned int)cpu.cycleCnt, cpu.PPR.PSR, cpu.PPR.IC,
4099 (long long unsigned int)addr, (long long unsigned int)odd, ctx);
4100 traceInstruction (0);
4101 }
4102 # endif
4103 # if defined(LOCKLESS)
4104 LOCK_CORE_WORD(addr, & cpu.coreLockState);
4105 # if !defined(SUNLINT)
4106 STORE_REL_CORE_WORD(addr, odd);
4107 # endif
4108 # else
4109 M[addr] = odd & DMASK;
4110 # endif
4111 DO_WORK_MEM;
4112 PNL (trackport (addr - 1, even));
4113 sim_debug (DBG_CORE, & cpu_dev, "core_write2 %08o %012"PRIo64" (%s)\r\n", addr, odd, ctx);
4114 return 0;
4115 }
4116 #endif
4117
4118
4119
4120
4121
4122
4123
4124
4125
4126
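// decode_instruction: crack a 36-bit instruction word into the DCDstruct
// fields and look up its opcode table entry; multiword (EIS) instructions
// clear b29 and the tag here and zero the EIS decode scratchpad.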
4127 void decode_instruction (cpu_state_t * cpup, word36 inst, DCDstruct * p)
4128 {
4129 CPT (cpt1L, 17);
4130 (void)memset (p, 0, sizeof (DCDstruct));
4131
4132 p->opcode = GET_OP (inst);
4133 p->opcodeX = GET_OPX(inst);
4134 p->opcode10 = p->opcode | (p->opcodeX ? 01000 : 0);
4135 p->address = GET_ADDR (inst);
4136 p->b29 = GET_A (inst);
4137 p->i = GET_I (inst);
4138 p->tag = GET_TAG (inst);
4139
4140 p->info = get_iwb_info (p);
4141
4142 if (p->info->flags & IGN_B29)
4143 p->b29 = 0;
4144
4145 if (p->info->ndes > 0)
4146 {
4147 p->b29 = 0;
4148 p->tag = 0;
4149 if (p->info->ndes > 1)
4150 {
4151 (void)memset (& cpu.currentEISinstruction, 0,
4152 sizeof (cpu.currentEISinstruction));
4153 }
4154 }
4155 }
4156
4157
4158
4159
4160
4161
4162
4163
4164
4165
4166
4167
4168
4169
4170
4171
4172
4173
4174
4175
4176 int is_priv_mode (cpu_state_t * cpup)
4177 {
4178
4179
4180
4181 if (get_bar_mode (cpup))
4182 return 0;
4183
4184
4185 if (get_addr_mode (cpup) == ABSOLUTE_mode)
4186 return 1;
4187 else if (cpu.PPR.P)
4188 return 1;
4189
4190 return 0;
4191 }
4192
4193
4194
4195
4196
4197
4198
4199
4200 bool get_bar_mode (cpu_state_t * cpup)
4201 {
4202 return ! (cpu.secret_addressing_mode || TST_I_NBAR);
4203 }
4204
4205 addr_modes_e get_addr_mode (cpu_state_t * cpup)
4206 {
4207 if (cpu.secret_addressing_mode)
4208 return ABSOLUTE_mode;
4209
4210
4211
4212
4213
4214
4215
4216 if (TST_I_ABS)
4217 {
4218 return ABSOLUTE_mode;
4219 }
4220 else
4221 {
4222 return APPEND_mode;
4223 }
4224 }
4225
4226
4227
4228
4229
4230
4231
4232
4233 void set_addr_mode (cpu_state_t * cpup, addr_modes_e mode)
4234 {
4235
4236
4237
4238
4239
4240
4241
4242
4243
4244
4245 cpu.secret_addressing_mode = false;
4246 if (mode == ABSOLUTE_mode)
4247 {
4248 CPT (cpt1L, 22);
4249 sim_debug (DBG_DEBUG, & cpu_dev, "APU: Setting absolute mode.\r\n");
4250
4251 SET_I_ABS;
4252 cpu.PPR.P = 1;
4253 }
4254 else if (mode == APPEND_mode)
4255 {
4256 CPT (cpt1L, 23);
4257 if (! TST_I_ABS && TST_I_NBAR)
4258 sim_debug (DBG_DEBUG, & cpu_dev, "APU: Keeping append mode.\r\n");
4259 else
4260 sim_debug (DBG_DEBUG, & cpu_dev, "APU: Setting append mode.\r\n");
4261
4262 CLR_I_ABS;
4263 }
4264 else
4265 {
4266 sim_debug (DBG_ERR, & cpu_dev,
4267 "APU: Unable to determine address mode.\r\n");
4268 sim_warn ("APU: Unable to determine address mode. Can't happen!\r\n");
4269 }
4270 }
4271
4272
4273
4274
4275
4276
4277
4278
4279
4280
4281
4282
4283
4284
4285
4286
4287
4288
4289
4290
4291
4292
4293
4294
4295
4296
4297
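// get_BAR_address: in BAR mode, relocate addr by BASE << 9 and bound-check
// it against BOUND << 9. Illustrative example (not from the source): with
// BASE = 0100 and BOUND = 0020 (octal), addresses 0..017777 map onto
// 0100000..0117777, and anything at or above 020000 takes a store fault.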
4298 word18 get_BAR_address (cpu_state_t * cpup, word18 addr)
4299 {
4300 if (cpu.BAR.BOUND == 0)
4301
4302 doFault (FAULT_STR, fst_str_oob, "BAR store fault; out of bounds");
4303
4304
4305
4306
4307
4308
4309
4310
4311
4312
4313 if (addr >= (((word18) cpu.BAR.BOUND) << 9))
4314
4315 doFault (FAULT_STR, fst_str_oob, "BAR store fault; out of bounds");
4316
4317 word18 barAddr = (addr + (((word18) cpu.BAR.BASE) << 9)) & 0777777;
4318 return barAddr;
4319 }
4320
4321
4322
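// The history registers are kept as small ring buffers of two-word entries;
// history_cyclic[] is the per-set write index.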
4323 static void add_history (cpu_state_t * cpup, uint hset, word36 w0, word36 w1)
4324 {
4325
4326 {
4327 cpu.history [hset] [cpu.history_cyclic[hset]] [0] = w0;
4328 cpu.history [hset] [cpu.history_cyclic[hset]] [1] = w1;
4329 cpu.history_cyclic[hset] = (cpu.history_cyclic[hset] + 1) % N_MODEL_HIST_SIZE;
4330 }
4331 }
4332
4333 void add_history_force (cpu_state_t * cpup, uint hset, word36 w0, word36 w1)
4334 {
4335 cpu.history [hset] [cpu.history_cyclic[hset]] [0] = w0;
4336 cpu.history [hset] [cpu.history_cyclic[hset]] [1] = w1;
4337 cpu.history_cyclic[hset] = (cpu.history_cyclic[hset] + 1) % N_MODEL_HIST_SIZE;
4338 }
4339
4340 void add_dps8m_CU_history (cpu_state_t * cpup)
4341 {
4342 if (cpu.skip_cu_hist)
4343 return;
4344 if (! cpu.MR_cache.emr)
4345 return;
4346 if (! cpu.MR_cache.ihr)
4347 return;
4348 if (cpu.MR_cache.hrxfr && ! cpu.wasXfer)
4349 return;
4350
4351 word36 flags = 0;
4352 word5 proccmd = 0;
4353 word7 flags2 = 0;
4354 word36 w0 = 0, w1 = 0;
4355 w0 |= flags & 0777777000000;
4356 w0 |= IWB_IRODD & MASK18;
4357 w1 |= ((word36)(cpu.iefpFinalAddress & MASK24) << 12);
4358 w1 |= (proccmd & MASK5) << 7;
4359 w1 |= flags2 & 0176;
4360 add_history (cpup, CU_HIST_REG, w0, w1);
4361 }
4362
4363 #if !defined(QUIET_UNUSED)
4364 void add_dps8m_DU_OU_history (cpu_state_t * cpup, word36 flags, word18 ICT, word9 RS_REG, word9 flags2)
4365 {
4366 word36 w0 = flags, w1 = 0;
4367 w1 |= (ICT & MASK18) << 18;
4368 w1 |= (RS_REG & MASK9) << 9;
4369 w1 |= flags2 & MASK9;
4370 add_history (cpup, DPS8M_DU_OU_HIST_REG, w0, w1);
4371 }
4372
4373 void add_dps8m_APU_history (cpu_state_t * cpup, word15 ESN, word21 flags, word24 RMA, word3 RTRR, word9 flags2)
4374 {
4375 word36 w0 = 0, w1 = 0;
4376 w0 |= (ESN & MASK15) << 21;
4377 w0 |= flags & MASK21;
4378 w1 |= (RMA & MASK24) << 12;
4379 w1 |= (RTRR & MASK3) << 9;
4380 w1 |= flags2 & MASK9;
4381 add_history (cpup, cpu.tweaks.l68_mode ? L68_APU_HIST_REG : DPS8M_APU_HIST_REG, w0, w1);
4382 }
4383
4384 void add_dps8m_EAPU_history (word18 ZCA, word18 opcode)
4385 {
4386 word36 w0 = 0;
4387 w0 |= (ZCA & MASK18) << 18;
4388 w0 |= opcode & MASK18;
4389 add_history (_cpup, DPS8M_EAPU_HIST_REG, w0, 0); // use the thread's CPU state, as elsewhere in this file
4390
4391
4392
4393
4394 }
4395 #endif
4396
4397
4398
4399
4400
4401
4402
4403
4404
4405
4406
4407
4408
4409
4410
4411
4412
4413
4414
4415
4416
4417
4418
4419
4420
4421
4422
4423
4424
4425
4426
4427
4428
4429
4430
4431 void add_l68_CU_history (cpu_state_t * cpup)
4432 {
4433 CPT (cpt1L, 24);
4434
4435 if (cpu.skip_cu_hist)
4436 return;
4437 if (! cpu.MR_cache.emr)
4438 return;
4439 if (! cpu.MR_cache.ihr)
4440 return;
4441
4442 word36 w0 = 0, w1 = 0;
4443
4444
4445
4446
4447
4448
4449
4450
4451
4452 PNL (putbits36_8 (& w0, 0, cpu.prepare_state);)
4453
4454 putbits36_1 (& w0, 8, cpu.wasXfer);
4455
4456 putbits36_1 (& w0, 9, cpu.cu.xde);
4457
4458 putbits36_1 (& w0, 10, cpu.cu.xdo);
4459
4460 putbits36_1 (& w0, 11, USE_IRODD?1:0);
4461
4462 putbits36_1 (& w0, 12, cpu.cu.rpt);
4463
4464
4465 PNL (putbits36_1 (& w0, 14, cpu.AR_F_E);)
4466
4467 putbits36_1 (& w0, 15, cpu.cycle != INTERRUPT_cycle?1:0);
4468
4469 putbits36_1 (& w0, 16, cpu.cycle != FAULT_cycle?1:0);
4470
4471 putbits36_1 (& w0, 17, TSTF (cpu.cu.IR, I_NBAR)?1:0);
4472
4473 putbits36_18 (& w0, 18, (word18) (IWB_IRODD & MASK18));
4474
4475
4476 putbits36_18 (& w1, 0, cpu.TPR.CA);
4477
4478
4479 PNL (putbits36_1 (& w1, 59-36, (cpu.portSelect == 0)?1:0);)
4480 PNL (putbits36_1 (& w1, 60-36, (cpu.portSelect == 1)?1:0);)
4481 PNL (putbits36_1 (& w1, 61-36, (cpu.portSelect == 2)?1:0);)
4482 PNL (putbits36_1 (& w1, 62-36, (cpu.portSelect == 3)?1:0);)
4483
4484 putbits36_1 (& w1, 63-36, cpu.interrupt_flag?1:0);
4485
4486 PNL (putbits36_1 (& w1, 64-36, cpu.INS_FETCH?1:0);)
4487
4488
4489
4490
4491
4492
4493
4494
4495 add_history (cpup, CU_HIST_REG, w0, w1);
4496
4497
4498 CPTUR (cptUseMR);
4499 if (cpu.MR.hrhlt && cpu.history_cyclic[CU_HIST_REG] == 0)
4500 {
4501
4502 if (cpu.MR.ihrrs)
4503 {
4504 cpu.MR.ihr = 0;
4505 }
4506 set_FFV_fault (cpup, 4);
4507 return;
4508 }
4509 }
4510
4511
4512
4513
4514
4515
4516
4517
4518
4519
4520
4521
4522
4523
4524
4525
4526
4527
4528
4529
4530
4531
4532
4533
4534
4535
4536
4537
4538
4539
4540
4541
4542
4543
4544
4545
4546
4547
4548
4549 void add_l68_DU_history (cpu_state_t * cpup)
4550 {
4551 CPT (cpt1L, 25);
4552 PNL (add_history (cpup, L68_DU_HIST_REG, cpu.du.cycle1, cpu.du.cycle2);)
4553 }
4554
4555 void add_l68_OU_history (cpu_state_t * cpup)
4556 {
4557 CPT (cpt1L, 26);
4558 word36 w0 = 0, w1 = 0;
4559
4560
4561
4562 PNL (putbits36_9 (& w0, 0, cpu.ou.RS);)
4563
4564
4565 putbits36_1 (& w0, 9, cpu.ou.characterOperandSize ? 1 : 0);
4566
4567
4568 putbits36_3 (& w0, 10, cpu.ou.characterOperandOffset);
4569
4570
4571 putbits36_1 (& w0, 13, cpu.ou.crflag);
4572
4573
4574 putbits36_1 (& w0, 14, cpu.ou.directOperandFlag ? 1 : 0);
4575
4576
4577 putbits36_2 (& w0, 15, cpu.ou.eac);
4578
4579
4580
4581 PNL (putbits36_9 (& w0, 18, cpu.ou.RS);)
4582
4583
4584 putbits36_1 (& w0, 27, cpu.ou.RB1_FULL);
4585
4586
4587 putbits36_1 (& w0, 28, cpu.ou.RP_FULL);
4588
4589
4590 putbits36_1 (& w0, 29, cpu.ou.RS_FULL);
4591
4592
4593 putbits36_6 (& w0, 30, (word6) (cpu.ou.cycle >> 3));
4594
4595
4596 putbits36_3 (& w1, 36-36, (word3) cpu.ou.cycle);
4597
4598
4599 putbits36_1 (& w1, 39-36, cpu.ou.STR_OP);
4600
4601
4602
4603
4604 PNL (putbits36_10 (& w1, 41-36,
4605 (word10) ~opcodes10 [cpu.ou.RS].reg_use);)
4606
4607
4608
4609
4610 putbits36_18 (& w1, 54 - 36, cpu.PPR.IC);
4611
4612 add_history (cpup, L68_OU_HIST_REG, w0, w1);
4613 }
4614
4615
4616
4617
4618
4619
4620
4621
4622
4623
4624
4625
4626
4627
4628
4629
4630
4631
4632
4633
4634
4635
4636
4637
4638
4639
4640
4641
4642
4643
4644
4645
4646
4647
4648
4649
4650
4651
4652
4653
4654
4655
4656
4657
4658
4659
4660
4661
4662
4663
4664 void add_l68_APU_history (cpu_state_t * cpup, enum APUH_e op)
4665 {
4666 CPT (cpt1L, 28);
4667 word36 w0 = 0, w1 = 0;
4668
4669 w0 = op;
4670
4671
4672 putbits36_15 (& w0, 0, cpu.TPR.TSR);
4673
4674 PNL (putbits36_1 (& w0, 15, (cpu.apu.state & apu_ESN_SNR) ? 1 : 0);)
4675 PNL (putbits36_1 (& w0, 16, (cpu.apu.state & apu_ESN_TSR) ? 1 : 0);)
4676
4677 putbits36_1 (& w0, 25, cpu.cu.SDWAMM);
4678
4679 putbits36_4 (& w0, 26, (word4) cpu.SDWAMR);
4680
4681 putbits36_1 (& w0, 30, cpu.cu.PTWAMM);
4682
4683 putbits36_4 (& w0, 31, (word4) cpu.PTWAMR);
4684
4685 PNL (putbits36_1 (& w0, 35, (cpu.apu.state & apu_FLT) ? 1 : 0);)
4686
4687
4688 PNL (putbits36_24 (& w1, 0, cpu.APUMemAddr);)
4689
4690 putbits36_3 (& w1, 24, cpu.TPR.TRR);
4691
4692
4693 putbits36_1 (& w1, 34, cpu.SDW0.C);
4694
4695
4696 add_history (cpup, L68_APU_HIST_REG, w0, w1);
4697 }
4698
4699 #if defined(THREADZ) || defined(LOCKLESS)
4700
4701
4702 static const char * get_dbg_verb (uint32 dbits, DEVICE * dptr)
4703 {
4704 static const char * debtab_none = "DEBTAB_ISNULL";
4705 static const char * debtab_nomatch = "DEBTAB_NOMATCH";
4706 const char * some_match = NULL;
4707 int32 offset = 0;
4708
4709 if (dptr->debflags == 0)
4710 return debtab_none;
4711
4712 dbits &= dptr->dctrl;
4713
4714
4715 while ((offset < 32) && dptr->debflags[offset].name)
4716 {
4717 if (dptr->debflags[offset].mask == dbits)
4718 return dptr->debflags[offset].name;
4719 if (dptr->debflags[offset].mask & dbits)
4720 some_match = dptr->debflags[offset].name;
4721 offset ++;
4722 }
4723 return some_match ? some_match : debtab_nomatch;
4724 }
4725
4726 void dps8_sim_debug (uint32 dbits, DEVICE * dptr, unsigned long long cnt, const char* fmt, ...)
4727 {
4728
4729 if (sim_deb && dptr && (dptr->dctrl & dbits))
4730 {
4731 const char * debug_type = get_dbg_verb (dbits, dptr);
4732 char stackbuf[STACKBUFSIZE];
4733 int32 bufsize = sizeof (stackbuf);
4734 char * buf = stackbuf;
4735 va_list arglist;
4736 int32 i, j, len;
4737 struct timespec t;
4738 clock_gettime(CLOCK_REALTIME, &t);
4739
4740 buf [bufsize-1] = '\0';
4741
4742 while (1)
4743 {
4744 va_start (arglist, fmt);
4745 len = vsnprintf (buf, (int)((unsigned long)(bufsize)-1), fmt, arglist);
4746 va_end (arglist);
4747
4748
4749
4750 if ((len < 0) || (len >= bufsize-1))
4751 {
4752 if (buf != stackbuf)
4753 FREE (buf);
4754 if (bufsize >= (INT_MAX / 2))
4755 return;
4756 bufsize = bufsize * 2;
4757 if (bufsize < len + 2)
4758 bufsize = len + 2;
4759 buf = (char *) malloc ((unsigned long) bufsize);
4760 if (buf == NULL)
4761 return;
4762 buf[bufsize-1] = '\0';
4763 continue;
4764 }
4765 break;
4766 }
4767
4768
4769
4770 for (i = j = 0; i < len; ++i)
4771 {
4772 if ('\n' == buf[i])
4773 {
4774 if (i >= j)
4775 {
4776 if ((i != j) || (i == 0))
4777 {
4778 (void)fprintf (sim_deb, "%lld.%06ld: DBG(%lld) %o: %s %s %.*s\r\n",
4779 (long long)t.tv_sec, t.tv_nsec/1000, cnt,
4780 current_running_cpu_idx, dptr->name, debug_type, i-j, &buf[j]);
4781 }
4782 }
4783 j = i + 1;
4784 }
4785 }
4786
4787
4788 if (buf != stackbuf)
4789 FREE (buf);
4790 }
4791
4792 }
4793 #endif
4794
4795 void setupPROM (uint cpuNo, unsigned char * PROM) {
4796
4797
4798
4799
4800
4801
4802
4803
4804
4805
4806
4807
4808
4809
4810
4811
4812
4813
4814
4815
4816
4817
4818
4819
4820
4821
4822
4823
4824
4825
4826
4827
4828
4829
4830
4831
4832
4833
4834
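// Build the 1 KiB configuration PROM image: the model string, serial
// number, ship date, and packed RSW 2 word go at the low octal offsets, and
// simulator version data at the decimal offsets used by the BURN macros
// below.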
4835 word36 rsw2 = 0;
4836
4837
4838
4839
4840
4841
4842
4843 putbits36_4 (& rsw2, 0, 0);
4844
4845 putbits36_2 (& rsw2, 4, 001);
4846
4847 putbits36_7 (& rsw2, 6, 2);
4848
4849 putbits36_1 (& rsw2, 13, 1);
4850
4851 putbits36_5 (& rsw2, 14, 0);
4852
4853 putbits36_1 (& rsw2, 19, 1);
4854
4855 putbits36_1 (& rsw2, 20, cpus[cpuNo].options.cache_installed ? 1 : 0);
4856
4857 putbits36_2 (& rsw2, 21, 0);
4858
4859 putbits36_1 (& rsw2, 23, 1);
4860
4861 putbits36_1 (& rsw2, 24, 0);
4862
4863 putbits36_4 (& rsw2, 25, 0);
4864
4865 putbits36_4 (& rsw2, 29, cpus[cpuNo].options.proc_speed & 017LL);
4866
4867 putbits36_3 (& rsw2, 33, cpus[cpuNo].switches.cpu_num & 07LL);
4868
4869 word4 rsw2Ext = 0;
4870 if (cpus[cpuNo].options.hex_mode_installed)
4871 rsw2Ext |= 010;
4872 if (cpus[cpuNo].options.clock_slave_installed)
4873 rsw2Ext |= 004;
4874
4875
4876 char serial[12];
4877 (void)sprintf (serial, "%-11u", cpus[cpuNo].switches.serno);
4878
4879 #if defined(VER_H_PROM_SHIP)
4880 char * ship = VER_H_PROM_SHIP;
4881 #else
4882 char * ship = "200101";
4883 #endif
4884
4885 #if !defined(VER_H_PROM_MAJOR_VER)
4886 # define VER_H_PROM_MAJOR_VER "999"
4887 #endif
4888
4889 #if !defined(VER_H_PROM_MINOR_VER)
4890 # define VER_H_PROM_MINOR_VER "999"
4891 #endif
4892
4893 #if !defined(VER_H_PROM_PATCH_VER)
4894 # define VER_H_PROM_PATCH_VER "999"
4895 #endif
4896
4897 #if !defined(VER_H_PROM_OTHER_VER)
4898 # define VER_H_PROM_OTHER_VER "999"
4899 #endif
4900
4901 #if !defined(VER_H_GIT_RELT)
4902 # define VER_H_GIT_RELT "X"
4903 #endif
4904
4905 #if !defined(VER_H_PROM_VER_TEXT)
4906 # define VER_H_PROM_VER_TEXT "Unknown "
4907 #endif
4908
4909 #if defined(BUILD_PROM_OSA_TEXT)
4910 # define BURN_PROM_OSA_TEXT BUILD_PROM_OSA_TEXT
4911 #else
4912 # if !defined(VER_H_PROM_OSA_TEXT)
4913 # define BURN_PROM_OSA_TEXT "Unknown Build Op Sys"
4914 # else
4915 # define BURN_PROM_OSA_TEXT VER_H_PROM_OSA_TEXT
4916 # endif
4917 #endif
4918
4919 #if defined(BUILD_PROM_OSV_TEXT)
4920 # define BURN_PROM_OSV_TEXT BUILD_PROM_OSV_TEXT
4921 #else
4922 # if !defined(VER_H_PROM_OSV_TEXT)
4923 # define BURN_PROM_OSV_TEXT "Unknown Build Arch. "
4924 # else
4925 # define BURN_PROM_OSV_TEXT VER_H_PROM_OSV_TEXT
4926 # endif
4927 #endif
4928
4929 #if defined(BUILD_PROM_TSA_TEXT)
4930 # define BURN_PROM_TSA_TEXT BUILD_PROM_TSA_TEXT
4931 #else
4932 # if defined(_M_X64) || defined(_M_AMD64) || defined(__amd64__) || defined(__x86_64__) || defined(__AMD64)
4933 # define VER_H_PROM_TSA_TEXT "Intel x86_64 (AMD64)"
4934 # elif defined(_M_IX86) || defined(__i386) || defined(__i486) || defined(__i586) || defined(__i686) || defined(__ix86)
4935 # define VER_H_PROM_TSA_TEXT "Intel ix86 (32-bit) "
4936 # elif defined(_M_ARM64) || defined(__aarch64__) || defined(__arm64__)
4937 # define VER_H_PROM_TSA_TEXT "AArch64/ARM64/64-bit"
4938 # elif defined(_M_ARM) || defined(__arm__)
4939 # define VER_H_PROM_TSA_TEXT "AArch32/ARM32/32-bit"
4940 # elif defined(__ia64__) || defined(_M_IA64) || defined(__itanium__)
4941 # define VER_H_PROM_TSA_TEXT "Intel Itanium (IA64)"
4942 # elif defined(__ppc64__) || defined(__PPC64__) || defined(__ppc64le__) || defined(__PPC64LE__) || defined(__powerpc64__) || \
4943 defined(__POWERPC64__) || \
4944 defined(_M_PPC64) || \
4945 defined(__PPC64) || \
4946 defined(_ARCH_PPC64)
4947 # define VER_H_PROM_TSA_TEXT "Power ISA (64-bit) "
4948 # elif defined(__ppc__) || defined(__PPC__) || defined(__powerpc__) || defined(__POWERPC__) || defined(_M_PPC) || \
4949 defined(__PPC) || \
4950 defined(__ppc32__) || \
4951 defined(__PPC32__) || \
4952 defined(__powerpc32__) || \
4953 defined(__POWERPC32__) || \
4954 defined(_M_PPC32) || \
4955 defined(__PPC32)
4956 # define VER_H_PROM_TSA_TEXT "PowerPC ISA (32-bit)"
4957 # elif defined(__s390x__)
4958 # define VER_H_PROM_TSA_TEXT "IBM z/Architecture "
4959 # elif defined(__s390__)
4960 # define VER_H_PROM_TSA_TEXT "IBM ESA System/390 "
4961 # elif defined(__J2__) || defined(__J2P__) || defined(__j2__) || defined(__j2p__)
4962 # define VER_H_PROM_TSA_TEXT "J-Core J2 Open CPU "
4963 # elif defined(__SH4__) || defined(__sh4__) || defined(__SH4) || defined(__sh4)
4964 # define VER_H_PROM_TSA_TEXT "Hitachi/Renesas SH-4"
4965 # elif defined(__SH2__) || defined(__sh2__) || defined(__SH2) || defined(__sh2)
4966 # define VER_H_PROM_TSA_TEXT "Hitachi/Renesas SH-2"
4967 # elif defined(__alpha__)
4968 # define VER_H_PROM_TSA_TEXT "Alpha AXP "
4969 # elif defined(__hppa__) || defined(__HPPA__) || defined(__PARISC__) || defined(__parisc__)
4970 # define VER_H_PROM_TSA_TEXT "HP PA-RISC "
4971 # elif defined(__ICE9__) || defined(__ice9__) || defined(__ICE9) || defined(__ice9)
4972 # define VER_H_PROM_TSA_TEXT "SiCortex ICE-9 "
4973 # elif defined(mips64) || defined(__mips64__) || defined(MIPS64) || defined(_MIPS64_) || defined(__mips64)
4974 # define VER_H_PROM_TSA_TEXT "MIPS64 "
4975 # elif defined(mips) || defined(__mips__) || defined(MIPS) || defined(_MIPS_) || defined(__mips)
4976 # define VER_H_PROM_TSA_TEXT "MIPS "
4977 # elif defined(__OpenRISC__) || defined(__OPENRISC__) || defined(__openrisc__) || defined(__OR1K__) || defined(__OPENRISC1K__)
4978 # define VER_H_PROM_TSA_TEXT "OpenRISC "
4979 # elif defined(__sparc64) || defined(__SPARC64) || defined(__SPARC64__) || defined(__sparc64__)
4980 # define VER_H_PROM_TSA_TEXT "SPARC64 "
4981 # elif defined(__sparc) || defined(__SPARC) || defined(__SPARC__) || defined(__sparc__)
4982 # define VER_H_PROM_TSA_TEXT "SPARC "
4983 # elif defined(__riscv) || defined(__riscv__)
4984 # define VER_H_PROM_TSA_TEXT "RISC-V "
4985 # elif defined(__e2k__) || defined(__E2K__) || defined(__elbrus64__) || defined(__elbrus__) || defined(__ELBRUS__) || defined(__e2k64__)
4986 # if defined(__iset__)
4987 # if __iset__ > 0
4988 # if __iset__ == 1
4989 # define VER_H_PROM_TSA_TEXT "MCST Elbrus v1 "
4990 # elif __iset__ == 2
4991 # define VER_H_PROM_TSA_TEXT "MCST Elbrus v2 "
4992 # elif __iset__ == 3
4993 # define VER_H_PROM_TSA_TEXT "MCST Elbrus v3 "
4994 # elif __iset__ == 4
4995 # define VER_H_PROM_TSA_TEXT "MCST Elbrus v4 "
4996 # elif __iset__ == 5
4997 # define VER_H_PROM_TSA_TEXT "MCST Elbrus v5 "
4998 # elif __iset__ == 6
4999 # define VER_H_PROM_TSA_TEXT "MCST Elbrus v6 "
5000 # elif __iset__ == 7
5001 # define VER_H_PROM_TSA_TEXT "MCST Elbrus v7 "
5002 # elif __iset__ == 8
5003 # define VER_H_PROM_TSA_TEXT "MCST Elbrus v8 "
5004 # elif __iset__ == 9
5005 # define VER_H_PROM_TSA_TEXT "MCST Elbrus v9 "
5006 # elif __iset__ == 10
5007 # define VER_H_PROM_TSA_TEXT "MCST Elbrus v10 "
5008 # else
5009 # define VER_H_PROM_TSA_TEXT "MCST Elbrus "
5010 # endif
5011 # else
5012 # define VER_H_PROM_TSA_TEXT "MCST Elbrus "
5013 # endif
5014 # else
5015 # define VER_H_PROM_TSA_TEXT "MCST Elbrus "
5016 # endif
5017 # elif defined(__myriad2__)
5018 # define VER_H_PROM_TSA_TEXT "Myriad2 "
5019 # elif defined(__loongarch64) || defined(__loongarch__)
5020 # define VER_H_PROM_TSA_TEXT "LoongArch "
5021 # elif defined(_m68851) || defined(__m68k__) || defined(__m68000__) || defined(__M68K)
5022 # define VER_H_PROM_TSA_TEXT "Motorola m68k "
5023 # elif defined(__m88k__) || defined(__m88000__) || defined(__M88K)
5024 # define VER_H_PROM_TSA_TEXT "Motorola m88k "
5025 # elif defined(__VAX__) || defined(__vax__)
5026 # define VER_H_PROM_TSA_TEXT "VAX "
5027 # elif defined(__NIOS2__) || defined(__nios2__)
5028 # define VER_H_PROM_TSA_TEXT "Altera Nios II "
5029 # elif defined(__MICROBLAZE__) || defined(__microblaze__)
5030 # define VER_H_PROM_TSA_TEXT "Xilinx MicroBlaze "
5031 # elif defined(__kvx__) || defined(__KVX__) || defined(__KVX_64__)
5032 # define VER_H_PROM_TSA_TEXT "Kalray KVX "
5033 # endif
5034 # if !defined(VER_H_PROM_TSA_TEXT)
5035 # define BURN_PROM_TSA_TEXT "Unknown Target Arch."
5036 # else
5037 # define BURN_PROM_TSA_TEXT VER_H_PROM_TSA_TEXT
5038 # endif
5039 #endif
5040
5041 #if (defined(__WIN__) || defined(_WIN32) || defined(IS_WINDOWS) || defined(_MSC_VER) || defined(__MINGW32__) || \
5042 defined(__MINGW64__) || defined(CROSS_MINGW32) || defined(CROSS_MINGW64)) && !defined(__CYGWIN__)
5043 # define DC_IS_WINDOWS 1
5044 #else
5045 # define DC_IS_WINDOWS 0
5046 #endif
5047
5048 #if defined(BUILD_PROM_TSV_TEXT)
5049 # define BURN_PROM_TSV_TEXT BUILD_PROM_TSV_TEXT
5050 #else
5051 # if DC_IS_WINDOWS
5052 # define VER_H_PROM_TSV_TEXT "Microsoft Windows "
5053 # elif defined(__CYGWIN__)
5054 # define VER_H_PROM_TSV_TEXT "Windows/Cygwin "
5055 # elif (defined(__sunos) || defined(__sun) || defined(__sun__)) && (defined(SYSV) || defined(__SVR4) || defined(__SVR4__) || \
5056 defined(__svr4__))
5057 # if defined(__illumos__)
5058 # define VER_H_PROM_TSV_TEXT "illumos "
5059 # else
5060 # define VER_H_PROM_TSV_TEXT "Solaris "
5061 # endif
5062 # elif defined(__APPLE__) && defined(__MACH__)
5063 # define VER_H_PROM_TSV_TEXT "Apple macOS "
5064 # elif defined(__GNU__) && !defined(__linux__)
5065 # define VER_H_PROM_TSV_TEXT "GNU/Hurd "
5066 # elif defined(__ANDROID__) && defined(__ANDROID_API__)
5067 # if defined(__linux__)
5068 # define VER_H_PROM_TSV_TEXT "Android/Linux "
5069 # else
5070 # define VER_H_PROM_TSV_TEXT "Android "
5071 # endif
5072 # elif defined(__lynxOS__) || defined(__LYNXOS__) || defined(LynxOS) || defined(LYNXOS)
5073 # define VER_H_PROM_TSV_TEXT "LynxOS "
5074 # elif defined(__HELENOS__)
5075 # define VER_H_PROM_TSV_TEXT "HelenOS "
5076 # elif defined(__linux__)
5077 # if defined(__BIONIC__)
5078 # define VER_H_PROM_TSV_TEXT "Linux/Bionic-libc   "
5079 # elif defined(__UCLIBC__) || defined(UCLIBC)
5080 # define VER_H_PROM_TSV_TEXT "Linux/uClibc        "
5081 # elif defined(__NEWLIB__)
5082 # define VER_H_PROM_TSV_TEXT "Linux/Newlib        "
5083 # elif defined(__dietlibc__)
5084 # define VER_H_PROM_TSV_TEXT "Linux/Diet-libc     "
5085 # elif defined(__GLIBC__)
5086 # define VER_H_PROM_TSV_TEXT "GNU/Linux           "
5087 # else
5088 # define VER_H_PROM_TSV_TEXT "Linux               "
5089 # endif
5090 # elif defined(__HAIKU__)
5091 # define VER_H_PROM_TSV_TEXT "Haiku               "
5092 # elif defined(__serenity__)
5093 # define VER_H_PROM_TSV_TEXT "SerenityOS          "
5094 # elif defined(__FreeBSD__)
5095 # define VER_H_PROM_TSV_TEXT "FreeBSD             "
5096 # elif defined(__NetBSD__)
5097 # define VER_H_PROM_TSV_TEXT "NetBSD              "
5098 # elif defined(__OpenBSD__)
5099 # define VER_H_PROM_TSV_TEXT "OpenBSD             "
5100 # elif defined(__DragonFly__)
5101 # define VER_H_PROM_TSV_TEXT "DragonFly BSD       "
5102 # elif defined(_AIX)
5103 # if !defined(__PASE__)
5104 # define VER_H_PROM_TSV_TEXT "IBM AIX             "
5105 # else
5106 # define VER_H_PROM_TSV_TEXT "IBM OS/400 (PASE)   "
5107 # endif
5108 # elif defined(__VXWORKS__) || defined(__VXWORKS) || defined(__vxworks) || defined(__vxworks__) || defined(_VxWorks)
5109 # if !defined(__RTP__)
5110 # define VER_H_PROM_TSV_TEXT "VxWorks             "
5111 # else
5112 # define VER_H_PROM_TSV_TEXT "VxWorks RTP         "
5113 # endif
5114 # elif defined(__rtems__)
5115 # if defined(__FreeBSD_version)
5116 # define VER_H_PROM_TSV_TEXT "RTEMS/LibBSD        "
5117 # else
5118 # define VER_H_PROM_TSV_TEXT "RTEMS               "
5119 # endif
5120 # elif defined(__ZEPHYR__)
5121 # define VER_H_PROM_TSV_TEXT "Zephyr              "
5122 # elif defined(ti_sysbios_BIOS___VERS) || defined(ti_sysbios_BIOS__top__)
5123 # define VER_H_PROM_TSV_TEXT "TI-RTOS (SYS/BIOS)  "
5124 # elif defined(__OSV__)
5125 # define VER_H_PROM_TSV_TEXT "OSv                 "
5126 # elif defined(MINIX) || defined(MINIX3) || defined(MINIX315) || defined(__minix__) || defined(__minix3__) || defined(__minix315__)
5127 # define VER_H_PROM_TSV_TEXT "Minix               "
5128 # elif defined(__QNX__)
5129 # if defined(__QNXNTO__)
5130 # define VER_H_PROM_TSV_TEXT "QNX Neutrino        "
5131 # else
5132 # define VER_H_PROM_TSV_TEXT "QNX                 "
5133 # endif
5134 # elif defined(__managarm__)
5135 # define VER_H_PROM_TSV_TEXT "Managarm            "
5136 # endif
5137 # if !defined(VER_H_PROM_TSV_TEXT)
5138 # define BURN_PROM_TSV_TEXT "Unknown Target OpSys"
5139 # else
5140 # define BURN_PROM_TSV_TEXT VER_H_PROM_TSV_TEXT
5141 # endif
5142 #endif
5143
5144 #if !defined(VER_H_GIT_DATE_SHORT)
5145 # define VER_H_GIT_DATE_SHORT "2021-01-01"
5146 #endif
5147
5148 #if !defined(BURN_PROM_BUILD_NUM)
5149 # define BURN_PROM_BUILD_NUM " "
5150 #endif
5151
5152 #define BURN(offset, length, string) memcpy ((char *) PROM + (offset), string, length)
5153 #define BURN1(offset, byte) PROM[offset] = (char) (byte)
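      /*
       * BURN copies `length' bytes of `string' into the PROM image at `offset';
       * BURN1 stores a single byte.  Offsets written with a leading zero are
       * octal (026 == 22 decimal).  Illustrative expansion only:
       *
       *   BURN (026, 6, ship)   ==>   memcpy ((char *) PROM + (026), ship, 6)
       */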
5154
5155 (void)memset (PROM, 255, 1024);
5156
5157
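      /* Identification fields: model string at offset 0, CPU serial number at
         offset 013, the six-character ship string at 026, and the RSW 2 bytes
         (plus rsw2Ext) at 034..040. */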
5158 BURN ( 00, 11, "DPS 8/SIM M");
5159 BURN (013, 11, serial);
5160 BURN (026, 6, ship);
5161 BURN1 (034, getbits36_8 (rsw2, 0));
5162 BURN1 (035, getbits36_8 (rsw2, 8));
5163 BURN1 (036, getbits36_8 (rsw2, 16));
5164 BURN1 (037, getbits36_8 (rsw2, 24));
5165 BURN1 (040, ((getbits36_4 (rsw2, 32) << 4) \
5166 | rsw2Ext));
5167
5168
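      /* Build identification: the "2" marker, the short git date, the version
         number components, the build number, the release type, the version
         text, and the four 20-character OSA/OSV/TSA/TSV architecture and
         operating system labels. */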
5169 BURN ( 60, 1, "2");
5170 BURN ( 70, 10, VER_H_GIT_DATE_SHORT);
5171 BURN ( 80, 3, VER_H_PROM_MAJOR_VER);
5172 BURN ( 83, 3, VER_H_PROM_MINOR_VER);
5173 BURN ( 86, 3, VER_H_PROM_PATCH_VER);
5174 BURN ( 89, 3, VER_H_PROM_OTHER_VER);
5175 BURN ( 92, 8, BURN_PROM_BUILD_NUM);
5176 BURN (100, 1, VER_H_GIT_RELT);
5177 BURN (101, 29, VER_H_PROM_VER_TEXT);
5178 BURN (130, 20, BURN_PROM_OSA_TEXT);
5179 BURN (150, 20, BURN_PROM_OSV_TEXT);
5180 BURN (170, 20, BURN_PROM_TSA_TEXT);
5181 BURN (190, 20, BURN_PROM_TSV_TEXT);
5182 }
5183
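      /* Report per-CPU run statistics: thread CPU time (where the host provides
         a per-thread CPU-time clock), cycle and instruction counts, and the
         core-lock contention counters. */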
5184 void cpuStats (uint cpuNo) {
5185 if (! cpus[cpuNo].cycleCnt)
5186 return;
5187
5188
5189 #if defined(__HAIKU__)
5190 # if HAS_INCLUDE(<syscall_clock_info.h>)
5191 # include <syscall_clock_info.h>
5192 # endif
5193 # if !defined(_SYSTEM_SYSCALL_CLOCK_INFO_H)
5194 # if !defined(HAIKU_NO_PTHREAD_GETCPUCLOCKID)
5195 # define HAIKU_NO_PTHREAD_GETCPUCLOCKID
5196 # endif
5197 # endif
5198 #endif
5199
5200
5201
5202 #if defined(__sun) || defined(__sun__)
5203 # if !defined(__illumos__)
5204 # if HAS_INCLUDE(<sys/sysevent.h>)
5205 # include <sys/sysevent.h>
5206 # endif
5207 # if defined(ILLUMOS_VENDOR) || defined(ILLUMOS_KERN_PUB)
5208 # define __illumos__
5209 # endif
5210 # endif
5211 #endif
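      /* The guards above decide whether pthread_getcpuclockid() can be used
         below: Haiku builds lacking <syscall_clock_info.h> and illumos hosts
         (among other platforms) are excluded. */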
5212
5213 double cpu_seconds = 0;
5214 int cpu_millis = 0;
5215 char cpu_ftime[64] = {0};
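      /* Fetch the CPU time consumed by this CPU's worker thread, when supported. */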
5216 #if (defined(THREADZ) || defined(LOCKLESS))
5217 # if !defined(HAIKU_NO_PTHREAD_GETCPUCLOCKID) && !defined(__illumos__) && \
5218 !defined(__APPLE__) && !defined(__PASE__) && !defined(__serenity__)
5219 struct timespec cpu_time;
5220 clockid_t clock_id;
5221 if (pthread_getcpuclockid (cpus[cpuNo].thread_id, &clock_id) == 0) {
5222 if (clock_gettime (clock_id, &cpu_time) == 0) {
5223 cpu_seconds = (double)cpu_time.tv_sec + cpu_time.tv_nsec / 1e9;
5224 }
5225 }
5226 # endif
5227 #endif
5228
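      /* Convert the accumulated thread CPU time into HH:MM:SS.mmm for display. */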
5229 if (cpu_seconds > 0 && cpus[cpuNo].instrCnt > 0) {
5230 int cpu_hours = (int)(cpu_seconds / 3600);
5231 int cpu_minutes = (int)((cpu_seconds - cpu_hours * 3600) / 60);
5232 int cpu_secs = (int)(cpu_seconds - (cpu_hours * 3600) - (cpu_minutes * 60));
5233 struct tm cpu_tm = {0};
5234 cpu_tm.tm_hour = cpu_hours;
5235 cpu_tm.tm_min = cpu_minutes;
5236 cpu_tm.tm_sec = cpu_secs;
5237 strftime(cpu_ftime, sizeof(cpu_ftime), "%H:%M:%S", &cpu_tm);
5238 cpu_millis = (int)((cpu_seconds - (cpu_hours * 3600) - (cpu_minutes * 60) - cpu_secs) * 1000);
5239 }
5240
5241 (void)fflush(stderr);
5242 (void)fflush(stdout);
5243 sim_msg ("\r\n");
5244 (void)fflush(stdout);
5245 (void)fflush(stderr);
5246 sim_msg ("\r+---------------------------------+\r\n");
5247 sim_msg ("\r| CPU %c Statistics |\r\n", 'A' + cpuNo);
5248 sim_msg ("\r+---------------------------------+\r\n");
5249 if (cpu_seconds > 0 && cpus[cpuNo].instrCnt > 0) {
5250 sim_msg ("\r| CPU Time Used %11s.%03d |\r\n", cpu_ftime, cpu_millis);
5251 sim_msg ("\r+---------------------------------+\r\n");
5252 }
5253 (void)fflush(stdout);
5254 (void)fflush(stderr);
5255 #if defined(_AIX) && !defined(__PASE__)
5256 struct rusage rusage;
5257 if (!pthread_getrusage_np(cpus[cpuNo].thread_id, &rusage, PTHRDSINFO_RUSAGE_COLLECT)) {
5258 sim_msg ("\r| Volun. CtxtSw %'15llu |\r\n", (unsigned long long)rusage.ru_nvcsw);
5259 sim_msg ("\r| Invol. CtxtSw %'15llu |\r\n", (unsigned long long)rusage.ru_nivcsw);
5260 sim_msg ("\r+---------------------------------+\r\n");
5261 }
5262 #endif
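      /* The two report blocks below are essentially identical, except that the
         non-WIN_STDIO build uses the %' (thousands-grouping) printf flag, a
         POSIX extension the Windows C runtime does not support. */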
5263 #if defined(WIN_STDIO)
5264 sim_msg ("\r| cycles %15llu |\r\n", (unsigned long long)cpus[cpuNo].cycleCnt);
5265 sim_msg ("\r| instructions %15llu |\r\n", (unsigned long long)cpus[cpuNo].instrCnt);
5266 (void)fflush(stdout);
5267 (void)fflush(stderr);
5268 sim_msg ("\r+---------------------------------+\r\n");
5269 sim_msg ("\r| lockCnt %15llu |\r\n", (unsigned long long)cpus[cpuNo].coreLockState.lockCnt);
5270 sim_msg ("\r| lockImmediate %15llu |\r\n", (unsigned long long)cpus[cpuNo].coreLockState.lockImmediate);
5271 (void)fflush(stdout);
5272 (void)fflush(stderr);
5273 sim_msg ("\r+---------------------------------+\r\n");
5274 sim_msg ("\r| lockWait %15llu |\r\n", (unsigned long long)cpus[cpuNo].coreLockState.lockWait);
5275 sim_msg ("\r| lockWaitMax %15llu |\r\n", (unsigned long long)cpus[cpuNo].coreLockState.lockWaitMax);
5276 (void)fflush(stdout);
5277 (void)fflush(stderr);
5278 # if !defined(SCHED_NEVER_YIELD)
5279 sim_msg ("\r| lockYield %15llu |\r\n", (unsigned long long)cpus[cpuNo].coreLockState.lockYield);
5280 (void)fflush(stdout);
5281 (void)fflush(stderr);
5282 # else
5283 sim_msg ("\r| lockYield ---- |\r\n");
5284 (void)fflush(stdout);
5285 (void)fflush(stderr);
5286 # endif
5287 sim_msg ("\r+---------------------------------+");
5288 (void)fflush(stdout);
5289 (void)fflush(stderr);
5290 # if !defined(UCACHE)
5291 # if !defined(UCACHE_STATS)
5292 sim_msg ("\r\n");
5293 # endif
5294 # endif
5295 (void)fflush(stdout);
5296 (void)fflush(stderr);
5297 #else
5298 sim_msg ("\r| cycles %'15llu |\r\n", (unsigned long long)cpus[cpuNo].cycleCnt);
5299 sim_msg ("\r| instructions %'15llu |\r\n", (unsigned long long)cpus[cpuNo].instrCnt);
5300 (void)fflush(stdout);
5301 (void)fflush(stderr);
5302 sim_msg ("\r+---------------------------------+\r\n");
5303 sim_msg ("\r| lockCnt %'15llu |\r\n", (unsigned long long)cpus[cpuNo].coreLockState.lockCnt);
5304 sim_msg ("\r| lockImmediate %'15llu |\r\n", (unsigned long long)cpus[cpuNo].coreLockState.lockImmediate);
5305 (void)fflush(stdout);
5306 (void)fflush(stderr);
5307 sim_msg ("\r+---------------------------------+\r\n");
5308 sim_msg ("\r| lockWait %'15llu |\r\n", (unsigned long long)cpus[cpuNo].coreLockState.lockWait);
5309 sim_msg ("\r| lockWaitMax %'15llu |\r\n", (unsigned long long)cpus[cpuNo].coreLockState.lockWaitMax);
5310 (void)fflush(stdout);
5311 (void)fflush(stderr);
5312 # if !defined(SCHED_NEVER_YIELD)
5313 sim_msg ("\r| lockYield %'15llu |\r\n", (unsigned long long)cpus[cpuNo].coreLockState.lockYield);
5314 (void)fflush(stdout);
5315 (void)fflush(stderr);
5316 # else
5317 sim_msg ("\r| lockYield ---- |\r\n");
5318 (void)fflush(stdout);
5319 (void)fflush(stderr);
5320 # endif
5321 sim_msg ("\r+---------------------------------+");
5322 (void)fflush(stdout);
5323 (void)fflush(stderr);
5324 # if !defined(UCACHE)
5325 # if !defined(UCACHE_STATS)
5326 sim_msg ("\r\n");
5327 # endif
5328 # endif
5329 (void)fflush(stderr);
5330 (void)fflush(stdout);
5331 #endif
5332
5333 #if defined(UCACHE_STATS)
5334 ucacheStats (cpuNo);
5335 #endif
5336
5337
5338
5339
5340
5341
5342
5343 }
5344
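      /* Set by perfTest() to indicate that a performance test run is in progress. */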
5345 bool running_perf_test;
5346
5347 #if defined(THREADZ) || defined(LOCKLESS)
5348 # include <locale.h>
5349 # include "segldr.h"
5350
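      /* Performance test entry point: allocate the system state, restore a memory
         image with mrestore() (default "strip.mem"), initialize CPU 0, and run
         the instruction loop directly via threadz_sim_instr(). */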
5351 void perfTest (char * testName) {
5352 running_perf_test = true;
5353
5354 if (testName == NULL)
5355 testName = "strip.mem";
5356
5357 # if !defined(NO_LOCALE)
5358 (void) setlocale(LC_NUMERIC, "");
5359 # endif
5360
5361
5362 # if !defined(_AIX)
5363 system_state = aligned_malloc (sizeof (struct system_state_s));
5364 # else
5365 system_state = malloc (sizeof (struct system_state_s));
5366 # endif
5367 if (!system_state)
5368 {
5369 (void)fprintf (stderr, "\rFATAL: Out of memory! Aborting at %s[%s:%d]\r\n",
5370 __func__, __FILE__, __LINE__);
5371 # if defined(USE_BACKTRACE)
5372 # if defined(SIGUSR2)
5373 (void)raise(SIGUSR2);
5374
5375 # endif
5376 # endif
5377 abort();
5378 }
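      /* If enough free memory is available (or the amount is unknown), try to
         lock the system state into RAM so the timing run is not perturbed by
         paging; a failure is only recorded in mlock_failure. */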
5379 # if !defined(__MINGW64__) && !defined(__MINGW32__) && !defined(CROSS_MINGW64) && !defined(CROSS_MINGW32) && !defined(__PASE__)
5380 if (0 == sim_free_memory || sim_free_memory >= 192000000) {
5381 if (mlock(system_state, sizeof(struct system_state_s)) == -1) {
5382 mlock_failure = true;
5383 }
5384 } else {
5385 # if defined(TESTING)
5386 sim_warn ("Low memory - no memory locking attempted.\r\n");
5387 # else
5388 (void)system_state;
5389 # endif
5390 }
5391 # endif
5392 M = system_state->M;
5393 # if defined(M_SHARED)
5394 cpus = system_state->cpus;
5395 # endif
5396 (void) memset (cpus, 0, sizeof (cpu_state_t) * N_CPU_UNITS_MAX);
5397 for (int i = 0; i < N_CPU_UNITS_MAX; i ++) {
5398 cpus[i].switches.FLT_BASE = 2;
5399 cpus[i].instrCnt = 0;
5400 cpus[i].cycleCnt = 0;
5401 for (int j = 0; j < N_FAULTS; j ++)
5402 cpus[i].faultCnt [j] = 0;
5403 }
5404
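      /* Configure CPU 0 (emcall enabled, a single operator console), reset it,
         load the test memory image, and enter the instruction loop. */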
5405 cpus[0].tweaks.enable_emcall = 1;
5406 opc_dev.numunits = 1;
5407 cpu_reset_unit_idx (0, false);
5408 set_cpu_cycle (& cpus[0], FETCH_cycle);
5409 mrestore (testName);
5410 _cpup = & cpus[0];
5411 threadz_sim_instr ();
5412 }
5413 #endif
5414