This source file includes the following definitions.
- cpu_show_config
- cpu_set_config
- cpu_show_nunits
- cpu_set_nunits
- cpu_show_kips
- cpu_set_kips
- cpu_show_stall
- cpu_set_stall
- setCPUConfigL68
- setCPUConfigDPS8M
- cycle_str
- set_cpu_cycle
- set_cpu_idx
- cpu_reset_unit_idx
- simh_cpu_reset_and_clear_unit
- simh_cpu_reset_unit
- str_SDW0
- cpu_boot
- setup_scbank_map
- lookup_cpu_mem_map
- get_serial_number
- do_stats
- ev_poll_cb
- cpu_init
- cpu_reset
- sim_cpu_reset
- cpu_ex
- cpu_dep
- get_highest_intr
- sample_interrupts
- simh_hooks
- panel_process_event
- sim_instr
- cpu_thread_main
- do_LUF_fault
- set_temporary_absolute_mode
- clear_temporary_absolute_mode
- becomeClockMaster
- giveupClockMaster
- threadz_sim_instr
- operand_size
- readOperandRead
- readOperandRMW
- write_operand
- set_mem_watch
- nem_check
- core_read
- core_read_lock
- core_write
- core_write_unlock
- core_unlock_all
- core_write_zone
- core_read2
- core_write2
- decode_instruction
- is_priv_mode
- get_bar_mode
- get_addr_mode
- set_addr_mode
- get_BAR_address
- add_history
- add_history_force
- add_dps8m_CU_history
- add_dps8m_DU_OU_history
- add_dps8m_APU_history
- add_dps8m_EAPU_history
- add_l68_CU_history
- add_l68_DU_history
- add_l68_OU_history
- add_l68_APU_history
- get_dbg_verb
- dps8_sim_debug
- setupPROM
- cpuStats
- perfTest
32 #include <stdio.h>
33 #include <unistd.h>
34 #include <ctype.h>
35
36 #if !defined(__MINGW64__) && !defined(__MINGW32__) && !defined(CROSS_MINGW64) && !defined(CROSS_MINGW32)
37 # include <sys/mman.h>
38 #endif
39
40 #include "dps8.h"
41 #include "dps8_sys.h"
42 #include "dps8_iom.h"
43 #include "dps8_cable.h"
44 #include "dps8_cpu.h"
45 #include "dps8_rt.h"
46 #include "dps8_priv.h"
47 #include "dps8_addrmods.h"
48 #include "dps8_faults.h"
49 #include "dps8_scu.h"
50 #include "dps8_append.h"
51 #include "dps8_ins.h"
52 #include "dps8_state.h"
53 #include "dps8_math.h"
54 #include "dps8_iefp.h"
55 #include "dps8_console.h"
56 #include "dps8_fnp2.h"
57 #include "dps8_socket_dev.h"
58 #include "dps8_crdrdr.h"
59 #include "dps8_absi.h"
60 #include "dps8_mgp.h"
61 #include "dps8_utils.h"
62 #include "dps8_memalign.h"
63
64 #if defined(M_SHARED)
65 # include "shm.h"
66 #endif
67
68 #include "dps8_opcodetable.h"
69 #include "../simh/sim_defs.h"
70 #include "../simh/sim_os_mem.h"
71
72 #if defined(THREADZ) || defined(LOCKLESS)
73 # include "threadz.h"
74 __thread uint current_running_cpu_idx;
75 #endif
76
77 #include "ver.h"
78
79 #if defined(_AIX) && !defined(__PASE__)
80 # include <pthread.h>
81 # include <sys/resource.h>
82 #endif
83
84 #if defined(NO_LOCALE)
85 # define xstrerror_l strerror
86 #endif
87
88 #define DBG_CTR cpu.cycleCnt
89
90 #define ASSUME0 0
91
92 #define FREE(p) do \
93 { \
94 free((p)); \
95 (p) = NULL; \
96 } while(0)
97
98
99
100 static UNIT cpu_unit [N_CPU_UNITS_MAX] = {
101 #if defined(NO_C_ELLIPSIS)
102 { UDATA (NULL, UNIT_FIX|UNIT_BINK, MEMSIZE), 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL },
103 { UDATA (NULL, UNIT_FIX|UNIT_BINK, MEMSIZE), 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL },
104 { UDATA (NULL, UNIT_FIX|UNIT_BINK, MEMSIZE), 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL },
105 { UDATA (NULL, UNIT_FIX|UNIT_BINK, MEMSIZE), 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL },
106 { UDATA (NULL, UNIT_FIX|UNIT_BINK, MEMSIZE), 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL },
107 { UDATA (NULL, UNIT_FIX|UNIT_BINK, MEMSIZE), 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL },
108 { UDATA (NULL, UNIT_FIX|UNIT_BINK, MEMSIZE), 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL },
109 { UDATA (NULL, UNIT_FIX|UNIT_BINK, MEMSIZE), 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL }
110 #else
111 [0 ... N_CPU_UNITS_MAX - 1] = {
112 UDATA (NULL, UNIT_FIX|UNIT_BINK, MEMSIZE), 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL
113 }
114 #endif
115 };
116
117 #define UNIT_IDX(uptr) ((uptr) - cpu_unit)
118
119
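// Lockup fault (LUF) pacing: "kips" is the assumed simulated execution rate in
// thousands of instructions per second; luf_limits[] scales with it so that its
// five entries correspond to roughly 2, 4, 8, 16 and 32 ms of simulated
// execution before a lockup fault is taken.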
120 #define LOCKUP_KIPS 1000
121 static uint64 kips = LOCKUP_KIPS;
122 static uint64 luf_limits[] =
123 {
124 2000*LOCKUP_KIPS/1000,
125 4000*LOCKUP_KIPS/1000,
126 8000*LOCKUP_KIPS/1000,
127 16000*LOCKUP_KIPS/1000,
128 32000*LOCKUP_KIPS/1000
129 };
130
131 struct stall_point_s stall_points [N_STALL_POINTS];
132 bool stall_point_active = false;
133
134 #if defined(PANEL68)
135 static void panel_process_event (void);
136 #endif
137
138 static t_stat simh_cpu_reset_and_clear_unit (UNIT * uptr,
139 UNUSED int32 value,
140 UNUSED const char * cptr,
141 UNUSED void * desc);
142 char * cycle_str (cycles_e cycle);
143
144 static t_stat cpu_show_config (UNUSED FILE * st, UNIT * uptr,
145 UNUSED int val, UNUSED const void * desc)
146 {
147 long cpu_unit_idx = UNIT_IDX (uptr);
148 if (cpu_unit_idx < 0 || cpu_unit_idx >= N_CPU_UNITS_MAX)
149 {
150 sim_warn ("error: Invalid unit number %ld\r\n", (long) cpu_unit_idx);
151 return SCPE_ARG;
152 }
153
154 #define PFC_INT8 "%c%c%c%c%c%c%c%c"
155
156 #define PBI_8(i) \
157 ( ((i) & 0x80ll) ? '1' : '0' ), \
158 ( ((i) & 0x40ll) ? '1' : '0' ), \
159 ( ((i) & 0x20ll) ? '1' : '0' ), \
160 ( ((i) & 0x10ll) ? '1' : '0' ), \
161 ( ((i) & 0x08ll) ? '1' : '0' ), \
162 ( ((i) & 0x04ll) ? '1' : '0' ), \
163 ( ((i) & 0x02ll) ? '1' : '0' ), \
164 ( ((i) & 0x01ll) ? '1' : '0' )
165
166 #define PFC_INT16 PFC_INT8 PFC_INT8
167 #define PFC_INT32 PFC_INT16 PFC_INT16
168 #define PFC_INT64 PFC_INT32 PFC_INT32
169
170 #define PBI_16(i) PBI_8((i) >> 8), PBI_8(i)
171 #define PBI_32(i) PBI_16((i) >> 16), PBI_16(i)
172 #define PBI_64(i) PBI_32((i) >> 32), PBI_32(i)
173
174 char dsbin[66], adbin[34];
175
176 sim_msg ("CPU unit number %ld\r\n", (long) cpu_unit_idx);
177
178 sim_msg ("Fault base: %03o(8)\r\n",
179 cpus[cpu_unit_idx].switches.FLT_BASE);
180 sim_msg ("CPU number: %01o(8)\r\n",
181 cpus[cpu_unit_idx].switches.cpu_num);
182 sim_msg ("Data switches: %012llo(8)\r\n",
183 (unsigned long long)cpus[cpu_unit_idx].switches.data_switches);
184 (void)snprintf (dsbin, 65, PFC_INT64,
185 PBI_64((unsigned long long)cpus[cpu_unit_idx].switches.data_switches));
186 sim_msg (" %36s(2)\r\n",
187 dsbin + strlen(dsbin) - 36);
188 sim_msg ("Address switches: %06o(8)\r\n",
189 cpus[cpu_unit_idx].switches.addr_switches);
190 (void)snprintf (adbin, 33, PFC_INT32,
191 PBI_32(cpus[cpu_unit_idx].switches.addr_switches));
192 sim_msg (" %18s(2)\r\n",
193 adbin + strlen(adbin) - 18);
194 for (int i = 0; i < (cpus[cpu_unit_idx].tweaks.l68_mode ? N_L68_CPU_PORTS : N_DPS8M_CPU_PORTS); i ++)
195 {
196 sim_msg ("Port%c enable: %01o(8)\r\n",
197 'A' + i, cpus[cpu_unit_idx].switches.enable [i]);
198 sim_msg ("Port%c init enable: %01o(8)\r\n",
199 'A' + i, cpus[cpu_unit_idx].switches.init_enable [i]);
200 sim_msg ("Port%c assignment: %01o(8)\r\n",
201 'A' + i, cpus[cpu_unit_idx].switches.assignment [i]);
202 sim_msg ("Port%c interlace: %01o(8)\r\n",
203 'A' + i, cpus[cpu_unit_idx].switches.interlace [i]);
204 sim_msg ("Port%c store size: %01o(8)\r\n",
205 'A' + i, cpus[cpu_unit_idx].switches.store_size [i]);
206 }
207 sim_msg ("Processor mode: %s [%o]\r\n",
208 cpus[cpu_unit_idx].switches.procMode == \
209 procModeMultics ? "Multics" : cpus[cpu_unit_idx].switches.procMode == procModeGCOS ? "GCOS" : "???",
210 cpus[cpu_unit_idx].switches.procMode);
211 sim_msg ("8K Cache: %s\r\n",
212 cpus[cpu_unit_idx].switches.enable_cache ? "Enabled" : "Disabled");
213 sim_msg ("SDWAM: %s\r\n",
214 cpus[cpu_unit_idx].switches.sdwam_enable ? "Enabled" : "Disabled");
215 sim_msg ("PTWAM: %s\r\n",
216 cpus[cpu_unit_idx].switches.ptwam_enable ? "Enabled" : "Disabled");
217
218 sim_msg ("Processor speed: %02o(8)\r\n",
219 cpus[cpu_unit_idx].options.proc_speed);
220 sim_msg ("DIS enable: %01o(8)\r\n",
221 cpus[cpu_unit_idx].tweaks.dis_enable);
222 sim_msg ("Steady clock: %01o(8)\r\n",
223 scu [0].steady_clock);
224 sim_msg ("Halt on unimplemented: %01o(8)\r\n",
225 cpus[cpu_unit_idx].tweaks.halt_on_unimp);
226 sim_msg ("Enable simulated SDWAM/PTWAM: %01o(8)\r\n",
227 cpus[cpu_unit_idx].tweaks.enable_wam);
228 sim_msg ("Report faults: %01o(8)\r\n",
229 cpus[cpu_unit_idx].tweaks.report_faults);
230 sim_msg ("TRO faults enabled: %01o(8)\r\n",
231 cpus[cpu_unit_idx].tweaks.tro_enable);
232 sim_msg ("drl fatal enabled: %01o(8)\r\n",
233 cpus[cpu_unit_idx].tweaks.drl_fatal);
234 sim_msg ("useMap: %d\r\n",
235 cpus[cpu_unit_idx].tweaks.useMap);
236 sim_msg ("PROM installed: %01o(8)\r\n",
237 cpus[cpu_unit_idx].options.prom_installed);
238 sim_msg ("Hex mode installed: %01o(8)\r\n",
239 cpus[cpu_unit_idx].options.hex_mode_installed);
240 sim_msg ("8K cache installed: %01o(8)\r\n",
241 cpus[cpu_unit_idx].options.cache_installed);
242 sim_msg ("Clock slave installed: %01o(8)\r\n",
243 cpus[cpu_unit_idx].options.clock_slave_installed);
244 #if defined(AFFINITY)
245 if (cpus[cpu_unit_idx].set_affinity)
246 sim_msg ("CPU affinity: %d\r\n", cpus[cpu_unit_idx].affinity);
247 else
248 sim_msg ("CPU affinity: not set\r\n");
249 #endif
250 sim_msg ("ISOLTS mode: %01o(8)\r\n", cpus[cpu_unit_idx].tweaks.isolts_mode);
251 sim_msg ("NODIS mode: %01o(8)\r\n", cpus[cpu_unit_idx].tweaks.nodis);
252 sim_msg ("6180 mode: %01o(8) [%s]\r\n",
253 cpus[cpu_unit_idx].tweaks.l68_mode, cpus[cpu_unit_idx].tweaks.l68_mode ? "6180" : "DPS8/M");
254 return SCPE_OK;
255 }
256
281 static config_value_list_t cfg_multics_fault_base [] =
282 {
283 { "multics", 2 },
284 { NULL, 0 }
285 };
286
287 static config_value_list_t cfg_on_off [] =
288 {
289 { "off", 0 },
290 { "on", 1 },
291 { "disable", 0 },
292 { "enable", 1 },
293 { NULL, 0 }
294 };
295
296 static config_value_list_t cfg_l68_mode [] = {
297 { "dps8/m", 0 },
298 { "dps8m", 0 },
299 { "dps8", 0 },
300 { "l68", 1 },
301 { "l6180", 1 },
302 { "6180", 1 },
    { NULL,     0 }
303   };
304
305 static config_value_list_t cfg_cpu_mode [] =
306 {
307 { "gcos", 0 },
308 { "multics", 1 },
309 { NULL, 0 }
310 };
311
312 static config_value_list_t cfg_port_letter [] =
313 {
314 { "a", 0 },
315 { "b", 1 },
316 { "c", 2 },
317 { "d", 3 },
318 { "e", 4 },
319 { "f", 5 },
320 { "g", 6 },
321 { "h", 7 },
322 { NULL, 0 }
323 };
324
325 static config_value_list_t cfg_interlace [] =
326 {
327 { "off", 0 },
328 { "2", 2 },
329 { "4", 4 },
330 { NULL, 0 }
331 };
332
333 #if defined(AFFINITY)
334 static config_value_list_t cfg_affinity [] =
335 {
336 { "off", -1 },
337 { NULL, 0 }
338 };
339 #endif
340
341 static config_value_list_t cfg_size_list [] =
342 {
405 { "32", 8 },
406 { "32K", 8 },
407 { "64", 9 },
408 { "64K", 9 },
409 { "128", 10 },
410 { "128K", 10 },
411 { "256", 11 },
412 { "256K", 11 },
413 { "512", 12 },
414 { "512K", 12 },
415 { "1024", 13 },
416 { "1024K", 13 },
417 { "1M", 13 },
418 { "2048", 14 },
419 { "2048K", 14 },
420 { "2M", 14 },
421 { "4096", 15 },
422 { "4096K", 15 },
423 { "4M", 15 },
424 { NULL, 0 }
425 };
426
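// Configuration keywords accepted by "SET CPUn CONFIG=...".  Illustrative use
// only (keyword names are from the table below; the values are merely examples):
//   SET CPU0 CONFIG=FAULTBASE=Multics
//   SET CPU0 CONFIG=NUM=0
//   SET CPU0 CONFIG=PORT=A
//   SET CPU0 CONFIG=STORE_SIZE=4M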
427 static config_list_t cpu_config_list [] =
428 {
429 { "faultbase", 0, 0177, cfg_multics_fault_base },
430 { "num", 0, 07, NULL },
431 { "data", 0, 0777777777777, NULL },
432 { "stopnum", 0, 999999, NULL },
433 { "mode", 0, 01, cfg_cpu_mode },
434 { "speed", 0, 017, NULL },
435 { "port", 0, N_CPU_PORTS - 1, cfg_port_letter },
436 { "assignment", 0, 7, NULL },
437 { "interlace", 0, 1, cfg_interlace },
438 { "enable", 0, 1, cfg_on_off },
439 { "init_enable", 0, 1, cfg_on_off },
440 { "store_size", 0, 7, cfg_size_list },
441 { "enable_cache", 0, 1, cfg_on_off },
442 { "sdwam", 0, 1, cfg_on_off },
443 { "ptwam", 0, 1, cfg_on_off },
444
445
446 { "dis_enable", 0, 1, cfg_on_off },
447
448 { "steady_clock", 0, 1, cfg_on_off },
449 { "halt_on_unimplemented", 0, 1, cfg_on_off },
450 { "enable_wam", 0, 1, cfg_on_off },
451 { "report_faults", 0, 1, cfg_on_off },
452 { "tro_enable", 0, 1, cfg_on_off },
453 { "drl_fatal", 0, 1, cfg_on_off },
454 { "useMap", 0, 1, cfg_on_off },
455 { "address", 0, 0777777, NULL },
456 { "prom_installed", 0, 1, cfg_on_off },
457 { "hex_mode_installed", 0, 1, cfg_on_off },
458 { "cache_installed", 0, 1, cfg_on_off },
459 { "clock_slave_installed", 0, 1, cfg_on_off },
460 { "enable_emcall", 0, 1, cfg_on_off },
461
462
463 #if defined(AFFINITY)
464 { "affinity", -1, 32767, cfg_affinity },
465 #endif
466 { "isolts_mode", 0, 1, cfg_on_off },
467 { "nodis", 0, 1, cfg_on_off },
468 { "l68_mode", 0, 1, cfg_l68_mode },
469 { NULL, 0, 0, NULL }
470 };
471
472 static t_stat cpu_set_config (UNIT * uptr, UNUSED int32 value,
473 const char * cptr, UNUSED void * desc)
474 {
475 long cpu_unit_idx = UNIT_IDX (uptr);
476 if (cpu_unit_idx < 0 || cpu_unit_idx >= N_CPU_UNITS_MAX)
477 {
478 sim_warn ("error: cpu_set_config: Invalid unit number %ld\r\n",
479 (long) cpu_unit_idx);
480 return SCPE_ARG;
481 }
482
483 static int port_num = 0;
484
485 config_state_t cfg_state = { NULL, NULL };
486
487 for (;;)
488 {
489 int64_t v;
490 int rc = cfg_parse (__func__, cptr, cpu_config_list,
491 & cfg_state, & v);
492 if (rc == -1)
493 {
494 break;
495 }
496 if (rc == -2)
497 {
498 cfg_parse_done (& cfg_state);
499 return SCPE_ARG;
500 }
501
502 const char * p = cpu_config_list [rc] . name;
503 if (strcmp (p, "faultbase") == 0)
504 cpus[cpu_unit_idx].switches.FLT_BASE = (uint) v;
505 else if (strcmp (p, "num") == 0)
506 cpus[cpu_unit_idx].switches.cpu_num = (uint) v;
507 else if (strcmp (p, "data") == 0)
508 cpus[cpu_unit_idx].switches.data_switches = (word36) v;
509 else if (strcmp (p, "stopnum") == 0)
510 {
511
512
513 int64_t d1 = (v / 1000) % 10;
514 int64_t d2 = (v / 100) % 10;
515 int64_t d3 = (v / 10) % 10;
516 int64_t d4 = (v / 1) % 10;
517 word36 d = 0123000000000;
518 putbits36_6 (& d, 9, (word4) d1);
519 putbits36_6 (& d, 15, (word4) d2);
520 putbits36_6 (& d, 21, (word4) d3);
521 putbits36_6 (& d, 27, (word4) d4);
522 cpus[cpu_unit_idx].switches.data_switches = d;
523 }
524 else if (strcmp (p, "address") == 0)
525 cpus[cpu_unit_idx].switches.addr_switches = (word18) v;
526 else if (strcmp (p, "mode") == 0)
527 cpus[cpu_unit_idx].switches.procMode = v ? procModeMultics : procModeGCOS;
528 else if (strcmp (p, "speed") == 0)
529 cpus[cpu_unit_idx].options.proc_speed = (uint) v;
530 else if (strcmp (p, "port") == 0) {
531 if ((! cpus[cpu_unit_idx].tweaks.l68_mode) && (int) v > 3) {
532 cfg_parse_done (& cfg_state);
533 return SCPE_ARG;
534 }
535 port_num = (int) v;
536 }
537 else if (strcmp (p, "assignment") == 0)
538 cpus[cpu_unit_idx].switches.assignment [port_num] = (uint) v;
539 else if (strcmp (p, "interlace") == 0)
540 cpus[cpu_unit_idx].switches.interlace [port_num] = (uint) v;
541 else if (strcmp (p, "enable") == 0)
542 cpus[cpu_unit_idx].switches.enable [port_num] = (uint) v;
543 else if (strcmp (p, "init_enable") == 0)
544 cpus[cpu_unit_idx].switches.init_enable [port_num] = (uint) v;
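        // "store_size" values 8..15 come from cfg_size_list (32K .. 4M words);
        // fold them into the 3-bit store-size switch encoding, which differs
        // between the L68 and the DPS8/M.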
545 else if (strcmp (p, "store_size") == 0) {
546 if (v > 7) {
547 if (cpus[cpu_unit_idx].tweaks.l68_mode) {
548 switch (v) {
549 case 8: v = 0; break;
550 case 9: v = 1; break;
551 case 10: v = 3; break;
552 case 11: v = 7; break;
553 case 12: v = 4; break;
554 case 13: v = 5; break;
555 case 14: v = 6; break;
556 case 15: v = 2; break;
557 }
558 } else {
559 switch (v) {
560 case 8: v = 0; break;
561 case 9: v = 1; break;
562 case 10: v = 2; break;
563 case 11: v = 3; break;
564 case 12: v = 4; break;
565 case 13: v = 5; break;
566 case 14: v = 6; break;
567 case 15: v = 7; break;
568 }
569 }
570 }
571 cpus[cpu_unit_idx].switches.store_size [port_num] = (uint) v;
572 }
573 else if (strcmp (p, "enable_cache") == 0)
574 cpus[cpu_unit_idx].switches.enable_cache = (uint) v ? true : false;
575 else if (strcmp (p, "sdwam") == 0)
576 cpus[cpu_unit_idx].switches.sdwam_enable = (uint) v ? true : false;
577 else if (strcmp (p, "ptwam") == 0)
578 cpus[cpu_unit_idx].switches.ptwam_enable = (uint) v ? true : false;
579 else if (strcmp (p, "dis_enable") == 0)
580 cpus[cpu_unit_idx].tweaks.dis_enable = (uint) v;
581 else if (strcmp (p, "steady_clock") == 0)
582 scu [0].steady_clock = (uint) v;
583 else if (strcmp (p, "halt_on_unimplemented") == 0)
584 cpus[cpu_unit_idx].tweaks.halt_on_unimp = (uint) v;
585 else if (strcmp (p, "enable_wam") == 0)
586 cpus[cpu_unit_idx].tweaks.enable_wam = (uint) v;
587 else if (strcmp (p, "report_faults") == 0)
588 cpus[cpu_unit_idx].tweaks.report_faults = (uint) v;
589 else if (strcmp (p, "tro_enable") == 0)
590 cpus[cpu_unit_idx].tweaks.tro_enable = (uint) v;
591 else if (strcmp (p, "drl_fatal") == 0)
592 cpus[cpu_unit_idx].tweaks.drl_fatal = (uint) v;
593 else if (strcmp (p, "useMap") == 0)
594 cpus[cpu_unit_idx].tweaks.useMap = v;
595 else if (strcmp (p, "prom_installed") == 0)
596 cpus[cpu_unit_idx].options.prom_installed = v;
597 else if (strcmp (p, "hex_mode_installed") == 0)
598 cpus[cpu_unit_idx].options.hex_mode_installed = v;
599 else if (strcmp (p, "cache_installed") == 0)
600 cpus[cpu_unit_idx].options.cache_installed = v;
601 else if (strcmp (p, "clock_slave_installed") == 0)
602 cpus[cpu_unit_idx].options.clock_slave_installed = v;
603 else if (strcmp (p, "enable_emcall") == 0)
604 cpus[cpu_unit_idx].tweaks.enable_emcall = v;
605 #if defined(AFFINITY)
606 else if (strcmp (p, "affinity") == 0)
607 if (v < 0)
608 {
609 cpus[cpu_unit_idx].set_affinity = false;
610 }
611 else
612 {
613 cpus[cpu_unit_idx].set_affinity = true;
614 cpus[cpu_unit_idx].affinity = (uint) v;
615 }
616 #endif
617 else if (strcmp (p, "isolts_mode") == 0)
618 {
619 bool was = cpus[cpu_unit_idx].tweaks.isolts_mode;
620 cpus[cpu_unit_idx].tweaks.isolts_mode = v;
621 if (v && ! was) {
622 uint store_sz;
623 if (cpus[cpu_unit_idx].tweaks.l68_mode)
624 store_sz = 3;
625 else
626 store_sz = 2;
627 cpus[cpu_unit_idx].isolts_switches_save = cpus[cpu_unit_idx].switches;
628
629 cpus[cpu_unit_idx].switches.data_switches = 00000030714000;
630 cpus[cpu_unit_idx].switches.addr_switches = 0100150;
631 cpus[cpu_unit_idx].tweaks.useMap = true;
632 cpus[cpu_unit_idx].tweaks.enable_wam = true;
633 cpus[cpu_unit_idx].switches.assignment [0] = 0;
634 cpus[cpu_unit_idx].switches.interlace [0] = false;
635 cpus[cpu_unit_idx].switches.enable [0] = false;
636 cpus[cpu_unit_idx].switches.init_enable [0] = false;
637 cpus[cpu_unit_idx].switches.store_size [0] = store_sz;
638
639 cpus[cpu_unit_idx].switches.assignment [1] = 0;
640 cpus[cpu_unit_idx].switches.interlace [1] = false;
641 cpus[cpu_unit_idx].switches.enable [1] = true;
642 cpus[cpu_unit_idx].switches.init_enable [1] = false;
643 cpus[cpu_unit_idx].switches.store_size [1] = store_sz;
644
645 cpus[cpu_unit_idx].switches.assignment [2] = 0;
646 cpus[cpu_unit_idx].switches.interlace [2] = false;
647 cpus[cpu_unit_idx].switches.enable [2] = false;
648 cpus[cpu_unit_idx].switches.init_enable [2] = false;
649 cpus[cpu_unit_idx].switches.store_size [2] = store_sz;
650
651 cpus[cpu_unit_idx].switches.assignment [3] = 0;
652 cpus[cpu_unit_idx].switches.interlace [3] = false;
653 cpus[cpu_unit_idx].switches.enable [3] = false;
654 cpus[cpu_unit_idx].switches.init_enable [3] = false;
655 cpus[cpu_unit_idx].switches.store_size [3] = store_sz;
656
657 if (cpus[cpu_unit_idx].tweaks.l68_mode) {
658 cpus[cpu_unit_idx].switches.assignment [4] = 0;
659 cpus[cpu_unit_idx].switches.interlace [4] = false;
660 cpus[cpu_unit_idx].switches.enable [4] = false;
661 cpus[cpu_unit_idx].switches.init_enable [4] = false;
662 cpus[cpu_unit_idx].switches.store_size [4] = 3;
663
664 cpus[cpu_unit_idx].switches.assignment [5] = 0;
665 cpus[cpu_unit_idx].switches.interlace [5] = false;
666 cpus[cpu_unit_idx].switches.enable [5] = false;
667 cpus[cpu_unit_idx].switches.init_enable [5] = false;
668 cpus[cpu_unit_idx].switches.store_size [5] = 3;
669
670 cpus[cpu_unit_idx].switches.assignment [6] = 0;
671 cpus[cpu_unit_idx].switches.interlace [6] = false;
672 cpus[cpu_unit_idx].switches.enable [6] = false;
673 cpus[cpu_unit_idx].switches.init_enable [6] = false;
674 cpus[cpu_unit_idx].switches.store_size [6] = 3;
675
676 cpus[cpu_unit_idx].switches.assignment [7] = 0;
677 cpus[cpu_unit_idx].switches.interlace [7] = false;
678 cpus[cpu_unit_idx].switches.enable [7] = false;
679 cpus[cpu_unit_idx].switches.init_enable [7] = false;
680 cpus[cpu_unit_idx].switches.store_size [7] = 3;
681 }
682 cpus[cpu_unit_idx].switches.enable [1] = true;
683
684 #if defined(THREADZ) || defined(LOCKLESS)
685 if (cpus[cpu_unit_idx].executing) {
686 cpus[cpu_unit_idx].forceRestart = true;
687 wakeCPU (cpu_unit_idx);
688 } else {
689 cpu_reset_unit_idx ((uint) cpu_unit_idx, false);
690
691 }
692 #else
693 cpu_reset_unit_idx ((uint) cpu_unit_idx, false);
694 simh_cpu_reset_and_clear_unit (cpu_unit + cpu_unit_idx, 0, NULL, NULL);
695 #endif
696
697 } else if (was && !v) {
698 cpus[cpu_unit_idx].switches = cpus[cpu_unit_idx].isolts_switches_save;
699
700 #if defined(THREADZ) || defined(LOCKLESS)
701 if (cpus[cpu_unit_idx].executing) {
702 cpus[cpu_unit_idx].forceRestart = true;
703 wakeCPU (cpu_unit_idx);
704 } else {
705 cpu_reset_unit_idx ((uint) cpu_unit_idx, false);
706
707 }
708 #else
709 cpu_reset_unit_idx ((uint) cpu_unit_idx, false);
710 simh_cpu_reset_and_clear_unit (cpu_unit + cpu_unit_idx, 0, NULL, NULL);
711 #endif
712
713 }
714 }
715 else if (strcmp (p, "nodis") == 0)
716 cpus[cpu_unit_idx].tweaks.nodis = v;
717 else if (strcmp (p, "l68_mode") == 0)
718 cpus[cpu_unit_idx].tweaks.l68_mode = v;
719 else
720 {
721 sim_warn ("error: cpu_set_config: Invalid cfg_parse rc <%ld>\r\n",
722 (long) rc);
723 cfg_parse_done (& cfg_state);
724 return SCPE_ARG;
725 }
726 }
727 cfg_parse_done (& cfg_state);
728
729 return SCPE_OK;
730 }
731
732 static t_stat cpu_show_nunits (UNUSED FILE * st, UNUSED UNIT * uptr,
733 UNUSED int val, UNUSED const void * desc)
734 {
735 sim_msg ("Number of CPUs in system is %d\r\n", cpu_dev.numunits);
736 return SCPE_OK;
737 }
738
739 static t_stat cpu_set_nunits (UNUSED UNIT * uptr, UNUSED int32 value,
740 const char * cptr, UNUSED void * desc)
741 {
742 if (! cptr)
743 return SCPE_ARG;
744 int n = atoi (cptr);
745 if (n < 1 || n > N_CPU_UNITS_MAX)
746 return SCPE_ARG;
747 cpu_dev.numunits = (uint32) n;
748 return SCPE_OK;
749 }
750
751 static t_stat cpu_show_kips (UNUSED FILE * st, UNUSED UNIT * uptr,
752 UNUSED int val, UNUSED const void * desc)
753 {
754 sim_msg ("CPU KIPS %lu\r\n", (unsigned long)kips);
755 return SCPE_OK;
756 }
757
758 static t_stat cpu_set_kips (UNUSED UNIT * uptr, UNUSED int32 value,
759 const char * cptr, UNUSED void * desc)
760 {
761 if (! cptr)
762 return SCPE_ARG;
763 long n = atol (cptr);
764 if (n < 1 || n > 4000000)
765 return SCPE_ARG;
766 kips = (uint64) n;
767 luf_limits[0] = 2000*kips/1000;
768 luf_limits[1] = 4000*kips/1000;
769 luf_limits[2] = 8000*kips/1000;
770 luf_limits[3] = 16000*kips/1000;
771 luf_limits[4] = 32000*kips/1000;
772 return SCPE_OK;
773 }
774
775 static t_stat cpu_show_stall (UNUSED FILE * st, UNUSED UNIT * uptr,
776 UNUSED int val, UNUSED const void * desc)
777 {
778 if (! stall_point_active)
779 {
780 sim_printf ("No stall points\r\n");
781 return SCPE_OK;
782 }
783
784 sim_printf ("Stall points\r\n");
785 for (int i = 0; i < N_STALL_POINTS; i ++)
786 if (stall_points[i].segno || stall_points[i].offset)
787 {
788 #if defined(WIN_STDIO)
789 sim_printf ("%2ld %05o:%06o %10lu\r\n",
790 #else
791 sim_printf ("%2ld %05o:%06o %'10lu\r\n",
792 #endif
793 (long)i, stall_points[i].segno, stall_points[i].offset,
794 (unsigned long)stall_points[i].time);
795 }
796 return SCPE_OK;
797 }
798
799
800
801
802
803
804
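// Parse a stall-point specification of the form "n=segno:offset=time", where n
// is a decimal stall-point index (0 .. N_STALL_POINTS-1), segno and offset are
// octal, and time is a decimal delay (presumably microseconds; the code only
// range-checks it against 30,000,000).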
805 static t_stat cpu_set_stall (UNUSED UNIT * uptr, UNUSED int32 value,
806 const char * cptr, UNUSED void * desc)
807 {
808 if (! cptr)
809 return SCPE_ARG;
810
811 long n, s, o, t;
812
813 char * end;
814 n = strtol (cptr, & end, 0);
815 if (* end != '=')
816 return SCPE_ARG;
817 if (n < 0 || n >= N_STALL_POINTS)
818 return SCPE_ARG;
819
820 s = strtol (end + 1, & end, 8);
821 if (* end != ':')
822 return SCPE_ARG;
823 if (s < 0 || s > MASK15)
824 return SCPE_ARG;
825
826 o = strtol (end + 1, & end, 8);
827 if (* end != '=')
828 return SCPE_ARG;
829 if (o < 0 || o > MASK18)
830 return SCPE_ARG;
831
832 t = strtol (end + 1, & end, 0);
833 if (* end != 0)
834 return SCPE_ARG;
835 if (t < 0 || t > 30000000)
836 return SCPE_ARG;
837
838 stall_points[n].segno = (word15) s;
839 stall_points[n].offset = (word18) o;
840 stall_points[n].time = (unsigned int) t;
841 stall_point_active = false;
842
843 for (int i = 0; i < N_STALL_POINTS; i ++)
 844        if (stall_points[i].segno && stall_points[i].offset)
845 stall_point_active = true;
846
847 return SCPE_OK;
848 }
849
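// Presets behind the "SET CPUn L68" and "SET CPUn DPS8M" modifiers (see cpu_mod
// below): select the processor flavor and give every port a default assignment,
// store size, and enable state.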
850 static t_stat setCPUConfigL68 (UNIT * uptr, UNUSED int32 value, UNUSED const char * cptr, UNUSED void * desc) {
851 long cpuUnitIdx = UNIT_IDX (uptr);
852 if (cpuUnitIdx < 0 || cpuUnitIdx >= N_CPU_UNITS_MAX)
853 return SCPE_ARG;
854 cpu_state_t * cpun = cpus + cpuUnitIdx;
855
856 cpun->tweaks.l68_mode = 1;
857 cpun->options.hex_mode_installed = 0;
858 for (uint port_num = 0; port_num < N_DPS8M_CPU_PORTS; port_num ++) {
859 cpun->switches.assignment[port_num] = port_num;
860 cpun->switches.interlace[port_num] = 0;
861 cpun->switches.store_size[port_num] = 2;
862 cpun->switches.enable[port_num] = 1;
863 cpun->switches.init_enable[port_num] = 1;
864 }
865 for (uint port_num = N_DPS8M_CPU_PORTS; port_num < N_L68_CPU_PORTS; port_num ++) {
866 cpun->switches.assignment[port_num] = 0;
867 cpun->switches.interlace[port_num] = 0;
868 cpun->switches.store_size[port_num] = 0;
869 cpun->switches.enable[port_num] = 0;
870 cpun->switches.init_enable[port_num] = 0;
871 }
872 return SCPE_OK;
873 }
874
875 static t_stat setCPUConfigDPS8M (UNIT * uptr, UNUSED int32 value, UNUSED const char * cptr, UNUSED void * desc) {
876 long cpuUnitIdx = UNIT_IDX (uptr);
877 if (cpuUnitIdx < 0 || cpuUnitIdx >= N_CPU_UNITS_MAX)
878 return SCPE_ARG;
879 cpu_state_t * cpun = cpus + cpuUnitIdx;
880
881 cpun->tweaks.l68_mode = 0;
882 cpun->options.hex_mode_installed = 0;
883 for (uint port_num = 0; port_num < N_DPS8M_CPU_PORTS; port_num ++) {
884 cpun->switches.assignment[port_num] = port_num;
885 cpun->switches.interlace[port_num] = 0;
886 cpun->switches.store_size[port_num] = 7;
887 cpun->switches.enable[port_num] = 1;
888 cpun->switches.init_enable[port_num] = 1;
889 }
890 for (uint port_num = N_DPS8M_CPU_PORTS; port_num < N_L68_CPU_PORTS; port_num ++) {
891 cpun->switches.assignment[port_num] = 0;
892 cpun->switches.interlace[port_num] = 0;
893 cpun->switches.store_size[port_num] = 0;
894 cpun->switches.enable[port_num] = 0;
895 cpun->switches.init_enable[port_num] = 0;
896 }
897 return SCPE_OK;
898 }
899
900 char * cycle_str (cycles_e cycle)
901 {
902 switch (cycle)
903 {
904
905
906 case FAULT_cycle:
907 return "FAULT_cycle";
908 case EXEC_cycle:
909 return "EXEC_cycle";
910 case FAULT_EXEC_cycle:
911 return "FAULT_EXEC_cycle";
912 case INTERRUPT_cycle:
913 return "INTERRUPT_cycle";
914 case INTERRUPT_EXEC_cycle:
915 return "INTERRUPT_EXEC_cycle";
916 case FETCH_cycle:
917 return "FETCH_cycle";
918 case PSEUDO_FETCH_cycle:
919 return "PSEUDO_FETCH_cycle";
920 case SYNC_FAULT_RTN_cycle:
921 return "SYNC_FAULT_RTN_cycle";
922 default:
923 return "unknown cycle";
924 }
925 }
926
927 static void set_cpu_cycle (cpu_state_t * cpup, cycles_e cycle)
928 {
929 sim_debug (DBG_CYCLE, & cpu_dev, "Setting cycle to %s\r\n",
930 cycle_str (cycle));
931 cpu.cycle = cycle;
932 }
933
934
935
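// Memory words live in 64-bit host cells; bit 62, above the 36 data bits (and
// above the lock bit used by LOCKLESS builds), flags a word that has never been
// written.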
936 #define MEM_UNINITIALIZED (1LLU<<62)
937
938 uint set_cpu_idx (UNUSED uint cpu_idx)
939 {
940 uint prev = current_running_cpu_idx;
941 #if defined(THREADZ) || defined(LOCKLESS)
942 current_running_cpu_idx = cpu_idx;
943 #endif
944 #if defined(ROUND_ROBIN)
945 current_running_cpu_idx = cpu_idx;
946 #endif
947 _cpup = & cpus [current_running_cpu_idx];
948 return prev;
949 }
950
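// Reset one CPU: optionally clear memory, zero the visible registers, drop into
// absolute mode, and pick the starting cycle.  With the "nodis" tweak the CPU
// begins in FETCH_cycle; otherwise it starts in EXEC_cycle with a DIS
// instruction (octal 000000616200) preloaded into the IWB so it idles until an
// interrupt arrives.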
951 void cpu_reset_unit_idx (UNUSED uint cpun, bool clear_mem)
952 {
953 uint save = set_cpu_idx (cpun);
954 cpu_state_t * cpup = _cpup;
955 if (clear_mem)
956 {
957 for (uint i = 0; i < MEMSIZE; i ++)
958 {
959
960 #if defined(LOCKLESS)
961 M[i] = (M[i] & ~(MASK36 | MEM_LOCKED)) | MEM_UNINITIALIZED;
962 #else
963 M[i] = (M[i] & ~(MASK36)) | MEM_UNINITIALIZED;
964 #endif
965 }
966 }
967 cpu.rA = 0;
968 cpu.rQ = 0;
969
970 cpu.PPR.IC = 0;
971 cpu.PPR.PRR = 0;
972 cpu.PPR.PSR = 0;
973 cpu.PPR.P = 1;
974 cpu.RSDWH_R1 = 0;
975 cpu.rTR = MASK27;
976
977 if (cpu.tweaks.isolts_mode)
978 {
979 cpu.shadowTR = 0;
980 cpu.rTRlsb = 0;
981 }
982 cpu.rTRticks = 0;
983
984 set_addr_mode (cpup, ABSOLUTE_mode);
985 SET_I_NBAR;
986
987 cpu.CMR.luf = 3;
988 cpu.cu.SD_ON = cpu.switches.sdwam_enable ? 1 : 0;
989 cpu.cu.PT_ON = cpu.switches.ptwam_enable ? 1 : 0;
990
991 if (cpu.tweaks.nodis) {
992 set_cpu_cycle (cpup, FETCH_cycle);
993 } else {
994 set_cpu_cycle (cpup, EXEC_cycle);
995 cpu.cu.IWB = 0000000616200;
996 }
997 #if defined(PERF_STRIP)
998 set_cpu_cycle (cpup, FETCH_cycle);
999 #endif
1000 cpu.wasXfer = false;
1001 cpu.wasInhibited = false;
1002
1003 cpu.interrupt_flag = false;
1004 cpu.g7_flag = false;
1005
1006 cpu.faultRegister [0] = 0;
1007 cpu.faultRegister [1] = 0;
1008
1009 #if defined(RAPRx)
1010 cpu.apu.lastCycle = UNKNOWN_CYCLE;
1011 #endif
1012
1013 (void)memset (& cpu.PPR, 0, sizeof (struct ppr_s));
1014
1015 setup_scbank_map (cpup);
1016
1017 tidy_cu (cpup);
1018 set_cpu_idx (save);
1019 }
1020
1021 static t_stat simh_cpu_reset_and_clear_unit (UNIT * uptr,
1022 UNUSED int32 value,
1023 UNUSED const char * cptr,
1024 UNUSED void * desc)
1025 {
1026 long cpu_unit_idx = UNIT_IDX (uptr);
1027 cpu_state_t * cpun = cpus + cpu_unit_idx;
1028 if (cpun->tweaks.isolts_mode)
1029 {
1030
1031 if (cpun->tweaks.useMap)
1032 {
1033 for (uint pgnum = 0; pgnum < N_SCBANKS; pgnum ++)
1034 {
1035 int base = cpun->sc_addr_map [pgnum];
1036 if (base < 0)
1037 continue;
1038 for (uint addr = 0; addr < SCBANK_SZ; addr ++)
1039 M [addr + (uint) base] = MEM_UNINITIALIZED;
1040 }
1041 }
1042 }
1043
1044 cpu_reset_unit_idx ((uint) cpu_unit_idx, false);
1045 return SCPE_OK;
1046 }
1047
1048 static t_stat simh_cpu_reset_unit (UNIT * uptr,
1049 UNUSED int32 value,
1050 UNUSED const char * cptr,
1051 UNUSED void * desc)
1052 {
1053 long cpu_unit_idx = UNIT_IDX (uptr);
1054 cpu_reset_unit_idx ((uint) cpu_unit_idx, false);
1055 return SCPE_OK;
1056 }
1057
1058 #if !defined(PERF_STRIP)
1059 static uv_loop_t * ev_poll_loop;
1060 static uv_timer_t ev_poll_handle;
1061 #endif
1062
1063 static MTAB cpu_mod[] =
1064 {
1065 {
1066 MTAB_unit_value,
1067 0,
1068 "CONFIG",
1069 "CONFIG",
1070 cpu_set_config,
1071 cpu_show_config,
1072 NULL,
1073 NULL
1074 },
1075
1076
1077
1078 {
1079 MTAB_unit_value,
1080 0,
1081 "RESET",
1082 "RESET",
1083 simh_cpu_reset_unit,
1084 NULL,
1085 NULL,
1086 NULL
1087 },
1088
1089 {
1090 MTAB_unit_value,
1091 0,
1092 "INITIALIZE",
1093 "INITIALIZE",
1094 simh_cpu_reset_unit,
1095 NULL,
1096 NULL,
1097 NULL
1098 },
1099
1100
1101
1102 {
1103 MTAB_unit_value,
1104 0,
1105 "INITIALIZEANDCLEAR",
1106 "INITIALIZEANDCLEAR",
1107 simh_cpu_reset_and_clear_unit,
1108 NULL,
1109 NULL,
1110 NULL
1111 },
1112
1113 {
1114 MTAB_unit_value,
1115 0,
1116 "IAC",
1117 "IAC",
1118 simh_cpu_reset_and_clear_unit,
1119 NULL,
1120 NULL,
1121 NULL
1122 },
1123
1124 {
1125 MTAB_dev_value,
1126 0,
1127 "NUNITS",
1128 "NUNITS",
1129 cpu_set_nunits,
1130 cpu_show_nunits,
1131 NULL,
1132 NULL
1133 },
1134
1135 {
1136 MTAB_dev_value,
1137 0,
1138 "KIPS",
1139 "KIPS",
1140 cpu_set_kips,
1141 cpu_show_kips,
1142 NULL,
1143 NULL
1144 },
1145
1146 {
1147 MTAB_dev_value,
1148 0,
1149 "STALL",
1150 "STALL",
1151 cpu_set_stall,
1152 cpu_show_stall,
1153 NULL,
1154 NULL
1155 },
1156
1157 {
1158 MTAB_unit_value,
1159 0,
1160 "DPS8M",
1161 "DPS8M",
1162 setCPUConfigDPS8M,
1163 NULL,
1164 NULL,
1165 NULL
1166 },
1167
1168 {
1169 MTAB_unit_value,
1170 0,
1171 "L68",
1172 "L68",
1173 setCPUConfigL68,
1174 NULL,
1175 NULL,
1176 NULL
1177 },
1178
1179 { 0, 0, NULL, NULL, NULL, NULL, NULL, NULL }
1180 };
1181
1182 static DEBTAB cpu_dt[] =
1183 {
1184 { "TRACE", DBG_TRACE, NULL },
1185 { "TRACEEXT", DBG_TRACEEXT, NULL },
1186 { "MESSAGES", DBG_MSG, NULL },
1187
1188 { "REGDUMPAQI", DBG_REGDUMPAQI, NULL },
1189 { "REGDUMPIDX", DBG_REGDUMPIDX, NULL },
1190 { "REGDUMPPR", DBG_REGDUMPPR, NULL },
1191 { "REGDUMPPPR", DBG_REGDUMPPPR, NULL },
1192 { "REGDUMPDSBR", DBG_REGDUMPDSBR, NULL },
1193 { "REGDUMPFLT", DBG_REGDUMPFLT, NULL },
1194 { "REGDUMP", DBG_REGDUMP, NULL },
1195
1196 { "ADDRMOD", DBG_ADDRMOD, NULL },
1197 { "APPENDING", DBG_APPENDING, NULL },
1198
1199 { "NOTIFY", DBG_NOTIFY, NULL },
1200 { "INFO", DBG_INFO, NULL },
1201 { "ERR", DBG_ERR, NULL },
1202 { "WARN", DBG_WARN, NULL },
1203 { "DEBUG", DBG_DEBUG, NULL },
1204 { "ALL", DBG_ALL, NULL },
1205
1206 { "FAULT", DBG_FAULT, NULL },
1207 { "INTR", DBG_INTR, NULL },
1208 { "CORE", DBG_CORE, NULL },
1209 { "CYCLE", DBG_CYCLE, NULL },
1210 { "CAC", DBG_CAC, NULL },
1211 { "FINAL", DBG_FINAL, NULL },
1212 { "AVC", DBG_AVC, NULL },
1213 { NULL, 0, NULL }
1214 };
1215
1216
1217 const char *sim_stop_messages[] =
1218 {
1219 "Unknown error",
1220 "Simulation stop",
1221 "Breakpoint",
1222 };
1223
1252 #if !defined(SPEED)
1253 static bool watch_bits [MEMSIZE];
1254 #endif
1255
1256 char * str_SDW0 (char * buf, sdw0_s * SDW)
1257 {
1258 (void)sprintf (buf, "ADDR=%06o R1=%o R2=%o R3=%o F=%o FC=%o BOUND=%o R=%o "
1259 "E=%o W=%o P=%o U=%o G=%o C=%o EB=%o",
1260 SDW->ADDR, SDW->R1, SDW->R2, SDW->R3, SDW->DF,
1261 SDW->FC, SDW->BOUND, SDW->R, SDW->E, SDW->W,
1262 SDW->P, SDW->U, SDW->G, SDW->C, SDW->EB);
1263 return buf;
1264 }
1265
1266 static t_stat cpu_boot (UNUSED int32 cpu_unit_idx, UNUSED DEVICE * dptr)
1267 {
1268 sim_warn ("Try 'BOOT IOMn'\r\n");
1269 return SCPE_ARG;
1270 }
1271
1294 #define ZONE_SZ (MEM_SIZE_MAX / 4)
1295
1296
1297
1298
1299
1300
1301
1302
1303
1304
1305
1306
1307
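// Build this CPU's SC (system controller) bank map.  For each enabled, cabled
// port the configured store size is translated to words, and every
// SCBANK_SZ-word page of that store is recorded in sc_addr_map[] (as an offset
// into the port's ZONE_SZ region of simulator memory) and in sc_scu_map[] (the
// port number serving that page).  Overlapping or out-of-range banks are
// reported with sim_warn.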
1308 void setup_scbank_map (cpu_state_t * cpup)
1309 {
1310
1311 for (uint pg = 0; pg < N_SCBANKS; pg ++)
1312 {
1313 cpu.sc_addr_map [pg] = -1;
1314 cpu.sc_scu_map [pg] = -1;
1315 }
1316 for (uint u = 0; u < N_SCU_UNITS_MAX; u ++)
1317 cpu.sc_num_banks[u] = 0;
1318
1319
1320 for (int port_num = 0; port_num < (cpu.tweaks.l68_mode ? N_L68_CPU_PORTS : N_DPS8M_CPU_PORTS); port_num ++)
1321 {
1322
1323 if (! cpu.switches.enable [port_num])
1324 continue;
1325
1326
1327
1328
1329 if (! cables->cpu_to_scu[current_running_cpu_idx][port_num].in_use)
1330 {
1331 continue;
1332 }
1333
1334
1335 uint store_size = cpu.switches.store_size [port_num];
1336 uint dps8m_store_table [8] =
1337 { 32768, 65536, 131072, 262144, 524288, 1048576, 2097152, 4194304 };
1338
1339
1340
1341
1342
1343
1344
1345
1346
1347
1348
1349
1350
1351
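        // The L68 store-size switch encoding is not monotonic (note the table
        // order), and under ISOLTS the code that normally selects 128K words is
        // treated as 64K.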
1352 uint l68_store_table [8] =
1353 { 32768, 65536, 4194304, 131072, 524288, 1048576, 2097152, 262144 };
1354 uint l68_isolts_store_table [8] =
1355 { 32768, 65536, 4194304, 65536, 524288, 1048576, 2097152, 262144 };
1356
1357 uint sz_wds =
1358 cpu.tweaks.l68_mode ?
1359 cpu.tweaks.isolts_mode ?
1360 l68_isolts_store_table [store_size] :
1361 l68_store_table [store_size] :
1362 dps8m_store_table [store_size];
1363
1364
1365 uint base_addr_wds = sz_wds * cpu.switches.assignment[port_num];
1366
1367
1368 uint num_banks = sz_wds / SCBANK_SZ;
1369 cpu.sc_num_banks[port_num] = num_banks;
1370 uint base_addr_bks = base_addr_wds / SCBANK_SZ;
1371
1372
1373 for (uint pg = 0; pg < num_banks; pg ++)
1374 {
1375
1376 uint addr_bks = base_addr_bks + pg;
1377
1378 if (addr_bks < N_SCBANKS)
1379 {
1380
1381 if (cpu.sc_addr_map [addr_bks] != -1)
1382 {
1383 sim_warn ("scbank overlap addr_bks %d (%o) old port %d "
1384 "newport %d\r\n",
1385 addr_bks, addr_bks, cpu.sc_addr_map [addr_bks], port_num);
1386 }
1387 else
1388 {
1389
1390 cpu.sc_addr_map[addr_bks] = (int)((int)port_num * (int)ZONE_SZ + (int)pg * (int)SCBANK_SZ);
1391 cpu.sc_scu_map[addr_bks] = port_num;
1392 }
1393 }
1394 else
1395 {
1396 sim_warn ("addr_bks too big port %d addr_bks %d (%o), "
1397 "limit %d (%o)\r\n",
1398 port_num, addr_bks, addr_bks, N_SCBANKS, N_SCBANKS);
1399 }
1400 }
1401
1402 }
1403
1404
1405
1406 }
1407
1408 int lookup_cpu_mem_map (cpu_state_t * cpup, word24 addr)
1409 {
1410 uint scpg = addr / SCBANK_SZ;
1411 if (scpg < N_SCBANKS)
1412 {
1413 return cpu.sc_scu_map[scpg];
1414 }
1415 return -1;
1416 }
1417
1418
1419
1420
1421
1422
1423
1424
1425
1426 #if !defined(PERF_STRIP)
1427 static void get_serial_number (cpu_state_t * cpup)
1428 {
1429 bool havesn = false;
1430 FILE * fp = fopen ("./serial.txt", "r");
1431 while (fp && ! feof (fp))
1432 {
1433 char buffer [81] = "";
1434 # if !defined(__clang_analyzer__)
1435 char * checksn = fgets (buffer, sizeof (buffer), fp);
1436 (void)checksn;
1437 # endif
1438 uint cpun, sn;
1439 if (sscanf (buffer, "sn: %u", & sn) == 1)
1440 {
1441 if (cpu.switches.serno)
1442 sim_msg ("\r\nReplacing CPU serial number:\r\n");
1443 cpu.switches.serno = sn;
1444 if (!sim_quiet)
1445 {
1446 sim_msg ("%s CPU serial number: %u\r\n", sim_name, cpu.switches.serno);
1447 }
1448 havesn = true;
1449 }
1450 else if (sscanf (buffer, "sn%u: %u", & cpun, & sn) == 2)
1451 {
1452 if (cpun < N_CPU_UNITS_MAX)
1453 {
1454 if (cpus[cpun].switches.serno)
1455 sim_msg ("\r\nReplacing CPU %u serial number:\r\n", cpun);
1456 cpus[cpun].switches.serno = sn;
1457 if (!sim_quiet)
1458 {
1459 sim_msg ("%s CPU %u serial number: %u\r\n",
1460 sim_name, cpun, cpus[cpun].switches.serno);
1461 }
1462 havesn = true;
1463 }
1464 }
1465 }
1466 if (!havesn)
1467 {
1468 if (!sim_quiet)
1469 {
1470 sim_msg ("\r\nPlease register your system at "
1471 "https://ringzero.wikidot.com/wiki:register\r\n");
1472 sim_msg ("or create the file 'serial.txt' containing the line "
1473 "'sn: 0'.\r\n\r\n");
1474 }
1475 }
1476 if (fp)
1477 fclose (fp);
1478 }
1479 #endif
1480
1481 #if defined(STATS)
1482 static void do_stats (void)
1483 {
1484 static struct timespec stats_time;
1485 static bool first = true;
1486 if (first)
1487 {
1488 first = false;
1489 clock_gettime (CLOCK_BOOTTIME, & stats_time);
1490 sim_msg ("stats started\r\n");
1491 }
1492 else
1493 {
1494 struct timespec now, delta;
1495 clock_gettime (CLOCK_BOOTTIME, & now);
1496 timespec_diff (& stats_time, & now, & delta);
1497 stats_time = now;
1498 sim_msg ("stats %6ld.%02ld\r\n", delta.tv_sec,
1499 delta.tv_nsec / 10000000);
1500
1501 sim_msg ("Instruction counts\r\n");
1502 for (uint i = 0; i < 8; i ++)
1503 {
1504 # if defined(WIN_STDIO)
1505 sim_msg (" %9lld\r\n", (long long int) cpus[i].instrCnt);
1506 # else
1507 sim_msg (" %'9lld\r\n", (long long int) cpus[i].instrCnt);
1508 # endif
1509 cpus[i].instrCnt = 0;
1510 }
1511 sim_msg ("\r\n");
1512 }
1513 }
1514 #endif
1515
1516
1517
1518 #if !defined(PERF_STRIP)
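// libuv timer callback, fired every sys_opts.sys_poll_interval milliseconds:
// fast devices (FNP, sockets, console, IOM, ABSI, MGP, panel) are polled on
// every call, while the card reader and statistics work is throttled to once
// per sys_slow_poll_interval invocations.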
1519 static void ev_poll_cb (UNUSED uv_timer_t * handle)
1520 {
1521 cpu_state_t * cpup = _cpup;
1522
1523
1524 static uint oneHz = 0;
1525 if (oneHz ++ >= sys_opts.sys_slow_poll_interval)
1526 {
1527 oneHz = 0;
1528 rdrProcessEvent ();
1529 # if defined(STATS)
1530 do_stats ();
1531 # endif
1532 cpu.instrCntT0 = cpu.instrCntT1;
1533 cpu.instrCntT1 = cpu.instrCnt;
1534 }
1535 fnpProcessEvent ();
1536 # if defined(WITH_SOCKET_DEV)
1537 # if !defined(__MINGW64__) && !defined(__MINGW32__) && !defined(CROSS_MINGW32) && !defined(CROSS_MINGW64)
1538 sk_process_event ();
1539 # endif
1540 # endif
1541 consoleProcess ();
1542 # if defined(IO_ASYNC_PAYLOAD_CHAN)
1543 iomProcess ();
1544 # endif
1545 # if defined(WITH_ABSI_DEV)
1546 # if !defined(__MINGW32__) && !defined(__MINGW64__) && !defined(CROSS_MINGW32) && !defined(CROSS_MINGW64)
1547 absi_process_event ();
1548 # endif
1549 # endif
1550 # if defined(WITH_MGP_DEV)
1551 # if !defined(__MINGW32__) && !defined(__MINGW64__) && !defined(CROSS_MINGW32) && !defined(CROSS_MINGW64)
1552 mgp_process_event ();
1553 # endif
1554 # endif
1555 PNL (panel_process_event ());
1556 }
1557 #endif
1558
1559
1560
1561 void cpu_init (void)
1562 {
1563
1564
1565
1566 M = system_state->M;
1567 #if defined(M_SHARED)
1568 cpus = system_state->cpus;
1569 #endif
1570
1571 #if !defined(SPEED)
1572 (void)memset (& watch_bits, 0, sizeof (watch_bits));
1573 #endif
1574
1575 set_cpu_idx (0);
1576
1577 (void)memset (cpus, 0, sizeof (cpu_state_t) * N_CPU_UNITS_MAX);
1578
1579 #if !defined(PERF_STRIP)
1580 get_serial_number (_cpup);
1581
1582 ev_poll_loop = uv_default_loop ();
1583 uv_timer_init (ev_poll_loop, & ev_poll_handle);
1584
1585 uv_timer_start (& ev_poll_handle, ev_poll_cb, sys_opts.sys_poll_interval, sys_opts.sys_poll_interval);
1586 #endif
1587
1588
1589 cpu_state_t * cpup = _cpup;
1590
1591 cpu.instrCnt = 0;
1592 cpu.cycleCnt = 0;
1593 for (int i = 0; i < N_FAULTS; i ++)
1594 cpu.faultCnt [i] = 0;
1595
1596 #if defined(MATRIX)
1597 initializeTheMatrix ();
1598 #endif
1599 }
1600
1601 static void cpu_reset (void)
1602 {
1603 for (uint i = 0; i < N_CPU_UNITS_MAX; i ++)
1604 {
1605 cpu_reset_unit_idx (i, true);
1606 }
1607
1608 set_cpu_idx (0);
1609
1610 #if defined(TESTING)
1611 cpu_state_t * cpup = _cpup;
1612 sim_debug (DBG_INFO, & cpu_dev, "CPU reset: Running\r\n");
1613 #endif
1614 }
1615
1616 static t_stat sim_cpu_reset (UNUSED DEVICE *dptr)
1617 {
1618
1619
1620
1621
1622
1623 cpu_reset ();
1624 return SCPE_OK;
1625 }
1626
1627
1628
1629
1630
1631
1632
1633
1634
1635 static t_stat cpu_ex (t_value *vptr, t_addr addr, UNUSED UNIT * uptr,
1636 UNUSED int32 sw)
1637 {
 1638     if (addr >= MEMSIZE)
1639 return SCPE_NXM;
1640 if (vptr != NULL)
1641 {
1642 *vptr = M[addr] & DMASK;
1643 }
1644 return SCPE_OK;
1645 }
1646
1647
1648
1649 static t_stat cpu_dep (t_value val, t_addr addr, UNUSED UNIT * uptr,
1650 UNUSED int32 sw)
1651 {
1652 if (addr >= MEMSIZE) return SCPE_NXM;
1653 M[addr] = val & DMASK;
1654 return SCPE_OK;
1655 }
1656
1657
1658
1659
1660
1661 #if defined(M_SHARED)
1662
1663 static word18 dummy_IC;
1664 #endif
1665
1666 static REG cpu_reg[] =
1667 {
1668
1669 #if defined(M_SHARED)
1670 { ORDATA (IC, dummy_IC, VASIZE), 0, 0, 0 },
1671 #else
1672 { ORDATA (IC, cpus[0].PPR.IC, VASIZE), 0, 0, 0 },
1673 #endif
1674 { NULL, NULL, 0, 0, 0, 0, NULL, NULL, 0, 0, 0 }
1675 };
1676
1677
1678
1679
1680
1681 REG *sim_PC = & cpu_reg[0];
1682
1683
1684
1685 DEVICE cpu_dev =
1686 {
1687 "CPU",
1688 cpu_unit,
1689 cpu_reg,
1690 cpu_mod,
1691 N_CPU_UNITS,
1692 8,
1693 PASIZE,
1694 1,
1695 8,
1696 36,
1697 & cpu_ex,
1698 & cpu_dep,
1699 & sim_cpu_reset,
1700 & cpu_boot,
1701 NULL,
1702 NULL,
1703 NULL,
1704 DEV_DEBUG,
1705 0,
1706 cpu_dt,
1707 NULL,
1708 NULL,
1709 NULL,
1710 NULL,
1711 NULL,
1712 NULL,
1713 NULL
1714 };
1715
1716 #if defined(M_SHARED)
1717 cpu_state_t * cpus = NULL;
1718 #else
1719 cpu_state_t cpus [N_CPU_UNITS_MAX];
1720 #endif
1721 #if defined(THREADZ) || defined(LOCKLESS)
1722 __thread cpu_state_t * restrict _cpup;
1723 #else
1724 cpu_state_t * restrict _cpup;
1725 #endif
1726 #if defined(ROUND_ROBIN)
1727 uint current_running_cpu_idx;
1728 #endif
1729
1730
1731
1732
1733
1734
1735
1736 static uint get_highest_intr (cpu_state_t *cpup)
1737 {
1738 uint fp = 1;
1739 for (uint scu_unit_idx = 0; scu_unit_idx < N_SCU_UNITS_MAX; scu_unit_idx ++)
1740 {
1741 if (cpu.events.XIP [scu_unit_idx])
1742 {
1743 fp = scu_get_highest_intr (scu_unit_idx);
1744 if (fp != 1)
1745 break;
1746 }
1747 }
1748 return fp;
1749 }
1750
1751 bool sample_interrupts (cpu_state_t * cpup)
1752 {
1753 cpu.lufCounter = 0;
1754 for (uint scu_unit_idx = 0; scu_unit_idx < N_SCU_UNITS_MAX; scu_unit_idx ++)
1755 {
1756 if (cpu.events.XIP [scu_unit_idx])
1757 {
1758 return true;
1759 }
1760 }
1761 return false;
1762 }
1763
1764 t_stat simh_hooks (cpu_state_t * cpup)
1765 {
1766 int reason = 0;
1767
1768 if (breakEnable && stop_cpu)
1769 return STOP_STOP;
1770
1771 if (cpu.tweaks.isolts_mode == 0)
1772 {
1773
1774 if (sim_interval <= 0)
1775 {
1776 reason = sim_process_event ();
1777 if ((! breakEnable) && reason == SCPE_STOP)
1778 reason = SCPE_OK;
1779 if (reason)
1780 return reason;
1781 }
1782 }
1783
1784 sim_interval --;
1785
1786 #if !defined(THREADZ) && !defined(LOCKLESS)
1787
1788
1789
1790 if (sim_brk_summ &&
1791 sim_brk_test ((cpu.PPR.IC & 0777777) |
1792 ((((t_addr) cpu.PPR.PSR) & 037777) << 18),
1793 SWMASK ('E')))
1794 return STOP_BKPT;
1795 # if !defined(SPEED)
1796 if (sim_deb_break && cpu.cycleCnt >= sim_deb_break)
1797 return STOP_BKPT;
1798 # endif
1799 #endif
1800
1801 return reason;
1802 }
1803
1804 #if defined(PANEL68)
1805 static void panel_process_event (void)
1806 {
1807 cpu_state_t * cpup = _cpup;
1808
1809 if (cpu.panelInitialize && cpu.DATA_panel_s_trig_sw == 0)
1810 {
1811
1812 while (cpu.panelInitialize)
1813 ;
1814 if (cpu.DATA_panel_init_sw)
1815 cpu_reset_unit_idx (ASSUME0, true);
1816 else
1817 cpu_reset_unit_idx (ASSUME0, false);
1818
1819 do_boot ();
1820 }
1821
1822 if (cpu.DATA_panel_s_trig_sw == 0 &&
1823 cpu.DATA_panel_execute_sw &&
1824 cpu.DATA_panel_scope_sw &&
1825 cpu.DATA_panel_exec_sw == 0)
1826
1827 {
1828
1829 while (cpu.DATA_panel_execute_sw)
1830 ;
1831
1832 if (cpu.DATA_panel_exec_sw)
1833 {
1834 cpu_reset_unit_idx (ASSUME0, false);
1835 cpu.cu.IWB = cpu.switches.data_switches;
1836 set_cpu_cycle (cpup, EXEC_cycle);
1837 }
1838 else
1839 {
1840 setG7fault (current_running_cpu_idx, FAULT_EXF);
1841 }
1842 }
1843 }
1844 #endif
1845
1846 #if defined(THREADZ) || defined(LOCKLESS)
1847 bool bce_dis_called = false;
1848
1849
1850 t_stat sim_instr (void)
1851 {
1852 cpu_state_t * cpup = _cpup;
1853 t_stat reason = 0;
1854
1899 if (cpuThreadz[0].run == false)
1900 createCPUThread (0);
1901 do
1902 {
1903
1904 reason = simh_hooks (cpup);
1905 if (reason)
1906 {
1907 break;
1908 }
1909
1910
1911
1912
1913
1914
1915
1916
1917
1918
1919
1920
1921
1922
1923
1924
1925
1926
1927
1928
1929
1930
1931
1932
1933
1934
1935
1936 if (bce_dis_called) {
1937
1938 reason = STOP_STOP;
1939 break;
1940 }
1941
1942 # if !defined(PERF_STRIP)
1943
1944
1945 # if defined(LOCKLESS)
1946 lock_iom();
1947 # endif
1948 lock_libuv ();
1949 uv_run (ev_poll_loop, UV_RUN_NOWAIT);
1950 unlock_libuv ();
1951 # if defined(LOCKLESS)
1952 unlock_iom();
1953 # endif
1954 PNL (panel_process_event ());
1955
1956 int con_unit_idx = check_attn_key ();
1957 if (con_unit_idx != -1)
1958 console_attn_idx (con_unit_idx);
1959 # endif
1960
1961 # if defined(IO_ASYNC_PAYLOAD_CHAN_THREAD)
1962 struct timespec next_time;
1963 clock_gettime (CLOCK_REALTIME, & next_time);
1964 next_time.tv_nsec += 1000l * 1000l;
1965 if (next_time.tv_nsec >= 1000l * 1000l *1000l)
1966 {
1967 next_time.tv_nsec -= 1000l * 1000l *1000l;
1968 next_time.tv_sec += (time_t) 1;
1969 }
1970 struct timespec new_time;
1971 do
1972 {
1973 pthread_mutex_lock (& iom_start_lock);
1974 pthread_cond_timedwait (& iomCond,
1975 & iom_start_lock,
1976 & next_time);
1977 pthread_mutex_unlock (& iom_start_lock);
1978 lock_iom();
1979 lock_libuv ();
1980
1981 iomProcess ();
1982
1983 unlock_libuv ();
1984 unlock_iom ();
1985
1986 clock_gettime (CLOCK_REALTIME, & new_time);
1987 }
1988 while ((next_time.tv_sec == new_time.tv_sec) ? (next_time.tv_nsec > new_time.tv_nsec) : \
1989 (next_time.tv_sec > new_time.tv_sec));
1990 # else
1991 sim_usleep (1000);
1992 # endif
1993 }
1994 while (reason == 0);
1995
1996 for (uint cpuNo = 0; cpuNo < N_CPU_UNITS_MAX; cpuNo ++) {
1997 cpuStats (cpuNo);
1998 }
1999
2000 # if defined(TESTING)
2001 HDBGPrint ();
2002 # endif
2003 return reason;
2004 }
2005 #endif
2006
2007 #if !defined(THREADZ) && !defined(LOCKLESS)
2008 static uint fast_queue_subsample = 0;
2009 #endif
2010
2059 #if defined(THREADZ) || defined(LOCKLESS)
2060 void * cpu_thread_main (void * arg)
2061 {
2062 int myid = * (int *) arg;
2063 set_cpu_idx ((uint) myid);
2064 unsigned char umyid = (unsigned char)toupper('a' + (int)myid);
2065 char thread_name[SIR_MAXPID] = {0};
2066 char temp_thread_name[SIR_MAXPID] = {0};
2067
2068 _cpup->thread_id = pthread_self();
2069
2070 if (realtime_ok) {
2071 set_realtime_priority (pthread_self(), realtime_max_priority() - 1);
2072 check_realtime_priority (pthread_self(), realtime_max_priority() - 1);
2073 } else {
2074 # if !defined(__QNX__)
2075 (void)sim_os_set_thread_priority (PRIORITY_ABOVE_NORMAL);
2076 # endif
2077 }
2078 _sir_snprintf_trunc(thread_name, SIR_MAXPID, "CPU %c", (unsigned int)umyid);
2079 if (!_sir_setthreadname(thread_name) || !_sir_getthreadname(temp_thread_name))
2080 (void)sir_info ("%s thread created (TID " SIR_TIDFORMAT ")",
2081 thread_name, PID_CAST _sir_gettid());
2082 else
2083 (void)sir_info ("Thread created (TID " SIR_TIDFORMAT ")",
2084 PID_CAST _sir_gettid());
2085 # if defined(TESTING) && defined(__APPLE__) && defined(__MACH__)
2086 (void)sir_info ("Mach thread ID: 0x%x", pthread_mach_thread_np(pthread_self()));
2087 # endif
2088 bool warned = false;
2089 if (realtime_ok) {
2090 if (myid + 2 > nprocs) {
2091 (void)sir_warn ("Total number of supervisor and CPU threads (%lu) exceeds available host parallelism (%lu)!",
2092 (unsigned long)(myid) + 2, (unsigned long)nprocs);
2093 warned = true;
2094 }
2095 if (!warned && nprocs >= 2 && ncores >= 1 && nprocs >= ncores && myid + 2 > ncores) {
2096 (void)sir_warn ("Total number of supervisor and CPU threads (%lu) exceeds physical host core count (%lu)!",
2097 (unsigned long)(myid) + 2, (unsigned long)ncores);
2098 }
2099 } else {
2100 if (myid + 1 > nprocs) {
2101 (void)sir_warn ("Total number of CPU threads (%lu) exceeds available host parallelism (%lu)!",
2102 (unsigned long)(myid) + 1, (unsigned long)nprocs);
2103 warned = true;
2104 }
2105 if (!warned && ncores >= 1 && nprocs >= ncores && myid + 1 > ncores) {
2106 (void)sir_warn ("Total number of CPU threads (%lu) exceeds physical host core count (%lu)!",
2107 (unsigned long)(myid) + 1, (unsigned long)ncores);
2108 }
2109 }
2110 setSignals ();
2111 threadz_sim_instr ();
2112 return NULL;
2113 }
2114 #endif
2115
2116 NO_RETURN
2117 static void do_LUF_fault (cpu_state_t * cpup)
2118 {
2119 CPT (cpt1U, 16);
2120 cpu.lufCounter = 0;
2121 cpu.lufOccurred = false;
2122
2123
2124
2125
2126
2127
2128
2129
2130
2131
2132
2133
2134
2135
2136
2137
2138
2139
2140 if (cpu.tweaks.isolts_mode)
2141 cpu.shadowTR = (word27) cpu.TR0 - (1024u << (is_priv_mode (cpup) ? 4 : cpu.CMR.luf));
2142
2143
2144
2145
2146
2147
2148
2149
2150
2151
2152
2153
2154 doFault (FAULT_LUF, fst_zero, "instruction cycle lockup");
2155 }
2156
2157 #if !defined(THREADZ) && !defined(LOCKLESS)
2158 # define threadz_sim_instr sim_instr
2159 #endif
2160
2161
2162
2163
2164
2165
2166
2167
2168
2169
2170
2171 static void set_temporary_absolute_mode (cpu_state_t * cpup)
2172 {
2173 CPT (cpt1L, 20);
2174 cpu.secret_addressing_mode = true;
2175 cpu.cu.XSF = false;
2176 sim_debug (DBG_TRACEEXT, & cpu_dev, "set_temporary_absolute_mode bit 29 sets XSF to 0\r\n");
2177
2178 }
2179
2180 static bool clear_temporary_absolute_mode (cpu_state_t * cpup)
2181 {
2182 CPT (cpt1L, 21);
2183 cpu.secret_addressing_mode = false;
2184 return cpu.cu.XSF;
2185
2186 }
2187
2188 #if defined(THREADZ) || defined(LOCKLESS)
2189 static const int workAllocationQuantum = 64;
2190 static const int syncClockModePollRate = 64;
2191 static const int masterCycleCntlimit = 2048;
2192
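// Sync-clock support: the first CPU to call becomeClockMaster() records itself
// in syncClockModeMasterIdx, zeroes the other CPUs' work allocations, and asks
// every other running, non-slave CPU to become a slave; the explicit fences
// order those stores before syncClockMode itself is published.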
2193 void becomeClockMaster (uint cpuNum) {
2194
2195 # ifdef SYNCTEST
2196 sim_printf ("CPU%c %s entry\r\n", cpuNum + 'A', __func__);
2197 allocCount = 0;
2198 # endif
2199
2200
2201
2202
2203 if (syncClockMode) {
2204
2205
2206
2207 return;
2208 }
2209
2210 syncClockModeMasterIdx = cpuNum;
2211 cpu_state_t * cpup = & cpus[cpuNum];
2212 cpu.syncClockModeMaster = true;
2213 cpu.masterCycleCnt = 0;
2214 cpu.syncClockModeCache = true;
2215 for (int i = 0; i < N_CPU_UNITS_MAX; i ++) {
2216 if (i != cpuNum) {
2217 cpus[i].workAllocation = 0;
2218 __asm volatile ("");
2219 atomic_thread_fence (memory_order_seq_cst);
2220 if (cpus[i].inMultics && ! cpus[i].isSlave) {
2221 cpus[i].syncClockModePoll = 0;
2222 __asm volatile ("");
2223 atomic_thread_fence (memory_order_seq_cst);
2224 cpus[i].becomeSlave = true;
2225 __asm volatile ("");
2226 atomic_thread_fence (memory_order_seq_cst);
2227 }
2228 }
2229 }
2230
2231 __asm volatile ("");
2232 atomic_thread_fence (memory_order_seq_cst);
2233 syncClockMode = true;
2234
2235 __asm volatile ("");
2236 atomic_thread_fence (memory_order_seq_cst);
2237
2238 }
2239
2240 void giveupClockMaster (cpu_state_t * cpup) {
2241
2242 # ifdef SYNCTEST
2243
2244 sim_printf ("CPU%c %s entry\r\n", cpu.cpuIdx + 'A', __func__);
2245 sim_printf ("CPU%c Alloc count %d\r\n", cpu.cpuIdx + 'A', allocCount);
2246 # endif
2247 __asm volatile ("");
2248 cpu.syncClockModeMaster = false;
2249 __asm volatile ("");
2250 syncClockMode = false;
2251 __asm volatile ("");
2252 for (int i = 0; i < N_CPU_UNITS_MAX; i ++) {
2253 cpus[i].syncClockModeCache = false;
2254 }
2255 __asm volatile ("");
2256 atomic_thread_fence (memory_order_seq_cst);
2257
2258 }
2259 #endif
2260
2261 t_stat threadz_sim_instr (void)
2262 {
2263 #if !defined(ROUND_ROBIN)
2264 cpu_state_t * cpup = _cpup;
2265 #endif
2266
2267
2268 #if !defined(SCHED_NEVER_YIELD)
2269 unsigned long long lockYieldAll = 0;
2270 #endif
2271 unsigned long long lockWaitMaxAll = 0;
2272 unsigned long long lockWaitAll = 0;
2273 unsigned long long lockImmediateAll = 0;
2274 unsigned long long lockCntAll = 0;
2275 unsigned long long instrCntAll = 0;
2276 unsigned long long cycleCntAll = 0;
2277
2278 t_stat reason = 0;
2279
2280 #if !defined(THREADZ) && !defined(LOCKLESS)
2281 set_cpu_idx (0);
2282 # if defined(M_SHARED)
2283
2284
2285
2286
2287 cpus [0].PPR.IC = dummy_IC;
2288 # endif
2289
2290 # if defined(ROUND_ROBIN)
2291 cpu_state_t * cpup = _cpup;
2292 cpup->isRunning = true;
2293 set_cpu_idx (cpu_dev.numunits - 1);
2294
2295 setCPU:;
2296 uint current = current_running_cpu_idx;
2297 uint c;
2298 for (c = 0; c < cpu_dev.numunits; c ++)
2299 {
2300 set_cpu_idx (c);
2301 if (cpu.isRunning)
2302 break;
2303 }
2304 if (c == cpu_dev.numunits)
2305 {
2306 sim_msg ("All CPUs stopped\r\n");
2307 goto leave;
2308 }
2309 set_cpu_idx ((current + 1) % cpu_dev.numunits);
2310 if (! _cpup-> isRunning)
2311 goto setCPU;
2312 # endif
2313 #endif
2314
2315
2316 int val = setjmp (cpu.jmpMain);
2317
2318 switch (val)
2319 {
2320 case JMP_ENTRY:
2321 case JMP_REENTRY:
2322 reason = 0;
2323 break;
2324 case JMP_SYNC_FAULT_RETURN:
2325 set_cpu_cycle (cpup, SYNC_FAULT_RTN_cycle);
2326 break;
2327 case JMP_STOP:
2328 reason = STOP_STOP;
2329 goto leave;
2330 case JMP_REFETCH:
2331
2332
2333
2334
2335
2336
2337
2338 cpu.wasXfer = false;
2339
2340 set_cpu_cycle (cpup, FETCH_cycle);
2341 break;
2342 case JMP_RESTART:
2343 set_cpu_cycle (cpup, EXEC_cycle);
2344 break;
2345 case JMP_FORCE_RESTART:
2346
2347
2348
2349
2350
2351
2352 cpu_reset_unit_idx (current_running_cpu_idx, false);
2353 #if defined(THREADZ) || defined(LOCKLESS)
2354
2355 if (syncClockMode && syncClockModeMasterIdx == current_running_cpu_idx)
2356 giveupClockMaster (cpup);
2357 #endif
2358 break;
2359 default:
2360 sim_warn ("longjmp value of %d unhandled\r\n", val);
2361 goto leave;
2362 }
2363
2364
2365
2366 DCDstruct * ci = & cpu.currentInstruction;
2367
2368 if (cpu.restart)
2369 {
2370 set_cpu_cycle (cpup, FAULT_cycle);
2371 }
2372
2373 #if defined(THREADZ) || defined(LOCKLESS)
2374
2375
2376
2377
2378 __asm volatile ("");
2379 cpu.executing = true;
2380 if (cpu.tweaks.isolts_mode) {
2381 ;
2382 } else {
2383 cpu.inMultics = true;
2384 }
2385 #endif
2386
2387 do
2388 {
2389
2390 reason = 0;
2391
2392 #if !defined(THREADZ) && !defined(LOCKLESS)
2393
2394 reason = simh_hooks (cpup);
2395 if (reason)
2396 {
2397 break;
2398 }
2399
2400
2401
2402
2403
2404
2405
2406
2407 if (fast_queue_subsample ++ > sys_opts.sys_poll_check_rate)
2408 {
2409 fast_queue_subsample = 0;
2410 # if defined(CONSOLE_FIX)
2411 # if defined(THREADZ) || defined(LOCKLESS)
2412 lock_libuv ();
2413 # endif
2414 # endif
2415 uv_run (ev_poll_loop, UV_RUN_NOWAIT);
2416 # if defined(CONSOLE_FIX)
2417 # if defined(THREADZ) || defined(LOCKLESS)
2418 unlock_libuv ();
2419 # endif
2420 # endif
2421 PNL (panel_process_event ());
2422 }
2423 #endif
2424
2425 cpu.cycleCnt ++;
2426
2427 #if defined(THREADZ)
2428
2429 unlock_mem_force ();
2430
2431
2432 cpuRunningWait ();
2433 #endif
2434 #if defined(LOCKLESS)
2435 core_unlock_all (cpup);
2436 #endif
2437
2438 #if defined(ROUND_ROBIN) || !defined(LOCKLESS)
2439 int con_unit_idx = check_attn_key ();
2440 if (con_unit_idx != -1)
2441 console_attn_idx (con_unit_idx);
2442 #endif
2443
2444 #if !defined(THREADZ) && !defined(LOCKLESS)
2445 if (cpu.tweaks.isolts_mode)
2446 {
2447 if (cpu.cycle != FETCH_cycle)
2448 {
2449
2450 cpu.rTRlsb ++;
2451 if (cpu.rTRlsb >= 4)
2452 {
2453 cpu.rTRlsb = 0;
2454 cpu.shadowTR = (cpu.shadowTR - 1) & MASK27;
2455 if (cpu.shadowTR == 0)
2456 {
2457 if (cpu.tweaks.tro_enable)
2458 setG7fault (current_running_cpu_idx, FAULT_TRO);
2459 }
2460 }
2461 }
2462 }
2463 #endif
2464
2465
2466
2467
2468
2469
2470
2471
2472
2473
2474
2475
2476
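// Timer Register update: rTRticks accumulates simulated ticks and is scaled
// down by TR_RATE before being subtracted from the 27-bit rTR.  If the
// subtraction borrows out of bit 27, rTR wraps and a Timer Runout group-7
// fault is raised when tro_enable is set.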
2477 # define TR_RATE 2
2478
2479
2480
2481 cpu.rTR = (word27) (((word27s) cpu.rTR) - (word27s) (cpu.rTRticks / TR_RATE));
2482 cpu.rTRticks %= TR_RATE;
2483
2484
2485
2486 if (cpu.rTR & ~MASK27)
2487 {
2488 cpu.rTR &= MASK27;
2489 if (cpu.tweaks.tro_enable) {
2490 setG7fault (current_running_cpu_idx, FAULT_TRO);
2491 }
2492 }
2493
2494 sim_debug (DBG_CYCLE, & cpu_dev, "Cycle is %s\r\n",
2495 cycle_str (cpu.cycle));
2496
2497 switch (cpu.cycle)
2498 {
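// INTERRUPT_cycle: locate the highest-priority pending interrupt pair,
// save the control unit state, enter temporary absolute mode and execute
// the interrupt pair via XED; if nothing is actually pending, restore the
// CU state and fall back to a normal fetch.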
2499 case INTERRUPT_cycle:
2500 {
2501 CPT (cpt1U, 0);
2502
2503
2504
2505
2506
2507
2508
2509
2510
2511
2512
2513 uint intr_pair_addr = get_highest_intr (cpup);
2514 #if defined(TESTING)
2515 HDBGIntr (intr_pair_addr, "");
2516 #endif
2517 cpu.cu.FI_ADDR = (word5) (intr_pair_addr / 2);
2518 cu_safe_store (cpup);
2519
2520
2521
2522 CPT (cpt1U, 1);
2523
2524 set_temporary_absolute_mode (cpup);
2525
2526
2527 cpu.PPR.PRR = 0;
2528 cpu.TPR.TRR = 0;
2529
2530 sim_debug (DBG_INTR, & cpu_dev, "intr_pair_addr %u flag %d\r\n",
2531 intr_pair_addr, cpu.interrupt_flag);
2532 #if !defined(SPEED)
2533 if_sim_debug (DBG_INTR, & cpu_dev)
2534 traceInstruction (DBG_INTR);
2535 #endif
2536
2537 if (cpu.interrupt_flag)
2538 {
2539 CPT (cpt1U, 2);
2540
2541
2542
2543
2544
2545
2546 if (intr_pair_addr != 1)
2547 {
2548 CPT (cpt1U, 3);
2549
2550
2551 core_read2 (cpup, intr_pair_addr,
2552 & cpu.cu.IWB, & cpu.cu.IRODD, __func__);
2553 #if defined(TESTING)
2554 HDBGMRead (intr_pair_addr, cpu.cu.IWB, "intr even");
2555 HDBGMRead (intr_pair_addr + 1, cpu.cu.IRODD, "intr odd");
2556 #endif
2557 cpu.cu.xde = 1;
2558 cpu.cu.xdo = 1;
2559 cpu.isExec = true;
2560 cpu.isXED = true;
2561
2562 CPT (cpt1U, 4);
2563 cpu.interrupt_flag = false;
2564 set_cpu_cycle (cpup, INTERRUPT_EXEC_cycle);
2565 break;
2566 }
2567 }
2568
2569
2570
2571 CPT (cpt1U, 5);
2572 cpu.interrupt_flag = false;
2573 clear_temporary_absolute_mode (cpup);
2574
2575 cu_safe_restore (cpup);
2576
2577
2578 cpu.wasXfer = false;
2579
2580
2581 set_cpu_cycle (cpup, FETCH_cycle);
2582 }
2583 break;
2584
2585 case FETCH_cycle:
2586 #if defined(PANEL68)
2587 (void)memset (cpu.cpt, 0, sizeof (cpu.cpt));
2588 #endif
2589 CPT (cpt1U, 13);
2590
2591 PNL (L68_ (cpu.INS_FETCH = false;))
2592
2593
2594
2595
2596
2597
2598
2599
2600
2601
2602
2603
2604
2605
2606
2607
2608
2609
2610
2611
2612
2613
2614
2615
2616
2617
2618
2619
2620
2621
2622
2623
2624
2625
2626
2627
2628 if (get_bar_mode (cpup))
2629 get_BAR_address (cpup, cpu.PPR.IC);
2630
2631
2632
2633
2634 bool tmp_priv_mode = is_priv_mode (cpup);
2635 bool is_dis = cpu.currentInstruction.opcode == 0616 &&
2636 cpu.currentInstruction.opcodeX == 0;
2637 bool noCheckTR = tmp_priv_mode &&
2638 !(is_dis && GET_I (cpu.cu.IWB) == 0);
2639
2640 if (is_dis)
2641 {
2642
2643
2644 cpu.interrupt_flag = sample_interrupts (cpup);
2645 cpu.g7_flag =
2646 noCheckTR ? bG7PendingNoTRO (cpup) : bG7Pending (cpup);
2647 }
2648 else if (! (cpu.cu.xde | cpu.cu.xdo |
2649 cpu.cu.rpt | cpu.cu.rd | cpu.cu.rl))
2650 {
2651 if ((!cpu.wasInhibited) &&
2652 (cpu.PPR.IC & 1) == 0 &&
2653 (! cpu.wasXfer))
2654 {
2655 CPT (cpt1U, 14);
2656 cpu.interrupt_flag = sample_interrupts (cpup);
2657 cpu.g7_flag =
2658 noCheckTR ? bG7PendingNoTRO (cpup) : bG7Pending (cpup);
2659 }
2660 cpu.wasInhibited = false;
2661 }
2662 else
2663 {
2664
2665
2666
2667
2668
2669 if ((cpu.PPR.IC & 1) == 1)
2670 {
2671 cpu.wasInhibited = true;
2672 }
2673 }
2674
2675
2676
2677
2678
2679
2680
2681
2682
2683
2684
2685
2686
2687
2688
2689
2690
2691
2692
2693
2694
2695
2696
2697
2698
2699
2700
2701
2702
2703
2704
2705
2706
2707
2708
2709 if (cpu.g7_flag)
2710 {
2711 cpu.g7_flag = false;
2712 cpu.interrupt_flag = false;
2713 sim_debug (DBG_CYCLE, & cpu_dev,
2714 "call doG7Fault (%d)\r\n", !noCheckTR);
2715 doG7Fault (cpup, !noCheckTR);
2716 }
2717 if (cpu.interrupt_flag)
2718 {
2719
2720
2721
2722 CPT (cpt1U, 15);
2723 set_cpu_cycle (cpup, INTERRUPT_cycle);
2724 break;
2725 }
2726
2727
2728
2729
2730
2731
2732
2733
2734
2735 case PSEUDO_FETCH_cycle:
2736
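// Lockup fault (LUF) check: lufCounter is advanced on each fetch unless the
// processor is privileged and LUF counting is suppressed.  Past the limit
// selected by CMR.luf the CPU either faults immediately (non-privileged) or
// records the overrun and faults once privileged mode is left; the hard
// limit luf_limits[4] always faults.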
2737 tmp_priv_mode = is_priv_mode (cpup);
2738 if (! (luf_flag && tmp_priv_mode))
2739 cpu.lufCounter ++;
2740
2741 if (cpu.lufCounter > luf_limits[cpu.CMR.luf])
2742 {
2743 if (tmp_priv_mode)
2744 {
2745
2746 cpu.lufOccurred = true;
2747 }
2748 else
2749 {
2750 do_LUF_fault (cpup);
2751 }
2752 }
2753
2754
2755 if (cpu.lufCounter > luf_limits[4])
2756 {
2757 do_LUF_fault (cpup);
2758 }
2759
2760
2761
2762 if (! tmp_priv_mode && cpu.lufOccurred)
2763 {
2764 do_LUF_fault (cpup);
2765 }
2766
2767
2768
2769
2770
2771
2772
2773
2774
2775
2776
2777
2778
2779
2780
2781
2782
2783
2784
2785
2786
2787
2788
2789
2790
2791
2792
2793
2794
2795
2796
2797
2798 if (cpu.cycle == PSEUDO_FETCH_cycle)
2799 {
2800 cpu.apu.lastCycle = INSTRUCTION_FETCH;
2801 cpu.cu.XSF = 0;
2802 cpu.cu.TSN_VALID [0] = 0;
2803 cpu.TPR.TSR = cpu.PPR.PSR;
2804 cpu.TPR.TRR = cpu.PPR.PRR;
2805 cpu.wasInhibited = false;
2806 }
2807 else
2808 {
2809 CPT (cpt1U, 20);
2810 cpu.isExec = false;
2811 cpu.isXED = false;
2812
2813
2814
2815 cpu.cu.XSF = 0;
2816 sim_debug (DBG_TRACEEXT, & cpu_dev, "fetchCycle bit 29 sets XSF to 0\r\n");
2817 cpu.cu.TSN_VALID [0] = 0;
2818 cpu.TPR.TSR = cpu.PPR.PSR;
2819 cpu.TPR.TRR = cpu.PPR.PRR;
2820 PNL (cpu.prepare_state = ps_PIA);
2821 PNL (L68_ (cpu.INS_FETCH = true;))
2822 fetchInstruction (cpup, cpu.PPR.IC);
2823 }
2824
2825 CPT (cpt1U, 21);
2826 advanceG7Faults (cpup);
2827 set_cpu_cycle (cpup, EXEC_cycle);
2828 break;
2829
2830 case EXEC_cycle:
2831 case FAULT_EXEC_cycle:
2832 case INTERRUPT_EXEC_cycle:
2833 {
2834 #if defined(THREADZ) || defined(LOCKLESS)
2835
2836
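/*
 * Sync-clock coordination (THREADZ/LOCKLESS): when syncClockMode is active,
 * one CPU acts as clock master and parcels out work allocations while the
 * other CPUs run as slaves, sleeping until they are granted cycles.
 * becomeSlave requests demotion to slave, and the master gives up mastery
 * if it runs too many cycles or its peers stop making progress.
 */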
2837 if (UNLIKELY (cpu.becomeSlave)) {
2838 cpu.becomeSlave = false;
2839
2840 while (! syncClockMode) {
2841 sim_usleep (1);
2842 }
2843
2844 cpu.syncClockModePoll = 0;
2845 }
2846
2847
2848 if (cpu.syncClockModeCache || --cpu.syncClockModePoll <= 0) {
2849
2850 cpu.syncClockModePoll = cpu.tweaks.isolts_mode ? 1 : syncClockModePollRate;
2851
2852
2853 if (syncClockMode) {
2854
2855
2856 cpu.syncClockModeCache = true;
2857
2858
2859 if (syncClockModeMasterIdx == current_running_cpu_idx) {
2860
2861
2862 cpu.masterCycleCnt ++;
2863 if (cpu.masterCycleCnt > masterCycleCntlimit) {
2864 # ifdef SYNCTEST
2865 sim_printf ("too many cycles\r\n");
2866 # endif
2867 giveupClockMaster (cpup);
2868 goto bail;
2869 }
2870
2871
2872 if (cpu.workAllocation <= 0) {
2873 # ifdef SYNCTEST
2874 allocCount ++;
2875 # endif
2876
2877
2878
2879
2880
2881 int64_t waitTimeout = 100000;
2882
2883
2884 while (1) {
2885 bool alldone = true;
2886 for (int i = 0; i < N_CPU_UNITS_MAX; i ++) {
2887 if (cpus[i].inMultics && cpus[i].workAllocation > 0) {
2888 wakeCPU (i);
2889 alldone = false;
2890
2891 }
2892 }
2893 if (alldone) {
2894
2895 for (int i = 0; i < N_CPU_UNITS_MAX; i ++) {
2896 if (cpus[i].inMultics) {
2897 cpus[i].workAllocation += cpu.tweaks.isolts_mode ? 1 : workAllocationQuantum;
2898 wakeCPU (i);
2899 }
2900 }
2901 break;
2902 }
2903 if (waitTimeout-- < 0) {
2904
2905
2906 sim_printf ("Clock master CPU %c timed out\r\n", "ABCDEFGH"[current_running_cpu_idx]);
2907 for (int i = 0; i < N_CPU_UNITS_MAX; i ++) {
2908 if (cpus[i].inMultics && cpus[i].workAllocation > 0) {
2909 sim_printf ("CPU %c remaining allocation: %ld\r\n", "ABCDEFGH"[i], cpus[i].workAllocation);
2910 }
2911 }
2912 sim_printf ("Conceding clock mastery...\r\n");
2913 cpu.syncClockModeCache = false;
2914 giveupClockMaster (cpup);
2915 goto bail;
2916 }
2917 sim_usleep (1);
2918 }
2919 }
2920
2921
2922
2923 } else {
2924
2925
2926
2927
2928 if (! cpu.isSlave) {
2929
2930 # ifdef SYNCTEST
2931 sim_printf ("CPU%c becoming slave\r\n", cpu.cpuIdx + 'A');
2932 # endif
2933 }
2934 cpu.isSlave = true;
2935
2936
2937 while (syncClockMode && cpu.workAllocation <= 0)
2938 sim_usleep (1);
2939
2940
2941
2942
2943 }
2944
2945 } else {
2946
2947 cpu.syncClockModeCache = false;
2948 if (cpu.isSlave) {
2949
2950 # ifdef SYNCTEST
2951 sim_printf ("CPU%c free; free at last\r\n", cpu.cpuIdx + 'A');
2952 # endif
2953 cpu.isSlave = false;
2954 }
2955 }
2956 }
2957 bail:
2958
2959 #endif
2960
2961 #if defined(THREADZ) || defined(LOCKLESS)
2962 if (LIKELY (! cpu.tweaks.isolts_mode) &&
2963 UNLIKELY (! cpu.inMultics)) {
2964 cpu.inMultics = true;
2965 }
2966 #endif
2967
2968 CPT (cpt1U, 22);
2969
2970 #if defined(LOCKLESS)
2971 if (stall_point_active)
2972 {
2973 for (int i = 0; i < N_STALL_POINTS; i ++)
2974 if (stall_points[i].segno && stall_points[i].segno == cpu.PPR.PSR &&
2975 stall_points[i].offset && stall_points[i].offset == cpu.PPR.IC)
2976 {
2977 # if defined(CTRACE)
2978 (void)fprintf (stderr, "%10lu %s stall %d\r\n", seqno (), cpunstr[current_running_cpu_idx], i);
2979 # endif
2980
2981 sim_usleep(stall_points[i].time);
2982 break;
2983 }
2984 }
2985 #endif
2986
2987
2988
2989
2990 if (GET_I (cpu.cu.IWB))
2991 cpu.wasInhibited = true;
2992
2993 t_stat ret = executeInstruction (cpup);
2994 DO_WORK_EXEC;
2995 CPT (cpt1U, 23);
2996
2997 if (cpu.tweaks.l68_mode)
2998 add_l68_CU_history (cpup);
2999 else
3000 add_dps8m_CU_history (cpup);
3001
3002 if (ret > 0)
3003 {
3004 reason = ret;
3005 break;
3006 }
3007
3008 if (ret == CONT_XEC)
3009 {
3010 CPT (cpt1U, 27);
3011 cpu.wasXfer = false;
3012 cpu.isExec = true;
3013 if (cpu.cu.xdo)
3014 cpu.isXED = true;
3015
3016 cpu.cu.XSF = 0;
3017 cpu.cu.TSN_VALID [0] = 0;
3018 cpu.TPR.TSR = cpu.PPR.PSR;
3019 cpu.TPR.TRR = cpu.PPR.PRR;
3020 break;
3021 }
3022
3023 if (ret == CONT_TRA || ret == CONT_RET)
3024 {
3025 CPT (cpt1U, 24);
3026 cpu.cu.xde = cpu.cu.xdo = 0;
3027 cpu.isExec = false;
3028 cpu.isXED = false;
3029
3030 cpu.wasXfer = true;
3031
3032 if (cpu.cycle != EXEC_cycle)
3033 {
3034 clearFaultCycle (cpup);
3035
3036
3037
3038
3039
3040 if (! (cpu.currentInstruction.opcode == 0715 &&
3041 cpu.currentInstruction.opcodeX == 0))
3042 {
3043 CPT (cpt1U, 9);
3044 SET_I_NBAR;
3045 }
3046
3047 if (!clear_temporary_absolute_mode (cpup))
3048 {
3049
3050 sim_debug (DBG_TRACEEXT, & cpu_dev,
3051 "setting ABS mode\r\n");
3052 CPT (cpt1U, 10);
3053 set_addr_mode (cpup, ABSOLUTE_mode);
3054 }
3055 else
3056 {
3057
3058 sim_debug (DBG_TRACEEXT, & cpu_dev,
3059 "not setting ABS mode\r\n");
3060 }
3061
3062 }
3063
3064
3065 if (TST_I_ABS && cpu.cu.XSF)
3066 {
3067 set_addr_mode (cpup, APPEND_mode);
3068 }
3069
3070 if (ret == CONT_TRA)
3071 {
3072
3073 cpu.wasXfer = false;
3074 set_cpu_cycle (cpup, PSEUDO_FETCH_cycle);
3075 }
3076 else
3077 set_cpu_cycle (cpup, FETCH_cycle);
3078 break;
3079 }
3080
3081 if (ret == CONT_DIS)
3082 {
3083 CPT (cpt1U, 25);
3084
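// CONT_DIS: the DIS instruction idles the processor.  Rather than spin,
// sleep: in threaded builds for up to the time remaining on the Timer
// Register, otherwise for one poll interval; on wakeup either raise a
// Timer Runout fault or charge the slept time against rTR.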
3085 #if defined(THREADZ) || defined(LOCKLESS)
3086
3087 if (cpu.syncClockModeCache) {
3088 break;
3089 }
3090 #endif
3091
3092
3093
3094
3095
3096
3097
3098
3099
3100
3101
3102
3103
3104
3105
3106
3107
3108
3109
3110
3111
3112
3113
3114
3115
3116
3117
3118
3119
3120
3121
3122
3123 #if !defined(ROUND_ROBIN)
3124
3125
3126
3127
3128 # if defined(THREADZ) || defined(LOCKLESS)
3129
3130
3131
3132
3133
3134
3135
3136
3137
3138 # if defined(NO_TIMEWAIT)
3139
3140 struct timespec req, rem;
3141 uint ms = sys_opts.sys_poll_interval;
3142 long int nsec = (long int) ms * 1000L * 1000L;
3143 req.tv_nsec = nsec;
3144 req.tv_sec = req.tv_nsec / 1000000000L;
3145 req.tv_nsec %= 1000000000L;
3146 int rc = nanosleep (& req, & rem);
3147
3148 if (rc == -1)
3149 {
3150 ms = (uint) (rem.tv_nsec / 1000 + req.tv_sec * 1000);
3151 }
3152 word27 ticks = ms * 512;
3153 if (cpu.rTR <= ticks)
3154 {
3155 if (cpu.tweaks.tro_enable) {
3156 setG7fault (current_running_cpu_idx, FAULT_TRO);
3157 }
3158 cpu.rTR = (cpu.rTR - ticks) & MASK27;
3159 }
3160 else
3161 cpu.rTR = (cpu.rTR - ticks) & MASK27;
3162
3163 if (cpu.rTR == 0)
3164 cpu.rTR = MASK27;
3165 # else
3166
3167
3168 unsigned long left = (unsigned long) ((uint64) (cpu.rTR) * 125u / 64u);
3169
3170
3171
3172
3173
3174
3175
3176
3177 unsigned long nowLeft = left;
3178 if (!sample_interrupts (cpup))
3179 {
3180 nowLeft = sleepCPU (left);
3181 }
3182 if (nowLeft)
3183 {
3184
3185
3186 if (nowLeft <= left) {
3187 cpu.rTR = (word27) (left * 64 / 125);
3188 }
3189 }
3190 else
3191 {
3192
3193 if (cpu.tweaks.tro_enable)
3194 {
3195 lock_scu ();
3196 setG7fault (current_running_cpu_idx, FAULT_TRO);
3197 unlock_scu ();
3198 }
3199 cpu.rTR = MASK27;
3200 }
3201 # endif
3202 cpu.rTRticks = 0;
3203 break;
3204 # else
3205
3206 sim_usleep (sys_opts.sys_poll_interval * 1000);
3207
3208 # if defined(CONSOLE_FIX)
3209 # if defined(THREADZ) || defined(LOCKLESS)
3210 lock_libuv ();
3211 # endif
3212 # endif
3213 uv_run (ev_poll_loop, UV_RUN_NOWAIT);
3214 # if defined(CONSOLE_FIX)
3215 # if defined(THREADZ) || defined(LOCKLESS)
3216 unlock_libuv ();
3217 # endif
3218 # endif
3219 fast_queue_subsample = 0;
3220
3221 sim_interval = 0;
3222
3223
3224
3225
3226 cpu.rTRticks = 0;
3227
3228
3229
3230
3231
3232
3233 if (cpu.rTR <= sys_opts.sys_poll_interval * 512)
3234 {
3235 if (cpu.tweaks.tro_enable) {
3236 setG7fault (current_running_cpu_idx, FAULT_TRO);
3237 }
3238 cpu.rTR = (cpu.rTR - sys_opts.sys_poll_interval * 512) & MASK27;
3239 }
3240 else
3241 cpu.rTR = (cpu.rTR - sys_opts.sys_poll_interval * 512) & MASK27;
3242 if (cpu.rTR == 0)
3243 cpu.rTR = MASK27;
3244 # endif
3245 #endif
3246
3247 break;
3248 }
3249
3250 cpu.wasXfer = false;
3251
3252 if (ret < 0)
3253 {
3254 sim_warn ("executeInstruction returned %d?\r\n", ret);
3255 break;
3256 }
3257
3258 if ((! cpu.cu.repeat_first) &&
3259 (cpu.cu.rpt ||
3260 (cpu.cu.rd && (cpu.PPR.IC & 1)) ||
3261 cpu.cu.rl))
3262 {
3263 CPT (cpt1U, 26);
3264 if (cpu.cu.rd)
3265 -- cpu.PPR.IC;
3266 cpu.wasXfer = false;
3267 set_cpu_cycle (cpup, FETCH_cycle);
3268 break;
3269 }
3270
3271
3272 if (cpu.cycle == FAULT_EXEC_cycle &&
3273 !cpu.cu.xde && cpu.cu.xdo)
3274 {
3275 clear_temporary_absolute_mode (cpup);
3276 cu_safe_restore (cpup);
3277 CPT (cpt1U, 12);
3278 clearFaultCycle (cpup);
3279
3280
3281
3282 cpu.wasXfer = false;
3283 cpu.isExec = false;
3284 cpu.isXED = false;
3285
3286 cpu.PPR.IC += ci->info->ndes;
3287 cpu.PPR.IC ++;
3288
3289 set_cpu_cycle (cpup, FETCH_cycle);
3290 break;
3291 }
3292
3293
3294 if (cpu.cycle == INTERRUPT_EXEC_cycle &&
3295 !cpu.cu.xde && cpu.cu.xdo)
3296 {
3297 clear_temporary_absolute_mode (cpup);
3298 cu_safe_restore (cpup);
3299
3300
3301
3302 CPT (cpt1U, 12);
3303 cpu.wasXfer = false;
3304 cpu.isExec = false;
3305 cpu.isXED = false;
3306
3307 set_cpu_cycle (cpup, FETCH_cycle);
3308 break;
3309 }
3310
3311
3312 if (cpu.cu.xde && cpu.cu.xdo)
3313 {
3314
3315 cpu.cu.IWB = cpu.cu.IRODD;
3316 cpu.cu.xde = 0;
3317 cpu.isExec = true;
3318 cpu.isXED = true;
3319 cpu.cu.XSF = 0;
3320 cpu.cu.TSN_VALID [0] = 0;
3321 cpu.TPR.TSR = cpu.PPR.PSR;
3322 cpu.TPR.TRR = cpu.PPR.PRR;
3323 break;
3324 }
3325
3326 if (cpu.cu.xde || cpu.cu.xdo)
3327 {
3328 cpu.cu.xde = cpu.cu.xdo = 0;
3329 cpu.isExec = false;
3330 cpu.isXED = false;
3331 CPT (cpt1U, 27);
3332 cpu.wasXfer = false;
3333 cpu.PPR.IC ++;
3334 if (ci->info->ndes > 0)
3335 cpu.PPR.IC += ci->info->ndes;
3336 cpu.wasInhibited = true;
3337 set_cpu_cycle (cpup, FETCH_cycle);
3338 break;
3339 }
3340
3341
3342 if (cpu.cycle != EXEC_cycle)
3343 sim_warn ("expected EXEC_cycle (%d)\r\n", cpu.cycle);
3344
3345 cpu.cu.xde = cpu.cu.xdo = 0;
3346 cpu.isExec = false;
3347 cpu.isXED = false;
3348
3349
3350
3351
3352
3353
3354
3355
3356
3357
3358 if ((cpu.PPR.IC & 1) == 0 &&
3359 ci->info->ndes == 0 &&
3360 !cpu.cu.repeat_first && !cpu.cu.rpt && !cpu.cu.rd && !cpu.cu.rl &&
3361 !(cpu.currentInstruction.opcode == 0616 && cpu.currentInstruction.opcodeX == 0) &&
3362 (cpu.PPR.IC & ~3u) != (cpu.last_write & ~3u))
3363 {
3364 cpu.PPR.IC ++;
3365 cpu.wasXfer = false;
3366 cpu.cu.IWB = cpu.cu.IRODD;
3367 set_cpu_cycle (cpup, PSEUDO_FETCH_cycle);
3368 break;
3369 }
3370
3371 cpu.PPR.IC ++;
3372 if (ci->info->ndes > 0)
3373 cpu.PPR.IC += ci->info->ndes;
3374
3375 CPT (cpt1U, 28);
3376 cpu.wasXfer = false;
3377 set_cpu_cycle (cpup, FETCH_cycle);
3378 }
3379 break;
3380
3381 case SYNC_FAULT_RTN_cycle:
3382 {
3383 CPT (cpt1U, 29);
3384
3385
3386
3387
3388 cpu.PPR.IC += ci->info->ndes;
3389 cpu.PPR.IC ++;
3390 cpu.wasXfer = false;
3391 set_cpu_cycle (cpup, FETCH_cycle);
3392 }
3393 break;
3394
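// FAULT_cycle: capture the control unit state with cu_safe_store, enter
// temporary absolute mode, form the fault vector address from the FLT_BASE
// switches (or the MR.FFV base for an L68 FFV fault) and execute the fault
// pair via XED.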
3395 case FAULT_cycle:
3396 {
3397 CPT (cpt1U, 30);
3398
3399
3400
3401
3402
3403
3404
3405
3406
3407
3408
3409
3410
3411
3412
3413
3414
3415
3416
3417
3418
3419 if ((cpu.cu.APUCycleBits & 060) || cpu.secret_addressing_mode)
3420 set_apu_status (cpup, apuStatus_FABS);
3421
3422
3423
3424
3425
3426
3427
3428
3429
3430
3431
3432 if (cpu.faultNumber != FAULT_TRB || cpu.cu.xde == 0)
3433 {
3434 cu_safe_store (cpup);
3435 }
3436 else
3437 {
3438 word36 tmpIRODD = cpu.scu_data[7];
3439 cu_safe_store (cpup);
3440 cpu.scu_data[7] = tmpIRODD;
3441 }
3442 CPT (cpt1U, 31);
3443
3444
3445 set_temporary_absolute_mode (cpup);
3446
3447
3448 cpu.PPR.PRR = 0;
3449 cpu.TPR.TRR = 0;
3450
3451
3452 uint fltAddress = (cpu.switches.FLT_BASE << 5) & 07740;
3453 L68_ (
3454 if (cpu.is_FFV)
3455 {
3456 cpu.is_FFV = false;
3457 CPTUR (cptUseMR);
3458
3459 fltAddress = (cpu.MR.FFV & MASK15) << 3;
3460 }
3461 )
3462
3463
3464 word24 addr = fltAddress + 2 * cpu.faultNumber;
3465
3466 if (cpu.restart)
3467 {
3468 cpu.restart = false;
3469 addr = cpu.restart_address;
3470 }
3471
3472 core_read2 (cpup, addr, & cpu.cu.IWB, & cpu.cu.IRODD, __func__);
3473 #if defined(TESTING)
3474 HDBGMRead (addr, cpu.cu.IWB, "fault even");
3475 HDBGMRead (addr + 1, cpu.cu.IRODD, "fault odd");
3476 #endif
3477 cpu.cu.xde = 1;
3478 cpu.cu.xdo = 1;
3479 cpu.isExec = true;
3480 cpu.isXED = true;
3481
3482 CPT (cpt1U, 33);
3483 set_cpu_cycle (cpup, FAULT_EXEC_cycle);
3484
3485 break;
3486 }
3487
3488 }
3489 }
3490 #if defined(ROUND_ROBIN)
3491 while (0);
3492 if (reason == 0)
3493 goto setCPU;
3494 #else
3495 while (reason == 0);
3496 #endif
3497
3498 leave:
3499 #if defined(THREADZ) || defined(LOCKLESS)
3500 cpu.executing = false;
3501 cpu.inMultics = false;
3502 #endif
3503 #if defined(TESTING)
3504 HDBGPrint ();
3505 #endif
3506
3507 for (unsigned short n = 0; n < N_CPU_UNITS_MAX; n++)
3508 {
3509 #if !defined(SCHED_NEVER_YIELD)
3510 lockYieldAll = lockYieldAll + (unsigned long long)cpus[n].coreLockState.lockYield;
3511 #endif
3512 lockWaitMaxAll = lockWaitMaxAll + (unsigned long long)cpus[n].coreLockState.lockWaitMax;
3513 lockWaitAll = lockWaitAll + (unsigned long long)cpus[n].coreLockState.lockWait;
3514 lockImmediateAll = lockImmediateAll + (unsigned long long)cpus[n].coreLockState.lockImmediate;
3515 lockCntAll = lockCntAll + (unsigned long long)cpus[n].coreLockState.lockCnt;
3516 instrCntAll = instrCntAll + (unsigned long long)cpus[n].instrCnt;
3517 cycleCntAll = cycleCntAll + (unsigned long long)cpus[n].cycleCnt;
3518 }
3519
3520 (void)fflush(stderr);
3521 (void)fflush(stdout);
3522
3523 # if !defined(PERF_STRIP)
3524 if (cycleCntAll > (unsigned long long)cpu.cycleCnt)
3525 {
3526 # endif
3527 sim_msg ("\r\n");
3528 sim_msg ("\r+---------------------------------+\r\n");
3529 sim_msg ("\r| Aggregate CPU Statistics |\r\n");
3530 sim_msg ("\r+---------------------------------+\r\n");
3531 (void)fflush(stderr);
3532 (void)fflush(stdout);
3533 # if defined(WIN_STDIO)
3534 sim_msg ("\r| cycles %15llu |\r\n", cycleCntAll);
3535 sim_msg ("\r| instructions %15llu |\r\n", instrCntAll);
3536 (void)fflush(stderr);
3537 (void)fflush(stdout);
3538 sim_msg ("\r+---------------------------------+\r\n");
3539 sim_msg ("\r| lockCnt %15llu |\r\n", lockCntAll);
3540 sim_msg ("\r| lockImmediate %15llu |\r\n", lockImmediateAll);
3541 (void)fflush(stderr);
3542 (void)fflush(stdout);
3543 sim_msg ("\r+---------------------------------+\r\n");
3544 sim_msg ("\r| lockWait %15llu |\r\n", lockWaitAll);
3545 sim_msg ("\r| lockWaitMax %15llu |\r\n", lockWaitMaxAll);
3546 (void)fflush(stderr);
3547 (void)fflush(stdout);
3548 # if !defined(SCHED_NEVER_YIELD)
3549 sim_msg ("\r| lockYield %15llu |\r\n", lockYieldAll);
3550 # else
3551 sim_msg ("\r| lockYield ---- |\r\n");
3552 # endif
3553 sim_msg ("\r+---------------------------------+\r\n");
3554 (void)fflush(stderr);
3555 (void)fflush(stdout);
3556 # else
3557 sim_msg ("\r| cycles %'15llu |\r\n", cycleCntAll);
3558 sim_msg ("\r| instructions %'15llu |\r\n", instrCntAll);
3559 (void)fflush(stderr);
3560 (void)fflush(stdout);
3561 sim_msg ("\r+---------------------------------+\r\n");
3562 sim_msg ("\r| lockCnt %'15llu |\r\n", lockCntAll);
3563 sim_msg ("\r| lockImmediate %'15llu |\r\n", lockImmediateAll);
3564 (void)fflush(stderr);
3565 (void)fflush(stdout);
3566 sim_msg ("\r+---------------------------------+\r\n");
3567 sim_msg ("\r| lockWait %'15llu |\r\n", lockWaitAll);
3568 sim_msg ("\r| lockWaitMax %'15llu |\r\n", lockWaitMaxAll);
3569 (void)fflush(stderr);
3570 (void)fflush(stdout);
3571 # if !defined(SCHED_NEVER_YIELD)
3572 sim_msg ("\r| lockYield %'15llu |\r\n", lockYieldAll);
3573 # else
3574 sim_msg ("\r| lockYield ---- |\r\n");
3575 # endif
3576 sim_msg ("\r+---------------------------------+\r\n");
3577 (void)fflush(stderr);
3578 (void)fflush(stdout);
3579 # endif
3580 # if !defined(PERF_STRIP)
3581 }
3582 # else
3583 sim_msg("\r\n");
3584 # endif
3585
3586
3587
3588
3589
3590
3591
3592
3593
3594
3595
3596 #if defined(THREADZ) || defined(LOCKLESS)
3597 if (running_perf_test == false)
3598 sim_usleep(2000000);
3599 stopCPUThread();
3600 #endif
3601
3602 #if defined(M_SHARED)
3603
3604
3605
3606
3607 set_cpu_idx (0);
3608 dummy_IC = cpu.PPR.IC;
3609 #endif
3610
3611 return reason;
3612 }
3613
3614
3615
3616
3617
3618
3619
3620
3621
3622
3623
3624
3625
3626
3627
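// operand_size: number of 36-bit words moved by the current instruction's
// operand -- 1, 2, 8, 16 or 32 -- or 0 if it takes no memory operand.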
3628 int operand_size (cpu_state_t * cpup)
3629 {
3630 DCDstruct * i = & cpu.currentInstruction;
3631 if (i->info->flags & (READ_OPERAND | STORE_OPERAND))
3632 return 1;
3633 else if (i->info->flags & (READ_YPAIR | STORE_YPAIR))
3634 return 2;
3635 else if (i->info->flags & (READ_YBLOCK8 | STORE_YBLOCK8))
3636 return 8;
3637 else if (i->info->flags & (READ_YBLOCK16 | STORE_YBLOCK16))
3638 return 16;
3639 else if (i->info->flags & (READ_YBLOCK32 | STORE_YBLOCK32))
3640 return 32;
3641 return 0;
3642 }
3643
3644
3645
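// Fetch the current instruction's operand into cpu.CY, cpu.Ypair or the
// appropriate cpu.Yblock* buffer, aligning the address to the operand's
// natural boundary; readOperandRMW is the locked variant used for
// read-modify-write operands.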
3646 void readOperandRead (cpu_state_t * cpup, word18 addr) {
3647 CPT (cpt1L, 6);
3648
3649 #if defined(THREADZ)
3650 DCDstruct * i = & cpu.currentInstruction;
3651 if (RMWOP (i))
3652 lock_rmw ();
3653 #endif
3654
3655 switch (operand_size (cpup)) {
3656 case 1:
3657 CPT (cpt1L, 7);
3658 ReadOperandRead (cpup, addr, & cpu.CY);
3659 break;
3660 case 2:
3661 CPT (cpt1L, 8);
3662 addr &= 0777776;
3663 Read2OperandRead (cpup, addr, cpu.Ypair);
3664 break;
3665 case 8:
3666 CPT (cpt1L, 9);
3667 addr &= 0777770;
3668 Read8 (cpup, addr, cpu.Yblock8, cpu.currentInstruction.b29);
3669 break;
3670 case 16:
3671 CPT (cpt1L, 10);
3672 addr &= 0777770;
3673 Read16 (cpup, addr, cpu.Yblock16);
3674 break;
3675 case 32:
3676 CPT (cpt1L, 11);
3677 addr &= 0777740;
3678 for (uint j = 0 ; j < 32 ; j += 1)
3679 ReadOperandRead (cpup, addr + j, cpu.Yblock32 + j);
3680 break;
3681 }
3682 }
3683
3684 void readOperandRMW (cpu_state_t * cpup, word18 addr) {
3685 CPT (cpt1L, 6);
3686 switch (operand_size (cpup)) {
3687 case 1:
3688 CPT (cpt1L, 7);
3689 ReadOperandRMW (cpup, addr, & cpu.CY);
3690 break;
3691 case 2:
3692 CPT (cpt1L, 8);
3693 addr &= 0777776;
3694 Read2OperandRead (cpup, addr, cpu.Ypair);
3695 break;
3696 case 8:
3697 CPT (cpt1L, 9);
3698 addr &= 0777770;
3699 Read8 (cpup, addr, cpu.Yblock8, cpu.currentInstruction.b29);
3700 break;
3701 case 16:
3702 CPT (cpt1L, 10);
3703 addr &= 0777770;
3704 Read16 (cpup, addr, cpu.Yblock16);
3705 break;
3706 case 32:
3707 CPT (cpt1L, 11);
3708 addr &= 0777740;
3709 for (uint j = 0 ; j < 32 ; j += 1)
3710 ReadOperandRMW (cpup, addr + j, cpu.Yblock32 + j);
3711 break;
3712 }
3713 }
3714
3715
3716
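// Store the operand staged in cpu.CY, cpu.Ypair or cpu.Yblock* back to
// memory, using the same alignment rules as the operand read path.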
3717 t_stat write_operand (cpu_state_t * cpup, word18 addr, UNUSED processor_cycle_type cyctyp)
3718 {
3719 switch (operand_size (cpup))
3720 {
3721 case 1:
3722 CPT (cpt1L, 12);
3723 WriteOperandStore (cpup, addr, cpu.CY);
3724 break;
3725 case 2:
3726 CPT (cpt1L, 13);
3727 addr &= 0777776;
3728 Write2OperandStore (cpup, addr + 0, cpu.Ypair);
3729 break;
3730 case 8:
3731 CPT (cpt1L, 14);
3732 addr &= 0777770;
3733 Write8 (cpup, addr, cpu.Yblock8, cpu.currentInstruction.b29);
3734 break;
3735 case 16:
3736 CPT (cpt1L, 15);
3737 addr &= 0777770;
3738 Write16 (cpup, addr, cpu.Yblock16);
3739 break;
3740 case 32:
3741 CPT (cpt1L, 16);
3742 addr &= 0777740;
3743
3744
3745 Write32 (cpup, addr, cpu.Yblock32);
3746 break;
3747 }
3748
3749 #if defined(THREADZ)
3750 if (cyctyp == OPERAND_STORE)
3751 {
3752 DCDstruct * i = & cpu.currentInstruction;
3753 if (RMWOP (i))
3754 unlock_mem ();
3755 }
3756 #endif
3757 return SCPE_OK;
3758
3759 }
3760
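// scp command hook: set (arg non-zero) or clear a memory watchpoint at the
// address given in buf; an empty argument with arg == 0 clears them all.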
3761 #if !defined(SPEED)
3762 t_stat set_mem_watch (int32 arg, const char * buf)
3763 {
3764 if (strlen (buf) == 0)
3765 {
3766 if (arg)
3767 {
3768 sim_warn ("no argument to watch?\r\n");
3769 return SCPE_ARG;
3770 }
3771 sim_msg ("Clearing all watch points\r\n");
3772 (void)memset (& watch_bits, 0, sizeof (watch_bits));
3773 return SCPE_OK;
3774 }
3775 char * end;
3776 long int n = strtol (buf, & end, 0);
3777 if (* end || n < 0 || n >= MEMSIZE)
3778 {
3779 sim_warn ("Invalid argument to watch? %ld\r\n", (long) n);
3780 return SCPE_ARG;
3781 }
3782 watch_bits [n] = arg != 0;
3783 return SCPE_OK;
3784 }
3785 #endif
3786
3787
3788
3789
3790
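// Non-existent memory check: raise a store fault (nonexistent address) if
// the physical address does not map to any configured SCU.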
3791 #if !defined(SPEED)
3792 static void nem_check (word24 addr, const char * context)
3793 {
3794 cpu_state_t * cpup = _cpup;
3795 if (lookup_cpu_mem_map (cpup, addr) < 0)
3796 {
3797 doFault (FAULT_STR, fst_str_nea, context);
3798 }
3799 }
3800 #endif
3801
3802
3803
3804
3805
3806
3807
3808
3809
3810
3811
3812
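// Physical memory access primitives.  They apply the SCU port address map,
// honor watchpoints, and in LOCKLESS builds use acquire/release accesses
// with per-word locking for read-modify-write sequences.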
3813 #if !defined(SPEED) || !defined(INLINE_CORE)
3814 int core_read (cpu_state_t * cpup, word24 addr, word36 *data, const char * ctx)
3815 {
3816 PNL (cpu.portBusy = true;)
3817 SC_MAP_ADDR (addr, addr);
3818 # if !defined(LOCKLESS)
3819 if (M[addr] & MEM_UNINITIALIZED)
3820 {
3821 sim_debug (DBG_WARN, & cpu_dev,
3822 "Uninitialized memory accessed at address %08o; "
3823 "IC is 0%06o:0%06o (%s)\r\n",
3824 addr, cpu.PPR.PSR, cpu.PPR.IC, ctx);
3825 }
3826 # endif
3827 # if !defined(SPEED)
3828 if (watch_bits [addr])
3829 {
3830 sim_msg ("WATCH [%llu] %05o:%06o read %08o %012llo (%s)\r\n",
3831 (long long unsigned int)cpu.cycleCnt, cpu.PPR.PSR, cpu.PPR.IC, addr,
3832 (long long unsigned int)M [addr], ctx);
3833 traceInstruction (0);
3834 }
3835 # endif
3836 # if defined(LOCKLESS)
3837 # if !defined(SUNLINT)
3838 word36 v;
3839 LOAD_ACQ_CORE_WORD(v, addr);
3840 *data = v & DMASK;
3841 # endif
3842 # else
3843 *data = M[addr] & DMASK;
3844 # endif
3845
3846 DO_WORK_MEM;
3847 sim_debug (DBG_CORE, & cpu_dev,
3848 "core_read %08o %012"PRIo64" (%s)\r\n",
3849 addr, * data, ctx);
3850 PNL (trackport (addr, * data));
3851 return 0;
3852 }
3853 #endif
3854
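// LOCKLESS: read a word and leave it locked for the matching
// core_write_unlock; a lock left over from a previous operation is
// reported and force-released first.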
3855 #if defined(LOCKLESS)
3856 int core_read_lock (cpu_state_t * cpup, word24 addr, word36 *data, UNUSED const char * ctx)
3857 {
3858 SC_MAP_ADDR (addr, addr);
3859 LOCK_CORE_WORD(addr, & cpu.coreLockState);
3860 if (cpu.coreLockState.locked_addr != 0) {
3861 sim_warn ("core_read_lock: locked %08o locked_addr %08o %c %05o:%06o\r\n",
3862 addr, cpu.coreLockState.locked_addr, current_running_cpu_idx + 'A',
3863 cpu.PPR.PSR, cpu.PPR.IC);
3864 core_unlock_all (cpup);
3865 }
3866 cpu.coreLockState.locked_addr = addr;
3867 # if !defined(SUNLINT)
3868 word36 v;
3869 LOAD_ACQ_CORE_WORD(v, addr);
3870 * data = v & DMASK;
3871 # endif
3872 return 0;
3873 }
3874 #endif
3875
3876 #if !defined(SPEED) || !defined(INLINE_CORE)
3877 int core_write (cpu_state_t * cpup, word24 addr, word36 data, const char * ctx)
3878 {
3879 PNL (cpu.portBusy = true;)
3880 SC_MAP_ADDR (addr, addr);
3881 if (cpu.tweaks.isolts_mode)
3882 {
3883 if (cpu.MR.sdpap)
3884 {
3885 sim_warn ("failing to implement sdpap\r\n");
3886 cpu.MR.sdpap = 0;
3887 }
3888 if (cpu.MR.separ)
3889 {
3890 sim_warn ("failing to implement separ\r\n");
3891 cpu.MR.separ = 0;
3892 }
3893 }
3894 # if defined(LOCKLESS)
3895 LOCK_CORE_WORD(addr, & cpu.coreLockState);
3896 # if !defined(SUNLINT)
3897 STORE_REL_CORE_WORD(addr, data);
3898 # endif
3899 # else
3900 M[addr] = data & DMASK;
3901 # endif
3902 # if !defined(SPEED)
3903 if (watch_bits [addr])
3904 {
3905 sim_msg ("WATCH [%llu] %05o:%06o write %08llo %012llo (%s)\r\n",
3906 (long long unsigned int)cpu.cycleCnt, cpu.PPR.PSR, cpu.PPR.IC,
3907 (long long unsigned int)addr, (unsigned long long int)M [addr], ctx);
3908 traceInstruction (0);
3909 }
3910 # endif
3911 DO_WORK_MEM;
3912 sim_debug (DBG_CORE, & cpu_dev,
3913 "core_write %08o %012"PRIo64" (%s)\r\n",
3914 addr, data, ctx);
3915 PNL (trackport (addr, data));
3916 return 0;
3917 }
3918 #endif
3919
3920 #if defined(LOCKLESS)
3921 int core_write_unlock (cpu_state_t * cpup, word24 addr, word36 data, UNUSED const char * ctx)
3922 {
3923 SC_MAP_ADDR (addr, addr);
3924 if (cpu.coreLockState.locked_addr != addr)
3925 {
3926 sim_warn ("core_write_unlock: locked %08o locked_addr %08o %c %05o:%06o\r\n",
3927 addr, cpu.coreLockState.locked_addr, current_running_cpu_idx + 'A',
3928 cpu.PPR.PSR, cpu.PPR.IC);
3929 core_unlock_all (cpup);
3930 }
3931
3932 # if !defined(SUNLINT)
3933 STORE_REL_CORE_WORD(addr, data);
3934 # endif
3935 cpu.coreLockState.locked_addr = 0;
3936 return 0;
3937 }
3938
3939 int core_unlock_all (cpu_state_t * cpup)
3940 {
3941 if (cpu.coreLockState.locked_addr != 0) {
3942 sim_warn ("core_unlock_all: locked %08o %c %05o:%06o\r\n",
3943 cpu.coreLockState.locked_addr, current_running_cpu_idx + 'A',
3944 cpu.PPR.PSR, cpu.PPR.IC);
3945 # if !defined(SUNLINT)
3946 STORE_REL_CORE_WORD(cpu.coreLockState.locked_addr, M[cpu.coreLockState.locked_addr]);
3947 # endif
3948 cpu.coreLockState.locked_addr = 0;
3949 }
3950 return 0;
3951 }
3952 #endif
3953
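// Read-modify-write a single word, replacing only the bit positions
// selected by cpu.zone (the zone/character store path).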
3954 #if !defined(SPEED) || !defined(INLINE_CORE)
3955 int core_write_zone (cpu_state_t * cpup, word24 addr, word36 data, const char * ctx)
3956 {
3957 PNL (cpu.portBusy = true;)
3958 if (cpu.tweaks.isolts_mode)
3959 {
3960 if (cpu.MR.sdpap)
3961 {
3962 sim_warn ("failing to implement sdpap\r\n");
3963 cpu.MR.sdpap = 0;
3964 }
3965 if (cpu.MR.separ)
3966 {
3967 sim_warn ("failing to implement separ\r\n");
3968 cpu.MR.separ = 0;
3969 }
3970 }
3971 word24 mapAddr = 0;
3972 SC_MAP_ADDR (addr, mapAddr);
3973 # if defined(LOCKLESS)
3974 word36 v;
3975 core_read_lock(cpup, addr, &v, ctx);
3976 v = (v & ~cpu.zone) | (data & cpu.zone);
3977 core_write_unlock(cpup, addr, v, ctx);
3978 # else
3979 M[mapAddr] = (M[mapAddr] & ~cpu.zone) | (data & cpu.zone);
3980 # endif
3981 cpu.useZone = false;
3982 # if !defined(SPEED)
3983 if (watch_bits [mapAddr])
3984 {
3985 sim_msg ("WATCH [%llu] %05o:%06o writez %08llo %012llo (%s)\r\n",
3986 (unsigned long long int)cpu.cycleCnt, cpu.PPR.PSR, cpu.PPR.IC,
3987 (unsigned long long int)mapAddr, (unsigned long long int)M [mapAddr], ctx);
3988 traceInstruction (0);
3989 }
3990 # endif
3991 DO_WORK_MEM;
3992 sim_debug (DBG_CORE, & cpu_dev,
3993 "core_write_zone %08o %012"PRIo64" (%s)\r\n",
3994 mapAddr, data, ctx);
3995 PNL (trackport (mapAddr, data));
3996 return 0;
3997 }
3998 #endif
3999
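// core_read2 / core_write2: access an even/odd pair of words; an odd
// address is silently aligned down to the even word of the pair.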
4000 #if !defined(SPEED) || !defined(INLINE_CORE)
4001 int core_read2 (cpu_state_t * cpup, word24 addr, word36 *even, word36 *odd, const char * ctx)
4002 {
4003 PNL (cpu.portBusy = true;)
4004 # if defined(LOCKLESS)
4005
4006 word36 v;
4007 # endif
4008 if (addr & 1)
4009 {
4010 sim_debug (DBG_MSG, & cpu_dev,
4011 "warning: subtracting 1 from pair at %o in "
4012 "core_read2 (%s)\r\n", addr, ctx);
4013 addr &= (word24)~1;
4014 }
4015 SC_MAP_ADDR (addr, addr);
4016 # if !defined(LOCKLESS)
4017 if (M[addr] & MEM_UNINITIALIZED)
4018 {
4019 sim_debug (DBG_WARN, & cpu_dev,
4020 "Uninitialized memory accessed at address %08o; "
4021 "IC is 0%06o:0%06o (%s)\r\n",
4022 addr, cpu.PPR.PSR, cpu.PPR.IC, ctx);
4023 }
4024 # endif
4025 # if !defined(SPEED)
4026 if (watch_bits [addr])
4027 {
4028 sim_msg ("WATCH [%llu] %05o:%06o read2 %08llo %012llo (%s)\r\n",
4029 (unsigned long long int)cpu.cycleCnt, cpu.PPR.PSR, cpu.PPR.IC,
4030 (unsigned long long int)addr, (unsigned long long int)M [addr], ctx);
4031 traceInstruction (0);
4032 }
4033 # endif
4034 # if defined(LOCKLESS)
4035 # if !defined(SUNLINT)
4036 LOAD_ACQ_CORE_WORD(v, addr);
4037 if (v & MEM_LOCKED)
4038 sim_warn ("core_read2: even locked %08o locked_addr %08o %c %05o:%06o\r\n",
4039 addr, cpu.coreLockState.locked_addr, current_running_cpu_idx + 'A',
4040 cpu.PPR.PSR, cpu.PPR.IC);
4041 *even = v & DMASK;
4042 addr++;
4043 # endif
4044 # else
4045 *even = M[addr++] & DMASK;
4046 # endif
4047 sim_debug (DBG_CORE, & cpu_dev,
4048 "core_read2 %08o %012"PRIo64" (%s)\r\n",
4049 addr - 1, * even, ctx);
4050
4051
4052
4053 # if !defined(LOCKLESS)
4054 if (M[addr] & MEM_UNINITIALIZED)
4055 {
4056 sim_debug (DBG_WARN, & cpu_dev,
4057 "Uninitialized memory accessed at address %08o; "
4058 "IC is 0%06o:0%06o (%s)\r\n",
4059 addr, cpu.PPR.PSR, cpu.PPR.IC, ctx);
4060 }
4061 # endif
4062 # if !defined(SPEED)
4063 if (watch_bits [addr])
4064 {
4065 sim_msg ("WATCH [%llu] %05o:%06o read2 %08llo %012llo (%s)\r\n",
4066 (unsigned long long int)cpu.cycleCnt, cpu.PPR.PSR, cpu.PPR.IC,
4067 (unsigned long long int)addr, (unsigned long long int)M [addr], ctx);
4068 traceInstruction (0);
4069 }
4070 # endif
4071 # if defined(LOCKLESS)
4072 # if !defined(SUNLINT)
4073 LOAD_ACQ_CORE_WORD(v, addr);
4074 if (v & MEM_LOCKED)
4075 sim_warn ("core_read2: odd locked %08o locked_addr %08o %c %05o:%06o\r\n",
4076 addr, cpu.coreLockState.locked_addr, current_running_cpu_idx + 'A',
4077 cpu.PPR.PSR, cpu.PPR.IC);
4078 *odd = v & DMASK;
4079 # endif
4080 # else
4081 *odd = M[addr] & DMASK;
4082 # endif
4083 sim_debug (DBG_CORE, & cpu_dev,
4084 "core_read2 %08o %012"PRIo64" (%s)\r\n",
4085 addr, * odd, ctx);
4086 DO_WORK_MEM;
4087 PNL (trackport (addr - 1, * even));
4088 return 0;
4089 }
4090 #endif
4091
4092 #if !defined(SPEED) || !defined(INLINE_CORE)
4093 int core_write2 (cpu_state_t * cpup, word24 addr, word36 even, word36 odd, const char * ctx) {
4094 PNL (cpu.portBusy = true;)
4095 if (addr & 1) {
4096 sim_debug (DBG_MSG, & cpu_dev,
4097 "warning: subtracting 1 from pair at %o in core_write2 " "(%s)\r\n",
4098 addr, ctx);
4099 addr &= (word24)~1;
4100 }
4101 SC_MAP_ADDR (addr, addr);
4102 if (cpu.tweaks.isolts_mode) {
4103 if (cpu.MR.sdpap) {
4104 sim_warn ("failing to implement sdpap\r\n");
4105 cpu.MR.sdpap = 0;
4106 }
4107 if (cpu.MR.separ) {
4108 sim_warn ("failing to implement separ\r\n");
4109 cpu.MR.separ = 0;
4110 }
4111 }
4112
4113 # if !defined(SPEED)
4114 if (watch_bits [addr]) {
4115 sim_msg ("WATCH [%llu] %05o:%06o write2 %08llo %012llo (%s)\r\n",
4116 (unsigned long long int)cpu.cycleCnt, cpu.PPR.PSR, cpu.PPR.IC,
4117 (unsigned long long int)addr, (unsigned long long int)even, ctx);
4118 traceInstruction (0);
4119 }
4120 # endif
4121 # if defined(LOCKLESS)
4122 LOCK_CORE_WORD(addr, & cpu.coreLockState);
4123 # if !defined(SUNLINT)
4124 STORE_REL_CORE_WORD(addr, even);
4125 # endif
4126 addr++;
4127 # else
4128 M[addr++] = even & DMASK;
4129 # endif
4130 sim_debug (DBG_CORE, & cpu_dev, "core_write2 %08o %012llo (%s)\r\n", addr - 1,
4131 (long long unsigned int)even, ctx);
4132
4133
4134
4135
4136 # if !defined(SPEED)
4137 if (watch_bits [addr]) {
4138 sim_msg ("WATCH [%llu] %05o:%06o write2 %08llo %012llo (%s)\r\n",
4139 (long long unsigned int)cpu.cycleCnt, cpu.PPR.PSR, cpu.PPR.IC,
4140 (long long unsigned int)addr, (long long unsigned int)odd, ctx);
4141 traceInstruction (0);
4142 }
4143 # endif
4144 # if defined(LOCKLESS)
4145 LOCK_CORE_WORD(addr, & cpu.coreLockState);
4146 # if !defined(SUNLINT)
4147 STORE_REL_CORE_WORD(addr, odd);
4148 # endif
4149 # else
4150 M[addr] = odd & DMASK;
4151 # endif
4152 DO_WORK_MEM;
4153 PNL (trackport (addr - 1, even));
4154 sim_debug (DBG_CORE, & cpu_dev, "core_write2 %08o %012"PRIo64" (%s)\r\n", addr, odd, ctx);
4155 return 0;
4156 }
4157 #endif
4158
4159
4160
4161
4162
4163
4164
4165
4166
4167
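// Decode a 36-bit instruction word into DCDstruct: opcode, address, bit 29,
// interrupt-inhibit bit and tag, then attach the opcode descriptor.  For
// instructions with operand descriptors the tag and bit 29 are cleared, and
// the EIS decode area is reset for multiword EIS instructions.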
4168 void decode_instruction (cpu_state_t * cpup, word36 inst, DCDstruct * p)
4169 {
4170 CPT (cpt1L, 17);
4171 (void)memset (p, 0, sizeof (DCDstruct));
4172
4173 p->opcode = GET_OP (inst);
4174 p->opcodeX = GET_OPX(inst);
4175 p->opcode10 = p->opcode | (p->opcodeX ? 01000 : 0);
4176 p->address = GET_ADDR (inst);
4177 p->b29 = GET_A (inst);
4178 p->i = GET_I (inst);
4179 p->tag = GET_TAG (inst);
4180
4181 p->info = get_iwb_info (p);
4182
4183 if (p->info->flags & IGN_B29)
4184 p->b29 = 0;
4185
4186 if (p->info->ndes > 0)
4187 {
4188 p->b29 = 0;
4189 p->tag = 0;
4190 if (p->info->ndes > 1)
4191 {
4192 (void)memset (& cpu.currentEISinstruction, 0,
4193 sizeof (cpu.currentEISinstruction));
4194 }
4195 }
4196 }
4197
4198
4199
4200
4201
4202
4203
4204
4205
4206
4207
4208
4209
4210
4211
4212
4213
4214
4215
4216
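// True when privileged instructions may execute: never in BAR mode, always
// in absolute mode, otherwise when PPR.P is set.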
4217 int is_priv_mode (cpu_state_t * cpup)
4218 {
4219
4220
4221
4222 if (get_bar_mode (cpup))
4223 return 0;
4224
4225
4226 if (get_addr_mode (cpup) == ABSOLUTE_mode)
4227 return 1;
4228 else if (cpu.PPR.P)
4229 return 1;
4230
4231 return 0;
4232 }
4233
4234
4235
4236
4237
4238
4239
4240
4241 bool get_bar_mode (cpu_state_t * cpup)
4242 {
4243 return ! (cpu.secret_addressing_mode || TST_I_NBAR);
4244 }
4245
4246 addr_modes_e get_addr_mode (cpu_state_t * cpup)
4247 {
4248 if (cpu.secret_addressing_mode)
4249 return ABSOLUTE_mode;
4250
4251
4252
4253
4254
4255
4256
4257 if (TST_I_ABS)
4258 {
4259 return ABSOLUTE_mode;
4260 }
4261 else
4262 {
4263 return APPEND_mode;
4264 }
4265 }
4266
4267
4268
4269
4270
4271
4272
4273
4274 void set_addr_mode (cpu_state_t * cpup, addr_modes_e mode)
4275 {
4276
4277
4278
4279
4280
4281
4282
4283
4284
4285
4286 cpu.secret_addressing_mode = false;
4287 if (mode == ABSOLUTE_mode)
4288 {
4289 CPT (cpt1L, 22);
4290 sim_debug (DBG_DEBUG, & cpu_dev, "APU: Setting absolute mode.\r\n");
4291
4292 SET_I_ABS;
4293 cpu.PPR.P = 1;
4294 }
4295 else if (mode == APPEND_mode)
4296 {
4297 CPT (cpt1L, 23);
4298 if (! TST_I_ABS && TST_I_NBAR)
4299 sim_debug (DBG_DEBUG, & cpu_dev, "APU: Keeping append mode.\r\n");
4300 else
4301 sim_debug (DBG_DEBUG, & cpu_dev, "APU: Setting append mode.\r\n");
4302
4303 CLR_I_ABS;
4304 }
4305 else
4306 {
4307 sim_debug (DBG_ERR, & cpu_dev,
4308 "APU: Unable to determine address mode.\r\n");
4309 sim_warn ("APU: Unable to determine address mode. Can't happen!\r\n");
4310 }
4311 }
4312
4313
4314
4315
4316
4317
4318
4319
4320
4321
4322
4323
4324
4325
4326
4327
4328
4329
4330
4331
4332
4333
4334
4335
4336
4337
4338
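// Relocate an 18-bit address through the Base Address Register: fault if
// BAR.BOUND is zero or the address lies at or beyond BOUND << 9, otherwise
// add BASE << 9 modulo 2^18.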
4339 word18 get_BAR_address (cpu_state_t * cpup, word18 addr)
4340 {
4341 if (cpu . BAR.BOUND == 0)
4342
4343 doFault (FAULT_STR, fst_str_oob, "BAR store fault; out of bounds");
4344
4345
4346
4347
4348
4349
4350
4351
4352
4353
4354 if (addr >= (((word18) cpu . BAR.BOUND) << 9))
4355
4356 doFault (FAULT_STR, fst_str_oob, "BAR store fault; out of bounds");
4357
4358 word18 barAddr = (addr + (((word18) cpu . BAR.BASE) << 9)) & 0777777;
4359 return barAddr;
4360 }
4361
4362
4363
4364 static void add_history (cpu_state_t * cpup, uint hset, word36 w0, word36 w1)
4365 {
4366
4367 {
4368 cpu.history [hset] [cpu.history_cyclic[hset]] [0] = w0;
4369 cpu.history [hset] [cpu.history_cyclic[hset]] [1] = w1;
4370 cpu.history_cyclic[hset] = (cpu.history_cyclic[hset] + 1) % N_MODEL_HIST_SIZE;
4371 }
4372 }
4373
4374 void add_history_force (cpu_state_t * cpup, uint hset, word36 w0, word36 w1)
4375 {
4376 cpu.history [hset] [cpu.history_cyclic[hset]] [0] = w0;
4377 cpu.history [hset] [cpu.history_cyclic[hset]] [1] = w1;
4378 cpu.history_cyclic[hset] = (cpu.history_cyclic[hset] + 1) % N_MODEL_HIST_SIZE;
4379 }
4380
4381 void add_dps8m_CU_history (cpu_state_t * cpup)
4382 {
4383 if (cpu.skip_cu_hist)
4384 return;
4385 if (! cpu.MR_cache.emr)
4386 return;
4387 if (! cpu.MR_cache.ihr)
4388 return;
4389 if (cpu.MR_cache.hrxfr && ! cpu.wasXfer)
4390 return;
4391
4392 word36 flags = 0;
4393 word5 proccmd = 0;
4394 word7 flags2 = 0;
4395 word36 w0 = 0, w1 = 0;
4396 w0 |= flags & 0777777000000;
4397 w0 |= IWB_IRODD & MASK18;
4398 w1 |= ((word36)(cpu.iefpFinalAddress & MASK24) << 12);
4399 w1 |= (proccmd & MASK5) << 7;
4400 w1 |= flags2 & 0176;
4401 add_history (cpup, CU_HIST_REG, w0, w1);
4402 }
4403
4404 #if !defined(QUIET_UNUSED)
4405 void add_dps8m_DU_OU_history (cpu_state_t * cpup, word36 flags, word18 ICT, word9 RS_REG, word9 flags2)
4406 {
4407 word36 w0 = flags, w1 = 0;
4408 w1 |= (ICT & MASK18) << 18;
4409 w1 |= (RS_REG & MASK9) << 9;
4410 w1 |= flags2 & MASK9;
4411 add_history (cpup, DPS8M_DU_OU_HIST_REG, w0, w1);
4412 }
4413
4414 void add_dps8m_APU_history (cpu_state_t * cpup, word15 ESN, word21 flags, word24 RMA, word3 RTRR, word9 flags2)
4415 {
4416 word36 w0 = 0, w1 = 0;
4417 w0 |= (ESN & MASK15) << 21;
4418 w0 |= flags & MASK21;
4419 w1 |= (RMA & MASK24) << 12;
4420 w1 |= (RTRR & MASK3) << 9;
4421 w1 |= flags2 & MASK9;
4422 add_history (cpup, cpu.tweaks.l68_mode ? L68_APU_HIST_REG : DPS8M_APU_HIST_REG, w0, w1);
4423 }
4424
4425 void add_dps8m_EAPU_history (word18 ZCA, word18 opcode)
4426 {
4427 word36 w0 = 0;
4428 w0 |= (ZCA & MASK18) << 18;
4429 w0 |= opcode & MASK18;
4430 add_history (_cpup, DPS8M_EAPU_HIST_REG, w0, 0);
4431
4432
4433
4434
4435 }
4436 #endif
4437
4438
4439
4440
4441
4442
4443
4444
4445
4446
4447
4448
4449
4450
4451
4452
4453
4454
4455
4456
4457
4458
4459
4460
4461
4462
4463
4464
4465
4466
4467
4468
4469
4470
4471
4472 void add_l68_CU_history (cpu_state_t * cpup)
4473 {
4474 CPT (cpt1L, 24);
4475
4476 if (cpu.skip_cu_hist)
4477 return;
4478 if (! cpu.MR_cache.emr)
4479 return;
4480 if (! cpu.MR_cache.ihr)
4481 return;
4482
4483 word36 w0 = 0, w1 = 0;
4484
4485
4486
4487
4488
4489
4490
4491
4492
4493 PNL (putbits36_8 (& w0, 0, cpu.prepare_state);)
4494
4495 putbits36_1 (& w0, 8, cpu.wasXfer);
4496
4497 putbits36_1 (& w0, 9, cpu.cu.xde);
4498
4499 putbits36_1 (& w0, 10, cpu.cu.xdo);
4500
4501 putbits36_1 (& w0, 11, USE_IRODD?1:0);
4502
4503 putbits36_1 (& w0, 12, cpu.cu.rpt);
4504
4505
4506 PNL (putbits36_1 (& w0, 14, cpu.AR_F_E);)
4507
4508 putbits36_1 (& w0, 15, cpu.cycle != INTERRUPT_cycle?1:0);
4509
4510 putbits36_1 (& w0, 16, cpu.cycle != FAULT_cycle?1:0);
4511
4512 putbits36_1 (& w0, 17, TSTF (cpu.cu.IR, I_NBAR)?1:0);
4513
4514 putbits36_18 (& w0, 18, (word18) (IWB_IRODD & MASK18));
4515
4516
4517 putbits36_18 (& w1, 0, cpu.TPR.CA);
4518
4519
4520 PNL (putbits36_1 (& w1, 59-36, (cpu.portSelect == 0)?1:0);)
4521 PNL (putbits36_1 (& w1, 60-36, (cpu.portSelect == 1)?1:0);)
4522 PNL (putbits36_1 (& w1, 61-36, (cpu.portSelect == 2)?1:0);)
4523 PNL (putbits36_1 (& w1, 62-36, (cpu.portSelect == 3)?1:0);)
4524
4525 putbits36_1 (& w1, 63-36, cpu.interrupt_flag?1:0);
4526
4527 PNL (putbits36_1 (& w1, 64-36, cpu.INS_FETCH?1:0);)
4528
4529
4530
4531
4532
4533
4534
4535
4536 add_history (cpup, CU_HIST_REG, w0, w1);
4537
4538
4539 CPTUR (cptUseMR);
4540 if (cpu.MR.hrhlt && cpu.history_cyclic[CU_HIST_REG] == 0)
4541 {
4542
4543 if (cpu.MR.ihrrs)
4544 {
4545 cpu.MR.ihr = 0;
4546 }
4547 set_FFV_fault (cpup, 4);
4548 return;
4549 }
4550 }
4551
4552
4553
4554
4555
4556
4557
4558
4559
4560
4561
4562
4563
4564
4565
4566
4567
4568
4569
4570
4571
4572
4573
4574
4575
4576
4577
4578
4579
4580
4581
4582
4583
4584
4585
4586
4587
4588
4589
4590 void add_l68_DU_history (cpu_state_t * cpup)
4591 {
4592 CPT (cpt1L, 25);
4593 PNL (add_history (cpup, L68_DU_HIST_REG, cpu.du.cycle1, cpu.du.cycle2);)
4594 }
4595
4596 void add_l68_OU_history (cpu_state_t * cpup)
4597 {
4598 CPT (cpt1L, 26);
4599 word36 w0 = 0, w1 = 0;
4600
4601
4602
4603 PNL (putbits36_9 (& w0, 0, cpu.ou.RS);)
4604
4605
4606 putbits36_1 (& w0, 9, cpu.ou.characterOperandSize ? 1 : 0);
4607
4608
4609 putbits36_3 (& w0, 10, cpu.ou.characterOperandOffset);
4610
4611
4612 putbits36_1 (& w0, 13, cpu.ou.crflag);
4613
4614
4615 putbits36_1 (& w0, 14, cpu.ou.directOperandFlag ? 1 : 0);
4616
4617
4618 putbits36_2 (& w0, 15, cpu.ou.eac);
4619
4620
4621
4622 PNL (putbits36_9 (& w0, 18, cpu.ou.RS);)
4623
4624
4625 putbits36_1 (& w0, 27, cpu.ou.RB1_FULL);
4626
4627
4628 putbits36_1 (& w0, 28, cpu.ou.RP_FULL);
4629
4630
4631 putbits36_1 (& w0, 29, cpu.ou.RS_FULL);
4632
4633
4634 putbits36_6 (& w0, 30, (word6) (cpu.ou.cycle >> 3));
4635
4636
4637 putbits36_3 (& w1, 36-36, (word3) cpu.ou.cycle);
4638
4639
4640 putbits36_1 (& w1, 39-36, cpu.ou.STR_OP);
4641
4642
4643
4644
4645 PNL (putbits36_10 (& w1, 41-36,
4646 (word10) ~opcodes10 [cpu.ou.RS].reg_use);)
4647
4648
4649
4650
4651 putbits36_18 (& w1, 54 - 36, cpu.PPR.IC);
4652
4653 add_history (cpup, L68_OU_HIST_REG, w0, w1);
4654 }
4655
4656
4657
4658
4659
4660
4661
4662
4663
4664
4665
4666
4667
4668
4669
4670
4671
4672
4673
4674
4675
4676
4677
4678
4679
4680
4681
4682
4683
4684
4685
4686
4687
4688
4689
4690
4691
4692
4693
4694
4695
4696
4697
4698
4699
4700
4701
4702
4703
4704
4705 void add_l68_APU_history (cpu_state_t * cpup, enum APUH_e op)
4706 {
4707 CPT (cpt1L, 28);
4708 word36 w0 = 0, w1 = 0;
4709
4710 w0 = op;
4711
4712
4713 putbits36_15 (& w0, 0, cpu.TPR.TSR);
4714
4715 PNL (putbits36_1 (& w0, 15, (cpu.apu.state & apu_ESN_SNR) ? 1 : 0);)
4716 PNL (putbits36_1 (& w0, 16, (cpu.apu.state & apu_ESN_TSR) ? 1 : 0);)
4717
4718 putbits36_1 (& w0, 25, cpu.cu.SDWAMM);
4719
4720 putbits36_4 (& w0, 26, (word4) cpu.SDWAMR);
4721
4722 putbits36_1 (& w0, 30, cpu.cu.PTWAMM);
4723
4724 putbits36_4 (& w0, 31, (word4) cpu.PTWAMR);
4725
4726 PNL (putbits36_1 (& w0, 35, (cpu.apu.state & apu_FLT) ? 1 : 0);)
4727
4728
4729 PNL (putbits36_24 (& w1, 0, cpu.APUMemAddr);)
4730
4731 putbits36_3 (& w1, 24, cpu.TPR.TRR);
4732
4733
4734 putbits36_1 (& w1, 34, cpu.SDW0.C);
4735
4736
4737 add_history (cpup, L68_APU_HIST_REG, w0, w1);
4738 }
4739
4740 #if defined(THREADZ) || defined(LOCKLESS)
4741
4742
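// Thread-aware replacement for sim_debug: get_dbg_verb maps the debug bits
// to a name, and dps8_sim_debug formats into a local (or grown heap) buffer,
// then writes complete lines to sim_deb prefixed with a timestamp, the count
// argument, the current CPU index, the device name and the debug verb.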
4743 static const char * get_dbg_verb (uint32 dbits, DEVICE * dptr)
4744 {
4745 static const char * debtab_none = "DEBTAB_ISNULL";
4746 static const char * debtab_nomatch = "DEBTAB_NOMATCH";
4747 const char * some_match = NULL;
4748 int32 offset = 0;
4749
4750 if (dptr->debflags == 0)
4751 return debtab_none;
4752
4753 dbits &= dptr->dctrl;
4754
4755
4756 while ((offset < 32) && dptr->debflags[offset].name)
4757 {
4758 if (dptr->debflags[offset].mask == dbits)
4759 return dptr->debflags[offset].name;
4760 if (dptr->debflags[offset].mask & dbits)
4761 some_match = dptr->debflags[offset].name;
4762 offset ++;
4763 }
4764 return some_match ? some_match : debtab_nomatch;
4765 }
4766
4767 void dps8_sim_debug (uint32 dbits, DEVICE * dptr, unsigned long long cnt, const char* fmt, ...)
4768 {
4769
4770 if (sim_deb && dptr && (dptr->dctrl & dbits))
4771 {
4772 const char * debug_type = get_dbg_verb (dbits, dptr);
4773 char stackbuf[STACKBUFSIZE];
4774 int32 bufsize = sizeof (stackbuf);
4775 char * buf = stackbuf;
4776 va_list arglist;
4777 int32 i, j, len;
4778 struct timespec t;
4779 clock_gettime(CLOCK_REALTIME, &t);
4780
4781 buf [bufsize-1] = '\0';
4782
4783 while (1)
4784 {
4785 va_start (arglist, fmt);
4786 len = vsnprintf (buf, (int)((unsigned long)(bufsize)-1), fmt, arglist);
4787 va_end (arglist);
4788
4789
4790
4791 if ((len < 0) || (len >= bufsize-1))
4792 {
4793 if (buf != stackbuf)
4794 FREE (buf);
4795 if (bufsize >= (INT_MAX / 2))
4796 return;
4797 bufsize = bufsize * 2;
4798 if (bufsize < len + 2)
4799 bufsize = len + 2;
4800 buf = (char *) malloc ((unsigned long) bufsize);
4801 if (buf == NULL)
4802 return;
4803 buf[bufsize-1] = '\0';
4804 continue;
4805 }
4806 break;
4807 }
4808
4809
4810
4811 for (i = j = 0; i < len; ++i)
4812 {
4813 if ('\n' == buf[i])
4814 {
4815 if (i >= j)
4816 {
4817 if ((i != j) || (i == 0))
4818 {
4819 (void)fprintf (sim_deb, "%lld.%06ld: DBG(%lld) %o: %s %s %.*s\r\n",
4820 (long long)t.tv_sec, t.tv_nsec/1000, cnt,
4821 current_running_cpu_idx, dptr->name, debug_type, i-j, &buf[j]);
4822 }
4823 }
4824 j = i + 1;
4825 }
4826 }
4827
4828
4829 if (buf != stackbuf)
4830 FREE (buf);
4831 }
4832
4833 }
4834 #endif
4835
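// Build the CPU's configuration PROM image: the RSW 2 configuration word
// (cache, processor speed and CPU number switches among others), the serial
// number, ship date and emulator version/build strings are copied into the
// PROM buffer with the BURN macros defined below.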
4836 void setupPROM (uint cpuNo, unsigned char * PROM) {
4837
4838
4839
4840
4841
4842
4843
4844
4845
4846
4847
4848
4849
4850
4851
4852
4853
4854
4855
4856
4857
4858
4859
4860
4861
4862
4863
4864
4865
4866
4867
4868
4869
4870
4871
4872
4873
4874
4875
4876 word36 rsw2 = 0;
4877
4878
4879
4880
4881
4882
4883
4884 putbits36_4 (& rsw2, 0, 0);
4885
4886 putbits36_2 (& rsw2, 4, 001);
4887
4888 putbits36_7 (& rsw2, 6, 2);
4889
4890 putbits36_1 (& rsw2, 13, 1);
4891
4892 putbits36_5 (& rsw2, 14, 0);
4893
4894 putbits36_1 (& rsw2, 19, 1);
4895
4896 putbits36_1 (& rsw2, 20, cpus[cpuNo].options.cache_installed ? 1 : 0);
4897
4898 putbits36_2 (& rsw2, 21, 0);
4899
4900 putbits36_1 (& rsw2, 23, 1);
4901
4902 putbits36_1 (& rsw2, 24, 0);
4903
4904 putbits36_4 (& rsw2, 25, 0);
4905
4906 putbits36_4 (& rsw2, 29, cpus[cpuNo].options.proc_speed & 017LL);
4907
4908 putbits36_3 (& rsw2, 33, cpus[cpuNo].switches.cpu_num & 07LL);
4909
4910 word4 rsw2Ext = 0;
4911 if (cpus[cpuNo].options.hex_mode_installed)
4912 rsw2Ext |= 010;
4913 if (cpus[cpuNo].options.clock_slave_installed)
4914 rsw2Ext |= 004;
4915
4916
4917 char serial[12];
4918 (void)sprintf (serial, "%-11u", cpus[cpuNo].switches.serno);
4919
4920 #if defined(VER_H_PROM_SHIP)
4921 char * ship = VER_H_PROM_SHIP;
4922 #else
4923 char * ship = "200101";
4924 #endif
4925
4926 #if !defined(VER_H_PROM_MAJOR_VER)
4927 # define VER_H_PROM_MAJOR_VER "999"
4928 #endif
4929
4930 #if !defined(VER_H_PROM_MINOR_VER)
4931 # define VER_H_PROM_MINOR_VER "999"
4932 #endif
4933
4934 #if !defined(VER_H_PROM_PATCH_VER)
4935 # define VER_H_PROM_PATCH_VER "999"
4936 #endif
4937
4938 #if !defined(VER_H_PROM_OTHER_VER)
4939 # define VER_H_PROM_OTHER_VER "999"
4940 #endif
4941
4942 #if !defined(VER_H_GIT_RELT)
4943 # define VER_H_GIT_RELT "X"
4944 #endif
4945
4946 #if !defined(VER_H_PROM_VER_TEXT)
4947 # define VER_H_PROM_VER_TEXT "Unknown "
4948 #endif
4949
4950 #if defined(BUILD_PROM_OSA_TEXT)
4951 # define BURN_PROM_OSA_TEXT BUILD_PROM_OSA_TEXT
4952 #else
4953 # if !defined(VER_H_PROM_OSA_TEXT)
4954 # define BURN_PROM_OSA_TEXT "Unknown Build Op Sys"
4955 # else
4956 # define BURN_PROM_OSA_TEXT VER_H_PROM_OSA_TEXT
4957 # endif
4958 #endif
4959
4960 #if defined(BUILD_PROM_OSV_TEXT)
4961 # define BURN_PROM_OSV_TEXT BUILD_PROM_OSV_TEXT
4962 #else
4963 # if !defined(VER_H_PROM_OSV_TEXT)
4964 # define BURN_PROM_OSV_TEXT "Unknown Build Arch. "
4965 # else
4966 # define BURN_PROM_OSV_TEXT VER_H_PROM_OSV_TEXT
4967 # endif
4968 #endif
4969
4970 #if defined(BUILD_PROM_TSA_TEXT)
4971 # define BURN_PROM_TSA_TEXT BUILD_PROM_TSA_TEXT
4972 #else
4973 # if defined(_M_X64) || defined(_M_AMD64) || defined(__amd64__) || defined(__x86_64__) || defined(__AMD64)
4974 # define VER_H_PROM_TSA_TEXT "Intel x86_64 (AMD64)"
4975 # elif defined(_M_IX86) || defined(__i386) || defined(__i486) || defined(__i586) || defined(__i686) || defined(__ix86)
4976 # define VER_H_PROM_TSA_TEXT "Intel ix86 (32-bit) "
4977 # elif defined(_M_ARM64) || defined(__aarch64__) || defined(__arm64__)
4978 # define VER_H_PROM_TSA_TEXT "AArch64/ARM64/64-bit"
4979 # elif defined(_M_ARM) || defined(__arm__)
4980 # define VER_H_PROM_TSA_TEXT "AArch32/ARM32/32-bit"
4981 # elif defined(__ia64__) || defined(_M_IA64) || defined(__itanium__)
4982 # define VER_H_PROM_TSA_TEXT "Intel Itanium (IA64)"
4983 # elif defined(__ppc64__) || defined(__PPC64__) || defined(__ppc64le__) || defined(__PPC64LE__) || defined(__powerpc64__) || \
4984 defined(__POWERPC64__) || \
4985 defined(_M_PPC64) || \
4986 defined(__PPC64) || \
4987 defined(_ARCH_PPC64)
4988 # define VER_H_PROM_TSA_TEXT "Power ISA (64-bit) "
4989 # elif defined(__ppc__) || defined(__PPC__) || defined(__powerpc__) || defined(__POWERPC__) || defined(_M_PPC) || \
4990 defined(__PPC) || \
4991 defined(__ppc32__) || \
4992 defined(__PPC32__) || \
4993 defined(__powerpc32__) || \
4994 defined(__POWERPC32__) || \
4995 defined(_M_PPC32) || \
4996 defined(__PPC32)
4997 # define VER_H_PROM_TSA_TEXT "PowerPC ISA (32-bit)"
4998 # elif defined(__s390x__)
4999 # define VER_H_PROM_TSA_TEXT "IBM z/Architecture "
5000 # elif defined(__s390__)
5001 # define VER_H_PROM_TSA_TEXT "IBM ESA System/390 "
5002 # elif defined(__J2__) || defined(__J2P__) || defined(__j2__) || defined(__j2p__)
5003 # define VER_H_PROM_TSA_TEXT "J-Core J2 Open CPU "
5004 # elif defined(__SH4__) || defined(__sh4__) || defined(__SH4) || defined(__sh4)
5005 # define VER_H_PROM_TSA_TEXT "Hitachi/Renesas SH-4"
5006 # elif defined(__SH2__) || defined(__sh2__) || defined(__SH2) || defined(__sh2)
5007 # define VER_H_PROM_TSA_TEXT "Hitachi/Renesas SH-2"
5008 # elif defined(__alpha__)
5009 # define VER_H_PROM_TSA_TEXT "Alpha AXP "
5010 # elif defined(__hppa__) || defined(__HPPA__) || defined(__PARISC__) || defined(__parisc__)
5011 # define VER_H_PROM_TSA_TEXT "HP PA-RISC "
5012 # elif defined(__ICE9__) || defined(__ice9__) || defined(__ICE9) || defined(__ice9)
5013 # define VER_H_PROM_TSA_TEXT "SiCortex ICE-9 "
5014 # elif defined(mips64) || defined(__mips64__) || defined(MIPS64) || defined(_MIPS64_) || defined(__mips64)
5015 # define VER_H_PROM_TSA_TEXT "MIPS64 "
5016 # elif defined(mips) || defined(__mips__) || defined(MIPS) || defined(_MIPS_) || defined(__mips)
5017 # define VER_H_PROM_TSA_TEXT "MIPS "
5018 # elif defined(__OpenRISC__) || defined(__OPENRISC__) || defined(__openrisc__) || defined(__OR1K__) || defined(__OPENRISC1K__)
5019 # define VER_H_PROM_TSA_TEXT "OpenRISC "
5020 # elif defined(__sparc64) || defined(__SPARC64) || defined(__SPARC64__) || defined(__sparc64__)
5021 # define VER_H_PROM_TSA_TEXT "SPARC64 "
5022 # elif defined(__sparc) || defined(__SPARC) || defined(__SPARC__) || defined(__sparc__)
5023 # define VER_H_PROM_TSA_TEXT "SPARC "
5024 # elif defined(__riscv) || defined(__riscv__)
5025 # define VER_H_PROM_TSA_TEXT "RISC-V "
5026 # elif defined(__e2k__) || defined(__E2K__) || defined(__elbrus64__) || defined(__elbrus__) || defined(__ELBRUS__) || defined(__e2k64__)
5027 # if defined(__iset__)
5028 # if __iset__ > 0
5029 # if __iset__ == 1
5030 # define VER_H_PROM_TSA_TEXT "MCST Elbrus v1 "
5031 # elif __iset__ == 2
5032 # define VER_H_PROM_TSA_TEXT "MCST Elbrus v2 "
5033 # elif __iset__ == 3
5034 # define VER_H_PROM_TSA_TEXT "MCST Elbrus v3 "
5035 # elif __iset__ == 4
5036 # define VER_H_PROM_TSA_TEXT "MCST Elbrus v4 "
5037 # elif __iset__ == 5
5038 # define VER_H_PROM_TSA_TEXT "MCST Elbrus v5 "
5039 # elif __iset__ == 6
5040 # define VER_H_PROM_TSA_TEXT "MCST Elbrus v6 "
5041 # elif __iset__ == 7
5042 # define VER_H_PROM_TSA_TEXT "MCST Elbrus v7 "
5043 # elif __iset__ == 8
5044 # define VER_H_PROM_TSA_TEXT "MCST Elbrus v8 "
5045 # elif __iset__ == 9
5046 # define VER_H_PROM_TSA_TEXT "MCST Elbrus v9 "
5047 # elif __iset__ == 10
5048 # define VER_H_PROM_TSA_TEXT "MCST Elbrus v10 "
5049 # else
5050 # define VER_H_PROM_TSA_TEXT "MCST Elbrus "
5051 # endif
5052 # else
5053 # define VER_H_PROM_TSA_TEXT "MCST Elbrus "
5054 # endif
5055 # else
5056 # define VER_H_PROM_TSA_TEXT "MCST Elbrus "
5057 # endif
5058 # elif defined(__myriad2__)
5059 # define VER_H_PROM_TSA_TEXT "Myriad2 "
5060 # elif defined(__loongarch64) || defined(__loongarch__)
5061 # define VER_H_PROM_TSA_TEXT "LoongArch "
5062 # elif defined(_m68851) || defined(__m68k__) || defined(__m68000__) || defined(__M68K)
5063 # define VER_H_PROM_TSA_TEXT "Motorola m68k "
5064 # elif defined(__m88k__) || defined(__m88000__) || defined(__M88K)
5065 # define VER_H_PROM_TSA_TEXT "Motorola m88k "
5066 # elif defined(__VAX__) || defined(__vax__)
5067 # define VER_H_PROM_TSA_TEXT "VAX "
5068 # elif defined(__NIOS2__) || defined(__nios2__)
5069 # define VER_H_PROM_TSA_TEXT "Altera Nios II "
5070 # elif defined(__MICROBLAZE__) || defined(__microblaze__)
5071 # define VER_H_PROM_TSA_TEXT "Xilinx MicroBlaze "
5072 # elif defined(__kvx__) || defined(__KVX__) || defined(__KVX_64__)
5073 # define VER_H_PROM_TSA_TEXT "Kalray KVX "
5074 # endif
5075 # if !defined(VER_H_PROM_TSA_TEXT)
5076 # define BURN_PROM_TSA_TEXT "Unknown Target Arch."
5077 # else
5078 # define BURN_PROM_TSA_TEXT VER_H_PROM_TSA_TEXT
5079 # endif
5080 #endif
5081
5082 #if (defined(__WIN__) || defined(_WIN32) || defined(IS_WINDOWS) || defined(_MSC_VER) || defined(__MINGW32__) || \
5083 defined(__MINGW64__) || defined(CROSS_MINGW32) || defined(CROSS_MINGW64)) && !defined(__CYGWIN__)
5084 # define DC_IS_WINDOWS 1
5085 #else
5086 # define DC_IS_WINDOWS 0
5087 #endif
5088
5089 #if defined(BUILD_PROM_TSV_TEXT)
5090 # define BURN_PROM_TSV_TEXT BUILD_PROM_TSV_TEXT
5091 #else
5092 # if DC_IS_WINDOWS
5093 # define VER_H_PROM_TSV_TEXT "Microsoft Windows "
5094 # elif defined(__CYGWIN__)
5095 # define VER_H_PROM_TSV_TEXT "Windows/Cygwin "
5096 # elif (defined(__sunos) || defined(__sun) || defined(__sun__)) && (defined(SYSV) || defined(__SVR4) || defined(__SVR4__) || \
5097 defined(__svr4__))
5098 # if defined(__illumos__)
5099 # define VER_H_PROM_TSV_TEXT "illumos "
5100 # else
5101 # define VER_H_PROM_TSV_TEXT "Solaris "
5102 # endif
5103 # elif defined(__APPLE__) && defined(__MACH__)
5104 # define VER_H_PROM_TSV_TEXT "Apple macOS "
5105 # elif defined(__GNU__) && !defined(__linux__)
5106 # define VER_H_PROM_TSV_TEXT "GNU/Hurd "
5107 # elif defined(__ANDROID__) && defined(__ANDROID_API__)
5108 # if defined(__linux__)
5109 # define VER_H_PROM_TSV_TEXT "Android/Linux "
5110 # else
5111 # define VER_H_PROM_TSV_TEXT "Android "
5112 # endif
5113 # elif defined(__lynxOS__) || defined(__LYNXOS__) || defined(LynxOS) || defined(LYNXOS)
5114 # define VER_H_PROM_TSV_TEXT "LynxOS "
5115 # elif defined(__HELENOS__)
5116 # define VER_H_PROM_TSV_TEXT "HelenOS "
5117 # elif defined(__linux__)
5118 # if defined(__BIONIC__)
5119 # define VER_H_PROM_TSV_TEXT "Linux/Bionic-libc "
5120 # elif defined(__UCLIBC__) || defined(UCLIBC)
5121 # define VER_H_PROM_TSV_TEXT "Linux/uClibc "
5122 # elif defined(__NEWLIB__)
5123 # define VER_H_PROM_TSV_TEXT "Linux/Newlib "
5124 # elif defined(__dietlibc__)
5125 # define VER_H_PROM_TSV_TEXT "Linux/Diet-libc "
5126 # elif defined(__GLIBC__)
5127 # define VER_H_PROM_TSV_TEXT "GNU/Linux "
5128 # else
5129 # define VER_H_PROM_TSV_TEXT "Linux "
5130 # endif
5131 # elif defined(__HAIKU__)
5132 # define VER_H_PROM_TSV_TEXT "Haiku "
5133 # elif defined(__serenity__)
5134 # define VER_H_PROM_TSV_TEXT "SerenityOS "
5135 # elif defined(__FreeBSD__)
5136 # define VER_H_PROM_TSV_TEXT "FreeBSD "
5137 # elif defined(__NetBSD__)
5138 # define VER_H_PROM_TSV_TEXT "NetBSD "
5139 # elif defined(__OpenBSD__)
5140 # define VER_H_PROM_TSV_TEXT "OpenBSD "
5141 # elif defined(__DragonFly__)
5142 # define VER_H_PROM_TSV_TEXT "DragonFly BSD "
5143 # elif defined(_AIX)
5144 # if !defined(__PASE__)
5145 # define VER_H_PROM_TSV_TEXT "IBM AIX "
5146 # else
5147 # define VER_H_PROM_TSV_TEXT "IBM OS/400 (PASE) "
5148 # endif
5149 # elif defined(__VXWORKS__) || defined(__VXWORKS) || defined(__vxworks) || defined(__vxworks__) || defined(_VxWorks)
5150 # if !defined(__RTP__)
5151 # define VER_H_PROM_TSV_TEXT "VxWorks "
5152 # else
5153 # define VER_H_PROM_TSV_TEXT "VxWorks RTP "
5154 # endif
5155 # elif defined(__rtems__)
5156 # if defined(__FreeBSD_version)
5157 # define VER_H_PROM_TSV_TEXT "RTEMS/LibBSD "
5158 # else
5159 # define VER_H_PROM_TSV_TEXT "RTEMS "
5160 # endif
5161 # elif defined(__ZEPHYR__)
5162 # define VER_H_PROM_TSV_TEXT "Zephyr "
5163 # elif defined(ti_sysbios_BIOS___VERS) || defined(ti_sysbios_BIOS__top__)
5164 # define VER_H_PROM_TSV_TEXT "TI-RTOS (SYS/BIOS) "
5165 # elif defined(__OSV__)
5166 # define VER_H_PROM_TSV_TEXT "OSv "
5167 # elif defined(MINIX) || defined(MINIX3) || defined(MINIX315) || defined(__minix__) || defined(__minix3__) || defined(__minix315__)
5168 # define VER_H_PROM_TSV_TEXT "Minix "
5169 # elif defined(__QNX__)
5170 # if defined(__QNXNTO__)
5171 # define VER_H_PROM_TSV_TEXT "QNX Neutrino "
5172 # else
5173 # define VER_H_PROM_TSV_TEXT "QNX "
5174 # endif
5175 # endif
5176 # if !defined(VER_H_PROM_TSV_TEXT)
5177 # define BURN_PROM_TSV_TEXT "Unknown Target OpSys"
5178 # else
5179 # define BURN_PROM_TSV_TEXT VER_H_PROM_TSV_TEXT
5180 # endif
5181 #endif
5182
5183 #if !defined(VER_H_GIT_DATE_SHORT)
5184 # define VER_H_GIT_DATE_SHORT "2021-01-01"
5185 #endif
5186
5187 #if !defined(BURN_PROM_BUILD_NUM)
5188 # define BURN_PROM_BUILD_NUM " "
5189 #endif
5190
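/*
 * BURN copies `length` bytes of `string` into the PROM image at byte
 * `offset`; BURN1 stores a single byte.  For example, BURN (013, 11, serial)
 * places the 11-character serial number at octal offset 013.
 */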
5191 #define BURN(offset, length, string) memcpy ((char *) PROM + (offset), string, length)
5192 #define BURN1(offset, byte) PROM[offset] = (char) (byte)
5193
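/*
 * Assemble the PROM image: fill the 1 KiB buffer with 0377, burn the model
 * string, serial number, ship date and RSW2 bytes at the octal offsets
 * below, then record the emulator build and version metadata at decimal
 * offsets 60 and up.
 */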
5194 (void)memset (PROM, 255, 1024);
5195
5196
5197 BURN ( 00, 11, "DPS 8/SIM M");
5198 BURN (013, 11, serial);
5199 BURN (026, 6, ship);
5200 BURN1 (034, getbits36_8 (rsw2, 0));
5201 BURN1 (035, getbits36_8 (rsw2, 8));
5202 BURN1 (036, getbits36_8 (rsw2, 16));
5203 BURN1 (037, getbits36_8 (rsw2, 24));
5204 BURN1 (040, ((getbits36_4 (rsw2, 32) << 4)
5205 | rsw2Ext));
5206
5207
5208 BURN ( 60, 1, "2");
5209 BURN ( 70, 10, VER_H_GIT_DATE_SHORT);
5210 BURN ( 80, 3, VER_H_PROM_MAJOR_VER);
5211 BURN ( 83, 3, VER_H_PROM_MINOR_VER);
5212 BURN ( 86, 3, VER_H_PROM_PATCH_VER);
5213 BURN ( 89, 3, VER_H_PROM_OTHER_VER);
5214 BURN ( 92, 8, BURN_PROM_BUILD_NUM);
5215 BURN (100, 1, VER_H_GIT_RELT);
5216 BURN (101, 29, VER_H_PROM_VER_TEXT);
5217 BURN (130, 20, BURN_PROM_OSA_TEXT);
5218 BURN (150, 20, BURN_PROM_OSV_TEXT);
5219 BURN (170, 20, BURN_PROM_TSA_TEXT);
5220 BURN (190, 20, BURN_PROM_TSV_TEXT);
5221 }
5222
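/*
 * cpuStats: report per-CPU run statistics.  Where the platform supports
 * pthread_getcpuclockid(), the CPU thread's accumulated CPU time is shown
 * as HH:MM:SS.mmm, followed by the cycle and instruction counts and the
 * core-lock contention counters.  The WIN_STDIO branch below differs from
 * the default branch only in omitting the "%'" thousands-grouping flag.
 */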
5223 void cpuStats (uint cpuNo) {
5224 if (! cpus[cpuNo].cycleCnt)
5225 return;
5226
5227
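/*
 * Compile-time probes: on Haiku, HAIKU_NO_PTHREAD_GETCPUCLOCKID is defined
 * when <syscall_clock_info.h> cannot be found; on SunOS, __illumos__ is
 * defined when the illumos markers from <sys/sysevent.h> are present.
 * Both feed the guard around the per-thread CPU time query below.
 */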
5228 #if defined(__HAIKU__)
5229 # if HAS_INCLUDE(<syscall_clock_info.h>)
5230 # include <syscall_clock_info.h>
5231 # endif
5232 # if !defined(_SYSTEM_SYSCALL_CLOCK_INFO_H)
5233 # if !defined(HAIKU_NO_PTHREAD_GETCPUCLOCKID)
5234 # define HAIKU_NO_PTHREAD_GETCPUCLOCKID
5235 # endif
5236 # endif
5237 #endif
5238
5239
5240
5241 #if defined(__sun) || defined(__sun__)
5242 # if !defined(__illumos__)
5243 # if HAS_INCLUDE(<sys/sysevent.h>)
5244 # include <sys/sysevent.h>
5245 # endif
5246 # if defined(ILLUMOS_VENDOR) || defined(ILLUMOS_KERN_PUB)
5247 # define __illumos__
5248 # endif
5249 # endif
5250 #endif
5251
5252 double cpu_seconds = 0;
5253 int cpu_millis = 0;
5254 char cpu_ftime[64] = {0};
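/* Query this CPU thread's accumulated CPU time, where the host supports it. */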
5255 #if (defined(THREADZ) || defined(LOCKLESS))
5256 # if !defined(HAIKU_NO_PTHREAD_GETCPUCLOCKID) && !defined(__illumos__) && \
5257 !defined(__APPLE__) && !defined(__PASE__) && !defined(__serenity__)
5258 struct timespec cpu_time;
5259 clockid_t clock_id;
5260 if (pthread_getcpuclockid (cpus[cpuNo].thread_id, &clock_id) == 0) {
5261 if (clock_gettime (clock_id, &cpu_time) == 0) {
5262 cpu_seconds = (double)cpu_time.tv_sec + cpu_time.tv_nsec / 1e9;
5263 }
5264 }
5265 # endif
5266 #endif
5267
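/* Split the CPU time into HH:MM:SS plus milliseconds for display. */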
5268 if (cpu_seconds > 0 && cpus[cpuNo].instrCnt > 0) {
5269 int cpu_hours = (int)(cpu_seconds / 3600);
5270 int cpu_minutes = (int)((cpu_seconds - cpu_hours * 3600) / 60);
5271 int cpu_secs = (int)(cpu_seconds - (cpu_hours * 3600) - (cpu_minutes * 60));
5272 struct tm cpu_tm = {0};
5273 cpu_tm.tm_hour = cpu_hours;
5274 cpu_tm.tm_min = cpu_minutes;
5275 cpu_tm.tm_sec = cpu_secs;
5276 strftime(cpu_ftime, sizeof(cpu_ftime), "%H:%M:%S", &cpu_tm);
5277 cpu_millis = (int)((cpu_seconds - (cpu_hours * 3600) - (cpu_minutes * 60) - cpu_secs) * 1000);
5278 }
5279
5280 (void)fflush(stderr);
5281 (void)fflush(stdout);
5282 sim_msg ("\r\n");
5283 (void)fflush(stdout);
5284 (void)fflush(stderr);
5285 sim_msg ("\r+---------------------------------+\r\n");
5286 sim_msg ("\r| CPU %c Statistics |\r\n", 'A' + cpuNo);
5287 sim_msg ("\r+---------------------------------+\r\n");
5288 if (cpu_seconds > 0 && cpus[cpuNo].instrCnt > 0) {
5289 sim_msg ("\r| CPU Time Used %11s.%03d |\r\n", cpu_ftime, cpu_millis);
5290 sim_msg ("\r+---------------------------------+\r\n");
5291 }
5292 (void)fflush(stdout);
5293 (void)fflush(stderr);
5294 #if defined(_AIX) && !defined(__PASE__)
5295 struct rusage rusage;
5296 if (!pthread_getrusage_np(cpus[cpuNo].thread_id, &rusage, PTHRDSINFO_RUSAGE_COLLECT)) {
5297 sim_msg ("\r| Volun. CtxtSw %'15llu |\r\n", (unsigned long long)rusage.ru_nvcsw);
5298 sim_msg ("\r| Invol. CtxtSw %'15llu |\r\n", (unsigned long long)rusage.ru_nivcsw);
5299 sim_msg ("\r+---------------------------------+\r\n");
5300 }
5301 #endif
5302 #if defined(WIN_STDIO)
5303 sim_msg ("\r| cycles %15llu |\r\n", (unsigned long long)cpus[cpuNo].cycleCnt);
5304 sim_msg ("\r| instructions %15llu |\r\n", (unsigned long long)cpus[cpuNo].instrCnt);
5305 (void)fflush(stdout);
5306 (void)fflush(stderr);
5307 sim_msg ("\r+---------------------------------+\r\n");
5308 sim_msg ("\r| lockCnt %15llu |\r\n", (unsigned long long)cpus[cpuNo].coreLockState.lockCnt);
5309 sim_msg ("\r| lockImmediate %15llu |\r\n", (unsigned long long)cpus[cpuNo].coreLockState.lockImmediate);
5310 (void)fflush(stdout);
5311 (void)fflush(stderr);
5312 sim_msg ("\r+---------------------------------+\r\n");
5313 sim_msg ("\r| lockWait %15llu |\r\n", (unsigned long long)cpus[cpuNo].coreLockState.lockWait);
5314 sim_msg ("\r| lockWaitMax %15llu |\r\n", (unsigned long long)cpus[cpuNo].coreLockState.lockWaitMax);
5315 (void)fflush(stdout);
5316 (void)fflush(stderr);
5317 # if !defined(SCHED_NEVER_YIELD)
5318 sim_msg ("\r| lockYield %15llu |\r\n", (unsigned long long)cpus[cpuNo].coreLockState.lockYield);
5319 (void)fflush(stdout);
5320 (void)fflush(stderr);
5321 # else
5322 sim_msg ("\r| lockYield ---- |\r\n");
5323 (void)fflush(stdout);
5324 (void)fflush(stderr);
5325 # endif
5326 sim_msg ("\r+---------------------------------+");
5327 (void)fflush(stdout);
5328 (void)fflush(stderr);
5329 # if !defined(UCACHE)
5330 # if !defined(UCACHE_STATS)
5331 sim_msg ("\r\n");
5332 # endif
5333 # endif
5334 (void)fflush(stdout);
5335 (void)fflush(stderr);
5336 #else
5337 sim_msg ("\r| cycles %'15llu |\r\n", (unsigned long long)cpus[cpuNo].cycleCnt);
5338 sim_msg ("\r| instructions %'15llu |\r\n", (unsigned long long)cpus[cpuNo].instrCnt);
5339 (void)fflush(stdout);
5340 (void)fflush(stderr);
5341 sim_msg ("\r+---------------------------------+\r\n");
5342 sim_msg ("\r| lockCnt %'15llu |\r\n", (unsigned long long)cpus[cpuNo].coreLockState.lockCnt);
5343 sim_msg ("\r| lockImmediate %'15llu |\r\n", (unsigned long long)cpus[cpuNo].coreLockState.lockImmediate);
5344 (void)fflush(stdout);
5345 (void)fflush(stderr);
5346 sim_msg ("\r+---------------------------------+\r\n");
5347 sim_msg ("\r| lockWait %'15llu |\r\n", (unsigned long long)cpus[cpuNo].coreLockState.lockWait);
5348 sim_msg ("\r| lockWaitMax %'15llu |\r\n", (unsigned long long)cpus[cpuNo].coreLockState.lockWaitMax);
5349 (void)fflush(stdout);
5350 (void)fflush(stderr);
5351 # if !defined(SCHED_NEVER_YIELD)
5352 sim_msg ("\r| lockYield %'15llu |\r\n", (unsigned long long)cpus[cpuNo].coreLockState.lockYield);
5353 (void)fflush(stdout);
5354 (void)fflush(stderr);
5355 # else
5356 sim_msg ("\r| lockYield ---- |\r\n");
5357 (void)fflush(stdout);
5358 (void)fflush(stderr);
5359 # endif
5360 sim_msg ("\r+---------------------------------+");
5361 (void)fflush(stdout);
5362 (void)fflush(stderr);
5363 # if !defined(UCACHE)
5364 # if !defined(UCACHE_STATS)
5365 sim_msg ("\r\n");
5366 # endif
5367 # endif
5368 (void)fflush(stderr);
5369 (void)fflush(stdout);
5370 #endif
5371
5372 #if defined(UCACHE_STATS)
5373 ucacheStats (cpuNo);
5374 #endif
5375
5376
5377
5378
5379
5380
5381
5382 }
5383
5384 bool running_perf_test;
5385
5386 #if defined(THREADZ) || defined(LOCKLESS)
5387 # include <locale.h>
5388 # include "segldr.h"
5389
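/*
 * perfTest: stand-alone benchmark driver (THREADZ/LOCKLESS builds only).
 * It allocates the shared system state itself, restores the memory image
 * named by testName ("strip.mem" by default) via mrestore(), resets CPU 0
 * into the FETCH cycle, and runs threadz_sim_instr() on it with
 * running_perf_test set.
 */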
5390 void perfTest (char * testName) {
5391 running_perf_test = true;
5392
5393 if (testName == NULL)
5394 testName = "strip.mem";
5395
5396 # if !defined(NO_LOCALE)
5397 (void) setlocale(LC_NUMERIC, "");
5398 # endif
5399
5400
5401 # if !defined(_AIX)
5402 system_state = aligned_malloc (sizeof (struct system_state_s));
5403 # else
5404 system_state = malloc (sizeof (struct system_state_s));
5405 # endif
5406 if (!system_state)
5407 {
5408 (void)fprintf (stderr, "\rFATAL: Out of memory! Aborting at %s[%s:%d]\r\n",
5409 __func__, __FILE__, __LINE__);
5410 # if defined(USE_BACKTRACE)
5411 # if defined(SIGUSR2)
5412 (void)raise(SIGUSR2);
5413
5414 # endif
5415 # endif
5416 abort();
5417 }
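/* Lock the state into RAM when sim_free_memory is unset or reports at
   least ~192 MB free; otherwise skip locking.  A failure only sets
   mlock_failure. */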
5418 # if !defined(__MINGW64__) && !defined(__MINGW32__) && !defined(CROSS_MINGW64) && !defined(CROSS_MINGW32) && !defined(__PASE__)
5419 if (0 == sim_free_memory || sim_free_memory >= 192000000) {
5420 if (mlock(system_state, sizeof(struct system_state_s)) == -1) {
5421 mlock_failure = true;
5422 }
5423 } else {
5424 # if defined(TESTING)
5425 sim_warn ("Low memory - no memory locking attempted.\r\n");
5426 # else
5427 (void)system_state;
5428 # endif
5429 }
5430 # endif
5431 M = system_state->M;
5432 # if defined(M_SHARED)
5433 cpus = system_state->cpus;
5434 # endif
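/* Zero all CPU state, then set each CPU's fault base switch and clear its
   instruction, cycle and fault counters before the run. */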
5435 (void) memset (cpus, 0, sizeof (cpu_state_t) * N_CPU_UNITS_MAX);
5436 for (int i = 0; i < N_CPU_UNITS_MAX; i ++) {
5437 cpus[i].switches.FLT_BASE = 2;
5438 cpus[i].instrCnt = 0;
5439 cpus[i].cycleCnt = 0;
5440 for (int j = 0; j < N_FAULTS; j ++)
5441 cpus[i].faultCnt [j] = 0;
5442 }
5443
5444 cpus[0].tweaks.enable_emcall = 1;
5445 opc_dev.numunits = 1;
5446 cpu_reset_unit_idx (0, false);
5447 set_cpu_cycle (& cpus[0], FETCH_cycle);
5448 mrestore (testName);
5449 _cpup = & cpus[0];
5450 threadz_sim_instr ();
5451 }
5452 #endif
5453