root/src/dps8/doAppendCycleInstructionFetch.h

DEFINITIONS

This source file includes the following definitions.
  1. doAppendCycleInstructionFetch

   1 /*
   2  * vim: filetype=c:tabstop=4:ai:expandtab
   3  * SPDX-License-Identifier: ICU
   4  * scspell-id: 45d63e9a-171d-11ee-b7ed-80ee73e9b8e7
   5  *
   6  * ---------------------------------------------------------------------------
   7  *
   8  * Copyright (c) 2022-2023 Charles Anthony
   9  * Copyright (c) 2022-2023 Jeffrey H. Johnson
  10  * Copyright (c) 2022-2024 The DPS8M Development Team
  11  *
  12  * This software is made available under the terms of the ICU License.
  13  * See the LICENSE.md file at the top-level directory of this distribution.
  14  *
  15  * ---------------------------------------------------------------------------
  16  */
  17 
  18 //
  19 //    A get SDW
  20 //       |
  21 //       V
  22 //    B last cycle = RTCD operand fetch?
  23 //       Yes                            No
  24 //        |                              |
  25 //        V                              V
  26 //    C check ring brackets           F check ring brackets
  27 //        |                              |
  28 //        +------------------------------+
  29 //        |
  30 //        V
  31 //    D check RALR
  32 //        |
  33 //        V
  34 //    G check bound
  35 //      paged?
  36 //       Yes                            No
  37 //        |                              |
  38 //        V                              |
  39 //    G1 get PTW                         |
  40 //        |                              |
  41 //        V                              V
  42 //    I calc. paged address           H calc. unpaged address
  43 //        |                              |
  44 //        +------------------------------+
  45 //        |
  46 //        V
  47 //    HI set XSF
  48 //       read memory
  49 //        |
  50 //        V
  51 //    L last cycle RTCD operand fetch handling
  52 //        |
  53 //        V
  54 //    KL set PSR, IC
  55 //        |
  56 //        V
  57 //    M  set P
  58 //        |
  59 //        V
  60 //    EXIT  return final address
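     //
     //    (The step letters above match the "Sheet" labels used in the
     //     section comments within the function body below.)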
  61 
  62 word24 doAppendCycleInstructionFetch (cpu_state_t * cpup, word36 * data, uint nWords) {
  63 //sim_printf ("doAppendCycleInstructionFetch %05o:%06o\r\n",
  64 static int evcnt = 0;
  65   DCDstruct * i = & cpu.currentInstruction;
  66   (void)evcnt;
  67   DBGAPP ("doAppendCycleInstructionFetch(Entry) thisCycle=INSTRUCTION_FETCH\n");
  68   DBGAPP ("doAppendCycleInstructionFetch(Entry) lastCycle=%s\n", str_pct (cpu.apu.lastCycle));
  69   DBGAPP ("doAppendCycleInstructionFetch(Entry) CA %06o\n", cpu.TPR.CA);
  70   DBGAPP ("doAppendCycleInstructionFetch(Entry) n=%2u\n", nWords);
  71   DBGAPP ("doAppendCycleInstructionFetch(Entry) PPR.PRR=%o PPR.PSR=%05o\n", cpu.PPR.PRR, cpu.PPR.PSR);
  72   DBGAPP ("doAppendCycleInstructionFetch(Entry) TPR.TRR=%o TPR.TSR=%05o\n", cpu.TPR.TRR, cpu.TPR.TSR);
  73 
  74   if (i->b29) {
  75     DBGAPP ("doAppendCycleInstructionFetch(Entry) isb29 PRNO %o\n", GET_PRN (IWB_IRODD));
  76   }
  77 
  78   uint this = UC_INSTRUCTION_FETCH;
  79 
  80   word24 finalAddress = 0;
  81   word24 pageAddress = 0;
  82   word3 RSDWH_R1 = 0;
  83   word14 bound = 0;
  84   word1 p = 0;
  85   bool paged = false;
  86 
  87 // ucache logic:
  88 // The cache will hit if:
  89 //   No CAMS/CAMP instruction has been executed.
  90 //   The segment number matches the cached value.
  91 //   The offset is on the same page as the cached value.
  92 //
  93 // doAppendCycle (INSTRUCTION_FETCH) checks:
  94 //   associative memory: Don't Care. If the cache hits, the WAM won't be
  95 //     queried, which is the best-case condition.
  96 //   lastCycle: Set to INSTRUCTION_FETCH.
  97 //   RSDWH_R1: Restored from cache.
  98 //   lastCycle == RTCD_OPERAND_FETCH. One would think that RTCD would always
  99 //     go to a new page, but that is not guaranteed; skip ucache.
 100 //   rRALR. Since it is before a segment change, the ucache will always miss.
 101 //   Ring brackets.  They will be the same, so recheck is unnecessary.
 102 //   ACVs: They will be the same, so recheck is unnecessary.
 103 //   SDW/PTW: They will be the same, so recheck is unnecessary.
 104 //   Prepage mode:  Skip ucache.
 105 //   History registers... Hm. Skip for now; values could be stashed...
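     //   (For reference: the ucache entry consulted below captures the
     //     segment number, computed address, SDW bound, P bit, page address,
     //     RSDWH_R1 and the paged flag; see ucCacheCheck/ucCacheSave below.)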
 106 
 107 // Is this cycle a candidate for ucache?
 108 
 109 //#define TEST_UCACHE
 110 #if defined(TEST_UCACHE)
 111   bool cacheHit;
 112   cacheHit = false; // Assume skip...
 113 #endif
 114 
 115   // lastCycle == RTCD_OPERAND_FETCH
 116   if (i->opcode == 0610  && ! i->opcodeX) {
 117     //sim_printf ("skip RTCD\r\n");
 118     goto skip_ucache;
 119   }
 120 
 121   // RALR
 122   if (cpu.rRALR) {
 123     //sim_printf ("skip rRALR\r\n");
 124     goto skip_ucache;
 125   }
 126 
 127   // Prepage mode?
 128   // check for "uninterruptible" EIS instruction
 129   // ISOLTS-878 02: mvn,cmpn,mvne,ad3d; obviously also
 130   // ad2/3d,sb2/3d,mp2/3d,dv2/3d
 131   // DH03 p.8-13: probably also mve,btd,dtb
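       // (The & 0770 test below groups opcodes in blocks of eight, so this
       // matches EIS opcodes 0020-0027, 0200-0207, 0220-0227 and 0300-0307.)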
 132   if (i->opcodeX && ((i->opcode & 0770) == 0200 || (i->opcode & 0770) == 0220
 133       || (i->opcode & 0770) == 020 || (i->opcode & 0770) == 0300)) {
 134     //sim_printf ("skip uninterruptible\r\n");
 135     goto skip_ucache;
 136   }
 137 
 138 // Yes; check the ucache
 139 
 140 //#define TEST_UCACHE
 141 #if defined(TEST_UCACHE)
 142   word24 cachedAddress;
 143   word3 cachedR1;
 144   word14 cachedBound;
 145   word1 cachedP;
 146   bool cachedPaged;
 147   cacheHit =
 148       ucCacheCheck (cpup, this, cpu.TPR.TSR, cpu.TPR.CA, & cachedBound, & cachedP, & cachedAddress, & cachedR1, & cachedPaged);
 149   goto miss_ucache;
 150 #else
 151   if (! ucCacheCheck (cpup, this, cpu.TPR.TSR, cpu.TPR.CA, & bound, & p, & pageAddress, & RSDWH_R1, & paged))
 152     goto miss_ucache;
 153 #endif
 154 
 155   if (paged) {
 156     finalAddress = pageAddress + (cpu.TPR.CA & OS18MASK);
 157   } else {
 158     finalAddress = pageAddress + cpu.TPR.CA;
 159   }
 160   cpu.RSDWH_R1 = RSDWH_R1;
 161 
 162 // ucache hit; housekeeping...
 163   //sim_printf ("hit  %d %05o:%06o\r\n", evcnt, cpu.TPR.TSR, cpu.TPR.CA);
 164 
 165   cpu.apu.lastCycle = INSTRUCTION_FETCH;
 166   goto HI;
 167 
 168 skip_ucache:;
 169   //sim_printf ("miss %d %05o:%06o\r\n", evcnt, cpu.TPR.TSR, cpu.TPR.CA);
 170 #if defined(UCACHE_STATS)
 171   cpu.uCache.skips[this] ++;
 172 #endif
 173 
 174 miss_ucache:;
 175 
 176   bool nomatch = true;
 177   if (cpu.tweaks.enable_wam) {
 178     // AL39: The associative memory is ignored (forced to "no match") during
 179     // address preparation.
 180     // lptp,lptr,lsdp,lsdr,sptp,sptr,ssdp,ssdr
 181     // Unfortunately, ISOLTS doesn't try to execute any of these in append mode.
 182     // XXX should this be only for OPERAND_READ and OPERAND_STORE?
 183     nomatch = ((i->opcode == 0232 || i->opcode == 0254 ||
 184                 i->opcode == 0154 || i->opcode == 0173) &&
 185                 i->opcodeX ) ||
 186                ((i->opcode == 0557 || i->opcode == 0257) &&
 187                 ! i->opcodeX);
 188   }
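       // nomatch == true bypasses the SDWAM/PTWAM lookups below (either
       // because the WAM is disabled or because this opcode requires the
       // forced "no match" condition), so the SDW and PTW are refetched.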
 189 
 190   processor_cycle_type lastCycle = cpu.apu.lastCycle;
 191   cpu.apu.lastCycle = INSTRUCTION_FETCH;
 192 
 193   DBGAPP ("doAppendCycleInstructionFetch(Entry) XSF %o\n", cpu.cu.XSF);
 194 
 195   PNL (L68_ (cpu.apu.state = 0;))
 196 
 197   cpu.RSDWH_R1 = 0;
 198 
 199   cpu.acvFaults = 0;
 200 
 201 //#define FMSG(x) x
 202 #define FMSG(x)
 203   FMSG (char * acvFaultsMsg = "<unknown>";)
 204 
 205 ////////////////////////////////////////
 206 //
 207 // Sheet 2: "A"
 208 //
 209 ////////////////////////////////////////
 210 
 211 //
 212 //  A:
 213 //    Get SDW
 214 
 215   //PNL (cpu.APUMemAddr = address;)
 216   PNL (cpu.APUMemAddr = cpu.TPR.CA;)
 217 
 218   DBGAPP ("doAppendCycleInstructionFetch(A)\n");
 219 
 220   // is SDW for C(TPR.TSR) in SDWAM?
 221   if (nomatch || ! fetch_sdw_from_sdwam (cpup, cpu.TPR.TSR)) {
 222     // No
 223     DBGAPP ("doAppendCycleInstructionFetch(A):SDW for segment %05o not in SDWAM\n", cpu.TPR.TSR);
 224 
 225     DBGAPP ("doAppendCycleInstructionFetch(A):DSBR.U=%o\n", cpu.DSBR.U);
 226 
 227     if (cpu.DSBR.U == 0) {
 228       fetch_dsptw (cpup, cpu.TPR.TSR);
 229 
 230       if (! cpu.PTW0.DF)
 231         doFault (FAULT_DF0 + cpu.PTW0.FC, fst_zero, "doAppendCycleInstructionFetch(A): PTW0.F == 0");
 232 
 233       if (! cpu.PTW0.U)
 234         modify_dsptw (cpup, cpu.TPR.TSR);
 235 
 236       fetch_psdw (cpup, cpu.TPR.TSR);
 237     } else
 238       fetch_nsdw (cpup, cpu.TPR.TSR); // load SDW0 from descriptor segment table.
 239 
 240     if (cpu.SDW0.DF == 0) {
 241       DBGAPP ("doAppendCycleInstructionFetch(A): SDW0.F == 0! " "Initiating directed fault\n");
 242       // initiate a directed fault ...
 243       doFault (FAULT_DF0 + cpu.SDW0.FC, fst_zero, "SDW0.F == 0");
 244     }
 245     // load SDWAM .....
 246     load_sdwam (cpup, cpu.TPR.TSR, nomatch);
 247   }
 248   DBGAPP ("doAppendCycleInstructionFetch(A) R1 %o R2 %o R3 %o E %o\n", cpu.SDW->R1, cpu.SDW->R2, cpu.SDW->R3, cpu.SDW->E);
 249 
 250   // Yes...
 251   RSDWH_R1 = cpu.RSDWH_R1 = cpu.SDW->R1;
 252 
 253 ////////////////////////////////////////
 254 //
 255 // Sheet 3: "B"
 256 //
 257 ////////////////////////////////////////
 258 
 259 //
 260 // B: Check the ring
 261 //
 262 
 263   DBGAPP ("doAppendCycleInstructionFetch(B)\n");
 264 
 265   // check ring bracket consistency
 266 
 267   //C(SDW.R1) <= C(SDW.R2) <= C(SDW .R3)?
 268   if (! (cpu.SDW->R1 <= cpu.SDW->R2 && cpu.SDW->R2 <= cpu.SDW->R3)) {
 269     // Set fault ACV0 = IRO
 270     cpu.acvFaults |= ACV0;
 271     PNL (L68_ (cpu.apu.state |= apu_FLT;))
 272     FMSG (acvFaultsMsg = "acvFaults(B) C(SDW.R1) <= C(SDW.R2) <= " "C(SDW .R3)";)
 273   }
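       // (Illustration: brackets R1=0, R2=4, R3=7 are consistent; R1=5, R2=3
       // would set ACV0 because the rule R1 <= R2 <= R3 is violated.)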
 274 
 275   // lastCycle == RTCD_OPERAND_FETCH
 276   // If a fault happens between the RTCD_OPERAND_FETCH and the INSTRUCTION_FETCH
 277   // of the next instruction (this happens about 35 times just booting and
 278   // shutting down Multics), a stored lastCycle is useless.
 279   // The opcode is preserved across faults and only replaced as the
 280   // INSTRUCTION_FETCH succeeds.
 281   if (i->opcode == 0610  && ! i->opcodeX)
 282     goto C;
 283 
 284   if (lastCycle == RTCD_OPERAND_FETCH)
 285     sim_warn ("%s: lastCycle == RTCD_OPERAND_FETCH opcode %0#o\n", __func__, i->opcode);
 286 
 287   //
 288   // B1: The operand is one of: an instruction, data to be read or data to be
 289   //     written
 290   //
 291 
 292   // Transfer or instruction fetch?
 293   goto F;
 294 
 295 ////////////////////////////////////////
 296 //
 297 // Sheet 4: "C" "D"
 298 //
 299 ////////////////////////////////////////
 300 
 301 C:;
 302   DBGAPP ("doAppendCycleInstructionFetch(C)\n");
 303 
 304   //
 305   // check ring bracket for instruction fetch after rtcd instruction
 306   //
 307   //   allow outbound transfers (cpu.TPR.TRR >= cpu.PPR.PRR)
 308   //
 309 
 310   // C(TPR.TRR) < C(SDW.R1)?
 311   // C(TPR.TRR) > C(SDW.R2)?
 312   if (cpu.TPR.TRR < cpu.SDW->R1 || cpu.TPR.TRR > cpu.SDW->R2) {
 313     DBGAPP ("ACV1 c\n");
 314     DBGAPP ("acvFaults(C) ACV1 ! ( C(SDW .R1) %o <= C(TPR.TRR) %o <= C(SDW .R2) %o )\n", cpu.SDW->R1, cpu.TPR.TRR, cpu.SDW->R2);
 315     //Set fault ACV1 = OEB
 316     cpu.acvFaults |= ACV1;
 317     PNL (L68_ (cpu.apu.state |= apu_FLT;))
 318     FMSG (acvFaultsMsg = "acvFaults(C) C(SDW.R1) > C(TPR.TRR) > C(SDW.R2)";)
 319   }
 320   // SDW.E set ON?
 321   if (! cpu.SDW->E) {
 322     DBGAPP ("ACV2 a\n");
 323     DBGAPP ("doAppendCycleInstructionFetch(C) ACV2\n");
 324     //Set fault ACV2 = E-OFF
 325     cpu.acvFaults |= ACV2;
 326     PNL (L68_ (cpu.apu.state |= apu_FLT;))
 327     FMSG (acvFaultsMsg = "acvFaults(C) SDW.E";)
 328   }
 329   if (cpu.TPR.TRR > cpu.PPR.PRR)
 330     sim_warn ("rtcd: outbound call cpu.TPR.TRR %d cpu.PPR.PRR %d\n", cpu.TPR.TRR, cpu.PPR.PRR);
 331   // C(TPR.TRR) >= C(PPR.PRR)
 332   if (cpu.TPR.TRR < cpu.PPR.PRR) {
 333     DBGAPP ("ACV11\n");
 334     DBGAPP ("doAppendCycleInstructionFetch(C) ACV11\n");
 335     //Set fault ACV11 = INRET
 336     cpu.acvFaults |= ACV11;
 337     PNL (L68_ (cpu.apu.state |= apu_FLT;))
 338     FMSG (acvFaultsMsg = "acvFaults(C) TRR>=PRR";)
 339   }
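       // (In summary: an rtcd target must lie within the execute bracket
       // [R1, R2] of an executable segment and may not transfer inward to a
       // ring lower than PPR.PRR.)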
 340 
 341 D:;
 342   DBGAPP ("doAppendCycleInstructionFetch(D)\n");
 343 
 344   // transfer or instruction fetch
 345 
 346   // check ring alarm to catch outbound transfers
 347 
 348   if (cpu.rRALR == 0)
 349     goto G;
 350 
 351   // C(PPR.PRR) < RALR?
 352   if (! (cpu.PPR.PRR < cpu.rRALR)) {
 353     DBGAPP ("ACV13\n");
 354     DBGAPP ("acvFaults(D) C(PPR.PRR) %o < RALR %o\n", cpu.PPR.PRR, cpu.rRALR);
 355     cpu.acvFaults |= ACV13;
 356     PNL (L68_ (cpu.apu.state |= apu_FLT;))
 357     FMSG (acvFaultsMsg = "acvFaults(D) C(PPR.PRR) < RALR";)
 358   }
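       // (Illustration: with RALR = 4, execution may continue only while
       // PPR.PRR < 4; an instruction fetch in ring 4 or higher sets ACV13.)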
 359 
 360   goto G;
 361 
 362 ////////////////////////////////////////
 363 //
 364 // Sheet 6: "F"
 365 //
 366 ////////////////////////////////////////
 367 
 368 F:;
 369   PNL (L68_ (cpu.apu.state |= apu_PIAU;))
 370   DBGAPP ("doAppendCycleInstructionFetch(F): transfer or instruction fetch\n");
 371 
 372   //
 373   // check ring bracket for instruction fetch
 374   //
 375 
 376   // C(TPR.TRR) > C(SDW .R2)?
 377   if (cpu.TPR.TRR < cpu.SDW->R1 || cpu.TPR.TRR > cpu.SDW->R2) {
 378     DBGAPP ("ACV1 a/b\n");
 379     DBGAPP ("acvFaults(F) ACV1 !( C(SDW .R1) %o <= C(TPR.TRR) %o <= C(SDW .R2) %o )\n", cpu.SDW->R1, cpu.TPR.TRR, cpu.SDW->R2);
 380     cpu.acvFaults |= ACV1;
 381     PNL (L68_ (cpu.apu.state |= apu_FLT;))
 382     FMSG (acvFaultsMsg = "acvFaults(F) C(TPR.TRR) < C(SDW .R1)";)
 383   }
 384   // SDW .E set ON?
 385   if (! cpu.SDW->E) {
 386     DBGAPP ("ACV2 c \n");
 387     DBGAPP ("doAppendCycleInstructionFetch(F) ACV2\n");
 388     cpu.acvFaults |= ACV2;
 389     PNL (L68_ (cpu.apu.state |= apu_FLT;))
 390     FMSG (acvFaultsMsg = "acvFaults(F) SDW .E set OFF";)
 391   }
 392 
 393   // C(PPR.PRR) = C(TPR.TRR)?
 394   if (cpu.PPR.PRR != cpu.TPR.TRR) {
 395     DBGAPP ("ACV12\n");
 396     DBGAPP ("doAppendCycleInstructionFetch(F) ACV12\n");
 397     //Set fault ACV12 = CRT
 398     cpu.acvFaults |= ACV12;
 399     PNL (L68_ (cpu.apu.state |= apu_FLT;))
 400     FMSG (acvFaultsMsg = "acvFaults(F) C(PPR.PRR) != C(TPR.TRR)";)
 401   }
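       // (For an ordinary transfer or instruction fetch the ring may not
       // change: TPR.TRR must equal PPR.PRR and must lie within the execute
       // bracket of an executable segment.)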
 402 
 403   goto D;
 404 
 405 ////////////////////////////////////////
 406 //
 407 // Sheet 7: "G"
 408 //
 409 ////////////////////////////////////////
 410 
 411 G:;
 412 
 413   DBGAPP ("doAppendCycleInstructionFetch(G)\n");
 414 
 415   //C(TPR.CA)0,13 > SDW.BOUND?
 416   if (((cpu.TPR.CA >> 4) & 037777) > cpu.SDW->BOUND) {
 417     DBGAPP ("ACV15\n");
 418     DBGAPP ("doAppendCycleInstructionFetch(G) ACV15\n");
 419     cpu.acvFaults |= ACV15;
 420     PNL (L68_ (cpu.apu.state |= apu_FLT;))
 421     FMSG (acvFaultsMsg = "acvFaults(G) C(TPR.CA)0,13 > SDW.BOUND";)
 422     DBGAPP ("acvFaults(G) C(TPR.CA)0,13 > SDW.BOUND\n" "   CA %06o CA>>4 & 037777 %06o SDW->BOUND %06o",
 423             cpu.TPR.CA, ((cpu.TPR.CA >> 4) & 037777), cpu.SDW->BOUND);
 424   }
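       // (SDW.BOUND is the segment bound in units of 16 words, compared with
       // the top 14 bits of CA.  Illustration: BOUND = 0017 permits offsets
       // 0 .. 0377; CA = 0400 gives 0400 >> 4 = 020 > 0017, raising ACV15.)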
 425   bound = cpu.SDW->BOUND;
 426   p = cpu.SDW->P;
 427 
 428   if (cpu.acvFaults) {
 429     DBGAPP ("doAppendCycleInstructionFetch(G) acvFaults\n");
 430     PNL (L68_ (cpu.apu.state |= apu_FLT;))
 431     // Initiate an access violation fault
 432     doFault (FAULT_ACV, (_fault_subtype) {.fault_acv_subtype=cpu.acvFaults}, "ACV fault");
 433   }
 434 
 435   // is segment C(TPR.TSR) paged?
 436   if (cpu.SDW->U)
 437     goto H; // Not paged
 438 
 439   // Yes. segment is paged ...
 440   // is PTW for C(TPR.CA) in PTWAM?
 441 
 442   DBGAPP ("doAppendCycleInstructionFetch(G) CA %06o\n", cpu.TPR.CA);
 443   if (nomatch ||
 444       ! fetch_ptw_from_ptwam (cpup, cpu.SDW->POINTER, cpu.TPR.CA)) {
 445     fetch_ptw (cpup, cpu.SDW, cpu.TPR.CA);
 446     if (! cpu.PTW0.DF) {
 447       // initiate a directed fault
 448       doFault (FAULT_DF0 + cpu.PTW0.FC, (_fault_subtype) {.bits=0}, "PTW0.F == 0");
 449     }
 450     loadPTWAM (cpup, cpu.SDW->POINTER, cpu.TPR.CA, nomatch); // load PTW0 to PTWAM
 451   }
 452 
 453   // Prepage mode?
 454   // check for "uninterruptible" EIS instruction
 455   // ISOLTS-878 02: mvn,cmpn,mvne,ad3d; obviously also
 456   // ad2/3d,sb2/3d,mp2/3d,dv2/3d
 457   // DH03 p.8-13: probably also mve,btd,dtb
 458 
 459   // XXX: PVS-Studio says that "i->opcodeX" is ALWAYS FALSE in the following check:
 460   if (i->opcodeX && ((i->opcode & 0770) == 0200 || (i->opcode & 0770) == 0220 //-V560
 461       || (i->opcode & 0770) == 020 || (i->opcode & 0770) == 0300)) {
 462     do_ptw2 (cpup, cpu.SDW, cpu.TPR.CA);
 463   }
 464   goto I;
 465 
 466 ////////////////////////////////////////
 467 //
 468 // Sheet 8: "H", "I"
 469 //
 470 ////////////////////////////////////////
 471 
 472 H:;
 473   DBGAPP ("doAppendCycleInstructionFetch(H): FANP\n");
 474 
 475   paged = false;
 476 
 477   PNL (L68_ (cpu.apu.state |= apu_FANP;))
 478
 486   set_apu_status (cpup, apuStatus_FANP);
 487 
 488   DBGAPP ("doAppendCycleInstructionFetch(H): SDW->ADDR=%08o CA=%06o \n", cpu.SDW->ADDR, cpu.TPR.CA);
 489 
 490   pageAddress = (cpu.SDW->ADDR & 077777760);
 491   finalAddress = (cpu.SDW->ADDR & 077777760) + cpu.TPR.CA;
 492   finalAddress &= 0xffffff;
 493   PNL (cpu.APUMemAddr = finalAddress;)
 494 
 495   DBGAPP ("doAppendCycleInstructionFetch(H:FANP): (%05o:%06o) finalAddress=%08o\n", cpu.TPR.TSR, cpu.TPR.CA, finalAddress);
 496 
 497   goto HI;
 498 
 499 I:;
 500 
 501 // Set PTW.M
 502 
 503   DBGAPP ("doAppendCycleInstructionFetch(I): FAP\n");
 504 
 505   paged = true;
 506 
 507   // final address paged
 508   set_apu_status (cpup, apuStatus_FAP);
 509   PNL (L68_ (cpu.apu.state |= apu_FAP;))
 510 
 511   word24 y2 = cpu.TPR.CA % 1024;
 512 
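       // The PTW supplies a 1024-word page frame base (the low four bits of
       // PTW.ADDR are masked off); y2 is the offset within the page.
       // Illustration: PTW.ADDR = 0123460, CA = 001234 -> finalAddress = 012347234.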
 513   pageAddress = (((word24)cpu.PTW->ADDR & 0777760) << 6);
 514   // AL39: The hardware ignores low order bits of the main memory page
 515   // address according to page size
 516   finalAddress = (((word24)cpu.PTW->ADDR & 0777760) << 6) + y2;
 517   finalAddress &= 0xffffff;
 518   PNL (cpu.APUMemAddr = finalAddress;)
 519 
 520 #if defined(L68)
 521   if (cpu.MR_cache.emr && cpu.MR_cache.ihr)
 522     add_APU_history (APUH_FAP);
 523 #endif /* if defined(L68) */
 524   DBGAPP ("doAppendCycleInstructionFetch(I:FAP): (%05o:%06o) finalAddress=%08o\n", cpu.TPR.TSR, cpu.TPR.CA, finalAddress);
 525 
 526 HI:
 527   DBGAPP ("doAppendCycleInstructionFetch(HI)\n");
 528 
 529 #if defined(TEST_UCACHE)
 530   if (cacheHit) {
 531     bool err = false;
 532     if (cachedAddress != pageAddress) {
 533      sim_printf ("cachedAddress %08o != pageAddress %08o\r\n", cachedAddress, pageAddress);
 534      err = true;
 535     }
 536     if (cachedR1 != RSDWH_R1) {
 537       sim_printf ("cachedR1 %01o != RSDWH_R1 %01o\r\n", cachedR1, RSDWH_R1);
 538       err = true;
 539     }
 540     if (cachedBound != bound) {
 541       sim_printf ("cachedBound %01o != bound %01o\r\n", cachedBound, bound);
 542       err = true;
 543     }
 544     if (cachedPaged != paged) {
 545       sim_printf ("cachedPaged %01o != paged %01o\r\n", cachedPaged, paged);
 546       err = true;
 547     }
 548     if (err) {
 549 # if defined(HDBG)
 550       HDBGPrint ();
 551 # endif
 552       sim_printf ("ins fetch err  %d %05o:%06o\r\n", evcnt, cpu.TPR.TSR, cpu.TPR.CA);
 553       exit (1);
 554     }
 555     //sim_printf ("hit  %d %05o:%06o\r\n", evcnt, cpu.TPR.TSR, cpu.TPR.CA);
 556 # if defined(HDBG)
 557     hdbgNote ("doAppendCycleInstructionFetch.h", "test hit %d %05o:%06o\r\n", evcnt, cpu.TPR.TSR, cpu.TPR.CA);
 558 # endif
 559   } else {
 560     //sim_printf ("miss %d %05o:%06o\r\n", evcnt, cpu.TPR.TSR, cpu.TPR.CA);
 561 # if defined(HDBG)
 562     hdbgNote ("doAppendCycleInstructionFetch.h", "test miss %d %05o:%06o\r\n", evcnt, cpu.TPR.TSR, cpu.TPR.CA);
 563 # endif
 564   }
 565 #endif
 576 
 577   ucCacheSave (cpup, this, cpu.TPR.TSR, cpu.TPR.CA, bound, p, pageAddress, RSDWH_R1, paged);
 578   evcnt ++;
 579   // isolts 870
 580   cpu.cu.XSF = 1;
 581   sim_debug (DBG_TRACEEXT, & cpu_dev, "loading of cpu.TPR.TSR sets XSF to 1\n");
 582 
 583   core_readN (cpup, finalAddress, data, nWords, "INSTRUCTION_FETCH");
 584 
 585 ////////////////////////////////////////
 586 //
 587 // Sheet 10: "K", "L", "M", "N"
 588 //
 589 ////////////////////////////////////////
 590 
 591 //L:; // Transfer or instruction fetch
 592 
 593   DBGAPP ("doAppendCycleInstructionFetch(L)\n");
 594 
 595   // lastCycle == RTCD_OPERAND_FETCH
 596 
 597   if (i->opcode == 0610  && ! i->opcodeX) {
 598     // C(PPR.PRR) -> C(PRn.RNR) for n = (0, 1, ..., 7)
 599     // Use TRR here; PRR not set until KL
 600     CPTUR (cptUsePRn + 0);
 601     CPTUR (cptUsePRn + 1);
 602     CPTUR (cptUsePRn + 2);
 603     CPTUR (cptUsePRn + 3);
 604     CPTUR (cptUsePRn + 4);
 605     CPTUR (cptUsePRn + 5);
 606     CPTUR (cptUsePRn + 6);
 607     CPTUR (cptUsePRn + 7);
 608     cpu.PR[0].RNR =
 609     cpu.PR[1].RNR =
 610     cpu.PR[2].RNR =
 611     cpu.PR[3].RNR =
 612     cpu.PR[4].RNR =
 613     cpu.PR[5].RNR =
 614     cpu.PR[6].RNR =
 615     cpu.PR[7].RNR = cpu.TPR.TRR;
 616 #if defined(TESTING)
 617     HDBGRegPRW (0, "app rtcd");
 618     HDBGRegPRW (1, "app rtcd");
 619     HDBGRegPRW (2, "app rtcd");
 620     HDBGRegPRW (3, "app rtcd");
 621     HDBGRegPRW (4, "app rtcd");
 622     HDBGRegPRW (5, "app rtcd");
 623     HDBGRegPRW (6, "app rtcd");
 624     HDBGRegPRW (7, "app rtcd");
 625 #endif
 626   }
 627   goto KL;
 628 
 629 KL:
 630   DBGAPP ("doAppendCycleInstructionFetch(KL)\n");
 631 
 632   // C(TPR.TSR) -> C(PPR.PSR)
 633   cpu.PPR.PSR = cpu.TPR.TSR;
 634   // C(TPR.CA) -> C(PPR.IC)
 635   cpu.PPR.IC = cpu.TPR.CA;
 636 
 637   goto M;
 638 
 639 M: // Set P
 640   DBGAPP ("doAppendCycleInstructionFetch(M)\n");
 641 
 642   // C(TPR.TRR) = 0?
 643   if (cpu.TPR.TRR == 0) {
 644     // C(SDW.P) -> C(PPR.P)
 645     //cpu.PPR.P = cpu.SDW->P;
 646     cpu.PPR.P = p;
 647   } else {
 648     // 0 -> C(PPR.P)
 649     cpu.PPR.P = 0;
 650   }
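       // (PPR.P, the privileged-mode bit, is set only when executing in
       // ring 0 (TPR.TRR == 0) and the segment's SDW.P bit is on; otherwise
       // it is cleared.)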
 651 
 652   PNL (cpu.APUDataBusOffset = cpu.TPR.CA;)
 653   PNL (cpu.APUDataBusAddr = finalAddress;)
 654 
 655   PNL (L68_ (cpu.apu.state |= apu_FA;))
 656 
 657   DBGAPP ("doAppendCycleInstructionFetch (Exit) PRR %o PSR %05o P %o IC %06o\n",
 658           cpu.PPR.PRR, cpu.PPR.PSR, cpu.PPR.P, cpu.PPR.IC);
 659   DBGAPP ("doAppendCycleInstructionFetch (Exit) TRR %o TSR %05o TBR %02o CA %06o\n",
 660           cpu.TPR.TRR, cpu.TPR.TSR, cpu.TPR.TBR, cpu.TPR.CA);
 661 
 662   return finalAddress;
 663 }
 664 #undef TEST_UCACHE
