// IfsVMemMain.bcpl -- Virtual memory fault handler
// Copyright Xerox Corporation 1982
// Last modified September 17, 1982  4:15 PM by Taft

// Derived from:
// VMEM - virtual memory package, by P. Deutsch
// last edited September 15, 1977  3:10 PM

get "vmem.d"

external  // entry procedures
[
LockOnly; LockReloc; LockZero  // (addr, new, flag) -> ok
// (from assembly code)
MAPTRAP  // (vpage, wflag, hptr)
LockCell  // (lvlock[, proc])
UnlockCell  // (lvlock)
// for VMEMAUX
FindFreeBuf
FlushBufs
DoLocks
]

external  // entry statics
[
@HASHMAP; @HASHMAPSIZE; @HASHMAPMASK
@ReprobeInc
// for VMEMAUX
@HASHMAPSIZE2
@HASHMAP1
@HASHMAPTOP
EMPTYXX
NAXX
@Bpt; @BptLast
LockedCells; EndLockedCells; LastLockedCell
availableListChanged
]

external  // procedures
[
// O.S.
MoveBlock; SetBlock; Zero
IFSError
Usc

// VMemA
REHASHMAP

// User-supplied
CleanupLocks  // ()
DOPAGEIO  // (VP, core, # of pages, write flag)
PageGroupBase  // (VP) -> VP
PageGroupSize  // (VP) -> # of pages & write group flag
PageGroupAlign  // (VP) -> core alignment mask
PageType  // (VP) -> new page flag
NoBufsProc
]

external  // statics
[
// O.S.
@oneBits
]

static
[
HASHMAP
HASHMAPSIZE  // MUST BE POWER OF 2
HASHMAPSIZE2
HASHMAPMASK
HASHMAP1
HASHMAPTOP
EMPTYXX  // HASHX of empty buffers, lshift 8
NAXX  // HASHX of unavailable buffers, lshift 8
Bpt  // pointer to base of buffer pointer table;
     // @Bpt contains head of available list (0 if empty)
BptLast  // Bpt index of tail of available list (0 if empty)
LockedCells; LastLockedCell; EndLockedCells
ReprobeInc = RepInc
AnyDirty
LockOnly  // = FalsePredicate
LockZero  // = TruePredicate
availableListChanged
]

manifest
[
ecIllegalVP = 45
ecIllegalPageType = 46
ecFlushLockedPage = 47
ecLockListFull = 48
]

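// Illustrative sketch, not part of the original source: the user-supplied
// procedures declared above describe the backing store to this package.  A
// minimal client whose pages are all single, pre-existing, and unaligned might
// supply something like the following (the bodies are hypothetical; the
// contracts are taken from the declarations above and from the way MAPTRAP
// interprets the results):
//
// let PageGroupBase(vp) = vp     // every page is its own group
// and PageGroupSize(vp) = 1      // one page; a negative count marks a write group
// and PageGroupAlign(vp) = 0     // no core alignment constraint
// and PageType(vp, wflag) = 1    // 1 = existing page (read it in); -1 = new page (zero it)
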
// PROCESS MAP TRAP, CALLED FROM ASSEMBLY CODE

let MAPTRAP(VPG, WFLAG, HPTR) be
[
if VPG uge MinDummyVP then IFSError(ecIllegalVP)
if @HPTR ne 0 then
   [  // FIRST WRITE TO CLEAN PAGE
   HPTR>>HM.CLEAN = 0
   AnyDirty = true
   return
   ]
CleanupLocks()
let ptype = PageType(VPG, WFLAG)
let NEWPAGEFLAG = selecton ptype into
   [
   case 1: false
   case -1: true
   default: IFSError(ecIllegalPageType)
   ]
let VPAGE = PageGroupBase(VPG)
let VNPGS = PageGroupSize(VPAGE)
let ALIGN = PageGroupAlign(VPAGE)
let WG = 0
if VNPGS ls 0 then  // this is a write group
   WG, VNPGS = WGROUPbit, -VNPGS

// Mark locked pages as referenced
for I = LockedCells by LCsize to LastLockedCell-LCsize do
   unless (I>>LC.proc)(I>>LC.addr, 0, false) do
      [
      let hp = HASHMAP1+(Bpt+@(I>>LC.addr) rshift PS)>>BPT.HASHX*2
      if (@hp & DUMMYbit) eq 0 then @hp = @hp & not NOTREFbit
      ]

// Now scan the available list and ensure that every page group containing any
// referenced pages has all its pages marked referenced.  This keeps all the
// pages of a group together on the available list and thereby improves the
// performance of the replacement algorithm.  (Also, FlushBuffers expects
// to see pages of a group together on the available list.)
let anyRef = nil
for cPage = 1 to BptSize-1 do
   [
   let hp = HASHMAP + (Bpt+cPage)>>BPT.HASHX*2
   if hp>>HM.DUMMY eq 0 then
      [
      if hp>>HM.NFPG eq 0 then anyRef = false  // first page of group
      anyRef = anyRef % (hp>>HM.NOTREF eq 0)  // accumulate reference bits
      if hp>>HM.NLPG eq 0 & anyRef then  // last page of group
         [  // scan backward from cPage to first page of group, marking pages referenced
         let i = cPage
            [  // repeat
            hp = HASHMAP + (Bpt+i)>>BPT.HASHX*2
            hp>>HM.NOTREF = 0
            i = i-1
            ] repeatuntil hp>>HM.NFPG eq 0
         ]
      ]
   ]

// Reorder available list based on reference bits
UpdateChain()

// Find available buffer and flush it
let tn = nil
   [
   tn = FindFreeBuf(VNPGS, ALIGN, 0)
   if tn ne 0 break
   NoBufsProc()
   ] repeat
FlushBufs(tn, VNPGS, -1)

// READ IN NEW PAGE
let CORE = tn lshift PS
test NEWPAGEFLAG
   ifso Zero(CORE, VNPGS lshift PS)
   ifnot DOPAGEIO(VPAGE, CORE, VNPGS, false)

ADDTOMAP(tn, VNPGS, VPAGE, (NEWPAGEFLAG % WFLAG? 0, CLEANbit)+WG)
]

and getgroup(i, mask, d) = valof
[
while ((HASHMAP+(Bpt+i)>>BPT.HASHX*2)>>HM.FLAGWD & mask) ne 0 do i = i+d
resultis i
]

and FindFreeBuf(nb, mask, wanted) = valof
[
let tn = @Bpt
let blk = vec BptSize
// blk!i eq 0 means page not reached yet
// blk!i gr 0 means buffer blk!i is first of available block
// blk!i ls 0 means buffer i begins av. block of length -blk!i
Zero(blk, BptSize)
while tn ne 0 do
   [
   let btn = nil
   unless DoLocks(tn lshift PS, 0, false) do
      [
      blk!tn = -1
      btn = mergeblocks(tn, blk)
      if ((-btn & mask)+blk!btn+nb le 0) & ((HASHMAP+(Bpt+btn)>>BPT.HASHX*2)>>HM.NFPG eq 0) then
         [
         let rtn = btn+(-btn & mask)
         if (wanted eq 0) % ((wanted ge rtn) & (wanted ls btn-blk!btn)) resultis rtn
         ]
      ]
   tn = Bpt!tn & NEXTmask
   ]
resultis 0
]

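// Worked example, not part of the original source: with the blk encoding used in
// FindFreeBuf above, merging available buffers 5, 6, and 7 into one block leaves
// blk!5 = -3 (a block of length 3 starts at buffer 5) and blk!7 = 5 (the last
// buffer points back to the first); entries in the interior of a block are never
// consulted.  In the merge table below, rows name the lower block and columns the
// block that follows it; for instance a head block followed by the tail block of
// the same page group (row H, column T) merges into a closed block C.
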
and mergeblocks(otn, blk) = valof
// Available blocks are of four types:
// closed (C), consisting of an integral number of single pages and complete page groups;
// head (H), consisting only of some initial pages of a page group;
// tail (T), consisting only of some final pages of a page group;
// interior (I), consisting only of some interior pages of a page group.
// The following table specifies whether a given pair of adjacent blocks
// may be merged (gives the type of the merged block),
// must be left separate (-),
// or is impossible (?):
//    C  H  I  T
// C  C  -  ?  ?
// H  ?  ?  H  C
// I  ?  ?  I  T
// T  -  -  ?  ?
// It is easy to distinguish block types on the basis of
// the group bits in their first and last pages:
// C has (~NF,~NL);
// H has (~NF+NL,NF+NL);
// T has (NF+NL,NF+~NL);
// I has (NF+NL,NF+NL).
// When we want to add a page to the set available as candidates for replacement,
// we first convert it into a one-page block of the appropriate type,
// and then do merging according to the table above.
// In fact, it is easiest to detect the cases where merging is forbidden (Tx or xH),
// and merge in all other cases.
[
let tn = otn
   [
   let bp = blk+tn
   if @bp eq 0 then
      [
      bp = blk+otn
      resultis (@bp ls 0? otn, @bp)
      ]
   let tn0 = (@bp ls 0? tn, @bp)
   let bp1 = bp+1
   let tp = Bpt+tn
   unless @bp1 eq 0 %
      ((HASHMAP1!((Bpt+tn0)>>BPT.HASHX*2)&NFPGbit) ne 0 & ((HASHMAP1!(tp>>BPT.HASHX*2))&NLPGbit) eq 0) %  // Tx, can't merge
      ((HASHMAP1!((tp-@bp1)>>BPT.HASHX*2)&NLPGbit) ne 0 & ((HASHMAP1!((tp+1)>>BPT.HASHX*2))&NFPGbit) eq 0) do  // xH, can't merge
      // OK to merge
      [
      let bp0 = blk+tn0
      @bp0 = @bp0+@bp1  // add lengths
      let lenm1 = -@bp0-1
      bp0!lenm1 = tn0  // mark top
      otn = tn0+lenm1  // move to top of block
      tn0 = otn+1
      ]
   tn = tn0-1
   ] repeat
]

and RemoveBufs(tn, NPGS) be
// Remove the affected buffers from the chain
[
let lasttn = tn+NPGS-1
let I, N = 0, NPGS
   [
   let ip = Bpt+I
   I = ip>>BPT.NEXT
   while (I ge tn) & (I le lasttn) do
      [
      I = (Bpt+I)>>BPT.NEXT
      @ip = (@ip & not NEXTmask)+I
      N = N-1
      if N eq 0 then
         [
         if I eq 0 then BptLast = ip-Bpt  // removed former tail buffer
         return
         ]
      ]
   ] repeat
availableListChanged = true
]

and UpdateChain() be  // Reorder the chain according to use
[
let I = @Bpt
@Bpt = 0
let head1 = 0
let chain0, chain1 = Bpt, lv head1
   [
   let ip = Bpt+I
   let HP = HASHMAP1+(@ip)<<BPT.HASHX*2
   let next = ip>>BPT.NEXT
   test (@HP & NOTREFbit) eq 0
      ifso  // referenced: re-arm the bit and append to the rear chain
         [
         @HP = @HP % NOTREFbit
         chain1>>BPT.NEXT = I
         chain1 = ip
         ]
      ifnot  // not referenced: keep on the front chain, to be replaced first
         [
         chain0>>BPT.NEXT = I
         chain0 = ip
         ]
   I = next
   ] repeatuntil I eq 0
chain1>>BPT.NEXT = 0
chain0>>BPT.NEXT = head1
BptLast = (head1 eq 0? chain0, chain1)-Bpt
availableListChanged = true
]

and ADDTOMAP(tn, NPGS, VP, newbits) be
[ADM
// RESET THE PAGED-OUT ENTRY
if NPGS gr 1 then newbits = newbits+NLPGbit
(Bpt+BptLast)>>BPT.NEXT = tn
BptLast = tn+NPGS-1
let N = tn
while N ls BptLast do
   [
   Bpt!N = N+1
   MakeMapEntry(VP, N, newbits)
   newbits = newbits % NFPGbit
   VP = VP+1
   N = N+1
   ]
Bpt!N = 0
MakeMapEntry(VP, N, newbits & not NLPGbit)
availableListChanged = true
]ADM

and MakeMapEntry(VP, tn, bits) be
[
let hp = REHASHMAP(VP)
let bp = Bpt+tn
test hp eq 0
   ifso  // page already in core as a side effect of some external call
      @bp = (@bp & NEXTmask) + EMPTYXX
   ifnot
      [
      hp>>HM.NKEY = not VP
      hp>>HM.FLAGWD = (tn-VP) lshift 8 + bits
      bp>>BPT.HASHX = (hp-HASHMAP) rshift 1
      ]
]

and corepage(hp) = (hp>>HM.FLAGWD+(not hp>>HM.NKEY) lshift 8) rshift 8

and DeleteMapEntry(hp) be
[
if hp>>HM.DUMMY ne 0 return
let oldvp = not hp>>HM.NKEY
let i = corepage(hp)
let bp = Bpt+i
@bp = (@bp & NEXTmask) + EMPTYXX
hp>>HM.NKEY = 0
   [
   hp = hp+ReprobeInc
   if (hp-HASHMAPTOP) ge 0 then hp = hp-HASHMAPSIZE
   let key = not hp>>HM.NKEY
   if key eq -1 break
   hp>>HM.NKEY = 0
   let hp1 = REHASHMAP(key)
   hp1>>HM.NKEY = not key
   if hp1 ne hp then
      [
      hp1>>HM.FLAGWD = hp>>HM.FLAGWD
      (Bpt+corepage(hp1))>>BPT.HASHX = (hp1-HASHMAP) rshift 1
      ]
   ] repeat
]

and FlushBufs(tn, N, empty) be
// Empty=0 means just mark clean,
// >0 means also remove from chain,
// <0 means mark empty
[
if empty then RemoveBufs(tn, N)
let l = getgroup(tn+N-1, NLPGbit, 1)
tn = getgroup(tn, NFPGbit, -1)
   [
   AnyDirty = false
   let f = 0  // TN of first dirty buffer in sequence, or 0
   let key = nil  // VP of first dirty buffer (i.e. buffer f)
   let lkey = nil  // VP of last dirty buffer
   let grouptn = nil  // TN of first page of group
   for i = tn to l do
      [
   try: let locked = DoLocks(i lshift PS, 0, empty)
      if locked & empty then IFSError(ecFlushLockedPage)
      let HP = HASHMAP+(Bpt+i)>>BPT.HASHX*2
      let k = not HP>>HM.NKEY
      let flags = HP>>HM.FLAGWD
      if (flags&NFPGbit) eq 0 then grouptn = i  // Remember beginning of group
      test ((flags&CLEANbit) eq 0) & ((f eq 0) % ((k eq lkey+1) & ((flags&NFPGbit) ne 0)))
         ifso  // first dirty buffer, or in sequence
            [
            if f eq 0 then  // first one, check for a write group
               test (flags&WGROUPbit) ne 0
                  ifso  // write whole group (by dirtying the rest of it)
                     [
                     f = grouptn
                     key = k+f-i  // but lkey = k still
                     for j = i+1 to getgroup(i, NLPGbit, 1) do
                        (HASHMAP+(Bpt+j)>>BPT.HASHX*2)>>HM.CLEAN = 0
                     ]
                  ifnot  // just this page
                     f, key = i, k
            lkey = k
            if not locked then HP>>HM.FLAGWD = flags+CLEANbit
            ]
         ifnot  // end of sequence
            [
            if f ne 0 then
               [
               flushout(key, f, i)
               f = 0
               goto try
               ]
            ]
      if empty then DeleteMapEntry(HP)
      ]
   if f ne 0 then flushout(key, f, l+1)
   ] repeatwhile AnyDirty
]

and flushout(key, f, lim) be
[
let lock = f lshift PS
LockCell(lv lock)  // in case of recursive VMEM call
DOPAGEIO(key, lock, lim-f, true)
UnlockCell(lv lock)
]

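// Usage note, not part of the original source: the sign of the empty argument
// selects how much of the buffer state FlushBufs tears down.  MAPTRAP above
// calls FlushBufs(tn, VNPGS, -1) to write back and then empty the buffers it is
// about to reuse; a caller that merely wanted dirty pages written back, leaving
// the map and the available list untouched, would pass empty = 0.
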
and LockCell(lvLock, proc; numargs n) = valof
[
if (n ls 2) % (proc eq 0) then proc = LockOnly
UnlockCell(lvLock)
if LastLockedCell eq EndLockedCells then IFSError(ecLockListFull)
LastLockedCell>>LC.addr, LastLockedCell>>LC.proc = lvLock, proc
LastLockedCell = LastLockedCell + LCsize
]

and UnlockCell(lvLock) = valof
[
for I = LastLockedCell-LCsize by -LCsize to LockedCells do
   if I>>LC.addr eq lvLock then
      [
      LastLockedCell = LastLockedCell - LCsize
      MoveBlock(I, LastLockedCell, LCsize)
      resultis true
      ]
resultis false
]

and DoLocks(addr, newpa, flag) = valof
[
let oldpa = addr & (not WM)
let I = LockedCells
LastLockedCell>>LC.addr = lv addr
LastLockedCell>>LC.proc = dlexit
   [
   if (@(I>>LC.addr) & (not WM)) eq oldpa then
      [
      let addr = I>>LC.addr  // in case lock proc unlocks cell
      let new = (newpa eq 0? 0, @addr+newpa-oldpa)
      unless (I>>LC.proc)(addr, new, flag) resultis true
      if flag then @addr = new
      ]
   I = I+LCsize
   ] repeat
dlexit: resultis false
]

// and LockOnly(addr, new, flag) = false

and LockReloc(addr, new, flag) = new ne 0

// and LockZero(addr, new, flag) = true
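
// Illustrative summary, not part of the original source, based on a reading of
// DoLocks above: each locked cell holds an address into the buffer space plus a
// procedure that DoLocks consults when the page the cell points into is about to
// be replaced, emptied, or relocated.  A proc answering false makes DoLocks
// report the page as locked; a proc answering true lets the operation proceed,
// and when flag is set the cell is updated to the new address.  Thus LockOnly
// (always false) simply pins the page, LockZero (always true) lets the page go
// and leaves the cell zeroed, and LockReloc follows a relocation but refuses
// replacement.  A hypothetical client, modeled on flushout, might read:
//
// let UseBuffer(vaddr) be     // UseBuffer and vaddr are illustrative names only
// [
// let lock = vaddr
// LockCell(lv lock)           // default proc LockOnly: keep the page in core
// // ... touch the page at vaddr; a map trap here cannot replace it ...
// UnlockCell(lv lock)
// ]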