// IFSVMemSwap.bcpl - Interim File System VMem interface -- swappable portion
// Copyright Xerox Corporation 1979, 1982, 1983
// Last modified September 23, 1983  2:51 PM by Taft

get "IfsVMem.decl"
get "VMem.d"

external
[
// outgoing procedures
AllocateVMem; FreeVMem; PurgeVMem; FlushBuffers

// incoming procedures
VirtualPage; SnarfBuffers; UnsnarfBuffers; FlushBufs; PageGroupSize
IFSError; Dismiss

// incoming statics
vmdt; availableListChanged; @Bpt; @HASHMAP
]

manifest
[
// error codes
ecVMemAllocate = 2
ecPurgeLockedPage = 4
]

//----------------------------------------------------------------------------
let AllocateVMem(vmd, vmi, numPages) be
//----------------------------------------------------------------------------
// A primitive first-fit virtual memory allocator.  Initializes the VMD to
// describe the vmem region allocated.  Does not touch the VMI.
// Assumes that the VMDT contains "zero" and "infinity" entries.
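// A hypothetical caller allocates the VMD and VMI objects elsewhere and then
// calls AllocateVMem(vmd, vmi, nPages); on return, vmd>>VMD.base and
// vmd>>VMD.length describe the assigned range of virtual pages.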
[
// Round numPages up to the next multiple of 4 vmem pages, leaving a gap of
// at least one vmem page so as to prevent DOPAGEIO calls from bridging VMDs
vmd>>VMD.length = numPages
numPages = (numPages+4) & -4
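// For example, numPages=7 becomes 8 (leaving a 1-page gap) and numPages=8
// becomes 12 (a 4-page gap); VMD.length above still records the original request.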

// Search for a free vmem region
vmd>>VMD.base = 0
unless vmdt>>VMDT.length eq maxVMD do
   for i = 1 to vmdt>>VMDT.length do
      [
      let tvmd = vmdt>>VMDT.vmd↑i
      if tvmd>>VMD.base-vmd>>VMD.base uge numPages then
         [  // Insert vmd into table
         vmd>>VMD.vmi = vmi
         for j = vmdt>>VMDT.length to i by -1 do
            vmdt>>VMDT.vmd↑(j+1) = vmdt>>VMDT.vmd↑j
         vmdt>>VMDT.length = vmdt>>VMDT.length+1
         vmdt>>VMDT.vmd↑i = vmd
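         // Bump the version stamp, presumably so that other VMem code caching
         // VMDT-derived state can notice that the table has changed.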
         vmdt>>VMDT.version = vmdt>>VMDT.version+1
         return
         ]
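      // No fit below this entry; advance the candidate base to just past it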
      vmd>>VMD.base = tvmd>>VMD.base + tvmd>>VMD.length
      ]

// Either the VMDT is full or we can't find a vmem block that is big enough
IFSError(ecVMemAllocate, numPages)
]

//----------------------------------------------------------------------------
and FreeVMem(vmd) be
//----------------------------------------------------------------------------
// Frees the virtual memory belonging to vmd.  Does not attempt to
// destroy the vmd (or vmi) objects, however.
[
// Flush pages and invalidate hash map
PurgeVMem(vmd)

// Remove vmd from table
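// The table is kept sorted by base, so every entry at or above vmd's base
// simply slides down one slot.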
for i = 1 to vmdt>>VMDT.length do
   if vmd>>VMD.base ule vmdt>>VMDT.vmd↑i>>VMD.base then
      vmdt>>VMDT.vmd↑i = vmdt>>VMDT.vmd↑(i+1)
vmdt>>VMDT.length = vmdt>>VMDT.length-1
vmdt>>VMDT.version = vmdt>>VMDT.version+1
]

//----------------------------------------------------------------------------
and PurgeVMem(vmd) be
//----------------------------------------------------------------------------
// Updates disk state and purges from memory any vPages belonging to vmd.
// This does not invalidate the vmd; it simply invalidates the hash map
// for all virtual addresses belonging to vmd, thereby requiring them to
// be read anew from disk on a subsequent reference.  This permits the
// range of virtual addresses belonging to vmd to be reassigned.
// If any of vmd's pages are locked, an error will result.
// The results are undefined (and possibly erroneous) if there are any
// concurrent references (VRRP, VWRP) to the virtual addresses in question.
[
let tries = 0
let cPage = 1  // Don't start at 0 -- VirtualPage(0) returns funny results
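// Scan every core page frame in the 64K-word address space, asking which
// virtual page (if any) currently occupies it.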
while cPage ls 1 lshift (16-logVMPageLength) do
   [
   let nPages = 1  // go one-at-a-time for pages NOT belonging to vmd
   let vPage = VirtualPage(cPage)
   if vPage-vmd>>VMD.base uls vmd>>VMD.length then
      [
      // vPage belongs to vmd.  Assert: vPage is the first page of a group.
      // Snarf all pages of the group at once so as to keep them together
      // on the replacement list.
      nPages = PageGroupSize(vPage); if nPages ls 0 then nPages = -nPages
      test SnarfBuffers(cPage, nPages, 0) eq 0
         ifso
            [
            // SnarfBuffers failed because the page is locked, possibly because
            // another process is in the midst of swapping it out.
            // Allow up to 10 seconds for that to finish.
            tries = tries+1
            if tries gr 100 then
               IFSError(ecPurgeLockedPage, cPage lshift logVMPageLength)
            Dismiss(10)

            // Careful: now re-evaluate whether VirtualPage(cPage) still
            // belongs to vmd, since it might have gotten flushed by
            // some other process in the meantime.
            loop
            ]
         ifnot
            UnsnarfBuffers(cPage, nPages)
      ]
   cPage = cPage+nPages
   tries = 0
   ]
]

//----------------------------------------------------------------------------
and FlushBuffers(thoroughly; numargs na) be
//----------------------------------------------------------------------------
// "Safe" replacement for the FlushBuffers procedure in VMemAux.
// Flushes only pages that are on the available list, thereby circumventing
// the notorious "Can't flush locked page" bug.
// If thoroughly is true, guarantees to flush all pages that were dirty at the
// time of the call, even in the face of other concurrent VMem activity that
// causes the available list to be reordered.  If thoroughly is false or omitted,
// may leave some dirty pages behind in the face of concurrent VMem activity.
[ // repeat
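// availableListChanged is cleared below and is presumably set elsewhere in VMem
// whenever the available list is reordered; in "thoroughly" mode the repeatwhile
// at the bottom then forces another complete pass.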
availableListChanged = false
let cPage = @Bpt
until cPage eq 0 do
   [
   if (HASHMAP + (Bpt+cPage)>>BPT.HASHX*2)>>HM.CLEAN eq 0 then
      [
      // Found dirty page; flush it out (but do not remove or mark empty)
      FlushBufs(cPage, 1, 0)

      // Note: at first blush it would seem that continuing to follow the list
      // from cPage is now unsafe, since context switches occurred during FlushBufs and
      // there might have been other VMem activity that could reorder the list.
      // However, FlushBufs locked the page while flushing it, which means that
      // FindFreeBuf cannot have selected the page for replacement, and that is
      // the only way by which pages are removed from the list.  Therefore, cPage
      // is still on the list, though other parts of the list may have been
      // reordered.

      // Now attempt to skip to the end of the page group, so as to avoid
      // flushing a dirty locked page group multiple times.  This works only
      // if pages of a group are together in the available list, a
      // condition which CleanupLocks attempts to maintain.  The following
      // statement will skip to the end of the group assuming that the pages
      // in the group are chained together in ascending order of page number.
      while (HASHMAP + (Bpt+cPage)>>BPT.HASHX*2)>>HM.NLPG ne 0 &
       (Bpt+cPage)>>BPT.NEXT eq cPage+1 do
         cPage = cPage+1
      ]
   cPage = (Bpt+cPage)>>BPT.NEXT
   ]
] repeatwhile na gr 0 & thoroughly & availableListChanged