// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"internal/goarch"
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

// addb returns the byte pointer p+n.
//
//go:nowritebarrier
//go:nosplit
func addb(p *byte, n uintptr) *byte {
	// Note: wrote out full expression instead of calling add(p, n)
	// to reduce the number of temporaries generated by the
	// compiler for this trivial expression during inlining.
	return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + n))
}

// subtractb returns the byte pointer p-n.
//
//go:nowritebarrier
//go:nosplit
func subtractb(p *byte, n uintptr) *byte {
	// Note: wrote out full expression instead of calling add(p, -n)
	// to reduce the number of temporaries generated by the
	// compiler for this trivial expression during inlining.
	return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) - n))
}

// add1 returns the byte pointer p+1.
//
//go:nowritebarrier
//go:nosplit
func add1(p *byte) *byte {
	// Note: wrote out full expression instead of calling addb(p, 1)
	// to reduce the number of temporaries generated by the
	// compiler for this trivial expression during inlining.
	return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + 1))
}

// subtract1 returns the byte pointer p-1.
//
// nosplit because it is used during write barriers and must not be preempted.
//
//go:nowritebarrier
//go:nosplit
func subtract1(p *byte) *byte {
	// Note: wrote out full expression instead of calling subtractb(p, 1)
	// to reduce the number of temporaries generated by the
	// compiler for this trivial expression during inlining.
	return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) - 1))
}

// markBits provides access to the mark bit for an object in the heap.
// bytep points to the byte holding the mark bit.
// mask is a byte with a single bit set that can be &ed with *bytep
// to see if the bit has been set.
// *m.bytep&m.mask != 0 indicates the mark bit is set.
// index can be used along with span information to generate
// the address of the object in the heap.
// We maintain one set of mark bits for allocation and one for
// marking purposes.
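// As an illustrative example (index 11 is an arbitrary value), gcBits.bitp
// maps object index 11 to the bitmap byte at offset 11/8 = 1 with mask
// 1<<(11%8) = 0x08, so that object's bit is set iff *bytep&0x08 != 0.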
type markBits struct {
	bytep *uint8
	mask  uint8
	index uintptr
}

//go:nosplit
func (s *mspan) allocBitsForIndex(allocBitIndex uintptr) markBits {
	bytep, mask := s.allocBits.bitp(allocBitIndex)
	return markBits{bytep, mask, allocBitIndex}
}

// refillAllocCache takes 8 bytes of s.allocBits starting at whichByte
// and negates them so that ctz (count trailing zeros) instructions
// can be used. It then places these 8 bytes into the cached 64 bit
// s.allocCache.
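// Illustrative example (byte values assumed): if the 8 bytes at whichByte
// are 0x07 0x00 0x00 0x00 0x00 0x00 0x00 0x00, only the first three objects
// in this group are allocated, so s.allocCache becomes ^0x07 =
// 0xfffffffffffffff8 and sys.TrailingZeros64 finds the first free slot at 3.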
func (s *mspan) refillAllocCache(whichByte uint16) {
	bytes := (*[8]uint8)(unsafe.Pointer(s.allocBits.bytep(uintptr(whichByte))))
	aCache := uint64(0)
	aCache |= uint64(bytes[0])
	aCache |= uint64(bytes[1]) << (1 * 8)
	aCache |= uint64(bytes[2]) << (2 * 8)
	aCache |= uint64(bytes[3]) << (3 * 8)
	aCache |= uint64(bytes[4]) << (4 * 8)
	aCache |= uint64(bytes[5]) << (5 * 8)
	aCache |= uint64(bytes[6]) << (6 * 8)
	aCache |= uint64(bytes[7]) << (7 * 8)
	s.allocCache = ^aCache
}

// nextFreeIndex returns the index of the next free object in s at
// or after s.freeindex.
// There are hardware instructions that can be used to make this
// faster if profiling warrants it.
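// Note: as described for the allocCache field in mheap.go, s.allocCache
// holds the complement of the alloc bits, shifted so that bit 0 corresponds
// to s.freeindex; a set bit therefore means "free", which is what lets
// TrailingZeros64 locate the next free object directly.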
func (s *mspan) nextFreeIndex() uint16 {
	sfreeindex := s.freeindex
	snelems := s.nelems
	if sfreeindex == snelems {
		return sfreeindex
	}
	if sfreeindex > snelems {
		throw("s.freeindex > s.nelems")
	}

	aCache := s.allocCache

	bitIndex := sys.TrailingZeros64(aCache)
	for bitIndex == 64 {
		// Move index to start of next cached bits.
		sfreeindex = (sfreeindex + 64) &^ (64 - 1)
		if sfreeindex >= snelems {
			s.freeindex = snelems
			return snelems
		}
		whichByte := sfreeindex / 8
		// Refill s.allocCache with the next 64 alloc bits.
		s.refillAllocCache(whichByte)
		aCache = s.allocCache
		bitIndex = sys.TrailingZeros64(aCache)
		// nothing available in cached bits
		// grab the next 8 bytes and try again.
	}
	result := sfreeindex + uint16(bitIndex)
	if result >= snelems {
		s.freeindex = snelems
		return snelems
	}

	s.allocCache >>= uint(bitIndex + 1)
	sfreeindex = result + 1

	if sfreeindex%64 == 0 && sfreeindex != snelems {
		// We just incremented s.freeindex so it isn't 0.
		// As each 1 in s.allocCache was encountered and used for allocation
		// it was shifted away. At this point s.allocCache contains all 0s.
		// Refill s.allocCache so that it corresponds
		// to the bits at s.allocBits starting at s.freeindex.
		whichByte := sfreeindex / 8
		s.refillAllocCache(whichByte)
	}
	s.freeindex = sfreeindex
	return result
}

// isFree reports whether the index'th object in s is unallocated.
//
// The caller must ensure s.state is mSpanInUse, and there must have
// been no preemption points since ensuring this (which could allow a
// GC transition, which would allow the state to change).
func (s *mspan) isFree(index uintptr) bool {
	if index < uintptr(s.freeIndexForScan) {
		return false
	}
	bytep, mask := s.allocBits.bitp(index)
	return *bytep&mask == 0
}

// divideByElemSize returns n/s.elemsize.
// n must be within [0, s.npages*_PageSize),
// or may be exactly s.npages*_PageSize
// if s.elemsize is from sizeclasses.go.
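//
// Illustrative example (the constant below is an assumption; real divMul
// values come from mksizeclasses.go): for elemsize 48, divMul could be
// ceil(2^32/48) = 89478486, and then (96*89478486)>>32 == 2 == 96/48.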
//
// nosplit, because it is called by objIndex, which is nosplit
//
//go:nosplit
func (s *mspan) divideByElemSize(n uintptr) uintptr {
	const doubleCheck = false

	// See explanation in mksizeclasses.go's computeDivMagic.
	q := uintptr((uint64(n) * uint64(s.divMul)) >> 32)

	if doubleCheck && q != n/s.elemsize {
		println(n, "/", s.elemsize, "should be", n/s.elemsize, "but got", q)
		throw("bad magic division")
	}
	return q
}

// nosplit, because it is called by other nosplit code like findObject
//
//go:nosplit
func (s *mspan) objIndex(p uintptr) uintptr {
	return s.divideByElemSize(p - s.base())
}

func markBitsForAddr(p uintptr) markBits {
	s := spanOf(p)
	objIndex := s.objIndex(p)
	return s.markBitsForIndex(objIndex)
}

func (s *mspan) markBitsForIndex(objIndex uintptr) markBits {
	bytep, mask := s.gcmarkBits.bitp(objIndex)
	return markBits{bytep, mask, objIndex}
}

func (s *mspan) markBitsForBase() markBits {
	return markBits{&s.gcmarkBits.x, uint8(1), 0}
}

// isMarked reports whether mark bit m is set.
func (m markBits) isMarked() bool {
	return *m.bytep&m.mask != 0
}

// setMarked sets the marked bit in the markbits, atomically.
func (m markBits) setMarked() {
	// Might be racing with other updates, so use atomic update always.
	// We used to be clever here and use a non-atomic update in certain
	// cases, but it's not worth the risk.
	atomic.Or8(m.bytep, m.mask)
}

// setMarkedNonAtomic sets the marked bit in the markbits, non-atomically.
func (m markBits) setMarkedNonAtomic() {
	*m.bytep |= m.mask
}

// clearMarked clears the marked bit in the markbits, atomically.
func (m markBits) clearMarked() {
	// Might be racing with other updates, so use atomic update always.
	// We used to be clever here and use a non-atomic update in certain
	// cases, but it's not worth the risk.
	atomic.And8(m.bytep, ^m.mask)
}

// markBitsForSpan returns the markBits for the span base address base.
func markBitsForSpan(base uintptr) (mbits markBits) {
	mbits = markBitsForAddr(base)
	if mbits.mask != 1 {
		throw("markBitsForSpan: unaligned start")
	}
	return mbits
}

// advance advances the markBits to the next object in the span.
func (m *markBits) advance() {
	if m.mask == 1<<7 {
		m.bytep = (*uint8)(unsafe.Pointer(uintptr(unsafe.Pointer(m.bytep)) + 1))
		m.mask = 1
	} else {
		m.mask = m.mask << 1
	}
	m.index++
}

// clobberdeadPtr is a special value that is used by the compiler to
// clobber dead stack slots when the -clobberdead flag is set.
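// On 64-bit targets ^uintptr(0)>>63 is 1, so the constant below is
// 0xdeaddeaddeaddead; on 32-bit targets the shift is 0 and it is 0xdeaddead.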
const clobberdeadPtr = uintptr(0xdeaddead | 0xdeaddead<<((^uintptr(0)>>63)*32))

// badPointer prints diagnostics for a bad pointer found in the heap and throws.
func badPointer(s *mspan, p, refBase, refOff uintptr) {
	// Typically this indicates an incorrect use
	// of unsafe or cgo to store a bad pointer in
	// the Go heap. It may also indicate a runtime
	// bug.
	//
	// TODO(austin): We could be more aggressive
	// and detect pointers to unallocated objects
	// in allocated spans.
	printlock()
	print("runtime: pointer ", hex(p))
	if s != nil {
		state := s.state.get()
		if state != mSpanInUse {
			print(" to unallocated span")
		} else {
			print(" to unused region of span")
		}
		print(" span.base()=", hex(s.base()), " span.limit=", hex(s.limit), " span.state=", state)
	}
	print("\n")
	if refBase != 0 {
		print("runtime: found in object at *(", hex(refBase), "+", hex(refOff), ")\n")
		gcDumpObject("object", refBase, refOff)
	}
	getg().m.traceback = 2
	throw("found bad pointer in Go heap (incorrect use of unsafe or cgo?)")
}

// findObject returns the base address for the heap object containing
// the address p, the object's span, and the index of the object in s.
// If p does not point into a heap object, it returns base == 0.
//
// If p is an invalid heap pointer and debug.invalidptr != 0,
// findObject panics.
//
// refBase and refOff optionally give the base address of the object
// in which the pointer p was found and the byte offset at which it
// was found. These are used for error reporting.
//
// It is nosplit so it is safe for p to be a pointer to the current goroutine's stack.
// Since p is a uintptr, it would not be adjusted if the stack were to move.
//
//go:nosplit
func findObject(p, refBase, refOff uintptr) (base uintptr, s *mspan, objIndex uintptr) {
	s = spanOf(p)
	// If s is nil, the virtual address has never been part of the heap.
	// This pointer may be to some mmap'd region, so we allow it.
	if s == nil {
		if (GOARCH == "amd64" || GOARCH == "arm64") && p == clobberdeadPtr && debug.invalidptr != 0 {
			// Crash if clobberdeadPtr is seen. Only on AMD64 and ARM64 for now,
			// as they are the only platforms where the compiler's clobberdead
			// mode is implemented. On these platforms clobberdeadPtr cannot be
			// a valid address.
			badPointer(s, p, refBase, refOff)
		}
		return
	}
	// If p is a bad pointer, it may not be in s's bounds.
	//
	// Check s.state to synchronize with span initialization
	// before checking other fields. See also spanOfHeap.
	if state := s.state.get(); state != mSpanInUse || p < s.base() || p >= s.limit {
		// Pointers into stacks are also ok; the runtime manages these explicitly.
		if state == mSpanManual {
			return
		}
		// The following ensures that we are rigorous about what data
		// structures hold valid pointers.
		if debug.invalidptr != 0 {
			badPointer(s, p, refBase, refOff)
		}
		return
	}

	objIndex = s.objIndex(p)
	base = s.base() + objIndex*s.elemsize
	return
}

// reflect_verifyNotInHeapPtr reports whether converting the not-in-heap pointer into an unsafe.Pointer is ok.
//
//go:linkname reflect_verifyNotInHeapPtr reflect.verifyNotInHeapPtr
func reflect_verifyNotInHeapPtr(p uintptr) bool {
	// Conversion to a pointer is ok as long as findObject above does not call badPointer.
	// Since we're already promised that p doesn't point into the heap, just disallow heap
	// pointers and the special clobbered pointer.
	return spanOf(p) == nil && p != clobberdeadPtr
}

const ptrBits = 8 * goarch.PtrSize

// bulkBarrierBitmap executes write barriers for copying from [src,
// src+size) to [dst, dst+size) using a 1-bit pointer bitmap. src is
// assumed to start maskOffset bytes into the data covered by the
// bitmap in bits (which may not be a multiple of 8).
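//
// Illustrative example (values assumed): with goarch.PtrSize == 8 and
// maskOffset == 24, word == 3, so scanning starts at bit 3 of the first
// bitmap byte; a bitmap byte that is entirely zero lets the loop skip the
// corresponding 8 words at once.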
//
// This is used by bulkBarrierPreWrite for writes to data and BSS.
//
//go:nosplit
func bulkBarrierBitmap(dst, src, size, maskOffset uintptr, bits *uint8) {
	word := maskOffset / goarch.PtrSize
	bits = addb(bits, word/8)
	mask := uint8(1) << (word % 8)

	buf := &getg().m.p.ptr().wbBuf
	for i := uintptr(0); i < size; i += goarch.PtrSize {
		if mask == 0 {
			bits = addb(bits, 1)
			if *bits == 0 {
				// Skip 8 words.
				i += 7 * goarch.PtrSize
				continue
			}
			mask = 1
		}
		if *bits&mask != 0 {
			dstx := (*uintptr)(unsafe.Pointer(dst + i))
			if src == 0 {
				p := buf.get1()
				p[0] = *dstx
			} else {
				srcx := (*uintptr)(unsafe.Pointer(src + i))
				p := buf.get2()
				p[0] = *dstx
				p[1] = *srcx
			}
		}
		mask <<= 1
	}
}

// typeBitsBulkBarrier executes a write barrier for every
// pointer that would be copied from [src, src+size) to [dst,
// dst+size) by a memmove using the type bitmap to locate those
// pointer slots.
//
// The type typ must correspond exactly to [src, src+size) and [dst, dst+size).
// dst, src, and size must be pointer-aligned.
// The type typ must have a plain bitmap, not a GC program.
// The only use of this function is in channel sends, and the
// 64 kB channel element limit takes care of this for us.
//
// Must not be preempted because it typically runs right before memmove,
// and the GC must observe the barrier and the memmove as a single atomic
// action.
//
// Callers must perform cgo checks if goexperiment.CgoCheck2.
//
//go:nosplit
func typeBitsBulkBarrier(typ *_type, dst, src, size uintptr) {
	if typ == nil {
		throw("runtime: typeBitsBulkBarrier without type")
	}
	if typ.Size_ != size {
		println("runtime: typeBitsBulkBarrier with type ", toRType(typ).string(), " of size ", typ.Size_, " but memory size", size)
		throw("runtime: invalid typeBitsBulkBarrier")
	}
	if typ.Kind_&kindGCProg != 0 {
		println("runtime: typeBitsBulkBarrier with type ", toRType(typ).string(), " with GC prog")
		throw("runtime: invalid typeBitsBulkBarrier")
	}
	if !writeBarrier.enabled {
		return
	}
	ptrmask := typ.GCData
	buf := &getg().m.p.ptr().wbBuf
	var bits uint32
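	// Each ptrmask byte describes 8 consecutive pointer-words: a fresh byte
	// is loaded every 8 iterations of the loop below, and between reloads
	// the byte is shifted right so that bit 0 always describes the word at
	// offset i.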
	for i := uintptr(0); i < typ.PtrBytes; i += goarch.PtrSize {
		if i&(goarch.PtrSize*8-1) == 0 {
			bits = uint32(*ptrmask)
			ptrmask = addb(ptrmask, 1)
		} else {
			bits = bits >> 1
		}
		if bits&1 != 0 {
			dstx := (*uintptr)(unsafe.Pointer(dst + i))
			srcx := (*uintptr)(unsafe.Pointer(src + i))
			p := buf.get2()
			p[0] = *dstx
			p[1] = *srcx
		}
	}
}

// countAlloc returns the number of objects allocated in span s by
// scanning the mark bitmap.
func (s *mspan) countAlloc() int {
	count := 0
	bytes := divRoundUp(uintptr(s.nelems), 8)
	// Iterate over each 8-byte chunk and count allocations
	// with an intrinsic. Note that newMarkBits guarantees that
	// gcmarkBits will be 8-byte aligned, so we don't have to
	// worry about edge cases; irrelevant bits will simply be zero.
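	// For example (nelems value assumed): with s.nelems == 100, bytes == 13,
	// so the loop below performs two 8-byte reads covering bits 0..127;
	// bits 100..127 are zero and do not affect the count.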
	for i := uintptr(0); i < bytes; i += 8 {
		// Extract 64 bits from the byte pointer and get an OnesCount.
		// Note that the unsafe cast here doesn't preserve endianness,
		// but that's OK. We only care about how many bits are 1, not
		// about the order we discover them in.
		mrkBits := *(*uint64)(unsafe.Pointer(s.gcmarkBits.bytep(i)))
		count += sys.OnesCount64(mrkBits)
	}
	return count
}

// readUintptr reads the bytes starting at the aligned pointer p into a uintptr,
// interpreting them in little-endian order regardless of host endianness.
func readUintptr(p *byte) uintptr {
	x := *(*uintptr)(unsafe.Pointer(p))
	if goarch.BigEndian {
		if goarch.PtrSize == 8 {
			return uintptr(sys.Bswap64(uint64(x)))
		}
		return uintptr(sys.Bswap32(uint32(x)))
	}
	return x
}

var debugPtrmask struct {
	lock mutex
	data *byte
}

// progToPointerMask returns the 1-bit pointer mask output by the GC program prog.
// size is the size of the region described by prog, in bytes.
// The resulting bitvector will have no more than size/goarch.PtrSize bits.
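//
// Sizing example (size value assumed): on a 64-bit target with size == 1024
// the region holds 128 pointer-sized words, so n == (128+7)/8 == 16 bitmap
// bytes; one extra byte is allocated and set to the sentinel 0xa1 so that an
// overrun by runGCProg can be detected.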
func progToPointerMask(prog *byte, size uintptr) bitvector {
	n := (size/goarch.PtrSize + 7) / 8
	x := (*[1 << 30]byte)(persistentalloc(n+1, 1, &memstats.buckhash_sys))[:n+1]
	x[len(x)-1] = 0xa1 // overflow check sentinel
	n = runGCProg(prog, &x[0])
	if x[len(x)-1] != 0xa1 {
		throw("progToPointerMask: overflow")
	}
	return bitvector{int32(n), &x[0]}
}

// Packed GC pointer bitmaps, aka GC programs.
//
// For large types containing arrays, the type information has a
// natural repetition that can be encoded to save space in the
// binary and in the memory representation of the type information.
//
// The encoding is a simple Lempel-Ziv style bytecode machine
// with the following instructions:
//
//	00000000: stop
//	0nnnnnnn: emit n bits copied from the next (n+7)/8 bytes
//	10000000 n c: repeat the previous n bits c times; n, c are varints
//	1nnnnnnn c: repeat the previous n bits c times; c is a varint
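//
// Illustrative example (a hand-written program, not one emitted by the
// compiler): the byte sequence
//
//	0x02 0x01 0x82 0x03 0x00
//
// means "emit 2 literal bits from the next byte (0x01, i.e. bits 1,0),
// repeat the previous 2 bits 3 times, stop". runGCProg expands this to the
// 8 bits 1,0,1,0,1,0,1,0 (the single byte 0x55) and returns 8.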

// runGCProg returns the number of 1-bit entries written to memory.
func runGCProg(prog, dst *byte) uintptr {
	dstStart := dst

	// Bits waiting to be written to memory.
	var bits uintptr
	var nbits uintptr

	p := prog
Run:
	for {
		// Flush accumulated full bytes.
		// The rest of the loop assumes that nbits <= 7.
		for ; nbits >= 8; nbits -= 8 {
			*dst = uint8(bits)
			dst = add1(dst)
			bits >>= 8
		}

		// Process one instruction.
		inst := uintptr(*p)
		p = add1(p)
		n := inst & 0x7F
		if inst&0x80 == 0 {
			// Literal bits; n == 0 means end of program.
			if n == 0 {
				// Program is over.
				break Run
			}
			nbyte := n / 8
			for i := uintptr(0); i < nbyte; i++ {
				bits |= uintptr(*p) << nbits
				p = add1(p)
				*dst = uint8(bits)
				dst = add1(dst)
				bits >>= 8
			}
			if n %= 8; n > 0 {
				bits |= uintptr(*p) << nbits
				p = add1(p)
				nbits += n
			}
			continue Run
		}

		// Repeat. If n == 0, it is encoded in a varint in the next bytes.
		if n == 0 {
			for off := uint(0); ; off += 7 {
				x := uintptr(*p)
				p = add1(p)
				n |= (x & 0x7F) << off
				if x&0x80 == 0 {
					break
				}
			}
		}

		// Count is encoded in a varint in the next bytes.
		c := uintptr(0)
		for off := uint(0); ; off += 7 {
			x := uintptr(*p)
			p = add1(p)
			c |= (x & 0x7F) << off
			if x&0x80 == 0 {
				break
			}
		}
		c *= n // now total number of bits to copy

		// If the number of bits being repeated is small, load them
		// into a register and use that register for the entire loop
		// instead of repeatedly reading from memory.
		// Handling fewer than 8 bits here makes the general loop simpler.
		// The cutoff is goarch.PtrSize*8 - 7 to guarantee that when we add
		// the pattern to a bit buffer holding at most 7 bits (a partial byte)
		// it will not overflow.
		src := dst
		const maxBits = goarch.PtrSize*8 - 7
		if n <= maxBits {
			// Start with bits in output buffer.
			pattern := bits
			npattern := nbits

			// If we need more bits, fetch them from memory.
			src = subtract1(src)
			for npattern < n {
				pattern <<= 8
				pattern |= uintptr(*src)
				src = subtract1(src)
				npattern += 8
			}

			// We started with the whole bit output buffer,
			// and then we loaded bits from whole bytes.
			// Either way, we might now have too many instead of too few.
			// Discard the extra.
			if npattern > n {
				pattern >>= npattern - n
				npattern = n
			}

			// Replicate pattern to at most maxBits.
			if npattern == 1 {
				// One bit being repeated.
				// If the bit is 1, make the pattern all 1s.
				// If the bit is 0, the pattern is already all 0s,
				// but we can claim that the number of bits
				// in the word is equal to the number we need (c),
				// because right shift of bits will zero fill.
				if pattern == 1 {
					pattern = 1<<maxBits - 1
					npattern = maxBits
				} else {
					npattern = c
				}
			} else {
				b := pattern
				nb := npattern
				if nb+nb <= maxBits {
					// Double pattern until the whole uintptr is filled.
					for nb <= goarch.PtrSize*8 {
						b |= b << nb
						nb += nb
					}
					// Trim away incomplete copy of original pattern in high bits.
					// TODO(rsc): Replace with table lookup or loop on systems without divide?
					nb = maxBits / npattern * npattern
					b &= 1<<nb - 1
					pattern = b
					npattern = nb
				}
			}

			// Add pattern to bit buffer and flush bit buffer, c/npattern times.
			// Since pattern contains >8 bits, there will be full bytes to flush
			// on each iteration.
			for ; c >= npattern; c -= npattern {
				bits |= pattern << nbits
				nbits += npattern
				for nbits >= 8 {
					*dst = uint8(bits)
					dst = add1(dst)
					bits >>= 8
					nbits -= 8
				}
			}

			// Add final fragment to bit buffer.
			if c > 0 {
				pattern &= 1<<c - 1
				bits |= pattern << nbits
				nbits += c
			}
			continue Run
		}

		// Repeat; n too large to fit in a register.
		// Since nbits <= 7, we know the first few bytes of repeated data
		// are already written to memory.
		off := n - nbits // n > nbits because n > maxBits and nbits <= 7
		// Leading src fragment.
		src = subtractb(src, (off+7)/8)
		if frag := off & 7; frag != 0 {
			bits |= uintptr(*src) >> (8 - frag) << nbits
			src = add1(src)
			nbits += frag
			c -= frag
		}
		// Main loop: load one byte, write another.
		// The bits are rotating through the bit buffer.
		for i := c / 8; i > 0; i-- {
			bits |= uintptr(*src) << nbits
			src = add1(src)
			*dst = uint8(bits)
			dst = add1(dst)
			bits >>= 8
		}
		// Final src fragment.
		if c %= 8; c > 0 {
			bits |= (uintptr(*src) & (1<<c - 1)) << nbits
			nbits += c
		}
	}

	// Write any final bits out, using full-byte writes, even for the final byte.
	totalBits := (uintptr(unsafe.Pointer(dst))-uintptr(unsafe.Pointer(dstStart)))*8 + nbits
	nbits += -nbits & 7
	for ; nbits > 0; nbits -= 8 {
		*dst = uint8(bits)
		dst = add1(dst)
		bits >>= 8
	}
	return totalBits
}

// materializeGCProg allocates space for the (1-bit) pointer bitmask
// for an object of size ptrdata.  Then it fills that space with the
// pointer bitmask specified by the program prog.
// The bitmask starts at s.startAddr.
// The result must be deallocated with dematerializeGCProg.
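//
// Sizing example (ptrdata value assumed): on a 64-bit target with
// ptrdata == 1<<20, bitmapBytes == divRoundUp(1<<20, 64) == 16384 and,
// with the runtime's 8 KiB pages, pages == 2.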
func materializeGCProg(ptrdata uintptr, prog *byte) *mspan {
	// Each word of ptrdata needs one bit in the bitmap.
	bitmapBytes := divRoundUp(ptrdata, 8*goarch.PtrSize)
	// Compute the number of pages needed for bitmapBytes.
	pages := divRoundUp(bitmapBytes, pageSize)
	s := mheap_.allocManual(pages, spanAllocPtrScalarBits)
	runGCProg(addb(prog, 4), (*byte)(unsafe.Pointer(s.startAddr)))
	return s
}

// dematerializeGCProg frees the bitmap span allocated by materializeGCProg.
func dematerializeGCProg(s *mspan) {
	mheap_.freeManual(s, spanAllocPtrScalarBits)
}

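// dumpGCProg prints a debug listing of the GC program p. For the
// illustrative program 0x02 0x01 0x82 0x03 0x00 from the encoding example
// above, it prints roughly:
//
//	0 lit 2: 0x1
//	2 repeat 2 × 3
//	8 end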
func dumpGCProg(p *byte) {
	nptr := 0
	for {
		x := *p
		p = add1(p)
		if x == 0 {
			print("\t", nptr, " end\n")
			break
		}
		if x&0x80 == 0 {
			print("\t", nptr, " lit ", x, ":")
			n := int(x+7) / 8
			for i := 0; i < n; i++ {
				print(" ", hex(*p))
				p = add1(p)
			}
			print("\n")
			nptr += int(x)
		} else {
			nbit := int(x &^ 0x80)
			if nbit == 0 {
				for nb := uint(0); ; nb += 7 {
					x := *p
					p = add1(p)
					nbit |= int(x&0x7f) << nb
					if x&0x80 == 0 {
						break
					}
				}
			}
			count := 0
			for nb := uint(0); ; nb += 7 {
				x := *p
				p = add1(p)
				count |= int(x&0x7f) << nb
				if x&0x80 == 0 {
					break
				}
			}
			print("\t", nptr, " repeat ", nbit, " × ", count, "\n")
			nptr += nbit * count
		}
	}
}

// Testing.

// reflect_gcbits returns the GC type info for x, for testing.
// The result is the bitmap entries (0 or 1), one entry per byte.
//
//go:linkname reflect_gcbits reflect.gcbits
func reflect_gcbits(x any) []byte {
	return getgcmask(x)
}
