#
# Module which supports allocation of memory from an mmap
#
# multiprocessing/heap.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#

import bisect
import mmap
import os
import sys
import tempfile
import threading

from .context import reduction, assert_spawning
from . import util

__all__ = ['BufferWrapper']

#
# Inheritable class which wraps an mmap, and from which blocks can be allocated
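#
# On Windows an arena is an anonymous mmap identified by a tagname, so a child
# process started with the "spawn" method can reopen the same mapping by name;
# on other platforms it is backed by an unlinked temporary file whose
# descriptor is duplicated and sent to the child (see reduce_arena below).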
#

if sys.platform == 'win32':

    import _winapi

    class Arena(object):

        _rand = tempfile._RandomNameSequence()

        def __init__(self, size):
            self.size = size
            for i in range(100):
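                # Creating a tag-named mmap whose tagname already exists
                # silently reopens the existing mapping, so keep trying fresh
                # random names until GetLastError() reports a new mapping.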
                name = 'pym-%d-%s' % (os.getpid(), next(self._rand))
                buf = mmap.mmap(-1, size, tagname=name)
                if _winapi.GetLastError() == 0:
                    break
                # We have reopened a preexisting mmap.
                buf.close()
            else:
                raise FileExistsError('Cannot find name for new mmap')
            self.name = name
            self.buffer = buf
            self._state = (self.size, self.name)

        def __getstate__(self):
            assert_spawning(self)
            return self._state

        def __setstate__(self, state):
            self.size, self.name = self._state = state
            self.buffer = mmap.mmap(-1, self.size, tagname=self.name)
            # XXX Temporarily preventing buildbot failures while determining
            # XXX the correct long-term fix. See issue 23060
            #assert _winapi.GetLastError() == _winapi.ERROR_ALREADY_EXISTS

else:

    class Arena(object):

        def __init__(self, size, fd=-1):
            self.size = size
            self.fd = fd
            if fd == -1:
                self.fd, name = tempfile.mkstemp(
                     prefix='pym-%d-'%os.getpid(), dir=util.get_temp_dir())
                os.unlink(name)
                util.Finalize(self, os.close, (self.fd,))
                with open(self.fd, 'wb', closefd=False) as f:
                    bs = 1024 * 1024
                    if size >= bs:
                        zeros = b'\0' * bs
                        for _ in range(size // bs):
                            f.write(zeros)
                        del zeros
                    f.write(b'\0' * (size % bs))
                    assert f.tell() == size
            self.buffer = mmap.mmap(self.fd, self.size)

    def reduce_arena(a):
        if a.fd == -1:
            raise ValueError('Arena is unpicklable because '
                             'forking was enabled when it was created')
        return rebuild_arena, (a.size, reduction.DupFd(a.fd))

    def rebuild_arena(size, dupfd):
        return Arena(size, dupfd.detach())

    reduction.register(Arena, reduce_arena)
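    # Registering reduce_arena means that when an Arena is pickled for another
    # process, only its size and a duplicated file descriptor are sent; the
    # receiving process remaps the same temporary file instead of copying the
    # data.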

#
# Class allowing allocation of chunks of memory from arenas
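#
# A block is an (arena, start, stop) triple.  Free blocks are kept in lists
# grouped by length, and adjacent free blocks within an arena are merged when
# a block is freed.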
#

class Heap(object):

    _alignment = 8

    def __init__(self, size=mmap.PAGESIZE):
        self._lastpid = os.getpid()
        self._lock = threading.Lock()
        self._size = size
        self._lengths = []
        self._len_to_seq = {}
        self._start_to_block = {}
        self._stop_to_block = {}
        self._allocated_blocks = set()
        self._arenas = []
        # list of pending blocks to free - see free() comment below
        self._pending_free_blocks = []

    @staticmethod
    def _roundup(n, alignment):
        # alignment must be a power of 2
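        # e.g. _roundup(13, 8) == (13 + 7) & ~7 == 16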
        mask = alignment - 1
        return (n + mask) & ~mask

    def _malloc(self, size):
        # returns a large enough block -- it might be much larger
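        # Free blocks are kept per length in self._len_to_seq, with the sorted
        # lengths in self._lengths, so bisect finds the smallest free block
        # that fits; if none fits, a new arena is allocated and the nominal
        # arena size doubles for next time.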
        i = bisect.bisect_left(self._lengths, size)
        if i == len(self._lengths):
            length = self._roundup(max(self._size, size), mmap.PAGESIZE)
            self._size *= 2
            util.info('allocating a new mmap of length %d', length)
            arena = Arena(length)
            self._arenas.append(arena)
            return (arena, 0, length)
        else:
            length = self._lengths[i]
            seq = self._len_to_seq[length]
            block = seq.pop()
            if not seq:
                del self._len_to_seq[length], self._lengths[i]

        (arena, start, stop) = block
        del self._start_to_block[(arena, start)]
        del self._stop_to_block[(arena, stop)]
        return block

    def _free(self, block):
        # free location and try to merge with neighbours
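        # A free neighbour ending where this block starts is found via
        # _stop_to_block, and one starting where this block ends via
        # _start_to_block; _absorb() drops the neighbour from the free lists
        # so the merged span can be re-registered as a single block.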
        (arena, start, stop) = block

        try:
            prev_block = self._stop_to_block[(arena, start)]
        except KeyError:
            pass
        else:
            start, _ = self._absorb(prev_block)

        try:
            next_block = self._start_to_block[(arena, stop)]
        except KeyError:
            pass
        else:
            _, stop = self._absorb(next_block)

        block = (arena, start, stop)
        length = stop - start

        try:
            self._len_to_seq[length].append(block)
        except KeyError:
            self._len_to_seq[length] = [block]
            bisect.insort(self._lengths, length)

        self._start_to_block[(arena, start)] = block
        self._stop_to_block[(arena, stop)] = block

    def _absorb(self, block):
        # deregister this block so it can be merged with a neighbour
        (arena, start, stop) = block
        del self._start_to_block[(arena, start)]
        del self._stop_to_block[(arena, stop)]

        length = stop - start
        seq = self._len_to_seq[length]
        seq.remove(block)
        if not seq:
            del self._len_to_seq[length]
            self._lengths.remove(length)

        return start, stop

    def _free_pending_blocks(self):
        # Free all the blocks in the pending list - called with the lock held.
        while True:
            try:
                block = self._pending_free_blocks.pop()
            except IndexError:
                break
            self._allocated_blocks.remove(block)
            self._free(block)

    def free(self, block):
        # free a block returned by malloc()
        # Since free() can be called asynchronously by the GC, it could happen
        # that it's called while self._lock is held: in that case,
        # self._lock.acquire() would deadlock (issue #12352). To avoid that, a
        # trylock is used instead, and if the lock can't be acquired
        # immediately, the block is added to a list of blocks to be freed
        # synchronously some time later from malloc() or free(), by calling
        # _free_pending_blocks() (appending to and popping from a list is not
        # strictly thread-safe, but under CPython it is atomic thanks to the GIL).
        assert os.getpid() == self._lastpid
        if not self._lock.acquire(False):
            # can't acquire the lock right now, add the block to the list of
            # pending blocks to free
            self._pending_free_blocks.append(block)
        else:
            # we hold the lock
            try:
                self._free_pending_blocks()
                self._allocated_blocks.remove(block)
                self._free(block)
            finally:
                self._lock.release()

    def malloc(self, size):
        # return a block of the right size (possibly rounded up)
        assert 0 <= size < sys.maxsize
        if os.getpid() != self._lastpid:
            self.__init__()                     # reinitialize after fork
        with self._lock:
            self._free_pending_blocks()
            size = self._roundup(max(size, 1), self._alignment)
            (arena, start, stop) = self._malloc(size)
            new_stop = start + size
            if new_stop < stop:
                self._free((arena, new_stop, stop))
            block = (arena, start, new_stop)
            self._allocated_blocks.add(block)
            return block

#
# Class representing a chunk of an mmap -- can be inherited by child process
#

class BufferWrapper(object):

    _heap = Heap()

    def __init__(self, size):
        assert 0 <= size < sys.maxsize
        block = BufferWrapper._heap.malloc(size)
        self._state = (block, size)
        util.Finalize(self, BufferWrapper._heap.free, args=(block,))

    def create_memoryview(self):
        (arena, start, stop), size = self._state
        return memoryview(arena.buffer)[start:start+size]
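
#
# Minimal usage sketch (illustrative only, not part of the module).  Higher
# level multiprocessing code such as sharedctypes builds on BufferWrapper;
# used directly it looks roughly like:
#
#     wrapper = BufferWrapper(64)          # reserve 64 bytes from the shared heap
#     view = wrapper.create_memoryview()   # memoryview over the arena's mmap
#     view[:5] = b'hello'                  # writes go straight into the mmap
#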