/* $OpenBSD: monitor_mm.c,v 1.13 2006/07/22 20:48:23 stevesk Exp $ */
/*
 * Copyright 2002 Niels Provos <provos@citi.umich.edu>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "includes.h"

#include <sys/types.h>
#ifdef HAVE_SYS_MMAN_H
#include <sys/mman.h>
#endif
#include <sys/param.h>

#include <errno.h>
#include <stdarg.h>
#include <string.h>

#include "xmalloc.h"
#include "ssh.h"
#include "log.h"
#include "monitor_mm.h"
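
/*
 * Simple allocator for a mmap(2)-backed region shared between the
 * privilege-separation monitor and the unprivileged child.  Free and
 * allocated chunks are tracked in two red-black trees keyed by start
 * address, and all request sizes are rounded up to MM_MINSIZE.
 */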
static int
mm_compare(struct mm_share *a, struct mm_share *b)
{
	long diff = (char *)a->address - (char *)b->address;

	if (diff == 0)
		return (0);
	else if (diff < 0)
		return (-1);
	else
		return (1);
}

RB_GENERATE(mmtree, mm_share, next, mm_compare)
static struct mm_share *
mm_make_entry(struct mm_master *mm, struct mmtree *head,
    void *address, size_t size)
{
	struct mm_share *tmp, *tmp2;

	if (mm->mmalloc == NULL)
		tmp = xmalloc(sizeof(struct mm_share));
	else
		tmp = mm_xmalloc(mm->mmalloc, sizeof(struct mm_share));
	tmp->address = address;
	tmp->size = size;

	tmp2 = RB_INSERT(mmtree, head, tmp);
	if (tmp2 != NULL)
		fatal("mm_make_entry(%p): double address %p->%p(%lu)",
		    mm, tmp2, address, (u_long)size);

	return (tmp);
}
/* Creates a shared memory area of a certain size */

struct mm_master *
mm_create(struct mm_master *mmalloc, size_t size)
{
	void *address;
	struct mm_master *mm;

	if (mmalloc == NULL)
		mm = xmalloc(sizeof(struct mm_master));
	else
		mm = mm_xmalloc(mmalloc, sizeof(struct mm_master));

	/*
	 * If the memory map has a mm_master it can be completely
	 * shared including authentication between the child
	 * and the client.
	 */
	mm->mmalloc = mmalloc;

	address = xmmap(size);
	if (address == (void *)MAP_FAILED)
		fatal("mmap(%lu): %s", (u_long)size, strerror(errno));

	mm->address = address;
	mm->size = size;

	RB_INIT(&mm->rb_free);
	RB_INIT(&mm->rb_allocated);

	mm_make_entry(mm, &mm->rb_free, address, size);

	return (mm);
}
/* Frees either the allocated or the free list */

static void
mm_freelist(struct mm_master *mmalloc, struct mmtree *head)
{
	struct mm_share *mms, *next;

	for (mms = RB_ROOT(head); mms; mms = next) {
		next = RB_NEXT(mmtree, head, mms);
		RB_REMOVE(mmtree, head, mms);
		if (mmalloc == NULL)
			xfree(mms);
		else
			mm_free(mmalloc, mms);
	}
}
/* Destroys a memory mapped area */

void
mm_destroy(struct mm_master *mm)
{
	mm_freelist(mm->mmalloc, &mm->rb_free);
	mm_freelist(mm->mmalloc, &mm->rb_allocated);

#ifdef HAVE_MMAP
	if (munmap(mm->address, mm->size) == -1)
		fatal("munmap(%p, %lu): %s", mm->address, (u_long)mm->size,
		    strerror(errno));
#else
	fatal("%s: UsePrivilegeSeparation=yes and Compression=yes not supported",
	    __func__);
#endif
	if (mm->mmalloc == NULL)
		xfree(mm);
	else
		mm_free(mm->mmalloc, mm);
}
void *
mm_xmalloc(struct mm_master *mm, size_t size)
{
	void *address;

	address = mm_malloc(mm, size);
	if (address == NULL)
		fatal("%s: mm_malloc(%lu)", __func__, (u_long)size);
	return (address);
}
/* Allocates data from a memory mapped area */

void *
mm_malloc(struct mm_master *mm, size_t size)
{
	struct mm_share *mms, *tmp;

	if (size == 0)
		fatal("mm_malloc: try to allocate 0 space");
	if (size > SIZE_T_MAX - MM_MINSIZE + 1)
		fatal("mm_malloc: size too big");

	/* Round up to a multiple of the minimum chunk size */
	size = ((size + (MM_MINSIZE - 1)) / MM_MINSIZE) * MM_MINSIZE;

	/* First fit: take the first free chunk that is large enough */
	RB_FOREACH(mms, mmtree, &mm->rb_free) {
		if (mms->size >= size)
			break;
	}

	if (mms == NULL)
		return (NULL);

	/* Debug */
	memset(mms->address, 0xd0, size);

	tmp = mm_make_entry(mm, &mm->rb_allocated, mms->address, size);

	/* Does not change order in RB tree */
	mms->size -= size;
	mms->address = (u_char *)mms->address + size;

	if (mms->size == 0) {
		RB_REMOVE(mmtree, &mm->rb_free, mms);
		if (mm->mmalloc == NULL)
			xfree(mms);
		else
			mm_free(mm->mmalloc, mms);
	}

	return (tmp->address);
}
/* Frees memory in a memory mapped area */

void
mm_free(struct mm_master *mm, void *address)
{
	struct mm_share *mms, *prev, tmp;

	tmp.address = address;
	mms = RB_FIND(mmtree, &mm->rb_allocated, &tmp);
	if (mms == NULL)
		fatal("mm_free(%p): can not find %p", mm, address);

	/* Debug */
	memset(mms->address, 0xd0, mms->size);

	/* Remove from allocated list and insert in free list */
	RB_REMOVE(mmtree, &mm->rb_allocated, mms);
	if (RB_INSERT(mmtree, &mm->rb_free, mms) != NULL)
		fatal("mm_free(%p): double address %p", mm, address);
	/* Find previous entry (in-order predecessor in the free tree) */
	prev = mms;
	if (RB_LEFT(prev, next)) {
		prev = RB_LEFT(prev, next);
		while (RB_RIGHT(prev, next))
			prev = RB_RIGHT(prev, next);
	} else {
		if (RB_PARENT(prev, next) &&
		    (prev == RB_RIGHT(RB_PARENT(prev, next), next)))
			prev = RB_PARENT(prev, next);
		else {
			while (RB_PARENT(prev, next) &&
			    (prev == RB_LEFT(RB_PARENT(prev, next), next)))
				prev = RB_PARENT(prev, next);
			prev = RB_PARENT(prev, next);
		}
	}
	/* Check if range does not overlap */
	if (prev != NULL && MM_ADDRESS_END(prev) > address)
		fatal("mm_free: memory corruption: %p(%lu) > %p",
		    prev->address, (u_long)prev->size, address);

	/* See if we can merge backwards */
	if (prev != NULL && MM_ADDRESS_END(prev) == address) {
		prev->size += mms->size;
		RB_REMOVE(mmtree, &mm->rb_free, mms);
		if (mm->mmalloc == NULL)
			xfree(mms);
		else
			mm_free(mm->mmalloc, mms);
	} else
		prev = mms;

	if (prev == NULL)
		return;
	/* Check if we can merge forwards */
	mms = RB_NEXT(mmtree, &mm->rb_free, prev);
	if (mms == NULL)
		return;

	if (MM_ADDRESS_END(prev) > mms->address)
		fatal("mm_free: memory corruption: %p < %p(%lu)",
		    mms->address, prev->address, (u_long)prev->size);
	if (MM_ADDRESS_END(prev) != mms->address)
		return;

	prev->size += mms->size;
	RB_REMOVE(mmtree, &mm->rb_free, mms);

	if (mm->mmalloc == NULL)
		xfree(mms);
	else
		mm_free(mm->mmalloc, mms);
}
static void
mm_sync_list(struct mmtree *oldtree, struct mmtree *newtree,
    struct mm_master *mm, struct mm_master *mmold)
{
	struct mm_master *mmalloc = mm->mmalloc;
	struct mm_share *mms, *new;

	/* Sync free list */
	RB_FOREACH(mms, mmtree, oldtree) {
		/* Check the values */
		mm_memvalid(mmold, mms, sizeof(struct mm_share));
		mm_memvalid(mm, mms->address, mms->size);

		new = mm_xmalloc(mmalloc, sizeof(struct mm_share));
		memcpy(new, mms, sizeof(struct mm_share));
		RB_INSERT(mmtree, newtree, new);
	}
}
void
mm_share_sync(struct mm_master **pmm, struct mm_master **pmmalloc)
{
	struct mm_master *mm;
	struct mm_master *mmalloc;
	struct mm_master *mmold;
	struct mmtree rb_free, rb_allocated;

	debug3("%s: Share sync", __func__);

	mm = *pmm;
	mmold = mm->mmalloc;
	mm_memvalid(mmold, mm, sizeof(*mm));

	mmalloc = mm_create(NULL, mm->size);
	mm = mm_xmalloc(mmalloc, sizeof(struct mm_master));
	memcpy(mm, *pmm, sizeof(struct mm_master));
	mm->mmalloc = mmalloc;

	rb_free = mm->rb_free;
	rb_allocated = mm->rb_allocated;

	RB_INIT(&mm->rb_free);
	RB_INIT(&mm->rb_allocated);

	mm_sync_list(&rb_free, &mm->rb_free, mm, mmold);
	mm_sync_list(&rb_allocated, &mm->rb_allocated, mm, mmold);

	mm_destroy(mmold);

	*pmm = mm;
	*pmmalloc = mmalloc;

	debug3("%s: Share sync end", __func__);
}
void
mm_memvalid(struct mm_master *mm, void *address, size_t size)
{
	void *end = (u_char *)address + size;

	if (address < mm->address)
		fatal("mm_memvalid: address too small: %p", address);
	if (end < address)
		fatal("mm_memvalid: end < address: %p < %p", end, address);
	if (end > (void *)((u_char *)mm->address + mm->size))
		fatal("mm_memvalid: address too large: %p", address);
}
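
/*
 * Example usage (illustrative only; the actual sizes and call sites live
 * in the monitor code): create a shared area, hand out chunks from it,
 * then reclaim them:
 *
 *	struct mm_master *mm = mm_create(NULL, 65536);
 *	void *p = mm_xmalloc(mm, 128);
 *	... use p; it is visible to both monitor and child ...
 *	mm_free(mm, p);
 *	mm_destroy(mm);
 */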