/*
 * Copyright 2002 Niels Provos <provos@citi.umich.edu>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "includes.h"

#ifdef HAVE_SYS_MMAN_H
#include <sys/mman.h>
#endif
#include <errno.h>
#include <stdlib.h>
#include <string.h>

#include "openbsd-compat/sys-tree.h"

#include "ssh.h"
#include "xmalloc.h"
#include "log.h"
#include "monitor_mm.h"
static int
mm_compare(struct mm_share *a, struct mm_share *b)
{
	long diff = (char *)a->address - (char *)b->address;

	if (diff == 0)
		return (0);
	else if (diff < 0)
		return (-1);
	else
		return (1);
}

RB_GENERATE(mmtree, mm_share, next, mm_compare)
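/*
 * Both the free and the allocated tree use this address ordering, so
 * an in-order traversal visits shares in memory order.  mm_free()
 * depends on this to find a freed block's neighbours and coalesce
 * adjacent free regions back into larger ones.
 */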
static struct mm_share *
mm_make_entry(struct mm_master *mm, struct mmtree *head,
    void *address, size_t size)
{
	struct mm_share *tmp, *tmp2;

	if (mm->mmalloc == NULL)
		tmp = xmalloc(sizeof(struct mm_share));
	else
		tmp = mm_xmalloc(mm->mmalloc, sizeof(struct mm_share));
	tmp->address = address;
	tmp->size = size;

	tmp2 = RB_INSERT(mmtree, head, tmp);
	if (tmp2 != NULL)
		fatal("mm_make_entry(%p): double address %p->%p(%lu)",
		    mm, tmp2, address, (u_long)size);

	return (tmp);
}
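/*
 * Note the pattern above, repeated throughout this file: when
 * mm->mmalloc is NULL the bookkeeping entry lives in the ordinary
 * (private) heap via xmalloc(); otherwise it is carved out of a
 * second shared area, so that parent and child observe the same
 * allocator state.
 */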
/* Creates a shared memory area of a certain size */

struct mm_master *
mm_create(struct mm_master *mmalloc, size_t size)
{
	void *address;
	struct mm_master *mm;

	if (mmalloc == NULL)
		mm = xmalloc(sizeof(struct mm_master));
	else
		mm = mm_xmalloc(mmalloc, sizeof(struct mm_master));

	/*
	 * If the memory map has a mm_master it can be completely
	 * shared including authentication between the child
	 * and the client.
	 */
	mm->mmalloc = mmalloc;

	address = xmmap(size);
	if (address == (void *)MAP_FAILED)
		fatal("mmap(%lu): %s", (u_long)size, strerror(errno));

	mm->address = address;
	mm->size = size;

	RB_INIT(&mm->rb_free);
	RB_INIT(&mm->rb_allocated);

	mm_make_entry(mm, &mm->rb_free, address, size);

	return (mm);
}
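/*
 * Usage sketch (not from this file; names and sizes are illustrative
 * only): callers typically build a two-level setup, a first area
 * whose bookkeeping stays in the private heap, and a second area
 * whose bookkeeping lives inside the first, making even the
 * allocator state shared:
 *
 *	struct mm_master *back, *mm;
 *	void *p;
 *
 *	back = mm_create(NULL, 65536);
 *	mm = mm_create(back, 20 * 65536);
 *	p = mm_xmalloc(mm, 128);	-- 128 shared bytes
 *	mm_free(mm, p);
 *	mm_destroy(mm);
 *	mm_destroy(back);
 */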
/* Frees either the allocated or the free list */

static void
mm_freelist(struct mm_master *mmalloc, struct mmtree *head)
{
	struct mm_share *mms, *next;

	for (mms = RB_ROOT(head); mms; mms = next) {
		next = RB_NEXT(mmtree, head, mms);
		RB_REMOVE(mmtree, head, mms);
		if (mmalloc == NULL)
			free(mms);
		else
			mm_free(mmalloc, mms);
	}
}
/* Destroys a memory mapped area */

void
mm_destroy(struct mm_master *mm)
{
	mm_freelist(mm->mmalloc, &mm->rb_free);
	mm_freelist(mm->mmalloc, &mm->rb_allocated);

#ifdef HAVE_MMAP
	if (munmap(mm->address, mm->size) == -1)
		fatal("munmap(%p, %lu): %s", mm->address, (u_long)mm->size,
		    strerror(errno));
#else
	fatal("%s: UsePrivilegeSeparation=yes and Compression=yes not supported",
	    __func__);
#endif
	if (mm->mmalloc == NULL)
		free(mm);
	else
		mm_free(mm->mmalloc, mm);
}
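/*
 * Without mmap() there is no way to create the shared backing store,
 * hence the hard failure above: on such platforms privilege
 * separation cannot be combined with compression, since the zlib
 * state must be visible to both processes.
 */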
/* Allocates from a shared memory area; fatal error on exhaustion */

void *
mm_xmalloc(struct mm_master *mm, size_t size)
{
	void *address;

	address = mm_malloc(mm, size);
	if (address == NULL)
		fatal("%s: mm_malloc(%lu)", __func__, (u_long)size);

	return (address);
}
/* Allocates data from a memory mapped area */

void *
mm_malloc(struct mm_master *mm, size_t size)
{
	struct mm_share *mms, *tmp;

	if (size == 0)
		fatal("mm_malloc: try to allocate 0 space");
	if (size > SIZE_T_MAX - MM_MINSIZE + 1)
		fatal("mm_malloc: size too big");

	size = ((size + (MM_MINSIZE - 1)) / MM_MINSIZE) * MM_MINSIZE;

	RB_FOREACH(mms, mmtree, &mm->rb_free) {
		if (mms->size >= size)
			break;
	}

	if (mms == NULL)
		return (NULL);

	/* Debug */
	memset(mms->address, 0xd0, size);

	tmp = mm_make_entry(mm, &mm->rb_allocated, mms->address, size);

	/* Does not change order in RB tree */
	mms->size -= size;
	mms->address = (u_char *)mms->address + size;

	if (mms->size == 0) {
		RB_REMOVE(mmtree, &mm->rb_free, mms);
		if (mm->mmalloc == NULL)
			free(mms);
		else
			mm_free(mm->mmalloc, mms);
	}

	return (tmp->address);
}
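/*
 * Example of the rounding above, assuming MM_MINSIZE is 128 (see
 * monitor_mm.h for the real value): a request for 200 bytes becomes
 * ((200 + 127) / 128) * 128 = 256, so every share is a multiple of
 * MM_MINSIZE.  The RB_FOREACH is a first-fit scan in address order;
 * the chosen share is split, with the unused remainder staying in
 * the free tree at its advanced address.
 */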
/* Frees memory in a memory mapped area */

void
mm_free(struct mm_master *mm, void *address)
{
	struct mm_share *mms, *prev, tmp;

	tmp.address = address;
	mms = RB_FIND(mmtree, &mm->rb_allocated, &tmp);
	if (mms == NULL)
		fatal("mm_free(%p): can not find %p", mm, address);

	/* Debug */
	memset(mms->address, 0xd0, mms->size);

	/* Remove from allocated list and insert in free list */
	RB_REMOVE(mmtree, &mm->rb_allocated, mms);
	if (RB_INSERT(mmtree, &mm->rb_free, mms) != NULL)
		fatal("mm_free(%p): double address %p", mm, address);

	/* Find previous entry */
	prev = mms;
	if (RB_LEFT(prev, next)) {
		prev = RB_LEFT(prev, next);
		while (RB_RIGHT(prev, next))
			prev = RB_RIGHT(prev, next);
	} else {
		if (RB_PARENT(prev, next) &&
		    (prev == RB_RIGHT(RB_PARENT(prev, next), next)))
			prev = RB_PARENT(prev, next);
		else {
			while (RB_PARENT(prev, next) &&
			    (prev == RB_LEFT(RB_PARENT(prev, next), next)))
				prev = RB_PARENT(prev, next);
			prev = RB_PARENT(prev, next);
		}
	}

	/* Check if range does not overlap */
	if (prev != NULL && MM_ADDRESS_END(prev) > address)
		fatal("mm_free: memory corruption: %p(%lu) > %p",
		    prev->address, (u_long)prev->size, address);

	/* See if we can merge backwards */
	if (prev != NULL && MM_ADDRESS_END(prev) == address) {
		prev->size += mms->size;
		RB_REMOVE(mmtree, &mm->rb_free, mms);
		if (mm->mmalloc == NULL)
			free(mms);
		else
			mm_free(mm->mmalloc, mms);
	} else
		prev = mms;

	if (prev == NULL)
		return;

	/* Check if we can merge forwards */
	mms = RB_NEXT(mmtree, &mm->rb_free, prev);
	if (mms == NULL)
		return;

	if (MM_ADDRESS_END(prev) > mms->address)
		fatal("mm_free: memory corruption: %p < %p(%lu)",
		    mms->address, prev->address, (u_long)prev->size);
	if (MM_ADDRESS_END(prev) != mms->address)
		return;

	prev->size += mms->size;
	RB_REMOVE(mmtree, &mm->rb_free, mms);

	if (mm->mmalloc == NULL)
		free(mms);
	else
		mm_free(mm->mmalloc, mms);
}
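/*
 * Coalescing example (hypothetical addresses): with free shares
 * [0x1000,0x1080) and [0x1100,0x1180), freeing [0x1080,0x1100)
 * first merges backwards into [0x1000,0x1100), then forwards into
 * a single free share [0x1000,0x1180).  The "find previous entry"
 * section above is a hand-rolled in-order predecessor walk of the
 * RB tree (what newer tree.h versions expose as RB_PREV()): it
 * locates the free share ending nearest below the freed address.
 */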
/* Clones the shares of one tree into another, validating each entry */

static void
mm_sync_list(struct mmtree *oldtree, struct mmtree *newtree,
    struct mm_master *mm, struct mm_master *mmold)
{
	struct mm_master *mmalloc = mm->mmalloc;
	struct mm_share *mms, *new;

	/* Sync the list */
	RB_FOREACH(mms, mmtree, oldtree) {
		/* Check the values */
		mm_memvalid(mmold, mms, sizeof(struct mm_share));
		mm_memvalid(mm, mms->address, mms->size);

		new = mm_xmalloc(mmalloc, sizeof(struct mm_share));
		memcpy(new, mms, sizeof(struct mm_share));
		RB_INSERT(mmtree, newtree, new);
	}
}
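/*
 * The deep copy matters: the old tree's nodes were allocated by the
 * old bookkeeping allocator (mmold), which the caller is about to
 * destroy, so each node is validated and cloned into storage owned
 * by the new allocator before being re-inserted.
 */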
/* Rebuilds the bookkeeping for a shared area in a fresh allocator */

void
mm_share_sync(struct mm_master **pmm, struct mm_master **pmmalloc)
{
	struct mm_master *mm;
	struct mm_master *mmalloc;
	struct mm_master *mmold;
	struct mmtree rb_free, rb_allocated;

	debug3("%s: Share sync", __func__);

	mm = *pmm;
	mmold = mm->mmalloc;
	mm_memvalid(mmold, mm, sizeof(*mm));

	mmalloc = mm_create(NULL, mm->size);
	mm = mm_xmalloc(mmalloc, sizeof(struct mm_master));
	memcpy(mm, *pmm, sizeof(struct mm_master));
	mm->mmalloc = mmalloc;

	rb_free = mm->rb_free;
	rb_allocated = mm->rb_allocated;

	RB_INIT(&mm->rb_free);
	RB_INIT(&mm->rb_allocated);

	mm_sync_list(&rb_free, &mm->rb_free, mm, mmold);
	mm_sync_list(&rb_allocated, &mm->rb_allocated, mm, mmold);

	mm_destroy(mmold);

	*pmm = mm;
	*pmmalloc = mmalloc;

	debug3("%s: Share sync end", __func__);
}
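/*
 * Net effect: the data area itself is kept, but all bookkeeping now
 * lives in a freshly created area owned by this process, and the
 * old bookkeeping area is destroyed.  Elsewhere in the tree (not
 * shown here) the privsep code uses this to carry the shared zlib
 * compression state across a fork boundary.
 */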
/* Checks that an address range lies fully within the memory map */

void
mm_memvalid(struct mm_master *mm, void *address, size_t size)
{
	void *end = (u_char *)address + size;

	if (address < mm->address)
		fatal("mm_memvalid: address too small: %p", address);
	if (end < address)
		fatal("mm_memvalid: end < address: %p < %p", end, address);
	if (end > (void *)((u_char *)mm->address + mm->size))
		fatal("mm_memvalid: address too large: %p", address);
}