/* $OpenBSD: monitor_mm.c,v 1.13 2006/07/22 20:48:23 stevesk Exp $ */
/*
 * Copyright 2002 Niels Provos <provos@citi.umich.edu>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "includes.h"

#include <errno.h>
#include <string.h>

#ifdef HAVE_SYS_MMAN_H
#include <sys/mman.h>
#endif

#include "ssh.h"
#include "xmalloc.h"
#include "log.h"
#include "monitor_mm.h"

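/*
 * Memory manager for the privilege-separated monitor: a simple
 * first-fit allocator that hands out MM_MINSIZE-granular blocks from
 * a single mmap()ed region.  Free and allocated ranges are tracked in
 * two red-black trees keyed by address, so adjacent free blocks can
 * be coalesced on mm_free().  sshd uses this to share state between
 * the monitor and the unprivileged child, notably the compression
 * state (cf. the Compression error message in mm_destroy() below).
 */
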
static int
mm_compare(struct mm_share *a, struct mm_share *b)
{
	long diff = (char *)a->address - (char *)b->address;

	if (diff == 0)
		return (0);
	else if (diff < 0)
		return (-1);
	else
		return (1);
}

RB_GENERATE(mmtree, mm_share, next, mm_compare)

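/*
 * Wraps an address/size pair in a tree node and inserts it into the
 * given tree; an existing entry for the same address means the
 * bookkeeping is corrupt, which is fatal.
 */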
static struct mm_share *
mm_make_entry(struct mm_master *mm, struct mmtree *head,
    void *address, size_t size)
{
	struct mm_share *tmp, *tmp2;

	if (mm->mmalloc == NULL)
		tmp = xmalloc(sizeof(struct mm_share));
	else
		tmp = mm_xmalloc(mm->mmalloc, sizeof(struct mm_share));
	tmp->address = address;
	tmp->size = size;

	tmp2 = RB_INSERT(mmtree, head, tmp);
	if (tmp2 != NULL)
		fatal("mm_make_entry(%p): double address %p->%p(%lu)",
		    mm, tmp2, address, (u_long)size);

	return (tmp);
}

/* Creates a shared memory area of a certain size */

struct mm_master *
mm_create(struct mm_master *mmalloc, size_t size)
{
	void *address;
	struct mm_master *mm;

	if (mmalloc == NULL)
		mm = xmalloc(sizeof(struct mm_master));
	else
		mm = mm_xmalloc(mmalloc, sizeof(struct mm_master));

	/*
	 * If the memory map has a mm_master of its own, it can be
	 * shared in its entirety, including authentication state,
	 * between the child and the client.
	 */
	mm->mmalloc = mmalloc;

	address = xmmap(size);
	if (address == (void *)MAP_FAILED)
		fatal("mmap(%lu): %s", (u_long)size, strerror(errno));

	mm->address = address;
	mm->size = size;

	RB_INIT(&mm->rb_free);
	RB_INIT(&mm->rb_allocated);

	mm_make_entry(mm, &mm->rb_free, address, size);

	return (mm);
}
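
/*
 * Illustrative usage sketch (not part of this file; sizes arbitrary):
 *
 *	struct mm_master *mm = mm_create(NULL, 65536);
 *	void *p = mm_xmalloc(mm, 128);
 *	mm_free(mm, p);
 *	mm_destroy(mm);
 */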

/* Frees either the allocated or the free list */

static void
mm_freelist(struct mm_master *mmalloc, struct mmtree *head)
{
	struct mm_share *mms, *next;

	for (mms = RB_ROOT(head); mms; mms = next) {
		next = RB_NEXT(mmtree, head, mms);
		RB_REMOVE(mmtree, head, mms);
		if (mmalloc == NULL)
			xfree(mms);
		else
			mm_free(mmalloc, mms);
	}
}

/* Destroys a memory mapped area */

void
mm_destroy(struct mm_master *mm)
{
	mm_freelist(mm->mmalloc, &mm->rb_free);
	mm_freelist(mm->mmalloc, &mm->rb_allocated);

#ifdef HAVE_MMAP
	if (munmap(mm->address, mm->size) == -1)
		fatal("munmap(%p, %lu): %s", mm->address, (u_long)mm->size,
		    strerror(errno));
#else
	fatal("%s: UsePrivilegeSeparation=yes and Compression=yes not supported",
	    __func__);
#endif
	if (mm->mmalloc == NULL)
		xfree(mm);
	else
		mm_free(mm->mmalloc, mm);
}

void *
mm_xmalloc(struct mm_master *mm, size_t size)
{
	void *address;

	address = mm_malloc(mm, size);
	if (address == NULL)
		fatal("%s: mm_malloc(%lu)", __func__, (u_long)size);
	return (address);
}

/* Allocates data from a memory mapped area */

void *
mm_malloc(struct mm_master *mm, size_t size)
{
	struct mm_share *mms, *tmp;

	if (size == 0)
		fatal("mm_malloc: try to allocate 0 space");
	if (size > SIZE_T_MAX - MM_MINSIZE + 1)
		fatal("mm_malloc: size too big");

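	/*
	 * Round the request up to the next multiple of MM_MINSIZE, so
	 * every block handed out is MM_MINSIZE-granular.
	 */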
	size = ((size + (MM_MINSIZE - 1)) / MM_MINSIZE) * MM_MINSIZE;

	RB_FOREACH(mms, mmtree, &mm->rb_free) {
		if (mms->size >= size)
			break;
	}

	if (mms == NULL)
		return (NULL);

	/* Debug: poison the new block so use of stale data shows up */
	memset(mms->address, 0xd0, size);

	tmp = mm_make_entry(mm, &mm->rb_allocated, mms->address, size);

	/* Does not change order in RB tree */
	mms->size -= size;
	mms->address = (u_char *)mms->address + size;

	if (mms->size == 0) {
		RB_REMOVE(mmtree, &mm->rb_free, mms);
		if (mm->mmalloc == NULL)
			xfree(mms);
		else
			mm_free(mm->mmalloc, mms);
	}

	return (tmp->address);
}

/* Frees memory in a memory mapped area */

void
mm_free(struct mm_master *mm, void *address)
{
	struct mm_share *mms, *prev, tmp;

	tmp.address = address;
	mms = RB_FIND(mmtree, &mm->rb_allocated, &tmp);
	if (mms == NULL)
		fatal("mm_free(%p): can not find %p", mm, address);

	/* Debug: poison the freed block so use-after-free shows up */
	memset(mms->address, 0xd0, mms->size);

	/* Remove from allocated list and insert in free list */
	RB_REMOVE(mmtree, &mm->rb_allocated, mms);
	if (RB_INSERT(mmtree, &mm->rb_free, mms) != NULL)
		fatal("mm_free(%p): double address %p", mm, address);

	/*
	 * Find the previous entry in address order: either the
	 * rightmost node of mms's left subtree, or the nearest
	 * ancestor that keeps mms in its right subtree.
	 */
	prev = mms;
	if (RB_LEFT(prev, next)) {
		prev = RB_LEFT(prev, next);
		while (RB_RIGHT(prev, next))
			prev = RB_RIGHT(prev, next);
	} else {
		if (RB_PARENT(prev, next) &&
		    (prev == RB_RIGHT(RB_PARENT(prev, next), next)))
			prev = RB_PARENT(prev, next);
		else {
			while (RB_PARENT(prev, next) &&
			    (prev == RB_LEFT(RB_PARENT(prev, next), next)))
				prev = RB_PARENT(prev, next);
			prev = RB_PARENT(prev, next);
		}
	}

	/* Check if range does not overlap */
	if (prev != NULL && MM_ADDRESS_END(prev) > address)
		fatal("mm_free: memory corruption: %p(%lu) > %p",
		    prev->address, (u_long)prev->size, address);

	/* See if we can merge backwards */
	if (prev != NULL && MM_ADDRESS_END(prev) == address) {
		prev->size += mms->size;
		RB_REMOVE(mmtree, &mm->rb_free, mms);
		if (mm->mmalloc == NULL)
			xfree(mms);
		else
			mm_free(mm->mmalloc, mms);
	} else
		prev = mms;

	if (prev == NULL)
		return;

	/* Check if we can merge forwards */
	mms = RB_NEXT(mmtree, &mm->rb_free, prev);
	if (mms == NULL)
		return;

	if (MM_ADDRESS_END(prev) > mms->address)
		fatal("mm_free: memory corruption: %p < %p(%lu)",
		    mms->address, prev->address, (u_long)prev->size);
	if (MM_ADDRESS_END(prev) != mms->address)
		return;

	prev->size += mms->size;
	RB_REMOVE(mmtree, &mm->rb_free, mms);

	if (mm->mmalloc == NULL)
		xfree(mms);
	else
		mm_free(mm->mmalloc, mms);
}

static void
mm_sync_list(struct mmtree *oldtree, struct mmtree *newtree,
    struct mm_master *mm, struct mm_master *mmold)
{
	struct mm_master *mmalloc = mm->mmalloc;
	struct mm_share *mms, *new;

	/* Copy every entry of the old tree into the new one */
	RB_FOREACH(mms, mmtree, oldtree) {
		/* Check the values */
		mm_memvalid(mmold, mms, sizeof(struct mm_share));
		mm_memvalid(mm, mms->address, mms->size);

		new = mm_xmalloc(mmalloc, sizeof(struct mm_share));
		memcpy(new, mms, sizeof(struct mm_share));
		RB_INSERT(mmtree, newtree, new);
	}
}

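/*
 * Rebuilds the bookkeeping of *pmm inside a freshly created shared
 * area and destroys the old one, handing the new masters back
 * through the pointer arguments.
 */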
void
mm_share_sync(struct mm_master **pmm, struct mm_master **pmmalloc)
{
	struct mm_master *mm;
	struct mm_master *mmalloc;
	struct mm_master *mmold;
	struct mmtree rb_free, rb_allocated;

	debug3("%s: Share sync", __func__);

	mm = *pmm;
	mmold = mm->mmalloc;
	mm_memvalid(mmold, mm, sizeof(*mm));

	mmalloc = mm_create(NULL, mm->size);
	mm = mm_xmalloc(mmalloc, sizeof(struct mm_master));
	memcpy(mm, *pmm, sizeof(struct mm_master));
	mm->mmalloc = mmalloc;

	rb_free = mm->rb_free;
	rb_allocated = mm->rb_allocated;

	RB_INIT(&mm->rb_free);
	RB_INIT(&mm->rb_allocated);

	mm_sync_list(&rb_free, &mm->rb_free, mm, mmold);
	mm_sync_list(&rb_allocated, &mm->rb_allocated, mm, mmold);

	mm_destroy(mmold);

	*pmm = mm;
	*pmmalloc = mmalloc;

	debug3("%s: Share sync end", __func__);
}

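/* Checks that [address, address + size) lies inside mm's mapping */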
void
mm_memvalid(struct mm_master *mm, void *address, size_t size)
{
	void *end = (u_char *)address + size;

	if (address < mm->address)
		fatal("mm_memvalid: address too small: %p", address);
	if (end < address)
		fatal("mm_memvalid: end < address: %p < %p", end, address);
	if (end > (void *)((u_char *)mm->address + mm->size))
		fatal("mm_memvalid: address too large: %p", address);
}