Open Model Railroad Network (OpenMRN)
malloc.c
/* Copyright (c) 2002, 2004, 2010 Joerg Wunsch
   Copyright (c) 2010 Gerben van den Broeke
   All rights reserved.

   malloc, free, realloc from avr-libc 1.7.0
   with minor modifications, by Paul Stoffregen

   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions are met:

   * Redistributions of source code must retain the above copyright
     notice, this list of conditions and the following disclaimer.

   * Redistributions in binary form must reproduce the above copyright
     notice, this list of conditions and the following disclaimer in
     the documentation and/or other materials provided with the
     distribution.

   * Neither the name of the copyright holders nor the names of
     contributors may be used to endorse or promote products derived
     from this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.
*/


#include <stdlib.h>
#include <inttypes.h>
#include <string.h>

#include "LPC11xx.h"

/* Number of bytes kept free between the heap and the current stack
 * limit; see the note on the exported interface below. */
#define __MALLOC_MARGIN__ 120

/* Heap start and top-of-stack symbols provided by the linker script. */
extern char _pvHeapStart;
extern char _vStackTop;
#define __heap_start _pvHeapStart

/** Freelist overlay for the tiny malloc implementation. */
struct __freelist {
    size_t sz;              /**< number of bytes free here. */
    struct __freelist *nx;  /**< link pointer */
};
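/*
 * Illustrative sketch (not part of the original source): how the sz/nx
 * fields overlay a chunk in memory, as used by the routines below.
 *
 *   allocated chunk:  [ sz ][ payload ... ]         malloc() returns the
 *                                                   address of the payload,
 *                                                   i.e. &chunk->nx
 *   free chunk:       [ sz ][ nx ][ unused ... ]    chained through nx
 *
 * "sz" counts only the payload bytes; the size_t header itself is not
 * included.
 */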

/*
 * Exported interface:
 *
 * When extending the data segment, the allocator will not try to go
 * beyond the current stack limit, decreased by __malloc_margin bytes.
 * Thus, all possible stack frames of interrupt routines that could
 * interrupt the current function, plus all further nested function
 * calls, must not require more stack space, or they risk colliding
 * with the data segment.
 */


//#define STACK_POINTER() ((char *)AVR_STACK_POINTER_REG)
#define STACK_POINTER() (&_vStackTop)
//extern char __heap_start;
char *__brkval = &__heap_start; // first location not yet allocated
struct __freelist *__flp; // freelist pointer (head of freelist)
char *__brkval_maximum = (char*)100; // high-water mark of __brkval

void *
malloc(size_t len)
{
    struct __freelist *fp1, *fp2, *sfp1, *sfp2;
    char *cp;
    size_t s, avail;

    // Align allocated memory to 4 bytes.
    len = (len + 3) & (~3);

    /*
     * Our minimum chunk size is the size of a pointer (plus the
     * size of the "sz" field, but we don't need to account for
     * this), otherwise we could not possibly fit a freelist entry
     * into the chunk later.
     */
    if (len < sizeof(struct __freelist) - sizeof(size_t))
        len = sizeof(struct __freelist) - sizeof(size_t);
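
    /*
     * Worked example (illustrative, assuming 32-bit size_t and pointers
     * as on the LPC11xx): a 1-byte request is rounded up to 4 by the
     * alignment above, and sizeof(struct __freelist) - sizeof(size_t)
     * is also 4, so every chunk handed out can later hold a freelist
     * entry; a 5-byte request becomes 8 bytes.
     */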

    /*
     * First, walk the free list and try finding a chunk that
     * would match exactly. If we found one, we are done. While
     * walking, note down the smallest chunk we found that would
     * still fit the request -- we need it for step 2.
     */
    for (s = 0, fp1 = __flp, fp2 = 0;
         fp1;
         fp2 = fp1, fp1 = fp1->nx) {
        if (fp1->sz < len)
            continue;
        if (fp1->sz == len) {
            /*
             * Found it. Disconnect the chunk from the
             * freelist, and return it.
             */
            if (fp2)
                fp2->nx = fp1->nx;
            else
                __flp = fp1->nx;
            return &(fp1->nx);
        }
        else {
            if (s == 0 || fp1->sz < s) {
                /* this is the smallest chunk found so far */
                s = fp1->sz;
                sfp1 = fp1;
                sfp2 = fp2;
            }
        }
    }
    /*
     * Step 2: If we found a chunk on the freelist that would fit
     * (but was too large), look it up again and use it, since it
     * is our closest match now. Since the freelist entry needs
     * to be split into two entries then, make sure that the
     * difference between the requested size and the size of the
     * chunk found is large enough for another freelist entry; if
     * not, just enlarge the request size to what we have found,
     * and use the entire chunk.
     */
    if (s) {
        if (s - len < sizeof(struct __freelist)) {
            /* Disconnect it from freelist and return it. */
            if (sfp2)
                sfp2->nx = sfp1->nx;
            else
                __flp = sfp1->nx;
            return &(sfp1->nx);
        }
        /*
         * Split it up. Note that we leave the first part
         * as the new (smaller) freelist entry, and return the
         * upper portion to the caller. This saves us the
         * work to fix up the freelist chain; we just need to
         * fixup the size of the current entry, and note down
         * the size of the new chunk before returning it to
         * the caller.
         */
        cp = (char *)sfp1;
        s -= len;
        cp += s;
        sfp2 = (struct __freelist *)cp;
        sfp2->sz = len;
        sfp1->sz = s - sizeof(size_t);
        return &(sfp2->nx);
    }
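
    /*
     * Illustrative example (not in the original source): serving a
     * 12-byte request from a free chunk with 32 payload bytes leaves a
     * free chunk with 16 payload bytes on the freelist; the remaining
     * 16 bytes (assuming a 4-byte size_t) become the new size field
     * plus the 12-byte payload returned to the caller.
     */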
    /*
     * Step 3: If the request could not be satisfied from a
     * freelist entry, just prepare a new chunk. This means we
     * need to obtain more memory first. The largest address just
     * not allocated so far is remembered in the brkval variable.
     * Under Unix, the "break value" was the end of the data
     * segment as dynamically requested from the operating system.
     * Since we don't have an operating system, just make sure
     * that we don't collide with the stack.
     */
    cp = STACK_POINTER() - __MALLOC_MARGIN__;
    if (cp <= __brkval)
        /*
         * Memory exhausted.
         */
        return 0;
    avail = cp - __brkval;
    /*
     * Both tests below are needed to catch the case where
     * len + sizeof(size_t) wraps around (len >= 0xfffe in the
     * original 16-bit AVR code).
     */
    if (avail >= len && avail >= len + sizeof(size_t)) {
        fp1 = (struct __freelist *)__brkval;
        __brkval += len + sizeof(size_t);
        __brkval_maximum = __brkval;
        fp1->sz = len;
        return &(fp1->nx);
    }
    /*
     * Step 4: There's no help, just fail. :-/
     */
    return 0;
}
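
/*
 * A minimal sketch (not part of the original allocator) of how the
 * remaining headroom between __brkval and the stack margin could be
 * queried with the symbols defined above. The function name is
 * hypothetical; free chunks already on the freelist are not counted.
 */
#if 0
size_t heap_headroom(void)
{
    char *limit = STACK_POINTER() - __MALLOC_MARGIN__;
    if (limit <= __brkval)
        return 0;                      /* already inside the safety margin */
    return (size_t)(limit - __brkval); /* bytes malloc() may still claim */
}
#endif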


void
free(void *p)
{
    struct __freelist *fp1, *fp2, *fpnew;
    char *cp1, *cp2, *cpnew;

    /* ISO C says free(NULL) must be a no-op */
    if (p == 0)
        return;

    cpnew = p;
    cpnew -= sizeof(size_t);
    fpnew = (struct __freelist *)cpnew;
    fpnew->nx = 0;

    /*
     * Trivial case first: if there's no freelist yet, our entry
     * will be the only one on it. If this is the last entry, we
     * can reduce __brkval instead.
     */
    if (__flp == 0) {
        if ((char *)p + fpnew->sz == __brkval)
            __brkval = cpnew;
        else
            __flp = fpnew;
        return;
    }

    /*
     * Now, find the position where our new entry belongs on the
     * freelist. Try to aggregate the chunk with adjacent chunks
     * if possible.
     */
    for (fp1 = __flp, fp2 = 0;
         fp1;
         fp2 = fp1, fp1 = fp1->nx) {
        if (fp1 < fpnew)
            continue;
        cp1 = (char *)fp1;
        fpnew->nx = fp1;
        if ((char *)&(fpnew->nx) + fpnew->sz == cp1) {
            /* upper chunk adjacent, assimilate it */
            fpnew->sz += fp1->sz + sizeof(size_t);
            fpnew->nx = fp1->nx;
        }
        if (fp2 == 0) {
            /* new head of freelist */
            __flp = fpnew;
            return;
        }
        break;
    }
    /*
     * Note that we get here either if we hit the "break" above,
     * or if we fell off the end of the loop. The latter means
     * we've got a new topmost chunk. Either way, try aggregating
     * with the lower chunk if possible.
     */
    fp2->nx = fpnew;
    cp2 = (char *)&(fp2->nx);
    if (cp2 + fp2->sz == cpnew) {
        /* lower chunk adjacent, merge */
        fp2->sz += fpnew->sz + sizeof(size_t);
        fp2->nx = fpnew->nx;
    }
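    /*
     * Illustrative example (not in the original source): freeing a
     * 12-byte chunk that sits directly above a 16-byte free chunk
     * merges the two into a single free chunk of 32 payload bytes
     * (12 + 16 plus the 4-byte size field that is no longer needed,
     * assuming a 4-byte size_t).
     */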
    /*
     * If there's a new topmost chunk, lower __brkval instead.
     */
    for (fp1 = __flp, fp2 = 0;
         fp1->nx != 0;
         fp2 = fp1, fp1 = fp1->nx)
        /* advance to entry just before end of list */;
    cp2 = (char *)&(fp1->nx);
    if (cp2 + fp1->sz == __brkval) {
        if (fp2 == NULL)
            /* Freelist is empty now. */
            __flp = NULL;
        else
            fp2->nx = NULL;
        __brkval = cp2 - sizeof(size_t);
    }
}



void *
realloc(void *ptr, size_t len)
{
    struct __freelist *fp1, *fp2, *fp3, *ofp3;
    char *cp, *cp1;
    void *memp;
    size_t s, incr;

    // Align allocated memory to 4 bytes.
    len = (len + 3) & (~3);

    /* Trivial case, required by C standard. */
    if (ptr == 0)
        return malloc(len);

    cp1 = (char *)ptr;
    cp1 -= sizeof(size_t);
    fp1 = (struct __freelist *)cp1;

    cp = (char *)ptr + len; /* new next pointer */
    if (cp < cp1)
        /* Pointer wrapped across top of RAM, fail. */
        return 0;

    /*
     * See whether we are growing or shrinking. When shrinking,
     * we split off a chunk for the released portion, and call
     * free() on it. Therefore, we can only shrink if the new
     * size is at least sizeof(struct __freelist) smaller than the
     * previous size.
     */
    if (len <= fp1->sz) {
        /* The first test catches a possible unsigned int
         * rollover condition. */
        if (fp1->sz <= sizeof(struct __freelist) ||
            len > fp1->sz - sizeof(struct __freelist))
            return ptr;
        fp2 = (struct __freelist *)cp;
        fp2->sz = fp1->sz - len - sizeof(size_t);
        fp1->sz = len;
        free(&(fp2->nx));
        return ptr;
    }
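
    /*
     * Illustrative example for the shrink branch above (not in the
     * original source): shrinking a block with 32 payload bytes to 12
     * bytes splits off a remainder chunk with 32 - 12 - 4 = 16 payload
     * bytes (assuming a 4-byte size_t), which is handed to free().
     */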

    /*
     * If we get here, we are growing. First, see whether there
     * is space in the free list on top of our current chunk.
     */
    incr = len - fp1->sz;
    cp = (char *)ptr + fp1->sz;
    fp2 = (struct __freelist *)cp;
    for (s = 0, ofp3 = 0, fp3 = __flp;
         fp3;
         ofp3 = fp3, fp3 = fp3->nx) {
        if (fp3 == fp2 && fp3->sz + sizeof(size_t) >= incr) {
            /* found something that fits */
            if (fp3->sz + sizeof(size_t) - incr > sizeof(struct __freelist)) {
                /* split off a new freelist entry */
                cp = (char *)ptr + len;
                fp2 = (struct __freelist *)cp;
                fp2->nx = fp3->nx;
                fp2->sz = fp3->sz - incr;
                fp1->sz = len;
            } else {
                /* it just fits, so use it entirely */
                fp1->sz += fp3->sz + sizeof(size_t);
                fp2 = fp3->nx;
            }
            if (ofp3)
                ofp3->nx = fp2;
            else
                __flp = fp2;
            return ptr;
        }
        /*
         * Find the largest chunk on the freelist while
         * walking it.
         */
        if (fp3->sz > s)
            s = fp3->sz;
    }
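    /*
     * Illustrative example (not in the original source): a block with
     * 12 payload bytes followed immediately by a free chunk with 16
     * payload bytes can be grown in place by the loop above to any
     * size up to 12 + 16 + 4 = 32 bytes (assuming a 4-byte size_t),
     * since the free chunk's size field is absorbed as well.
     */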
    /*
     * If we are the topmost chunk in memory, and there was no
     * large enough chunk on the freelist that could be re-used
     * (by a call to malloc() below), quickly extend the
     * allocation area if possible, without needing to copy the
     * old data.
     */
    if (__brkval == (char *)ptr + fp1->sz && len > s) {
        cp = (char *)ptr + len;
        cp1 = STACK_POINTER() - __MALLOC_MARGIN__;
        if (cp < cp1) {
            __brkval = cp;
            __brkval_maximum = cp;
            fp1->sz = len;
            return ptr;
        }
        /* If that failed, we are out of luck. */
        return 0;
    }

    /*
     * Call malloc() for a new chunk, then copy over the data, and
     * release the old region.
     */
    if ((memp = malloc(len)) == 0)
        return 0;
    memcpy(memp, ptr, fp1->sz);
    free(ptr);
    return memp;
}
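
/*
 * A small usage sketch (not part of the original source), exercising
 * the three entry points together. Compiled out; the sizes are
 * arbitrary example values and the function name is hypothetical.
 */
#if 0
static void example_usage(void)
{
    char *buf = malloc(16);          /* grab a 16-byte buffer            */
    if (buf == 0)
        return;                      /* heap exhausted                   */
    char *grown = realloc(buf, 32);  /* grow it, possibly in place       */
    if (grown == 0) {
        free(buf);                   /* growth failed, release original  */
        return;
    }
    free(grown);                     /* return the block to the freelist */
}
#endif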