src/stdlib/SDL_malloc.c
changeset 1895 c121d94672cb
parent 1465 8dfa9a6d69a5
child 2194 55e987d8e1b5
1894:c69cee13dd76 1895:c121d94672cb
   475 */
   475 */
   476 
   476 
   477 #ifndef WIN32
   477 #ifndef WIN32
   478 #ifdef _WIN32
   478 #ifdef _WIN32
   479 #define WIN32 1
   479 #define WIN32 1
   480 #endif  /* _WIN32 */
   480 #endif /* _WIN32 */
   481 #endif  /* WIN32 */
   481 #endif /* WIN32 */
   482 #ifdef WIN32
   482 #ifdef WIN32
   483 #define WIN32_LEAN_AND_MEAN
   483 #define WIN32_LEAN_AND_MEAN
   484 #include <windows.h>
   484 #include <windows.h>
   485 #define HAVE_MMAP 1
   485 #define HAVE_MMAP 1
   486 #define HAVE_MORECORE 0
   486 #define HAVE_MORECORE 0
   489 #define LACKS_SYS_MMAN_H
   489 #define LACKS_SYS_MMAN_H
   490 #define LACKS_STRING_H
   490 #define LACKS_STRING_H
   491 #define LACKS_STRINGS_H
   491 #define LACKS_STRINGS_H
   492 #define LACKS_SYS_TYPES_H
   492 #define LACKS_SYS_TYPES_H
   493 #define LACKS_ERRNO_H
   493 #define LACKS_ERRNO_H
   494 #define LACKS_FCNTL_H 
   494 #define LACKS_FCNTL_H
   495 #define MALLOC_FAILURE_ACTION
   495 #define MALLOC_FAILURE_ACTION
   496 #define MMAP_CLEARS 0 /* WINCE and some others apparently don't clear */
   496 #define MMAP_CLEARS 0           /* WINCE and some others apparently don't clear */
   497 #endif  /* WIN32 */
   497 #endif /* WIN32 */
   498 
   498 
   499 #if defined(DARWIN) || defined(_DARWIN)
   499 #if defined(DARWIN) || defined(_DARWIN)
    500 /* Mac OS X docs advise not to use sbrk; it seems better to use mmap */
    500 /* Mac OS X docs advise not to use sbrk; it seems better to use mmap */
   501 #ifndef HAVE_MORECORE
   501 #ifndef HAVE_MORECORE
   502 #define HAVE_MORECORE 0
   502 #define HAVE_MORECORE 0
   503 #define HAVE_MMAP 1
   503 #define HAVE_MMAP 1
   504 #endif  /* HAVE_MORECORE */
   504 #endif /* HAVE_MORECORE */
   505 #endif  /* DARWIN */
   505 #endif /* DARWIN */
   506 
   506 
   507 #ifndef LACKS_SYS_TYPES_H
   507 #ifndef LACKS_SYS_TYPES_H
   508 #include <sys/types.h>  /* For size_t */
   508 #include <sys/types.h>          /* For size_t */
   509 #endif  /* LACKS_SYS_TYPES_H */
   509 #endif /* LACKS_SYS_TYPES_H */
   510 
   510 
   511 /* The maximum possible size_t value has all bits set */
   511 /* The maximum possible size_t value has all bits set */
   512 #define MAX_SIZE_T           (~(size_t)0)
   512 #define MAX_SIZE_T           (~(size_t)0)
   513 
   513 
   514 #ifndef ONLY_MSPACES
   514 #ifndef ONLY_MSPACES
   515 #define ONLY_MSPACES 0
   515 #define ONLY_MSPACES 0
   516 #endif  /* ONLY_MSPACES */
   516 #endif /* ONLY_MSPACES */
   517 #ifndef MSPACES
   517 #ifndef MSPACES
   518 #if ONLY_MSPACES
   518 #if ONLY_MSPACES
   519 #define MSPACES 1
   519 #define MSPACES 1
   520 #else   /* ONLY_MSPACES */
   520 #else /* ONLY_MSPACES */
   521 #define MSPACES 0
   521 #define MSPACES 0
   522 #endif  /* ONLY_MSPACES */
   522 #endif /* ONLY_MSPACES */
   523 #endif  /* MSPACES */
   523 #endif /* MSPACES */
   524 #ifndef MALLOC_ALIGNMENT
   524 #ifndef MALLOC_ALIGNMENT
   525 #define MALLOC_ALIGNMENT ((size_t)8U)
   525 #define MALLOC_ALIGNMENT ((size_t)8U)
   526 #endif  /* MALLOC_ALIGNMENT */
   526 #endif /* MALLOC_ALIGNMENT */
   527 #ifndef FOOTERS
   527 #ifndef FOOTERS
   528 #define FOOTERS 0
   528 #define FOOTERS 0
   529 #endif  /* FOOTERS */
   529 #endif /* FOOTERS */
   530 #ifndef ABORT
   530 #ifndef ABORT
   531 #define ABORT  abort()
   531 #define ABORT  abort()
   532 #endif  /* ABORT */
   532 #endif /* ABORT */
   533 #ifndef ABORT_ON_ASSERT_FAILURE
   533 #ifndef ABORT_ON_ASSERT_FAILURE
   534 #define ABORT_ON_ASSERT_FAILURE 1
   534 #define ABORT_ON_ASSERT_FAILURE 1
   535 #endif  /* ABORT_ON_ASSERT_FAILURE */
   535 #endif /* ABORT_ON_ASSERT_FAILURE */
   536 #ifndef PROCEED_ON_ERROR
   536 #ifndef PROCEED_ON_ERROR
   537 #define PROCEED_ON_ERROR 0
   537 #define PROCEED_ON_ERROR 0
   538 #endif  /* PROCEED_ON_ERROR */
   538 #endif /* PROCEED_ON_ERROR */
   539 #ifndef USE_LOCKS
   539 #ifndef USE_LOCKS
   540 #define USE_LOCKS 0
   540 #define USE_LOCKS 0
   541 #endif  /* USE_LOCKS */
   541 #endif /* USE_LOCKS */
   542 #ifndef INSECURE
   542 #ifndef INSECURE
   543 #define INSECURE 0
   543 #define INSECURE 0
   544 #endif  /* INSECURE */
   544 #endif /* INSECURE */
   545 #ifndef HAVE_MMAP
   545 #ifndef HAVE_MMAP
   546 #define HAVE_MMAP 1
   546 #define HAVE_MMAP 1
   547 #endif  /* HAVE_MMAP */
   547 #endif /* HAVE_MMAP */
   548 #ifndef MMAP_CLEARS
   548 #ifndef MMAP_CLEARS
   549 #define MMAP_CLEARS 1
   549 #define MMAP_CLEARS 1
   550 #endif  /* MMAP_CLEARS */
   550 #endif /* MMAP_CLEARS */
   551 #ifndef HAVE_MREMAP
   551 #ifndef HAVE_MREMAP
   552 #ifdef linux
   552 #ifdef linux
   553 #define HAVE_MREMAP 1
   553 #define HAVE_MREMAP 1
   554 #else   /* linux */
   554 #else /* linux */
   555 #define HAVE_MREMAP 0
   555 #define HAVE_MREMAP 0
   556 #endif  /* linux */
   556 #endif /* linux */
   557 #endif  /* HAVE_MREMAP */
   557 #endif /* HAVE_MREMAP */
   558 #ifndef MALLOC_FAILURE_ACTION
   558 #ifndef MALLOC_FAILURE_ACTION
   559 #define MALLOC_FAILURE_ACTION  errno = ENOMEM;
   559 #define MALLOC_FAILURE_ACTION  errno = ENOMEM;
   560 #endif  /* MALLOC_FAILURE_ACTION */
   560 #endif /* MALLOC_FAILURE_ACTION */
   561 #ifndef HAVE_MORECORE
   561 #ifndef HAVE_MORECORE
   562 #if ONLY_MSPACES
   562 #if ONLY_MSPACES
   563 #define HAVE_MORECORE 0
   563 #define HAVE_MORECORE 0
   564 #else   /* ONLY_MSPACES */
   564 #else /* ONLY_MSPACES */
   565 #define HAVE_MORECORE 1
   565 #define HAVE_MORECORE 1
   566 #endif  /* ONLY_MSPACES */
   566 #endif /* ONLY_MSPACES */
   567 #endif  /* HAVE_MORECORE */
   567 #endif /* HAVE_MORECORE */
   568 #if !HAVE_MORECORE
   568 #if !HAVE_MORECORE
   569 #define MORECORE_CONTIGUOUS 0
   569 #define MORECORE_CONTIGUOUS 0
   570 #else   /* !HAVE_MORECORE */
   570 #else /* !HAVE_MORECORE */
   571 #ifndef MORECORE
   571 #ifndef MORECORE
   572 #define MORECORE sbrk
   572 #define MORECORE sbrk
   573 #endif  /* MORECORE */
   573 #endif /* MORECORE */
   574 #ifndef MORECORE_CONTIGUOUS
   574 #ifndef MORECORE_CONTIGUOUS
   575 #define MORECORE_CONTIGUOUS 1
   575 #define MORECORE_CONTIGUOUS 1
   576 #endif  /* MORECORE_CONTIGUOUS */
   576 #endif /* MORECORE_CONTIGUOUS */
   577 #endif  /* HAVE_MORECORE */
   577 #endif /* HAVE_MORECORE */
   578 #ifndef DEFAULT_GRANULARITY
   578 #ifndef DEFAULT_GRANULARITY
   579 #if MORECORE_CONTIGUOUS
   579 #if MORECORE_CONTIGUOUS
   580 #define DEFAULT_GRANULARITY (0)  /* 0 means to compute in init_mparams */
   580 #define DEFAULT_GRANULARITY (0) /* 0 means to compute in init_mparams */
   581 #else   /* MORECORE_CONTIGUOUS */
   581 #else /* MORECORE_CONTIGUOUS */
   582 #define DEFAULT_GRANULARITY ((size_t)64U * (size_t)1024U)
   582 #define DEFAULT_GRANULARITY ((size_t)64U * (size_t)1024U)
   583 #endif  /* MORECORE_CONTIGUOUS */
   583 #endif /* MORECORE_CONTIGUOUS */
   584 #endif  /* DEFAULT_GRANULARITY */
   584 #endif /* DEFAULT_GRANULARITY */
   585 #ifndef DEFAULT_TRIM_THRESHOLD
   585 #ifndef DEFAULT_TRIM_THRESHOLD
   586 #ifndef MORECORE_CANNOT_TRIM
   586 #ifndef MORECORE_CANNOT_TRIM
   587 #define DEFAULT_TRIM_THRESHOLD ((size_t)2U * (size_t)1024U * (size_t)1024U)
   587 #define DEFAULT_TRIM_THRESHOLD ((size_t)2U * (size_t)1024U * (size_t)1024U)
   588 #else   /* MORECORE_CANNOT_TRIM */
   588 #else /* MORECORE_CANNOT_TRIM */
   589 #define DEFAULT_TRIM_THRESHOLD MAX_SIZE_T
   589 #define DEFAULT_TRIM_THRESHOLD MAX_SIZE_T
   590 #endif  /* MORECORE_CANNOT_TRIM */
   590 #endif /* MORECORE_CANNOT_TRIM */
   591 #endif  /* DEFAULT_TRIM_THRESHOLD */
   591 #endif /* DEFAULT_TRIM_THRESHOLD */
   592 #ifndef DEFAULT_MMAP_THRESHOLD
   592 #ifndef DEFAULT_MMAP_THRESHOLD
   593 #if HAVE_MMAP
   593 #if HAVE_MMAP
   594 #define DEFAULT_MMAP_THRESHOLD ((size_t)256U * (size_t)1024U)
   594 #define DEFAULT_MMAP_THRESHOLD ((size_t)256U * (size_t)1024U)
   595 #else   /* HAVE_MMAP */
   595 #else /* HAVE_MMAP */
   596 #define DEFAULT_MMAP_THRESHOLD MAX_SIZE_T
   596 #define DEFAULT_MMAP_THRESHOLD MAX_SIZE_T
   597 #endif  /* HAVE_MMAP */
   597 #endif /* HAVE_MMAP */
   598 #endif  /* DEFAULT_MMAP_THRESHOLD */
   598 #endif /* DEFAULT_MMAP_THRESHOLD */
   599 #ifndef USE_BUILTIN_FFS
   599 #ifndef USE_BUILTIN_FFS
   600 #define USE_BUILTIN_FFS 0
   600 #define USE_BUILTIN_FFS 0
   601 #endif  /* USE_BUILTIN_FFS */
   601 #endif /* USE_BUILTIN_FFS */
   602 #ifndef USE_DEV_RANDOM
   602 #ifndef USE_DEV_RANDOM
   603 #define USE_DEV_RANDOM 0
   603 #define USE_DEV_RANDOM 0
   604 #endif  /* USE_DEV_RANDOM */
   604 #endif /* USE_DEV_RANDOM */
   605 #ifndef NO_MALLINFO
   605 #ifndef NO_MALLINFO
   606 #define NO_MALLINFO 0
   606 #define NO_MALLINFO 0
   607 #endif  /* NO_MALLINFO */
   607 #endif /* NO_MALLINFO */
   608 #ifndef MALLINFO_FIELD_TYPE
   608 #ifndef MALLINFO_FIELD_TYPE
   609 #define MALLINFO_FIELD_TYPE size_t
   609 #define MALLINFO_FIELD_TYPE size_t
   610 #endif  /* MALLINFO_FIELD_TYPE */
   610 #endif /* MALLINFO_FIELD_TYPE */
   611 
   611 
   612 #define memset	SDL_memset
   612 #define memset	SDL_memset
   613 #define memcpy	SDL_memcpy
   613 #define memcpy	SDL_memcpy
   614 #define malloc	SDL_malloc
   614 #define malloc	SDL_malloc
   615 #define calloc	SDL_calloc
   615 #define calloc	SDL_calloc
   656 
   656 
   657 #ifdef HAVE_USR_INCLUDE_MALLOC_H
   657 #ifdef HAVE_USR_INCLUDE_MALLOC_H
   658 #include "/usr/include/malloc.h"
   658 #include "/usr/include/malloc.h"
   659 #else /* HAVE_USR_INCLUDE_MALLOC_H */
   659 #else /* HAVE_USR_INCLUDE_MALLOC_H */
   660 
   660 
   661 struct mallinfo {
   661 struct mallinfo
   662   MALLINFO_FIELD_TYPE arena;    /* non-mmapped space allocated from system */
   662 {
   663   MALLINFO_FIELD_TYPE ordblks;  /* number of free chunks */
   663     MALLINFO_FIELD_TYPE arena;  /* non-mmapped space allocated from system */
   664   MALLINFO_FIELD_TYPE smblks;   /* always 0 */
   664     MALLINFO_FIELD_TYPE ordblks;        /* number of free chunks */
   665   MALLINFO_FIELD_TYPE hblks;    /* always 0 */
   665     MALLINFO_FIELD_TYPE smblks; /* always 0 */
   666   MALLINFO_FIELD_TYPE hblkhd;   /* space in mmapped regions */
   666     MALLINFO_FIELD_TYPE hblks;  /* always 0 */
   667   MALLINFO_FIELD_TYPE usmblks;  /* maximum total allocated space */
   667     MALLINFO_FIELD_TYPE hblkhd; /* space in mmapped regions */
   668   MALLINFO_FIELD_TYPE fsmblks;  /* always 0 */
   668     MALLINFO_FIELD_TYPE usmblks;        /* maximum total allocated space */
   669   MALLINFO_FIELD_TYPE uordblks; /* total allocated space */
   669     MALLINFO_FIELD_TYPE fsmblks;        /* always 0 */
   670   MALLINFO_FIELD_TYPE fordblks; /* total free space */
   670     MALLINFO_FIELD_TYPE uordblks;       /* total allocated space */
   671   MALLINFO_FIELD_TYPE keepcost; /* releasable (via malloc_trim) space */
   671     MALLINFO_FIELD_TYPE fordblks;       /* total free space */
   672     MALLINFO_FIELD_TYPE keepcost;       /* releasable (via malloc_trim) space */
   672 };
   673 };
   673 
   674 
   674 #endif /* HAVE_USR_INCLUDE_MALLOC_H */
   675 #endif /* HAVE_USR_INCLUDE_MALLOC_H */
   675 #endif /* NO_MALLINFO */
   676 #endif /* NO_MALLINFO */
   676 
   677 
   677 #ifdef __cplusplus
   678 #ifdef __cplusplus
   678 extern "C" {
   679 extern "C"
   679 #endif /* __cplusplus */
   680 {
   681 #endif                          /* __cplusplus */
   680 
   682 
   681 #if !ONLY_MSPACES
   683 #if !ONLY_MSPACES
   682 
   684 
   683 /* ------------------- Declarations of public routines ------------------- */
   685 /* ------------------- Declarations of public routines ------------------- */
   684 
   686 
   697 #define dlmalloc_usable_size   malloc_usable_size
   699 #define dlmalloc_usable_size   malloc_usable_size
   698 #define dlmalloc_footprint     malloc_footprint
   700 #define dlmalloc_footprint     malloc_footprint
   699 #define dlmalloc_max_footprint malloc_max_footprint
   701 #define dlmalloc_max_footprint malloc_max_footprint
   700 #define dlindependent_calloc   independent_calloc
   702 #define dlindependent_calloc   independent_calloc
   701 #define dlindependent_comalloc independent_comalloc
   703 #define dlindependent_comalloc independent_comalloc
   702 #endif /* USE_DL_PREFIX */
   704 #endif                          /* USE_DL_PREFIX */
   703 
   705 
   704 
   706 
   705 /*
   707 /*
   706   malloc(size_t n)
   708   malloc(size_t n)
   707   Returns a pointer to a newly allocated chunk of at least n bytes, or
   709   Returns a pointer to a newly allocated chunk of at least n bytes, or
   714   arguments that would be negative if signed are interpreted as
   716   arguments that would be negative if signed are interpreted as
   715   requests for huge amounts of space, which will often fail. The
   717   requests for huge amounts of space, which will often fail. The
   716   maximum supported value of n differs across systems, but is in all
   718   maximum supported value of n differs across systems, but is in all
   717   cases less than the maximum representable value of a size_t.
   719   cases less than the maximum representable value of a size_t.
   718 */
   720 */
   719 void* dlmalloc(size_t);
   721     void *dlmalloc(size_t);
   720 
   722 
   721 /*
   723 /*
   722   free(void* p)
   724   free(void* p)
   723   Releases the chunk of memory pointed to by p, that had been previously
   725   Releases the chunk of memory pointed to by p, that had been previously
   724   allocated using malloc or a related routine such as realloc.
   726   allocated using malloc or a related routine such as realloc.
   725   It has no effect if p is null. If p was not malloced or already
   727   It has no effect if p is null. If p was not malloced or already
   726   freed, free(p) will by default cause the current program to abort.
   728   freed, free(p) will by default cause the current program to abort.
   727 */
   729 */
   728 void  dlfree(void*);
   730     void dlfree(void *);
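A minimal calling sketch for the two routines above. It is illustrative only: it assumes the dl-prefixed entry points are visible to the caller (without USE_DL_PREFIX they fold into the plain names, which this file in turn redirects to SDL_malloc and friends via the #defines earlier in the file).

#include <stddef.h>
#include <stdio.h>

void *dlmalloc(size_t);              /* declared above */
void dlfree(void *);

int main(void)
{
    char *p = (char *) dlmalloc(100);   /* at least 100 usable bytes */
    if (p == NULL) {                    /* null on failure (errno = ENOMEM) */
        fputs("allocation failed\n", stderr);
        return 1;
    }
    p[0] = '\0';
    dlfree(p);                          /* would be a no-op if p were null */
    return 0;
}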
   729 
   731 
   730 /*
   732 /*
   731   calloc(size_t n_elements, size_t element_size);
   733   calloc(size_t n_elements, size_t element_size);
   732   Returns a pointer to n_elements * element_size bytes, with all locations
   734   Returns a pointer to n_elements * element_size bytes, with all locations
   733   set to zero.
   735   set to zero.
   734 */
   736 */
   735 void* dlcalloc(size_t, size_t);
   737     void *dlcalloc(size_t, size_t);
   736 
   738 
   737 /*
   739 /*
   738   realloc(void* p, size_t n)
   740   realloc(void* p, size_t n)
   739   Returns a pointer to a chunk of size n that contains the same data
   741   Returns a pointer to a chunk of size n that contains the same data
   740   as does chunk p up to the minimum of (n, p's size) bytes, or null
   742   as does chunk p up to the minimum of (n, p's size) bytes, or null
   755 
   757 
   756   The old unix realloc convention of allowing the last-free'd chunk
   758   The old unix realloc convention of allowing the last-free'd chunk
   757   to be used as an argument to realloc is not supported.
   759   to be used as an argument to realloc is not supported.
   758 */
   760 */
   759 
   761 
   760 void* dlrealloc(void*, size_t);
   762     void *dlrealloc(void *, size_t);
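Because dlrealloc returns null on failure and leaves the original block intact, a caller should not overwrite its only pointer with the return value. A small sketch of the safe idiom (grow_or_keep is a hypothetical helper):

#include <stddef.h>

void *dlrealloc(void *, size_t);     /* declared above */

/* Grow 'buf' to 'newsize' without leaking it if the resize fails. */
static void *grow_or_keep(void *buf, size_t newsize)
{
    void *bigger = dlrealloc(buf, newsize);
    return (bigger != NULL) ? bigger : buf;   /* old block still valid */
}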
   761 
   763 
   762 /*
   764 /*
   763   memalign(size_t alignment, size_t n);
   765   memalign(size_t alignment, size_t n);
   764   Returns a pointer to a newly allocated chunk of n bytes, aligned
   766   Returns a pointer to a newly allocated chunk of n bytes, aligned
   765   in accord with the alignment argument.
   767   in accord with the alignment argument.
   769   8-byte alignment is guaranteed by normal malloc calls, so don't
   771   8-byte alignment is guaranteed by normal malloc calls, so don't
   770   bother calling memalign with an argument of 8 or less.
   772   bother calling memalign with an argument of 8 or less.
   771 
   773 
   772   Overreliance on memalign is a sure way to fragment space.
   774   Overreliance on memalign is a sure way to fragment space.
   773 */
   775 */
   774 void* dlmemalign(size_t, size_t);
   776     void *dlmemalign(size_t, size_t);
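A hedged usage sketch for dlmemalign; the 64-byte request and the alignment check are illustrative choices, not requirements:

#include <stddef.h>
#include <stdint.h>
#include <assert.h>

void *dlmemalign(size_t, size_t);    /* declared above */
void dlfree(void *);

static void demo_memalign(void)
{
    void *p = dlmemalign(64, 1024);  /* 64: a power of two greater than 8 */
    if (p != NULL) {
        assert(((uintptr_t) p & 63) == 0);   /* alignment holds */
        dlfree(p);
    }
}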
   775 
   777 
   776 /*
   778 /*
   777   valloc(size_t n);
   779   valloc(size_t n);
   778   Equivalent to memalign(pagesize, n), where pagesize is the page
   780   Equivalent to memalign(pagesize, n), where pagesize is the page
   779   size of the system. If the pagesize is unknown, 4096 is used.
   781   size of the system. If the pagesize is unknown, 4096 is used.
   780 */
   782 */
   781 void* dlvalloc(size_t);
   783     void *dlvalloc(size_t);
   782 
   784 
   783 /*
   785 /*
   784   mallopt(int parameter_number, int parameter_value)
   786   mallopt(int parameter_number, int parameter_value)
    785   Sets tunable parameters. The format is to provide a
    787   Sets tunable parameters. The format is to provide a
   786   (parameter-number, parameter-value) pair.  mallopt then sets the
   788   (parameter-number, parameter-value) pair.  mallopt then sets the
   796   Symbol            param #  default    allowed param values
   798   Symbol            param #  default    allowed param values
   797   M_TRIM_THRESHOLD     -1   2*1024*1024   any   (MAX_SIZE_T disables)
   799   M_TRIM_THRESHOLD     -1   2*1024*1024   any   (MAX_SIZE_T disables)
   798   M_GRANULARITY        -2     page size   any power of 2 >= page size
   800   M_GRANULARITY        -2     page size   any power of 2 >= page size
   799   M_MMAP_THRESHOLD     -3      256*1024   any   (or 0 if no MMAP support)
   801   M_MMAP_THRESHOLD     -3      256*1024   any   (or 0 if no MMAP support)
   800 */
   802 */
   801 int dlmallopt(int, int);
   803     int dlmallopt(int, int);
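A sketch using the parameter numbers from the table above (dlmalloc also names them with the M_* symbols; the threshold values chosen here are arbitrary examples):

int dlmallopt(int, int);             /* declared above */

static void tune_allocator(void)
{
    dlmallopt(-1, 1024 * 1024);      /* M_TRIM_THRESHOLD: trim at 1MB */
    dlmallopt(-3, 128 * 1024);       /* M_MMAP_THRESHOLD: mmap above 128KB */
}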
   802 
   804 
   803 /*
   805 /*
   804   malloc_footprint();
   806   malloc_footprint();
   805   Returns the number of bytes obtained from the system.  The total
   807   Returns the number of bytes obtained from the system.  The total
   806   number of bytes allocated by malloc, realloc etc., is less than this
   808   number of bytes allocated by malloc, realloc etc., is less than this
   807   value. Unlike mallinfo, this function returns only a precomputed
   809   value. Unlike mallinfo, this function returns only a precomputed
   808   result, so can be called frequently to monitor memory consumption.
   810   result, so can be called frequently to monitor memory consumption.
   809   Even if locks are otherwise defined, this function does not use them,
   811   Even if locks are otherwise defined, this function does not use them,
   810   so results might not be up to date.
   812   so results might not be up to date.
   811 */
   813 */
   812 size_t dlmalloc_footprint(void);
   814     size_t dlmalloc_footprint(void);
   813 
   815 
   814 /*
   816 /*
   815   malloc_max_footprint();
   817   malloc_max_footprint();
   816   Returns the maximum number of bytes obtained from the system. This
   818   Returns the maximum number of bytes obtained from the system. This
   817   value will be greater than current footprint if deallocated space
   819   value will be greater than current footprint if deallocated space
   820   this function returns only a precomputed result, so can be called
   822   this function returns only a precomputed result, so can be called
   821   frequently to monitor memory consumption.  Even if locks are
   823   frequently to monitor memory consumption.  Even if locks are
   822   otherwise defined, this function does not use them, so results might
   824   otherwise defined, this function does not use them, so results might
   823   not be up to date.
   825   not be up to date.
   824 */
   826 */
   825 size_t dlmalloc_max_footprint(void);
   827     size_t dlmalloc_max_footprint(void);
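Since both footprint calls return precomputed counters, they are cheap enough to poll; a minimal monitoring sketch:

#include <stddef.h>
#include <stdio.h>

size_t dlmalloc_footprint(void);     /* declared above */
size_t dlmalloc_max_footprint(void);

static void report_footprint(void)
{
    printf("footprint: %lu bytes (peak %lu)\n",
           (unsigned long) dlmalloc_footprint(),
           (unsigned long) dlmalloc_max_footprint());
}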
   826 
   828 
   827 #if !NO_MALLINFO
   829 #if !NO_MALLINFO
   828 /*
   830 /*
   829   mallinfo()
   831   mallinfo()
   830   Returns (by copy) a struct containing various summary statistics:
   832   Returns (by copy) a struct containing various summary statistics:
   845 
   847 
   846   Because these fields are ints, but internal bookkeeping may
   848   Because these fields are ints, but internal bookkeeping may
   847   be kept as longs, the reported values may wrap around zero and
   849   be kept as longs, the reported values may wrap around zero and
   848   thus be inaccurate.
   850   thus be inaccurate.
   849 */
   851 */
   850 struct mallinfo dlmallinfo(void);
   852     struct mallinfo dlmallinfo(void);
   851 #endif /* NO_MALLINFO */
   853 #endif                          /* NO_MALLINFO */
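A sketch that prints a few of the fields, assuming NO_MALLINFO is 0 and the struct mallinfo definition earlier in this file is in scope (field meanings as annotated there):

#include <stdio.h>

struct mallinfo dlmallinfo(void);    /* declared above */

static void report_mallinfo(void)
{
    struct mallinfo mi = dlmallinfo();
    printf("arena=%lu uordblks=%lu fordblks=%lu keepcost=%lu\n",
           (unsigned long) mi.arena,       /* non-mmapped system space */
           (unsigned long) mi.uordblks,    /* total allocated space */
           (unsigned long) mi.fordblks,    /* total free space */
           (unsigned long) mi.keepcost);   /* releasable via malloc_trim */
}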
   852 
   854 
   853 /*
   855 /*
   854   independent_calloc(size_t n_elements, size_t element_size, void* chunks[]);
   856   independent_calloc(size_t n_elements, size_t element_size, void* chunks[]);
   855 
   857 
   856   independent_calloc is similar to calloc, but instead of returning a
   858   independent_calloc is similar to calloc, but instead of returning a
   900       pool[i]->next = pool[i+1];
   902       pool[i]->next = pool[i+1];
   901     free(pool);     // Can now free the array (or not, if it is needed later)
   903     free(pool);     // Can now free the array (or not, if it is needed later)
   902     return first;
   904     return first;
   903   }
   905   }
   904 */
   906 */
   905 void** dlindependent_calloc(size_t, size_t, void**);
   907     void **dlindependent_calloc(size_t, size_t, void **);
   906 
   908 
   907 /*
   909 /*
   908   independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]);
   910   independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]);
   909 
   911 
   910   independent_comalloc allocates, all at once, a set of n_elements
   912   independent_comalloc allocates, all at once, a set of n_elements
   961 
   963 
   962   Overuse of independent_comalloc can increase overall memory usage,
   964   Overuse of independent_comalloc can increase overall memory usage,
   963   since it cannot reuse existing noncontiguous small chunks that
   965   since it cannot reuse existing noncontiguous small chunks that
   964   might be available for some of the elements.
   966   might be available for some of the elements.
   965 */
   967 */
   966 void** dlindependent_comalloc(size_t, size_t*, void**);
   968     void **dlindependent_comalloc(size_t, size_t *, void **);
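A hedged sketch of the all-or-nothing pattern: one call produces a header plus two arrays, each individually freeable later (the sizes are illustrative):

#include <stddef.h>

void **dlindependent_comalloc(size_t, size_t *, void **);  /* above */
void dlfree(void *);

static int make_blocks(size_t n)
{
    size_t sizes[3] = { 64, n * sizeof(int), n * sizeof(double) };
    void *chunks[3];
    if (dlindependent_comalloc(3, sizes, chunks) == NULL)
        return -1;                   /* nothing was allocated */
    /* ... use chunks[0], chunks[1], chunks[2] ... */
    dlfree(chunks[0]);
    dlfree(chunks[1]);
    dlfree(chunks[2]);
    return 0;
}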
   967 
   969 
   968 
   970 
   969 /*
   971 /*
   970   pvalloc(size_t n);
   972   pvalloc(size_t n);
   971   Equivalent to valloc(minimum-page-that-holds(n)), that is,
   973   Equivalent to valloc(minimum-page-that-holds(n)), that is,
   972   round up n to nearest pagesize.
   974   round up n to nearest pagesize.
   973  */
   975  */
   974 void*  dlpvalloc(size_t);
   976     void *dlpvalloc(size_t);
   975 
   977 
   976 /*
   978 /*
   977   malloc_trim(size_t pad);
   979   malloc_trim(size_t pad);
   978 
   980 
   979   If possible, gives memory back to the system (via negative arguments
   981   If possible, gives memory back to the system (via negative arguments
   992   trailing space to service future expected allocations without having
   994   trailing space to service future expected allocations without having
   993   to re-obtain memory from the system.
   995   to re-obtain memory from the system.
   994 
   996 
   995   Malloc_trim returns 1 if it actually released any memory, else 0.
   997   Malloc_trim returns 1 if it actually released any memory, else 0.
   996 */
   998 */
   997 int  dlmalloc_trim(size_t);
   999     int dlmalloc_trim(size_t);
   998 
  1000 
   999 /*
  1001 /*
  1000   malloc_usable_size(void* p);
  1002   malloc_usable_size(void* p);
  1001 
  1003 
  1002   Returns the number of bytes you can actually use in
  1004   Returns the number of bytes you can actually use in
  1008   debugging and assertions, for example:
  1010   debugging and assertions, for example:
  1009 
  1011 
  1010   p = malloc(n);
  1012   p = malloc(n);
  1011   assert(malloc_usable_size(p) >= 256);
  1013   assert(malloc_usable_size(p) >= 256);
  1012 */
  1014 */
  1013 size_t dlmalloc_usable_size(void*);
  1015     size_t dlmalloc_usable_size(void *);
  1014 
  1016 
  1015 /*
  1017 /*
  1016   malloc_stats();
  1018   malloc_stats();
  1017   Prints on stderr the amount of space obtained from the system (both
  1019   Prints on stderr the amount of space obtained from the system (both
  1018   via sbrk and mmap), the maximum amount (which may be more than
  1020   via sbrk and mmap), the maximum amount (which may be more than
  1029   (normally sbrk) outside of malloc.
  1031   (normally sbrk) outside of malloc.
  1030 
  1032 
  1031   malloc_stats prints only the most commonly interesting statistics.
  1033   malloc_stats prints only the most commonly interesting statistics.
  1032   More information can be obtained by calling mallinfo.
  1034   More information can be obtained by calling mallinfo.
  1033 */
  1035 */
  1034 void  dlmalloc_stats(void);
  1036     void dlmalloc_stats(void);
  1035 
  1037 
  1036 #endif /* ONLY_MSPACES */
  1038 #endif                          /* ONLY_MSPACES */
  1037 
  1039 
  1038 #if MSPACES
  1040 #if MSPACES
  1039 
  1041 
  1040 /*
  1042 /*
  1041   mspace is an opaque type representing an independent
  1043   mspace is an opaque type representing an independent
  1042   region of space that supports mspace_malloc, etc.
  1044   region of space that supports mspace_malloc, etc.
  1043 */
  1045 */
  1044 typedef void* mspace;
  1046     typedef void *mspace;
  1045 
  1047 
  1046 /*
  1048 /*
  1047   create_mspace creates and returns a new independent space with the
  1049   create_mspace creates and returns a new independent space with the
  1048   given initial capacity, or, if 0, the default granularity size.  It
  1050   given initial capacity, or, if 0, the default granularity size.  It
  1049   returns null if there is no system memory available to create the
  1051   returns null if there is no system memory available to create the
  1052   dynamically as needed to service mspace_malloc requests.  You can
  1054   dynamically as needed to service mspace_malloc requests.  You can
  1053   control the sizes of incremental increases of this space by
  1055   control the sizes of incremental increases of this space by
  1054   compiling with a different DEFAULT_GRANULARITY or dynamically
  1056   compiling with a different DEFAULT_GRANULARITY or dynamically
  1055   setting with mallopt(M_GRANULARITY, value).
  1057   setting with mallopt(M_GRANULARITY, value).
  1056 */
  1058 */
  1057 mspace create_mspace(size_t capacity, int locked);
  1059     mspace create_mspace(size_t capacity, int locked);
  1058 
  1060 
  1059 /*
  1061 /*
  1060   destroy_mspace destroys the given space, and attempts to return all
  1062   destroy_mspace destroys the given space, and attempts to return all
  1061   of its memory back to the system, returning the total number of
  1063   of its memory back to the system, returning the total number of
  1062   bytes freed. After destruction, the results of access to all memory
  1064   bytes freed. After destruction, the results of access to all memory
  1063   used by the space become undefined.
  1065   used by the space become undefined.
  1064 */
  1066 */
  1065 size_t destroy_mspace(mspace msp);
  1067     size_t destroy_mspace(mspace msp);
  1066 
  1068 
  1067 /*
  1069 /*
  1068   create_mspace_with_base uses the memory supplied as the initial base
  1070   create_mspace_with_base uses the memory supplied as the initial base
  1069   of a new mspace. Part (less than 128*sizeof(size_t) bytes) of this
  1071   of a new mspace. Part (less than 128*sizeof(size_t) bytes) of this
  1070   space is used for bookkeeping, so the capacity must be at least this
  1072   space is used for bookkeeping, so the capacity must be at least this
  1071   large. (Otherwise 0 is returned.) When this initial space is
  1073   large. (Otherwise 0 is returned.) When this initial space is
  1072   exhausted, additional memory will be obtained from the system.
  1074   exhausted, additional memory will be obtained from the system.
  1073   Destroying this space will deallocate all additionally allocated
  1075   Destroying this space will deallocate all additionally allocated
  1074   space (if possible) but not the initial base.
  1076   space (if possible) but not the initial base.
  1075 */
  1077 */
  1076 mspace create_mspace_with_base(void* base, size_t capacity, int locked);
  1078     mspace create_mspace_with_base(void *base, size_t capacity, int locked);
  1077 
  1079 
  1078 /*
  1080 /*
  1079   mspace_malloc behaves as malloc, but operates within
  1081   mspace_malloc behaves as malloc, but operates within
  1080   the given space.
  1082   the given space.
  1081 */
  1083 */
  1082 void* mspace_malloc(mspace msp, size_t bytes);
  1084     void *mspace_malloc(mspace msp, size_t bytes);
  1083 
  1085 
  1084 /*
  1086 /*
  1085   mspace_free behaves as free, but operates within
  1087   mspace_free behaves as free, but operates within
  1086   the given space.
  1088   the given space.
  1087 
  1089 
  1088   If compiled with FOOTERS==1, mspace_free is not actually needed.
  1090   If compiled with FOOTERS==1, mspace_free is not actually needed.
  1089   free may be called instead of mspace_free because freed chunks from
  1091   free may be called instead of mspace_free because freed chunks from
  1090   any space are handled by their originating spaces.
  1092   any space are handled by their originating spaces.
  1091 */
  1093 */
  1092 void mspace_free(mspace msp, void* mem);
  1094     void mspace_free(mspace msp, void *mem);
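A minimal lifecycle sketch for the mspace calls declared so far (capacity 0 requests the default granularity; locked=0 skips locking; only valid when MSPACES is nonzero):

#include <stddef.h>

typedef void *mspace;                /* as above */
mspace create_mspace(size_t, int);
size_t destroy_mspace(mspace);
void *mspace_malloc(mspace, size_t);
void mspace_free(mspace, void *);

static void demo_mspace(void)
{
    mspace arena = create_mspace(0, 0);
    if (arena != NULL) {
        void *p = mspace_malloc(arena, 512);
        if (p != NULL)
            mspace_free(arena, p);
        destroy_mspace(arena);       /* returns all of its memory at once */
    }
}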
  1093 
  1095 
  1094 /*
  1096 /*
  1095   mspace_realloc behaves as realloc, but operates within
  1097   mspace_realloc behaves as realloc, but operates within
  1096   the given space.
  1098   the given space.
  1097 
  1099 
  1098   If compiled with FOOTERS==1, mspace_realloc is not actually
  1100   If compiled with FOOTERS==1, mspace_realloc is not actually
  1099   needed.  realloc may be called instead of mspace_realloc because
  1101   needed.  realloc may be called instead of mspace_realloc because
  1100   realloced chunks from any space are handled by their originating
  1102   realloced chunks from any space are handled by their originating
  1101   spaces.
  1103   spaces.
  1102 */
  1104 */
  1103 void* mspace_realloc(mspace msp, void* mem, size_t newsize);
  1105     void *mspace_realloc(mspace msp, void *mem, size_t newsize);
  1104 
  1106 
  1105 /*
  1107 /*
  1106   mspace_calloc behaves as calloc, but operates within
  1108   mspace_calloc behaves as calloc, but operates within
  1107   the given space.
  1109   the given space.
  1108 */
  1110 */
  1109 void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size);
  1111     void *mspace_calloc(mspace msp, size_t n_elements, size_t elem_size);
  1110 
  1112 
  1111 /*
  1113 /*
  1112   mspace_memalign behaves as memalign, but operates within
  1114   mspace_memalign behaves as memalign, but operates within
  1113   the given space.
  1115   the given space.
  1114 */
  1116 */
  1115 void* mspace_memalign(mspace msp, size_t alignment, size_t bytes);
  1117     void *mspace_memalign(mspace msp, size_t alignment, size_t bytes);
  1116 
  1118 
  1117 /*
  1119 /*
  1118   mspace_independent_calloc behaves as independent_calloc, but
  1120   mspace_independent_calloc behaves as independent_calloc, but
  1119   operates within the given space.
  1121   operates within the given space.
  1120 */
  1122 */
  1121 void** mspace_independent_calloc(mspace msp, size_t n_elements,
  1123     void **mspace_independent_calloc(mspace msp, size_t n_elements,
  1122                                  size_t elem_size, void* chunks[]);
  1124                                      size_t elem_size, void *chunks[]);
  1123 
  1125 
  1124 /*
  1126 /*
  1125   mspace_independent_comalloc behaves as independent_comalloc, but
  1127   mspace_independent_comalloc behaves as independent_comalloc, but
  1126   operates within the given space.
  1128   operates within the given space.
  1127 */
  1129 */
  1128 void** mspace_independent_comalloc(mspace msp, size_t n_elements,
  1130     void **mspace_independent_comalloc(mspace msp, size_t n_elements,
  1129                                    size_t sizes[], void* chunks[]);
  1131                                        size_t sizes[], void *chunks[]);
  1130 
  1132 
  1131 /*
  1133 /*
  1132   mspace_footprint() returns the number of bytes obtained from the
  1134   mspace_footprint() returns the number of bytes obtained from the
  1133   system for this space.
  1135   system for this space.
  1134 */
  1136 */
  1135 size_t mspace_footprint(mspace msp);
  1137     size_t mspace_footprint(mspace msp);
  1136 
  1138 
  1137 /*
  1139 /*
  1138   mspace_max_footprint() returns the peak number of bytes obtained from the
  1140   mspace_max_footprint() returns the peak number of bytes obtained from the
  1139   system for this space.
  1141   system for this space.
  1140 */
  1142 */
  1141 size_t mspace_max_footprint(mspace msp);
  1143     size_t mspace_max_footprint(mspace msp);
  1142 
  1144 
  1143 
  1145 
  1144 #if !NO_MALLINFO
  1146 #if !NO_MALLINFO
  1145 /*
  1147 /*
  1146   mspace_mallinfo behaves as mallinfo, but reports properties of
  1148   mspace_mallinfo behaves as mallinfo, but reports properties of
  1147   the given space.
  1149   the given space.
  1148 */
  1150 */
  1149 struct mallinfo mspace_mallinfo(mspace msp);
  1151     struct mallinfo mspace_mallinfo(mspace msp);
  1150 #endif /* NO_MALLINFO */
  1152 #endif                          /* NO_MALLINFO */
  1151 
  1153 
  1152 /*
  1154 /*
  1153   mspace_malloc_stats behaves as malloc_stats, but reports
  1155   mspace_malloc_stats behaves as malloc_stats, but reports
  1154   properties of the given space.
  1156   properties of the given space.
  1155 */
  1157 */
  1156 void mspace_malloc_stats(mspace msp);
  1158     void mspace_malloc_stats(mspace msp);
  1157 
  1159 
  1158 /*
  1160 /*
  1159   mspace_trim behaves as malloc_trim, but
  1161   mspace_trim behaves as malloc_trim, but
  1160   operates within the given space.
  1162   operates within the given space.
  1161 */
  1163 */
  1162 int mspace_trim(mspace msp, size_t pad);
  1164     int mspace_trim(mspace msp, size_t pad);
  1163 
  1165 
  1164 /*
  1166 /*
  1165   An alias for mallopt.
  1167   An alias for mallopt.
  1166 */
  1168 */
  1167 int mspace_mallopt(int, int);
  1169     int mspace_mallopt(int, int);
  1168 
  1170 
  1169 #endif /* MSPACES */
  1171 #endif                          /* MSPACES */
  1170 
  1172 
  1171 #ifdef __cplusplus
  1173 #ifdef __cplusplus
  1172 };  /* end of extern "C" */
  1174 };                              /* end of extern "C" */
  1173 #endif /* __cplusplus */
  1175 #endif /* __cplusplus */
  1174 
  1176 
  1175 /*
  1177 /*
  1176   ========================================================================
  1178   ========================================================================
  1177   To make a fully customizable malloc.h header file, cut everything
  1179   To make a fully customizable malloc.h header file, cut everything
  1183 /* #include "malloc.h" */
  1185 /* #include "malloc.h" */
  1184 
  1186 
  1185 /*------------------------------ internal #includes ---------------------- */
  1187 /*------------------------------ internal #includes ---------------------- */
  1186 
  1188 
  1187 #ifdef _MSC_VER
  1189 #ifdef _MSC_VER
  1188 #pragma warning( disable : 4146 ) /* no "unsigned" warnings */
  1190 #pragma warning( disable : 4146 )       /* no "unsigned" warnings */
  1189 #endif /* _MSC_VER */
  1191 #endif /* _MSC_VER */
  1190 
  1192 
  1191 #ifndef LACKS_STDIO_H
  1193 #ifndef LACKS_STDIO_H
  1192 #include <stdio.h>       /* for printing in malloc_stats */
  1194 #include <stdio.h>              /* for printing in malloc_stats */
  1193 #endif
  1195 #endif
  1194 
  1196 
  1195 #ifndef LACKS_ERRNO_H
  1197 #ifndef LACKS_ERRNO_H
  1196 #include <errno.h>       /* for MALLOC_FAILURE_ACTION */
  1198 #include <errno.h>              /* for MALLOC_FAILURE_ACTION */
  1197 #endif /* LACKS_ERRNO_H */
  1199 #endif /* LACKS_ERRNO_H */
  1198 #if FOOTERS
  1200 #if FOOTERS
  1199 #include <time.h>        /* for magic initialization */
  1201 #include <time.h>               /* for magic initialization */
  1200 #endif /* FOOTERS */
  1202 #endif /* FOOTERS */
  1201 #ifndef LACKS_STDLIB_H
  1203 #ifndef LACKS_STDLIB_H
  1202 #include <stdlib.h>      /* for abort() */
  1204 #include <stdlib.h>             /* for abort() */
  1203 #endif /* LACKS_STDLIB_H */
  1205 #endif /* LACKS_STDLIB_H */
  1204 #ifdef DEBUG
  1206 #ifdef DEBUG
  1205 #if ABORT_ON_ASSERT_FAILURE
  1207 #if ABORT_ON_ASSERT_FAILURE
  1206 #define assert(x) if(!(x)) ABORT
  1208 #define assert(x) if(!(x)) ABORT
  1207 #else /* ABORT_ON_ASSERT_FAILURE */
  1209 #else /* ABORT_ON_ASSERT_FAILURE */
  1208 #include <assert.h>
  1210 #include <assert.h>
  1209 #endif /* ABORT_ON_ASSERT_FAILURE */
  1211 #endif /* ABORT_ON_ASSERT_FAILURE */
  1210 #else  /* DEBUG */
  1212 #else /* DEBUG */
  1211 #define assert(x)
  1213 #define assert(x)
  1212 #endif /* DEBUG */
  1214 #endif /* DEBUG */
  1213 #ifndef LACKS_STRING_H
  1215 #ifndef LACKS_STRING_H
  1214 #include <string.h>      /* for memset etc */
  1216 #include <string.h>             /* for memset etc */
  1215 #endif  /* LACKS_STRING_H */
  1217 #endif /* LACKS_STRING_H */
  1216 #if USE_BUILTIN_FFS
  1218 #if USE_BUILTIN_FFS
  1217 #ifndef LACKS_STRINGS_H
  1219 #ifndef LACKS_STRINGS_H
  1218 #include <strings.h>     /* for ffs */
  1220 #include <strings.h>            /* for ffs */
  1219 #endif /* LACKS_STRINGS_H */
  1221 #endif /* LACKS_STRINGS_H */
  1220 #endif /* USE_BUILTIN_FFS */
  1222 #endif /* USE_BUILTIN_FFS */
  1221 #if HAVE_MMAP
  1223 #if HAVE_MMAP
  1222 #ifndef LACKS_SYS_MMAN_H
  1224 #ifndef LACKS_SYS_MMAN_H
  1223 #include <sys/mman.h>    /* for mmap */
  1225 #include <sys/mman.h>           /* for mmap */
  1224 #endif /* LACKS_SYS_MMAN_H */
  1226 #endif /* LACKS_SYS_MMAN_H */
  1225 #ifndef LACKS_FCNTL_H
  1227 #ifndef LACKS_FCNTL_H
  1226 #include <fcntl.h>
  1228 #include <fcntl.h>
  1227 #endif /* LACKS_FCNTL_H */
  1229 #endif /* LACKS_FCNTL_H */
  1228 #endif /* HAVE_MMAP */
  1230 #endif /* HAVE_MMAP */
  1229 #if HAVE_MORECORE
  1231 #if HAVE_MORECORE
  1230 #ifndef LACKS_UNISTD_H
  1232 #ifndef LACKS_UNISTD_H
  1231 #include <unistd.h>     /* for sbrk */
  1233 #include <unistd.h>             /* for sbrk */
  1232 #else /* LACKS_UNISTD_H */
  1234 #else /* LACKS_UNISTD_H */
  1233 #if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
  1235 #if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
  1234 extern void*     sbrk(ptrdiff_t);
  1236 extern void *sbrk(ptrdiff_t);
  1235 #endif /* FreeBSD etc */
  1237 #endif /* FreeBSD etc */
  1236 #endif /* LACKS_UNISTD_H */
  1238 #endif /* LACKS_UNISTD_H */
   1237 #endif /* HAVE_MORECORE */
   1239 #endif /* HAVE_MORECORE */
  1238 
  1240 
  1239 #ifndef WIN32
  1241 #ifndef WIN32
  1240 #ifndef malloc_getpagesize
  1242 #ifndef malloc_getpagesize
  1241 #  ifdef _SC_PAGESIZE         /* some SVR4 systems omit an underscore */
  1243 #  ifdef _SC_PAGESIZE           /* some SVR4 systems omit an underscore */
  1242 #    ifndef _SC_PAGE_SIZE
  1244 #    ifndef _SC_PAGE_SIZE
  1243 #      define _SC_PAGE_SIZE _SC_PAGESIZE
  1245 #      define _SC_PAGE_SIZE _SC_PAGESIZE
  1244 #    endif
  1246 #    endif
  1245 #  endif
  1247 #  endif
  1246 #  ifdef _SC_PAGE_SIZE
  1248 #  ifdef _SC_PAGE_SIZE
  1247 #    define malloc_getpagesize sysconf(_SC_PAGE_SIZE)
  1249 #    define malloc_getpagesize sysconf(_SC_PAGE_SIZE)
  1248 #  else
  1250 #  else
  1249 #    if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
  1251 #    if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
  1250        extern size_t getpagesize();
  1252 extern size_t getpagesize();
  1251 #      define malloc_getpagesize getpagesize()
  1253 #      define malloc_getpagesize getpagesize()
  1252 #    else
  1254 #    else
  1253 #      ifdef WIN32 /* use supplied emulation of getpagesize */
  1255 #      ifdef WIN32              /* use supplied emulation of getpagesize */
  1254 #        define malloc_getpagesize getpagesize()
  1256 #        define malloc_getpagesize getpagesize()
  1255 #      else
  1257 #      else
  1256 #        ifndef LACKS_SYS_PARAM_H
  1258 #        ifndef LACKS_SYS_PARAM_H
  1257 #          include <sys/param.h>
  1259 #          include <sys/param.h>
  1258 #        endif
  1260 #        endif
  1319 */
  1321 */
  1320 
  1322 
  1321 
  1323 
  1322 /* MORECORE and MMAP must return MFAIL on failure */
  1324 /* MORECORE and MMAP must return MFAIL on failure */
  1323 #define MFAIL                ((void*)(MAX_SIZE_T))
  1325 #define MFAIL                ((void*)(MAX_SIZE_T))
  1324 #define CMFAIL               ((char*)(MFAIL)) /* defined for convenience */
  1326 #define CMFAIL               ((char*)(MFAIL))   /* defined for convenience */
  1325 
  1327 
  1326 #if !HAVE_MMAP
  1328 #if !HAVE_MMAP
  1327 #define IS_MMAPPED_BIT       (SIZE_T_ZERO)
  1329 #define IS_MMAPPED_BIT       (SIZE_T_ZERO)
  1328 #define USE_MMAP_BIT         (SIZE_T_ZERO)
  1330 #define USE_MMAP_BIT         (SIZE_T_ZERO)
  1329 #define CALL_MMAP(s)         MFAIL
  1331 #define CALL_MMAP(s)         MFAIL
  1347 /*
  1349 /*
  1348    Nearly all versions of mmap support MAP_ANONYMOUS, so the following
  1350    Nearly all versions of mmap support MAP_ANONYMOUS, so the following
  1349    is unlikely to be needed, but is supplied just in case.
  1351    is unlikely to be needed, but is supplied just in case.
  1350 */
  1352 */
  1351 #define MMAP_FLAGS           (MAP_PRIVATE)
  1353 #define MMAP_FLAGS           (MAP_PRIVATE)
  1352 static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */
  1354 static int dev_zero_fd = -1;    /* Cached file descriptor for /dev/zero. */
  1353 #define CALL_MMAP(s) ((dev_zero_fd < 0) ? \
  1355 #define CALL_MMAP(s) ((dev_zero_fd < 0) ? \
  1354            (dev_zero_fd = open("/dev/zero", O_RDWR), \
  1356            (dev_zero_fd = open("/dev/zero", O_RDWR), \
  1355             mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) : \
  1357             mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) : \
  1356             mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0))
  1358             mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0))
  1357 #endif /* MAP_ANONYMOUS */
  1359 #endif /* MAP_ANONYMOUS */
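The /dev/zero fallback above can be demonstrated standalone. A POSIX-only sketch of the same technique (unlike CALL_MMAP, this version does not cache the descriptor):

#include <stddef.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/mman.h>

/* Map 'len' zero-filled bytes the pre-MAP_ANONYMOUS way. */
static void *map_zeroed(size_t len)
{
    int fd = open("/dev/zero", O_RDWR);
    void *p;
    if (fd < 0)
        return MAP_FAILED;
    p = mmap(0, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
    close(fd);                       /* the mapping outlives the fd */
    return p;
}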
  1358 
  1360 
  1359 #define DIRECT_MMAP(s)       CALL_MMAP(s)
  1361 #define DIRECT_MMAP(s)       CALL_MMAP(s)
  1360 #else /* WIN32 */
  1362 #else /* WIN32 */
  1361 
  1363 
  1362 /* Win32 MMAP via VirtualAlloc */
  1364 /* Win32 MMAP via VirtualAlloc */
  1363 static void* win32mmap(size_t size) {
  1365 static void *
  1364   void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE);
  1366 win32mmap(size_t size)
  1365   return (ptr != 0)? ptr: MFAIL;
  1367 {
  1368     void *ptr =
  1369         VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
  1370     return (ptr != 0) ? ptr : MFAIL;
  1366 }
  1371 }
  1367 
  1372 
  1368 /* For direct MMAP, use MEM_TOP_DOWN to minimize interference */
  1373 /* For direct MMAP, use MEM_TOP_DOWN to minimize interference */
  1369 static void* win32direct_mmap(size_t size) {
  1374 static void *
  1370   void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN,
  1375 win32direct_mmap(size_t size)
  1371                            PAGE_READWRITE);
  1376 {
  1372   return (ptr != 0)? ptr: MFAIL;
  1377     void *ptr = VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT | MEM_TOP_DOWN,
  1378                              PAGE_READWRITE);
  1379     return (ptr != 0) ? ptr : MFAIL;
  1373 }
  1380 }
  1374 
  1381 
   1375 /* This function supports releasing coalesced segments */
   1382 /* This function supports releasing coalesced segments */
  1376 static int win32munmap(void* ptr, size_t size) {
  1383 static int
  1377   MEMORY_BASIC_INFORMATION minfo;
  1384 win32munmap(void *ptr, size_t size)
  1378   char* cptr = ptr;
  1385 {
  1379   while (size) {
  1386     MEMORY_BASIC_INFORMATION minfo;
  1380     if (VirtualQuery(cptr, &minfo, sizeof(minfo)) == 0)
  1387     char *cptr = ptr;
  1381       return -1;
  1388     while (size) {
  1382     if (minfo.BaseAddress != cptr || minfo.AllocationBase != cptr ||
  1389         if (VirtualQuery(cptr, &minfo, sizeof(minfo)) == 0)
  1383         minfo.State != MEM_COMMIT || minfo.RegionSize > size)
  1390             return -1;
  1384       return -1;
  1391         if (minfo.BaseAddress != cptr || minfo.AllocationBase != cptr ||
  1385     if (VirtualFree(cptr, 0, MEM_RELEASE) == 0)
  1392             minfo.State != MEM_COMMIT || minfo.RegionSize > size)
  1386       return -1;
  1393             return -1;
  1387     cptr += minfo.RegionSize;
  1394         if (VirtualFree(cptr, 0, MEM_RELEASE) == 0)
  1388     size -= minfo.RegionSize;
  1395             return -1;
  1389   }
  1396         cptr += minfo.RegionSize;
  1390   return 0;
  1397         size -= minfo.RegionSize;
  1398     }
  1399     return 0;
  1391 }
  1400 }
  1392 
  1401 
  1393 #define CALL_MMAP(s)         win32mmap(s)
  1402 #define CALL_MMAP(s)         win32mmap(s)
  1394 #define CALL_MUNMAP(a, s)    win32munmap((a), (s))
  1403 #define CALL_MUNMAP(a, s)    win32munmap((a), (s))
  1395 #define DIRECT_MMAP(s)       win32direct_mmap(s)
  1404 #define DIRECT_MMAP(s)       win32direct_mmap(s)
  1396 #endif /* WIN32 */
  1405 #endif /* WIN32 */
  1397 #endif /* HAVE_MMAP */
  1406 #endif /* HAVE_MMAP */
  1398 
  1407 
  1399 #if HAVE_MMAP && HAVE_MREMAP
  1408 #if HAVE_MMAP && HAVE_MREMAP
  1400 #define CALL_MREMAP(addr, osz, nsz, mv) mremap((addr), (osz), (nsz), (mv))
  1409 #define CALL_MREMAP(addr, osz, nsz, mv) mremap((addr), (osz), (nsz), (mv))
  1401 #else  /* HAVE_MMAP && HAVE_MREMAP */
  1410 #else /* HAVE_MMAP && HAVE_MREMAP */
  1402 #define CALL_MREMAP(addr, osz, nsz, mv) MFAIL
  1411 #define CALL_MREMAP(addr, osz, nsz, mv) MFAIL
  1403 #endif /* HAVE_MMAP && HAVE_MREMAP */
  1412 #endif /* HAVE_MMAP && HAVE_MREMAP */
  1404 
  1413 
  1405 #if HAVE_MORECORE
  1414 #if HAVE_MORECORE
  1406 #define CALL_MORECORE(S)     MORECORE(S)
  1415 #define CALL_MORECORE(S)     MORECORE(S)
  1407 #else  /* HAVE_MORECORE */
  1416 #else /* HAVE_MORECORE */
  1408 #define CALL_MORECORE(S)     MFAIL
  1417 #define CALL_MORECORE(S)     MFAIL
  1409 #endif /* HAVE_MORECORE */
  1418 #endif /* HAVE_MORECORE */
  1410 
  1419 
   1411 /* mstate bit set if contiguous morecore disabled or failed */
   1420 /* mstate bit set if contiguous morecore disabled or failed */
  1412 #define USE_NONCONTIGUOUS_BIT (4U)
  1421 #define USE_NONCONTIGUOUS_BIT (4U)
  1452    Because lock-protected regions have bounded times, and there
  1461    Because lock-protected regions have bounded times, and there
  1453    are no recursive lock calls, we can use simple spinlocks.
  1462    are no recursive lock calls, we can use simple spinlocks.
  1454 */
  1463 */
  1455 
  1464 
  1456 #define MLOCK_T long
  1465 #define MLOCK_T long
  1457 static int win32_acquire_lock (MLOCK_T *sl) {
  1466 static int
  1458   for (;;) {
  1467 win32_acquire_lock(MLOCK_T * sl)
  1468 {
  1469     for (;;) {
  1459 #ifdef InterlockedCompareExchangePointer
  1470 #ifdef InterlockedCompareExchangePointer
  1460     if (!InterlockedCompareExchange(sl, 1, 0))
  1471         if (!InterlockedCompareExchange(sl, 1, 0))
  1461       return 0;
  1472             return 0;
  1462 #else  /* Use older void* version */
  1473 #else /* Use older void* version */
  1463     if (!InterlockedCompareExchange((void**)sl, (void*)1, (void*)0))
  1474         if (!InterlockedCompareExchange((void **) sl, (void *) 1, (void *) 0))
  1464       return 0;
  1475             return 0;
  1465 #endif /* InterlockedCompareExchangePointer */
  1476 #endif /* InterlockedCompareExchangePointer */
  1466     Sleep (0);
  1477         Sleep(0);
  1467   }
  1478     }
  1468 }
  1479 }
  1469 
  1480 
  1470 static void win32_release_lock (MLOCK_T *sl) {
  1481 static void
  1471   InterlockedExchange (sl, 0);
  1482 win32_release_lock(MLOCK_T * sl)
  1483 {
  1484     InterlockedExchange(sl, 0);
  1472 }
  1485 }
  1473 
  1486 
  1474 #define INITIAL_LOCK(l)      *(l)=0
  1487 #define INITIAL_LOCK(l)      *(l)=0
  1475 #define ACQUIRE_LOCK(l)      win32_acquire_lock(l)
  1488 #define ACQUIRE_LOCK(l)      win32_acquire_lock(l)
  1476 #define RELEASE_LOCK(l)      win32_release_lock(l)
  1489 #define RELEASE_LOCK(l)      win32_release_lock(l)
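For comparison, the POSIX builds of dlmalloc map the same macros onto a pthread mutex; a hedged sketch of that shape (the actual non-Win32 branch lies outside this hunk):

#include <pthread.h>

#define MLOCK_T            pthread_mutex_t
#define INITIAL_LOCK(l)    pthread_mutex_init(l, NULL)
#define ACQUIRE_LOCK(l)    pthread_mutex_lock(l)
#define RELEASE_LOCK(l)    pthread_mutex_unlock(l)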
  1479 #endif /* HAVE_MORECORE */
  1492 #endif /* HAVE_MORECORE */
  1480 static MLOCK_T magic_init_mutex;
  1493 static MLOCK_T magic_init_mutex;
  1481 #endif /* WIN32 */
  1494 #endif /* WIN32 */
  1482 
  1495 
  1483 #define USE_LOCK_BIT               (2U)
  1496 #define USE_LOCK_BIT               (2U)
  1484 #else  /* USE_LOCKS */
  1497 #else /* USE_LOCKS */
  1485 #define USE_LOCK_BIT               (0U)
  1498 #define USE_LOCK_BIT               (0U)
  1486 #define INITIAL_LOCK(l)
  1499 #define INITIAL_LOCK(l)
  1487 #endif /* USE_LOCKS */
  1500 #endif /* USE_LOCKS */
  1488 
  1501 
  1489 #if USE_LOCKS && HAVE_MORECORE
  1502 #if USE_LOCKS && HAVE_MORECORE
  1495 #endif /* USE_LOCKS && HAVE_MORECORE */
  1508 #endif /* USE_LOCKS && HAVE_MORECORE */
  1496 
  1509 
  1497 #if USE_LOCKS
  1510 #if USE_LOCKS
  1498 #define ACQUIRE_MAGIC_INIT_LOCK()  ACQUIRE_LOCK(&magic_init_mutex);
  1511 #define ACQUIRE_MAGIC_INIT_LOCK()  ACQUIRE_LOCK(&magic_init_mutex);
  1499 #define RELEASE_MAGIC_INIT_LOCK()  RELEASE_LOCK(&magic_init_mutex);
  1512 #define RELEASE_MAGIC_INIT_LOCK()  RELEASE_LOCK(&magic_init_mutex);
  1500 #else  /* USE_LOCKS */
  1513 #else /* USE_LOCKS */
  1501 #define ACQUIRE_MAGIC_INIT_LOCK()
  1514 #define ACQUIRE_MAGIC_INIT_LOCK()
  1502 #define RELEASE_MAGIC_INIT_LOCK()
  1515 #define RELEASE_MAGIC_INIT_LOCK()
  1503 #endif /* USE_LOCKS */
  1516 #endif /* USE_LOCKS */
  1504 
  1517 
  1505 
  1518 
  1638         chunk is trailed by the first two fields of a fake next-chunk
  1651         chunk is trailed by the first two fields of a fake next-chunk
  1639         for sake of usage checks.
  1652         for sake of usage checks.
  1640 
  1653 
  1641 */
  1654 */
  1642 
  1655 
  1643 struct malloc_chunk {
  1656 struct malloc_chunk
  1644   size_t               prev_foot;  /* Size of previous chunk (if free).  */
  1657 {
  1645   size_t               head;       /* Size and inuse bits. */
  1658     size_t prev_foot;           /* Size of previous chunk (if free).  */
  1646   struct malloc_chunk* fd;         /* double links -- used only if free. */
  1659     size_t head;                /* Size and inuse bits. */
  1647   struct malloc_chunk* bk;
  1660     struct malloc_chunk *fd;    /* double links -- used only if free. */
  1661     struct malloc_chunk *bk;
  1648 };
  1662 };
  1649 
  1663 
  1650 typedef struct malloc_chunk  mchunk;
  1664 typedef struct malloc_chunk mchunk;
  1651 typedef struct malloc_chunk* mchunkptr;
  1665 typedef struct malloc_chunk *mchunkptr;
  1652 typedef struct malloc_chunk* sbinptr;  /* The type of bins of chunks */
  1666 typedef struct malloc_chunk *sbinptr;   /* The type of bins of chunks */
  1653 typedef size_t bindex_t;               /* Described below */
  1667 typedef size_t bindex_t;        /* Described below */
  1654 typedef unsigned int binmap_t;         /* Described below */
  1668 typedef unsigned int binmap_t;  /* Described below */
  1655 typedef unsigned int flag_t;           /* The type of various bit flag sets */
  1669 typedef unsigned int flag_t;    /* The type of various bit flag sets */
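Illustrative only: 'head' packs the chunk size together with two low status bits (named PINUSE_BIT and CINUSE_BIT later in the file; the values below follow stock dlmalloc and are restated here as an assumption):

#define SKETCH_PINUSE ((size_t)1)    /* previous chunk is in use */
#define SKETCH_CINUSE ((size_t)2)    /* this chunk is in use */

/* Recover a chunk's size by masking off the status bits. */
static size_t sketch_chunksize(size_t head)
{
    return head & ~(SKETCH_PINUSE | SKETCH_CINUSE);
}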
  1656 
  1670 
   1657 /* -------------------- Chunk sizes and alignments ----------------------- */
   1671 /* -------------------- Chunk sizes and alignments ----------------------- */
  1658 
  1672 
  1659 #define MCHUNK_SIZE         (sizeof(mchunk))
  1673 #define MCHUNK_SIZE         (sizeof(mchunk))
  1660 
  1674 
  1843   bins. Under current bin calculations, this ranges from 6 up to 21
  1857   bins. Under current bin calculations, this ranges from 6 up to 21
  1844   (for 32 bit sizes) or up to 53 (for 64 bit sizes). The typical case
  1858   (for 32 bit sizes) or up to 53 (for 64 bit sizes). The typical case
  1845   is of course much better.
  1859   is of course much better.
  1846 */
  1860 */
  1847 
  1861 
  1848 struct malloc_tree_chunk {
  1862 struct malloc_tree_chunk
  1849   /* The first four fields must be compatible with malloc_chunk */
  1863 {
  1850   size_t                    prev_foot;
  1864     /* The first four fields must be compatible with malloc_chunk */
  1851   size_t                    head;
  1865     size_t prev_foot;
  1852   struct malloc_tree_chunk* fd;
  1866     size_t head;
  1853   struct malloc_tree_chunk* bk;
  1867     struct malloc_tree_chunk *fd;
  1854 
  1868     struct malloc_tree_chunk *bk;
  1855   struct malloc_tree_chunk* child[2];
  1869 
  1856   struct malloc_tree_chunk* parent;
  1870     struct malloc_tree_chunk *child[2];
  1857   bindex_t                  index;
  1871     struct malloc_tree_chunk *parent;
  1872     bindex_t index;
  1858 };
  1873 };
  1859 
  1874 
  1860 typedef struct malloc_tree_chunk  tchunk;
  1875 typedef struct malloc_tree_chunk tchunk;
  1861 typedef struct malloc_tree_chunk* tchunkptr;
  1876 typedef struct malloc_tree_chunk *tchunkptr;
  1862 typedef struct malloc_tree_chunk* tbinptr; /* The type of bins of trees */
  1877 typedef struct malloc_tree_chunk *tbinptr;      /* The type of bins of trees */
  1863 
  1878 
  1864 /* A little helper macro for trees */
  1879 /* A little helper macro for trees */
  1865 #define leftmost_child(t) ((t)->child[0] != 0? (t)->child[0] : (t)->child[1])
  1880 #define leftmost_child(t) ((t)->child[0] != 0? (t)->child[0] : (t)->child[1])
  1866 
  1881 
  1867 /* ----------------------------- Segments -------------------------------- */
  1882 /* ----------------------------- Segments -------------------------------- */
  1919   * If neither bit is set, then the segment was obtained using
  1934   * If neither bit is set, then the segment was obtained using
  1920     MORECORE so can be merged with surrounding MORECORE'd segments
  1935     MORECORE so can be merged with surrounding MORECORE'd segments
  1921     and deallocated/trimmed using MORECORE with negative arguments.
  1936     and deallocated/trimmed using MORECORE with negative arguments.
  1922 */
  1937 */
  1923 
  1938 
  1924 struct malloc_segment {
  1939 struct malloc_segment
  1925   char*        base;             /* base address */
  1940 {
  1926   size_t       size;             /* allocated size */
  1941     char *base;                 /* base address */
  1927   struct malloc_segment* next;   /* ptr to next segment */
  1942     size_t size;                /* allocated size */
  1928   flag_t       sflags;           /* mmap and extern flag */
  1943     struct malloc_segment *next;        /* ptr to next segment */
  1944     flag_t sflags;              /* mmap and extern flag */
  1929 };
  1945 };
  1930 
  1946 
  1931 #define is_mmapped_segment(S)  ((S)->sflags & IS_MMAPPED_BIT)
  1947 #define is_mmapped_segment(S)  ((S)->sflags & IS_MMAPPED_BIT)
  1932 #define is_extern_segment(S)   ((S)->sflags & EXTERN_BIT)
  1948 #define is_extern_segment(S)   ((S)->sflags & EXTERN_BIT)
  1933 
  1949 
  1934 typedef struct malloc_segment  msegment;
  1950 typedef struct malloc_segment msegment;
  1935 typedef struct malloc_segment* msegmentptr;
  1951 typedef struct malloc_segment *msegmentptr;
  1936 
  1952 
  1937 /* ---------------------------- malloc_state ----------------------------- */
  1953 /* ---------------------------- malloc_state ----------------------------- */
  1938 
  1954 
  1939 /*
  1955 /*
  1940    A malloc_state holds all of the bookkeeping for a space.
  1956    A malloc_state holds all of the bookkeeping for a space.
  2017 #define TREEBIN_SHIFT     (8U)
  2033 #define TREEBIN_SHIFT     (8U)
  2018 #define MIN_LARGE_SIZE    (SIZE_T_ONE << TREEBIN_SHIFT)
  2034 #define MIN_LARGE_SIZE    (SIZE_T_ONE << TREEBIN_SHIFT)
  2019 #define MAX_SMALL_SIZE    (MIN_LARGE_SIZE - SIZE_T_ONE)
  2035 #define MAX_SMALL_SIZE    (MIN_LARGE_SIZE - SIZE_T_ONE)
  2020 #define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD)
  2036 #define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD)
  2021 
  2037 
  2022 struct malloc_state {
  2038 struct malloc_state
  2023   binmap_t   smallmap;
  2039 {
  2024   binmap_t   treemap;
  2040     binmap_t smallmap;
  2025   size_t     dvsize;
  2041     binmap_t treemap;
  2026   size_t     topsize;
  2042     size_t dvsize;
  2027   char*      least_addr;
  2043     size_t topsize;
  2028   mchunkptr  dv;
  2044     char *least_addr;
  2029   mchunkptr  top;
  2045     mchunkptr dv;
  2030   size_t     trim_check;
  2046     mchunkptr top;
  2031   size_t     magic;
  2047     size_t trim_check;
  2032   mchunkptr  smallbins[(NSMALLBINS+1)*2];
  2048     size_t magic;
  2033   tbinptr    treebins[NTREEBINS];
  2049     mchunkptr smallbins[(NSMALLBINS + 1) * 2];
  2034   size_t     footprint;
  2050     tbinptr treebins[NTREEBINS];
  2035   size_t     max_footprint;
  2051     size_t footprint;
  2036   flag_t     mflags;
  2052     size_t max_footprint;
  2053     flag_t mflags;
  2037 #if USE_LOCKS
  2054 #if USE_LOCKS
  2038   MLOCK_T    mutex;     /* locate lock among fields that rarely change */
  2055     MLOCK_T mutex;              /* locate lock among fields that rarely change */
  2039 #endif /* USE_LOCKS */
  2056 #endif                          /* USE_LOCKS */
  2040   msegment   seg;
  2057     msegment seg;
  2041 };
  2058 };
  2042 
  2059 
  2043 typedef struct malloc_state*    mstate;
  2060 typedef struct malloc_state *mstate;
  2044 
  2061 
  2045 /* ------------- Global malloc_state and malloc_params ------------------- */
  2062 /* ------------- Global malloc_state and malloc_params ------------------- */
  2046 
  2063 
  2047 /*
  2064 /*
  2048   malloc_params holds global properties, including those that can be
  2065   malloc_params holds global properties, including those that can be
  2049   dynamically set using mallopt. There is a single instance, mparams,
  2066   dynamically set using mallopt. There is a single instance, mparams,
  2050   initialized in init_mparams.
  2067   initialized in init_mparams.
  2051 */
  2068 */
  2052 
  2069 
  2053 struct malloc_params {
  2070 struct malloc_params
  2054   size_t magic;
  2071 {
  2055   size_t page_size;
  2072     size_t magic;
  2056   size_t granularity;
  2073     size_t page_size;
  2057   size_t mmap_threshold;
  2074     size_t granularity;
  2058   size_t trim_threshold;
  2075     size_t mmap_threshold;
  2059   flag_t default_mflags;
  2076     size_t trim_threshold;
  2077     flag_t default_mflags;
  2060 };
  2078 };
  2061 
  2079 
  2062 static struct malloc_params mparams;
  2080 static struct malloc_params mparams;
  2063 
  2081 
  2064 /* The global malloc_state used for all non-"mspace" calls */
  2082 /* The global malloc_state used for all non-"mspace" calls */
  2103 /*  True if segment S holds address A */
  2121 /*  True if segment S holds address A */
  2104 #define segment_holds(S, A)\
  2122 #define segment_holds(S, A)\
  2105   ((char*)(A) >= S->base && (char*)(A) < S->base + S->size)
  2123   ((char*)(A) >= S->base && (char*)(A) < S->base + S->size)
  2106 
  2124 
  2107 /* Return segment holding given address */
  2125 /* Return segment holding given address */
  2108 static msegmentptr segment_holding(mstate m, char* addr) {
  2126 static msegmentptr
  2109   msegmentptr sp = &m->seg;
  2127 segment_holding(mstate m, char *addr)
  2110   for (;;) {
  2128 {
  2111     if (addr >= sp->base && addr < sp->base + sp->size)
  2129     msegmentptr sp = &m->seg;
  2112       return sp;
  2130     for (;;) {
  2113     if ((sp = sp->next) == 0)
  2131         if (addr >= sp->base && addr < sp->base + sp->size)
  2114       return 0;
  2132             return sp;
  2115   }
  2133         if ((sp = sp->next) == 0)
   2134             return 0;
  2135     }
  2116 }
  2136 }
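/*
  Usage note (illustrative): segment_holding answers "which segment
  owns this address?", returning 0 for addresses foreign to m:

      msegmentptr sp = segment_holding(m, (char *) p);
      if (sp == 0)
          reject(p);       hypothetical caller response, name assumed

  do_check_top_chunk below uses exactly this lookup to verify that top
  ends TOP_FOOT_SIZE bytes before its segment's end.
*/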
  2117 
  2137 
  2118 /* Return true if segment contains a segment link */
  2138 /* Return true if segment contains a segment link */
  2119 static int has_segment_link(mstate m, msegmentptr ss) {
  2139 static int
  2120   msegmentptr sp = &m->seg;
  2140 has_segment_link(mstate m, msegmentptr ss)
  2121   for (;;) {
  2141 {
  2122     if ((char*)sp >= ss->base && (char*)sp < ss->base + ss->size)
  2142     msegmentptr sp = &m->seg;
  2123       return 1;
  2143     for (;;) {
  2124     if ((sp = sp->next) == 0)
  2144         if ((char *) sp >= ss->base && (char *) sp < ss->base + ss->size)
  2125       return 0;
  2145             return 1;
  2126   }
  2146         if ((sp = sp->next) == 0)
   2147             return 0;
  2148     }
  2127 }
  2149 }
  2128 
  2150 
  2129 #ifndef MORECORE_CANNOT_TRIM
  2151 #ifndef MORECORE_CANNOT_TRIM
  2130 #define should_trim(M,s)  ((s) > (M)->trim_check)
  2152 #define should_trim(M,s)  ((s) > (M)->trim_check)
  2131 #else  /* MORECORE_CANNOT_TRIM */
  2153 #else /* MORECORE_CANNOT_TRIM */
  2132 #define should_trim(M,s)  (0)
  2154 #define should_trim(M,s)  (0)
  2133 #endif /* MORECORE_CANNOT_TRIM */
  2155 #endif /* MORECORE_CANNOT_TRIM */
  2134 
  2156 
  2135 /*
  2157 /*
  2136   TOP_FOOT_SIZE is padding at the end of a segment, including space
  2158   TOP_FOOT_SIZE is padding at the end of a segment, including space
  2158 #define POSTACTION(M) { if (use_lock(M)) RELEASE_LOCK(&(M)->mutex); }
  2180 #define POSTACTION(M) { if (use_lock(M)) RELEASE_LOCK(&(M)->mutex); }
  2159 #else /* USE_LOCKS */
  2181 #else /* USE_LOCKS */
  2160 
  2182 
  2161 #ifndef PREACTION
  2183 #ifndef PREACTION
  2162 #define PREACTION(M) (0)
  2184 #define PREACTION(M) (0)
  2163 #endif  /* PREACTION */
  2185 #endif /* PREACTION */
  2164 
  2186 
  2165 #ifndef POSTACTION
  2187 #ifndef POSTACTION
  2166 #define POSTACTION(M)
  2188 #define POSTACTION(M)
  2167 #endif  /* POSTACTION */
  2189 #endif /* POSTACTION */
  2168 
  2190 
  2169 #endif /* USE_LOCKS */
  2191 #endif /* USE_LOCKS */
  2170 
  2192 
  2171 /*
  2193 /*
  2172   CORRUPTION_ERROR_ACTION is triggered upon detected bad addresses.
  2194   CORRUPTION_ERROR_ACTION is triggered upon detected bad addresses.
  2216 #define check_top_chunk(M,P)        do_check_top_chunk(M,P)
  2238 #define check_top_chunk(M,P)        do_check_top_chunk(M,P)
  2217 #define check_malloced_chunk(M,P,N) do_check_malloced_chunk(M,P,N)
  2239 #define check_malloced_chunk(M,P,N) do_check_malloced_chunk(M,P,N)
  2218 #define check_mmapped_chunk(M,P)    do_check_mmapped_chunk(M,P)
  2240 #define check_mmapped_chunk(M,P)    do_check_mmapped_chunk(M,P)
  2219 #define check_malloc_state(M)       do_check_malloc_state(M)
  2241 #define check_malloc_state(M)       do_check_malloc_state(M)
  2220 
  2242 
  2221 static void   do_check_any_chunk(mstate m, mchunkptr p);
  2243 static void do_check_any_chunk(mstate m, mchunkptr p);
  2222 static void   do_check_top_chunk(mstate m, mchunkptr p);
  2244 static void do_check_top_chunk(mstate m, mchunkptr p);
  2223 static void   do_check_mmapped_chunk(mstate m, mchunkptr p);
  2245 static void do_check_mmapped_chunk(mstate m, mchunkptr p);
  2224 static void   do_check_inuse_chunk(mstate m, mchunkptr p);
  2246 static void do_check_inuse_chunk(mstate m, mchunkptr p);
  2225 static void   do_check_free_chunk(mstate m, mchunkptr p);
  2247 static void do_check_free_chunk(mstate m, mchunkptr p);
  2226 static void   do_check_malloced_chunk(mstate m, void* mem, size_t s);
  2248 static void do_check_malloced_chunk(mstate m, void *mem, size_t s);
  2227 static void   do_check_tree(mstate m, tchunkptr t);
  2249 static void do_check_tree(mstate m, tchunkptr t);
  2228 static void   do_check_treebin(mstate m, bindex_t i);
  2250 static void do_check_treebin(mstate m, bindex_t i);
  2229 static void   do_check_smallbin(mstate m, bindex_t i);
  2251 static void do_check_smallbin(mstate m, bindex_t i);
  2230 static void   do_check_malloc_state(mstate m);
  2252 static void do_check_malloc_state(mstate m);
  2231 static int    bin_find(mstate m, mchunkptr x);
  2253 static int bin_find(mstate m, mchunkptr x);
  2232 static size_t traverse_and_check(mstate m);
  2254 static size_t traverse_and_check(mstate m);
  2233 #endif /* DEBUG */
  2255 #endif /* DEBUG */
  2234 
  2256 
  2235 /* ---------------------------- Indexing Bins ---------------------------- */
  2257 /* ---------------------------- Indexing Bins ---------------------------- */
  2236 
  2258 
  2392 #endif /* !INSECURE */
  2414 #endif /* !INSECURE */
  2393 
  2415 
  2394 #if (FOOTERS && !INSECURE)
  2416 #if (FOOTERS && !INSECURE)
  2395 /* Check if (alleged) mstate m has expected magic field */
  2417 /* Check if (alleged) mstate m has expected magic field */
  2396 #define ok_magic(M)      ((M)->magic == mparams.magic)
  2418 #define ok_magic(M)      ((M)->magic == mparams.magic)
  2397 #else  /* (FOOTERS && !INSECURE) */
  2419 #else /* (FOOTERS && !INSECURE) */
  2398 #define ok_magic(M)      (1)
  2420 #define ok_magic(M)      (1)
  2399 #endif /* (FOOTERS && !INSECURE) */
  2421 #endif /* (FOOTERS && !INSECURE) */
  2400 
  2422 
  2401 
  2423 
  2402 /* In gcc, use __builtin_expect to minimize impact of checks */
  2424 /* In gcc, use __builtin_expect to minimize impact of checks */
  2457 #endif /* !FOOTERS */
  2479 #endif /* !FOOTERS */
  2458 
  2480 
  2459 /* ---------------------------- setting mparams -------------------------- */
  2481 /* ---------------------------- setting mparams -------------------------- */
  2460 
  2482 
  2461 /* Initialize mparams */
  2483 /* Initialize mparams */
  2462 static int init_mparams(void) {
  2484 static int
  2463   if (mparams.page_size == 0) {
  2485 init_mparams(void)
  2464     size_t s;
  2486 {
  2465 
  2487     if (mparams.page_size == 0) {
  2466     mparams.mmap_threshold = DEFAULT_MMAP_THRESHOLD;
  2488         size_t s;
  2467     mparams.trim_threshold = DEFAULT_TRIM_THRESHOLD;
  2489 
   2490         mparams.mmap_threshold = DEFAULT_MMAP_THRESHOLD;
  2491         mparams.trim_threshold = DEFAULT_TRIM_THRESHOLD;
  2468 #if MORECORE_CONTIGUOUS
  2492 #if MORECORE_CONTIGUOUS
  2469     mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT;
  2493         mparams.default_mflags = USE_LOCK_BIT | USE_MMAP_BIT;
  2470 #else  /* MORECORE_CONTIGUOUS */
  2494 #else /* MORECORE_CONTIGUOUS */
  2471     mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT|USE_NONCONTIGUOUS_BIT;
  2495         mparams.default_mflags =
  2496             USE_LOCK_BIT | USE_MMAP_BIT | USE_NONCONTIGUOUS_BIT;
  2472 #endif /* MORECORE_CONTIGUOUS */
  2497 #endif /* MORECORE_CONTIGUOUS */
  2473 
  2498 
  2474 #if (FOOTERS && !INSECURE)
  2499 #if (FOOTERS && !INSECURE)
  2475     {
  2500         {
  2476 #if USE_DEV_RANDOM
  2501 #if USE_DEV_RANDOM
  2477       int fd;
  2502             int fd;
  2478       unsigned char buf[sizeof(size_t)];
  2503             unsigned char buf[sizeof(size_t)];
  2479       /* Try to use /dev/urandom, else fall back on using time */
  2504             /* Try to use /dev/urandom, else fall back on using time */
  2480       if ((fd = open("/dev/urandom", O_RDONLY)) >= 0 &&
  2505             if ((fd = open("/dev/urandom", O_RDONLY)) >= 0 &&
  2481           read(fd, buf, sizeof(buf)) == sizeof(buf)) {
  2506                 read(fd, buf, sizeof(buf)) == sizeof(buf)) {
  2482         s = *((size_t *) buf);
  2507                 s = *((size_t *) buf);
  2483         close(fd);
  2508                 close(fd);
  2484       }
  2509             } else
  2485       else
  2486 #endif /* USE_DEV_RANDOM */
  2510 #endif /* USE_DEV_RANDOM */
  2487         s = (size_t)(time(0) ^ (size_t)0x55555555U);
  2511                 s = (size_t) (time(0) ^ (size_t) 0x55555555U);
  2488 
  2512 
  2489       s |= (size_t)8U;    /* ensure nonzero */
  2513             s |= (size_t) 8U;   /* ensure nonzero */
  2490       s &= ~(size_t)7U;   /* improve chances of fault for bad values */
  2514             s &= ~(size_t) 7U;  /* improve chances of fault for bad values */
  2491 
  2515 
  2492     }
  2516         }
  2493 #else /* (FOOTERS && !INSECURE) */
  2517 #else /* (FOOTERS && !INSECURE) */
  2494     s = (size_t)0x58585858U;
  2518         s = (size_t) 0x58585858U;
  2495 #endif /* (FOOTERS && !INSECURE) */
  2519 #endif /* (FOOTERS && !INSECURE) */
  2496     ACQUIRE_MAGIC_INIT_LOCK();
  2520         ACQUIRE_MAGIC_INIT_LOCK();
  2497     if (mparams.magic == 0) {
  2521         if (mparams.magic == 0) {
  2498       mparams.magic = s;
  2522             mparams.magic = s;
  2499       /* Set up lock for main malloc area */
  2523             /* Set up lock for main malloc area */
  2500       INITIAL_LOCK(&gm->mutex);
  2524             INITIAL_LOCK(&gm->mutex);
  2501       gm->mflags = mparams.default_mflags;
  2525             gm->mflags = mparams.default_mflags;
  2502     }
  2526         }
  2503     RELEASE_MAGIC_INIT_LOCK();
  2527         RELEASE_MAGIC_INIT_LOCK();
  2504 
  2528 
  2505 #ifndef WIN32
  2529 #ifndef WIN32
  2506     mparams.page_size = malloc_getpagesize;
  2530         mparams.page_size = malloc_getpagesize;
  2507     mparams.granularity = ((DEFAULT_GRANULARITY != 0)?
  2531         mparams.granularity = ((DEFAULT_GRANULARITY != 0) ?
  2508                            DEFAULT_GRANULARITY : mparams.page_size);
  2532                                DEFAULT_GRANULARITY : mparams.page_size);
  2509 #else /* WIN32 */
  2533 #else /* WIN32 */
  2510     {
  2534         {
  2511       SYSTEM_INFO system_info;
  2535             SYSTEM_INFO system_info;
  2512       GetSystemInfo(&system_info);
  2536             GetSystemInfo(&system_info);
  2513       mparams.page_size = system_info.dwPageSize;
  2537             mparams.page_size = system_info.dwPageSize;
  2514       mparams.granularity = system_info.dwAllocationGranularity;
  2538             mparams.granularity = system_info.dwAllocationGranularity;
  2515     }
  2539         }
  2516 #endif /* WIN32 */
  2540 #endif /* WIN32 */
  2517 
  2541 
  2518     /* Sanity-check configuration:
  2542         /* Sanity-check configuration:
  2519        size_t must be unsigned and as wide as pointer type.
  2543            size_t must be unsigned and as wide as pointer type.
  2520        ints must be at least 4 bytes.
  2544            ints must be at least 4 bytes.
  2521        alignment must be at least 8.
  2545            alignment must be at least 8.
  2522        Alignment, min chunk size, and page size must all be powers of 2.
  2546            Alignment, min chunk size, and page size must all be powers of 2.
  2523     */
  2547          */
  2524     if ((sizeof(size_t) != sizeof(char*)) ||
  2548         if ((sizeof(size_t) != sizeof(char *)) ||
  2525         (MAX_SIZE_T < MIN_CHUNK_SIZE)  ||
  2549             (MAX_SIZE_T < MIN_CHUNK_SIZE) ||
  2526         (sizeof(int) < 4)  ||
  2550             (sizeof(int) < 4) ||
  2527         (MALLOC_ALIGNMENT < (size_t)8U) ||
  2551             (MALLOC_ALIGNMENT < (size_t) 8U) ||
  2528         ((MALLOC_ALIGNMENT    & (MALLOC_ALIGNMENT-SIZE_T_ONE))    != 0) ||
  2552             ((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT - SIZE_T_ONE)) != 0) ||
  2529         ((MCHUNK_SIZE         & (MCHUNK_SIZE-SIZE_T_ONE))         != 0) ||
  2553             ((MCHUNK_SIZE & (MCHUNK_SIZE - SIZE_T_ONE)) != 0) ||
  2530         ((mparams.granularity & (mparams.granularity-SIZE_T_ONE)) != 0) ||
  2554             ((mparams.granularity & (mparams.granularity - SIZE_T_ONE)) != 0)
  2531         ((mparams.page_size   & (mparams.page_size-SIZE_T_ONE))   != 0))
  2555             || ((mparams.page_size & (mparams.page_size - SIZE_T_ONE)) != 0))
  2532       ABORT;
  2556             ABORT;
  2533   }
  2557     }
  2534   return 0;
  2558     return 0;
  2535 }
  2559 }
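/*
  Worked example for the sanity checks above (values illustrative): a
  nonzero v is a power of two exactly when (v & (v - 1)) == 0, since
  subtracting one clears the lone set bit and sets every bit below it:

      0x1000 & 0x0FFF == 0      4096, a valid page size, passes
      0x1800 & 0x17FF != 0      6144 would trip the check and ABORT
*/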
  2536 
  2560 
  2537 /* support for mallopt */
  2561 /* support for mallopt */
  2538 static int change_mparam(int param_number, int value) {
  2562 static int
  2539   size_t val = (size_t)value;
  2563 change_mparam(int param_number, int value)
  2540   init_mparams();
  2564 {
  2541   switch(param_number) {
  2565     size_t val = (size_t) value;
  2542   case M_TRIM_THRESHOLD:
  2566     init_mparams();
  2543     mparams.trim_threshold = val;
  2567     switch (param_number) {
  2544     return 1;
  2568     case M_TRIM_THRESHOLD:
  2545   case M_GRANULARITY:
  2569         mparams.trim_threshold = val;
  2546     if (val >= mparams.page_size && ((val & (val-1)) == 0)) {
  2570         return 1;
  2547       mparams.granularity = val;
  2571     case M_GRANULARITY:
  2548       return 1;
  2572         if (val >= mparams.page_size && ((val & (val - 1)) == 0)) {
  2549     }
  2573             mparams.granularity = val;
  2550     else
  2574             return 1;
  2551       return 0;
  2575         } else
  2552   case M_MMAP_THRESHOLD:
  2576             return 0;
  2553     mparams.mmap_threshold = val;
  2577     case M_MMAP_THRESHOLD:
  2554     return 1;
  2578         mparams.mmap_threshold = val;
  2555   default:
  2579         return 1;
  2556     return 0;
  2580     default:
  2557   }
  2581         return 0;
  2582     }
  2558 }
  2583 }
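/*
  Usage sketch (illustrative values): change_mparam backs mallopt-style
  tuning, returning 1 on success and 0 for a rejected value or an
  unknown parameter number:

      change_mparam(M_TRIM_THRESHOLD, 2 * 1024 * 1024);   always 1
      change_mparam(M_GRANULARITY, 64 * 1024);            1 only if the
                                                          value is >= the
                                                          page size and a
                                                          power of two
*/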
  2559 
  2584 
  2560 #if DEBUG
  2585 #if DEBUG
  2561 /* ------------------------- Debugging Support --------------------------- */
  2586 /* ------------------------- Debugging Support --------------------------- */
  2562 
  2587 
  2563 /* Check properties of any chunk, whether free, inuse, mmapped etc  */
  2588 /* Check properties of any chunk, whether free, inuse, mmapped etc  */
  2564 static void do_check_any_chunk(mstate m, mchunkptr p) {
  2589 static void
  2565   assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
  2590 do_check_any_chunk(mstate m, mchunkptr p)
  2566   assert(ok_address(m, p));
  2591 {
   2592     assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
  2593     assert(ok_address(m, p));
  2567 }
  2594 }
  2568 
  2595 
  2569 /* Check properties of top chunk */
  2596 /* Check properties of top chunk */
  2570 static void do_check_top_chunk(mstate m, mchunkptr p) {
  2597 static void
  2571   msegmentptr sp = segment_holding(m, (char*)p);
  2598 do_check_top_chunk(mstate m, mchunkptr p)
  2572   size_t  sz = chunksize(p);
  2599 {
  2573   assert(sp != 0);
  2600     msegmentptr sp = segment_holding(m, (char *) p);
  2574   assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
  2601     size_t sz = chunksize(p);
  2575   assert(ok_address(m, p));
  2602     assert(sp != 0);
  2576   assert(sz == m->topsize);
  2603     assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
  2577   assert(sz > 0);
  2604     assert(ok_address(m, p));
  2578   assert(sz == ((sp->base + sp->size) - (char*)p) - TOP_FOOT_SIZE);
  2605     assert(sz == m->topsize);
  2579   assert(pinuse(p));
  2606     assert(sz > 0);
  2580   assert(!next_pinuse(p));
  2607     assert(sz == ((sp->base + sp->size) - (char *) p) - TOP_FOOT_SIZE);
   2608     assert(pinuse(p));
  2609     assert(!next_pinuse(p));
  2581 }
  2610 }
  2582 
  2611 
  2583 /* Check properties of (inuse) mmapped chunks */
  2612 /* Check properties of (inuse) mmapped chunks */
  2584 static void do_check_mmapped_chunk(mstate m, mchunkptr p) {
  2613 static void
  2585   size_t  sz = chunksize(p);
  2614 do_check_mmapped_chunk(mstate m, mchunkptr p)
  2586   size_t len = (sz + (p->prev_foot & ~IS_MMAPPED_BIT) + MMAP_FOOT_PAD);
  2615 {
  2587   assert(is_mmapped(p));
  2616     size_t sz = chunksize(p);
  2588   assert(use_mmap(m));
  2617     size_t len = (sz + (p->prev_foot & ~IS_MMAPPED_BIT) + MMAP_FOOT_PAD);
  2589   assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
  2618     assert(is_mmapped(p));
  2590   assert(ok_address(m, p));
  2619     assert(use_mmap(m));
  2591   assert(!is_small(sz));
  2620     assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
  2592   assert((len & (mparams.page_size-SIZE_T_ONE)) == 0);
  2621     assert(ok_address(m, p));
  2593   assert(chunk_plus_offset(p, sz)->head == FENCEPOST_HEAD);
  2622     assert(!is_small(sz));
  2594   assert(chunk_plus_offset(p, sz+SIZE_T_SIZE)->head == 0);
  2623     assert((len & (mparams.page_size - SIZE_T_ONE)) == 0);
   2624     assert(chunk_plus_offset(p, sz)->head == FENCEPOST_HEAD);
  2625     assert(chunk_plus_offset(p, sz + SIZE_T_SIZE)->head == 0);
  2595 }
  2626 }
  2596 
  2627 
  2597 /* Check properties of inuse chunks */
  2628 /* Check properties of inuse chunks */
  2598 static void do_check_inuse_chunk(mstate m, mchunkptr p) {
  2629 static void
  2599   do_check_any_chunk(m, p);
  2630 do_check_inuse_chunk(mstate m, mchunkptr p)
  2600   assert(cinuse(p));
  2631 {
  2601   assert(next_pinuse(p));
  2632     do_check_any_chunk(m, p);
  2602   /* If not pinuse and not mmapped, previous chunk has OK offset */
  2633     assert(cinuse(p));
  2603   assert(is_mmapped(p) || pinuse(p) || next_chunk(prev_chunk(p)) == p);
  2634     assert(next_pinuse(p));
  2604   if (is_mmapped(p))
  2635     /* If not pinuse and not mmapped, previous chunk has OK offset */
  2605     do_check_mmapped_chunk(m, p);
  2636     assert(is_mmapped(p) || pinuse(p) || next_chunk(prev_chunk(p)) == p);
   2637     if (is_mmapped(p))
  2638         do_check_mmapped_chunk(m, p);
  2606 }
  2639 }
  2607 
  2640 
  2608 /* Check properties of free chunks */
  2641 /* Check properties of free chunks */
  2609 static void do_check_free_chunk(mstate m, mchunkptr p) {
  2642 static void
  2610   size_t sz = p->head & ~(PINUSE_BIT|CINUSE_BIT);
  2643 do_check_free_chunk(mstate m, mchunkptr p)
  2611   mchunkptr next = chunk_plus_offset(p, sz);
  2644 {
  2612   do_check_any_chunk(m, p);
  2645     size_t sz = p->head & ~(PINUSE_BIT | CINUSE_BIT);
  2613   assert(!cinuse(p));
  2646     mchunkptr next = chunk_plus_offset(p, sz);
  2614   assert(!next_pinuse(p));
  2647     do_check_any_chunk(m, p);
  2615   assert (!is_mmapped(p));
  2648     assert(!cinuse(p));
  2616   if (p != m->dv && p != m->top) {
  2649     assert(!next_pinuse(p));
  2617     if (sz >= MIN_CHUNK_SIZE) {
  2650     assert(!is_mmapped(p));
  2618       assert((sz & CHUNK_ALIGN_MASK) == 0);
  2651     if (p != m->dv && p != m->top) {
  2619       assert(is_aligned(chunk2mem(p)));
  2652         if (sz >= MIN_CHUNK_SIZE) {
  2620       assert(next->prev_foot == sz);
  2653             assert((sz & CHUNK_ALIGN_MASK) == 0);
  2621       assert(pinuse(p));
  2654             assert(is_aligned(chunk2mem(p)));
  2622       assert (next == m->top || cinuse(next));
  2655             assert(next->prev_foot == sz);
  2623       assert(p->fd->bk == p);
  2656             assert(pinuse(p));
  2624       assert(p->bk->fd == p);
  2657             assert(next == m->top || cinuse(next));
  2625     }
  2658             assert(p->fd->bk == p);
  2626     else  /* markers are always of size SIZE_T_SIZE */
  2659             assert(p->bk->fd == p);
  2627       assert(sz == SIZE_T_SIZE);
  2660         } else                  /* markers are always of size SIZE_T_SIZE */
  2628   }
  2661             assert(sz == SIZE_T_SIZE);
  2662     }
  2629 }
  2663 }
  2630 
  2664 
  2631 /* Check properties of malloced chunks at the point they are malloced */
  2665 /* Check properties of malloced chunks at the point they are malloced */
  2632 static void do_check_malloced_chunk(mstate m, void* mem, size_t s) {
  2666 static void
  2633   if (mem != 0) {
  2667 do_check_malloced_chunk(mstate m, void *mem, size_t s)
  2634     mchunkptr p = mem2chunk(mem);
  2668 {
  2635     size_t sz = p->head & ~(PINUSE_BIT|CINUSE_BIT);
  2669     if (mem != 0) {
  2636     do_check_inuse_chunk(m, p);
  2670         mchunkptr p = mem2chunk(mem);
  2637     assert((sz & CHUNK_ALIGN_MASK) == 0);
  2671         size_t sz = p->head & ~(PINUSE_BIT | CINUSE_BIT);
  2638     assert(sz >= MIN_CHUNK_SIZE);
  2672         do_check_inuse_chunk(m, p);
  2639     assert(sz >= s);
  2673         assert((sz & CHUNK_ALIGN_MASK) == 0);
  2640     /* unless mmapped, size is less than MIN_CHUNK_SIZE more than request */
  2674         assert(sz >= MIN_CHUNK_SIZE);
  2641     assert(is_mmapped(p) || sz < (s + MIN_CHUNK_SIZE));
  2675         assert(sz >= s);
  2642   }
  2676         /* unless mmapped, size is less than MIN_CHUNK_SIZE more than request */
   2677         assert(is_mmapped(p) || sz < (s + MIN_CHUNK_SIZE));
  2678     }
  2643 }
  2679 }
  2644 
  2680 
  2645 /* Check a tree and its subtrees.  */
  2681 /* Check a tree and its subtrees.  */
  2646 static void do_check_tree(mstate m, tchunkptr t) {
  2682 static void
  2647   tchunkptr head = 0;
  2683 do_check_tree(mstate m, tchunkptr t)
  2648   tchunkptr u = t;
  2684 {
  2649   bindex_t tindex = t->index;
  2685     tchunkptr head = 0;
  2650   size_t tsize = chunksize(t);
  2686     tchunkptr u = t;
  2651   bindex_t idx;
  2687     bindex_t tindex = t->index;
  2652   compute_tree_index(tsize, idx);
  2688     size_t tsize = chunksize(t);
  2653   assert(tindex == idx);
  2689     bindex_t idx;
  2654   assert(tsize >= MIN_LARGE_SIZE);
  2690     compute_tree_index(tsize, idx);
  2655   assert(tsize >= minsize_for_tree_index(idx));
  2691     assert(tindex == idx);
  2656   assert((idx == NTREEBINS-1) || (tsize < minsize_for_tree_index((idx+1))));
  2692     assert(tsize >= MIN_LARGE_SIZE);
  2657 
  2693     assert(tsize >= minsize_for_tree_index(idx));
  2658   do { /* traverse through chain of same-sized nodes */
  2694     assert((idx == NTREEBINS - 1)
  2659     do_check_any_chunk(m, ((mchunkptr)u));
  2695            || (tsize < minsize_for_tree_index((idx + 1))));
  2660     assert(u->index == tindex);
  2696 
  2661     assert(chunksize(u) == tsize);
  2697     do {                        /* traverse through chain of same-sized nodes */
  2662     assert(!cinuse(u));
  2698         do_check_any_chunk(m, ((mchunkptr) u));
  2663     assert(!next_pinuse(u));
  2699         assert(u->index == tindex);
  2664     assert(u->fd->bk == u);
  2700         assert(chunksize(u) == tsize);
  2665     assert(u->bk->fd == u);
  2701         assert(!cinuse(u));
  2666     if (u->parent == 0) {
  2702         assert(!next_pinuse(u));
  2667       assert(u->child[0] == 0);
  2703         assert(u->fd->bk == u);
  2668       assert(u->child[1] == 0);
  2704         assert(u->bk->fd == u);
  2669     }
  2705         if (u->parent == 0) {
  2670     else {
  2706             assert(u->child[0] == 0);
  2671       assert(head == 0); /* only one node on chain has parent */
  2707             assert(u->child[1] == 0);
  2672       head = u;
  2708         } else {
  2673       assert(u->parent != u);
  2709             assert(head == 0);  /* only one node on chain has parent */
  2674       assert (u->parent->child[0] == u ||
  2710             head = u;
  2675               u->parent->child[1] == u ||
  2711             assert(u->parent != u);
  2676               *((tbinptr*)(u->parent)) == u);
  2712             assert(u->parent->child[0] == u ||
  2677       if (u->child[0] != 0) {
  2713                    u->parent->child[1] == u ||
  2678         assert(u->child[0]->parent == u);
  2714                    *((tbinptr *) (u->parent)) == u);
  2679         assert(u->child[0] != u);
  2715             if (u->child[0] != 0) {
  2680         do_check_tree(m, u->child[0]);
  2716                 assert(u->child[0]->parent == u);
  2681       }
  2717                 assert(u->child[0] != u);
  2682       if (u->child[1] != 0) {
  2718                 do_check_tree(m, u->child[0]);
  2683         assert(u->child[1]->parent == u);
  2719             }
  2684         assert(u->child[1] != u);
  2720             if (u->child[1] != 0) {
  2685         do_check_tree(m, u->child[1]);
  2721                 assert(u->child[1]->parent == u);
  2686       }
  2722                 assert(u->child[1] != u);
  2687       if (u->child[0] != 0 && u->child[1] != 0) {
  2723                 do_check_tree(m, u->child[1]);
  2688         assert(chunksize(u->child[0]) < chunksize(u->child[1]));
  2724             }
  2689       }
  2725             if (u->child[0] != 0 && u->child[1] != 0) {
  2690     }
  2726                 assert(chunksize(u->child[0]) < chunksize(u->child[1]));
  2691     u = u->fd;
  2727             }
  2692   } while (u != t);
  2728         }
  2693   assert(head != 0);
  2729         u = u->fd;
   2730     }
   2731     while (u != t);
  2732     assert(head != 0);
  2694 }
  2733 }
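/*
  Concretely (sizes illustrative): with TREEBIN_SHIFT at 8, chunks of
  264, 272 and 280 bytes all land in the same treebin; every 264-byte
  chunk sits on one fd/bk ring, and exactly one node of that ring is
  wired into the tree through its parent pointer, which is why the
  traversal above asserts head != 0 exactly once at the end.
*/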
  2695 
  2734 
  2696 /*  Check all the chunks in a treebin.  */
  2735 /*  Check all the chunks in a treebin.  */
  2697 static void do_check_treebin(mstate m, bindex_t i) {
  2736 static void
  2698   tbinptr* tb = treebin_at(m, i);
  2737 do_check_treebin(mstate m, bindex_t i)
  2699   tchunkptr t = *tb;
  2738 {
  2700   int empty = (m->treemap & (1U << i)) == 0;
  2739     tbinptr *tb = treebin_at(m, i);
  2701   if (t == 0)
  2740     tchunkptr t = *tb;
  2702     assert(empty);
  2741     int empty = (m->treemap & (1U << i)) == 0;
  2703   if (!empty)
  2742     if (t == 0)
  2704     do_check_tree(m, t);
  2743         assert(empty);
   2744     if (!empty)
  2745         do_check_tree(m, t);
  2705 }
  2746 }
  2706 
  2747 
  2707 /*  Check all the chunks in a smallbin.  */
  2748 /*  Check all the chunks in a smallbin.  */
  2708 static void do_check_smallbin(mstate m, bindex_t i) {
  2749 static void
  2709   sbinptr b = smallbin_at(m, i);
  2750 do_check_smallbin(mstate m, bindex_t i)
  2710   mchunkptr p = b->bk;
  2751 {
  2711   unsigned int empty = (m->smallmap & (1U << i)) == 0;
  2752     sbinptr b = smallbin_at(m, i);
  2712   if (p == b)
  2753     mchunkptr p = b->bk;
  2713     assert(empty);
  2754     unsigned int empty = (m->smallmap & (1U << i)) == 0;
  2714   if (!empty) {
  2755     if (p == b)
  2715     for (; p != b; p = p->bk) {
  2756         assert(empty);
  2716       size_t size = chunksize(p);
  2757     if (!empty) {
  2717       mchunkptr q;
  2758         for (; p != b; p = p->bk) {
  2718       /* each chunk claims to be free */
  2759             size_t size = chunksize(p);
  2719       do_check_free_chunk(m, p);
  2760             mchunkptr q;
  2720       /* chunk belongs in bin */
  2761             /* each chunk claims to be free */
  2721       assert(small_index(size) == i);
  2762             do_check_free_chunk(m, p);
  2722       assert(p->bk == b || chunksize(p->bk) == chunksize(p));
  2763             /* chunk belongs in bin */
  2723       /* chunk is followed by an inuse chunk */
  2764             assert(small_index(size) == i);
  2724       q = next_chunk(p);
  2765             assert(p->bk == b || chunksize(p->bk) == chunksize(p));
  2725       if (q->head != FENCEPOST_HEAD)
  2766             /* chunk is followed by an inuse chunk */
  2726         do_check_inuse_chunk(m, q);
  2767             q = next_chunk(p);
  2727     }
  2768             if (q->head != FENCEPOST_HEAD)
  2728   }
  2769                 do_check_inuse_chunk(m, q);
   2770         }
  2771     }
  2729 }
  2772 }
  2730 
  2773 
  2731 /* Find x in a bin. Used in other check functions. */
  2774 /* Find x in a bin. Used in other check functions. */
  2732 static int bin_find(mstate m, mchunkptr x) {
  2775 static int
  2733   size_t size = chunksize(x);
  2776 bin_find(mstate m, mchunkptr x)
  2734   if (is_small(size)) {
  2777 {
  2735     bindex_t sidx = small_index(size);
  2778     size_t size = chunksize(x);
  2736     sbinptr b = smallbin_at(m, sidx);
  2779     if (is_small(size)) {
  2737     if (smallmap_is_marked(m, sidx)) {
  2780         bindex_t sidx = small_index(size);
  2738       mchunkptr p = b;
  2781         sbinptr b = smallbin_at(m, sidx);
  2739       do {
  2782         if (smallmap_is_marked(m, sidx)) {
  2740         if (p == x)
  2783             mchunkptr p = b;
  2741           return 1;
  2784             do {
  2742       } while ((p = p->fd) != b);
  2785                 if (p == x)
  2743     }
  2786                     return 1;
  2744   }
  2787             }
  2745   else {
  2788             while ((p = p->fd) != b);
  2746     bindex_t tidx;
  2789         }
  2747     compute_tree_index(size, tidx);
  2790     } else {
  2748     if (treemap_is_marked(m, tidx)) {
  2791         bindex_t tidx;
  2749       tchunkptr t = *treebin_at(m, tidx);
  2792         compute_tree_index(size, tidx);
  2750       size_t sizebits = size << leftshift_for_tree_index(tidx);
  2793         if (treemap_is_marked(m, tidx)) {
  2751       while (t != 0 && chunksize(t) != size) {
  2794             tchunkptr t = *treebin_at(m, tidx);
  2752         t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
  2795             size_t sizebits = size << leftshift_for_tree_index(tidx);
  2753         sizebits <<= 1;
  2796             while (t != 0 && chunksize(t) != size) {
  2754       }
  2797                 t = t->child[(sizebits >> (SIZE_T_BITSIZE - SIZE_T_ONE)) & 1];
  2755       if (t != 0) {
  2798                 sizebits <<= 1;
  2756         tchunkptr u = t;
  2799             }
  2757         do {
  2800             if (t != 0) {
  2758           if (u == (tchunkptr)x)
  2801                 tchunkptr u = t;
  2759             return 1;
  2802                 do {
  2760         } while ((u = u->fd) != t);
  2803                     if (u == (tchunkptr) x)
  2761       }
  2804                         return 1;
  2762     }
  2805                 }
  2763   }
  2806                 while ((u = u->fd) != t);
  2764   return 0;
  2807             }
   2808         }
   2809     }
  2810     return 0;
  2765 }
  2811 }
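/*
  The tree descent above works like a bitwise trie lookup (sketch):
  sizebits starts as the size shifted left so that the first
  bin-distinguishing bit occupies the top position of size_t; each
  iteration reads that top bit to pick child[0] or child[1],

      t = t->child[(sizebits >> (SIZE_T_BITSIZE - SIZE_T_ONE)) & 1];

  then shifts sizebits left by one, consuming one key bit per level.
*/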
  2766 
  2812 
  2767 /* Traverse each chunk and check it; return total */
  2813 /* Traverse each chunk and check it; return total */
  2768 static size_t traverse_and_check(mstate m) {
  2814 static size_t
  2769   size_t sum = 0;
  2815 traverse_and_check(mstate m)
  2770   if (is_initialized(m)) {
  2816 {
  2771     msegmentptr s = &m->seg;
  2817     size_t sum = 0;
  2772     sum += m->topsize + TOP_FOOT_SIZE;
  2818     if (is_initialized(m)) {
  2773     while (s != 0) {
  2819         msegmentptr s = &m->seg;
  2774       mchunkptr q = align_as_chunk(s->base);
  2820         sum += m->topsize + TOP_FOOT_SIZE;
  2775       mchunkptr lastq = 0;
  2821         while (s != 0) {
  2776       assert(pinuse(q));
  2822             mchunkptr q = align_as_chunk(s->base);
  2777       while (segment_holds(s, q) &&
  2823             mchunkptr lastq = 0;
  2778              q != m->top && q->head != FENCEPOST_HEAD) {
  2824             assert(pinuse(q));
  2779         sum += chunksize(q);
  2825             while (segment_holds(s, q) &&
  2780         if (cinuse(q)) {
  2826                    q != m->top && q->head != FENCEPOST_HEAD) {
  2781           assert(!bin_find(m, q));
  2827                 sum += chunksize(q);
  2782           do_check_inuse_chunk(m, q);
  2828                 if (cinuse(q)) {
   2829                     assert(!bin_find(m, q));
   2830                     do_check_inuse_chunk(m, q);
   2831                 } else {
   2832                     assert(q == m->dv || bin_find(m, q));
   2833                     assert(lastq == 0 || cinuse(lastq));        /* Not 2 consecutive free */
   2834                     do_check_free_chunk(m, q);
   2835                 }
   2836                 lastq = q;
   2837                 q = next_chunk(q);
   2838             }
  2839             s = s->next;
  2783         }
  2840         }
  2784         else {
  2841     }
  2785           assert(q == m->dv || bin_find(m, q));
  2842     return sum;
  2786           assert(lastq == 0 || cinuse(lastq)); /* Not 2 consecutive free */
  2843 }
  2787           do_check_free_chunk(m, q);
  2844 
   2845 /* Check all properties of malloc_state. */
   2846 static void
   2847 do_check_malloc_state(mstate m)
   2848 {
   2849     bindex_t i;
   2850     size_t total;
   2851     /* check bins */
   2852     for (i = 0; i < NSMALLBINS; ++i)
   2853         do_check_smallbin(m, i);
   2854     for (i = 0; i < NTREEBINS; ++i)
   2855         do_check_treebin(m, i);
   2856 
   2857     if (m->dvsize != 0) {       /* check dv chunk */
   2858         do_check_any_chunk(m, m->dv);
   2859         assert(m->dvsize == chunksize(m->dv));
   2860         assert(m->dvsize >= MIN_CHUNK_SIZE);
   2861         assert(bin_find(m, m->dv) == 0);
   2862     }
   2863 
   2864     if (m->top != 0) {          /* check top chunk */
   2865         do_check_top_chunk(m, m->top);
   2866         assert(m->topsize == chunksize(m->top));
   2867         assert(m->topsize > 0);
   2868         assert(bin_find(m, m->top) == 0);
   2869     }
   2870 
   2871     total = traverse_and_check(m);
   2872     assert(total <= m->footprint);
   2873     assert(m->footprint <= m->max_footprint);
   2874 }
   2875 #endif /* DEBUG */
   2876 
   2877 /* ----------------------------- statistics ------------------------------ */
   2878 
   2879 #if !NO_MALLINFO
   2880 static struct mallinfo
   2881 internal_mallinfo(mstate m)
   2882 {
   2883     struct mallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
   2884     if (!PREACTION(m)) {
   2885         check_malloc_state(m);
   2886         if (is_initialized(m)) {
   2887             size_t nfree = SIZE_T_ONE;  /* top always free */
   2888             size_t mfree = m->topsize + TOP_FOOT_SIZE;
   2889             size_t sum = mfree;
   2890             msegmentptr s = &m->seg;
   2891             while (s != 0) {
   2892                 mchunkptr q = align_as_chunk(s->base);
   2893                 while (segment_holds(s, q) &&
   2894                        q != m->top && q->head != FENCEPOST_HEAD) {
   2895                     size_t sz = chunksize(q);
   2896                     sum += sz;
   2897                     if (!cinuse(q)) {
   2898                         mfree += sz;
   2899                         ++nfree;
   2900                     }
   2901                     q = next_chunk(q);
   2902                 }
   2903                 s = s->next;
   2904             }
   2905 
   2906             nm.arena = sum;
   2907             nm.ordblks = nfree;
   2908             nm.hblkhd = m->footprint - sum;
   2909             nm.usmblks = m->max_footprint;
   2910             nm.uordblks = m->footprint - mfree;
   2911             nm.fordblks = mfree;
  2912             nm.keepcost = m->topsize;
  2788         }
  2913         }
  2789         lastq = q;
  2914 
  2790         q = next_chunk(q);
  2915         POSTACTION(m);
  2791       }
  2916     }
  2792       s = s->next;
  2917     return nm;
  2793     }
  2918 }
  2794   }
  2919 #endif /* !NO_MALLINFO */
  2795   return sum;
  2920 
  2796 }
  2921 static void
  2797 
  2922 internal_malloc_stats(mstate m)
  2798 /* Check all properties of malloc_state. */
  2923 {
  2799 static void do_check_malloc_state(mstate m) {
  2924     if (!PREACTION(m)) {
  2800   bindex_t i;
  2925         size_t maxfp = 0;
  2801   size_t total;
  2926         size_t fp = 0;
  2802   /* check bins */
  2927         size_t used = 0;
  2803   for (i = 0; i < NSMALLBINS; ++i)
  2928         check_malloc_state(m);
  2804     do_check_smallbin(m, i);
  2929         if (is_initialized(m)) {
  2805   for (i = 0; i < NTREEBINS; ++i)
  2930             msegmentptr s = &m->seg;
  2806     do_check_treebin(m, i);
  2931             maxfp = m->max_footprint;
  2807 
  2932             fp = m->footprint;
  2808   if (m->dvsize != 0) { /* check dv chunk */
  2933             used = fp - (m->topsize + TOP_FOOT_SIZE);
  2809     do_check_any_chunk(m, m->dv);
  2934 
  2810     assert(m->dvsize == chunksize(m->dv));
  2935             while (s != 0) {
  2811     assert(m->dvsize >= MIN_CHUNK_SIZE);
  2936                 mchunkptr q = align_as_chunk(s->base);
  2812     assert(bin_find(m, m->dv) == 0);
  2937                 while (segment_holds(s, q) &&
  2813   }
  2938                        q != m->top && q->head != FENCEPOST_HEAD) {
  2814 
  2939                     if (!cinuse(q))
  2815   if (m->top != 0) {   /* check top chunk */
  2940                         used -= chunksize(q);
  2816     do_check_top_chunk(m, m->top);
  2941                     q = next_chunk(q);
  2817     assert(m->topsize == chunksize(m->top));
  2942                 }
  2818     assert(m->topsize > 0);
  2943                 s = s->next;
  2819     assert(bin_find(m, m->top) == 0);
  2944             }
  2820   }
   2821 
   2822   total = traverse_and_check(m);
   2823   assert(total <= m->footprint);
   2824   assert(m->footprint <= m->max_footprint);
   2825 }
   2826 #endif /* DEBUG */
   2827 
   2828 /* ----------------------------- statistics ------------------------------ */
   2829 
   2830 #if !NO_MALLINFO
   2831 static struct mallinfo internal_mallinfo(mstate m) {
   2832   struct mallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
   2833   if (!PREACTION(m)) {
   2834     check_malloc_state(m);
   2835     if (is_initialized(m)) {
   2836       size_t nfree = SIZE_T_ONE; /* top always free */
   2837       size_t mfree = m->topsize + TOP_FOOT_SIZE;
   2838       size_t sum = mfree;
   2839       msegmentptr s = &m->seg;
   2840       while (s != 0) {
   2841         mchunkptr q = align_as_chunk(s->base);
   2842         while (segment_holds(s, q) &&
   2843                q != m->top && q->head != FENCEPOST_HEAD) {
   2844           size_t sz = chunksize(q);
   2845           sum += sz;
   2846           if (!cinuse(q)) {
   2847             mfree += sz;
   2848             ++nfree;
   2849           }
   2850           q = next_chunk(q);
  2851         }
  2945         }
  2852         s = s->next;
   2853       }
   2854 
   2855       nm.arena    = sum;
   2856       nm.ordblks  = nfree;
   2857       nm.hblkhd   = m->footprint - sum;
   2858       nm.usmblks  = m->max_footprint;
   2859       nm.uordblks = m->footprint - mfree;
   2860       nm.fordblks = mfree;
   2861       nm.keepcost = m->topsize;
   2862     }
   2863 
   2864     POSTACTION(m);
   2865   }
   2866   return nm;
   2867 }
   2868 #endif /* !NO_MALLINFO */
   2869 
   2870 static void internal_malloc_stats(mstate m) {
   2871   if (!PREACTION(m)) {
   2872     size_t maxfp = 0;
   2873     size_t fp = 0;
   2874     size_t used = 0;
   2875     check_malloc_state(m);
   2876     if (is_initialized(m)) {
   2877       msegmentptr s = &m->seg;
   2878       maxfp = m->max_footprint;
   2879       fp = m->footprint;
   2880       used = fp - (m->topsize + TOP_FOOT_SIZE);
   2881 
   2882       while (s != 0) {
   2883         mchunkptr q = align_as_chunk(s->base);
   2884         while (segment_holds(s, q) &&
   2885                q != m->top && q->head != FENCEPOST_HEAD) {
   2886           if (!cinuse(q))
   2887             used -= chunksize(q);
   2888           q = next_chunk(q);
   2889         }
   2890         s = s->next;
   2891       }
   2892     }
   2893 
  2894 #ifndef LACKS_STDIO_H
  2946 #ifndef LACKS_STDIO_H
  2895     fprintf(stderr, "max system bytes = %10lu\n", (unsigned long)(maxfp));
  2947         fprintf(stderr, "max system bytes = %10lu\n",
  2896     fprintf(stderr, "system bytes     = %10lu\n", (unsigned long)(fp));
  2948                 (unsigned long) (maxfp));
  2897     fprintf(stderr, "in use bytes     = %10lu\n", (unsigned long)(used));
  2949         fprintf(stderr, "system bytes     = %10lu\n", (unsigned long) (fp));
  2950         fprintf(stderr, "in use bytes     = %10lu\n", (unsigned long) (used));
  2898 #endif
  2951 #endif
  2899 
  2952 
  2900     POSTACTION(m);
  2953         POSTACTION(m);
  2901   }
  2954     }
  2902 }
  2955 }
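/*
  When stdio is available, internal_malloc_stats emits three lines of
  this shape (numbers purely illustrative):

      max system bytes =    1048576
      system bytes     =     524288
      in use bytes     =     131072

  "used" starts at footprint minus the top chunk and its foot, then
  subtracts every free chunk met while walking the segments, so it
  approximates bytes currently handed out plus per-chunk overhead.
*/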
  2903 
  2956 
  2904 /* ----------------------- Operations on smallbins ----------------------- */
  2957 /* ----------------------- Operations on smallbins ----------------------- */
  2905 
  2958 
  2906 /*
  2959 /*
  3160   allocated to hold a fake next chunk of size SIZE_T_SIZE to maintain
  3213   allocated to hold a fake next chunk of size SIZE_T_SIZE to maintain
  3161   the PINUSE bit so frees can be checked.
  3214   the PINUSE bit so frees can be checked.
  3162 */
  3215 */
  3163 
  3216 
  3164 /* Malloc using mmap */
  3217 /* Malloc using mmap */
  3165 static void* mmap_alloc(mstate m, size_t nb) {
  3218 static void *
  3166   size_t mmsize = granularity_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
  3219 mmap_alloc(mstate m, size_t nb)
  3167   if (mmsize > nb) {     /* Check for wrap around 0 */
  3220 {
  3168     char* mm = (char*)(DIRECT_MMAP(mmsize));
  3221     size_t mmsize =
  3169     if (mm != CMFAIL) {
  3222         granularity_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
  3170       size_t offset = align_offset(chunk2mem(mm));
  3223     if (mmsize > nb) {          /* Check for wrap around 0 */
  3171       size_t psize = mmsize - offset - MMAP_FOOT_PAD;
  3224         char *mm = (char *) (DIRECT_MMAP(mmsize));
  3172       mchunkptr p = (mchunkptr)(mm + offset);
  3225         if (mm != CMFAIL) {
  3173       p->prev_foot = offset | IS_MMAPPED_BIT;
  3226             size_t offset = align_offset(chunk2mem(mm));
  3174       (p)->head = (psize|CINUSE_BIT);
  3227             size_t psize = mmsize - offset - MMAP_FOOT_PAD;
  3175       mark_inuse_foot(m, p, psize);
  3228             mchunkptr p = (mchunkptr) (mm + offset);
  3176       chunk_plus_offset(p, psize)->head = FENCEPOST_HEAD;
  3229             p->prev_foot = offset | IS_MMAPPED_BIT;
  3177       chunk_plus_offset(p, psize+SIZE_T_SIZE)->head = 0;
  3230             (p)->head = (psize | CINUSE_BIT);
  3178 
  3231             mark_inuse_foot(m, p, psize);
  3179       if (mm < m->least_addr)
  3232             chunk_plus_offset(p, psize)->head = FENCEPOST_HEAD;
  3180         m->least_addr = mm;
  3233             chunk_plus_offset(p, psize + SIZE_T_SIZE)->head = 0;
  3181       if ((m->footprint += mmsize) > m->max_footprint)
  3234 
  3182         m->max_footprint = m->footprint;
  3235             if (mm < m->least_addr)
  3183       assert(is_aligned(chunk2mem(p)));
  3236                 m->least_addr = mm;
  3184       check_mmapped_chunk(m, p);
  3237             if ((m->footprint += mmsize) > m->max_footprint)
  3185       return chunk2mem(p);
  3238                 m->max_footprint = m->footprint;
  3186     }
  3239             assert(is_aligned(chunk2mem(p)));
  3187   }
  3240             check_mmapped_chunk(m, p);
  3188   return 0;
  3241             return chunk2mem(p);
   3242         }
   3243     }
  3244     return 0;
  3189 }
  3245 }
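/*
  Worked example (illustrative, assuming 8-byte size_t and a 64KB
  granularity): for nb == 100000,

      mmsize = granularity_align(100000 + 6*8 + 7)
             = granularity_align(100055) = 131072

  The mmsize > nb test rejects the case where this arithmetic wraps
  past zero for absurdly large nb, making mmap_alloc fail cleanly.
*/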
  3190 
  3246 
  3191 /* Realloc using mmap */
  3247 /* Realloc using mmap */
  3192 static mchunkptr mmap_resize(mstate m, mchunkptr oldp, size_t nb) {
  3248 static mchunkptr
  3193   size_t oldsize = chunksize(oldp);
  3249 mmap_resize(mstate m, mchunkptr oldp, size_t nb)
  3194   if (is_small(nb)) /* Can't shrink mmap regions below small size */
  3250 {
   3251     size_t oldsize = chunksize(oldp);
   3252     if (is_small(nb))           /* Can't shrink mmap regions below small size */
   3253         return 0;
   3254     /* Keep old chunk if big enough but not too big */
   3255     if (oldsize >= nb + SIZE_T_SIZE &&
   3256         (oldsize - nb) <= (mparams.granularity << 1))
   3257         return oldp;
   3258     else {
   3259         size_t offset = oldp->prev_foot & ~IS_MMAPPED_BIT;
   3260         size_t oldmmsize = oldsize + offset + MMAP_FOOT_PAD;
   3261         size_t newmmsize = granularity_align(nb + SIX_SIZE_T_SIZES +
   3262                                              CHUNK_ALIGN_MASK);
   3263         char *cp = (char *) CALL_MREMAP((char *) oldp - offset,
   3264                                         oldmmsize, newmmsize, 1);
   3265         if (cp != CMFAIL) {
   3266             mchunkptr newp = (mchunkptr) (cp + offset);
   3267             size_t psize = newmmsize - offset - MMAP_FOOT_PAD;
   3268             newp->head = (psize | CINUSE_BIT);
   3269             mark_inuse_foot(m, newp, psize);
   3270             chunk_plus_offset(newp, psize)->head = FENCEPOST_HEAD;
   3271             chunk_plus_offset(newp, psize + SIZE_T_SIZE)->head = 0;
   3272 
   3273             if (cp < m->least_addr)
   3274                 m->least_addr = cp;
   3275             if ((m->footprint += newmmsize - oldmmsize) > m->max_footprint)
   3276                 m->max_footprint = m->footprint;
   3277             check_mmapped_chunk(m, newp);
   3278             return newp;
   3279         }
  3280     }
  3195     return 0;
  3281     return 0;
  3196   /* Keep old chunk if big enough but not too big */
   3197   if (oldsize >= nb + SIZE_T_SIZE &&
   3198       (oldsize - nb) <= (mparams.granularity << 1))
   3199     return oldp;
   3200   else {
   3201     size_t offset = oldp->prev_foot & ~IS_MMAPPED_BIT;
   3202     size_t oldmmsize = oldsize + offset + MMAP_FOOT_PAD;
   3203     size_t newmmsize = granularity_align(nb + SIX_SIZE_T_SIZES +
   3204                                          CHUNK_ALIGN_MASK);
   3205     char* cp = (char*)CALL_MREMAP((char*)oldp - offset,
   3206                                   oldmmsize, newmmsize, 1);
   3207     if (cp != CMFAIL) {
   3208       mchunkptr newp = (mchunkptr)(cp + offset);
   3209       size_t psize = newmmsize - offset - MMAP_FOOT_PAD;
   3210       newp->head = (psize|CINUSE_BIT);
   3211       mark_inuse_foot(m, newp, psize);
   3212       chunk_plus_offset(newp, psize)->head = FENCEPOST_HEAD;
   3213       chunk_plus_offset(newp, psize+SIZE_T_SIZE)->head = 0;
   3214 
   3215       if (cp < m->least_addr)
   3216         m->least_addr = cp;
   3217       if ((m->footprint += newmmsize - oldmmsize) > m->max_footprint)
   3218         m->max_footprint = m->footprint;
   3219       check_mmapped_chunk(m, newp);
   3220       return newp;
   3221     }
   3222   }
   3223   return 0;
  3224 }
  3282 }
  3225 
  3283 
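/*
  The keep-or-remap rule above in numbers (illustrative, 64KB
  granularity): an mmapped chunk with oldsize 262144 asked to shrink
  to nb 200000 passes both tests, since oldsize >= nb + SIZE_T_SIZE
  and oldsize - nb == 62144 <= (granularity << 1) == 131072, so oldp
  is returned unchanged. A nb in the small-size range fails outright,
  and anything else is handed to CALL_MREMAP or fails with 0.
*/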
  3226 /* -------------------------- mspace management -------------------------- */
  3284 /* -------------------------- mspace management -------------------------- */
  3227 
  3285 
  3228 /* Initialize top chunk and its size */
  3286 /* Initialize top chunk and its size */
  3229 static void init_top(mstate m, mchunkptr p, size_t psize) {
  3287 static void
  3230   /* Ensure alignment */
  3288 init_top(mstate m, mchunkptr p, size_t psize)
  3231   size_t offset = align_offset(chunk2mem(p));
  3289 {
  3232   p = (mchunkptr)((char*)p + offset);
  3290     /* Ensure alignment */
  3233   psize -= offset;
  3291     size_t offset = align_offset(chunk2mem(p));
  3234 
  3292     p = (mchunkptr) ((char *) p + offset);
  3235   m->top = p;
  3293     psize -= offset;
  3236   m->topsize = psize;
  3294 
  3237   p->head = psize | PINUSE_BIT;
  3295     m->top = p;
  3238   /* set size of fake trailing chunk holding overhead space only once */
  3296     m->topsize = psize;
  3239   chunk_plus_offset(p, psize)->head = TOP_FOOT_SIZE;
  3297     p->head = psize | PINUSE_BIT;
  3240   m->trim_check = mparams.trim_threshold; /* reset on each update */
  3298     /* set size of fake trailing chunk holding overhead space only once */
   3299     chunk_plus_offset(p, psize)->head = TOP_FOOT_SIZE;
  3300     m->trim_check = mparams.trim_threshold;     /* reset on each update */
  3241 }
  3301 }
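/*
  After init_top the end of the segment looks like this (sketch):

      p ........... p+psize     top chunk, head = psize | PINUSE_BIT
      p+psize                   fake chunk whose head is TOP_FOOT_SIZE

  so consolidation into top always finds a well-formed successor, and
  resetting trim_check restarts the countdown toward trimming.
*/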
  3242 
  3302 
  3243 /* Initialize bins for a new mstate that is otherwise zeroed out */
  3303 /* Initialize bins for a new mstate that is otherwise zeroed out */
  3244 static void init_bins(mstate m) {
  3304 static void
  3245   /* Establish circular links for smallbins */
  3305 init_bins(mstate m)
  3246   bindex_t i;
  3306 {
  3247   for (i = 0; i < NSMALLBINS; ++i) {
  3307     /* Establish circular links for smallbins */
  3248     sbinptr bin = smallbin_at(m,i);
  3308     bindex_t i;
  3249     bin->fd = bin->bk = bin;
  3309     for (i = 0; i < NSMALLBINS; ++i) {
  3250   }
  3310         sbinptr bin = smallbin_at(m, i);
   3311         bin->fd = bin->bk = bin;
  3312     }
  3251 }
  3313 }
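/*
  Each smallbin header is a circular doubly linked list whose empty
  state points at itself, so emptiness needs no null checks (sketch):

      sbinptr b = smallbin_at(m, i);
      int bin_is_empty = (b->bk == b);

  do_check_smallbin above relies on this same b->bk == b comparison
  together with the corresponding smallmap bit.
*/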
  3252 
  3314 
  3253 #if PROCEED_ON_ERROR
  3315 #if PROCEED_ON_ERROR
  3254 
  3316 
  3255 /* default corruption action */
  3317 /* default corruption action */
  3256 static void reset_on_error(mstate m) {
  3318 static void
  3257   int i;
  3319 reset_on_error(mstate m)
  3258   ++malloc_corruption_error_count;
  3320 {
  3259   /* Reinitialize fields to forget about all memory */
  3321     int i;
  3260   m->smallbins = m->treebins = 0;
  3322     ++malloc_corruption_error_count;
  3261   m->dvsize = m->topsize = 0;
  3323     /* Reinitialize fields to forget about all memory */
  3262   m->seg.base = 0;
  3324     m->smallbins = m->treebins = 0;
  3263   m->seg.size = 0;
  3325     m->dvsize = m->topsize = 0;
  3264   m->seg.next = 0;
  3326     m->seg.base = 0;
  3265   m->top = m->dv = 0;
  3327     m->seg.size = 0;
  3266   for (i = 0; i < NTREEBINS; ++i)
  3328     m->seg.next = 0;
  3267     *treebin_at(m, i) = 0;
  3329     m->top = m->dv = 0;
  3268   init_bins(m);
  3330     for (i = 0; i < NTREEBINS; ++i)
   3331         *treebin_at(m, i) = 0;
  3332     init_bins(m);
  3269 }
  3333 }
  3270 #endif /* PROCEED_ON_ERROR */
  3334 #endif /* PROCEED_ON_ERROR */
  3271 
  3335 
  3272 /* Allocate chunk and prepend remainder with chunk in successor base. */
  3336 /* Allocate chunk and prepend remainder with chunk in successor base. */
  3273 static void* prepend_alloc(mstate m, char* newbase, char* oldbase,
  3337 static void *
  3274                            size_t nb) {
  3338 prepend_alloc(mstate m, char *newbase, char *oldbase, size_t nb)
  3275   mchunkptr p = align_as_chunk(newbase);
  3339 {
  3276   mchunkptr oldfirst = align_as_chunk(oldbase);
  3340     mchunkptr p = align_as_chunk(newbase);
  3277   size_t psize = (char*)oldfirst - (char*)p;
  3341     mchunkptr oldfirst = align_as_chunk(oldbase);
  3278   mchunkptr q = chunk_plus_offset(p, nb);
  3342     size_t psize = (char *) oldfirst - (char *) p;
  3279   size_t qsize = psize - nb;
  3343     mchunkptr q = chunk_plus_offset(p, nb);
  3280   set_size_and_pinuse_of_inuse_chunk(m, p, nb);
  3344     size_t qsize = psize - nb;
  3281 
  3345     set_size_and_pinuse_of_inuse_chunk(m, p, nb);
  3282   assert((char*)oldfirst > (char*)q);
  3346 
  3283   assert(pinuse(oldfirst));
  3347     assert((char *) oldfirst > (char *) q);
  3284   assert(qsize >= MIN_CHUNK_SIZE);
  3348     assert(pinuse(oldfirst));
  3285 
  3349     assert(qsize >= MIN_CHUNK_SIZE);
  3286   /* consolidate remainder with first chunk of old base */
  3350 
  3287   if (oldfirst == m->top) {
  3351     /* consolidate remainder with first chunk of old base */
  3288     size_t tsize = m->topsize += qsize;
  3352     if (oldfirst == m->top) {
  3289     m->top = q;
  3353         size_t tsize = m->topsize += qsize;
  3290     q->head = tsize | PINUSE_BIT;
  3354         m->top = q;
  3291     check_top_chunk(m, q);
  3355         q->head = tsize | PINUSE_BIT;
  3292   }
  3356         check_top_chunk(m, q);
  3293   else if (oldfirst == m->dv) {
  3357     } else if (oldfirst == m->dv) {
  3294     size_t dsize = m->dvsize += qsize;
  3358         size_t dsize = m->dvsize += qsize;
  3295     m->dv = q;
  3359         m->dv = q;
  3296     set_size_and_pinuse_of_free_chunk(q, dsize);
  3360         set_size_and_pinuse_of_free_chunk(q, dsize);
  3297   }
  3361     } else {
  3298   else {
  3362         if (!cinuse(oldfirst)) {
  3299     if (!cinuse(oldfirst)) {
  3363             size_t nsize = chunksize(oldfirst);
  3300       size_t nsize = chunksize(oldfirst);
  3364             unlink_chunk(m, oldfirst, nsize);
  3301       unlink_chunk(m, oldfirst, nsize);
  3365             oldfirst = chunk_plus_offset(oldfirst, nsize);
  3302       oldfirst = chunk_plus_offset(oldfirst, nsize);
  3366             qsize += nsize;
  3303       qsize += nsize;
  3367         }
  3304     }
  3368         set_free_with_pinuse(q, qsize, oldfirst);
  3305     set_free_with_pinuse(q, qsize, oldfirst);
  3369         insert_chunk(m, q, qsize);
  3306     insert_chunk(m, q, qsize);
  3370         check_free_chunk(m, q);
  3307     check_free_chunk(m, q);
  3371     }
  3308   }
  3372 
  3309 
  3373     check_malloced_chunk(m, chunk2mem(p), nb);
  3310   check_malloced_chunk(m, chunk2mem(p), nb);
  3374     return chunk2mem(p);
  3311   return chunk2mem(p);
       
  3312 }
  3375 }
  3313 
  3376 
  3314 
  3377 
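When prepend_alloc's remainder is followed by an ordinary free chunk, that chunk is unlinked and its size absorbed (qsize += nsize) before the remainder is binned. A toy sketch of that forward-coalescing step, with a simplified chunk record in place of real headers:

#include <stdio.h>
#include <stddef.h>

struct toy_chunk {              /* illustrative only, not malloc_chunk */
    size_t size;
    int in_use;
};

int main(void)
{
    struct toy_chunk remainder = { 160, 0 };
    struct toy_chunk neighbor  = { 96, 0 };

    if (!neighbor.in_use) {     /* like the !cinuse(oldfirst) test */
        remainder.size += neighbor.size;        /* qsize += nsize */
    }
    printf("free remainder of %zu bytes\n", remainder.size);    /* 256 */
    return 0;
}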
  3315 /* Add a segment to hold a new noncontiguous region */
  3378 /* Add a segment to hold a new noncontiguous region */
  3316 static void add_segment(mstate m, char* tbase, size_t tsize, flag_t mmapped) {
  3379 static void
  3317   /* Determine locations and sizes of segment, fenceposts, old top */
  3380 add_segment(mstate m, char *tbase, size_t tsize, flag_t mmapped)
  3318   char* old_top = (char*)m->top;
  3381 {
  3319   msegmentptr oldsp = segment_holding(m, old_top);
  3382     /* Determine locations and sizes of segment, fenceposts, old top */
  3320   char* old_end = oldsp->base + oldsp->size;
  3383     char *old_top = (char *) m->top;
  3321   size_t ssize = pad_request(sizeof(struct malloc_segment));
  3384     msegmentptr oldsp = segment_holding(m, old_top);
  3322   char* rawsp = old_end - (ssize + FOUR_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
  3385     char *old_end = oldsp->base + oldsp->size;
  3323   size_t offset = align_offset(chunk2mem(rawsp));
  3386     size_t ssize = pad_request(sizeof(struct malloc_segment));
  3324   char* asp = rawsp + offset;
  3387     char *rawsp = old_end - (ssize + FOUR_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
  3325   char* csp = (asp < (old_top + MIN_CHUNK_SIZE))? old_top : asp;
  3388     size_t offset = align_offset(chunk2mem(rawsp));
  3326   mchunkptr sp = (mchunkptr)csp;
  3389     char *asp = rawsp + offset;
  3327   msegmentptr ss = (msegmentptr)(chunk2mem(sp));
  3390     char *csp = (asp < (old_top + MIN_CHUNK_SIZE)) ? old_top : asp;
  3328   mchunkptr tnext = chunk_plus_offset(sp, ssize);
  3391     mchunkptr sp = (mchunkptr) csp;
  3329   mchunkptr p = tnext;
  3392     msegmentptr ss = (msegmentptr) (chunk2mem(sp));
  3330   int nfences = 0;
  3393     mchunkptr tnext = chunk_plus_offset(sp, ssize);
  3331 
  3394     mchunkptr p = tnext;
  3332   /* reset top to new space */
  3395     int nfences = 0;
  3333   init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);
  3396 
  3334 
  3397     /* reset top to new space */
  3335   /* Set up segment record */
  3398     init_top(m, (mchunkptr) tbase, tsize - TOP_FOOT_SIZE);
  3336   assert(is_aligned(ss));
  3399 
  3337   set_size_and_pinuse_of_inuse_chunk(m, sp, ssize);
  3400     /* Set up segment record */
  3338   *ss = m->seg; /* Push current record */
  3401     assert(is_aligned(ss));
  3339   m->seg.base = tbase;
  3402     set_size_and_pinuse_of_inuse_chunk(m, sp, ssize);
  3340   m->seg.size = tsize;
  3403     *ss = m->seg;               /* Push current record */
  3341   m->seg.sflags = mmapped;
  3404     m->seg.base = tbase;
  3342   m->seg.next = ss;
  3405     m->seg.size = tsize;
  3343 
  3406     m->seg.sflags = mmapped;
  3344   /* Insert trailing fenceposts */
  3407     m->seg.next = ss;
  3345   for (;;) {
  3408 
  3346     mchunkptr nextp = chunk_plus_offset(p, SIZE_T_SIZE);
  3409     /* Insert trailing fenceposts */
  3347     p->head = FENCEPOST_HEAD;
  3410     for (;;) {
  3348     ++nfences;
  3411         mchunkptr nextp = chunk_plus_offset(p, SIZE_T_SIZE);
  3349     if ((char*)(&(nextp->head)) < old_end)
  3412         p->head = FENCEPOST_HEAD;
  3350       p = nextp;
  3413         ++nfences;
  3351     else
  3414         if ((char *) (&(nextp->head)) < old_end)
  3352       break;
  3415             p = nextp;
  3353   }
  3416         else
  3354   assert(nfences >= 2);
  3417             break;
  3355 
  3418     }
  3356   /* Insert the rest of old top into a bin as an ordinary free chunk */
  3419     assert(nfences >= 2);
  3357   if (csp != old_top) {
  3420 
  3358     mchunkptr q = (mchunkptr)old_top;
  3421     /* Insert the rest of old top into a bin as an ordinary free chunk */
  3359     size_t psize = csp - old_top;
  3422     if (csp != old_top) {
  3360     mchunkptr tn = chunk_plus_offset(q, psize);
  3423         mchunkptr q = (mchunkptr) old_top;
  3361     set_free_with_pinuse(q, psize, tn);
  3424         size_t psize = csp - old_top;
  3362     insert_chunk(m, q, psize);
  3425         mchunkptr tn = chunk_plus_offset(q, psize);
  3363   }
  3426         set_free_with_pinuse(q, psize, tn);
  3364 
  3427         insert_chunk(m, q, psize);
  3365   check_top_chunk(m, m->top);
  3428     }
       
  3429 
       
  3430     check_top_chunk(m, m->top);
  3366 }
  3431 }
  3367 
  3432 
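The fencepost loop in add_segment writes sentinel headers up to the old segment's end so traversals stop there; the assertion then requires at least two of them. A standalone sketch of the same walk over a plain header array (the sentinel value is a made-up stand-in for FENCEPOST_HEAD):

#include <stdio.h>
#include <stddef.h>

#define FENCEPOST_HEAD_TOY ((size_t) 0)         /* illustrative sentinel */

int main(void)
{
    size_t headers[8];
    size_t *p = headers;
    size_t *old_end = headers + 8;
    int nfences = 0;

    /* Stamp sentinels until the next header would lie past old_end,
       mirroring add_segment's for (;;) loop. */
    for (;;) {
        size_t *nextp = p + 1;
        *p = FENCEPOST_HEAD_TOY;
        ++nfences;
        if ((char *) nextp < (char *) old_end)
            p = nextp;
        else
            break;
    }
    printf("wrote %d fenceposts\n", nfences);   /* 8 */
    return 0;
}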
  3368 /* -------------------------- System allocation -------------------------- */
  3433 /* -------------------------- System allocation -------------------------- */
  3369 
  3434 
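sys_alloc below repeatedly rounds request sizes up to the system allocation granularity via granularity_align. For a power-of-two granularity that is a single mask operation; a sketch assuming a fixed 64 KiB granularity rather than mparams.granularity:

#include <stdio.h>
#include <stddef.h>

#define GRANULARITY ((size_t) 65536)    /* assumed power of two */

/* Round sz up to the next multiple of GRANULARITY. */
static size_t granularity_align_toy(size_t sz)
{
    return (sz + GRANULARITY - 1) & ~(GRANULARITY - 1);
}

int main(void)
{
    printf("%zu\n", granularity_align_toy(1));          /* 65536 */
    printf("%zu\n", granularity_align_toy(65536));      /* 65536 */
    printf("%zu\n", granularity_align_toy(65537));      /* 131072 */
    return 0;
}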
  3370 /* Get memory from system using MORECORE or MMAP */
  3435 /* Get memory from system using MORECORE or MMAP */
  3371 static void* sys_alloc(mstate m, size_t nb) {
  3436 static void *
  3372   char* tbase = CMFAIL;
  3437 sys_alloc(mstate m, size_t nb)
  3373   size_t tsize = 0;
  3438 {
  3374   flag_t mmap_flag = 0;
  3439     char *tbase = CMFAIL;
  3375 
  3440     size_t tsize = 0;
  3376   init_mparams();
  3441     flag_t mmap_flag = 0;
  3377 
  3442 
  3378   /* Directly map large chunks */
  3443     init_mparams();
  3379   if (use_mmap(m) && nb >= mparams.mmap_threshold) {
  3444 
  3380     void* mem = mmap_alloc(m, nb);
  3445     /* Directly map large chunks */
  3381     if (mem != 0)
  3446     if (use_mmap(m) && nb >= mparams.mmap_threshold) {
  3382       return mem;
  3447         void *mem = mmap_alloc(m, nb);
  3383   }
  3448         if (mem != 0)
  3384 
  3449             return mem;
  3385   /*
  3450     }
  3386     Try getting memory in any of three ways (in most-preferred to
  3451 
  3387     least-preferred order):
  3452     /*
  3388     1. A call to MORECORE that can normally contiguously extend memory.
  3453        Try getting memory in any of three ways (in most-preferred to
       
  3454        least-preferred order):
       
  3455        1. A call to MORECORE that can normally contiguously extend memory.
   3389        (disabled if not MORECORE_CONTIGUOUS or not HAVE_MORECORE
   3456        (disabled if not MORECORE_CONTIGUOUS or not HAVE_MORECORE
   3390        or main space is mmapped or a previous contiguous call failed)
   3457        or main space is mmapped or a previous contiguous call failed)
  3391     2. A call to MMAP new space (disabled if not HAVE_MMAP).
  3458        2. A call to MMAP new space (disabled if not HAVE_MMAP).
  3392        Note that under the default settings, if MORECORE is unable to
  3459        Note that under the default settings, if MORECORE is unable to
  3393        fulfill a request, and HAVE_MMAP is true, then mmap is
  3460        fulfill a request, and HAVE_MMAP is true, then mmap is
  3394        used as a noncontiguous system allocator. This is a useful backup
  3461        used as a noncontiguous system allocator. This is a useful backup
  3395        strategy for systems with holes in address spaces -- in this case
  3462        strategy for systems with holes in address spaces -- in this case
  3396        sbrk cannot contiguously expand the heap, but mmap may be able to
  3463        sbrk cannot contiguously expand the heap, but mmap may be able to
  3397        find space.
  3464        find space.
  3398     3. A call to MORECORE that cannot usually contiguously extend memory.
  3465        3. A call to MORECORE that cannot usually contiguously extend memory.
  3399        (disabled if not HAVE_MORECORE)
  3466        (disabled if not HAVE_MORECORE)
  3400   */
  3467      */
  3401 
  3468 
  3402   if (MORECORE_CONTIGUOUS && !use_noncontiguous(m)) {
  3469     if (MORECORE_CONTIGUOUS && !use_noncontiguous(m)) {
  3403     char* br = CMFAIL;
  3470         char *br = CMFAIL;
  3404     msegmentptr ss = (m->top == 0)? 0 : segment_holding(m, (char*)m->top);
  3471         msegmentptr ss =
  3405     size_t asize = 0;
  3472             (m->top == 0) ? 0 : segment_holding(m, (char *) m->top);
  3406     ACQUIRE_MORECORE_LOCK();
  3473         size_t asize = 0;
  3407 
  3474         ACQUIRE_MORECORE_LOCK();
  3408     if (ss == 0) {  /* First time through or recovery */
  3475 
  3409       char* base = (char*)CALL_MORECORE(0);
  3476         if (ss == 0) {          /* First time through or recovery */
  3410       if (base != CMFAIL) {
  3477             char *base = (char *) CALL_MORECORE(0);
  3411         asize = granularity_align(nb + TOP_FOOT_SIZE + SIZE_T_ONE);
  3478             if (base != CMFAIL) {
  3412         /* Adjust to end on a page boundary */
  3479                 asize = granularity_align(nb + TOP_FOOT_SIZE + SIZE_T_ONE);
  3413         if (!is_page_aligned(base))
  3480                 /* Adjust to end on a page boundary */
  3414           asize += (page_align((size_t)base) - (size_t)base);
  3481                 if (!is_page_aligned(base))
  3415         /* Can't call MORECORE if size is negative when treated as signed */
  3482                     asize += (page_align((size_t) base) - (size_t) base);
  3416         if (asize < HALF_MAX_SIZE_T &&
  3483                 /* Can't call MORECORE if size is negative when treated as signed */
  3417             (br = (char*)(CALL_MORECORE(asize))) == base) {
  3484                 if (asize < HALF_MAX_SIZE_T &&
  3418           tbase = base;
  3485                     (br = (char *) (CALL_MORECORE(asize))) == base) {
  3419           tsize = asize;
  3486                     tbase = base;
       
  3487                     tsize = asize;
       
  3488                 }
       
  3489             }
       
  3490         } else {
       
  3491             /* Subtract out existing available top space from MORECORE request. */
       
  3492             asize =
       
  3493                 granularity_align(nb - m->topsize + TOP_FOOT_SIZE +
       
  3494                                   SIZE_T_ONE);
       
  3495             /* Use mem here only if it did continuously extend old space */
       
  3496             if (asize < HALF_MAX_SIZE_T &&
       
  3497                 (br =
       
  3498                  (char *) (CALL_MORECORE(asize))) == ss->base + ss->size) {
       
  3499                 tbase = br;
       
  3500                 tsize = asize;
       
  3501             }
  3420         }
  3502         }
  3421       }
  3503 
  3422     }
  3504         if (tbase == CMFAIL) {  /* Cope with partial failure */
  3423     else {
  3505             if (br != CMFAIL) { /* Try to use/extend the space we did get */
  3424       /* Subtract out existing available top space from MORECORE request. */
  3506                 if (asize < HALF_MAX_SIZE_T &&
  3425       asize = granularity_align(nb - m->topsize + TOP_FOOT_SIZE + SIZE_T_ONE);
  3507                     asize < nb + TOP_FOOT_SIZE + SIZE_T_ONE) {
  3426       /* Use mem here only if it did continuously extend old space */
  3508                     size_t esize =
  3427       if (asize < HALF_MAX_SIZE_T &&
  3509                         granularity_align(nb + TOP_FOOT_SIZE +
  3428           (br = (char*)(CALL_MORECORE(asize))) == ss->base+ss->size) {
  3510                                           SIZE_T_ONE - asize);
  3429         tbase = br;
  3511                     if (esize < HALF_MAX_SIZE_T) {
  3430         tsize = asize;
  3512                         char *end = (char *) CALL_MORECORE(esize);
  3431       }
  3513                         if (end != CMFAIL)
  3432     }
  3514                             asize += esize;
  3433 
  3515                         else {  /* Can't use; try to release */
  3434     if (tbase == CMFAIL) {    /* Cope with partial failure */
  3516                             end = (char *) CALL_MORECORE(-asize);
  3435       if (br != CMFAIL) {    /* Try to use/extend the space we did get */
  3517                             br = CMFAIL;
  3436         if (asize < HALF_MAX_SIZE_T &&
  3518                         }
  3437             asize < nb + TOP_FOOT_SIZE + SIZE_T_ONE) {
  3519                     }
  3438           size_t esize = granularity_align(nb + TOP_FOOT_SIZE + SIZE_T_ONE - asize);
  3520                 }
  3439           if (esize < HALF_MAX_SIZE_T) {
       
  3440             char* end = (char*)CALL_MORECORE(esize);
       
  3441             if (end != CMFAIL)
       
  3442               asize += esize;
       
  3443             else {            /* Can't use; try to release */
       
  3444               end = (char*)CALL_MORECORE(-asize);
       
  3445               br = CMFAIL;
       
  3446             }
  3521             }
  3447           }
  3522             if (br != CMFAIL) { /* Use the space we did get */
       
  3523                 tbase = br;
       
  3524                 tsize = asize;
       
  3525             } else
       
  3526                 disable_contiguous(m);  /* Don't try contiguous path in the future */
  3448         }
  3527         }
  3449       }
  3528 
  3450       if (br != CMFAIL) {    /* Use the space we did get */
  3529         RELEASE_MORECORE_LOCK();
  3451         tbase = br;
  3530     }
  3452         tsize = asize;
  3531 
  3453       }
  3532     if (HAVE_MMAP && tbase == CMFAIL) { /* Try MMAP */
  3454       else
  3533         size_t req = nb + TOP_FOOT_SIZE + SIZE_T_ONE;
  3455         disable_contiguous(m); /* Don't try contiguous path in the future */
  3534         size_t rsize = granularity_align(req);
  3456     }
  3535         if (rsize > nb) {       /* Fail if wraps around zero */
  3457 
  3536             char *mp = (char *) (CALL_MMAP(rsize));
  3458     RELEASE_MORECORE_LOCK();
  3537             if (mp != CMFAIL) {
  3459   }
  3538                 tbase = mp;
  3460 
  3539                 tsize = rsize;
  3461   if (HAVE_MMAP && tbase == CMFAIL) {  /* Try MMAP */
  3540                 mmap_flag = IS_MMAPPED_BIT;
  3462     size_t req = nb + TOP_FOOT_SIZE + SIZE_T_ONE;
  3541             }
  3463     size_t rsize = granularity_align(req);
       
  3464     if (rsize > nb) { /* Fail if wraps around zero */
       
  3465       char* mp = (char*)(CALL_MMAP(rsize));
       
  3466       if (mp != CMFAIL) {
       
  3467         tbase = mp;
       
  3468         tsize = rsize;
       
  3469         mmap_flag = IS_MMAPPED_BIT;
       
  3470       }
       
  3471     }
       
  3472   }
       
  3473 
       
  3474   if (HAVE_MORECORE && tbase == CMFAIL) { /* Try noncontiguous MORECORE */
       
  3475     size_t asize = granularity_align(nb + TOP_FOOT_SIZE + SIZE_T_ONE);
       
  3476     if (asize < HALF_MAX_SIZE_T) {
       
  3477       char* br = CMFAIL;
       
  3478       char* end = CMFAIL;
       
  3479       ACQUIRE_MORECORE_LOCK();
       
  3480       br = (char*)(CALL_MORECORE(asize));
       
  3481       end = (char*)(CALL_MORECORE(0));
       
  3482       RELEASE_MORECORE_LOCK();
       
  3483       if (br != CMFAIL && end != CMFAIL && br < end) {
       
  3484         size_t ssize = end - br;
       
  3485         if (ssize > nb + TOP_FOOT_SIZE) {
       
  3486           tbase = br;
       
  3487           tsize = ssize;
       
  3488         }
  3542         }
  3489       }
  3543     }
  3490     }
  3544 
  3491   }
  3545     if (HAVE_MORECORE && tbase == CMFAIL) {     /* Try noncontiguous MORECORE */
  3492 
  3546         size_t asize = granularity_align(nb + TOP_FOOT_SIZE + SIZE_T_ONE);
  3493   if (tbase != CMFAIL) {
  3547         if (asize < HALF_MAX_SIZE_T) {
  3494 
  3548             char *br = CMFAIL;
  3495     if ((m->footprint += tsize) > m->max_footprint)
  3549             char *end = CMFAIL;
  3496       m->max_footprint = m->footprint;
  3550             ACQUIRE_MORECORE_LOCK();
  3497 
  3551             br = (char *) (CALL_MORECORE(asize));
  3498     if (!is_initialized(m)) { /* first-time initialization */
  3552             end = (char *) (CALL_MORECORE(0));
  3499       m->seg.base = m->least_addr = tbase;
  3553             RELEASE_MORECORE_LOCK();
  3500       m->seg.size = tsize;
  3554             if (br != CMFAIL && end != CMFAIL && br < end) {
  3501       m->seg.sflags = mmap_flag;
  3555                 size_t ssize = end - br;
  3502       m->magic = mparams.magic;
  3556                 if (ssize > nb + TOP_FOOT_SIZE) {
  3503       init_bins(m);
  3557                     tbase = br;
  3504       if (is_global(m)) 
  3558                     tsize = ssize;
  3505         init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);
  3559                 }
  3506       else {
  3560             }
  3507         /* Offset top by embedded malloc_state */
       
  3508         mchunkptr mn = next_chunk(mem2chunk(m));
       
  3509         init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) -TOP_FOOT_SIZE);
       
  3510       }
       
  3511     }
       
  3512 
       
  3513     else {
       
  3514       /* Try to merge with an existing segment */
       
  3515       msegmentptr sp = &m->seg;
       
  3516       while (sp != 0 && tbase != sp->base + sp->size)
       
  3517         sp = sp->next;
       
  3518       if (sp != 0 &&
       
  3519           !is_extern_segment(sp) &&
       
  3520           (sp->sflags & IS_MMAPPED_BIT) == mmap_flag &&
       
  3521           segment_holds(sp, m->top)) { /* append */
       
  3522         sp->size += tsize;
       
  3523         init_top(m, m->top, m->topsize + tsize);
       
  3524       }
       
  3525       else {
       
  3526         if (tbase < m->least_addr)
       
  3527           m->least_addr = tbase;
       
  3528         sp = &m->seg;
       
  3529         while (sp != 0 && sp->base != tbase + tsize)
       
  3530           sp = sp->next;
       
  3531         if (sp != 0 &&
       
  3532             !is_extern_segment(sp) &&
       
  3533             (sp->sflags & IS_MMAPPED_BIT) == mmap_flag) {
       
  3534           char* oldbase = sp->base;
       
  3535           sp->base = tbase;
       
  3536           sp->size += tsize;
       
  3537           return prepend_alloc(m, tbase, oldbase, nb);
       
  3538         }
  3561         }
  3539         else
  3562     }
  3540           add_segment(m, tbase, tsize, mmap_flag);
  3563 
  3541       }
  3564     if (tbase != CMFAIL) {
  3542     }
  3565 
  3543 
  3566         if ((m->footprint += tsize) > m->max_footprint)
  3544     if (nb < m->topsize) { /* Allocate from new or extended top space */
  3567             m->max_footprint = m->footprint;
  3545       size_t rsize = m->topsize -= nb;
  3568 
  3546       mchunkptr p = m->top;
  3569         if (!is_initialized(m)) {       /* first-time initialization */
  3547       mchunkptr r = m->top = chunk_plus_offset(p, nb);
  3570             m->seg.base = m->least_addr = tbase;
  3548       r->head = rsize | PINUSE_BIT;
  3571             m->seg.size = tsize;
  3549       set_size_and_pinuse_of_inuse_chunk(m, p, nb);
  3572             m->seg.sflags = mmap_flag;
  3550       check_top_chunk(m, m->top);
  3573             m->magic = mparams.magic;
  3551       check_malloced_chunk(m, chunk2mem(p), nb);
  3574             init_bins(m);
  3552       return chunk2mem(p);
  3575             if (is_global(m))
  3553     }
  3576                 init_top(m, (mchunkptr) tbase, tsize - TOP_FOOT_SIZE);
  3554   }
  3577             else {
  3555 
  3578                 /* Offset top by embedded malloc_state */
  3556   MALLOC_FAILURE_ACTION;
  3579                 mchunkptr mn = next_chunk(mem2chunk(m));
  3557   return 0;
  3580                 init_top(m, mn,
  3558 }
  3581                          (size_t) ((tbase + tsize) - (char *) mn) -
  3559 
  3582                          TOP_FOOT_SIZE);
  3560 /* -----------------------  system deallocation -------------------------- */
  3583             }
  3561 
       
  3562 /* Unmap and unlink any mmapped segments that don't contain used chunks */
       
  3563 static size_t release_unused_segments(mstate m) {
       
  3564   size_t released = 0;
       
  3565   msegmentptr pred = &m->seg;
       
  3566   msegmentptr sp = pred->next;
       
  3567   while (sp != 0) {
       
  3568     char* base = sp->base;
       
  3569     size_t size = sp->size;
       
  3570     msegmentptr next = sp->next;
       
  3571     if (is_mmapped_segment(sp) && !is_extern_segment(sp)) {
       
  3572       mchunkptr p = align_as_chunk(base);
       
  3573       size_t psize = chunksize(p);
       
  3574       /* Can unmap if first chunk holds entire segment and not pinned */
       
  3575       if (!cinuse(p) && (char*)p + psize >= base + size - TOP_FOOT_SIZE) {
       
  3576         tchunkptr tp = (tchunkptr)p;
       
  3577         assert(segment_holds(sp, (char*)sp));
       
  3578         if (p == m->dv) {
       
  3579           m->dv = 0;
       
  3580           m->dvsize = 0;
       
  3581         }
  3584         }
       
  3585 
  3582         else {
  3586         else {
  3583           unlink_large_chunk(m, tp);
  3587             /* Try to merge with an existing segment */
       
  3588             msegmentptr sp = &m->seg;
       
  3589             while (sp != 0 && tbase != sp->base + sp->size)
       
  3590                 sp = sp->next;
       
  3591             if (sp != 0 && !is_extern_segment(sp) && (sp->sflags & IS_MMAPPED_BIT) == mmap_flag && segment_holds(sp, m->top)) { /* append */
       
  3592                 sp->size += tsize;
       
  3593                 init_top(m, m->top, m->topsize + tsize);
       
  3594             } else {
       
  3595                 if (tbase < m->least_addr)
       
  3596                     m->least_addr = tbase;
       
  3597                 sp = &m->seg;
       
  3598                 while (sp != 0 && sp->base != tbase + tsize)
       
  3599                     sp = sp->next;
       
  3600                 if (sp != 0 &&
       
  3601                     !is_extern_segment(sp) &&
       
  3602                     (sp->sflags & IS_MMAPPED_BIT) == mmap_flag) {
       
  3603                     char *oldbase = sp->base;
       
  3604                     sp->base = tbase;
       
  3605                     sp->size += tsize;
       
  3606                     return prepend_alloc(m, tbase, oldbase, nb);
       
  3607                 } else
       
  3608                     add_segment(m, tbase, tsize, mmap_flag);
       
  3609             }
  3584         }
  3610         }
  3585         if (CALL_MUNMAP(base, size) == 0) {
  3611 
  3586           released += size;
  3612         if (nb < m->topsize) {  /* Allocate from new or extended top space */
  3587           m->footprint -= size;
  3613             size_t rsize = m->topsize -= nb;
  3588           /* unlink obsoleted record */
  3614             mchunkptr p = m->top;
  3589           sp = pred;
  3615             mchunkptr r = m->top = chunk_plus_offset(p, nb);
  3590           sp->next = next;
  3616             r->head = rsize | PINUSE_BIT;
       
  3617             set_size_and_pinuse_of_inuse_chunk(m, p, nb);
       
  3618             check_top_chunk(m, m->top);
       
  3619             check_malloced_chunk(m, chunk2mem(p), nb);
       
  3620             return chunk2mem(p);
  3591         }
  3621         }
  3592         else { /* back out if cannot unmap */
  3622     }
  3593           insert_large_chunk(m, tp, psize);
  3623 
  3594         }
       
  3595       }
       
  3596     }
       
  3597     pred = sp;
       
  3598     sp = next;
       
  3599   }
       
  3600   return released;
       
  3601 }
       
  3602 
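release_unused_segments walks the singly-linked segment list with a trailing pred pointer so that a dead segment can be unlinked without losing the walk. The same pattern reduced to its essentials (the keep flag here stands in for "still holds used chunks"):

#include <stdio.h>
#include <stdlib.h>

struct seg {
    int keep;                   /* stand-in for "contains used chunks" */
    struct seg *next;
};

int main(void)
{
    struct seg c = { 1, NULL }, b = { 0, &c }, a = { 1, &b };
    struct seg head = { 1, &a };

    struct seg *pred = &head;
    struct seg *sp = pred->next;
    while (sp != NULL) {
        struct seg *next = sp->next;
        if (!sp->keep)
            pred->next = next;  /* unlink obsoleted record */
        else
            pred = sp;
        sp = next;
    }

    for (sp = head.next; sp != NULL; sp = sp->next)
        printf("keep=%d\n", sp->keep);  /* prints 1 twice; b is gone */
    return 0;
}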
       
  3603 static int sys_trim(mstate m, size_t pad) {
       
  3604   size_t released = 0;
       
  3605   if (pad < MAX_REQUEST && is_initialized(m)) {
       
  3606     pad += TOP_FOOT_SIZE; /* ensure enough room for segment overhead */
       
  3607 
       
  3608     if (m->topsize > pad) {
       
  3609       /* Shrink top space in granularity-size units, keeping at least one */
       
  3610       size_t unit = mparams.granularity;
       
  3611       size_t extra = ((m->topsize - pad + (unit - SIZE_T_ONE)) / unit -
       
  3612                       SIZE_T_ONE) * unit;
       
  3613       msegmentptr sp = segment_holding(m, (char*)m->top);
       
  3614 
       
  3615       if (!is_extern_segment(sp)) {
       
  3616         if (is_mmapped_segment(sp)) {
       
  3617           if (HAVE_MMAP &&
       
  3618               sp->size >= extra &&
       
  3619               !has_segment_link(m, sp)) { /* can't shrink if pinned */
       
  3620             size_t newsize = sp->size - extra;
       
  3621             /* Prefer mremap, fall back to munmap */
       
  3622             if ((CALL_MREMAP(sp->base, sp->size, newsize, 0) != MFAIL) ||
       
  3623                 (CALL_MUNMAP(sp->base + newsize, extra) == 0)) {
       
  3624               released = extra;
       
  3625             }
       
  3626           }
       
  3627         }
       
  3628         else if (HAVE_MORECORE) {
       
  3629           if (extra >= HALF_MAX_SIZE_T) /* Avoid wrapping negative */
       
  3630             extra = (HALF_MAX_SIZE_T) + SIZE_T_ONE - unit;
       
  3631           ACQUIRE_MORECORE_LOCK();
       
  3632           {
       
  3633             /* Make sure end of memory is where we last set it. */
       
  3634             char* old_br = (char*)(CALL_MORECORE(0));
       
  3635             if (old_br == sp->base + sp->size) {
       
  3636               char* rel_br = (char*)(CALL_MORECORE(-extra));
       
  3637               char* new_br = (char*)(CALL_MORECORE(0));
       
  3638               if (rel_br != CMFAIL && new_br < old_br)
       
  3639                 released = old_br - new_br;
       
  3640             }
       
  3641           }
       
  3642           RELEASE_MORECORE_LOCK();
       
  3643         }
       
  3644       }
       
  3645 
       
  3646       if (released != 0) {
       
  3647         sp->size -= released;
       
  3648         m->footprint -= released;
       
  3649         init_top(m, m->top, m->topsize - released);
       
  3650         check_top_chunk(m, m->top);
       
  3651       }
       
  3652     }
       
  3653 
       
  3654     /* Unmap any unused mmapped segments */
       
  3655     if (HAVE_MMAP) 
       
  3656       released += release_unused_segments(m);
       
  3657 
       
  3658     /* On failure, disable autotrim to avoid repeated failed future calls */
       
  3659     if (released == 0)
       
  3660       m->trim_check = MAX_SIZE_T;
       
  3661   }
       
  3662 
       
  3663   return (released != 0)? 1 : 0;
       
  3664 }
       
  3665 
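The extra computation in sys_trim releases whole granularity units while always holding one unit back. The same formula worked with assumed numbers:

#include <stdio.h>
#include <stddef.h>

int main(void)
{
    size_t unit = 65536;        /* assumed granularity */
    size_t topsize = 200000;    /* current top space */
    size_t pad = 4096;          /* space the caller wants kept */

    /* Units covering (topsize - pad), minus one unit kept in reserve:
       the same arithmetic as sys_trim's extra. */
    size_t extra = ((topsize - pad + (unit - 1)) / unit - 1) * unit;

    printf("release %zu of %zu bytes\n", extra, topsize);   /* 131072 */
    return 0;
}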
       
  3666 /* ---------------------------- malloc support --------------------------- */
       
  3667 
       
  3668 /* allocate a large request from the best fitting chunk in a treebin */
       
  3669 static void* tmalloc_large(mstate m, size_t nb) {
       
  3670   tchunkptr v = 0;
       
  3671   size_t rsize = -nb; /* Unsigned negation */
       
  3672   tchunkptr t;
       
  3673   bindex_t idx;
       
  3674   compute_tree_index(nb, idx);
       
  3675 
       
  3676   if ((t = *treebin_at(m, idx)) != 0) {
       
  3677     /* Traverse tree for this bin looking for node with size == nb */
       
  3678     size_t sizebits = nb << leftshift_for_tree_index(idx);
       
  3679     tchunkptr rst = 0;  /* The deepest untaken right subtree */
       
  3680     for (;;) {
       
  3681       tchunkptr rt;
       
  3682       size_t trem = chunksize(t) - nb;
       
  3683       if (trem < rsize) {
       
  3684         v = t;
       
  3685         if ((rsize = trem) == 0)
       
  3686           break;
       
  3687       }
       
  3688       rt = t->child[1];
       
  3689       t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
       
  3690       if (rt != 0 && rt != t)
       
  3691         rst = rt;
       
  3692       if (t == 0) {
       
  3693         t = rst; /* set t to least subtree holding sizes > nb */
       
  3694         break;
       
  3695       }
       
  3696       sizebits <<= 1;
       
  3697     }
       
  3698   }
       
  3699 
       
  3700   if (t == 0 && v == 0) { /* set t to root of next non-empty treebin */
       
  3701     binmap_t leftbits = left_bits(idx2bit(idx)) & m->treemap;
       
  3702     if (leftbits != 0) {
       
  3703       bindex_t i;
       
  3704       binmap_t leastbit = least_bit(leftbits);
       
  3705       compute_bit2idx(leastbit, i);
       
  3706       t = *treebin_at(m, i);
       
  3707     }
       
  3708   }
       
  3709 
       
  3710   while (t != 0) { /* find smallest of tree or subtree */
       
  3711     size_t trem = chunksize(t) - nb;
       
  3712     if (trem < rsize) {
       
  3713       rsize = trem;
       
  3714       v = t;
       
  3715     }
       
  3716     t = leftmost_child(t);
       
  3717   }
       
  3718 
       
  3719   /*  If dv is a better fit, return 0 so malloc will use it */
       
  3720   if (v != 0 && rsize < (size_t)(m->dvsize - nb)) {
       
  3721     if (RTCHECK(ok_address(m, v))) { /* split */
       
  3722       mchunkptr r = chunk_plus_offset(v, nb);
       
  3723       assert(chunksize(v) == rsize + nb);
       
  3724       if (RTCHECK(ok_next(v, r))) {
       
  3725         unlink_large_chunk(m, v);
       
  3726         if (rsize < MIN_CHUNK_SIZE)
       
  3727           set_inuse_and_pinuse(m, v, (rsize + nb));
       
  3728         else {
       
  3729           set_size_and_pinuse_of_inuse_chunk(m, v, nb);
       
  3730           set_size_and_pinuse_of_free_chunk(r, rsize);
       
  3731           insert_chunk(m, r, rsize);
       
  3732         }
       
  3733         return chunk2mem(v);
       
  3734       }
       
  3735     }
       
  3736     CORRUPTION_ERROR_ACTION(m);
       
  3737   }
       
  3738   return 0;
       
  3739 }
       
  3740 
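tmalloc_large seeds its best-so-far remainder with rsize = -nb, whose unsigned wraparound yields a huge sentinel that any feasible chunk beats automatically. The trick in isolation, with made-up chunk sizes:

#include <stdio.h>
#include <stddef.h>

int main(void)
{
    size_t nb = 100;                    /* request size */
    size_t rsize = (size_t) 0 - nb;     /* unsigned negation: huge sentinel */
    size_t best = 0;
    size_t sizes[] = { 512, 160, 130, 4096 };
    size_t i;

    /* Track the chunk with the smallest remainder, as tmalloc_large does;
       an infeasible chunk (size < nb) wraps huge and never wins. */
    for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); ++i) {
        size_t trem = sizes[i] - nb;
        if (trem < rsize) {
            rsize = trem;
            best = sizes[i];
        }
    }
    printf("best fit %zu, remainder %zu\n", best, rsize);   /* 130, 30 */
    return 0;
}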
       
  3741 /* allocate a small request from the best fitting chunk in a treebin */
       
  3742 static void* tmalloc_small(mstate m, size_t nb) {
       
  3743   tchunkptr t, v;
       
  3744   size_t rsize;
       
  3745   bindex_t i;
       
  3746   binmap_t leastbit = least_bit(m->treemap);
       
  3747   compute_bit2idx(leastbit, i);
       
  3748 
       
  3749   v = t = *treebin_at(m, i);
       
  3750   rsize = chunksize(t) - nb;
       
  3751 
       
  3752   while ((t = leftmost_child(t)) != 0) {
       
  3753     size_t trem = chunksize(t) - nb;
       
  3754     if (trem < rsize) {
       
  3755       rsize = trem;
       
  3756       v = t;
       
  3757     }
       
  3758   }
       
  3759 
       
  3760   if (RTCHECK(ok_address(m, v))) {
       
  3761     mchunkptr r = chunk_plus_offset(v, nb);
       
  3762     assert(chunksize(v) == rsize + nb);
       
  3763     if (RTCHECK(ok_next(v, r))) {
       
  3764       unlink_large_chunk(m, v);
       
  3765       if (rsize < MIN_CHUNK_SIZE)
       
  3766         set_inuse_and_pinuse(m, v, (rsize + nb));
       
  3767       else {
       
  3768         set_size_and_pinuse_of_inuse_chunk(m, v, nb);
       
  3769         set_size_and_pinuse_of_free_chunk(r, rsize);
       
  3770         replace_dv(m, r, rsize);
       
  3771       }
       
  3772       return chunk2mem(v);
       
  3773     }
       
  3774   }
       
  3775 
       
  3776   CORRUPTION_ERROR_ACTION(m);
       
  3777   return 0;
       
  3778 }
       
  3779 
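tmalloc_small picks the first non-empty treebin with least_bit, the classic x & -x lowest-set-bit isolation; compute_bit2idx then turns the bit into an index. A sketch with the index computed by a plain shift loop instead:

#include <stdio.h>

int main(void)
{
    unsigned treemap = 0x58;                    /* bins 3, 4 and 6 non-empty */
    unsigned leastbit = treemap & (0U - treemap);   /* x & -x isolates a bit */
    unsigned i = 0;

    while (!(leastbit & 1U)) {                  /* stand-in for compute_bit2idx */
        leastbit >>= 1;
        ++i;
    }
    printf("smallest non-empty bin: %u\n", i);  /* 3 */
    return 0;
}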
       
  3780 /* --------------------------- realloc support --------------------------- */
       
  3781 
       
  3782 static void* internal_realloc(mstate m, void* oldmem, size_t bytes) {
       
  3783   if (bytes >= MAX_REQUEST) {
       
  3784     MALLOC_FAILURE_ACTION;
  3624     MALLOC_FAILURE_ACTION;
  3785     return 0;
  3625     return 0;
  3786   }
  3626 }
  3787   if (!PREACTION(m)) {
  3627 
  3788     mchunkptr oldp = mem2chunk(oldmem);
  3628 /* -----------------------  system deallocation -------------------------- */
  3789     size_t oldsize = chunksize(oldp);
  3629 
  3790     mchunkptr next = chunk_plus_offset(oldp, oldsize);
  3630 /* Unmap and unlink any mmapped segments that don't contain used chunks */
  3791     mchunkptr newp = 0;
  3631 static size_t
  3792     void* extra = 0;
  3632 release_unused_segments(mstate m)
  3793 
  3633 {
  3794     /* Try to either shrink or extend into top. Else malloc-copy-free */
  3634     size_t released = 0;
  3795 
  3635     msegmentptr pred = &m->seg;
  3796     if (RTCHECK(ok_address(m, oldp) && ok_cinuse(oldp) &&
  3636     msegmentptr sp = pred->next;
  3797                 ok_next(oldp, next) && ok_pinuse(next))) {
  3637     while (sp != 0) {
  3798       size_t nb = request2size(bytes);
  3638         char *base = sp->base;
  3799       if (is_mmapped(oldp))
  3639         size_t size = sp->size;
  3800         newp = mmap_resize(m, oldp, nb);
  3640         msegmentptr next = sp->next;
  3801       else if (oldsize >= nb) { /* already big enough */
  3641         if (is_mmapped_segment(sp) && !is_extern_segment(sp)) {
  3802         size_t rsize = oldsize - nb;
  3642             mchunkptr p = align_as_chunk(base);
  3803         newp = oldp;
  3643             size_t psize = chunksize(p);
  3804         if (rsize >= MIN_CHUNK_SIZE) {
  3644             /* Can unmap if first chunk holds entire segment and not pinned */
  3805           mchunkptr remainder = chunk_plus_offset(newp, nb);
  3645             if (!cinuse(p)
  3806           set_inuse(m, newp, nb);
  3646                 && (char *) p + psize >= base + size - TOP_FOOT_SIZE) {
  3807           set_inuse(m, remainder, rsize);
  3647                 tchunkptr tp = (tchunkptr) p;
  3808           extra = chunk2mem(remainder);
  3648                 assert(segment_holds(sp, (char *) sp));
       
  3649                 if (p == m->dv) {
       
  3650                     m->dv = 0;
       
  3651                     m->dvsize = 0;
       
  3652                 } else {
       
  3653                     unlink_large_chunk(m, tp);
       
  3654                 }
       
  3655                 if (CALL_MUNMAP(base, size) == 0) {
       
  3656                     released += size;
       
  3657                     m->footprint -= size;
       
  3658                     /* unlink obsoleted record */
       
  3659                     sp = pred;
       
  3660                     sp->next = next;
       
  3661                 } else {        /* back out if cannot unmap */
       
  3662                     insert_large_chunk(m, tp, psize);
       
  3663                 }
       
  3664             }
  3809         }
  3665         }
  3810       }
  3666         pred = sp;
  3811       else if (next == m->top && oldsize + m->topsize > nb) {
  3667         sp = next;
  3812         /* Expand into top */
  3668     }
  3813         size_t newsize = oldsize + m->topsize;
  3669     return released;
  3814         size_t newtopsize = newsize - nb;
  3670 }
  3815         mchunkptr newtop = chunk_plus_offset(oldp, nb);
  3671 
  3816         set_inuse(m, oldp, nb);
  3672 static int
  3817         newtop->head = newtopsize |PINUSE_BIT;
  3673 sys_trim(mstate m, size_t pad)
  3818         m->top = newtop;
  3674 {
  3819         m->topsize = newtopsize;
  3675     size_t released = 0;
  3820         newp = oldp;
  3676     if (pad < MAX_REQUEST && is_initialized(m)) {
  3821       }
  3677         pad += TOP_FOOT_SIZE;   /* ensure enough room for segment overhead */
  3822     }
  3678 
  3823     else {
  3679         if (m->topsize > pad) {
  3824       USAGE_ERROR_ACTION(m, oldmem);
  3680             /* Shrink top space in granularity-size units, keeping at least one */
  3825       POSTACTION(m);
  3681             size_t unit = mparams.granularity;
  3826       return 0;
  3682             size_t extra =
  3827     }
  3683                 ((m->topsize - pad + (unit - SIZE_T_ONE)) / unit -
       
  3684                  SIZE_T_ONE) * unit;
       
  3685             msegmentptr sp = segment_holding(m, (char *) m->top);
       
  3686 
       
  3687             if (!is_extern_segment(sp)) {
       
  3688                 if (is_mmapped_segment(sp)) {
       
  3689                     if (HAVE_MMAP && sp->size >= extra && !has_segment_link(m, sp)) {   /* can't shrink if pinned */
       
  3690                         size_t newsize = sp->size - extra;
       
  3691                         /* Prefer mremap, fall back to munmap */
       
  3692                         if ((CALL_MREMAP
       
  3693                              (sp->base, sp->size, newsize, 0) != MFAIL)
       
  3694                             || (CALL_MUNMAP(sp->base + newsize, extra)
       
  3695                                 == 0)) {
       
  3696                             released = extra;
       
  3697                         }
       
  3698                     }
       
  3699                 } else if (HAVE_MORECORE) {
       
  3700                     if (extra >= HALF_MAX_SIZE_T)       /* Avoid wrapping negative */
       
  3701                         extra = (HALF_MAX_SIZE_T) + SIZE_T_ONE - unit;
       
  3702                     ACQUIRE_MORECORE_LOCK();
       
  3703                     {
       
  3704                         /* Make sure end of memory is where we last set it. */
       
  3705                         char *old_br = (char *) (CALL_MORECORE(0));
       
  3706                         if (old_br == sp->base + sp->size) {
       
  3707                             char *rel_br = (char *) (CALL_MORECORE(-extra));
       
  3708                             char *new_br = (char *) (CALL_MORECORE(0));
       
  3709                             if (rel_br != CMFAIL && new_br < old_br)
       
  3710                                 released = old_br - new_br;
       
  3711                         }
       
  3712                     }
       
  3713                     RELEASE_MORECORE_LOCK();
       
  3714                 }
       
  3715             }
       
  3716 
       
  3717             if (released != 0) {
       
  3718                 sp->size -= released;
       
  3719                 m->footprint -= released;
       
  3720                 init_top(m, m->top, m->topsize - released);
       
  3721                 check_top_chunk(m, m->top);
       
  3722             }
       
  3723         }
       
  3724 
       
  3725         /* Unmap any unused mmapped segments */
       
  3726         if (HAVE_MMAP)
       
  3727             released += release_unused_segments(m);
       
  3728 
       
  3729         /* On failure, disable autotrim to avoid repeated failed future calls */
       
  3730         if (released == 0)
       
  3731             m->trim_check = MAX_SIZE_T;
       
  3732     }
       
  3733 
       
  3734     return (released != 0) ? 1 : 0;
       
  3735 }
       
  3736 
       
  3737 /* ---------------------------- malloc support --------------------------- */
       
  3738 
       
  3739 /* allocate a large request from the best fitting chunk in a treebin */
       
  3740 static void *
       
  3741 tmalloc_large(mstate m, size_t nb)
       
  3742 {
       
  3743     tchunkptr v = 0;
       
  3744     size_t rsize = -nb;         /* Unsigned negation */
       
  3745     tchunkptr t;
       
  3746     bindex_t idx;
       
  3747     compute_tree_index(nb, idx);
       
  3748 
       
  3749     if ((t = *treebin_at(m, idx)) != 0) {
       
  3750         /* Traverse tree for this bin looking for node with size == nb */
       
  3751         size_t sizebits = nb << leftshift_for_tree_index(idx);
       
  3752         tchunkptr rst = 0;      /* The deepest untaken right subtree */
       
  3753         for (;;) {
       
  3754             tchunkptr rt;
       
  3755             size_t trem = chunksize(t) - nb;
       
  3756             if (trem < rsize) {
       
  3757                 v = t;
       
  3758                 if ((rsize = trem) == 0)
       
  3759                     break;
       
  3760             }
       
  3761             rt = t->child[1];
       
  3762             t = t->child[(sizebits >> (SIZE_T_BITSIZE - SIZE_T_ONE)) & 1];
       
  3763             if (rt != 0 && rt != t)
       
  3764                 rst = rt;
       
  3765             if (t == 0) {
       
  3766                 t = rst;        /* set t to least subtree holding sizes > nb */
       
  3767                 break;
       
  3768             }
       
  3769             sizebits <<= 1;
       
  3770         }
       
  3771     }
       
  3772 
       
  3773     if (t == 0 && v == 0) {     /* set t to root of next non-empty treebin */
       
  3774         binmap_t leftbits = left_bits(idx2bit(idx)) & m->treemap;
       
  3775         if (leftbits != 0) {
       
  3776             bindex_t i;
       
  3777             binmap_t leastbit = least_bit(leftbits);
       
  3778             compute_bit2idx(leastbit, i);
       
  3779             t = *treebin_at(m, i);
       
  3780         }
       
  3781     }
       
  3782 
       
  3783     while (t != 0) {            /* find smallest of tree or subtree */
       
  3784         size_t trem = chunksize(t) - nb;
       
  3785         if (trem < rsize) {
       
  3786             rsize = trem;
       
  3787             v = t;
       
  3788         }
       
  3789         t = leftmost_child(t);
       
  3790     }
       
  3791 
       
  3792     /*  If dv is a better fit, return 0 so malloc will use it */
       
  3793     if (v != 0 && rsize < (size_t) (m->dvsize - nb)) {
       
  3794         if (RTCHECK(ok_address(m, v))) {        /* split */
       
  3795             mchunkptr r = chunk_plus_offset(v, nb);
       
  3796             assert(chunksize(v) == rsize + nb);
       
  3797             if (RTCHECK(ok_next(v, r))) {
       
  3798                 unlink_large_chunk(m, v);
       
  3799                 if (rsize < MIN_CHUNK_SIZE)
       
  3800                     set_inuse_and_pinuse(m, v, (rsize + nb));
       
  3801                 else {
       
  3802                     set_size_and_pinuse_of_inuse_chunk(m, v, nb);
       
  3803                     set_size_and_pinuse_of_free_chunk(r, rsize);
       
  3804                     insert_chunk(m, r, rsize);
       
  3805                 }
       
  3806                 return chunk2mem(v);
       
  3807             }
       
  3808         }
       
  3809         CORRUPTION_ERROR_ACTION(m);
       
  3810     }
       
  3811     return 0;
       
  3812 }
       
  3813 
       
  3814 /* allocate a small request from the best fitting chunk in a treebin */
       
  3815 static void *
       
  3816 tmalloc_small(mstate m, size_t nb)
       
  3817 {
       
  3818     tchunkptr t, v;
       
  3819     size_t rsize;
       
  3820     bindex_t i;
       
  3821     binmap_t leastbit = least_bit(m->treemap);
       
  3822     compute_bit2idx(leastbit, i);
       
  3823 
       
  3824     v = t = *treebin_at(m, i);
       
  3825     rsize = chunksize(t) - nb;
       
  3826 
       
  3827     while ((t = leftmost_child(t)) != 0) {
       
  3828         size_t trem = chunksize(t) - nb;
       
  3829         if (trem < rsize) {
       
  3830             rsize = trem;
       
  3831             v = t;
       
  3832         }
       
  3833     }
       
  3834 
       
  3835     if (RTCHECK(ok_address(m, v))) {
       
  3836         mchunkptr r = chunk_plus_offset(v, nb);
       
  3837         assert(chunksize(v) == rsize + nb);
       
  3838         if (RTCHECK(ok_next(v, r))) {
       
  3839             unlink_large_chunk(m, v);
       
  3840             if (rsize < MIN_CHUNK_SIZE)
       
  3841                 set_inuse_and_pinuse(m, v, (rsize + nb));
       
  3842             else {
       
  3843                 set_size_and_pinuse_of_inuse_chunk(m, v, nb);
       
  3844                 set_size_and_pinuse_of_free_chunk(r, rsize);
       
  3845                 replace_dv(m, r, rsize);
       
  3846             }
       
  3847             return chunk2mem(v);
       
  3848         }
       
  3849     }
       
  3850 
       
  3851     CORRUPTION_ERROR_ACTION(m);
       
  3852     return 0;
       
  3853 }
       
  3854 
       
  3855 /* --------------------------- realloc support --------------------------- */
       
  3856 
       
  3857 static void *
       
  3858 internal_realloc(mstate m, void *oldmem, size_t bytes)
       
  3859 {
       
  3860     if (bytes >= MAX_REQUEST) {
       
  3861         MALLOC_FAILURE_ACTION;
       
  3862         return 0;
       
  3863     }
       
  3864     if (!PREACTION(m)) {
       
  3865         mchunkptr oldp = mem2chunk(oldmem);
       
  3866         size_t oldsize = chunksize(oldp);
       
  3867         mchunkptr next = chunk_plus_offset(oldp, oldsize);
       
  3868         mchunkptr newp = 0;
       
  3869         void *extra = 0;
       
  3870 
       
  3871         /* Try to either shrink or extend into top. Else malloc-copy-free */
       
  3872 
       
  3873         if (RTCHECK(ok_address(m, oldp) && ok_cinuse(oldp) &&
       
  3874                     ok_next(oldp, next) && ok_pinuse(next))) {
       
  3875             size_t nb = request2size(bytes);
       
  3876             if (is_mmapped(oldp))
       
  3877                 newp = mmap_resize(m, oldp, nb);
       
  3878             else if (oldsize >= nb) {   /* already big enough */
       
  3879                 size_t rsize = oldsize - nb;
       
  3880                 newp = oldp;
       
  3881                 if (rsize >= MIN_CHUNK_SIZE) {
       
  3882                     mchunkptr remainder = chunk_plus_offset(newp, nb);
       
  3883                     set_inuse(m, newp, nb);
       
  3884                     set_inuse(m, remainder, rsize);
       
  3885                     extra = chunk2mem(remainder);
       
  3886                 }
       
  3887             } else if (next == m->top && oldsize + m->topsize > nb) {
       
  3888                 /* Expand into top */
       
  3889                 size_t newsize = oldsize + m->topsize;
       
  3890                 size_t newtopsize = newsize - nb;
       
  3891                 mchunkptr newtop = chunk_plus_offset(oldp, nb);
       
  3892                 set_inuse(m, oldp, nb);
       
  3893                 newtop->head = newtopsize | PINUSE_BIT;
       
  3894                 m->top = newtop;
       
  3895                 m->topsize = newtopsize;
       
  3896                 newp = oldp;
       
  3897             }
       
  3898         } else {
       
  3899             USAGE_ERROR_ACTION(m, oldmem);
       
  3900             POSTACTION(m);
       
  3901             return 0;
       
  3902         }
       
  3903 
       
  3904         POSTACTION(m);
       
  3905 
       
  3906         if (newp != 0) {
       
  3907             if (extra != 0) {
       
  3908                 internal_free(m, extra);
       
  3909             }
       
  3910             check_inuse_chunk(m, newp);
       
  3911             return chunk2mem(newp);
       
  3912         } else {
       
  3913             void *newmem = internal_malloc(m, bytes);
       
  3914             if (newmem != 0) {
       
  3915                 size_t oc = oldsize - overhead_for(oldp);
       
  3916                 memcpy(newmem, oldmem, (oc < bytes) ? oc : bytes);
       
  3917                 internal_free(m, oldmem);
       
  3918             }
       
  3919             return newmem;
       
  3920         }
       
  3921     }
       
  3922     return 0;
       
  3923 }
       
  3924 
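When internal_realloc cannot shrink in place or grow into top, it falls back to malloc-copy-free, copying only the smaller of the old payload and the new size. A behavioral sketch of that fallback using the standard library (realloc_by_copy is a hypothetical helper, not SDL's code path):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Grow or shrink by allocate-copy-free, the path internal_realloc takes
   when in-place resizing fails. */
static void *realloc_by_copy(void *oldmem, size_t oldsize, size_t bytes)
{
    void *newmem = malloc(bytes);
    if (newmem != NULL) {
        memcpy(newmem, oldmem, (oldsize < bytes) ? oldsize : bytes);
        free(oldmem);
    }
    return newmem;
}

int main(void)
{
    char *p = malloc(8);
    if (p == NULL)
        return 1;
    strcpy(p, "payload");
    p = realloc_by_copy(p, 8, 64);      /* copies 8 bytes, frees old block */
    if (p == NULL)
        return 1;
    printf("%s\n", p);
    free(p);
    return 0;
}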
       
  3925 /* --------------------------- memalign support -------------------------- */
       
  3926 
       
  3927 static void *
       
  3928 internal_memalign(mstate m, size_t alignment, size_t bytes)
       
  3929 {
       
  3930     if (alignment <= MALLOC_ALIGNMENT)  /* Can just use malloc */
       
  3931         return internal_malloc(m, bytes);
       
  3932     if (alignment < MIN_CHUNK_SIZE)     /* must be at least a minimum chunk size */
       
  3933         alignment = MIN_CHUNK_SIZE;
       
  3934     if ((alignment & (alignment - SIZE_T_ONE)) != 0) {  /* Ensure a power of 2 */
       
  3935         size_t a = MALLOC_ALIGNMENT << 1;
       
  3936         while (a < alignment)
       
  3937             a <<= 1;
       
  3938         alignment = a;
       
  3939     }
       
  3940 
       
  3941     if (bytes >= MAX_REQUEST - alignment) {
       
  3942         if (m != 0) {           /* Test isn't needed but avoids compiler warning */
       
  3943             MALLOC_FAILURE_ACTION;
       
  3944         }
       
  3945     } else {
       
  3946         size_t nb = request2size(bytes);
       
  3947         size_t req = nb + alignment + MIN_CHUNK_SIZE - CHUNK_OVERHEAD;
       
  3948         char *mem = (char *) internal_malloc(m, req);
       
  3949         if (mem != 0) {
       
  3950             void *leader = 0;
       
  3951             void *trailer = 0;
       
  3952             mchunkptr p = mem2chunk(mem);
       
  3953 
       
  3954             if (PREACTION(m))
       
  3955                 return 0;
       
  3956             if ((((size_t) (mem)) % alignment) != 0) {  /* misaligned */
       
  3957                 /*
       
  3958                    Find an aligned spot inside chunk.  Since we need to give
       
  3959                    back leading space in a chunk of at least MIN_CHUNK_SIZE, if
       
  3960                    the first calculation places us at a spot with less than
       
  3961                    MIN_CHUNK_SIZE leader, we can move to the next aligned spot.
       
  3962                    We've allocated enough total room so that this is always
       
  3963                    possible.
       
  3964                  */
       
  3965                 char *br = (char *)
       
  3966                     mem2chunk((size_t)
       
  3967                               (((size_t)
       
  3968                                 (mem + alignment -
       
  3969                                  SIZE_T_ONE)) & -alignment));
       
  3970                 char *pos =
       
  3971                     ((size_t) (br - (char *) (p)) >=
       
  3972                      MIN_CHUNK_SIZE) ? br : br + alignment;
       
  3973                 mchunkptr newp = (mchunkptr) pos;
       
  3974                 size_t leadsize = pos - (char *) (p);
       
  3975                 size_t newsize = chunksize(p) - leadsize;
       
  3976 
       
  3977                 if (is_mmapped(p)) {    /* For mmapped chunks, just adjust offset */
       
  3978                     newp->prev_foot = p->prev_foot + leadsize;
       
  3979                     newp->head = (newsize | CINUSE_BIT);
       
  3980                 } else {        /* Otherwise, give back leader, use the rest */
       
  3981                     set_inuse(m, newp, newsize);
       
  3982                     set_inuse(m, p, leadsize);
       
  3983                     leader = chunk2mem(p);
       
  3984                 }
       
  3985                 p = newp;
       
  3986             }
       
  3987 
       
  3988             /* Give back spare room at the end */
       
  3989             if (!is_mmapped(p)) {
       
  3990                 size_t size = chunksize(p);
       
  3991                 if (size > nb + MIN_CHUNK_SIZE) {
       
  3992                     size_t remainder_size = size - nb;
       
  3993                     mchunkptr remainder = chunk_plus_offset(p, nb);
       
  3994                     set_inuse(m, p, nb);
       
  3995                     set_inuse(m, remainder, remainder_size);
       
  3996                     trailer = chunk2mem(remainder);
       
  3997                 }
       
  3998             }
       
  3999 
       
  4000             assert(chunksize(p) >= nb);
       
  4001             assert((((size_t) (chunk2mem(p))) % alignment) == 0);
       
  4002             check_inuse_chunk(m, p);
       
  4003             POSTACTION(m);
       
  4004             if (leader != 0) {
       
  4005                 internal_free(m, leader);
       
  4006             }
       
  4007             if (trailer != 0) {
       
  4008                 internal_free(m, trailer);
       
  4009             }
       
  4010             return chunk2mem(p);
       
  4011         }
       
  4012     }
       
  4013     return 0;
       
  4014 }
       
  4015 
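The heart of internal_memalign is rounding the raw pointer up to a power-of-two alignment with a mask (the code above writes it as & -alignment, which for powers of two equals & ~(alignment - 1)). The address arithmetic on its own:

#include <stdio.h>
#include <stddef.h>

/* Round addr up to a multiple of align, which must be a power of two:
   the same mask trick internal_memalign applies to the malloced pointer. */
static size_t align_up(size_t addr, size_t align)
{
    return (addr + align - 1) & ~(align - 1);
}

int main(void)
{
    printf("%zu\n", align_up(1000, 256));   /* 1024 */
    printf("%zu\n", align_up(1024, 256));   /* 1024: already aligned */
    return 0;
}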
       
  4016 /* ------------------------ comalloc/coalloc support --------------------- */
       
  4017 
       
  4018 static void **
       
  4019 ialloc(mstate m, size_t n_elements, size_t * sizes, int opts, void *chunks[])
       
  4020 {
       
  4021     /*
       
  4022        This provides common support for independent_X routines, handling
       
  4023        all of the combinations that can result.
       
  4024 
       
  4025        The opts arg has:
       
  4026        bit 0 set if all elements are same size (using sizes[0])
       
  4027        bit 1 set if elements should be zeroed
       
  4028      */
       
  4029 
       
  4030     size_t element_size;        /* chunksize of each element, if all same */
       
  4031     size_t contents_size;       /* total size of elements */
       
  4032     size_t array_size;          /* request size of pointer array */
       
  4033     void *mem;                  /* malloced aggregate space */
       
  4034     mchunkptr p;                /* corresponding chunk */
       
  4035     size_t remainder_size;      /* remaining bytes while splitting */
       
  4036     void **marray;              /* either "chunks" or malloced ptr array */
       
  4037     mchunkptr array_chunk;      /* chunk for malloced ptr array */
       
  4038     flag_t was_enabled;         /* to disable mmap */
       
  4039     size_t size;
       
  4040     size_t i;
       
  4041 
       
  4042     /* compute array length, if needed */
       
  4043     if (chunks != 0) {
       
  4044         if (n_elements == 0)
       
  4045             return chunks;      /* nothing to do */
       
  4046         marray = chunks;
       
  4047         array_size = 0;
       
  4048     } else {
       
  4049         /* if empty req, must still return chunk representing empty array */
       
  4050         if (n_elements == 0)
       
  4051             return (void **) internal_malloc(m, 0);
       
  4052         marray = 0;
       
  4053         array_size = request2size(n_elements * (sizeof(void *)));
       
  4054     }
       
  4055 
       
  4056     /* compute total element size */
       
  4057     if (opts & 0x1) {           /* all-same-size */
       
  4058         element_size = request2size(*sizes);
       
  4059         contents_size = n_elements * element_size;
       
  4060     } else {                    /* add up all the sizes */
       
  4061         element_size = 0;
       
  4062         contents_size = 0;
       
  4063         for (i = 0; i != n_elements; ++i)
       
  4064             contents_size += request2size(sizes[i]);
       
  4065     }
       
  4066 
       
  4067     size = contents_size + array_size;
       
  4068 
       
  4069     /*
       
  4070        Allocate the aggregate chunk.  First disable direct-mmapping so
       
  4071        malloc won't use it, since we would not be able to later
       
  4072        free/realloc space internal to a segregated mmap region.
       
  4073      */
       
  4074     was_enabled = use_mmap(m);
       
  4075     disable_mmap(m);
       
  4076     mem = internal_malloc(m, size - CHUNK_OVERHEAD);
       
  4077     if (was_enabled)
       
  4078         enable_mmap(m);
       
  4079     if (mem == 0)
       
  4080         return 0;
       
  4081 
       
  4082     if (PREACTION(m))
       
  4083         return 0;
       
  4084     p = mem2chunk(mem);
       
  4085     remainder_size = chunksize(p);
       
  4086 
       
  4087     assert(!is_mmapped(p));
       
  4088 
       
  4089     if (opts & 0x2) {           /* optionally clear the elements */
       
  4090         memset((size_t *) mem, 0, remainder_size - SIZE_T_SIZE - array_size);
       
  4091     }
       
  4092 
       
  4093     /* If not provided, allocate the pointer array as final part of chunk */
       
  4094     if (marray == 0) {
       
  4095         size_t array_chunk_size;
       
  4096         array_chunk = chunk_plus_offset(p, contents_size);
       
  4097         array_chunk_size = remainder_size - contents_size;
       
  4098         marray = (void **) (chunk2mem(array_chunk));
       
  4099         set_size_and_pinuse_of_inuse_chunk(m, array_chunk, array_chunk_size);
       
  4100         remainder_size = contents_size;
       
  4101     }
       
  4102 
       
  4103     /* split out elements */
       
  4104     for (i = 0;; ++i) {
       
  4105         marray[i] = chunk2mem(p);
       
  4106         if (i != n_elements - 1) {
       
  4107             if (element_size != 0)
       
  4108                 size = element_size;
       
  4109             else
       
  4110                 size = request2size(sizes[i]);
       
  4111             remainder_size -= size;
       
  4112             set_size_and_pinuse_of_inuse_chunk(m, p, size);
       
  4113             p = chunk_plus_offset(p, size);
       
  4114         } else {                /* the final element absorbs any overallocation slop */
       
  4115             set_size_and_pinuse_of_inuse_chunk(m, p, remainder_size);
       
  4116             break;
       
  4117         }
       
  4118     }
       
  4119 
       
  4120 #if DEBUG
       
  4121     if (marray != chunks) {
       
  4122         /* final element must have exactly exhausted chunk */
       
  4123         if (element_size != 0) {
       
  4124             assert(remainder_size == element_size);
       
  4125         } else {
       
  4126             assert(remainder_size == request2size(sizes[i]));
       
  4127         }
       
  4128         check_inuse_chunk(m, mem2chunk(marray));
       
  4129     }
       
  4130     for (i = 0; i != n_elements; ++i)
       
  4131         check_inuse_chunk(m, mem2chunk(marray[i]));
       
  4132 
       
  4133 #endif /* DEBUG */
  3828 
  4134 
  3829     POSTACTION(m);
  4135     POSTACTION(m);
  3830 
  4136     return marray;
  3831     if (newp != 0) {
       
  3832       if (extra != 0) {
       
  3833         internal_free(m, extra);
       
  3834       }
       
  3835       check_inuse_chunk(m, newp);
       
  3836       return chunk2mem(newp);
       
  3837     }
       
  3838     else {
       
  3839       void* newmem = internal_malloc(m, bytes);
       
  3840       if (newmem != 0) {
       
  3841         size_t oc = oldsize - overhead_for(oldp);
       
  3842         memcpy(newmem, oldmem, (oc < bytes)? oc : bytes);
       
  3843         internal_free(m, oldmem);
       
  3844       }
       
  3845       return newmem;
       
  3846     }
       
  3847   }
       
  3848   return 0;
       
  3849 }
       
  3850 
       
  3851 /* --------------------------- memalign support -------------------------- */
       
  3852 
       
  3853 static void* internal_memalign(mstate m, size_t alignment, size_t bytes) {
       
  3854   if (alignment <= MALLOC_ALIGNMENT)    /* Can just use malloc */
       
  3855     return internal_malloc(m, bytes);
       
  3856   if (alignment <  MIN_CHUNK_SIZE) /* must be at least a minimum chunk size */
       
  3857     alignment = MIN_CHUNK_SIZE;
       
  3858   if ((alignment & (alignment-SIZE_T_ONE)) != 0) {/* Ensure a power of 2 */
       
  3859     size_t a = MALLOC_ALIGNMENT << 1;
       
  3860     while (a < alignment) a <<= 1;
       
  3861     alignment = a;
       
  3862   }
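  /* Worked example: a requested alignment of 24 is not a power of two
     (24 & 23 == 16), so a starts at MALLOC_ALIGNMENT << 1 == 16 and
     doubles until it reaches 32, which becomes the effective alignment. */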
       
  3863   
       
  3864   if (bytes >= MAX_REQUEST - alignment) {
       
  3865     if (m != 0)  { /* Test isn't needed but avoids compiler warning */
       
  3866       MALLOC_FAILURE_ACTION;
       
  3867     }
       
  3868   }
       
  3869   else {
       
  3870     size_t nb = request2size(bytes);
       
  3871     size_t req = nb + alignment + MIN_CHUNK_SIZE - CHUNK_OVERHEAD;
       
  3872     char* mem = (char*)internal_malloc(m, req);
       
  3873     if (mem != 0) {
       
  3874       void* leader = 0;
       
  3875       void* trailer = 0;
       
  3876       mchunkptr p = mem2chunk(mem);
       
  3877 
       
  3878       if (PREACTION(m)) return 0;
       
  3879       if ((((size_t)(mem)) % alignment) != 0) { /* misaligned */
       
  3880         /*
       
  3881           Find an aligned spot inside chunk.  Since we need to give
       
  3882           back leading space in a chunk of at least MIN_CHUNK_SIZE, if
       
  3883           the first calculation places us at a spot with less than
       
  3884           MIN_CHUNK_SIZE leader, we can move to the next aligned spot.
       
  3885           We've allocated enough total room so that this is always
       
  3886           possible.
       
  3887         */
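        /* Worked example: with mem == 0x1009 and alignment == 0x40, the
           first aligned payload address is (0x1009 + 0x3F) & ~0x3F == 0x1040;
           if the resulting leader would be smaller than MIN_CHUNK_SIZE, pos
           advances one more alignment unit, to 0x1080. */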
       
  3888         char* br = (char*)mem2chunk((size_t)(((size_t)(mem +
       
  3889                                                        alignment -
       
  3890                                                        SIZE_T_ONE)) &
       
  3891                                              -alignment));
       
  3892         char* pos = ((size_t)(br - (char*)(p)) >= MIN_CHUNK_SIZE)?
       
  3893           br : br+alignment;
       
  3894         mchunkptr newp = (mchunkptr)pos;
       
  3895         size_t leadsize = pos - (char*)(p);
       
  3896         size_t newsize = chunksize(p) - leadsize;
       
  3897 
       
  3898         if (is_mmapped(p)) { /* For mmapped chunks, just adjust offset */
       
  3899           newp->prev_foot = p->prev_foot + leadsize;
       
  3900           newp->head = (newsize|CINUSE_BIT);
       
  3901         }
       
  3902         else { /* Otherwise, give back leader, use the rest */
       
  3903           set_inuse(m, newp, newsize);
       
  3904           set_inuse(m, p, leadsize);
       
  3905           leader = chunk2mem(p);
       
  3906         }
       
  3907         p = newp;
       
  3908       }
       
  3909 
       
  3910       /* Give back spare room at the end */
       
  3911       if (!is_mmapped(p)) {
       
  3912         size_t size = chunksize(p);
       
  3913         if (size > nb + MIN_CHUNK_SIZE) {
       
  3914           size_t remainder_size = size - nb;
       
  3915           mchunkptr remainder = chunk_plus_offset(p, nb);
       
  3916           set_inuse(m, p, nb);
       
  3917           set_inuse(m, remainder, remainder_size);
       
  3918           trailer = chunk2mem(remainder);
       
  3919         }
       
  3920       }
       
  3921 
       
  3922       assert (chunksize(p) >= nb);
       
  3923       assert((((size_t)(chunk2mem(p))) % alignment) == 0);
       
  3924       check_inuse_chunk(m, p);
       
  3925       POSTACTION(m);
       
  3926       if (leader != 0) {
       
  3927         internal_free(m, leader);
       
  3928       }
       
  3929       if (trailer != 0) {
       
  3930         internal_free(m, trailer);
       
  3931       }
       
  3932       return chunk2mem(p);
       
  3933     }
       
  3934   }
       
  3935   return 0;
       
  3936 }
       
  3937 
       
  3938 /* ------------------------ comalloc/coalloc support --------------------- */
       
  3939 
       
  3940 static void** ialloc(mstate m,
       
  3941                      size_t n_elements,
       
  3942                      size_t* sizes,
       
  3943                      int opts,
       
  3944                      void* chunks[]) {
       
  3945   /*
       
  3946     This provides common support for independent_X routines, handling
       
  3947     all of the combinations that can result.
       
  3948 
       
  3949     The opts arg has:
       
  3950     bit 0 set if all elements are same size (using sizes[0])
       
  3951     bit 1 set if elements should be zeroed
       
  3952   */
       
  3953 
       
  3954   size_t    element_size;   /* chunksize of each element, if all same */
       
  3955   size_t    contents_size;  /* total size of elements */
       
  3956   size_t    array_size;     /* request size of pointer array */
       
  3957   void*     mem;            /* malloced aggregate space */
       
  3958   mchunkptr p;              /* corresponding chunk */
       
  3959   size_t    remainder_size; /* remaining bytes while splitting */
       
  3960   void**    marray;         /* either "chunks" or malloced ptr array */
       
  3961   mchunkptr array_chunk;    /* chunk for malloced ptr array */
       
  3962   flag_t    was_enabled;    /* to disable mmap */
       
  3963   size_t    size;
       
  3964   size_t    i;
       
  3965 
       
  3966   /* compute array length, if needed */
       
  3967   if (chunks != 0) {
       
  3968     if (n_elements == 0)
       
  3969       return chunks; /* nothing to do */
       
  3970     marray = chunks;
       
  3971     array_size = 0;
       
  3972   }
       
  3973   else {
       
  3974     /* if empty req, must still return chunk representing empty array */
       
  3975     if (n_elements == 0)
       
  3976       return (void**)internal_malloc(m, 0);
       
  3977     marray = 0;
       
  3978     array_size = request2size(n_elements * (sizeof(void*)));
       
  3979   }
       
  3980 
       
  3981   /* compute total element size */
       
  3982   if (opts & 0x1) { /* all-same-size */
       
  3983     element_size = request2size(*sizes);
       
  3984     contents_size = n_elements * element_size;
       
  3985   }
       
  3986   else { /* add up all the sizes */
       
  3987     element_size = 0;
       
  3988     contents_size = 0;
       
  3989     for (i = 0; i != n_elements; ++i)
       
  3990       contents_size += request2size(sizes[i]);
       
  3991   }
       
  3992 
       
  3993   size = contents_size + array_size;
       
  3994 
       
  3995   /*
       
  3996      Allocate the aggregate chunk.  First disable direct-mmapping so
       
  3997      malloc won't use it, since we would not be able to later
       
  3998      free/realloc space internal to a segregated mmap region.
       
  3999   */
       
  4000   was_enabled = use_mmap(m);
       
  4001   disable_mmap(m);
       
  4002   mem = internal_malloc(m, size - CHUNK_OVERHEAD);
       
  4003   if (was_enabled)
       
  4004     enable_mmap(m);
       
  4005   if (mem == 0)
       
  4006     return 0;
       
  4007 
       
  4008   if (PREACTION(m)) return 0;
       
  4009   p = mem2chunk(mem);
       
  4010   remainder_size = chunksize(p);
       
  4011 
       
  4012   assert(!is_mmapped(p));
       
  4013 
       
  4014   if (opts & 0x2) {       /* optionally clear the elements */
       
  4015     memset((size_t*)mem, 0, remainder_size - SIZE_T_SIZE - array_size);
       
  4016   }
       
  4017 
       
  4018   /* If not provided, allocate the pointer array as final part of chunk */
       
  4019   if (marray == 0) {
       
  4020     size_t  array_chunk_size;
       
  4021     array_chunk = chunk_plus_offset(p, contents_size);
       
  4022     array_chunk_size = remainder_size - contents_size;
       
  4023     marray = (void**) (chunk2mem(array_chunk));
       
  4024     set_size_and_pinuse_of_inuse_chunk(m, array_chunk, array_chunk_size);
       
  4025     remainder_size = contents_size;
       
  4026   }
       
  4027 
       
  4028   /* split out elements */
       
  4029   for (i = 0; ; ++i) {
       
  4030     marray[i] = chunk2mem(p);
       
  4031     if (i != n_elements-1) {
       
  4032       if (element_size != 0)
       
  4033         size = element_size;
       
  4034       else
       
  4035         size = request2size(sizes[i]);
       
  4036       remainder_size -= size;
       
  4037       set_size_and_pinuse_of_inuse_chunk(m, p, size);
       
  4038       p = chunk_plus_offset(p, size);
       
  4039     }
       
  4040     else { /* the final element absorbs any overallocation slop */
       
  4041       set_size_and_pinuse_of_inuse_chunk(m, p, remainder_size);
       
  4042       break;
       
  4043     }
       
  4044   }
       
  4045 
       
  4046 #if DEBUG
       
  4047   if (marray != chunks) {
       
  4048     /* final element must have exactly exhausted chunk */
       
  4049     if (element_size != 0) {
       
  4050       assert(remainder_size == element_size);
       
  4051     }
       
  4052     else {
       
  4053       assert(remainder_size == request2size(sizes[i]));
       
  4054     }
       
  4055     check_inuse_chunk(m, mem2chunk(marray));
       
  4056   }
       
  4057   for (i = 0; i != n_elements; ++i)
       
  4058     check_inuse_chunk(m, mem2chunk(marray[i]));
       
  4059 
       
  4060 #endif /* DEBUG */
       
  4061 
       
  4062   POSTACTION(m);
       
  4063   return marray;
       
  4064 }
  4137 }
  4065 
  4138 
  4066 
  4139 
  4067 /* -------------------------- public routines ---------------------------- */
  4140 /* -------------------------- public routines ---------------------------- */
  4068 
  4141 
  4069 #if !ONLY_MSPACES
  4142 #if !ONLY_MSPACES
  4070 
  4143 
  4071 void* dlmalloc(size_t bytes) {
  4144 void *
  4072   /*
  4145 dlmalloc(size_t bytes)
  4073      Basic algorithm:
  4146 {
  4074      If a small request (< 256 bytes minus per-chunk overhead):
  4147     /*
       
  4148        Basic algorithm:
       
  4149        If a small request (< 256 bytes minus per-chunk overhead):
  4075        1. If one exists, use a remainderless chunk in associated smallbin.
  4150        1. If one exists, use a remainderless chunk in associated smallbin.
  4076           (Remainderless means that there are too few excess bytes to
  4151        (Remainderless means that there are too few excess bytes to
  4077           represent as a chunk.)
  4152        represent as a chunk.)
  4078        2. If it is big enough, use the dv chunk, which is normally the
  4153        2. If it is big enough, use the dv chunk, which is normally the
  4079           chunk adjacent to the one used for the most recent small request.
  4154        chunk adjacent to the one used for the most recent small request.
  4080        3. If one exists, split the smallest available chunk in a bin,
  4155        3. If one exists, split the smallest available chunk in a bin,
  4081           saving remainder in dv.
  4156        saving remainder in dv.
  4082        4. If it is big enough, use the top chunk.
  4157        4. If it is big enough, use the top chunk.
   4083        5. If available, get memory from system and use it.
   4158        5. If available, get memory from system and use it.
  4084      Otherwise, for a large request:
  4159        Otherwise, for a large request:
  4085        1. Find the smallest available binned chunk that fits, and use it
  4160        1. Find the smallest available binned chunk that fits, and use it
  4086           if it is better fitting than dv chunk, splitting if necessary.
  4161        if it is better fitting than dv chunk, splitting if necessary.
  4087        2. If better fitting than any binned chunk, use the dv chunk.
  4162        2. If better fitting than any binned chunk, use the dv chunk.
  4088        3. If it is big enough, use the top chunk.
  4163        3. If it is big enough, use the top chunk.
  4089        4. If request size >= mmap threshold, try to directly mmap this chunk.
  4164        4. If request size >= mmap threshold, try to directly mmap this chunk.
   4090        5. If available, get memory from system and use it.
   4165        5. If available, get memory from system and use it.
  4091 
  4166 
   4092      The ugly gotos here ensure that postaction occurs along all paths.
   4167        The ugly gotos here ensure that postaction occurs along all paths.
  4093   */
  4168      */
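    /*
       Worked example of the small-request path (a sketch assuming 4-byte
       size_t and no FOOTERS): a 20-byte request pads to nb == 24 (20 plus
       4 bytes of chunk overhead, rounded up to an 8-byte multiple) and
       maps to smallbin index 3 (24 >> 3); if that bin or the next one is
       nonempty, step 1 returns a chunk with no splitting at all.
     */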
  4094 
  4169 
  4095   if (!PREACTION(gm)) {
  4170     if (!PREACTION(gm)) {
  4096     void* mem;
  4171         void *mem;
  4097     size_t nb;
  4172         size_t nb;
  4098     if (bytes <= MAX_SMALL_REQUEST) {
  4173         if (bytes <= MAX_SMALL_REQUEST) {
  4099       bindex_t idx;
  4174             bindex_t idx;
  4100       binmap_t smallbits;
  4175             binmap_t smallbits;
  4101       nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes);
  4176             nb = (bytes < MIN_REQUEST) ? MIN_CHUNK_SIZE : pad_request(bytes);
  4102       idx = small_index(nb);
  4177             idx = small_index(nb);
  4103       smallbits = gm->smallmap >> idx;
  4178             smallbits = gm->smallmap >> idx;
  4104 
  4179 
  4105       if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */
  4180             if ((smallbits & 0x3U) != 0) {      /* Remainderless fit to a smallbin. */
  4106         mchunkptr b, p;
  4181                 mchunkptr b, p;
  4107         idx += ~smallbits & 1;       /* Uses next bin if idx empty */
  4182                 idx += ~smallbits & 1;  /* Uses next bin if idx empty */
  4108         b = smallbin_at(gm, idx);
  4183                 b = smallbin_at(gm, idx);
  4109         p = b->fd;
  4184                 p = b->fd;
  4110         assert(chunksize(p) == small_index2size(idx));
  4185                 assert(chunksize(p) == small_index2size(idx));
  4111         unlink_first_small_chunk(gm, b, p, idx);
  4186                 unlink_first_small_chunk(gm, b, p, idx);
  4112         set_inuse_and_pinuse(gm, p, small_index2size(idx));
  4187                 set_inuse_and_pinuse(gm, p, small_index2size(idx));
  4113         mem = chunk2mem(p);
  4188                 mem = chunk2mem(p);
  4114         check_malloced_chunk(gm, mem, nb);
  4189                 check_malloced_chunk(gm, mem, nb);
  4115         goto postaction;
  4190                 goto postaction;
  4116       }
  4191             }
  4117 
  4192 
  4118       else if (nb > gm->dvsize) {
  4193             else if (nb > gm->dvsize) {
  4119         if (smallbits != 0) { /* Use chunk in next nonempty smallbin */
  4194                 if (smallbits != 0) {   /* Use chunk in next nonempty smallbin */
  4120           mchunkptr b, p, r;
  4195                     mchunkptr b, p, r;
  4121           size_t rsize;
  4196                     size_t rsize;
  4122           bindex_t i;
  4197                     bindex_t i;
  4123           binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
  4198                     binmap_t leftbits =
  4124           binmap_t leastbit = least_bit(leftbits);
  4199                         (smallbits << idx) & left_bits(idx2bit(idx));
  4125           compute_bit2idx(leastbit, i);
  4200                     binmap_t leastbit = least_bit(leftbits);
  4126           b = smallbin_at(gm, i);
  4201                     compute_bit2idx(leastbit, i);
  4127           p = b->fd;
  4202                     b = smallbin_at(gm, i);
  4128           assert(chunksize(p) == small_index2size(i));
  4203                     p = b->fd;
  4129           unlink_first_small_chunk(gm, b, p, i);
  4204                     assert(chunksize(p) == small_index2size(i));
  4130           rsize = small_index2size(i) - nb;
  4205                     unlink_first_small_chunk(gm, b, p, i);
   4131           /* Fit here cannot be remainderless if 4-byte sizes */
  4206                     rsize = small_index2size(i) - nb;
  4132           if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
   4207                     /* Fit here cannot be remainderless if 4-byte sizes */
  4133             set_inuse_and_pinuse(gm, p, small_index2size(i));
  4208                     if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
  4134           else {
  4209                         set_inuse_and_pinuse(gm, p, small_index2size(i));
       
  4210                     else {
       
  4211                         set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
       
  4212                         r = chunk_plus_offset(p, nb);
       
  4213                         set_size_and_pinuse_of_free_chunk(r, rsize);
       
  4214                         replace_dv(gm, r, rsize);
       
  4215                     }
       
  4216                     mem = chunk2mem(p);
       
  4217                     check_malloced_chunk(gm, mem, nb);
       
  4218                     goto postaction;
       
  4219                 }
       
  4220 
       
  4221                 else if (gm->treemap != 0
       
  4222                          && (mem = tmalloc_small(gm, nb)) != 0) {
       
  4223                     check_malloced_chunk(gm, mem, nb);
       
  4224                     goto postaction;
       
  4225                 }
       
  4226             }
       
  4227         } else if (bytes >= MAX_REQUEST)
       
  4228             nb = MAX_SIZE_T;    /* Too big to allocate. Force failure (in sys alloc) */
       
  4229         else {
       
  4230             nb = pad_request(bytes);
       
  4231             if (gm->treemap != 0 && (mem = tmalloc_large(gm, nb)) != 0) {
       
  4232                 check_malloced_chunk(gm, mem, nb);
       
  4233                 goto postaction;
       
  4234             }
       
  4235         }
       
  4236 
       
  4237         if (nb <= gm->dvsize) {
       
  4238             size_t rsize = gm->dvsize - nb;
       
  4239             mchunkptr p = gm->dv;
       
  4240             if (rsize >= MIN_CHUNK_SIZE) {      /* split dv */
       
  4241                 mchunkptr r = gm->dv = chunk_plus_offset(p, nb);
       
  4242                 gm->dvsize = rsize;
       
  4243                 set_size_and_pinuse_of_free_chunk(r, rsize);
       
  4244                 set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
       
  4245             } else {            /* exhaust dv */
       
  4246                 size_t dvs = gm->dvsize;
       
  4247                 gm->dvsize = 0;
       
  4248                 gm->dv = 0;
       
  4249                 set_inuse_and_pinuse(gm, p, dvs);
       
  4250             }
       
  4251             mem = chunk2mem(p);
       
  4252             check_malloced_chunk(gm, mem, nb);
       
  4253             goto postaction;
       
  4254         }
       
  4255 
       
  4256         else if (nb < gm->topsize) {    /* Split top */
       
  4257             size_t rsize = gm->topsize -= nb;
       
  4258             mchunkptr p = gm->top;
       
  4259             mchunkptr r = gm->top = chunk_plus_offset(p, nb);
       
  4260             r->head = rsize | PINUSE_BIT;
  4135             set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
  4261             set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
  4136             r = chunk_plus_offset(p, nb);
  4262             mem = chunk2mem(p);
  4137             set_size_and_pinuse_of_free_chunk(r, rsize);
  4263             check_top_chunk(gm, gm->top);
  4138             replace_dv(gm, r, rsize);
  4264             check_malloced_chunk(gm, mem, nb);
  4139           }
  4265             goto postaction;
  4140           mem = chunk2mem(p);
       
  4141           check_malloced_chunk(gm, mem, nb);
       
  4142           goto postaction;
       
  4143         }
  4266         }
  4144 
  4267 
  4145         else if (gm->treemap != 0 && (mem = tmalloc_small(gm, nb)) != 0) {
  4268         mem = sys_alloc(gm, nb);
  4146           check_malloced_chunk(gm, mem, nb);
  4269 
  4147           goto postaction;
  4270       postaction:
       
  4271         POSTACTION(gm);
       
  4272         return mem;
       
  4273     }
       
  4274 
       
  4275     return 0;
       
  4276 }
       
  4277 
       
  4278 void
       
  4279 dlfree(void *mem)
       
  4280 {
       
  4281     /*
       
   4282        Consolidate freed chunks with preceding or succeeding bordering
       
  4283        free chunks, if they exist, and then place in a bin.  Intermixed
       
  4284        with special cases for top, dv, mmapped chunks, and usage errors.
       
  4285      */
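    /*
       For example: if the preceding chunk is free, it is merged first
       (backward consolidation); if the successor is the top chunk, the
       merged block becomes the new top and may trigger sys_trim; directly
       mmapped chunks are instead munmapped whole.
     */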
       
  4286 
       
  4287     if (mem != 0) {
       
  4288         mchunkptr p = mem2chunk(mem);
       
  4289 #if FOOTERS
       
  4290         mstate fm = get_mstate_for(p);
       
  4291         if (!ok_magic(fm)) {
       
  4292             USAGE_ERROR_ACTION(fm, p);
       
  4293             return;
  4148         }
  4294         }
  4149       }
       
  4150     }
       
  4151     else if (bytes >= MAX_REQUEST)
       
  4152       nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */
       
  4153     else {
       
  4154       nb = pad_request(bytes);
       
  4155       if (gm->treemap != 0 && (mem = tmalloc_large(gm, nb)) != 0) {
       
  4156         check_malloced_chunk(gm, mem, nb);
       
  4157         goto postaction;
       
  4158       }
       
  4159     }
       
  4160 
       
  4161     if (nb <= gm->dvsize) {
       
  4162       size_t rsize = gm->dvsize - nb;
       
  4163       mchunkptr p = gm->dv;
       
  4164       if (rsize >= MIN_CHUNK_SIZE) { /* split dv */
       
  4165         mchunkptr r = gm->dv = chunk_plus_offset(p, nb);
       
  4166         gm->dvsize = rsize;
       
  4167         set_size_and_pinuse_of_free_chunk(r, rsize);
       
  4168         set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
       
  4169       }
       
  4170       else { /* exhaust dv */
       
  4171         size_t dvs = gm->dvsize;
       
  4172         gm->dvsize = 0;
       
  4173         gm->dv = 0;
       
  4174         set_inuse_and_pinuse(gm, p, dvs);
       
  4175       }
       
  4176       mem = chunk2mem(p);
       
  4177       check_malloced_chunk(gm, mem, nb);
       
  4178       goto postaction;
       
  4179     }
       
  4180 
       
  4181     else if (nb < gm->topsize) { /* Split top */
       
  4182       size_t rsize = gm->topsize -= nb;
       
  4183       mchunkptr p = gm->top;
       
  4184       mchunkptr r = gm->top = chunk_plus_offset(p, nb);
       
  4185       r->head = rsize | PINUSE_BIT;
       
  4186       set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
       
  4187       mem = chunk2mem(p);
       
  4188       check_top_chunk(gm, gm->top);
       
  4189       check_malloced_chunk(gm, mem, nb);
       
  4190       goto postaction;
       
  4191     }
       
  4192 
       
  4193     mem = sys_alloc(gm, nb);
       
  4194 
       
  4195   postaction:
       
  4196     POSTACTION(gm);
       
  4197     return mem;
       
  4198   }
       
  4199 
       
  4200   return 0;
       
  4201 }
       
  4202 
       
  4203 void dlfree(void* mem) {
       
  4204   /*
       
   4205      Consolidate freed chunks with preceding or succeeding bordering
       
  4206      free chunks, if they exist, and then place in a bin.  Intermixed
       
  4207      with special cases for top, dv, mmapped chunks, and usage errors.
       
  4208   */
       
  4209 
       
  4210   if (mem != 0) {
       
  4211     mchunkptr p  = mem2chunk(mem);
       
  4212 #if FOOTERS
       
  4213     mstate fm = get_mstate_for(p);
       
  4214     if (!ok_magic(fm)) {
       
  4215       USAGE_ERROR_ACTION(fm, p);
       
  4216       return;
       
  4217     }
       
  4218 #else /* FOOTERS */
  4295 #else /* FOOTERS */
  4219 #define fm gm
  4296 #define fm gm
  4220 #endif /* FOOTERS */
  4297 #endif /* FOOTERS */
  4221     if (!PREACTION(fm)) {
  4298         if (!PREACTION(fm)) {
  4222       check_inuse_chunk(fm, p);
  4299             check_inuse_chunk(fm, p);
  4223       if (RTCHECK(ok_address(fm, p) && ok_cinuse(p))) {
  4300             if (RTCHECK(ok_address(fm, p) && ok_cinuse(p))) {
  4224         size_t psize = chunksize(p);
  4301                 size_t psize = chunksize(p);
  4225         mchunkptr next = chunk_plus_offset(p, psize);
  4302                 mchunkptr next = chunk_plus_offset(p, psize);
  4226         if (!pinuse(p)) {
  4303                 if (!pinuse(p)) {
  4227           size_t prevsize = p->prev_foot;
  4304                     size_t prevsize = p->prev_foot;
  4228           if ((prevsize & IS_MMAPPED_BIT) != 0) {
  4305                     if ((prevsize & IS_MMAPPED_BIT) != 0) {
  4229             prevsize &= ~IS_MMAPPED_BIT;
  4306                         prevsize &= ~IS_MMAPPED_BIT;
  4230             psize += prevsize + MMAP_FOOT_PAD;
  4307                         psize += prevsize + MMAP_FOOT_PAD;
  4231             if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)
  4308                         if (CALL_MUNMAP((char *) p - prevsize, psize) == 0)
  4232               fm->footprint -= psize;
  4309                             fm->footprint -= psize;
  4233             goto postaction;
  4310                         goto postaction;
  4234           }
  4311                     } else {
  4235           else {
  4312                         mchunkptr prev = chunk_minus_offset(p, prevsize);
  4236             mchunkptr prev = chunk_minus_offset(p, prevsize);
  4313                         psize += prevsize;
  4237             psize += prevsize;
  4314                         p = prev;
  4238             p = prev;
  4315                         if (RTCHECK(ok_address(fm, prev))) {    /* consolidate backward */
  4239             if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */
  4316                             if (p != fm->dv) {
  4240               if (p != fm->dv) {
  4317                                 unlink_chunk(fm, p, prevsize);
  4241                 unlink_chunk(fm, p, prevsize);
  4318                             } else if ((next->head & INUSE_BITS) ==
  4242               }
  4319                                        INUSE_BITS) {
  4243               else if ((next->head & INUSE_BITS) == INUSE_BITS) {
  4320                                 fm->dvsize = psize;
  4244                 fm->dvsize = psize;
  4321                                 set_free_with_pinuse(p, psize, next);
  4245                 set_free_with_pinuse(p, psize, next);
  4322                                 goto postaction;
  4246                 goto postaction;
  4323                             }
  4247               }
  4324                         } else
       
  4325                             goto erroraction;
       
  4326                     }
       
  4327                 }
       
  4328 
       
  4329                 if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) {
       
  4330                     if (!cinuse(next)) {        /* consolidate forward */
       
  4331                         if (next == fm->top) {
       
  4332                             size_t tsize = fm->topsize += psize;
       
  4333                             fm->top = p;
       
  4334                             p->head = tsize | PINUSE_BIT;
       
  4335                             if (p == fm->dv) {
       
  4336                                 fm->dv = 0;
       
  4337                                 fm->dvsize = 0;
       
  4338                             }
       
  4339                             if (should_trim(fm, tsize))
       
  4340                                 sys_trim(fm, 0);
       
  4341                             goto postaction;
       
  4342                         } else if (next == fm->dv) {
       
  4343                             size_t dsize = fm->dvsize += psize;
       
  4344                             fm->dv = p;
       
  4345                             set_size_and_pinuse_of_free_chunk(p, dsize);
       
  4346                             goto postaction;
       
  4347                         } else {
       
  4348                             size_t nsize = chunksize(next);
       
  4349                             psize += nsize;
       
  4350                             unlink_chunk(fm, next, nsize);
       
  4351                             set_size_and_pinuse_of_free_chunk(p, psize);
       
  4352                             if (p == fm->dv) {
       
  4353                                 fm->dvsize = psize;
       
  4354                                 goto postaction;
       
  4355                             }
       
  4356                         }
       
  4357                     } else
       
  4358                         set_free_with_pinuse(p, psize, next);
       
  4359                     insert_chunk(fm, p, psize);
       
  4360                     check_free_chunk(fm, p);
       
  4361                     goto postaction;
       
  4362                 }
  4248             }
  4363             }
  4249             else
  4364           erroraction:
  4250               goto erroraction;
  4365             USAGE_ERROR_ACTION(fm, p);
  4251           }
  4366           postaction:
       
  4367             POSTACTION(fm);
  4252         }
  4368         }
  4253 
  4369     }
  4254         if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) {
       
  4255           if (!cinuse(next)) {  /* consolidate forward */
       
  4256             if (next == fm->top) {
       
  4257               size_t tsize = fm->topsize += psize;
       
  4258               fm->top = p;
       
  4259               p->head = tsize | PINUSE_BIT;
       
  4260               if (p == fm->dv) {
       
  4261                 fm->dv = 0;
       
  4262                 fm->dvsize = 0;
       
  4263               }
       
  4264               if (should_trim(fm, tsize))
       
  4265                 sys_trim(fm, 0);
       
  4266               goto postaction;
       
  4267             }
       
  4268             else if (next == fm->dv) {
       
  4269               size_t dsize = fm->dvsize += psize;
       
  4270               fm->dv = p;
       
  4271               set_size_and_pinuse_of_free_chunk(p, dsize);
       
  4272               goto postaction;
       
  4273             }
       
  4274             else {
       
  4275               size_t nsize = chunksize(next);
       
  4276               psize += nsize;
       
  4277               unlink_chunk(fm, next, nsize);
       
  4278               set_size_and_pinuse_of_free_chunk(p, psize);
       
  4279               if (p == fm->dv) {
       
  4280                 fm->dvsize = psize;
       
  4281                 goto postaction;
       
  4282               }
       
  4283             }
       
  4284           }
       
  4285           else
       
  4286             set_free_with_pinuse(p, psize, next);
       
  4287           insert_chunk(fm, p, psize);
       
  4288           check_free_chunk(fm, p);
       
  4289           goto postaction;
       
  4290         }
       
  4291       }
       
  4292     erroraction:
       
  4293       USAGE_ERROR_ACTION(fm, p);
       
  4294     postaction:
       
  4295       POSTACTION(fm);
       
  4296     }
       
  4297   }
       
  4298 #if !FOOTERS
  4370 #if !FOOTERS
  4299 #undef fm
  4371 #undef fm
  4300 #endif /* FOOTERS */
  4372 #endif /* FOOTERS */
  4301 }
  4373 }
  4302 
  4374 
  4303 void* dlcalloc(size_t n_elements, size_t elem_size) {
  4375 void *
  4304   void* mem;
  4376 dlcalloc(size_t n_elements, size_t elem_size)
  4305   size_t req = 0;
  4377 {
  4306   if (n_elements != 0) {
  4378     void *mem;
  4307     req = n_elements * elem_size;
  4379     size_t req = 0;
  4308     if (((n_elements | elem_size) & ~(size_t)0xffff) &&
  4380     if (n_elements != 0) {
  4309         (req / n_elements != elem_size))
  4381         req = n_elements * elem_size;
  4310       req = MAX_SIZE_T; /* force downstream failure on overflow */
  4382         if (((n_elements | elem_size) & ~(size_t) 0xffff) &&
  4311   }
  4383             (req / n_elements != elem_size))
  4312   mem = dlmalloc(req);
  4384             req = MAX_SIZE_T;   /* force downstream failure on overflow */
  4313   if (mem != 0 && calloc_must_clear(mem2chunk(mem)))
  4385     }
  4314     memset(mem, 0, req);
  4386     mem = dlmalloc(req);
  4315   return mem;
  4387     if (mem != 0 && calloc_must_clear(mem2chunk(mem)))
  4316 }
  4388         memset(mem, 0, req);
  4317 
  4389     return mem;
  4318 void* dlrealloc(void* oldmem, size_t bytes) {
  4390 }
  4319   if (oldmem == 0)
  4391 
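/* Worked example of dlcalloc's overflow guard above, assuming 32-bit
   size_t: n_elements == 0x10000 and elem_size == 0x10000 wrap req around
   to 0; req / n_elements != elem_size then forces req to MAX_SIZE_T, so
   the dlmalloc call fails instead of returning an undersized block. */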
  4320     return dlmalloc(bytes);
  4392 void *
       
  4393 dlrealloc(void *oldmem, size_t bytes)
       
  4394 {
       
  4395     if (oldmem == 0)
       
  4396         return dlmalloc(bytes);
  4321 #ifdef REALLOC_ZERO_BYTES_FREES
  4397 #ifdef REALLOC_ZERO_BYTES_FREES
  4322   if (bytes == 0) {
  4398     if (bytes == 0) {
  4323     dlfree(oldmem);
  4399         dlfree(oldmem);
       
  4400         return 0;
       
  4401     }
       
  4402 #endif /* REALLOC_ZERO_BYTES_FREES */
       
  4403     else {
       
  4404 #if ! FOOTERS
       
  4405         mstate m = gm;
       
  4406 #else /* FOOTERS */
       
  4407         mstate m = get_mstate_for(mem2chunk(oldmem));
       
  4408         if (!ok_magic(m)) {
       
  4409             USAGE_ERROR_ACTION(m, oldmem);
       
  4410             return 0;
       
  4411         }
       
  4412 #endif /* FOOTERS */
       
  4413         return internal_realloc(m, oldmem, bytes);
       
  4414     }
       
  4415 }
       
  4416 
       
  4417 void *
       
  4418 dlmemalign(size_t alignment, size_t bytes)
       
  4419 {
       
  4420     return internal_memalign(gm, alignment, bytes);
       
  4421 }
       
  4422 
       
  4423 void **
       
  4424 dlindependent_calloc(size_t n_elements, size_t elem_size, void *chunks[])
       
  4425 {
       
  4426     size_t sz = elem_size;      /* serves as 1-element array */
       
  4427     return ialloc(gm, n_elements, &sz, 3, chunks);
       
  4428 }
       
  4429 
       
  4430 void **
       
  4431 dlindependent_comalloc(size_t n_elements, size_t sizes[], void *chunks[])
       
  4432 {
       
  4433     return ialloc(gm, n_elements, sizes, 0, chunks);
       
  4434 }
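/* Illustrative usage sketch (hypothetical helper, compiled out): one
   aggregate allocation carved into three independently freeable blocks. */
#if 0
static void
example_comalloc(void)
{
    size_t sizes[3] = { 32, 64, 128 };
    void *chunks[3];
    if (dlindependent_comalloc(3, sizes, chunks) != 0) {
        /* each block may later be passed to dlfree() on its own */
        dlfree(chunks[0]);
        dlfree(chunks[1]);
        dlfree(chunks[2]);
    }
}
#endif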
       
  4435 
       
  4436 void *
       
  4437 dlvalloc(size_t bytes)
       
  4438 {
       
  4439     size_t pagesz;
       
  4440     init_mparams();
       
  4441     pagesz = mparams.page_size;
       
  4442     return dlmemalign(pagesz, bytes);
       
  4443 }
       
  4444 
       
  4445 void *
       
  4446 dlpvalloc(size_t bytes)
       
  4447 {
       
  4448     size_t pagesz;
       
  4449     init_mparams();
       
  4450     pagesz = mparams.page_size;
       
  4451     return dlmemalign(pagesz,
       
  4452                       (bytes + pagesz - SIZE_T_ONE) & ~(pagesz - SIZE_T_ONE));
       
  4453 }
       
  4454 
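/* Example of the rounding above: with a 4096-byte page, dlpvalloc(1)
   passes 4096 to dlmemalign and dlpvalloc(4097) passes 8192, so the
   result always covers a whole number of pages on a page boundary. */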
       
  4455 int
       
  4456 dlmalloc_trim(size_t pad)
       
  4457 {
       
  4458     int result = 0;
       
  4459     if (!PREACTION(gm)) {
       
  4460         result = sys_trim(gm, pad);
       
  4461         POSTACTION(gm);
       
  4462     }
       
  4463     return result;
       
  4464 }
       
  4465 
       
  4466 size_t
       
  4467 dlmalloc_footprint(void)
       
  4468 {
       
  4469     return gm->footprint;
       
  4470 }
       
  4471 
       
  4472 size_t
       
  4473 dlmalloc_max_footprint(void)
       
  4474 {
       
  4475     return gm->max_footprint;
       
  4476 }
       
  4477 
       
  4478 #if !NO_MALLINFO
       
  4479 struct mallinfo
       
  4480 dlmallinfo(void)
       
  4481 {
       
  4482     return internal_mallinfo(gm);
       
  4483 }
       
  4484 #endif /* NO_MALLINFO */
       
  4485 
       
  4486 void
       
   4487 dlmalloc_stats(void)
       
  4488 {
       
  4489     internal_malloc_stats(gm);
       
  4490 }
       
  4491 
       
  4492 size_t
       
  4493 dlmalloc_usable_size(void *mem)
       
  4494 {
       
  4495     if (mem != 0) {
       
  4496         mchunkptr p = mem2chunk(mem);
       
  4497         if (cinuse(p))
       
  4498             return chunksize(p) - overhead_for(p);
       
  4499     }
  4324     return 0;
  4500     return 0;
  4325   }
  4501 }
  4326 #endif /* REALLOC_ZERO_BYTES_FREES */
  4502 
  4327   else {
  4503 int
  4328 #if ! FOOTERS
  4504 dlmallopt(int param_number, int value)
  4329     mstate m = gm;
  4505 {
  4330 #else /* FOOTERS */
  4506     return change_mparam(param_number, value);
  4331     mstate m = get_mstate_for(mem2chunk(oldmem));
       
  4332     if (!ok_magic(m)) {
       
  4333       USAGE_ERROR_ACTION(m, oldmem);
       
  4334       return 0;
       
  4335     }
       
  4336 #endif /* FOOTERS */
       
  4337     return internal_realloc(m, oldmem, bytes);
       
  4338   }
       
  4339 }
       
  4340 
       
  4341 void* dlmemalign(size_t alignment, size_t bytes) {
       
  4342   return internal_memalign(gm, alignment, bytes);
       
  4343 }
       
  4344 
       
  4345 void** dlindependent_calloc(size_t n_elements, size_t elem_size,
       
  4346                                  void* chunks[]) {
       
  4347   size_t sz = elem_size; /* serves as 1-element array */
       
  4348   return ialloc(gm, n_elements, &sz, 3, chunks);
       
  4349 }
       
  4350 
       
  4351 void** dlindependent_comalloc(size_t n_elements, size_t sizes[],
       
  4352                                    void* chunks[]) {
       
  4353   return ialloc(gm, n_elements, sizes, 0, chunks);
       
  4354 }
       
  4355 
       
  4356 void* dlvalloc(size_t bytes) {
       
  4357   size_t pagesz;
       
  4358   init_mparams();
       
  4359   pagesz = mparams.page_size;
       
  4360   return dlmemalign(pagesz, bytes);
       
  4361 }
       
  4362 
       
  4363 void* dlpvalloc(size_t bytes) {
       
  4364   size_t pagesz;
       
  4365   init_mparams();
       
  4366   pagesz = mparams.page_size;
       
  4367   return dlmemalign(pagesz, (bytes + pagesz - SIZE_T_ONE) & ~(pagesz - SIZE_T_ONE));
       
  4368 }
       
  4369 
       
  4370 int dlmalloc_trim(size_t pad) {
       
  4371   int result = 0;
       
  4372   if (!PREACTION(gm)) {
       
  4373     result = sys_trim(gm, pad);
       
  4374     POSTACTION(gm);
       
  4375   }
       
  4376   return result;
       
  4377 }
       
  4378 
       
  4379 size_t dlmalloc_footprint(void) {
       
  4380   return gm->footprint;
       
  4381 }
       
  4382 
       
  4383 size_t dlmalloc_max_footprint(void) {
       
  4384   return gm->max_footprint;
       
  4385 }
       
  4386 
       
  4387 #if !NO_MALLINFO
       
  4388 struct mallinfo dlmallinfo(void) {
       
  4389   return internal_mallinfo(gm);
       
  4390 }
       
  4391 #endif /* NO_MALLINFO */
       
  4392 
       
   4393 void dlmalloc_stats(void) {
       
  4394   internal_malloc_stats(gm);
       
  4395 }
       
  4396 
       
  4397 size_t dlmalloc_usable_size(void* mem) {
       
  4398   if (mem != 0) {
       
  4399     mchunkptr p = mem2chunk(mem);
       
  4400     if (cinuse(p))
       
  4401       return chunksize(p) - overhead_for(p);
       
  4402   }
       
  4403   return 0;
       
  4404 }
       
  4405 
       
  4406 int dlmallopt(int param_number, int value) {
       
  4407   return change_mparam(param_number, value);
       
  4408 }
  4507 }
  4409 
  4508 
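/* Illustrative call (a sketch; parameter macros such as M_TRIM_THRESHOLD
   are defined earlier in this file): dlmallopt(M_TRIM_THRESHOLD, 256*1024)
   forwards to change_mparam, which returns nonzero on success and 0 for
   an unrecognized param_number or value. */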
  4410 #endif /* !ONLY_MSPACES */
  4509 #endif /* !ONLY_MSPACES */
  4411 
  4510 
  4412 /* ----------------------------- user mspaces ---------------------------- */
  4511 /* ----------------------------- user mspaces ---------------------------- */
  4413 
  4512 
  4414 #if MSPACES
  4513 #if MSPACES
  4415 
  4514 
  4416 static mstate init_user_mstate(char* tbase, size_t tsize) {
  4515 static mstate
  4417   size_t msize = pad_request(sizeof(struct malloc_state));
  4516 init_user_mstate(char *tbase, size_t tsize)
  4418   mchunkptr mn;
  4517 {
  4419   mchunkptr msp = align_as_chunk(tbase);
  4518     size_t msize = pad_request(sizeof(struct malloc_state));
  4420   mstate m = (mstate)(chunk2mem(msp));
  4519     mchunkptr mn;
  4421   memset(m, 0, msize);
  4520     mchunkptr msp = align_as_chunk(tbase);
  4422   INITIAL_LOCK(&m->mutex);
  4521     mstate m = (mstate) (chunk2mem(msp));
  4423   msp->head = (msize|PINUSE_BIT|CINUSE_BIT);
  4522     memset(m, 0, msize);
  4424   m->seg.base = m->least_addr = tbase;
  4523     INITIAL_LOCK(&m->mutex);
  4425   m->seg.size = m->footprint = m->max_footprint = tsize;
  4524     msp->head = (msize | PINUSE_BIT | CINUSE_BIT);
  4426   m->magic = mparams.magic;
  4525     m->seg.base = m->least_addr = tbase;
  4427   m->mflags = mparams.default_mflags;
  4526     m->seg.size = m->footprint = m->max_footprint = tsize;
  4428   disable_contiguous(m);
  4527     m->magic = mparams.magic;
  4429   init_bins(m);
  4528     m->mflags = mparams.default_mflags;
  4430   mn = next_chunk(mem2chunk(m));
  4529     disable_contiguous(m);
  4431   init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) - TOP_FOOT_SIZE);
  4530     init_bins(m);
  4432   check_top_chunk(m, m->top);
  4531     mn = next_chunk(mem2chunk(m));
  4433   return m;
  4532     init_top(m, mn, (size_t) ((tbase + tsize) - (char *) mn) - TOP_FOOT_SIZE);
  4434 }
  4533     check_top_chunk(m, m->top);
  4435 
  4534     return m;
  4436 mspace create_mspace(size_t capacity, int locked) {
  4535 }
  4437   mstate m = 0;
  4536 
  4438   size_t msize = pad_request(sizeof(struct malloc_state));
  4537 mspace
  4439   init_mparams(); /* Ensure pagesize etc initialized */
  4538 create_mspace(size_t capacity, int locked)
  4440 
  4539 {
  4441   if (capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) {
  4540     mstate m = 0;
  4442     size_t rs = ((capacity == 0)? mparams.granularity :
  4541     size_t msize = pad_request(sizeof(struct malloc_state));
  4443                  (capacity + TOP_FOOT_SIZE + msize));
  4542     init_mparams();             /* Ensure pagesize etc initialized */
  4444     size_t tsize = granularity_align(rs);
  4543 
  4445     char* tbase = (char*)(CALL_MMAP(tsize));
  4544     if (capacity < (size_t) - (msize + TOP_FOOT_SIZE + mparams.page_size)) {
  4446     if (tbase != CMFAIL) {
  4545         size_t rs = ((capacity == 0) ? mparams.granularity :
  4447       m = init_user_mstate(tbase, tsize);
  4546                      (capacity + TOP_FOOT_SIZE + msize));
  4448       m->seg.sflags = IS_MMAPPED_BIT;
  4547         size_t tsize = granularity_align(rs);
  4449       set_lock(m, locked);
  4548         char *tbase = (char *) (CALL_MMAP(tsize));
  4450     }
  4549         if (tbase != CMFAIL) {
  4451   }
  4550             m = init_user_mstate(tbase, tsize);
  4452   return (mspace)m;
  4551             m->seg.sflags = IS_MMAPPED_BIT;
  4453 }
  4552             set_lock(m, locked);
  4454 
  4553         }
  4455 mspace create_mspace_with_base(void* base, size_t capacity, int locked) {
  4554     }
  4456   mstate m = 0;
  4555     return (mspace) m;
  4457   size_t msize = pad_request(sizeof(struct malloc_state));
  4556 }
  4458   init_mparams(); /* Ensure pagesize etc initialized */
  4557 
  4459 
  4558 mspace
  4460   if (capacity > msize + TOP_FOOT_SIZE &&
  4559 create_mspace_with_base(void *base, size_t capacity, int locked)
  4461       capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) {
  4560 {
  4462     m = init_user_mstate((char*)base, capacity);
  4561     mstate m = 0;
  4463     m->seg.sflags = EXTERN_BIT;
  4562     size_t msize = pad_request(sizeof(struct malloc_state));
  4464     set_lock(m, locked);
  4563     init_mparams();             /* Ensure pagesize etc initialized */
  4465   }
  4564 
  4466   return (mspace)m;
  4565     if (capacity > msize + TOP_FOOT_SIZE &&
  4467 }
  4566         capacity < (size_t) - (msize + TOP_FOOT_SIZE + mparams.page_size)) {
  4468 
  4567         m = init_user_mstate((char *) base, capacity);
  4469 size_t destroy_mspace(mspace msp) {
  4568         m->seg.sflags = EXTERN_BIT;
  4470   size_t freed = 0;
  4569         set_lock(m, locked);
  4471   mstate ms = (mstate)msp;
  4570     }
  4472   if (ok_magic(ms)) {
  4571     return (mspace) m;
  4473     msegmentptr sp = &ms->seg;
  4572 }
  4474     while (sp != 0) {
  4573 
  4475       char* base = sp->base;
  4574 size_t
  4476       size_t size = sp->size;
  4575 destroy_mspace(mspace msp)
  4477       flag_t flag = sp->sflags;
  4576 {
  4478       sp = sp->next;
  4577     size_t freed = 0;
  4479       if ((flag & IS_MMAPPED_BIT) && !(flag & EXTERN_BIT) &&
  4578     mstate ms = (mstate) msp;
  4480           CALL_MUNMAP(base, size) == 0)
  4579     if (ok_magic(ms)) {
  4481         freed += size;
  4580         msegmentptr sp = &ms->seg;
  4482     }
  4581         while (sp != 0) {
  4483   }
  4582             char *base = sp->base;
  4484   else {
  4583             size_t size = sp->size;
  4485     USAGE_ERROR_ACTION(ms,ms);
  4584             flag_t flag = sp->sflags;
  4486   }
  4585             sp = sp->next;
  4487   return freed;
  4586             if ((flag & IS_MMAPPED_BIT) && !(flag & EXTERN_BIT) &&
       
  4587                 CALL_MUNMAP(base, size) == 0)
       
  4588                 freed += size;
       
  4589         }
       
  4590     } else {
       
  4591         USAGE_ERROR_ACTION(ms, ms);
       
  4592     }
       
  4593     return freed;
  4488 }
  4594 }
  4489 
  4595 
  4490 /*
  4596 /*
  4491   mspace versions of routines are near-clones of the global
  4597   mspace versions of routines are near-clones of the global
  4492   versions. This is not so nice but better than the alternatives.
  4598   versions. This is not so nice but better than the alternatives.
  4493 */
  4599 */
  4494 
  4600 
  4495 
  4601 
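/* Illustrative usage sketch (hypothetical helper, compiled out): a private
   arena built from the mspace routines in this section. */
#if 0
static void
example_mspace(void)
{
    mspace ms = create_mspace(0, 0);    /* default capacity, no locking */
    if (ms != 0) {
        void *q = mspace_malloc(ms, 128);
        if (q != 0)
            mspace_free(ms, q);
        destroy_mspace(ms);             /* unmaps every segment at once */
    }
}
#endif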
  4496 void* mspace_malloc(mspace msp, size_t bytes) {
  4602 void *
  4497   mstate ms = (mstate)msp;
  4603 mspace_malloc(mspace msp, size_t bytes)
  4498   if (!ok_magic(ms)) {
  4604 {
  4499     USAGE_ERROR_ACTION(ms,ms);
  4605     mstate ms = (mstate) msp;
       
  4606     if (!ok_magic(ms)) {
       
  4607         USAGE_ERROR_ACTION(ms, ms);
       
  4608         return 0;
       
  4609     }
       
  4610     if (!PREACTION(ms)) {
       
  4611         void *mem;
       
  4612         size_t nb;
       
  4613         if (bytes <= MAX_SMALL_REQUEST) {
       
  4614             bindex_t idx;
       
  4615             binmap_t smallbits;
       
  4616             nb = (bytes < MIN_REQUEST) ? MIN_CHUNK_SIZE : pad_request(bytes);
       
  4617             idx = small_index(nb);
       
  4618             smallbits = ms->smallmap >> idx;
       
  4619 
       
  4620             if ((smallbits & 0x3U) != 0) {      /* Remainderless fit to a smallbin. */
       
  4621                 mchunkptr b, p;
       
  4622                 idx += ~smallbits & 1;  /* Uses next bin if idx empty */
       
  4623                 b = smallbin_at(ms, idx);
       
  4624                 p = b->fd;
       
  4625                 assert(chunksize(p) == small_index2size(idx));
       
  4626                 unlink_first_small_chunk(ms, b, p, idx);
       
  4627                 set_inuse_and_pinuse(ms, p, small_index2size(idx));
       
  4628                 mem = chunk2mem(p);
       
  4629                 check_malloced_chunk(ms, mem, nb);
       
  4630                 goto postaction;
       
  4631             }
       
  4632 
       
  4633             else if (nb > ms->dvsize) {
       
  4634                 if (smallbits != 0) {   /* Use chunk in next nonempty smallbin */
       
  4635                     mchunkptr b, p, r;
       
  4636                     size_t rsize;
       
  4637                     bindex_t i;
       
  4638                     binmap_t leftbits =
       
  4639                         (smallbits << idx) & left_bits(idx2bit(idx));
       
  4640                     binmap_t leastbit = least_bit(leftbits);
       
  4641                     compute_bit2idx(leastbit, i);
       
  4642                     b = smallbin_at(ms, i);
       
  4643                     p = b->fd;
       
  4644                     assert(chunksize(p) == small_index2size(i));
       
  4645                     unlink_first_small_chunk(ms, b, p, i);
       
  4646                     rsize = small_index2size(i) - nb;
       
  4647                     /* Fit here cannot be remainderless if 4byte sizes */
       
  4648                     if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
       
  4649                         set_inuse_and_pinuse(ms, p, small_index2size(i));
       
  4650                     else {
       
  4651                         set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
       
  4652                         r = chunk_plus_offset(p, nb);
       
  4653                         set_size_and_pinuse_of_free_chunk(r, rsize);
       
  4654                         replace_dv(ms, r, rsize);
       
  4655                     }
       
  4656                     mem = chunk2mem(p);
       
  4657                     check_malloced_chunk(ms, mem, nb);
       
  4658                     goto postaction;
       
  4659                 }
       
  4660 
       
  4661                 else if (ms->treemap != 0
       
  4662                          && (mem = tmalloc_small(ms, nb)) != 0) {
       
  4663                     check_malloced_chunk(ms, mem, nb);
       
  4664                     goto postaction;
       
  4665                 }
       
  4666             }
       
  4667         } else if (bytes >= MAX_REQUEST)
       
  4668             nb = MAX_SIZE_T;    /* Too big to allocate. Force failure (in sys alloc) */
       
  4669         else {
       
  4670             nb = pad_request(bytes);
       
  4671             if (ms->treemap != 0 && (mem = tmalloc_large(ms, nb)) != 0) {
       
  4672                 check_malloced_chunk(ms, mem, nb);
       
  4673                 goto postaction;
       
  4674             }
       
  4675         }
       
  4676 
       
  4677         if (nb <= ms->dvsize) {
       
  4678             size_t rsize = ms->dvsize - nb;
       
  4679             mchunkptr p = ms->dv;
       
  4680             if (rsize >= MIN_CHUNK_SIZE) {      /* split dv */
       
  4681                 mchunkptr r = ms->dv = chunk_plus_offset(p, nb);
       
  4682                 ms->dvsize = rsize;
       
  4683                 set_size_and_pinuse_of_free_chunk(r, rsize);
       
  4684                 set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
       
  4685             } else {            /* exhaust dv */
       
  4686                 size_t dvs = ms->dvsize;
       
  4687                 ms->dvsize = 0;
       
  4688                 ms->dv = 0;
       
  4689                 set_inuse_and_pinuse(ms, p, dvs);
       
  4690             }
       
  4691             mem = chunk2mem(p);
       
  4692             check_malloced_chunk(ms, mem, nb);
       
  4693             goto postaction;
       
  4694         }
       
  4695 
       
  4696         else if (nb < ms->topsize) {    /* Split top */
       
  4697             size_t rsize = ms->topsize -= nb;
       
  4698             mchunkptr p = ms->top;
       
  4699             mchunkptr r = ms->top = chunk_plus_offset(p, nb);
       
  4700             r->head = rsize | PINUSE_BIT;
       
  4701             set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
       
  4702             mem = chunk2mem(p);
       
  4703             check_top_chunk(ms, ms->top);
       
  4704             check_malloced_chunk(ms, mem, nb);
       
  4705             goto postaction;
       
  4706         }
       
  4707 
       
  4708         mem = sys_alloc(ms, nb);
       
  4709 
       
  4710       postaction:
       
  4711         POSTACTION(ms);
       
  4712         return mem;
       
  4713     }
       
  4714 
  4500     return 0;
  4715     return 0;
  4501   }
  4716 }
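
/*
  A note on the smallbin scan in mspace_malloc above: ms->smallmap keeps
  one bit per small bin, set while that bin is nonempty, so "find the
  smallest nonempty bin larger than idx" is pure bit arithmetic rather
  than a loop. A minimal sketch of the same idea with plain operators
  (illustrative only; the code uses the left_bits, least_bit and
  compute_bit2idx macros defined earlier in this file, which can compile
  down to single instructions):

      binmap_t above  = ms->smallmap & ~(((binmap_t) 2 << idx) - 1);
      binmap_t lowest = above & (binmap_t) (0 - above);

  "above" keeps only the bins strictly larger than idx, and "lowest"
  isolates its least significant set bit; that bit's position is the bin
  index handed to smallbin_at().
*/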
   4502   if (!PREACTION(ms)) {
   4717 
   4503     void* mem;
   4718 void
   4504     size_t nb;
   4719 mspace_free(mspace msp, void *mem)
   4505     if (bytes <= MAX_SMALL_REQUEST) {
   4720 {
   4506       bindex_t idx;
   4721     if (mem != 0) {
   4507       binmap_t smallbits;
   4722         mchunkptr p = mem2chunk(mem);
   4508       nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes);
   4723 #if FOOTERS
   4509       idx = small_index(nb);
   4724         mstate fm = get_mstate_for(p);
   4510       smallbits = ms->smallmap >> idx;
   4725 #else /* FOOTERS */
   4511 
   4726         mstate fm = (mstate) msp;
   4512       if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */
   4727 #endif /* FOOTERS */
   4513         mchunkptr b, p;
   4728         if (!ok_magic(fm)) {
   4514         idx += ~smallbits & 1;       /* Uses next bin if idx empty */
   4729             USAGE_ERROR_ACTION(fm, p);
   4515         b = smallbin_at(ms, idx);
   4730             return;
   4516         p = b->fd;
   4517         assert(chunksize(p) == small_index2size(idx));
   4518         unlink_first_small_chunk(ms, b, p, idx);
   4519         set_inuse_and_pinuse(ms, p, small_index2size(idx));
   4520         mem = chunk2mem(p);
   4521         check_malloced_chunk(ms, mem, nb);
   4522         goto postaction;
   4523       }
   4524 
   4525       else if (nb > ms->dvsize) {
   4526         if (smallbits != 0) { /* Use chunk in next nonempty smallbin */
   4527           mchunkptr b, p, r;
   4528           size_t rsize;
   4529           bindex_t i;
   4530           binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
   4531           binmap_t leastbit = least_bit(leftbits);
   4532           compute_bit2idx(leastbit, i);
   4533           b = smallbin_at(ms, i);
   4534           p = b->fd;
   4535           assert(chunksize(p) == small_index2size(i));
   4536           unlink_first_small_chunk(ms, b, p, i);
   4537           rsize = small_index2size(i) - nb;
   4538           /* Fit here cannot be remainderless if 4-byte sizes */
   4539           if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
   4540             set_inuse_and_pinuse(ms, p, small_index2size(i));
   4541           else {
   4542             set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
   4543             r = chunk_plus_offset(p, nb);
   4544             set_size_and_pinuse_of_free_chunk(r, rsize);
   4545             replace_dv(ms, r, rsize);
   4546           }
   4547           mem = chunk2mem(p);
   4548           check_malloced_chunk(ms, mem, nb);
   4549           goto postaction;
   4550         }
   4731         }
   4551 
   4732         if (!PREACTION(fm)) {
   4552         else if (ms->treemap != 0 && (mem = tmalloc_small(ms, nb)) != 0) {
   4733             check_inuse_chunk(fm, p);
   4553           check_malloced_chunk(ms, mem, nb);
   4734             if (RTCHECK(ok_address(fm, p) && ok_cinuse(p))) {
   4554           goto postaction;
   4735                 size_t psize = chunksize(p);
   4736                 mchunkptr next = chunk_plus_offset(p, psize);
   4737                 if (!pinuse(p)) {
   4738                     size_t prevsize = p->prev_foot;
   4739                     if ((prevsize & IS_MMAPPED_BIT) != 0) {
   4740                         prevsize &= ~IS_MMAPPED_BIT;
   4741                         psize += prevsize + MMAP_FOOT_PAD;
   4742                         if (CALL_MUNMAP((char *) p - prevsize, psize) == 0)
   4743                             fm->footprint -= psize;
   4744                         goto postaction;
   4745                     } else {
   4746                         mchunkptr prev = chunk_minus_offset(p, prevsize);
   4747                         psize += prevsize;
   4748                         p = prev;
   4749                         if (RTCHECK(ok_address(fm, prev))) {    /* consolidate backward */
   4750                             if (p != fm->dv) {
   4751                                 unlink_chunk(fm, p, prevsize);
   4752                             } else if ((next->head & INUSE_BITS) ==
   4753                                        INUSE_BITS) {
   4754                                 fm->dvsize = psize;
   4755                                 set_free_with_pinuse(p, psize, next);
   4756                                 goto postaction;
   4757                             }
   4758                         } else
   4759                             goto erroraction;
   4760                     }
   4761                 }
   4762 
   4763                 if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) {
   4764                     if (!cinuse(next)) {        /* consolidate forward */
   4765                         if (next == fm->top) {
   4766                             size_t tsize = fm->topsize += psize;
   4767                             fm->top = p;
   4768                             p->head = tsize | PINUSE_BIT;
   4769                             if (p == fm->dv) {
   4770                                 fm->dv = 0;
   4771                                 fm->dvsize = 0;
   4772                             }
   4773                             if (should_trim(fm, tsize))
   4774                                 sys_trim(fm, 0);
   4775                             goto postaction;
   4776                         } else if (next == fm->dv) {
   4777                             size_t dsize = fm->dvsize += psize;
   4778                             fm->dv = p;
   4779                             set_size_and_pinuse_of_free_chunk(p, dsize);
   4780                             goto postaction;
   4781                         } else {
   4782                             size_t nsize = chunksize(next);
   4783                             psize += nsize;
   4784                             unlink_chunk(fm, next, nsize);
   4785                             set_size_and_pinuse_of_free_chunk(p, psize);
   4786                             if (p == fm->dv) {
   4787                                 fm->dvsize = psize;
   4788                                 goto postaction;
   4789                             }
   4790                         }
   4791                     } else
   4792                         set_free_with_pinuse(p, psize, next);
   4793                     insert_chunk(fm, p, psize);
   4794                     check_free_chunk(fm, p);
   4795                     goto postaction;
   4796                 }
   4797             }
   4798           erroraction:
   4799             USAGE_ERROR_ACTION(fm, p);
   4800           postaction:
   4801             POSTACTION(fm);
   4555         }
   4802         }
   4556       }
   4803     }
   4557     }
   4804 }
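
/*
  The free path above maintains the invariant that no two free chunks
  are ever adjacent. Before the chunk is reinserted it is merged
  backward (pinuse(p) == 0 means the previous chunk is free, and the
  boundary tag p->prev_foot records that chunk's size, so no search is
  needed) and merged forward (cinuse(next) == 0), with the top chunk and
  the designated victim dv handled as special cases; merging into top
  may in turn trigger sys_trim. Mmapped chunks never coalesce: they go
  straight back to the system via CALL_MUNMAP. The boundary-tag layout
  being relied on:

      +----------------------+ <- chunk_minus_offset(p, p->prev_foot)
      |  free previous chunk |
      +----------------------+ <- p, with pinuse(p) == 0
      |  chunk being freed   |
      +----------------------+ <- next = chunk_plus_offset(p, psize)
*/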
   4558     else if (bytes >= MAX_REQUEST)
   4805 
   4559       nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */
   4806 void *
   4807 mspace_calloc(mspace msp, size_t n_elements, size_t elem_size)
   4808 {
   4809     void *mem;
   4810     size_t req = 0;
   4811     mstate ms = (mstate) msp;
   4812     if (!ok_magic(ms)) {
   4813         USAGE_ERROR_ACTION(ms, ms);
   4814         return 0;
   4815     }
   4816     if (n_elements != 0) {
   4817         req = n_elements * elem_size;
   4818         if (((n_elements | elem_size) & ~(size_t) 0xffff) &&
   4819             (req / n_elements != elem_size))
   4820             req = MAX_SIZE_T;   /* force downstream failure on overflow */
   4821     }
   4822     mem = internal_malloc(ms, req);
   4823     if (mem != 0 && calloc_must_clear(mem2chunk(mem)))
   4824         memset(mem, 0, req);
   4825     return mem;
   4826 }
       
  4827 
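
/*
  The multiplication guard in mspace_calloc above deserves a note: when
  both n_elements and elem_size fit in 16 bits, their product cannot
  overflow a 32-bit (or wider) size_t, so the division check is skipped
  on the common path, and the divide is only paid when either operand
  has high bits set. A standalone sketch of the same guard (the helper
  name is hypothetical, not part of this file):

      static int mul_overflows(size_t n, size_t s, size_t *prod)
      {
          size_t r = n * s;
          if (n != 0 && ((n | s) & ~(size_t) 0xffff) && r / n != s)
              return 1;
          *prod = r;
          return 0;
      }

  returning nonzero exactly when n * s wraps around size_t.
*/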
       
   4828 void *
   4829 mspace_realloc(mspace msp, void *oldmem, size_t bytes)
   4830 {
   4831     if (oldmem == 0)
   4832         return mspace_malloc(msp, bytes);
   4833 #ifdef REALLOC_ZERO_BYTES_FREES
   4834     if (bytes == 0) {
   4835         mspace_free(msp, oldmem);
   4836         return 0;
   4837     }
   4838 #endif /* REALLOC_ZERO_BYTES_FREES */
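
/*
  REALLOC_ZERO_BYTES_FREES selects between the two historical meanings
  of realloc(p, 0): when it is defined, the block is freed and 0 is
  returned; when it is undefined, the request falls through to
  internal_realloc below, which treats it like any other (minimum-size)
  request.
*/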
   4560     else {
   4839     else {
   4561       nb = pad_request(bytes);
   4562       if (ms->treemap != 0 && (mem = tmalloc_large(ms, nb)) != 0) {
   4563         check_malloced_chunk(ms, mem, nb);
   4564         goto postaction;
   4565       }
   4566     }
   4567 
   4568     if (nb <= ms->dvsize) {
   4569       size_t rsize = ms->dvsize - nb;
   4570       mchunkptr p = ms->dv;
   4571       if (rsize >= MIN_CHUNK_SIZE) { /* split dv */
   4572         mchunkptr r = ms->dv = chunk_plus_offset(p, nb);
   4573         ms->dvsize = rsize;
   4574         set_size_and_pinuse_of_free_chunk(r, rsize);
   4575         set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
   4576       }
   4577       else { /* exhaust dv */
   4578         size_t dvs = ms->dvsize;
   4579         ms->dvsize = 0;
   4580         ms->dv = 0;
   4581         set_inuse_and_pinuse(ms, p, dvs);
   4582       }
   4583       mem = chunk2mem(p);
   4584       check_malloced_chunk(ms, mem, nb);
   4585       goto postaction;
   4586     }
   4587 
   4588     else if (nb < ms->topsize) { /* Split top */
   4589       size_t rsize = ms->topsize -= nb;
   4590       mchunkptr p = ms->top;
   4591       mchunkptr r = ms->top = chunk_plus_offset(p, nb);
   4592       r->head = rsize | PINUSE_BIT;
   4593       set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
   4594       mem = chunk2mem(p);
   4595       check_top_chunk(ms, ms->top);
   4596       check_malloced_chunk(ms, mem, nb);
   4597       goto postaction;
   4598     }
   4599 
   4600     mem = sys_alloc(ms, nb);
   4601 
   4602   postaction:
   4603     POSTACTION(ms);
   4604     return mem;
   4605   }
   4606 
   4607   return 0;
   4608 }
   4609 
   4610 void mspace_free(mspace msp, void* mem) {
   4611   if (mem != 0) {
   4612     mchunkptr p  = mem2chunk(mem);
   4613 #if FOOTERS
   4840 #if FOOTERS
   4614     mstate fm = get_mstate_for(p);
   4841         mchunkptr p = mem2chunk(oldmem);
   4842         mstate ms = get_mstate_for(p);
   4615 #else /* FOOTERS */
   4843 #else /* FOOTERS */
   4616     mstate fm = (mstate)msp;
   4844         mstate ms = (mstate) msp;
   4617 #endif /* FOOTERS */
   4845 #endif /* FOOTERS */
   4618     if (!ok_magic(fm)) {
   4846         if (!ok_magic(ms)) {
   4619       USAGE_ERROR_ACTION(fm, p);
   4847             USAGE_ERROR_ACTION(ms, ms);
   4620       return;
   4848             return 0;
   4621     }
   4622     if (!PREACTION(fm)) {
   4623       check_inuse_chunk(fm, p);
   4624       if (RTCHECK(ok_address(fm, p) && ok_cinuse(p))) {
   4625         size_t psize = chunksize(p);
   4626         mchunkptr next = chunk_plus_offset(p, psize);
   4627         if (!pinuse(p)) {
   4628           size_t prevsize = p->prev_foot;
   4629           if ((prevsize & IS_MMAPPED_BIT) != 0) {
   4630             prevsize &= ~IS_MMAPPED_BIT;
   4631             psize += prevsize + MMAP_FOOT_PAD;
   4632             if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)
   4633               fm->footprint -= psize;
   4634             goto postaction;
   4635           }
   4636           else {
   4637             mchunkptr prev = chunk_minus_offset(p, prevsize);
   4638             psize += prevsize;
   4639             p = prev;
   4640             if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */
   4641               if (p != fm->dv) {
   4642                 unlink_chunk(fm, p, prevsize);
   4643               }
   4644               else if ((next->head & INUSE_BITS) == INUSE_BITS) {
   4645                 fm->dvsize = psize;
   4646                 set_free_with_pinuse(p, psize, next);
   4647                 goto postaction;
   4648               }
   4649             }
   4650             else
   4651               goto erroraction;
   4652           }
   4653         }
   4849         }
   4654 
   4850         return internal_realloc(ms, oldmem, bytes);
   4655         if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) {
   4851     }
   4656           if (!cinuse(next)) {  /* consolidate forward */
   4852 }
   4657             if (next == fm->top) {
   4853 
   4658               size_t tsize = fm->topsize += psize;
   4854 void *
   4659               fm->top = p;
   4855 mspace_memalign(mspace msp, size_t alignment, size_t bytes)
   4660               p->head = tsize | PINUSE_BIT;
   4856 {
   4661               if (p == fm->dv) {
   4857     mstate ms = (mstate) msp;