@@ -938,10 +938,10 @@ void **nedpindependent_comalloc(nedpool *p, size_t elems, size_t *sizes, void **
 	void **ret;
 	threadcache *tc;
 	int mymspace;
 	size_t i, *adjustedsizes=(size_t *) alloca(elems*sizeof(size_t));
 	if(!adjustedsizes) return 0;
 	for(i=0; i<elems; i++)
 		adjustedsizes[i]=sizes[i]<sizeof(threadcacheblk) ? sizeof(threadcacheblk) : sizes[i];
 	GetThreadCache(&p, &tc, &mymspace, 0);
 	GETMSPACE(m, p, tc, mymspace, 0,
 	          ret=mspace_independent_comalloc(m, elems, adjustedsizes, chunks));
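
The first hunk is the body of nedpindependent_comalloc(): each requested size is rounded up to at least sizeof(threadcacheblk), so that the block can later be threaded onto the per-thread cache when it is freed, and the whole batch is then handed to dlmalloc's mspace_independent_comalloc() for the mspace picked by GetThreadCache(). The usage sketch below is not part of the patch; it assumes the usual nedmalloc conventions that a zero pool pointer selects the default pool and that each returned chunk may be released individually with nedpfree().

#include <stdio.h>
#include "nedmalloc.h"

int main(void)
{
	/* Batch-allocate three blocks of different sizes in one call. */
	size_t sizes[3] = { 24, 56, 4096 };
	void *chunks[3];
	int i;

	/* The wrapper returns 0 on failure; on success chunks[] holds the pointers. */
	if (!nedpindependent_comalloc(0, 3, sizes, chunks))
		return 1;
	for (i = 0; i < 3; i++) {
		printf("chunk %d is at %p\n", i, chunks[i]);
		nedpfree(0, chunks[i]);	/* assumed: chunks are individually freeable */
	}
	return 0;
}
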
@@ -955,12 +955,11 @@ void **nedpindependent_comalloc(nedpool *p, size_t elems, size_t *sizes, void **
  */
 char *strdup(const char *s1)
 {
-	char *s2 = 0;
-	if (s1) {
-		size_t len = strlen(s1) + 1;
-		s2 = malloc(len);
+	size_t len = strlen(s1) + 1;
+	char *s2 = malloc(len);
+
+	if (s2)
 		memcpy(s2, s1, len);
-	}
 	return s2;
 }
 #endif
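
The second hunk reworks the strdup() override defined at the end of the file: rather than guarding against a NULL argument and then copying into an unchecked malloc() result, the new code computes the length once, allocates, and copies only if the allocation succeeded, so a NULL return now means out of memory and a NULL argument is no longer checked for. A small caller sketch, not part of the patch (the explicit prototype merely stands in for whatever header declares the override):

#include <stdio.h>
#include <stdlib.h>

char *strdup(const char *s1);	/* the override shown in the hunk above */

int main(void)
{
	char *copy = strdup("nedmalloc");

	if (!copy) {
		/* With the rewritten strdup(), NULL means the allocation failed,
		 * not that a NULL string was passed in. */
		fprintf(stderr, "out of memory\n");
		return 1;
	}
	printf("%s\n", copy);
	free(copy);
	return 0;
}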