free


Free Order Summary

(This summary doesn't explain the checks, and some cases are omitted for brevity)

  1. If the address is null, don't do anything
  2. If the chunk is mmaped, munmap it and finish
  3. Call _int_free
  4. If possible, add the chunk to the tcache
  5. If possible, add the chunk to the fast bin
  6. Call _int_free_merge_chunk to consolidate the chunk if needed and add it to the unsorted list

Note: starting with glibc 2.42, the tcache step can also accept chunks up to a much larger size threshold (see "Recent glibc changes" below). This changes when a free lands in the tcache vs. the unsorted/small/large bins.
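The first steps of this order can be observed with a minimal sketch (a standalone demo, not glibc code; the 128 KiB mmap threshold and the tcache behaviour assume default tunables):

c
#include <stdlib.h>

int main(void)
{
    free(NULL);                    /* step 1: freeing NULL does nothing */

    /* A request far above the default mmap threshold (128 KiB) is served
       by mmap, so freeing it goes straight to munmap (step 2). */
    void *big = malloc(1024 * 1024);

    /* A small request comes from the heap; freeing it normally lands
       in the tcache (step 4). */
    void *small = malloc(32);

    free(big);                     /* munmap() visible under strace */
    free(small);                   /* cached in the tcache, no syscall */
    return 0;
}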

__libc_free

Free calls __libc_free.

  • If the passed address is NULL (0), don't do anything.
  • Check the pointer tag (when memory tagging is enabled)
  • If the chunk is mmaped, munmap it and finish
  • Otherwise, re-apply the memory tag (colour) and call _int_free on it
__libc_free code
c
void
__libc_free (void *mem)
{
  mstate ar_ptr;
  mchunkptr p;                          /* chunk corresponding to mem */

  if (mem == 0)                              /* free(0) has no effect */
    return;

  /* Quickly check that the freed pointer matches the tag for the memory.
     This gives a useful double-free detection.  */
  if (__glibc_unlikely (mtag_enabled))
    *(volatile char *)mem;

  int err = errno;

  p = mem2chunk (mem);

  if (chunk_is_mmapped (p))                       /* release mmapped memory. */
    {
      /* See if the dynamic brk/mmap threshold needs adjusting.
         Dumped fake mmapped chunks do not affect the threshold.  */
      if (!mp_.no_dyn_threshold
          && chunksize_nomask (p) > mp_.mmap_threshold
          && chunksize_nomask (p) <= DEFAULT_MMAP_THRESHOLD_MAX)
        {
          mp_.mmap_threshold = chunksize (p);
          mp_.trim_threshold = 2 * mp_.mmap_threshold;
          LIBC_PROBE (memory_mallopt_free_dyn_thresholds, 2,
                      mp_.mmap_threshold, mp_.trim_threshold);
        }
      munmap_chunk (p);
    }
  else
    {
      MAYBE_INIT_TCACHE ();

      /* Mark the chunk as belonging to the library again.  */
      (void)tag_region (chunk2mem (p), memsize (p));

      ar_ptr = arena_for_chunk (p);
      _int_free (ar_ptr, p, 0);
    }

  __set_errno (err);
}
libc_hidden_def (__libc_free)

_int_free

_int_free start

It starts with some checks to make sure that:

  • the pointer is aligned, otherwise it triggers the error free(): invalid pointer
  • the size isn't smaller than the minimum and is itself aligned, otherwise it triggers the error free(): invalid size
_int_free start
c
// From https://github.com/bminor/glibc/blob/f942a732d37a96217ef828116ebe64a644db18d7/malloc/malloc.c#L4493C1-L4513C28

#define aligned_OK(m) (((unsigned long) (m) & MALLOC_ALIGN_MASK) == 0)

static void
_int_free (mstate av, mchunkptr p, int have_lock)
{
  INTERNAL_SIZE_T size;        /* its size */
  mfastbinptr *fb;             /* associated fastbin */

  size = chunksize (p);

  /* Little security check which won't hurt performance: the
     allocator never wraps around at the end of the address space.
     Therefore we can exclude some size values which might appear
     here by accident or by "design" from some intruder.  */
  if (__builtin_expect ((uintptr_t) p > (uintptr_t) -size, 0)
      || __builtin_expect (misaligned_chunk (p), 0))
    malloc_printerr ("free(): invalid pointer");
  /* We know that each chunk is at least MINSIZE bytes in size or a
     multiple of MALLOC_ALIGNMENT.  */
  if (__glibc_unlikely (size < MINSIZE || !aligned_OK (size)))
    malloc_printerr ("free(): invalid size");

  check_inuse_chunk(av, p);
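The alignment check above is easy to trigger. A minimal sketch (assuming x86-64, where MALLOC_ALIGNMENT is 16) that aborts with free(): invalid pointer:

c
#include <stdlib.h>

int main(void)
{
    char *p = malloc(0x20);
    free(p + 8);   /* not a valid chunk start: misaligned_chunk() is true,
                      so _int_free aborts with "free(): invalid pointer" */
    return 0;
}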

_int_free tcache

First, it will try to place this chunk into the related tcache. Before doing so, if the chunk's key matches the per-thread tcache_key (a suspected double free), it walks the tcache bin at the same index as the freed chunk and checks:

  • If there are more entries than mp_.tcache_count: free(): too many chunks detected in tcache
  • If an entry is not aligned: free(): unaligned chunk detected in tcache 2
  • If the freed chunk is already present in the tcache: free(): double free detected in tcache 2

If all goes well, the chunk is added to the tcache and the function returns.

_int_free tcache
c
// From https://github.com/bminor/glibc/blob/f942a732d37a96217ef828116ebe64a644db18d7/malloc/malloc.c#L4515C1-L4554C7
#if USE_TCACHE
  {
    size_t tc_idx = csize2tidx (size);
    if (tcache != NULL && tc_idx < mp_.tcache_bins)
      {
        /* Check to see if it's already in the tcache.  */
        tcache_entry *e = (tcache_entry *) chunk2mem (p);

        /* This test succeeds on double free.  However, we don't 100%
           trust it (it also matches random payload data at a 1 in
           2^<size_t> chance), so verify it's not an unlikely
           coincidence before aborting.  */
        if (__glibc_unlikely (e->key == tcache_key))
          {
            tcache_entry *tmp;
            size_t cnt = 0;
            LIBC_PROBE (memory_tcache_double_free, 2, e, tc_idx);
            for (tmp = tcache->entries[tc_idx];
                 tmp;
                 tmp = REVEAL_PTR (tmp->next), ++cnt)
              {
                if (cnt >= mp_.tcache_count)
                  malloc_printerr ("free(): too many chunks detected in tcache");
                if (__glibc_unlikely (!aligned_OK (tmp)))
                  malloc_printerr ("free(): unaligned chunk detected in tcache 2");
                if (tmp == e)
                  malloc_printerr ("free(): double free detected in tcache 2");
                /* If we get here, it was a coincidence.  We've wasted a
                   few cycles, but don't abort.  */
              }
          }

        if (tcache->counts[tc_idx] < mp_.tcache_count)
          {
            tcache_put (p, tc_idx);
            return;
          }
      }
  }
#endif
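This detection can be seen with a trivial sketch: freeing the same tcache-sized chunk twice in a row aborts the process:

c
#include <stdlib.h>

int main(void)
{
    void *a = malloc(0x40);
    free(a);   /* a enters the tcache and e->key is set to tcache_key */
    free(a);   /* key matches, the bin is scanned, the duplicate is found:
                  "free(): double free detected in tcache 2" */
    return 0;
}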

_int_free fast bin

First, it checks that the size fits in a fast bin and, when TRIM_FASTBINS is set, that the chunk doesn't border the top chunk.

Then, the freed chunk is added to the top of the fast bin while performing some checks:

  • If the size of the next chunk is invalid (too small or too big), it triggers: free(): invalid next size (fast)
  • If the chunk being added is already the top of the fast bin: double free or corruption (fasttop)
  • If the chunk at the top of the bin has a fastbin index different from the chunk we are adding: invalid fastbin entry (free)
_int_free Fast Bin
c
// From https://github.com/bminor/glibc/blob/f942a732d37a96217ef828116ebe64a644db18d7/malloc/malloc.c#L4556C2-L4631C4

/*
  If eligible, place chunk on a fastbin so it can be found
  and used quickly in malloc.
*/

if ((unsigned long)(size) <= (unsigned long)(get_max_fast ())

#if TRIM_FASTBINS
    /*
      If TRIM_FASTBINS set, don't place chunks
      bordering top into fastbins
    */
    && (chunk_at_offset(p, size) != av->top)
#endif
    ) {

  if (__builtin_expect (chunksize_nomask (chunk_at_offset (p, size))
                        <= CHUNK_HDR_SZ, 0)
      || __builtin_expect (chunksize (chunk_at_offset (p, size))
                           >= av->system_mem, 0))
    {
      bool fail = true;
      /* We might not have a lock at this point and concurrent modifications
         of system_mem might result in a false positive.  Redo the test after
         getting the lock.  */
      if (!have_lock)
        {
          __libc_lock_lock (av->mutex);
          fail = (chunksize_nomask (chunk_at_offset (p, size)) <= CHUNK_HDR_SZ
                  || chunksize (chunk_at_offset (p, size)) >= av->system_mem);
          __libc_lock_unlock (av->mutex);
        }

      if (fail)
        malloc_printerr ("free(): invalid next size (fast)");
    }

  free_perturb (chunk2mem(p), size - CHUNK_HDR_SZ);

  atomic_store_relaxed (&av->have_fastchunks, true);
  unsigned int idx = fastbin_index(size);
  fb = &fastbin (av, idx);

  /* Atomically link P to its fastbin: P->FD = *FB; *FB = P;  */
  mchunkptr old = *fb, old2;

  if (SINGLE_THREAD_P)
    {
      /* Check that the top of the bin is not the record we are going to
         add (i.e., double free).  */
      if (__builtin_expect (old == p, 0))
        malloc_printerr ("double free or corruption (fasttop)");
      p->fd = PROTECT_PTR (&p->fd, old);
      *fb = p;
    }
  else
    do
      {
        /* Check that the top of the bin is not the record we are going to
           add (i.e., double free).  */
        if (__builtin_expect (old == p, 0))
          malloc_printerr ("double free or corruption (fasttop)");
        old2 = old;
        p->fd = PROTECT_PTR (&p->fd, old);
      }
    while ((old = catomic_compare_and_exchange_val_rel (fb, p, old2))
           != old2);

  /* Check that size of fastbin chunk at the top is the same as
     size of the chunk that we are adding.  We can dereference OLD
     only if we have the lock, otherwise it might have already been
     allocated again.  */
  if (have_lock && old != NULL
      && __builtin_expect (fastbin_index (chunksize (old)) != idx, 0))
    malloc_printerr ("invalid fastbin entry (free)");
}
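A minimal sketch of the fasttop check (the tcache must be disabled so that small frees actually reach the fastbin path; run it as GLIBC_TUNABLES=glibc.malloc.tcache_count=0 ./demo):

c
#include <stdlib.h>

int main(void)
{
    void *a = malloc(0x20);   /* fastbin-sized request */
    free(a);                  /* a becomes the top of its fastbin */
    free(a);                  /* old == p, so free aborts with
                                 "double free or corruption (fasttop)" */
    return 0;
}

Note that the check only compares against the top of the bin: the classic fastbin dup (free(a); free(b); free(a)) still passes it.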

_int_free end

If the chunk wasn't placed into the tcache or a fast bin, and it isn't mmapped, _int_free_merge_chunk is called.

_int_free end
c
  /*
    Consolidate other non-mmapped chunks as they arrive.
  */

  else if (!chunk_is_mmapped(p)) {

    /* If we're single-threaded, don't lock the arena.  */
    if (SINGLE_THREAD_P)
      have_lock = true;

    if (!have_lock)
      __libc_lock_lock (av->mutex);

    _int_free_merge_chunk (av, p, size);

    if (!have_lock)
      __libc_lock_unlock (av->mutex);
  }
  /*
    If the chunk was allocated via mmap, release via munmap().
  */

  else {
    munmap_chunk (p);
  }
}

_int_free_merge_chunk

This function tries to merge chunk P of SIZE bytes with its neighbours, and puts the resulting chunk on the unsorted bin list.

Some checks are performed:

  • If the chunk is the top chunk: double free or corruption (top)
  • If the next chunk is beyond the boundaries of the arena: double free or corruption (out)
  • If the chunk is not marked as used (via the prev_inuse bit of the following chunk): double free or corruption (!prev)
  • If the next chunk has a too small or too big size: free(): invalid next size (normal)
  • If the previous chunk is not in use, it will try to consolidate backward. But if its prev_size differs from the size stored in the previous chunk: corrupted size vs. prev_size while consolidating
_int_free_merge_chunk code
c
// From https://github.com/bminor/glibc/blob/f942a732d37a96217ef828116ebe64a644db18d7/malloc/malloc.c#L4660C1-L4702C2

/* Try to merge chunk P of SIZE bytes with its neighbors.  Put the
   resulting chunk on the appropriate bin list.  P must not be on a
   bin list yet, and it can be in use.  */
static void
_int_free_merge_chunk (mstate av, mchunkptr p, INTERNAL_SIZE_T size)
{
  mchunkptr nextchunk = chunk_at_offset(p, size);

  /* Lightweight tests: check whether the block is already the
     top block.  */
  if (__glibc_unlikely (p == av->top))
    malloc_printerr ("double free or corruption (top)");
  /* Or whether the next chunk is beyond the boundaries of the arena.  */
  if (__builtin_expect (contiguous (av)
                        && (char *) nextchunk
                        >= ((char *) av->top + chunksize(av->top)), 0))
    malloc_printerr ("double free or corruption (out)");
  /* Or whether the block is actually not marked used.  */
  if (__glibc_unlikely (!prev_inuse(nextchunk)))
    malloc_printerr ("double free or corruption (!prev)");

  INTERNAL_SIZE_T nextsize = chunksize(nextchunk);
  if (__builtin_expect (chunksize_nomask (nextchunk) <= CHUNK_HDR_SZ, 0)
      || __builtin_expect (nextsize >= av->system_mem, 0))
    malloc_printerr ("free(): invalid next size (normal)");

  free_perturb (chunk2mem(p), size - CHUNK_HDR_SZ);

  /* Consolidate backward.  */
  if (!prev_inuse(p))
    {
      INTERNAL_SIZE_T prevsize = prev_size (p);
      size += prevsize;
      p = chunk_at_offset(p, -((long) prevsize));
      if (__glibc_unlikely (chunksize(p) != prevsize))
        malloc_printerr ("corrupted size vs. prev_size while consolidating");
      unlink_chunk (av, p);
    }

  /* Write the chunk header, maybe after merging with the following chunk.  */
  size = _int_free_create_chunk (av, p, size, nextchunk, nextsize);
  _int_free_maybe_consolidate (av, size);
}
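To watch the backward consolidation path, here is a minimal sketch (assuming pre-2.42 defaults, where requests above the ~0x408-byte tcache limit skip the tcache; on 2.42+ also set GLIBC_TUNABLES=glibc.malloc.tcache_count=0). The second free merges backward via unlink_chunk and the merged chunk lands in the unsorted bin:

c
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    void *a = malloc(0x500);    /* too big for the tcache and fastbins */
    void *b = malloc(0x500);    /* physically adjacent to a */
    void *guard = malloc(0x20); /* keeps b from merging into top */

    free(a);   /* a goes to the unsorted bin (both neighbours in use) */
    free(b);   /* prev_inuse(b) is now clear, so _int_free_merge_chunk
                  unlinks a and merges it with b */

    /* The merged chunk (~0xa20 bytes) can satisfy a larger request. */
    void *c = malloc(0x900);
    printf("a=%p c=%p (typically equal)\n", a, c);

    free(c);
    free(guard);
    return 0;
}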

Attacker notes and recent changes (2023–2025)

  • Safe-Linking in tcache/fastbins: free() stores singly-linked fd pointers using the macro PROTECT_PTR(pos, ptr) = ((size_t)pos >> 12) ^ (size_t)ptr. This means crafting a fake next pointer for tcache poisoning requires the attacker to know a heap address (e.g., leak chunk_addr, then use chunk_addr >> 12 as the XOR key). See the tcache page below for more details and PoCs.
  • Tcache double-free detection: before pushing a chunk into the tcache, free() compares each entry's e->key against the per-thread tcache_key and walks up to mp_.tcache_count entries of the bin looking for a duplicate, aborting with free(): double free detected in tcache 2 when one is found.
  • Recent glibc change (2.42): the tcache accepts much larger chunk sizes, controlled by the new tunable glibc.malloc.tcache_max_bytes. free() now tries to cache freed chunks up to that byte limit (mmapped chunks are not cached). This reduces how often frees reach the unsorted/small/large bins on modern systems.

Quickly crafting a safe-linked fd (for tcache poisoning)

py
# Given a leaked heap pointer to an entry located at &entry->next == POS
# compute the protected fd that points to TARGET
protected_fd = TARGET ^ (POS >> 12)

  • For a full tcache poisoning walkthrough (and its constraints under safe-linking), see:

Tcache Bin Attack

Forcing frees to hit the unsorted/small bins for research

Sometimes you want to bypass the tcache entirely in a local lab to observe classic _int_free behaviour (unsorted bin consolidation, etc.). You can do that with GLIBC_TUNABLES:

bash
# Disable tcache completely
GLIBC_TUNABLES=glibc.malloc.tcache_count=0 ./vuln

# Pre-2.42: shrink the maximum cached request size to 0
GLIBC_TUNABLES=glibc.malloc.tcache_max=0 ./vuln

# 2.42+: cap the new large-cache threshold (bytes)
GLIBC_TUNABLES=glibc.malloc.tcache_max_bytes=0 ./vuln

Related HackTricks reading:

  • First-fit/unsorted behaviour and overlap tricks:

First Fit

  • Double-free primitives and modern checks:

Double Free

A reminder about hooks: classic __malloc_hook/__free_hook overwrite techniques are not viable on modern glibc (≥ 2.34). If you still see them in old write-ups, switch to other targets (IO_FILE, exit handlers, vtables, etc.). For background, check the hooks page on HackTricks.

WWW2Exec - __malloc_hook & __free_hook
