关闭
Hit
enter
to search or
ESC
to close
May I Suggest ?
#leanote #leanote blog #code #hello world
Mutepig's Blog
Home
Archives
Tags
Search
About Me
Linux堆基础知识(3)
无
980
1
0
mut3p1g
最后就该看看内存分配的具体流程了。 <!--more--> ## 0x01 内存分配 内存分配的核心是`_int_malloc`,不过同样有个`__libc_malloc`来对锁进行一些处理,先从这个入手。 ### 1. __libc_malloc 这个函数主要就是获取分配区的锁,并且寻找一个可以分配空间的分配区。 ``` 3041 void * 3042 __libc_malloc (size_t bytes) 3043 { 3044 mstate ar_ptr; 3045 void *victim; 3046 3047 void *(*hook) (size_t, const void *) 3048 = atomic_forced_read (__malloc_hook); 3049 if (__builtin_expect (hook != NULL, 0)) 3050 return (*hook)(bytes, RETURN_ADDRESS (0)); 3051 #if USE_TCACHE 3052 /* int_free also calls request2size, be careful to not pad twice. */ 3053 size_t tbytes = request2size (bytes); 3054 size_t tc_idx = csize2tidx (tbytes); 3055 3056 MAYBE_INIT_TCACHE (); 3057 3058 DIAG_PUSH_NEEDS_COMMENT; 3059 if (tc_idx < mp_.tcache_bins 3060 /*&& tc_idx < TCACHE_MAX_BINS*/ /* to appease gcc */ 3061 && tcache 3062 && tcache->entries[tc_idx] != NULL) 3063 { 3064 return tcache_get (tc_idx); 3065 } 3066 DIAG_POP_NEEDS_COMMENT; 3067 #endif 3068 3069 arena_get (ar_ptr, bytes); //获得分配区的锁 3070 3071 victim = _int_malloc (ar_ptr, bytes); // 尝试进行分配 3072 /* Retry with another arena only if we were able to find a usable arena 3073 before. */ 3074 if (!victim && ar_ptr != NULL) // 如果分配失败了,则继续寻找可用的分配区 3075 { 3076 LIBC_PROBE (memory_malloc_retry, 1, bytes); 3077 ar_ptr = arena_get_retry (ar_ptr, bytes); 3078 victim = _int_malloc (ar_ptr, bytes); 3079 } 3080 3081 if (ar_ptr != NULL) 3082 __libc_lock_unlock (ar_ptr->mutex); 3083 3084 assert (!victim || chunk_is_mmapped (mem2chunk (victim)) || 3085 ar_ptr == arena_for_chunk (mem2chunk (victim))); 3086 return victim; 3087 } ``` ### 2. _int_malloc #### 1) 变量声明及初始化 ``` 3505 static void * 3506 _int_malloc (mstate av, size_t bytes) 3507 { 3508 INTERNAL_SIZE_T nb; /* normalized request size */ 3509 unsigned int idx; /* associated bin index */ 3510 mbinptr bin; /* associated bin */ 3511 3512 mchunkptr victim; /* inspected/selected chunk */ 3513 INTERNAL_SIZE_T size; /* its size */ 3514 int victim_index; /* its bin index */ 3515 3516 mchunkptr remainder; /* remainder from a split */ 3517 unsigned long remainder_size; /* its size */ 3518 3519 unsigned int block; /* bit map traverser */ 3520 unsigned int bit; /* bit map traverser */ 3521 unsigned int map; /* current word of binmap */ 3522 3523 mchunkptr fwd; /* misc temp for linking */ 3524 mchunkptr bck; /* misc temp for linking */ 3525 3526 #if USE_TCACHE 3527 size_t tcache_unsorted_count; /* count of unsorted chunks processed */ 3528 #endif 3529 3530 const char *errstr = NULL; 3531 3532 /* 3533 Convert request size to internal form by adding SIZE_SZ bytes 3534 overhead plus possibly more to obtain necessary alignment and/or 3535 to obtain a size of at least MINSIZE, the smallest allocatable 3536 size. Also, checked_request2size traps (returning 0) request sizes 3537 that are so large that they wrap around zero when padded and 3538 aligned. 3539 */ 3540 3541 checked_request2size (bytes, nb); // 将用户申请的大小bytes转为我们分配chunk的大小nb(对齐什么的),同时会判断是否大于-2*MINSIZE(这里的-2很重要T_T) 3542 3543 /* There are no usable arenas. Fall back to sysmalloc to get a chunk from 3544 mmap. */ 3545 if (__glibc_unlikely (av == NULL)) // 如果分配区为空,则调用sysmalloc用mmap分配空间 3546 { 3547 void *p = sysmalloc (nb, av); 3548 if (p != NULL) 3549 alloc_perturb (p, bytes); 3550 return p; 3551 } ``` #### 2) fastbin 判断`nb`是否属于`fastbin`的大小范围内,如果是则寻找对应的`fastbin`链表的第一个分配出去。 ``` 3553 /* 3554 If the size qualifies as a fastbin, first check corresponding bin. 3555 This code is safe to execute even if av is not yet initialized, so we 3556 can try it without checking, which saves some time on this fast path. 3557 */ 3558 3559 #define REMOVE_FB(fb, victim, pp) \ 3560 do \ 3561 { \ 3562 victim = pp; \ 3563 if (victim == NULL) \ 3564 break; \ 3565 } \ 3566 while ((pp = catomic_compare_and_exchange_val_acq (fb, victim->fd, victim)) \ 3567 != victim); \ 3568 3569 if ((unsigned long) (nb) <= (unsigned long) (get_max_fast ())) // 如果nb在fastbin的范畴内 3570 { 3571 idx = fastbin_index (nb); 3572 mfastbinptr *fb = &fastbin (av, idx);// 获得nb对应的fastbin 3573 mchunkptr pp = *fb; 3574 REMOVE_FB (fb, victim, pp); // 将符合条件的第一个fastbin取出来分配 3575 if (victim != 0) // 取成功了 3576 { 3577 if (__builtin_expect (fastbin_index (chunksize (victim)) != idx, 0)) // 如果取出来的fastbin和nb对应的fastbin不是一个下标 3578 { 3579 errstr = "malloc(): memory corruption (fast)"; 3580 errout: 3581 malloc_printerr (check_action, errstr, chunk2mem (victim), av); 3582 return NULL; 3583 } 3584 check_remalloced_chunk (av, victim, nb); 3585 #if USE_TCACHE 3586 /* While we're here, if we see other chunks of the same size, 3587 stash them in the tcache. */ 3588 size_t tc_idx = csize2tidx (nb); 3589 if (tcache && tc_idx < mp_.tcache_bins) 3590 { 3591 mchunkptr tc_victim; 3592 3593 /* While bin not empty and tcache not full, copy chunks over. */ 3594 while (tcache->counts[tc_idx] < mp_.tcache_count 3595 && (pp = *fb) != NULL) 3596 { 3597 REMOVE_FB (fb, tc_victim, pp); 3598 if (tc_victim != 0) 3599 { 3600 tcache_put (tc_victim, tc_idx); 3601 } 3602 } 3603 } 3604 #endif 3605 void *p = chunk2mem (victim); // 将p由chunk转mem 3606 alloc_perturb (p, bytes); 3607 return p; 3608 } 3609 } ``` #### 3) small bin 判断`nb`是否属于`smallbin`的大小范围内,如果是则寻找对应的`smallbin`链表的最后一个分配出去。 ``` 3611 /* 3612 If a small request, check regular bin. Since these "smallbins" 3613 hold one size each, no searching within bins is necessary. 3614 (For a large request, we need to wait until unsorted chunks are 3615 processed to find best fit. But for small ones, fits are exact 3616 anyway, so we can check now, which is faster.) 3617 */ 3618 3619 if (in_smallbin_range (nb)) // 如果在smallbin的大小范围内 3620 { 3621 idx = smallbin_index (nb); 3622 bin = bin_at (av, idx); // 找到对应的bins 3623 3624 if ((victim = last (bin)) != bin) // 如果 first(bin)==last(bin) 表明链表为空,所以这里表示链表不为空的情况 3625 { 3626 if (victim == 0) /* initialization check */ // 如果smallbin还未初始化,则调用malloc_consolidate合并fastbin 3627 malloc_consolidate (av); 3628 else 3629 { 3630 bck = victim->bk; // 取出倒数第二个bin 3631 if (__glibc_unlikely (bck->fd != victim)) // 即victim->bk->fd!=victim,就会报doublefree 3632 { 3633 errstr = "malloc(): smallbin double linked list corrupted"; 3634 goto errout; 3635 } 3636 set_inuse_bit_at_offset (victim, nb); // 设置victim在使用中 3637 bin->bk = bck; // 将最后一个从链表中取出来 3638 bck->fd = bin; 3639 3640 if (av != &main_arena) // 根据分配区是否为主分配区设置相关标志位 3641 set_non_main_arena (victim); 3642 check_malloced_chunk (av, victim, nb); 3643 #if USE_TCACHE 3644 /* While we're here, if we see other chunks of the same size, 3645 stash them in the tcache. */ 3646 size_t tc_idx = csize2tidx (nb); 3647 if (tcache && tc_idx < mp_.tcache_bins) 3648 { 3649 mchunkptr tc_victim; 3650 3651 /* While bin not empty and tcache not full, copy chunks over. */ 3652 while (tcache->counts[tc_idx] < mp_.tcache_count 3653 && (tc_victim = last (bin)) != bin) 3654 { 3655 if (tc_victim != 0) 3656 { 3657 bck = tc_victim->bk; 3658 set_inuse_bit_at_offset (tc_victim, nb); 3659 if (av != &main_arena) 3660 set_non_main_arena (tc_victim); 3661 bin->bk = bck; 3662 bck->fd = bin; 3663 3664 tcache_put (tc_victim, tc_idx); 3665 } 3666 } 3667 } 3668 #endif 3669 void *p = chunk2mem (victim); // chunk转mem并返回 3670 alloc_perturb (p, bytes); 3671 return p; 3672 } 3673 } 3674 } ``` #### 4) unsorted bin ##### a. consolidate fastbin 如果当前分配区存在`fastbin`,则直接调用`malloc_consolidate`进行合并。 ``` 3676 /* 3677 If this is a large request, consolidate fastbins before continuing. 3678 While it might look excessive to kill all fastbins before 3679 even seeing if there is space available, this avoids 3680 fragmentation problems normally associated with fastbins. 3681 Also, in practice, programs tend to have runs of either small or 3682 large requests, but less often mixtures, so consolidation is not 3683 invoked all that often in most programs. And the programs that 3684 it is called frequently in otherwise tend to fragment. 3685 */ 3686 3687 else 3688 { 3689 idx = largebin_index (nb); 3690 if (have_fastchunks (av)) 3691 malloc_consolidate (av); 3692 } ``` ##### b. 反向遍历unsorted bin 首先是反向遍历`unsorted bin`,从最后一个取作`victim`用于后面的操作,如果不会直接分配则将其从`unsorted bin`中剔除,之后加入对应的`bins`。 如果`victim`的大小与需求的大小完全相同,则直接将`victim`分配,并且不再遍历`unsorted bin`了。 ``` 3717 for (;; ) 3718 { 3719 int iters = 0; 3720 while ((victim = unsorted_chunks (av)->bk) != unsorted_chunks (av)) // victim为unsortedbin的最后一个chunk,不等于头部表示unsortedbin不为空 3721 { 3722 bck = victim->bk; // 上一个块 3723 if (__builtin_expect (chunksize_nomask (victim) <= 2 * SIZE_SZ, 0) 3724 || __builtin_expect (chunksize_nomask (victim) 3725 > av->system_mem, 0)) // victim的大小要符合要求 3726 malloc_printerr (check_action, "malloc(): memory corruption", 3727 chunk2mem (victim), av); 3728 size = chunksize (victim); // 获得大小 3765 ..... 3766 /* remove from unsorted list */ 3767 unsorted_chunks (av)->bk = bck; // 从unsorted bin中剔除 3768 bck->fd = unsorted_chunks (av); 3769 3770 /* Take now instead of binning if exact fit */ 3771 3772 if (size == nb) // 如果victim大小这和需求完全相同 3773 { 3774 set_inuse_bit_at_offset (victim, size); // 设置下个chunk的PREV_INUSE 3775 if (av != &main_arena) // 如果是非主分配区,设置相关flag 3776 set_non_main_arena (victim); 3777 #if USE_TCACHE 3778 /* Fill cache first, return to user only if cache fills. 3779 We may return one of these chunks later. */ 3780 if (tcache_nb 3781 && tcache->counts[tc_idx] < mp_.tcache_count) 3782 { 3783 tcache_put (victim, tc_idx); 3784 return_cached = 1; 3785 continue; 3786 } 3787 else 3788 { 3789 #endif 3790 check_malloced_chunk (av, victim, nb); 3791 void *p = chunk2mem (victim); // 直接分配 3792 alloc_perturb (p, bytes); 3793 return p; 3794 #if USE_TCACHE 3795 } 3796 #endif 3797 } ``` ##### c. 直接分配victim 直接分配`victim`的条件有以下四个: 1. 需要分配的是一个`smallbin` 2. `unsorted bin`中只有一个`chunk` 3. `victim`是`last remainder chunk` 4. `victim`的`size`超过需求大小加上`MINSIZE` ``` 3738 if (in_smallbin_range (nb) && 3739 bck == unsorted_chunks (av) && 3740 victim == av->last_remainder && 3741 (unsigned long) (size) > (unsigned long) (nb + MINSIZE)) 3742 { 3743 /* split and reattach remainder */ 3744 remainder_size = size - nb; //剩余大小 3745 remainder = chunk_at_offset (victim, nb); // 剩下的chunk 3746 unsorted_chunks (av)->bk = unsorted_chunks (av)->fd = remainder; // 由于条件就是unsorted bin中只剩这一块了,所以将剩下的chunk放入unsorted bin中 3747 av->last_remainder = remainder; // 剩下的chunk同时也看作last remainder 3748 remainder->bk = remainder->fd = unsorted_chunks (av); 3749 if (!in_smallbin_range (remainder_size)) // 如果剩下的chunk为largebin,则清空其chunk size链表,因为它只存在于unsortedbin中 3750 { 3751 remainder->fd_nextsize = NULL; 3752 remainder->bk_nextsize = NULL; 3753 } 3754 // 设置相关标志 3755 set_head (victim, nb | PREV_INUSE | 3756 (av != &main_arena ? NON_MAIN_ARENA : 0)); 3757 set_head (remainder, remainder_size | PREV_INUSE); 3758 set_foot (remainder, remainder_size); 3759 3760 check_malloced_chunk (av, victim, nb); 3761 void *p = chunk2mem (victim); 3762 alloc_perturb (p, bytes); 3763 return p; 3764 } ``` ##### d. 将victim加入bins * 加入smallbins ``` 3799 /* place chunk in bin */ 3800 3801 if (in_smallbin_range (size)) // 如果属于smallbin 3802 { 3803 victim_index = smallbin_index (size); // 获得对应smallbin的大小 3804 bck = bin_at (av, victim_index); // smallbin的链表头 3805 fwd = bck->fd; // smallbin的第一个chunk 3806 } ``` * 加入largebins 这里首先需要说明一下几个需要注意的地方: 1. `largebin`的链表拥有一个范围的`bins`,并且是从大到小排的,所以最后一个`bin`一定是最小的。 2. 如果是相同大小的,先释放的放在前面,即从后面往前面找第一个大于等于自己的`bin`。 3. 由于`largebin`的链表中每个`bin`大小不一定相同,所以`fd_nextsize`和`bk_nextsize`记录了自己前一个和后一个与自己不同大小的`bin`的地址(只会记录同一链表上的关系,如果链表上只有一个`bin`的话则其两个`nextsize`都指向自己)。如果存在相同大小的`bin`,那么只有最前面的一个`bin`会存在这两个值,后面与之大小相同的`bin`该值都为0。 ``` 3807 else 3808 { 3809 victim_index = largebin_index (size); // 获得对应largebin的大小 3810 bck = bin_at (av, victim_index); // largebin的链表头,如果链表为空的话则仍然为为链表头本身 3811 fwd = bck->fd; // largebin的第一个chunk 3812 3813 /* maintain large bins in sorted order */ 3814 if (fwd != bck) // 如果链表不为空,这里应该先看最下面如果为空的情况 3815 { 3816 /* Or with inuse bit to speed comparisons */ 3817 size |= PREV_INUSE; // 上一块在使用中 3818 /* if smaller than smallest, bypass loop below */ 3819 assert (chunk_main_arena (bck->bk)); 3820 if ((unsigned long) (size) 3821 < (unsigned long) chunksize_nomask (bck->bk)) // 如果victim比largebin中最小的还小,那么直接加入到最后 3822 { 3823 fwd = bck; // 链表头 3824 bck = bck->bk; // 最后一个bin 3825 3826 victim->fd_nextsize = fwd->fd; 3827 victim->bk_nextsize = fwd->fd->bk_nextsize; 3828 fwd->fd->bk_nextsize = victim->bk_nextsize->fd_nextsize = victim; 3829 } 3830 else 3831 { 3832 assert (chunk_main_arena (fwd)); 3833 while ((unsigned long) size < chunksize_nomask (fwd)) // 正向循环,从最大的开始,直到找到恰好大于等于自己的bin 3834 { 3835 fwd = fwd->fd_nextsize; 3836 assert (chunk_main_arena (fwd)); 3837 } 3838 3839 if ((unsigned long) size 3840 == (unsigned long) chunksize_nomask (fwd)) // 如果大小相等,则直接插入就可以了 3841 /* Always insert in the second position. */ 3842 fwd = fwd->fd; // 为了避免修改nextsize,所以分配第二个bin 3843 else // 如果大于,则还需要修改nextsize 3844 { 3845 victim->fd_nextsize = fwd; 3846 victim->bk_nextsize = fwd->bk_nextsize; 3847 fwd->bk_nextsize = victim; 3848 victim->bk_nextsize->fd_nextsize = victim; 3849 } 3850 bck = fwd->bk; 3851 } 3852 } 3853 else // 为空的情况 3854 victim->fd_nextsize = victim->bk_nextsize = victim; 3855 } ``` * 插入到对应的bins ``` 3856 // 如果victim是smallbin或者是largebin但是对应链表为空的话,则将victim插入到该链表的第一个 // 总之是插入到bck与fwd之间 3857 mark_bin (av, victim_index); // 首先标记对应链表不为空了 3858 victim->bk = bck; // 将victim插入到链表,作为第一个bin 3859 victim->fd = fwd; 3860 fwd->bk = victim; 3861 bck->fd = victim; ``` 最后插入的链表效果是这样的: ``` smallbins: 后释放的插入在链表头部,每个链表对应一个size;释放时从尾部释放,即先进先出。 largebins: 每个链表拥有多个size,不同size之间通过nextsize来链接。size越小越在链表后面。同样size的话只有第一个释放的bin拥有nextsize,所以为了避免修改nextize所以分配第二个bin(如果存在的话),插入的话也是会插入到第二个(使得分配的就是最近释放的)。 ``` 对于`largebin`的情况比较复杂,下面举个例子进行说明: ``` 释放了3个相同大小的largebin记作1、2、3,最后在对应链表的情况如下: +=======+ 1 (最后分配) +=======+ 3 (最先分配) +=======+ 2 (其次分配) +=======+ ``` #### 5) 分配largebin 到现在为止,`unsorted bin`已经全部放置到`smallbin`或`largebin`。所以分配`largebin`的时候,就是找到满足需求的`idx`号`bins`,并反向遍历该链表,直到找到第一个最小且符合需求的`bin`。同时为了避免修改`nextsize`链表,存在多个大小相同的`bin`的话优先取第二个。 ``` 3888 /* 3889 If a large request, scan through the chunks of current bin in 3890 sorted order to find smallest that fits. Use the skip list for this. 3891 */ 3892 3893 if (!in_smallbin_range (nb)) // 属于largebin 3894 { 3895 bin = bin_at (av, idx); // 获得对应bins的链表头 3896 3897 /* skip scan if empty or largest chunk is too small */ 3898 if ((victim = first (bin)) != bin // 链表不为空 3899 && (unsigned long) chunksize_nomask (victim) 3900 >= (unsigned long) (nb)) // 第一个bin就是最大的,需要能满足需求 3901 { 3902 victim = victim->bk_nextsize; // 取最后一个,反向遍历 3903 while (((unsigned long) (size = chunksize (victim)) < 3904 (unsigned long) (nb))) // 找到第一个符合要求的最小的bin 3905 victim = victim->bk_nextsize; 3906 3907 /* Avoid removing the first entry for a size so that the skip 3908 list does not have to be rerouted. */ 3909 if (victim != last (bin) 3910 && chunksize_nomask (victim) 3911 == chunksize_nomask (victim->fd)) // 由于相同大小的只有第一个存在nextsize,所以为了避免调整该链表,所以如果存在多个的话优先取后面的 3912 victim = victim->fd; 3913 // 后面就是正常的从victim取出并返回操作了============================= 3914 remainder_size = size - nb; 3915 unlink (av, victim, bck, fwd); 3916 3917 /* Exhaust */ 3918 if (remainder_size < MINSIZE) 3919 { 3920 set_inuse_bit_at_offset (victim, size); 3921 if (av != &main_arena) 3922 set_non_main_arena (victim); 3923 } 3924 /* Split */ 3925 else 3926 { 3927 remainder = chunk_at_offset (victim, nb); 3928 /* We cannot assume the unsorted list is empty and therefore 3929 have to perform a complete insert here. */ 3930 bck = unsorted_chunks (av); 3931 fwd = bck->fd; 3932 if (__glibc_unlikely (fwd->bk != bck)) 3933 { 3934 errstr = "malloc(): corrupted unsorted chunks"; 3935 goto errout; 3936 } 3937 remainder->bk = bck; 3938 remainder->fd = fwd; 3939 bck->fd = remainder; 3940 fwd->bk = remainder; 3941 if (!in_smallbin_range (remainder_size)) 3942 { 3943 remainder->fd_nextsize = NULL; 3944 remainder->bk_nextsize = NULL; 3945 } 3946 set_head (victim, nb | PREV_INUSE | 3947 (av != &main_arena ? NON_MAIN_ARENA : 0)); 3948 set_head (remainder, remainder_size | PREV_INUSE); 3949 set_foot (remainder, remainder_size); 3950 } 3951 check_malloced_chunk (av, victim, nb); 3952 void *p = chunk2mem (victim); 3953 alloc_perturb (p, bytes); 3954 return p; 3955 } 3956 } ``` #### 6) scan bins 由于可能通过`nb`对应的`bins`并不存在符合要求的空闲`chunk`,所以还需要向更大的`bins`来寻找可用的最小空间,这里适用于`smallbin`和`largebin`。 ##### a. 通过bitmap遍历存在空闲chunk的bins 这里是通过`bitmap`的优化来快速寻找不为空的`bins`,具体解析可以参考[第一节](http://mutepig.club/index.php/archives/45/#bitmap)对应内容。 ``` 3958 /* 3959 Search for a chunk by scanning bins, starting with next largest 3960 bin. This search is strictly by best-fit; i.e., the smallest 3961 (with ties going to approximately the least recently used) chunk 3962 that fits is selected. 3963 3964 The bitmap avoids needing to check that most blocks are nonempty. 3965 The particular case of skipping all bins during warm-up phases 3966 when no chunks have been returned yet is faster than it might look. 3967 */ 3968 3969 ++idx; // 由于bins对应大小是递增的,所以向后面寻找 3970 bin = bin_at (av, idx); // 获得对应的bins 3971 block = idx2block (idx); // 获得对应的block 3972 map = av->binmap[block]; // 找到对应的map 3973 bit = idx2bit (idx); // 获得对应的bit 3974 3975 for (;; ) 3976 { 3977 /* Skip rest of block if there are no more set bits in this block. */ 3978 if (bit > map || bit == 0) // 如果bit比map大说明对应的block中不存在满足条件的,所以跳过 3979 { 3980 do 3981 { 3982 if (++block >= BINMAPSIZE) /* out of bins */ // 如果block遍历完了,就通过top分配 3983 goto use_top; 3984 } 3985 while ((map = av->binmap[block]) == 0); // 如果block对应map为空则跳过到下一个block 3986 3987 bin = bin_at (av, (block << BINMAPSHIFT)); // 从第一个(最小的)开始遍历寻找符合条件的bins 3988 bit = 1; // 取第一个bins 3989 } 3990 3991 /* Advance to bin with set bit. There must be one. */ 3992 while ((bit & map) == 0) // 满足该条件说明bit对应的bins不存在空闲chunk , 那么就该寻找下个bins 3993 { 3994 bin = next_bin (bin); // 取下一个bins 3995 bit <<= 1; // bit也指向下一个 3996 assert (bit != 0); 3997 } 3998 3999 /* Inspect the bin. It is likely to be non-empty */ 4000 victim = last (bin); // 符合条件的bins ``` ##### b. 更新bitmap或者分配victim 由于`bitmap`只会在扫描的时候才更新,所以不一定记录是正确的。当`bins`不包含空闲`chunk`时就需要更新`bitmap`,并继续从下一个bins开始扫描。 ``` 4002 /* If a false alarm (empty bin), clear the bit. */ 4003 if (victim == bin) // 当前bins为空,更新bitmap 4004 { 4005 av->binmap[block] = map &= ~bit; /* Write through */ // 更新bitmap 4006 bin = next_bin (bin); // 指向下一个bin 4007 bit <<= 1; 4008 } 4009 4010 else // 如果不为空,那么直接分配就可以了,下面也是正常的分配过程====================================== 4011 { 4012 size = chunksize (victim); 4013 4014 /* We know the first chunk in this bin is big enough to use. */ 4015 assert ((unsigned long) (size) >= (unsigned long) (nb)); 4016 4017 remainder_size = size - nb; 4018 4019 /* unlink */ 4020 unlink (av, victim, bck, fwd); 4021 4022 /* Exhaust */ 4023 if (remainder_size < MINSIZE) 4024 { 4025 set_inuse_bit_at_offset (victim, size); 4026 if (av != &main_arena) 4027 set_non_main_arena (victim); 4028 } 4029 4030 /* Split */ 4031 else 4032 { 4033 remainder = chunk_at_offset (victim, nb); 4034 4035 /* We cannot assume the unsorted list is empty and therefore 4036 have to perform a complete insert here. */ 4037 bck = unsorted_chunks (av); 4038 fwd = bck->fd; 4039 if (__glibc_unlikely (fwd->bk != bck)) 4040 { 4041 errstr = "malloc(): corrupted unsorted chunks 2"; 4042 goto errout; 4043 } 4044 remainder->bk = bck; 4045 remainder->fd = fwd; 4046 bck->fd = remainder; 4047 fwd->bk = remainder; 4048 4049 /* advertise as last remainder */ 4050 if (in_smallbin_range (nb)) 4051 av->last_remainder = remainder; 4052 if (!in_smallbin_range (remainder_size)) 4053 { 4054 remainder->fd_nextsize = NULL; 4055 remainder->bk_nextsize = NULL; 4056 } 4057 set_head (victim, nb | PREV_INUSE | 4058 (av != &main_arena ? NON_MAIN_ARENA : 0)); 4059 set_head (remainder, remainder_size | PREV_INUSE); 4060 set_foot (remainder, remainder_size); 4061 } 4062 check_malloced_chunk (av, victim, nb); 4063 void *p = chunk2mem (victim); 4064 alloc_perturb (p, bytes); 4065 return p; 4066 } 4067 } ``` #### 6) top chunk 如果上面都无法分配,那么就只能通过`top chunk`来进行分配了。 ``` 4069 use_top: 4070 /* 4071 If large enough, split off the chunk bordering the end of memory 4072 (held in av->top). Note that this is in accord with the best-fit 4073 search rule. In effect, av->top is treated as larger (and thus 4074 less well fitting) than any other available chunk since it can 4075 be extended to be as large as necessary (up to system 4076 limitations). 4077 4078 We require that av->top always exists (i.e., has size >= 4079 MINSIZE) after initialization, so if it would otherwise be 4080 exhausted by current request, it is replenished. (The main 4081 reason for ensuring it exists is that we may need MINSIZE space 4082 to put in fenceposts in sysmalloc.) 4083 */ 4084 4085 victim = av->top; // 获得topchunk 4086 size = chunksize (victim); 4087 4088 if ((unsigned long) (size) >= (unsigned long) (nb + MINSIZE)) // 如果topchunk的大小能够满足需求,则从topchunk进行分配 这里还需要加上MINSIZE是由于top必须留下来用作fencepost,以分隔堆和其他空间。 4089 { 4090 remainder_size = size - nb; 4091 remainder = chunk_at_offset (victim, nb); 4092 av->top = remainder; // 更新topchunk 4093 set_head (victim, nb | PREV_INUSE | 4094 (av != &main_arena ? NON_MAIN_ARENA : 0)); 4095 set_head (remainder, remainder_size | PREV_INUSE); 4096 4097 check_malloced_chunk (av, victim, nb); 4098 void *p = chunk2mem (victim); 4099 alloc_perturb (p, bytes); 4100 return p; 4101 } ``` #### 7) 合并fastbins 由于可能在之前操作的过程中还有`fastbins`给插入了进来,所以还需要判断是否存在`fastbins`,如果有那么需要合并,并从头再来一遍。 ``` 4103 /* When we are using atomic ops to free fast chunks we can get 4104 here for all block sizes. */ 4105 else if (have_fastchunks (av)) // 如果有fastbins 4106 { 4107 malloc_consolidate (av); // 合并fastbin 4108 /* restore original bin index */ 4109 if (in_smallbin_range (nb)) 4110 idx = smallbin_index (nb); 4111 else 4112 idx = largebin_index (nb); 4113 } 4114 4115 /* 4116 Otherwise, relay to handle system-dependent cases 4117 */ 4118 else // 如果没有,只能向系统申请内存了 4119 { 4120 void *p = sysmalloc (nb, av); 4121 if (p != NULL) 4122 alloc_perturb (p, bytes); 4123 return p; 4124 } 4125 } 4126 } ``` #### 8) 总结 1.尝试从对应的`fastbins`中的第一个`fastbin`分配出去 2.尝试从对应的`smallbins`中的最后一个`bin`分配出去 3.尝试用`largebin`分配,首先如果存在`fastbins`则调用`malloc_consolidate`合并 4.反向遍历`unsorted bin`,先看有没有`last remainder chunk`能直接分配 5.上一步失败了,则将`unsorted bin`中的`bin`加入`smallbins`或`largebins`中 6.从对应大小的`largebins`中反向遍历寻找最合适的最小的`bin`进行分配 7.尝试从比需求大小更大的`largebins`中反向遍历寻找最合适的最小的`bin`进行分配 8.尝试通过`topchunk`进行分配空间 9.尝试再次合并`fastbins`,合并成功则从第4步再来一次,否则调用`sysmalloc`从系统分配内存 校验的情况: ``` fastbin: 1. nb <= get_max_fast () 2. fastbin_index (chunksize (victim)) == idx smallbin: 1. in_smallbin_range (nb) 2. victim->bk->fd==victim (victim为smallbin链表上的最后一个) unsortedbin: 1. 前提:victim的size符合要求(2 * SIZE_SZ~av->system_mem) 2. size==nb就会直接分配 3. 如果无法直接分配,直接通过size获得对应的bin_index,然后加入到对应的bins largebin: 1. !in_smallbin_range (nb) 2. victim需要通过unlink校验 binmap: 1. size>=nb 2. victim需要通过unlink校验 unlink校验: 1. victim的size == next_chunk的prev_size 2. victim->fd->bk == victim->bk->fd == victim 3. 对于largebin,victim->fd_nextsize->bk_nextsize == victim->bk_nextsize->fd_nextsize == victim ``` ### 3. sysmalloc #### 1) 变量申明 ``` 2286 static void * 2287 sysmalloc (INTERNAL_SIZE_T nb, mstate av) 2288 { 2289 mchunkptr old_top; /* incoming value of av->top */ 2290 INTERNAL_SIZE_T old_size; /* its size */ 2291 char *old_end; /* its end address */ 2292 2293 long size; /* arg to first MORECORE or mmap call */ 2294 char *brk; /* return value from MORECORE */ 2295 2296 long correction; /* arg to 2nd MORECORE call */ 2297 char *snd_brk; /* 2nd return val */ 2298 2299 INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of new space */ 2300 INTERNAL_SIZE_T end_misalign; /* partial page left at end of new space */ 2301 char *aligned_brk; /* aligned offset into brk */ 2302 2303 mchunkptr p; /* the allocated/returned chunk */ 2304 mchunkptr remainder; /* remainder from allocation */ 2305 unsigned long remainder_size; /* its size */ 2306 2307 2308 size_t pagesize = GLRO (dl_pagesize); 2309 bool tried_mmap = false; ``` #### 2) 通过mmap分配 这里就是尝试通过`mmap`来对空间进行分配,首先需要通过下面两个条件: 1. 分配区已经初始化了 2. 所需分配的大小超过了mmap的分配阈值,并且当前使用mmap分配的内存块数量小于限定值 通过后会先将`nb`转换成需要通过`mmap`分配的值`size`,然后再尝试通过`mmap`进行分配,如果成功的话会更新一些相关配置的值,最后返回对应的内存空间。 ``` 2319 if (av == NULL // 分配区未初始化 2320 || ((unsigned long) (nb) >= (unsigned long) (mp_.mmap_threshold) // 所需分配的大小超过了mmap的分配阈值 2321 && (mp_.n_mmaps < mp_.n_mmaps_max))) // 当前使用mmap分配的内存块数量小于限定值 2322 { 2323 char *mm; /* return value from mmap call*/ 2324 2325 try_mmap: 2326 /* 2327 Round up size to nearest page. For mmapped chunks, the overhead 2328 is one SIZE_SZ unit larger than for normal chunks, because there 2329 is no following chunk whose prev_size field could be used. 2330 2331 See the front_misalign handling below, for glibc there is no 2332 need for further alignments unless we have have high alignment. 2333 */ 2334 if (MALLOC_ALIGNMENT == 2 * SIZE_SZ) 2335 size = ALIGN_UP (nb + SIZE_SZ, pagesize); // 由于是通过mmap申请的空间,所以不存在下一个相邻的chunk可以提供prev_size来进行复用,所以需要多加一下SIZE_SZ;同时mmap申请的空间必须保证是页对齐的 2336 else 2337 size = ALIGN_UP (nb + SIZE_SZ + MALLOC_ALIGN_MASK, pagesize); 2338 tried_mmap = true; // 尝试使用mmap进行分配 2339 2340 /* Don't try if size wraps around 0 */ 2341 if ((unsigned long) (size) > (unsigned long) (nb)) // 如果size小于nb则表示溢出了,于是不将分配空间 2342 { 2343 mm = (char *) (MMAP (0, size, PROT_READ | PROT_WRITE, 0)); // 调用mmap申请空间 2344 2345 if (mm != MAP_FAILED) // 如果mmap申请空间成功了 2346 { 2347 /* 2348 The offset to the start of the mmapped region is stored 2349 in the prev_size field of the chunk. This allows us to adjust 2350 returned start address to meet alignment requirements here 2351 and in memalign(), and still be able to compute proper 2352 address argument for later munmap in free() and realloc(). 2353 */ 2354 2355 if (MALLOC_ALIGNMENT == 2 * SIZE_SZ) // 不需要额外的对齐操作,一般都是不需要的 2356 { 2357 /* For glibc, chunk2mem increases the address by 2*SIZE_SZ and 2358 MALLOC_ALIGN_MASK is 2*SIZE_SZ-1. Each mmap'ed area is page 2359 aligned and therefore definitely MALLOC_ALIGN_MASK-aligned. */ 2360 assert (((INTERNAL_SIZE_T) chunk2mem (mm) & MALLOC_ALIGN_MASK) == 0); 2361 front_misalign = 0; 2362 } 2363 else 2364 front_misalign = (INTERNAL_SIZE_T) chunk2mem (mm) & MALLOC_ALIGN_MASK; 2365 if (front_misalign > 0) 2366 { 2367 correction = MALLOC_ALIGNMENT - front_misalign; // 对齐修正 2368 p = (mchunkptr) (mm + correction); 2369 set_prev_size (p, correction); 2370 set_head (p, (size - correction) | IS_MMAPPED); 2371 } 2372 else 2373 { 2374 p = (mchunkptr) mm; // 将申请的内存转换为chunk指针 2375 set_prev_size (p, 0); 2376 set_head (p, size | IS_MMAPPED); // 设置标志,并设置当前chunk是由mmap申请的 2377 } 2378 2379 /* update statistics */ 2380 2381 int new = atomic_exchange_and_add (&mp_.n_mmaps, 1) + 1; // 当前mmap分配的内存块数目加1 2382 atomic_max (&mp_.max_n_mmaps, new); // 更新最大值 2383 2384 unsigned long sum; 2385 sum = atomic_exchange_and_add (&mp_.mmapped_mem, size) + size; // 当前mmap分配的内存总大小 2386 atomic_max (&mp_.max_mmapped_mem, sum); // 更新最大值 2387 2388 check_chunk (av, p); 2389 2390 return chunk2mem (p); // 将申请的chunk返回 2391 } 2392 } 2393 } ``` #### 3) check old top 这里首先将原`topchunk`的相关值都记录下来,同时将`brk`和`snd_brk`都初始化为`MORECORE_FAILURE`,因为通过`sbrk`分配的是连续空间所以只需要`brk`就行了,而`mmap`分配的空间和原`topchunk`是分开的,所以需要两个值。后面可以通过是一个值还是两个值变化判断是通过`sbrk`还是`mmap`分配的。 ``` 2395 /* There are no usable arenas and mmap also failed. */ 2396 if (av == NULL) // 如果分配区未初始化,那么直接退出 2397 return 0; 2398 2399 /* Record incoming configuration of top */ 2400 2401 old_top = av->top; // 当前top 2402 old_size = chunksize (old_top);// 当前top大小 2403 old_end = (char *) (chunk_at_offset (old_top, old_size)); // 当前top结束地址 2404 2405 brk = snd_brk = (char *) (MORECORE_FAILURE); 2406 2407 /* 2408 If not the first time through, we require old_size to be 2409 at least MINSIZE and to have prev_inuse set. 2410 */ 2411 2412 assert ((old_top == initial_top (av) && old_size == 0) || // top没有初始化,所以其size为0 2413 ((unsigned long) (old_size) >= MINSIZE && // top已经初始化,所以包含fencepost,size一定大于MINSIZE 2414 prev_inuse (old_top) && // top的前一个chunk一定是inuse 2415 ((unsigned long) old_end & (pagesize - 1)) == 0)); // top结束地址必须页对齐 2416 2417 /* Precondition: not enough current space to satisfy nb request */ 2418 assert ((unsigned long) (old_size) < (unsigned long) (nb + MINSIZE)); // top的大小无法满足分配的要求 ``` #### 4) 非主分配区 首先这里需要回顾一下第一节的和[模拟堆](http://www.mutepig.club/index.php/archives/45/#2\)none_main_area)有关知识。 * 通过增长堆扩大topchunk 由于`topchunk`处于其所在模拟堆的顶部,所以可以通过调用`grow_heap`增加模拟堆的大小从而增加`topchunk`的大小。失败的原因一般可能是模拟堆的空间`HEAP_MAX_SIZE`已经分配完了,所以需要再新建一个模拟堆进行分配。 ``` 2421 if (av != &main_arena) // 非主分配区 2422 { 2423 heap_info *old_heap, *heap; 2424 size_t old_heap_size; 2425 2426 /* First try to extend the current heap. */ 2427 old_heap = heap_for_ptr (old_top); // top对应的模拟堆 2428 old_heap_size = old_heap->size; // 模拟堆的大小 2429 if ((long) (MINSIZE + nb - old_size) > 0 // topchunk无法满足需求大小 2430 && grow_heap (old_heap, MINSIZE + nb - old_size) == 0) // 增长模拟堆成功 2431 { 2432 av->system_mem += old_heap->size - old_heap_size; // 表示系统已经分配空间的大小,加上刚刚分配的空间 2433 set_head (old_top, (((char *) old_heap + old_heap->size) - (char *) old_top) 2434 | PREV_INUSE); // 设置top的size 2435 } ``` * 通过创建新模拟堆扩大topchunk 如果上面的方法失败,就只能申请一个新的模拟堆来创造一个新的`topchunk`了。创建新的模拟堆之后,需要在前面一个模拟堆最后创建一个`fencepost`以区分两者。这里需要用到第一节关于[fencepost](http://mutepig.club/index.php/archives/45/)的知识。 ``` 2436 else if ((heap = new_heap (nb + (MINSIZE + sizeof (*heap)), mp_.top_pad))) // 创建一个新模拟堆,包含需求nb、fencepost、heapinfo实例 2437 { 2438 /* Use a newly allocated heap. */ 2439 heap->ar_ptr = av; // 设置新模拟堆的相关信息 2440 heap->prev = old_heap; 2441 av->system_mem += heap->size; 2442 /* Set up the new top. */ 2443 top (av) = chunk_at_offset (heap, sizeof (*heap)); // av对应的topchunk转为新模拟堆 2444 set_head (top (av), (heap->size - sizeof (*heap)) | PREV_INUSE);// 由于是新申请的模拟堆,所以整个模拟堆都属于topchunk,所以大小模拟堆大小减去模拟堆头的大小 2445 2446 /* Setup fencepost and free the old top chunk with a multiple of 2447 MALLOC_ALIGNMENT in size. */ 2448 /* The fencepost takes at least MINSIZE bytes, because it might 2449 become the top chunk again later. Note that a footer is set 2450 up, too, although the chunk is marked in use. */ 2451 old_size = (old_size - MINSIZE) & ~MALLOC_ALIGN_MASK; // 前一个模拟堆减去MINSIZE用作fencepost 2452 set_head (chunk_at_offset (old_top, old_size + 2 * SIZE_SZ), 0 | PREV_INUSE); // 设置fencepost第二个chunk的头部 2453 if (old_size >= MINSIZE) // 如果topchunk剩余的空间足够大,则继续作为单独chunk存在 2454 { // 设置fencepost第一个chunk的头部 2455 set_head (chunk_at_offset (old_top, old_size), (2 * SIZE_SZ) | PREV_INUSE); // 先将原topchunk作为在使用中,之后free掉 2456 set_foot (chunk_at_offset (old_top, old_size), (2 * SIZE_SZ)); 2457 set_head (old_top, old_size | PREV_INUSE | NON_MAIN_ARENA); 2458 _int_free (av, old_top, 1); // free掉原topchunk 2459 } 2460 else // 原topchunk太小,则加入到fencepost的第一个chunk 2461 { 2462 set_head (old_top, (old_size + 2 * SIZE_SZ) | PREV_INUSE); 2463 set_foot (old_top, (old_size + 2 * SIZE_SZ)); 2464 } 2465 } ``` * 尝试mmap 如果上面两种方法都失败了,且之前没有通过`mmap`申请空间,那么在这里尝试利用`mmap`来申请。 ``` 2466 else if (!tried_mmap) 2467 /* We can at least try to use to mmap memory. */ 2468 goto try_mmap; 2469 } ``` #### 5) 主分配区 ##### a. 通过sbrk申请空间 由于是主分配区,所以可以直接向`heap`分配内存,从而提高`topchunk`。 ``` 2470 else /* av == main_arena */ 2471 2472 2473 { /* Request enough space for nb + pad + overhead */ 2474 size = nb + mp_.top_pad + MINSIZE; // size调整,保证有足够的空间 2475 2476 /* 2477 If contiguous, we can subtract out existing space that we hope to 2478 combine with new space. We add it back later only if 2479 we don't actually get contiguous space. 2480 */ 2481 2482 if (contiguous (av)) // 如果av是连续的,那么只用申请之前topchunk不足的部分 2483 size -= old_size; 2484 2485 /* 2486 Round to a multiple of page size. 2487 If MORECORE is not contiguous, this ensures that we only call it 2488 with whole-page arguments. And if MORECORE is contiguous and 2489 this is not first time through, this preserves page-alignment of 2490 previous calls. Otherwise, we correct to page-align below. 2491 */ 2492 2493 size = ALIGN_UP (size, pagesize); // size对齐 2494 2495 /* 2496 Don't try to call MORECORE if argument is so big as to appear 2497 negative. Note that since mmap takes size_t arg, it may succeed 2498 below even if we cannot call MORECORE. 2499 */ 2500 2501 if (size > 0) // 调用sbrk提高堆上界 2502 { 2503 brk = (char *) (MORECORE (size)); 2504 LIBC_PROBE (memory_sbrk_more, 2, brk, size); 2505 } 2506 2507 if (brk != (char *) (MORECORE_FAILURE)) // 如果成功了,则调用hook 2508 { 2509 /* Call the `morecore' hook if necessary. */ 2510 void (*hook) (void) = atomic_forced_read (__after_morecore_hook); 2511 if (__builtin_expect (hook != NULL, 0)) 2512 (*hook)(); 2513 } ``` ##### b. 通过mmap 如果`sbrk`失败或者不可用,则使用`mmap`进行分配,使用`mmap`作为`morecore`分配的最小空间大小为1M。 ``` 2514 else // 如果sbrk失败了,则调用mmap分配 2515 { 2516 /* 2517 If have mmap, try using it as a backup when MORECORE fails or 2518 cannot be used. This is worth doing on systems that have "holes" in 2519 address space, so sbrk cannot extend to give contiguous space, but 2520 space is available elsewhere. Note that we ignore mmap max count 2521 and threshold limits, since the space will not be used as a 2522 segregated mmap region. 2523 */ 2524 2525 /* Cannot merge with old top, so add its size back in */ 2526 if (contiguous (av)) 2527 size = ALIGN_UP (size + old_size, pagesize); // 页对齐 2528 2529 /* If we are relying on mmap as backup, then use larger units */ 2530 if ((unsigned long) (size) < (unsigned long) (MMAP_AS_MORECORE_SIZE)) 2531 size = MMAP_AS_MORECORE_SIZE; // mmap分配的最小内存大小为1M 2532 2533 /* Don't try if size wraps around 0 */ 2534 if ((unsigned long) (size) > (unsigned long) (nb)) // size满足需求 2535 { 2536 char *mbrk = (char *) (MMAP (0, size, PROT_READ | PROT_WRITE, 0)); 2537 2538 if (mbrk != MAP_FAILED) // 申请成功 2539 { 2540 /* We do not need, and cannot use, another sbrk call to find end */ 2541 brk = mbrk; // 分配出来的空间的下界 2542 snd_brk = brk + size; // 分配出来的空间的上界 2543 2544 /* 2545 Record that we no longer have a contiguous sbrk region. 2546 After the first time mmap is used as backup, we do not 2547 ever rely on contiguous space since this could incorrectly 2548 bridge regions. 2549 */ 2550 set_noncontiguous (av); // 设置当前分配区设置为可分配不连续虚拟内存块 2551 } 2552 } 2553 } ``` ##### c. 分配成功 ``` 2555 if (brk != (char *) (MORECORE_FAILURE)) // 如果使用sbrk或mmap成功申请了空间 2556 { 2557 if (mp_.sbrk_base == 0) // 如果系统的sbrk_base没初始化,则现在初始化 2558 mp_.sbrk_base = brk; 2559 av->system_mem += size; 2560 2561 /* 2562 If MORECORE extends previous space, we can likewise extend top size. 2563 */ 2564 2565 if (brk == old_end && snd_brk == (char *) (MORECORE_FAILURE)) // 由于snd_brk仍为初始化时的值,说明是sbrk分配成功,那么topchunk不变,直接将空间加上去 2566 set_head (old_top, (size + old_size) | PREV_INUSE); 2567 2568 else if (contiguous (av) && old_size && brk < old_end) // 如果当前空间可分配连续空间,且原topchunk大小不为0,但新的brk值比原topchunk的结束地址小,那么报错 2569 { 2570 /* Oops! Someone else killed our space.. Can't touch anything. */ 2571 malloc_printerr (3, "break adjusted to free malloc space", brk, 2572 av); 2573 } ``` ##### d. 校正sbrk分配的brk 能走到这里表示`sbrk`返回的`brk`超过了`topchunk`的结束地址,可能是其他地方调用`sbrk`函数导致的,所以需要重新处理一下。主要是由于`topchunk`与当前`brk`不相邻,所以需要重新用`sbrk`分配空间。根据一些规则计算出校正值`correction`,然后调用`sbrk`重新分配。 ``` 2594 else 2595 { // 初始化 2596 front_misalign = 0; // 前方对齐值 2597 end_misalign = 0; // 结束对齐值 2598 correction = 0; // 校正值,用来再次调用sbrk申请空间 2599 aligned_brk = brk; // 调整后的brk 2600 2601 /* handle contiguous cases */ 2602 if (contiguous (av)) // 先处理可分配连续空间的情况 2603 { 2604 /* Count foreign sbrk as system_mem. */ 2605 if (old_size) // 如果外部调用了sbrk函数,那么把外部分配的内存计入当前分配区分配的内存统计中 2606 av->system_mem += brk - old_end; 2607 2608 /* Guarantee alignment of first new chunk made from this space */ 2609 2610 front_misalign = (INTERNAL_SIZE_T) chunk2mem (brk) & MALLOC_ALIGN_MASK; // 计算当前brk要更正的字节数据,保证按MALLOC_ALIGN_MASK对齐 2611 if (front_misalign > 0) // 如果需要更正 2612 { 2613 /* 2614 Skip over some bytes to arrive at an aligned position. 2615 We don't need to specially mark these wasted front bytes. 2616 They will never be accessed anyway because 2617 prev_inuse of av->top (and any chunk created from its start) 2618 is always true after initialization. 2619 */ 2620 2621 correction = MALLOC_ALIGNMENT - front_misalign; 2622 aligned_brk += correction; 2623 } 2624 2625 /* 2626 If this isn't adjacent to existing space, then we will not 2627 be able to merge with old_top space, so must add to 2nd request. 2628 */ 2629 2630 correction += old_size; // 需要重新为topchunk分配空间,所以将原topchunk的大小加入校正值中 2631 2632 /* Extend the end address to hit a page boundary */ 2633 end_misalign = (INTERNAL_SIZE_T) (brk + size + correction); 2634 correction += (ALIGN_UP (end_misalign, pagesize)) - end_misalign; 2635 2636 assert (correction >= 0); 2637 snd_brk = (char *) (MORECORE (correction)); // 重新调用sbrk进行分配 2638 2639 /* 2640 If can't allocate correction, try to at least find out current 2641 brk. It might be enough to proceed without failing. 2642 2643 Note that if second sbrk did NOT fail, we assume that space 2644 is contiguous with first sbrk. This is a safe assumption unless 2645 program is multithreaded but doesn't use locks and a foreign sbrk 2646 occurred between our first and second calls. 2647 */ 2648 2649 if (snd_brk == (char *) (MORECORE_FAILURE)) // 如果brk的结束地址非法,则使用morecore获得brk的结束地址 2650 { 2651 correction = 0; 2652 snd_brk = (char *) (MORECORE (0)); 2653 } 2654 else // 成功了则调用hook函数 2655 { 2656 /* Call the `morecore' hook if necessary. */ 2657 void (*hook) (void) = atomic_forced_read (__after_morecore_hook); 2658 if (__builtin_expect (hook != NULL, 0)) 2659 (*hook)(); 2660 } 2661 } ``` ##### e. 校正mmap分配的brk 能走到这里就一定是用`mmap`分配的空间了,同样需要校验。 ``` 2663 /* handle non-contiguous cases */ 2664 else 2665 { 2666 if (MALLOC_ALIGNMENT == 2 * SIZE_SZ) // 判断brk有无按MALLOC_ALIGN_MASK对齐 2667 /* MORECORE/mmap must correctly align */ 2668 assert (((unsigned long) chunk2mem (brk) & MALLOC_ALIGN_MASK) == 0); 2669 else 2670 { 2671 front_misalign = (INTERNAL_SIZE_T) chunk2mem (brk) & MALLOC_ALIGN_MASK; // 计算brk需要校正的字节数据 2672 if (front_misalign > 0) // 如果需要校正 2673 { 2674 /* 2675 Skip over some bytes to arrive at an aligned position. 2676 We don't need to specially mark these wasted front bytes. 2677 They will never be accessed anyway because 2678 prev_inuse of av->top (and any chunk created from its start) 2679 is always true after initialization. 2680 */ 2681 2682 aligned_brk += MALLOC_ALIGNMENT - front_misalign; 2683 } 2684 } 2685 2686 /* Find out current end of memory */ 2687 if (snd_brk == (char *) (MORECORE_FAILURE)) // 如果brk的结束地址非法,则使用morecore获得brk的结束地址 2688 { 2689 snd_brk = (char *) (MORECORE (0)); 2690 } 2691 } ``` ##### f. 更新topchunk并处理原topchunk 通过上面分配后的空间与原`topchunk`不是相邻的,那么需要插入一下`fencepost`,主分配区的`fencepost`和非主分配区的`fencepost`是不同的。 ``` 2693 /* Adjust top based on results of second sbrk */ 2694 if (snd_brk != (char *) (MORECORE_FAILURE)) // 如果brk的结束地址合法 2695 { 2696 av->top = (mchunkptr) aligned_brk; // 设置topchunk为当前brk 2697 set_head (av->top, (snd_brk - aligned_brk + correction) | PREV_INUSE); // 设置topchunk的大小 2698 av->system_mem += correction; // 更新统计数据 2699 2700 /* 2701 If not the first time through, we either have a 2702 gap due to foreign sbrk or a non-contiguous region. Insert a 2703 double fencepost at old_top to prevent consolidation with space 2704 we don't own. These fenceposts are artificial chunks that are 2705 marked as inuse and are in any case too small to use. We need 2706 two to make sizes and alignments work out. 2707 */ 2708 2709 if (old_size != 0) // 如果原来topchunk不为空,还需要对fencepost进行处理 2710 { 2711 /* 2712 Shrink old_top to insert fenceposts, keeping size a 2713 multiple of MALLOC_ALIGNMENT. We know there is at least 2714 enough space in old_top to do this. 2715 */ 2716 old_size = (old_size - 4 * SIZE_SZ) & ~MALLOC_ALIGN_MASK; // 原topchunk减去fencepost两个chunk后的大小 2717 set_head (old_top, old_size | PREV_INUSE); // 设置原topchunk的大小 2718 2719 /* 2720 Note that the following assignments completely overwrite 2721 old_top when old_size was previously MINSIZE. This is 2722 intentional. We need the fencepost, even if old_top otherwise gets 2723 lost. 2724 */ 2725 set_head (chunk_at_offset (old_top, old_size), 2726 (2 * SIZE_SZ) | PREV_INUSE); // 设置fencepost的chunk1的size 2727 set_head (chunk_at_offset (old_top, old_size + 2 * SIZE_SZ), 2728 (2 * SIZE_SZ) | PREV_INUSE); // 设置fencepost的chunk2的size 2729 2730 /* If possible, release the rest. */ 2731 if (old_size >= MINSIZE) // 如果原topchunk足够大,则独立出来 2732 { 2733 _int_free (av, old_top, 1); 2734 } 2735 } 2736 } 2737 } 2738 } 2739 } /* if (av != &main_arena) */ ``` #### 7) 从更新后的topchunk分配空间 通过上面的操作,现在的`topchunk`已经足够大来支持分配了,下面就是分配的操作。 ``` 2741 if ((unsigned long) av->system_mem > (unsigned long) (av->max_system_mem)) // 更新当前分配区系统分配内存的最大值 2742 av->max_system_mem = av->system_mem; 2743 check_malloc_state (av); 2744 2745 /* finally, do the allocation */ 2746 p = av->top; 2747 size = chunksize (p); 2748 2749 /* check that one of the above allocation paths succeeded */ 2750 if ((unsigned long) (size) >= (unsigned long) (nb + MINSIZE)) // 判断新topchunk能否满足需求,可以的话则进行分配 2751 { 2752 remainder_size = size - nb; 2753 remainder = chunk_at_offset (p, nb); 2754 av->top = remainder; 2755 set_head (p, nb | PREV_INUSE | (av != &main_arena ? NON_MAIN_ARENA : 0)); 2756 set_head (remainder, remainder_size | PREV_INUSE); 2757 check_malloced_chunk (av, p, nb); 2758 return chunk2mem (p); 2759 } 2760 2761 /* catch all failure paths */ 2762 __set_errno (ENOMEM); 2763 return 0; 2764 } ``` #### 8) 总结 1.如果需要分配的空间超过了mmap的分配阈值,则直接尝试用mmap分配 2.如果是非主分配区,先通过增长模拟堆拓展`topchunk` 3.再尝试创建新的模拟堆拓展`topchunk` 4.最后尝试通过`mmap`申请空间 5.如果是主分配区,先通过`brk`提高`heap`的上界,从而拓展`topchunk` 6.之后尝试用`mmap`进行分配空间,此时主分配区可以分配不连续的虚拟内存块了 7.校正分配后的空间,更新调整`topchunk`,并处理原`topchunk` 8.从新`topchunk`中分配空间
觉得不错,点个赞?
提交评论
Sign in
to leave a comment.
No Leanote account ?
Sign up now
.
0
条评论
More...
文章目录
No Leanote account ? Sign up now.