CMA — Contiguous Memory Allocator (kernel code walkthrough)
Source: Linux内核最新的连续内存分配器(CMA)——避免预留大块内存【转】 - 腾讯云开发者社区-腾讯云
kernel-4.19/mm/cma.c
103 static int __init cma_activate_area(struct cma *cma)
104 {
####表示需要多少位来表示页面数
105 int bitmap_size = BITS_TO_LONGS(cma_bitmap_maxno(cma)) * sizeof(long);
106 unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
####表示多少个块
107 unsigned i = cma->count >> pageblock_order;
108 struct zone *zone;
109
110 cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
111
112 if (!cma->bitmap) {
113 cma->count = 0;
114 return -ENOMEM;
115 }
116
117 WARN_ON_ONCE(!pfn_valid(pfn));
118 zone = page_zone(pfn_to_page(pfn));
119
120 do {
121 unsigned j;
122
123 base_pfn = pfn;
124 for (j = pageblock_nr_pages; j; --j, pfn++) {
125 WARN_ON_ONCE(!pfn_valid(pfn));
126 /*
127 * alloc_contig_range requires the pfn range
128 * specified to be in the same zone. Make this
129 * simple by forcing the entire CMA resv range
130 * to be in the same zone.
131 */
132 if (page_zone(pfn_to_page(pfn)) != zone)
133 goto not_in_zone;
134 }
135 init_cma_reserved_pageblock(pfn_to_page(base_pfn));
136 } while (--i);
137
138 mutex_init(&cma->lock);
139
140 #ifdef CONFIG_CMA_DEBUGFS
141 INIT_HLIST_HEAD(&cma->mem_head);
142 spin_lock_init(&cma->mem_head_lock);
143 #endif
144
145 return 0;
146
147 not_in_zone:
148 pr_err("CMA area %s could not be activated\n", cma->name);
149 kfree(cma->bitmap);
150 cma->count = 0;
151 return -EINVAL;
152 }
153
ION memory allocator (system heap walkthrough)
Source: 【内存管理】ION内存管理器浅析(system heap)(基于linux 4.14) - yibuyibu01 - 博客园
#if TARGET_ION_ABI_VERSION >= 2
mIonDeviceFd = ion_open();
#else
mIonDeviceFd = open("/dev/ion", O_RDONLY);
#endif
int32_t ret = 0;
289 unsigned char *v_addr;
290 int32_t map_fd = -1;
291 struct dma_buf_sync buf_sync;
292 uint32_t len = PAGE_ALIGN(size);
293 uint32_t align = 0;
294 uint32_t flags = 0;
295 unsigned int heap_id = ION_QSECOM_HEAP_ID;
296 ret = ion_alloc_fd(mIonDeviceFd, len, align, ION_HEAP(heap_id), flags, &map_fd);
297
298 if (ret) {
299 LOG_E(LOG_TAG,
300 "[%s] Error::ion_alloc_fd for heap %u size %u len %u failed ret = %d, errno = %d\n",
301 __func__, heap_id, size, len, ret, errno);
302 err = GF_ERROR_GENERIC;
303 goto alloc_fail;
304 }
305
306 v_addr = (unsigned char *)mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
307 map_fd, 0);
308
309 if (v_addr == MAP_FAILED) {
310 LOG_E(LOG_TAG, "[%s] Error::mmap failed", __func__);
311 err = GF_ERROR_GENERIC;
312 goto map_fail;
313 }
314
315 pExtInfo->base.bufferFd = map_fd; // map fd
316 pExtInfo->base.buffer = v_addr; // mmap virtual addr
317 pExtInfo->bufferSize = len; // mmap virtual buffer size
318 buf_sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_RW;
319 ret = ioctl(map_fd, DMA_BUF_IOCTL_SYNC, &buf_sync);
320
321 if (ret) {
322 LOG_E(LOG_TAG,
323 "[%s] Error:: DMA_BUF_IOCTL_SYNC start failed, ret = %d, errno = %d\n",
324 __func__, ret, errno);
325 err = GF_ERROR_GENERIC;
326 goto sync_fail;
327 }
328
329 return err;
330 sync_fail:
331
332 if (v_addr) {
333 munmap(v_addr, len);
334 }
335
336 map_fail:
337
338 if (map_fd > 0) {
339 ion_close(map_fd);
340 }
341
342 alloc_fail:
343
344 if (mIonDeviceFd > 0) {
345 ion_close(mIonDeviceFd);
346 mIonDeviceFd = 0;
347 }
348
system/memory/libion/ion.c
160 int ion_alloc_fd(int fd, size_t len, size_t align, unsigned int heap_mask, unsigned int flags,
161 int* handle_fd) {
162 ion_user_handle_t handle;
163 int ret;
164
165 if (!handle_fd) return -EINVAL;
166
167 if (!ion_is_legacy(fd)) {
168 struct ion_new_allocation_data data = {
169 .len = len,
170 .heap_id_mask = heap_mask,
171 .flags = flags,
172 };
173
174 ret = ion_ioctl(fd, ION_IOC_NEW_ALLOC, &data);
175 if (ret < 0) return ret;
176 *handle_fd = data.fd;
177 } else {
178 ret = ion_alloc(fd, len, align, heap_mask, flags, &handle);
179 if (ret < 0) return ret;
180 ret = ion_share(fd, handle, handle_fd);
181 ion_free(fd, handle);
182 }
183 return ret;
184 }
kernel-4.19/drivers/staging/android/aosp_ion/