1 From 07365182b998af3dc2b79e822b8e21a3f50262c4 Mon Sep 17 00:00:00 2001
2 From: Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
3 Date: Wed, 28 Jan 2009 21:32:08 +0200
4 Subject: [PATCH] omap iommu: simple virtual address space management
6 This patch provides device drivers, which have an omap iommu, with
7 address mapping APIs between the device virtual address (iommu), the
8 physical address and the MPU virtual address.
10 There are 4 possible patterns for iommu virtual address (iova/da) mapping.
12   |iova/                mapping              iommu_                  page
13   | da   pa    va     (d)-(p)-(v)            function                type
14 ---------------------------------------------------------------------------
15 1 | c    c     c      1 - 1 - 1              _kmap() / _kunmap()     s
16 2 | c    c,a   c      1 - 1 - 1              _kmalloc()/ _kfree()    s
17 3 | c    d     c      1 - n - 1              _vmap() / _vunmap()     s
18 4 | c    d,a   c      1 - n - 1              _vmalloc()/ _vfree()    n*
20 'iova': device iommu virtual address
22 'pa': physical address
23 'va': mpu virtual address
25 'c': contiguous memory area
26 'd': discontiguous memory area
27 'a': anonymous memory allocation
28 '()': optional feature
30 'n': a normal page (4KB) size is used.
31 's': multiple iommu superpage sizes (16MB, 1MB, 64KB, 4KB) are used.
33 '*': not yet, but feasible.
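
For example, given an iommu object 'obj' already obtained from the
omap iommu core, pattern 4 boils down to the call sequence below
(a minimal, illustrative sketch; the 128KB size is an arbitrary
placeholder):

	u32 da;
	void *va;

	/* anonymous allocation: passing da == 0 selects IOVMF_DA_ANON */
	da = iommu_vmalloc(obj, 0, SZ_128K, 0);
	if (IS_ERR_VALUE(da))
		return da;

	/* mpu side virtual address for cpu access */
	va = da_to_va(obj, da);

	/* ... hand 'da' to the device ... */

	iommu_vfree(obj, da);
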
35 Signed-off-by: Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
37 arch/arm/include/asm/io.h | 6 +
38 arch/arm/mm/ioremap.c | 11 +
39 arch/arm/plat-omap/include/mach/iovmm.h | 94 ++++
40 arch/arm/plat-omap/iovmm.c | 891 +++++++++++++++++++++++++++++++
41 4 files changed, 1002 insertions(+), 0 deletions(-)
42 create mode 100644 arch/arm/plat-omap/include/mach/iovmm.h
43 create mode 100644 arch/arm/plat-omap/iovmm.c
45 diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
46 index d2a59cf..cbdadfe 100644
47 --- a/arch/arm/include/asm/io.h
48 +++ b/arch/arm/include/asm/io.h
49 @@ -75,6 +75,12 @@ extern void __iomem * __arm_ioremap(unsigned long, size_t, unsigned int);
50 extern void __iounmap(volatile void __iomem *addr);
53 + * external interface to remap a single page with the appropriate type
55 +extern int ioremap_page(unsigned long virt, unsigned long phys,
56 + unsigned int mtype);
59 * Bad read/write accesses...
61 extern void __readwrite_bug(const char *fn);
62 diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
63 index 9f88dd3..8441351 100644
64 --- a/arch/arm/mm/ioremap.c
65 +++ b/arch/arm/mm/ioremap.c
66 @@ -110,6 +110,17 @@ static int remap_area_pages(unsigned long start, unsigned long pfn,
70 +int ioremap_page(unsigned long virt, unsigned long phys, unsigned int mtype)
72 + const struct mem_type *type;
74 + type = get_mem_type(mtype);
78 + return remap_area_pages(virt, __phys_to_pfn(phys), PAGE_SIZE, type);
80 +EXPORT_SYMBOL(ioremap_page);
82 void __check_kvm_seq(struct mm_struct *mm)
84 diff --git a/arch/arm/plat-omap/include/mach/iovmm.h b/arch/arm/plat-omap/include/mach/iovmm.h
86 index 0000000..bdc7ce5
88 +++ b/arch/arm/plat-omap/include/mach/iovmm.h
91 + * omap iommu: simple virtual address space management
93 + * Copyright (C) 2008-2009 Nokia Corporation
95 + * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
97 + * This program is free software; you can redistribute it and/or modify
98 + * it under the terms of the GNU General Public License version 2 as
99 + * published by the Free Software Foundation.
102 +#ifndef __IOMMU_MMAP_H
103 +#define __IOMMU_MMAP_H
105 +struct iovm_struct {
106 + struct iommu *iommu; /* iommu object which this belongs to */
107 + u32 da_start; /* area definition */
109 + u32 flags; /* IOVMF_: see below */
110 + struct list_head list; /* linked in ascending order */
111 + const struct sg_table *sgt; /* keep 'page' <-> 'da' mapping */
112 + void *va; /* mpu side mapped address */
116 + * IOVMF_FLAGS: attribute for iommu virtual memory area (iovma)
118 + * the lower 16 bits are used for h/w and the upper 16 bits are for s/w.
120 +#define IOVMF_SW_SHIFT 16
121 +#define IOVMF_HW_SIZE (1 << IOVMF_SW_SHIFT)
122 +#define IOVMF_HW_MASK (IOVMF_HW_SIZE - 1)
123 +#define IOVMF_SW_MASK (~IOVMF_HW_MASK)
126 + * iovma: h/w flags derived from cam and ram attribute
128 +#define IOVMF_CAM_MASK (~((1 << 10) - 1))
129 +#define IOVMF_RAM_MASK (~IOVMF_CAM_MASK)
131 +#define IOVMF_PGSZ_MASK (3 << 0)
132 +#define IOVMF_PGSZ_1M MMU_CAM_PGSZ_1M
133 +#define IOVMF_PGSZ_64K MMU_CAM_PGSZ_64K
134 +#define IOVMF_PGSZ_4K MMU_CAM_PGSZ_4K
135 +#define IOVMF_PGSZ_16M MMU_CAM_PGSZ_16M
137 +#define IOVMF_ENDIAN_MASK (1 << 9)
138 +#define IOVMF_ENDIAN_BIG MMU_RAM_ENDIAN_BIG
139 +#define IOVMF_ENDIAN_LITTLE MMU_RAM_ENDIAN_LITTLE
141 +#define IOVMF_ELSZ_MASK (3 << 7)
142 +#define IOVMF_ELSZ_8 MMU_RAM_ELSZ_8
143 +#define IOVMF_ELSZ_16 MMU_RAM_ELSZ_16
144 +#define IOVMF_ELSZ_32 MMU_RAM_ELSZ_32
145 +#define IOVMF_ELSZ_NONE MMU_RAM_ELSZ_NONE
147 +#define IOVMF_MIXED_MASK (1 << 6)
148 +#define IOVMF_MIXED MMU_RAM_MIXED
151 + * iovma: s/w flags, used for mapping and unmapping internally.
153 +#define IOVMF_MMIO (1 << IOVMF_SW_SHIFT)
154 +#define IOVMF_ALLOC (2 << IOVMF_SW_SHIFT)
155 +#define IOVMF_ALLOC_MASK (3 << IOVMF_SW_SHIFT)
157 +/* "superpages" is supported just with physically linear pages */
158 +#define IOVMF_DISCONT (1 << (2 + IOVMF_SW_SHIFT))
159 +#define IOVMF_LINEAR (2 << (2 + IOVMF_SW_SHIFT))
160 +#define IOVMF_LINEAR_MASK (3 << (2 + IOVMF_SW_SHIFT))
162 +#define IOVMF_DA_FIXED (1 << (4 + IOVMF_SW_SHIFT))
163 +#define IOVMF_DA_ANON (2 << (4 + IOVMF_SW_SHIFT))
164 +#define IOVMF_DA_MASK (3 << (4 + IOVMF_SW_SHIFT))
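+
+/*
+ * Note (illustrative, not part of the API): callers pass only h/w
+ * attributes here, e.g. IOVMF_ENDIAN_BIG | IOVMF_ELSZ_32; each mapper
+ * masks its 'flags' argument with IOVMF_HW_MASK and then ors in its
+ * own s/w flags, e.g. 'iommu_vmalloc()' adds IOVMF_DISCONT |
+ * IOVMF_ALLOC plus IOVMF_DA_FIXED or IOVMF_DA_ANON depending on
+ * whether a 'da' was given.
+ */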
167 +extern struct iovm_struct *find_iovm_area(struct iommu *obj, u32 da);
168 +extern u32 iommu_vmap(struct iommu *obj, u32 da,
169 + const struct sg_table *sgt, u32 flags);
170 +extern struct sg_table *iommu_vunmap(struct iommu *obj, u32 da);
171 +extern u32 iommu_vmalloc(struct iommu *obj, u32 da, size_t bytes,
173 +extern void iommu_vfree(struct iommu *obj, const u32 da);
174 +extern u32 iommu_kmap(struct iommu *obj, u32 da, u32 pa, size_t bytes,
176 +extern void iommu_kunmap(struct iommu *obj, u32 da);
177 +extern u32 iommu_kmalloc(struct iommu *obj, u32 da, size_t bytes,
179 +extern void iommu_kfree(struct iommu *obj, u32 da);
181 +extern void *da_to_va(struct iommu *obj, u32 da);
183 +#endif /* __IOMMU_MMAP_H */
184 diff --git a/arch/arm/plat-omap/iovmm.c b/arch/arm/plat-omap/iovmm.c
186 index 0000000..6726d10
188 +++ b/arch/arm/plat-omap/iovmm.c
191 + * omap iommu: simple virtual address space management
193 + * Copyright (C) 2008-2009 Nokia Corporation
195 + * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
197 + * This program is free software; you can redistribute it and/or modify
198 + * it under the terms of the GNU General Public License version 2 as
199 + * published by the Free Software Foundation.
202 +#include <linux/err.h>
203 +#include <linux/vmalloc.h>
204 +#include <linux/device.h>
205 +#include <linux/scatterlist.h>
208 +#include <asm/cacheflush.h>
210 +#include <mach/iommu.h>
211 +#include <mach/iovmm.h>
213 +#include "iopgtable.h"
216 + * A device driver needs to create address mappings between:
218 + * - iommu/device address
219 + * - physical address
220 + * - mpu virtual address
222 + * There are 4 possible patterns for them:
224 + *   |iova/                mapping              iommu_                  page
225 + *   | da   pa    va     (d)-(p)-(v)            function                type
226 + * ---------------------------------------------------------------------------
227 + * 1 | c    c     c      1 - 1 - 1              _kmap() / _kunmap()     s
228 + * 2 | c    c,a   c      1 - 1 - 1              _kmalloc()/ _kfree()    s
229 + * 3 | c    d     c      1 - n - 1              _vmap() / _vunmap()     s
230 + * 4 | c    d,a   c      1 - n - 1              _vmalloc()/ _vfree()    n*
233 + * 'iova': device iommu virtual address
234 + * 'da': alias of 'iova'
235 + * 'pa': physical address
236 + * 'va': mpu virtual address
238 + * 'c': contiguous memory area
239 + * 'd': discontiguous memory area
240 + * 'a': anonymous memory allocation
241 + * '()': optional feature
243 + * 'n': a normal page (4KB) size is used.
244 + * 's': multiple iommu superpage sizes (16MB, 1MB, 64KB, 4KB) are used.
246 + * '*': not yet, but feasible.
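+ *
+ * Illustrative example (assumes 'obj', 'pa' and 'bytes' are provided
+ * by the caller): pattern 1 above maps an externally given,
+ * physically contiguous region:
+ *
+ *	da = iommu_kmap(obj, 0, pa, bytes, 0);
+ *	if (IS_ERR_VALUE(da))
+ *		goto err;
+ *	...
+ *	iommu_kunmap(obj, da);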
249 +static struct kmem_cache *iovm_area_cachep;
251 +/* return total bytes of sg buffers */
252 +static size_t sgtable_len(const struct sg_table *sgt)
254 + unsigned int i, total = 0;
255 + struct scatterlist *sg;
260 + for_each_sg(sgt->sgl, sg, sgt->nents, i) {
263 + bytes = sg_dma_len(sg);
265 + if (!iopgsz_ok(bytes)) {
266 + pr_err("%s: sg[%d] not iommu pagesize(%x)\n",
267 + __func__, i, bytes);
276 +#define sgtable_ok(x) (!!sgtable_len(x))
279 + * calculate the optimal number of sg elements from total bytes based on
282 +static unsigned int sgtable_nents(size_t bytes)
285 + unsigned int nr_entries;
286 + const unsigned long pagesize[] = { SZ_16M, SZ_1M, SZ_64K, SZ_4K, };
288 + if (!IS_ALIGNED(bytes, PAGE_SIZE)) {
289 + pr_err("%s: wrong size %08x\n", __func__, bytes);
294 + for (i = 0; i < ARRAY_SIZE(pagesize); i++) {
295 + if (bytes >= pagesize[i]) {
296 + nr_entries += (bytes / pagesize[i]);
297 + bytes %= pagesize[i];
305 +/* allocate and initialize sg_table header (a kind of 'superblock') */
306 +static struct sg_table *sgtable_alloc(const size_t bytes, u32 flags)
308 + unsigned int nr_entries;
310 + struct sg_table *sgt;
313 + return ERR_PTR(-EINVAL);
315 + if (!IS_ALIGNED(bytes, PAGE_SIZE))
316 + return ERR_PTR(-EINVAL);
318 + /* FIXME: IOVMF_DA_FIXED should support 'superpages' */
319 + if ((flags & IOVMF_LINEAR) && (flags & IOVMF_DA_ANON)) {
320 + nr_entries = sgtable_nents(bytes);
322 + return ERR_PTR(-EINVAL);
324 + nr_entries = bytes / PAGE_SIZE;
326 + sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
328 + return ERR_PTR(-ENOMEM);
330 + err = sg_alloc_table(sgt, nr_entries, GFP_KERNEL);
332 + return ERR_PTR(err);
334 + pr_debug("%s: sgt:%p(%d entries)\n", __func__, sgt, nr_entries);
339 +/* free sg_table header (a kind of superblock) */
340 +static void sgtable_free(struct sg_table *sgt)
345 + sg_free_table(sgt);
348 + pr_debug("%s: sgt:%p\n", __func__, sgt);
351 +/* map 'sglist' to a contiguous mpu virtual area and return 'va' */
352 +static void *vmap_sg(const struct sg_table *sgt)
357 + struct scatterlist *sg;
358 + struct vm_struct *new;
360 + total = sgtable_len(sgt);
362 + return ERR_PTR(-EINVAL);
364 + new = __get_vm_area(total, VM_IOREMAP, VMALLOC_START, VMALLOC_END);
366 + return ERR_PTR(-ENOMEM);
367 + va = (u32)new->addr;
369 + for_each_sg(sgt->sgl, sg, sgt->nents, i) {
375 + bytes = sg_dma_len(sg);
377 + BUG_ON(bytes != PAGE_SIZE);
379 + err = ioremap_page(va, pa, MT_DEVICE);
386 + flush_cache_vmap(new->addr, total);
390 + WARN_ON(1); /* FIXME: cleanup some mpu mappings */
392 + return ERR_PTR(-EAGAIN);
395 +static inline void vunmap_sg(const void *va)
400 +static struct iovm_struct *__find_iovm_area(struct iommu *obj, const u32 da)
402 + struct iovm_struct *tmp;
404 + list_for_each_entry(tmp, &obj->mmap, list) {
405 + if ((da >= tmp->da_start) && (da < tmp->da_end)) {
408 + len = tmp->da_end - tmp->da_start;
410 + dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n",
411 + __func__, tmp->da_start, da, tmp->da_end, len,
422 + * find_iovm_area - find iovma which includes @da
423 + * @da: iommu device virtual address
425 + * Find the existing iovma starting at @da
427 +struct iovm_struct *find_iovm_area(struct iommu *obj, u32 da)
429 + struct iovm_struct *area;
431 + mutex_lock(&obj->mmap_lock);
432 + area = __find_iovm_area(obj, da);
433 + mutex_unlock(&obj->mmap_lock);
437 +EXPORT_SYMBOL_GPL(find_iovm_area);
440 + * This finds the hole (area) which fits the requested address and length
441 + * in the iovma mmap list, and returns the newly allocated iovma.
443 +static struct iovm_struct *alloc_iovm_area(struct iommu *obj, u32 da,
444 + size_t bytes, u32 flags)
446 + struct iovm_struct *new, *tmp;
447 + u32 start, prev_end, alignment;
449 + if (!obj || !bytes)
450 + return ERR_PTR(-EINVAL);
453 + alignment = PAGE_SIZE;
455 + if (flags & IOVMF_DA_ANON) {
457 + * Reserve the first page for NULL
460 + if (flags & IOVMF_LINEAR)
461 + alignment = iopgsz_max(bytes);
462 + start = roundup(start, alignment);
466 + if (list_empty(&obj->mmap))
470 + list_for_each_entry(tmp, &obj->mmap, list) {
472 + if ((prev_end <= start) && (start + bytes < tmp->da_start))
475 + if (flags & IOVMF_DA_ANON)
476 + start = roundup(tmp->da_end, alignment);
478 + prev_end = tmp->da_end;
481 + if ((start >= prev_end) && (ULONG_MAX - start >= bytes))
484 + dev_dbg(obj->dev, "%s: no space to fit %08x(%x) flags: %08x\n",
485 + __func__, da, bytes, flags);
487 + return ERR_PTR(-EINVAL);
490 + new = kmem_cache_zalloc(iovm_area_cachep, GFP_KERNEL);
492 + return ERR_PTR(-ENOMEM);
495 + new->da_start = start;
496 + new->da_end = start + bytes;
497 + new->flags = flags;
500 + * keep ascending order of iovmas
503 + list_add_tail(&new->list, &tmp->list);
505 + list_add(&new->list, &obj->mmap);
507 + dev_dbg(obj->dev, "%s: found %08x-%08x-%08x(%x) %08x\n",
508 + __func__, new->da_start, start, new->da_end, bytes, flags);
513 +static void free_iovm_area(struct iommu *obj, struct iovm_struct *area)
517 + BUG_ON(!obj || !area);
519 + bytes = area->da_end - area->da_start;
521 + dev_dbg(obj->dev, "%s: %08x-%08x(%x) %08x\n",
522 + __func__, area->da_start, area->da_end, bytes, area->flags);
524 + list_del(&area->list);
525 + kmem_cache_free(iovm_area_cachep, area);
529 + * da_to_va - convert (d) to (v)
530 + * @obj: objective iommu
531 + * @da: iommu device virtual address
534 + * Returns mpu virtual addr which corresponds to a given device virtual addr
536 +void *da_to_va(struct iommu *obj, u32 da)
539 + struct iovm_struct *area;
541 + mutex_lock(&obj->mmap_lock);
543 + area = __find_iovm_area(obj, da);
545 + dev_warn(obj->dev, "%s: no da area(%08x)\n", __func__, da);
549 + mutex_unlock(&obj->mmap_lock);
553 +EXPORT_SYMBOL_GPL(da_to_va);
555 +static void sgtable_fill_vmalloc(struct sg_table *sgt, void *_va)
558 + struct scatterlist *sg;
562 + for_each_sg(sgt->sgl, sg, sgt->nents, i) {
564 + const size_t bytes = PAGE_SIZE;
567 + * iommu 'superpage' isn't supported with 'iommu_vmalloc()'
569 + pg = vmalloc_to_page(va);
571 + sg_set_page(sg, pg, bytes, 0);
576 + va_end = _va + PAGE_SIZE * i;
577 + flush_cache_vmap(_va, va_end);
580 +static inline void sgtable_drain_vmalloc(struct sg_table *sgt)
583 + * Actually this is not necessary at all, and just exists for
584 + * consistency of code readability.
589 +static void sgtable_fill_kmalloc(struct sg_table *sgt, u32 pa, size_t len)
592 + struct scatterlist *sg;
595 + va = phys_to_virt(pa);
597 + for_each_sg(sgt->sgl, sg, sgt->nents, i) {
600 + bytes = iopgsz_max(len);
602 + BUG_ON(!iopgsz_ok(bytes));
604 + sg_set_buf(sg, phys_to_virt(pa), bytes);
606 + * 'pa' is continuous (linear).
613 + clean_dcache_area(va, len);
616 +static inline void sgtable_drain_kmalloc(struct sg_table *sgt)
619 + * Actually this is not necessary at all, and just exists for
620 + * consistency of code readability.
625 +/* create 'da' <-> 'pa' mapping from 'sgt' */
626 +static int map_iovm_area(struct iommu *obj, struct iovm_struct *new,
627 + const struct sg_table *sgt, u32 flags)
631 + struct scatterlist *sg;
632 + u32 da = new->da_start;
634 + if (!obj || !new || !sgt)
637 + BUG_ON(!sgtable_ok(sgt));
639 + for_each_sg(sgt->sgl, sg, sgt->nents, i) {
643 + struct iotlb_entry e;
646 + bytes = sg_dma_len(sg);
648 + flags &= ~IOVMF_PGSZ_MASK;
649 + pgsz = bytes_to_iopgsz(bytes);
654 + pr_debug("%s: [%d] %08x %08x(%x)\n", __func__,
657 + iotlb_init_entry(&e, da, pa, flags);
658 + err = iopgtable_store_entry(obj, &e);
667 + da = new->da_start;
669 + for_each_sg(sgt->sgl, sg, i, j) {
672 + bytes = iopgtable_clear_entry(obj, da);
674 + BUG_ON(!iopgsz_ok(bytes));
681 +/* release 'da' <-> 'pa' mapping */
682 +static void unmap_iovm_area(struct iommu *obj, struct iovm_struct *area)
685 + size_t total = area->da_end - area->da_start;
687 + BUG_ON((!total) || !IS_ALIGNED(total, PAGE_SIZE));
689 + start = area->da_start;
690 + while (total > 0) {
693 + bytes = iopgtable_clear_entry(obj, start);
697 + dev_dbg(obj->dev, "%s: unmap %08x(%x) %08x\n",
698 + __func__, start, bytes, area->flags);
700 + BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE));
708 +/* template function for all unmapping */
709 +static struct sg_table *unmap_vm_area(struct iommu *obj, const u32 da,
710 + void (*fn)(const void *), u32 flags)
712 + struct sg_table *sgt = NULL;
713 + struct iovm_struct *area;
715 + BUG_ON(in_interrupt());
717 + if (!IS_ALIGNED(da, PAGE_SIZE)) {
718 + dev_err(obj->dev, "%s: alignment err(%08x)\n", __func__, da);
722 + mutex_lock(&obj->mmap_lock);
724 + area = __find_iovm_area(obj, da);
726 + dev_err(obj->dev, "%s: no da area(%08x)\n", __func__, da);
730 + if ((area->flags & flags) != flags) {
731 + dev_err(obj->dev, "%s: wrong flags(%08x)\n", __func__,
735 + sgt = (struct sg_table *)area->sgt;
737 + unmap_iovm_area(obj, area);
741 + dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n", __func__,
742 + area->da_start, da, area->da_end,
743 + area->da_end - area->da_start, area->flags);
745 + free_iovm_area(obj, area);
747 + mutex_unlock(&obj->mmap_lock);
752 +static u32 map_iommu_region(struct iommu *obj, u32 da,
753 + const struct sg_table *sgt, void *va, size_t bytes, u32 flags)
756 + struct iovm_struct *new;
758 + mutex_lock(&obj->mmap_lock);
760 + new = alloc_iovm_area(obj, da, bytes, flags);
762 + err = PTR_ERR(new);
763 + goto err_alloc_iovma;
768 + if (map_iovm_area(obj, new, sgt, new->flags))
771 + mutex_unlock(&obj->mmap_lock);
773 + dev_dbg(obj->dev, "%s: da:%08x(%x) flags:%08x va:%p\n",
774 + __func__, new->da_start, bytes, new->flags, va);
776 + return new->da_start;
779 + free_iovm_area(obj, new);
781 + mutex_unlock(&obj->mmap_lock);
785 +static inline u32 __iommu_vmap(struct iommu *obj, u32 da,
786 + const struct sg_table *sgt, void *va, size_t bytes, u32 flags)
788 + return map_iommu_region(obj, da, sgt, va, bytes, flags);
792 + * iommu_vmap - (d)-(p)-(v) address mapper
793 + * @obj: objective iommu
794 + * @sgt: address of scatter gather table
795 + * @flags: iovma and page property
797 + * Creates a 1-n-1 mapping with the given @sgt and returns @da.
798 + * All @sgt elements must be io page size aligned.
800 +u32 iommu_vmap(struct iommu *obj, u32 da, const struct sg_table *sgt,
806 + if (!obj || !obj->dev || !sgt)
809 + bytes = sgtable_len(sgt);
812 + bytes = PAGE_ALIGN(bytes);
816 + return PTR_ERR(va);
818 + flags &= IOVMF_HW_MASK;
819 + flags |= IOVMF_DISCONT;
820 + flags |= IOVMF_MMIO;
821 + flags |= (da ? IOVMF_DA_FIXED : IOVMF_DA_ANON);
823 + da = __iommu_vmap(obj, da, sgt, va, bytes, flags);
824 + if (IS_ERR_VALUE(da))
829 +EXPORT_SYMBOL_GPL(iommu_vmap);
832 + * iommu_vunmap - release virtual mapping obtained by 'iommu_vmap()'
833 + * @obj: objective iommu
834 + * @da: iommu device virtual address
836 + * Free the iommu virtually contiguous memory area starting at
837 + * @da, which was returned by 'iommu_vmap()'.
839 +struct sg_table *iommu_vunmap(struct iommu *obj, u32 da)
841 + struct sg_table *sgt;
843 + * 'sgt' is allocated before 'iommu_vmap()' is called.
844 + * Just returns 'sgt' to the caller to free.
846 + sgt = unmap_vm_area(obj, da, vunmap_sg, IOVMF_DISCONT | IOVMF_MMIO);
848 + dev_err(obj->dev, "%s: No sgt\n", __func__);
851 +EXPORT_SYMBOL_GPL(iommu_vunmap);
854 + * iommu_vmalloc - (d)-(p)-(v) address allocator and mapper
855 + * @obj: objective iommu
856 + * @da: contiguous iommu virtual memory
857 + * @bytes: allocation size
858 + * @flags: iovma and page property
860 + * Allocates @bytes linearly, creates a 1-n-1 mapping and returns
861 + * @da again, which might be adjusted if 'IOVMF_DA_ANON' is set.
863 +u32 iommu_vmalloc(struct iommu *obj, u32 da, size_t bytes, u32 flags)
866 + struct sg_table *sgt;
868 + if (!obj || !obj->dev || !bytes)
871 + bytes = PAGE_ALIGN(bytes);
873 + va = vmalloc(bytes);
877 + sgt = sgtable_alloc(bytes, flags);
880 + goto err_sgt_alloc;
882 + sgtable_fill_vmalloc(sgt, va);
884 + flags &= IOVMF_HW_MASK;
885 + flags |= IOVMF_DISCONT;
886 + flags |= IOVMF_ALLOC;
887 + flags |= (da ? IOVMF_DA_FIXED : IOVMF_DA_ANON);
889 + da = __iommu_vmap(obj, da, sgt, va, bytes, flags);
890 + if (IS_ERR_VALUE(da))
891 + goto err_iommu_vmap;
896 + sgtable_drain_vmalloc(sgt);
902 +EXPORT_SYMBOL_GPL(iommu_vmalloc);
905 + * iommu_vfree - release memory allocated by 'iommu_vmalloc()'
906 + * @obj: objective iommu
907 + * @da: iommu device virtual address
909 + * Frees the iommu virtually contiguous memory area starting at
910 + * @da, as obtained from 'iommu_vmalloc()'.
912 +void iommu_vfree(struct iommu *obj, const u32 da)
914 + struct sg_table *sgt;
916 + sgt = unmap_vm_area(obj, da, vfree, IOVMF_DISCONT | IOVMF_ALLOC);
918 + dev_err(obj->dev, "%s: No sgt\n", __func__);
921 +EXPORT_SYMBOL_GPL(iommu_vfree);
923 +static u32 __iommu_kmap(struct iommu *obj, u32 da, u32 pa, void *va,
924 + size_t bytes, u32 flags)
926 + struct sg_table *sgt;
928 + sgt = sgtable_alloc(bytes, flags);
930 + return PTR_ERR(sgt);
932 + sgtable_fill_kmalloc(sgt, pa, bytes);
934 + da = map_iommu_region(obj, da, sgt, va, bytes, flags);
935 + if (IS_ERR_VALUE(da)) {
936 + sgtable_drain_kmalloc(sgt);
944 + * iommu_kmap - (d)-(p)-(v) address mapper
945 + * @obj: objective iommu
946 + * @da: contiguous iommu virtual memory
947 + * @pa: contiguous physical memory
948 + * @flags: iovma and page property
950 + * Creates 1-1-1 mapping and returns @da again, which can be
951 + * adjusted if 'IOVMF_DA_ANON' is set.
953 +u32 iommu_kmap(struct iommu *obj, u32 da, u32 pa, size_t bytes,
958 + if (!obj || !obj->dev || !bytes)
961 + bytes = PAGE_ALIGN(bytes);
963 + va = ioremap(pa, bytes);
967 + flags &= IOVMF_HW_MASK;
968 + flags |= IOVMF_LINEAR;
969 + flags |= IOVMF_MMIO;
970 + flags |= (da ? IOVMF_DA_FIXED : IOVMF_DA_ANON);
972 + da = __iommu_kmap(obj, da, pa, va, bytes, flags);
973 + if (IS_ERR_VALUE(da))
978 +EXPORT_SYMBOL_GPL(iommu_kmap);
981 + * iommu_kunmap - release virtual mapping obtained by 'iommu_kmap()'
982 + * @obj: objective iommu
983 + * @da: iommu device virtual address
985 + * Frees the iommu virtually contiguous memory area starting at
986 + * @da, which was passed to and was returned by 'iommu_kmap()'.
988 +void iommu_kunmap(struct iommu *obj, u32 da)
990 + struct sg_table *sgt;
992 + sgt = unmap_vm_area(obj, da, __iounmap, IOVMF_LINEAR | IOVMF_MMIO);
994 + dev_err(obj->dev, "%s: No sgt\n", __func__);
997 +EXPORT_SYMBOL_GPL(iommu_kunmap);
1000 + * iommu_kmalloc - (d)-(p)-(v) address allocator and mapper
1001 + * @obj: objective iommu
1002 + * @da: contiguous iommu virtual memory
1003 + * @bytes: bytes for allocation
1004 + * @flags: iovma and page property
1006 + * Allocates @bytes linearly, creates a 1-1-1 mapping and returns
1007 + * @da again, which might be adjusted if 'IOVMF_DA_ANON' is set.
1009 +u32 iommu_kmalloc(struct iommu *obj, u32 da, size_t bytes, u32 flags)
1014 + if (!obj || !obj->dev || !bytes)
1017 + bytes = PAGE_ALIGN(bytes);
1019 + va = kmalloc(bytes, GFP_KERNEL | GFP_DMA);
1022 + pa = virt_to_phys(va);
1024 + flags &= IOVMF_HW_MASK;
1025 + flags |= IOVMF_LINEAR;
1026 + flags |= IOVMF_ALLOC;
1027 + flags |= (da ? IOVMF_DA_FIXED : IOVMF_DA_ANON);
1029 + da = __iommu_kmap(obj, da, pa, va, bytes, flags);
1030 + if (IS_ERR_VALUE(da))
1035 +EXPORT_SYMBOL_GPL(iommu_kmalloc);
1038 + * iommu_kfree - release virtual mapping obtained by 'iommu_kmalloc()'
1039 + * @obj: objective iommu
1040 + * @da: iommu device virtual address
1042 + * Frees the iommu virtually contiguous memory area starting at
1043 + * @da, which was passed to and was returned by 'iommu_kmalloc()'.
1045 +void iommu_kfree(struct iommu *obj, u32 da)
1047 + struct sg_table *sgt;
1049 + sgt = unmap_vm_area(obj, da, kfree, IOVMF_LINEAR | IOVMF_ALLOC);
1051 + dev_err(obj->dev, "%s: No sgt\n", __func__);
1052 + sgtable_free(sgt);
1054 +EXPORT_SYMBOL_GPL(iommu_kfree);
1057 +static int __init iovmm_init(void)
1059 + const unsigned long flags = SLAB_HWCACHE_ALIGN;
1060 + struct kmem_cache *p;
1062 + p = kmem_cache_create("iovm_area_cache", sizeof(struct iovm_struct), 0,
1066 + iovm_area_cachep = p;
1070 +module_init(iovmm_init);
1072 +static void __exit iovmm_exit(void)
1074 + kmem_cache_destroy(iovm_area_cachep);
1076 +module_exit(iovmm_exit);
1078 +MODULE_DESCRIPTION("omap iommu: simple virtual address space management");
1079 +MODULE_AUTHOR("Hiroshi DOYU <Hiroshi.DOYU@nokia.com>");
1080 +MODULE_LICENSE("GPL v2");