From c79d7959c45f40e47520aa6acd54c19094754787 Mon Sep 17 00:00:00 2001
From: Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
Date: Mon, 26 Jan 2009 15:13:45 +0200
Subject: [PATCH] omap iommu: omap2 architecture specific functions

The structure 'arch_mmu' accommodates the difference between omap1 and
omap2/3 MMU modules.

This patch provides omap2/3 specific functions.
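
At its core, the backend just registers a table of callbacks with the
generic plat-omap iommu layer at module init. A minimal sketch of that
hookup, using the names added by this patch (struct iommu_functions and
install_iommu_arch() are assumed to be declared by the generic layer in
mach/iommu.h, which this file includes):

    static const struct iommu_functions omap2_iommu_ops = {
            .version = IOMMU_ARCH_VERSION,
            .enable  = omap2_iommu_enable,   /* reset, program TTB, enable MMU */
            .disable = omap2_iommu_disable,
            /* ... TLB/CR helpers, PTE attributes, context save/restore ... */
    };

    static int __init omap2_iommu_init(void)
    {
            /* make these ops available to the generic iommu code */
            return install_iommu_arch(&omap2_iommu_ops);
    }
    module_init(omap2_iommu_init);

The generic layer is then expected to call back into these ops whenever
an omap2/3 MMU instance is enabled, loaded with TLB entries, or dumped.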

Signed-off-by: Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
---
 arch/arm/mach-omap2/iommu2.c             |  326 ++++++++++++++++++++++++++++++
 arch/arm/plat-omap/include/mach/iommu2.h |   94 +++++++++
 2 files changed, 420 insertions(+), 0 deletions(-)
 create mode 100644 arch/arm/mach-omap2/iommu2.c
 create mode 100644 arch/arm/plat-omap/include/mach/iommu2.h
diff --git a/arch/arm/mach-omap2/iommu2.c b/arch/arm/mach-omap2/iommu2.c
new file mode 100644
index 0000000..88a44f1
--- /dev/null
+++ b/arch/arm/mach-omap2/iommu2.c
@@ -0,0 +1,326 @@
+/*
+ * omap iommu: omap2/3 architecture specific functions
+ *
+ * Copyright (C) 2008-2009 Nokia Corporation
+ *
+ * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>,
+ * Paul Mundt and Toshihiro Kobayashi
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/err.h>
+#include <linux/device.h>
+#include <linux/jiffies.h>
+#include <linux/module.h>
+#include <linux/stringify.h>
+
+#include <mach/iommu.h>
+#include <mach/iommu2.h>
+
+/*
+ * omap2 architecture specific register bit definitions
+ */
+#define IOMMU_ARCH_VERSION 0x00000011
+
+/* SYSCONFIG */
+#define MMU_SYS_IDLE_SHIFT 3
+#define MMU_SYS_IDLE_FORCE (0 << MMU_SYS_IDLE_SHIFT)
+#define MMU_SYS_IDLE_NONE (1 << MMU_SYS_IDLE_SHIFT)
+#define MMU_SYS_IDLE_SMART (2 << MMU_SYS_IDLE_SHIFT)
+#define MMU_SYS_IDLE_MASK (3 << MMU_SYS_IDLE_SHIFT)
+
+#define MMU_SYS_SOFTRESET (1 << 1)
+#define MMU_SYS_AUTOIDLE 1
+
+/* SYSSTATUS */
+#define MMU_SYS_RESETDONE 1
+
+/* IRQSTATUS & IRQENABLE */
+#define MMU_IRQ_MULTIHITFAULT (1 << 4)
+#define MMU_IRQ_TABLEWALKFAULT (1 << 3)
+#define MMU_IRQ_EMUMISS (1 << 2)
+#define MMU_IRQ_TRANSLATIONFAULT (1 << 1)
+#define MMU_IRQ_TLBMISS (1 << 0)
+#define MMU_IRQ_MASK \
+        (MMU_IRQ_MULTIHITFAULT | MMU_IRQ_TABLEWALKFAULT | MMU_IRQ_EMUMISS | \
+         MMU_IRQ_TRANSLATIONFAULT)
+
+/* MMU_CNTL */
+#define MMU_CNTL_SHIFT 1
+#define MMU_CNTL_MASK (7 << MMU_CNTL_SHIFT)
+#define MMU_CNTL_EML_TLB (1 << 3)
+#define MMU_CNTL_TWL_EN (1 << 2)
+#define MMU_CNTL_MMU_EN (1 << 1)
+
+#define get_cam_va_mask(pgsz) \
+        (((pgsz) == MMU_CAM_PGSZ_16M) ? 0xff000000 : \
+         ((pgsz) == MMU_CAM_PGSZ_1M)  ? 0xfff00000 : \
+         ((pgsz) == MMU_CAM_PGSZ_64K) ? 0xffff0000 : \
+         ((pgsz) == MMU_CAM_PGSZ_4K)  ? 0xfffff000 : 0)
+
+static int omap2_iommu_enable(struct iommu *obj)
+{
+        u32 l, pa;
+        unsigned long timeout;
+
+        if (!obj->iopgd || !IS_ALIGNED((u32)obj->iopgd, SZ_16K))
+                return -EINVAL;
+
+        pa = virt_to_phys(obj->iopgd);
+        if (!IS_ALIGNED(pa, SZ_16K))
+                return -EINVAL;
+
+        iommu_write_reg(obj, MMU_SYS_SOFTRESET, MMU_SYSCONFIG);
+
+        timeout = jiffies + msecs_to_jiffies(20);
+        do {
+                l = iommu_read_reg(obj, MMU_SYSSTATUS);
+                if (l & MMU_SYS_RESETDONE)
+                        break;
+        } while (!time_after(jiffies, timeout));
+
+        if (!(l & MMU_SYS_RESETDONE)) {
+                dev_err(obj->dev, "can't take mmu out of reset\n");
+                return -ENODEV;
+        }
+
+        l = iommu_read_reg(obj, MMU_REVISION);
+        dev_info(obj->dev, "%s: version %d.%d\n", obj->name,
+                 (l >> 4) & 0xf, l & 0xf);
+
+        l = iommu_read_reg(obj, MMU_SYSCONFIG);
+        l &= ~MMU_SYS_IDLE_MASK;
+        l |= (MMU_SYS_IDLE_SMART | MMU_SYS_AUTOIDLE);
+        iommu_write_reg(obj, l, MMU_SYSCONFIG);
+
+        iommu_write_reg(obj, MMU_IRQ_MASK, MMU_IRQENABLE);
+        iommu_write_reg(obj, pa, MMU_TTB);
+
+        l = iommu_read_reg(obj, MMU_CNTL);
+        l &= ~MMU_CNTL_MASK;
+        l |= (MMU_CNTL_MMU_EN | MMU_CNTL_TWL_EN);
+        iommu_write_reg(obj, l, MMU_CNTL);
+
+        return 0;
+}
+
+static void omap2_iommu_disable(struct iommu *obj)
+{
+        u32 l = iommu_read_reg(obj, MMU_CNTL);
+
+        l &= ~MMU_CNTL_MASK;
+        iommu_write_reg(obj, l, MMU_CNTL);
+        iommu_write_reg(obj, MMU_SYS_IDLE_FORCE, MMU_SYSCONFIG);
+
+        dev_dbg(obj->dev, "%s is shutting down\n", obj->name);
+}
+
+static u32 omap2_iommu_fault_isr(struct iommu *obj, u32 *ra)
+{
+        int i;
+        u32 stat, da;
+        const char *err_msg[] = {
+                "tlb miss",
+                "translation fault",
+                "emulation miss",
+                "table walk fault",
+                "multi hit fault",
+        };
+
+        stat = iommu_read_reg(obj, MMU_IRQSTATUS);
+        stat &= MMU_IRQ_MASK;
+        if (!stat)
+                return 0;
+
+        da = iommu_read_reg(obj, MMU_FAULT_AD);
+        *ra = da;
+
+        dev_err(obj->dev, "%s:\tda:%08x ", __func__, da);
+
+        for (i = 0; i < ARRAY_SIZE(err_msg); i++) {
+                if (stat & (1 << i))
+                        printk("%s ", err_msg[i]);
+        }
+        printk("\n");
+
+        iommu_write_reg(obj, stat, MMU_IRQSTATUS);
+        return stat;
+}
+
+static void omap2_tlb_read_cr(struct iommu *obj, struct cr_regs *cr)
+{
+        cr->cam = iommu_read_reg(obj, MMU_READ_CAM);
+        cr->ram = iommu_read_reg(obj, MMU_READ_RAM);
+}
+
+static void omap2_tlb_load_cr(struct iommu *obj, struct cr_regs *cr)
+{
+        iommu_write_reg(obj, cr->cam | MMU_CAM_V, MMU_CAM);
+        iommu_write_reg(obj, cr->ram, MMU_RAM);
+}
+
+static u32 omap2_cr_to_virt(struct cr_regs *cr)
+{
+        u32 page_size = cr->cam & MMU_CAM_PGSZ_MASK;
+        u32 mask = get_cam_va_mask(cr->cam & page_size);
+
+        return cr->cam & mask;
+}
+
+static struct cr_regs *omap2_alloc_cr(struct iommu *obj, struct iotlb_entry *e)
+{
+        struct cr_regs *cr;
+
+        if (e->da & ~(get_cam_va_mask(e->pgsz))) {
+                dev_err(obj->dev, "%s:\twrong alignment: %08x\n", __func__,
+                        e->da);
+                return ERR_PTR(-EINVAL);
+        }
+
+        cr = kmalloc(sizeof(*cr), GFP_KERNEL);
+        if (!cr)
+                return ERR_PTR(-ENOMEM);
+
+        cr->cam = (e->da & MMU_CAM_VATAG_MASK) | e->prsvd | e->pgsz;
+        cr->ram = e->pa | e->endian | e->elsz | e->mixed;
+
+        return cr;
+}
+
+static inline int omap2_cr_valid(struct cr_regs *cr)
+{
+        return cr->cam & MMU_CAM_V;
+}
+
+static u32 omap2_get_pte_attr(struct iotlb_entry *e)
+{
+        u32 attr;
+
+        attr = e->mixed << 5;
+        attr |= e->endian;
+        attr |= e->elsz >> 3;
+        attr <<= ((e->pgsz & MMU_CAM_PGSZ_4K) ? 0 : 6);
+
+        return attr;
+}
+
+static ssize_t omap2_dump_cr(struct iommu *obj, struct cr_regs *cr, char *buf)
+{
+        char *p = buf;
+
+        /* FIXME: Need more detail analysis of cam/ram */
+        p += sprintf(p, "%08x %08x\n", cr->cam, cr->ram);
+
+        return p - buf;
+}
+
+#define pr_reg(name) \
+        p += sprintf(p, "%20s: %08x\n", \
+                     __stringify(name), iommu_read_reg(obj, MMU_##name));
+
+static ssize_t omap2_iommu_dump_ctx(struct iommu *obj, char *buf)
+{
+        char *p = buf;
+
+        pr_reg(REVISION);
+        pr_reg(SYSCONFIG);
+        pr_reg(SYSSTATUS);
+        pr_reg(IRQSTATUS);
+        pr_reg(IRQENABLE);
+        pr_reg(WALKING_ST);
+        pr_reg(CNTL);
+        pr_reg(FAULT_AD);
+        pr_reg(TTB);
+        pr_reg(LOCK);
+        pr_reg(LD_TLB);
+        pr_reg(CAM);
+        pr_reg(RAM);
+        pr_reg(GFLUSH);
+        pr_reg(FLUSH_ENTRY);
+        pr_reg(READ_CAM);
+        pr_reg(READ_RAM);
+        pr_reg(EMU_FAULT_AD);
+
+        return p - buf;
+}
+
+static void omap2_iommu_save_ctx(struct iommu *obj)
+{
+        int i;
+        u32 *p = obj->ctx;
+
+        for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) {
+                p[i] = iommu_read_reg(obj, i * sizeof(u32));
+                dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i, p[i]);
+        }
+
+        BUG_ON(p[0] != IOMMU_ARCH_VERSION);
+}
+
+static void omap2_iommu_restore_ctx(struct iommu *obj)
+{
+        int i;
+        u32 *p = obj->ctx;
+
+        for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) {
+                iommu_write_reg(obj, p[i], i * sizeof(u32));
+                dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i, p[i]);
+        }
+
+        BUG_ON(p[0] != IOMMU_ARCH_VERSION);
+}
+
+static void omap2_cr_to_e(struct cr_regs *cr, struct iotlb_entry *e)
+{
+        e->da = cr->cam & MMU_CAM_VATAG_MASK;
+        e->pa = cr->ram & MMU_RAM_PADDR_MASK;
+        e->valid = cr->cam & MMU_CAM_V;
+        e->pgsz = cr->cam & MMU_CAM_PGSZ_MASK;
+        e->endian = cr->ram & MMU_RAM_ENDIAN_MASK;
+        e->elsz = cr->ram & MMU_RAM_ELSZ_MASK;
+        e->mixed = cr->ram & MMU_RAM_MIXED;
+}
+
+static const struct iommu_functions omap2_iommu_ops = {
+        .version        = IOMMU_ARCH_VERSION,
+
+        .enable         = omap2_iommu_enable,
+        .disable        = omap2_iommu_disable,
+        .fault_isr      = omap2_iommu_fault_isr,
+
+        .tlb_read_cr    = omap2_tlb_read_cr,
+        .tlb_load_cr    = omap2_tlb_load_cr,
+
+        .cr_to_e        = omap2_cr_to_e,
+        .cr_to_virt     = omap2_cr_to_virt,
+        .alloc_cr       = omap2_alloc_cr,
+        .cr_valid       = omap2_cr_valid,
+        .dump_cr        = omap2_dump_cr,
+
+        .get_pte_attr   = omap2_get_pte_attr,
+
+        .save_ctx       = omap2_iommu_save_ctx,
+        .restore_ctx    = omap2_iommu_restore_ctx,
+        .dump_ctx       = omap2_iommu_dump_ctx,
+};
+
+static int __init omap2_iommu_init(void)
+{
+        return install_iommu_arch(&omap2_iommu_ops);
+}
+module_init(omap2_iommu_init);
+
+static void __exit omap2_iommu_exit(void)
+{
+        uninstall_iommu_arch(&omap2_iommu_ops);
+}
+module_exit(omap2_iommu_exit);
+
+MODULE_AUTHOR("Hiroshi DOYU, Paul Mundt and Toshihiro Kobayashi");
+MODULE_DESCRIPTION("omap iommu: omap2/3 architecture specific functions");
+MODULE_LICENSE("GPL v2");
diff --git a/arch/arm/plat-omap/include/mach/iommu2.h b/arch/arm/plat-omap/include/mach/iommu2.h
new file mode 100644
index 0000000..d746047
--- /dev/null
+++ b/arch/arm/plat-omap/include/mach/iommu2.h
@@ -0,0 +1,94 @@
+/*
+ * omap iommu: omap2 architecture specific definitions
+ *
+ * Copyright (C) 2008-2009 Nokia Corporation
+ *
+ * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __MACH_IOMMU2_H
+#define __MACH_IOMMU2_H
+
+/*
+ * MMU Register offsets
+ */
+#define MMU_REVISION 0x00
+#define MMU_SYSCONFIG 0x10
+#define MMU_SYSSTATUS 0x14
+#define MMU_IRQSTATUS 0x18
+#define MMU_IRQENABLE 0x1c
+#define MMU_WALKING_ST 0x40
+#define MMU_CNTL 0x44
+#define MMU_FAULT_AD 0x48
+#define MMU_TTB 0x4c
+#define MMU_LOCK 0x50
+#define MMU_LD_TLB 0x54
+#define MMU_CAM 0x58
+#define MMU_RAM 0x5c
+#define MMU_GFLUSH 0x60
+#define MMU_FLUSH_ENTRY 0x64
+#define MMU_READ_CAM 0x68
+#define MMU_READ_RAM 0x6c
+#define MMU_EMU_FAULT_AD 0x70
+
+#define MMU_REG_SIZE 256
+
+/*
+ * MMU Register bit definitions
+ */
+#define MMU_LOCK_BASE_SHIFT 10
+#define MMU_LOCK_BASE_MASK (0x1f << MMU_LOCK_BASE_SHIFT)
+#define MMU_LOCK_BASE(x) \
+        ((x & MMU_LOCK_BASE_MASK) >> MMU_LOCK_BASE_SHIFT)
+
+#define MMU_LOCK_VICT_SHIFT 4
+#define MMU_LOCK_VICT_MASK (0x1f << MMU_LOCK_VICT_SHIFT)
+#define MMU_LOCK_VICT(x) \
+        ((x & MMU_LOCK_VICT_MASK) >> MMU_LOCK_VICT_SHIFT)
+
+#define MMU_CAM_VATAG_SHIFT 12
+#define MMU_CAM_VATAG_MASK \
+        ((~0UL >> MMU_CAM_VATAG_SHIFT) << MMU_CAM_VATAG_SHIFT)
+#define MMU_CAM_P (1 << 3)
+#define MMU_CAM_V (1 << 2)
+#define MMU_CAM_PGSZ_MASK 3
+#define MMU_CAM_PGSZ_1M (0 << 0)
+#define MMU_CAM_PGSZ_64K (1 << 0)
+#define MMU_CAM_PGSZ_4K (2 << 0)
+#define MMU_CAM_PGSZ_16M (3 << 0)
+
+#define MMU_RAM_PADDR_SHIFT 12
+#define MMU_RAM_PADDR_MASK \
+        ((~0UL >> MMU_RAM_PADDR_SHIFT) << MMU_RAM_PADDR_SHIFT)
+#define MMU_RAM_ENDIAN_SHIFT 9
+#define MMU_RAM_ENDIAN_MASK (1 << MMU_RAM_ENDIAN_SHIFT)
+#define MMU_RAM_ENDIAN_BIG (1 << MMU_RAM_ENDIAN_SHIFT)
+#define MMU_RAM_ENDIAN_LITTLE (0 << MMU_RAM_ENDIAN_SHIFT)
+#define MMU_RAM_ELSZ_SHIFT 7
+#define MMU_RAM_ELSZ_MASK (3 << MMU_RAM_ELSZ_SHIFT)
+#define MMU_RAM_ELSZ_8 (0 << MMU_RAM_ELSZ_SHIFT)
+#define MMU_RAM_ELSZ_16 (1 << MMU_RAM_ELSZ_SHIFT)
+#define MMU_RAM_ELSZ_32 (2 << MMU_RAM_ELSZ_SHIFT)
+#define MMU_RAM_ELSZ_NONE (3 << MMU_RAM_ELSZ_SHIFT)
+#define MMU_RAM_MIXED_SHIFT 6
+#define MMU_RAM_MIXED_MASK (1 << MMU_RAM_MIXED_SHIFT)
+#define MMU_RAM_MIXED MMU_RAM_MIXED_MASK
+
+/*
+ * register accessors
+ */
+static inline u32 iommu_read_reg(struct iommu *obj, size_t offs)
+{
+        return __raw_readl(obj->regbase + offs);
+}
+
+static inline void iommu_write_reg(struct iommu *obj, u32 val, size_t offs)
+{
+        __raw_writel(val, obj->regbase + offs);
+}
+
+#endif /* __MACH_IOMMU2_H */