1 commit 034994cfffbb2371b720e3f49378031ebc12645e
2 Author: Eric Anholt <eric@anholt.net>
3 Date: Thu Oct 2 12:24:47 2008 -0700
5 drm: Clean up many sparse warnings in i915.
7 Signed-off-by: Eric Anholt <eric@anholt.net>
9 diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
10 index dbd3f49..814cc12 100644
11 --- a/drivers/gpu/drm/i915/i915_dma.c
12 +++ b/drivers/gpu/drm/i915/i915_dma.c
13 @@ -76,7 +76,7 @@ int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
14 * Sets up the hardware status page for devices that need a physical address
17 -int i915_init_phys_hws(struct drm_device *dev)
18 +static int i915_init_phys_hws(struct drm_device *dev)
20 drm_i915_private_t *dev_priv = dev->dev_private;
21 /* Program Hardware Status Page */
22 @@ -101,7 +101,7 @@ int i915_init_phys_hws(struct drm_device *dev)
23 * Frees the hardware status page, whether it's a physical address or a virtual
24 * address set up by the X Server.
26 -void i915_free_hws(struct drm_device *dev)
27 +static void i915_free_hws(struct drm_device *dev)
29 drm_i915_private_t *dev_priv = dev->dev_private;
30 if (dev_priv->status_page_dmah) {
31 @@ -145,8 +145,8 @@ static int i915_dma_cleanup(struct drm_device * dev)
33 if (dev_priv->ring.virtual_start) {
34 drm_core_ioremapfree(&dev_priv->ring.map, dev);
35 - dev_priv->ring.virtual_start = 0;
36 - dev_priv->ring.map.handle = 0;
37 + dev_priv->ring.virtual_start = NULL;
38 + dev_priv->ring.map.handle = NULL;
39 dev_priv->ring.map.size = 0;
42 @@ -827,9 +827,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
43 base = drm_get_resource_start(dev, mmio_bar);
44 size = drm_get_resource_len(dev, mmio_bar);
46 - ret = drm_addmap(dev, base, size, _DRM_REGISTERS,
47 - _DRM_KERNEL | _DRM_DRIVER,
48 - &dev_priv->mmio_map);
49 + dev_priv->regs = ioremap(base, size);
53 @@ -867,8 +865,8 @@ int i915_driver_unload(struct drm_device *dev)
57 - if (dev_priv->mmio_map)
58 - drm_rmmap(dev, dev_priv->mmio_map);
59 + if (dev_priv->regs != NULL)
60 + iounmap(dev_priv->regs);
62 intel_opregion_free(dev);
64 diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
65 index 8547f0a..b184d54 100644
66 --- a/drivers/gpu/drm/i915/i915_drv.h
67 +++ b/drivers/gpu/drm/i915/i915_drv.h
68 @@ -110,8 +110,8 @@ struct intel_opregion {
69 typedef struct drm_i915_private {
70 struct drm_device *dev;
73 drm_local_map_t *sarea;
74 - drm_local_map_t *mmio_map;
76 drm_i915_sarea_t *sarea_priv;
77 drm_i915_ring_buffer_t ring;
78 @@ -553,12 +553,12 @@ extern void opregion_enable_asle(struct drm_device *dev);
79 LOCK_TEST_WITH_RETURN(dev, file_priv); \
82 -#define I915_READ(reg) DRM_READ32(dev_priv->mmio_map, (reg))
83 -#define I915_WRITE(reg,val) DRM_WRITE32(dev_priv->mmio_map, (reg), (val))
84 -#define I915_READ16(reg) DRM_READ16(dev_priv->mmio_map, (reg))
85 -#define I915_WRITE16(reg,val) DRM_WRITE16(dev_priv->mmio_map, (reg), (val))
86 -#define I915_READ8(reg) DRM_READ8(dev_priv->mmio_map, (reg))
87 -#define I915_WRITE8(reg,val) DRM_WRITE8(dev_priv->mmio_map, (reg), (val))
88 +#define I915_READ(reg) readl(dev_priv->regs + (reg))
89 +#define I915_WRITE(reg,val) writel(val, dev_priv->regs + (reg))
90 +#define I915_READ16(reg) readw(dev_priv->regs + (reg))
91 +#define I915_WRITE16(reg,val)	writew(val, dev_priv->regs + (reg))
92 +#define I915_READ8(reg) readb(dev_priv->regs + (reg))
93 +#define I915_WRITE8(reg,val) writeb(val, dev_priv->regs + (reg))
95 #define I915_VERBOSE 0
97 diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
98 index 6ecfd10..6a89449 100644
99 --- a/drivers/gpu/drm/i915/i915_gem.c
100 +++ b/drivers/gpu/drm/i915/i915_gem.c
101 @@ -176,7 +176,8 @@ i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
104 char __user *user_data;
106 + char __iomem *vaddr;
107 + char *vaddr_atomic;
111 @@ -219,16 +220,20 @@ i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
112 pfn = (dev->agp->base >> PAGE_SHIFT) + i;
114 #ifdef CONFIG_HIGHMEM
115 - /* kmap_atomic can't map IO pages on non-HIGHMEM kernels
116 + /* This is a workaround for the low performance of iounmap
117 + * (approximately 10% CPU cost on normal 3D workloads).
118 + * kmap_atomic on HIGHMEM kernels happens to let us map card
119 + * memory without taking IPIs. When the vmap rework lands
120 + * we should be able to dump this hack.
122 - vaddr = kmap_atomic_pfn(pfn, KM_USER0);
123 + vaddr_atomic = kmap_atomic_pfn(pfn, KM_USER0);
125 DRM_INFO("pwrite i %d o %d l %d pfn %ld vaddr %p\n",
126 - i, o, l, pfn, vaddr);
127 + i, o, l, pfn, vaddr_atomic);
129 - unwritten = __copy_from_user_inatomic_nocache(vaddr + o,
130 + unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + o,
132 - kunmap_atomic(vaddr, KM_USER0);
133 + kunmap_atomic(vaddr_atomic, KM_USER0);
136 #endif /* CONFIG_HIGHMEM */
137 @@ -271,7 +276,7 @@ fail:
143 i915_gem_shmem_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
144 struct drm_i915_gem_pwrite *args,
145 struct drm_file *file_priv)
146 @@ -587,7 +592,7 @@ i915_add_request(struct drm_device *dev, uint32_t flush_domains)
147 * Ensures that all commands in the ring are finished
148 * before signalling the CPU
152 i915_retire_commands(struct drm_device *dev)
154 drm_i915_private_t *dev_priv = dev->dev_private;
155 @@ -734,7 +739,7 @@ i915_gem_retire_work_handler(struct work_struct *work)
156 * Waits for a sequence number to be signaled, and cleans up the
157 * request and object lists appropriately for that event.
161 i915_wait_request(struct drm_device *dev, uint32_t seqno)
163 drm_i915_private_t *dev_priv = dev->dev_private;
164 @@ -1483,7 +1488,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
165 struct drm_i915_gem_object *obj_priv = obj->driver_private;
167 uint32_t last_reloc_offset = -1;
168 - void *reloc_page = NULL;
169 + void __iomem *reloc_page = NULL;
171 /* Choose the GTT offset for our buffer and put it there. */
172 ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
173 @@ -1500,8 +1505,8 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
174 for (i = 0; i < entry->relocation_count; i++) {
175 struct drm_gem_object *target_obj;
176 struct drm_i915_gem_object *target_obj_priv;
177 - uint32_t reloc_val, reloc_offset, *reloc_entry;
179 + uint32_t reloc_val, reloc_offset;
180 + uint32_t __iomem *reloc_entry;
182 ret = copy_from_user(&reloc, relocs + i, sizeof(reloc));
184 @@ -1624,7 +1629,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
188 - reloc_entry = (uint32_t *)((char *)reloc_page +
189 + reloc_entry = (uint32_t __iomem *)(reloc_page +
190 (reloc_offset & (PAGE_SIZE - 1)));
191 reloc_val = target_obj_priv->gtt_offset + reloc.delta;