1 Index: linux-2.6.33/drivers/dma/Kconfig
2 ===================================================================
3 --- linux-2.6.33.orig/drivers/dma/Kconfig
4 +++ linux-2.6.33/drivers/dma/Kconfig
5 @@ -20,6 +20,37 @@ comment "DMA Devices"
6 config ASYNC_TX_DISABLE_CHANNEL_SWITCH
9 +config INTEL_LNW_DMAC1
10 + bool "Intel MID DMA support for LPE DMA"
11 +	depends on PCI && X86 && (SND_INTEL_SST || SND_INTEL_LPE)
14 + Enable support for the Intel(R) MID DMA1 engine present
15 + in Intel MID chipsets.
17 + Say Y here if you have such a chipset.
21 +config INTEL_LNW_DMAC2
22 + bool "Intel MID DMA support for SC DMA"
23 + depends on PCI && X86
26 + Enable support for the Intel(R) MID DMA2 engine present
27 + in Intel MID chipsets.
29 + Say Y here if you have such a chipset.
34 + bool "LNW DMA Debugging Enable"
35 + depends on INTEL_LNW_DMAC1 || INTEL_LNW_DMAC2
38 +	  Enable debug logging in the LNW DMA drivers.
41 tristate "Intel I/OAT DMA support"
43 Index: linux-2.6.33/drivers/dma/Makefile
44 ===================================================================
45 --- linux-2.6.33.orig/drivers/dma/Makefile
46 +++ linux-2.6.33/drivers/dma/Makefile
48 obj-$(CONFIG_DMA_ENGINE) += dmaengine.o
49 obj-$(CONFIG_NET_DMA) += iovlock.o
50 +obj-$(CONFIG_INTEL_LNW_DMAC2) += lnw_dmac2.o
51 +obj-$(CONFIG_INTEL_LNW_DMAC1) += lnw_dmac1.o
52 obj-$(CONFIG_DMATEST) += dmatest.o
53 obj-$(CONFIG_INTEL_IOATDMA) += ioat/
54 obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o
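For reference, a minimal .config fragment that enables both controllers plus the debug logging guarded by CONFIG_LNW_DMA_DEBUG in lnw_dma_regs.h might look as follows (a sketch only; whether the debug option belongs in a production build is up to the integrator, and INTEL_LNW_DMAC1 additionally requires one of SND_INTEL_SST or SND_INTEL_LPE to be selected):

CONFIG_INTEL_LNW_DMAC1=y
CONFIG_INTEL_LNW_DMAC2=y
CONFIG_LNW_DMA_DEBUG=y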
55 Index: linux-2.6.33/drivers/dma/lnw_dma_regs.h
56 ===================================================================
58 +++ linux-2.6.33/drivers/dma/lnw_dma_regs.h
61 + * lnw_dma_regs.h - Intel Langwell DMA Drivers
63 + * Copyright (C) 2008-09 Intel Corp
64 + * Author: Vinod Koul <vinod.koul@intel.com>
65 + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
67 + * This program is free software; you can redistribute it and/or modify
68 + * it under the terms of the GNU General Public License as published by
69 + * the Free Software Foundation; version 2 of the License.
71 + * This program is distributed in the hope that it will be useful, but
72 + * WITHOUT ANY WARRANTY; without even the implied warranty of
73 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
74 + * General Public License for more details.
76 + * You should have received a copy of the GNU General Public License along
77 + * with this program; if not, write to the Free Software Foundation, Inc.,
78 + * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
80 + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
84 +#ifndef __LNW_DMA_REGS_H__
85 +#define __LNW_DMA_REGS_H__
87 +#include <linux/dmaengine.h>
88 +#include <linux/dmapool.h>
89 +#include <linux/pci_ids.h>
91 +#define LNW_DMA_DRIVER_VERSION "0.3.1"
95 +#define REG_BIT0 0x00000001
96 +#define REG_BIT8 0x00000100
98 +#define UNMASK_INTR_REG(chan_num) \
99 + ((REG_BIT0 << chan_num) | (REG_BIT8 << chan_num))
100 +#define MASK_INTR_REG(chan_num) (REG_BIT8 << chan_num)
102 +#define ENABLE_CHANNEL(chan_num) \
103 + ((REG_BIT0 << chan_num) | (REG_BIT8 << chan_num))
105 +#define DESCS_PER_CHANNEL 16
107 +/*registers associated with channel programming*/
108 +#define DMA_REG_SIZE 0x400
109 +#define DMA_CH_SIZE 0x58
111 +/*CH X REG = (DMA_CH_SIZE)*CH_NO + REG*/
112 +#define SAR 0x00 /* Source Address Register*/
113 +#define DAR 0x08 /* Destination Address Register*/
114 +#define CTL_LOW		0x18 /* Control Register Low*/
115 +#define CTL_HIGH	0x1C /* Control Register High*/
116 +#define CFG_LOW 0x40 /* Configuration Register Low*/
117 +#define CFG_HIGH 0x44 /* Configuration Register high*/
119 +#define STATUS_TFR 0x2E8
120 +#define STATUS_BLOCK 0x2F0
121 +#define STATUS_ERR 0x308
123 +#define RAW_TFR 0x2C0
124 +#define RAW_BLOCK 0x2C8
125 +#define RAW_ERR 0x2E0
127 +#define MASK_TFR 0x310
128 +#define MASK_BLOCK 0x318
129 +#define MASK_SRC_TRAN 0x320
130 +#define MASK_DST_TRAN 0x328
131 +#define MASK_ERR 0x330
133 +#define CLEAR_TFR 0x338
134 +#define CLEAR_BLOCK 0x340
135 +#define CLEAR_SRC_TRAN 0x348
136 +#define CLEAR_DST_TRAN 0x350
137 +#define CLEAR_ERR 0x358
139 +#define INTR_STATUS 0x360
140 +#define DMA_CFG 0x398
141 +#define DMA_CHAN_EN 0x3A0
144 + * struct lnw_dma_chan - internal representation of a DMA channel
146 +struct lnw_dma_chan {
147 + struct dma_chan chan;
148 + void __iomem *ch_regs;
149 + void __iomem *dma_base;
152 + dma_cookie_t completed;
153 + struct list_head active_list;
154 + struct list_head queue;
155 + struct list_head free_list;
156 + struct lnw_dma_slave *slave;
157 + unsigned int descs_allocated;
158 + struct lnwdma_device *dma;
161 +static inline struct lnw_dma_chan *to_lnw_dma_chan(struct dma_chan *chan)
163 + return container_of(chan, struct lnw_dma_chan, chan);
167 + * struct lnwdma_device - internal representation of a DMA device
168 + * @pdev: PCI device
169 + * @dma_base: MMIO register space base address of DMA
170 + * @dma_pool: for allocating DMA descriptors
171 + * @common: embedded struct dma_device
172 + * @tasklet: tasklet used for deferred interrupt handling
173 + * @ch: per channel data
175 +struct lnwdma_device {
176 + struct pci_dev *pdev;
177 + void __iomem *dma_base;
178 + struct pci_pool *dma_pool;
179 + struct dma_device common;
180 + struct tasklet_struct tasklet;
181 + struct lnw_dma_chan ch[MAX_CHAN];
184 +static inline struct lnwdma_device *to_lnwdma_device(struct dma_device *common)
186 + return container_of(common, struct lnwdma_device, common);
189 +struct lnw_dma_desc {
190 + void __iomem *block; /*ch ptr*/
191 + struct list_head desc_node;
192 + struct dma_async_tx_descriptor txd;
201 + enum dma_data_direction dirn;
202 + enum dma_status status;
203 + dma_async_tx_callback callback;
204 + void *callback_param;
205 + enum lnw_dma_width width; /*width of DMA txn*/
206 + enum lnw_dma_mode cfg_mode; /*mode configuration*/
210 +static inline int test_ch_en(void __iomem *dma, u32 ch_no)
212 + u32 en_reg = ioread32(dma + DMA_CHAN_EN);
213 + return (en_reg >> ch_no) & 0x1;
216 +static inline struct lnw_dma_desc *to_lnw_dma_desc
217 + (struct dma_async_tx_descriptor *txd)
219 + return container_of(txd, struct lnw_dma_desc, txd);
222 +#define _dma_printk(level, format, arg...) \
223 + printk(level "LNW_DMA: %s %d " format, __func__, __LINE__, ## arg)
225 +#ifdef CONFIG_LNW_DMA_DEBUG
226 +#define dma_dbg(format, arg...) _dma_printk(KERN_DEBUG, "DBG " format , ## arg)
228 +#define dma_dbg(format, arg...) do {} while (0)
231 +#define dma_err(format, arg...) _dma_printk(KERN_ERR, "ERR " format , ## arg)
232 +#define dma_info(format, arg...) \
233 + _dma_printk(KERN_INFO , "INFO " format , ## arg)
235 +#endif /*__LNW_DMA_REGS_H__*/
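The MASK_*, CLEAR_* and DMA_CHAN_EN registers above follow the DesignWare-style layout (both drivers note they are modelled on dw_dmac), where the upper byte carries a per-channel write-enable bit; that is why UNMASK_INTR_REG() sets both the data bit and the write-enable bit while MASK_INTR_REG() sets only the write-enable bit, clearing the enable. A minimal sketch, not part of the patch, of how the drivers below drive these registers, assuming this header and <linux/io.h> are included:

/* Sketch only: mirrors enable_dma*_interrupt()/disable_dma*_interrupt(). */
static inline void lnw_dma_example_mask_ch(void __iomem *dma_base, u32 ch_id)
{
	/* write-enable bit set, data bit clear -> interrupt masked */
	iowrite32(MASK_INTR_REG(ch_id), dma_base + MASK_TFR);
	iowrite32(MASK_INTR_REG(ch_id), dma_base + MASK_ERR);
}

static inline void lnw_dma_example_unmask_ch(void __iomem *dma_base, u32 ch_id)
{
	/* write-enable bit and data bit both set -> interrupt unmasked */
	iowrite32(UNMASK_INTR_REG(ch_id), dma_base + MASK_TFR);
	iowrite32(UNMASK_INTR_REG(ch_id), dma_base + MASK_ERR);
}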
236 Index: linux-2.6.33/drivers/dma/lnw_dmac1.c
237 ===================================================================
239 +++ linux-2.6.33/drivers/dma/lnw_dmac1.c
242 + * lnw_dmac1.c - Intel Langwell DMA Drivers
244 + * Copyright (C) 2008-09 Intel Corp
245 + * Author: Vinod Koul <vinod.koul@intel.com>
246 + * The driver design is based on the dw_dmac driver
247 + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
249 + * This program is free software; you can redistribute it and/or modify
250 + * it under the terms of the GNU General Public License as published by
251 + * the Free Software Foundation; version 2 of the License.
253 + * This program is distributed in the hope that it will be useful, but
254 + * WITHOUT ANY WARRANTY; without even the implied warranty of
255 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
256 + * General Public License for more details.
258 + * You should have received a copy of the GNU General Public License along
259 + * with this program; if not, write to the Free Software Foundation, Inc.,
260 + * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
262 + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
266 +#include <linux/init.h>
267 +#include <linux/module.h>
268 +#include <linux/pci.h>
269 +#include <linux/interrupt.h>
270 +#include <sound/intel_lpe.h>
271 +#include <linux/lnw_dma.h>
274 +#include "lnw_dma_regs.h"
276 +MODULE_AUTHOR("Vinod Koul <vinod.koul@intel.com>");
277 +MODULE_DESCRIPTION("Intel (R) Moorestown Langwell DMAC1 Driver");
278 +MODULE_LICENSE("GPL v2");
279 +MODULE_VERSION(LNW_DMA_DRIVER_VERSION);
283 +#define CH_BLOCK_SIZE 4095
285 +static int __devinit lnw_dma1_probe(struct pci_dev *pdev,
286 + const struct pci_device_id *id);
287 +static void __devexit lnw_dma1_remove(struct pci_dev *pdev);
288 +static void enable_dma1_interrupt(struct lnw_dma_chan *lnwc);
289 +static void disable_dma1_interrupt(struct lnw_dma_chan *lnwc);
292 + struct pci_dev *pdev;
293 + void __iomem *dma_base;
294 + struct lnwdma_device *dma;
297 +/*Channel dependent code: if the channel number mapping changes, change it only here*/
298 +static int get_ch_id(int index)
302 + else if (index == 1)
308 +static int get_ch_index(int ch_id)
310 + if (ch_id == DMA_CH0)
312 + if (ch_id == DMA_CH1)
318 +static int get_ch_num(int *status)
320 + if (*status & (1 << DMA_CH0)) {
321 + *status = *status & (~(1 << DMA_CH0));
323 + } else if (*status & (1 << DMA_CH1)) {
324 + *status = *status & (~(1 << DMA_CH1));
330 +static int get_block_ts(int len, int tx_width)
332 + int byte_width = 0, block_ts = 0;
334 + switch (tx_width) {
335 + case LNW_DMA_WIDTH_8BIT:
338 + case LNW_DMA_WIDTH_16BIT:
341 + case LNW_DMA_WIDTH_32BIT:
347 + block_ts = len/byte_width;
348 + if (block_ts > CH_BLOCK_SIZE)
353 +static struct lnw_dma_desc *lnwc_desc_get1(struct lnw_dma_chan *lnwc)
355 + struct lnw_dma_desc *desc, *_desc;
356 + struct lnw_dma_desc *ret = NULL;
358 + dma_dbg("called \n");
359 + spin_lock_bh(&lnwc->lock);
360 + list_for_each_entry_safe(desc, _desc, &lnwc->free_list, desc_node) {
361 + if (async_tx_test_ack(&desc->txd)) {
362 + list_del(&desc->desc_node);
364 + dma_dbg("got free desc \n");
368 + spin_unlock_bh(&lnwc->lock);
373 +static void lnwc_desc_put1(struct lnw_dma_chan *lnwc, struct lnw_dma_desc *desc)
376 + spin_lock_bh(&lnwc->lock);
377 + list_add_tail(&desc->desc_node, &lnwc->free_list);
378 + spin_unlock_bh(&lnwc->lock);
382 +/* Called with dwc->lock held and bh disabled */
383 +static void lnwc_dostart1(struct lnw_dma_chan *lnwc, struct lnw_dma_desc *first)
385 + struct lnwdma_device *lnw = to_lnwdma_device(lnwc->chan.device);
387 + dma_dbg("called \n");
388 + /* ASSERT: channel is idle */
389 + if (lnwc->in_use && test_ch_en(lnwc->dma_base, lnwc->ch_id)) {
391 + dma_err("channel is busy \n");
392 + /* The tasklet will hopefully advance the queue... */
396 + /*write registers and en*/
397 + iowrite32(first->sar, lnwc->ch_regs + SAR);
398 + iowrite32(first->dar, lnwc->ch_regs + DAR);
399 + iowrite32(first->cfg_hi, lnwc->ch_regs + CFG_HIGH);
400 + iowrite32(first->cfg_lo, lnwc->ch_regs + CFG_LOW);
401 + iowrite32(first->ctl_lo, lnwc->ch_regs + CTL_LOW);
402 + iowrite32(first->ctl_hi, lnwc->ch_regs + CTL_HIGH);
403 + dma_dbg("TX SAR %lx, DAR %lx, CFGL %x, CFGH %x, CTLH %x, CTLL %x \n",
404 + first->sar, first->dar, first->cfg_hi,
405 + first->cfg_lo, first->ctl_hi, first->ctl_lo);
407 + iowrite32(ENABLE_CHANNEL(lnwc->ch_id), lnw->dma_base + DMA_CHAN_EN);
408 + first->status = DMA_IN_PROGRESS;
412 +lnwc_descriptor_complete1(struct lnw_dma_chan *lnwc, struct lnw_dma_desc *desc)
414 + struct dma_async_tx_descriptor *txd = &desc->txd;
415 + dma_async_tx_callback callback = NULL;
416 + dma_async_tx_callback callback_txd = NULL;
417 + void *param = NULL;
418 + void *param_txd = NULL;
420 + union lnw_dma_ctl_hi ctl_hi;
422 + dma_dbg("called \n");
424 + /*check if full tx is complete or not*/
425 + sar = ioread32(lnwc->ch_regs + SAR);
426 + dar = ioread32(lnwc->ch_regs + DAR);
428 + if (desc->dirn == DMA_FROM_DEVICE)
429 + len = dar - desc->dar;
431 + len = sar - desc->sar;
433 + dma_dbg("SAR %x DAR %x, DMA done: %x \n", sar, dar, len);
434 + if (desc->len > len) {
435 + dma_dbg("dirn = %d\n", desc->dirn);
436 + dma_dbg("SAR %x DAR %x, len: %x \n", sar, dar, len);
437 + /*we have to copy more bytes*/
439 + ctl_hi.ctl_hi = desc->ctl_hi;
440 + ctl_hi.ctlx.block_ts = get_block_ts(desc->len, desc->width);
441 + dma_dbg("setting for %x bytes \n", ctl_hi.ctlx.block_ts);
442 + desc->ctl_hi = ctl_hi.ctl_hi;
443 + if (desc->cfg_mode == LNW_DMA_MEM_TO_MEM) {
446 + } else if (desc->dirn == DMA_TO_DEVICE)
448 + else if (desc->dirn == DMA_FROM_DEVICE)
452 + dma_dbg("New SAR %x DAR %x \n", sar, dar);
453 + lnwc_dostart1(lnwc, desc);
457 + lnwc->completed = txd->cookie;
458 + callback = desc->callback;
459 + param = desc->callback_param;
460 + callback_txd = txd->callback;
461 + param_txd = txd->callback_param;
463 + list_move(&desc->desc_node, &lnwc->free_list);
465 + spin_unlock_bh(&lnwc->lock);
466 + dma_dbg("Now we are calling callback \n");
467 + if (callback_txd) {
468 + dma_dbg("lnw TXD callback set ... calling \n");
469 + callback_txd(param_txd);
470 + spin_lock_bh(&lnwc->lock);
474 + dma_dbg("lnw callback set ... calling \n");
477 + spin_lock_bh(&lnwc->lock);
480 +/*check desc, mark as complete when tx is complete*/
482 +lnwc_scan_descriptors1(struct lnwdma_device *lnw, struct lnw_dma_chan *lnwc)
484 + struct lnw_dma_desc *desc = NULL, *_desc = NULL;
487 + dma_dbg("called \n");
488 + status_xfer = ioread32(lnwc->dma_base + RAW_BLOCK);
489 + status_xfer = (status_xfer >> lnwc->ch_id) & 0x1;
490 + dma_dbg("ch[%d]: status_xfer %x \n", lnwc->ch_id, status_xfer);
494 + list_for_each_entry_safe(desc, _desc, &lnwc->active_list, desc_node) {
497 + if (desc->status == DMA_IN_PROGRESS) {
498 + desc->status = DMA_SUCCESS;
499 + lnwc_descriptor_complete1(lnwc, desc);
505 +/*****************************************************************************
507 +static dma_cookie_t lnw_dma1_tx_submit(struct dma_async_tx_descriptor *tx)
509 + struct lnw_dma_desc *desc = to_lnw_dma_desc(tx);
510 + struct lnw_dma_chan *lnwc = to_lnw_dma_chan(tx->chan);
511 + dma_cookie_t cookie;
513 + dma_dbg("called \n");
514 + spin_lock_bh(&lnwc->lock);
515 + cookie = lnwc->chan.cookie;
520 + lnwc->chan.cookie = cookie;
521 + desc->txd.cookie = cookie;
524 + if (list_empty(&lnwc->active_list)) {
525 + lnwc_dostart1(lnwc, desc);
526 + list_add_tail(&desc->desc_node, &lnwc->active_list);
528 + list_add_tail(&desc->desc_node, &lnwc->queue);
530 + spin_unlock_bh(&lnwc->lock);
535 +static void lnw_dma1_issue_pending(struct dma_chan *chan)
537 + struct lnw_dma_chan *lnwc = to_lnw_dma_chan(chan);
539 + spin_lock_bh(&lnwc->lock);
540 + if (!list_empty(&lnwc->queue))
541 + lnwc_scan_descriptors1(to_lnwdma_device(chan->device), lnwc);
542 + spin_unlock_bh(&lnwc->lock);
545 +static enum dma_status
546 +lnw_dma1_tx_is_complete(struct dma_chan *chan,
547 + dma_cookie_t cookie,
548 + dma_cookie_t *done,
549 + dma_cookie_t *used)
551 + struct lnw_dma_chan *lnwc = to_lnw_dma_chan(chan);
552 + dma_cookie_t last_used;
553 + dma_cookie_t last_complete;
556 + last_complete = lnwc->completed;
557 + last_used = chan->cookie;
559 + ret = dma_async_is_complete(cookie, last_complete, last_used);
560 + if (ret != DMA_SUCCESS) {
561 + lnwc_scan_descriptors1(to_lnwdma_device(chan->device), lnwc);
563 + last_complete = lnwc->completed;
564 + last_used = chan->cookie;
566 + ret = dma_async_is_complete(cookie, last_complete, last_used);
570 + *done = last_complete;
577 +static void lnw_dma1_terminate_all(struct dma_chan *chan)
579 + struct lnw_dma_chan *lnwc = to_lnw_dma_chan(chan);
580 + struct lnwdma_device *lnw = to_lnwdma_device(chan->device);
581 + struct lnw_dma_desc *desc, *_desc;
584 + /* ASSERT: channel is idle */
585 + if (lnwc->in_use == false) {
586 + /*ch is not in use, wrong call*/
589 + spin_lock_bh(&lnwc->lock);
590 + list_splice_init(&lnwc->free_list, &list);
591 + lnwc->descs_allocated = 0;
592 + lnwc->slave = NULL;
594 + /* Disable interrupts */
595 + disable_dma1_interrupt(lnwc);
597 + spin_unlock_bh(&lnwc->lock);
598 + list_for_each_entry_safe(desc, _desc, &list, desc_node) {
599 + dma_dbg("freeing descriptor %p\n", desc);
600 + pci_pool_free(lnw->dma_pool, desc, desc->txd.phys);
605 +static struct dma_async_tx_descriptor *
606 +lnw_dma1_prep_slave_sg(struct dma_chan *chan,
607 + struct scatterlist *sgl, unsigned int sg_len,
608 + enum dma_data_direction direction,
609 + unsigned long flags)
611 + /*not supported now*/
615 +static struct dma_async_tx_descriptor *
616 +lnw_dma1_prep_memcpy(struct dma_chan *chan, dma_addr_t dest,
617 + dma_addr_t src, size_t len, unsigned long flags)
619 + struct lnw_dma_chan *lnwc;
620 + struct lnw_dma_desc *desc = NULL;
621 + struct lnw_dma_slave *lnws;
622 + union lnw_dma_ctl_lo ctl_lo;
623 + union lnw_dma_ctl_hi ctl_hi;
624 + union lnw_dma_cfg_lo cfg_lo;
625 + union lnw_dma_cfg_hi cfg_hi;
626 + enum lnw_dma_width width = 0;
628 + dma_dbg("called \n");
633 + lnws = chan->private;
636 + lnwc = to_lnw_dma_chan(chan);
639 + dma_dbg("called for CH %d\n", lnwc->ch_id);
640 + dma_dbg("Cfg passed Mode %x, Dirn %x, HS %x, Width %x \n",
641 + lnws->cfg_mode, lnws->dirn, lnws->hs_mode, lnws->src_width);
643 + /*calculate CFG_LO*/
644 + if (lnws->hs_mode == LNW_DMA_SW_HS) {
646 + cfg_lo.cfgx.hs_sel_dst = 1;
647 + cfg_lo.cfgx.hs_sel_src = 1;
648 + } else if (lnws->hs_mode == LNW_DMA_HW_HS)
649 + cfg_lo.cfg_lo = 0x00000;
651 + /*calculate CFG_HI*/
652 + if (lnws->cfg_mode == LNW_DMA_MEM_TO_MEM) {
654 + dma_dbg("CFG: Mem to mem dma \n");
657 + dma_dbg("HW DMA \n");
659 + cfg_hi.cfgx.protctl = 0x0; /*default value*/
660 + cfg_hi.cfgx.fifo_mode = 1;
661 + if (lnws->dirn == DMA_TO_DEVICE) {
662 + cfg_hi.cfgx.src_per = 0;
663 + cfg_hi.cfgx.dst_per = 3;
664 + } else if (lnws->dirn == DMA_FROM_DEVICE) {
665 + cfg_hi.cfgx.src_per = 2;
666 + cfg_hi.cfgx.dst_per = 0;
670 + /*calculate CTL_HI*/
671 + ctl_hi.ctlx.reser = 0;
672 + width = lnws->src_width;
674 + ctl_hi.ctlx.block_ts = get_block_ts(len, width);
676 + /*calculate CTL_LO*/
678 + ctl_lo.ctlx.int_en = 1;
679 + ctl_lo.ctlx.dst_tr_width = lnws->dst_width;
680 + ctl_lo.ctlx.src_tr_width = lnws->src_width;
681 + ctl_lo.ctlx.dst_msize = lnws->src_msize;
682 + ctl_lo.ctlx.src_msize = lnws->dst_msize;
684 + if (lnws->cfg_mode == LNW_DMA_MEM_TO_MEM) {
685 + dma_dbg("CTL: Mem to mem dma \n");
686 + ctl_lo.ctlx.tt_fc = 0;
687 + ctl_lo.ctlx.sinc = 0;
688 + ctl_lo.ctlx.dinc = 0;
690 + if (lnws->dirn == DMA_TO_DEVICE) {
691 + dma_dbg("CTL: DMA_TO_DEVICE \n");
692 + ctl_lo.ctlx.sinc = 0;
693 + ctl_lo.ctlx.dinc = 2;
694 + ctl_lo.ctlx.tt_fc = 1;
695 + } else if (lnws->dirn == DMA_FROM_DEVICE) {
696 + dma_dbg("CTL: DMA_FROM_DEVICE \n");
697 + ctl_lo.ctlx.sinc = 2;
698 + ctl_lo.ctlx.dinc = 0;
699 + ctl_lo.ctlx.tt_fc = 2;
703 + dma_dbg("Calc CTL LO %x, CTL HI %x, CFG LO %x, CFG HI %x\n",
704 + ctl_lo.ctl_lo, ctl_hi.ctl_hi, cfg_lo.cfg_lo, cfg_hi.cfg_hi);
706 + enable_dma1_interrupt(lnwc);
708 + desc = lnwc_desc_get1(lnwc);
714 + desc->cfg_hi = cfg_hi.cfg_hi;
715 + desc->cfg_lo = cfg_lo.cfg_lo;
716 + desc->ctl_lo = ctl_lo.ctl_lo;
717 + desc->ctl_hi = ctl_hi.ctl_hi;
718 + desc->width = width;
719 + desc->dirn = lnws->dirn;
720 + if (lnws->callback) {
721 + desc->callback = lnws->callback;
722 + desc->callback_param = lnws->callback_param;
723 + dma_dbg("Callback passed... setting\n");
725 + desc->callback = NULL;
729 + dma_err("Failed to get desc \n");
730 + lnwc_desc_put1(lnwc, desc);
734 +static void lnw_dma1_free_chan_resources(struct dma_chan *chan)
736 + struct lnw_dma_chan *lnwc = to_lnw_dma_chan(chan);
737 + struct lnwdma_device *lnw = to_lnwdma_device(chan->device);
738 + struct lnw_dma_desc *desc, *_desc;
740 + dma_dbg("..called for ch_id %d, lnwch_id %d\n",
741 + chan->chan_id, lnwc->ch_id);
742 + if (true == lnwc->in_use) {
743 + /*trying to free ch in use!!!!!*/
744 + dma_err("trying to free ch in use \n");
747 + spin_lock_bh(&lnwc->lock);
748 + lnwc->descs_allocated = 0;
749 + list_for_each_entry_safe(desc, _desc, &lnwc->active_list, desc_node) {
750 + dma_dbg("del active \n");
751 + list_del(&desc->desc_node);
752 + pci_pool_free(lnw->dma_pool, desc, desc->txd.phys);
754 + list_for_each_entry_safe(desc, _desc, &lnwc->free_list, desc_node) {
755 + list_del(&desc->desc_node);
756 + pci_pool_free(lnw->dma_pool, desc, desc->txd.phys);
758 + list_for_each_entry_safe(desc, _desc, &lnwc->queue, desc_node) {
759 + dma_dbg("del queue \n");
760 + list_del(&desc->desc_node);
761 + pci_pool_free(lnw->dma_pool, desc, desc->txd.phys);
763 + spin_unlock_bh(&lnwc->lock);
764 + lnwc->in_use = false;
765 + chan->client_count--;
766 + /* Disable CH interrupts */
767 + iowrite32(MASK_INTR_REG(lnwc->ch_id), lnw->dma_base + MASK_BLOCK);
768 + iowrite32(MASK_INTR_REG(lnwc->ch_id), lnw->dma_base + MASK_ERR);
769 + dma_dbg("done \n");
772 +static int lnw_dma1_alloc_chan_resources(struct dma_chan *chan)
774 + struct lnw_dma_chan *lnwc = to_lnw_dma_chan(chan);
775 + struct lnwdma_device *lnw = to_lnwdma_device(chan->device);
776 + struct lnw_dma_desc *desc;
780 + dma_dbg("called \n");
782 + /* ASSERT: channel is idle */
783 + if (test_ch_en(lnw->dma_base, lnwc->ch_id)) {
785 + dma_err(".ch not idle\n");
788 + dma_dbg("..called for ch_id %d, lnwch_id %d\n",
789 + chan->chan_id, lnwc->ch_id);
790 + lnwc->completed = chan->cookie = 1;
792 + chan->client_count++;
794 + spin_lock_bh(&lnwc->lock);
795 + while (lnwc->descs_allocated < DESCS_PER_CHANNEL) {
796 + spin_unlock_bh(&lnwc->lock);
797 + desc = pci_pool_alloc(lnw->dma_pool, GFP_KERNEL, &phys);
799 + dma_err("desc failed\n");
803 + dma_async_tx_descriptor_init(&desc->txd, chan);
804 + desc->txd.tx_submit = lnw_dma1_tx_submit;
805 + desc->txd.flags = DMA_CTRL_ACK;
806 + desc->txd.phys = phys;
807 + spin_lock_bh(&lnwc->lock);
808 + i = ++lnwc->descs_allocated;
809 + list_add_tail(&desc->desc_node, &lnwc->free_list);
811 + spin_unlock_bh(&lnwc->lock);
812 + lnwc->in_use = false;
813 + dma_dbg("Desc alloc done ret: %d desc\n", i);
817 +static void lnwc_handle_error1(struct lnwdma_device *lnw,
818 + struct lnw_dma_chan *lnwc)
820 + lnwc_scan_descriptors1(lnw, lnwc);
823 +/******************************************************************************
826 +static struct pci_device_id lnw_dma1_ids[] = {
827 + { PCI_VENDOR_ID_INTEL, 0x0814, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
831 +MODULE_DEVICE_TABLE(pci, lnw_dma1_ids);
833 +static struct pci_driver lnw_dma1_pci = {
834 + .name = "Intel LNW DMA1",
835 + .id_table = lnw_dma1_ids,
836 + .probe = lnw_dma1_probe,
837 + .remove = __devexit_p(lnw_dma1_remove),
840 +static void dma_tasklet1(unsigned long data)
842 + struct lnwdma_device *lnw = NULL;
843 + struct lnw_dma_chan *lnwc = NULL;
847 + dma_dbg("called \n");
848 + lnw = (struct lnwdma_device *)data;
850 + dma_err("Null param \n");
853 + status = ioread32(lnw->dma_base + RAW_BLOCK);
854 + dma_dbg("RAW_TFR %x \n", status);
858 + ch_no = get_ch_num(&status);
860 + dma_err("Ch no is invalid %x, abort!\n", ch_no);
863 + dma_dbg("Got Ch %x, new Status %x \n", ch_no, status);
864 + i = get_ch_index(ch_no);
866 + dma_err("Invalid ch index %x\n", i);
869 + dma_dbg("Tx complete interrupt %x, Ch No %d Index %d \n",
871 + lnwc = &lnw->ch[i];
872 + if (lnwc == NULL) {
873 + dma_err("Null param lnwc\n");
876 + dma_dbg("CH %x \n", lnwc->ch_id);
877 + spin_lock_bh(&lnwc->lock);
878 + lnwc_scan_descriptors1(lnw, lnwc);
879 + dma_dbg("Scan of desc... complete, unmasking\n");
880 + iowrite32((1 << lnwc->ch_id),
881 + lnw->dma_base + CLEAR_TFR);
882 + dma_dbg("Wrote to clear %x\n", (1 << lnwc->ch_id));
883 + iowrite32((1 << lnwc->ch_id),
884 + lnw->dma_base + CLEAR_BLOCK);
885 + iowrite32(UNMASK_INTR_REG(lnwc->ch_id),
886 + lnw->dma_base + MASK_TFR);
887 + spin_unlock_bh(&lnwc->lock);
890 + dma_dbg("Trf interrupt done... \n");
891 + status = ioread32(lnw->dma_base + RAW_ERR);
895 + ch_no = get_ch_num(&status);
897 + dma_err("Ch no is invalid %x, abort!\n", ch_no);
900 + dma_dbg("Got Ch %x, new Status %x \n", ch_no, status);
901 + i = get_ch_index(ch_no);
903 + dma_err("Invalid CH lnwc\n");
906 + dma_dbg("Tx error interrupt %x, No %d Index %d \n",
908 + lnwc = &lnw->ch[i];
909 + if (lnwc == NULL) {
910 + dma_err("Null param lnwc\n");
913 + spin_lock_bh(&lnwc->lock);
914 + lnwc_handle_error1(lnw, lnwc);
915 + iowrite32((1 << lnwc->ch_id),
916 + lnw->dma_base + CLEAR_ERR);
917 + iowrite32(UNMASK_INTR_REG(lnwc->ch_id),
918 + lnw->dma_base + MASK_ERR);
919 + spin_unlock_bh(&lnwc->lock);
921 + dma_dbg("Exiting takslet... \n");
925 +static irqreturn_t lnw_dma1_interrupt(int irq, void *data)
927 + struct lnw_device *lnw = data;
929 + int call_tasklet = 0;
931 + /*check interrupt src*/
932 + lpe_periphral_intr_status(LPE_DMA, &status);
934 + /*not our interrupt*/
939 + status = ioread32(lnw->dma_base + RAW_TFR);
942 + iowrite32((status << 8), lnw->dma_base + MASK_TFR);
945 + status = ioread32(lnw->dma_base + RAW_ERR);
948 + iowrite32(MASK_INTR_REG(status), lnw->dma_base + MASK_ERR);
953 + tasklet_schedule(&lnw->dma->tasklet);
955 + return IRQ_HANDLED;
958 +static void enable_dma1_interrupt(struct lnw_dma_chan *lnwc)
960 + dma_dbg("Called for ch_id %d\n", lnwc->ch_id);
962 + lpe_unmask_periphral_intr(LPE_DMA);
964 + /*en ch interrupts*/
965 + iowrite32(UNMASK_INTR_REG(lnwc->ch_id), lnwc->dma_base + MASK_TFR);
966 + iowrite32(UNMASK_INTR_REG(lnwc->ch_id), lnwc->dma_base + MASK_ERR);
970 +static void disable_dma1_interrupt(struct lnw_dma_chan *lnwc)
972 + /*Check LPE PISR, make sure fwd is disabled*/
973 + lpe_mask_periphral_intr(LPE_DMA);
974 + iowrite32(MASK_INTR_REG(lnwc->ch_id), lnwc->dma_base + MASK_BLOCK);
975 + iowrite32(MASK_INTR_REG(lnwc->ch_id), lnwc->dma_base + MASK_TFR);
976 + iowrite32(MASK_INTR_REG(lnwc->ch_id), lnwc->dma_base + MASK_ERR);
977 + dma_dbg(" called \n");
981 +static int lnw_setup_dma1(struct pci_dev *pdev)
983 + struct lnw_device *device = pci_get_drvdata(pdev);
984 + struct lnwdma_device *dma = NULL;
987 + dma_dbg("setup_dma called \n");
988 + dma = kzalloc(sizeof(*dma), GFP_KERNEL);
990 + dma_err("kzalloc failed \n");
996 + dma->dma_base = device->dma_base;
998 + /* DMA coherent memory pool for DMA descriptor allocations */
999 + dma->dma_pool = pci_pool_create("dma_desc_pool", pdev,
1000 + sizeof(struct lnw_dma_desc),
1002 + if (NULL == dma->dma_pool) {
1003 + dma_err("pci_pool_create failed \n");
1006 + goto err_dma_pool;
1009 + INIT_LIST_HEAD(&dma->common.channels);
1012 + /*init CH structures*/
1013 + for (i = 0; i < MAX_CHAN; i++) {
1014 + struct lnw_dma_chan *lnwch = &dma->ch[i];
1016 + lnwch->chan.device = &dma->common;
1017 + lnwch->chan.cookie = 1;
1018 + lnwch->chan.chan_id = i;
1019 + lnwch->ch_id = get_ch_id(i);
1020 + dma_dbg("Init CH %d, ID %d \n", i, lnwch->ch_id);
1022 + lnwch->dma_base = dma->dma_base;
1023 + lnwch->ch_regs = dma->dma_base + DMA_CH_SIZE * lnwch->ch_id;
1025 + spin_lock_init(&lnwch->lock);
1027 + INIT_LIST_HEAD(&lnwch->active_list);
1028 + INIT_LIST_HEAD(&lnwch->queue);
1029 + INIT_LIST_HEAD(&lnwch->free_list);
1030 + /*mask interrupts*/
1031 + iowrite32(MASK_INTR_REG(lnwch->ch_id),
1032 + dma->dma_base + MASK_BLOCK);
1033 + iowrite32(MASK_INTR_REG(lnwch->ch_id),
1034 + dma->dma_base + MASK_SRC_TRAN);
1035 + iowrite32(MASK_INTR_REG(lnwch->ch_id),
1036 + dma->dma_base + MASK_DST_TRAN);
1037 + iowrite32(MASK_INTR_REG(lnwch->ch_id),
1038 + dma->dma_base + MASK_ERR);
1039 + iowrite32(MASK_INTR_REG(lnwch->ch_id),
1040 + dma->dma_base + MASK_TFR);
1042 + disable_dma1_interrupt(lnwch);
1043 + list_add_tail(&lnwch->chan.device_node, &dma->common.channels);
1046 + /*init dma structure*/
1047 + dma_cap_zero(dma->common.cap_mask);
1048 + dma_cap_set(DMA_MEMCPY, dma->common.cap_mask);
1049 + dma_cap_set(DMA_SLAVE, dma->common.cap_mask);
1050 + dma_cap_set(DMA_PRIVATE, dma->common.cap_mask);
1051 + dma->common.dev = &pdev->dev;
1052 + dma->common.chancnt = MAX_CHAN;
1054 + dma->common.device_alloc_chan_resources =
1055 + lnw_dma1_alloc_chan_resources;
1056 + dma->common.device_free_chan_resources =
1057 + lnw_dma1_free_chan_resources;
1059 + dma->common.device_is_tx_complete = lnw_dma1_tx_is_complete;
1060 + dma->common.device_prep_dma_memcpy = lnw_dma1_prep_memcpy;
1061 + dma->common.device_issue_pending = lnw_dma1_issue_pending;
1062 + dma->common.device_prep_slave_sg = lnw_dma1_prep_slave_sg;
1063 + dma->common.device_terminate_all = lnw_dma1_terminate_all;
1065 + /*enable dma cntrl*/
1066 + iowrite32(REG_BIT0, dma->dma_base + DMA_CFG);
1069 + err = request_irq(pdev->irq, lnw_dma1_interrupt,
1070 + IRQF_SHARED, lnw_dma1_pci.name, device);
1074 + /*register device w/ engine*/
1075 + err = dma_async_device_register(&dma->common);
1077 + dma_err("device_register failed: %d \n", err);
1080 + tasklet_init(&dma->tasklet, dma_tasklet1, (unsigned long)dma);
1081 + dma_dbg("...done \n");
1085 + free_irq(pdev->irq, device);
1087 + pci_pool_destroy(dma->dma_pool);
1091 + dma_err("setup_dma failed: %d \n", err);
1096 +static void lnwdma_shutdown1(struct pci_dev *pdev)
1098 + struct lnw_device *device = pci_get_drvdata(pdev);
1100 + dma_dbg("shutdown called \n");
1101 + dma_async_device_unregister(&device->dma->common);
1102 + pci_pool_destroy(device->dma->dma_pool);
1103 + if (device->dma_base)
1104 + iounmap(device->dma_base);
1105 + free_irq(pdev->irq, device);
1109 +static int __devinit
1110 +lnw_dma1_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1112 + struct lnw_device *device = NULL;
1113 + u32 base_addr = 0, bar_size = 0;
1116 + dma_info("probe called for %x \n", pdev->device);
1117 + err = pci_enable_device(pdev);
1119 + goto err_enable_device;
1121 + err = pci_request_regions(pdev, lnw_dma1_pci.name);
1123 + goto err_request_regions;
1125 + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
1127 + goto err_set_dma_mask;
1129 + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
1131 + goto err_set_dma_mask;
1133 + device = kzalloc(sizeof(*device), GFP_KERNEL);
1135 + dma_err("kzalloc failed \n");
1139 + device->pdev = pci_dev_get(pdev);
1141 + base_addr = pci_resource_start(pdev, 0);
1142 + bar_size = pci_resource_len(pdev, 0);
1143 + dma_dbg("BAR0 %x Size %x \n", base_addr, bar_size);
1144 + device->dma_base = ioremap_nocache(base_addr, DMA_REG_SIZE);
1145 + if (!device->dma_base) {
1146 + dma_err("ioremap failed \n");
1148 + goto err_ioremap1;
1150 + pci_set_drvdata(pdev, device);
1151 + pci_set_master(pdev);
1153 + err = lnw_setup_dma1(pdev);
1160 + iounmap(device->dma_base);
1162 + pci_dev_put(pdev);
1166 + pci_release_regions(pdev);
1167 + pci_disable_device(pdev);
1168 +err_request_regions:
1170 + dma_err("Probe failed %d\n", err);
1174 +static void __devexit lnw_dma1_remove(struct pci_dev *pdev)
1176 + struct lnw_device *device = pci_get_drvdata(pdev);
1178 + lnwdma_shutdown1(pdev);
1179 + pci_dev_put(pdev);
1181 + pci_release_regions(pdev);
1182 + pci_disable_device(pdev);
1185 +static int __init lnw_dma1_init(void)
1187 + dma_info("LNW DMA Driver\n Version %s \n", LNW_DMA_DRIVER_VERSION);
1188 + return pci_register_driver(&lnw_dma1_pci);
1190 +late_initcall(lnw_dma1_init);
1192 +static void __exit lnw_dma1_exit(void)
1194 + pci_unregister_driver(&lnw_dma1_pci);
1196 +module_exit(lnw_dma1_exit);
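Since DMAC1 registers itself as a DMA_PRIVATE memcpy/slave engine and lnw_dma1_prep_memcpy() pulls its configuration from chan->private, a client would drive it through the stock 2.6.33 dmaengine API roughly as sketched below. This is illustrative only and not part of the patch: the filter function, the polling loop and the mem-to-mem settings are assumptions, and <linux/dmaengine.h> plus <linux/lnw_dma.h> are taken as included.

/* Sketch only -- not part of this patch. */
static bool lnw_dma_example_filter(struct dma_chan *chan, void *param)
{
	return true;	/* accept whichever channel the core offers */
}

static int lnw_dma_example_copy(dma_addr_t dst, dma_addr_t src, size_t len)
{
	struct lnw_dma_slave slave = {
		.cfg_mode  = LNW_DMA_MEM_TO_MEM,
		.hs_mode   = LNW_DMA_SW_HS,
		.src_width = LNW_DMA_WIDTH_32BIT,
		.dst_width = LNW_DMA_WIDTH_32BIT,
		.src_msize = LNW_DMA_MSIZE_16,
		.dst_msize = LNW_DMA_MSIZE_16,
	};
	struct dma_async_tx_descriptor *txd;
	dma_cookie_t cookie, done, used;
	struct dma_chan *chan;
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);
	chan = dma_request_channel(mask, lnw_dma_example_filter, NULL);
	if (!chan)
		return -ENODEV;

	chan->private = &slave;		/* read by lnw_dma1_prep_memcpy() */
	txd = chan->device->device_prep_dma_memcpy(chan, dst, src, len, 0);
	if (!txd) {
		dma_release_channel(chan);
		return -EBUSY;
	}
	cookie = txd->tx_submit(txd);
	chan->device->device_issue_pending(chan);

	/* busy-wait for completion; a real client would use the callback */
	while (dma_async_is_tx_complete(chan, cookie, &done, &used) !=
			DMA_SUCCESS)
		cpu_relax();

	dma_release_channel(chan);
	return 0;
}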
1198 Index: linux-2.6.33/drivers/dma/lnw_dmac2.c
1199 ===================================================================
1201 +++ linux-2.6.33/drivers/dma/lnw_dmac2.c
1204 + * lnw_dmac2.c - Intel Langwell DMA Drivers
1206 + * Copyright (C) 2008-09 Intel Corp
1207 + * Author: Vinod Koul <vinod.koul@intel.com>
1208 + * The driver design is based on dw_dmac driver
1209 + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
1211 + * This program is free software; you can redistribute it and/or modify
1212 + * it under the terms of the GNU General Public License as published by
1213 + * the Free Software Foundation; version 2 of the License.
1215 + * This program is distributed in the hope that it will be useful, but
1216 + * WITHOUT ANY WARRANTY; without even the implied warranty of
1217 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
1218 + * General Public License for more details.
1220 + * You should have received a copy of the GNU General Public License along
1221 + * with this program; if not, write to the Free Software Foundation, Inc.,
1222 + * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
1224 + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
1228 +#include <linux/init.h>
1229 +#include <linux/module.h>
1230 +#include <linux/pci.h>
1231 +#include <linux/interrupt.h>
1232 +#include <linux/lnw_dma.h>
1235 +#include "lnw_dma_regs.h"
1237 +MODULE_AUTHOR("Vinod Koul <vinod.koul@intel.com>");
1238 +MODULE_DESCRIPTION("Intel (R) Moorestown Langwell DMAC2 Driver");
1239 +MODULE_LICENSE("GPL v2");
1240 +MODULE_VERSION(LNW_DMA_DRIVER_VERSION);
1244 +#define CH_BLOCK_SIZE 2047
1246 +static int __devinit lnw_dma2_probe(struct pci_dev *pdev,
1247 + const struct pci_device_id *id);
1248 +static void __devexit lnw_dma2_remove(struct pci_dev *pdev);
1249 +static void enable_dma2_interrupt(struct lnw_dma_chan *lnwc);
1251 +struct lnw_device {
1252 + struct pci_dev *pdev;
1253 + void __iomem *dma_base;
1254 + struct lnwdma_device *dma;
1257 +/*Channel dependent code: if the channel number mapping changes, change it only here*/
1258 +static int get_ch_id(int index)
1262 + else if (index == 1)
1268 +static int get_ch_index(int ch_id)
1270 + if (ch_id == DMA_CH0)
1272 + if (ch_id == DMA_CH1)
1278 +static int get_ch_num(int *status)
1280 + if (*status & (1 << DMA_CH0)) {
1281 + *status = *status & (~(1 << DMA_CH0));
1283 + } else if (*status & (1 << DMA_CH1)) {
1284 + *status = *status & (~(1 << DMA_CH1));
1290 +static int get_block_ts(int len, int tx_width)
1292 + int byte_width = 0, block_ts = 0;
1294 + switch (tx_width) {
1295 + case LNW_DMA_WIDTH_8BIT:
1298 + case LNW_DMA_WIDTH_16BIT:
1301 + case LNW_DMA_WIDTH_32BIT:
1307 + block_ts = len/byte_width;
1308 + if (block_ts > CH_BLOCK_SIZE)
1309 +		block_ts = CH_BLOCK_SIZE;
1313 +static struct lnw_dma_desc *lnwc_desc_get(struct lnw_dma_chan *lnwc)
1315 + struct lnw_dma_desc *desc, *_desc;
1316 + struct lnw_dma_desc *ret = NULL;
1318 + dma_dbg("called \n");
1319 + spin_lock_bh(&lnwc->lock);
1320 + list_for_each_entry_safe(desc, _desc, &lnwc->free_list, desc_node) {
1321 + if (async_tx_test_ack(&desc->txd)) {
1322 + list_del(&desc->desc_node);
1324 + dma_dbg("got free desc \n");
1328 + spin_unlock_bh(&lnwc->lock);
1332 +static void lnwc_desc_put(struct lnw_dma_chan *lnwc, struct lnw_dma_desc *desc)
1335 + spin_lock_bh(&lnwc->lock);
1336 + list_add_tail(&desc->desc_node, &lnwc->free_list);
1337 + spin_unlock_bh(&lnwc->lock);
1341 +/* Called with lock held and bh disabled */
1342 +static void lnwc_dostart(struct lnw_dma_chan *lnwc, struct lnw_dma_desc *first)
1344 + struct lnwdma_device *lnw = to_lnwdma_device(lnwc->chan.device);
1346 + dma_dbg("called \n");
1347 + /* channel is idle */
1348 + if (lnwc->in_use && test_ch_en(lnwc->dma_base, lnwc->ch_id)) {
1350 + dma_err("channel is busy \n");
1351 + /* The tasklet will hopefully advance the queue... */
1355 + /*write registers and en*/
1356 + iowrite32(first->sar, lnwc->ch_regs + SAR);
1357 + iowrite32(first->dar, lnwc->ch_regs + DAR);
1358 + iowrite32(first->cfg_hi, lnwc->ch_regs + CFG_HIGH);
1359 + iowrite32(first->cfg_lo, lnwc->ch_regs + CFG_LOW);
1360 + iowrite32(first->ctl_lo, lnwc->ch_regs + CTL_LOW);
1361 + iowrite32(first->ctl_hi, lnwc->ch_regs + CTL_HIGH);
1362 + dma_dbg("TX SAR %lx, DAR %lx, CFGL %x, CFGH %x, CTLH %x, CTLL %x \n",
1363 + first->sar, first->dar, first->cfg_hi,
1364 + first->cfg_lo, first->ctl_hi, first->ctl_lo);
1366 + iowrite32(ENABLE_CHANNEL(lnwc->ch_id), lnw->dma_base + DMA_CHAN_EN);
1367 + first->status = DMA_IN_PROGRESS;
1371 +lnwc_descriptor_complete(struct lnw_dma_chan *lnwc, struct lnw_dma_desc *desc)
1373 + struct dma_async_tx_descriptor *txd = &desc->txd;
1374 + dma_async_tx_callback callback = NULL;
1375 + dma_async_tx_callback callback_txd = NULL;
1376 + void *param = NULL;
1377 + void *param_txd = NULL;
1378 + u32 sar, dar, len;
1379 + union lnw_dma_ctl_hi ctl_hi;
1381 + dma_dbg("called \n");
1383 + /*check if full tx is complete or not*/
1384 + sar = ioread32(lnwc->ch_regs + SAR);
1385 + dar = ioread32(lnwc->ch_regs + DAR);
1387 + if (desc->dirn == DMA_FROM_DEVICE)
1388 + len = dar - desc->dar;
1390 + len = sar - desc->sar;
1392 + dma_dbg("SAR %x DAR %x, DMA done: %x \n", sar, dar, len);
1393 + if (desc->len > len) {
1394 + dma_dbg("dirn = %d\n", desc->dirn);
1395 + dma_dbg("SAR %x DAR %x, len: %x \n", sar, dar, len);
1396 + /*we have to copy more bytes*/
1398 + ctl_hi.ctl_hi = desc->ctl_hi;
1399 + ctl_hi.ctlx.block_ts = get_block_ts(desc->len, desc->width);
1400 + dma_dbg("setting for %x bytes \n", ctl_hi.ctlx.block_ts);
1401 + desc->ctl_hi = ctl_hi.ctl_hi;
1402 + if (desc->cfg_mode == LNW_DMA_MEM_TO_MEM) {
1405 + } else if (desc->dirn == DMA_TO_DEVICE)
1407 + else if (desc->dirn == DMA_FROM_DEVICE)
1411 + dma_dbg("New SAR %x DAR %x \n", sar, dar);
1412 + lnwc_dostart(lnwc, desc);
1416 + lnwc->completed = txd->cookie;
1417 + callback = desc->callback;
1418 + param = desc->callback_param;
1419 + callback_txd = txd->callback;
1420 + param_txd = txd->callback_param;
1422 + list_move(&desc->desc_node, &lnwc->free_list);
1424 + spin_unlock_bh(&lnwc->lock);
1425 + dma_dbg("Now we are calling callback \n");
1426 + if (callback_txd) {
1427 + dma_dbg("lnw TXD callback set ... calling \n");
1428 + callback_txd(param_txd);
1429 + spin_lock_bh(&lnwc->lock);
1433 + dma_dbg("lnw callback set ... calling \n");
1436 + spin_lock_bh(&lnwc->lock);
1440 +/*check desc, mark as complete when tx is complete*/
1442 +lnwc_scan_descriptors(struct lnwdma_device *lnw, struct lnw_dma_chan *lnwc)
1444 + struct lnw_dma_desc *desc = NULL, *_desc = NULL;
1447 + dma_dbg("called \n");
1448 + status_xfer = ioread32(lnwc->dma_base + RAW_TFR);
1449 + status_xfer = (status_xfer >> lnwc->ch_id) & 0x1;
1450 + dma_dbg("ch[%d]: status_xfer %x \n", lnwc->ch_id, status_xfer);
1454 + /*tx is complete*/
1455 + list_for_each_entry_safe(desc, _desc, &lnwc->active_list, desc_node) {
1458 + if (desc->status == DMA_IN_PROGRESS) {
1459 + desc->status = DMA_SUCCESS;
1460 + lnwc_descriptor_complete(lnwc, desc);
1466 +/*****************************************************************************
1468 +static dma_cookie_t lnw_dma2_tx_submit(struct dma_async_tx_descriptor *tx)
1470 + struct lnw_dma_desc *desc = to_lnw_dma_desc(tx);
1471 + struct lnw_dma_chan *lnwc = to_lnw_dma_chan(tx->chan);
1472 + dma_cookie_t cookie;
1474 + dma_dbg("called \n");
1476 + spin_lock_bh(&lnwc->lock);
1477 + cookie = lnwc->chan.cookie;
1482 + lnwc->chan.cookie = cookie;
1483 + desc->txd.cookie = cookie;
1485 + if (list_empty(&lnwc->active_list)) {
1486 + lnwc_dostart(lnwc, desc);
1487 + list_add_tail(&desc->desc_node, &lnwc->active_list);
1489 + list_add_tail(&desc->desc_node, &lnwc->queue);
1491 + spin_unlock_bh(&lnwc->lock);
1496 +static void lnw_dma2_issue_pending(struct dma_chan *chan)
1498 + struct lnw_dma_chan *lnwc = to_lnw_dma_chan(chan);
1500 + spin_lock_bh(&lnwc->lock);
1501 + if (!list_empty(&lnwc->queue))
1502 + lnwc_scan_descriptors(to_lnwdma_device(chan->device), lnwc);
1503 + spin_unlock_bh(&lnwc->lock);
1506 +static enum dma_status
1507 +lnw_dma2_tx_is_complete(struct dma_chan *chan,
1508 + dma_cookie_t cookie,
1509 + dma_cookie_t *done,
1510 + dma_cookie_t *used)
1512 + struct lnw_dma_chan *lnwc = to_lnw_dma_chan(chan);
1513 + dma_cookie_t last_used;
1514 + dma_cookie_t last_complete;
1517 + last_complete = lnwc->completed;
1518 + last_used = chan->cookie;
1520 + ret = dma_async_is_complete(cookie, last_complete, last_used);
1521 + if (ret != DMA_SUCCESS) {
1522 + lnwc_scan_descriptors(to_lnwdma_device(chan->device), lnwc);
1524 + last_complete = lnwc->completed;
1525 + last_used = chan->cookie;
1527 + ret = dma_async_is_complete(cookie, last_complete, last_used);
1531 + *done = last_complete;
1533 + *used = last_used;
1538 +static void lnw_dma2_terminate_all(struct dma_chan *chan)
1540 + struct lnw_dma_chan *lnwc = to_lnw_dma_chan(chan);
1541 + struct lnwdma_device *lnw = to_lnwdma_device(chan->device);
1542 + struct lnw_dma_desc *desc, *_desc;
1545 + /* ASSERT: channel is idle */
1546 + if (lnwc->in_use == false) {
1547 + /*ch is not in use, wrong call*/
1550 + spin_lock_bh(&lnwc->lock);
1551 + list_splice_init(&lnwc->free_list, &list);
1552 + lnwc->descs_allocated = 0;
1553 + lnwc->slave = NULL;
1555 + /* Disable interrupts*/
1556 + iowrite32(MASK_INTR_REG(lnwc->ch_id), lnw->dma_base + MASK_BLOCK);
1557 + iowrite32(MASK_INTR_REG(lnwc->ch_id), lnw->dma_base + MASK_ERR);
1559 + spin_unlock_bh(&lnwc->lock);
1560 + list_for_each_entry_safe(desc, _desc, &list, desc_node) {
1561 + dma_dbg("freeing descriptor %p\n", desc);
1562 + pci_pool_free(lnw->dma_pool, desc, desc->txd.phys);
1568 +static struct dma_async_tx_descriptor *
1569 +lnw_dma2_prep_slave_sg(struct dma_chan *chan,
1570 + struct scatterlist *sgl, unsigned int sg_len,
1571 + enum dma_data_direction direction,
1572 + unsigned long flags)
1574 + /*not supported now*/
1578 +static struct dma_async_tx_descriptor *
1579 +lnw_dma2_prep_memcpy(struct dma_chan *chan, dma_addr_t dest,
1580 + dma_addr_t src, size_t len, unsigned long flags)
1582 + struct lnw_dma_chan *lnwc;
1583 + struct lnw_dma_desc *desc = NULL;
1584 + struct lnw_dma_slave *lnws;
1585 + union lnw_dma_ctl_lo ctl_lo;
1586 + union lnw_dma_ctl_hi ctl_hi;
1587 + union lnw_dma_cfg_lo cfg_lo;
1588 + union lnw_dma_cfg_hi cfg_hi;
1589 + enum lnw_dma_width width = 0;
1591 + dma_dbg("called \n");
1596 + lnws = chan->private;
1599 + lnwc = to_lnw_dma_chan(chan);
1602 + dma_dbg("called for CH %d\n", lnwc->ch_id);
1603 + dma_dbg("Cfg passed Mode %x, Dirn %x, HS %x, Width %x \n",
1604 + lnws->cfg_mode, lnws->dirn, lnws->hs_mode, lnws->src_width);
1606 + /*calculate CFG_LO*/
1607 + if (lnws->hs_mode == LNW_DMA_SW_HS) {
1608 + cfg_lo.cfg_lo = 0;
1609 + cfg_lo.cfgx.hs_sel_dst = 1;
1610 + cfg_lo.cfgx.hs_sel_src = 1;
1611 + } else if (lnws->hs_mode == LNW_DMA_HW_HS)
1612 + cfg_lo.cfg_lo = 0x00000;
1614 + /*calculate CFG_HI*/
1615 + if (lnws->cfg_mode == LNW_DMA_MEM_TO_MEM) {
1617 + dma_dbg("CFG: Mem to mem dma \n");
1618 + cfg_hi.cfg_hi = 0;
1620 + dma_dbg("HW DMA \n");
1621 + cfg_hi.cfg_hi = 0;
1622 + cfg_hi.cfgx.protctl = 0x1; /*default value*/
1623 + cfg_hi.cfgx.src_per = get_ch_index(lnwc->ch_id);
1624 + cfg_hi.cfgx.dst_per = get_ch_index(lnwc->ch_id);
1627 + /*calculate CTL_HI*/
1628 + ctl_hi.ctlx.reser = 0;
1629 + width = lnws->src_width;
1630 + ctl_hi.ctlx.block_ts = get_block_ts(len, width);
1632 + /*calculate CTL_LO*/
1633 + ctl_lo.ctl_lo = 0;
1634 + ctl_lo.ctlx.int_en = 1;
1635 + ctl_lo.ctlx.dst_tr_width = lnws->dst_width;
1636 + ctl_lo.ctlx.src_tr_width = lnws->src_width;
1637 + ctl_lo.ctlx.dst_msize = lnws->src_msize;
1638 + ctl_lo.ctlx.src_msize = lnws->dst_msize;
1640 + if (lnws->cfg_mode == LNW_DMA_MEM_TO_MEM) {
1641 + dma_dbg("CTL: Mem to mem dma \n");
1642 + ctl_lo.ctlx.tt_fc = 0;
1643 + ctl_lo.ctlx.sinc = 0;
1644 + ctl_lo.ctlx.dinc = 0;
1646 + if (lnws->dirn == DMA_TO_DEVICE) {
1647 + dma_dbg("CTL: DMA_TO_DEVICE \n");
1648 + ctl_lo.ctlx.sinc = 0;
1649 + ctl_lo.ctlx.dinc = 2;
1650 + ctl_lo.ctlx.tt_fc = 1;
1651 + } else if (lnws->dirn == DMA_FROM_DEVICE) {
1652 + dma_dbg("CTL: DMA_FROM_DEVICE \n");
1653 + ctl_lo.ctlx.sinc = 2;
1654 + ctl_lo.ctlx.dinc = 0;
1655 + ctl_lo.ctlx.tt_fc = 2;
1659 + dma_dbg("Calc CTL LO %x, CTL HI %x, CFG LO %x, CFG HI %x\n",
1660 + ctl_lo.ctl_lo, ctl_hi.ctl_hi, cfg_lo.cfg_lo, cfg_hi.cfg_hi);
1662 + enable_dma2_interrupt(lnwc);
1664 + desc = lnwc_desc_get(lnwc);
1666 + goto err_desc_get;
1668 +	desc->dar = dest;
1670 + desc->cfg_hi = cfg_hi.cfg_hi;
1671 + desc->cfg_lo = cfg_lo.cfg_lo;
1672 + desc->ctl_lo = ctl_lo.ctl_lo;
1673 + desc->ctl_hi = ctl_hi.ctl_hi;
1674 + desc->width = width;
1675 + desc->dirn = lnws->dirn;
1676 + if (lnws->callback) {
1677 + desc->callback = lnws->callback;
1678 + desc->callback_param = lnws->callback_param;
1679 + dma_dbg("Callback passed... setting\n");
1681 + desc->callback = NULL;
1682 + return &desc->txd;
1685 + dma_err("Failed to get desc \n");
1686 + lnwc_desc_put(lnwc, desc);
1691 +static void lnw_dma2_free_chan_resources(struct dma_chan *chan)
1693 + struct lnw_dma_chan *lnwc = to_lnw_dma_chan(chan);
1694 + struct lnwdma_device *lnw = to_lnwdma_device(chan->device);
1695 + struct lnw_dma_desc *desc, *_desc;
1697 + dma_dbg("..called for ch_id %d, lnwch_id %d\n",
1698 + chan->chan_id, lnwc->ch_id);
1699 + if (true == lnwc->in_use) {
1700 + /*trying to free ch in use!!!!!*/
1701 + dma_err("trying to free ch in use \n");
1704 + spin_lock_bh(&lnwc->lock);
1705 + lnwc->descs_allocated = 0;
1706 + list_for_each_entry_safe(desc, _desc, &lnwc->active_list, desc_node) {
1707 + dma_dbg("del active \n");
1708 + list_del(&desc->desc_node);
1709 + pci_pool_free(lnw->dma_pool, desc, desc->txd.phys);
1711 + list_for_each_entry_safe(desc, _desc, &lnwc->free_list, desc_node) {
1712 + list_del(&desc->desc_node);
1713 + pci_pool_free(lnw->dma_pool, desc, desc->txd.phys);
1715 + list_for_each_entry_safe(desc, _desc, &lnwc->queue, desc_node) {
1716 + dma_dbg("del queue \n");
1717 + list_del(&desc->desc_node);
1718 + pci_pool_free(lnw->dma_pool, desc, desc->txd.phys);
1720 + spin_unlock_bh(&lnwc->lock);
1721 + lnwc->in_use = false;
1722 + chan->client_count--;
1723 + /* Disable CH interrupts*/
1724 + iowrite32(MASK_INTR_REG(lnwc->ch_id), lnw->dma_base + MASK_BLOCK);
1725 + iowrite32(MASK_INTR_REG(lnwc->ch_id), lnw->dma_base + MASK_ERR);
1726 + dma_dbg("done \n");
1729 +static int lnw_dma2_alloc_chan_resources(struct dma_chan *chan)
1731 + struct lnw_dma_chan *lnwc = to_lnw_dma_chan(chan);
1732 + struct lnwdma_device *lnw = to_lnwdma_device(chan->device);
1733 + struct lnw_dma_desc *desc;
1737 + dma_dbg("called \n");
1739 + /* ASSERT: channel is idle */
1740 + if (test_ch_en(lnw->dma_base, lnwc->ch_id)) {
1741 + /*ch is not idle*/
1742 + dma_err(".ch not idle\n");
1745 + dma_dbg("..called for ch_id %d, lnwch_id %d\n",
1746 + chan->chan_id, lnwc->ch_id);
1747 + lnwc->completed = chan->cookie = 1;
1749 + chan->client_count++;
1751 + spin_lock_bh(&lnwc->lock);
1752 + while (lnwc->descs_allocated < DESCS_PER_CHANNEL) {
1753 + spin_unlock_bh(&lnwc->lock);
1754 + desc = pci_pool_alloc(lnw->dma_pool, GFP_KERNEL, &phys);
1756 + dma_err("desc failed\n");
1760 + dma_async_tx_descriptor_init(&desc->txd, chan);
1761 + desc->txd.tx_submit = lnw_dma2_tx_submit;
1762 + desc->txd.flags = DMA_CTRL_ACK;
1763 + desc->txd.phys = phys;
1764 + spin_lock_bh(&lnwc->lock);
1765 + i = ++lnwc->descs_allocated;
1766 + list_add_tail(&desc->desc_node, &lnwc->free_list);
1768 + spin_unlock_bh(&lnwc->lock);
1769 + lnwc->in_use = false;
1770 + dma_dbg("Desc alloc done ret: %d desc\n", i);
1774 +static void lnwc_handle_error(struct lnwdma_device *lnw,
1775 + struct lnw_dma_chan *lnwc)
1777 + lnwc_scan_descriptors(lnw, lnwc);
1780 +/******************************************************************************
1783 +static struct pci_device_id lnw_dma2_ids[] = {
1784 + { PCI_VENDOR_ID_INTEL, 0x0813, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
1788 +MODULE_DEVICE_TABLE(pci, lnw_dma2_ids);
1790 +static struct pci_driver lnw_dma2_pci = {
1791 + .name = "Intel LNW DMA2",
1792 + .id_table = lnw_dma2_ids,
1793 + .probe = lnw_dma2_probe,
1794 + .remove = __devexit_p(lnw_dma2_remove),
1797 +static void dma_tasklet(unsigned long data)
1799 + struct lnwdma_device *lnw = NULL;
1800 + struct lnw_dma_chan *lnwc = NULL;
1804 + dma_dbg("called \n");
1805 + lnw = (struct lnwdma_device *)data;
1806 + if (lnw == NULL) {
1807 + dma_err("Null param \n");
1811 + status = ioread32(lnw->dma_base + RAW_TFR);
1812 + dma_dbg("RAW_TFR %x \n", status);
1815 + ch_no = get_ch_num(&status);
1817 + dma_err("Ch no is invalid %x, abort!\n", ch_no);
1820 + dma_dbg("Got Ch %x, new Status %x \n", ch_no, status);
1821 + i = get_ch_index(ch_no);
1823 + dma_err("Invalid ch index %x\n", i);
1826 + dma_dbg("Tx complete interrupt %x, Ch No %d Index %d \n",
1827 + status, ch_no, i);
1828 + lnwc = &lnw->ch[i];
1829 + if (lnwc == NULL) {
1830 + dma_err("Null param lnwc\n");
1833 + dma_dbg("CH %x \n", lnwc->ch_id);
1834 + spin_lock_bh(&lnwc->lock);
1835 + lnwc_scan_descriptors(lnw, lnwc);
1836 + dma_dbg("Scan of desc... complete, unmasking\n");
1837 + iowrite32((1 << lnwc->ch_id),
1838 + lnw->dma_base + CLEAR_TFR);
1839 + dma_dbg("Wrote to clear %x\n", (1 << lnwc->ch_id));
1840 + iowrite32((1 << lnwc->ch_id),
1841 + lnw->dma_base + CLEAR_BLOCK);
1842 + iowrite32(UNMASK_INTR_REG(lnwc->ch_id),
1843 + lnw->dma_base + MASK_TFR);
1844 + spin_unlock_bh(&lnwc->lock);
1847 + dma_dbg("Trf interrupt done... \n");
1848 + status = ioread32(lnw->dma_base + RAW_ERR);
1851 + ch_no = get_ch_num(&status);
1853 + dma_err("Ch no is invalid %x, abort!\n", ch_no);
1856 + dma_dbg("Got Ch %x, new Status %x \n", ch_no, status);
1857 + i = get_ch_index(ch_no);
1859 + dma_err("Invalid CH lnwc\n");
1862 + dma_dbg("Tx error interrupt %x, No %d Index %d \n",
1863 + status, ch_no, i);
1864 + lnwc = &lnw->ch[i];
1865 + if (lnwc == NULL) {
1866 + dma_err("Null param lnwc\n");
1869 + spin_lock_bh(&lnwc->lock);
1870 + lnwc_handle_error(lnw, lnwc);
1871 + iowrite32((1 << lnwc->ch_id),
1872 + lnw->dma_base + CLEAR_ERR);
1873 + iowrite32(UNMASK_INTR_REG(lnwc->ch_id),
1874 + lnw->dma_base + MASK_ERR);
1875 + spin_unlock_bh(&lnwc->lock);
1877 + dma_dbg("Exiting takslet... \n");
1881 +static irqreturn_t lnw_dma2_interrupt(int irq, void *data)
1883 + struct lnw_device *lnw = data;
1885 + int call_tasklet = 0;
1887 +	/*will mask the interrupt for now and schedule the tasklet;
1888 +	  the tasklet should unmask and clear it*/
1889 + status = ioread32(lnw->dma_base + STATUS_TFR);
1892 + iowrite32((status << 8), lnw->dma_base + MASK_TFR);
1895 + status = ioread32(lnw->dma_base + STATUS_ERR);
1898 + iowrite32(MASK_INTR_REG(status), lnw->dma_base + MASK_ERR);
1903 + tasklet_schedule(&lnw->dma->tasklet);
1905 + return IRQ_HANDLED;
1908 +static void enable_dma2_interrupt(struct lnw_dma_chan *lnwc)
1910 + dma_dbg("Called for ch_id %d\n", lnwc->ch_id);
1912 + iowrite32(REG_BIT0, lnwc->dma->dma_base + DMA_CFG);
1913 + /*en ch interrupts */
1914 + iowrite32(UNMASK_INTR_REG(lnwc->ch_id), lnwc->dma_base + MASK_TFR);
1915 + iowrite32(UNMASK_INTR_REG(lnwc->ch_id), lnwc->dma_base + MASK_ERR);
1919 +static void disable_dma2_interrupt(struct lnw_device *device)
1924 + dma_dbg(" called \n");
1930 +static int lnw_setup_dma2(struct pci_dev *pdev)
1932 + struct lnw_device *device = pci_get_drvdata(pdev);
1933 + struct lnwdma_device *dma = NULL;
1936 + dma_dbg("setup_dma called \n");
1937 + dma = kzalloc(sizeof(*dma), GFP_KERNEL);
1938 + if (NULL == dma) {
1939 + dma_err("kzalloc failed \n");
1943 + device->dma = dma;
1945 + dma->dma_base = device->dma_base;
1947 + /* DMA coherent memory pool for DMA descriptor allocations */
1948 + dma->dma_pool = pci_pool_create("dma_desc_pool", pdev,
1949 + sizeof(struct lnw_dma_desc),
1951 + if (NULL == dma->dma_pool) {
1952 + dma_err("pci_pool_create failed \n");
1955 + goto err_dma_pool;
1958 + INIT_LIST_HEAD(&dma->common.channels);
1961 + /*init CH structures*/
1962 + for (i = 0; i < MAX_CHAN; i++) {
1963 + struct lnw_dma_chan *lnwch = &dma->ch[i];
1965 + lnwch->chan.device = &dma->common;
1966 + lnwch->chan.cookie = 1;
1967 + lnwch->chan.chan_id = i;
1968 + lnwch->ch_id = get_ch_id(i);
1969 + dma_dbg("Init CH %d, ID %d \n", i, lnwch->ch_id);
1971 + lnwch->dma_base = dma->dma_base;
1972 + lnwch->ch_regs = dma->dma_base + DMA_CH_SIZE * lnwch->ch_id;
1974 + spin_lock_init(&lnwch->lock);
1976 + INIT_LIST_HEAD(&lnwch->active_list);
1977 + INIT_LIST_HEAD(&lnwch->queue);
1978 + INIT_LIST_HEAD(&lnwch->free_list);
1980 + /*mask interrupts*/
1981 + iowrite32(MASK_INTR_REG(lnwch->ch_id),
1982 + dma->dma_base + MASK_BLOCK);
1983 + iowrite32(MASK_INTR_REG(lnwch->ch_id),
1984 + dma->dma_base + MASK_SRC_TRAN);
1985 + iowrite32(MASK_INTR_REG(lnwch->ch_id),
1986 + dma->dma_base + MASK_DST_TRAN);
1987 + iowrite32(MASK_INTR_REG(lnwch->ch_id),
1988 + dma->dma_base + MASK_ERR);
1989 + iowrite32(MASK_INTR_REG(lnwch->ch_id),
1990 + dma->dma_base + MASK_TFR);
1992 + dma_dbg("Init CH %d, ID %d \n", i, lnwch->ch_id);
1993 + list_add_tail(&lnwch->chan.device_node, &dma->common.channels);
1996 + /*init dma structure*/
1997 + dma_cap_zero(dma->common.cap_mask);
1998 + dma_cap_set(DMA_MEMCPY, dma->common.cap_mask);
1999 + dma_cap_set(DMA_SLAVE, dma->common.cap_mask);
2000 + dma_cap_set(DMA_PRIVATE, dma->common.cap_mask);
2001 + dma->common.dev = &pdev->dev;
2002 + dma->common.chancnt = MAX_CHAN;
2004 + dma->common.device_alloc_chan_resources =
2005 + lnw_dma2_alloc_chan_resources;
2006 + dma->common.device_free_chan_resources =
2007 + lnw_dma2_free_chan_resources;
2009 + dma->common.device_is_tx_complete = lnw_dma2_tx_is_complete;
2010 + dma->common.device_prep_dma_memcpy = lnw_dma2_prep_memcpy;
2011 + dma->common.device_issue_pending = lnw_dma2_issue_pending;
2012 + dma->common.device_prep_slave_sg = lnw_dma2_prep_slave_sg;
2013 + dma->common.device_terminate_all = lnw_dma2_terminate_all;
2015 + /*enable dma cntrl*/
2016 + iowrite32(REG_BIT0, dma->dma_base + DMA_CFG);
2018 + disable_dma2_interrupt(device);
2021 + err = request_irq(pdev->irq, lnw_dma2_interrupt,
2022 + 0, lnw_dma2_pci.name, device);
2026 + /*register device w/ engine*/
2027 + err = dma_async_device_register(&dma->common);
2029 + dma_err("device_register failed: %d \n", err);
2032 + tasklet_init(&dma->tasklet, dma_tasklet, (unsigned long)dma);
2033 + dma_dbg("...done\n");
2037 + free_irq(pdev->irq, device);
2039 + pci_pool_destroy(dma->dma_pool);
2043 + dma_err("setup_dma failed: %d \n", err);
2048 +static void lnwdma_shutdown(struct pci_dev *pdev)
2050 + struct lnw_device *device = pci_get_drvdata(pdev);
2052 + dma_dbg("shutdown called \n");
2053 + dma_async_device_unregister(&device->dma->common);
2054 + pci_pool_destroy(device->dma->dma_pool);
2055 + if (device->dma_base)
2056 + iounmap(device->dma_base);
2057 + free_irq(pdev->irq, device);
2060 +static int __devinit
2061 +lnw_dma2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2063 + struct lnw_device *device = NULL;
2064 + u32 base_addr = 0, bar_size = 0;
2067 + dma_info("probe called for %x \n", pdev->device);
2068 + err = pci_enable_device(pdev);
2070 + goto err_enable_device;
2072 + err = pci_request_regions(pdev, lnw_dma2_pci.name);
2074 + goto err_request_regions;
2076 +	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2078 + goto err_set_dma_mask;
2080 +	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2082 + goto err_set_dma_mask;
2084 + device = kzalloc(sizeof(*device), GFP_KERNEL);
2086 + dma_err("kzalloc failed \n");
2090 + device->pdev = pci_dev_get(pdev);
2092 + base_addr = pci_resource_start(pdev, 0);
2093 + bar_size = pci_resource_len(pdev, 0);
2094 + dma_dbg("BAR0 %x Size %x \n", base_addr, bar_size);
2095 + device->dma_base = ioremap_nocache(base_addr, DMA_REG_SIZE);
2096 + if (!device->dma_base) {
2097 + dma_err("ioremap failed \n");
2102 + pci_set_drvdata(pdev, device);
2103 + pci_set_master(pdev);
2105 + err = lnw_setup_dma2(pdev);
2112 + iounmap(device->dma_base);
2114 + pci_dev_put(pdev);
2118 + pci_release_regions(pdev);
2119 + pci_disable_device(pdev);
2120 +err_request_regions:
2122 + dma_err("Probe failed %d\n", err);
2126 +static void __devexit lnw_dma2_remove(struct pci_dev *pdev)
2128 + struct lnw_device *device = pci_get_drvdata(pdev);
2130 + lnwdma_shutdown(pdev);
2131 + pci_dev_put(pdev);
2133 + pci_release_regions(pdev);
2134 + pci_disable_device(pdev);
2137 +static int __init lnw_dma2_init(void)
2139 + dma_info("LNW DMA Driver\n Version %s \n", LNW_DMA_DRIVER_VERSION);
2140 + return pci_register_driver(&lnw_dma2_pci);
2142 +fs_initcall(lnw_dma2_init);
2144 +static void __exit lnw_dma2_exit(void)
2146 + pci_unregister_driver(&lnw_dma2_pci);
2148 +module_exit(lnw_dma2_exit);
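The two files share the same descriptor machinery and differ mainly in PCI ID, block size (CH_BLOCK_SIZE of 4095 transfers on DMAC1 versus 2047 on DMAC2), handshaking peripheral numbers, and the LPE interrupt gating that only DMAC1 performs. As a worked example of the CTL_HI block_ts programming, a sketch (not part of the patch):

/* Sketch only: the CTL_HI block_ts value for a 32-bit wide 4 KiB copy.
 * Longer requests exceed CH_BLOCK_SIZE (2047 transfers on DMAC2, 4095 on
 * DMAC1); lnwc_descriptor_complete() then reprograms SAR/DAR/CTL_HI and
 * restarts the channel until desc->len bytes have moved. */
static int lnw_dma_example_block_ts(void)
{
	int len = 4096;
	int byte_width = 4;		   /* LNW_DMA_WIDTH_32BIT */
	int block_ts = len / byte_width;   /* = 1024 transfers */

	return block_ts;
}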
2150 Index: linux-2.6.33/include/linux/lnw_dma.h
2151 ===================================================================
2153 +++ linux-2.6.33/include/linux/lnw_dma.h
2156 + * lnw_dma.h - Intel Langwell DMA Drivers
2158 + * Copyright (C) 2008-09 Intel Corp
2159 + * Author: Vinod Koul <vinod.koul@intel.com>
2160 + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
2162 + * This program is free software; you can redistribute it and/or modify
2163 + * it under the terms of the GNU General Public License as published by
2164 + * the Free Software Foundation; version 2 of the License.
2166 + * This program is distributed in the hope that it will be useful, but
2167 + * WITHOUT ANY WARRANTY; without even the implied warranty of
2168 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
2169 + * General Public License for more details.
2171 + * You should have received a copy of the GNU General Public License along
2172 + * with this program; if not, write to the Free Software Foundation, Inc.,
2173 + * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
2175 + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
2179 +#ifndef __LNW_DMA_H__
2180 +#define __LNW_DMA_H__
2182 +#include <linux/dmaengine.h>
2184 +/* DMA transaction width: source and destination widths must be the same.
2185 + * The DMA length must be width-aligned; for 32-bit width the length
2186 + * must be 4-byte (32-bit) aligned. */
2187 +enum lnw_dma_width {
2188 + LNW_DMA_WIDTH_8BIT = 0x0,
2189 + LNW_DMA_WIDTH_16BIT = 0x1,
2190 + LNW_DMA_WIDTH_32BIT = 0x2,
2193 +/*DMA mode configurations*/
2194 +enum lnw_dma_mode {
2195 + LNW_DMA_PER_TO_MEM = 0, /*peripheral to memory configuration*/
2196 + LNW_DMA_MEM_TO_PER, /*memory to peripheral configuration*/
2197 + LNW_DMA_MEM_TO_MEM, /*mem to mem config (testing only)*/
2200 +/*DMA handshaking*/
2201 +enum lnw_dma_hs_mode {
2202 + LNW_DMA_HW_HS = 0, /*HW Handshaking only*/
2203 + LNW_DMA_SW_HS = 1, /*SW handshaking (not recommended)*/
2206 +/*Burst size configuration*/
2207 +enum lnw_dma_msize {
2208 + LNW_DMA_MSIZE_1 = 0x0,
2209 + LNW_DMA_MSIZE_4 = 0x1,
2210 + LNW_DMA_MSIZE_8 = 0x2,
2211 + LNW_DMA_MSIZE_16 = 0x3,
2212 + LNW_DMA_MSIZE_32 = 0x4,
2213 + LNW_DMA_MSIZE_64 = 0x5,
2217 + * struct lnw_dma_slave - DMA slave configuration passed in by the client
2218 + *
2219 + * @dirn: DMA transfer direction
2220 + * @src_width: width of the source transaction
2221 + * @dst_width: width of the destination transaction
2222 + * @hs_mode: HW or SW handshaking mode
2223 + * @cfg_mode: transfer configuration (peripheral to mem, mem to peripheral,
2224 + * or mem to mem)
2225 + * @src_msize: source burst size
2226 + * @dst_msize: destination burst size
2227 + * @callback: completion callback
2228 + * @callback_param: parameter passed to @callback
2235 +struct lnw_dma_slave {
2236 + enum dma_data_direction dirn;
2237 + enum lnw_dma_width src_width; /*width of DMA src txn*/
2238 + enum lnw_dma_width dst_width; /*width of DMA dst txn*/
2239 + enum lnw_dma_hs_mode hs_mode; /*handshaking*/
2240 + enum lnw_dma_mode cfg_mode; /*mode configuration*/
2241 + enum lnw_dma_msize src_msize; /*size of src burst*/
2242 + enum lnw_dma_msize dst_msize; /*size of dst burst*/
2243 + dma_async_tx_callback callback; /*callback function*/
2244 + void *callback_param; /*param for callback*/
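For reference, a client of this engine would hand a filled-in lnw_dma_slave to a channel obtained through the dmaengine slave API. The sketch below is illustrative only and is not taken from the patch: the helper names and the filter criterion are assumptions (a real client would match a specific channel of the LPE DMA device rather than accepting the first DMA_SLAVE-capable one), and exactly where the driver consumes chan->private is not shown in this hunk.

#include <linux/dmaengine.h>
#include <linux/lnw_dma.h>

/* Illustrative filter: accept any channel offered for the DMA_SLAVE mask. */
static bool lnw_dma_filter(struct dma_chan *chan, void *param)
{
	return true;
}

static struct dma_chan *lnw_request_tx_chan(struct lnw_dma_slave *slave)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;

	/* Describe a memory-to-peripheral transfer, 32 bits wide on both
	 * sides, HW handshaking, bursts of 16 items.
	 */
	slave->dirn      = DMA_TO_DEVICE;
	slave->cfg_mode  = LNW_DMA_MEM_TO_PER;
	slave->hs_mode   = LNW_DMA_HW_HS;
	slave->src_width = LNW_DMA_WIDTH_32BIT;
	slave->dst_width = LNW_DMA_WIDTH_32BIT;
	slave->src_msize = LNW_DMA_MSIZE_16;
	slave->dst_msize = LNW_DMA_MSIZE_16;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	chan = dma_request_channel(mask, lnw_dma_filter, NULL);
	if (chan)
		chan->private = slave;	/* read by the driver when preparing
					 * slave descriptors */
	return chan;
}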
2247 +/*DMA channel control registers*/
2248 +union lnw_dma_ctl_lo {
2250 + u32 int_en:1; /*enable or disable interrupts*/
2252 + u32 dst_tr_width:3; /*destination transfer width*/
2253 + /*usually 32 bits = 010*/
2254 + u32 src_tr_width:3; /*source transfer width*/
2255 + /*usually 32 bits = 010*/
2256 + u32 dinc:2; /*destination address inc/dec*/
2257 + /*For mem: INC = 00, Peripheral NoINC = 11*/
2258 + u32 sinc:2; /*source address inc or dec, as above*/
2259 + u32 dst_msize:3; /*destination burst transaction length*/
2260 + /*always = 16 ie 011*/
2261 + u32 src_msize:3; /*source burst transaction length*/
2262 + /*always = 16 ie 011*/
2264 + u32 tt_fc:3; /*transfer type and flow controller*/
2268 + u32 dms:2; /*destination master select = 0*/
2269 + u32 sms:2; /*source master select = 0*/
2270 + u32 llp_dst_en:1; /*enable/disable destination LLP = 0*/
2271 + u32 llp_src_en:1; /*enable/disable source LLP = 0*/
2277 +union lnw_dma_ctl_hi {
2279 + u32 block_ts:12; /*block transfer size*/
2280 + /*configured by DMAC*/
2287 +/*DMA channel configuration registers*/
2288 +union lnw_dma_cfg_lo {
2291 + u32 ch_prior:3; /*channel priority = 0*/
2292 + u32 ch_susp:1; /*channel suspend = 0*/
2293 + u32 fifo_empty:1; /*FIFO empty or not R bit = 0*/
2294 + u32 hs_sel_dst:1; /*select HW/SW destn handshaking*/
2295 + /*HW = 0, SW = 1*/
2296 + u32 hs_sel_src:1; /*select HW/SW src handshaking*/
2298 + u32 dst_hs_pol:1; /*dest HS interface polarity*/
2299 + u32 src_hs_pol:1; /*src HS interface polarity*/
2300 + u32 max_abrst:10; /*max AMBA burst len = 0 (no sw limit)*/
2301 + u32 reload_src:1; /*auto reload src addr = 1 if src is peripheral*/
2302 + u32 reload_dst:1; /*auto reload dst addr = 1 if dst is peripheral*/
2307 +union lnw_dma_cfg_hi {
2309 + u32 fcmode:1; /*flow control mode = 1*/
2310 + u32 fifo_mode:1; /*FIFO mode select = 1*/
2311 + u32 protctl:3; /*protection control = 0*/
2313 + u32 src_per:4; /*src hw HS interface*/
2314 + u32 dst_per:4; /*dstn hw HS interface*/
2320 +#endif /*__LNW_DMA_H__*/
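As an aid to reading the bit-field unions above, the sketch below composes a CTL_LOW value for a memory-to-peripheral transfer. It is not from the patch: the union member names ctlx (bit-field view) and ctl_lo (raw u32 view) are assumptions, since the closing part of the union is not visible in this hunk, and the tt_fc encoding follows the Synopsys DesignWare DMAC convention this controller appears to be based on. The sinc/dinc values match the patch's own comments (memory side increments, peripheral side holds its address).

#include <linux/lnw_dma.h>

/* Sketch only: "ctlx"/"ctl_lo" member names and tt_fc = 001b are assumptions. */
static u32 lnw_dma_build_ctl_lo(const struct lnw_dma_slave *slave)
{
	union lnw_dma_ctl_lo ctl;

	ctl.ctl_lo = 0;
	ctl.ctlx.int_en = 1;			/* interrupt on completion */
	ctl.ctlx.src_tr_width = slave->src_width;
	ctl.ctlx.dst_tr_width = slave->dst_width;
	ctl.ctlx.src_msize = slave->src_msize;
	ctl.ctlx.dst_msize = slave->dst_msize;
	ctl.ctlx.sinc = 0;			/* memory side: increment */
	ctl.ctlx.dinc = 3;			/* peripheral side: no change */
	ctl.ctlx.tt_fc = 1;			/* mem-to-per transfer type */
	return ctl.ctl_lo;
}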
2321 Index: linux-2.6.33/include/linux/intel_mid.h
2322 ===================================================================
2324 +++ linux-2.6.33/include/linux/intel_mid.h
2327 + * intel_mid.h - Netlink multicast interface definition for OSPM.
2329 + * Copyright (C) 2009 Intel Corp
2330 + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
2332 + * This program is free software; you can redistribute it and/or modify
2333 + * it under the terms of the GNU General Public License as published by
2334 + * the Free Software Foundation; version 2 of the License.
2336 + * This program is distributed in the hope that it will be useful, but
2337 + * WITHOUT ANY WARRANTY; without even the implied warranty of
2338 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
2339 + * General Public License for more details.
2341 + * You should have received a copy of the GNU General Public License along
2342 + * with this program; if not, write to the Free Software Foundation, Inc.,
2343 + * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
2345 + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
2346 + * Authors: Sujith Thomas
2347 + * Rajeev D Muralidhar
2348 + * Vishwesh M Rudramuni
2349 + * Nithish Mahalingam
2350 + * Contact information:
2351 + * Sujith Thomas <sujith.thomas@intel.com>
2352 + * Rajeev D Muralidhar <rajeev.d.muralidhar@intel.com>
2353 + * Vishwesh M Rudramuni <vishwesh.m.rudramuni@intel.com>
2354 + * Nithish Mahalingam <nithish.mahalingam@intel.com>
2358 +#ifndef INTEL_MID_H
2359 +#define INTEL_MID_H
2361 +#define PMU1_MAX_DEVS 2
2362 +#define PMU2_MAX_DEVS 12
2363 +#define PERIPH_MAX_DEVS 3
2364 +#define MAX_DEVICES (PMU1_MAX_DEVS + PMU2_MAX_DEVS + PERIPH_MAX_DEVS)
2365 +#define WAKE_CAPABLE 0x80000000
2367 +struct pci_dev_info {
2371 + u16 phy_susbsysid;
2373 + struct pci_dev *dev_driver;
2379 + u32 *pmu1_pm_base;
2380 + void __iomem *pmu2_base;
2381 + u32 *pm_table_base;
2382 + u32 pmu1_sub_systems;
2383 + u32 pmu2_sub_systems;
2385 + u32 pmu_wake_ss_states;
2386 + u32 perepheral_sub_systems;
2388 + int platform_sx_state;
2389 + int s0ix_retry_enb;
2390 + int fast_retry_exit;
2394 +extern struct pci_dev_info platform_pci_devices[MAX_DEVICES];
2395 +extern unsigned long g_intel_mid_wakeup_address;
2397 +enum pmu_ss_state {
2398 + SS_STATE_D0I0 = 0,
2399 + SS_STATE_D0I1 = 1,
2400 + SS_STATE_D0I2 = 2,
2404 +enum eospm_events {
2405 + OSPM_EVENT_SUBSYS_INACTIVITY,
2406 + OSPM_EVENT_SUBSYS_WAKE,
2407 + OSPM_EVENT_SUBSYS_START_PLAY,
2408 + OSPM_EVENT_SUBSYS_STOP_PLAY,
2409 + OSPM_EVENT_CMD_SUCCESS,
2410 + OSPM_EVENT_CMD_ERROR,
2411 + OSPM_EVENT_CMD_NO_C6_ERROR,
2412 + OSPM_EVENT_AUDIO_BUF_EMPTY,
2413 + OSPM_EVENT_AUDIO_BUF_FULL,
2414 + OSPM_EVENT_THERMAL_AUX0,
2415 + OSPM_EVENT_THERMAL_AUX1,
2416 + OSPM_EVENT_THERMAL_CRITICAL,
2417 + OSPM_EVENT_THERMAL_DEV_FAULT,
2418 + __OSPM_EVENT_COUNT,
2421 +#define AUDIO_SUBSYTEM_ID 25
2422 +#define MID_S0I1_STATE 1
2423 +#define MID_S0I3_STATE 3
2424 +/* Thermal device Id */
2425 +#define TEMP_DEV_ID1 40
2426 +#define TEMP_DEV_ID2 41
2427 +#define TEMP_DEV_ID3 42
2429 +/* First 32 (0-31) originators are subsystems;
2430 + * the next 8 (0-7) are command IDs. */
2431 +#define OSPM_CMDID_OFFSET 32
2432 +#define OSPM_MAX_CMD_ID 8
2434 +struct ospm_genl_event {
2436 + enum eospm_events event;
2439 +/* attributes of ospm_genl_family */
2441 + OSPM_GENL_ATTR_UNSPEC,
2442 + OSPM_GENL_ATTR_EVENT, /* OSPM event info needed by user space */
2443 + __OSPM_GENL_ATTR_MAX,
2445 +#define OSPM_GENL_ATTR_MAX (__OSPM_GENL_ATTR_MAX - 1)
2447 +/* commands supported by the ospm_genl_family */
2450 + OSPM_GENL_CMD_UNSPEC,
2451 + OSPM_GENL_CMD_EVENT, /* kernel->user notifications for OSPM events */
2452 + __OSPM_GENL_CMD_MAX,
2454 +#define OSPM_GENL_CMD_MAX (__OSPM_GENL_CMD_MAX - 1)
2456 +#define OSPM_GENL_FAMILY_NAME "ospm_event"
2457 +#define OSPM_GENL_VERSION 0x01
2458 +#define OSPM_GENL_MCAST_GROUP_NAME "ospm_mc_group"
2460 +int ospm_generate_netlink_event(u32 orig, enum eospm_events event);
2461 +int ospm_event_genetlink_init(void);
2462 +void ospm_event_genetlink_exit(void);
2464 +extern void intel_mid_reserve_bootmem(void);
2465 +extern unsigned long g_intel_mid_wakeup_address;
2466 +extern void find_pci_info(u32 device_id, u32 vendor_id, u32 *index);
2467 +extern int s0ix_non_bsp_init(void);
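The header only declares ospm_generate_netlink_event(); its body is not part of this hunk. Below is a minimal sketch of what such an event sender could look like against the 2.6.33 generic-netlink API, tying together the OSPM_GENL_* definitions above. The ospm_genl_family and ospm_mcast_group objects, and the "orig" field of struct ospm_genl_event, are assumptions, since their definitions are not visible here.

#include <net/genetlink.h>
#include <linux/intel_mid.h>

/* Sketch only: family/group objects and the "orig" field are assumptions. */
static struct genl_family ospm_genl_family = {
	.id = GENL_ID_GENERATE,
	.name = OSPM_GENL_FAMILY_NAME,
	.version = OSPM_GENL_VERSION,
	.maxattr = OSPM_GENL_ATTR_MAX,
};

static struct genl_multicast_group ospm_mcast_group = {
	.name = OSPM_GENL_MCAST_GROUP_NAME,
};

int ospm_event_genetlink_init(void)
{
	int ret = genl_register_family(&ospm_genl_family);
	if (ret)
		return ret;
	ret = genl_register_mc_group(&ospm_genl_family, &ospm_mcast_group);
	if (ret)
		genl_unregister_family(&ospm_genl_family);
	return ret;
}

int ospm_generate_netlink_event(u32 orig, enum eospm_events event)
{
	struct sk_buff *skb;
	void *hdr;
	struct ospm_genl_event evt;

	skb = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	hdr = genlmsg_put(skb, 0, 0, &ospm_genl_family, 0,
			  OSPM_GENL_CMD_EVENT);
	if (!hdr) {
		nlmsg_free(skb);
		return -ENOMEM;
	}

	evt.orig = orig;	/* field name "orig" is an assumption */
	evt.event = event;
	if (nla_put(skb, OSPM_GENL_ATTR_EVENT, sizeof(evt), &evt)) {
		nlmsg_free(skb);
		return -EMSGSIZE;
	}

	genlmsg_end(skb, hdr);
	/* Multicast to every listener on the ospm_mc_group. */
	return genlmsg_multicast(skb, 0, ospm_mcast_group.id, GFP_ATOMIC);
}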