/*
 * comedi/drivers/mite.c
 * Hardware driver for NI Mite PCI interface chip
 *
 * COMEDI - Linux Control and Measurement Device Interface
 * Copyright (C) 1997-2002 David A. Schleef <ds@schleef.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

/*
 * The PCI-MIO E series driver was originally written by
 * Tomasz Motylewski <...>, and ported to comedi by ds.
 *
 * References for specifications:
 *
 *    321747b.pdf  Register Level Programmer Manual (obsolete)
 *    321747c.pdf  Register Level Programmer Manual (new)
 *    DAQ-STC reference manual
 *
 * Other possibly relevant info:
 *
 *    320517c.pdf  User manual (obsolete)
 *    320517f.pdf  User manual (new)
 *    320889a.pdf  delete
 *    320906c.pdf  maximum signal ratings
 *    321066a.pdf  about 16x
 *    321791a.pdf  discontinuation of at-mio-16e-10 rev. c
 *    321808a.pdf  about at-mio-16e-10 rev P
 *    321837a.pdf  discontinuation of at-mio-16de-10 rev d
 *    321838a.pdf  about at-mio-16de-10 rev N
 *
 * ISSUES:
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/slab.h>

#include "../comedi_pci.h"

#include "mite.h"

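/*
 * Rough usage sketch for a board driver built on this helper module
 * (illustrative only -- not lifted from any particular NI driver, which
 * would also wire up interrupts and error handling around these calls):
 *
 *	mite = mite_alloc(pcidev);
 *	mite_setup2(dev, mite, false);
 *	ring = mite_alloc_ring(mite);
 *	chan = mite_request_channel_in_range(mite, ring, 0,
 *					     mite->num_channels - 1);
 *
 *	mite_buf_change(ring, s);	(whenever the comedi buffer is resized)
 *	mite_prep_dma(chan, 16, 16);
 *	mite_dma_arm(chan);
 *
 *	mite_get_status(chan);		(from the board's interrupt handler)
 *	mite_sync_input_dma(chan, s);	(or mite_sync_output_dma() for output)
 *
 *	mite_dma_disarm(chan);
 *	mite_release_channel(chan);
 *	mite_free_ring(ring);
 *	mite_detach(mite);
 */
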
#define TOP_OF_PAGE(x) ((x)|(~(PAGE_MASK)))

struct mite_struct *mite_alloc(struct pci_dev *pcidev)
{
        struct mite_struct *mite;
        unsigned int i;

        mite = kzalloc(sizeof(*mite), GFP_KERNEL);
        if (mite) {
                spin_lock_init(&mite->lock);
                mite->pcidev = pcidev;
                for (i = 0; i < MAX_MITE_DMA_CHANNELS; ++i) {
                        mite->channels[i].mite = mite;
                        mite->channels[i].channel = i;
                        mite->channels[i].done = 1;
                }
        }
        return mite;
}
EXPORT_SYMBOL_GPL(mite_alloc);

static void dump_chip_signature(u32 csigr_bits)
{
        pr_info("version = %i, type = %i, mite mode = %i, interface mode = %i\n",
                mite_csigr_version(csigr_bits), mite_csigr_type(csigr_bits),
                mite_csigr_mmode(csigr_bits), mite_csigr_imode(csigr_bits));
        pr_info("num channels = %i, write post fifo depth = %i, wins = %i, iowins = %i\n",
                mite_csigr_dmac(csigr_bits), mite_csigr_wpdep(csigr_bits),
                mite_csigr_wins(csigr_bits), mite_csigr_iowins(csigr_bits));
}

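/*
 * Total FIFO size for a channel: the FCR reports the empty slot count in
 * bits 16-23 and the full slot count in bits 0-7; their sum is the depth.
 */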
static unsigned mite_fifo_size(struct mite_struct *mite, unsigned channel)
{
        unsigned fcr_bits = readl(mite->mite_io_addr + MITE_FCR(channel));
        unsigned empty_count = (fcr_bits >> 16) & 0xff;
        unsigned full_count = fcr_bits & 0xff;

        return empty_count + full_count;
}

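/*
 * mite_setup2() - map and initialize the MITE.
 *
 * Maps BAR 0 (MITE registers) and BAR 1 (DAQ registers), programs the I/O
 * window so that accesses through the MITE reach the DAQ registers, enables
 * DMA bursts, then resets every DMA channel and masks its interrupts.
 */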
int mite_setup2(struct comedi_device *dev,
                struct mite_struct *mite, bool use_win1)
{
        unsigned long length;
        int i;
        u32 csigr_bits;
        unsigned unknown_dma_burst_bits;

        pci_set_master(mite->pcidev);

        mite->mite_io_addr = pci_ioremap_bar(mite->pcidev, 0);
        if (!mite->mite_io_addr) {
                dev_err(dev->class_dev,
                        "Failed to remap mite io memory address\n");
                return -ENOMEM;
        }
        mite->mite_phys_addr = pci_resource_start(mite->pcidev, 0);

        dev->mmio = pci_ioremap_bar(mite->pcidev, 1);
        if (!dev->mmio) {
                dev_err(dev->class_dev,
                        "Failed to remap daq io memory address\n");
                return -ENOMEM;
        }
        mite->daq_phys_addr = pci_resource_start(mite->pcidev, 1);
        length = pci_resource_len(mite->pcidev, 1);

        if (use_win1) {
                writel(0, mite->mite_io_addr + MITE_IODWBSR);
                dev_info(dev->class_dev,
                         "using I/O Window Base Size register 1\n");
                writel(mite->daq_phys_addr | WENAB |
                       MITE_IODWBSR_1_WSIZE_bits(length),
                       mite->mite_io_addr + MITE_IODWBSR_1);
                writel(0, mite->mite_io_addr + MITE_IODWCR_1);
        } else {
                writel(mite->daq_phys_addr | WENAB,
                       mite->mite_io_addr + MITE_IODWBSR);
        }
        /*
         * Make sure dma bursts work. I got this from running a bus analyzer
         * on a pxi-6281 and a pxi-6713. 6713 powered up with register value
         * of 0x61f and bursts worked. 6281 powered up with register value of
         * 0x1f and bursts didn't work. The NI windows driver reads the
         * register, then does a bitwise-or of 0x600 with it and writes it back.
         */
        unknown_dma_burst_bits =
            readl(mite->mite_io_addr + MITE_UNKNOWN_DMA_BURST_REG);
        unknown_dma_burst_bits |= UNKNOWN_DMA_BURST_ENABLE_BITS;
        writel(unknown_dma_burst_bits,
               mite->mite_io_addr + MITE_UNKNOWN_DMA_BURST_REG);

        csigr_bits = readl(mite->mite_io_addr + MITE_CSIGR);
        mite->num_channels = mite_csigr_dmac(csigr_bits);
        if (mite->num_channels > MAX_MITE_DMA_CHANNELS) {
                dev_warn(dev->class_dev,
                         "mite: bug? chip claims to have %i dma channels. Setting to %i.\n",
                         mite->num_channels, MAX_MITE_DMA_CHANNELS);
                mite->num_channels = MAX_MITE_DMA_CHANNELS;
        }
        dump_chip_signature(csigr_bits);
        for (i = 0; i < mite->num_channels; i++) {
                writel(CHOR_DMARESET, mite->mite_io_addr + MITE_CHOR(i));
                /* disable interrupts */
                writel(CHCR_CLR_DMA_IE | CHCR_CLR_LINKP_IE | CHCR_CLR_SAR_IE |
                       CHCR_CLR_DONE_IE | CHCR_CLR_MRDY_IE | CHCR_CLR_DRDY_IE |
                       CHCR_CLR_LC_IE | CHCR_CLR_CONT_RB_IE,
                       mite->mite_io_addr + MITE_CHCR(i));
        }
        mite->fifo_size = mite_fifo_size(mite, 0);
        dev_info(dev->class_dev, "fifo size is %i.\n", mite->fifo_size);
        return 0;
}
EXPORT_SYMBOL_GPL(mite_setup2);

void mite_detach(struct mite_struct *mite)
{
        if (!mite)
                return;

        if (mite->mite_io_addr)
                iounmap(mite->mite_io_addr);

        kfree(mite);
}
EXPORT_SYMBOL_GPL(mite_detach);

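/*
 * Allocate an empty DMA descriptor ring.  Takes a reference on the PCI
 * device so it stays around for the later coherent DMA allocations;
 * mite_free_ring() drops that reference.
 */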
struct mite_dma_descriptor_ring *mite_alloc_ring(struct mite_struct *mite)
{
        struct mite_dma_descriptor_ring *ring =
            kmalloc(sizeof(struct mite_dma_descriptor_ring), GFP_KERNEL);

        if (!ring)
                return NULL;
        ring->hw_dev = get_device(&mite->pcidev->dev);
        if (!ring->hw_dev) {
                kfree(ring);
                return NULL;
        }
        ring->n_links = 0;
        ring->descriptors = NULL;
        ring->descriptors_dma_addr = 0;
        return ring;
}
EXPORT_SYMBOL_GPL(mite_alloc_ring);

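/*
 * Free the descriptor chain (if any), drop the device reference taken by
 * mite_alloc_ring(), and free the ring itself.
 */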
void mite_free_ring(struct mite_dma_descriptor_ring *ring)
{
        if (ring) {
                if (ring->descriptors) {
                        dma_free_coherent(ring->hw_dev,
                                          ring->n_links *
                                          sizeof(struct mite_dma_descriptor),
                                          ring->descriptors,
                                          ring->descriptors_dma_addr);
                }
                put_device(ring->hw_dev);
                kfree(ring);
        }
}
EXPORT_SYMBOL_GPL(mite_free_ring);

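/*
 * Claim the first unused DMA channel in [min_channel, max_channel] and bind
 * it to @ring.  Returns NULL if every channel in the range is already taken.
 */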
struct mite_channel *
mite_request_channel_in_range(struct mite_struct *mite,
                              struct mite_dma_descriptor_ring *ring,
                              unsigned min_channel, unsigned max_channel)
{
        int i;
        unsigned long flags;
        struct mite_channel *channel = NULL;

        /*
         * spin lock so mite_release_channel can be called safely
         * from interrupts
         */
        spin_lock_irqsave(&mite->lock, flags);
        for (i = min_channel; i <= max_channel; ++i) {
                if (mite->channel_allocated[i] == 0) {
                        mite->channel_allocated[i] = 1;
                        channel = &mite->channels[i];
                        channel->ring = ring;
                        break;
                }
        }
        spin_unlock_irqrestore(&mite->lock, flags);
        return channel;
}
EXPORT_SYMBOL_GPL(mite_request_channel_in_range);

void mite_release_channel(struct mite_channel *mite_chan)
{
        struct mite_struct *mite = mite_chan->mite;
        unsigned long flags;

        /* spin lock to prevent races with mite_request_channel */
        spin_lock_irqsave(&mite->lock, flags);
        if (mite->channel_allocated[mite_chan->channel]) {
                mite_dma_disarm(mite_chan);
                mite_dma_reset(mite_chan);
                /*
                 * disable all the channel's interrupts (do this after
                 * disarm/reset so the MITE_CHCR reg isn't changed while dma
                 * is still active!)
                 */
                writel(CHCR_CLR_DMA_IE | CHCR_CLR_LINKP_IE |
                       CHCR_CLR_SAR_IE | CHCR_CLR_DONE_IE |
                       CHCR_CLR_MRDY_IE | CHCR_CLR_DRDY_IE |
                       CHCR_CLR_LC_IE | CHCR_CLR_CONT_RB_IE,
                       mite->mite_io_addr + MITE_CHCR(mite_chan->channel));
                mite->channel_allocated[mite_chan->channel] = 0;
                mite_chan->ring = NULL;
                mmiowb();
        }
        spin_unlock_irqrestore(&mite->lock, flags);
}
EXPORT_SYMBOL_GPL(mite_release_channel);

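/*
 * Arm the channel: clear its software "done" flag and tell the MITE to
 * start walking the link chain.
 */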
void mite_dma_arm(struct mite_channel *mite_chan)
{
        struct mite_struct *mite = mite_chan->mite;
        int chor;
        unsigned long flags;

        /*
         * The memory barrier is intended to ensure that any twiddling with
         * the buffer is done before writing to the MITE to arm the DMA
         * transfer.
         */
        smp_mb();
        /* arm */
        chor = CHOR_START;
        spin_lock_irqsave(&mite->lock, flags);
        mite_chan->done = 0;
        writel(chor, mite->mite_io_addr + MITE_CHOR(mite_chan->channel));
        mmiowb();
        spin_unlock_irqrestore(&mite->lock, flags);
        /* mite_dma_tcr(mite, channel); */
}
EXPORT_SYMBOL_GPL(mite_dma_arm);

/**************************************/

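/*
 * (Re)build the descriptor chain to cover the subdevice's preallocated
 * buffer: one descriptor per page, with the last descriptor linking back to
 * the first so the DMA engine loops over the buffer as a ring.
 */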
int mite_buf_change(struct mite_dma_descriptor_ring *ring,
                    struct comedi_subdevice *s)
{
        struct comedi_async *async = s->async;
        unsigned int n_links;
        int i;

        if (ring->descriptors) {
                dma_free_coherent(ring->hw_dev,
                                  ring->n_links *
                                  sizeof(struct mite_dma_descriptor),
                                  ring->descriptors,
                                  ring->descriptors_dma_addr);
        }
        ring->descriptors = NULL;
        ring->descriptors_dma_addr = 0;
        ring->n_links = 0;

        if (async->prealloc_bufsz == 0)
                return 0;

        n_links = async->prealloc_bufsz >> PAGE_SHIFT;

        ring->descriptors =
            dma_alloc_coherent(ring->hw_dev,
                               n_links * sizeof(struct mite_dma_descriptor),
                               &ring->descriptors_dma_addr, GFP_KERNEL);
        if (!ring->descriptors) {
                dev_err(s->device->class_dev,
                        "mite: ring buffer allocation failed\n");
                return -ENOMEM;
        }
        ring->n_links = n_links;

        for (i = 0; i < n_links; i++) {
                ring->descriptors[i].count = cpu_to_le32(PAGE_SIZE);
                ring->descriptors[i].addr =
                    cpu_to_le32(async->buf_map->page_list[i].dma_addr);
                ring->descriptors[i].next =
                    cpu_to_le32(ring->descriptors_dma_addr +
                                (i + 1) * sizeof(struct mite_dma_descriptor));
        }
        ring->descriptors[n_links - 1].next =
            cpu_to_le32(ring->descriptors_dma_addr);
        /*
         * The barrier is meant to ensure that all the writes to the dma
         * descriptors have completed before the dma controller is commanded
         * to read them.
         */
        smp_wmb();
        return 0;
}
EXPORT_SYMBOL_GPL(mite_buf_change);

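/*
 * Program the channel for a short-link-chaining transfer with the given
 * device and memory port widths, reset the device address register, and
 * point the link address register at the descriptor ring.  The channel is
 * not started here; that is done by mite_dma_arm().
 */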
void mite_prep_dma(struct mite_channel *mite_chan,
                   unsigned int num_device_bits, unsigned int num_memory_bits)
{
        unsigned int chor, chcr, mcr, dcr, lkcr;
        struct mite_struct *mite = mite_chan->mite;

        /* reset DMA and FIFO */
        chor = CHOR_DMARESET | CHOR_FRESET;
        writel(chor, mite->mite_io_addr + MITE_CHOR(mite_chan->channel));

        /* short link chaining mode */
        chcr = CHCR_SET_DMA_IE | CHCR_LINKSHORT | CHCR_SET_DONE_IE |
            CHCR_BURSTEN;
        /*
         * Link Complete Interrupt: interrupt every time a link
         * in MITE_RING is completed. This can generate a lot of
         * extra interrupts, but right now we update the values
         * of buf_int_ptr and buf_int_count at each interrupt. A
         * better method is to poll the MITE before each user
         * "read()" to calculate the number of bytes available.
         */
        chcr |= CHCR_SET_LC_IE;
        if (num_memory_bits == 32 && num_device_bits == 16) {
                /*
                 * Doing a combined 32 and 16 bit byteswap gets the 16 bit
                 * samples into the fifo in the right order. Tested doing 32 bit
                 * memory to 16 bit device transfers to the analog out of a
                 * pxi-6281, which has mite version = 1, type = 4. This also
                 * works for dma reads from the counters on e-series boards.
                 */
                chcr |= CHCR_BYTE_SWAP_DEVICE | CHCR_BYTE_SWAP_MEMORY;
        }
        if (mite_chan->dir == COMEDI_INPUT)
                chcr |= CHCR_DEV_TO_MEM;

        writel(chcr, mite->mite_io_addr + MITE_CHCR(mite_chan->channel));

        /* to/from memory */
        mcr = CR_RL(64) | CR_ASEQUP;
        switch (num_memory_bits) {
        case 8:
                mcr |= CR_PSIZE8;
                break;
        case 16:
                mcr |= CR_PSIZE16;
                break;
        case 32:
                mcr |= CR_PSIZE32;
                break;
        default:
                pr_warn("bug! invalid mem bit width for dma transfer\n");
                break;
        }
        writel(mcr, mite->mite_io_addr + MITE_MCR(mite_chan->channel));

        /* from/to device */
        dcr = CR_RL(64) | CR_ASEQUP;
        dcr |= CR_PORTIO | CR_AMDEVICE | CR_REQSDRQ(mite_chan->channel);
        switch (num_device_bits) {
        case 8:
                dcr |= CR_PSIZE8;
                break;
        case 16:
                dcr |= CR_PSIZE16;
                break;
        case 32:
                dcr |= CR_PSIZE32;
                break;
        default:
                pr_warn("bug! invalid dev bit width for dma transfer\n");
                break;
        }
        writel(dcr, mite->mite_io_addr + MITE_DCR(mite_chan->channel));

        /* reset the DAR */
        writel(0, mite->mite_io_addr + MITE_DAR(mite_chan->channel));

        /* the link is 32bits */
        lkcr = CR_RL(64) | CR_ASEQUP | CR_PSIZE32;
        writel(lkcr, mite->mite_io_addr + MITE_LKCR(mite_chan->channel));

        /* starting address for link chaining */
        writel(mite_chan->ring->descriptors_dma_addr,
               mite->mite_io_addr + MITE_LKAR(mite_chan->channel));
}
EXPORT_SYMBOL_GPL(mite_prep_dma);

static u32 mite_device_bytes_transferred(struct mite_channel *mite_chan)
{
        struct mite_struct *mite = mite_chan->mite;

        return readl(mite->mite_io_addr + MITE_DAR(mite_chan->channel));
}

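/*
 * Bytes still in flight in the channel's FIFO: accepted from the source of
 * the transfer but not yet delivered to its destination.
 */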
u32 mite_bytes_in_transit(struct mite_channel *mite_chan)
{
        struct mite_struct *mite = mite_chan->mite;

        return readl(mite->mite_io_addr +
                     MITE_FCR(mite_chan->channel)) & 0x000000FF;
}
EXPORT_SYMBOL_GPL(mite_bytes_in_transit);

/* returns lower bound for number of bytes transferred from device to memory */
u32 mite_bytes_written_to_memory_lb(struct mite_channel *mite_chan)
{
        u32 device_byte_count;

        device_byte_count = mite_device_bytes_transferred(mite_chan);
        return device_byte_count - mite_bytes_in_transit(mite_chan);
}
EXPORT_SYMBOL_GPL(mite_bytes_written_to_memory_lb);

/* returns upper bound for number of bytes transferred from device to memory */
u32 mite_bytes_written_to_memory_ub(struct mite_channel *mite_chan)
{
        u32 in_transit_count;

        in_transit_count = mite_bytes_in_transit(mite_chan);
        return mite_device_bytes_transferred(mite_chan) - in_transit_count;
}
EXPORT_SYMBOL_GPL(mite_bytes_written_to_memory_ub);

/* returns lower bound for number of bytes read from memory to device */
u32 mite_bytes_read_from_memory_lb(struct mite_channel *mite_chan)
{
        u32 device_byte_count;

        device_byte_count = mite_device_bytes_transferred(mite_chan);
        return device_byte_count + mite_bytes_in_transit(mite_chan);
}
EXPORT_SYMBOL_GPL(mite_bytes_read_from_memory_lb);

/* returns upper bound for number of bytes read from memory to device */
u32 mite_bytes_read_from_memory_ub(struct mite_channel *mite_chan)
{
        u32 in_transit_count;

        in_transit_count = mite_bytes_in_transit(mite_chan);
        return mite_device_bytes_transferred(mite_chan) + in_transit_count;
}
EXPORT_SYMBOL_GPL(mite_bytes_read_from_memory_ub);

unsigned mite_dma_tcr(struct mite_channel *mite_chan)
{
        struct mite_struct *mite = mite_chan->mite;

        return readl(mite->mite_io_addr + MITE_TCR(mite_chan->channel));
}
EXPORT_SYMBOL_GPL(mite_dma_tcr);

void mite_dma_disarm(struct mite_channel *mite_chan)
{
        struct mite_struct *mite = mite_chan->mite;
        unsigned chor;

        /* disarm */
        chor = CHOR_ABORT;
        writel(chor, mite->mite_io_addr + MITE_CHOR(mite_chan->channel));
}
EXPORT_SYMBOL_GPL(mite_dma_disarm);

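/*
 * Propagate input DMA progress into the comedi async buffer: free up the
 * bytes the device has written so the core can hand them to the reader.
 * Flags COMEDI_CB_OVERFLOW if the device has run past the writable area.
 */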
int mite_sync_input_dma(struct mite_channel *mite_chan,
                        struct comedi_subdevice *s)
{
        struct comedi_async *async = s->async;
        int count;
        unsigned int nbytes, old_alloc_count;

        old_alloc_count = async->buf_write_alloc_count;
        /* write alloc as much as we can */
        comedi_buf_write_alloc(s, async->prealloc_bufsz);

        nbytes = mite_bytes_written_to_memory_lb(mite_chan);
        if ((int)(mite_bytes_written_to_memory_ub(mite_chan) -
                  old_alloc_count) > 0) {
                dev_warn(s->device->class_dev,
                         "mite: DMA overwrite of free area\n");
                async->events |= COMEDI_CB_OVERFLOW;
                return -1;
        }

        count = nbytes - async->buf_write_count;
        /*
         * it's possible count will be negative due to conservative value
         * returned by mite_bytes_written_to_memory_lb
         */
        if (count <= 0)
                return 0;

        comedi_buf_write_free(s, count);
        comedi_inc_scan_progress(s, count);
        async->events |= COMEDI_CB_BLOCK;
        return 0;
}
EXPORT_SYMBOL_GPL(mite_sync_input_dma);

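/*
 * Propagate output DMA progress into the comedi async buffer: free up the
 * bytes the device has consumed so the core can refill them.  Flags
 * COMEDI_CB_OVERFLOW on a buffer underrun.
 */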
int mite_sync_output_dma(struct mite_channel *mite_chan,
                         struct comedi_subdevice *s)
{
        struct comedi_async *async = s->async;
        struct comedi_cmd *cmd = &async->cmd;
        u32 stop_count = cmd->stop_arg * comedi_bytes_per_scan(s);
        unsigned int old_alloc_count = async->buf_read_alloc_count;
        u32 nbytes_ub, nbytes_lb;
        int count;

        /* read alloc as much as we can */
        comedi_buf_read_alloc(s, async->prealloc_bufsz);
        nbytes_lb = mite_bytes_read_from_memory_lb(mite_chan);
        if (cmd->stop_src == TRIG_COUNT && (int)(nbytes_lb - stop_count) > 0)
                nbytes_lb = stop_count;
        nbytes_ub = mite_bytes_read_from_memory_ub(mite_chan);
        if (cmd->stop_src == TRIG_COUNT && (int)(nbytes_ub - stop_count) > 0)
                nbytes_ub = stop_count;
        if ((int)(nbytes_ub - old_alloc_count) > 0) {
                dev_warn(s->device->class_dev, "mite: DMA underrun\n");
                async->events |= COMEDI_CB_OVERFLOW;
                return -1;
        }
        count = nbytes_lb - async->buf_read_count;
        if (count <= 0)
                return 0;

        comedi_buf_read_free(s, count);
        async->events |= COMEDI_CB_BLOCK;
        return 0;
}
EXPORT_SYMBOL_GPL(mite_sync_output_dma);

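/*
 * Read the channel status register, acknowledging and latching the DONE
 * condition into mite_chan->done.
 */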
unsigned mite_get_status(struct mite_channel *mite_chan)
{
        struct mite_struct *mite = mite_chan->mite;
        unsigned status;
        unsigned long flags;

        spin_lock_irqsave(&mite->lock, flags);
        status = readl(mite->mite_io_addr + MITE_CHSR(mite_chan->channel));
        if (status & CHSR_DONE) {
                mite_chan->done = 1;
                writel(CHOR_CLRDONE,
                       mite->mite_io_addr + MITE_CHOR(mite_chan->channel));
        }
        mmiowb();
        spin_unlock_irqrestore(&mite->lock, flags);
        return status;
}
EXPORT_SYMBOL_GPL(mite_get_status);

int mite_done(struct mite_channel *mite_chan)
{
        struct mite_struct *mite = mite_chan->mite;
        unsigned long flags;
        int done;

        mite_get_status(mite_chan);
        spin_lock_irqsave(&mite->lock, flags);
        done = mite_chan->done;
        spin_unlock_irqrestore(&mite->lock, flags);
        return done;
}
EXPORT_SYMBOL_GPL(mite_done);

static int __init mite_module_init(void)
{
        return 0;
}

static void __exit mite_module_exit(void)
{
}

module_init(mite_module_init);
module_exit(mite_module_exit);

MODULE_AUTHOR("Comedi http://www.comedi.org");
MODULE_DESCRIPTION("Comedi helper for NI Mite PCI interface chip");
MODULE_LICENSE("GPL");