/*
 * QEMU DMA emulation
 *
 * Copyright (c) 2003-2004 Vassili Karpov (malc)
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "hw/hw.h"
#include "hw/isa/isa.h"
#include "qemu/main-loop.h"
#include "trace.h"
/* #define DEBUG_DMA */
#define dolog(...) fprintf (stderr, "dma: " __VA_ARGS__)
#ifdef DEBUG_DMA
#define linfo(...) fprintf (stderr, "dma: " __VA_ARGS__)
#define ldebug(...) fprintf (stderr, "dma: " __VA_ARGS__)
#else
#define linfo(...)
#define ldebug(...)
#endif
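/*
 * Per-channel state: now[] holds the live address/count pair, base[]
 * the guest-programmed values (now[] is reloaded from base[] when the
 * channel is reprogrammed), and page/pageh supply bits 16-23 and
 * 24-30 of the physical address.
 */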
struct dma_regs {
    int now[2];
    uint16_t base[2];
    uint8_t mode;
    uint8_t page;
    uint8_t pageh;
    uint8_t dack;
    uint8_t eop;
    DMA_transfer_handler transfer_handler;
    void *opaque;
};

#define ADDR 0
#define COUNT 1
static struct dma_cont {
    uint8_t status;
    uint8_t command;
    uint8_t mask;
    uint8_t flip_flop;
    int dshift;
    struct dma_regs regs[4];
    qemu_irq *cpu_request_exit;
    MemoryRegion channel_io;
    MemoryRegion cont_io;
} dma_controllers[2];
enum {
    CMD_MEMORY_TO_MEMORY = 0x01,
    CMD_FIXED_ADDRESS    = 0x02,
    CMD_BLOCK_CONTROLLER = 0x04,
    CMD_COMPRESSED_TIME  = 0x08,
    CMD_CYCLIC_PRIORITY  = 0x10,
    CMD_EXTENDED_WRITE   = 0x20,
    CMD_LOW_DREQ         = 0x40,
    CMD_LOW_DACK         = 0x80,
    CMD_NOT_SUPPORTED = CMD_MEMORY_TO_MEMORY | CMD_FIXED_ADDRESS
    | CMD_COMPRESSED_TIME | CMD_CYCLIC_PRIORITY | CMD_EXTENDED_WRITE
    | CMD_LOW_DREQ | CMD_LOW_DACK
};
static void DMA_run (void);
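/*
 * Page-register port offsets do not follow channel numbering: offsets
 * 1, 2, 3 and 7 select channels 2, 3, 1 and 0 respectively (so port
 * 0x81 is the channel 2 page register), and the remaining offsets map
 * to no channel at all.
 */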
static int channels[8] = {-1, 2, 3, 1, -1, -1, -1, 0};
static void write_page (void *opaque, uint32_t nport, uint32_t data)
{
    struct dma_cont *d = opaque;
    int ichan;

    ichan = channels[nport & 7];
    if (-1 == ichan) {
        dolog ("invalid channel %#x %#x\n", nport, data);
        return;
    }
    d->regs[ichan].page = data;
}
static void write_pageh (void *opaque, uint32_t nport, uint32_t data)
{
    struct dma_cont *d = opaque;
    int ichan;

    ichan = channels[nport & 7];
    if (-1 == ichan) {
        dolog ("invalid channel %#x %#x\n", nport, data);
        return;
    }
    d->regs[ichan].pageh = data;
}
static uint32_t read_page (void *opaque, uint32_t nport)
{
    struct dma_cont *d = opaque;
    int ichan;

    ichan = channels[nport & 7];
    if (-1 == ichan) {
        dolog ("invalid channel read %#x\n", nport);
        return 0;
    }
    return d->regs[ichan].page;
}
static uint32_t read_pageh (void *opaque, uint32_t nport)
{
    struct dma_cont *d = opaque;
    int ichan;

    ichan = channels[nport & 7];
    if (-1 == ichan) {
        dolog ("invalid channel read %#x\n", nport);
        return 0;
    }
    return d->regs[ichan].pageh;
}
static inline void init_chan (struct dma_cont *d, int ichan)
{
    struct dma_regs *r;

    r = d->regs + ichan;
    r->now[ADDR] = r->base[ADDR] << d->dshift;
    r->now[COUNT] = 0;
}
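/*
 * The 16-bit base address and count registers are programmed through
 * 8-bit ports, low byte first; an internal flip-flop (see getff()
 * below) tracks which half the next access addresses and toggles on
 * every access. An illustrative guest sequence for channel 1 on the
 * first controller, with outb(port, value) as shorthand:
 *
 *   outb(0x0c, 0x00);                     // any write clears the flip-flop
 *   outb(0x02, addr & 0xff);              // base address, low byte
 *   outb(0x02, (addr >> 8) & 0xff);       // base address, high byte
 *   outb(0x03, (len - 1) & 0xff);         // base count, low byte
 *   outb(0x03, ((len - 1) >> 8) & 0xff);  // base count, high byte
 */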
static inline int getff (struct dma_cont *d)
{
    int ff;

    ff = d->flip_flop;
    d->flip_flop = !ff;
    return ff;
}
static uint64_t read_chan(void *opaque, hwaddr nport, unsigned size)
{
    struct dma_cont *d = opaque;
    int ichan, nreg, iport, ff, val, dir;
    struct dma_regs *r;

    iport = (nport >> d->dshift) & 0x0f;
    ichan = iport >> 1;
    nreg = iport & 1;
    r = d->regs + ichan;

    dir = ((r->mode >> 5) & 1) ? -1 : 1;
    ff = getff (d);
    if (nreg)
        /* remaining count: programmed count minus what has transferred */
        val = (r->base[COUNT] << d->dshift) - r->now[COUNT];
    else
        val = r->now[ADDR] + r->now[COUNT] * dir;

    ldebug ("read_chan %#x -> %d\n", iport, val);
    return (val >> (d->dshift + (ff << 3))) & 0xff;
}
static void write_chan(void *opaque, hwaddr nport, uint64_t data,
                       unsigned size)
{
    struct dma_cont *d = opaque;
    int iport, ichan, nreg;
    struct dma_regs *r;

    iport = (nport >> d->dshift) & 0x0f;
    ichan = iport >> 1;
    nreg = iport & 1;
    r = d->regs + ichan;
    if (getff (d)) {
        r->base[nreg] = (r->base[nreg] & 0xff) | ((data << 8) & 0xff00);
        init_chan (d, ichan);
    } else {
        r->base[nreg] = (r->base[nreg] & 0xff00) | (data & 0xff);
    }
}
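/*
 * The control registers occupy the eight ports above the channel
 * registers: 0 command, 1 request, 2 single-channel mask, 3 mode,
 * 4 clear flip-flop, 5 master reset, 6 clear all mask bits, 7 write
 * all mask bits.
 */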
static void write_cont(void *opaque, hwaddr nport, uint64_t data,
                       unsigned size)
{
    struct dma_cont *d = opaque;
    int iport, ichan = 0;

    iport = (nport >> d->dshift) & 0x0f;
    switch (iport) {
    case 0x00: /* command */
        if ((data != 0) && (data & CMD_NOT_SUPPORTED)) {
            dolog("command %"PRIx64" not supported\n", data);
            return;
        }
        d->command = data;
        break;

    case 0x01: /* request */
        ichan = data & 3;
        if (data & 4) {
            d->status |= 1 << (ichan + 4);
        } else {
            d->status &= ~(1 << (ichan + 4));
        }
        d->status &= ~(1 << ichan);
        DMA_run();
        break;

    case 0x02: /* single mask */
        if (data & 4) {
            d->mask |= 1 << (data & 3);
        } else {
            d->mask &= ~(1 << (data & 3));
        }
        DMA_run();
        break;

    case 0x03: /* mode */
        {
            ichan = data & 3;
#ifdef DEBUG_DMA
            {
                int op, ai, dir, opmode;
                op = (data >> 2) & 3;
                ai = (data >> 4) & 1;
                dir = (data >> 5) & 1;
                opmode = (data >> 6) & 3;

                linfo ("ichan %d, op %d, ai %d, dir %d, opmode %d\n",
                       ichan, op, ai, dir, opmode);
            }
#endif
            d->regs[ichan].mode = data;
            break;
        }

    case 0x04: /* clear flip flop */
        d->flip_flop = 0;
        break;

    case 0x05: /* reset */
        d->flip_flop = 0;
        d->mask = ~0;
        d->status = 0;
        d->command = 0;
        break;

    case 0x06: /* clear mask for all channels */
        d->mask = 0;
        DMA_run();
        break;

    case 0x07: /* write mask for all channels */
        d->mask = data;
        DMA_run();
        break;

    default:
        dolog ("unknown iport %#x\n", iport);
        break;
    }

    linfo ("write_cont: nport %#06x, ichan % 2d, val %#06x\n",
           nport, ichan, data);
}
static uint64_t read_cont(void *opaque, hwaddr nport, unsigned size)
{
    struct dma_cont *d = opaque;
    int iport, val;

    iport = (nport >> d->dshift) & 0x0f;
    switch (iport) {
    case 0x00: /* status */
        val = d->status;
        d->status &= 0xf0; /* reading clears the terminal-count bits */
        break;
    case 0x01: /* mask */
        val = d->mask;
        break;
    default:
        val = 0;
        break;
    }

    ldebug ("read_cont: nport %#06x, iport %#04x val %#x\n", nport, iport, val);
    return val;
}
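/*
 * Public helpers: DMA channels 0-3 belong to the first (8-bit)
 * controller, channels 4-7 to the second (16-bit) one, hence the
 * recurring "nchan > 3" controller select and "nchan & 3" channel
 * select below.
 */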
int DMA_get_channel_mode (int nchan)
{
    return dma_controllers[nchan > 3].regs[nchan & 3].mode;
}
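/*
 * Devices raise and drop their DREQ line through the two helpers
 * below; a pending request is modelled as a bit in the high nibble
 * of the status register (bit 4 + channel), which DMA_run() checks
 * against the mask register before running the channel.
 */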
void DMA_hold_DREQ (int nchan)
{
    int ncont, ichan;

    ncont = nchan > 3;
    ichan = nchan & 3;
    linfo ("held cont=%d chan=%d\n", ncont, ichan);
    dma_controllers[ncont].status |= 1 << (ichan + 4);
    DMA_run();
}
void DMA_release_DREQ (int nchan)
{
    int ncont, ichan;

    ncont = nchan > 3;
    ichan = nchan & 3;
    linfo ("released cont=%d chan=%d\n", ncont, ichan);
    dma_controllers[ncont].status &= ~(1 << (ichan + 4));
    DMA_run();
}
static void channel_run (int ncont, int ichan)
{
    int n;
    struct dma_regs *r = &dma_controllers[ncont].regs[ichan];
#ifdef DEBUG_DMA
    int dir, opmode;

    dir = (r->mode >> 5) & 1;
    opmode = (r->mode >> 6) & 3;

    if (dir) {
        dolog ("DMA in address decrement mode\n");
    }
    if (opmode != 1) {
        dolog ("DMA not in single mode select %#x\n", opmode);
    }
#endif

    n = r->transfer_handler (r->opaque, ichan + (ncont << 2),
                             r->now[COUNT], (r->base[COUNT] + 1) << ncont);
    /* the handler returns the updated position within the transfer */
    r->now[COUNT] = n;
    ldebug ("dma_pos %d size %d\n", n, (r->base[COUNT] + 1) << ncont);
}
static QEMUBH *dma_bh;
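/*
 * DMA_run() gives every unmasked channel with a pending request one
 * pass through its transfer handler. The static flag guards against
 * reentrancy (handlers can raise new requests), and unfinished work
 * is rescheduled through an idle bottom half instead of being looped
 * on, so a slow device cannot stall the main loop.
 */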
static void DMA_run (void)
{
    struct dma_cont *d;
    int icont, ichan;
    int rearm = 0;
    static int running = 0;

    if (running) {
        rearm = 1;
        goto out;
    } else {
        running = 1;
    }

    d = dma_controllers;

    for (icont = 0; icont < 2; icont++, d++) {
        for (ichan = 0; ichan < 4; ichan++) {
            int mask;

            mask = 1 << ichan;

            if ((0 == (d->mask & mask)) && (0 != (d->status & (mask << 4)))) {
                channel_run (icont, ichan);
                rearm = 1;
            }
        }
    }

    running = 0;
out:
    if (rearm)
        qemu_bh_schedule_idle(dma_bh);
}
static void DMA_run_bh(void *unused)
{
    DMA_run();
}
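/*
 * A device claims a channel by registering a transfer handler; the
 * handler is called with the current position and the total transfer
 * size and returns the new position. An illustrative sketch (MyState,
 * my_dma_handler, FIFO_SIZE and s->dma_chan are made-up names, not
 * part of this file):
 *
 *   static int my_dma_handler(void *opaque, int nchan, int pos, int len)
 *   {
 *       MyState *s = opaque;
 *
 *       pos += DMA_read_memory(nchan, s->fifo, pos,
 *                              MIN(len - pos, FIFO_SIZE));
 *       return pos;   // channel is done when pos reaches len
 *   }
 *
 *   DMA_register_channel(s->dma_chan, my_dma_handler, s);
 */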
void DMA_register_channel (int nchan,
                           DMA_transfer_handler transfer_handler,
                           void *opaque)
{
    struct dma_regs *r;
    int ichan, ncont;

    ncont = nchan > 3;
    ichan = nchan & 3;

    r = dma_controllers[ncont].regs + ichan;
    r->transfer_handler = transfer_handler;
    r->opaque = opaque;
}
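/*
 * Transfers address physical memory assembled from three pieces:
 * pageh gives bits 24-30, page bits 16-23, and the channel's current
 * address the low 16 bits. For example, pageh = 0x01, page = 0x12 and
 * now[ADDR] = 0x3456 yield the physical address 0x01123456.
 */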
int DMA_read_memory (int nchan, void *buf, int pos, int len)
{
    struct dma_regs *r = &dma_controllers[nchan > 3].regs[nchan & 3];
    hwaddr addr = ((r->pageh & 0x7f) << 24) | (r->page << 16) | r->now[ADDR];

    if (r->mode & 0x20) { /* mode bit 5: address decrement */
        int i;
        uint8_t *p = buf;

        cpu_physical_memory_read (addr - pos - len, buf, len);
        /* What about 16bit transfers? */
        for (i = 0; i < len >> 1; i++) {
            uint8_t b = p[len - i - 1];
            p[i] = b;
        }
    }
    else
        cpu_physical_memory_read (addr + pos, buf, len);

    return len;
}
int DMA_write_memory (int nchan, void *buf, int pos, int len)
{
    struct dma_regs *r = &dma_controllers[nchan > 3].regs[nchan & 3];
    hwaddr addr = ((r->pageh & 0x7f) << 24) | (r->page << 16) | r->now[ADDR];

    if (r->mode & 0x20) { /* mode bit 5: address decrement */
        int i;
        uint8_t *p = buf;

        cpu_physical_memory_write (addr - pos - len, buf, len);
        /* What about 16bit transfers? */
        for (i = 0; i < len; i++) {
            uint8_t b = p[len - i - 1];
            p[i] = b;
        }
    }
    else
        cpu_physical_memory_write (addr + pos, buf, len);

    return len;
}
/* request the emulator to transfer a new DMA memory block ASAP */
void DMA_schedule(int nchan)
{
    struct dma_cont *d = &dma_controllers[nchan > 3];

    qemu_irq_pulse(*d->cpu_request_exit);
}
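/*
 * System reset is modelled as a guest write to the master-clear
 * register (iport 0x05): flip-flop cleared, all channels masked,
 * status and command zeroed.
 */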
static void dma_reset(void *opaque)
{
    struct dma_cont *d = opaque;
    write_cont(d, (0x05 << d->dshift), 0, 1);
}
/* placeholder handler for channels no device has claimed */
static int dma_phony_handler (void *opaque, int nchan, int dma_pos, int dma_len)
{
    trace_i8257_unregistered_dma(nchan, dma_pos, dma_len);
    return dma_pos;
}
static const MemoryRegionOps channel_io_ops = {
    .read = read_chan,
    .write = write_chan,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .impl = {
        .min_access_size = 1,
        .max_access_size = 1,
    },
};
/* IOport from page_base */
static const MemoryRegionPortio page_portio_list[] = {
    { 0x01, 3, 1, .write = write_page, .read = read_page, },
    { 0x07, 1, 1, .write = write_page, .read = read_page, },
    PORTIO_END_OF_LIST(),
};
/* IOport from pageh_base */
static const MemoryRegionPortio pageh_portio_list[] = {
    { 0x01, 3, 1, .write = write_pageh, .read = read_pageh, },
    { 0x07, 3, 1, .write = write_pageh, .read = read_pageh, },
    PORTIO_END_OF_LIST(),
};
static const MemoryRegionOps cont_io_ops = {
    .read = read_cont,
    .write = write_cont,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .impl = {
        .min_access_size = 1,
        .max_access_size = 1,
    },
};
/* dshift = 0: 8 bit DMA, 1 = 16 bit DMA */
static void dma_init2(struct dma_cont *d, int base, int dshift,
                      int page_base, int pageh_base,
                      qemu_irq *cpu_request_exit)
{
    int i;

    d->dshift = dshift;
    d->cpu_request_exit = cpu_request_exit;

    memory_region_init_io(&d->channel_io, NULL, &channel_io_ops, d,
                          "dma-chan", 8 << d->dshift);
    memory_region_add_subregion(isa_address_space_io(NULL),
                                base, &d->channel_io);

    isa_register_portio_list(NULL, page_base, page_portio_list, d,
                             "dma-page");
    if (pageh_base >= 0) {
        isa_register_portio_list(NULL, pageh_base, pageh_portio_list, d,
                                 "dma-pageh");
    }

    memory_region_init_io(&d->cont_io, NULL, &cont_io_ops, d, "dma-cont",
                          8 << d->dshift);
    memory_region_add_subregion(isa_address_space_io(NULL),
                                base + (8 << d->dshift), &d->cont_io);

    qemu_register_reset(dma_reset, d);
    dma_reset(d);
    for (i = 0; i < ARRAY_SIZE (d->regs); ++i) {
        d->regs[i].transfer_handler = dma_phony_handler;
    }
}
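/*
 * With the bases passed in from DMA_init() below this reproduces the
 * PC layout: controller 0 (8-bit) has channel registers at ports
 * 0x00-0x07 and control registers at 0x08-0x0f; controller 1 (16-bit,
 * dshift = 1) spans 0xc0-0xdf with a register every other port; page
 * registers sit in the 0x80-0x8f range, plus 0x480-0x48f when the
 * high page registers are enabled.
 */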
static const VMStateDescription vmstate_dma_regs = {
    .name = "dma_regs",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT32_ARRAY(now, struct dma_regs, 2),
        VMSTATE_UINT16_ARRAY(base, struct dma_regs, 2),
        VMSTATE_UINT8(mode, struct dma_regs),
        VMSTATE_UINT8(page, struct dma_regs),
        VMSTATE_UINT8(pageh, struct dma_regs),
        VMSTATE_UINT8(dack, struct dma_regs),
        VMSTATE_UINT8(eop, struct dma_regs),
        VMSTATE_END_OF_LIST()
    }
};
static int dma_post_load(void *opaque, int version_id)
{
    /* restart any transfer that was pending when the state was saved */
    DMA_run();

    return 0;
}
static const VMStateDescription vmstate_dma = {
    .name = "dma",
    .version_id = 1,
    .minimum_version_id = 1,
    .post_load = dma_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8(command, struct dma_cont),
        VMSTATE_UINT8(mask, struct dma_cont),
        VMSTATE_UINT8(flip_flop, struct dma_cont),
        VMSTATE_INT32(dshift, struct dma_cont),
        VMSTATE_STRUCT_ARRAY(regs, struct dma_cont, 4, 1, vmstate_dma_regs,
                             struct dma_regs),
        VMSTATE_END_OF_LIST()
    }
};
void DMA_init(int high_page_enable, qemu_irq *cpu_request_exit)
{
    dma_init2(&dma_controllers[0], 0x00, 0, 0x80,
              high_page_enable ? 0x480 : -1, cpu_request_exit);
    dma_init2(&dma_controllers[1], 0xc0, 1, 0x88,
              high_page_enable ? 0x488 : -1, cpu_request_exit);
    vmstate_register (NULL, 0, &vmstate_dma, &dma_controllers[0]);
    vmstate_register (NULL, 1, &vmstate_dma, &dma_controllers[1]);

    dma_bh = qemu_bh_new(DMA_run_bh, NULL);
}