xref: /illumos-kvm-cmd/hw/ide/pci.c (revision 68396ea9)
/*
 * QEMU IDE Emulation: PCI Bus support.
 *
 * Copyright (c) 2003 Fabrice Bellard
 * Copyright (c) 2006 Openedhand Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include <hw/hw.h>
#include <hw/pc.h>
#include <hw/pci.h>
#include <hw/isa.h>
#include "block.h"
#include "block_int.h"
#include "sysemu.h"
#include "dma.h"

#include <hw/ide/pci.h>

#define BMDMA_PAGE_SIZE 4096

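/*
 * Latch the parameters of the new transfer in the BMDMA state (so it can
 * be restarted later, see bmdma_restart_dma()) and record the IDE core's
 * completion callback.  If the guest has already set the Start/Stop Bus
 * Master bit, kick the callback right away; otherwise the transfer starts
 * when the guest writes BM_CMD_START (see bmdma_cmd_writeb()).
 */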
static void bmdma_start_dma(IDEDMA *dma, IDEState *s,
                            BlockDriverCompletionFunc *dma_cb)
{
    BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);

    bm->unit = s->unit;
    bm->dma_cb = dma_cb;
    bm->cur_prd_last = 0;
    bm->cur_prd_addr = 0;
    bm->cur_prd_len = 0;
    bm->sector_num = ide_get_sector(s);
    bm->nsector = s->nsector;

    if (bm->status & BM_STATUS_DMAING) {
        bm->dma_cb(bmdma_active_if(bm), 0);
    }
}

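/*
 * Walk the guest's Physical Region Descriptor (PRD) table and collect the
 * regions it describes into s->sg.  Per the Bus Master IDE spec, each PRD
 * entry is 8 bytes: a 32-bit physical base address, then a 16-bit byte
 * count where 0 means 64 KiB, with bit 31 of the second dword marking the
 * last entry.  As a fail safe, a table is never followed beyond
 * BMDMA_PAGE_SIZE bytes.
 */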
/* return 0 if buffer completed */
static int bmdma_prepare_buf(IDEDMA *dma, int is_write)
{
    BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);
    IDEState *s = bmdma_active_if(bm);
    struct {
        uint32_t addr;
        uint32_t size;
    } prd;
    int l, len;

    qemu_sglist_init(&s->sg, s->nsector / (BMDMA_PAGE_SIZE / 512) + 1);
    s->io_buffer_size = 0;
    for(;;) {
        if (bm->cur_prd_len == 0) {
            /* end of table (with a fail safe of one page) */
            if (bm->cur_prd_last ||
                (bm->cur_addr - bm->addr) >= BMDMA_PAGE_SIZE)
                return s->io_buffer_size != 0;
            cpu_physical_memory_read(bm->cur_addr, (uint8_t *)&prd, 8);
            bm->cur_addr += 8;
            prd.addr = le32_to_cpu(prd.addr);
            prd.size = le32_to_cpu(prd.size);
            len = prd.size & 0xfffe;
            if (len == 0)
                len = 0x10000;
            bm->cur_prd_len = len;
            bm->cur_prd_addr = prd.addr;
            bm->cur_prd_last = (prd.size & 0x80000000);
        }
        l = bm->cur_prd_len;
        if (l > 0) {
            qemu_sglist_add(&s->sg, bm->cur_prd_addr, l);
            bm->cur_prd_addr += l;
            bm->cur_prd_len -= l;
            s->io_buffer_size += l;
        }
    }
    return 1;
}

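/*
 * Bounce-buffer variant of the PRD walk: instead of building a
 * scatter/gather list, copy data directly between the guest regions
 * described by the PRD table and s->io_buffer.  A nonzero is_write copies
 * from io_buffer out to guest memory; otherwise guest memory is read into
 * io_buffer.
 */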
/* return 0 if buffer completed */
static int bmdma_rw_buf(IDEDMA *dma, int is_write)
{
    BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);
    IDEState *s = bmdma_active_if(bm);
    struct {
        uint32_t addr;
        uint32_t size;
    } prd;
    int l, len;

    for(;;) {
        l = s->io_buffer_size - s->io_buffer_index;
        if (l <= 0)
            break;
        if (bm->cur_prd_len == 0) {
            /* end of table (with a fail safe of one page) */
            if (bm->cur_prd_last ||
                (bm->cur_addr - bm->addr) >= BMDMA_PAGE_SIZE)
                return 0;
            cpu_physical_memory_read(bm->cur_addr, (uint8_t *)&prd, 8);
            bm->cur_addr += 8;
            prd.addr = le32_to_cpu(prd.addr);
            prd.size = le32_to_cpu(prd.size);
            len = prd.size & 0xfffe;
            if (len == 0)
                len = 0x10000;
            bm->cur_prd_len = len;
            bm->cur_prd_addr = prd.addr;
            bm->cur_prd_last = (prd.size & 0x80000000);
        }
        if (l > bm->cur_prd_len)
            l = bm->cur_prd_len;
        if (l > 0) {
            if (is_write) {
                cpu_physical_memory_write(bm->cur_prd_addr,
                                          s->io_buffer + s->io_buffer_index, l);
            } else {
                cpu_physical_memory_read(bm->cur_prd_addr,
                                         s->io_buffer + s->io_buffer_index, l);
            }
            bm->cur_prd_addr += l;
            bm->cur_prd_len -= l;
            s->io_buffer_index += l;
        }
    }
    return 1;
}

static int bmdma_set_unit(IDEDMA *dma, int unit)
{
    BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);
    bm->unit = unit;

    return 0;
}

static int bmdma_add_status(IDEDMA *dma, int status)
{
    BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);
    bm->status |= status;

    return 0;
}

static int bmdma_set_inactive(IDEDMA *dma)
{
    BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);

    bm->status &= ~BM_STATUS_DMAING;
    bm->dma_cb = NULL;
    bm->unit = -1;

    return 0;
}

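/*
 * Restart an interrupted DMA transfer from the parameters latched in
 * bmdma_start_dma(): rewind the drive to the saved sector/count and the
 * engine to the start of the PRD table, then start the transfer again.
 */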
static void bmdma_restart_dma(BMDMAState *bm, int is_read)
{
    IDEState *s = bmdma_active_if(bm);

    ide_set_sector(s, bm->sector_num);
    s->io_buffer_index = 0;
    s->io_buffer_size = 0;
    s->nsector = bm->nsector;
    s->is_read = is_read;
    bm->cur_addr = bm->addr;
    bm->dma_cb = ide_dma_cb;
    bmdma_start_dma(&bm->dma, s, bm->dma_cb);
}

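/*
 * Bottom half run once the VM is back up: depending on which retry bits
 * were left in the status register, re-issue the interrupted DMA
 * transfer, PIO sector read/write, or cache flush.
 */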
static void bmdma_restart_bh(void *opaque)
{
    /* the opaque is &bm->dma (see bmdma_restart_cb below), so upcast it
       the same way the other callbacks do */
    IDEDMA *dma = opaque;
    BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);
    int is_read;

    qemu_bh_delete(bm->bh);
    bm->bh = NULL;

    is_read = !!(bm->status & BM_STATUS_RETRY_READ);

    if (bm->status & BM_STATUS_DMA_RETRY) {
        bm->status &= ~(BM_STATUS_DMA_RETRY | BM_STATUS_RETRY_READ);
        bmdma_restart_dma(bm, is_read);
    } else if (bm->status & BM_STATUS_PIO_RETRY) {
        bm->status &= ~(BM_STATUS_PIO_RETRY | BM_STATUS_RETRY_READ);
        if (is_read) {
            ide_sector_read(bmdma_active_if(bm));
        } else {
            ide_sector_write(bmdma_active_if(bm));
        }
    } else if (bm->status & BM_STATUS_RETRY_FLUSH) {
        ide_flush_cache(bmdma_active_if(bm));
    }
}

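/*
 * VM run state change callback: when the machine transitions back to
 * running, schedule the restart bottom half above (at most one at a
 * time).
 */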
static void bmdma_restart_cb(void *opaque, int running, int reason)
{
    IDEDMA *dma = opaque;
    BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);

    if (!running)
        return;

    if (!bm->bh) {
        bm->bh = qemu_bh_new(bmdma_restart_bh, &bm->dma);
        qemu_bh_schedule(bm->bh);
    }
}

static void bmdma_cancel(BMDMAState *bm)
{
    if (bm->status & BM_STATUS_DMAING) {
        /* cancel DMA request */
        bmdma_set_inactive(&bm->dma);
    }
}

static int bmdma_reset(IDEDMA *dma)
{
    BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);

#ifdef DEBUG_IDE
    printf("ide: dma_reset\n");
#endif
    bmdma_cancel(bm);
    bm->cmd = 0;
    bm->status = 0;
    bm->addr = 0;
    bm->cur_addr = 0;
    bm->cur_prd_last = 0;
    bm->cur_prd_addr = 0;
    bm->cur_prd_len = 0;
    bm->sector_num = 0;
    bm->nsector = 0;

    return 0;
}

static int bmdma_start_transfer(IDEDMA *dma)
{
    return 0;
}

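/*
 * Interposed IRQ handler: on a rising edge, latch BM_STATUS_INT in the
 * status register so the guest can observe the interrupt bit, then
 * forward the level to the original bus IRQ line in either case.
 */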
static void bmdma_irq(void *opaque, int n, int level)
{
    BMDMAState *bm = opaque;

    if (!level) {
        /* pass through lower */
        qemu_set_irq(bm->irq, level);
        return;
    }

    bm->status |= BM_STATUS_INT;

    /* trigger the real irq */
    qemu_set_irq(bm->irq, level);
}

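/*
 * Write handler for the Bus Master IDE command register.  Bit 0 (SSBM)
 * starts/stops the DMA engine and bit 3 selects the transfer direction;
 * only those two bits are kept (val & 0x09).  Writes that do not toggle
 * SSBM are otherwise ignored.
 */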
void bmdma_cmd_writeb(void *opaque, uint32_t addr, uint32_t val)
{
    BMDMAState *bm = opaque;
#ifdef DEBUG_IDE
    printf("%s: 0x%08x\n", __func__, val);
#endif

    /* Ignore writes to SSBM if it keeps the old value */
    if ((val & BM_CMD_START) != (bm->cmd & BM_CMD_START)) {
        if (!(val & BM_CMD_START)) {
            /*
             * We can't cancel Scatter Gather DMA in the middle of the
             * operation or a partial (not full) DMA transfer would reach
             * the storage, so we wait for completion instead (we behave
             * as if the DMA had already completed by the time the guest
             * tried to cancel it by calling bmdma_cmd_writeb with
             * BM_CMD_START not set).
             *
             * In the future we'll be able to safely cancel the I/O if the
             * whole DMA operation is submitted to disk with a single
             * aio operation with preadv/pwritev.
             */
            if (bm->bus->dma->aiocb) {
                qemu_aio_flush();
#ifdef DEBUG_IDE
                if (bm->bus->dma->aiocb)
                    printf("ide_dma_cancel: aiocb still pending\n");
                if (bm->status & BM_STATUS_DMAING)
                    printf("ide_dma_cancel: BM_STATUS_DMAING still pending\n");
#endif
            }
        } else {
            bm->cur_addr = bm->addr;
            if (!(bm->status & BM_STATUS_DMAING)) {
                bm->status |= BM_STATUS_DMAING;
                /* start dma transfer if possible */
                if (bm->dma_cb)
                    bm->dma_cb(bmdma_active_if(bm), 0);
            }
        }
    }

    bm->cmd = val & 0x09;
}

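/*
 * I/O handlers for the 32-bit PRD table pointer register.  The shift and
 * mask arithmetic lets the guest access the register with byte, word or
 * dword operations at any offset within it.
 */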
static void bmdma_addr_read(IORange *ioport, uint64_t addr,
                            unsigned width, uint64_t *data)
{
    BMDMAState *bm = container_of(ioport, BMDMAState, addr_ioport);
    uint32_t mask = (1ULL << (width * 8)) - 1;

    *data = (bm->addr >> (addr * 8)) & mask;
#ifdef DEBUG_IDE
    printf("%s: 0x%08x\n", __func__, (unsigned)*data);
#endif
}

static void bmdma_addr_write(IORange *ioport, uint64_t addr,
                             unsigned width, uint64_t data)
{
    BMDMAState *bm = container_of(ioport, BMDMAState, addr_ioport);
    int shift = addr * 8;
    uint32_t mask = (1ULL << (width * 8)) - 1;

#ifdef DEBUG_IDE
    printf("%s: 0x%08x\n", __func__, (unsigned)data);
#endif
    bm->addr &= ~(mask << shift);
    /* bits 1:0 of the PRD table address are reserved and kept clear */
    bm->addr |= ((data & mask) << shift) & ~3;
}

const IORangeOps bmdma_addr_ioport_ops = {
    .read = bmdma_addr_read,
    .write = bmdma_addr_write,
};

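/*
 * Migration subsection predicate: the "bmdma_current" state below only
 * needs to be sent when a transfer stopped part way through a PRD entry,
 * which keeps the stream compatible with older versions in the common
 * idle case.
 */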
static bool ide_bmdma_current_needed(void *opaque)
{
    BMDMAState *bm = opaque;

    return (bm->cur_prd_len != 0);
}

static const VMStateDescription vmstate_bmdma_current = {
    .name = "ide bmdma_current",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields      = (VMStateField []) {
        VMSTATE_UINT32(cur_addr, BMDMAState),
        VMSTATE_UINT32(cur_prd_last, BMDMAState),
        VMSTATE_UINT32(cur_prd_addr, BMDMAState),
        VMSTATE_UINT32(cur_prd_len, BMDMAState),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_bmdma = {
    .name = "ide bmdma",
    .version_id = 3,
    .minimum_version_id = 0,
    .minimum_version_id_old = 0,
    .fields      = (VMStateField []) {
        VMSTATE_UINT8(cmd, BMDMAState),
        VMSTATE_UINT8(status, BMDMAState),
        VMSTATE_UINT32(addr, BMDMAState),
        VMSTATE_INT64(sector_num, BMDMAState),
        VMSTATE_UINT32(nsector, BMDMAState),
        VMSTATE_UINT8(unit, BMDMAState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (VMStateSubsection []) {
        {
            .vmsd = &vmstate_bmdma_current,
            .needed = ide_bmdma_current_needed,
        }, {
            /* empty */
        }
    }
};

static int ide_pci_post_load(void *opaque, int version_id)
{
    PCIIDEState *d = opaque;
    int i;

    for(i = 0; i < 2; i++) {
        /* current versions always store 0/1, but older versions
           stored bigger values. We only need the last bit */
        d->bmdma[i].unit &= 1;
    }
    return 0;
}

const VMStateDescription vmstate_ide_pci = {
    .name = "ide",
    .version_id = 3,
    .minimum_version_id = 0,
    .minimum_version_id_old = 0,
    .post_load = ide_pci_post_load,
    .fields      = (VMStateField []) {
        VMSTATE_PCI_DEVICE(dev, PCIIDEState),
        VMSTATE_STRUCT_ARRAY(bmdma, PCIIDEState, 2, 0,
                             vmstate_bmdma, BMDMAState),
        VMSTATE_IDE_BUS_ARRAY(bus, PCIIDEState, 2),
        VMSTATE_IDE_DRIVES(bus[0].ifs, PCIIDEState),
        VMSTATE_IDE_DRIVES(bus[1].ifs, PCIIDEState),
        VMSTATE_END_OF_LIST()
    }
};

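/*
 * Attach up to four drives from hd_table to the two IDE buses: entries
 * 0/1 become master/slave on the primary bus, entries 2/3 on the
 * secondary bus.  NULL entries are skipped.
 */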
void pci_ide_create_devs(PCIDevice *dev, DriveInfo **hd_table)
{
    PCIIDEState *d = DO_UPCAST(PCIIDEState, dev, dev);
    static const int bus[4]  = { 0, 0, 1, 1 };
    static const int unit[4] = { 0, 1, 0, 1 };
    int i;

    for (i = 0; i < 4; i++) {
        if (hd_table[i] == NULL)
            continue;
        ide_create_drive(d->bus+bus[i], unit[i], hd_table[i]);
    }
}

static const struct IDEDMAOps bmdma_ops = {
    .start_dma = bmdma_start_dma,
    .start_transfer = bmdma_start_transfer,
    .prepare_buf = bmdma_prepare_buf,
    .rw_buf = bmdma_rw_buf,
    .set_unit = bmdma_set_unit,
    .add_status = bmdma_add_status,
    .set_inactive = bmdma_set_inactive,
    .restart_cb = bmdma_restart_cb,
    .reset = bmdma_reset,
};

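/*
 * Wire a BMDMA engine into an IDE bus: install the DMA ops and interpose
 * bmdma_irq on the bus IRQ line so that drive interrupts also latch
 * BM_STATUS_INT.  Calling this again for an already-wired pair is a
 * no-op.
 */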
void bmdma_init(IDEBus *bus, BMDMAState *bm)
{
    qemu_irq *irq;

    if (bus->dma == &bm->dma) {
        return;
    }

    bm->dma.ops = &bmdma_ops;
    bus->dma = &bm->dma;
    bm->irq = bus->irq;
    irq = qemu_allocate_irqs(bmdma_irq, bm, 1);
    bus->irq = *irq;
}
468