/illumos-kvm-cmd/slirp/
socket.c
   105  iov[0].iov_len -= iov[0].iov_len%mss;              in sopreprbuf()
   110  if (iov[0].iov_len > len) iov[0].iov_len = len;    in sopreprbuf()
   117  total = iov[0].iov_len + iov[1].iov_len;           in sopreprbuf()
   132  iov[0].iov_len -= iov[0].iov_len%mss;              in sopreprbuf()
   139  return iov[0].iov_len + (n - 1) * iov[1].iov_len;  in sopreprbuf()
   167  nn = recv(so->s, iov[0].iov_base, iov[0].iov_len,0);   in soread()
   192  ret = recv(so->s, iov[1].iov_base, iov[1].iov_len,0);  in soread()
   370  if (iov[0].iov_len > len) iov[0].iov_len = len;    in sowrite()
   374  if (iov[0].iov_len > len) iov[0].iov_len = len;    in sowrite()
   379  if (iov[1].iov_len > len) iov[1].iov_len = len;    in sowrite()
   [all …]

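The sopreprbuf()/soread() hits above split a socket buffer's free space into at most two iovec segments (clamped to the MSS) so a single recv()/readv() can land data directly in a wrapped ring. Below is a minimal sketch of that two-segment pattern under invented names; it is not the slirp code itself, and the ring type and fields are made up for illustration.

/* Illustrative only: describe a ring buffer's free space as up to two
 * iovec segments and fill it with one readv(), slirp-style. */
#include <sys/uio.h>
#include <unistd.h>
#include <stddef.h>

struct ring {
    char   *buf;     /* backing storage          */
    size_t  size;    /* total capacity           */
    size_t  wpos;    /* write cursor             */
    size_t  used;    /* bytes currently buffered */
};

/* Fill iov[0..1] with the free space; return the segment count (0-2). */
static int ring_prep_iov(struct ring *r, struct iovec iov[2])
{
    size_t free_total = r->size - r->used;

    if (free_total == 0)
        return 0;
    if (r->wpos + free_total <= r->size) {
        iov[0].iov_base = r->buf + r->wpos;   /* contiguous free space */
        iov[0].iov_len  = free_total;
        return 1;
    }
    iov[0].iov_base = r->buf + r->wpos;       /* tail of the buffer */
    iov[0].iov_len  = r->size - r->wpos;
    iov[1].iov_base = r->buf;                 /* wrap to the head   */
    iov[1].iov_len  = free_total - iov[0].iov_len;
    return 2;
}

/* Read from fd straight into the ring without an intermediate copy. */
static ssize_t ring_read_fd(struct ring *r, int fd)
{
    struct iovec iov[2];
    int n = ring_prep_iov(r, iov);
    ssize_t got;

    if (n == 0)
        return 0;
    got = readv(fd, iov, n);
    if (got > 0) {
        r->used += (size_t)got;
        r->wpos  = (r->wpos + (size_t)got) % r->size;
    }
    return got;
}
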
socket.h
    92  size_t sopreprbuf(struct socket *so, struct iovec *iov, int *np);

/illumos-kvm-cmd/
cutils.c
   149  qiov->iov = iov;                                   in qemu_iovec_init_external()
   154  qiov->size += iov[i].iov_len;                      in qemu_iovec_init_external()
   163  qiov->iov = qemu_realloc(qiov->iov, qiov->nalloc * sizeof(struct iovec));  in qemu_iovec_add()
   165  qiov->iov[qiov->niov].iov_base = base;             in qemu_iovec_add()
   166  qiov->iov[qiov->niov].iov_len = len;               in qemu_iovec_add()
   192  skip -= src->iov[i].iov_len;                       in qemu_iovec_copy()
   220  qemu_free(qiov->iov);                              in qemu_iovec_destroy()
   237  memcpy(p, qiov->iov[i].iov_base, qiov->iov[i].iov_len);  in qemu_iovec_to_buffer()
   238  p += qiov->iov[i].iov_len;                         in qemu_iovec_to_buffer()
   255  if (copy > qiov->iov[i].iov_len)                   in qemu_iovec_from_buffer()
   [all …]

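The qemu_iovec_add()/qemu_iovec_to_buffer() hits above show the usual growable-vector-of-iovecs idiom: keep a reallocated array, track the element count and total byte size, and flatten on demand. The sketch below uses stand-in names and omits allocation-failure handling; it is not the QEMUIOVector implementation itself.

/* Sketch of a growable iovec list (error handling omitted for brevity). */
#include <sys/uio.h>
#include <stdlib.h>
#include <string.h>

struct iovlist {
    struct iovec *iov;
    int niov;        /* elements in use       */
    int nalloc;      /* elements allocated    */
    size_t size;     /* total bytes described */
};

static void iovlist_init(struct iovlist *l, int hint)
{
    l->nalloc = hint > 0 ? hint : 4;
    l->iov = malloc(l->nalloc * sizeof(struct iovec));
    l->niov = 0;
    l->size = 0;
}

static void iovlist_add(struct iovlist *l, void *base, size_t len)
{
    if (l->niov == l->nalloc) {           /* double when full */
        l->nalloc *= 2;
        l->iov = realloc(l->iov, l->nalloc * sizeof(struct iovec));
    }
    l->iov[l->niov].iov_base = base;
    l->iov[l->niov].iov_len  = len;
    l->size += len;
    l->niov++;
}

/* Flatten every segment into one buffer the caller has sized to l->size. */
static void iovlist_to_buffer(const struct iovlist *l, void *buf)
{
    char *p = buf;
    for (int i = 0; i < l->niov; i++) {
        memcpy(p, l->iov[i].iov_base, l->iov[i].iov_len);
        p += l->iov[i].iov_len;
    }
}

static void iovlist_destroy(struct iovlist *l)
{
    free(l->iov);
    l->iov = NULL;
    l->niov = l->nalloc = 0;
    l->size = 0;
}
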
dma-helpers.c
    48  QEMUIOVector iov;                                  member
    75  for (i = 0; i < dbs->iov.niov; ++i) {              in dma_bdrv_unmap()
    76  cpu_physical_memory_unmap(dbs->iov.iov[i].iov_base,  in dma_bdrv_unmap()
    77      dbs->iov.iov[i].iov_len, !dbs->is_write,       in dma_bdrv_unmap()
    78      dbs->iov.iov[i].iov_len);                      in dma_bdrv_unmap()
    89  dbs->sector_num += dbs->iov.size / 512;            in dma_bdrv_cb()
    91  qemu_iovec_reset(&dbs->iov);                       in dma_bdrv_cb()
    95  qemu_iovec_destroy(&dbs->iov);                     in dma_bdrv_cb()
   114  if (dbs->iov.size == 0) {                          in dma_bdrv_cb()
   128  qemu_iovec_destroy(&dbs->iov);                     in dma_bdrv_cb()
   [all …]

iov.c
    17  size_t iov_from_buf(struct iovec *iov, unsigned int iovcnt,  in iov_from_buf() argument
    27  len = MIN(iov[i].iov_len, size - offset);          in iov_from_buf()
    29  memcpy(iov[i].iov_base, buf + offset, len);        in iov_from_buf() local
    35  size_t iov_to_buf(const struct iovec *iov, const unsigned int iovcnt,  in iov_to_buf() argument
    46  if (offset < (iov_off + iov[i].iov_len)) {         in iov_to_buf()
    47  size_t len = MIN((iov_off + iov[i].iov_len) - offset , size);  in iov_to_buf()
    49  memcpy(ptr + buf_off, iov[i].iov_base + (offset - iov_off), len);  in iov_to_buf()
    55  iov_off += iov[i].iov_len;                         in iov_to_buf()
    60  size_t iov_size(const struct iovec *iov, const unsigned int iovcnt)  in iov_size() argument
    67  len += iov[i].iov_len;                             in iov_size()

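The iov_to_buf()/iov_from_buf()/iov_size() hits above implement offset-based copies between a scatter/gather vector and a flat buffer: walk the segments, skip the requested byte offset, then copy up to the requested size. A minimal self-contained sketch of that walk follows; the names and exact signatures are illustrative, not necessarily those in iov.c.

/* Sketch: byte-offset copy out of an iovec into a flat buffer. */
#include <sys/uio.h>
#include <string.h>

static size_t vec_size(const struct iovec *iov, unsigned int iovcnt)
{
    size_t len = 0;
    for (unsigned int i = 0; i < iovcnt; i++)
        len += iov[i].iov_len;
    return len;
}

/* Copy up to `size` bytes out of the vector, starting `offset` bytes in.
 * Returns the number of bytes actually copied. */
static size_t vec_to_buf(const struct iovec *iov, unsigned int iovcnt,
                         void *buf, size_t offset, size_t size)
{
    char *out = buf;
    size_t done = 0;

    for (unsigned int i = 0; i < iovcnt && done < size; i++) {
        size_t len = iov[i].iov_len;

        if (offset >= len) {          /* still skipping whole segments */
            offset -= len;
            continue;
        }
        len -= offset;
        if (len > size - done)
            len = size - done;
        memcpy(out + done, (char *)iov[i].iov_base + offset, len);
        done += len;
        offset = 0;
    }
    return done;
}
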
net.c
   587  memcpy(buffer + offset, iov[i].iov_base, len);     in vc_sendv_compat()
   600  offset += iov[i].iov_len;                          in calc_iov_length()
   613  return calc_iov_length(iov, iovcnt);               in qemu_deliver_packet_iov()
   617  return vc->info->receive_iov(vc, iov, iovcnt);     in qemu_deliver_packet_iov()
   619  return vc_sendv_compat(vc, iov, iovcnt);           in qemu_deliver_packet_iov()
   641  ret = calc_iov_length(iov, iovcnt);                in qemu_vlan_deliver_packet_iov()
   648  len = vc->info->receive_iov(vc, iov, iovcnt);      in qemu_vlan_deliver_packet_iov()
   650  len = vc_sendv_compat(vc, iov, iovcnt);            in qemu_vlan_deliver_packet_iov()
   666  return calc_iov_length(iov, iovcnt);               in qemu_sendv_packet_async()
   677  iov, iovcnt, sent_cb);                             in qemu_sendv_packet_async()
   [all …]

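The qemu_deliver_packet_iov() hits above show the dispatch shape: if a client provides a vectored receive callback, hand it the iovec as-is; otherwise flatten the segments into one contiguous buffer and use the plain receive path (vc_sendv_compat). The sketch below illustrates that pattern with invented client types and callbacks; it is not the net.c code.

/* Sketch: prefer a vectored receive callback, else flatten and fall back. */
#include <sys/types.h>
#include <sys/uio.h>
#include <stdlib.h>
#include <string.h>

struct client {
    ssize_t (*receive)(struct client *c, const void *buf, size_t len);
    ssize_t (*receive_iov)(struct client *c, const struct iovec *iov,
                           int iovcnt);
};

static ssize_t deliver_iov(struct client *c, const struct iovec *iov,
                           int iovcnt)
{
    size_t total = 0;
    char *buf, *p;
    ssize_t ret;
    int i;

    if (c->receive_iov)                        /* fast path: no copy */
        return c->receive_iov(c, iov, iovcnt);

    for (i = 0; i < iovcnt; i++)               /* compat path: flatten */
        total += iov[i].iov_len;
    buf = malloc(total);
    if (!buf)
        return -1;
    p = buf;
    for (i = 0; i < iovcnt; i++) {
        memcpy(p, iov[i].iov_base, iov[i].iov_len);
        p += iov[i].iov_len;
    }
    ret = c->receive(c, buf, total);
    free(buf);
    return ret;
}
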
posix-aio-compat.c
   155  qemu_preadv(int fd, const struct iovec *iov, int nr_iov, off_t offset)   in qemu_preadv() argument
   157  return preadv(fd, iov, nr_iov, offset);            in qemu_preadv()
   161  qemu_pwritev(int fd, const struct iovec *iov, int nr_iov, off_t offset)  in qemu_pwritev() argument
   163  return pwritev(fd, iov, nr_iov, offset);           in qemu_pwritev()
   169  qemu_preadv(int fd, const struct iovec *iov, int nr_iov, off_t offset)   in qemu_preadv() argument
   175  qemu_pwritev(int fd, const struct iovec *iov, int nr_iov, off_t offset)  in qemu_pwritev() argument
   577  acb->aio_iov = qiov->iov;                          in paio_submit()

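The two qemu_preadv()/qemu_pwritev() definitions above are the with-preadv and without-preadv build variants. One common way to emulate preadv() on a host that lacks it is a per-segment pread() loop, sketched below; this is illustrative and is not claimed to be the fallback this tree uses.

/* Sketch: emulate preadv() with a pread() per segment.  Unlike the real
 * syscall this is not atomic with respect to concurrent writers. */
#include <sys/uio.h>
#include <unistd.h>

static ssize_t preadv_emulated(int fd, const struct iovec *iov,
                               int iovcnt, off_t offset)
{
    ssize_t total = 0;

    for (int i = 0; i < iovcnt; i++) {
        ssize_t n = pread(fd, iov[i].iov_base, iov[i].iov_len, offset);
        if (n < 0)
            return total ? total : n;   /* report what was read, or the error */
        total  += n;
        offset += n;
        if ((size_t)n < iov[i].iov_len) /* short read: stop */
            break;
    }
    return total;
}
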
block-migration.c
    61  struct iovec iov;                                  member
   249  blk->iov.iov_base = blk->buf;                      in mig_save_device_bulk()
   250  blk->iov.iov_len = nr_sectors * BDRV_SECTOR_SIZE;  in mig_save_device_bulk()
   251  qemu_iovec_init_external(&blk->qiov, &blk->iov, 1);  in mig_save_device_bulk()
   408  blk->iov.iov_base = blk->buf;                      in mig_save_device_dirty()
   409  blk->iov.iov_len = nr_sectors * BDRV_SECTOR_SIZE;  in mig_save_device_dirty()
   410  qemu_iovec_init_external(&blk->qiov, &blk->iov, 1);  in mig_save_device_dirty()

net.h
   110  ssize_t qemu_sendv_packet(VLANClientState *vc, const struct iovec *iov,
   112  ssize_t qemu_sendv_packet_async(VLANClientState *vc, const struct iovec *iov,

linux-aio.c
   213  io_prep_pwritev(iocbs, fd, qiov->iov, qiov->niov, offset);  in laio_submit()
   216  io_prep_preadv(iocbs, fd, qiov->iov, qiov->niov, offset);   in laio_submit()

qemu-common.h
   308  struct iovec *iov;                                 member
   315  void qemu_iovec_init_external(QEMUIOVector *qiov, struct iovec *iov, int niov);

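Several callers elsewhere in this listing (block-migration.c, qed.c, qcow2-cluster.c, scsi-disk.c) use qemu_iovec_init_external() the same way: wrap a single contiguous buffer in a one-element iovec so it can be handed to an API that only accepts vectors. The sketch below shows that wrapper pattern with a stand-in view type rather than QEMUIOVector itself.

/* Sketch: point a vector "view" at an existing, caller-owned iovec array. */
#include <stddef.h>
#include <sys/uio.h>

struct vecview {
    struct iovec *iov;   /* caller-owned array, not copied */
    int niov;
    size_t size;         /* total bytes described          */
};

static void vecview_init_external(struct vecview *v, struct iovec *iov,
                                  int niov)
{
    size_t total = 0;

    v->iov = iov;
    v->niov = niov;
    for (int i = 0; i < niov; i++)
        total += iov[i].iov_len;
    v->size = total;
}

/* Typical single-buffer use, mirroring the blk->iov / r->iov hits. */
static void wrap_buffer(struct vecview *v, struct iovec *slot,
                        void *buf, size_t len)
{
    slot->iov_base = buf;
    slot->iov_len  = len;
    vecview_init_external(v, slot, 1);
}
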
/illumos-kvm-cmd/net/
vnic.c
   224  vnic_receive_iov(VLANClientState *ncp, const struct iovec *iov,  in vnic_receive_iov() argument
   233  total += (iov + i)->iov_len;                       in vnic_receive_iov()
   250  if (!is_arp_requestv(iov, iovcnt, vdsp))           in vnic_receive_iov()
   252  ret = create_arp_responsev(iov, iovcnt, vdsp);     in vnic_receive_iov()
   255  if (!is_dhcp_requestv(iov, iovcnt))                in vnic_receive_iov()
   257  ret = create_dhcp_responsev(iov, iovcnt, vdsp);    in vnic_receive_iov()
   287  vsp->vns_wfio->fio_vecs[i].fv_buf = iov->iov_base;      in vnic_receive_iov()
   288  vsp->vns_wfio->fio_vecs[i].fv_buflen = iov->iov_len;    in vnic_receive_iov()
   296  if (altsize + iov->iov_len > VNIC_BUFSIZE)         in vnic_receive_iov()
   299  bcopy(iov->iov_base, vsp->vns_txbuf + altsize, iov->iov_len);  in vnic_receive_iov()
   [all …]

queue.c
   115  const struct iovec *iov,                           in qemu_net_queue_append_iov() argument
   124  max_len += iov[i].iov_len;                         in qemu_net_queue_append_iov()
   134  size_t len = iov[i].iov_len;                       in qemu_net_queue_append_iov()
   136  memcpy(packet->data + packet->size, iov[i].iov_base, len);  in qemu_net_queue_append_iov()
   163  const struct iovec *iov,                           in qemu_net_queue_deliver_iov() argument
   169  ret = queue->deliver_iov(sender, flags, iov, iovcnt, queue->opaque);  in qemu_net_queue_deliver_iov()
   202  const struct iovec *iov,                           in qemu_net_queue_send_iov() argument
   209  return qemu_net_queue_append_iov(queue, sender, flags, iov, iovcnt, NULL);  in qemu_net_queue_send_iov()
   212  ret = qemu_net_queue_deliver_iov(queue, sender, flags, iov, iovcnt);  in qemu_net_queue_send_iov()
   214  qemu_net_queue_append_iov(queue, sender, flags, iov, iovcnt, sent_cb);  in qemu_net_queue_send_iov()

tap.c
   107  len = writev(s->fd, iov, iovcnt);                  in tap_write_packet()
   122  const struct iovec *iovp = iov;                    in tap_receive_iov()
   129  memcpy(&iov_copy[1], iov, iovcnt * sizeof(*iov));  in tap_receive_iov()
   140  struct iovec iov[2];                               in tap_receive_raw() local
   145  iov[iovcnt].iov_base = &hdr;                       in tap_receive_raw()
   150  iov[iovcnt].iov_base = (char *)buf;                in tap_receive_raw()
   151  iov[iovcnt].iov_len = size;                        in tap_receive_raw()
   160  struct iovec iov[1];                               in tap_receive() local
   166  iov[0].iov_base = (char *)buf;                     in tap_receive()
   167  iov[0].iov_len = size;                             in tap_receive()
   [all …]

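The tap_receive_iov()/tap_receive_raw() hits above use the classic "prepend a header without copying the payload" trick: reserve slot 0 of a local iovec array for the header, copy the caller's segment descriptors (not the data) after it, and hand the whole array to writev(). A sketch of that trick with an invented header type:

/* Sketch: prepend a header element to an existing vector before writev(). */
#include <sys/uio.h>
#include <string.h>
#include <stdint.h>

#define MAX_SEGS 64

struct pkt_hdr {
    uint16_t flags;
    uint16_t len;
};

static ssize_t write_with_header(int fd, struct pkt_hdr *hdr,
                                 const struct iovec *iov, int iovcnt)
{
    struct iovec v[MAX_SEGS + 1];

    if (iovcnt > MAX_SEGS)
        return -1;

    v[0].iov_base = hdr;                        /* header goes first        */
    v[0].iov_len  = sizeof(*hdr);
    memcpy(&v[1], iov, iovcnt * sizeof(*iov));  /* copy descriptors only    */

    return writev(fd, v, iovcnt + 1);           /* payload is never copied  */
}
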
vnic-dhcp.c
   390  *olen = iov->iov_len;                              in join_iov()
   391  return (iov->iov_base);                            in join_iov()
   395  clen = iov->iov_len;                               in join_iov()
   398  bcopy(iov->iov_base, buf + toff, clen);            in join_iov()
   401  iov++;                                             in join_iov()
   422  if (iov->iov_len >= len) {                         in copy_iov()
   423  *olen = iov->iov_len;                              in copy_iov()
   424  return (iov->iov_base);                            in copy_iov()
   427  clen = iov->iov_len;                               in copy_iov()
   435  iov++;                                             in copy_iov()
   [all …]

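The copy_iov()/join_iov() hits above follow a common "borrow or copy" idiom: if the first segment already holds the bytes the caller wants to inspect contiguously, return a pointer into it (zero copy); otherwise gather the bytes into a scratch buffer. A sketch of that idiom with illustrative names (the caller must provide scratch space of at least `len` bytes):

/* Sketch: contiguous view of the first `len` bytes of a vector. */
#include <sys/uio.h>
#include <string.h>

static void *iov_contig_view(const struct iovec *iov, int iovcnt,
                             size_t len, void *scratch, size_t *olen)
{
    char *out = scratch;
    size_t copied = 0;

    if (iovcnt > 0 && iov[0].iov_len >= len) {
        *olen = iov[0].iov_len;          /* zero-copy: borrow segment 0 */
        return iov[0].iov_base;
    }
    for (int i = 0; i < iovcnt && copied < len; i++) {
        size_t clen = iov[i].iov_len;
        if (clen > len - copied)
            clen = len - copied;
        memcpy(out + copied, iov[i].iov_base, clen);
        copied += clen;
    }
    *olen = copied;
    return scratch;
}
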
queue.h
    42  const struct iovec *iov,
    64  const struct iovec *iov,

/illumos-kvm-cmd/block/
qed-table.c
    24  struct iovec iov;                                  member
    32  int noffsets = read_table_cb->iov.iov_len / sizeof(uint64_t);  in qed_read_table_cb()
    63  read_table_cb->iov.iov_base = table->offsets,      in qed_read_table()
    64  read_table_cb->iov.iov_len = s->header.cluster_size * s->header.table_size,  in qed_read_table()
    66  qemu_iovec_init_external(qiov, &read_table_cb->iov, 1);  in qed_read_table()
    68  read_table_cb->iov.iov_len / BDRV_SECTOR_SIZE,     in qed_read_table()
    82  struct iovec iov;                                  member
   148  write_table_cb->iov.iov_base = write_table_cb->table->offsets;  in qed_write_table()
   149  write_table_cb->iov.iov_len = len_bytes;           in qed_write_table()
   150  qemu_iovec_init_external(&write_table_cb->qiov, &write_table_cb->iov, 1);  in qed_write_table()
   [all …]

sheepdog.c
   546  msg.msg_iov = iov;                                 in do_send_recv()
   554  iov++;                                             in do_send_recv()
   559  iov->iov_len -= diff;                              in do_send_recv()
   580  iov->iov_len += diff;                              in do_send_recv()
   671  struct iovec iov;                                  in do_read_write() local
   673  iov.iov_base = buf;                                in do_read_write()
   674  iov.iov_len = len;                                 in do_read_write()
   693  struct iovec iov[2];                               in send_req() local
   695  iov[0].iov_base = hdr;                             in send_req()
  1445  struct iovec iov;                                  in sd_write_done() local
   [all …]

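The do_send_recv() hits above patch the vector after a partial sendmsg() so the next call resumes where the last one stopped (the code trims the final segment's length and restores it afterwards). A related generic helper for that step, consuming bytes from the front of a vector, is sketched below; it is illustrative and not the sheepdog code itself.

/* Sketch: advance an iovec past `done` bytes already transferred. */
#include <sys/uio.h>

/* Returns the index of the first unfinished segment and patches its
 * base/len in place, so the caller can retry with
 * (iov + idx, iovcnt - idx). */
static int iov_consume(struct iovec *iov, int iovcnt, size_t done)
{
    int i = 0;

    while (i < iovcnt && done >= iov[i].iov_len) {
        done -= iov[i].iov_len;
        i++;
    }
    if (i < iovcnt && done > 0) {
        iov[i].iov_base = (char *)iov[i].iov_base + done;
        iov[i].iov_len -= done;
    }
    return i;
}
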
blkverify.c
   149  uint8_t *p = (uint8_t *)a->iov[i].iov_base;        in blkverify_iovec_compare()
   150  uint8_t *q = (uint8_t *)b->iov[i].iov_base;        in blkverify_iovec_compare()
   152  assert(a->iov[i].iov_len == b->iov[i].iov_len);    in blkverify_iovec_compare()
   153  while (len < a->iov[i].iov_len && *p++ == *q++) {  in blkverify_iovec_compare()
   159  if (len != a->iov[i].iov_len) {                    in blkverify_iovec_compare()
   211  sortelems[i].src_iov = &src->iov[i];               in blkverify_iovec_clone()
   234  qemu_iovec_add(dest, sortelems[i].dest_base, src->iov[i].iov_len);  in blkverify_iovec_clone()

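blkverify_iovec_compare() above walks two vectors that are expected to describe identical data and reports where they first differ. A minimal sketch of that comparison follows; like the assert in the hit above, it assumes both vectors share the same segment layout, and it is not the blkverify code itself.

/* Sketch: return the byte offset of the first mismatch, or -1 if equal. */
#include <sys/uio.h>
#include <assert.h>
#include <stdint.h>

static long iovec_first_mismatch(const struct iovec *a,
                                 const struct iovec *b, int iovcnt)
{
    long off = 0;

    for (int i = 0; i < iovcnt; i++) {
        const uint8_t *p = a[i].iov_base;
        const uint8_t *q = b[i].iov_base;
        size_t len = 0;

        assert(a[i].iov_len == b[i].iov_len);   /* same layout assumed */
        while (len < a[i].iov_len && p[len] == q[len])
            len++;
        if (len != a[i].iov_len)
            return off + (long)len;             /* first differing byte */
        off += (long)a[i].iov_len;
    }
    return -1;
}
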
qed.c
   106  struct iovec iov;                                  member
   167  write_header_cb->iov.iov_base = write_header_cb->buf;  in qed_write_header()
   168  write_header_cb->iov.iov_len = len;                in qed_write_header()
   169  qemu_iovec_init_external(&write_header_cb->qiov, &write_header_cb->iov, 1);  in qed_write_header()
   675  struct iovec iov;                                  member
   682  qemu_vfree(copy_cb->iov.iov_base);                 in qed_copy_from_backing_file_cb()
   733  copy_cb->iov.iov_base = qemu_blockalign(s->bs, len);  in qed_copy_from_backing_file()
   734  copy_cb->iov.iov_len = len;                        in qed_copy_from_backing_file()
   735  qemu_iovec_init_external(&copy_cb->qiov, &copy_cb->iov, 1);  in qed_copy_from_backing_file()

qcow2-cluster.c
   299  struct iovec iov;                                  in qcow2_read() local
   315  iov.iov_base = buf;                                in qcow2_read()
   316  iov.iov_len = n * 512;                             in qcow2_read()
   317  qemu_iovec_init_external(&qiov, &iov, 1);          in qcow2_read()

/illumos-kvm-cmd/hw/
xen_disk.c
   299  ioreq->v.iov[i].iov_base = ioreq->page[i] + (uintptr_t)ioreq->v.iov[i].iov_base;  in ioreq_map()
   322  ioreq->v.iov[i].iov_base,                          in ioreq_runio_qemu_sync()
   326  ioreq->v.iov[i].iov_base,                          in ioreq_runio_qemu_sync()
   327  ioreq->v.iov[i].iov_len);                          in ioreq_runio_qemu_sync()
   330  len += ioreq->v.iov[i].iov_len;                    in ioreq_runio_qemu_sync()
   331  pos += ioreq->v.iov[i].iov_len;                    in ioreq_runio_qemu_sync()
   341  ioreq->v.iov[i].iov_base,                          in ioreq_runio_qemu_sync()
   345  ioreq->v.iov[i].iov_base,                          in ioreq_runio_qemu_sync()
   346  ioreq->v.iov[i].iov_len);                          in ioreq_runio_qemu_sync()
   349  len += ioreq->v.iov[i].iov_len;                    in ioreq_runio_qemu_sync()
   [all …]

scsi-disk.c
    63  struct iovec iov;                                  member
   100  qemu_vfree(r->iov.iov_base);                       in scsi_remove_request()
   167  n = r->iov.iov_len / 512;                          in scsi_read_complete()
   198  r->iov.iov_len = n * 512;                          in scsi_read_request()
   199  qemu_iovec_init_external(&r->qiov, &r->iov, 1);    in scsi_read_request()
   269  n = r->iov.iov_len / 512;                          in scsi_write_complete()
   279  r->iov.iov_len = len;                              in scsi_write_complete()
   293  n = r->iov.iov_len / 512;                          in scsi_write_request()
   386  return (uint8_t *)r->iov.iov_base;                 in scsi_get_buf()
  1049  outbuf = (uint8_t *)r->iov.iov_base;               in scsi_send_command()
   [all …]

virtio-9p-local.c
   171  static ssize_t local_preadv(FsContext *ctx, int fd, const struct iovec *iov,  in local_preadv() argument
   175  return preadv(fd, iov, iovcnt, offset);            in local_preadv()
   181  return readv(fd, iov, iovcnt);                     in local_preadv()
   186  static ssize_t local_pwritev(FsContext *ctx, int fd, const struct iovec *iov,  in local_pwritev() argument
   190  return pwritev(fd, iov, iovcnt, offset);           in local_pwritev()
   196  return writev(fd, iov, iovcnt);                    in local_pwritev()

virtio-9p.h
   312  struct iovec iov[128]; /* FIXME: bad, bad, bad */  member
   332  struct iovec iov[128]; /* FIXME: bad, bad, bad */  member