xref: /illumos-kvm-cmd/kvm-all.c (revision 36b02c7d)
/*
 * QEMU KVM support
 *
 * Copyright IBM, Corp. 2008
 *           Red Hat, Inc. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *  Glauber Costa     <gcosta@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 * Copyright 2011 Joyent, Inc.
 */

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <stdarg.h>

#ifdef __sun__
#include <sys/kvm.h>
#else
#include <linux/kvm.h>
#endif


#include "qemu-common.h"
#include "qemu-barrier.h"
#include "sysemu.h"
#include "hw/hw.h"
#include "gdbstub.h"
#include "kvm.h"
#include "bswap.h"

/* This check must be after config-host.h is included */
#ifdef CONFIG_EVENTFD
#include <sys/eventfd.h>
#endif

/* KVM uses PAGE_SIZE in its definition of COALESCED_MMIO_MAX */
#define PAGE_SIZE TARGET_PAGE_SIZE

//#define DEBUG_KVM

#ifdef DEBUG_KVM
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

#ifdef OBSOLETE_KVM_IMPL

typedef struct KVMSlot
{
    target_phys_addr_t start_addr;
    ram_addr_t memory_size;
    ram_addr_t phys_offset;
    int slot;
    int flags;
} KVMSlot;

typedef struct kvm_dirty_log KVMDirtyLog;

struct KVMState
{
    KVMSlot slots[32];
    int fd;
    int vmfd;
    int coalesced_mmio;
    struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
    int broken_set_mem_region;
    int migration_log;
    int vcpu_events;
    int robust_singlestep;
    int debugregs;
#ifdef KVM_CAP_SET_GUEST_DEBUG
    struct kvm_sw_breakpoint_head kvm_sw_breakpoints;
#endif
    int irqchip_in_kernel;
    int pit_in_kernel;
    int xsave, xcrs;
    int many_ioeventfds;
};

static KVMState *kvm_state;


static const KVMCapabilityInfo kvm_required_capabilites[] = {
    KVM_CAP_INFO(USER_MEMORY),
    KVM_CAP_INFO(DESTROY_MEMORY_REGION_WORKS),
    KVM_CAP_LAST_INFO
};

#endif

static KVMSlot *kvm_alloc_slot(KVMState *s)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        /* KVM private memory slots */
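        /*
         * Slots 8-11 are skipped below; presumably these are reserved
         * by the in-kernel KVM for its own mappings (e.g. the x86 TSS
         * and identity-map pages), so user slots must not claim them.
         */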
        if (i >= 8 && i < 12) {
            continue;
        }
        if (s->slots[i].memory_size == 0) {
            return &s->slots[i];
        }
    }

    fprintf(stderr, "%s: no free slot available\n", __func__);
    abort();
}

static KVMSlot *kvm_lookup_matching_slot(KVMState *s,
                                         target_phys_addr_t start_addr,
                                         target_phys_addr_t end_addr)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        KVMSlot *mem = &s->slots[i];

        if (start_addr == mem->start_addr &&
            end_addr == mem->start_addr + mem->memory_size) {
            return mem;
        }
    }

    return NULL;
}

/*
 * Find overlapping slot with lowest start address
 */
static KVMSlot *kvm_lookup_overlapping_slot(KVMState *s,
                                            target_phys_addr_t start_addr,
                                            target_phys_addr_t end_addr)
{
    KVMSlot *found = NULL;
    int i;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        KVMSlot *mem = &s->slots[i];

        if (mem->memory_size == 0 ||
            (found && found->start_addr < mem->start_addr)) {
            continue;
        }

        if (end_addr > mem->start_addr &&
            start_addr < mem->start_addr + mem->memory_size) {
            found = mem;
        }
    }

    return found;
}

int kvm_physical_memory_addr_from_ram(KVMState *s, ram_addr_t ram_addr,
                                      target_phys_addr_t *phys_addr)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        KVMSlot *mem = &s->slots[i];

        if (ram_addr >= mem->phys_offset &&
            ram_addr < mem->phys_offset + mem->memory_size) {
            *phys_addr = mem->start_addr + (ram_addr - mem->phys_offset);
            return 1;
        }
    }

    return 0;
}

static int kvm_set_user_memory_region(KVMState *s, KVMSlot *slot)
{
    struct kvm_userspace_memory_region mem;
#ifdef CONFIG_SOLARIS
    caddr_t p;
    char c;
#endif

    mem.slot = slot->slot;
    mem.guest_phys_addr = slot->start_addr;
    mem.memory_size = slot->memory_size;
    mem.userspace_addr = (unsigned long)qemu_safe_ram_ptr(slot->phys_offset);
    mem.flags = slot->flags;
#ifdef CONFIG_SOLARIS
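    /*
     * Touch every page of the region before handing it to the kernel.
     * The illumos KVM driver presumably expects the user mapping to be
     * faulted in up front, so this read loop pre-faults each page.
     */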
    for (p = (caddr_t)mem.userspace_addr;
	 p < (caddr_t)mem.userspace_addr + mem.memory_size;
	 p += PAGE_SIZE)
	c = *p;
#endif /* CONFIG_SOLARIS */

    if (s->migration_log) {
        mem.flags |= KVM_MEM_LOG_DIRTY_PAGES;
    }
    return kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
}

#ifdef OBSOLETE_KVM_IMPL
static void kvm_reset_vcpu(void *opaque)
{
    CPUState *env = opaque;

    kvm_arch_reset_vcpu(env);
}
#endif

int kvm_irqchip_in_kernel(void)
{
    return kvm_state->irqchip_in_kernel;
}

int kvm_pit_in_kernel(void)
{
    return kvm_state->pit_in_kernel;
}


#ifdef OBSOLETE_KVM_IMPL
int kvm_init_vcpu(CPUState *env)
{
    KVMState *s = kvm_state;
    long mmap_size;
    int ret;

    DPRINTF("kvm_init_vcpu\n");

    ret = kvm_vm_ioctl(s, KVM_CREATE_VCPU, env->cpu_index);
    if (ret < 0) {
        DPRINTF("kvm_create_vcpu failed\n");
        goto err;
    }

    env->kvm_fd = ret;
    env->kvm_state = s;

    mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
    if (mmap_size < 0) {
        DPRINTF("KVM_GET_VCPU_MMAP_SIZE failed\n");
        goto err;
    }

    env->kvm_run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED,
                        env->kvm_fd, 0);
    if (env->kvm_run == MAP_FAILED) {
        ret = -errno;
        DPRINTF("mmap'ing vcpu state failed\n");
        goto err;
    }

    if (s->coalesced_mmio && !s->coalesced_mmio_ring) {
        s->coalesced_mmio_ring =
            (void *)env->kvm_run + s->coalesced_mmio * PAGE_SIZE;
    }

    ret = kvm_arch_init_vcpu(env);
    if (ret == 0) {
        qemu_register_reset(kvm_reset_vcpu, env);
        kvm_arch_reset_vcpu(env);
    }
err:
    return ret;
}
#endif

/*
 * dirty pages logging control
 */
static int kvm_dirty_pages_log_change(target_phys_addr_t phys_addr,
                                      ram_addr_t size, int flags, int mask)
{
    KVMState *s = kvm_state;
    KVMSlot *mem = kvm_lookup_matching_slot(s, phys_addr, phys_addr + size);
    int old_flags;

    if (mem == NULL) {
            fprintf(stderr, "BUG: %s: invalid parameters " TARGET_FMT_plx "-"
                    TARGET_FMT_plx "\n", __func__, phys_addr,
                    (target_phys_addr_t)(phys_addr + size - 1));
            return -EINVAL;
    }

    old_flags = mem->flags;

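    /* Replace the bits selected by mask with the new flag values. */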
    flags = (mem->flags & ~mask) | flags;
    mem->flags = flags;

    /* If nothing changed effectively, no need to issue ioctl */
    if (s->migration_log) {
        flags |= KVM_MEM_LOG_DIRTY_PAGES;
    }
    if (flags == old_flags) {
            return 0;
    }

    return kvm_set_user_memory_region(s, mem);
}

int kvm_log_start(target_phys_addr_t phys_addr, ram_addr_t size)
{
    return kvm_dirty_pages_log_change(phys_addr, size, KVM_MEM_LOG_DIRTY_PAGES,
                                      KVM_MEM_LOG_DIRTY_PAGES);
}

int kvm_log_stop(target_phys_addr_t phys_addr, ram_addr_t size)
{
    return kvm_dirty_pages_log_change(phys_addr, size, 0,
                                      KVM_MEM_LOG_DIRTY_PAGES);
}

static int kvm_set_migration_log(int enable)
{
    KVMState *s = kvm_state;
    KVMSlot *mem;
    int i, err;

    s->migration_log = enable;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        mem = &s->slots[i];

        if (!mem->memory_size) {
            continue;
        }
        if (!!(mem->flags & KVM_MEM_LOG_DIRTY_PAGES) == enable) {
            continue;
        }
        err = kvm_set_user_memory_region(s, mem);
        if (err) {
            return err;
        }
    }
    return 0;
}

/* get kvm's dirty pages bitmap and update qemu's */
static int kvm_get_dirty_pages_log_range(unsigned long start_addr,
                                         unsigned long *bitmap,
                                         unsigned long offset,
                                         unsigned long mem_size)
{
    unsigned int i, j;
    unsigned long page_number, addr, addr1, c;
    ram_addr_t ram_addr;
    unsigned int len = ((mem_size / TARGET_PAGE_SIZE) + HOST_LONG_BITS - 1) /
        HOST_LONG_BITS;

    /*
     * bitmap-traveling is faster than memory-traveling (for addr...)
     * especially when most of the memory is not dirty.
     */
    for (i = 0; i < len; i++) {
        if (bitmap[i] != 0) {
            c = leul_to_cpu(bitmap[i]);
            do {
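                /*
                 * ffsl() returns the 1-based index of the lowest set bit;
                 * clearing that bit each pass visits every dirty page once.
                 */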
                j = ffsl(c) - 1;
                c &= ~(1ul << j);
                page_number = i * HOST_LONG_BITS + j;
                addr1 = page_number * TARGET_PAGE_SIZE;
                addr = offset + addr1;
                ram_addr = cpu_get_physical_page_desc(addr);
                cpu_physical_memory_set_dirty(ram_addr);
            } while (c != 0);
        }
    }
    return 0;
}

#define ALIGN(x, y)  (((x)+(y)-1) & ~((y)-1))

/**
 * kvm_physical_sync_dirty_bitmap - Grab the dirty bitmap from kernel space
 * This function updates qemu's dirty bitmap using
 * cpu_physical_memory_set_dirty(), i.e. it only ever sets bits to dirty,
 * it never clears them.
 *
 * @start_addr: start of logged region.
 * @end_addr: end of logged region.
 */
static int kvm_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
                                          target_phys_addr_t end_addr)
{
    KVMState *s = kvm_state;
    unsigned long size, allocated_size = 0;
    KVMDirtyLog d;
    KVMSlot *mem;
    int ret = 0;

    d.dirty_bitmap = NULL;
    while (start_addr < end_addr) {
        mem = kvm_lookup_overlapping_slot(s, start_addr, end_addr);
        if (mem == NULL) {
            break;
        }

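        /*
         * One bit per target page, rounded up to a whole number of
         * host longs, expressed in bytes.
         */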
        size = ALIGN(((mem->memory_size) >> TARGET_PAGE_BITS), HOST_LONG_BITS) / 8;
        if (!d.dirty_bitmap) {
            d.dirty_bitmap = qemu_malloc(size);
        } else if (size > allocated_size) {
            d.dirty_bitmap = qemu_realloc(d.dirty_bitmap, size);
        }
        allocated_size = size;
        memset(d.dirty_bitmap, 0, allocated_size);

        d.slot = mem->slot;

        if (kvm_vm_ioctl(s, KVM_GET_DIRTY_LOG, &d) == -1) {
            DPRINTF("ioctl failed %d\n", errno);
            ret = -1;
            break;
        }

        kvm_get_dirty_pages_log_range(mem->start_addr, d.dirty_bitmap,
                                      mem->start_addr, mem->memory_size);
        start_addr = mem->start_addr + mem->memory_size;
    }
    qemu_free(d.dirty_bitmap);

    return ret;
}

int kvm_coalesce_mmio_region(target_phys_addr_t start, ram_addr_t size)
{
    int ret = -ENOSYS;
    KVMState *s = kvm_state;

    if (s->coalesced_mmio) {
        struct kvm_coalesced_mmio_zone zone;

        zone.addr = start;
        zone.size = size;

        ret = kvm_vm_ioctl(s, KVM_REGISTER_COALESCED_MMIO, &zone);
    }

    return ret;
}

int kvm_uncoalesce_mmio_region(target_phys_addr_t start, ram_addr_t size)
{
    int ret = -ENOSYS;
    KVMState *s = kvm_state;

    if (s->coalesced_mmio) {
        struct kvm_coalesced_mmio_zone zone;

        zone.addr = start;
        zone.size = size;

        ret = kvm_vm_ioctl(s, KVM_UNREGISTER_COALESCED_MMIO, &zone);
    }

    return ret;
}

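/*
 * Returns the extension's (positive) value when the host supports it,
 * or 0 when it does not; ioctl errors are folded into 0 as well.
 */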
int kvm_check_extension(KVMState *s, unsigned int extension)
{
    int ret;

    ret = kvm_ioctl(s, KVM_CHECK_EXTENSION, extension);
    if (ret < 0) {
        ret = 0;
    }

    return ret;
}

static int kvm_check_many_ioeventfds(void)
{
    /* Userspace can use ioeventfd for io notification.  This requires a host
     * that supports eventfd(2) and an I/O thread; since eventfd does not
     * support SIGIO it cannot interrupt the vcpu.
     *
     * Older kernels have a 6 device limit on the KVM io bus.  Find out so we
     * can avoid creating too many ioeventfds.
     */
#if defined(CONFIG_EVENTFD) && defined(CONFIG_IOTHREAD)
    int ioeventfds[7];
    int i, ret = 0;
    for (i = 0; i < ARRAY_SIZE(ioeventfds); i++) {
        ioeventfds[i] = eventfd(0, EFD_CLOEXEC);
        if (ioeventfds[i] < 0) {
            break;
        }
        ret = kvm_set_ioeventfd_pio_word(ioeventfds[i], 0, i, true);
        if (ret < 0) {
            close(ioeventfds[i]);
            break;
        }
    }

    /* Decide whether many devices are supported or not */
    ret = i == ARRAY_SIZE(ioeventfds);

    while (i-- > 0) {
        kvm_set_ioeventfd_pio_word(ioeventfds[i], 0, i, false);
        close(ioeventfds[i]);
    }
    return ret;
#else
    return 0;
#endif
}

#ifdef OBSOLETE_KVM_IMPL
static const KVMCapabilityInfo *
kvm_check_extension_list(KVMState *s, const KVMCapabilityInfo *list)
{
    while (list->name) {
        if (!kvm_check_extension(s, list->value)) {
            return list;
        }
        list++;
    }
    return NULL;
}
#endif

static void kvm_set_phys_mem(target_phys_addr_t start_addr, ram_addr_t size,
                             ram_addr_t phys_offset)
{
    KVMState *s = kvm_state;
    ram_addr_t flags = phys_offset & ~TARGET_PAGE_MASK;
    KVMSlot *mem, old;
    int err;

    /* kvm works in page size chunks, but the function may be called
       with sub-page size and unaligned start address. */
    size = TARGET_PAGE_ALIGN(size);
    start_addr = TARGET_PAGE_ALIGN(start_addr);

    /* KVM does not support read-only slots */
    phys_offset &= ~IO_MEM_ROM;

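    /*
     * Drop, split, or trim any existing slots that overlap the new
     * range before registering the remainder as a fresh slot below.
     */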
    while (1) {
        mem = kvm_lookup_overlapping_slot(s, start_addr, start_addr + size);
        if (!mem) {
            break;
        }

        if (flags < IO_MEM_UNASSIGNED && start_addr >= mem->start_addr &&
            (start_addr + size <= mem->start_addr + mem->memory_size) &&
            (phys_offset - start_addr == mem->phys_offset - mem->start_addr)) {
            /* The new slot fits into the existing one and comes with
             * identical parameters - nothing to be done. */
            return;
        }

        old = *mem;

        /* unregister the overlapping slot */
        mem->memory_size = 0;
        err = kvm_set_user_memory_region(s, mem);
        if (err) {
            fprintf(stderr, "%s: error unregistering overlapping slot: %s\n",
                    __func__, strerror(-err));
            abort();
        }

        /* Workaround for older KVM versions: we can't join slots, not even
         * by unregistering the previous ones and then registering the larger
         * slot. We have to maintain the existing fragmentation. Sigh.
         *
         * This workaround assumes that the new slot starts at the same
         * address as the first existing one. If not, or if some overlapping
         * slot comes around later, we will fail (not seen in practice so far)
         * - and actually require a recent KVM version. */
        if (s->broken_set_mem_region &&
            old.start_addr == start_addr && old.memory_size < size &&
            flags < IO_MEM_UNASSIGNED) {
            mem = kvm_alloc_slot(s);
            mem->memory_size = old.memory_size;
            mem->start_addr = old.start_addr;
            mem->phys_offset = old.phys_offset;
            mem->flags = 0;

            err = kvm_set_user_memory_region(s, mem);
            if (err) {
                fprintf(stderr, "%s: error updating slot: %s\n", __func__,
                        strerror(-err));
                abort();
            }

            start_addr += old.memory_size;
            phys_offset += old.memory_size;
            size -= old.memory_size;
            continue;
        }

        /* register prefix slot */
        if (old.start_addr < start_addr) {
            mem = kvm_alloc_slot(s);
            mem->memory_size = start_addr - old.start_addr;
            mem->start_addr = old.start_addr;
            mem->phys_offset = old.phys_offset;
            mem->flags = 0;

            err = kvm_set_user_memory_region(s, mem);
            if (err) {
                fprintf(stderr, "%s: error registering prefix slot: %s\n",
                        __func__, strerror(-err));
                abort();
            }
        }

        /* register suffix slot */
        if (old.start_addr + old.memory_size > start_addr + size) {
            ram_addr_t size_delta;

            mem = kvm_alloc_slot(s);
            mem->start_addr = start_addr + size;
            size_delta = mem->start_addr - old.start_addr;
            mem->memory_size = old.memory_size - size_delta;
            mem->phys_offset = old.phys_offset + size_delta;
            mem->flags = 0;

            err = kvm_set_user_memory_region(s, mem);
            if (err) {
                fprintf(stderr, "%s: error registering suffix slot: %s\n",
                        __func__, strerror(-err));
                abort();
            }
        }
    }

    /* in case the KVM bug workaround already "consumed" the new slot */
    if (!size) {
        return;
    }
    /* KVM does not need to know about this memory */
    if (flags >= IO_MEM_UNASSIGNED) {
        return;
    }
    mem = kvm_alloc_slot(s);
    mem->memory_size = size;
    mem->start_addr = start_addr;
    mem->phys_offset = phys_offset;
    mem->flags = 0;

    err = kvm_set_user_memory_region(s, mem);
    if (err) {
        fprintf(stderr, "%s: error registering slot: %s\n", __func__,
                strerror(-err));
        abort();
    }
}

static void kvm_client_set_memory(struct CPUPhysMemoryClient *client,
                                  target_phys_addr_t start_addr,
                                  ram_addr_t size, ram_addr_t phys_offset)
{
    kvm_set_phys_mem(start_addr, size, phys_offset);
}

static int kvm_client_sync_dirty_bitmap(struct CPUPhysMemoryClient *client,
                                        target_phys_addr_t start_addr,
                                        target_phys_addr_t end_addr)
{
    return kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
}

static int kvm_client_migration_log(struct CPUPhysMemoryClient *client,
                                    int enable)
{
    return kvm_set_migration_log(enable);
}

static CPUPhysMemoryClient kvm_cpu_phys_memory_client = {
    .set_memory = kvm_client_set_memory,
    .sync_dirty_bitmap = kvm_client_sync_dirty_bitmap,
    .migration_log = kvm_client_migration_log,
};

void kvm_cpu_register_phys_memory_client(void)
{
    cpu_register_phys_memory_client(&kvm_cpu_phys_memory_client);
}

#ifdef OBSOLETE_KVM_IMPL

int kvm_init(void)
{
    static const char upgrade_note[] =
        "Please upgrade to at least kernel 2.6.29 or recent kvm-kmod\n"
        "(see http://sourceforge.net/projects/kvm).\n";
    KVMState *s;
    const KVMCapabilityInfo *missing_cap;
    int ret;
    int i;

    s = qemu_mallocz(sizeof(KVMState));

#ifdef KVM_CAP_SET_GUEST_DEBUG
    QTAILQ_INIT(&s->kvm_sw_breakpoints);
#endif
    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        s->slots[i].slot = i;
    }
    s->vmfd = -1;
    s->fd = qemu_open("/dev/kvm", O_RDWR);
    if (s->fd == -1) {
        fprintf(stderr, "Could not access KVM kernel module: %m\n");
        ret = -errno;
        goto err;
    }

    ret = kvm_ioctl(s, KVM_GET_API_VERSION, 0);
    if (ret < KVM_API_VERSION) {
        if (ret > 0) {
            ret = -EINVAL;
        }
        fprintf(stderr, "kvm version too old\n");
        goto err;
    }

    if (ret > KVM_API_VERSION) {
        ret = -EINVAL;
        fprintf(stderr, "kvm version not supported\n");
        goto err;
    }

    s->vmfd = kvm_ioctl(s, KVM_CREATE_VM, 0);
    if (s->vmfd < 0) {
#ifdef TARGET_S390X
        fprintf(stderr, "Please add the 'switch_amode' kernel parameter to "
                        "your host kernel command line\n");
#endif
        goto err;
    }

    missing_cap = kvm_check_extension_list(s, kvm_required_capabilites);
    if (!missing_cap) {
        missing_cap =
            kvm_check_extension_list(s, kvm_arch_required_capabilities);
    }
    if (missing_cap) {
        ret = -EINVAL;
        fprintf(stderr, "kvm does not support %s\n%s",
                missing_cap->name, upgrade_note);
        goto err;
    }

    s->coalesced_mmio = kvm_check_extension(s, KVM_CAP_COALESCED_MMIO);

    s->broken_set_mem_region = 1;
#ifdef KVM_CAP_JOIN_MEMORY_REGIONS_WORKS
    ret = kvm_check_extension(s, KVM_CAP_JOIN_MEMORY_REGIONS_WORKS);
    if (ret > 0) {
        s->broken_set_mem_region = 0;
    }
#endif

    s->vcpu_events = 0;
#ifdef KVM_CAP_VCPU_EVENTS
    s->vcpu_events = kvm_check_extension(s, KVM_CAP_VCPU_EVENTS);
#endif

    s->robust_singlestep = 0;
#ifdef KVM_CAP_X86_ROBUST_SINGLESTEP
    s->robust_singlestep =
        kvm_check_extension(s, KVM_CAP_X86_ROBUST_SINGLESTEP);
#endif

    s->debugregs = 0;
#ifdef KVM_CAP_DEBUGREGS
    s->debugregs = kvm_check_extension(s, KVM_CAP_DEBUGREGS);
#endif

    s->xsave = 0;
#ifdef KVM_CAP_XSAVE
    s->xsave = kvm_check_extension(s, KVM_CAP_XSAVE);
#endif

    s->xcrs = 0;
#ifdef KVM_CAP_XCRS
    s->xcrs = kvm_check_extension(s, KVM_CAP_XCRS);
#endif

    ret = kvm_arch_init(s);
    if (ret < 0) {
        goto err;
    }

    kvm_state = s;
    cpu_register_phys_memory_client(&kvm_cpu_phys_memory_client);

    s->many_ioeventfds = kvm_check_many_ioeventfds();

    return 0;

err:
    if (s) {
        if (s->vmfd != -1) {
            close(s->vmfd);
        }
        if (s->fd != -1) {
            close(s->fd);
        }
    }
    qemu_free(s);

    return ret;
}
#endif

static int kvm_handle_io(uint16_t port, void *data, int direction, int size,
                         uint32_t count)
{
    int i;
    uint8_t *ptr = data;

    for (i = 0; i < count; i++) {
        if (direction == KVM_EXIT_IO_IN) {
            switch (size) {
            case 1:
                stb_p(ptr, cpu_inb(port));
                break;
            case 2:
                stw_p(ptr, cpu_inw(port));
                break;
            case 4:
                stl_p(ptr, cpu_inl(port));
                break;
            }
        } else {
            switch (size) {
            case 1:
                cpu_outb(port, ldub_p(ptr));
                break;
            case 2:
                cpu_outw(port, lduw_p(ptr));
                break;
            case 4:
                cpu_outl(port, ldl_p(ptr));
                break;
            }
        }

        ptr += size;
    }

    return 1;
}

#ifdef KVM_CAP_INTERNAL_ERROR_DATA
static int kvm_handle_internal_error(CPUState *env, struct kvm_run *run)
{
    fprintf(stderr, "KVM internal error.");
    if (kvm_check_extension(kvm_state, KVM_CAP_INTERNAL_ERROR_DATA)) {
        int i;

        fprintf(stderr, " Suberror: %d\n", run->internal.suberror);
        for (i = 0; i < run->internal.ndata; ++i) {
            fprintf(stderr, "extra data[%d]: %"PRIx64"\n",
                    i, (uint64_t)run->internal.data[i]);
        }
    } else {
        fprintf(stderr, "\n");
    }
    if (run->internal.suberror == KVM_INTERNAL_ERROR_EMULATION) {
        fprintf(stderr, "emulation failure\n");
        if (!kvm_arch_stop_on_emulation_error(env)) {
            cpu_dump_state(env, stderr, fprintf, CPU_DUMP_CODE);
            return 0;
        }
    }
    /* FIXME: Should trigger a qmp message to let management know
     * something went wrong.
     */
    return -1;
}
#endif

void kvm_flush_coalesced_mmio_buffer(void)
{
    KVMState *s = kvm_state;
    if (s->coalesced_mmio_ring) {
        struct kvm_coalesced_mmio_ring *ring = s->coalesced_mmio_ring;
        while (ring->first != ring->last) {
            struct kvm_coalesced_mmio *ent;

            ent = &ring->coalesced_mmio[ring->first];

            cpu_physical_memory_write(ent->phys_addr, ent->data, ent->len);
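            /*
             * Finish the copy before publishing the new ring index, so
             * the kernel cannot reuse this entry while it is being read.
             */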
            smp_wmb();
            ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX;
        }
    }
}

#ifdef OBSOLETE_KVM_IMPL

static void do_kvm_cpu_synchronize_state(void *_env)
{
    CPUState *env = _env;

    if (!env->kvm_vcpu_dirty) {
        kvm_arch_get_registers(env);
        env->kvm_vcpu_dirty = 1;
    }
}

void kvm_cpu_synchronize_state(CPUState *env)
{
    if (!env->kvm_vcpu_dirty) {
        run_on_cpu(env, do_kvm_cpu_synchronize_state, env);
    }
}

void kvm_cpu_synchronize_post_reset(CPUState *env)
{
    kvm_arch_put_registers(env, KVM_PUT_RESET_STATE);
    env->kvm_vcpu_dirty = 0;
}

void kvm_cpu_synchronize_post_init(CPUState *env)
{
    kvm_arch_put_registers(env, KVM_PUT_FULL_STATE);
    env->kvm_vcpu_dirty = 0;
}

int kvm_cpu_exec(CPUState *env)
{
    struct kvm_run *run = env->kvm_run;
    int ret;

    DPRINTF("kvm_cpu_exec()\n");

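    /*
     * Loop convention below: ret > 0 re-enters KVM_RUN, ret == 0 drops
     * back to the main loop, and ret < 0 reports a fatal error.
     */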
    do {
#ifndef CONFIG_IOTHREAD
        if (env->exit_request) {
            DPRINTF("interrupt exit requested\n");
            ret = 0;
            break;
        }
#endif

        if (kvm_arch_process_irqchip_events(env)) {
            ret = 0;
            break;
        }

        if (env->kvm_vcpu_dirty) {
            kvm_arch_put_registers(env, KVM_PUT_RUNTIME_STATE);
            env->kvm_vcpu_dirty = 0;
        }

        kvm_arch_pre_run(env, run);
        cpu_single_env = NULL;
        qemu_mutex_unlock_iothread();
        ret = kvm_vcpu_ioctl(env, KVM_RUN, 0);
        qemu_mutex_lock_iothread();
        cpu_single_env = env;
        kvm_arch_post_run(env, run);

        kvm_flush_coalesced_mmio_buffer();

        if (ret == -EINTR || ret == -EAGAIN) {
            cpu_exit(env);
            DPRINTF("io window exit\n");
            ret = 0;
            break;
        }

        if (ret < 0) {
            DPRINTF("kvm run failed %s\n", strerror(-ret));
            abort();
        }

        ret = 0; /* exit loop */
        switch (run->exit_reason) {
        case KVM_EXIT_IO:
            DPRINTF("handle_io\n");
            ret = kvm_handle_io(run->io.port,
                                (uint8_t *)run + run->io.data_offset,
                                run->io.direction,
                                run->io.size,
                                run->io.count);
            break;
        case KVM_EXIT_MMIO:
            DPRINTF("handle_mmio\n");
            cpu_physical_memory_rw(run->mmio.phys_addr,
                                   run->mmio.data,
                                   run->mmio.len,
                                   run->mmio.is_write);
            ret = 1;
            break;
        case KVM_EXIT_IRQ_WINDOW_OPEN:
            DPRINTF("irq_window_open\n");
            break;
        case KVM_EXIT_SHUTDOWN:
            DPRINTF("shutdown\n");
            qemu_system_reset_request();
            ret = 1;
            break;
        case KVM_EXIT_UNKNOWN:
            fprintf(stderr, "KVM: unknown exit, hardware reason %" PRIx64 "\n",
                    (uint64_t)run->hw.hardware_exit_reason);
            ret = -1;
            break;
#ifdef KVM_CAP_INTERNAL_ERROR_DATA
        case KVM_EXIT_INTERNAL_ERROR:
            ret = kvm_handle_internal_error(env, run);
            break;
#endif
        case KVM_EXIT_DEBUG:
            DPRINTF("kvm_exit_debug\n");
#ifdef KVM_CAP_SET_GUEST_DEBUG
            if (kvm_arch_debug(&run->debug.arch)) {
                env->exception_index = EXCP_DEBUG;
                return 0;
            }
            /* re-enter, this exception was guest-internal */
            ret = 1;
#endif /* KVM_CAP_SET_GUEST_DEBUG */
            break;
        default:
            DPRINTF("kvm_arch_handle_exit\n");
            ret = kvm_arch_handle_exit(env, run);
            break;
        }
    } while (ret > 0);

    if (ret < 0) {
        cpu_dump_state(env, stderr, fprintf, CPU_DUMP_CODE);
        vm_stop(0);
        env->exit_request = 1;
    }
    if (env->exit_request) {
        env->exit_request = 0;
        env->exception_index = EXCP_INTERRUPT;
    }

    return ret;
}

#endif
int kvm_ioctl(KVMState *s, int type, ...)
{
    int ret;
    void *arg;
    va_list ap;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    ret = ioctl(s->fd, type, arg);
    if (ret == -1) {
        ret = -errno;
    }
    return ret;
}

int kvm_vm_ioctl(KVMState *s, int type, ...)
{
    int ret;
    void *arg;
    va_list ap;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    ret = ioctl(s->vmfd, type, arg);
    if (ret == -1) {
        ret = -errno;
    }
    return ret;
}

#ifdef CONFIG_SOLARIS
int kvm_vm_clone(KVMState *s)
{
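    /*
     * Open a fresh /dev/kvm handle and ask the driver to attach it to
     * the VM identified by the original handle's device number;
     * presumably the illumos driver wants each caller to hold its own
     * fd onto the same VM rather than sharing one handle.
     */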
    struct stat stat;
    int fd;

    if (fstat(s->fd, &stat) != 0)
        return -errno;

    fd = qemu_open("/dev/kvm", O_RDWR);

    if (fd == -1)
         return -errno;

    if (ioctl(fd, KVM_CLONE, stat.st_rdev) == -1) {
        close(fd);
        return -errno;
    }

    return fd;
}
#endif

int kvm_vcpu_ioctl(CPUState *env, int type, ...)
{
    int ret;
    void *arg;
    va_list ap;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    ret = ioctl(env->kvm_fd, type, arg);
    if (ret == -1) {
        ret = -errno;
    }
    return ret;
}

int kvm_has_sync_mmu(void)
{
    return kvm_check_extension(kvm_state, KVM_CAP_SYNC_MMU);
}

int kvm_has_vcpu_events(void)
{
    return kvm_state->vcpu_events;
}

int kvm_has_robust_singlestep(void)
{
    return kvm_state->robust_singlestep;
}

int kvm_has_debugregs(void)
{
    return kvm_state->debugregs;
}

int kvm_has_xsave(void)
{
    return kvm_state->xsave;
}

int kvm_has_xcrs(void)
{
    return kvm_state->xcrs;
}

int kvm_has_many_ioeventfds(void)
{
    if (!kvm_enabled()) {
        return 0;
    }
    return kvm_state->many_ioeventfds;
}

void kvm_setup_guest_memory(void *start, size_t size)
{
    if (!kvm_has_sync_mmu()) {
        int ret = qemu_madvise(start, size, QEMU_MADV_DONTFORK);

        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr,
                    "Need MADV_DONTFORK in absence of synchronous KVM MMU\n");
            exit(1);
        }
    }
}

#ifdef KVM_CAP_SET_GUEST_DEBUG
#ifndef OBSOLETE_KVM_IMPL
#define run_on_cpu on_vcpu
#endif /* !OBSOLETE_KVM_IMPL */

struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *env,
                                                 target_ulong pc)
{
    struct kvm_sw_breakpoint *bp;

    QTAILQ_FOREACH(bp, &env->kvm_state->kvm_sw_breakpoints, entry) {
        if (bp->pc == pc) {
            return bp;
        }
    }
    return NULL;
}

int kvm_sw_breakpoints_active(CPUState *env)
{
    return !QTAILQ_EMPTY(&env->kvm_state->kvm_sw_breakpoints);
}

struct kvm_set_guest_debug_data {
    struct kvm_guest_debug dbg;
    CPUState *env;
    int err;
};

static void kvm_invoke_set_guest_debug(void *data)
{
    struct kvm_set_guest_debug_data *dbg_data = data;
    CPUState *env = dbg_data->env;

    dbg_data->err = kvm_vcpu_ioctl(env, KVM_SET_GUEST_DEBUG, &dbg_data->dbg);
}

int kvm_update_guest_debug(CPUState *env, unsigned long reinject_trap)
{
    struct kvm_set_guest_debug_data data;

    data.dbg.control = reinject_trap;

    if (env->singlestep_enabled) {
        data.dbg.control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;
    }
    kvm_arch_update_guest_debug(env, &data.dbg);
    data.env = env;

    run_on_cpu(env, kvm_invoke_set_guest_debug, &data);
    return data.err;
}

int kvm_insert_breakpoint(CPUState *current_env, target_ulong addr,
                          target_ulong len, int type)
{
    struct kvm_sw_breakpoint *bp;
    CPUState *env;
    int err;

    if (type == GDB_BREAKPOINT_SW) {
        bp = kvm_find_sw_breakpoint(current_env, addr);
        if (bp) {
            bp->use_count++;
            return 0;
        }

        bp = qemu_malloc(sizeof(struct kvm_sw_breakpoint));
        if (!bp) {
            return -ENOMEM;
        }

        bp->pc = addr;
        bp->use_count = 1;
        err = kvm_arch_insert_sw_breakpoint(current_env, bp);
        if (err) {
            free(bp);
            return err;
        }

        QTAILQ_INSERT_HEAD(&current_env->kvm_state->kvm_sw_breakpoints,
                          bp, entry);
    } else {
        err = kvm_arch_insert_hw_breakpoint(addr, len, type);
        if (err) {
            return err;
        }
    }

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        err = kvm_update_guest_debug(env, 0);
        if (err) {
            return err;
        }
    }
    return 0;
}

int kvm_remove_breakpoint(CPUState *current_env, target_ulong addr,
                          target_ulong len, int type)
{
    struct kvm_sw_breakpoint *bp;
    CPUState *env;
    int err;

    if (type == GDB_BREAKPOINT_SW) {
        bp = kvm_find_sw_breakpoint(current_env, addr);
        if (!bp) {
            return -ENOENT;
        }

        if (bp->use_count > 1) {
            bp->use_count--;
            return 0;
        }

        err = kvm_arch_remove_sw_breakpoint(current_env, bp);
        if (err) {
            return err;
        }

        QTAILQ_REMOVE(&current_env->kvm_state->kvm_sw_breakpoints, bp, entry);
        qemu_free(bp);
    } else {
        err = kvm_arch_remove_hw_breakpoint(addr, len, type);
        if (err) {
            return err;
        }
    }

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        err = kvm_update_guest_debug(env, 0);
        if (err) {
            return err;
        }
    }
    return 0;
}

void kvm_remove_all_breakpoints(CPUState *current_env)
{
    struct kvm_sw_breakpoint *bp, *next;
    KVMState *s = current_env->kvm_state;
    CPUState *env;

    QTAILQ_FOREACH_SAFE(bp, &s->kvm_sw_breakpoints, entry, next) {
        if (kvm_arch_remove_sw_breakpoint(current_env, bp) != 0) {
            /* Try harder to find a CPU that currently sees the breakpoint. */
            for (env = first_cpu; env != NULL; env = env->next_cpu) {
                if (kvm_arch_remove_sw_breakpoint(env, bp) == 0) {
                    break;
                }
            }
        }
    }
    kvm_arch_remove_all_hw_breakpoints();

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        kvm_update_guest_debug(env, 0);
    }
}

#else /* !KVM_CAP_SET_GUEST_DEBUG */

int kvm_update_guest_debug(CPUState *env, unsigned long reinject_trap)
{
    return -EINVAL;
}

int kvm_insert_breakpoint(CPUState *current_env, target_ulong addr,
                          target_ulong len, int type)
{
    return -EINVAL;
}

int kvm_remove_breakpoint(CPUState *current_env, target_ulong addr,
                          target_ulong len, int type)
{
    return -EINVAL;
}

void kvm_remove_all_breakpoints(CPUState *current_env)
{
}
#endif /* !KVM_CAP_SET_GUEST_DEBUG */

int kvm_set_signal_mask(CPUState *env, const sigset_t *sigset)
{
    struct kvm_signal_mask *sigmask;
    int r;

    if (!sigset) {
        return kvm_vcpu_ioctl(env, KVM_SET_SIGNAL_MASK, NULL);
    }

    sigmask = qemu_malloc(sizeof(*sigmask) + sizeof(*sigset));

    sigmask->len = sizeof (sigset_t);
    memcpy(sigmask->sigset, sigset, sizeof(*sigset));
    r = kvm_vcpu_ioctl(env, KVM_SET_SIGNAL_MASK, sigmask);
    free(sigmask);

    return r;
}

int kvm_set_ioeventfd_mmio_long(int fd, uint32_t addr, uint32_t val, bool assign)
{
#ifdef KVM_IOEVENTFD
    int ret;
    struct kvm_ioeventfd iofd;

    iofd.datamatch = val;
    iofd.addr = addr;
    iofd.len = 4;
    iofd.flags = KVM_IOEVENTFD_FLAG_DATAMATCH;
    iofd.fd = fd;

    if (!kvm_enabled()) {
        return -ENOSYS;
    }

    if (!assign) {
        iofd.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
    }

    ret = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &iofd);

    if (ret < 0) {
        return -errno;
    }

    return 0;
#else
    return -ENOSYS;
#endif
}

int kvm_set_ioeventfd_pio_word(int fd, uint16_t addr, uint16_t val, bool assign)
{
#ifdef KVM_IOEVENTFD
    struct kvm_ioeventfd kick = {
        .datamatch = val,
        .addr = addr,
        .len = 2,
        .flags = KVM_IOEVENTFD_FLAG_DATAMATCH | KVM_IOEVENTFD_FLAG_PIO,
        .fd = fd,
    };
    int r;
    if (!kvm_enabled()) {
        return -ENOSYS;
    }
    if (!assign) {
        kick.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
    }
    r = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &kick);
    if (r < 0) {
        return r;
    }
    return 0;
#else
    return -ENOSYS;
#endif
}

#if defined(KVM_IRQFD)
int kvm_set_irqfd(int gsi, int fd, bool assigned)
{
    struct kvm_irqfd irqfd = {
        .fd = fd,
        .gsi = gsi,
        .flags = assigned ? 0 : KVM_IRQFD_FLAG_DEASSIGN,
    };
    int r;
    if (!kvm_enabled() || !kvm_irqchip_in_kernel())
        return -ENOSYS;

    r = kvm_vm_ioctl(kvm_state, KVM_IRQFD, &irqfd);
    if (r < 0)
        return r;
    return 0;
}
#endif

#undef PAGE_SIZE
#include "qemu-kvm.c"