xref: /illumos-kvm/kvm_host.h (revision 08e1d7f3)
1 /*
2  * This work is licensed under the terms of the GNU GPL, version 2. See the
3  * COPYING file in the top-level directory.
4  *
5  * Copyright 2011 various Linux Kernel contributors.
6  * Copyright 2018 Joyent, Inc.
7  */
8 
9 #ifndef __KVM_HOST_H
10 #define	__KVM_HOST_H
11 
12 #include <sys/types.h>
13 #include <sys/list.h>
14 #include <sys/mutex.h>
15 #include <sys/sunddi.h>
16 
17 #include "kvm_types.h"
18 #include "kvm_impl.h"
19 #include "kvm_x86host.h"
20 
21 #define	NSEC_PER_MSEC 1000000L
22 #define	NSEC_PER_SEC 1000000000L
23 
24 #define	BITS_PER_LONG	(sizeof (unsigned long) * 8)
25 
26 /*
27  * vcpu->requests bit members
28  */
29 #define	KVM_REQ_TLB_FLUSH		0
30 #define	KVM_REQ_REPORT_TPR_ACCESS	2
31 #define	KVM_REQ_MMU_RELOAD		3
32 #define	KVM_REQ_TRIPLE_FAULT		4
33 #define	KVM_REQ_PENDING_TIMER		5
34 #define	KVM_REQ_UNHALT			6
35 #define	KVM_REQ_MMU_SYNC		7
36 #define	KVM_REQ_KVMCLOCK_UPDATE		8
37 #define	KVM_REQ_KICK			9
38 #define	KVM_REQ_DEACTIVATE_FPU		10
39 
40 #define	KVM_USERSPACE_IRQ_SOURCE_ID	0
41 
42 struct kvm;
43 struct kvm_vcpu;
44 
/*
 * Callback fired when a vCPU thread returns to userspace.  Consumers
 * embed this struct and register it with
 * kvm_user_return_notifier_register(); see also kvm_fire_urn().
 */
45 typedef struct kvm_user_return_notifier {
46 	void (*on_user_return)(struct kvm_vcpu *,
47 	    struct kvm_user_return_notifier *);
48 } kvm_user_return_notifier_t;
49 
50 extern void kvm_user_return_notifier_register(struct kvm_vcpu *,
51     struct kvm_user_return_notifier *);
52 extern void kvm_user_return_notifier_unregister(struct kvm_vcpu *,
53     struct kvm_user_return_notifier *);
54 extern void kvm_fire_urn(struct kvm_vcpu *);
55 
56 #define	KVM_NR_SHARED_MSRS 16
57 
/*
 * The global set of MSR indices whose host values are restored when a
 * vCPU thread returns to userspace (via the user-return notifier).
 */
58 typedef struct kvm_shared_msrs_global {
59 	int nr;			/* number of valid entries in msrs[] */
60 	uint32_t msrs[KVM_NR_SHARED_MSRS];	/* MSR indices */
61 } kvm_shared_msrs_global_t;
62 
/*
 * Host vs. currently-loaded values for the shared MSRs above.
 */
63 typedef struct kvm_shared_msrs {
64 	struct kvm_user_return_notifier urn;	/* fires on user return */
65 	int registered;		/* non-zero once urn has been registered */
66 	uint_t host_saved;	/* NOTE(review): appears to flag saved host state -- confirm */
67 	struct kvm_shared_msr_values {
68 		uint64_t host;	/* host's value, restored on user return */
69 		uint64_t curr;	/* currently-loaded value */
70 	} values[KVM_NR_SHARED_MSRS];
71 } kvm_shared_msrs_t;
72 
73 /*
74  * It would be nice to use something smarter than a linear search, TBD...
75  * Thankfully we dont expect many devices to register (famous last words :),
76  * so until then it will suffice.  At least its abstracted so we can change
77  * in one place.
78  */
79 typedef struct kvm_io_bus {
80 	int			dev_count;	/* number of entries in devs[] */
81 #define	NR_IOBUS_DEVS 200
82 	struct kvm_io_device	*devs[NR_IOBUS_DEVS];	/* searched linearly */
83 } kvm_io_bus_t;
84 
/*
 * The distinct I/O address spaces a device may be registered on;
 * KVM_NR_BUSES is the count, not a real bus.
 */
85 enum kvm_bus {
86 	KVM_MMIO_BUS,
87 	KVM_PIO_BUS,
88 	KVM_NR_BUSES
89 };
90 
91 extern int kvm_io_bus_write(struct kvm *, enum kvm_bus, gpa_t,
92     int, const void *);
93 extern int kvm_io_bus_read(struct kvm *, enum kvm_bus, gpa_t, int,
94     void *);
95 extern int kvm_io_bus_register_dev(struct kvm *, enum kvm_bus,
96     struct kvm_io_device *);
97 extern int kvm_io_bus_unregister_dev(struct kvm *, enum kvm_bus,
98     struct kvm_io_device *);
99 
100 #define	KVM_MAX_IRQ_ROUTES 1024
101 
102 #define	KVM_RINGBUF_NENTRIES	512
103 
104 #define	KVM_RINGBUF_TAG_CTXSAVE		1
105 #define	KVM_RINGBUF_TAG_CTXRESTORE	2
106 #define	KVM_RINGBUF_TAG_VMPTRLD		3
107 #define	KVM_RINGBUF_TAG_VCPUMIGRATE	4
108 #define	KVM_RINGBUF_TAG_VCPUCLEAR	5
109 #define	KVM_RINGBUF_TAG_VCPULOAD	6
110 #define	KVM_RINGBUF_TAG_VCPUPUT		7
111 #define	KVM_RINGBUF_TAG_RELOAD		8
112 #define	KVM_RINGBUF_TAG_EMUFAIL0	9
113 #define	KVM_RINGBUF_TAG_EMUFAIL1	10
114 #define	KVM_RINGBUF_TAG_EMUFAIL2	11
115 #define	KVM_RINGBUF_TAG_EMUXADD		12
116 #define	KVM_RINGBUF_TAG_MAX		12
117 
/*
 * A small fixed-size trace ring of tagged events, recorded via
 * kvm_ringbuf_record() with one of the KVM_RINGBUF_TAG_* tags; one ring
 * is embedded in each vCPU (kvcpu_ringbuf).
 */
118 typedef struct kvm_ringbuf_entry {
119 	uint32_t kvmre_tag;			/* tag for this entry */
120 	uint32_t kvmre_cpuid;			/* CPU of entry */
121 	uint64_t kvmre_thread;			/* thread for entry */
122 	uint64_t kvmre_tsc;			/* TSC at time of entry */
123 	uint64_t kvmre_payload;			/* payload for this entry */
124 } kvm_ringbuf_entry_t;
125 
126 typedef struct kvm_ringbuf {
127 	kvm_ringbuf_entry_t kvmr_buf[KVM_RINGBUF_NENTRIES]; /* ring buffer */
128 	kvm_ringbuf_entry_t kvmr_taglast[KVM_RINGBUF_TAG_MAX + 1]; /* last entry per tag */
129 	uint32_t kvmr_tagcount[KVM_RINGBUF_TAG_MAX + 1]; /* count of tags */
130 	uint32_t kvmr_ent;			/* current entry */
131 } kvm_ringbuf_t;
132 
133 extern void kvm_ringbuf_record(kvm_ringbuf_t *, uint32_t, uint64_t);
134 
/*
 * Per-virtual-CPU state.
 */
135 typedef struct kvm_vcpu {
136 	struct kvm *kvm;	/* owning VM */
137 	int vcpu_id;
138 	kmutex_t mutex;
139 	int   cpu;		/* host CPU; see kvm_arch_vcpu_load() */
140 	struct kvm_run *run;
141 	unsigned long requests;	/* bitmask of KVM_REQ_* request bits */
142 	unsigned long guest_debug;
143 	int srcu_idx;
144 
145 	int fpu_active;
146 	int guest_fpu_loaded;	/* see kvm_load_guest_fpu()/kvm_put_guest_fpu() */
147 
148 	kmutex_t kvcpu_kick_lock;	/* lock paired with kvcpu_kick_cv */
149 	kcondvar_t kvcpu_kick_cv;	/* see kvm_vcpu_kick()/kvm_vcpu_block() */
150 	kvm_vcpu_stats_t kvcpu_stats;
151 	kstat_t *kvcpu_kstat;	/* kstat exporting per-vCPU statistics */
152 	kvm_ringbuf_t kvcpu_ringbuf;	/* trace ring; see kvm_ringbuf_record() */
153 	int sigset_active;
154 	sigset_t sigset;	/* valid only when sigset_active is set */
155 	/* State of an in-flight emulated MMIO access: */
156 	int mmio_needed;
157 	int mmio_read_completed;
158 	int mmio_is_write;
159 	int mmio_size;
160 	unsigned char mmio_data[8];
161 	gpa_t mmio_phys_addr;
162 	struct kvm_vcpu_arch arch;	/* architecture-specific state */
163 	ddi_umem_cookie_t cookie;	/* ddi_umem allocation cookie */
164 	struct kvm_user_return_notifier *urn;	/* see kvm_fire_urn() */
165 } kvm_vcpu_t;
165 
/*
 * Describes one guest physical memory slot: a contiguous range of
 * guest frames and its associated mmu/dirty-tracking state.
 */
166 typedef struct kvm_memory_slot {
167 	gfn_t base_gfn;		/* first guest frame number of the slot */
168 	unsigned long npages;	/* length of the slot, in pages */
169 	unsigned long flags;
170 	unsigned long *rmap;	/* reverse-map (rmap) state */
171 	unsigned long *dirty_bitmap;	/* dirty-page log; see mark_page_dirty() */
172 	size_t dirty_bitmap_sz;	/* allocated size of dirty_bitmap */
173 	struct {
174 		unsigned long rmap_pde;
175 		int write_count;
176 	} *lpage_info[KVM_NR_PAGE_SIZES];	/* large-page tracking per size */
177 	size_t lpage_info_sz[KVM_NR_PAGE_SIZES]; /* allocated sizes of lpage_info[] */
178 	unsigned long userspace_addr;	/* backing userspace VA (when user_alloc) */
179 	int user_alloc;
180 } kvm_memory_slot_t;
181 
182 unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot);
183 
/*
 * Kernel-side form of one IRQ routing entry: a gsi routed either to an
 * irqchip pin or to an MSI message (union below).
 */
184 typedef struct kvm_kernel_irq_routing_entry {
185 	uint32_t gsi;
186 	uint32_t type;		/* selects the union arm below */
187 	int (*set)(struct kvm_kernel_irq_routing_entry *e,
188 	    struct kvm *kvm, int irq_source_id, int level);
189 	union {
190 		struct {
191 			unsigned irqchip;
192 			unsigned pin;
193 		} irqchip;
194 		struct msi_msg msi;
195 	};
196 	struct list_node link;	/* linkage on a per-gsi map[] list */
197 } kvm_kernel_irq_routing_entry_t;
198 
199 typedef struct kvm_irq_routing_table {
200 	int chip[KVM_NR_IRQCHIPS][KVM_IOAPIC_NUM_PINS];
201 	struct kvm_kernel_irq_routing_entry *rt_entries; /* backing array for map[] */
202 	uint32_t nr_rt_entries;
203 	/*
204 	 * Array indexed by gsi. Each entry contains list of irq chips
205 	 * the gsi is connected to.
206 	 */
207 	list_t map[KVM_MAX_IRQ_ROUTES+1];
208 } kvm_irq_routing_table_t;
209 
/*
 * The VM's memory slots: KVM_MEMORY_SLOTS user slots followed by
 * KVM_PRIVATE_MEM_SLOTS internal ones.
 */
210 typedef struct kvm_memslots {
211 	int nmemslots;
212 	struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS +
213 					KVM_PRIVATE_MEM_SLOTS];
214 } kvm_memslots_t;
215 
/*
 * Per-VM state.
 */
216 typedef struct kvm {
217 	kmutex_t mmu_lock;
218 	kmutex_t requests_lock;
219 	kmutex_t slots_lock;
220 	struct kvm_memslots *memslots;
221 	kmutex_t memslots_lock; /* linux uses rcu for this */
222 	/* the following was a read-copy update mechanism */
223 	/* we'll use a reader-writer lock, for now */
224 	krwlock_t kvm_rwlock;
225 	uint32_t bsp_vcpu_id;
226 	struct kvm_vcpu *bsp_vcpu;	/* bootstrap processor */
227 	struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
228 	volatile int online_vcpus;	/* count of online entries in vcpus[] */
229 	struct list_node vm_list;	/* linkage on the global VM list */
230 	kmutex_t lock;
231 	struct kvm_io_bus *buses[KVM_NR_BUSES];	/* indexed by enum kvm_bus */
232 	kmutex_t buses_lock;
233 	struct kstat *kvm_kstat;	/* kstat exporting kvm_stats */
234 	kvm_stats_t kvm_stats;
235 	struct kvm_arch arch;	/* architecture-specific state */
236 	volatile int users_count;	/* ref count; kvm_get_kvm()/kvm_put_kvm() */
237 	struct kvm_coalesced_mmio_dev *coalesced_mmio_dev;
238 	struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
239 	ddi_umem_cookie_t mmio_cookie;
240 
241 	kmutex_t irq_lock;
242 	struct kvm_irq_routing_table *irq_routing;
243 	int irq_routing_sz;	/* size of the irq_routing allocation */
244 	list_t mask_notifier_list;	/* kvm_irq_mask_notifier list */
245 	list_t irq_ack_notifier_list;	/* kvm_irq_ack_notifier list */
246 
247 	int kvmid;  /* unique identifier for this kvm */
248 	int kvm_clones;
249 	pid_t kvm_pid;			/* pid associated with this kvm */
250 	kmutex_t kvm_avllock;
251 	avl_tree_t kvm_avlmp;		/* avl tree for mmu to page_t mapping */
252 } kvm_t;
253 
254 
255 extern struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i);
256 
/*
 * Iterate idx/vcpup over a VM's vCPUs.  The walk stops early if
 * kvm_get_vcpu() returns NULL, and it reads online_vcpus without any
 * synchronization (hence the XXX below).
 */
257 #define	kvm_for_each_vcpu(idx, vcpup, kvm) \
258 	for (idx = 0, vcpup = kvm_get_vcpu(kvm, idx); \
259 	    idx < kvm->online_vcpus && vcpup; /* XXX - need protection */ \
260 	    vcpup = kvm_get_vcpu(kvm, ++idx))
261 
262 extern int kvm_vcpu_init(struct kvm_vcpu *, struct kvm *, unsigned);
263 extern void kvm_vcpu_uninit(struct kvm_vcpu *);
264 
265 extern void vcpu_load(struct kvm_vcpu *);
266 extern void vcpu_put(struct kvm_vcpu *);
267 
268 extern int kvm_init(void *);
269 extern void kvm_exit(void);
270 
271 extern void kvm_get_kvm(struct kvm *);
272 extern void kvm_put_kvm(struct kvm *);
273 
274 #define	HPA_MSB ((sizeof (hpa_t) * 8) - 1)
275 #define	HPA_ERR_MASK ((hpa_t)1 << HPA_MSB)
is_error_hpa(hpa_t hpa)276 static int is_error_hpa(hpa_t hpa) { return hpa >> HPA_MSB; }
277 
278 extern page_t *bad_page;
279 extern void *bad_page_kma;
280 extern pfn_t bad_pfn;
281 
282 extern int is_error_page(struct page *);
283 extern int is_error_pfn(pfn_t);
284 extern int kvm_is_error_hva(unsigned long);
285 
286 extern int kvm_set_memory_region(struct kvm *,
287     struct kvm_userspace_memory_region *, int);
288 extern int __kvm_set_memory_region(struct kvm *,
289     struct kvm_userspace_memory_region *, int);
290 extern int kvm_arch_prepare_memory_region(struct kvm *,
291     struct kvm_memory_slot *, struct kvm_memory_slot,
292     struct kvm_userspace_memory_region *, int);
293 extern void kvm_arch_commit_memory_region(struct kvm *,
294     struct kvm_userspace_memory_region *,
295     struct kvm_memory_slot, int);
296 
297 extern void kvm_disable_largepages(void);
298 extern void kvm_arch_flush_shadow(struct kvm *);
299 extern gfn_t unalias_gfn(struct kvm *, gfn_t);
300 extern gfn_t unalias_gfn_instantiation(struct kvm *, gfn_t);
301 
302 extern page_t *gfn_to_page(struct kvm *, gfn_t);
303 extern unsigned long gfn_to_hva(struct kvm *, gfn_t);
304 extern void kvm_release_page_clean(struct page *);
305 extern void kvm_release_page_dirty(struct page *);
306 extern void kvm_set_page_dirty(struct page *);
307 extern void kvm_set_page_accessed(struct page *);
308 
309 extern pfn_t gfn_to_pfn(struct kvm *, gfn_t);
310 extern pfn_t gfn_to_pfn_memslot(struct kvm *,
311     struct kvm_memory_slot *, gfn_t);
312 extern int memslot_id(struct kvm *, gfn_t);
313 extern void kvm_release_pfn_dirty(pfn_t);
314 extern void kvm_release_pfn_clean(pfn_t);
315 extern void kvm_set_pfn_dirty(pfn_t);
316 extern void kvm_set_pfn_accessed(struct kvm *, pfn_t);
317 extern void kvm_get_pfn(struct kvm_vcpu *, pfn_t);
318 
319 extern int kvm_read_guest_page(struct kvm *, gfn_t, void *, int, int);
320 extern int kvm_read_guest_atomic(struct kvm *, gpa_t, void *, unsigned long);
321 extern int kvm_read_guest(struct kvm *, gpa_t, void *, unsigned long);
322 extern int kvm_read_guest_virt_helper(gva_t, void *, unsigned int,
323     struct kvm_vcpu *, uint32_t, uint32_t *);
324 extern int kvm_write_guest_page(struct kvm *, gfn_t, const void *, int, int);
325 extern int kvm_write_guest(struct kvm *, gpa_t, const void *, unsigned long);
326 extern int kvm_clear_guest_page(struct kvm *, gfn_t, int, int);
327 extern int kvm_clear_guest(struct kvm *, gpa_t, unsigned long);
328 extern struct kvm_memory_slot *gfn_to_memslot(struct kvm *, gfn_t);
329 extern int kvm_is_visible_gfn(struct kvm *, gfn_t);
330 extern unsigned long kvm_host_page_size(struct kvm *, gfn_t);
331 extern void mark_page_dirty(struct kvm *, gfn_t);
332 
333 extern void kvm_vcpu_block(struct kvm_vcpu *);
334 extern void kvm_vcpu_on_spin(struct kvm_vcpu *);
335 extern void kvm_resched(struct kvm_vcpu *);
336 extern void kvm_load_guest_fpu(struct kvm_vcpu *);
337 extern void kvm_put_guest_fpu(struct kvm_vcpu *);
338 extern void kvm_flush_remote_tlbs(struct kvm *);
339 extern void kvm_reload_remote_mmus(struct kvm *);
340 
341 extern long kvm_arch_dev_ioctl(struct file *, unsigned int, unsigned long);
342 extern long kvm_arch_vcpu_ioctl(struct file *, unsigned int, unsigned long);
343 extern int kvm_dev_ioctl_check_extension(long, int *);
344 extern int kvm_get_dirty_log(struct kvm *, struct kvm_dirty_log *, int *);
345 extern int kvm_vm_ioctl_get_dirty_log(struct kvm *, struct kvm_dirty_log *);
346 
347 extern int kvm_vm_ioctl_get_msr_index_list(struct kvm *, uintptr_t);
348 extern int kvm_vm_ioctl_set_memory_region(struct kvm *,
349     struct kvm_userspace_memory_region *, int);
350 extern int kvm_vm_ioctl_set_tss_addr(struct kvm *, uintptr_t);
351 extern int kvm_vm_ioctl_get_irqchip(struct kvm *, struct kvm_irqchip *);
352 extern int kvm_vm_ioctl_set_irqchip(struct kvm *, struct kvm_irqchip *);
353 
354 extern int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *, struct kvm_fpu *);
355 extern int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *, struct kvm_fpu *);
356 
357 extern int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *, struct kvm_regs *);
358 extern int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *, struct kvm_regs *);
359 extern int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *, struct kvm_sregs *);
360 extern int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *, struct kvm_sregs *);
361 extern int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *,
362     struct kvm_mp_state *);
363 extern int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *,
364     struct kvm_mp_state *);
365 extern int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *,
366     struct kvm_guest_debug *);
367 extern int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *);
368 
369 extern int kvm_vcpu_ioctl_get_msrs(struct kvm_vcpu *, struct kvm_msrs *, int *);
370 extern int kvm_vcpu_ioctl_set_msrs(struct kvm_vcpu *, struct kvm_msrs *, int *);
371 extern int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *, uint64_t *);
372 extern int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *, struct kvm_cpuid2 *);
373 extern int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *, struct kvm_cpuid2 *,
374     int *, intptr_t);
375 extern int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *,
376     struct kvm_lapic_state *);
377 extern int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *,
378     struct kvm_lapic_state *);
379 extern int kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *,
380     struct kvm_vcpu_events *);
381 extern int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *,
382     struct kvm_vcpu_events *);
383 extern int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *, struct kvm_interrupt *);
384 extern int kvm_vcpu_ioctl_nmi(struct kvm_vcpu *);
385 extern int kvm_vm_ioctl_get_pit2(struct kvm *, struct kvm_pit_state2 *);
386 extern int kvm_vm_ioctl_set_pit2(struct kvm *, struct kvm_pit_state2 *);
387 extern int kvm_vm_ioctl_set_identity_map_addr(struct kvm *, uint64_t);
388 extern int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *,
389     struct kvm_cpuid_entry2 *);
390 
391 extern int kvm_arch_init(void *);
392 extern void kvm_arch_exit(void);
393 
394 extern int kvm_arch_vcpu_init(struct kvm_vcpu *);
395 extern void kvm_arch_vcpu_uninit(struct kvm_vcpu *);
396 
397 extern void kvm_arch_vcpu_free(struct kvm_vcpu *);
398 extern void kvm_arch_vcpu_load(struct kvm_vcpu *, int);
399 extern void kvm_arch_vcpu_put(struct kvm_vcpu *);
400 extern struct kvm_vcpu * kvm_arch_vcpu_create(struct kvm *, unsigned int);
401 extern int kvm_arch_vcpu_setup(struct kvm_vcpu *);
402 extern void kvm_arch_vcpu_destroy(struct kvm_vcpu *);
403 
404 extern int kvm_arch_vcpu_reset(struct kvm_vcpu *);
405 extern int kvm_arch_hardware_setup(void);
406 extern void kvm_arch_hardware_unsetup(void);
407 extern void kvm_arch_check_processor_compat(void *);
408 extern int kvm_arch_vcpu_runnable(struct kvm_vcpu *);
409 
410 extern void kvm_free_physmem(struct kvm *);
411 
412 extern struct  kvm *kvm_arch_create_vm(void);
413 extern void kvm_arch_destroy_vm(struct kvm *);
414 extern void kvm_arch_destroy_vm_comps(struct kvm *);
415 extern void kvm_free_all_assigned_devices(struct kvm *);
416 extern void kvm_arch_sync_events(struct kvm *);
417 
418 extern int kvm_cpu_has_pending_timer(struct kvm_vcpu *);
419 extern void kvm_vcpu_kick(struct kvm_vcpu *);
420 
421 extern int kvm_is_mmio_pfn(pfn_t);
422 
/*
 * Notifier invoked when the guest acknowledges an interrupt on the
 * given gsi; see kvm_notify_acked_irq() and
 * kvm_register_irq_ack_notifier().
 */
423 typedef struct kvm_irq_ack_notifier {
424 	list_t link;
425 	unsigned gsi;
426 	void (*irq_acked)(struct kvm_irq_ack_notifier *kian);
427 } kvm_irq_ack_notifier_t;
428 
429 #define	KVM_ASSIGNED_MSIX_PENDING		0x1
/*
 * Guest-side view of one MSI-X table entry for an assigned device.
 */
430 typedef struct kvm_guest_msix_entry {
431 	uint32_t vector;
432 	unsigned short entry;	/* index into the MSI-X table */
433 	unsigned short flags;	/* KVM_ASSIGNED_MSIX_PENDING */
434 } kvm_guest_msix_entry_t;
435 
/*
 * Host-side bookkeeping for a physical (PCI) device assigned to the
 * guest.
 */
436 typedef struct kvm_assigned_dev_kernel {
437 	struct kvm_irq_ack_notifier ack_notifier;
438 	list_t interrupt_work;
439 	list_t list;
440 	int assigned_dev_id;
441 	int host_segnr;		/* host PCI segment (domain) */
442 	int host_busnr;		/* host PCI bus */
443 	int host_devfn;		/* host PCI device/function */
444 	unsigned int entries_nr;	/* number of MSI-X entries */
445 	int host_irq;
446 	unsigned char host_irq_disabled;
447 	struct msix_entry *host_msix_entries;
448 	int guest_irq;
449 	struct kvm_guest_msix_entry *guest_msix_entries;
450 	unsigned long irq_requested_type;
451 	int irq_source_id;	/* from kvm_request_irq_source_id() */
452 	int flags;
453 	struct pci_dev *dev;	/* the underlying host device */
454 	struct kvm *kvm;	/* owning VM */
455 	kmutex_t assigned_dev_lock;
456 } kvm_assigned_dev_kernel_t;
457 
/*
 * Notifier invoked when an IRQ line is masked or unmasked; see
 * kvm_fire_mask_notifiers() and kvm_register_irq_mask_notifier().
 */
458 typedef struct kvm_irq_mask_notifier {
459 	void (*func)(struct kvm_irq_mask_notifier *kimn, int masked);
460 	int irq;
461 	struct list_node link;
462 } kvm_irq_mask_notifier_t;
463 
464 extern void kvm_register_irq_mask_notifier(struct kvm *, int,
465     struct kvm_irq_mask_notifier *);
466 extern void kvm_unregister_irq_mask_notifier(struct kvm *, int,
467     struct kvm_irq_mask_notifier *);
468 extern void kvm_fire_mask_notifiers(struct kvm *, int, int);
469 
470 extern int kvm_set_irq(struct kvm *, int, uint32_t, int);
471 extern void kvm_notify_acked_irq(struct kvm *, unsigned, unsigned);
472 extern void kvm_register_irq_ack_notifier(struct kvm *,
473     struct kvm_irq_ack_notifier *);
474 extern void kvm_unregister_irq_ack_notifier(struct kvm *,
475     struct kvm_irq_ack_notifier *);
476 extern int kvm_request_irq_source_id(struct kvm *);
477 extern void kvm_free_irq_source_id(struct kvm *, int);
478 
479 /* For vcpu->arch.iommu_flags */
480 #define	KVM_IOMMU_CACHE_COHERENCY	0x1
481 
482 extern void kvm_guest_enter(struct kvm_vcpu *);
483 extern void kvm_guest_exit(struct kvm_vcpu *);
484 
485 #ifndef KVM_ARCH_HAS_UNALIAS_INSTANTIATION
486 #define	unalias_gfn_instantiation unalias_gfn
487 #endif
488 
489 extern int kvm_setup_default_irq_routing(struct kvm *);
490 extern int kvm_set_irq_routing(struct kvm *,
491     const struct kvm_irq_routing_entry *,
492     unsigned, unsigned);
493 extern void kvm_free_irq_routing(struct kvm *);
494 
495 extern int kvm_vcpu_is_bsp(struct kvm_vcpu *);
496 
497 extern void kvm_sigprocmask(int how, sigset_t *, sigset_t *);
498 
499 #define	offset_in_page(p)	((unsigned long)(p) & ~PAGEMASK)
500 
501 #define	page_to_pfn(page) (page->p_pagenum)
502 
503 /* LDT or TSS descriptor in the GDT. 16 bytes. */
504 struct ldttss_desc64 {
505 	unsigned short limit0;	/* segment limit bits 15:0 */
506 	unsigned short base0;	/* base address bits 15:0 */
507 	unsigned base1 : 8, type : 5, dpl : 2, p : 1; /* base 23:16, type, DPL, present */
508 	unsigned limit1 : 4, zero0 : 3, g : 1, base2 : 8; /* limit 19:16, granularity, base 31:24 */
509 	uint32_t base3;		/* base address bits 63:32 */
510 	uint32_t zero1;		/* reserved; must be zero */
511 } __attribute__((packed));
512 
513 typedef struct ldttss_desc64 ldttss_desc64_t;
514 
515 #endif /* __KVM_HOST_H */
516