xref: /illumos-kvm-cmd/qemu-timer.h (revision 4f48eea6)
#ifndef QEMU_TIMER_H
#define QEMU_TIMER_H

#include "qemu-common.h"
#include <time.h>
#include <sys/time.h>

#ifdef _WIN32
#include <windows.h>
#include <mmsystem.h>
#endif

/* timers */

typedef struct QEMUClock QEMUClock;
typedef void QEMUTimerCB(void *opaque);

/* The real time clock should be used only for stuff which does not
   change the virtual machine state, as it is run even if the virtual
   machine is stopped. The real time clock has a frequency of 1000
   Hz. */
extern QEMUClock *rt_clock;

/* The virtual clock is only run during the emulation. It is stopped
   when the virtual machine is stopped. Virtual timers use a high
   precision clock, usually cpu cycles (use ticks_per_sec). */
extern QEMUClock *vm_clock;
/* The host clock should be used for device models that emulate accurate
   real time sources. It will continue to run when the virtual machine
   is suspended, and it will reflect system time changes the host may
   undergo (e.g. due to NTP). The host clock has the same precision as
   the virtual clock. */
extern QEMUClock *host_clock;

int64_t qemu_get_clock(QEMUClock *clock);
int64_t qemu_get_clock_ns(QEMUClock *clock);
void qemu_clock_enable(QEMUClock *clock, int enabled);

QEMUTimer *qemu_new_timer(QEMUClock *clock, QEMUTimerCB *cb, void *opaque);
void qemu_free_timer(QEMUTimer *ts);
void qemu_del_timer(QEMUTimer *ts);
void qemu_mod_timer(QEMUTimer *ts, int64_t expire_time);
int qemu_timer_pending(QEMUTimer *ts);
int qemu_timer_expired(QEMUTimer *timer_head, int64_t current_time);
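
/*
 * Illustrative usage sketch (not part of the original header): a device
 * model arming a periodic one-second timer on vm_clock.  The device
 * structure, callback and function names are hypothetical; the expire
 * time is expressed in the clock's own resolution, using
 * get_ticks_per_sec() (defined below) as one second on vm_clock.
 *
 *     typedef struct MyDevice {
 *         QEMUTimer *tick_timer;
 *     } MyDevice;
 *
 *     static void my_device_tick(void *opaque)
 *     {
 *         MyDevice *d = opaque;
 *
 *         // ... update emulated device state here ...
 *
 *         // Re-arm the timer one emulated second from now.
 *         qemu_mod_timer(d->tick_timer,
 *                        qemu_get_clock(vm_clock) + get_ticks_per_sec());
 *     }
 *
 *     static void my_device_start(MyDevice *d)
 *     {
 *         d->tick_timer = qemu_new_timer(vm_clock, my_device_tick, d);
 *         qemu_mod_timer(d->tick_timer,
 *                        qemu_get_clock(vm_clock) + get_ticks_per_sec());
 *     }
 *
 *     static void my_device_stop(MyDevice *d)
 *     {
 *         qemu_del_timer(d->tick_timer);
 *         qemu_free_timer(d->tick_timer);
 *         d->tick_timer = NULL;
 *     }
 */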

void qemu_run_all_timers(void);
int qemu_alarm_pending(void);
int64_t qemu_next_deadline(void);
void configure_alarms(char const *opt);
void configure_icount(const char *option);
int qemu_calculate_timeout(void);
void init_clocks(void);
int init_timer_alarm(void);
void quit_timers(void);

static inline int64_t get_ticks_per_sec(void)
{
    return 1000000000LL;
}

/* real time host monotonic timer */
static inline int64_t get_clock_realtime(void)
{
    struct timeval tv;

    gettimeofday(&tv, NULL);
    return tv.tv_sec * 1000000000LL + (tv.tv_usec * 1000);
}

/* Warning: don't insert tracepoints into these functions; they are
   also used by the simpletrace backend, and tracepoints would cause
   infinite recursion! */
#ifdef _WIN32
extern int64_t clock_freq;

static inline int64_t get_clock(void)
{
    LARGE_INTEGER ti;
    QueryPerformanceCounter(&ti);
    return muldiv64(ti.QuadPart, get_ticks_per_sec(), clock_freq);
}

#else

extern int use_rt_clock;

static inline int64_t get_clock(void)
{
#if defined(__sun__)
    return gethrtime();
#else
#if defined(__linux__) || (defined(__FreeBSD__) && __FreeBSD_version >= 500000) \
    || defined(__DragonFly__) || defined(__FreeBSD_kernel__)
    if (use_rt_clock) {
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec * 1000000000LL + ts.tv_nsec;
    } else
#endif
    {
        /* XXX: using gettimeofday leads to problems if the date
           changes, so it should be avoided. */
        return get_clock_realtime();
    }
#endif
}
#endif

void qemu_get_timer(QEMUFile *f, QEMUTimer *ts);
void qemu_put_timer(QEMUFile *f, QEMUTimer *ts);
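
/*
 * Illustrative sketch (not part of the original header): qemu_put_timer()
 * and qemu_get_timer() save and restore a timer's expiry state within a
 * device's savevm handlers.  The device, handler names and extra fields
 * are hypothetical; qemu_put_be32()/qemu_get_be32() are assumed to come
 * from the usual savevm support code.
 *
 *     static void my_device_save(QEMUFile *f, void *opaque)
 *     {
 *         MyDevice *d = opaque;
 *
 *         qemu_put_be32(f, d->some_counter);
 *         qemu_put_timer(f, d->tick_timer);
 *     }
 *
 *     static int my_device_load(QEMUFile *f, void *opaque, int version_id)
 *     {
 *         MyDevice *d = opaque;
 *
 *         if (version_id != 1)
 *             return -1;
 *         d->some_counter = qemu_get_be32(f);
 *         qemu_get_timer(f, d->tick_timer);
 *         return 0;
 *     }
 */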

/* ptimer.c */
typedef struct ptimer_state ptimer_state;
typedef void (*ptimer_cb)(void *opaque);

ptimer_state *ptimer_init(QEMUBH *bh);
void ptimer_set_period(ptimer_state *s, int64_t period);
void ptimer_set_freq(ptimer_state *s, uint32_t freq);
void ptimer_set_limit(ptimer_state *s, uint64_t limit, int reload);
uint64_t ptimer_get_count(ptimer_state *s);
void ptimer_set_count(ptimer_state *s, uint64_t count);
void ptimer_run(ptimer_state *s, int oneshot);
void ptimer_stop(ptimer_state *s);
void qemu_put_ptimer(QEMUFile *f, ptimer_state *s);
void qemu_get_ptimer(QEMUFile *f, ptimer_state *s);
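
/*
 * Illustrative sketch (not part of the original header): driving a periodic
 * down-counter with a ptimer.  The device and callback names are
 * hypothetical; qemu_bh_new() is assumed to be available via qemu-common.h.
 *
 *     static void my_counter_hit(void *opaque)
 *     {
 *         // Counter reached zero; raise the emulated interrupt here.
 *     }
 *
 *     static void my_counter_init(MyDevice *d)
 *     {
 *         d->ptimer = ptimer_init(qemu_bh_new(my_counter_hit, d));
 *         ptimer_set_freq(d->ptimer, 1000000);   // 1 MHz input clock
 *         ptimer_set_limit(d->ptimer, 1000, 1);  // reload value, reset count
 *         ptimer_run(d->ptimer, 0);              // 0 = periodic, 1 = one-shot
 *     }
 */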

/* icount */
int64_t qemu_icount_round(int64_t count);
extern int64_t qemu_icount;
extern int use_icount;
extern int icount_time_shift;
extern int64_t qemu_icount_bias;
int64_t cpu_get_icount(void);
/*******************************************/
/* host CPU ticks (if available) */

#if defined(_ARCH_PPC)

static inline int64_t cpu_get_real_ticks(void)
{
    int64_t retval;
#ifdef _ARCH_PPC64
    /* This reads the timebase in one 64-bit go and includes the Cell
       workaround from:
       http://ozlabs.org/pipermail/linuxppc-dev/2006-October/027052.html
    */
    __asm__ __volatile__ ("mftb    %0\n\t"
                          "cmpwi   %0,0\n\t"
                          "beq-    $-8"
                          : "=r" (retval));
#else
    /* http://ozlabs.org/pipermail/linuxppc-dev/1999-October/003889.html */
    unsigned long junk;
    __asm__ __volatile__ ("mfspr   %1,269\n\t"  /* mftbu */
                          "mfspr   %L0,268\n\t" /* mftb */
                          "mfspr   %0,269\n\t"  /* mftbu */
                          "cmpw    %0,%1\n\t"
                          "bne     $-16"
                          : "=r" (retval), "=r" (junk));
#endif
    return retval;
}

#elif defined(__i386__)

static inline int64_t cpu_get_real_ticks(void)
{
    int64_t val;
    asm volatile ("rdtsc" : "=A" (val));
    return val;
}

#elif defined(__x86_64__)

static inline int64_t cpu_get_real_ticks(void)
{
    uint32_t low,high;
    int64_t val;
    asm volatile("rdtsc" : "=a" (low), "=d" (high));
    val = high;
    val <<= 32;
    val |= low;
    return val;
}

#elif defined(__hppa__)

static inline int64_t cpu_get_real_ticks(void)
{
    int val;
    asm volatile ("mfctl %%cr16, %0" : "=r"(val));
    return val;
}

#elif defined(__ia64)

static inline int64_t cpu_get_real_ticks(void)
{
    int64_t val;
    asm volatile ("mov %0 = ar.itc" : "=r"(val) :: "memory");
    return val;
}

#elif defined(__s390__)

static inline int64_t cpu_get_real_ticks(void)
{
    int64_t val;
    asm volatile("stck 0(%1)" : "=m" (val) : "a" (&val) : "cc");
    return val;
}

#elif defined(__sparc_v8plus__) || defined(__sparc_v8plusa__) || defined(__sparc_v9__)

static inline int64_t cpu_get_real_ticks (void)
{
#if defined(_LP64)
    uint64_t        rval;
    asm volatile("rd %%tick,%0" : "=r"(rval));
    return rval;
#else
    union {
        uint64_t i64;
        struct {
            uint32_t high;
            uint32_t low;
        }       i32;
    } rval;
    asm volatile("rd %%tick,%1; srlx %1,32,%0"
                 : "=r"(rval.i32.high), "=r"(rval.i32.low));
    return rval.i64;
#endif
}

#elif defined(__mips__) && \
    ((defined(__mips_isa_rev) && __mips_isa_rev >= 2) || defined(__linux__))
/*
 * binutils wants to use rdhwr only on mips32r2,
 * but since the Linux kernel emulates it, it is
 * fine to use it.
 */
#define MIPS_RDHWR(rd, value) {                         \
        __asm__ __volatile__ (".set   push\n\t"         \
                              ".set mips32r2\n\t"       \
                              "rdhwr  %0, "rd"\n\t"     \
                              ".set   pop"              \
                              : "=r" (value));          \
    }

static inline int64_t cpu_get_real_ticks(void)
{
    /* On kernels >= 2.6.25 rdhwr <reg>, $2 and $3 are emulated */
    uint32_t count;
    static uint32_t cyc_per_count = 0;

    if (!cyc_per_count) {
        MIPS_RDHWR("$3", cyc_per_count);
    }

    MIPS_RDHWR("$2", count);
    return (int64_t)(count * cyc_per_count);
}

#elif defined(__alpha__)

static inline int64_t cpu_get_real_ticks(void)
{
    uint64_t cc;
    uint32_t cur, ofs;

    asm volatile("rpcc %0" : "=r"(cc));
    cur = cc;
    ofs = cc >> 32;
    return cur - ofs;
}

#else
/* The host CPU doesn't have an easily accessible cycle counter.
   Just return a monotonically increasing value.  This will be
   totally wrong, but hopefully better than nothing.  */
static inline int64_t cpu_get_real_ticks (void)
{
    static int64_t ticks = 0;
    return ticks++;
}
#endif

#ifdef NEED_CPU_H
/* Deterministic execution requires that IO only be performed on the last
   instruction of a TB so that interrupts take effect immediately.  */
static inline int can_do_io(CPUState *env)
{
    if (!use_icount)
        return 1;

    /* If not executing code then assume we are ok.  */
    if (!env->current_tb)
        return 1;

    return env->can_do_io != 0;
}
#endif

#ifdef CONFIG_PROFILER
static inline int64_t profile_getclock(void)
{
    return cpu_get_real_ticks();
}

extern int64_t qemu_time, qemu_time_start;
extern int64_t tlb_flush_time;
extern int64_t dev_time;
#endif

#endif