/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "exec.h"
#include "disas.h"
#if !defined(TARGET_IA64)
#include "tcg.h"
#endif
#include "kvm.h"
#include "qemu-barrier.h"

#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#ifdef __linux__
#include <sys/ucontext.h>
#endif
#endif

#include "qemu-kvm.h"

#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif

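/*
 * Set when translations were invalidated (e.g. the whole code cache was
 * flushed) while a new TB was being generated; cpu_exec() then avoids
 * chaining to a TB pointer that may be stale.
 */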
int tb_invalidated_flag;

//#define CONFIG_DEBUG_EXEC
//#define DEBUG_SIGNAL

int qemu_cpu_has_work(CPUState *env)
{
    return cpu_has_work(env);
}

void cpu_loop_exit(void)
{
    env->current_tb = NULL;
    longjmp(env->jmp_env, 1);
}

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
#ifdef __linux__
    struct ucontext *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#endif
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
#ifdef __linux__
#ifdef __ia64
        sigprocmask(SIG_SETMASK, (sigset_t *)&uc->uc_sigmask, NULL);
#else
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
#endif
#elif defined(__OpenBSD__)
        sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
#endif
    }
#endif
    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);
    env->current_tb = NULL;

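    /* tcg_qemu_tb_exec() returns the last executed TB with the exit
       condition encoded in its low two bits; 2 means the block was
       interrupted before it completed (e.g. the instruction counter
       expired), so the guest PC must be recovered from the TB. */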
    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if an async event occurs before
           the TB starts executing.  */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}

static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1, phys_page2;
    target_ulong virt_page2;

    tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tb_phys_hash[h];
        tb_phys_hash[h] = tb;
    }
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

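/* Fast TB lookup: probe the direct-mapped, virtual-PC-indexed jump
   cache first, and fall back to the physical hash walk in
   tb_find_slow() only on a miss. */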
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}

static CPUDebugExcpHandler *debug_excp_handler;

CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    CPUDebugExcpHandler *old_handler = debug_excp_handler;

    debug_excp_handler = handler;
    return old_handler;
}

static void cpu_handle_debug_exception(CPUState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit)
        QTAILQ_FOREACH(wp, &env->watchpoints, entry)
            wp->flags &= ~BP_WATCHPOINT_HIT;

    if (debug_excp_handler)
        debug_excp_handler(env);
}

/* main execution loop */

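/* Set asynchronously (e.g. from a signal handler) to request that the
   CPU leave cpu_exec(); it is mirrored into env->exit_request on
   entry. */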
volatile sig_atomic_t exit_request;

int cpu_exec(CPUState *env1)
{
    volatile host_reg_t saved_env_reg;
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* the access to env below is actually saving the global register's
       value, so that files not including target-xyz/exec.h are free to
       use it.  */
    QEMU_BUILD_BUG_ON (sizeof (saved_env_reg) != sizeof (env));
    saved_env_reg = (host_reg_t) env;
    barrier();
    env = env1;

    if (unlikely(exit_request)) {
        env->exit_request = 1;
    }

#if defined(TARGET_I386)
    if (!kvm_enabled()) {
        /* put eflags in CPU temporary format */
        CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
        DF = 1 - (2 * ((env->eflags >> 10) & 1));
        CC_OP = CC_OP_EFLAGS;
        env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    }
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_IA64)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
            env = cpu_single_env;
#define env cpu_single_env
#endif
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG)
                        cpu_handle_debug_exception(env);
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
#else
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MICROBLAZE)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#elif defined(TARGET_IA64)
                    do_interrupt(env);
#endif
                    env->exception_index = -1;
#endif
                }
            }

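            /* With KVM the TCG machinery below is bypassed entirely:
               kvm_cpu_exec() runs the vCPU in the kernel, and the
               longjmp() re-enters the setjmp() dispatch above, where
               pending exceptions and exit requests are handled. */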
            if (kvm_enabled()) {
                kvm_cpu_exec(env);
                longjmp(env->jmp_env, 1);
            }

            next_tb = 0; /* force lookup of first TB */
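            /* next_tb carries the previously executed TB, with the jump
               slot that was taken encoded in its low two bits, so that
               blocks can be chained; zero means "do not patch a direct
               jump". */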
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                               CPU_INTERRUPT_FIQ |
                                               CPU_INTERRUPT_SMI |
                                               CPU_INTERRUPT_NMI);
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        svm_check_intercept(SVM_EXIT_INIT);
                        do_cpu_init(env);
                        env->exception_index = EXCP_HALTED;
                        cpu_loop_exit();
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(env);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            env->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt(EXCP12_MCHK, 0, 0, 0, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
                            env = cpu_single_env;
#define env cpu_single_env
#endif
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                  cpu_pil_allowed(env, pil)) ||
                                  type != TT_EXTINT) {
                                env->exception_index = env->interrupt_index;
                                do_interrupt(env);
                                next_tb = 0;
                            }
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value;
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(env->exit_request)) {
                    env->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit();
                }
#if defined(DEBUG_DISAS) || defined(CONFIG_DEBUG_EXEC)
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
#if defined(TARGET_I386)
                    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                    log_cpu_state(env, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#else
                    log_cpu_state(env, 0);
#endif
                }
#endif /* DEBUG_DISAS || CONFIG_DEBUG_EXEC */
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef CONFIG_DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                             (long)tb->tc_ptr, tb->pc,
                             lookup_symbol(tb->pc));
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
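                /* next_tb & ~3 is the previous TB and next_tb & 3 the
                   jump slot taken out of it; tb_add_jump() patches that
                   slot to branch straight into the new TB next time. */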
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                }
                spin_unlock(&tb_lock);

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                env->current_tb = tb;
                barrier();
                if (likely(!env->exit_request)) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
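                        /* The icount budget is kept in two places: the
                           16-bit low half of icount_decr counts down
                           while code runs, and anything beyond 0xffff
                           instructions is parked in icount_extra and
                           transferred in 0xffff-sized chunks. */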
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?  */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_IA64)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
    barrier();
    env = (void *) saved_env_reg;

    /* fail safe: never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}

/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}

#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */

#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)
#define EXCEPTION_ACTION raise_exception_err(env->exception_index, env->error_code)
#else
#define EXCEPTION_ACTION cpu_loop_exit()
#endif

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and 0 otherwise. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
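    /* A write fault on a page that contains translated code:
       un-protect the page, invalidate the affected TBs and retry the
       faulting store. */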
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }

    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    EXCEPTION_ACTION;

    /* never reached */
    return 1;
}

#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)  (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
# define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined (__NetBSD__)
# include <ucontext.h>

# define EIP_sig(context)     ((context)->uc_mcontext.__gregs[_REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.__gregs[_REG_ERR])
# define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined (__FreeBSD__) || defined(__DragonFly__)
# include <ucontext.h>

# define EIP_sig(context)  (*((unsigned long*)&(context)->uc_mcontext.mc_eip))
# define TRAP_sig(context)    ((context)->uc_mcontext.mc_trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext.mc_err)
# define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
# define EIP_sig(context)     ((context)->sc_eip)
# define TRAP_sig(context)    ((context)->sc_trapno)
# define ERROR_sig(context)   ((context)->sc_err)
# define MASK_sig(context)    ((context)->sc_mask)
#else
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
# define MASK_sig(context)    ((context)->uc_sigmask)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
#if defined(__NetBSD__) || defined (__FreeBSD__) || defined(__DragonFly__)
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
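    /* Trap 0xe is the x86 page-fault exception; bit 1 of its error
       code is set when the faulting access was a write. */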
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}

#elif defined(__x86_64__)

#ifdef __NetBSD__
#define PC_sig(context)       _UC_MACHINE_PC(context)
#define TRAP_sig(context)     ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.__gregs[_REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
#define PC_sig(context)       ((context)->sc_rip)
#define TRAP_sig(context)     ((context)->sc_trapno)
#define ERROR_sig(context)    ((context)->sc_err)
#define MASK_sig(context)     ((context)->sc_mask)
#elif defined (__FreeBSD__) || defined(__DragonFly__)
#include <ucontext.h>

#define PC_sig(context)  (*((unsigned long*)&(context)->uc_mcontext.mc_rip))
#define TRAP_sig(context)     ((context)->uc_mcontext.mc_trapno)
#define ERROR_sig(context)    ((context)->uc_mcontext.mc_err)
#define MASK_sig(context)     ((context)->uc_sigmask)
#else
#define PC_sig(context)       ((context)->uc_mcontext.gregs[REG_RIP])
#define TRAP_sig(context)     ((context)->uc_mcontext.gregs[REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.gregs[REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    unsigned long pc;
#if defined(__NetBSD__) || defined (__FreeBSD__) || defined(__DragonFly__)
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif

    pc = PC_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             TRAP_sig(uc) == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}

#elif defined(_ARCH_PPC)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)       ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access  */
# define GPR_sig(reg_num, context)        REG_sig(gpr[reg_num], context)
# define IAR_sig(context)                 REG_sig(nip, context)   /* Program counter */
# define MSR_sig(context)                 REG_sig(msr, context)   /* Machine State Register (Supervisor) */
# define CTR_sig(context)                 REG_sig(ctr, context)   /* Count register */
# define XER_sig(context)                 REG_sig(xer, context)   /* User's integer exception register */
# define LR_sig(context)                  REG_sig(link, context)  /* Link register */
# define CR_sig(context)                  REG_sig(ccr, context)   /* Condition register */
/* Float Registers access  */
# define FLOAT_sig(reg_num, context)      (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)               (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)                 REG_sig(dar, context)
# define DSISR_sig(context)               REG_sig(dsisr, context)
# define TRAP_sig(context)                REG_sig(trap, context)
#endif /* linux */

#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <ucontext.h>
# define IAR_sig(context)                 ((context)->uc_mcontext.mc_srr0)
# define MSR_sig(context)                 ((context)->uc_mcontext.mc_srr1)
# define CTR_sig(context)                 ((context)->uc_mcontext.mc_ctr)
# define XER_sig(context)                 ((context)->uc_mcontext.mc_xer)
# define LR_sig(context)                  ((context)->uc_mcontext.mc_lr)
# define CR_sig(context)                  ((context)->uc_mcontext.mc_cr)
/* Exception Registers access */
# define DAR_sig(context)                 ((context)->uc_mcontext.mc_dar)
# define DSISR_sig(context)               ((context)->uc_mcontext.mc_dsisr)
# define TRAP_sig(context)                ((context)->uc_mcontext.mc_exc)
#endif /* __FreeBSD__|| __FreeBSD_kernel__ */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)       ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context)  ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context)  ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)    ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)        REG_sig(r##reg_num, context)
# define IAR_sig(context)                 REG_sig(srr0, context)  /* Program counter */
# define MSR_sig(context)                 REG_sig(srr1, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)                 REG_sig(ctr, context)
# define XER_sig(context)                 REG_sig(xer, context)   /* User's integer exception register */
# define LR_sig(context)                  REG_sig(lr, context)    /* Link register */
# define CR_sig(context)                  REG_sig(cr, context)    /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)      FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)               ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                 EXCEPREG_sig(dar, context)     /* Fault registers for coredump */
# define DSISR_sig(context)               EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)                EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
    ucontext_t *uc = puc;
#else
    struct ucontext *uc = puc;
#endif
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *(uint32_t *)pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(CONFIG_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
#ifdef __linux__
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
    unsigned long pc = uc->sc_pc;
    void *sigmask = (void *)(long)uc->sc_mask;
#endif
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
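    /* op == 3 (bits 31:30) selects the SPARC load/store format; op3 in
       bits 24:19 identifies the particular store. */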
    if ((insn >> 30) == 3) {
        switch ((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x15: // stba
        case 0x06: // sth
        case 0x16: // stha
        case 0x04: // st
        case 0x14: // sta
        case 0x07: // std
        case 0x17: // stda
        case 0x0e: // stx
        case 0x1e: // stxa
        case 0x24: // stf
        case 0x34: // stfa
        case 0x27: // stdf
        case 0x37: // stdfa
        case 0x26: // stqf
        case 0x36: // stqfa
        case 0x25: // stfsr
        case 0x3c: // casa
        case 0x3e: // casxa
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}

#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33:  */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             (sigset_t *)&uc->uc_sigmask, puc);
}

#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    uint16_t *pinsn;
    int is_write = 0;

    pc = uc->uc_mcontext.psw.addr;

    /* ??? On linux, the non-rt signal handler has 4 (!) arguments instead
       of the normal 2 arguments.  The 3rd argument contains the "int_code"
       from the hardware which does in fact contain the is_write value.
       The rt signal handler, as far as I can tell, does not give this value
       at all.  Not that we could get to it from here even if it were.  */
    /* ??? This is not even close to complete, since it ignores all
       of the read-modify-write instructions.  */
    pinsn = (uint16_t *)pc;
    switch (pinsn[0] >> 8) {
    case 0x50: /* ST */
    case 0x42: /* STC */
    case 0x40: /* STH */
        is_write = 1;
        break;
    case 0xc4: /* RIL format insns */
        switch (pinsn[0] & 0xf) {
        case 0xf: /* STRL */
        case 0xb: /* STGRL */
        case 0x7: /* STHRL */
            is_write = 1;
        }
        break;
    case 0xe3: /* RXY format insns */
        switch (pinsn[2] & 0xff) {
        case 0x50: /* STY */
        case 0x24: /* STG */
        case 0x72: /* STCY */
        case 0x70: /* STHY */
        case 0x8e: /* STPQ */
        case 0x3f: /* STRVH */
        case 0x3e: /* STRV */
        case 0x2f: /* STRVG */
            is_write = 1;
        }
        break;
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc = uc->uc_mcontext.sc_iaoq[0];
    uint32_t insn = *(uint32_t *)pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster.  */
    switch (insn >> 26) {
    case 0x1a: /* STW */
    case 0x19: /* STH */
    case 0x18: /* STB */
    case 0x1b: /* STWM */
        is_write = 1;
        break;

    case 0x09: /* CSTWX, FSTWX, FSTWS */
    case 0x0b: /* CSTDX, FSTDX, FSTDS */
        /* Distinguish from coprocessor load ... */
        is_write = (insn >> 9) & 1;
        break;

    case 0x03:
        switch ((insn >> 6) & 15) {
        case 0xa: /* STWS */
        case 0x9: /* STHS */
        case 0x8: /* STBS */
        case 0xe: /* STWAS */
        case 0xc: /* STBYS */
            is_write = 1;
        }
        break;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */