/*
 *  i386 helpers
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "exec.h"
#include "exec-all.h"
#include "host-utils.h"
#include "ioport.h"

//#define DEBUG_PCALL


#ifdef DEBUG_PCALL
#  define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
#  define LOG_PCALL_STATE(env) \
          log_cpu_state_mask(CPU_LOG_PCALL, (env), X86_DUMP_CCOP)
#else
#  define LOG_PCALL(...) do { } while (0)
#  define LOG_PCALL_STATE(env) do { } while (0)
#endif


#if 0
#define raise_exception_err(a, b)\
do {\
    qemu_log("raise_exception line=%d\n", __LINE__);\
    (raise_exception_err)(a, b);\
} while (0)
#endif

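/* The x86 parity flag (PF) reflects the low byte of a result: it is
   set when that byte contains an even number of one bits.  The table
   below precomputes CC_P for all 256 byte values; a sketch of a
   generator that would produce it (documentation only, not compiled,
   assumes <stdio.h>):  */
#if 0
static void gen_parity_table(void)
{
    int i, b, ones;

    for (i = 0; i < 256; i++) {
        /* count the one bits of the byte value i */
        for (ones = 0, b = i; b != 0; b >>= 1)
            ones += b & 1;
        printf("%s, ", (ones & 1) == 0 ? "CC_P" : "0");
        if ((i & 7) == 7)
            printf("\n");
    }
}
#endif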
static const uint8_t parity_table[256] = {
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
};

/* modulo 17 table */
static const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9,10,11,12,13,14,15,
   16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9,10,11,12,13,14,
};

/* modulo 9 table */
static const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5,
    6, 7, 8, 0, 1, 2, 3, 4,
};

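/* Constants used by the x87 load-constant instructions (FLDZ, FLD1,
   FLDPI, FLDLG2, FLDLN2, FLDL2E, FLDL2T), indexed in that order.  */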
static const CPU86_LDouble f15rk[7] =
{
    0.00000000000000000000L,
    1.00000000000000000000L,
    3.14159265358979323851L,  /*pi*/
    0.30102999566398119523L,  /*lg2*/
    0.69314718055994530943L,  /*ln2*/
    1.44269504088896340739L,  /*l2e*/
    3.32192809488736234781L,  /*l2t*/
};

/* broken thread support */

static spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;

void helper_lock(void)
{
    spin_lock(&global_cpu_lock);
}

void helper_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}

void helper_write_eflags(target_ulong t0, uint32_t update_mask)
{
    load_eflags(t0, update_mask);
}

target_ulong helper_read_eflags(void)
{
    uint32_t eflags;
    eflags = helper_cc_compute_all(CC_OP);
    eflags |= (DF & DF_MASK);
    eflags |= env->eflags & ~(VM_MASK | RF_MASK);
    return eflags;
}

/* return non-zero on error */
static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
                               int selector)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    if ((index + 7) > dt->limit)
        return -1;
    ptr = dt->base + index;
    *e1_ptr = ldl_kernel(ptr);
    *e2_ptr = ldl_kernel(ptr + 4);
    return 0;
}

static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;
    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        limit = (limit << 12) | 0xfff;
    return limit;
}

static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
}

static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}

/* init the segment cache in vm86 mode: base = selector << 4, 64 KiB limit. */
static inline void load_seg_vm(int seg, int selector)
{
    selector &= 0xffff;
    cpu_x86_load_seg_cache(env, seg, selector,
                           (selector << 4), 0xffff, 0);
}

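/* Fetch the ring-dpl stack pointer from the current TSS.  In a 32-bit
   TSS the (ESP, SS) pairs start at offset 4 and are 8 bytes apart; in
   a 16-bit TSS the (SP, SS) pairs start at offset 2 and are 4 bytes
   apart.  Both cases are covered by index = (dpl * 4 + 2) << shift.  */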
static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
{
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for(i=0;i<env->tr.limit;i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) printf("\n");
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1)
        cpu_abort(env, "invalid tss type");
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    if (shift == 0) {
        *esp_ptr = lduw_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
    } else {
        *esp_ptr = ldl_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
    }
}

/* XXX: merge with load_seg() */
static void tss_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* XXX: is it correct ? */
            if (dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if ((e2 & DESC_C_MASK) && dpl > rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != cpl || dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else {
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* if data or non-conforming code, check the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            }
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
    }
}

#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2

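/* A task switch can be caused by a far JMP to a TSS, by an IRET with
   NT set, or by a CALL/interrupt through a task gate; the source
   value tells switch_tss() how to handle the busy bit and the NT
   flag: JMP and IRET clear the old TSS's busy bit, JMP and CALL set
   the new one, and only CALL stores a back link and sets NT.  */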
/* XXX: restore CPU state in registers (PowerPC case) */
static void switch_tss(int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
    LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
        tss_selector = e1 >> 16;
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        if (e2 & DESC_S_MASK)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
    }

    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);

    if (type & 8)
        tss_limit_max = 103;
    else
        tss_limit_max = 43;
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max)
        raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8)
        old_tss_limit_max = 103;
    else
        old_tss_limit_max = 43;

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = ldl_kernel(tss_base + 0x1c);
        new_eip = ldl_kernel(tss_base + 0x20);
        new_eflags = ldl_kernel(tss_base + 0x24);
        for(i = 0; i < 8; i++)
            new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
        for(i = 0; i < 6; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x60);
        new_trap = ldl_kernel(tss_base + 0x64);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = lduw_kernel(tss_base + 0x0e);
        new_eflags = lduw_kernel(tss_base + 0x10);
        for(i = 0; i < 8; i++)
            new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
        for(i = 0; i < 4; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x2a);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }
    /* XXX: avoid a compiler warning, see
     http://support.amd.com/us/Processor_TechDocs/24593.pdf
     chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */
    (void)new_trap;

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to validate the TLB after having done the accesses */

    v1 = ldub_kernel(env->tr.base);
    v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
    stb_kernel(env->tr.base, v1);
    stb_kernel(env->tr.base + old_tss_limit_max, v2);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 &= ~DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    old_eflags = compute_eflags();
    if (source == SWITCH_TSS_IRET)
        old_eflags &= ~NT_MASK;

    /* save the current state in the old TSS */
    if (type & 8) {
        /* 32 bit */
        stl_kernel(env->tr.base + 0x20, next_eip);
        stl_kernel(env->tr.base + 0x24, old_eflags);
        stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
        stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
        stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
        stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
        stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
        stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
        stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
        stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
        for(i = 0; i < 6; i++)
            stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
    } else {
        /* 16 bit */
        stw_kernel(env->tr.base + 0x0e, next_eip);
        stw_kernel(env->tr.base + 0x10, old_eflags);
        stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
        stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
        stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
        stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
        stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
        stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
        stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
        stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
        for(i = 0; i < 4; i++)
            stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
    }

    /* from now on, if an exception occurs, it will occur in the next
       task's context */

    if (source == SWITCH_TSS_CALL) {
        stw_kernel(tss_base, env->tr.selector);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* first load the registers that cannot fault, then reload the
       ones that may raise exceptions */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8))
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    /* XXX: what to do in 16 bit case ? */
    EAX = new_regs[0];
    ECX = new_regs[1];
    EDX = new_regs[2];
    EBX = new_regs[3];
    ESP = new_regs[4];
    EBP = new_regs[5];
    ESI = new_regs[6];
    EDI = new_regs[7];
    if (new_eflags & VM_MASK) {
        for(i = 0; i < 6; i++)
            load_seg_vm(i, new_segs[i]);
        /* in vm86, CPL is always 3 */
        cpu_x86_set_cpl(env, 3);
    } else {
        /* CPL is set to the RPL of CS */
        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
        /* load only the selectors first, as the rest may trigger exceptions */
        for(i = 0; i < 6; i++)
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4)
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        tss_load_seg(R_CS, new_segs[R_CS]);
        tss_load_seg(R_SS, new_segs[R_SS]);
        tss_load_seg(R_ES, new_segs[R_ES]);
        tss_load_seg(R_DS, new_segs[R_DS]);
        tss_load_seg(R_FS, new_segs[R_FS]);
        tss_load_seg(R_GS, new_segs[R_GS]);
    }

    /* check that EIP is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL ? */
        raise_exception_err(EXCP0D_GPF, 0);
    }

#ifndef CONFIG_USER_ONLY
    /* reset local breakpoints */
    if (env->dr[7] & 0x55) {
        for (i = 0; i < 4; i++) {
            if (hw_breakpoint_enabled(env->dr[7], i) == 0x1)
                hw_breakpoint_remove(env, i);
        }
        env->dr[7] &= ~0x55;
    }
#endif
}

/* check if Port I/O is allowed in the TSS */
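/* The 32-bit TSS stores the offset of the I/O permission bitmap in
   the 16-bit field at 0x66.  Each bit of the bitmap covers one port;
   every bit touched by the access must be clear for the I/O to be
   allowed.  Reading 16 bits at offset addr >> 3 is always enough to
   cover an access of up to 4 bytes, even one that crosses a byte
   boundary of the bitmap.  */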
static inline void check_io(int addr, int size)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103)
        goto fail;
    io_offset = lduw_kernel(env->tr.base + 0x66);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit)
        goto fail;
    val = lduw_kernel(env->tr.base + io_offset);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err(EXCP0D_GPF, 0);
    }
}

void helper_check_iob(uint32_t t0)
{
    check_io(t0, 1);
}

void helper_check_iow(uint32_t t0)
{
    check_io(t0, 2);
}

void helper_check_iol(uint32_t t0)
{
    check_io(t0, 4);
}

void helper_outb(uint32_t port, uint32_t data)
{
    cpu_outb(port, data & 0xff);
}

target_ulong helper_inb(uint32_t port)
{
    return cpu_inb(port);
}

void helper_outw(uint32_t port, uint32_t data)
{
    cpu_outw(port, data & 0xffff);
}

target_ulong helper_inw(uint32_t port)
{
    return cpu_inw(port);
}

void helper_outl(uint32_t port, uint32_t data)
{
    cpu_outl(port, data);
}

target_ulong helper_inl(uint32_t port)
{
    return cpu_inl(port);
}

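/* The B (big) flag of the stack-segment descriptor selects between
   16-bit SP and 32-bit ESP stack operations, hence the mask below.  */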
static inline unsigned int get_sp_mask(unsigned int e2)
{
    if (e2 & DESC_B_MASK)
        return 0xffffffff;
    else
        return 0xffff;
}

static int exception_has_error_code(int intno)
{
    switch(intno) {
    case 8:
    case 10:
    case 11:
    case 12:
    case 13:
    case 14:
    case 17:
        return 1;
    }
    return 0;
}

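/* Write back a stack pointer while preserving the bits that the
   stack-size mask excludes: a 16-bit stack must leave the high bits
   of ESP untouched.  The 64-bit build special-cases the two common
   masks so that a full 64-bit RSP can be written as well.  */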
#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)\
do {\
    if ((sp_mask) == 0xffff)\
        ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
    else if ((sp_mask) == 0xffffffffLL)\
        ESP = (uint32_t)(val);\
    else\
        ESP = (val);\
} while (0)
#else
#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
#endif

/* On 64-bit machines this addition can overflow, so this segment-addition
 * macro can be used to trim the result to 32 bits whenever needed */
#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))

/* XXX: add a is_user flag to have proper security support */
#define PUSHW(ssp, sp, sp_mask, val)\
{\
    sp -= 2;\
    stw_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define PUSHL(ssp, sp, sp_mask, val)\
{\
    sp -= 4;\
    stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\
}

#define POPW(ssp, sp, sp_mask, val)\
{\
    val = lduw_kernel((ssp) + (sp & (sp_mask)));\
    sp += 2;\
}

#define POPL(ssp, sp, sp_mask, val)\
{\
    val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
    sp += 4;\
}

/* protected mode interrupt */
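/* Values pushed on the new stack, in push order (32-bit gate shown;
 * a 16-bit gate pushes 16-bit slots instead):
 *   GS, FS, DS, ES      only when coming from vm86 mode
 *   SS, ESP             only when switching to an inner stack
 *   EFLAGS, CS, EIP
 *   error code          only for exceptions that define one
 */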
static void do_interrupt_protected(int intno, int is_int, int error_code,
                                   unsigned int next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
    uint32_t old_eip, sp_mask;

    has_error_code = 0;
    if (!is_int && !is_hw)
        has_error_code = exception_has_error_code(intno);
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 8;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 5: /* task gate */
        /* must do this check here to return the correct error code */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
        switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            int type;
            uint32_t mask;
            /* push the error code */
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
            shift = type >> 3;
            if (env->segs[R_SS].flags & DESC_B_MASK)
                mask = 0xffffffff;
            else
                mask = 0xffff;
            esp = (ESP - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift)
                stl_kernel(ssp, error_code);
            else
                stw_kernel(ssp, error_code);
            SET_ESP(esp, mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(&ss, &esp, dpl);
        if ((ss & 0xfffc) == 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if ((ss & 3) != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, ss) != 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = ESP;
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (env->eflags & VM_MASK)
        push_size += 8;
    push_size <<= shift;
#endif
    if (shift == 1) {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, ESP);
        }
        PUSHL(ssp, esp, sp_mask, compute_eflags());
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, ESP);
        }
        PUSHW(ssp, esp, sp_mask, compute_eflags());
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    if (new_stack) {
        if (env->eflags & VM_MASK) {
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gates clear the IF flag */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}

#ifdef TARGET_X86_64

#define PUSHQ(sp, val)\
{\
    sp -= 8;\
    stq_kernel(sp, (val));\
}

#define POPQ(sp, val)\
{\
    val = ldq_kernel(sp);\
    sp += 8;\
}

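/* In the 64-bit TSS, RSP0-RSP2 are stored at offsets 4, 12 and 20,
   and the seven IST pointers follow at offset 36; this is why callers
   pass ist + 3 as the level when an IST entry is selected.  */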
static inline target_ulong get_rsp_from_tss(int level)
{
    int index;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    return ldq_kernel(env->tr.base + index);
}

/* 64 bit interrupt */
static void do_interrupt64(int intno, int is_int, int error_code,
                           target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;

    has_error_code = 0;
    if (!is_int && !is_hw)
        has_error_code = exception_has_error_code(intno);
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    ptr = dt->base + intno * 16;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    e3 = ldl_kernel(ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
        /* to inner privilege */
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = get_rsp_from_tss(dpl);
        esp &= ~0xfLL; /* align stack */
        ss = 0;
        new_stack = 1;
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = ESP;
        esp &= ~0xfLL; /* align stack */
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, ESP);
    PUSHQ(esp, compute_eflags());
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    if (new_stack) {
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
    }
    ESP = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gates clear the IF flag */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
#endif

#ifdef TARGET_X86_64
#if defined(CONFIG_USER_ONLY)
void helper_syscall(int next_eip_addend)
{
    env->exception_index = EXCP_SYSCALL;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}
#else
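/* SYSCALL: the kernel CS selector comes from STAR[47:32] and SS is
   that selector + 8.  In long mode the target RIP is LSTAR (64-bit
   code) or CSTAR (compatibility mode) and the flags named in SFMASK
   are cleared; in legacy mode the target EIP is the low 32 bits of
   STAR and only IF, RF and VM are cleared.  */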
void helper_syscall(int next_eip_addend)
{
    int selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    selector = (env->star >> 32) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        ECX = env->eip + next_eip_addend;
        env->regs[11] = compute_eflags();

        code64 = env->hflags & HF_CS64_MASK;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~env->fmask;
        load_eflags(env->eflags, 0);
        if (code64)
            env->eip = env->lstar;
        else
            env->eip = env->cstar;
    } else {
        ECX = (uint32_t)(env->eip + next_eip_addend);

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
    }
}
#endif
#endif

#ifdef TARGET_X86_64
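/* SYSRET: the user CS selector comes from STAR[63:48].  A 64-bit
   return uses selector + 16 so that one STAR value can describe both
   a 32-bit and a 64-bit user code segment; SS is selector + 8 in
   every case, and the return drops to CPL 3.  */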
void helper_sysret(int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    selector = (env->star >> 48) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        if (dflag == 2) {
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = ECX;
        } else {
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)ECX;
        }
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
        cpu_x86_set_cpl(env, 3);
    } else {
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)ECX;
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags |= IF_MASK;
        cpu_x86_set_cpl(env, 3);
    }
}
#endif

/* real mode interrupt */
static void do_interrupt_real(int intno, int is_int, int error_code,
                              unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler!) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 4;
    offset = lduw_kernel(ptr);
    selector = lduw_kernel(ptr + 2);
    esp = ESP;
    ssp = env->segs[R_SS].base;
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size ? */
    PUSHW(ssp, esp, 0xffff, compute_eflags());
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}

/* fake user mode interrupt */
void do_interrupt_user(int intno, int is_int, int error_code,
                       target_ulong next_eip)
{
    SegmentCache *dt;
    target_ulong ptr;
    int dpl, cpl, shift;
    uint32_t e2;

    dt = &env->idt;
    if (env->hflags & HF_LMA_MASK) {
        shift = 4;
    } else {
        shift = 3;
    }
    ptr = dt->base + (intno << shift);
    e2 = ldl_kernel(ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);

    /* Since we emulate only user space, we cannot do more than
       exiting the emulation with the suitable exception and error
       code */
    if (is_int)
        EIP = next_eip;
}

#if !defined(CONFIG_USER_ONLY)
static void handle_even_inj(int intno, int is_int, int error_code,
                            int is_hw, int rm)
{
    uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
    if (!(event_inj & SVM_EVTINJ_VALID)) {
        int type;
        if (is_int)
            type = SVM_EVTINJ_TYPE_SOFT;
        else
            type = SVM_EVTINJ_TYPE_EXEPT;
        event_inj = intno | type | SVM_EVTINJ_VALID;
        if (!rm && exception_has_error_code(intno)) {
            event_inj |= SVM_EVTINJ_VALID_ERR;
            stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err), error_code);
        }
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj);
    }
}
#endif

/*
 * Begin execution of an interrupt. is_int is TRUE if coming from
 * the int instruction. next_eip is the EIP value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
void do_interrupt(int intno, int is_int, int error_code,
                  target_ulong next_eip, int is_hw)
{
    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;
            qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                    count, intno, error_code, is_int,
                    env->hflags & HF_CPL_MASK,
                    env->segs[R_CS].selector, EIP,
                    (int)env->segs[R_CS].base + EIP,
                    env->segs[R_SS].selector, ESP);
            if (intno == 0x0e) {
                qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                qemu_log(" EAX=" TARGET_FMT_lx, EAX);
            }
            qemu_log("\n");
            log_cpu_state(env, X86_DUMP_CCOP);
#if 0
            {
                int i;
                target_ulong ptr;
                qemu_log("       code=");
                ptr = env->segs[R_CS].base + env->eip;
                for(i = 0; i < 16; i++) {
                    qemu_log(" %02x", ldub(ptr + i));
                }
                qemu_log("\n");
            }
#endif
            count++;
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_SVMI_MASK)
            handle_even_inj(intno, is_int, error_code, is_hw, 0);
#endif
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
            do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
        }
    } else {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_SVMI_MASK)
            handle_even_inj(intno, is_int, error_code, is_hw, 1);
#endif
        do_interrupt_real(intno, is_int, error_code, next_eip);
    }

#if !defined(CONFIG_USER_ONLY)
    if (env->hflags & HF_SVMI_MASK) {
        uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);
    }
#endif
}

/* This should come from sysemu.h - if we could include it here... */
void qemu_system_reset_request(void);

/*
 * Check nested exceptions and change to double or triple fault if
 * needed. It should only be called if this is not an interrupt.
 * Returns the new exception number.
 */
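/* Exceptions 0 (#DE) and 10-13 (#TS, #NP, #SS, #GP) are the
   "contributory" class: two of them in sequence escalate to a double
   fault, as does a contributory exception or page fault raised while
   a page fault is being delivered.  A further fault while delivering
   the double fault is a triple fault, which shuts the machine down.  */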
static int check_exception(int intno, int *error_code)
{
    int first_contributory = env->old_exception == 0 ||
                              (env->old_exception >= 10 &&
                               env->old_exception <= 13);
    int second_contributory = intno == 0 ||
                               (intno >= 10 && intno <= 13);

    qemu_log_mask(CPU_LOG_INT, "check_exception old: 0x%x new 0x%x\n",
                env->old_exception, intno);

#if !defined(CONFIG_USER_ONLY)
    if (env->old_exception == EXCP08_DBLE) {
        if (env->hflags & HF_SVMI_MASK)
            helper_vmexit(SVM_EXIT_SHUTDOWN, 0); /* does not return */

        qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");

        qemu_system_reset_request();
        return EXCP_HLT;
    }
#endif

    if ((first_contributory && second_contributory)
        || (env->old_exception == EXCP0E_PAGE &&
            (second_contributory || (intno == EXCP0E_PAGE)))) {
        intno = EXCP08_DBLE;
        *error_code = 0;
    }

    if (second_contributory || (intno == EXCP0E_PAGE) ||
        (intno == EXCP08_DBLE))
        env->old_exception = intno;

    return intno;
}

/*
 * Signal an interruption. It is executed in the main CPU loop.
 * is_int is TRUE if coming from the int instruction. next_eip is the
 * EIP value AFTER the interrupt instruction. It is only relevant if
 * is_int is TRUE.
 */
static void QEMU_NORETURN raise_interrupt(int intno, int is_int, int error_code,
                                          int next_eip_addend)
{
    if (!is_int) {
        helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
        intno = check_exception(intno, &error_code);
    } else {
        helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
    }

    env->exception_index = intno;
    env->error_code = error_code;
    env->exception_is_int = is_int;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}

/* shortcuts to generate exceptions */

void raise_exception_err(int exception_index, int error_code)
{
    raise_interrupt(exception_index, 0, error_code, 0);
}

void raise_exception(int exception_index)
{
    raise_interrupt(exception_index, 0, 0, 0);
}

void raise_exception_env(int exception_index, CPUState *nenv)
{
    env = nenv;
    raise_exception(exception_index);
}

/* SMM support */

#if defined(CONFIG_USER_ONLY)

void do_smm_enter(void)
{
}

void helper_rsm(void)
{
}

#else

#ifdef TARGET_X86_64
#define SMM_REVISION_ID 0x00020064
#else
#define SMM_REVISION_ID 0x00020000
#endif

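/* On SMM entry the CPU saves its state in the SMRAM state save area
   starting at SMBASE + 0x8000, using the 32-bit or the AMD64 layout
   selected by SMM_REVISION_ID above; helper_rsm() restores the same
   fields on RSM.  */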
do_smm_enter(void)1384 void do_smm_enter(void)
1385 {
1386     target_ulong sm_state;
1387     SegmentCache *dt;
1388     int i, offset;
1389 
1390     qemu_log_mask(CPU_LOG_INT, "SMM: enter\n");
1391     log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
1392 
1393     env->hflags |= HF_SMM_MASK;
1394     cpu_smm_update(env);
1395 
1396     sm_state = env->smbase + 0x8000;
1397 
1398 #ifdef TARGET_X86_64
1399     for(i = 0; i < 6; i++) {
1400         dt = &env->segs[i];
1401         offset = 0x7e00 + i * 16;
1402         stw_phys(sm_state + offset, dt->selector);
1403         stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
1404         stl_phys(sm_state + offset + 4, dt->limit);
1405         stq_phys(sm_state + offset + 8, dt->base);
1406     }
1407 
1408     stq_phys(sm_state + 0x7e68, env->gdt.base);
1409     stl_phys(sm_state + 0x7e64, env->gdt.limit);
1410 
1411     stw_phys(sm_state + 0x7e70, env->ldt.selector);
1412     stq_phys(sm_state + 0x7e78, env->ldt.base);
1413     stl_phys(sm_state + 0x7e74, env->ldt.limit);
1414     stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);
1415 
1416     stq_phys(sm_state + 0x7e88, env->idt.base);
1417     stl_phys(sm_state + 0x7e84, env->idt.limit);
1418 
1419     stw_phys(sm_state + 0x7e90, env->tr.selector);
1420     stq_phys(sm_state + 0x7e98, env->tr.base);
1421     stl_phys(sm_state + 0x7e94, env->tr.limit);
1422     stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);
1423 
1424     stq_phys(sm_state + 0x7ed0, env->efer);
1425 
1426     stq_phys(sm_state + 0x7ff8, EAX);
1427     stq_phys(sm_state + 0x7ff0, ECX);
1428     stq_phys(sm_state + 0x7fe8, EDX);
1429     stq_phys(sm_state + 0x7fe0, EBX);
1430     stq_phys(sm_state + 0x7fd8, ESP);
1431     stq_phys(sm_state + 0x7fd0, EBP);
1432     stq_phys(sm_state + 0x7fc8, ESI);
1433     stq_phys(sm_state + 0x7fc0, EDI);
1434     for(i = 8; i < 16; i++)
1435         stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
1436     stq_phys(sm_state + 0x7f78, env->eip);
1437     stl_phys(sm_state + 0x7f70, compute_eflags());
1438     stl_phys(sm_state + 0x7f68, env->dr[6]);
1439     stl_phys(sm_state + 0x7f60, env->dr[7]);
1440 
1441     stl_phys(sm_state + 0x7f48, env->cr[4]);
1442     stl_phys(sm_state + 0x7f50, env->cr[3]);
1443     stl_phys(sm_state + 0x7f58, env->cr[0]);
1444 
1445     stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1446     stl_phys(sm_state + 0x7f00, env->smbase);
1447 #else
1448     stl_phys(sm_state + 0x7ffc, env->cr[0]);
1449     stl_phys(sm_state + 0x7ff8, env->cr[3]);
1450     stl_phys(sm_state + 0x7ff4, compute_eflags());
1451     stl_phys(sm_state + 0x7ff0, env->eip);
1452     stl_phys(sm_state + 0x7fec, EDI);
1453     stl_phys(sm_state + 0x7fe8, ESI);
1454     stl_phys(sm_state + 0x7fe4, EBP);
1455     stl_phys(sm_state + 0x7fe0, ESP);
1456     stl_phys(sm_state + 0x7fdc, EBX);
1457     stl_phys(sm_state + 0x7fd8, EDX);
1458     stl_phys(sm_state + 0x7fd4, ECX);
1459     stl_phys(sm_state + 0x7fd0, EAX);
1460     stl_phys(sm_state + 0x7fcc, env->dr[6]);
1461     stl_phys(sm_state + 0x7fc8, env->dr[7]);
1462 
1463     stl_phys(sm_state + 0x7fc4, env->tr.selector);
1464     stl_phys(sm_state + 0x7f64, env->tr.base);
1465     stl_phys(sm_state + 0x7f60, env->tr.limit);
1466     stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);
1467 
1468     stl_phys(sm_state + 0x7fc0, env->ldt.selector);
1469     stl_phys(sm_state + 0x7f80, env->ldt.base);
1470     stl_phys(sm_state + 0x7f7c, env->ldt.limit);
1471     stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);
1472 
1473     stl_phys(sm_state + 0x7f74, env->gdt.base);
1474     stl_phys(sm_state + 0x7f70, env->gdt.limit);
1475 
1476     stl_phys(sm_state + 0x7f58, env->idt.base);
1477     stl_phys(sm_state + 0x7f54, env->idt.limit);
1478 
1479     for(i = 0; i < 6; i++) {
1480         dt = &env->segs[i];
1481         if (i < 3)
1482             offset = 0x7f84 + i * 12;
1483         else
1484             offset = 0x7f2c + (i - 3) * 12;
1485         stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
1486         stl_phys(sm_state + offset + 8, dt->base);
1487         stl_phys(sm_state + offset + 4, dt->limit);
1488         stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
1489     }
1490     stl_phys(sm_state + 0x7f14, env->cr[4]);
1491 
1492     stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1493     stl_phys(sm_state + 0x7ef8, env->smbase);
1494 #endif
1495     /* init SMM cpu state */
1496 
1497 #ifdef TARGET_X86_64
1498     cpu_load_efer(env, 0);
1499 #endif
1500     load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1501     env->eip = 0x00008000;
1502     cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
1503                            0xffffffff, 0);
1504     cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
1505     cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
1506     cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
1507     cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
1508     cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);
1509 
1510     cpu_x86_update_cr0(env,
1511                        env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
1512     cpu_x86_update_cr4(env, 0);
1513     env->dr[7] = 0x00000400;
1514     CC_OP = CC_OP_EFLAGS;
1515 }
1516 
void helper_rsm(void)
{
    target_ulong sm_state;
    int i, offset;
    uint32_t val;

    sm_state = env->smbase + 0x8000;
#ifdef TARGET_X86_64
    cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));

    for(i = 0; i < 6; i++) {
        offset = 0x7e00 + i * 16;
        cpu_x86_load_seg_cache(env, i,
                               lduw_phys(sm_state + offset),
                               ldq_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
    }

    env->gdt.base = ldq_phys(sm_state + 0x7e68);
    env->gdt.limit = ldl_phys(sm_state + 0x7e64);

    env->ldt.selector = lduw_phys(sm_state + 0x7e70);
    env->ldt.base = ldq_phys(sm_state + 0x7e78);
    env->ldt.limit = ldl_phys(sm_state + 0x7e74);
    env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;

    env->idt.base = ldq_phys(sm_state + 0x7e88);
    env->idt.limit = ldl_phys(sm_state + 0x7e84);

    env->tr.selector = lduw_phys(sm_state + 0x7e90);
    env->tr.base = ldq_phys(sm_state + 0x7e98);
    env->tr.limit = ldl_phys(sm_state + 0x7e94);
    env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;

    EAX = ldq_phys(sm_state + 0x7ff8);
    ECX = ldq_phys(sm_state + 0x7ff0);
    EDX = ldq_phys(sm_state + 0x7fe8);
    EBX = ldq_phys(sm_state + 0x7fe0);
    ESP = ldq_phys(sm_state + 0x7fd8);
    EBP = ldq_phys(sm_state + 0x7fd0);
    ESI = ldq_phys(sm_state + 0x7fc8);
    EDI = ldq_phys(sm_state + 0x7fc0);
    for(i = 8; i < 16; i++)
        env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
    env->eip = ldq_phys(sm_state + 0x7f78);
    load_eflags(ldl_phys(sm_state + 0x7f70),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->dr[6] = ldl_phys(sm_state + 0x7f68);
    env->dr[7] = ldl_phys(sm_state + 0x7f60);

    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
    }
#else
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
    load_eflags(ldl_phys(sm_state + 0x7ff4),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = ldl_phys(sm_state + 0x7ff0);
    EDI = ldl_phys(sm_state + 0x7fec);
    ESI = ldl_phys(sm_state + 0x7fe8);
    EBP = ldl_phys(sm_state + 0x7fe4);
    ESP = ldl_phys(sm_state + 0x7fe0);
    EBX = ldl_phys(sm_state + 0x7fdc);
    EDX = ldl_phys(sm_state + 0x7fd8);
    ECX = ldl_phys(sm_state + 0x7fd4);
    EAX = ldl_phys(sm_state + 0x7fd0);
    env->dr[6] = ldl_phys(sm_state + 0x7fcc);
    env->dr[7] = ldl_phys(sm_state + 0x7fc8);

    env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
    env->tr.base = ldl_phys(sm_state + 0x7f64);
    env->tr.limit = ldl_phys(sm_state + 0x7f60);
    env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;

    env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
    env->ldt.base = ldl_phys(sm_state + 0x7f80);
    env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
    env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;

    env->gdt.base = ldl_phys(sm_state + 0x7f74);
    env->gdt.limit = ldl_phys(sm_state + 0x7f70);

    env->idt.base = ldl_phys(sm_state + 0x7f58);
    env->idt.limit = ldl_phys(sm_state + 0x7f54);

    for(i = 0; i < 6; i++) {
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        cpu_x86_load_seg_cache(env, i,
                               ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
                               ldl_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
    }
    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
    }
#endif
    CC_OP = CC_OP_EFLAGS;
    env->hflags &= ~HF_SMM_MASK;
    cpu_smm_update(env);

    qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n");
    log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
}

#endif /* !CONFIG_USER_ONLY */


/* division, flags are undefined */

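/* Each divide helper widens the implicit dividend (AX, DX:AX or
   EDX:EAX), divides by the operand and splits quotient and remainder
   back into the register pair.  Both a zero divisor and a quotient
   that does not fit the destination raise #DE (EXCP00_DIVZ), matching
   hardware.  Example for DIV r/m8: AX = 0x0193 (403), divisor
   0x10 (16) gives AL = 0x19 (quotient 25) and AH = 3 (remainder). */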
void helper_divb_AL(target_ulong t0)
{
    unsigned int num, den, q, r;

    num = (EAX & 0xffff);
    den = (t0 & 0xff);
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q > 0xff)
        raise_exception(EXCP00_DIVZ);
    q &= 0xff;
    r = (num % den) & 0xff;
    EAX = (EAX & ~0xffff) | (r << 8) | q;
}

void helper_idivb_AL(target_ulong t0)
{
    int num, den, q, r;

    num = (int16_t)EAX;
    den = (int8_t)t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q != (int8_t)q)
        raise_exception(EXCP00_DIVZ);
    q &= 0xff;
    r = (num % den) & 0xff;
    EAX = (EAX & ~0xffff) | (r << 8) | q;
}

void helper_divw_AX(target_ulong t0)
{
    unsigned int num, den, q, r;

    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
    den = (t0 & 0xffff);
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q > 0xffff)
        raise_exception(EXCP00_DIVZ);
    q &= 0xffff;
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;
}

void helper_idivw_AX(target_ulong t0)
{
    int num, den, q, r;

    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
    den = (int16_t)t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q != (int16_t)q)
        raise_exception(EXCP00_DIVZ);
    q &= 0xffff;
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;
}

void helper_divl_EAX(target_ulong t0)
{
    unsigned int den, r;
    uint64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    r = (num % den);
    if (q > 0xffffffff)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

void helper_idivl_EAX(target_ulong t0)
{
    int den, r;
    int64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    r = (num % den);
    if (q != (int32_t)q)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

/* bcd */

/* XXX: AAM should raise #DE when the immediate base is zero */
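/* AAM: split AL into base-"base" digits (AH = AL / base,
   AL = AL % base).  With the default base of 10, AL = 0x4f (79)
   becomes AH = 7, AL = 9.  CC_DST holds the new AL so the lazy flag
   code can derive ZF/SF/PF from it. */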
void helper_aam(int base)
{
    int al, ah;
    al = EAX & 0xff;
    ah = al / base;
    al = al % base;
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_DST = al;
}

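/* AAD: fold the two unpacked digits back into a binary value in AL
   (AL = AH * base + AL, AH cleared by the 16-bit mask).  For base 10,
   AH:AL = 07:09 becomes AL = 79 = 0x4f. */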
void helper_aad(int base)
{
    int al, ah;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;
    al = ((ah * base) + al) & 0xff;
    EAX = (EAX & ~0xffff) | al;
    CC_DST = al;
}

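/* AAA/AAS: ASCII adjust AL after an addition/subtraction so that AH:AL
   holds a valid unpacked BCD result.  If the low nibble overflowed
   (AL & 0x0f > 9, or AF set), the digit is corrected by +/-6, AH is
   bumped accordingly and CF/AF are set; otherwise CF/AF are cleared.
   Example: 0x06 + 0x05 leaves AL = 0x0b, which AAA turns into
   AH += 1, AL = 1, i.e. the unpacked digits of 11. */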
void helper_aaa(void)
{
    int icarry;
    int al, ah, af;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    af = eflags & CC_A;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;

    icarry = (al > 0xf9);
    if (((al & 0x0f) > 9) || af) {
        al = (al + 6) & 0x0f;
        ah = (ah + 1 + icarry) & 0xff;
        eflags |= CC_C | CC_A;
    } else {
        eflags &= ~(CC_C | CC_A);
        al &= 0x0f;
    }
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_SRC = eflags;
}

void helper_aas(void)
{
    int icarry;
    int al, ah, af;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    af = eflags & CC_A;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;

    icarry = (al < 6);
    if (((al & 0x0f) > 9) || af) {
        al = (al - 6) & 0x0f;
        ah = (ah - 1 - icarry) & 0xff;
        eflags |= CC_C | CC_A;
    } else {
        eflags &= ~(CC_C | CC_A);
        al &= 0x0f;
    }
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_SRC = eflags;
}

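/* DAA/DAS: decimal adjust AL after adding/subtracting packed BCD
   operands.  Each nibble that left the 0-9 range is corrected by
   +/-6 (low nibble) or +/-0x60 (high nibble), with AF/CF recording
   which corrections happened.  Example: 0x18 + 0x16 = 0x2e; DAA adds
   6 to fix the low nibble and yields 0x34, the packed BCD of 18+16. */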
void helper_daa(void)
{
    int al, af, cf;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    cf = eflags & CC_C;
    af = eflags & CC_A;
    al = EAX & 0xff;

    eflags = 0;
    if (((al & 0x0f) > 9) || af) {
        al = (al + 6) & 0xff;
        eflags |= CC_A;
    }
    if ((al > 0x9f) || cf) {
        al = (al + 0x60) & 0xff;
        eflags |= CC_C;
    }
    EAX = (EAX & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */
    CC_SRC = eflags;
}

void helper_das(void)
{
    int al, al1, af, cf;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    cf = eflags & CC_C;
    af = eflags & CC_A;
    al = EAX & 0xff;

    eflags = 0;
    al1 = al;
    if (((al & 0x0f) > 9) || af) {
        eflags |= CC_A;
        if (al < 6 || cf)
            eflags |= CC_C;
        al = (al - 6) & 0xff;
    }
    if ((al1 > 0x99) || cf) {
        al = (al - 0x60) & 0xff;
        eflags |= CC_C;
    }
    EAX = (EAX & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */
    CC_SRC = eflags;
}

void helper_into(int next_eip_addend)
{
    int eflags;
    eflags = helper_cc_compute_all(CC_OP);
    if (eflags & CC_O) {
        raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
    }
}

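/* CMPXCHG8B: compare EDX:EAX with the 64-bit memory operand; on a
   match store ECX:EBX and set ZF, otherwise load the operand into
   EDX:EAX and clear ZF.  The store in the failure path is deliberate:
   the instruction always performs a write cycle, so e.g. a read-only
   page faults regardless of the comparison result. */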
void helper_cmpxchg8b(target_ulong a0)
{
    uint64_t d;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    d = ldq(a0);
    if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
        stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
        eflags |= CC_Z;
    } else {
        /* always do the store */
        stq(a0, d);
        EDX = (uint32_t)(d >> 32);
        EAX = (uint32_t)d;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}

#ifdef TARGET_X86_64
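/* CMPXCHG16B: the 128-bit variant compares RDX:RAX against the memory
   pair and stores RCX:RBX on a match.  Unlike CMPXCHG8B, the operand
   must be 16-byte aligned or the instruction raises #GP(0). */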
void helper_cmpxchg16b(target_ulong a0)
{
    uint64_t d0, d1;
    int eflags;

    if ((a0 & 0xf) != 0)
        raise_exception(EXCP0D_GPF);
    eflags = helper_cc_compute_all(CC_OP);
    d0 = ldq(a0);
    d1 = ldq(a0 + 8);
    if (d0 == EAX && d1 == EDX) {
        stq(a0, EBX);
        stq(a0 + 8, ECX);
        eflags |= CC_Z;
    } else {
        /* always do the store */
        stq(a0, d0);
        stq(a0 + 8, d1);
        EDX = d1;
        EAX = d0;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}
#endif

void helper_single_step(void)
{
#ifndef CONFIG_USER_ONLY
    check_hw_breakpoints(env, 1);
    env->dr[6] |= DR6_BS;
#endif
    raise_exception(EXCP01_DB);
}

void helper_cpuid(void)
{
    uint32_t eax, ebx, ecx, edx;

    helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);

    cpu_x86_cpuid(env, (uint32_t)EAX, (uint32_t)ECX, &eax, &ebx, &ecx, &edx);
    EAX = eax;
    EBX = ebx;
    ECX = ecx;
    EDX = edx;
}

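/* ENTER with a non-zero nesting level: each enclosing level's frame
   pointer is copied from the old frame to the new stack, then t1 (the
   new frame pointer value, apparently supplied by the translated code)
   is pushed last.  All accesses are masked with the SS-size mask and
   based at SS.base; note the helper only writes the stack slots, and
   the ESP/EBP register updates happen outside it. */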
void helper_enter_level(int level, int data32, target_ulong t1)
{
    target_ulong ssp;
    uint32_t esp_mask, esp, ebp;

    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    ebp = EBP;
    esp = ESP;
    if (data32) {
        /* 32 bit */
        esp -= 4;
        while (--level) {
            esp -= 4;
            ebp -= 4;
            stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
        }
        esp -= 4;
        stl(ssp + (esp & esp_mask), t1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
        }
        esp -= 2;
        stw(ssp + (esp & esp_mask), t1);
    }
}

#ifdef TARGET_X86_64
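/* 64-bit ENTER variant: flat addressing, no SS masking.  The "16 bit"
   branch covers ENTER executed with a 16-bit operand-size override in
   long mode. */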
void helper_enter64_level(int level, int data64, target_ulong t1)
{
    target_ulong esp, ebp;
    ebp = EBP;
    esp = ESP;

    if (data64) {
        /* 64 bit */
        esp -= 8;
        while (--level) {
            esp -= 8;
            ebp -= 8;
            stq(esp, ldq(ebp));
        }
        esp -= 8;
        stq(esp, t1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(esp, lduw(ebp));
        }
        esp -= 2;
        stw(esp, t1);
    }
}
#endif

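/* LLDT: load the LDT register.  The selector must reference the GDT
   (TI clear) and point at a present LDT descriptor (system type 2).
   In long mode LDT descriptors are 16 bytes, so the limit check widens
   and bits 63:32 of the base come from the third descriptor word. */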
void helper_lldt(int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = 0;
        env->ldt.limit = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3;
            e3 = ldl_kernel(ptr + 8);
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
            env->ldt.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
        }
    }
    env->ldt.selector = selector;
}

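/* LTR: load the task register.  Only an available TSS descriptor is
   accepted (type 1 for a 286 TSS, type 9 for a 386/64-bit TSS); the
   descriptor is then marked busy in the GDT, so reloading the same
   TSS without clearing the busy bit faults. */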
void helper_ltr(int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, type, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
        env->tr.base = 0;
        env->tr.limit = 0;
        env->tr.flags = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((e2 & DESC_S_MASK) ||
            (type != 1 && type != 9))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3, e4;
            e3 = ldl_kernel(ptr + 8);
            e4 = ldl_kernel(ptr + 12);
            if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            load_seg_cache_raw_dt(&env->tr, e1, e2);
            env->tr.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->tr, e1, e2);
        }
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    env->tr.selector = selector;
}

/* only works in protected mode and not VM86. seg_reg must be != R_CS */
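/* The checks below follow the MOV sreg rules: a null selector is legal
   everywhere except SS (and, in 64-bit mode, even SS when CPL < 3);
   SS requires a writable data segment with RPL == DPL == CPL; the
   other registers accept any readable segment, with data and
   non-conforming code additionally requiring DPL >= max(CPL, RPL). */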
void helper_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int cpl, dpl, rpl;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    selector &= 0xffff;
    cpl = env->hflags & HF_CPL_MASK;
    if ((selector & 0xfffc) == 0) {
        /* null selector case */
        if (seg_reg == R_SS
#ifdef TARGET_X86_64
            && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
#endif
            )
            raise_exception_err(EXCP0D_GPF, 0);
        cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
    } else {

        if (selector & 0x4)
            dt = &env->ldt;
        else
            dt = &env->gdt;
        index = selector & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);

        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_SS) {
            /* must be writable segment */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            if (rpl != cpl || dpl != cpl)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        } else {
            /* must be readable segment */
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);

            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
                /* if not conforming code, test rights */
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            }
        }

        if (!(e2 & DESC_P_MASK)) {
            if (seg_reg == R_SS)
                raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
            else
                raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        }

        /* set the access bit if not already set */
        if (!(e2 & DESC_A_MASK)) {
            e2 |= DESC_A_MASK;
            stl_kernel(ptr + 4, e2);
        }

        cpu_x86_load_seg_cache(env, seg_reg, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
#if 0
        qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
                selector, (unsigned long)sc->base, sc->limit, sc->flags);
#endif
    }
}

/* protected mode jump */
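/* A far JMP in protected mode either targets a code segment directly
   (conforming: DPL <= CPL; non-conforming: RPL <= CPL and DPL == CPL)
   or goes through a system descriptor: TSS and task-gate descriptors
   cause a task switch, while call gates redirect to the code segment
   and entry point stored in the gate, without a privilege change. */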
void helper_ljmp_protected(int new_cs, target_ulong new_eip,
                           int next_eip_addend)
{
    int gate_cs, type;
    uint32_t e1, e2, cpl, dpl, rpl, limit;
    target_ulong next_eip;

    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non-conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        limit = get_seg_limit(e1, e2);
        if (new_eip > limit &&
            !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                       get_seg_base(e1, e2), limit, e2);
        EIP = new_eip;
    } else {
        /* jump to call or task gate */
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        cpl = env->hflags & HF_CPL_MASK;
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1: /* 286 TSS */
        case 9: /* 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            next_eip = env->eip + next_eip_addend;
            switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
            CC_OP = CC_OP_EFLAGS;
            break;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            if ((dpl < cpl) || (dpl < rpl))
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
            gate_cs = e1 >> 16;
            new_eip = (e1 & 0xffff);
            if (type == 12)
                new_eip |= (e2 & 0xffff0000);
            if (load_segment(&e1, &e2, gate_cs) != 0)
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
            /* must be code segment */
            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
                 (DESC_S_MASK | DESC_CS_MASK)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
                (!(e2 & DESC_C_MASK) && (dpl != cpl)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, 0);
            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
    }
}

/* real mode call */
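/* Real-mode far CALL: push the return CS:IP (32- or 16-bit depending
   on the operand size in "shift") and load CS the real-mode way, i.e.
   base = selector << 4 with no descriptor checks. */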
void helper_lcall_real(int new_cs, target_ulong new_eip1,
                       int shift, int next_eip)
{
    int new_eip;
    uint32_t esp, esp_mask;
    target_ulong ssp;

    new_eip = new_eip1;
    esp = ESP;
    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    if (shift) {
        PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, esp_mask, next_eip);
    } else {
        PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, esp_mask, next_eip);
    }

    SET_ESP(esp, esp_mask);
    env->eip = new_eip;
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
}

/* protected mode call */
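/* Protected-mode far CALL.  Direct calls to a code segment just push
   the return address.  Calls through a call gate to a more privileged
   (numerically lower DPL) non-conforming segment additionally switch
   to the inner stack taken from the TSS, copy param_count parameters
   across, and push the old SS:ESP so a far return can undo the stack
   switch. */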
void helper_lcall_protected(int new_cs, target_ulong new_eip,
                            int shift, int next_eip_addend)
{
    int new_stack, i;
    uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
    uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
    uint32_t val, limit, old_sp_mask;
    target_ulong ssp, old_ssp, next_eip;

    next_eip = env->eip + next_eip_addend;
    LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
    LOG_PCALL_STATE(env);
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    LOG_PCALL("desc=%08x:%08x\n", e1, e2);
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non-conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

#ifdef TARGET_X86_64
        /* XXX: check 16/32 bit cases in long mode */
        if (shift == 2) {
            target_ulong rsp;
            /* 64 bit case */
            rsp = ESP;
            PUSHQ(rsp, env->segs[R_CS].selector);
            PUSHQ(rsp, next_eip);
            /* from this point, not restartable */
            ESP = rsp;
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2),
                                   get_seg_limit(e1, e2), e2);
            EIP = new_eip;
        } else
#endif
        {
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHL(ssp, sp, sp_mask, next_eip);
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHW(ssp, sp, sp_mask, next_eip);
            }

            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            /* from this point, not restartable */
            SET_ESP(sp, sp_mask);
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
        }
    } else {
        /* check gate type */
        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        switch(type) {
        case 1: /* available 286 TSS */
        case 9: /* available 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
            CC_OP = CC_OP_EFLAGS;
            return;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
        shift = type >> 3;

        if (dpl < cpl || dpl < rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        /* check valid bit */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        selector = e1 >> 16;
        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
        param_count = e2 & 0x1f;
        if ((selector & 0xfffc) == 0)
            raise_exception_err(EXCP0D_GPF, 0);

        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl > cpl)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);

        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
            /* to inner privilege */
            get_ss_esp_from_tss(&ss, &sp, dpl);
            LOG_PCALL("new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
                        ss, sp, param_count, ESP);
            if ((ss & 0xfffc) == 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if ((ss & 3) != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, ss) != 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (ss_dpl != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);

            //            push_size = ((param_count * 2) + 8) << shift;

            old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
            old_ssp = env->segs[R_SS].base;

            sp_mask = get_sp_mask(ss_e2);
            ssp = get_seg_base(ss_e1, ss_e2);
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHL(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
                    PUSHL(ssp, sp, sp_mask, val);
                }
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHW(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
                    PUSHW(ssp, sp, sp_mask, val);
                }
            }
            new_stack = 1;
        } else {
            /* to same privilege */
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            //            push_size = (4 << shift);
            new_stack = 0;
        }

        if (shift) {
            PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHL(ssp, sp, sp_mask, next_eip);
        } else {
            PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHW(ssp, sp, sp_mask, next_eip);
        }

        /* from this point, not restartable */

        if (new_stack) {
            ss = (ss & ~3) | dpl;
            cpu_x86_load_seg_cache(env, R_SS, ss,
                                   ssp,
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        selector = (selector & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_CS, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_set_cpl(env, dpl);
        SET_ESP(sp, sp_mask);
        EIP = offset;
    }
}

/* real and vm86 mode iret */
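/* IRET in real or VM86 mode: pop IP, CS and FLAGS.  In VM86 mode IOPL
   is excluded from the writable flag mask, and with a 16-bit operand
   size only the low 16 flag bits are updated. */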
void helper_iret_real(int shift)
{
    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
    target_ulong ssp;
    int eflags_mask;

    sp_mask = 0xffff; /* XXXX: use SS segment size ? */
    sp = ESP;
    ssp = env->segs[R_SS].base;
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        POPL(ssp, sp, sp_mask, new_eflags);
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        POPW(ssp, sp, sp_mask, new_eflags);
    }
    ESP = (ESP & ~sp_mask) | (sp & sp_mask);
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
    env->eip = new_eip;
    if (env->eflags & VM_MASK)
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
    else
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
    if (shift == 0)
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    env->hflags2 &= ~HF2_NMI_MASK;
}