xref: /illumos-kvm-cmd/tcg/arm/tcg-target.c (revision 68396ea9)
1 /*
2  * Tiny Code Generator for QEMU
3  *
4  * Copyright (c) 2008 Andrzej Zaborowski
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a copy
7  * of this software and associated documentation files (the "Software"), to deal
8  * in the Software without restriction, including without limitation the rights
9  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10  * copies of the Software, and to permit persons to whom the Software is
11  * furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22  * THE SOFTWARE.
23  */
24 
/* Derive the target instruction-set level from the compiler's predefined
 * architecture macros.  ARMv7 implies ARMv6, which implies ARMv5.  */
#if defined(__ARM_ARCH_7__) ||  \
    defined(__ARM_ARCH_7A__) || \
    defined(__ARM_ARCH_7EM__) || \
    defined(__ARM_ARCH_7M__) || \
    defined(__ARM_ARCH_7R__)
#define USE_ARMV7_INSTRUCTIONS
#endif

#if defined(USE_ARMV7_INSTRUCTIONS) || \
    defined(__ARM_ARCH_6J__) || \
    defined(__ARM_ARCH_6K__) || \
    defined(__ARM_ARCH_6T2__) || \
    defined(__ARM_ARCH_6Z__) || \
    defined(__ARM_ARCH_6ZK__)
#define USE_ARMV6_INSTRUCTIONS
#endif

#if defined(USE_ARMV6_INSTRUCTIONS) || \
    defined(__ARM_ARCH_5T__) || \
    defined(__ARM_ARCH_5TE__) || \
    defined(__ARM_ARCH_5TEJ__)
#define USE_ARMV5_INSTRUCTIONS
#endif

/* Re-expose the detected levels as compile-time integer constants so the
 * code below can use ordinary 'if' statements (dead branches are folded
 * away by the compiler); the macros are not needed past this point.  */
#ifdef USE_ARMV5_INSTRUCTIONS
static const int use_armv5_instructions = 1;
#else
static const int use_armv5_instructions = 0;
#endif
#undef USE_ARMV5_INSTRUCTIONS

#ifdef USE_ARMV6_INSTRUCTIONS
static const int use_armv6_instructions = 1;
#else
static const int use_armv6_instructions = 0;
#endif
#undef USE_ARMV6_INSTRUCTIONS

#ifdef USE_ARMV7_INSTRUCTIONS
static const int use_armv7_instructions = 1;
#else
static const int use_armv7_instructions = 0;
#endif
#undef USE_ARMV7_INSTRUCTIONS
69 
#ifndef NDEBUG
/* Register names for TCG debug output, indexed by TCG register number.
 * r13/r14 keep their numeric names here; register 15 is printed as pc.  */
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%r0",
    "%r1",
    "%r2",
    "%r3",
    "%r4",
    "%r5",
    "%r6",
    "%r7",
    "%r8",
    "%r9",
    "%r10",
    "%r11",
    "%r12",
    "%r13",
    "%r14",
    "%pc",
};
#endif
90 
/* Register allocation preference order: AAPCS call-saved registers
 * (r4-r11) first so allocated values survive helper calls, then the
 * call-clobbered ones (r0-r3, r12, r14).
 * NOTE(review): TCG_REG_R13 (the AAPCS stack pointer) is listed as
 * allocatable -- confirm it is genuinely free for allocation in this
 * code generator.  */
static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_R4,
    TCG_REG_R5,
    TCG_REG_R6,
    TCG_REG_R7,
    TCG_REG_R8,
    TCG_REG_R9,
    TCG_REG_R10,
    TCG_REG_R11,
    TCG_REG_R13,
    TCG_REG_R0,
    TCG_REG_R1,
    TCG_REG_R2,
    TCG_REG_R3,
    TCG_REG_R12,
    TCG_REG_R14,
};
108 
/* AAPCS calling convention: the first four word-sized arguments are
 * passed in r0-r3, and a (possibly 64-bit) result comes back in r0/r1.  */
static const int tcg_target_call_iarg_regs[4] = {
    TCG_REG_R0, TCG_REG_R1, TCG_REG_R2, TCG_REG_R3
};
static const int tcg_target_call_oarg_regs[2] = {
    TCG_REG_R0, TCG_REG_R1
};
115 
/* Apply an absolute 32-bit relocation: store TARGET into the code
 * word at CODE_PTR.  */
static inline void reloc_abs32(void *code_ptr, tcg_target_long target)
{
    uint32_t *slot = code_ptr;

    *slot = target;
}
120 
/* Patch the 24-bit PC-relative branch at CODE_PTR to reach TARGET.
 * The +8 accounts for the ARM PC reading two instructions ahead of the
 * current one; the word offset replaces the low 24 bits of the already
 * emitted branch instruction, leaving the condition/opcode intact.  */
static inline void reloc_pc24(void *code_ptr, tcg_target_long target)
{
    uint32_t offset = ((target - ((tcg_target_long) code_ptr + 8)) >> 2);

    *(uint32_t *) code_ptr = ((*(uint32_t *) code_ptr) & ~0xffffff)
                             | (offset & 0xffffff);
}
128 
/* Resolve a relocation of TYPE at CODE_PTR so that it refers to VALUE.
 * Only R_ARM_ABS32 and R_ARM_PC24 are emitted by this backend; any
 * other type (including R_ARM_CALL/R_ARM_JUMP24, listed explicitly for
 * documentation) aborts.  ADDEND is unused on this target.  Note the
 * deliberate placement of 'default' before the last case: the abort
 * does not fall through into R_ARM_PC24 because tcg_abort() does not
 * return.  */
static void patch_reloc(uint8_t *code_ptr, int type,
                tcg_target_long value, tcg_target_long addend)
{
    switch (type) {
    case R_ARM_ABS32:
        reloc_abs32(code_ptr, value);
        break;

    case R_ARM_CALL:
    case R_ARM_JUMP24:
    default:
        tcg_abort();

    case R_ARM_PC24:
        reloc_pc24(code_ptr, value);
        break;
    }
}
147 
/* Maximum number of registers used for input function arguments
 * (AAPCS passes the first four words in r0-r3).  FLAGS is ignored
 * on this target.  */
static inline int tcg_target_get_call_iarg_regs_count(int flags)
{
    (void) flags;               /* unused on ARM */
    return 4;
}
153 
/* Parse one target-specific operand-constraint letter from *PCT_STR
 * into CT, advancing the string on success.  Returns 0 on success or
 * -1 for an unknown letter.  Letters: 'I' = ARM ALU immediate,
 * 'r' = any register, 'l'/'L' = qemu_ld address/data with softmmu
 * scratch registers excluded, 's'/'S' = qemu_st address/data with the
 * byte-swap scratch registers excluded.  */
static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
{
    const char *ct_str;

    ct_str = *pct_str;
    switch (ct_str[0]) {
    case 'I':
         ct->ct |= TCG_CT_CONST_ARM;
         break;

    case 'r':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
        break;

    /* qemu_ld address */
    case 'l':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
#ifdef CONFIG_SOFTMMU
        /* r0 and r1 will be overwritten when reading the tlb entry,
           so don't use these. */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
#endif
        break;
    case 'L':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
#ifdef CONFIG_SOFTMMU
        /* r1 is still needed to load data_reg or data_reg2,
           so don't use it. */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
#endif
        break;

    /* qemu_st address & data_reg */
    case 's':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
        /* r0 and r1 will be overwritten when reading the tlb entry
           (softmmu only) and doing the byte swapping, so don't
           use these. */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
        break;
    /* qemu_st64 data_reg2 */
    case 'S':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
        /* r0 and r1 will be overwritten when reading the tlb entry
            (softmmu only) and doing the byte swapping, so don't
            use these. */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
#ifdef CONFIG_SOFTMMU
        /* r2 is still needed to load data_reg, so don't use it. */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R2);
#endif
        break;

    default:
        return -1;
    }
    ct_str++;
    *pct_str = ct_str;

    return 0;
}
224 
/* Rotate VAL left by N bits, for 0 <= N < 32.
 * The (32 - n) & 31 mask makes the right-shift count 0 when n == 0,
 * avoiding the undefined behaviour of shifting a 32-bit value by 32
 * (C99 6.5.7p3).  rotl(x, 0) == x.  */
static inline uint32_t rotl(uint32_t val, int n)
{
    return (val << n) | (val >> ((32 - n) & 31));
}
229 
/* ARM immediates for ALU instructions are made of an unsigned 8-bit
   right-rotated by an even amount between 0 and 30. */
/* Return the (even) left-rotation that maps IMM onto an 8-bit value,
 * i.e. rotl(imm, encode_imm(imm)) fits in 8 bits when the result is
 * >= 0, or -1 if IMM cannot be encoded as an ALU immediate.  Callers
 * currently only test the sign (see check_fit_imm) and recompute the
 * rotation themselves (see tcg_out_movi32).  */
static inline int encode_imm(uint32_t imm)
{
    int shift;

    /* simple case, only lower bits */
    if ((imm & ~0xff) == 0)
        return 0;
    /* then try a simple even shift */
    shift = ctz32(imm) & ~1;
    if (((imm >> shift) & ~0xff) == 0)
        return 32 - shift;
    /* now try harder with rotations; these catch an 8-bit field that
       wraps around bit 31 into bit 0 */
    if ((rotl(imm, 2) & ~0xff) == 0)
        return 2;
    if ((rotl(imm, 4) & ~0xff) == 0)
        return 4;
    if ((rotl(imm, 6) & ~0xff) == 0)
        return 6;
    /* imm can't be encoded */
    return -1;
}
253 
/* Nonzero iff IMM is representable as an ARM ALU immediate operand.  */
static inline int check_fit_imm(uint32_t imm)
{
    return !(encode_imm(imm) < 0);
}
258 
259 /* Test if a constant matches the constraint.
260  * TODO: define constraints for:
261  *
262  * ldr/str offset:   between -0xfff and 0xfff
263  * ldrh/strh offset: between -0xff and 0xff
264  * mov operand2:     values represented with x << (2 * y), x < 0x100
265  * add, sub, eor...: ditto
266  */
tcg_target_const_match(tcg_target_long val,const TCGArgConstraint * arg_ct)267 static inline int tcg_target_const_match(tcg_target_long val,
268                 const TCGArgConstraint *arg_ct)
269 {
270     int ct;
271     ct = arg_ct->ct;
272     if (ct & TCG_CT_CONST)
273         return 1;
274     else if ((ct & TCG_CT_CONST_ARM) && check_fit_imm(val))
275         return 1;
276     else
277         return 0;
278 }
279 
/* ARM data-processing opcodes (bits 24:21 of the instruction).  */
enum arm_data_opc_e {
    ARITH_AND = 0x0,
    ARITH_EOR = 0x1,
    ARITH_SUB = 0x2,
    ARITH_RSB = 0x3,
    ARITH_ADD = 0x4,
    ARITH_ADC = 0x5,
    ARITH_SBC = 0x6,
    ARITH_RSC = 0x7,
    ARITH_TST = 0x8,
    ARITH_CMP = 0xa,
    ARITH_CMN = 0xb,
    ARITH_ORR = 0xc,
    ARITH_MOV = 0xd,
    ARITH_BIC = 0xe,
    ARITH_MVN = 0xf,
};
297 
/* The S bit (bit 20, "set condition codes") must be set for the
 * compare/test opcodes, which write only the flags.  */
#define TO_CPSR(opc) \
  ((opc == ARITH_CMP || opc == ARITH_CMN || opc == ARITH_TST) << 20)

/* Operand-2 shifter encodings: immediate shift amount in bits 11:7,
 * or shift amount taken from register RS in bits 11:8.  */
#define SHIFT_IMM_LSL(im)	(((im) << 7) | 0x00)
#define SHIFT_IMM_LSR(im)	(((im) << 7) | 0x20)
#define SHIFT_IMM_ASR(im)	(((im) << 7) | 0x40)
#define SHIFT_IMM_ROR(im)	(((im) << 7) | 0x60)
#define SHIFT_REG_LSL(rs)	(((rs) << 8) | 0x10)
#define SHIFT_REG_LSR(rs)	(((rs) << 8) | 0x30)
#define SHIFT_REG_ASR(rs)	(((rs) << 8) | 0x50)
#define SHIFT_REG_ROR(rs)	(((rs) << 8) | 0x70)
309 
/* ARM condition codes (bits 31:28 of every instruction).  */
enum arm_cond_code_e {
    COND_EQ = 0x0,
    COND_NE = 0x1,
    COND_CS = 0x2,	/* Unsigned greater or equal */
    COND_CC = 0x3,	/* Unsigned less than */
    COND_MI = 0x4,	/* Negative */
    COND_PL = 0x5,	/* Zero or greater */
    COND_VS = 0x6,	/* Overflow */
    COND_VC = 0x7,	/* No overflow */
    COND_HI = 0x8,	/* Unsigned greater than */
    COND_LS = 0x9,	/* Unsigned less or equal */
    COND_GE = 0xa,	/* Signed greater or equal */
    COND_LT = 0xb,	/* Signed less than */
    COND_GT = 0xc,	/* Signed greater than */
    COND_LE = 0xd,	/* Signed less or equal */
    COND_AL = 0xe,	/* Always */
};
327 
/* Map TCG comparison conditions to ARM condition codes.  */
static const uint8_t tcg_cond_to_arm_cond[10] = {
    [TCG_COND_EQ] = COND_EQ,
    [TCG_COND_NE] = COND_NE,
    [TCG_COND_LT] = COND_LT,
    [TCG_COND_GE] = COND_GE,
    [TCG_COND_LE] = COND_LE,
    [TCG_COND_GT] = COND_GT,
    /* unsigned */
    [TCG_COND_LTU] = COND_CC,
    [TCG_COND_GEU] = COND_CS,
    [TCG_COND_LEU] = COND_LS,
    [TCG_COND_GTU] = COND_HI,
};
341 
/* Emit BX<cond> rn: branch to the address in RN (may switch to Thumb).  */
static inline void tcg_out_bx(TCGContext *s, int cond, int rn)
{
    tcg_out32(s, (cond << 28) | 0x012fff10 | rn);
}
346 
/* Emit B<cond> with byte OFFSET relative to this instruction; the -8
 * compensates for the PC reading two instructions ahead.  */
static inline void tcg_out_b(TCGContext *s, int cond, int32_t offset)
{
    tcg_out32(s, (cond << 28) | 0x0a000000 |
                    (((offset - 8) >> 2) & 0x00ffffff));
}
352 
/* Emit a B<cond> whose 24-bit target field is left untouched, to be
 * filled in later by reloc_pc24().  Only the opcode byte is written.  */
static inline void tcg_out_b_noaddr(TCGContext *s, int cond)
{
    /* We pay attention here to not modify the branch target by skipping
       the corresponding bytes. This ensure that caches and memory are
       kept coherent during retranslation. */
#ifdef HOST_WORDS_BIGENDIAN
    tcg_out8(s, (cond << 4) | 0x0a);
    s->code_ptr += 3;
#else
    s->code_ptr += 3;
    tcg_out8(s, (cond << 4) | 0x0a);
#endif
}
366 
/* Emit BL<cond> (branch and link) with byte OFFSET; -8 for PC readahead.  */
static inline void tcg_out_bl(TCGContext *s, int cond, int32_t offset)
{
    tcg_out32(s, (cond << 28) | 0x0b000000 |
                    (((offset - 8) >> 2) & 0x00ffffff));
}
372 
/* Emit BLX<cond> rn: call through register RN.  */
static inline void tcg_out_blx(TCGContext *s, int cond, int rn)
{
    tcg_out32(s, (cond << 28) | 0x012fff30 | rn);
}
377 
/* Emit a data-processing instruction with a (shifted) register
 * operand 2: <opc><cond> rd, rn, rm <shift>.  SHIFT is one of the
 * SHIFT_IMM_*/SHIFT_REG_* encodings.  */
static inline void tcg_out_dat_reg(TCGContext *s,
                int cond, int opc, int rd, int rn, int rm, int shift)
{
    tcg_out32(s, (cond << 28) | (0 << 25) | (opc << 21) | TO_CPSR(opc) |
                    (rn << 16) | (rd << 12) | shift | rm);
}
384 
/* Emit a 64-bit ALU operation as an opc0/opc1 pair (e.g. ADD/ADC):
 * the low-half op sets the flags (1 << 20) for the high-half op to
 * consume.  When RD0 aliases an input of the high half, the low
 * result is computed into the scratch register r8 (the literal
 * '8 << 12' below) and moved into RD0 afterwards so the high half
 * still sees the original values.  */
static inline void tcg_out_dat_reg2(TCGContext *s,
                int cond, int opc0, int opc1, int rd0, int rd1,
                int rn0, int rn1, int rm0, int rm1, int shift)
{
    if (rd0 == rn1 || rd0 == rm1) {
        tcg_out32(s, (cond << 28) | (0 << 25) | (opc0 << 21) | (1 << 20) |
                        (rn0 << 16) | (8 << 12) | shift | rm0);
        tcg_out32(s, (cond << 28) | (0 << 25) | (opc1 << 21) |
                        (rn1 << 16) | (rd1 << 12) | shift | rm1);
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd0, 0, TCG_REG_R8, SHIFT_IMM_LSL(0));
    } else {
        tcg_out32(s, (cond << 28) | (0 << 25) | (opc0 << 21) | (1 << 20) |
                        (rn0 << 16) | (rd0 << 12) | shift | rm0);
        tcg_out32(s, (cond << 28) | (0 << 25) | (opc1 << 21) |
                        (rn1 << 16) | (rd1 << 12) | shift | rm1);
    }
}
403 
/* Emit a data-processing instruction with immediate operand 2
 * (bit 25 set).  IM is the pre-encoded 12-bit rotate+value field.  */
static inline void tcg_out_dat_imm(TCGContext *s,
                int cond, int opc, int rd, int rn, int im)
{
    tcg_out32(s, (cond << 28) | (1 << 25) | (opc << 21) | TO_CPSR(opc) |
                    (rn << 16) | (rd << 12) | im);
}
410 
/* Load the 32-bit constant ARG into RD: MVN for small negative values,
 * MOVW/MOVT on ARMv7, otherwise a MOV followed by ORRs that build the
 * value one rotated byte at a time.  */
static inline void tcg_out_movi32(TCGContext *s,
                int cond, int rd, uint32_t arg)
{
    /* TODO: This is very suboptimal, we can easily have a constant
     * pool somewhere after all the instructions.  */
    if ((int)arg < 0 && (int)arg >= -0x100) {
        /* mvn rd, #(~arg): one instruction for -256..-1 */
        tcg_out_dat_imm(s, cond, ARITH_MVN, rd, 0, (~arg) & 0xff);
    } else if (use_armv7_instructions) {
        /* use movw/movt */
        /* movw: imm4 (bits 19:16) = arg[15:12], imm12 = arg[11:0] */
        tcg_out32(s, (cond << 28) | 0x03000000 | (rd << 12)
                  | ((arg << 4) & 0x000f0000) | (arg & 0xfff));
        if (arg & 0xffff0000) {
            /* movt: writes the high halfword, keeps the low one */
            tcg_out32(s, (cond << 28) | 0x03400000 | (rd << 12)
                      | ((arg >> 12) & 0x000f0000) | ((arg >> 16) & 0xfff));
        }
    } else {
        int opc = ARITH_MOV;
        int rn = 0;

        /* Peel off 8 bits at a time, starting at the lowest set even
           bit position; first iteration MOVs, later ones ORR into RD.  */
        do {
            int i, rot;

            i = ctz32(arg) & ~1;
            rot = ((32 - i) << 7) & 0xf00;
            tcg_out_dat_imm(s, cond, opc, rd, rn, ((arg >> i) & 0xff) | rot);
            arg &= ~(0xff << i);

            opc = ARITH_ORR;
            rn = rd;
        } while (arg);
    }
}
445 
/* Emit MUL rd, rm, rs.  On ARM, MUL's destination should not equal the
 * first source operand (rm), so the operands are swapped when possible;
 * if rd equals both sources, the product goes through scratch register
 * r8 (the literal '8 << 16') and is then moved into RD.  */
static inline void tcg_out_mul32(TCGContext *s,
                int cond, int rd, int rs, int rm)
{
    if (rd != rm)
        tcg_out32(s, (cond << 28) | (rd << 16) | (0 << 12) |
                        (rs << 8) | 0x90 | rm);
    else if (rd != rs)
        tcg_out32(s, (cond << 28) | (rd << 16) | (0 << 12) |
                        (rm << 8) | 0x90 | rs);
    else {
        tcg_out32(s, (cond << 28) | ( 8 << 16) | (0 << 12) |
                        (rs << 8) | 0x90 | rm);
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, TCG_REG_R8, SHIFT_IMM_LSL(0));
    }
}
462 
/* Emit UMULL rd0, rd1, rm, rs (64-bit unsigned multiply: low half in
 * RD0, high half in RD1).  The destinations must differ from the first
 * source; when both candidates collide, RM is first copied into r8 and
 * the UMULL reads r8 (the trailing '8' in 0x800098).  */
static inline void tcg_out_umull32(TCGContext *s,
                int cond, int rd0, int rd1, int rs, int rm)
{
    if (rd0 != rm && rd1 != rm)
        tcg_out32(s, (cond << 28) | 0x800090 |
                        (rd1 << 16) | (rd0 << 12) | (rs << 8) | rm);
    else if (rd0 != rs && rd1 != rs)
        tcg_out32(s, (cond << 28) | 0x800090 |
                        (rd1 << 16) | (rd0 << 12) | (rm << 8) | rs);
    else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_R8, 0, rm, SHIFT_IMM_LSL(0));
        tcg_out32(s, (cond << 28) | 0x800098 |
                        (rd1 << 16) | (rd0 << 12) | (rs << 8));
    }
}
479 
/* Emit SMULL rd0, rd1, rm, rs (64-bit signed multiply: low half in
 * RD0, high half in RD1), with the same operand-collision handling
 * through scratch register r8 as tcg_out_umull32().  */
static inline void tcg_out_smull32(TCGContext *s,
                int cond, int rd0, int rd1, int rs, int rm)
{
    if (rd0 != rm && rd1 != rm)
        tcg_out32(s, (cond << 28) | 0xc00090 |
                        (rd1 << 16) | (rd0 << 12) | (rs << 8) | rm);
    else if (rd0 != rs && rd1 != rs)
        tcg_out32(s, (cond << 28) | 0xc00090 |
                        (rd1 << 16) | (rd0 << 12) | (rm << 8) | rs);
    else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_R8, 0, rm, SHIFT_IMM_LSL(0));
        tcg_out32(s, (cond << 28) | 0xc00098 |
                        (rd1 << 16) | (rd0 << 12) | (rs << 8));
    }
}
496 
/* Sign-extend the low byte of RN into RD: SXTB on ARMv6+, otherwise
 * a LSL #24 / ASR #24 pair.  */
static inline void tcg_out_ext8s(TCGContext *s, int cond,
                                 int rd, int rn)
{
    if (use_armv6_instructions) {
        /* sxtb */
        tcg_out32(s, 0x06af0070 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rn, SHIFT_IMM_LSL(24));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rd, SHIFT_IMM_ASR(24));
    }
}
510 
/* Zero-extend the low byte of RN into RD (AND with 0xff).  */
static inline void tcg_out_ext8u(TCGContext *s, int cond,
                                 int rd, int rn)
{
    tcg_out_dat_imm(s, cond, ARITH_AND, rd, rn, 0xff);
}
516 
/* Sign-extend the low halfword of RN into RD: SXTH on ARMv6+,
 * otherwise a LSL #16 / ASR #16 pair.  */
static inline void tcg_out_ext16s(TCGContext *s, int cond,
                                  int rd, int rn)
{
    if (use_armv6_instructions) {
        /* sxth */
        tcg_out32(s, 0x06bf0070 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rn, SHIFT_IMM_LSL(16));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rd, SHIFT_IMM_ASR(16));
    }
}
530 
/* Zero-extend the low halfword of RN into RD: UXTH on ARMv6+,
 * otherwise a LSL #16 / LSR #16 pair.  */
static inline void tcg_out_ext16u(TCGContext *s, int cond,
                                  int rd, int rn)
{
    if (use_armv6_instructions) {
        /* uxth */
        tcg_out32(s, 0x06ff0070 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rn, SHIFT_IMM_LSL(16));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rd, SHIFT_IMM_LSR(16));
    }
}
544 
/* Byte-swap the low halfword of RN and sign-extend into RD: REVSH on
 * ARMv6+, otherwise a shift/or sequence through scratch r8.
 * NOTE(review): the fallback appears to assume the upper bits of RN
 * hold no stray data that survives the ORR -- confirm callers
 * guarantee a 16-bit input.  */
static inline void tcg_out_bswap16s(TCGContext *s, int cond, int rd, int rn)
{
    if (use_armv6_instructions) {
        /* revsh */
        tcg_out32(s, 0x06ff0fb0 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_R8, 0, rn, SHIFT_IMM_LSL(24));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_R8, 0, TCG_REG_R8, SHIFT_IMM_ASR(16));
        tcg_out_dat_reg(s, cond, ARITH_ORR,
                        rd, TCG_REG_R8, rn, SHIFT_IMM_LSR(8));
    }
}
559 
/* Byte-swap the low halfword of RN into RD (zero-ish extension):
 * REV16 on ARMv6+, otherwise a shift/or sequence through scratch r8.
 * NOTE(review): the fallback looks like it assumes RN's top 16 bits
 * are clear -- confirm against the call sites.  */
static inline void tcg_out_bswap16(TCGContext *s, int cond, int rd, int rn)
{
    if (use_armv6_instructions) {
        /* rev16 */
        tcg_out32(s, 0x06bf0fb0 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_R8, 0, rn, SHIFT_IMM_LSL(24));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_R8, 0, TCG_REG_R8, SHIFT_IMM_LSR(16));
        tcg_out_dat_reg(s, cond, ARITH_ORR,
                        rd, TCG_REG_R8, rn, SHIFT_IMM_LSR(8));
    }
}
574 
/* Byte-swap the full word RN into RD: REV on ARMv6+, otherwise the
 * classic 4-instruction EOR/BIC/ROR sequence through scratch r8.  */
static inline void tcg_out_bswap32(TCGContext *s, int cond, int rd, int rn)
{
    if (use_armv6_instructions) {
        /* rev */
        tcg_out32(s, 0x06bf0f30 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_EOR,
                        TCG_REG_R8, rn, rn, SHIFT_IMM_ROR(16));
        /* clear bits 23:16 of the eor result (0xff ror 8 == 0x00ff0000) */
        tcg_out_dat_imm(s, cond, ARITH_BIC,
                        TCG_REG_R8, TCG_REG_R8, 0xff | 0x800);
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rn, SHIFT_IMM_ROR(8));
        tcg_out_dat_reg(s, cond, ARITH_EOR,
                        rd, rd, TCG_REG_R8, SHIFT_IMM_LSR(8));
    }
}
591 
/* LDR rd, [rn, #im]: 32-bit load with a 12-bit immediate offset; the
 * sign of IM selects the add (U=1) or subtract encoding.  */
static inline void tcg_out_ld32_12(TCGContext *s, int cond,
                int rd, int rn, tcg_target_long im)
{
    if (im >= 0)
        tcg_out32(s, (cond << 28) | 0x05900000 |
                        (rn << 16) | (rd << 12) | (im & 0xfff));
    else
        tcg_out32(s, (cond << 28) | 0x05100000 |
                        (rn << 16) | (rd << 12) | ((-im) & 0xfff));
}
602 
/* STR rd, [rn, #im]: 32-bit store with a 12-bit immediate offset.  */
static inline void tcg_out_st32_12(TCGContext *s, int cond,
                int rd, int rn, tcg_target_long im)
{
    if (im >= 0)
        tcg_out32(s, (cond << 28) | 0x05800000 |
                        (rn << 16) | (rd << 12) | (im & 0xfff));
    else
        tcg_out32(s, (cond << 28) | 0x05000000 |
                        (rn << 16) | (rd << 12) | ((-im) & 0xfff));
}
613 
/* LDR rd, [rn, rm]: 32-bit load, register offset.  */
static inline void tcg_out_ld32_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x07900000 |
                    (rn << 16) | (rd << 12) | rm);
}
620 
/* STR rd, [rn, rm]: 32-bit store, register offset.  */
static inline void tcg_out_st32_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x07800000 |
                    (rn << 16) | (rd << 12) | rm);
}
627 
/* Register pre-increment with base writeback.  */
/* LDR rd, [rn, rm]!: 32-bit load; RN is updated to RN+RM.  */
static inline void tcg_out_ld32_rwb(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x07b00000 |
                    (rn << 16) | (rd << 12) | rm);
}
635 
/* STR rd, [rn, rm]!: 32-bit store with base writeback.  */
static inline void tcg_out_st32_rwb(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x07a00000 |
                    (rn << 16) | (rd << 12) | rm);
}
642 
/* LDRH rd, [rn, #im]: halfword load with an 8-bit immediate offset,
 * split into high (bits 11:8) and low (bits 3:0) nibbles.  */
static inline void tcg_out_ld16u_8(TCGContext *s, int cond,
                int rd, int rn, tcg_target_long im)
{
    if (im >= 0)
        tcg_out32(s, (cond << 28) | 0x01d000b0 |
                        (rn << 16) | (rd << 12) |
                        ((im & 0xf0) << 4) | (im & 0xf));
    else
        tcg_out32(s, (cond << 28) | 0x015000b0 |
                        (rn << 16) | (rd << 12) |
                        (((-im) & 0xf0) << 4) | ((-im) & 0xf));
}
655 
/* STRH rd, [rn, #im]: halfword store, split 8-bit immediate offset.  */
static inline void tcg_out_st16_8(TCGContext *s, int cond,
                int rd, int rn, tcg_target_long im)
{
    if (im >= 0)
        tcg_out32(s, (cond << 28) | 0x01c000b0 |
                        (rn << 16) | (rd << 12) |
                        ((im & 0xf0) << 4) | (im & 0xf));
    else
        tcg_out32(s, (cond << 28) | 0x014000b0 |
                        (rn << 16) | (rd << 12) |
                        (((-im) & 0xf0) << 4) | ((-im) & 0xf));
}
668 
/* LDRH rd, [rn, rm]: unsigned halfword load, register offset.  */
static inline void tcg_out_ld16u_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x019000b0 |
                    (rn << 16) | (rd << 12) | rm);
}
675 
/* STRH rd, [rn, rm]: halfword store, register offset.  */
static inline void tcg_out_st16_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x018000b0 |
                    (rn << 16) | (rd << 12) | rm);
}
682 
/* LDRSH rd, [rn, #im]: signed halfword load, split 8-bit immediate.  */
static inline void tcg_out_ld16s_8(TCGContext *s, int cond,
                int rd, int rn, tcg_target_long im)
{
    if (im >= 0)
        tcg_out32(s, (cond << 28) | 0x01d000f0 |
                        (rn << 16) | (rd << 12) |
                        ((im & 0xf0) << 4) | (im & 0xf));
    else
        tcg_out32(s, (cond << 28) | 0x015000f0 |
                        (rn << 16) | (rd << 12) |
                        (((-im) & 0xf0) << 4) | ((-im) & 0xf));
}
695 
/* LDRSH rd, [rn, rm]: signed halfword load, register offset.  */
static inline void tcg_out_ld16s_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x019000f0 |
                    (rn << 16) | (rd << 12) | rm);
}
702 
/* LDRB rd, [rn, #im]: byte load with a 12-bit immediate offset.  */
static inline void tcg_out_ld8_12(TCGContext *s, int cond,
                int rd, int rn, tcg_target_long im)
{
    if (im >= 0)
        tcg_out32(s, (cond << 28) | 0x05d00000 |
                        (rn << 16) | (rd << 12) | (im & 0xfff));
    else
        tcg_out32(s, (cond << 28) | 0x05500000 |
                        (rn << 16) | (rd << 12) | ((-im) & 0xfff));
}
713 
/* STRB rd, [rn, #im]: byte store with a 12-bit immediate offset.  */
static inline void tcg_out_st8_12(TCGContext *s, int cond,
                int rd, int rn, tcg_target_long im)
{
    if (im >= 0)
        tcg_out32(s, (cond << 28) | 0x05c00000 |
                        (rn << 16) | (rd << 12) | (im & 0xfff));
    else
        tcg_out32(s, (cond << 28) | 0x05400000 |
                        (rn << 16) | (rd << 12) | ((-im) & 0xfff));
}
724 
/* LDRB rd, [rn, rm]: byte load, register offset.  */
static inline void tcg_out_ld8_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x07d00000 |
                    (rn << 16) | (rd << 12) | rm);
}
731 
/* STRB rd, [rn, rm]: byte store, register offset.  */
static inline void tcg_out_st8_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x07c00000 |
                    (rn << 16) | (rd << 12) | rm);
}
738 
/* LDRSB rd, [rn, #im]: signed byte load, split 8-bit immediate.  */
static inline void tcg_out_ld8s_8(TCGContext *s, int cond,
                int rd, int rn, tcg_target_long im)
{
    if (im >= 0)
        tcg_out32(s, (cond << 28) | 0x01d000d0 |
                        (rn << 16) | (rd << 12) |
                        ((im & 0xf0) << 4) | (im & 0xf));
    else
        tcg_out32(s, (cond << 28) | 0x015000d0 |
                        (rn << 16) | (rd << 12) |
                        (((-im) & 0xf0) << 4) | ((-im) & 0xf));
}
751 
/* LDRSB rd, [rn, rm]: signed byte load, register offset.  */
static inline void tcg_out_ld8s_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x019000d0 |
                    (rn << 16) | (rd << 12) | rm);
}
758 
/* 32-bit load with arbitrary OFFSET: use the LDR immediate form when
 * it fits in 12 bits, otherwise build the offset in scratch r8 and
 * use the register form.  */
static inline void tcg_out_ld32u(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
        tcg_out_ld32_r(s, cond, rd, rn, TCG_REG_R8);
    } else
        tcg_out_ld32_12(s, cond, rd, rn, offset);
}
768 
/* 32-bit store with arbitrary OFFSET; falls back to scratch r8 when
 * the offset exceeds the 12-bit immediate range.  */
static inline void tcg_out_st32(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
        tcg_out_st32_r(s, cond, rd, rn, TCG_REG_R8);
    } else
        tcg_out_st32_12(s, cond, rd, rn, offset);
}
778 
/* Unsigned halfword load with arbitrary OFFSET; LDRH's immediate is
 * only 8 bits, so larger offsets go through scratch r8.  */
static inline void tcg_out_ld16u(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
        tcg_out_ld16u_r(s, cond, rd, rn, TCG_REG_R8);
    } else
        tcg_out_ld16u_8(s, cond, rd, rn, offset);
}
788 
/* Signed halfword load with arbitrary OFFSET (8-bit immediate range).  */
static inline void tcg_out_ld16s(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
        tcg_out_ld16s_r(s, cond, rd, rn, TCG_REG_R8);
    } else
        tcg_out_ld16s_8(s, cond, rd, rn, offset);
}
798 
/* Halfword store with arbitrary OFFSET (8-bit immediate range).  */
static inline void tcg_out_st16(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
        tcg_out_st16_r(s, cond, rd, rn, TCG_REG_R8);
    } else
        tcg_out_st16_8(s, cond, rd, rn, offset);
}
808 
/* Unsigned byte load with arbitrary OFFSET; LDRB takes a 12-bit
 * immediate, unlike the signed-byte form below.  */
static inline void tcg_out_ld8u(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
        tcg_out_ld8_r(s, cond, rd, rn, TCG_REG_R8);
    } else
        tcg_out_ld8_12(s, cond, rd, rn, offset);
}
818 
/* Signed byte load with arbitrary OFFSET (LDRSB: 8-bit immediate).  */
static inline void tcg_out_ld8s(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
        tcg_out_ld8s_r(s, cond, rd, rn, TCG_REG_R8);
    } else
        tcg_out_ld8s_8(s, cond, rd, rn, offset);
}
828 
/* Store the low byte of rd to [rn + offset].  strb accepts a 12-bit
 * immediate; offsets beyond that use the register-offset form via the
 * scratch register R8.  */
static inline void tcg_out_st8(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset >= -0xfff && offset <= 0xfff) {
        /* 12-bit immediate form is sufficient.  */
        tcg_out_st8_12(s, cond, rd, rn, offset);
        return;
    }
    tcg_out_movi32(s, cond, TCG_REG_R8, offset);
    tcg_out_st8_r(s, cond, rd, rn, TCG_REG_R8);
}
838 
/* Emit a (conditional) branch to the absolute address addr.  Only the
 * range reachable by a single B instruction (roughly +/- 32 MB from the
 * current code pointer) is supported; anything further aborts.  The
 * disabled alternative below would handle the long-range case but is
 * compiled out.  */
static inline void tcg_out_goto(TCGContext *s, int cond, uint32_t addr)
{
    int32_t val;

    /* Displacement relative to the current output position; the B
     * encoder is expected to account for the PC-ahead bias.  */
    val = addr - (tcg_target_long) s->code_ptr;
    if (val - 8 < 0x01fffffd && val - 8 > -0x01fffffd)
        tcg_out_b(s, cond, val);
    else {
#if 1
        /* Out-of-range targets are a fatal code-generation error.  */
        tcg_abort();
#else
        if (cond == COND_AL) {
            /* Load the target from a literal word emitted inline.  */
            tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, -4);
            tcg_out32(s, addr); /* XXX: This is l->u.value, can we use it? */
        } else {
            /* Conditional long branch: add the displacement to PC.  */
            tcg_out_movi32(s, cond, TCG_REG_R8, val - 8);
            tcg_out_dat_reg(s, cond, ARITH_ADD,
                            TCG_REG_PC, TCG_REG_PC,
                            TCG_REG_R8, SHIFT_IMM_LSL(0));
        }
#endif
    }
}
862 
/* Emit a (conditional) call to the absolute address addr using BL.
 * Only BL-reachable targets (+/- ~32 MB) are supported; others abort.
 * The disabled code below sketches a long-range call sequence.  */
static inline void tcg_out_call(TCGContext *s, int cond, uint32_t addr)
{
    int32_t val;

    val = addr - (tcg_target_long) s->code_ptr;
    if (val < 0x01fffffd && val > -0x01fffffd)
        tcg_out_bl(s, cond, val);
    else {
#if 1
        /* Out-of-range call targets are a fatal code-generation error.  */
        tcg_abort();
#else
        if (cond == COND_AL) {
            /* Set LR past the literal, then load the target into PC.  */
            tcg_out_dat_imm(s, cond, ARITH_ADD, TCG_REG_R14, TCG_REG_PC, 4);
            tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, -4);
            tcg_out32(s, addr); /* XXX: This is l->u.value, can we use it? */
        } else {
            /* Conditional call: save return address manually, then BX.  */
            tcg_out_movi32(s, cond, TCG_REG_R9, addr);
            tcg_out_dat_reg(s, cond, ARITH_MOV, TCG_REG_R14, 0,
                            TCG_REG_PC, SHIFT_IMM_LSL(0));
            tcg_out_bx(s, cond, TCG_REG_R9);
        }
#endif
    }
}
887 
/* Emit a (conditional) indirect call through the register arg.  */
static inline void tcg_out_callr(TCGContext *s, int cond, int arg)
{
    if (!use_armv5_instructions) {
        /* No BLX available: save the return address in LR by hand,
         * then branch through the register.  */
        tcg_out_dat_reg(s, cond, ARITH_MOV, TCG_REG_R14, 0,
                        TCG_REG_PC, SHIFT_IMM_LSL(0));
        tcg_out_bx(s, cond, arg);
    } else {
        tcg_out_blx(s, cond, arg);
    }
}
898 
/* Branch to a TCG label.  A resolved label becomes a direct branch; an
 * unresolved one records a relocation to be patched when the label's
 * address is known.  31337 is a dummy addend placeholder.  */
static inline void tcg_out_goto_label(TCGContext *s, int cond, int label_index)
{
    TCGLabel *l = &s->labels[label_index];

    if (l->has_value)
        tcg_out_goto(s, cond, l->u.value);
    else if (cond == COND_AL) {
        /* Unconditional: load PC from a literal word emitted right after
         * this instruction; the word itself gets an absolute reloc.  */
        tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, -4);
        tcg_out_reloc(s, s->code_ptr, R_ARM_ABS32, label_index, 31337);
        s->code_ptr += 4;
    } else {
        /* Probably this should be preferred even for COND_AL... */
        tcg_out_reloc(s, s->code_ptr, R_ARM_PC24, label_index, 31337);
        tcg_out_b_noaddr(s, cond);
    }
}
915 
#ifdef CONFIG_SOFTMMU

#include "../../softmmu_defs.h"

/* Softmmu slow-path load helpers, indexed by log2 of the access size
 * (0 = byte, 1 = halfword, 2 = word, 3 = doubleword).  */
static void *qemu_ld_helpers[4] = {
    __ldb_mmu,
    __ldw_mmu,
    __ldl_mmu,
    __ldq_mmu,
};

/* Softmmu slow-path store helpers, same indexing as the load table.  */
static void *qemu_st_helpers[4] = {
    __stb_mmu,
    __stw_mmu,
    __stl_mmu,
    __stq_mmu,
};
#endif

/* Total shift applied when indexing the per-CPU TLB table by page number.  */
#define TLB_SHIFT	(CPU_TLB_ENTRY_BITS + CPU_TLB_BITS)
936 
/* Emit code for a guest memory load (qemu_ld).  opc encodes the access:
 * bits 0-1 are log2 of the size (0=8, 1=16, 2=32, 3=64 bit) and bit 2
 * requests sign extension.  args supplies the data register(s), the
 * address register(s) and — under softmmu — the mem_index.  The softmmu
 * path emits an inline TLB lookup with a conditional fast path and falls
 * back to the __ld*_mmu helpers on a miss.  */
static inline void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
{
    int addr_reg, data_reg, data_reg2, bswap;
#ifdef CONFIG_SOFTMMU
    int mem_index, s_bits;
# if TARGET_LONG_BITS == 64
    int addr_reg2;
# endif
    uint32_t *label_ptr;
#endif

    /* Big-endian guests need byte swapping on this little-endian host.  */
#ifdef TARGET_WORDS_BIGENDIAN
    bswap = 1;
#else
    bswap = 0;
#endif
    data_reg = *args++;
    if (opc == 3)
        data_reg2 = *args++;
    else
        data_reg2 = 0; /* suppress warning */
    addr_reg = *args++;
#ifdef CONFIG_SOFTMMU
# if TARGET_LONG_BITS == 64
    addr_reg2 = *args++;
# endif
    mem_index = *args;
    s_bits = opc & 3;

    /* Should generate something like the following:
     *  shr r8, addr_reg, #TARGET_PAGE_BITS
     *  and r0, r8, #(CPU_TLB_SIZE - 1)   @ Assumption: CPU_TLB_BITS <= 8
     *  add r0, env, r0 lsl #CPU_TLB_ENTRY_BITS
     */
#  if CPU_TLB_BITS > 8
#   error
#  endif
    tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_R8,
                    0, addr_reg, SHIFT_IMM_LSR(TARGET_PAGE_BITS));
    tcg_out_dat_imm(s, COND_AL, ARITH_AND,
                    TCG_REG_R0, TCG_REG_R8, CPU_TLB_SIZE - 1);
    tcg_out_dat_reg(s, COND_AL, ARITH_ADD, TCG_REG_R0, TCG_AREG0,
                    TCG_REG_R0, SHIFT_IMM_LSL(CPU_TLB_ENTRY_BITS));
    /* In the
     *  ldr r1 [r0, #(offsetof(CPUState, tlb_table[mem_index][0].addr_read))]
     * below, the offset is likely to exceed 12 bits if mem_index != 0 and
     * not exceed otherwise, so use an
     *  add r0, r0, #(mem_index * sizeof *CPUState.tlb_table)
     * before.
     */
    if (mem_index)
        /* The immediate is encoded as an 8-bit value plus rotation, hence
         * the TLB_SHIFT-derived split below.  */
        tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R0, TCG_REG_R0,
                        (mem_index << (TLB_SHIFT & 1)) |
                        ((16 - (TLB_SHIFT >> 1)) << 8));
    tcg_out_ld32_12(s, COND_AL, TCG_REG_R1, TCG_REG_R0,
                    offsetof(CPUState, tlb_table[0][0].addr_read));
    /* Compare the TLB tag against the page number of the access; all the
     * fast-path code below is predicated on EQ (TLB hit).  */
    tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0, TCG_REG_R1,
                    TCG_REG_R8, SHIFT_IMM_LSL(TARGET_PAGE_BITS));
    /* Check alignment.  */
    if (s_bits)
        tcg_out_dat_imm(s, COND_EQ, ARITH_TST,
                        0, addr_reg, (1 << s_bits) - 1);
#  if TARGET_LONG_BITS == 64
    /* XXX: possibly we could use a block data load or writeback in
     * the first access.  */
    tcg_out_ld32_12(s, COND_EQ, TCG_REG_R1, TCG_REG_R0,
                    offsetof(CPUState, tlb_table[0][0].addr_read) + 4);
    tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0,
                    TCG_REG_R1, addr_reg2, SHIFT_IMM_LSL(0));
#  endif
    /* On a hit, fetch the host-address addend for this TLB entry.  */
    tcg_out_ld32_12(s, COND_EQ, TCG_REG_R1, TCG_REG_R0,
                    offsetof(CPUState, tlb_table[0][0].addend));

    /* Fast path: perform the load at addr_reg + addend, predicated EQ.  */
    switch (opc) {
    case 0:
        tcg_out_ld8_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
        break;
    case 0 | 4:
        tcg_out_ld8s_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
        break;
    case 1:
        tcg_out_ld16u_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
        if (bswap) {
            tcg_out_bswap16(s, COND_EQ, data_reg, data_reg);
        }
        break;
    case 1 | 4:
        if (bswap) {
            /* Load unsigned, then swap with sign extension.  */
            tcg_out_ld16u_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
            tcg_out_bswap16s(s, COND_EQ, data_reg, data_reg);
        } else {
            tcg_out_ld16s_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
        }
        break;
    case 2:
    default:
        tcg_out_ld32_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
        if (bswap) {
            tcg_out_bswap32(s, COND_EQ, data_reg, data_reg);
        }
        break;
    case 3:
        if (bswap) {
            /* Swapped 64-bit load: halves land in the opposite registers
             * and are each byte-swapped afterwards.  */
            tcg_out_ld32_rwb(s, COND_EQ, data_reg2, TCG_REG_R1, addr_reg);
            tcg_out_ld32_12(s, COND_EQ, data_reg, TCG_REG_R1, 4);
            tcg_out_bswap32(s, COND_EQ, data_reg2, data_reg2);
            tcg_out_bswap32(s, COND_EQ, data_reg, data_reg);
        } else {
            tcg_out_ld32_rwb(s, COND_EQ, data_reg, TCG_REG_R1, addr_reg);
            tcg_out_ld32_12(s, COND_EQ, data_reg2, TCG_REG_R1, 4);
        }
        break;
    }

    /* On a hit the EQ branch below skips the slow path; its target is
     * patched at the end of this function.  */
    label_ptr = (void *) s->code_ptr;
    tcg_out_b_noaddr(s, COND_EQ);

    /* TODO: move this code to where the constants pool will be */
    /* Slow path: marshal arguments per the calling convention and call
     * the softmmu load helper.  */
    if (addr_reg != TCG_REG_R0) {
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                        TCG_REG_R0, 0, addr_reg, SHIFT_IMM_LSL(0));
    }
# if TARGET_LONG_BITS == 32
    tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R1, 0, mem_index);
# else
    tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                    TCG_REG_R1, 0, addr_reg2, SHIFT_IMM_LSL(0));
    tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R2, 0, mem_index);
# endif
    tcg_out_bl(s, COND_AL, (tcg_target_long) qemu_ld_helpers[s_bits] -
                    (tcg_target_long) s->code_ptr);

    /* Move/extend the helper's return value (R0, or R0:R1 for 64-bit)
     * into the destination register(s).  */
    switch (opc) {
    case 0 | 4:
        tcg_out_ext8s(s, COND_AL, data_reg, TCG_REG_R0);
        break;
    case 1 | 4:
        tcg_out_ext16s(s, COND_AL, data_reg, TCG_REG_R0);
        break;
    case 0:
    case 1:
    case 2:
    default:
        if (data_reg != TCG_REG_R0) {
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                            data_reg, 0, TCG_REG_R0, SHIFT_IMM_LSL(0));
        }
        break;
    case 3:
        if (data_reg != TCG_REG_R0) {
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                            data_reg, 0, TCG_REG_R0, SHIFT_IMM_LSL(0));
        }
        if (data_reg2 != TCG_REG_R1) {
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                            data_reg2, 0, TCG_REG_R1, SHIFT_IMM_LSL(0));
        }
        break;
    }

    /* Resolve the fast-path branch to land here, after the slow path.  */
    reloc_pc24(label_ptr, (tcg_target_long)s->code_ptr);
#else /* !CONFIG_SOFTMMU */
    if (GUEST_BASE) {
        uint32_t offset = GUEST_BASE;
        int i;
        int rot;

        /* Add GUEST_BASE to the address in 8-bit-rotated immediate
         * chunks, accumulating into the scratch register R8.  */
        while (offset) {
            i = ctz32(offset) & ~1;
            rot = ((32 - i) << 7) & 0xf00;

            tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R8, addr_reg,
                            ((offset >> i) & 0xff) | rot);
            addr_reg = TCG_REG_R8;
            offset &= ~(0xff << i);
        }
    }
    switch (opc) {
    case 0:
        tcg_out_ld8_12(s, COND_AL, data_reg, addr_reg, 0);
        break;
    case 0 | 4:
        tcg_out_ld8s_8(s, COND_AL, data_reg, addr_reg, 0);
        break;
    case 1:
        tcg_out_ld16u_8(s, COND_AL, data_reg, addr_reg, 0);
        if (bswap) {
            tcg_out_bswap16(s, COND_AL, data_reg, data_reg);
        }
        break;
    case 1 | 4:
        if (bswap) {
            tcg_out_ld16u_8(s, COND_AL, data_reg, addr_reg, 0);
            tcg_out_bswap16s(s, COND_AL, data_reg, data_reg);
        } else {
            tcg_out_ld16s_8(s, COND_AL, data_reg, addr_reg, 0);
        }
        break;
    case 2:
    default:
        tcg_out_ld32_12(s, COND_AL, data_reg, addr_reg, 0);
        if (bswap) {
            tcg_out_bswap32(s, COND_AL, data_reg, data_reg);
        }
        break;
    case 3:
        /* TODO: use block load -
         * check that data_reg2 > data_reg or the other way */
        if (data_reg == addr_reg) {
            /* Load the half that clobbers the address register last.  */
            tcg_out_ld32_12(s, COND_AL, data_reg2, addr_reg, bswap ? 0 : 4);
            tcg_out_ld32_12(s, COND_AL, data_reg, addr_reg, bswap ? 4 : 0);
        } else {
            tcg_out_ld32_12(s, COND_AL, data_reg, addr_reg, bswap ? 4 : 0);
            tcg_out_ld32_12(s, COND_AL, data_reg2, addr_reg, bswap ? 0 : 4);
        }
        if (bswap) {
            tcg_out_bswap32(s, COND_AL, data_reg, data_reg);
            tcg_out_bswap32(s, COND_AL, data_reg2, data_reg2);
        }
        break;
    }
#endif
}
1160 
/* Emit code for a guest memory store (qemu_st).  opc is log2 of the
 * access size (0=8, 1=16, 2=32, 3=64 bit).  Mirrors tcg_out_qemu_ld: the
 * softmmu path does an inline TLB lookup against addr_write with an
 * EQ-predicated fast path, falling back to the __st*_mmu helpers.  */
static inline void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int opc)
{
    int addr_reg, data_reg, data_reg2, bswap;
#ifdef CONFIG_SOFTMMU
    int mem_index, s_bits;
# if TARGET_LONG_BITS == 64
    int addr_reg2;
# endif
    uint32_t *label_ptr;
#endif

    /* Big-endian guests need byte swapping on this little-endian host.  */
#ifdef TARGET_WORDS_BIGENDIAN
    bswap = 1;
#else
    bswap = 0;
#endif
    data_reg = *args++;
    if (opc == 3)
        data_reg2 = *args++;
    else
        data_reg2 = 0; /* suppress warning */
    addr_reg = *args++;
#ifdef CONFIG_SOFTMMU
# if TARGET_LONG_BITS == 64
    addr_reg2 = *args++;
# endif
    mem_index = *args;
    s_bits = opc & 3;

    /* Should generate something like the following:
     *  shr r8, addr_reg, #TARGET_PAGE_BITS
     *  and r0, r8, #(CPU_TLB_SIZE - 1)   @ Assumption: CPU_TLB_BITS <= 8
     *  add r0, env, r0 lsl #CPU_TLB_ENTRY_BITS
     */
    tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                    TCG_REG_R8, 0, addr_reg, SHIFT_IMM_LSR(TARGET_PAGE_BITS));
    tcg_out_dat_imm(s, COND_AL, ARITH_AND,
                    TCG_REG_R0, TCG_REG_R8, CPU_TLB_SIZE - 1);
    tcg_out_dat_reg(s, COND_AL, ARITH_ADD, TCG_REG_R0,
                    TCG_AREG0, TCG_REG_R0, SHIFT_IMM_LSL(CPU_TLB_ENTRY_BITS));
    /* In the
     *  ldr r1 [r0, #(offsetof(CPUState, tlb_table[mem_index][0].addr_write))]
     * below, the offset is likely to exceed 12 bits if mem_index != 0 and
     * not exceed otherwise, so use an
     *  add r0, r0, #(mem_index * sizeof *CPUState.tlb_table)
     * before.
     */
    if (mem_index)
        /* Immediate encoded as 8-bit value plus rotation (see the
         * TLB_SHIFT-derived split).  */
        tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R0, TCG_REG_R0,
                        (mem_index << (TLB_SHIFT & 1)) |
                        ((16 - (TLB_SHIFT >> 1)) << 8));
    tcg_out_ld32_12(s, COND_AL, TCG_REG_R1, TCG_REG_R0,
                    offsetof(CPUState, tlb_table[0][0].addr_write));
    /* Compare the TLB tag with the access's page number; the fast path
     * below is predicated on EQ (hit).  */
    tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0, TCG_REG_R1,
                    TCG_REG_R8, SHIFT_IMM_LSL(TARGET_PAGE_BITS));
    /* Check alignment.  */
    if (s_bits)
        tcg_out_dat_imm(s, COND_EQ, ARITH_TST,
                        0, addr_reg, (1 << s_bits) - 1);
#  if TARGET_LONG_BITS == 64
    /* XXX: possibly we could use a block data load or writeback in
     * the first access.  */
    tcg_out_ld32_12(s, COND_EQ, TCG_REG_R1, TCG_REG_R0,
                    offsetof(CPUState, tlb_table[0][0].addr_write) + 4);
    tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0,
                    TCG_REG_R1, addr_reg2, SHIFT_IMM_LSL(0));
#  endif
    /* On a hit, fetch the host-address addend for this TLB entry.  */
    tcg_out_ld32_12(s, COND_EQ, TCG_REG_R1, TCG_REG_R0,
                    offsetof(CPUState, tlb_table[0][0].addend));

    /* Fast path: perform the store at addr_reg + addend, predicated EQ.  */
    switch (opc) {
    case 0:
        tcg_out_st8_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
        break;
    case 1:
        if (bswap) {
            /* Swap into R0 so the source register is preserved.  */
            tcg_out_bswap16(s, COND_EQ, TCG_REG_R0, data_reg);
            tcg_out_st16_r(s, COND_EQ, TCG_REG_R0, addr_reg, TCG_REG_R1);
        } else {
            tcg_out_st16_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
        }
        break;
    case 2:
    default:
        if (bswap) {
            tcg_out_bswap32(s, COND_EQ, TCG_REG_R0, data_reg);
            tcg_out_st32_r(s, COND_EQ, TCG_REG_R0, addr_reg, TCG_REG_R1);
        } else {
            tcg_out_st32_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
        }
        break;
    case 3:
        if (bswap) {
            /* Store the halves swapped and byte-swapped via R0.  */
            tcg_out_bswap32(s, COND_EQ, TCG_REG_R0, data_reg2);
            tcg_out_st32_rwb(s, COND_EQ, TCG_REG_R0, TCG_REG_R1, addr_reg);
            tcg_out_bswap32(s, COND_EQ, TCG_REG_R0, data_reg);
            tcg_out_st32_12(s, COND_EQ, TCG_REG_R0, TCG_REG_R1, 4);
        } else {
            tcg_out_st32_rwb(s, COND_EQ, data_reg, TCG_REG_R1, addr_reg);
            tcg_out_st32_12(s, COND_EQ, data_reg2, TCG_REG_R1, 4);
        }
        break;
    }

    /* On a hit this EQ branch skips the slow path; patched at the end.  */
    label_ptr = (void *) s->code_ptr;
    tcg_out_b_noaddr(s, COND_EQ);

    /* TODO: move this code to where the constants pool will be */
    /* Slow path: marshal arguments and call the softmmu store helper.  */
    tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                    TCG_REG_R0, 0, addr_reg, SHIFT_IMM_LSL(0));
# if TARGET_LONG_BITS == 32
    switch (opc) {
    case 0:
        tcg_out_ext8u(s, COND_AL, TCG_REG_R1, data_reg);
        tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R2, 0, mem_index);
        break;
    case 1:
        tcg_out_ext16u(s, COND_AL, TCG_REG_R1, data_reg);
        tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R2, 0, mem_index);
        break;
    case 2:
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                        TCG_REG_R1, 0, data_reg, SHIFT_IMM_LSL(0));
        tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R2, 0, mem_index);
        break;
    case 3:
        /* 64-bit value goes in R2:R3; mem_index overflows the register
         * arguments and is pushed on the stack (popped after the call).  */
        tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R8, 0, mem_index);
        tcg_out32(s, (COND_AL << 28) | 0x052d8010); /* str r8, [sp, #-0x10]! */
        if (data_reg != TCG_REG_R2) {
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                            TCG_REG_R2, 0, data_reg, SHIFT_IMM_LSL(0));
        }
        if (data_reg2 != TCG_REG_R3) {
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                            TCG_REG_R3, 0, data_reg2, SHIFT_IMM_LSL(0));
        }
        break;
    }
# else
    tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                    TCG_REG_R1, 0, addr_reg2, SHIFT_IMM_LSL(0));
    switch (opc) {
    case 0:
        tcg_out_ext8u(s, COND_AL, TCG_REG_R2, data_reg);
        tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R3, 0, mem_index);
        break;
    case 1:
        tcg_out_ext16u(s, COND_AL, TCG_REG_R2, data_reg);
        tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R3, 0, mem_index);
        break;
    case 2:
        if (data_reg != TCG_REG_R2) {
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                            TCG_REG_R2, 0, data_reg, SHIFT_IMM_LSL(0));
        }
        tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R3, 0, mem_index);
        break;
    case 3:
        /* mem_index spills to the stack; see the 32-bit case above.  */
        tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R8, 0, mem_index);
        tcg_out32(s, (COND_AL << 28) | 0x052d8010); /* str r8, [sp, #-0x10]! */
        if (data_reg != TCG_REG_R2) {
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                            TCG_REG_R2, 0, data_reg, SHIFT_IMM_LSL(0));
        }
        if (data_reg2 != TCG_REG_R3) {
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                            TCG_REG_R3, 0, data_reg2, SHIFT_IMM_LSL(0));
        }
        break;
    }
# endif

    tcg_out_bl(s, COND_AL, (tcg_target_long) qemu_st_helpers[s_bits] -
                    (tcg_target_long) s->code_ptr);
    if (opc == 3)
        /* Undo the 0x10-byte stack adjustment made for mem_index.  */
        tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R13, TCG_REG_R13, 0x10);

    /* Resolve the fast-path branch to land here, after the slow path.  */
    reloc_pc24(label_ptr, (tcg_target_long)s->code_ptr);
#else /* !CONFIG_SOFTMMU */
    if (GUEST_BASE) {
        uint32_t offset = GUEST_BASE;
        int i;
        int rot;

        /* Add GUEST_BASE to the address in 8-bit-rotated immediate
         * chunks, accumulating into R1.  */
        while (offset) {
            i = ctz32(offset) & ~1;
            rot = ((32 - i) << 7) & 0xf00;

            tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R1, addr_reg,
                            ((offset >> i) & 0xff) | rot);
            addr_reg = TCG_REG_R1;
            offset &= ~(0xff << i);
        }
    }
    switch (opc) {
    case 0:
        tcg_out_st8_12(s, COND_AL, data_reg, addr_reg, 0);
        break;
    case 1:
        if (bswap) {
            tcg_out_bswap16(s, COND_AL, TCG_REG_R0, data_reg);
            tcg_out_st16_8(s, COND_AL, TCG_REG_R0, addr_reg, 0);
        } else {
            tcg_out_st16_8(s, COND_AL, data_reg, addr_reg, 0);
        }
        break;
    case 2:
    default:
        if (bswap) {
            tcg_out_bswap32(s, COND_AL, TCG_REG_R0, data_reg);
            tcg_out_st32_12(s, COND_AL, TCG_REG_R0, addr_reg, 0);
        } else {
            tcg_out_st32_12(s, COND_AL, data_reg, addr_reg, 0);
        }
        break;
    case 3:
        /* TODO: use block store -
         * check that data_reg2 > data_reg or the other way */
        if (bswap) {
            tcg_out_bswap32(s, COND_AL, TCG_REG_R0, data_reg2);
            tcg_out_st32_12(s, COND_AL, TCG_REG_R0, addr_reg, 0);
            tcg_out_bswap32(s, COND_AL, TCG_REG_R0, data_reg);
            tcg_out_st32_12(s, COND_AL, TCG_REG_R0, addr_reg, 4);
        } else {
            tcg_out_st32_12(s, COND_AL, data_reg, addr_reg, 0);
            tcg_out_st32_12(s, COND_AL, data_reg2, addr_reg, 4);
        }
        break;
    }
#endif
}
1392 
1393 static uint8_t *tb_ret_addr;
1394 
/* Emit host code for a single TCG opcode.  args/const_args follow the
 * standard TCG backend convention: const_args[i] non-zero means args[i]
 * is an immediate value rather than a register.  Unknown opcodes abort.
 *
 * Bug fix: in the non-constant INDEX_op_rotl_i32 case, the rotation
 * amount for the RSB was taken from args[1] (the source value) instead
 * of args[2] (the rotation count); corrected below to args[2].  */
static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
                const TCGArg *args, const int *const_args)
{
    int c;

    switch (opc) {
    case INDEX_op_exit_tb:
        {
            uint8_t *ld_ptr = s->code_ptr;
            /* Return values that don't fit a MOV immediate are loaded
             * from a literal word placed after the branch.  */
            if (args[0] >> 8)
                tcg_out_ld32_12(s, COND_AL, TCG_REG_R0, TCG_REG_PC, 0);
            else
                tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R0, 0, args[0]);
            tcg_out_goto(s, COND_AL, (tcg_target_ulong) tb_ret_addr);
            if (args[0] >> 8) {
                /* Patch the ldr's offset byte to reach the literal just
                 * emitted (accounting for the 8-byte PC bias).  */
                *ld_ptr = (uint8_t) (s->code_ptr - ld_ptr) - 8;
                tcg_out32(s, args[0]);
            }
        }
        break;
    case INDEX_op_goto_tb:
        if (s->tb_jmp_offset) {
            /* Direct jump method */
#if defined(USE_DIRECT_JUMP)
            s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
            tcg_out_b_noaddr(s, COND_AL);
#else
            tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, -4);
            s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
            tcg_out32(s, 0);
#endif
        } else {
            /* Indirect jump method */
#if 1
            c = (int) (s->tb_next + args[0]) - ((int) s->code_ptr + 8);
            if (c > 0xfff || c < -0xfff) {
                /* tb_next slot out of ldr-immediate range: load its
                 * address into R0 first.  */
                tcg_out_movi32(s, COND_AL, TCG_REG_R0,
                                (tcg_target_long) (s->tb_next + args[0]));
                tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_R0, 0);
            } else
                tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, c);
#else
            tcg_out_ld32_12(s, COND_AL, TCG_REG_R0, TCG_REG_PC, 0);
            tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_R0, 0);
            tcg_out32(s, (tcg_target_long) (s->tb_next + args[0]));
#endif
        }
        s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
        break;
    case INDEX_op_call:
        if (const_args[0])
            tcg_out_call(s, COND_AL, args[0]);
        else
            tcg_out_callr(s, COND_AL, args[0]);
        break;
    case INDEX_op_jmp:
        if (const_args[0])
            tcg_out_goto(s, COND_AL, args[0]);
        else
            tcg_out_bx(s, COND_AL, args[0]);
        break;
    case INDEX_op_br:
        tcg_out_goto_label(s, COND_AL, args[0]);
        break;

    case INDEX_op_ld8u_i32:
        tcg_out_ld8u(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld8s_i32:
        tcg_out_ld8s(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld16u_i32:
        tcg_out_ld16u(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld16s_i32:
        tcg_out_ld16s(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld_i32:
        tcg_out_ld32u(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_st8_i32:
        tcg_out_st8(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_st16_i32:
        tcg_out_st16(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_st_i32:
        tcg_out_st32(s, COND_AL, args[0], args[1], args[2]);
        break;

    case INDEX_op_mov_i32:
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                        args[0], 0, args[1], SHIFT_IMM_LSL(0));
        break;
    case INDEX_op_movi_i32:
        tcg_out_movi32(s, COND_AL, args[0], args[1]);
        break;
    case INDEX_op_add_i32:
        c = ARITH_ADD;
        goto gen_arith;
    case INDEX_op_sub_i32:
        c = ARITH_SUB;
        goto gen_arith;
    case INDEX_op_and_i32:
        c = ARITH_AND;
        goto gen_arith;
    case INDEX_op_andc_i32:
        c = ARITH_BIC;
        goto gen_arith;
    case INDEX_op_or_i32:
        c = ARITH_ORR;
        goto gen_arith;
    case INDEX_op_xor_i32:
        c = ARITH_EOR;
        /* Fall through.  */
    gen_arith:
        if (const_args[2]) {
            int rot;
            /* Encode the immediate as an 8-bit value plus rotation.  */
            rot = encode_imm(args[2]);
            tcg_out_dat_imm(s, COND_AL, c,
                            args[0], args[1], rotl(args[2], rot) | (rot << 7));
        } else
            tcg_out_dat_reg(s, COND_AL, c,
                            args[0], args[1], args[2], SHIFT_IMM_LSL(0));
        break;
    case INDEX_op_add2_i32:
        tcg_out_dat_reg2(s, COND_AL, ARITH_ADD, ARITH_ADC,
                        args[0], args[1], args[2], args[3],
                        args[4], args[5], SHIFT_IMM_LSL(0));
        break;
    case INDEX_op_sub2_i32:
        tcg_out_dat_reg2(s, COND_AL, ARITH_SUB, ARITH_SBC,
                        args[0], args[1], args[2], args[3],
                        args[4], args[5], SHIFT_IMM_LSL(0));
        break;
    case INDEX_op_neg_i32:
        /* neg x == rsb x, #0 (0 - x).  */
        tcg_out_dat_imm(s, COND_AL, ARITH_RSB, args[0], args[1], 0);
        break;
    case INDEX_op_not_i32:
        tcg_out_dat_reg(s, COND_AL,
                        ARITH_MVN, args[0], 0, args[1], SHIFT_IMM_LSL(0));
        break;
    case INDEX_op_mul_i32:
        tcg_out_mul32(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_mulu2_i32:
        tcg_out_umull32(s, COND_AL, args[0], args[1], args[2], args[3]);
        break;
    /* XXX: Perhaps args[2] & 0x1f is wrong */
    case INDEX_op_shl_i32:
        c = const_args[2] ?
                SHIFT_IMM_LSL(args[2] & 0x1f) : SHIFT_REG_LSL(args[2]);
        goto gen_shift32;
    case INDEX_op_shr_i32:
        /* Immediate LSR #0 encodes LSR #32, so shift-by-zero must be
         * emitted as LSL #0 (identity) instead.  */
        c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_LSR(args[2] & 0x1f) :
                SHIFT_IMM_LSL(0) : SHIFT_REG_LSR(args[2]);
        goto gen_shift32;
    case INDEX_op_sar_i32:
        c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_ASR(args[2] & 0x1f) :
                SHIFT_IMM_LSL(0) : SHIFT_REG_ASR(args[2]);
        goto gen_shift32;
    case INDEX_op_rotr_i32:
        /* ROR #0 encodes RRX, so shift-by-zero must be LSL #0 too.  */
        c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_ROR(args[2] & 0x1f) :
                SHIFT_IMM_LSL(0) : SHIFT_REG_ROR(args[2]);
        /* Fall through.  */
    gen_shift32:
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1], c);
        break;

    case INDEX_op_rotl_i32:
        /* rotl(x, n) == rotr(x, 32 - n); ARM only has ROR.  */
        if (const_args[2]) {
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1],
                            ((0x20 - args[2]) & 0x1f) ?
                            SHIFT_IMM_ROR((0x20 - args[2]) & 0x1f) :
                            SHIFT_IMM_LSL(0));
        } else {
            /* Compute 32 - rotation amount into the scratch register.
             * Fixed: the count is args[2], not args[1] (the source).  */
            tcg_out_dat_imm(s, COND_AL, ARITH_RSB, TCG_REG_R8, args[2], 0x20);
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1],
                            SHIFT_REG_ROR(TCG_REG_R8));
        }
        break;

    case INDEX_op_brcond_i32:
        if (const_args[1]) {
            int rot;
            rot = encode_imm(args[1]);
            tcg_out_dat_imm(s, COND_AL, ARITH_CMP, 0,
                            args[0], rotl(args[1], rot) | (rot << 7));
        } else {
            tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0,
                            args[0], args[1], SHIFT_IMM_LSL(0));
        }
        tcg_out_goto_label(s, tcg_cond_to_arm_cond[args[2]], args[3]);
        break;
    case INDEX_op_brcond2_i32:
        /* The resulting conditions are:
         * TCG_COND_EQ    -->  a0 == a2 && a1 == a3,
         * TCG_COND_NE    --> (a0 != a2 && a1 == a3) ||  a1 != a3,
         * TCG_COND_LT(U) --> (a0 <  a2 && a1 == a3) ||  a1 <  a3,
         * TCG_COND_GE(U) --> (a0 >= a2 && a1 == a3) || (a1 >= a3 && a1 != a3),
         * TCG_COND_LE(U) --> (a0 <= a2 && a1 == a3) || (a1 <= a3 && a1 != a3),
         * TCG_COND_GT(U) --> (a0 >  a2 && a1 == a3) ||  a1 >  a3,
         */
        tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0,
                        args[1], args[3], SHIFT_IMM_LSL(0));
        tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0,
                        args[0], args[2], SHIFT_IMM_LSL(0));
        tcg_out_goto_label(s, tcg_cond_to_arm_cond[args[4]], args[5]);
        break;
    case INDEX_op_setcond_i32:
        if (const_args[2]) {
            int rot;
            rot = encode_imm(args[2]);
            tcg_out_dat_imm(s, COND_AL, ARITH_CMP, 0,
                            args[1], rotl(args[2], rot) | (rot << 7));
        } else {
            tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0,
                            args[1], args[2], SHIFT_IMM_LSL(0));
        }
        /* Conditionally set the destination to 1 or 0 using a pair of
         * oppositely-predicated MOVs.  */
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[args[3]],
                        ARITH_MOV, args[0], 0, 1);
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(args[3])],
                        ARITH_MOV, args[0], 0, 0);
        break;
    case INDEX_op_setcond2_i32:
        /* See brcond2_i32 comment */
        tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0,
                        args[2], args[4], SHIFT_IMM_LSL(0));
        tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0,
                        args[1], args[3], SHIFT_IMM_LSL(0));
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[args[5]],
                        ARITH_MOV, args[0], 0, 1);
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(args[5])],
                        ARITH_MOV, args[0], 0, 0);
        break;

    case INDEX_op_qemu_ld8u:
        tcg_out_qemu_ld(s, args, 0);
        break;
    case INDEX_op_qemu_ld8s:
        tcg_out_qemu_ld(s, args, 0 | 4);
        break;
    case INDEX_op_qemu_ld16u:
        tcg_out_qemu_ld(s, args, 1);
        break;
    case INDEX_op_qemu_ld16s:
        tcg_out_qemu_ld(s, args, 1 | 4);
        break;
    case INDEX_op_qemu_ld32:
        tcg_out_qemu_ld(s, args, 2);
        break;
    case INDEX_op_qemu_ld64:
        tcg_out_qemu_ld(s, args, 3);
        break;

    case INDEX_op_qemu_st8:
        tcg_out_qemu_st(s, args, 0);
        break;
    case INDEX_op_qemu_st16:
        tcg_out_qemu_st(s, args, 1);
        break;
    case INDEX_op_qemu_st32:
        tcg_out_qemu_st(s, args, 2);
        break;
    case INDEX_op_qemu_st64:
        tcg_out_qemu_st(s, args, 3);
        break;

    case INDEX_op_bswap16_i32:
        tcg_out_bswap16(s, COND_AL, args[0], args[1]);
        break;
    case INDEX_op_bswap32_i32:
        tcg_out_bswap32(s, COND_AL, args[0], args[1]);
        break;

    case INDEX_op_ext8s_i32:
        tcg_out_ext8s(s, COND_AL, args[0], args[1]);
        break;
    case INDEX_op_ext16s_i32:
        tcg_out_ext16s(s, COND_AL, args[0], args[1]);
        break;
    case INDEX_op_ext16u_i32:
        tcg_out_ext16u(s, COND_AL, args[0], args[1]);
        break;

    default:
        tcg_abort();
    }
}
1684 
/* Operand-constraint table for every TCG opcode this backend accepts.
 * Constraint letters (parsed by this backend's target_parse_constraint,
 * outside this view — confirm there): "r" = any general register,
 * "i" = any immediate, "I" = an immediate encodable as an ARM rotated
 * 8-bit constant (see encode_imm).  "l"/"L" and "s"/"S" appear only on
 * qemu_ld/qemu_st operands and presumably carve out the registers the
 * softmmu helper calls need — verify against the constraint parser.
 * The table is terminated by the { -1 } sentinel.
 */
static const TCGTargetOpDef arm_op_defs[] = {
    { INDEX_op_exit_tb, { } },
    { INDEX_op_goto_tb, { } },
    { INDEX_op_call, { "ri" } },
    { INDEX_op_jmp, { "ri" } },
    { INDEX_op_br, { } },

    { INDEX_op_mov_i32, { "r", "r" } },
    { INDEX_op_movi_i32, { "r" } },

    { INDEX_op_ld8u_i32, { "r", "r" } },
    { INDEX_op_ld8s_i32, { "r", "r" } },
    { INDEX_op_ld16u_i32, { "r", "r" } },
    { INDEX_op_ld16s_i32, { "r", "r" } },
    { INDEX_op_ld_i32, { "r", "r" } },
    { INDEX_op_st8_i32, { "r", "r" } },
    { INDEX_op_st16_i32, { "r", "r" } },
    { INDEX_op_st_i32, { "r", "r" } },

    /* TODO: "r", "r", "ri" */
    { INDEX_op_add_i32, { "r", "r", "rI" } },
    { INDEX_op_sub_i32, { "r", "r", "rI" } },
    { INDEX_op_mul_i32, { "r", "r", "r" } },
    { INDEX_op_mulu2_i32, { "r", "r", "r", "r" } },
    { INDEX_op_and_i32, { "r", "r", "rI" } },
    { INDEX_op_andc_i32, { "r", "r", "rI" } },
    { INDEX_op_or_i32, { "r", "r", "rI" } },
    { INDEX_op_xor_i32, { "r", "r", "rI" } },
    { INDEX_op_neg_i32, { "r", "r" } },
    { INDEX_op_not_i32, { "r", "r" } },

    /* Shift counts may be any immediate; they are masked/encoded directly. */
    { INDEX_op_shl_i32, { "r", "r", "ri" } },
    { INDEX_op_shr_i32, { "r", "r", "ri" } },
    { INDEX_op_sar_i32, { "r", "r", "ri" } },
    { INDEX_op_rotl_i32, { "r", "r", "ri" } },
    { INDEX_op_rotr_i32, { "r", "r", "ri" } },

    { INDEX_op_brcond_i32, { "r", "rI" } },
    { INDEX_op_setcond_i32, { "r", "r", "rI" } },

    /* TODO: "r", "r", "r", "r", "ri", "ri" */
    { INDEX_op_add2_i32, { "r", "r", "r", "r", "r", "r" } },
    { INDEX_op_sub2_i32, { "r", "r", "r", "r", "r", "r" } },
    { INDEX_op_brcond2_i32, { "r", "r", "r", "r" } },
    { INDEX_op_setcond2_i32, { "r", "r", "r", "r", "r" } },

#if TARGET_LONG_BITS == 32
    /* 32-bit guest addresses: one address operand per memory access.  */
    { INDEX_op_qemu_ld8u, { "r", "l" } },
    { INDEX_op_qemu_ld8s, { "r", "l" } },
    { INDEX_op_qemu_ld16u, { "r", "l" } },
    { INDEX_op_qemu_ld16s, { "r", "l" } },
    { INDEX_op_qemu_ld32, { "r", "l" } },
    { INDEX_op_qemu_ld64, { "L", "L", "l" } },

    { INDEX_op_qemu_st8, { "s", "s" } },
    { INDEX_op_qemu_st16, { "s", "s" } },
    { INDEX_op_qemu_st32, { "s", "s" } },
    { INDEX_op_qemu_st64, { "S", "S", "s" } },
#else
    /* 64-bit guest addresses need a register pair for the address.  */
    { INDEX_op_qemu_ld8u, { "r", "l", "l" } },
    { INDEX_op_qemu_ld8s, { "r", "l", "l" } },
    { INDEX_op_qemu_ld16u, { "r", "l", "l" } },
    { INDEX_op_qemu_ld16s, { "r", "l", "l" } },
    { INDEX_op_qemu_ld32, { "r", "l", "l" } },
    { INDEX_op_qemu_ld64, { "L", "L", "l", "l" } },

    { INDEX_op_qemu_st8, { "s", "s", "s" } },
    { INDEX_op_qemu_st16, { "s", "s", "s" } },
    { INDEX_op_qemu_st32, { "s", "s", "s" } },
    { INDEX_op_qemu_st64, { "S", "S", "s", "s" } },
#endif

    { INDEX_op_bswap16_i32, { "r", "r" } },
    { INDEX_op_bswap32_i32, { "r", "r" } },

    { INDEX_op_ext8s_i32, { "r", "r" } },
    { INDEX_op_ext16s_i32, { "r", "r" } },
    { INDEX_op_ext16u_i32, { "r", "r" } },

    { -1 },
};
1766 
/* One-time backend initialisation: declare which host registers are
 * available for allocation, which are clobbered across calls, which are
 * reserved outright, and install the opcode constraint table.
 */
static void tcg_target_init(TCGContext *s)
{
    /* Registers the allocator must never hand out.  */
    static const int no_alloc_regs[] = {
        TCG_REG_CALL_STACK,
        TCG_REG_R8,
        TCG_REG_PC,
    };
    unsigned i;

#if !defined(CONFIG_USER_ONLY)
    /* fail safe */
    if ((1 << CPU_TLB_ENTRY_BITS) != sizeof(CPUTLBEntry))
        tcg_abort();
#endif

    /* All sixteen core registers may carry 32-bit TCG values.  */
    tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffff);

    /* r0-r3 plus ip (r12) and lr (r14) are call-clobbered on ARM.  */
    tcg_regset_set32(tcg_target_call_clobber_regs, 0,
                     (1 << TCG_REG_R0) | (1 << TCG_REG_R1) |
                     (1 << TCG_REG_R2) | (1 << TCG_REG_R3) |
                     (1 << TCG_REG_R12) | (1 << TCG_REG_R14));

    tcg_regset_clear(s->reserved_regs);
    for (i = 0; i < sizeof(no_alloc_regs) / sizeof(no_alloc_regs[0]); i++) {
        tcg_regset_set_reg(s->reserved_regs, no_alloc_regs[i]);
    }

    tcg_add_target_add_op_defs(arm_op_defs);
}
1791 
/* Generic TCG load hook: load the value at [arg1 + arg2] into 'arg'.
 * 'type' is ignored — this backend only handles TCG_TYPE_I32, so every
 * load is an unconditional 32-bit ldr.
 */
static inline void tcg_out_ld(TCGContext *s, TCGType type, int arg,
                int arg1, tcg_target_long arg2)
{
    tcg_out_ld32u(s, COND_AL, arg, arg1, arg2);
}
1797 
/* Generic TCG store hook: store register 'arg' to [arg1 + arg2].
 * 'type' is ignored — this backend only handles TCG_TYPE_I32, so every
 * store is an unconditional 32-bit str.
 */
static inline void tcg_out_st(TCGContext *s, TCGType type, int arg,
                int arg1, tcg_target_long arg2)
{
    tcg_out_st32(s, COND_AL, arg, arg1, arg2);
}
1803 
/* Add the constant 'val' to register 'reg' in place.  Only constants
 * whose magnitude fits in 8 bits (always encodable as an ARM
 * data-processing immediate) are supported; anything larger aborts.
 * val == 0 emits no code.
 *
 * Rewritten from an unbraced nested if/else (a dangling-else hazard)
 * into explicit, mutually exclusive ranges; the emitted code and the
 * abort conditions are unchanged.
 */
static void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val)
{
    if (val == 0) {
        return;                         /* nothing to do */
    }
    if (val > 0 && val < 0x100) {
        tcg_out_dat_imm(s, COND_AL, ARITH_ADD, reg, reg, val);
    } else if (val < 0 && val > -0x100) {
        /* Negative adjustment: subtract the magnitude instead.  */
        tcg_out_dat_imm(s, COND_AL, ARITH_SUB, reg, reg, -val);
    } else {
        /* Offsets of 0x100 or more are not supported by this helper.  */
        tcg_abort();
    }
}
1818 
/* Generic TCG register-to-register move hook: mov ret, arg.
 * 'type' is implicitly TCG_TYPE_I32 on this 32-bit-only backend.
 */
static inline void tcg_out_mov(TCGContext *s, TCGType type, int ret, int arg)
{
    tcg_out_dat_reg(s, COND_AL, ARITH_MOV, ret, 0, arg, SHIFT_IMM_LSL(0));
}
1823 
/* Generic TCG load-immediate hook: materialise the constant 'arg' in
 * register 'ret'.  Delegates to tcg_out_movi32, which picks the
 * appropriate instruction sequence for the constant's encoding.
 */
static inline void tcg_out_movi(TCGContext *s, TCGType type,
                int ret, tcg_target_long arg)
{
    tcg_out_movi32(s, COND_AL, ret, arg);
}
1829 
/* Emit the prologue/epilogue pair used to enter and leave translated
 * code.  The prologue saves the callee-saved registers TCG uses and
 * branches to the translated block whose entry point arrives in r0.
 * The instructions after the branch form the epilogue; its address is
 * recorded in tb_ret_addr (presumably the target of exit_tb — the use
 * is outside this view).
 */
static void tcg_target_qemu_prologue(TCGContext *s)
{
    /* There is no need to save r7, it is used to store the address
       of the env structure and is not modified by GCC. */

    /* stmdb sp!, { r4 - r6, r8 - r11, lr } */
    tcg_out32(s, (COND_AL << 28) | 0x092d4f70);

    /* Jump into the translated block; its address is in r0.  */
    tcg_out_bx(s, COND_AL, TCG_REG_R0);
    /* Everything emitted from here on is the common return path.  */
    tb_ret_addr = s->code_ptr;

    /* ldmia sp!, { r4 - r6, r8 - r11, pc } */
    tcg_out32(s, (COND_AL << 28) | 0x08bd8f70);
}
1844