xref: /illumos-kvm-cmd/softmmu_template.h (revision 68396ea9)
/*
 *  Software MMU support
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu-timer.h"

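/*
 * This header is a template: it is included once per access size with
 * SHIFT defined to log2 of the size in bytes, and each inclusion
 * generates the softmmu load/store helpers for that size.  SHIFT and
 * the other per-size macros are #undef'd at the end of the file so it
 * can be re-included.  A typical instantiation looks like the sketch
 * below (illustrative only; the exact include site varies by target):
 *
 *     #define MMUSUFFIX _mmu
 *     #define SHIFT 0
 *     #include "softmmu_template.h"
 *     #define SHIFT 1
 *     #include "softmmu_template.h"
 *     #define SHIFT 2
 *     #include "softmmu_template.h"
 *     #define SHIFT 3
 *     #include "softmmu_template.h"
 */
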
#define DATA_SIZE (1 << SHIFT)

#if DATA_SIZE == 8
#define SUFFIX q
#define USUFFIX q
#define DATA_TYPE uint64_t
#elif DATA_SIZE == 4
#define SUFFIX l
#define USUFFIX l
#define DATA_TYPE uint32_t
#elif DATA_SIZE == 2
#define SUFFIX w
#define USUFFIX uw
#define DATA_TYPE uint16_t
#elif DATA_SIZE == 1
#define SUFFIX b
#define USUFFIX ub
#define DATA_TYPE uint8_t
#else
#error unsupported data size
#endif
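
/*
 * SUFFIX names the generated helpers (e.g. __ldl_mmu / __stl_mmu once
 * glued with MMUSUFFIX); USUFFIX selects the unsigned raw load variant
 * (ldub_raw, lduw_raw) so that sub-word loads are zero-extended into
 * DATA_TYPE.
 */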

#ifdef SOFTMMU_CODE_ACCESS
#define READ_ACCESS_TYPE 2
#define ADDR_READ addr_code
#else
#define READ_ACCESS_TYPE 0
#define ADDR_READ addr_read
#endif
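
/*
 * READ_ACCESS_TYPE matches the access_type argument passed to tlb_fill()
 * and do_unaligned_access(): 0 = data load, 1 = data store, 2 = code
 * fetch.  Code fetches are looked up through the separate addr_code TLB
 * field.
 */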

static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                        int mmu_idx,
                                                        void *retaddr);
static inline DATA_TYPE glue(io_read, SUFFIX)(target_phys_addr_t physaddr,
                                              target_ulong addr,
                                              void *retaddr)
{
    DATA_TYPE res;
    int index;
    index = (physaddr >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    env->mem_io_pc = (unsigned long)retaddr;
    if (index > (IO_MEM_NOTDIRTY >> IO_MEM_SHIFT)
            && !can_do_io(env)) {
        cpu_io_recompile(env, retaddr);
    }

    env->mem_io_vaddr = addr;
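    /* io_mem_read provides handlers only for 1-, 2- and 4-byte accesses
       (indices 0..2), so a 64-bit access is split into two 32-bit reads
       in target byte order. */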
#if SHIFT <= 2
    res = io_mem_read[index][SHIFT](io_mem_opaque[index], physaddr);
#else
#ifdef TARGET_WORDS_BIGENDIAN
    res = (uint64_t)io_mem_read[index][2](io_mem_opaque[index], physaddr) << 32;
    res |= io_mem_read[index][2](io_mem_opaque[index], physaddr + 4);
#else
    res = io_mem_read[index][2](io_mem_opaque[index], physaddr);
    res |= (uint64_t)io_mem_read[index][2](io_mem_opaque[index], physaddr + 4) << 32;
#endif
#endif /* SHIFT > 2 */
    return res;
}

/* handle all cases except unaligned accesses which span two pages */
DATA_TYPE REGPARM glue(glue(__ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                      int mmu_idx)
{
    DATA_TYPE res;
    int index;
    target_ulong tlb_addr;
    target_phys_addr_t ioaddr;
    unsigned long addend;
    void *retaddr;
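    /* retaddr is captured lazily with GETPC() (the host return address of
       this helper) so that tlb_fill() can recover the guest PC of the
       faulting instruction for a precise exception. */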

    /* test if there is a match for unaligned or IO access */
    /* XXX: more of this could be done in the memory macro, in a
       non-portable way */
    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            retaddr = GETPC();
            ioaddr = env->iotlb[mmu_idx][index];
            res = glue(io_read, SUFFIX)(ioaddr, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
            /* slow unaligned access (it spans two pages or IO) */
        do_unaligned_access:
            retaddr = GETPC();
#ifdef ALIGNED_ONLY
            do_unaligned_access(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
#endif
            res = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr,
                                                         mmu_idx, retaddr);
        } else {
            /* unaligned/aligned access in the same page */
#ifdef ALIGNED_ONLY
            if ((addr & (DATA_SIZE - 1)) != 0) {
                retaddr = GETPC();
                do_unaligned_access(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
            }
#endif
            addend = env->tlb_table[mmu_idx][index].addend;
            res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(long)(addr+addend));
        }
    } else {
        /* the page is not in the TLB: fill it */
        retaddr = GETPC();
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0)
            do_unaligned_access(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
#endif
        tlb_fill(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
        goto redo;
    }
    return res;
}

/* handle all unaligned cases */
static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                        int mmu_idx,
                                                        void *retaddr)
{
    DATA_TYPE res, res1, res2;
    int index, shift;
    target_phys_addr_t ioaddr;
    unsigned long addend;
    target_ulong tlb_addr, addr1, addr2;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            ioaddr = env->iotlb[mmu_idx][index];
            res = glue(io_read, SUFFIX)(ioaddr, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            /* slow unaligned access (it spans two pages) */
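            /* Split the access into two aligned DATA_SIZE loads and merge
               the halves.  Worked example (little-endian, DATA_SIZE == 4,
               addr == 0xffe): addr1 == 0xffc, addr2 == 0x1000, shift == 16,
               giving res = (res1 >> 16) | (res2 << 16). */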
            addr1 = addr & ~(DATA_SIZE - 1);
            addr2 = addr1 + DATA_SIZE;
            res1 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr1,
                                                          mmu_idx, retaddr);
            res2 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr2,
                                                          mmu_idx, retaddr);
            shift = (addr & (DATA_SIZE - 1)) * 8;
#ifdef TARGET_WORDS_BIGENDIAN
            res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift));
#else
            res = (res1 >> shift) | (res2 << ((DATA_SIZE * 8) - shift));
#endif
            res = (DATA_TYPE)res;
        } else {
            /* unaligned/aligned access in the same page */
            addend = env->tlb_table[mmu_idx][index].addend;
            res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(long)(addr+addend));
        }
    } else {
        /* the page is not in the TLB: fill it */
        tlb_fill(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
        goto redo;
    }
    return res;
}

#ifndef SOFTMMU_CODE_ACCESS

static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                   DATA_TYPE val,
                                                   int mmu_idx,
                                                   void *retaddr);

static inline void glue(io_write, SUFFIX)(target_phys_addr_t physaddr,
                                          DATA_TYPE val,
                                          target_ulong addr,
                                          void *retaddr)
{
    int index;
    index = (physaddr >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    if (index > (IO_MEM_NOTDIRTY >> IO_MEM_SHIFT)
            && !can_do_io(env)) {
        cpu_io_recompile(env, retaddr);
    }

    env->mem_io_vaddr = addr;
    env->mem_io_pc = (unsigned long)retaddr;
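    /* As in io_read, only 1-, 2- and 4-byte handlers exist, so a 64-bit
       store is split into two 32-bit writes in target byte order. */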
#if SHIFT <= 2
    io_mem_write[index][SHIFT](io_mem_opaque[index], physaddr, val);
#else
#ifdef TARGET_WORDS_BIGENDIAN
    io_mem_write[index][2](io_mem_opaque[index], physaddr, val >> 32);
    io_mem_write[index][2](io_mem_opaque[index], physaddr + 4, val);
#else
    io_mem_write[index][2](io_mem_opaque[index], physaddr, val);
    io_mem_write[index][2](io_mem_opaque[index], physaddr + 4, val >> 32);
#endif
#endif /* SHIFT > 2 */
}

void REGPARM glue(glue(__st, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                 DATA_TYPE val,
                                                 int mmu_idx)
{
    target_phys_addr_t ioaddr;
    unsigned long addend;
    target_ulong tlb_addr;
    void *retaddr;
    int index;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            retaddr = GETPC();
            ioaddr = env->iotlb[mmu_idx][index];
            glue(io_write, SUFFIX)(ioaddr, val, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            retaddr = GETPC();
#ifdef ALIGNED_ONLY
            do_unaligned_access(addr, 1, mmu_idx, retaddr);
#endif
            glue(glue(slow_st, SUFFIX), MMUSUFFIX)(addr, val,
                                                   mmu_idx, retaddr);
        } else {
            /* aligned/unaligned access in the same page */
#ifdef ALIGNED_ONLY
            if ((addr & (DATA_SIZE - 1)) != 0) {
                retaddr = GETPC();
                do_unaligned_access(addr, 1, mmu_idx, retaddr);
            }
#endif
            addend = env->tlb_table[mmu_idx][index].addend;
            glue(glue(st, SUFFIX), _raw)((uint8_t *)(long)(addr+addend), val);
        }
    } else {
        /* the page is not in the TLB: fill it */
        retaddr = GETPC();
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0)
            do_unaligned_access(addr, 1, mmu_idx, retaddr);
#endif
        tlb_fill(addr, 1, mmu_idx, retaddr);
        goto redo;
    }
}

/* handles all unaligned cases */
static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                   DATA_TYPE val,
                                                   int mmu_idx,
                                                   void *retaddr)
{
    target_phys_addr_t ioaddr;
    unsigned long addend;
    target_ulong tlb_addr;
    int index, i;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            ioaddr = env->iotlb[mmu_idx][index];
            glue(io_write, SUFFIX)(ioaddr, val, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            /* XXX: not efficient, but simple */
            /* Note: relies on the fact that tlb_fill() does not remove the
             * previous page from the TLB cache.  */
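            /* Store val one byte at a time in target byte order (the loop
               walks from the highest address down); each slow_stb call
               re-enters the full MMU path, so the page boundary is handled
               by the normal TLB fill. */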
            for(i = DATA_SIZE - 1; i >= 0; i--) {
#ifdef TARGET_WORDS_BIGENDIAN
                glue(slow_stb, MMUSUFFIX)(addr + i, val >> (((DATA_SIZE - 1) * 8) - (i * 8)),
                                          mmu_idx, retaddr);
#else
                glue(slow_stb, MMUSUFFIX)(addr + i, val >> (i * 8),
                                          mmu_idx, retaddr);
#endif
            }
        } else {
            /* aligned/unaligned access in the same page */
            addend = env->tlb_table[mmu_idx][index].addend;
            glue(glue(st, SUFFIX), _raw)((uint8_t *)(long)(addr+addend), val);
        }
    } else {
        /* the page is not in the TLB: fill it */
        tlb_fill(addr, 1, mmu_idx, retaddr);
        goto redo;
    }
}

#endif /* !defined(SOFTMMU_CODE_ACCESS) */

#undef READ_ACCESS_TYPE
#undef SHIFT
#undef DATA_TYPE
#undef SUFFIX
#undef USUFFIX
#undef DATA_SIZE
#undef ADDR_READ