/* snapshot commit: 35dcbb2eff33dc08c473741d368dda7b7d61711e (gitweb capture artifact, not part of the source) */
1 /* Copyright (C) 2009 Pierre-Marc Fournier
3 * This library is free software; you can redistribute it and/or
4 * modify it under the terms of the GNU Lesser General Public
5 * License as published by the Free Software Foundation; either
6 * version 2.1 of the License, or (at your option) any later version.
8 * This library is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * Lesser General Public License for more details.
13 * You should have received a copy of the GNU Lesser General Public
14 * License along with this library; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
18 #ifndef UST_PROCESSOR_H
19 #define UST_PROCESSOR_H
/* Per-thread scratch stack used by save_registers() below: the asm
 * stubs spill CPU registers here instead of onto the real stack
 * (changing %sp from asm and returning to C is unsafe — see the
 * comment above save_registers).  500 longs deep; the stubs set
 * ust_reg_stack_ptr past the end ("addl/addq $500") and push
 * downward.  Defined in a .c file elsewhere in the project. */
25 extern __thread
long ust_reg_stack
[500];
26 extern volatile __thread
long *ust_reg_stack_ptr
;
/* Align an object to a CPU cache-line boundary (CAA_CACHE_LINE_SIZE
 * comes from the urcu headers — not visible in this capture). */
28 #define ____cacheline_aligned __attribute__((aligned(CAA_CACHE_LINE_SIZE)))
/* fls - find last (most-significant) set bit of x, i386 variant.
 * NOTE(review): the asm body (original lines 47-50, presumably a
 * bsrl with a cmov fallback using the "rm" (-1) operand so that
 * fls(0) is well-defined) is missing from this capture — recover
 * the full function from upstream before editing; fragment kept
 * verbatim below. */
46 static inline int fls(int x
)
51 : "=&r" (r
) : "rm" (x
), "rm" (-1));
55 #ifdef CONFIG_UST_GDB_INTEGRATION
57 /* save_registers - saves most of the processor's registers so
58 * they are available to the probe. gdb uses this to give the
59 * value of local variables.
61 * Saving all registers without losing any of their values is
64 * We cannot pass to the asm stub the address of a registers structure
65 * on the stack, because it will use a register and override its value.
67 * We don't want to use a stub to push the regs on the stack and then
68 * another stub to copy them to a structure because changing %sp in asm
69 * and then returning to C (even briefly) can have unexpected results.
70 * Also, gcc might modify %sp between the stubs in reaction to the
71 * register needs of the second stub that needs to know where to copy
72 * the register values.
74 * So the chosen approach is to use another stack, declared in thread-
75 * local storage, to push the registers. They are subsequently copied
76 * to the stack, by C code.
/* save_registers(regsptr) — i386 variant.  Spills the CPU register
 * state to the per-thread ust_reg_stack (located via general-dynamic
 * TLS lookups through ___tls_get_addr), then C code copies it into
 * *regsptr as a struct registers and pops the private stack.
 * NOTE(review): this capture is missing many original source lines
 * (e.g. lines 80/82/84... — the initial pushl/pushf instructions,
 * the asm() statement opening, the register-restore pops, and the
 * jump labels referenced by the testl).  The macro below is
 * reproduced verbatim and is NOT complete — recover the full text
 * from upstream before modifying. */
79 #define save_registers(regsptr) \
81 /* save original esp */ \
83 /* push original eflags */ \
85 /* eax will hold the ptr to the private stack bottom */ \
87 /* ebx is used for TLS access */ \
89 /* ecx will be used to temporarily hold the stack bottom addr */\
91 /* rdi is the input to __tls_get_addr, and also a temp var */ \
93 /* For TLS access, we have to do function calls. However, \
94 * we must not lose the original value of: \
95 * esp, eflags, eax, ebx, ecx, edx, esi, edi, ebp, cs, ss \
97 * Some registers' original values have already been saved: \
98 * esp, eflags, eax, ebx, ecx, edi \
100 * In addition, the i386 ABI says the following registers belong\
101 * to the caller function: \
102 * esp, ebp, esi, edi, ebx \
104 * The following registers should not be changed by the callee: \
107 * Therefore, the following registers must be explicitly \
112 /* Get GOT address */ \
113 "call __i686.get_pc_thunk.bx\n\t" \
114 "addl $_GLOBAL_OFFSET_TABLE_, %%ebx\n\t" \
115 /* Start TLS access of private reg stack pointer */ \
116 "leal ust_reg_stack_ptr@tlsgd(,%%ebx,1),%%eax\n\t" \
117 "call ___tls_get_addr@plt\n\t" \
118 /* --- End TLS access */ \
119 /* check if ust_reg_stack_ptr has been initialized */ \
120 "movl (%%eax),%%ecx\n\t" \
121 "testl %%ecx,%%ecx\n\t" \
123 "movl %%eax,%%ecx\n\t" \
124 /* Save ecx because we are using it. */ \
126 /* Start TLS access of private reg stack */ \
127 "leal ust_reg_stack@tlsgd(,%%ebx,1),%%eax\n\t" \
128 "call ___tls_get_addr@plt\n\t" \
129 /* --- End TLS access */ \
131 "addl $500,%%eax\n\t" \
132 "movl %%eax,(%%ecx)\n\t" \
133 "movl %%ecx,%%eax\n\t" \
134 /* now the pointer to the private stack is in eax. \
135 must add stack size so the ptr points to the stack bottom. */ \
137 /* edx was pushed for function calls */ \
139 /* Manually push esp to private stack */ \
140 "addl $-4,(%%eax)\n\t" \
141 "movl 20(%%esp), %%edi\n\t" \
142 "movl (%%eax), %%ebx\n\t" \
143 "movl %%edi, (%%ebx)\n\t" \
144 /* Manually push eflags to private stack */ \
145 "addl $-4,(%%eax)\n\t" \
146 "movl 16(%%esp), %%edi\n\t" \
147 "movl (%%eax), %%ebx\n\t" \
148 "movl %%edi, (%%ebx)\n\t" \
149 /* Manually push eax to private stack */ \
150 "addl $-4,(%%eax)\n\t" \
151 "movl 12(%%esp), %%edi\n\t" \
152 "movl (%%eax), %%ebx\n\t" \
153 "movl %%edi, (%%ebx)\n\t" \
154 /* Manually push ebx to private stack */ \
155 "addl $-4,(%%eax)\n\t" \
156 "movl 8(%%esp), %%edi\n\t" \
157 "movl (%%eax), %%ebx\n\t" \
158 "movl %%edi, (%%ebx)\n\t" \
159 /* Manually push ecx to private stack */ \
160 "addl $-4,(%%eax)\n\t" \
161 "movl 4(%%esp), %%edi\n\t" \
162 "movl (%%eax), %%ebx\n\t" \
163 "movl %%edi, (%%ebx)\n\t" \
164 /* Manually push edi to private stack */ \
165 "addl $-4,(%%eax)\n\t" \
166 "movl 0(%%esp), %%edi\n\t" \
167 "movl (%%eax), %%ebx\n\t" \
168 "movl %%edi, (%%ebx)\n\t" \
169 /* now push regs to tls */ \
170 /* -- esp already pushed -- */ \
171 /* -- eax already pushed -- */ \
172 /* -- ebx already pushed -- */ \
173 /* -- ecx already pushed -- */ \
174 /* -- edi already pushed -- */ \
175 "addl $-4,(%%eax)\n\t" \
176 "movl (%%eax), %%ebx\n\t" \
177 "movl %%edx,(%%ebx)\n\t" \
178 "addl $-4,(%%eax)\n\t" \
179 "movl (%%eax), %%ebx\n\t" \
180 "movl %%ebp,(%%ebx)\n\t" \
181 "addl $-4,(%%eax)\n\t" \
182 "movl (%%eax), %%ebx\n\t" \
183 "movl %%esi,(%%ebx)\n\t" \
185 "addl $-2,(%%eax)\n\t" \
186 "movl (%%eax), %%ebx\n\t" \
187 "movw %%cs, (%%ebx)\n\t" \
189 "addl $-2,(%%eax)\n\t" \
190 "movl (%%eax), %%ebx\n\t" \
191 "movw %%ss, (%%ebx)\n\t" \
192 /* restore original values of regs that were used internally */ \
197 /* cancel push of rsp */ \
198 "addl $4,%%esp\n\t" \
199 /* cancel push of eflags */ \
200 "addl $4,%%esp\n\t" \
202 memcpy(regsptr, (void *)ust_reg_stack_ptr, sizeof(struct registers)); \
203 ust_reg_stack_ptr = (void *)(((long)ust_reg_stack_ptr) + sizeof(struct registers));
205 #else /* CONFIG_UST_GDB_INTEGRATION */
/* Without gdb integration, save_registers() compiles to nothing. */
207 #define save_registers(a)
209 #endif /* CONFIG_UST_GDB_INTEGRATION */
/* On i386 a relative label is referenced directly (no %rip addressing). */
211 #define RELATIVE_ADDRESS(__rel_label__) __rel_label__
/* Load the address of backward label "2:" into register `dst`. */
213 #define ARCH_COPY_ADDR(dst) "lea 2b," dst "\n\t"
/* Assembler directive emitting a pointer-sized value (32-bit here). */
215 #define _ASM_PTR ".long "
217 #elif defined(__x86_64)
/* NOTE(review): fragment of struct registers (x86-64 branch).  The
 * struct's opening line and most members (original lines ~219-237)
 * are missing from this capture; only the padding and rflags members
 * are visible.  Kept verbatim. */
220 int padding
; /* 4 bytes */
238 unsigned long rflags
;
/* fls - find last (most-significant) set bit of x, x86-64 variant.
 * NOTE(review): the asm body (original lines 243-246) is missing
 * from this capture, same shape as the i386 version above — recover
 * from upstream before editing; fragment kept verbatim. */
242 static inline int fls(int x
)
247 : "=&r" (r
) : "rm" (x
), "rm" (-1));
251 #ifdef CONFIG_UST_GDB_INTEGRATION
/* save_registers(regsptr) — x86-64 variant.  Same scheme as the i386
 * version: spill the register state to the per-thread ust_reg_stack
 * via TLS-GD lookups of __tls_get_addr (the ".hword 0x6666" prefixes
 * are the padding bytes required by the x86-64 TLS-GD call sequence),
 * then memcpy the result into *regsptr and pop the private stack.
 * NOTE(review): this capture is missing many original source lines
 * (the opening pushes, the pops after the function calls, the
 * register-restore sequence, and the branch labels for the testq).
 * Reproduced verbatim and NOT complete — recover the full text from
 * upstream before modifying. */
253 #define save_registers(regsptr) \
255 /* save original rsp */ \
257 /* push original rflags */ \
259 /* rax will hold the ptr to the private stack bottom */ \
261 /* rbx will be used to temporarily hold the stack bottom addr */ \
263 /* rdi is the input to __tls_get_addr, and also a temp var */ \
265 /* For TLS access, we have to do function calls. However, \
266 * we must not lose the original value of: \
267 * rsp, rflags, rax, rbx, rcx, rdx, rsi, rdi, rbp, r8, r9 \
268 * r10, r11, r12, r13, r14, r15, cs, ss \
270 * Some registers' original values have already been saved: \
271 * rsp, rflags, rax, rbx, rdi \
273 * In addition, the x86-64 ABI says the following registers \
274 * belong to the caller function: \
275 * rbp, rbx, r12, r13, r14, r15 \
277 * The following registers should not be changed by the callee: \
280 * Therefore, the following registers must be explicitly \
282 * rcx, rdx, rsi, r8, r9, r10, r11 \
291 /* Start TLS access of private reg stack pointer */ \
293 "leaq ust_reg_stack_ptr@tlsgd(%%rip), %%rdi\n\t" \
294 ".hword 0x6666\n\t" \
296 "call __tls_get_addr@plt\n\t" \
297 /* --- End TLS access */ \
298 /* check if ust_reg_stack_ptr has been initialized */ \
299 "movq (%%rax),%%rbx\n\t" \
300 "testq %%rbx,%%rbx\n\t" \
302 "movq %%rax,%%rbx\n\t" \
303 /* Start TLS access of private reg stack */ \
305 "leaq ust_reg_stack@tlsgd(%%rip), %%rdi\n\t" \
306 ".hword 0x6666\n\t" \
308 "call __tls_get_addr@plt\n\t" \
309 /* --- End TLS access */ \
310 "addq $500,%%rax\n\t" \
311 "movq %%rax,(%%rbx)\n\t" \
312 "movq %%rbx,%%rax\n\t" \
313 /* now the pointer to the private stack is in rax.
314 must add stack size so the ptr points to the stack bottom. */ \
316 /* Pop regs that were pushed for function calls */ \
324 /* Manually push rsp to private stack */ \
325 "addq $-8,(%%rax)\n\t" \
326 "movq 32(%%rsp), %%rdi\n\t" \
327 "movq (%%rax), %%rbx\n\t" \
328 "movq %%rdi, (%%rbx)\n\t" \
329 /* Manually push eflags to private stack */ \
330 "addq $-8,(%%rax)\n\t" \
331 "movq 24(%%rsp), %%rdi\n\t" \
332 "movq (%%rax), %%rbx\n\t" \
333 "movq %%rdi, (%%rbx)\n\t" \
334 /* Manually push rax to private stack */ \
335 "addq $-8,(%%rax)\n\t" \
336 "movq 16(%%rsp), %%rdi\n\t" \
337 "movq (%%rax), %%rbx\n\t" \
338 "movq %%rdi, (%%rbx)\n\t" \
339 /* Manually push rbx to private stack */ \
340 "addq $-8,(%%rax)\n\t" \
341 "movq 8(%%rsp), %%rdi\n\t" \
342 "movq (%%rax), %%rbx\n\t" \
343 "movq %%rdi, (%%rbx)\n\t" \
344 /* Manually push rdi to private stack */ \
345 "addq $-8,(%%rax)\n\t" \
346 "movq 0(%%rsp), %%rdi\n\t" \
347 "movq (%%rax), %%rbx\n\t" \
348 "movq %%rdi, (%%rbx)\n\t" \
349 /* now push regs to tls */ \
350 /* -- rsp already pushed -- */ \
351 /* -- rax already pushed -- */ \
352 /* -- rbx already pushed -- */ \
353 /* -- rdi already pushed -- */ \
354 "addq $-8,(%%rax)\n\t" \
355 "movq (%%rax), %%rbx\n\t" \
356 "movq %%rcx,(%%rbx)\n\t" \
357 "addq $-8,(%%rax)\n\t" \
358 "movq (%%rax), %%rbx\n\t" \
359 "movq %%rdx,(%%rbx)\n\t" \
360 "addq $-8,(%%rax)\n\t" \
361 "movq (%%rax), %%rbx\n\t" \
362 "movq %%rbp,(%%rbx)\n\t" \
363 "addq $-8,(%%rax)\n\t" \
364 "movq (%%rax), %%rbx\n\t" \
365 "movq %%rsi,(%%rbx)\n\t" \
366 "addq $-8,(%%rax)\n\t" \
367 "movq (%%rax), %%rbx\n\t" \
368 "movq %%r8,(%%rbx)\n\t" \
369 "addq $-8,(%%rax)\n\t" \
370 "movq (%%rax), %%rbx\n\t" \
371 "movq %%r9,(%%rbx)\n\t" \
372 "addq $-8,(%%rax)\n\t" \
373 "movq (%%rax), %%rbx\n\t" \
374 "movq %%r10,(%%rbx)\n\t" \
375 "addq $-8,(%%rax)\n\t" \
376 "movq (%%rax), %%rbx\n\t" \
377 "movq %%r11,(%%rbx)\n\t" \
378 "addq $-8,(%%rax)\n\t" \
379 "movq (%%rax), %%rbx\n\t" \
380 "movq %%r12,(%%rbx)\n\t" \
381 "addq $-8,(%%rax)\n\t" \
382 "movq (%%rax), %%rbx\n\t" \
383 "movq %%r13,(%%rbx)\n\t" \
384 "addq $-8,(%%rax)\n\t" \
385 "movq (%%rax), %%rbx\n\t" \
386 "movq %%r14,(%%rbx)\n\t" \
387 "addq $-8,(%%rax)\n\t" \
388 "movq (%%rax), %%rbx\n\t" \
389 "movq %%r15,(%%rbx)\n\t" \
391 "addq $-2,(%%rax)\n\t" \
392 "movq (%%rax), %%rbx\n\t" \
393 "movw %%cs, (%%rbx)\n\t" \
395 "addq $-2,(%%rax)\n\t" \
396 "movq (%%rax), %%rbx\n\t" \
397 "movw %%ss, (%%rbx)\n\t" \
398 /* add padding for struct registers */ \
399 "addq $-4,(%%rax)\n\t" \
400 /* restore original values of regs that were used internally */ \
404 /* cancel push of rsp */ \
405 "addq $8,%%rsp\n\t" \
406 /* cancel push of rflags */ \
407 "addq $8,%%rsp\n\t" \
409 memcpy(regsptr, (void *)ust_reg_stack_ptr, sizeof(struct registers)); \
410 ust_reg_stack_ptr = (void *)(((long)ust_reg_stack_ptr) + sizeof(struct registers));
412 #else /* CONFIG_UST_GDB_INTEGRATION */
/* Without gdb integration, save_registers() compiles to nothing. */
414 #define save_registers(a)
416 #endif /* CONFIG_UST_GDB_INTEGRATION */
418 /* Macro to insert the address of a relative jump in an assembly stub,
419 * in a relocatable way. On x86-64, this uses a special (%rip) notation. */
420 #define RELATIVE_ADDRESS(__rel_label__) __rel_label__(%%rip)
/* Load the address of backward label "2:" into register `dst`
 * (RIP-relative so the code stays position-independent). */
422 #define ARCH_COPY_ADDR(dst) "lea 2b(%%rip)," dst "\n\t"
/* Assembler directive emitting a pointer-sized value (64-bit here). */
424 #define _ASM_PTR ".quad "
426 #elif defined(__PPC__)
/* fls - find last (most-significant) set bit of x, PowerPC variant,
 * built on cntlzw (count leading zeros word).
 * NOTE(review): the function opening, the declaration of `lz`, and
 * the return statement (presumably derived from 32 - lz) are missing
 * from this capture — recover from upstream before editing; fragment
 * kept verbatim. */
431 static __inline__
int fls(unsigned int x
)
435 asm ("cntlzw %0,%1" : "=r" (lz
) : "r" (x
));
/* Load the 32-bit address of backward label "2:" into register `dst`
 * in two halves, as PPC immediates are 16 bits wide. */
439 #define ARCH_COPY_ADDR(dst) \
440 "lis " dst ",2b@h\n\t" /* load high bytes */ \
441 "ori " dst "," dst ",2b@l\n\t" /* load low bytes */
/* Assembler directive emitting a pointer-sized value (32-bit here). */
443 #define _ASM_PTR ".long "
/* No register-save support on this architecture: expands to nothing. */
444 #define save_registers(a)
446 #else /* arch-agnostic */
/* fls - find last (most-significant) set bit of x, generic C variant:
 * a binary search narrowing the top set bit by halving mask widths.
 * NOTE(review): this capture is missing the function opening, the
 * result-variable bookkeeping inside each branch (the shift/accumulate
 * lines), the closing braces, and the return — only the mask tests
 * survive.  Recover the full function from upstream before editing;
 * fragment kept verbatim. */
448 static __inline__
int fls(unsigned int x
)
454 if (!(x
& 0xFFFF0000U
)) {
458 if (!(x
& 0xFF000000U
)) {
462 if (!(x
& 0xF0000000U
)) {
466 if (!(x
& 0xC0000000U
)) {
470 if (!(x
& 0x80000000U
)) {
/* NOTE(review): line 484 ends with a continuation backslash but its
 * continuation lines (485-488) are missing from this capture; the
 * "ldr ... =2b" form is ARM literal-pool syntax.  Kept verbatim. */
484 #define ARCH_COPY_ADDR(dst) "ldr "dst", =2b\n\t" \
489 #define _ASM_PTR ".long "
/* No register-save support on this architecture: expands to nothing. */
490 #define save_registers(a)
494 #endif /* UST_PROCESSOR_H */
/* This page took 0.053775 seconds and 3 git commands to generate. (gitweb footer artifact, not part of the source) */