/* Copyright (C) 2009 Pierre-Marc Fournier
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef UST_PROCESSOR_H
#define UST_PROCESSOR_H
extern __thread long ust_reg_stack[500];
extern volatile __thread long *ust_reg_stack_ptr;

#define ____cacheline_aligned __attribute__((aligned(CAA_CACHE_LINE_SIZE)))
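/*
 * Illustrative note (not part of the original header): ____cacheline_aligned
 * pads a variable or type to CAA_CACHE_LINE_SIZE so it starts on its own
 * cache line, the usual way to avoid false sharing between threads. A
 * hypothetical use would look like:
 *
 *   static struct {
 *           long hits;
 *   } per_thread_stats ____cacheline_aligned;
 */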
static inline int fls(int x)
{
        int r;

        asm("bsrl %1,%0\n\t"
            "cmovzl %2,%0"
            : "=&r" (r) : "rm" (x), "rm" (-1));
        return r + 1;
}
#ifdef CONFIG_UST_GDB_INTEGRATION

/* save_registers - saves most of the processor's registers so
 * they are available to the probe. gdb uses this to report the
 * values of local variables.
 *
 * Saving all registers without losing any of their values is
 * not straightforward:
 *
 * We cannot pass to the asm stub the address of a registers structure
 * on the stack, because the stub would need a register to hold it and
 * would thereby clobber that register's original value.
 *
 * We don't want to use a stub to push the regs on the stack and then
 * another stub to copy them to a structure, because changing %sp in asm
 * and then returning to C (even briefly) can have unexpected results.
 * Also, gcc might modify %sp between the stubs in reaction to the
 * register needs of the second stub, which needs to know where to copy
 * the register values.
 *
 * So the chosen approach is to use another stack, declared in thread-
 * local storage, to push the registers. They are subsequently copied
 * to the caller-provided structure by C code. (An illustrative usage
 * sketch follows the save_registers() definition below.)
 */
#define save_registers(regsptr) \
        /* save original esp */ \
        /* push original eflags */ \
        /* eax will hold the ptr to the private stack bottom */ \
        /* ebx is used for TLS access */ \
        /* ecx will be used to temporarily hold the stack bottom addr */ \
        /* edi will be used as a temporary variable */ \
        /* For TLS access, we have to do function calls. However, \
         * we must not lose the original value of: \
         * esp, eflags, eax, ebx, ecx, edx, esi, edi, ebp, cs, ss \
         * \
         * Some registers' original values have already been saved: \
         * esp, eflags, eax, ebx, ecx, edi \
         * \
         * In addition, the i386 ABI says the following registers belong \
         * to the caller function: \
         * esp, ebp, esi, edi, ebx \
         * \
         * The following registers should not be changed by the callee: \
         * cs, ss \
         * \
         * Therefore, the following registers must be explicitly \
         * saved before the function calls: \
         * edx \
         */ \
        /* Get GOT address */ \
        "call __i686.get_pc_thunk.bx\n\t" \
        "addl $_GLOBAL_OFFSET_TABLE_, %%ebx\n\t" \
        /* Start TLS access of private reg stack pointer */ \
        "leal ust_reg_stack_ptr@tlsgd(,%%ebx,1),%%eax\n\t" \
        "call ___tls_get_addr@plt\n\t" \
        /* --- End TLS access */ \
        /* check if ust_reg_stack_ptr has been initialized */ \
        "movl (%%eax),%%ecx\n\t" \
        "testl %%ecx,%%ecx\n\t" \
        "movl %%eax,%%ecx\n\t" \
        /* Save ecx because we are using it. */ \
        /* Start TLS access of private reg stack */ \
        "leal ust_reg_stack@tlsgd(,%%ebx,1),%%eax\n\t" \
        "call ___tls_get_addr@plt\n\t" \
        /* --- End TLS access */ \
        "addl $500,%%eax\n\t" \
        "movl %%eax,(%%ecx)\n\t" \
        "movl %%ecx,%%eax\n\t" \
        /* now the pointer to the private stack is in eax. \
           must add stack size so the ptr points to the stack bottom. */ \
        /* edx was pushed for function calls */ \
        /* Manually push esp to private stack */ \
        "addl $-4,(%%eax)\n\t" \
        "movl 20(%%esp), %%edi\n\t" \
        "movl (%%eax), %%ebx\n\t" \
        "movl %%edi, (%%ebx)\n\t" \
        /* Manually push eflags to private stack */ \
        "addl $-4,(%%eax)\n\t" \
        "movl 16(%%esp), %%edi\n\t" \
        "movl (%%eax), %%ebx\n\t" \
        "movl %%edi, (%%ebx)\n\t" \
        /* Manually push eax to private stack */ \
        "addl $-4,(%%eax)\n\t" \
        "movl 12(%%esp), %%edi\n\t" \
        "movl (%%eax), %%ebx\n\t" \
        "movl %%edi, (%%ebx)\n\t" \
        /* Manually push ebx to private stack */ \
        "addl $-4,(%%eax)\n\t" \
        "movl 8(%%esp), %%edi\n\t" \
        "movl (%%eax), %%ebx\n\t" \
        "movl %%edi, (%%ebx)\n\t" \
        /* Manually push ecx to private stack */ \
        "addl $-4,(%%eax)\n\t" \
        "movl 4(%%esp), %%edi\n\t" \
        "movl (%%eax), %%ebx\n\t" \
        "movl %%edi, (%%ebx)\n\t" \
        /* Manually push edi to private stack */ \
        "addl $-4,(%%eax)\n\t" \
        "movl 0(%%esp), %%edi\n\t" \
        "movl (%%eax), %%ebx\n\t" \
        "movl %%edi, (%%ebx)\n\t" \
        /* now push regs to tls */ \
        /* -- esp already pushed -- */ \
        /* -- eax already pushed -- */ \
        /* -- ebx already pushed -- */ \
        /* -- ecx already pushed -- */ \
        /* -- edi already pushed -- */ \
        "addl $-4,(%%eax)\n\t" \
        "movl (%%eax), %%ebx\n\t" \
        "movl %%edx,(%%ebx)\n\t" \
        "addl $-4,(%%eax)\n\t" \
        "movl (%%eax), %%ebx\n\t" \
        "movl %%ebp,(%%ebx)\n\t" \
        "addl $-4,(%%eax)\n\t" \
        "movl (%%eax), %%ebx\n\t" \
        "movl %%esi,(%%ebx)\n\t" \
        "addl $-2,(%%eax)\n\t" \
        "movl (%%eax), %%ebx\n\t" \
        "movw %%cs, (%%ebx)\n\t" \
        "addl $-2,(%%eax)\n\t" \
        "movl (%%eax), %%ebx\n\t" \
        "movw %%ss, (%%ebx)\n\t" \
        /* restore original values of regs that were used internally */ \
        /* cancel push of esp */ \
        "addl $4,%%esp\n\t" \
        /* cancel push of eflags */ \
        "addl $4,%%esp\n\t" \
        memcpy(regsptr, (void *)ust_reg_stack_ptr, sizeof(struct registers)); \
        ust_reg_stack_ptr = (void *)(((long)ust_reg_stack_ptr) + sizeof(struct registers));
#else /* CONFIG_UST_GDB_INTEGRATION */

#define save_registers(a)

#endif /* CONFIG_UST_GDB_INTEGRATION */
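/*
 * Usage sketch (an illustration under assumptions; the consumer function
 * named below is hypothetical): a probe that wants the register state would
 * reserve a struct registers on its own stack, fill it with
 * save_registers(), and hand it to whatever reports it, e.g.:
 *
 *   void probe(void)
 *   {
 *           struct registers regs;
 *
 *           save_registers(&regs);
 *           report_registers_to_gdb(&regs);   // hypothetical consumer
 *   }
 *
 * save_registers() first pushes the register values onto the thread-local
 * ust_reg_stack and then memcpy()s them into *regsptr, so regs ends up in
 * the caller-provided structure as the design comment above describes.
 */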
#define RELATIVE_ADDRESS(__rel_label__) __rel_label__

#define ARCH_COPY_ADDR(dst) "lea 2b," dst "\n\t"

#define _ASM_PTR ".long "
#endif /* below is code for x86-64 */

        int padding; /* 4 bytes */
        unsigned long rflags;
static inline int fls(int x)
{
        int r;

        asm("bsrl %1,%0\n\t"
            "cmovzl %2,%0"
            : "=&r" (r) : "rm" (x), "rm" (-1));
        return r + 1;
}
#ifdef CONFIG_UST_GDB_INTEGRATION

#define save_registers(regsptr) \
        /* save original rsp */ \
        /* push original rflags */ \
        /* rax will hold the ptr to the private stack bottom */ \
        /* rbx will be used to temporarily hold the stack bottom addr */ \
        /* rdi is the input to __tls_get_addr, and also a temp var */ \
        /* For TLS access, we have to do function calls. However, \
         * we must not lose the original value of: \
         * rsp, rflags, rax, rbx, rcx, rdx, rsi, rdi, rbp, r8, r9, \
         * r10, r11, r12, r13, r14, r15, cs, ss \
         * \
         * Some registers' original values have already been saved: \
         * rsp, rflags, rax, rbx, rdi \
         * \
         * In addition, the x86-64 ABI says the following registers \
         * belong to the caller function: \
         * rbp, rbx, r12, r13, r14, r15 \
         * \
         * The following registers should not be changed by the callee: \
         * cs, ss \
         * \
         * Therefore, the following registers must be explicitly \
         * saved before the function calls: \
         * rcx, rdx, rsi, r8, r9, r10, r11 \
         */ \
        /* Start TLS access of private reg stack pointer */ \
        "leaq ust_reg_stack_ptr@tlsgd(%%rip), %%rdi\n\t" \
        ".hword 0x6666\n\t" \
        "call __tls_get_addr@plt\n\t" \
        /* --- End TLS access */ \
        /* check if ust_reg_stack_ptr has been initialized */ \
        "movq (%%rax),%%rbx\n\t" \
        "testq %%rbx,%%rbx\n\t" \
        "movq %%rax,%%rbx\n\t" \
        /* Start TLS access of private reg stack */ \
        "leaq ust_reg_stack@tlsgd(%%rip), %%rdi\n\t" \
        ".hword 0x6666\n\t" \
        "call __tls_get_addr@plt\n\t" \
        /* --- End TLS access */ \
        "addq $500,%%rax\n\t" \
        "movq %%rax,(%%rbx)\n\t" \
        "movq %%rbx,%%rax\n\t" \
        /* now the pointer to the private stack is in rax. \
           must add stack size so the ptr points to the stack bottom. */ \
        /* Pop regs that were pushed for function calls */ \
        /* Manually push rsp to private stack */ \
        "addq $-8,(%%rax)\n\t" \
        "movq 32(%%rsp), %%rdi\n\t" \
        "movq (%%rax), %%rbx\n\t" \
        "movq %%rdi, (%%rbx)\n\t" \
        /* Manually push rflags to private stack */ \
        "addq $-8,(%%rax)\n\t" \
        "movq 24(%%rsp), %%rdi\n\t" \
        "movq (%%rax), %%rbx\n\t" \
        "movq %%rdi, (%%rbx)\n\t" \
        /* Manually push rax to private stack */ \
        "addq $-8,(%%rax)\n\t" \
        "movq 16(%%rsp), %%rdi\n\t" \
        "movq (%%rax), %%rbx\n\t" \
        "movq %%rdi, (%%rbx)\n\t" \
        /* Manually push rbx to private stack */ \
        "addq $-8,(%%rax)\n\t" \
        "movq 8(%%rsp), %%rdi\n\t" \
        "movq (%%rax), %%rbx\n\t" \
        "movq %%rdi, (%%rbx)\n\t" \
        /* Manually push rdi to private stack */ \
        "addq $-8,(%%rax)\n\t" \
        "movq 0(%%rsp), %%rdi\n\t" \
        "movq (%%rax), %%rbx\n\t" \
        "movq %%rdi, (%%rbx)\n\t" \
        /* now push regs to tls */ \
        /* -- rsp already pushed -- */ \
        /* -- rax already pushed -- */ \
        /* -- rbx already pushed -- */ \
        /* -- rdi already pushed -- */ \
        "addq $-8,(%%rax)\n\t" \
        "movq (%%rax), %%rbx\n\t" \
        "movq %%rcx,(%%rbx)\n\t" \
        "addq $-8,(%%rax)\n\t" \
        "movq (%%rax), %%rbx\n\t" \
        "movq %%rdx,(%%rbx)\n\t" \
        "addq $-8,(%%rax)\n\t" \
        "movq (%%rax), %%rbx\n\t" \
        "movq %%rbp,(%%rbx)\n\t" \
        "addq $-8,(%%rax)\n\t" \
        "movq (%%rax), %%rbx\n\t" \
        "movq %%rsi,(%%rbx)\n\t" \
        "addq $-8,(%%rax)\n\t" \
        "movq (%%rax), %%rbx\n\t" \
        "movq %%r8,(%%rbx)\n\t" \
        "addq $-8,(%%rax)\n\t" \
        "movq (%%rax), %%rbx\n\t" \
        "movq %%r9,(%%rbx)\n\t" \
        "addq $-8,(%%rax)\n\t" \
        "movq (%%rax), %%rbx\n\t" \
        "movq %%r10,(%%rbx)\n\t" \
        "addq $-8,(%%rax)\n\t" \
        "movq (%%rax), %%rbx\n\t" \
        "movq %%r11,(%%rbx)\n\t" \
        "addq $-8,(%%rax)\n\t" \
        "movq (%%rax), %%rbx\n\t" \
        "movq %%r12,(%%rbx)\n\t" \
        "addq $-8,(%%rax)\n\t" \
        "movq (%%rax), %%rbx\n\t" \
        "movq %%r13,(%%rbx)\n\t" \
        "addq $-8,(%%rax)\n\t" \
        "movq (%%rax), %%rbx\n\t" \
        "movq %%r14,(%%rbx)\n\t" \
        "addq $-8,(%%rax)\n\t" \
        "movq (%%rax), %%rbx\n\t" \
        "movq %%r15,(%%rbx)\n\t" \
        "addq $-2,(%%rax)\n\t" \
        "movq (%%rax), %%rbx\n\t" \
        "movw %%cs, (%%rbx)\n\t" \
        "addq $-2,(%%rax)\n\t" \
        "movq (%%rax), %%rbx\n\t" \
        "movw %%ss, (%%rbx)\n\t" \
        /* add padding for struct registers */ \
        "addq $-4,(%%rax)\n\t" \
        /* restore original values of regs that were used internally */ \
        /* cancel push of rsp */ \
        "addq $8,%%rsp\n\t" \
        /* cancel push of rflags */ \
        "addq $8,%%rsp\n\t" \
        memcpy(regsptr, (void *)ust_reg_stack_ptr, sizeof(struct registers)); \
        ust_reg_stack_ptr = (void *)(((long)ust_reg_stack_ptr) + sizeof(struct registers));
#else /* CONFIG_UST_GDB_INTEGRATION */

#define save_registers(a)

#endif /* CONFIG_UST_GDB_INTEGRATION */
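/*
 * Layout sketch (an assumption; the full definition of struct registers is
 * not shown in this excerpt, and the field order beyond "padding" is only
 * inferred from the push order above): the two 2-byte pushes of %cs and %ss
 * plus the 4 bytes of explicit padding total 8 bytes, which keeps the
 * private stack 8-byte aligned and would correspond to lowest-address
 * members along the lines of:
 *
 *   struct registers {
 *           int padding;     // mirrors the "addq $-4" above
 *           short ss;
 *           short cs;
 *           ...              // 64-bit general-purpose registers
 *           unsigned long rflags;
 *           unsigned long rsp;
 *   };
 */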
/* Macro to insert the address of a relative jump in an assembly stub,
 * in a relocatable way. On x86-64, this uses a special (%rip) notation. */
#define RELATIVE_ADDRESS(__rel_label__) __rel_label__(%%rip)

#define ARCH_COPY_ADDR(dst) "lea 2b(%%rip)," dst "\n\t"

#define _ASM_PTR ".quad "
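/*
 * Illustration (assumed usage, not taken from this header): ARCH_COPY_ADDR()
 * loads the address of a preceding local asm label "2:" into its destination
 * operand using RIP-relative addressing, so the stub stays position
 * independent, e.g.:
 *
 *   void *addr;
 *   asm ("2:\n\t"
 *        ARCH_COPY_ADDR("%0")
 *        : "=r" (addr));
 *
 * After this, addr holds the run-time address of label 2 regardless of where
 * the code was loaded. _ASM_PTR emits a pointer-sized data directive
 * (".long " on 32-bit, ".quad " here), so the same stub can store such an
 * address on both word sizes.
 */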
static __inline__ int fls(unsigned int x)
{
        int lz;

        asm ("cntlzw %0,%1" : "=r" (lz) : "r" (x));
        return 32 - lz;
}
#define ARCH_COPY_ADDR(dst) \
        "lis " dst ",2b@h\n\t" /* load high 16 bits */ \
        "ori " dst "," dst ",2b@l\n\t" /* load low 16 bits */

#define _ASM_PTR ".long "
#define save_registers(a)

#endif /* UST_PROCESSOR_H */