search for: movzbl

Displaying 20 results from an estimated 127 matches for "movzbl".

2017 Oct 20
1
[PATCH v1 01/27] x86/crypto: Adapt assembly for PIE support
...> + movl (RBASE,reg_i,4), reg_o; >> + >> +#define round_xor(tab_off, reg_i, reg_o) \ >> + leaq tab_off(%rip), RBASE; \ >> + xorl (RBASE,reg_i,4), reg_o; >> + >> #define round(TAB,OFFSET,r1,r2,r3,r4,r5,r6,r7,r8,ra,rb,rc,rd) \ >> movzbl r2 ## H,r5 ## E; \ >> movzbl r2 ## L,r6 ## E; \ >> - movl TAB+1024(,r5,4),r5 ## E;\ >> + round_mov(TAB+1024, r5, r5 ## E)\ >> movw r4 ## X,r2 ## X; \ >> - movl TAB(,r6,4),r6 ## E; \ >> + round_mov(TA...
2017 Oct 11
1
[PATCH v1 01/27] x86/crypto: Adapt assembly for PIE support
...ENDPROC(FUNC); +#define round_mov(tab_off, reg_i, reg_o) \ + leaq tab_off(%rip), RBASE; \ + movl (RBASE,reg_i,4), reg_o; + +#define round_xor(tab_off, reg_i, reg_o) \ + leaq tab_off(%rip), RBASE; \ + xorl (RBASE,reg_i,4), reg_o; + #define round(TAB,OFFSET,r1,r2,r3,r4,r5,r6,r7,r8,ra,rb,rc,rd) \ movzbl r2 ## H,r5 ## E; \ movzbl r2 ## L,r6 ## E; \ - movl TAB+1024(,r5,4),r5 ## E;\ + round_mov(TAB+1024, r5, r5 ## E)\ movw r4 ## X,r2 ## X; \ - movl TAB(,r6,4),r6 ## E; \ + round_mov(TAB, r6, r6 ## E) \ roll $16,r2 ## E; \ shrl $16,r4 ## E; \ movzbl r4 ## L,r7 ## E; \ movzbl r4 ## H,r4 ##...
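The hunk quoted in this thread replaces absolute-addressed AES table lookups such as movl TAB(,r6,4),r6 ## E with a round_mov/round_xor pair that first loads the table address RIP-relatively into RBASE and then performs the indexed access, which is what makes the code position independent. As a rough standalone illustration of that addressing pattern only (the table and function names below are invented for this sketch; this is not the patch itself):

    /* PIE-safe indexed table load in the style of the round_mov macro:
     * materialize the table address RIP-relatively, then index off it. */
    unsigned int table[256];

    unsigned int load_entry(unsigned long idx)
    {
        unsigned int val;
        unsigned long base;                          /* plays the role of RBASE */

        __asm__ ("leaq table(%%rip), %[base]\n\t"    /* base = &table, no absolute address */
                 "movl (%[base],%[idx],4), %[val]"   /* val = table[idx] */
                 : [val] "=r" (val), [base] "=&r" (base)
                 : [idx] "r" (idx));
        return val;
    }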
2017 Oct 20
0
[PATCH v1 01/27] x86/crypto: Adapt assembly for PIE support
...reg_i, reg_o) \ > + leaq tab_off(%rip), RBASE; \ > + movl (RBASE,reg_i,4), reg_o; > + > +#define round_xor(tab_off, reg_i, reg_o) \ > + leaq tab_off(%rip), RBASE; \ > + xorl (RBASE,reg_i,4), reg_o; > + > #define round(TAB,OFFSET,r1,r2,r3,r4,r5,r6,r7,r8,ra,rb,rc,rd) \ > movzbl r2 ## H,r5 ## E; \ > movzbl r2 ## L,r6 ## E; \ > - movl TAB+1024(,r5,4),r5 ## E;\ > + round_mov(TAB+1024, r5, r5 ## E)\ > movw r4 ## X,r2 ## X; \ > - movl TAB(,r6,4),r6 ## E; \ > + round_mov(TAB, r6, r6 ## E) \ > roll $16,r2 ## E; \ > shrl $16,r4 ## E; \ > mov...
2016 May 24
5
Liveness of AL, AH and AX in x86 backend
...struct_t z; struct_t foo(char *p) { struct_t s; s.x = *p++; s.y = *p; z = s; s.x++; return s; } But the output at -O2 is foo: # @foo .cfi_startproc # BB#0: # %entry movb (%rdi), %al movzbl 1(%rdi), %ecx movb %al, z(%rip) movb %cl, z+1(%rip) incb %al shll $8, %ecx movzbl %al, %eax orl %ecx, %eax retq I was hoping it would do something along the lines of movb (%rdi), %al movb 1(%rdi), %ah movh...
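For readability, the C source quoted in that message is reflowed below. The struct_t definition itself is cut off by the snippet; two plain char fields are assumed here, which matches the byte loads and the two-byte struct return visible in the quoted -O2 output:

    typedef struct {    /* assumed definition; the snippet only shows the uses */
        char x;
        char y;
    } struct_t;

    struct_t z;

    struct_t foo(char *p)
    {
        struct_t s;
        s.x = *p++;
        s.y = *p;
        z = s;
        s.x++;
        return s;
    }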
2016 Apr 06
2
LTO renaming of constants with inline assembly
...parameter "v", and a simple main that calls each function: >> >> $ cat inlineasm1.c >> static const unsigned char __attribute__((used)) __attribute__ ((aligned >> (1))) myvar = 1; >> >> void foo(unsigned long int *v) { >> __asm__ volatile("movzbl myvar(%%rip), %%eax\n\t" >> "movq %%rax, %0\n\t" >> : "=*m" (*v) >> : >> : "%eax" >> ); >> } >> >> $ cat inlineasm2.c >> static const unsigned char __at...
2016 Apr 06
2
LTO renaming of constants with inline assembly
...m symbol > names to globalvalues, for any internal names with the used attribute, and > attach that mapping to inline asm blocks. > > For example, if myvar were renamed to myvar.6, the IR would look like this: > > @myvar.6 = global i8 [...] > > [...] > > call asm("movzbl myvar(%rip), ...", ..., "myvar")(..., i8* @myvar.6) > > The backend would produce assembly that would look like this: > > .rename myvar, myvar.6 > movzbl myvar(%%rip), ... > .norename myvar > > The .rename and .norename directives would delimit the scope...
2016 May 24
0
Liveness of AL, AH and AX in x86 backend
...s.x = *p++; > s.y = *p; > z = s; > s.x++; > return s; >} > >But the output at -O2 is > >foo: # @foo > .cfi_startproc ># BB#0: # %entry > movb (%rdi), %al > movzbl 1(%rdi), %ecx > movb %al, z(%rip) > movb %cl, z+1(%rip) > incb %al > shll $8, %ecx > movzbl %al, %eax > orl %ecx, %eax > retq > > >I was hoping it would do something along the lines of > >...
2017 Oct 11
32
[PATCH v1 00/27] x86: PIE support and option to extend KASLR randomization
Changes: - patch v1: - Simplify ftrace implementation. - Use gcc mstack-protector-guard-reg=%gs with PIE when possible. - rfc v3: - Use --emit-relocs instead of -pie to reduce dynamic relocation space on mapped memory. It also simplifies the relocation process. - Move the start of the module section next to the kernel. Remove the need for -mcmodel=large on modules. Extends
2018 Mar 13
32
[PATCH v2 00/27] x86: PIE support and option to extend KASLR randomization
Changes: - patch v2: - Adapt patch to work post KPTI and compiler changes - Redo all performance testing with latest configs and compilers - Simplify mov macro on PIE (MOVABS now) - Reduce GOT footprint - patch v1: - Simplify ftrace implementation. - Use gcc mstack-protector-guard-reg=%gs with PIE when possible. - rfc v3: - Use --emit-relocs instead of -pie to reduce
2017 Oct 04
28
x86: PIE support and option to extend KASLR randomization
These patches make the changes necessary to build the kernel as a Position Independent Executable (PIE) on x86_64. A PIE kernel can be relocated below the top 2G of the virtual address space, which makes it possible to optionally extend the KASLR randomization range from 1G to 3G. Thanks a lot to Ard Biesheuvel & Kees Cook for their feedback on compiler changes, PIE support and KASLR in general. Thanks to
2018 May 23
33
[PATCH v3 00/27] x86: PIE support and option to extend KASLR randomization
Changes: - patch v3: - Update message to describe the longer-term PIE goal. - Minor change to the ftrace if condition. - Changed code using xchgq. - patch v2: - Adapt patch to work post KPTI and compiler changes - Redo all performance testing with latest configs and compilers - Simplify mov macro on PIE (MOVABS now) - Reduce GOT footprint - patch v1: - Simplify ftrace
2016 Apr 13
2
LTO renaming of constants with inline assembly
...frontend to produce a mapping from symbol names to globalvalues, for any internal names with the used attribute, and attach that mapping to inline asm blocks. For example, if myvar were renamed to myvar.6, the IR would look like this: @myvar.6 = global i8 [...] [...] call asm("movzbl myvar(%rip), ...", ..., "myvar")(..., i8* @myvar.6) The backend would produce assembly that would look like this: .rename myvar, myvar.6 movzbl myvar(%%rip), ... .norename myvar The .rename and .norename directives would delimit the scope of the renaming. That...
2016 Apr 06
2
LTO renaming of constants with inline assembly
...var named "myvar" into the memory pointed to by its parameter "v", and a simple main that calls each function: $ cat inlineasm1.c static const unsigned char __attribute__((used)) __attribute__ ((aligned (1))) myvar = 1; void foo(unsigned long int *v) { __asm__ volatile("movzbl myvar(%%rip), %%eax\n\t" "movq %%rax, %0\n\t" : "=*m" (*v) : : "%eax" ); } $ cat inlineasm2.c static const unsigned char __attribute__((used)) __attribute__ ((aligned (1))) myvar = 2; void bar(unsigned lo...
2013 Aug 09
1
[PATCH V13 00/14] Paravirtualized ticket spinlocks
...unsigned count = SPIN_THRESHOLD; do { if (ACCESS_ONCE(lock->tickets.head) == inc.tail) goto out; cpu_relax(); } while (--count); __ticket_lock_spinning(lock, inc.tail); } out: barrier(); which results in: push %rbp mov %rsp,%rbp mov $0x200,%eax lock xadd %ax,(%rdi) movzbl %ah,%edx cmp %al,%dl jne 1f # Slowpath if lock in contention pop %rbp retq ### SLOWPATH START 1: and $-2,%edx movzbl %dl,%esi 2: mov $0x800,%eax jmp 4f 3: pause sub $0x1,%eax je 5f 4: movzbl (%rdi),%ecx cmp %cl,%dl jne 3b pop %rbp retq 5...
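The quoted C is the inner spin loop of the kernel's ticket-lock slowpath; SPIN_THRESHOLD, ACCESS_ONCE() and __ticket_lock_spinning() are kernel internals that the snippet does not show. As a rough, self-contained sketch of the underlying ticket-lock idea only (not the kernel's implementation, and without the paravirt slowpath), a C11 version of the fetch-and-add ticket handout that the quoted lock xadd performs looks like this:

    #include <stdatomic.h>

    /* Simplified ticket lock: take a ticket with an atomic fetch-and-add
     * (the "lock xadd" in the quoted assembly), then spin until the
     * "now serving" counter reaches that ticket. */
    typedef struct {
        atomic_uint next_ticket;   /* ticket handed to the next arrival */
        atomic_uint now_serving;   /* ticket currently holding the lock */
    } ticket_lock;

    static void ticket_lock_acquire(ticket_lock *l)
    {
        unsigned int me = atomic_fetch_add(&l->next_ticket, 1);
        while (atomic_load(&l->now_serving) != me)
            ;   /* the kernel adds cpu_relax() here and, past SPIN_THRESHOLD,
                 * calls the paravirt slowpath discussed in the message */
    }

    static void ticket_lock_release(ticket_lock *l)
    {
        atomic_fetch_add(&l->now_serving, 1);
    }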