Displaying 20 results from an estimated 47 matches for "andb".
2010 Mar 28
2
[LLVMdev] Which floating-point comparison?
I notice llvm provides both ordered and unordered variants of
floating-point comparison. Which of these is the right one to use by
default? I suppose the two criteria would be, in order of importance:
1. Which is more efficient (more directly maps to typical hardware)?
2. Which is more familiar (more like the way C and Fortran do it)?
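For reference, a minimal C sketch of the distinction being asked about, assuming a C frontend such as clang (not part of the original thread): an ordered comparison is false whenever either operand is NaN, which is what C's relational operators give you, while the unordered predicates return true when a NaN is involved.
#include <math.h>
#include <stdio.h>

int main(void) {
    double x = NAN;
    /* C's "<" is an ordered comparison: false if either operand is NaN.
       Clang typically lowers it to LLVM's "fcmp olt". */
    printf("%d\n", 1.0 < x);      /* prints 0 */
    /* Negating the opposite ordered test yields the unordered variant
       ("fcmp ult"): true when the operands are unordered, i.e. NaN. */
    printf("%d\n", !(1.0 >= x));  /* prints 1 */
    return 0;
}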
2010 Mar 28
0
[LLVMdev] Which floating-point comparison?
...> [#uses=1]
ret i1 %0
}
$ Debug/bin/llc <test.ll
.section __TEXT,__text,regular,pure_instructions
.globl _less
.align 4, 0x90
_less: ## @less
## BB#0: ## %entry
movsd 4(%esp), %xmm0
ucomisd 12(%esp), %xmm0
sbbb %al, %al
andb $1, %al
ret
> 2. Which is more familiar (more like the way C and Fortran do it)?
You can use http://llvm.org/demo/ to figure that out.
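(As a hypothetical way to rerun that experiment: a C function along these lines, pushed through llvm.org/demo or clang, shows which fcmp predicate the frontend picks for a plain "<" on doubles. This is a reconstruction for illustration, not the poster's exact test.ll source, and the generated asm will vary by version and target.)
/* Hypothetical reconstruction of the "less" test case quoted above. */
_Bool less(double a, double b)
{
    return a < b;   /* in C this is false if either operand is NaN */
}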
2007 Apr 18
0
[PATCH 9/12] base-into-desc
..." (base)); \
-} while(0)
-
-#define _set_limit(desc,limit) do { \
- unsigned long __tmp; \
- typecheck(struct desc_struct *, desc); \
- asm volatile("movw %w4,%2\n\t" \
- "rorl $16,%4\n\t" \
- "movb %3,%h4\n\t" \
- "andb $0xf0,%h4\n\t" \
- "orb %h4,%b4\n\t" \
- "movb %b4,%3" \
- :"=m"(*(desc)), \
- "=&q" (__tmp) \
- :"m" (*(desc)), \
- "m" (*((char *)(desc)+6)), \
- "1"...
2007 Apr 18
0
[PATCH 10/14] i386 / Move descriptor accessors into desc h
..." (base)); \
-} while(0)
-
-#define _set_limit(desc,limit) do { \
- unsigned long __tmp; \
- typecheck(struct desc_struct *, desc); \
- asm volatile("movw %w4,%2\n\t" \
- "rorl $16,%4\n\t" \
- "movb %3,%h4\n\t" \
- "andb $0xf0,%h4\n\t" \
- "orb %h4,%b4\n\t" \
- "movb %b4,%3" \
- :"=m"(*(desc)), \
- "=&q" (__tmp) \
- :"m" (*(desc)), \
- "m" (*((char *)(desc)+6)), \
- "1"...
2012 Dec 18
2
[LLVMdev] Getting rid of tabs in LLVM's assembly output?
...use some really ugly output; can a somewhat more intelligent formatter be used to keep columns aligned?
>
Here's how some random output currently looks in a 2-space-per-tab editor:
cmpl $0, -28(%rbp)
je .LBB2_9
movsbl -81(%rbp), %eax
movq -16(%rbp), %rcx
movb 56(%rcx), %dl
andb $1, %dl
movzbl %dl, %esi
So if you're worried about ugliness, it's already there :-)
Eli
2007 Apr 18
0
[PATCH 8/12] base-limit-cleanup
..." (*((addr)+4)), \
- "m" (*((addr)+7)), \
- "0" (base) \
- ); } while(0)
-
-#define _set_limit(addr,limit) do { unsigned long __lr; \
-__asm__ __volatile__ ("movw %%dx,%1\n\t" \
- "rorl $16,%%edx\n\t" \
- "movb %2,%%dh\n\t" \
- "andb $0xf0,%%dh\n\t" \
- "orb %%dh,%%dl\n\t" \
- "movb %%dl,%2" \
- :"=&d" (__lr) \
- :"m" (*(addr)), \
- "m" (*((addr)+6)), \
- "0" (limit) \
- ); } while(0)
+#define _set_base(desc,base) do { \
+ unsigned long __tmp;...
2007 Apr 18
0
[PATCH 9/14] i386 / Typecheck and optimize base and limit accessors
..." (*((addr)+4)), \
- "m" (*((addr)+7)), \
- "0" (base) \
- ); } while(0)
-
-#define _set_limit(addr,limit) do { unsigned long __lr; \
-__asm__ __volatile__ ("movw %%dx,%1\n\t" \
- "rorl $16,%%edx\n\t" \
- "movb %2,%%dh\n\t" \
- "andb $0xf0,%%dh\n\t" \
- "orb %%dh,%%dl\n\t" \
- "movb %%dl,%2" \
- :"=&d" (__lr) \
- :"m" (*(addr)), \
- "m" (*((addr)+6)), \
- "0" (limit) \
- ); } while(0)
+#define _set_base(desc,base) do { \
+ unsigned long __tmp;...
2016 Oct 26
2
RFC: a more detailed design for ThinLTO + vcall CFI
...], [ %11, %7 ]
ret i1 %13
}
Here is what the asm for the above looks like:
baz:
leaq .L__unnamed_1(%rip), %rax
subl %eax, %edi
roll $30, %edi ; CONSTANT: 32-rotate count
cmpl $65, %edi ; CONSTANT: size of byte array
ja .LBB2_1
movslq %edi, %rax
leaq .Lbits_use(%rip), %rcx
movb (%rax,%rcx), %al
andb $2, %al ; CONSTANT: bit mask
shrb %al
retq
.LBB2_1:
xorl %eax, %eax
retq
A naive summary encoding would map a type identifier to a tuple of
(resolution, rotate count, size of byte array, bit mask), and pull the
latter three out of the summary as constants. However, the disadvantage of
hard coding...
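A minimal C sketch of the naive per-type-identifier record described in that last paragraph; the struct and field names are assumptions for illustration, not the actual ThinLTO summary format:
/* Hypothetical layout of the naive (resolution, rotate count,
   byte-array size, bit mask) tuple mentioned above. */
struct TypeIdSummary {
    int           resolution;  /* how the type test is resolved      */
    unsigned      rotate;      /* 32-bit rotate count                */
    unsigned      size;        /* size of the byte array, in bytes   */
    unsigned char mask;        /* bit mask selecting the bit to test */
};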
2020 May 27
4
[cfe-dev] [RFC] Loading Bitfields with Smallest Needed Types
...es mean it's quite easy to create a test-case with
the exact same problematic store->load mismatch which doesn't use
bit-fields at all. For example:
short f2(short *bfs) {
*bfs &= ~0x1;
g();
return *bfs;
}
creates the same bad sequence:
movq %rdi, %rbx
andb $-2, (%rdi)
callq g()
movzwl (%rbx), %eax
On Tue, May 26, 2020 at 9:30 PM John McCall via llvm-dev <
llvm-dev at lists.llvm.org> wrote:
> On 26 May 2020, at 20:31, Arthur O'Dwyer wrote:
>
> On Tue, May 26, 2020 at 7:32 PM John McCall via cfe-dev <
>...
2016 Oct 28
0
RFC: a more detailed design for ThinLTO + vcall CFI
...e above looks like:
>
> baz:
> leaq .L__unnamed_1(%rip), %rax
> subl %eax, %edi
> roll $30, %edi ; CONSTANT: 32-rotate count
> cmpl $65, %edi ; CONSTANT: size of byte array
> ja .LBB2_1
>
> movslq %edi, %rax
> leaq .Lbits_use(%rip), %rcx
> movb (%rax,%rcx), %al
> andb $2, %al ; CONSTANT: bit mask
> shrb %al
> retq
> .LBB2_1:
> xorl %eax, %eax
> retq
>
> A naive summary encoding would map a type identifier to a tuple of
> (resolution, rotate count, size of byte array, bit mask), and pull the
> latter three out of the summary as constant...
2012 Dec 18
0
[LLVMdev] Getting rid of tabs in LLVM's assembly output?
...lligent
> formatter be used to keep columns aligned?
> >
>
> Here's how some random output currently looks in a 2-space-per-tab editor:
>
> cmpl $0, -28(%rbp)
> je .LBB2_9
> movsbl -81(%rbp), %eax
> movq -16(%rbp), %rcx
> movb 56(%rcx), %dl
> andb $1, %dl
> movzbl %dl, %esi
>
> So if you're worried about ugliness, it's already there :-)
>
> Eli
> _______________________________________________
> LLVM Developers mailing list
> LLVMdev at cs.uiuc.edu http://llvm.cs.uiuc.edu
> http://lists.cs.uiuc....
2012 Dec 18
0
[LLVMdev] Getting rid of tabs in LLVM's assembly output?
> From: llvmdev-bounces at cs.uiuc.edu [mailto:llvmdev-bounces at cs.uiuc.edu]
> On Behalf Of Eli Bendersky
> Subject: [LLVMdev] Getting rid of tabs in LLVM's assembly output?
> Problem: I then get tabs in my tests, which are discouraged by LLVM's
> own code standards, because assembly output uses tabs extensively.
> Proposal: get rid of tabs by just replacing them with
2012 Dec 18
3
[LLVMdev] Getting rid of tabs in LLVM's assembly output?
Scenario: sometimes when creating tests for MC, I run llc, take its
assembly (.s) output and copy-paste parts of it into a test.
Problem: I then get tabs in my tests, which are discouraged by LLVM's
own code standards, because assembly output uses tabs extensively.
Proposal: get rid of tabs by just replacing them with two spaces everywhere.
I had an informal chat about this with Jim on the
2020 May 26
6
[RFC] Loading Bitfields with Smallest Needed Types
We're running into an interesting issue with the Linux kernel, and
wanted advice on how to proceed.
Example of what we're talking about: https://godbolt.org/z/ABGySq
The issue is that, when working with a bitfield, a load may happen
quickly after a store. For instance:
struct napi_gro_cb {
void *frag;
unsigned int frag_len;
u16 flush;
u16 flush_id;
u16 count;
u16
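A self-contained C sketch of the pattern the thread describes; the struct and field names below are hypothetical stand-ins, not the real napi_gro_cb layout:
/* Hypothetical example: clearing the 1-bit field may be emitted as a
   byte-wide read-modify-write store (e.g. andb), while reading the
   adjacent field loads the full 16-bit word right after it, giving the
   store->load size mismatch discussed in the thread. */
struct gro_flags {
    unsigned short flush : 1;
    unsigned short count : 15;
};

unsigned short clear_flush_then_count(struct gro_flags *f)
{
    f->flush = 0;     /* narrow (byte) store                 */
    return f->count;  /* wider (16-bit) load of the same word */
}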
2007 Apr 18
0
[RFC, PATCH 18/24] i386 Vmi tlbflush header
..."mov %%cr3, %%eax; mov %%eax, %%cr3",
+ VMI_NO_OUTPUT,
+ 1, VMI_IREG1(VMI_FLUSH_TLB),
+ VMI_CLOBBER_EXTENDED(ZERO_RETURNS, "eax", "memory"));
+}
+
+static inline void __flush_tlb_global(void)
+{
+ vmi_wrap_call(
+ FlushTLB, "mov %%cr4, %%eax; \n"
+ "andb $0x7f, %%al; \n"
+ "mov %%eax, %%cr4; \n"
+ "orb $0x80, %%al; \n"
+ "mov %%eax, %%cr4",
+ VMI_NO_OUTPUT,
+ 1, VMI_IREG1(VMI_FLUSH_TLB | VMI_FLUSH_GLOBAL),
+ VMI_CLOBBER_EXTENDED(ZERO_RETURNS, "eax", "memory"));
+}
+
+static inlin...