Hi,

I'm compiling a large code base that uses tagged data, with the tag in the
two lowest bits: ints are shifted two steps to the left and have 2 in the
tag bits, pointers have 0 in the tag bits, and so on.

When I compile the code, I notice that there are places where -O3 doesn't
remove unnecessary tag-bit tests and manipulations when they are written
with bitwise 'and' and 'or' (which is how the large code base implements
them). I've provided a small example below. When I change the code to use
subtraction and addition instead, LLVM is able to detect and optimise it
correctly.

Is there perhaps an optional optimisation pass that I could run that would
detect this optimisation opportunity?

Thanks for any ideas,
/Lars

/***************************************************/

/* The two LSB of x0 are 'tag bits' */
/* that we want to manipulate.      */
extern long x0;

void go_error(void) __attribute__ ((noreturn));

void example_not_optimized(void)
{
  if((x0 & 3) == 2) {
    // Here the tag bits are removed and added
    // with bitwise 'and' and 'or'.
    x0 = ((x0 & ~3) | 2) + 12;
  } else {
    go_error();
  }
}

/*
define void @example_not_optimized() #0 {
  %1 = load i64* @x0, align 8, !tbaa !1
  %2 = and i64 %1, 3
  %3 = icmp eq i64 %2, 2
  br i1 %3, label %4, label %8

; <label>:4                                       ; preds = %0
  %5 = and i64 %1, -4        ; this should be optimized away
  %6 = or i64 %5, 2          ; this should be optimized away
  %7 = add nsw i64 %6, 12
  store i64 %7, i64* @x0, align 8, !tbaa !1
  ret void

; <label>:8                                       ; preds = %0
  tail call void @go_error() #2
  unreachable
}
*/

void example_optimized(void)
{
  if((x0 & 3) == 2) {
    // Here the tag bits are removed and added
    // with subtraction and addition.
    x0 = (x0 - (x0 & 3) + 2) + 12;
  } else {
    go_error();
  }
}

/*
define void @example_optimized() #0 {
  %1 = load i64* @x0, align 8, !tbaa !1
  %2 = and i64 %1, 3
  %3 = icmp eq i64 %2, 2
  br i1 %3, label %4, label %6

; <label>:4                                       ; preds = %0
  %5 = add i64 %1, 12
  store i64 %5, i64* @x0, align 8, !tbaa !1
  ret void

; <label>:6                                       ; preds = %0
  tail call void @go_error() #2
  unreachable
}
*/
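For completeness, the tagging helpers in the code base look roughly like the
following. This is a simplified sketch with made-up names, not the actual
macros, but the bit layout matches what I described above:

/* Sketch of the tagging scheme: the two low bits hold the tag,  */
/* the payload lives in the upper bits.                           */
#define TAG_MASK   3L
#define TAG_PTR    0L   /* pointers are stored unchanged, tag 0          */
#define TAG_INT    2L   /* ints are shifted left by 2 and tagged with 2  */

#define MAKE_INT(n)  (((long)(n) << 2) | TAG_INT)
#define GET_INT(x)   ((x) >> 2)
#define GET_TAG(x)   ((x) & TAG_MASK)
#define IS_INT(x)    (GET_TAG(x) == TAG_INT)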
On 12/04/2014 02:19 AM, Lars Rasmusson SICS wrote:
> I'm compiling a large code base that uses tagged data, with the tag in
> the two lowest bits.
> [...]
> Is there perhaps an optional optimisation pass that I could run that
> could detect this optimisation opportunity?

This looks like it's simply a missed optimization.  Could you file a bug
with both the working and non-working IR fragments?

The add/sub case is probably being caught by GVN and constant prop.

One useful trick to determine whether something is a missed optimization or
a pass-ordering problem is to run the IR through "opt -O3" twice.  If the
second run improves the IR, there's likely a pass-ordering issue.  In this
case, it doesn't appear to be a pass-ordering issue.

There's one missing instcombine:

  %5 = and i64 %1, -4      <-- compute known bits should give low zeros
  %6 = or i64 %5, 2        <-- equivalent to +2 given the known bits
  %7 = add nsw i64 %6, 12  <-- equivalent to add nsw %5, 14

Getting rid of the 'and' itself is harder.  I'm thinking we'd need to add
known bits to the lattice in LazyValueInfo, but I'm open to alternate
approaches.  I'll note that I've been thinking about that for other reasons
as well.

One very small tweak you could make would be to add an llvm.assume inside
the if case of the if condition.  (Yes, that is as silly as it sounds.)  I
tested that, and it did optimize as expected.  It's essentially working
around a deficiency in the optimizer around path constraints.

define void @example_tweaked(i64* %x0) #0 {
  %1 = load i64* %x0, align 8
  %2 = and i64 %1, 3
  %3 = icmp eq i64 %2, 2
  br i1 %3, label %4, label %8

; <label>:4                                       ; preds = %0
  call void @llvm.assume(i1 %3)
  %5 = and i64 %1, -4        ; this should be optimized away
  %6 = or i64 %5, 2          ; this should be optimized away
  %7 = add nsw i64 %6, 12
  store i64 %7, i64* %x0, align 8
  ret void

; <label>:8                                       ; preds = %0
  tail call void @go_error() #2
  unreachable
}

declare void @llvm.assume(i1)
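At the C level, the equivalent hack is to restate the condition inside the
taken branch.  Untested sketch (the function name is made up, and it assumes
a clang new enough to provide __builtin_assume, which lowers to
@llvm.assume):

extern long x0;

void go_error(void) __attribute__ ((noreturn));

/* Sketch only: same as example_not_optimized, plus a restated assumption. */
void example_with_assume(void)
{
  if ((x0 & 3) == 2) {
    /* Restate the path condition so the optimizer sees the known tag bits. */
    __builtin_assume((x0 & 3) == 2);
    x0 = ((x0 & ~3) | 2) + 12;
  } else {
    go_error();
  }
}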
Philip Reames wrote:
> Getting rid of the 'and' itself is harder.  I'm thinking we'd need to add
> known bits to the lattice in LazyValueInfo, but I'm open to alternate
> approaches.  I'll note that I've been thinking about that for other
> reasons as well.

FWIW, I think adding known bits, and known values of those bits, is an
excellent way to go.  The Intel compiler has done this in its own framework,
and the compiler source base from the old Digital/Compaq C/C++ and Fortran
compilers had it as well.  It sounded (from another post here) like the
Apollo compiler did too.

This turns out to be highly valuable in a surprisingly large amount of code,
especially where macros or class members hide a lot of bit-masking
operations, which otherwise leads to many redundant checks.  It also gives
you alignment proving in the same framework, and can even represent known
modulo information.

I also like the idea (from that Apollo post) of adding to the SSA graph the
points where specific paths give you more knowledge of values.

Kevin Smith
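P.S.  To make the "known bits" idea concrete for Lars's example, here is a
toy sketch of the lattice element and the transfer functions for 'and'/'or'.
This is my own illustration, not how LLVM's ValueTracking or LazyValueInfo
are actually written:

#include <stdint.h>
#include <stdio.h>

/* Toy known-bits element: a set bit in 'zero'/'one' means that bit of the */
/* value is known to be 0 / 1; bits set in neither mask are unknown.       */
typedef struct { uint64_t zero, one; } KnownBits;

static KnownBits kb_const(uint64_t c) { KnownBits k = { ~c, c }; return k; }

/* x & y: a bit is known 0 if known 0 in either operand, known 1 only if   */
/* known 1 in both.                                                        */
static KnownBits kb_and(KnownBits a, KnownBits b) {
  KnownBits k = { a.zero | b.zero, a.one & b.one }; return k;
}

/* x | y: the dual of kb_and.                                              */
static KnownBits kb_or(KnownBits a, KnownBits b) {
  KnownBits k = { a.zero & b.zero, a.one | b.one }; return k;
}

int main(void) {
  /* Path constraint from "(x0 & 3) == 2": bit 0 known 0, bit 1 known 1.   */
  KnownBits x0 = { 1, 2 };

  /* %5 = and i64 %1, -4 ;  %6 = or i64 %5, 2                              */
  KnownBits k5 = kb_and(x0, kb_const(~(uint64_t)3));
  KnownBits k6 = kb_or(k5, kb_const(2));

  /* The low two bits of %6 come out exactly as the low two bits of %1,    */
  /* so under the path constraint both instructions are redundant.         */
  printf("low bits of %%6: known-zero=%llu known-one=%llu\n",
         (unsigned long long)(k6.zero & 3), (unsigned long long)(k6.one & 3));
  return 0;
}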