Sure, I will split it and put it in two patches.
Give me a few hours. I need to test those patches.
Sirish
On 4/19/2012 8:40 AM, Tom Stellard wrote:
> On Wed, Apr 18, 2012 at 11:18:05PM -0500, Sirish Pande wrote:
>> Hi,
>>
>> Here's a patch for Hexagon Packetizer for review. This patch does
>> not yield any warnings.
>>
> Would it be possible to split this patch in two? One for the core
> LLVM changes and one for the Hexagon changes. I've been working on a
> Packetizer for the R600 backend, and I was using the DFAPacketizer code
> that was reverted, so it would be nice if the DFAPacketizer changes
> weren't tied directly to the Hexagon code.
>
> -Tom
>
>> Sirish
>>
>> --
>> Qualcomm Innovation Center, Inc is a member of Code Aurora Forum
>>
>> diff --git a/include/llvm/CodeGen/DFAPacketizer.h b/include/llvm/CodeGen/DFAPacketizer.h
>> index ee1ed07..2d2db78 100644
>> --- a/include/llvm/CodeGen/DFAPacketizer.h
>> +++ b/include/llvm/CodeGen/DFAPacketizer.h
>> @@ -28,6 +28,7 @@
>>
>> #include "llvm/CodeGen/MachineBasicBlock.h"
>> #include "llvm/ADT/DenseMap.h"
>> +#include <map>
>>
>> namespace llvm {
>>
>> @@ -36,7 +37,7 @@ class MachineInstr;
>> class MachineLoopInfo;
>> class MachineDominatorTree;
>> class InstrItineraryData;
>> -class ScheduleDAGInstrs;
>> +class DefaultVLIWScheduler;
>> class SUnit;
>>
>> class DFAPacketizer {
>> @@ -77,6 +78,8 @@ public:
>> // reserveResources - Reserve the resources occupied by a machine
>> // instruction and change the current state to reflect that change.
>> void reserveResources(llvm::MachineInstr *MI);
>> +
>> + const InstrItineraryData *getInstrItins() const { return InstrItins; }
>> };
>>
>> // VLIWPacketizerList - Implements a simple VLIW packetizer using DFA. The
>> @@ -87,20 +90,21 @@ public:
>> // and machine resource is marked as taken. If any dependency is found, a target
>> // API call is made to prune the dependence.
>> class VLIWPacketizerList {
>> +protected:
>> const TargetMachine &TM;
>> const MachineFunction &MF;
>> const TargetInstrInfo *TII;
>>
>> - // Encapsulate data types not exposed to the target interface.
>> - ScheduleDAGInstrs *SchedulerImpl;
>> + // The VLIW Scheduler.
>> + DefaultVLIWScheduler *VLIWScheduler;
>>
>> -protected:
>> // Vector of instructions assigned to the current packet.
>> std::vector<MachineInstr*> CurrentPacketMIs;
>> // DFA resource tracker.
>> DFAPacketizer *ResourceTracker;
>> - // Scheduling units.
>> - std::vector<SUnit> SUnits;
>> +
>> + // Generate MI -> SU map.
>> + std::map<MachineInstr*, SUnit*> MIToSUnit;
>>
>> public:
>> VLIWPacketizerList(
>> @@ -118,17 +122,32 @@ public:
>> DFAPacketizer *getResourceTracker() {return ResourceTracker;}
>>
>> // addToPacket - Add MI to the current packet.
>> - void addToPacket(MachineInstr *MI);
>> + virtual MachineBasicBlock::iterator addToPacket(MachineInstr *MI) {
>> + MachineBasicBlock::iterator MII = MI;
>> + CurrentPacketMIs.push_back(MI);
>> + ResourceTracker->reserveResources(MI);
>> + return MII;
>> + }
>>
>> // endPacket - End the current packet.
>> - void endPacket(MachineBasicBlock *MBB, MachineInstr *I);
>> + void endPacket(MachineBasicBlock *MBB, MachineInstr *MI);
>> +
>> + // initPacketizerState - perform initialization before packetizing
>> + // an instruction. This function is supposed to be overridden by
>> + // the target dependent packetizer.
>> + virtual void initPacketizerState(void) { return; }
>>
>> // ignorePseudoInstruction - Ignore bundling of pseudo instructions.
>> - bool ignorePseudoInstruction(MachineInstr *I, MachineBasicBlock *MBB);
>> + virtual bool ignorePseudoInstruction(MachineInstr *I,
>> + MachineBasicBlock *MBB) {
>> + return false;
>> + }
>>
>> - // isSoloInstruction - return true if instruction I must end previous
>> - // packet.
>> - bool isSoloInstruction(MachineInstr *I);
>> + // isSoloInstruction - return true if instruction MI can not be packetized
>> + // with any other instruction, which means that MI itself is a packet.
>> + virtual bool isSoloInstruction(MachineInstr *MI) {
>> + return true;
>> + }
>>
>> // isLegalToPacketizeTogether - Is it legal to packetize SUI and SUJ
>> // together.
>> @@ -141,6 +160,7 @@ public:
>> virtual bool isLegalToPruneDependencies(SUnit *SUI, SUnit *SUJ) {
>> return false;
>> }
>> +
>> };
>> }
>>
>> diff --git a/lib/CodeGen/DFAPacketizer.cpp b/lib/CodeGen/DFAPacketizer.cpp
>> index 5ff641c..bfbe779 100644
>> --- a/lib/CodeGen/DFAPacketizer.cpp
>> +++ b/lib/CodeGen/DFAPacketizer.cpp
>> @@ -23,10 +23,10 @@
>> //
>> //===----------------------------------------------------------------------===//
>>
>> +#include "llvm/CodeGen/ScheduleDAGInstrs.h"
>> #include "llvm/CodeGen/DFAPacketizer.h"
>> #include "llvm/CodeGen/MachineInstr.h"
>> #include "llvm/CodeGen/MachineInstrBundle.h"
>> -#include "llvm/CodeGen/ScheduleDAGInstrs.h"
>> #include "llvm/Target/TargetInstrInfo.h"
>> #include "llvm/MC/MCInstrItineraries.h"
>> using namespace llvm;
>> @@ -100,17 +100,17 @@ void DFAPacketizer::reserveResources(llvm::MachineInstr *MI) {
>> reserveResources(&MID);
>> }
>>
>> -namespace {
>> +namespace llvm {
>> // DefaultVLIWScheduler - This class extends ScheduleDAGInstrs and overrides
>> // Schedule method to build the dependence graph.
>> class DefaultVLIWScheduler : public ScheduleDAGInstrs {
>> public:
>> DefaultVLIWScheduler(MachineFunction &MF, MachineLoopInfo &MLI,
>> - MachineDominatorTree &MDT, bool IsPostRA);
>> + MachineDominatorTree &MDT, bool IsPostRA);
>> // Schedule - Actual scheduling work.
>> void schedule();
>> };
>> -} // end anonymous namespace
>> +}
>>
>> DefaultVLIWScheduler::DefaultVLIWScheduler(
>> MachineFunction &MF, MachineLoopInfo &MLI, MachineDominatorTree &MDT,
>> @@ -129,49 +129,25 @@ VLIWPacketizerList::VLIWPacketizerList(
>> bool IsPostRA) : TM(MF.getTarget()), MF(MF) {
>> TII = TM.getInstrInfo();
>> ResourceTracker = TII->CreateTargetScheduleState(&TM, 0);
>> - SchedulerImpl = new DefaultVLIWScheduler(MF, MLI, MDT, IsPostRA);
>> + VLIWScheduler = new DefaultVLIWScheduler(MF, MLI, MDT, IsPostRA);
>> }
>>
>> // VLIWPacketizerList Dtor
>> VLIWPacketizerList::~VLIWPacketizerList() {
>> - delete SchedulerImpl;
>> - delete ResourceTracker;
>> -}
>> -
>> -// ignorePseudoInstruction - ignore pseudo instructions.
>> -bool VLIWPacketizerList::ignorePseudoInstruction(MachineInstr *MI,
>> - MachineBasicBlock *MBB) {
>> - if (MI->isDebugValue())
>> - return true;
>> -
>> - if (TII->isSchedulingBoundary(MI, MBB, MF))
>> - return true;
>> -
>> - return false;
>> -}
>> -
>> -// isSoloInstruction - return true if instruction I must end previous
>> -// packet.
>> -bool VLIWPacketizerList::isSoloInstruction(MachineInstr *I) {
>> - if (I->isInlineAsm())
>> - return true;
>> -
>> - return false;
>> -}
>> + if (VLIWScheduler)
>> + delete VLIWScheduler;
>>
>> -// addToPacket - Add I to the current packet and reserve resource.
>> -void VLIWPacketizerList::addToPacket(MachineInstr *MI) {
>> - CurrentPacketMIs.push_back(MI);
>> - ResourceTracker->reserveResources(MI);
>> + if (ResourceTracker)
>> + delete ResourceTracker;
>> }
>>
>> // endPacket - End the current packet, bundle packet instructions and reset
>> // DFA state.
>> void VLIWPacketizerList::endPacket(MachineBasicBlock *MBB,
>> - MachineInstr *I) {
>> + MachineInstr *MI) {
>> if (CurrentPacketMIs.size() > 1) {
>> MachineInstr *MIFirst = CurrentPacketMIs.front();
>> - finalizeBundle(*MBB, MIFirst, I);
>> + finalizeBundle(*MBB, MIFirst, MI);
>> }
>> CurrentPacketMIs.clear();
>> ResourceTracker->clearResources();
>> @@ -181,31 +157,36 @@ void VLIWPacketizerList::endPacket(MachineBasicBlock *MBB,
>> void VLIWPacketizerList::PacketizeMIs(MachineBasicBlock *MBB,
>> MachineBasicBlock::iterator BeginItr,
>> MachineBasicBlock::iterator EndItr) {
>> - assert(MBB->end() == EndItr && "Bad EndIndex");
>> -
>> - SchedulerImpl->enterRegion(MBB, BeginItr, EndItr, MBB->size());
>> -
>> - // Build the DAG without reordering instructions.
>> - SchedulerImpl->schedule();
>> -
>> - // Remember scheduling units.
>> - SUnits = SchedulerImpl->SUnits;
>> + assert(VLIWScheduler && "VLIW Scheduler is not initialized!");
>> + VLIWScheduler->enterRegion(MBB, BeginItr, EndItr, MBB->size());
>> + VLIWScheduler->schedule();
>> + VLIWScheduler->exitRegion();
>> +
>> + // Generate MI -> SU map.
>> + //std::map<MachineInstr*, SUnit*> MIToSUnit;
>> + MIToSUnit.clear();
>> + for (unsigned i = 0, e = VLIWScheduler->SUnits.size(); i != e; ++i) {
>> + SUnit *SU = &VLIWScheduler->SUnits[i];
>> + MIToSUnit[SU->getInstr()] = SU;
>> + }
>>
>> // The main packetizer loop.
>> for (; BeginItr != EndItr; ++BeginItr) {
>> MachineInstr *MI = BeginItr;
>>
>> - // Ignore pseudo instructions.
>> - if (ignorePseudoInstruction(MI, MBB))
>> - continue;
>> + this->initPacketizerState();
>>
>> // End the current packet if needed.
>> - if (isSoloInstruction(MI)) {
>> + if (this->isSoloInstruction(MI)) {
>> endPacket(MBB, MI);
>> continue;
>> }
>>
>> - SUnit *SUI = SchedulerImpl->getSUnit(MI);
>> + // Ignore pseudo instructions.
>> + if (this->ignorePseudoInstruction(MI, MBB))
>> + continue;
>> +
>> + SUnit *SUI = MIToSUnit[MI];
>> assert(SUI && "Missing SUnit Info!");
>>
>> // Ask DFA if machine resource is available for MI.
>> @@ -215,13 +196,13 @@ void VLIWPacketizerList::PacketizeMIs(MachineBasicBlock *MBB,
>> for (std::vector<MachineInstr*>::iterator VI = CurrentPacketMIs.begin(),
>> VE = CurrentPacketMIs.end(); VI != VE; ++VI) {
>> MachineInstr *MJ = *VI;
>> - SUnit *SUJ = SchedulerImpl->getSUnit(MJ);
>> + SUnit *SUJ = MIToSUnit[MJ];
>> assert(SUJ && "Missing SUnit Info!");
>>
>> // Is it legal to packetize SUI and SUJ together.
>> - if (!isLegalToPacketizeTogether(SUI, SUJ)) {
>> + if (!this->isLegalToPacketizeTogether(SUI, SUJ)) {
>> // Allow packetization if dependency can be pruned.
>> - if (!isLegalToPruneDependencies(SUI, SUJ)) {
>> + if (!this->isLegalToPruneDependencies(SUI, SUJ)) {
>> // End the packet if dependency cannot be pruned.
>> endPacket(MBB, MI);
>> break;
>> @@ -234,11 +215,9 @@ void VLIWPacketizerList::PacketizeMIs(MachineBasicBlock *MBB,
>> }
>>
>> // Add MI to the current packet.
>> - addToPacket(MI);
>> + BeginItr = this->addToPacket(MI);
>> } // For all instructions in BB.
>>
>> // End any packet left behind.
>> endPacket(MBB, EndItr);
>> -
>> - SchedulerImpl->exitRegion();
>> }
>> diff --git a/lib/Target/Hexagon/CMakeLists.txt b/lib/Target/Hexagon/CMakeLists.txt
>> index af9e813..29cf8a7 100644
>> --- a/lib/Target/Hexagon/CMakeLists.txt
>> +++ b/lib/Target/Hexagon/CMakeLists.txt
>> @@ -28,6 +28,7 @@ add_llvm_target(HexagonCodeGen
>> HexagonSubtarget.cpp
>> HexagonTargetMachine.cpp
>> HexagonTargetObjectFile.cpp
>> + HexagonVLIWPacketizer.cpp
>> )
>>
>> add_subdirectory(TargetInfo)
>> diff --git a/lib/Target/Hexagon/Hexagon.h b/lib/Target/Hexagon/Hexagon.h
>> index 0808323..43858b9 100644
>> --- a/lib/Target/Hexagon/Hexagon.h
>> +++ b/lib/Target/Hexagon/Hexagon.h
>> @@ -40,6 +40,7 @@ namespace llvm {
>> FunctionPass *createHexagonHardwareLoops();
>> FunctionPass *createHexagonPeephole();
>> FunctionPass *createHexagonFixupHwLoops();
>> + FunctionPass *createHexagonPacketizer();
>>
>> /* TODO: object output.
>> MCCodeEmitter *createHexagonMCCodeEmitter(const Target &,
>> diff --git a/lib/Target/Hexagon/HexagonAsmPrinter.cpp b/lib/Target/Hexagon/HexagonAsmPrinter.cpp
>> index 39bf45d..63d58d0 100644
>> --- a/lib/Target/Hexagon/HexagonAsmPrinter.cpp
>> +++ b/lib/Target/Hexagon/HexagonAsmPrinter.cpp
>> @@ -13,11 +13,11 @@
>> //
>> //===----------------------------------------------------------------------===//
>>
>> -
>> #define DEBUG_TYPE "asm-printer"
>> #include "Hexagon.h"
>> #include "HexagonAsmPrinter.h"
>> #include "HexagonMachineFunctionInfo.h"
>> +#include "HexagonMCInst.h"
>> #include "HexagonTargetMachine.h"
>> #include "HexagonSubtarget.h"
>> #include "InstPrinter/HexagonInstPrinter.h"
>> @@ -54,6 +54,7 @@
>> #include "llvm/ADT/SmallString.h"
>> #include "llvm/ADT/SmallVector.h"
>> #include "llvm/ADT/StringExtras.h"
>> +#include <map>
>>
>> using namespace llvm;
>>
>> @@ -77,8 +78,7 @@ void HexagonAsmPrinter::printOperand(const MachineInstr *MI, unsigned OpNo,
>> const MachineOperand &MO = MI->getOperand(OpNo);
>>
>> switch (MO.getType()) {
>> - default:
>> - assert(0 && "<unknown operand type>");
>> + default: llvm_unreachable ("<unknown operand type>");
>> case MachineOperand::MO_Register:
>> O << HexagonInstPrinter::getRegisterName(MO.getReg());
>> return;
>> @@ -196,10 +196,45 @@ void HexagonAsmPrinter::printPredicateOperand(const MachineInstr *MI,
>> /// the current output stream.
>> ///
>> void HexagonAsmPrinter::EmitInstruction(const MachineInstr *MI) {
>> - MCInst MCI;
>> -
>> - HexagonLowerToMC(MI, MCI, *this);
>> - OutStreamer.EmitInstruction(MCI);
>> + if (MI->isBundle()) {
>> + std::vector<const MachineInstr*> BundleMIs;
>> +
>> + const MachineBasicBlock *MBB = MI->getParent();
>> + MachineBasicBlock::const_instr_iterator MII = MI;
>> + ++MII;
>> + unsigned int IgnoreCount = 0;
>> + while (MII != MBB->end() && MII->isInsideBundle()) {
>> + const MachineInstr *MInst = MII;
>> + if (MInst->getOpcode() == TargetOpcode::DBG_VALUE ||
>> + MInst->getOpcode() == TargetOpcode::IMPLICIT_DEF) {
>> + IgnoreCount++;
>> + ++MII;
>> + continue;
>> + }
>> + //BundleMIs.push_back(&*MII);
>> + BundleMIs.push_back(MInst);
>> + ++MII;
>> + }
>> + unsigned Size = BundleMIs.size();
>> + assert((Size+IgnoreCount) == MI->getBundleSize() && "Corrupt Bundle!");
>> + for (unsigned Index = 0; Index < Size; Index++) {
>> + HexagonMCInst MCI;
>> + MCI.setStartPacket(Index == 0);
>> + MCI.setEndPacket(Index == (Size-1));
>> +
>> + HexagonLowerToMC(BundleMIs[Index], MCI, *this);
>> + OutStreamer.EmitInstruction(MCI);
>> + }
>> + }
>> + else {
>> + HexagonMCInst MCI;
>> + if (MI->getOpcode() == Hexagon::ENDLOOP0) {
>> + MCI.setStartPacket(true);
>> + MCI.setEndPacket(true);
>> + }
>> + HexagonLowerToMC(MI, MCI, *this);
>> + OutStreamer.EmitInstruction(MCI);
>> + }
>>
>> return;
>> }
>> @@ -242,17 +277,17 @@ void HexagonAsmPrinter::printJumpTable(const MachineInstr *MI, int OpNo,
>> raw_ostream &O) {
>> const MachineOperand &MO = MI->getOperand(OpNo);
>> assert( (MO.getType() == MachineOperand::MO_JumpTableIndex) &&
>> - "Expecting jump table index");
>> + "Expecting jump table index");
>>
>> // Hexagon_TODO: Do we need name mangling?
>> O << *GetJTISymbol(MO.getIndex());
>> }
>>
>> void HexagonAsmPrinter::printConstantPool(const MachineInstr *MI, int OpNo,
>> - raw_ostream &O) {
>> + raw_ostream &O) {
>> const MachineOperand &MO = MI->getOperand(OpNo);
>> assert( (MO.getType() == MachineOperand::MO_ConstantPoolIndex) &&
>> - "Expecting constant pool index");
>> + "Expecting constant pool index");
>>
>> // Hexagon_TODO: Do we need name mangling?
>> O << *GetCPISymbol(MO.getIndex());
>> diff --git a/lib/Target/Hexagon/HexagonInstrFormats.td b/lib/Target/Hexagon/HexagonInstrFormats.td
>> index c9f16fb..e3fa0ed 100644
>> --- a/lib/Target/Hexagon/HexagonInstrFormats.td
>> +++ b/lib/Target/Hexagon/HexagonInstrFormats.td
>> @@ -13,13 +13,26 @@
>> // *** Must match HexagonBaseInfo.h ***
>> //===----------------------------------------------------------------------===//
>>
>> +class Type<bits<5> t> {
>> + bits<5> Value = t;
>> +}
>> +def TypePSEUDO : Type<0>;
>> +def TypeALU32 : Type<1>;
>> +def TypeCR : Type<2>;
>> +def TypeJR : Type<3>;
>> +def TypeJ : Type<4>;
>> +def TypeLD : Type<5>;
>> +def TypeST : Type<6>;
>> +def TypeSYSTEM : Type<7>;
>> +def TypeXTYPE : Type<8>;
>> +def TypeMARKER : Type<31>;
>>
>> //===----------------------------------------------------------------------===//
>> // Intruction Class Declaration +
>> //===----------------------------------------------------------------------===//
>>
>> class InstHexagon<dag outs, dag ins, string asmstr, list<dag> pattern,
>> - string cstr, InstrItinClass itin> : Instruction {
>> + string cstr, InstrItinClass itin, Type type> : Instruction {
>> field bits<32> Inst;
>>
>> let Namespace = "Hexagon";
>> @@ -31,11 +44,15 @@ class InstHexagon<dag outs, dag ins, string asmstr, list<dag> pattern,
>> let Constraints = cstr;
>> let Itinerary = itin;
>>
>> - // *** The code below must match HexagonBaseInfo.h ***
>> + // *** Must match HexagonBaseInfo.h ***
>> + Type HexagonType = type;
>> + let TSFlags{4-0} = HexagonType.Value;
>> + bits<1> isHexagonSolo = 0;
>> + let TSFlags{5} = isHexagonSolo;
>>
>> // Predicated instructions.
>> bits<1> isPredicated = 0;
>> - let TSFlags{1} = isPredicated;
>> + let TSFlags{6} = isPredicated;
>>
>> // *** The code above must match HexagonBaseInfo.h ***
>> }
>> @@ -47,17 +64,25 @@ class InstHexagon<dag outs, dag ins, string asmstr, list<dag> pattern,
>> // LD Instruction Class in V2/V3/V4.
>> // Definition of the instruction class NOT CHANGED.
>> class LDInst<dag outs, dag ins, string asmstr, list<dag> pattern>
>> - : InstHexagon<outs, ins, asmstr, pattern, "", LD> {
>> + : InstHexagon<outs, ins, asmstr, pattern, "", LD, TypeLD> {
>> + bits<5> rd;
>> + bits<5> rs;
>> + bits<13> imm13;
>> +}
>> +
>> +class LDInst2<dag outs, dag ins, string asmstr, list<dag> pattern>
>> + : InstHexagon<outs, ins, asmstr, pattern, "", LD, TypeLD> {
>> bits<5> rd;
>> bits<5> rs;
>> bits<13> imm13;
>> + let mayLoad = 1;
>> }
>>
>> // LD Instruction Class in V2/V3/V4.
>> // Definition of the instruction class NOT CHANGED.
>> class LDInstPost<dag outs, dag ins, string asmstr, list<dag> pattern,
>> string cstr>
>> - : InstHexagon<outs, ins, asmstr, pattern, cstr, LD> {
>> + : InstHexagon<outs, ins, asmstr, pattern, cstr, LD, TypeLD> {
>> bits<5> rd;
>> bits<5> rs;
>> bits<5> rt;
>> @@ -68,7 +93,24 @@ class LDInstPost<dag outs, dag ins, string asmstr, list<dag> pattern,
>> // ST Instruction Class in V4 can take SLOT0 & SLOT1.
>> // Definition of the instruction class CHANGED from V2/V3 to V4.
>> class STInst<dag outs, dag ins, string asmstr, list<dag> pattern>
>> - : InstHexagon<outs, ins, asmstr, pattern, "", ST> {
>> + : InstHexagon<outs, ins, asmstr, pattern, "", ST, TypeST> {
>> + bits<5> rd;
>> + bits<5> rs;
>> + bits<13> imm13;
>> +}
>> +
>> +class STInst2<dag outs, dag ins, string asmstr, list<dag> pattern>
>> + : InstHexagon<outs, ins, asmstr, pattern, "", ST, TypeST> {
>> + bits<5> rd;
>> + bits<5> rs;
>> + bits<13> imm13;
>> + let mayStore = 1;
>> +}
>> +
>> +// SYSTEM Instruction Class in V4 can take SLOT0 only
>> +// In V2/V3 we used ST for this but in v4 ST can take SLOT0 or SLOT1.
>> +class SYSInst<dag outs, dag ins, string asmstr, list<dag> pattern>
>> + : InstHexagon<outs, ins, asmstr, pattern, "", SYS, TypeSYSTEM> {
>> bits<5> rd;
>> bits<5> rs;
>> bits<13> imm13;
>> @@ -79,7 +121,7 @@ class STInst<dag outs, dag ins, string asmstr, list<dag> pattern>
>> // Definition of the instruction class CHANGED from V2/V3 to V4.
>> class STInstPost<dag outs, dag ins, string asmstr, list<dag> pattern,
>> string cstr>
>> - : InstHexagon<outs, ins, asmstr, pattern, cstr, ST> {
>> + : InstHexagon<outs, ins, asmstr, pattern, cstr, ST, TypeST> {
>> bits<5> rd;
>> bits<5> rs;
>> bits<5> rt;
>> @@ -89,7 +131,7 @@ class STInstPost<dag outs, dag ins, string asmstr, list<dag> pattern,
>> // ALU32 Instruction Class in V2/V3/V4.
>> // Definition of the instruction class NOT CHANGED.
>> class ALU32Type<dag outs, dag ins, string asmstr, list<dag> pattern>
>> - : InstHexagon<outs, ins, asmstr, pattern, "", ALU32> {
>> + : InstHexagon<outs, ins, asmstr, pattern, "", ALU32, TypeALU32> {
>> bits<5> rd;
>> bits<5> rs;
>> bits<5> rt;
>> @@ -102,7 +144,17 @@ class ALU32Type<dag outs, dag ins, string asmstr, list<dag> pattern>
>> // Definition of the instruction class NOT CHANGED.
>> // Name of the Instruction Class changed from ALU64 to XTYPE from V2/V3 to V4.
>> class ALU64Type<dag outs, dag ins, string asmstr, list<dag> pattern>
>> - : InstHexagon<outs, ins, asmstr, pattern, "", ALU64> {
>> + : InstHexagon<outs, ins, asmstr, pattern, "", ALU64, TypeXTYPE> {
>> + bits<5> rd;
>> + bits<5> rs;
>> + bits<5> rt;
>> + bits<16> imm16;
>> + bits<16> imm16_2;
>> +}
>> +
>> +class ALU64_acc<dag outs, dag ins, string asmstr, list<dag> pattern,
>> + string cstr>
>> + : InstHexagon<outs, ins, asmstr, pattern, cstr, ALU64, TypeXTYPE> {
>> bits<5> rd;
>> bits<5> rs;
>> bits<5> rt;
>> @@ -115,7 +167,7 @@ class ALU64Type<dag outs, dag ins, string asmstr, list<dag> pattern>
>> // Definition of the instruction class NOT CHANGED.
>> // Name of the Instruction Class changed from M to XTYPE from V2/V3 to V4.
>> class MInst<dag outs, dag ins, string asmstr, list<dag> pattern>
>> - : InstHexagon<outs, ins, asmstr, pattern, "", M> {
>> + : InstHexagon<outs, ins, asmstr, pattern, "", M, TypeXTYPE> {
>> bits<5> rd;
>> bits<5> rs;
>> bits<5> rt;
>> @@ -126,8 +178,8 @@ class MInst<dag outs, dag ins, string asmstr, list<dag> pattern>
>> // Definition of the instruction class NOT CHANGED.
>> // Name of the Instruction Class changed from M to XTYPE from V2/V3 to V4.
>> class MInst_acc<dag outs, dag ins, string asmstr, list<dag> pattern,
>> - string cstr>
>> - : InstHexagon<outs, ins, asmstr, pattern, cstr, M> {
>> + string cstr>
>> + : InstHexagon<outs, ins, asmstr, pattern, cstr, M, TypeXTYPE> {
>> bits<5> rd;
>> bits<5> rs;
>> bits<5> rt;
>> @@ -138,9 +190,7 @@ class MInst_acc<dag outs, dag ins, string asmstr, list<dag> pattern,
>> // Definition of the instruction class NOT CHANGED.
>> // Name of the Instruction Class changed from S to XTYPE from V2/V3 to V4.
>> class SInst<dag outs, dag ins, string asmstr, list<dag> pattern>
>> -//: InstHexagon<outs, ins, asmstr, pattern, cstr, !if(V4T, XTYPE_V4, M)> {
>> - : InstHexagon<outs, ins, asmstr, pattern, "", S> {
>> -// : InstHexagon<outs, ins, asmstr, pattern, "", S> {
>> + : InstHexagon<outs, ins, asmstr, pattern, "", S, TypeXTYPE> {
>> bits<5> rd;
>> bits<5> rs;
>> bits<5> rt;
>> @@ -151,8 +201,8 @@ class SInst<dag outs, dag ins, string asmstr, list<dag> pattern>
>> // Definition of the instruction class NOT CHANGED.
>> // Name of the Instruction Class changed from S to XTYPE from V2/V3 to V4.
>> class SInst_acc<dag outs, dag ins, string asmstr, list<dag> pattern,
>> - string cstr>
>> - : InstHexagon<outs, ins, asmstr, pattern, cstr, S> {
>> + string cstr>
>> + : InstHexagon<outs, ins, asmstr, pattern, cstr, S, TypeXTYPE> {
>> // : InstHexagon<outs, ins, asmstr, pattern, cstr, S> {
>> // : InstHexagon<outs, ins, asmstr, pattern, cstr, !if(V4T, XTYPE_V4, S)> {
>> bits<5> rd;
>> @@ -163,14 +213,14 @@ class SInst_acc<dag outs, dag ins, string asmstr, list<dag> pattern,
>> // J Instruction Class in V2/V3/V4.
>> // Definition of the instruction class NOT CHANGED.
>> class JType<dag outs, dag ins, string asmstr, list<dag> pattern>
>> - : InstHexagon<outs, ins, asmstr, pattern, "", J> {
>> + : InstHexagon<outs, ins, asmstr, pattern, "", J, TypeJ> {
>> bits<16> imm16;
>> }
>>
>> // JR Instruction Class in V2/V3/V4.
>> // Definition of the instruction class NOT CHANGED.
>> class JRType<dag outs, dag ins, string asmstr, list<dag> pattern>
>> - : InstHexagon<outs, ins, asmstr, pattern, "", JR> {
>> + : InstHexagon<outs, ins, asmstr, pattern, "", JR, TypeJR> {
>> bits<5> rs;
>> bits<5> pu; // Predicate register
>> }
>> @@ -178,15 +228,22 @@ class JRType<dag outs, dag ins, string asmstr, list<dag> pattern>
>> // CR Instruction Class in V2/V3/V4.
>> // Definition of the instruction class NOT CHANGED.
>> class CRInst<dag outs, dag ins, string asmstr, list<dag> pattern>
>> - : InstHexagon<outs, ins, asmstr, pattern, "", CR> {
>> + : InstHexagon<outs, ins, asmstr, pattern, "", CR, TypeCR> {
>> bits<5> rs;
>> bits<10> imm10;
>> }
>>
>> +class Marker<dag outs, dag ins, string asmstr, list<dag> pattern>
>> + : InstHexagon<outs, ins, asmstr, pattern, "", MARKER, TypeMARKER> {
>> + let isCodeGenOnly = 1;
>> + let isPseudo = 1;
>> +}
>>
>> class Pseudo<dag outs, dag ins, string asmstr, list<dag> pattern>
>> - : InstHexagon<outs, ins, asmstr, pattern, "", PSEUDO>;
>> -
>> + : InstHexagon<outs, ins, asmstr, pattern, "", PSEUDO, TypePSEUDO> {
>> + let isCodeGenOnly = 1;
>> + let isPseudo = 1;
>> +}
>>
>> //===----------------------------------------------------------------------===//
>> // Intruction Classes Definitions -
>> @@ -222,6 +279,11 @@ class ALU64_rr<dag outs, dag ins, string asmstr, list<dag> pattern>
>> : ALU64Type<outs, ins, asmstr, pattern> {
>> }
>>
>> +class ALU64_ri<dag outs, dag ins, string asmstr, list<dag> pattern>
>> + : ALU64Type<outs, ins, asmstr, pattern> {
>> + let rt{0-4} = 0;
>> +}
>> +
>> // J Type Instructions.
>> class JInst<dag outs, dag ins, string asmstr, list<dag> pattern>
>> : JType<outs, ins, asmstr, pattern> {
>> @@ -239,12 +301,27 @@ class STInstPI<dag outs, dag ins, string asmstr, list<dag> pattern, string cstr>
>> let rt{0-4} = 0;
>> }
>>
>> +class STInst2PI<dag outs, dag ins, string asmstr, list<dag> pattern, string cstr>
>> + : STInstPost<outs, ins, asmstr, pattern, cstr> {
>> + let rt{0-4} = 0;
>> + let mayStore = 1;
>> +}
>> +
>> +
>> +
>> // Post increment LD Instruction.
>> class LDInstPI<dag outs, dag ins, string asmstr, list<dag> pattern, string cstr>
>> : LDInstPost<outs, ins, asmstr, pattern, cstr> {
>> let rt{0-4} = 0;
>> }
>>
>> +class LDInst2PI<dag outs, dag ins, string asmstr, list<dag> pattern, string cstr>
>> + : LDInstPost<outs, ins, asmstr, pattern, cstr> {
>> + let rt{0-4} = 0;
>> + let mayLoad = 1;
>> +}
>> +
>> +
>> //===----------------------------------------------------------------------===//
>> // V4 Instruction Format Definitions +
>> //===----------------------------------------------------------------------===//
>> diff --git a/lib/Target/Hexagon/HexagonInstrFormatsV4.td b/lib/Target/Hexagon/HexagonInstrFormatsV4.td
>> index bd5e449..49741a3 100644
>> --- a/lib/Target/Hexagon/HexagonInstrFormatsV4.td
>> +++ b/lib/Target/Hexagon/HexagonInstrFormatsV4.td
>> @@ -11,11 +11,25 @@
>> //
>>
//===----------------------------------------------------------------------===//
>>
>> +//----------------------------------------------------------------------------//
>> +// Hexagon Intruction Flags +
>> +//
>> +// *** Must match BaseInfo.h ***
>> +//----------------------------------------------------------------------------//
>> +
>> +def TypeMEMOP : Type<9>;
>> +def TypeNV : Type<10>;
>> +def TypePREFIX : Type<30>;
>> +
>> +//----------------------------------------------------------------------------//
>> +// Intruction Classes Definitions +
>> +//----------------------------------------------------------------------------//
>> +
>> //
>> // NV type instructions.
>> //
>> class NVInst_V4<dag outs, dag ins, string asmstr, list<dag> pattern>
>> - : InstHexagon<outs, ins, asmstr, pattern, "", NV_V4> {
>> + : InstHexagon<outs, ins, asmstr, pattern, "", NV_V4, TypeNV> {
>> bits<5> rd;
>> bits<5> rs;
>> bits<13> imm13;
>> @@ -24,7 +38,7 @@ class NVInst_V4<dag outs, dag ins, string asmstr, list<dag> pattern>
>> // Definition of Post increment new value store.
>> class NVInstPost_V4<dag outs, dag ins, string asmstr, list<dag> pattern,
>> string cstr>
>> - : InstHexagon<outs, ins, asmstr, pattern, cstr, NV_V4> {
>> + : InstHexagon<outs, ins, asmstr, pattern, cstr, NV_V4, TypeNV> {
>> bits<5> rd;
>> bits<5> rs;
>> bits<5> rt;
>> @@ -39,8 +53,15 @@ class NVInstPI_V4<dag outs, dag ins, string asmstr, list<dag> pattern,
>> }
>>
>> class MEMInst_V4<dag outs, dag ins, string asmstr, list<dag> pattern>
>> - : InstHexagon<outs, ins, asmstr, pattern, "", MEM_V4> {
>> + : InstHexagon<outs, ins, asmstr, pattern, "", MEM_V4, TypeMEMOP> {
>> bits<5> rd;
>> bits<5> rs;
>> bits<6> imm6;
>> }
>> +
>> +class Immext<dag outs, dag ins, string asmstr, list<dag> pattern>
>> + : InstHexagon<outs, ins, asmstr, pattern, "", PREFIX, TypePREFIX> {
>> + let isCodeGenOnly = 1;
>> +
>> + bits<26> imm26;
>> +}
>> diff --git a/lib/Target/Hexagon/HexagonInstrInfo.cpp b/lib/Target/Hexagon/HexagonInstrInfo.cpp
>> index 77b3663..30aca6c 100644
>> --- a/lib/Target/Hexagon/HexagonInstrInfo.cpp
>> +++ b/lib/Target/Hexagon/HexagonInstrInfo.cpp
>> @@ -11,10 +11,10 @@
>> //
>>
//===----------------------------------------------------------------------===//
>>
>> -#include "Hexagon.h"
>> #include "HexagonInstrInfo.h"
>> #include "HexagonRegisterInfo.h"
>> #include "HexagonSubtarget.h"
>> +#include "Hexagon.h"
>> #include "llvm/ADT/STLExtras.h"
>> #include "llvm/ADT/SmallVector.h"
>> #include "llvm/CodeGen/DFAPacketizer.h"
>> @@ -466,7 +466,862 @@ unsigned HexagonInstrInfo::createVR(MachineFunction* MF, MVT VT) const {
>> return NewReg;
>> }
>>
>> +bool HexagonInstrInfo::isExtendable(const MachineInstr *MI) const {
>> + switch(MI->getOpcode()) {
>> + // JMP_EQri
>> + case Hexagon::JMP_EQriPt_nv_V4:
>> + case Hexagon::JMP_EQriPnt_nv_V4:
>> + case Hexagon::JMP_EQriNotPt_nv_V4:
>> + case Hexagon::JMP_EQriNotPnt_nv_V4:
>> +
>> + // JMP_EQri - with -1
>> + case Hexagon::JMP_EQriPtneg_nv_V4:
>> + case Hexagon::JMP_EQriPntneg_nv_V4:
>> + case Hexagon::JMP_EQriNotPtneg_nv_V4:
>> + case Hexagon::JMP_EQriNotPntneg_nv_V4:
>> +
>> + // JMP_EQrr
>> + case Hexagon::JMP_EQrrPt_nv_V4:
>> + case Hexagon::JMP_EQrrPnt_nv_V4:
>> + case Hexagon::JMP_EQrrNotPt_nv_V4:
>> + case Hexagon::JMP_EQrrNotPnt_nv_V4:
>> +
>> + // JMP_GTri
>> + case Hexagon::JMP_GTriPt_nv_V4:
>> + case Hexagon::JMP_GTriPnt_nv_V4:
>> + case Hexagon::JMP_GTriNotPt_nv_V4:
>> + case Hexagon::JMP_GTriNotPnt_nv_V4:
>> +
>> + // JMP_GTri - with -1
>> + case Hexagon::JMP_GTriPtneg_nv_V4:
>> + case Hexagon::JMP_GTriPntneg_nv_V4:
>> + case Hexagon::JMP_GTriNotPtneg_nv_V4:
>> + case Hexagon::JMP_GTriNotPntneg_nv_V4:
>> +
>> + // JMP_GTrr
>> + case Hexagon::JMP_GTrrPt_nv_V4:
>> + case Hexagon::JMP_GTrrPnt_nv_V4:
>> + case Hexagon::JMP_GTrrNotPt_nv_V4:
>> + case Hexagon::JMP_GTrrNotPnt_nv_V4:
>> +
>> + // JMP_GTrrdn
>> + case Hexagon::JMP_GTrrdnPt_nv_V4:
>> + case Hexagon::JMP_GTrrdnPnt_nv_V4:
>> + case Hexagon::JMP_GTrrdnNotPt_nv_V4:
>> + case Hexagon::JMP_GTrrdnNotPnt_nv_V4:
>> +
>> + // JMP_GTUri
>> + case Hexagon::JMP_GTUriPt_nv_V4:
>> + case Hexagon::JMP_GTUriPnt_nv_V4:
>> + case Hexagon::JMP_GTUriNotPt_nv_V4:
>> + case Hexagon::JMP_GTUriNotPnt_nv_V4:
>> +
>> + // JMP_GTUrr
>> + case Hexagon::JMP_GTUrrPt_nv_V4:
>> + case Hexagon::JMP_GTUrrPnt_nv_V4:
>> + case Hexagon::JMP_GTUrrNotPt_nv_V4:
>> + case Hexagon::JMP_GTUrrNotPnt_nv_V4:
>> +
>> + // JMP_GTUrrdn
>> + case Hexagon::JMP_GTUrrdnPt_nv_V4:
>> + case Hexagon::JMP_GTUrrdnPnt_nv_V4:
>> + case Hexagon::JMP_GTUrrdnNotPt_nv_V4:
>> + case Hexagon::JMP_GTUrrdnNotPnt_nv_V4:
>> + return true;
>> +
>> + // TFR_FI
>> + case Hexagon::TFR_FI:
>> + return true;
>> +
>> +
>> + default:
>> + return false;
>> + }
>> + return false;
>> +}
>> +
>> +bool HexagonInstrInfo::isExtended(const MachineInstr *MI) const {
>> + switch(MI->getOpcode()) {
>> + // JMP_EQri
>> + case Hexagon::JMP_EQriPt_ie_nv_V4:
>> + case Hexagon::JMP_EQriPnt_ie_nv_V4:
>> + case Hexagon::JMP_EQriNotPt_ie_nv_V4:
>> + case Hexagon::JMP_EQriNotPnt_ie_nv_V4:
>> +
>> + // JMP_EQri - with -1
>> + case Hexagon::JMP_EQriPtneg_ie_nv_V4:
>> + case Hexagon::JMP_EQriPntneg_ie_nv_V4:
>> + case Hexagon::JMP_EQriNotPtneg_ie_nv_V4:
>> + case Hexagon::JMP_EQriNotPntneg_ie_nv_V4:
>> +
>> + // JMP_EQrr
>> + case Hexagon::JMP_EQrrPt_ie_nv_V4:
>> + case Hexagon::JMP_EQrrPnt_ie_nv_V4:
>> + case Hexagon::JMP_EQrrNotPt_ie_nv_V4:
>> + case Hexagon::JMP_EQrrNotPnt_ie_nv_V4:
>> +
>> + // JMP_GTri
>> + case Hexagon::JMP_GTriPt_ie_nv_V4:
>> + case Hexagon::JMP_GTriPnt_ie_nv_V4:
>> + case Hexagon::JMP_GTriNotPt_ie_nv_V4:
>> + case Hexagon::JMP_GTriNotPnt_ie_nv_V4:
>> +
>> + // JMP_GTri - with -1
>> + case Hexagon::JMP_GTriPtneg_ie_nv_V4:
>> + case Hexagon::JMP_GTriPntneg_ie_nv_V4:
>> + case Hexagon::JMP_GTriNotPtneg_ie_nv_V4:
>> + case Hexagon::JMP_GTriNotPntneg_ie_nv_V4:
>> +
>> + // JMP_GTrr
>> + case Hexagon::JMP_GTrrPt_ie_nv_V4:
>> + case Hexagon::JMP_GTrrPnt_ie_nv_V4:
>> + case Hexagon::JMP_GTrrNotPt_ie_nv_V4:
>> + case Hexagon::JMP_GTrrNotPnt_ie_nv_V4:
>> +
>> + // JMP_GTrrdn
>> + case Hexagon::JMP_GTrrdnPt_ie_nv_V4:
>> + case Hexagon::JMP_GTrrdnPnt_ie_nv_V4:
>> + case Hexagon::JMP_GTrrdnNotPt_ie_nv_V4:
>> + case Hexagon::JMP_GTrrdnNotPnt_ie_nv_V4:
>> +
>> + // JMP_GTUri
>> + case Hexagon::JMP_GTUriPt_ie_nv_V4:
>> + case Hexagon::JMP_GTUriPnt_ie_nv_V4:
>> + case Hexagon::JMP_GTUriNotPt_ie_nv_V4:
>> + case Hexagon::JMP_GTUriNotPnt_ie_nv_V4:
>> +
>> + // JMP_GTUrr
>> + case Hexagon::JMP_GTUrrPt_ie_nv_V4:
>> + case Hexagon::JMP_GTUrrPnt_ie_nv_V4:
>> + case Hexagon::JMP_GTUrrNotPt_ie_nv_V4:
>> + case Hexagon::JMP_GTUrrNotPnt_ie_nv_V4:
>> +
>> + // JMP_GTUrrdn
>> + case Hexagon::JMP_GTUrrdnPt_ie_nv_V4:
>> + case Hexagon::JMP_GTUrrdnPnt_ie_nv_V4:
>> + case Hexagon::JMP_GTUrrdnNotPt_ie_nv_V4:
>> + case Hexagon::JMP_GTUrrdnNotPnt_ie_nv_V4:
>> +
>> + // V4 absolute set addressing.
>> + case Hexagon::LDrid_abs_setimm_V4:
>> + case Hexagon::LDriw_abs_setimm_V4:
>> + case Hexagon::LDrih_abs_setimm_V4:
>> + case Hexagon::LDrib_abs_setimm_V4:
>> + case Hexagon::LDriuh_abs_setimm_V4:
>> + case Hexagon::LDriub_abs_setimm_V4:
>> +
>> + case Hexagon::STrid_abs_setimm_V4:
>> + case Hexagon::STrib_abs_setimm_V4:
>> + case Hexagon::STrih_abs_setimm_V4:
>> + case Hexagon::STriw_abs_setimm_V4:
>> +
>> + // V4 global address load.
>> + case Hexagon::LDrid_GP_cPt_V4 :
>> + case Hexagon::LDrid_GP_cNotPt_V4 :
>> + case Hexagon::LDrid_GP_cdnPt_V4 :
>> + case Hexagon::LDrid_GP_cdnNotPt_V4 :
>> + case Hexagon::LDrib_GP_cPt_V4 :
>> + case Hexagon::LDrib_GP_cNotPt_V4 :
>> + case Hexagon::LDrib_GP_cdnPt_V4 :
>> + case Hexagon::LDrib_GP_cdnNotPt_V4 :
>> + case Hexagon::LDriub_GP_cPt_V4 :
>> + case Hexagon::LDriub_GP_cNotPt_V4 :
>> + case Hexagon::LDriub_GP_cdnPt_V4 :
>> + case Hexagon::LDriub_GP_cdnNotPt_V4 :
>> + case Hexagon::LDrih_GP_cPt_V4 :
>> + case Hexagon::LDrih_GP_cNotPt_V4 :
>> + case Hexagon::LDrih_GP_cdnPt_V4 :
>> + case Hexagon::LDrih_GP_cdnNotPt_V4 :
>> + case Hexagon::LDriuh_GP_cPt_V4 :
>> + case Hexagon::LDriuh_GP_cNotPt_V4 :
>> + case Hexagon::LDriuh_GP_cdnPt_V4 :
>> + case Hexagon::LDriuh_GP_cdnNotPt_V4 :
>> + case Hexagon::LDriw_GP_cPt_V4 :
>> + case Hexagon::LDriw_GP_cNotPt_V4 :
>> + case Hexagon::LDriw_GP_cdnPt_V4 :
>> + case Hexagon::LDriw_GP_cdnNotPt_V4 :
>> + case Hexagon::LDd_GP_cPt_V4 :
>> + case Hexagon::LDd_GP_cNotPt_V4 :
>> + case Hexagon::LDd_GP_cdnPt_V4 :
>> + case Hexagon::LDd_GP_cdnNotPt_V4 :
>> + case Hexagon::LDb_GP_cPt_V4 :
>> + case Hexagon::LDb_GP_cNotPt_V4 :
>> + case Hexagon::LDb_GP_cdnPt_V4 :
>> + case Hexagon::LDb_GP_cdnNotPt_V4 :
>> + case Hexagon::LDub_GP_cPt_V4 :
>> + case Hexagon::LDub_GP_cNotPt_V4 :
>> + case Hexagon::LDub_GP_cdnPt_V4 :
>> + case Hexagon::LDub_GP_cdnNotPt_V4 :
>> + case Hexagon::LDh_GP_cPt_V4 :
>> + case Hexagon::LDh_GP_cNotPt_V4 :
>> + case Hexagon::LDh_GP_cdnPt_V4 :
>> + case Hexagon::LDh_GP_cdnNotPt_V4 :
>> + case Hexagon::LDuh_GP_cPt_V4 :
>> + case Hexagon::LDuh_GP_cNotPt_V4 :
>> + case Hexagon::LDuh_GP_cdnPt_V4 :
>> + case Hexagon::LDuh_GP_cdnNotPt_V4 :
>> + case Hexagon::LDw_GP_cPt_V4 :
>> + case Hexagon::LDw_GP_cNotPt_V4 :
>> + case Hexagon::LDw_GP_cdnPt_V4 :
>> + case Hexagon::LDw_GP_cdnNotPt_V4 :
>> +
>> + // V4 global address store.
>> + case Hexagon::STrid_GP_cPt_V4 :
>> + case Hexagon::STrid_GP_cNotPt_V4 :
>> + case Hexagon::STrid_GP_cdnPt_V4 :
>> + case Hexagon::STrid_GP_cdnNotPt_V4 :
>> + case Hexagon::STrib_GP_cPt_V4 :
>> + case Hexagon::STrib_GP_cNotPt_V4 :
>> + case Hexagon::STrib_GP_cdnPt_V4 :
>> + case Hexagon::STrib_GP_cdnNotPt_V4 :
>> + case Hexagon::STrih_GP_cPt_V4 :
>> + case Hexagon::STrih_GP_cNotPt_V4 :
>> + case Hexagon::STrih_GP_cdnPt_V4 :
>> + case Hexagon::STrih_GP_cdnNotPt_V4 :
>> + case Hexagon::STriw_GP_cPt_V4 :
>> + case Hexagon::STriw_GP_cNotPt_V4 :
>> + case Hexagon::STriw_GP_cdnPt_V4 :
>> + case Hexagon::STriw_GP_cdnNotPt_V4 :
>> + case Hexagon::STd_GP_cPt_V4 :
>> + case Hexagon::STd_GP_cNotPt_V4 :
>> + case Hexagon::STd_GP_cdnPt_V4 :
>> + case Hexagon::STd_GP_cdnNotPt_V4 :
>> + case Hexagon::STb_GP_cPt_V4 :
>> + case Hexagon::STb_GP_cNotPt_V4 :
>> + case Hexagon::STb_GP_cdnPt_V4 :
>> + case Hexagon::STb_GP_cdnNotPt_V4 :
>> + case Hexagon::STh_GP_cPt_V4 :
>> + case Hexagon::STh_GP_cNotPt_V4 :
>> + case Hexagon::STh_GP_cdnPt_V4 :
>> + case Hexagon::STh_GP_cdnNotPt_V4 :
>> + case Hexagon::STw_GP_cPt_V4 :
>> + case Hexagon::STw_GP_cNotPt_V4 :
>> + case Hexagon::STw_GP_cdnPt_V4 :
>> + case Hexagon::STw_GP_cdnNotPt_V4 :
>> +
>> + // V4 predicated global address new value store.
>> + case Hexagon::STrib_GP_cPt_nv_V4 :
>> + case Hexagon::STrib_GP_cNotPt_nv_V4 :
>> + case Hexagon::STrib_GP_cdnPt_nv_V4 :
>> + case Hexagon::STrib_GP_cdnNotPt_nv_V4 :
>> + case Hexagon::STrih_GP_cPt_nv_V4 :
>> + case Hexagon::STrih_GP_cNotPt_nv_V4 :
>> + case Hexagon::STrih_GP_cdnPt_nv_V4 :
>> + case Hexagon::STrih_GP_cdnNotPt_nv_V4 :
>> + case Hexagon::STriw_GP_cPt_nv_V4 :
>> + case Hexagon::STriw_GP_cNotPt_nv_V4 :
>> + case Hexagon::STriw_GP_cdnPt_nv_V4 :
>> + case Hexagon::STriw_GP_cdnNotPt_nv_V4 :
>> + case Hexagon::STb_GP_cPt_nv_V4 :
>> + case Hexagon::STb_GP_cNotPt_nv_V4 :
>> + case Hexagon::STb_GP_cdnPt_nv_V4 :
>> + case Hexagon::STb_GP_cdnNotPt_nv_V4 :
>> + case Hexagon::STh_GP_cPt_nv_V4 :
>> + case Hexagon::STh_GP_cNotPt_nv_V4 :
>> + case Hexagon::STh_GP_cdnPt_nv_V4 :
>> + case Hexagon::STh_GP_cdnNotPt_nv_V4 :
>> + case Hexagon::STw_GP_cPt_nv_V4 :
>> + case Hexagon::STw_GP_cNotPt_nv_V4 :
>> + case Hexagon::STw_GP_cdnPt_nv_V4 :
>> + case Hexagon::STw_GP_cdnNotPt_nv_V4 :
>> +
>> + // TFR_FI
>> + case Hexagon::TFR_FI_immext_V4:
>> + return true;
>> +
>> + default:
>> + return false;
>> + }
>> + return false;
>> +}
>> +
>> +bool HexagonInstrInfo::isNewValueJump(const MachineInstr *MI) const {
>> + switch (MI->getOpcode()) {
>> + // JMP_EQri
>> + case Hexagon::JMP_EQriPt_nv_V4:
>> + case Hexagon::JMP_EQriPnt_nv_V4:
>> + case Hexagon::JMP_EQriNotPt_nv_V4:
>> + case Hexagon::JMP_EQriNotPnt_nv_V4:
>> + case Hexagon::JMP_EQriPt_ie_nv_V4:
>> + case Hexagon::JMP_EQriPnt_ie_nv_V4:
>> + case Hexagon::JMP_EQriNotPt_ie_nv_V4:
>> + case Hexagon::JMP_EQriNotPnt_ie_nv_V4:
>> +
>> + // JMP_EQri - with -1
>> + case Hexagon::JMP_EQriPtneg_nv_V4:
>> + case Hexagon::JMP_EQriPntneg_nv_V4:
>> + case Hexagon::JMP_EQriNotPtneg_nv_V4:
>> + case Hexagon::JMP_EQriNotPntneg_nv_V4:
>> + case Hexagon::JMP_EQriPtneg_ie_nv_V4:
>> + case Hexagon::JMP_EQriPntneg_ie_nv_V4:
>> + case Hexagon::JMP_EQriNotPtneg_ie_nv_V4:
>> + case Hexagon::JMP_EQriNotPntneg_ie_nv_V4:
>> +
>> + // JMP_EQrr
>> + case Hexagon::JMP_EQrrPt_nv_V4:
>> + case Hexagon::JMP_EQrrPnt_nv_V4:
>> + case Hexagon::JMP_EQrrNotPt_nv_V4:
>> + case Hexagon::JMP_EQrrNotPnt_nv_V4:
>> + case Hexagon::JMP_EQrrPt_ie_nv_V4:
>> + case Hexagon::JMP_EQrrPnt_ie_nv_V4:
>> + case Hexagon::JMP_EQrrNotPt_ie_nv_V4:
>> + case Hexagon::JMP_EQrrNotPnt_ie_nv_V4:
>> +
>> + // JMP_GTri
>> + case Hexagon::JMP_GTriPt_nv_V4:
>> + case Hexagon::JMP_GTriPnt_nv_V4:
>> + case Hexagon::JMP_GTriNotPt_nv_V4:
>> + case Hexagon::JMP_GTriNotPnt_nv_V4:
>> + case Hexagon::JMP_GTriPt_ie_nv_V4:
>> + case Hexagon::JMP_GTriPnt_ie_nv_V4:
>> + case Hexagon::JMP_GTriNotPt_ie_nv_V4:
>> + case Hexagon::JMP_GTriNotPnt_ie_nv_V4:
>> +
>> + // JMP_GTri - with -1
>> + case Hexagon::JMP_GTriPtneg_nv_V4:
>> + case Hexagon::JMP_GTriPntneg_nv_V4:
>> + case Hexagon::JMP_GTriNotPtneg_nv_V4:
>> + case Hexagon::JMP_GTriNotPntneg_nv_V4:
>> + case Hexagon::JMP_GTriPtneg_ie_nv_V4:
>> + case Hexagon::JMP_GTriPntneg_ie_nv_V4:
>> + case Hexagon::JMP_GTriNotPtneg_ie_nv_V4:
>> + case Hexagon::JMP_GTriNotPntneg_ie_nv_V4:
>> +
>> + // JMP_GTrr
>> + case Hexagon::JMP_GTrrPt_nv_V4:
>> + case Hexagon::JMP_GTrrPnt_nv_V4:
>> + case Hexagon::JMP_GTrrNotPt_nv_V4:
>> + case Hexagon::JMP_GTrrNotPnt_nv_V4:
>> + case Hexagon::JMP_GTrrPt_ie_nv_V4:
>> + case Hexagon::JMP_GTrrPnt_ie_nv_V4:
>> + case Hexagon::JMP_GTrrNotPt_ie_nv_V4:
>> + case Hexagon::JMP_GTrrNotPnt_ie_nv_V4:
>> +
>> + // JMP_GTrrdn
>> + case Hexagon::JMP_GTrrdnPt_nv_V4:
>> + case Hexagon::JMP_GTrrdnPnt_nv_V4:
>> + case Hexagon::JMP_GTrrdnNotPt_nv_V4:
>> + case Hexagon::JMP_GTrrdnNotPnt_nv_V4:
>> + case Hexagon::JMP_GTrrdnPt_ie_nv_V4:
>> + case Hexagon::JMP_GTrrdnPnt_ie_nv_V4:
>> + case Hexagon::JMP_GTrrdnNotPt_ie_nv_V4:
>> + case Hexagon::JMP_GTrrdnNotPnt_ie_nv_V4:
>> +
>> + // JMP_GTUri
>> + case Hexagon::JMP_GTUriPt_nv_V4:
>> + case Hexagon::JMP_GTUriPnt_nv_V4:
>> + case Hexagon::JMP_GTUriNotPt_nv_V4:
>> + case Hexagon::JMP_GTUriNotPnt_nv_V4:
>> + case Hexagon::JMP_GTUriPt_ie_nv_V4:
>> + case Hexagon::JMP_GTUriPnt_ie_nv_V4:
>> + case Hexagon::JMP_GTUriNotPt_ie_nv_V4:
>> + case Hexagon::JMP_GTUriNotPnt_ie_nv_V4:
>> +
>> + // JMP_GTUrr
>> + case Hexagon::JMP_GTUrrPt_nv_V4:
>> + case Hexagon::JMP_GTUrrPnt_nv_V4:
>> + case Hexagon::JMP_GTUrrNotPt_nv_V4:
>> + case Hexagon::JMP_GTUrrNotPnt_nv_V4:
>> + case Hexagon::JMP_GTUrrPt_ie_nv_V4:
>> + case Hexagon::JMP_GTUrrPnt_ie_nv_V4:
>> + case Hexagon::JMP_GTUrrNotPt_ie_nv_V4:
>> + case Hexagon::JMP_GTUrrNotPnt_ie_nv_V4:
>> +
>> + // JMP_GTUrrdn
>> + case Hexagon::JMP_GTUrrdnPt_nv_V4:
>> + case Hexagon::JMP_GTUrrdnPnt_nv_V4:
>> + case Hexagon::JMP_GTUrrdnNotPt_nv_V4:
>> + case Hexagon::JMP_GTUrrdnNotPnt_nv_V4:
>> + case Hexagon::JMP_GTUrrdnPt_ie_nv_V4:
>> + case Hexagon::JMP_GTUrrdnPnt_ie_nv_V4:
>> + case Hexagon::JMP_GTUrrdnNotPt_ie_nv_V4:
>> + case Hexagon::JMP_GTUrrdnNotPnt_ie_nv_V4:
>> + return true;
>> +
>> + default:
>> + return false;
>> + }
>> + return false;
>> +}
>> +
>> +unsigned HexagonInstrInfo::getImmExtForm(const MachineInstr* MI) const {
>> + switch(MI->getOpcode()) {
>> + // JMP_EQri
>> + case Hexagon::JMP_EQriPt_nv_V4:
>> + return Hexagon::JMP_EQriPt_ie_nv_V4;
>> + case Hexagon::JMP_EQriNotPt_nv_V4:
>> + return Hexagon::JMP_EQriNotPt_ie_nv_V4;
>> + case Hexagon::JMP_EQriPnt_nv_V4:
>> + return Hexagon::JMP_EQriPnt_ie_nv_V4;
>> + case Hexagon::JMP_EQriNotPnt_nv_V4:
>> + return Hexagon::JMP_EQriNotPnt_ie_nv_V4;
>> +
>> + // JMP_EQri -- with -1
>> + case Hexagon::JMP_EQriPtneg_nv_V4:
>> + return Hexagon::JMP_EQriPtneg_ie_nv_V4;
>> + case Hexagon::JMP_EQriNotPtneg_nv_V4:
>> + return Hexagon::JMP_EQriNotPtneg_ie_nv_V4;
>> + case Hexagon::JMP_EQriPntneg_nv_V4:
>> + return Hexagon::JMP_EQriPntneg_ie_nv_V4;
>> + case Hexagon::JMP_EQriNotPntneg_nv_V4:
>> + return Hexagon::JMP_EQriNotPntneg_ie_nv_V4;
>> +
>> + // JMP_EQrr
>> + case Hexagon::JMP_EQrrPt_nv_V4:
>> + return Hexagon::JMP_EQrrPt_ie_nv_V4;
>> + case Hexagon::JMP_EQrrNotPt_nv_V4:
>> + return Hexagon::JMP_EQrrNotPt_ie_nv_V4;
>> + case Hexagon::JMP_EQrrPnt_nv_V4:
>> + return Hexagon::JMP_EQrrPnt_ie_nv_V4;
>> + case Hexagon::JMP_EQrrNotPnt_nv_V4:
>> + return Hexagon::JMP_EQrrNotPnt_ie_nv_V4;
>> +
>> + // JMP_GTri
>> + case Hexagon::JMP_GTriPt_nv_V4:
>> + return Hexagon::JMP_GTriPt_ie_nv_V4;
>> + case Hexagon::JMP_GTriNotPt_nv_V4:
>> + return Hexagon::JMP_GTriNotPt_ie_nv_V4;
>> + case Hexagon::JMP_GTriPnt_nv_V4:
>> + return Hexagon::JMP_GTriPnt_ie_nv_V4;
>> + case Hexagon::JMP_GTriNotPnt_nv_V4:
>> + return Hexagon::JMP_GTriNotPnt_ie_nv_V4;
>> +
>> + // JMP_GTri -- with -1
>> + case Hexagon::JMP_GTriPtneg_nv_V4:
>> + return Hexagon::JMP_GTriPtneg_ie_nv_V4;
>> + case Hexagon::JMP_GTriNotPtneg_nv_V4:
>> + return Hexagon::JMP_GTriNotPtneg_ie_nv_V4;
>> + case Hexagon::JMP_GTriPntneg_nv_V4:
>> + return Hexagon::JMP_GTriPntneg_ie_nv_V4;
>> + case Hexagon::JMP_GTriNotPntneg_nv_V4:
>> + return Hexagon::JMP_GTriNotPntneg_ie_nv_V4;
>> +
>> + // JMP_GTrr
>> + case Hexagon::JMP_GTrrPt_nv_V4:
>> + return Hexagon::JMP_GTrrPt_ie_nv_V4;
>> + case Hexagon::JMP_GTrrNotPt_nv_V4:
>> + return Hexagon::JMP_GTrrNotPt_ie_nv_V4;
>> + case Hexagon::JMP_GTrrPnt_nv_V4:
>> + return Hexagon::JMP_GTrrPnt_ie_nv_V4;
>> + case Hexagon::JMP_GTrrNotPnt_nv_V4:
>> + return Hexagon::JMP_GTrrNotPnt_ie_nv_V4;
>> +
>> + // JMP_GTrrdn
>> + case Hexagon::JMP_GTrrdnPt_nv_V4:
>> + return Hexagon::JMP_GTrrdnPt_ie_nv_V4;
>> + case Hexagon::JMP_GTrrdnNotPt_nv_V4:
>> + return Hexagon::JMP_GTrrdnNotPt_ie_nv_V4;
>> + case Hexagon::JMP_GTrrdnPnt_nv_V4:
>> + return Hexagon::JMP_GTrrdnPnt_ie_nv_V4;
>> + case Hexagon::JMP_GTrrdnNotPnt_nv_V4:
>> + return Hexagon::JMP_GTrrdnNotPnt_ie_nv_V4;
>> +
>> + // JMP_GTUri
>> + case Hexagon::JMP_GTUriPt_nv_V4:
>> + return Hexagon::JMP_GTUriPt_ie_nv_V4;
>> + case Hexagon::JMP_GTUriNotPt_nv_V4:
>> + return Hexagon::JMP_GTUriNotPt_ie_nv_V4;
>> + case Hexagon::JMP_GTUriPnt_nv_V4:
>> + return Hexagon::JMP_GTUriPnt_ie_nv_V4;
>> + case Hexagon::JMP_GTUriNotPnt_nv_V4:
>> + return Hexagon::JMP_GTUriNotPnt_ie_nv_V4;
>> +
>> + // JMP_GTUrr
>> + case Hexagon::JMP_GTUrrPt_nv_V4:
>> + return Hexagon::JMP_GTUrrPt_ie_nv_V4;
>> + case Hexagon::JMP_GTUrrNotPt_nv_V4:
>> + return Hexagon::JMP_GTUrrNotPt_ie_nv_V4;
>> + case Hexagon::JMP_GTUrrPnt_nv_V4:
>> + return Hexagon::JMP_GTUrrPnt_ie_nv_V4;
>> + case Hexagon::JMP_GTUrrNotPnt_nv_V4:
>> + return Hexagon::JMP_GTUrrNotPnt_ie_nv_V4;
>> +
>> + // JMP_GTUrrdn
>> + case Hexagon::JMP_GTUrrdnPt_nv_V4:
>> + return Hexagon::JMP_GTUrrdnPt_ie_nv_V4;
>> + case Hexagon::JMP_GTUrrdnNotPt_nv_V4:
>> + return Hexagon::JMP_GTUrrdnNotPt_ie_nv_V4;
>> + case Hexagon::JMP_GTUrrdnPnt_nv_V4:
>> + return Hexagon::JMP_GTUrrdnPnt_ie_nv_V4;
>> + case Hexagon::JMP_GTUrrdnNotPnt_nv_V4:
>> + return Hexagon::JMP_GTUrrdnNotPnt_ie_nv_V4;
>> +
>> + case Hexagon::TFR_FI:
>> + return Hexagon::TFR_FI_immext_V4;
>> +
>> + case Hexagon::MEMw_ADDSUBi_indexed_MEM_V4 :
>> + case Hexagon::MEMw_ADDi_indexed_MEM_V4 :
>> + case Hexagon::MEMw_SUBi_indexed_MEM_V4 :
>> + case Hexagon::MEMw_ADDr_indexed_MEM_V4 :
>> + case Hexagon::MEMw_SUBr_indexed_MEM_V4 :
>> + case Hexagon::MEMw_ANDr_indexed_MEM_V4 :
>> + case Hexagon::MEMw_ORr_indexed_MEM_V4 :
>> + case Hexagon::MEMw_ADDSUBi_MEM_V4 :
>> + case Hexagon::MEMw_ADDi_MEM_V4 :
>> + case Hexagon::MEMw_SUBi_MEM_V4 :
>> + case Hexagon::MEMw_ADDr_MEM_V4 :
>> + case Hexagon::MEMw_SUBr_MEM_V4 :
>> + case Hexagon::MEMw_ANDr_MEM_V4 :
>> + case Hexagon::MEMw_ORr_MEM_V4 :
>> + case Hexagon::MEMh_ADDSUBi_indexed_MEM_V4 :
>> + case Hexagon::MEMh_ADDi_indexed_MEM_V4 :
>> + case Hexagon::MEMh_SUBi_indexed_MEM_V4 :
>> + case Hexagon::MEMh_ADDr_indexed_MEM_V4 :
>> + case Hexagon::MEMh_SUBr_indexed_MEM_V4 :
>> + case Hexagon::MEMh_ANDr_indexed_MEM_V4 :
>> + case Hexagon::MEMh_ORr_indexed_MEM_V4 :
>> + case Hexagon::MEMh_ADDSUBi_MEM_V4 :
>> + case Hexagon::MEMh_ADDi_MEM_V4 :
>> + case Hexagon::MEMh_SUBi_MEM_V4 :
>> + case Hexagon::MEMh_ADDr_MEM_V4 :
>> + case Hexagon::MEMh_SUBr_MEM_V4 :
>> + case Hexagon::MEMh_ANDr_MEM_V4 :
>> + case Hexagon::MEMh_ORr_MEM_V4 :
>> + case Hexagon::MEMb_ADDSUBi_indexed_MEM_V4 :
>> + case Hexagon::MEMb_ADDi_indexed_MEM_V4 :
>> + case Hexagon::MEMb_SUBi_indexed_MEM_V4 :
>> + case Hexagon::MEMb_ADDr_indexed_MEM_V4 :
>> + case Hexagon::MEMb_SUBr_indexed_MEM_V4 :
>> + case Hexagon::MEMb_ANDr_indexed_MEM_V4 :
>> + case Hexagon::MEMb_ORr_indexed_MEM_V4 :
>> + case Hexagon::MEMb_ADDSUBi_MEM_V4 :
>> + case Hexagon::MEMb_ADDi_MEM_V4 :
>> + case Hexagon::MEMb_SUBi_MEM_V4 :
>> + case Hexagon::MEMb_ADDr_MEM_V4 :
>> + case Hexagon::MEMb_SUBr_MEM_V4 :
>> + case Hexagon::MEMb_ANDr_MEM_V4 :
>> + case Hexagon::MEMb_ORr_MEM_V4 :
>> + default: llvm_unreachable("Unknown type of
instruction.");
>> + }
>> +}
>> +
>> +unsigned HexagonInstrInfo::getNormalBranchForm(const MachineInstr* MI) const {
>> + switch(MI->getOpcode()) {
>> + default: llvm_unreachable("Unknown type of jump
instruction.");
>> + // JMP_EQri
>> + case Hexagon::JMP_EQriPt_ie_nv_V4:
>> + return Hexagon::JMP_EQriPt_nv_V4;
>> + case Hexagon::JMP_EQriNotPt_ie_nv_V4:
>> + return Hexagon::JMP_EQriNotPt_nv_V4;
>> + case Hexagon::JMP_EQriPnt_ie_nv_V4:
>> + return Hexagon::JMP_EQriPnt_nv_V4;
>> + case Hexagon::JMP_EQriNotPnt_ie_nv_V4:
>> + return Hexagon::JMP_EQriNotPnt_nv_V4;
>> +
>> + // JMP_EQri -- with -1
>> + case Hexagon::JMP_EQriPtneg_ie_nv_V4:
>> + return Hexagon::JMP_EQriPtneg_nv_V4;
>> + case Hexagon::JMP_EQriNotPtneg_ie_nv_V4:
>> + return Hexagon::JMP_EQriNotPtneg_nv_V4;
>> + case Hexagon::JMP_EQriPntneg_ie_nv_V4:
>> + return Hexagon::JMP_EQriPntneg_nv_V4;
>> + case Hexagon::JMP_EQriNotPntneg_ie_nv_V4:
>> + return Hexagon::JMP_EQriNotPntneg_nv_V4;
>> +
>> + // JMP_EQrr
>> + case Hexagon::JMP_EQrrPt_ie_nv_V4:
>> + return Hexagon::JMP_EQrrPt_nv_V4;
>> + case Hexagon::JMP_EQrrNotPt_ie_nv_V4:
>> + return Hexagon::JMP_EQrrNotPt_nv_V4;
>> + case Hexagon::JMP_EQrrPnt_ie_nv_V4:
>> + return Hexagon::JMP_EQrrPnt_nv_V4;
>> + case Hexagon::JMP_EQrrNotPnt_ie_nv_V4:
>> + return Hexagon::JMP_EQrrNotPnt_nv_V4;
>> +
>> + // JMP_GTri
>> + case Hexagon::JMP_GTriPt_ie_nv_V4:
>> + return Hexagon::JMP_GTriPt_nv_V4;
>> + case Hexagon::JMP_GTriNotPt_ie_nv_V4:
>> + return Hexagon::JMP_GTriNotPt_nv_V4;
>> + case Hexagon::JMP_GTriPnt_ie_nv_V4:
>> + return Hexagon::JMP_GTriPnt_nv_V4;
>> + case Hexagon::JMP_GTriNotPnt_ie_nv_V4:
>> + return Hexagon::JMP_GTriNotPnt_nv_V4;
>> +
>> + // JMP_GTri -- with -1
>> + case Hexagon::JMP_GTriPtneg_ie_nv_V4:
>> + return Hexagon::JMP_GTriPtneg_nv_V4;
>> + case Hexagon::JMP_GTriNotPtneg_ie_nv_V4:
>> + return Hexagon::JMP_GTriNotPtneg_nv_V4;
>> + case Hexagon::JMP_GTriPntneg_ie_nv_V4:
>> + return Hexagon::JMP_GTriPntneg_nv_V4;
>> + case Hexagon::JMP_GTriNotPntneg_ie_nv_V4:
>> + return Hexagon::JMP_GTriNotPntneg_nv_V4;
>> +
>> + // JMP_GTrr
>> + case Hexagon::JMP_GTrrPt_ie_nv_V4:
>> + return Hexagon::JMP_GTrrPt_nv_V4;
>> + case Hexagon::JMP_GTrrNotPt_ie_nv_V4:
>> + return Hexagon::JMP_GTrrNotPt_nv_V4;
>> + case Hexagon::JMP_GTrrPnt_ie_nv_V4:
>> + return Hexagon::JMP_GTrrPnt_nv_V4;
>> + case Hexagon::JMP_GTrrNotPnt_ie_nv_V4:
>> + return Hexagon::JMP_GTrrNotPnt_nv_V4;
>> +
>> + // JMP_GTrrdn
>> + case Hexagon::JMP_GTrrdnPt_ie_nv_V4:
>> + return Hexagon::JMP_GTrrdnPt_nv_V4;
>> + case Hexagon::JMP_GTrrdnNotPt_ie_nv_V4:
>> + return Hexagon::JMP_GTrrdnNotPt_nv_V4;
>> + case Hexagon::JMP_GTrrdnPnt_ie_nv_V4:
>> + return Hexagon::JMP_GTrrdnPnt_nv_V4;
>> + case Hexagon::JMP_GTrrdnNotPnt_ie_nv_V4:
>> + return Hexagon::JMP_GTrrdnNotPnt_nv_V4;
>> +
>> + // JMP_GTUri
>> + case Hexagon::JMP_GTUriPt_ie_nv_V4:
>> + return Hexagon::JMP_GTUriPt_nv_V4;
>> + case Hexagon::JMP_GTUriNotPt_ie_nv_V4:
>> + return Hexagon::JMP_GTUriNotPt_nv_V4;
>> + case Hexagon::JMP_GTUriPnt_ie_nv_V4:
>> + return Hexagon::JMP_GTUriPnt_nv_V4;
>> + case Hexagon::JMP_GTUriNotPnt_ie_nv_V4:
>> + return Hexagon::JMP_GTUriNotPnt_nv_V4;
>> +
>> + // JMP_GTUrr
>> + case Hexagon::JMP_GTUrrPt_ie_nv_V4:
>> + return Hexagon::JMP_GTUrrPt_nv_V4;
>> + case Hexagon::JMP_GTUrrNotPt_ie_nv_V4:
>> + return Hexagon::JMP_GTUrrNotPt_nv_V4;
>> + case Hexagon::JMP_GTUrrPnt_ie_nv_V4:
>> + return Hexagon::JMP_GTUrrPnt_nv_V4;
>> + case Hexagon::JMP_GTUrrNotPnt_ie_nv_V4:
>> + return Hexagon::JMP_GTUrrNotPnt_nv_V4;
>> +
>> + // JMP_GTUrrdn
>> + case Hexagon::JMP_GTUrrdnPt_ie_nv_V4:
>> + return Hexagon::JMP_GTUrrdnPt_nv_V4;
>> + case Hexagon::JMP_GTUrrdnNotPt_ie_nv_V4:
>> + return Hexagon::JMP_GTUrrdnNotPt_nv_V4;
>> + case Hexagon::JMP_GTUrrdnPnt_ie_nv_V4:
>> + return Hexagon::JMP_GTUrrdnPnt_nv_V4;
>> + case Hexagon::JMP_GTUrrdnNotPnt_ie_nv_V4:
>> + return Hexagon::JMP_GTUrrdnNotPnt_nv_V4;
>> + }
>> +}
>> +
>>
>> +bool HexagonInstrInfo::isNewValueStore(const MachineInstr *MI) const {
>> + switch (MI->getOpcode()) {
>> +
>> + // Store Byte
>> + case Hexagon::STrib_nv_V4:
>> + case Hexagon::STrib_indexed_nv_V4:
>> + case Hexagon::STrib_indexed_shl_nv_V4:
>> + case Hexagon::STrib_shl_nv_V4:
>> + case Hexagon::STrib_GP_nv_V4:
>> + case Hexagon::STb_GP_nv_V4:
>> + case Hexagon::POST_STbri_nv_V4:
>> + case Hexagon::STrib_cPt_nv_V4:
>> + case Hexagon::STrib_cdnPt_nv_V4:
>> + case Hexagon::STrib_cNotPt_nv_V4:
>> + case Hexagon::STrib_cdnNotPt_nv_V4:
>> + case Hexagon::STrib_indexed_cPt_nv_V4:
>> + case Hexagon::STrib_indexed_cdnPt_nv_V4:
>> + case Hexagon::STrib_indexed_cNotPt_nv_V4:
>> + case Hexagon::STrib_indexed_cdnNotPt_nv_V4:
>> + case Hexagon::STrib_indexed_shl_cPt_nv_V4:
>> + case Hexagon::STrib_indexed_shl_cdnPt_nv_V4:
>> + case Hexagon::STrib_indexed_shl_cNotPt_nv_V4:
>> + case Hexagon::STrib_indexed_shl_cdnNotPt_nv_V4:
>> + case Hexagon::POST_STbri_cPt_nv_V4:
>> + case Hexagon::POST_STbri_cdnPt_nv_V4:
>> + case Hexagon::POST_STbri_cNotPt_nv_V4:
>> + case Hexagon::POST_STbri_cdnNotPt_nv_V4:
>> + case Hexagon::STb_GP_cPt_nv_V4:
>> + case Hexagon::STb_GP_cNotPt_nv_V4:
>> + case Hexagon::STb_GP_cdnPt_nv_V4:
>> + case Hexagon::STb_GP_cdnNotPt_nv_V4:
>> + case Hexagon::STrib_GP_cPt_nv_V4:
>> + case Hexagon::STrib_GP_cNotPt_nv_V4:
>> + case Hexagon::STrib_GP_cdnPt_nv_V4:
>> + case Hexagon::STrib_GP_cdnNotPt_nv_V4:
>> + case Hexagon::STrib_abs_nv_V4:
>> + case Hexagon::STrib_abs_cPt_nv_V4:
>> + case Hexagon::STrib_abs_cdnPt_nv_V4:
>> + case Hexagon::STrib_abs_cNotPt_nv_V4:
>> + case Hexagon::STrib_abs_cdnNotPt_nv_V4:
>> + case Hexagon::STrib_imm_abs_nv_V4:
>> + case Hexagon::STrib_imm_abs_cPt_nv_V4:
>> + case Hexagon::STrib_imm_abs_cdnPt_nv_V4:
>> + case Hexagon::STrib_imm_abs_cNotPt_nv_V4:
>> + case Hexagon::STrib_imm_abs_cdnNotPt_nv_V4:
>> +
>> + // Store Halfword
>> + case Hexagon::STrih_nv_V4:
>> + case Hexagon::STrih_indexed_nv_V4:
>> + case Hexagon::STrih_indexed_shl_nv_V4:
>> + case Hexagon::STrih_shl_nv_V4:
>> + case Hexagon::STrih_GP_nv_V4:
>> + case Hexagon::STh_GP_nv_V4:
>> + case Hexagon::POST_SThri_nv_V4:
>> + case Hexagon::STrih_cPt_nv_V4:
>> + case Hexagon::STrih_cdnPt_nv_V4:
>> + case Hexagon::STrih_cNotPt_nv_V4:
>> + case Hexagon::STrih_cdnNotPt_nv_V4:
>> + case Hexagon::STrih_indexed_cPt_nv_V4:
>> + case Hexagon::STrih_indexed_cdnPt_nv_V4:
>> + case Hexagon::STrih_indexed_cNotPt_nv_V4:
>> + case Hexagon::STrih_indexed_cdnNotPt_nv_V4:
>> + case Hexagon::STrih_indexed_shl_cPt_nv_V4:
>> + case Hexagon::STrih_indexed_shl_cdnPt_nv_V4:
>> + case Hexagon::STrih_indexed_shl_cNotPt_nv_V4:
>> + case Hexagon::STrih_indexed_shl_cdnNotPt_nv_V4:
>> + case Hexagon::POST_SThri_cPt_nv_V4:
>> + case Hexagon::POST_SThri_cdnPt_nv_V4:
>> + case Hexagon::POST_SThri_cNotPt_nv_V4:
>> + case Hexagon::POST_SThri_cdnNotPt_nv_V4:
>> + case Hexagon::STh_GP_cPt_nv_V4:
>> + case Hexagon::STh_GP_cNotPt_nv_V4:
>> + case Hexagon::STh_GP_cdnPt_nv_V4:
>> + case Hexagon::STh_GP_cdnNotPt_nv_V4:
>> + case Hexagon::STrih_GP_cPt_nv_V4:
>> + case Hexagon::STrih_GP_cNotPt_nv_V4:
>> + case Hexagon::STrih_GP_cdnPt_nv_V4:
>> + case Hexagon::STrih_GP_cdnNotPt_nv_V4:
>> + case Hexagon::STrih_abs_nv_V4:
>> + case Hexagon::STrih_abs_cPt_nv_V4:
>> + case Hexagon::STrih_abs_cdnPt_nv_V4:
>> + case Hexagon::STrih_abs_cNotPt_nv_V4:
>> + case Hexagon::STrih_abs_cdnNotPt_nv_V4:
>> + case Hexagon::STrih_imm_abs_nv_V4:
>> + case Hexagon::STrih_imm_abs_cPt_nv_V4:
>> + case Hexagon::STrih_imm_abs_cdnPt_nv_V4:
>> + case Hexagon::STrih_imm_abs_cNotPt_nv_V4:
>> + case Hexagon::STrih_imm_abs_cdnNotPt_nv_V4:
>> +
>> + // Store Word
>> + case Hexagon::STriw_nv_V4:
>> + case Hexagon::STriw_indexed_nv_V4:
>> + case Hexagon::STriw_indexed_shl_nv_V4:
>> + case Hexagon::STriw_shl_nv_V4:
>> + case Hexagon::STriw_GP_nv_V4:
>> + case Hexagon::STw_GP_nv_V4:
>> + case Hexagon::POST_STwri_nv_V4:
>> + case Hexagon::STriw_cPt_nv_V4:
>> + case Hexagon::STriw_cdnPt_nv_V4:
>> + case Hexagon::STriw_cNotPt_nv_V4:
>> + case Hexagon::STriw_cdnNotPt_nv_V4:
>> + case Hexagon::STriw_indexed_cPt_nv_V4:
>> + case Hexagon::STriw_indexed_cdnPt_nv_V4:
>> + case Hexagon::STriw_indexed_cNotPt_nv_V4:
>> + case Hexagon::STriw_indexed_cdnNotPt_nv_V4:
>> + case Hexagon::STriw_indexed_shl_cPt_nv_V4:
>> + case Hexagon::STriw_indexed_shl_cdnPt_nv_V4:
>> + case Hexagon::STriw_indexed_shl_cNotPt_nv_V4:
>> + case Hexagon::STriw_indexed_shl_cdnNotPt_nv_V4:
>> + case Hexagon::POST_STwri_cPt_nv_V4:
>> + case Hexagon::POST_STwri_cdnPt_nv_V4:
>> + case Hexagon::POST_STwri_cNotPt_nv_V4:
>> + case Hexagon::POST_STwri_cdnNotPt_nv_V4:
>> + case Hexagon::STw_GP_cPt_nv_V4:
>> + case Hexagon::STw_GP_cNotPt_nv_V4:
>> + case Hexagon::STw_GP_cdnPt_nv_V4:
>> + case Hexagon::STw_GP_cdnNotPt_nv_V4:
>> + case Hexagon::STriw_GP_cPt_nv_V4:
>> + case Hexagon::STriw_GP_cNotPt_nv_V4:
>> + case Hexagon::STriw_GP_cdnPt_nv_V4:
>> + case Hexagon::STriw_GP_cdnNotPt_nv_V4:
>> + case Hexagon::STriw_abs_nv_V4:
>> + case Hexagon::STriw_abs_cPt_nv_V4:
>> + case Hexagon::STriw_abs_cdnPt_nv_V4:
>> + case Hexagon::STriw_abs_cNotPt_nv_V4:
>> + case Hexagon::STriw_abs_cdnNotPt_nv_V4:
>> + case Hexagon::STriw_imm_abs_nv_V4:
>> + case Hexagon::STriw_imm_abs_cPt_nv_V4:
>> + case Hexagon::STriw_imm_abs_cdnPt_nv_V4:
>> + case Hexagon::STriw_imm_abs_cNotPt_nv_V4:
>> + case Hexagon::STriw_imm_abs_cdnNotPt_nv_V4:
>> + return true;
>> +
>> + default:
>> + return false;
>> + }
>> + return false;
>> +}
>> +
>> +bool HexagonInstrInfo::isPostIncrement (const MachineInstr* MI) const {
>> + switch (MI->getOpcode())
>> + {
>> + // Load Byte
>> + case Hexagon::POST_LDrib:
>> + case Hexagon::POST_LDrib_cPt:
>> + case Hexagon::POST_LDrib_cNotPt:
>> + case Hexagon::POST_LDrib_cdnPt_V4:
>> + case Hexagon::POST_LDrib_cdnNotPt_V4:
>> +
>> + // Load unsigned byte
>> + case Hexagon::POST_LDriub:
>> + case Hexagon::POST_LDriub_cPt:
>> + case Hexagon::POST_LDriub_cNotPt:
>> + case Hexagon::POST_LDriub_cdnPt_V4:
>> + case Hexagon::POST_LDriub_cdnNotPt_V4:
>> +
>> + // Load halfword
>> + case Hexagon::POST_LDrih:
>> + case Hexagon::POST_LDrih_cPt:
>> + case Hexagon::POST_LDrih_cNotPt:
>> + case Hexagon::POST_LDrih_cdnPt_V4:
>> + case Hexagon::POST_LDrih_cdnNotPt_V4:
>> +
>> + // Load unsigned halfword
>> + case Hexagon::POST_LDriuh:
>> + case Hexagon::POST_LDriuh_cPt:
>> + case Hexagon::POST_LDriuh_cNotPt:
>> + case Hexagon::POST_LDriuh_cdnPt_V4:
>> + case Hexagon::POST_LDriuh_cdnNotPt_V4:
>> +
>> + // Load word
>> + case Hexagon::POST_LDriw:
>> + case Hexagon::POST_LDriw_cPt:
>> + case Hexagon::POST_LDriw_cNotPt:
>> + case Hexagon::POST_LDriw_cdnPt_V4:
>> + case Hexagon::POST_LDriw_cdnNotPt_V4:
>> +
>> + // Load double word
>> + case Hexagon::POST_LDrid:
>> + case Hexagon::POST_LDrid_cPt:
>> + case Hexagon::POST_LDrid_cNotPt:
>> + case Hexagon::POST_LDrid_cdnPt_V4:
>> + case Hexagon::POST_LDrid_cdnNotPt_V4:
>> +
>> + // Store byte
>> + case Hexagon::POST_STbri:
>> + case Hexagon::POST_STbri_cPt:
>> + case Hexagon::POST_STbri_cNotPt:
>> + case Hexagon::POST_STbri_cdnPt_V4:
>> + case Hexagon::POST_STbri_cdnNotPt_V4:
>> +
>> + // Store halfword
>> + case Hexagon::POST_SThri:
>> + case Hexagon::POST_SThri_cPt:
>> + case Hexagon::POST_SThri_cNotPt:
>> + case Hexagon::POST_SThri_cdnPt_V4:
>> + case Hexagon::POST_SThri_cdnNotPt_V4:
>> +
>> + // Store word
>> + case Hexagon::POST_STwri:
>> + case Hexagon::POST_STwri_cPt:
>> + case Hexagon::POST_STwri_cNotPt:
>> + case Hexagon::POST_STwri_cdnPt_V4:
>> + case Hexagon::POST_STwri_cdnNotPt_V4:
>> +
>> + // Store double word
>> + case Hexagon::POST_STdri:
>> + case Hexagon::POST_STdri_cPt:
>> + case Hexagon::POST_STdri_cNotPt:
>> + case Hexagon::POST_STdri_cdnPt_V4:
>> + case Hexagon::POST_STdri_cdnNotPt_V4:
>> + return true;
>> +
>> + default:
>> + return false;
>> + }
>> +}
>> +
>> +bool HexagonInstrInfo::isSaveCalleeSavedRegsCall(const MachineInstr *MI) const {
>> + return MI->getOpcode() == Hexagon::SAVE_REGISTERS_CALL_V4;
>> +}
>>
>> bool HexagonInstrInfo::isPredicable(MachineInstr *MI) const {
>> bool isPred = MI->getDesc().isPredicable();
>> @@ -559,6 +1414,7 @@ bool HexagonInstrInfo::isPredicable(MachineInstr *MI) const {
>>
>> unsigned HexagonInstrInfo::getInvertedPredicatedOpcode(const int Opc) const {
>> switch(Opc) {
>> +  default: llvm_unreachable("Unexpected predicated instruction");
>> case Hexagon::TFR_cPt:
>> return Hexagon::TFR_cNotPt;
>> case Hexagon::TFR_cNotPt:
>> @@ -1009,9 +1865,6 @@ unsigned HexagonInstrInfo::getInvertedPredicatedOpcode(const int Opc) const {
>> return Hexagon::JMP_GTUrrdnNotPnt_nv_V4;
>> case Hexagon::JMP_GTUrrdnNotPnt_nv_V4:
>> return Hexagon::JMP_GTUrrdnPnt_nv_V4;
>> -
>> - default:
>> - llvm_unreachable("Unexpected predicated instruction");
>> }
>> }
>>
>> @@ -1587,6 +2440,24 @@ isSpillPredRegOp(const MachineInstr *MI) const {
>> return false;
>> }
>>
>> +bool HexagonInstrInfo::
>> +isConditionalTransfer (const MachineInstr *MI) const {
>> + switch (MI->getOpcode()) {
>> + case Hexagon::TFR_cPt:
>> + case Hexagon::TFR_cNotPt:
>> + case Hexagon::TFRI_cPt:
>> + case Hexagon::TFRI_cNotPt:
>> + case Hexagon::TFR_cdnPt:
>> + case Hexagon::TFR_cdnNotPt:
>> + case Hexagon::TFRI_cdnPt:
>> + case Hexagon::TFRI_cdnNotPt:
>> + return true;
>> +
>> + default:
>> + return false;
>> + }
>> + return false;
>> +}
>>
>> bool HexagonInstrInfo::isConditionalALU32 (const MachineInstr* MI) const {
>> const HexagonRegisterInfo& QRI = getRegisterInfo();
>> @@ -1626,7 +2497,6 @@ bool HexagonInstrInfo::isConditionalALU32 (const MachineInstr* MI) const {
>> }
>> }
>>
>> -
>> bool HexagonInstrInfo::
>> isConditionalLoad (const MachineInstr* MI) const {
>> const HexagonRegisterInfo& QRI = getRegisterInfo();
>> @@ -1700,6 +2570,136 @@ isConditionalLoad (const MachineInstr* MI) const {
>> }
>> }
>>
>> +// Returns true if an instruction is a conditional store.
>> +//
>> +// Note: It doesn't include conditional new-value stores as they can't be
>> +// converted to .new predicate.
>> +//
>> +//    p.new NV store  [ if(p0.new)memw(R0+#0)=R2.new ]
>> +//                 ^          ^
>> +//                /            \  (not OK. it will cause new-value store to be
>> +//               /              X  conditional on p0.new while R2 producer is
>> +//              /                \  on p0)
>> +//             /                  \.
>> +//      p.new store              p.old NV store
>> +// [if(p0.new)memw(R0+#0)=R2]  [if(p0)memw(R0+#0)=R2.new]
>> +//              ^                 ^
>> +//               \               /
>> +//                \             /
>> +//                 \           /
>> +//                  p.old store
>> +//           [if (p0)memw(R0+#0)=R2]
>> +//
>> +// The above diagram shows the steps involved in the conversion of a
>> +// predicated store instruction to its .new predicated new-value form.
>> +//
>> +// The following set of instructions further explains the scenario where a
>> +// conditional new-value store becomes invalid when promoted to .new
>> +// predicate form.
>> +//
>> +// { 1) if (p0) r0 = add(r1, r2)
>> +// 2) p0 = cmp.eq(r3, #0) }
>> +//
>> +// 3) if (p0) memb(r1+#0) = r0  --> this instruction can't be grouped with
>> +// the first two instructions because in instr 1, r0 is conditional on the
>> +// old value of p0, but its use in instr 3 is conditional on the p0 modified
>> +// by instr 2, which is not valid for new-value stores.
>> +bool HexagonInstrInfo::
>> +isConditionalStore (const MachineInstr* MI) const {
>> + const HexagonRegisterInfo& QRI = getRegisterInfo();
>> + switch (MI->getOpcode())
>> + {
>> + case Hexagon::STrib_imm_cPt_V4 :
>> + case Hexagon::STrib_imm_cNotPt_V4 :
>> + case Hexagon::STrib_indexed_shl_cPt_V4 :
>> + case Hexagon::STrib_indexed_shl_cNotPt_V4 :
>> + case Hexagon::STrib_cPt :
>> + case Hexagon::STrib_cNotPt :
>> + case Hexagon::POST_STbri_cPt :
>> + case Hexagon::POST_STbri_cNotPt :
>> + case Hexagon::STrid_indexed_cPt :
>> + case Hexagon::STrid_indexed_cNotPt :
>> + case Hexagon::STrid_indexed_shl_cPt_V4 :
>> + case Hexagon::POST_STdri_cPt :
>> + case Hexagon::POST_STdri_cNotPt :
>> + case Hexagon::STrih_cPt :
>> + case Hexagon::STrih_cNotPt :
>> + case Hexagon::STrih_indexed_cPt :
>> + case Hexagon::STrih_indexed_cNotPt :
>> + case Hexagon::STrih_imm_cPt_V4 :
>> + case Hexagon::STrih_imm_cNotPt_V4 :
>> + case Hexagon::STrih_indexed_shl_cPt_V4 :
>> + case Hexagon::STrih_indexed_shl_cNotPt_V4 :
>> + case Hexagon::POST_SThri_cPt :
>> + case Hexagon::POST_SThri_cNotPt :
>> + case Hexagon::STriw_cPt :
>> + case Hexagon::STriw_cNotPt :
>> + case Hexagon::STriw_indexed_cPt :
>> + case Hexagon::STriw_indexed_cNotPt :
>> + case Hexagon::STriw_imm_cPt_V4 :
>> + case Hexagon::STriw_imm_cNotPt_V4 :
>> + case Hexagon::STriw_indexed_shl_cPt_V4 :
>> + case Hexagon::STriw_indexed_shl_cNotPt_V4 :
>> + case Hexagon::POST_STwri_cPt :
>> + case Hexagon::POST_STwri_cNotPt :
>> + return QRI.Subtarget.hasV4TOps();
>> +
>> + // V4 global address store before promoting to dot new.
>> + case Hexagon::STrid_GP_cPt_V4 :
>> + case Hexagon::STrid_GP_cNotPt_V4 :
>> + case Hexagon::STrib_GP_cPt_V4 :
>> + case Hexagon::STrib_GP_cNotPt_V4 :
>> + case Hexagon::STrih_GP_cPt_V4 :
>> + case Hexagon::STrih_GP_cNotPt_V4 :
>> + case Hexagon::STriw_GP_cPt_V4 :
>> + case Hexagon::STriw_GP_cNotPt_V4 :
>> + case Hexagon::STd_GP_cPt_V4 :
>> + case Hexagon::STd_GP_cNotPt_V4 :
>> + case Hexagon::STb_GP_cPt_V4 :
>> + case Hexagon::STb_GP_cNotPt_V4 :
>> + case Hexagon::STh_GP_cPt_V4 :
>> + case Hexagon::STh_GP_cNotPt_V4 :
>> + case Hexagon::STw_GP_cPt_V4 :
>> + case Hexagon::STw_GP_cNotPt_V4 :
>> + return QRI.Subtarget.hasV4TOps();
>> +
>> + // Predicated new-value stores (i.e. if (p0) memw(..)=r0.new) are excluded
>> + // from the "Conditional Store" list, because a predicated new-value store
>> + // would NOT be promoted to a double dot-new store. See the diagram below.
>> + // This function returns true for those stores that are predicated but not
>> + // yet promoted to predicate dot-new instructions.
>> + //
>> + //                  +---------------------+
>> + //            /-----| if (p0) memw(..)=r0 |---------\~
>> + //           ||     +---------------------+         ||
>> + //  promote  ||        /\       /\                  ||  promote
>> + //           ||       /||\     /||\                 ||
>> + //          \||/     demote     ||                 \||/
>> + //           \/        ||       ||                  \/
>> + //  +-------------------------+  ||  +-------------------------+
>> + //  | if (p0.new) memw(..)=r0 |  ||  | if (p0) memw(..)=r0.new |
>> + //  +-------------------------+  ||  +-------------------------+
>> + //              ||              ||              ||
>> + //              ||            demote           \||/
>> + //           promote            ||              \/   NOT possible
>> + //              ||              ||              /\~
>> + //             \||/             ||             /||\~
>> + //              \/              ||              ||
>> + //              +-----------------------------+
>> + //              | if (p0.new) memw(..)=r0.new |
>> + //              +-----------------------------+
>> + //                   Double Dot New Store
>> + //
>> +
>> + default:
>> + return false;
>> +
>> + }
>> + return false;
>> +}
>> +
>> +
>> +
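[Reviewer note, not part of the patch] To make the rule in the comment above concrete: a predicated store may take the .new predicate form only when its predicate register is defined by another instruction already in the packet, and only if it is not itself a new-value store. A minimal sketch, assuming the usual VLIWPacketizerList setup where the current packet's instructions are available; the helper name and the PredReg/Packet parameters are my own illustration, only isConditionalStore/isNewValueStore come from this patch:

// Illustrative sketch only -- not code from this patch.
static bool canPromoteToDotNew(const HexagonInstrInfo &HII,
                               const MachineInstr *MI, unsigned PredReg,
                               const std::vector<MachineInstr *> &Packet) {
  // New-value stores are excluded: promoting one would make the store read
  // the new predicate value while its data producer is still guarded by the
  // old predicate (the "NOT possible" edge in the diagram above).
  if (!HII.isConditionalStore(MI) || HII.isNewValueStore(MI))
    return false;
  // The .new form is legal exactly when the predicate is produced in packet.
  for (unsigned i = 0, e = Packet.size(); i != e; ++i)
    if (Packet[i]->modifiesRegister(PredReg, /*TRI=*/0))
      return true;
  return false; // Predicate comes from an earlier packet; keep the p.old form.
}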
>> DFAPacketizer *HexagonInstrInfo::
>> CreateTargetScheduleState(const TargetMachine *TM,
>> const ScheduleDAG *DAG) const {
>> diff --git a/lib/Target/Hexagon/HexagonInstrInfo.h b/lib/Target/Hexagon/HexagonInstrInfo.h
>> index 7306870..6a45871 100644
>> --- a/lib/Target/Hexagon/HexagonInstrInfo.h
>> +++ b/lib/Target/Hexagon/HexagonInstrInfo.h
>> @@ -160,10 +160,20 @@ public:
>> bool isS8_Immediate(const int value) const;
>> bool isS6_Immediate(const int value) const;
>>
>> + bool isSaveCalleeSavedRegsCall(const MachineInstr* MI) const;
>> + bool isConditionalTransfer(const MachineInstr* MI) const;
>> bool isConditionalALU32 (const MachineInstr* MI) const;
>> bool isConditionalLoad (const MachineInstr* MI) const;
>> + bool isConditionalStore(const MachineInstr* MI) const;
>> bool isDeallocRet(const MachineInstr *MI) const;
>> unsigned getInvertedPredicatedOpcode(const int Opc) const;
>> + bool isExtendable(const MachineInstr* MI) const;
>> + bool isExtended(const MachineInstr* MI) const;
>> + bool isPostIncrement(const MachineInstr* MI) const;
>> + bool isNewValueStore(const MachineInstr* MI) const;
>> + bool isNewValueJump(const MachineInstr* MI) const;
>> + unsigned getImmExtForm(const MachineInstr* MI) const;
>> + unsigned getNormalBranchForm(const MachineInstr* MI) const;
>>
>> private:
>> int getMatchingCondBranchOpcode(int Opc, bool sense) const;
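[Reviewer note, not part of the patch] The header now exposes quite a few opcode-classification hooks, so a throwaway helper like the sketch below is convenient while reviewing; it is purely my own illustration and only calls methods declared above:

// Illustrative debug helper -- not code from this patch.
#include "llvm/Support/raw_ostream.h"

static void dumpHexagonMIClass(const HexagonInstrInfo &HII,
                               const MachineInstr *MI) {
  // Print which of the new classification hooks fire for this instruction.
  llvm::errs() << "opcode " << MI->getOpcode()
               << " postinc=" << HII.isPostIncrement(MI)
               << " nvstore=" << HII.isNewValueStore(MI)
               << " nvjump=" << HII.isNewValueJump(MI)
               << " cond.transfer=" << HII.isConditionalTransfer(MI)
               << " cond.store=" << HII.isConditionalStore(MI)
               << " save.csr.call=" << HII.isSaveCalleeSavedRegsCall(MI)
               << "\n";
}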
>> diff --git a/lib/Target/Hexagon/HexagonInstrInfo.td b/lib/Target/Hexagon/HexagonInstrInfo.td
>> index b563ac3..e50d935 100644
>> --- a/lib/Target/Hexagon/HexagonInstrInfo.td
>> +++ b/lib/Target/Hexagon/HexagonInstrInfo.td
>> @@ -875,19 +875,19 @@ def LDrid_indexed : LDInst<(outs
DoubleRegs:$dst),
>>
s11_3ImmPred:$offset)))]>;
>>
>> let mayLoad = 1, neverHasSideEffects = 1 in
>> -def LDrid_GP : LDInst<(outs DoubleRegs:$dst),
>> +def LDrid_GP : LDInst2<(outs DoubleRegs:$dst),
>> (ins globaladdress:$global, u16Imm:$offset),
>> "$dst=memd(#$global+$offset)",
>> []>;
>>
>> let mayLoad = 1, neverHasSideEffects = 1 in
>> -def LDd_GP : LDInst<(outs DoubleRegs:$dst),
>> +def LDd_GP : LDInst2<(outs DoubleRegs:$dst),
>> (ins globaladdress:$global),
>> "$dst=memd(#$global)",
>> []>;
>>
>> let isPredicable = 1, mayLoad = 1, hasCtrlDep = 1,
neverHasSideEffects = 1 in
>> -def POST_LDrid : LDInstPI<(outs DoubleRegs:$dst, IntRegs:$dst2),
>> +def POST_LDrid : LDInst2PI<(outs DoubleRegs:$dst, IntRegs:$dst2),
>> (ins IntRegs:$src1, s4Imm:$offset),
>> "$dst = memd($src1++#$offset)",
>> [],
>> @@ -895,64 +895,64 @@ def POST_LDrid : LDInstPI<(outs
DoubleRegs:$dst, IntRegs:$dst2),
>>
>> // Load doubleword conditionally.
>> let mayLoad = 1, neverHasSideEffects = 1 in
>> -def LDrid_cPt : LDInst<(outs DoubleRegs:$dst),
>> +def LDrid_cPt : LDInst2<(outs DoubleRegs:$dst),
>> (ins PredRegs:$src1, MEMri:$addr),
>> "if ($src1) $dst = memd($addr)",
>> []>;
>>
>>
>> let mayLoad = 1, neverHasSideEffects = 1 in
>> -def LDrid_cNotPt : LDInst<(outs DoubleRegs:$dst),
>> +def LDrid_cNotPt : LDInst2<(outs DoubleRegs:$dst),
>> (ins PredRegs:$src1, MEMri:$addr),
>> "if (!$src1) $dst = memd($addr)",
>> []>;
>>
>> let mayLoad = 1, neverHasSideEffects = 1 in
>> -def LDrid_indexed_cPt : LDInst<(outs DoubleRegs:$dst),
>> +def LDrid_indexed_cPt : LDInst2<(outs DoubleRegs:$dst),
>> (ins PredRegs:$src1, IntRegs:$src2, u6_3Imm:$src3),
>> "if ($src1) $dst=memd($src2+#$src3)",
>> []>;
>>
>> let mayLoad = 1, neverHasSideEffects = 1 in
>> -def LDrid_indexed_cNotPt : LDInst<(outs DoubleRegs:$dst),
>> +def LDrid_indexed_cNotPt : LDInst2<(outs DoubleRegs:$dst),
>> (ins PredRegs:$src1, IntRegs:$src2, u6_3Imm:$src3),
>> "if (!$src1) $dst=memd($src2+#$src3)",
>> []>;
>>
>> let mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1 in
>> -def POST_LDrid_cPt : LDInstPI<(outs DoubleRegs:$dst1,
IntRegs:$dst2),
>> +def POST_LDrid_cPt : LDInst2PI<(outs DoubleRegs:$dst1,
IntRegs:$dst2),
>> (ins PredRegs:$src1, IntRegs:$src2, s4_3Imm:$src3),
>> "if ($src1) $dst1 = memd($src2++#$src3)",
>> [],
>> "$src2 = $dst2">;
>>
>> let mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1 in
>> -def POST_LDrid_cNotPt : LDInstPI<(outs DoubleRegs:$dst1,
IntRegs:$dst2),
>> +def POST_LDrid_cNotPt : LDInst2PI<(outs DoubleRegs:$dst1,
IntRegs:$dst2),
>> (ins PredRegs:$src1, IntRegs:$src2, s4_3Imm:$src3),
>> "if (!$src1) $dst1 = memd($src2++#$src3)",
>> [],
>> "$src2 = $dst2">;
>>
>> let mayLoad = 1, neverHasSideEffects = 1 in
>> -def LDrid_cdnPt : LDInst<(outs DoubleRegs:$dst),
>> +def LDrid_cdnPt : LDInst2<(outs DoubleRegs:$dst),
>> (ins PredRegs:$src1, MEMri:$addr),
>> "if ($src1.new) $dst = memd($addr)",
>> []>;
>>
>> let mayLoad = 1, neverHasSideEffects = 1 in
>> -def LDrid_cdnNotPt : LDInst<(outs DoubleRegs:$dst),
>> +def LDrid_cdnNotPt : LDInst2<(outs DoubleRegs:$dst),
>> (ins PredRegs:$src1, MEMri:$addr),
>> "if (!$src1.new) $dst = memd($addr)",
>> []>;
>>
>> let mayLoad = 1, neverHasSideEffects = 1 in
>> -def LDrid_indexed_cdnPt : LDInst<(outs DoubleRegs:$dst),
>> +def LDrid_indexed_cdnPt : LDInst2<(outs DoubleRegs:$dst),
>> (ins PredRegs:$src1, IntRegs:$src2, u6_3Imm:$src3),
>> "if ($src1.new) $dst=memd($src2+#$src3)",
>> []>;
>>
>> let mayLoad = 1, neverHasSideEffects = 1 in
>> -def LDrid_indexed_cdnNotPt : LDInst<(outs DoubleRegs:$dst),
>> +def LDrid_indexed_cdnNotPt : LDInst2<(outs DoubleRegs:$dst),
>> (ins PredRegs:$src1, IntRegs:$src2, u6_3Imm:$src3),
>> "if (!$src1.new) $dst=memd($src2+#$src3)",
>> []>;
>> @@ -988,25 +988,25 @@ def LDrib_ae_indexed : LDInst<(outs
IntRegs:$dst),
>>
s11_0ImmPred:$offset)))]>;
>>
>> let mayLoad = 1, neverHasSideEffects = 1 in
>> -def LDrib_GP : LDInst<(outs IntRegs:$dst),
>> +def LDrib_GP : LDInst2<(outs IntRegs:$dst),
>> (ins globaladdress:$global, u16Imm:$offset),
>> "$dst=memb(#$global+$offset)",
>> []>;
>>
>> let mayLoad = 1, neverHasSideEffects = 1 in
>> -def LDb_GP : LDInst<(outs IntRegs:$dst),
>> +def LDb_GP : LDInst2<(outs IntRegs:$dst),
>> (ins globaladdress:$global),
>> "$dst=memb(#$global)",
>> []>;
>>
>> let mayLoad = 1, neverHasSideEffects = 1 in
>> -def LDub_GP : LDInst<(outs IntRegs:$dst),
>> +def LDub_GP : LDInst2<(outs IntRegs:$dst),
>> (ins globaladdress:$global),
>> "$dst=memub(#$global)",
>> []>;
>>
>> let isPredicable = 1, mayLoad = 1, hasCtrlDep = 1,
neverHasSideEffects = 1 in
>> -def POST_LDrib : LDInstPI<(outs IntRegs:$dst, IntRegs:$dst2),
>> +def POST_LDrib : LDInst2PI<(outs IntRegs:$dst, IntRegs:$dst2),
>> (ins IntRegs:$src1, s4Imm:$offset),
>> "$dst = memb($src1++#$offset)",
>> [],
>> @@ -1014,63 +1014,63 @@ def POST_LDrib : LDInstPI<(outs
IntRegs:$dst, IntRegs:$dst2),
>>
>> // Load byte conditionally.
>> let mayLoad = 1, neverHasSideEffects = 1 in
>> -def LDrib_cPt : LDInst<(outs IntRegs:$dst),
>> +def LDrib_cPt : LDInst2<(outs IntRegs:$dst),
>> (ins PredRegs:$src1, MEMri:$addr),
>> "if ($src1) $dst = memb($addr)",
>> []>;
>>
>> let mayLoad = 1, neverHasSideEffects = 1 in
>> -def LDrib_cNotPt : LDInst<(outs IntRegs:$dst),
>> +def LDrib_cNotPt : LDInst2<(outs IntRegs:$dst),
>> (ins PredRegs:$src1, MEMri:$addr),
>> "if (!$src1) $dst = memb($addr)",
>> []>;
>>
>> let mayLoad = 1, neverHasSideEffects = 1 in
>> -def LDrib_indexed_cPt : LDInst<(outs IntRegs:$dst),
>> +def LDrib_indexed_cPt : LDInst2<(outs IntRegs:$dst),
>> (ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3),
>> "if ($src1) $dst = memb($src2+#$src3)",
>> []>;
>>
>> let mayLoad = 1, neverHasSideEffects = 1 in
>> -def LDrib_indexed_cNotPt : LDInst<(outs IntRegs:$dst),
>> +def LDrib_indexed_cNotPt : LDInst2<(outs IntRegs:$dst),
>> (ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3),
>> "if (!$src1) $dst = memb($src2+#$src3)",
>> []>;
>>
>> let mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1 in
>> -def POST_LDrib_cPt : LDInstPI<(outs IntRegs:$dst1, IntRegs:$dst2),
>> +def POST_LDrib_cPt : LDInst2PI<(outs IntRegs:$dst1, IntRegs:$dst2),
>> (ins PredRegs:$src1, IntRegs:$src2, s4_0Imm:$src3),
>> "if ($src1) $dst1 = memb($src2++#$src3)",
>> [],
>> "$src2 = $dst2">;
>>
>> let mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1 in
>> -def POST_LDrib_cNotPt : LDInstPI<(outs IntRegs:$dst1,
IntRegs:$dst2),
>> +def POST_LDrib_cNotPt : LDInst2PI<(outs IntRegs:$dst1,
IntRegs:$dst2),
>> (ins PredRegs:$src1, IntRegs:$src2, s4_0Imm:$src3),
>> "if (!$src1) $dst1 = memb($src2++#$src3)",
>> [],
>> "$src2 = $dst2">;
>>
>> let mayLoad = 1, neverHasSideEffects = 1 in
>> -def LDrib_cdnPt : LDInst<(outs IntRegs:$dst),
>> +def LDrib_cdnPt : LDInst2<(outs IntRegs:$dst),
>> (ins PredRegs:$src1, MEMri:$addr),
>> "if ($src1.new) $dst = memb($addr)",
>> []>;
>>
>> let mayLoad = 1, neverHasSideEffects = 1 in
>> -def LDrib_cdnNotPt : LDInst<(outs IntRegs:$dst),
>> +def LDrib_cdnNotPt : LDInst2<(outs IntRegs:$dst),
>> (ins PredRegs:$src1, MEMri:$addr),
>> "if (!$src1.new) $dst = memb($addr)",
>> []>;
>>
>> let mayLoad = 1, neverHasSideEffects = 1 in
>> -def LDrib_indexed_cdnPt : LDInst<(outs IntRegs:$dst),
>> +def LDrib_indexed_cdnPt : LDInst2<(outs IntRegs:$dst),
>> (ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3),
>> "if ($src1.new) $dst = memb($src2+#$src3)",
>> []>;
>>
>> let mayLoad = 1, neverHasSideEffects = 1 in
>> -def LDrib_indexed_cdnNotPt : LDInst<(outs IntRegs:$dst),
>> +def LDrib_indexed_cdnNotPt : LDInst2<(outs IntRegs:$dst),
>> (ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3),
>> "if (!$src1.new) $dst = memb($src2+#$src3)",
>> []>;
>> @@ -1103,26 +1103,26 @@ def LDrih_ae_indexed : LDInst<(outs
IntRegs:$dst),
>>
s11_1ImmPred:$offset)))]>;
>>
>> let mayLoad = 1, neverHasSideEffects = 1 in
>> -def LDrih_GP : LDInst<(outs IntRegs:$dst),
>> +def LDrih_GP : LDInst2<(outs IntRegs:$dst),
>> (ins globaladdress:$global, u16Imm:$offset),
>> "$dst=memh(#$global+$offset)",
>> []>;
>>
>> let mayLoad = 1, neverHasSideEffects = 1 in
>> -def LDh_GP : LDInst<(outs IntRegs:$dst),
>> +def LDh_GP : LDInst2<(outs IntRegs:$dst),
>> (ins globaladdress:$global),
>> "$dst=memh(#$global)",
>> []>;
>>
>> let mayLoad = 1, neverHasSideEffects = 1 in
>> -def LDuh_GP : LDInst<(outs IntRegs:$dst),
>> +def LDuh_GP : LDInst2<(outs IntRegs:$dst),
>> (ins globaladdress:$global),
>> "$dst=memuh(#$global)",
>> []>;
>>
>>
>> let isPredicable = 1, mayLoad = 1, hasCtrlDep = 1,
neverHasSideEffects = 1 in
>> -def POST_LDrih : LDInstPI<(outs IntRegs:$dst, IntRegs:$dst2),
>> +def POST_LDrih : LDInst2PI<(outs IntRegs:$dst, IntRegs:$dst2),
>> (ins IntRegs:$src1, s4Imm:$offset),
>> "$dst = memh($src1++#$offset)",
>> [],
>> @@ -1130,63 +1130,63 @@ def POST_LDrih : LDInstPI<(outs
IntRegs:$dst, IntRegs:$dst2),
>>
>> // Load halfword conditionally.
>> let mayLoad = 1, neverHasSideEffects = 1 in
>> -def LDrih_cPt : LDInst<(outs IntRegs:$dst),
>> +def LDrih_cPt : LDInst2<(outs IntRegs:$dst),
>> (ins PredRegs:$src1, MEMri:$addr),
>> "if ($src1) $dst = memh($addr)",
>> []>;
>>
>> let mayLoad = 1, neverHasSideEffects = 1 in
>> -def LDrih_cNotPt : LDInst<(outs IntRegs:$dst),
>> +def LDrih_cNotPt : LDInst2<(outs IntRegs:$dst),
>> (ins PredRegs:$src1, MEMri:$addr),
>> "if (!$src1) $dst = memh($addr)",
>> []>;
>>
>> let mayLoad = 1, neverHasSideEffects = 1 in
>> -def LDrih_indexed_cPt : LDInst<(outs IntRegs:$dst),
>> +def LDrih_indexed_cPt : LDInst2<(outs IntRegs:$dst),
>> (ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3),
>> "if ($src1) $dst = memh($src2+#$src3)",
>> []>;
>>
>> let mayLoad = 1, neverHasSideEffects = 1 in
>> -def LDrih_indexed_cNotPt : LDInst<(outs IntRegs:$dst),
>> +def LDrih_indexed_cNotPt : LDInst2<(outs IntRegs:$dst),
>> (ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3),
>> "if (!$src1) $dst = memh($src2+#$src3)",
>> []>;
>>
>> let mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1 in
>> -def POST_LDrih_cPt : LDInstPI<(outs IntRegs:$dst1, IntRegs:$dst2),
>> +def POST_LDrih_cPt : LDInst2PI<(outs IntRegs:$dst1, IntRegs:$dst2),
>> (ins PredRegs:$src1, IntRegs:$src2, s4_1Imm:$src3),
>> "if ($src1) $dst1 = memh($src2++#$src3)",
>> [],
>> "$src2 = $dst2">;
>>
>> let mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1 in
>> -def POST_LDrih_cNotPt : LDInstPI<(outs IntRegs:$dst1,
IntRegs:$dst2),
>> +def POST_LDrih_cNotPt : LDInst2PI<(outs IntRegs:$dst1,
IntRegs:$dst2),
>> (ins PredRegs:$src1, IntRegs:$src2, s4_1Imm:$src3),
>> "if (!$src1) $dst1 = memh($src2++#$src3)",
>> [],
>> "$src2 = $dst2">;
>>
>> let mayLoad = 1, neverHasSideEffects = 1 in
>> -def LDrih_cdnPt : LDInst<(outs IntRegs:$dst),
>> +def LDrih_cdnPt : LDInst2<(outs IntRegs:$dst),
>> (ins PredRegs:$src1, MEMri:$addr),
>> "if ($src1.new) $dst = memh($addr)",
>> []>;
>>
>> let mayLoad = 1, neverHasSideEffects = 1 in
>> -def LDrih_cdnNotPt : LDInst<(outs IntRegs:$dst),
>> +def LDrih_cdnNotPt : LDInst2<(outs IntRegs:$dst),
>> (ins PredRegs:$src1, MEMri:$addr),
>> "if (!$src1.new) $dst = memh($addr)",
>> []>;
>>
>> let mayLoad = 1, neverHasSideEffects = 1 in
>> -def LDrih_indexed_cdnPt : LDInst<(outs IntRegs:$dst),
>> +def LDrih_indexed_cdnPt : LDInst2<(outs IntRegs:$dst),
>> (ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3),
>> "if ($src1.new) $dst = memh($src2+#$src3)",
>> []>;
>>
>> let mayLoad = 1, neverHasSideEffects = 1 in
>> -def LDrih_indexed_cdnNotPt : LDInst<(outs IntRegs:$dst),
>> +def LDrih_indexed_cdnNotPt : LDInst2<(outs IntRegs:$dst),
>> (ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3),
>> "if (!$src1.new) $dst = memh($src2+#$src3)",
>> []>;
>> @@ -1232,13 +1232,13 @@ def LDriub_ae_indexed : LDInst<(outs
IntRegs:$dst),
>>
s11_0ImmPred:$offset)))]>;
>>
>> let mayLoad = 1, neverHasSideEffects = 1 in
>> -def LDriub_GP : LDInst<(outs IntRegs:$dst),
>> +def LDriub_GP : LDInst2<(outs IntRegs:$dst),
>> (ins globaladdress:$global, u16Imm:$offset),
>> "$dst=memub(#$global+$offset)",
>> []>;
>>
>> let isPredicable = 1, mayLoad = 1, hasCtrlDep = 1,
neverHasSideEffects = 1 in
>> -def POST_LDriub : LDInstPI<(outs IntRegs:$dst, IntRegs:$dst2),
>> +def POST_LDriub : LDInst2PI<(outs IntRegs:$dst, IntRegs:$dst2),
>> (ins IntRegs:$src1, s4Imm:$offset),
>> "$dst = memub($src1++#$offset)",
>> [],
>> @@ -1246,63 +1246,63 @@ def POST_LDriub : LDInstPI<(outs
IntRegs:$dst, IntRegs:$dst2),
>>
>> // Load unsigned byte conditionally.
>> let mayLoad = 1, neverHasSideEffects = 1 in
>> -def LDriub_cPt : LDInst<(outs IntRegs:$dst),
>> +def LDriub_cPt : LDInst2<(outs IntRegs:$dst),
>> (ins PredRegs:$src1, MEMri:$addr),
>> "if ($src1) $dst = memub($addr)",
>> []>;
>>
>> let mayLoad = 1, neverHasSideEffects = 1 in
>> -def LDriub_cNotPt : LDInst<(outs IntRegs:$dst),
>> +def LDriub_cNotPt : LDInst2<(outs IntRegs:$dst),
>> (ins PredRegs:$src1, MEMri:$addr),
>> "if (!$src1) $dst = memub($addr)",
>> []>;
>>
>> let mayLoad = 1, neverHasSideEffects = 1 in
>> -def LDriub_indexed_cPt : LDInst<(outs IntRegs:$dst),
>> +def LDriub_indexed_cPt : LDInst2<(outs IntRegs:$dst),
>> (ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3),
>> "if ($src1) $dst = memub($src2+#$src3)",
>> []>;
>>
>> let mayLoad = 1, neverHasSideEffects = 1 in
>> -def LDriub_indexed_cNotPt : LDInst<(outs IntRegs:$dst),
>> +def LDriub_indexed_cNotPt : LDInst2<(outs IntRegs:$dst),
>> (ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3),
>> "if (!$src1) $dst = memub($src2+#$src3)",
>> []>;
>>
>> let mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1 in
>> -def POST_LDriub_cPt : LDInstPI<(outs IntRegs:$dst1, IntRegs:$dst2),
>> +def POST_LDriub_cPt : LDInst2PI<(outs IntRegs:$dst1,
IntRegs:$dst2),
>> (ins PredRegs:$src1, IntRegs:$src2, s4_0Imm:$src3),
>> "if ($src1) $dst1 = memub($src2++#$src3)",
>> [],
>> "$src2 = $dst2">;
>>
>> let mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1 in
>> -def POST_LDriub_cNotPt : LDInstPI<(outs IntRegs:$dst1,
IntRegs:$dst2),
>> +def POST_LDriub_cNotPt : LDInst2PI<(outs IntRegs:$dst1,
IntRegs:$dst2),
>> (ins PredRegs:$src1, IntRegs:$src2, s4_0Imm:$src3),
>> "if (!$src1) $dst1 = memub($src2++#$src3)",
>> [],
>> "$src2 = $dst2">;
>>
>> let mayLoad = 1, neverHasSideEffects = 1 in
>> -def LDriub_cdnPt : LDInst<(outs IntRegs:$dst),
>> +def LDriub_cdnPt : LDInst2<(outs IntRegs:$dst),
>> (ins PredRegs:$src1, MEMri:$addr),
>> "if ($src1.new) $dst = memub($addr)",
>> []>;
>>
>> let mayLoad = 1, neverHasSideEffects = 1 in
>> -def LDriub_cdnNotPt : LDInst<(outs IntRegs:$dst),
>> +def LDriub_cdnNotPt : LDInst2<(outs IntRegs:$dst),
>> (ins PredRegs:$src1, MEMri:$addr),
>> "if (!$src1.new) $dst = memub($addr)",
>> []>;
>>
>> let mayLoad = 1, neverHasSideEffects = 1 in
>> -def LDriub_indexed_cdnPt : LDInst<(outs IntRegs:$dst),
>> +def LDriub_indexed_cdnPt : LDInst2<(outs IntRegs:$dst),
>> (ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3),
>> "if ($src1.new) $dst = memub($src2+#$src3)",
>> []>;
>>
>> let mayLoad = 1, neverHasSideEffects = 1 in
>> -def LDriub_indexed_cdnNotPt : LDInst<(outs IntRegs:$dst),
>> +def LDriub_indexed_cdnNotPt : LDInst2<(outs IntRegs:$dst),
>> (ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3),
>> "if (!$src1.new) $dst = memub($src2+#$src3)",
>> []>;
>> @@ -1337,13 +1337,13 @@ def LDriuh_ae_indexed : LDInst<(outs
IntRegs:$dst),
>>
s11_1ImmPred:$offset)))]>;
>>
>> let mayLoad = 1, neverHasSideEffects = 1 in
>> -def LDriuh_GP : LDInst<(outs IntRegs:$dst),
>> +def LDriuh_GP : LDInst2<(outs IntRegs:$dst),
>> (ins globaladdress:$global, u16Imm:$offset),
>> "$dst=memuh(#$global+$offset)",
>> []>;
>>
>> let isPredicable = 1, mayLoad = 1, hasCtrlDep = 1,
neverHasSideEffects = 1 in
>> -def POST_LDriuh : LDInstPI<(outs IntRegs:$dst, IntRegs:$dst2),
>> +def POST_LDriuh : LDInst2PI<(outs IntRegs:$dst, IntRegs:$dst2),
>> (ins IntRegs:$src1, s4Imm:$offset),
>> "$dst = memuh($src1++#$offset)",
>> [],
>> @@ -1351,63 +1351,63 @@ def POST_LDriuh : LDInstPI<(outs
IntRegs:$dst, IntRegs:$dst2),
>>
>> // Load unsigned halfword conditionally.
>> let mayLoad = 1, neverHasSideEffects = 1 in
>> -def LDriuh_cPt : LDInst<(outs IntRegs:$dst),
>> +def LDriuh_cPt : LDInst2<(outs IntRegs:$dst),
>> (ins PredRegs:$src1, MEMri:$addr),
>> "if ($src1) $dst = memuh($addr)",
>> []>;
>>
>> let mayLoad = 1, neverHasSideEffects = 1 in
>> -def LDriuh_cNotPt : LDInst<(outs IntRegs:$dst),
>> +def LDriuh_cNotPt : LDInst2<(outs IntRegs:$dst),
>> (ins PredRegs:$src1, MEMri:$addr),
>> "if (!$src1) $dst = memuh($addr)",
>> []>;
>>
>> let mayLoad = 1, neverHasSideEffects = 1 in
>> -def LDriuh_indexed_cPt : LDInst<(outs IntRegs:$dst),
>> +def LDriuh_indexed_cPt : LDInst2<(outs IntRegs:$dst),
>> (ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3),
>> "if ($src1) $dst = memuh($src2+#$src3)",
>> []>;
>>
>> let mayLoad = 1, neverHasSideEffects = 1 in
>> -def LDriuh_indexed_cNotPt : LDInst<(outs IntRegs:$dst),
>> +def LDriuh_indexed_cNotPt : LDInst2<(outs IntRegs:$dst),
>> (ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3),
>> "if (!$src1) $dst = memuh($src2+#$src3)",
>> []>;
>>
>> let mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1 in
>> -def POST_LDriuh_cPt : LDInstPI<(outs IntRegs:$dst1, IntRegs:$dst2),
>> +def POST_LDriuh_cPt : LDInst2PI<(outs IntRegs:$dst1,
IntRegs:$dst2),
>> (ins PredRegs:$src1, IntRegs:$src2, s4_1Imm:$src3),
>> "if ($src1) $dst1 = memuh($src2++#$src3)",
>> [],
>> "$src2 = $dst2">;
>>
>> let mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1 in
>> -def POST_LDriuh_cNotPt : LDInstPI<(outs IntRegs:$dst1,
IntRegs:$dst2),
>> +def POST_LDriuh_cNotPt : LDInst2PI<(outs IntRegs:$dst1,
IntRegs:$dst2),
>> (ins PredRegs:$src1, IntRegs:$src2, s4_1Imm:$src3),
>> "if (!$src1) $dst1 = memuh($src2++#$src3)",
>> [],
>> "$src2 = $dst2">;
>>
>> let mayLoad = 1, neverHasSideEffects = 1 in
>> -def LDriuh_cdnPt : LDInst<(outs IntRegs:$dst),
>> +def LDriuh_cdnPt : LDInst2<(outs IntRegs:$dst),
>> (ins PredRegs:$src1, MEMri:$addr),
>> "if ($src1.new) $dst = memuh($addr)",
>> []>;
>>
>> let mayLoad = 1, neverHasSideEffects = 1 in
>> -def LDriuh_cdnNotPt : LDInst<(outs IntRegs:$dst),
>> +def LDriuh_cdnNotPt : LDInst2<(outs IntRegs:$dst),
>> (ins PredRegs:$src1, MEMri:$addr),
>> "if (!$src1.new) $dst = memuh($addr)",
>> []>;
>>
>> let mayLoad = 1, neverHasSideEffects = 1 in
>> -def LDriuh_indexed_cdnPt : LDInst<(outs IntRegs:$dst),
>> +def LDriuh_indexed_cdnPt : LDInst2<(outs IntRegs:$dst),
>> (ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3),
>> "if ($src1.new) $dst = memuh($src2+#$src3)",
>> []>;
>>
>> let mayLoad = 1, neverHasSideEffects = 1 in
>> -def LDriuh_indexed_cdnNotPt : LDInst<(outs IntRegs:$dst),
>> +def LDriuh_indexed_cdnNotPt : LDInst2<(outs IntRegs:$dst),
>> (ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3),
>> "if (!$src1.new) $dst = memuh($src2+#$src3)",
>> []>;
>> @@ -1421,7 +1421,7 @@ def LDriw : LDInst<(outs IntRegs:$dst),
>>
>> // Load predicate.
>> let mayLoad = 1, Defs = [R10,R11] in
>> -def LDriw_pred : LDInst<(outs PredRegs:$dst),
>> +def LDriw_pred : LDInst2<(outs PredRegs:$dst),
>> (ins MEMri:$addr),
>> "Error; should not emit",
>> []>;
>> @@ -1435,19 +1435,19 @@ def LDriw_indexed : LDInst<(outs
IntRegs:$dst),
>>
s11_2ImmPred:$offset)))]>;
>>
>> let mayLoad = 1, neverHasSideEffects = 1 in
>> -def LDriw_GP : LDInst<(outs IntRegs:$dst),
>> +def LDriw_GP : LDInst2<(outs IntRegs:$dst),
>> (ins globaladdress:$global, u16Imm:$offset),
>> "$dst=memw(#$global+$offset)",
>> []>;
>>
>> let mayLoad = 1, neverHasSideEffects = 1 in
>> -def LDw_GP : LDInst<(outs IntRegs:$dst),
>> +def LDw_GP : LDInst2<(outs IntRegs:$dst),
>> (ins globaladdress:$global),
>> "$dst=memw(#$global)",
>> []>;
>>
>> let isPredicable = 1, mayLoad = 1, hasCtrlDep = 1,
neverHasSideEffects = 1 in
>> -def POST_LDriw : LDInstPI<(outs IntRegs:$dst, IntRegs:$dst2),
>> +def POST_LDriw : LDInst2PI<(outs IntRegs:$dst, IntRegs:$dst2),
>> (ins IntRegs:$src1, s4Imm:$offset),
>> "$dst = memw($src1++#$offset)",
>> [],
>> @@ -1456,70 +1456,70 @@ def POST_LDriw : LDInstPI<(outs
IntRegs:$dst, IntRegs:$dst2),
>> // Load word conditionally.
>>
>> let mayLoad = 1, neverHasSideEffects = 1 in
>> -def LDriw_cPt : LDInst<(outs IntRegs:$dst),
>> +def LDriw_cPt : LDInst2<(outs IntRegs:$dst),
>> (ins PredRegs:$src1, MEMri:$addr),
>> "if ($src1) $dst = memw($addr)",
>> []>;
>>
>> let mayLoad = 1, neverHasSideEffects = 1 in
>> -def LDriw_cNotPt : LDInst<(outs IntRegs:$dst),
>> +def LDriw_cNotPt : LDInst2<(outs IntRegs:$dst),
>> (ins PredRegs:$src1, MEMri:$addr),
>> "if (!$src1) $dst = memw($addr)",
>> []>;
>>
>> let mayLoad = 1, neverHasSideEffects = 1 in
>> -def LDriw_indexed_cPt : LDInst<(outs IntRegs:$dst),
>> +def LDriw_indexed_cPt : LDInst2<(outs IntRegs:$dst),
>> (ins PredRegs:$src1, IntRegs:$src2, u6_2Imm:$src3),
>> "if ($src1) $dst=memw($src2+#$src3)",
>> []>;
>>
>> let mayLoad = 1, neverHasSideEffects = 1 in
>> -def LDriw_indexed_cNotPt : LDInst<(outs IntRegs:$dst),
>> +def LDriw_indexed_cNotPt : LDInst2<(outs IntRegs:$dst),
>> (ins PredRegs:$src1, IntRegs:$src2, u6_2Imm:$src3),
>> "if (!$src1) $dst=memw($src2+#$src3)",
>> []>;
>>
>> let mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1 in
>> -def POST_LDriw_cPt : LDInstPI<(outs IntRegs:$dst1, IntRegs:$dst2),
>> +def POST_LDriw_cPt : LDInst2PI<(outs IntRegs:$dst1, IntRegs:$dst2),
>> (ins PredRegs:$src1, IntRegs:$src2, s4_2Imm:$src3),
>> "if ($src1) $dst1 = memw($src2++#$src3)",
>> [],
>> "$src2 = $dst2">;
>>
>> let mayLoad = 1, hasCtrlDep = 1, neverHasSideEffects = 1 in
>> -def POST_LDriw_cNotPt : LDInstPI<(outs IntRegs:$dst1,
IntRegs:$dst2),
>> +def POST_LDriw_cNotPt : LDInst2PI<(outs IntRegs:$dst1,
IntRegs:$dst2),
>> (ins PredRegs:$src1, IntRegs:$src2, s4_2Imm:$src3),
>> "if (!$src1) $dst1 = memw($src2++#$src3)",
>> [],
>> "$src2 = $dst2">;
>>
>> let mayLoad = 1, neverHasSideEffects = 1 in
>> -def LDriw_cdnPt : LDInst<(outs IntRegs:$dst),
>> +def LDriw_cdnPt : LDInst2<(outs IntRegs:$dst),
>> (ins PredRegs:$src1, MEMri:$addr),
>> "if ($src1.new) $dst = memw($addr)",
>> []>;
>>
>> let mayLoad = 1, neverHasSideEffects = 1 in
>> -def LDriw_cdnNotPt : LDInst<(outs IntRegs:$dst),
>> +def LDriw_cdnNotPt : LDInst2<(outs IntRegs:$dst),
>> (ins PredRegs:$src1, MEMri:$addr),
>> "if (!$src1.new) $dst = memw($addr)",
>> []>;
>>
>> let mayLoad = 1, neverHasSideEffects = 1 in
>> -def LDriw_indexed_cdnPt : LDInst<(outs IntRegs:$dst),
>> +def LDriw_indexed_cdnPt : LDInst2<(outs IntRegs:$dst),
>> (ins PredRegs:$src1, IntRegs:$src2, u6_2Imm:$src3),
>> "if ($src1.new) $dst=memw($src2+#$src3)",
>> []>;
>>
>> let mayLoad = 1, neverHasSideEffects = 1 in
>> -def LDriw_indexed_cdnNotPt : LDInst<(outs IntRegs:$dst),
>> +def LDriw_indexed_cdnNotPt : LDInst2<(outs IntRegs:$dst),
>> (ins PredRegs:$src1, IntRegs:$src2, u6_2Imm:$src3),
>> "if (!$src1.new) $dst=memw($src2+#$src3)",
>> []>;
>>
>> // Deallocate stack frame.
>> let Defs = [R29, R30, R31], Uses = [R29], neverHasSideEffects = 1 in
{
>> - def DEALLOCFRAME : LDInst<(outs), (ins i32imm:$amt1),
>> + def DEALLOCFRAME : LDInst2<(outs), (ins i32imm:$amt1),
>> "deallocframe",
>> []>;
>> }
>> @@ -1741,8 +1741,8 @@ def STrid_indexed : STInst<(outs),
>> [(store DoubleRegs:$src3,
>> (add IntRegs:$src1,
s11_3ImmPred:$src2))]>;
>>
>> -let mayStore = 1, neverHasSideEffects = 1 in
>> -def STrid_GP : STInst<(outs),
>> +let neverHasSideEffects = 1 in
>> +def STrid_GP : STInst2<(outs),
>> (ins globaladdress:$global, u16Imm:$offset,
DoubleRegs:$src),
>> "memd(#$global+$offset) = $src",
>> []>;
>> @@ -1758,30 +1758,30 @@ def POST_STdri : STInstPI<(outs
IntRegs:$dst),
>> // Store doubleword conditionally.
>> // if ([!]Pv) memd(Rs+#u6:3)=Rtt
>> // if (Pv) memd(Rs+#u6:3)=Rtt
>> -let AddedComplexity = 10, mayStore = 1, neverHasSideEffects = 1 in
>> -def STrid_cPt : STInst<(outs),
>> +let AddedComplexity = 10, neverHasSideEffects = 1 in
>> +def STrid_cPt : STInst2<(outs),
>> (ins PredRegs:$src1, MEMri:$addr, DoubleRegs:$src2),
>> "if ($src1) memd($addr) = $src2",
>> []>;
>>
>> // if (!Pv) memd(Rs+#u6:3)=Rtt
>> -let AddedComplexity = 10, mayStore = 1, neverHasSideEffects = 1 in
>> -def STrid_cNotPt : STInst<(outs),
>> +let AddedComplexity = 10, neverHasSideEffects = 1 in
>> +def STrid_cNotPt : STInst2<(outs),
>> (ins PredRegs:$src1, MEMri:$addr, DoubleRegs:$src2),
>> "if (!$src1) memd($addr) = $src2",
>> []>;
>>
>> // if (Pv) memd(Rs+#u6:3)=Rtt
>> -let AddedComplexity = 10, mayStore = 1, neverHasSideEffects = 1 in
>> -def STrid_indexed_cPt : STInst<(outs),
>> +let AddedComplexity = 10, neverHasSideEffects = 1 in
>> +def STrid_indexed_cPt : STInst2<(outs),
>> (ins PredRegs:$src1, IntRegs:$src2, u6_3Imm:$src3,
>> DoubleRegs:$src4),
>> "if ($src1) memd($src2+#$src3) = $src4",
>> []>;
>>
>> // if (!Pv) memd(Rs+#u6:3)=Rtt
>> -let AddedComplexity = 10, mayStore = 1, neverHasSideEffects = 1 in
>> -def STrid_indexed_cNotPt : STInst<(outs),
>> +let AddedComplexity = 10, neverHasSideEffects = 1 in
>> +def STrid_indexed_cNotPt : STInst2<(outs),
>> (ins PredRegs:$src1, IntRegs:$src2, u6_3Imm:$src3,
>> DoubleRegs:$src4),
>> "if (!$src1) memd($src2+#$src3) = $src4",
>> @@ -1789,8 +1789,8 @@ def STrid_indexed_cNotPt : STInst<(outs),
>>
>> // if ([!]Pv) memd(Rx++#s4:3)=Rtt
>> // if (Pv) memd(Rx++#s4:3)=Rtt
>> -let AddedComplexity = 10, mayStore = 1, neverHasSideEffects = 1 in
>> -def POST_STdri_cPt : STInstPI<(outs IntRegs:$dst),
>> +let AddedComplexity = 10, neverHasSideEffects = 1 in
>> +def POST_STdri_cPt : STInst2PI<(outs IntRegs:$dst),
>> (ins PredRegs:$src1, DoubleRegs:$src2, IntRegs:$src3,
>> s4_3Imm:$offset),
>> "if ($src1) memd($src3++#$offset) = $src2",
>> @@ -1798,9 +1798,9 @@ def POST_STdri_cPt : STInstPI<(outs
IntRegs:$dst),
>> "$src3 = $dst">;
>>
>> // if (!Pv) memd(Rx++#s4:3)=Rtt
>> -let AddedComplexity = 10, mayStore = 1, neverHasSideEffects = 1,
>> +let AddedComplexity = 10, neverHasSideEffects = 1,
>> isPredicated = 1 in
>> -def POST_STdri_cNotPt : STInstPI<(outs IntRegs:$dst),
>> +def POST_STdri_cNotPt : STInst2PI<(outs IntRegs:$dst),
>> (ins PredRegs:$src1, DoubleRegs:$src2, IntRegs:$src3,
>> s4_3Imm:$offset),
>> "if (!$src1) memd($src3++#$offset) = $src2",
>> @@ -1824,14 +1824,14 @@ def STrib_indexed : STInst<(outs),
>>
s11_0ImmPred:$src2))]>;
>>
>> // memb(gp+#u16:0)=Rt
>> -let mayStore = 1, neverHasSideEffects = 1 in
>> -def STrib_GP : STInst<(outs),
>> +let neverHasSideEffects = 1 in
>> +def STrib_GP : STInst2<(outs),
>> (ins globaladdress:$global, u16Imm:$offset,
IntRegs:$src),
>> "memb(#$global+$offset) = $src",
>> []>;
>>
>> -let mayStore = 1, neverHasSideEffects = 1 in
>> -def STb_GP : STInst<(outs),
>> +let neverHasSideEffects = 1 in
>> +def STb_GP : STInst2<(outs),
>> (ins globaladdress:$global, IntRegs:$src),
>> "memb(#$global) = $src",
>> []>;
>> @@ -1850,44 +1850,44 @@ def POST_STbri : STInstPI<(outs
IntRegs:$dst), (ins IntRegs:$src1,
>> // Store byte conditionally.
>> // if ([!]Pv) memb(Rs+#u6:0)=Rt
>> // if (Pv) memb(Rs+#u6:0)=Rt
>> -let mayStore = 1, neverHasSideEffects = 1 in
>> -def STrib_cPt : STInst<(outs),
>> +let neverHasSideEffects = 1 in
>> +def STrib_cPt : STInst2<(outs),
>> (ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2),
>> "if ($src1) memb($addr) = $src2",
>> []>;
>>
>> // if (!Pv) memb(Rs+#u6:0)=Rt
>> -let mayStore = 1, neverHasSideEffects = 1 in
>> -def STrib_cNotPt : STInst<(outs),
>> +let neverHasSideEffects = 1 in
>> +def STrib_cNotPt : STInst2<(outs),
>> (ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2),
>> "if (!$src1) memb($addr) = $src2",
>> []>;
>>
>> // if (Pv) memb(Rs+#u6:0)=Rt
>> -let mayStore = 1, neverHasSideEffects = 1 in
>> -def STrib_indexed_cPt : STInst<(outs),
>> +let neverHasSideEffects = 1 in
>> +def STrib_indexed_cPt : STInst2<(outs),
>> (ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3,
IntRegs:$src4),
>> "if ($src1) memb($src2+#$src3) = $src4",
>> []>;
>>
>> // if (!Pv) memb(Rs+#u6:0)=Rt
>> -let mayStore = 1, neverHasSideEffects = 1 in
>> -def STrib_indexed_cNotPt : STInst<(outs),
>> +let neverHasSideEffects = 1 in
>> +def STrib_indexed_cNotPt : STInst2<(outs),
>> (ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3,
IntRegs:$src4),
>> "if (!$src1) memb($src2+#$src3) = $src4",
>> []>;
>>
>> // if ([!]Pv) memb(Rx++#s4:0)=Rt
>> // if (Pv) memb(Rx++#s4:0)=Rt
>> -let mayStore = 1, hasCtrlDep = 1, isPredicated = 1 in
>> -def POST_STbri_cPt : STInstPI<(outs IntRegs:$dst),
>> +let hasCtrlDep = 1, isPredicated = 1 in
>> +def POST_STbri_cPt : STInst2PI<(outs IntRegs:$dst),
>> (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
s4_0Imm:$offset),
>> "if ($src1) memb($src3++#$offset) = $src2",
>> [],"$src3 = $dst">;
>>
>> // if (!Pv) memb(Rx++#s4:0)=Rt
>> -let mayStore = 1, hasCtrlDep = 1, isPredicated = 1 in
>> -def POST_STbri_cNotPt : STInstPI<(outs IntRegs:$dst),
>> +let hasCtrlDep = 1, isPredicated = 1 in
>> +def POST_STbri_cNotPt : STInst2PI<(outs IntRegs:$dst),
>> (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
s4_0Imm:$offset),
>> "if (!$src1) memb($src3++#$offset) = $src2",
>> [],"$src3 = $dst">;
>> @@ -1909,14 +1909,14 @@ def STrih_indexed : STInst<(outs),
>> [(truncstorei16 IntRegs:$src3, (add IntRegs:$src1,
>>
s11_1ImmPred:$src2))]>;
>>
>> -let mayStore = 1, neverHasSideEffects = 1 in
>> -def STrih_GP : STInst<(outs),
>> +let neverHasSideEffects = 1 in
>> +def STrih_GP : STInst2<(outs),
>> (ins globaladdress:$global, u16Imm:$offset,
IntRegs:$src),
>> "memh(#$global+$offset) = $src",
>> []>;
>>
>> -let mayStore = 1, neverHasSideEffects = 1 in
>> -def STh_GP : STInst<(outs),
>> +let neverHasSideEffects = 1 in
>> +def STh_GP : STInst2<(outs),
>> (ins globaladdress:$global, IntRegs:$src),
>> "memh(#$global) = $src",
>> []>;
>> @@ -1935,44 +1935,44 @@ def POST_SThri : STInstPI<(outs
IntRegs:$dst),
>> // Store halfword conditionally.
>> // if ([!]Pv) memh(Rs+#u6:1)=Rt
>> // if (Pv) memh(Rs+#u6:1)=Rt
>> -let mayStore = 1, neverHasSideEffects = 1 in
>> -def STrih_cPt : STInst<(outs),
>> +let neverHasSideEffects = 1 in
>> +def STrih_cPt : STInst2<(outs),
>> (ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2),
>> "if ($src1) memh($addr) = $src2",
>> []>;
>>
>> // if (!Pv) memh(Rs+#u6:1)=Rt
>> -let mayStore = 1, neverHasSideEffects = 1 in
>> -def STrih_cNotPt : STInst<(outs),
>> +let neverHasSideEffects = 1 in
>> +def STrih_cNotPt : STInst2<(outs),
>> (ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2),
>> "if (!$src1) memh($addr) = $src2",
>> []>;
>>
>> // if (Pv) memh(Rs+#u6:1)=Rt
>> -let mayStore = 1, neverHasSideEffects = 1 in
>> -def STrih_indexed_cPt : STInst<(outs),
>> +let neverHasSideEffects = 1 in
>> +def STrih_indexed_cPt : STInst2<(outs),
>> (ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3,
IntRegs:$src4),
>> "if ($src1) memh($src2+#$src3) = $src4",
>> []>;
>>
>> // if (!Pv) memh(Rs+#u6:1)=Rt
>> -let mayStore = 1, neverHasSideEffects = 1 in
>> -def STrih_indexed_cNotPt : STInst<(outs),
>> +let neverHasSideEffects = 1 in
>> +def STrih_indexed_cNotPt : STInst2<(outs),
>> (ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3,
IntRegs:$src4),
>> "if (!$src1) memh($src2+#$src3) = $src4",
>> []>;
>>
>> // if ([!]Pv) memh(Rx++#s4:1)=Rt
>> // if (Pv) memh(Rx++#s4:1)=Rt
>> -let mayStore = 1, hasCtrlDep = 1, isPredicated = 1 in
>> -def POST_SThri_cPt : STInstPI<(outs IntRegs:$dst),
>> +let hasCtrlDep = 1, isPredicated = 1 in
>> +def POST_SThri_cPt : STInst2PI<(outs IntRegs:$dst),
>> (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
s4_1Imm:$offset),
>> "if ($src1) memh($src3++#$offset) = $src2",
>> [],"$src3 = $dst">;
>>
>> // if (!Pv) memh(Rx++#s4:1)=Rt
>> -let mayStore = 1, hasCtrlDep = 1, isPredicated = 1 in
>> -def POST_SThri_cNotPt : STInstPI<(outs IntRegs:$dst),
>> +let hasCtrlDep = 1, isPredicated = 1 in
>> +def POST_SThri_cNotPt : STInst2PI<(outs IntRegs:$dst),
>> (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
s4_1Imm:$offset),
>> "if (!$src1) memh($src3++#$offset) = $src2",
>> [],"$src3 = $dst">;
>> @@ -1981,7 +1981,7 @@ def POST_SThri_cNotPt : STInstPI<(outs
IntRegs:$dst),
>> // Store word.
>> // Store predicate.
>> let Defs = [R10,R11] in
>> -def STriw_pred : STInst<(outs),
>> +def STriw_pred : STInst2<(outs),
>> (ins MEMri:$addr, PredRegs:$src1),
>> "Error; should not emit",
>> []>;
>> @@ -1999,8 +1999,8 @@ def STriw_indexed : STInst<(outs),
>> "memw($src1+#$src2) = $src3",
>> [(store IntRegs:$src3, (add IntRegs:$src1,
s11_2ImmPred:$src2))]>;
>>
>> -let mayStore = 1, neverHasSideEffects = 1 in
>> -def STriw_GP : STInst<(outs),
>> +let neverHasSideEffects = 1 in
>> +def STriw_GP : STInst2<(outs),
>> (ins globaladdress:$global, u16Imm:$offset,
IntRegs:$src),
>> "memw(#$global+$offset) = $src",
>> []>;
>> @@ -2016,44 +2016,44 @@ def POST_STwri : STInstPI<(outs
IntRegs:$dst),
>> // Store word conditionally.
>> // if ([!]Pv) memw(Rs+#u6:2)=Rt
>> // if (Pv) memw(Rs+#u6:2)=Rt
>> -let mayStore = 1, neverHasSideEffects = 1 in
>> -def STriw_cPt : STInst<(outs),
>> +let neverHasSideEffects = 1 in
>> +def STriw_cPt : STInst2<(outs),
>> (ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2),
>> "if ($src1) memw($addr) = $src2",
>> []>;
>>
>> // if (!Pv) memw(Rs+#u6:2)=Rt
>> -let mayStore = 1, neverHasSideEffects = 1 in
>> -def STriw_cNotPt : STInst<(outs),
>> +let neverHasSideEffects = 1 in
>> +def STriw_cNotPt : STInst2<(outs),
>> (ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2),
>> "if (!$src1) memw($addr) = $src2",
>> []>;
>>
>> // if (Pv) memw(Rs+#u6:2)=Rt
>> -let mayStore = 1, neverHasSideEffects = 1 in
>> -def STriw_indexed_cPt : STInst<(outs),
>> +let neverHasSideEffects = 1 in
>> +def STriw_indexed_cPt : STInst2<(outs),
>> (ins PredRegs:$src1, IntRegs:$src2, u6_2Imm:$src3,
IntRegs:$src4),
>> "if ($src1) memw($src2+#$src3) = $src4",
>> []>;
>>
>> // if (!Pv) memw(Rs+#u6:2)=Rt
>> -let mayStore = 1, neverHasSideEffects = 1 in
>> -def STriw_indexed_cNotPt : STInst<(outs),
>> +let neverHasSideEffects = 1 in
>> +def STriw_indexed_cNotPt : STInst2<(outs),
>> (ins PredRegs:$src1, IntRegs:$src2, u6_2Imm:$src3,
IntRegs:$src4),
>> "if (!$src1) memw($src2+#$src3) = $src4",
>> []>;
>>
>> // if ([!]Pv) memw(Rx++#s4:2)=Rt
>> // if (Pv) memw(Rx++#s4:2)=Rt
>> -let mayStore = 1, hasCtrlDep = 1, isPredicated = 1 in
>> -def POST_STwri_cPt : STInstPI<(outs IntRegs:$dst),
>> +let hasCtrlDep = 1, isPredicated = 1 in
>> +def POST_STwri_cPt : STInst2PI<(outs IntRegs:$dst),
>> (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
s4_2Imm:$offset),
>> "if ($src1) memw($src3++#$offset) = $src2",
>> [],"$src3 = $dst">;
>>
>> // if (!Pv) memw(Rx++#s4:2)=Rt
>> -let mayStore = 1, hasCtrlDep = 1, isPredicated = 1 in
>> -def POST_STwri_cNotPt : STInstPI<(outs IntRegs:$dst),
>> +let hasCtrlDep = 1, isPredicated = 1 in
>> +def POST_STwri_cNotPt : STInst2PI<(outs IntRegs:$dst),
>> (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
s4_2Imm:$offset),
>> "if (!$src1) memw($src3++#$offset) = $src2",
>> [],"$src3 = $dst">;
>> @@ -2062,7 +2062,7 @@ def POST_STwri_cNotPt : STInstPI<(outs
IntRegs:$dst),
>>
>> // Allocate stack frame.
>> let Defs = [R29, R30], Uses = [R31, R30], neverHasSideEffects = 1 in
{
>> - def ALLOCFRAME : STInst<(outs),
>> + def ALLOCFRAME : STInst2<(outs),
>> (ins i32imm:$amt),
>> "allocframe(#$amt)",
>> []>;
>> @@ -2232,7 +2232,7 @@ def HexagonBARRIER:
SDNode<"HexagonISD::BARRIER", SDHexagonBARRIER,
>> [SDNPHasChain]>;
>>
>> let hasSideEffects = 1 in
>> -def BARRIER : STInst<(outs), (ins),
>> +def BARRIER : STInst2<(outs), (ins),
>> "barrier",
>> [(HexagonBARRIER)]>;
>>
>> @@ -2324,35 +2324,35 @@ def CONST32 : LDInst<(outs IntRegs:$dst),
(ins globaladdress:$global),
>> (load (HexagonCONST32 tglobaltlsaddr:$global)))]>;
>>
>> let isReMaterializable = 1, isMoveImm = 1 in
>> -def CONST32_set : LDInst<(outs IntRegs:$dst), (ins
globaladdress:$global),
>> +def CONST32_set : LDInst2<(outs IntRegs:$dst), (ins
globaladdress:$global),
>> "$dst = CONST32(#$global)",
>> [(set IntRegs:$dst,
>> (HexagonCONST32 tglobaladdr:$global))]>;
>>
>> let isReMaterializable = 1, isMoveImm = 1 in
>> -def CONST32_set_jt : LDInst<(outs IntRegs:$dst), (ins
jumptablebase:$jt),
>> +def CONST32_set_jt : LDInst2<(outs IntRegs:$dst), (ins
jumptablebase:$jt),
>> "$dst = CONST32(#$jt)",
>> [(set IntRegs:$dst,
>> (HexagonCONST32 tjumptable:$jt))]>;
>>
>> let isReMaterializable = 1, isMoveImm = 1 in
>> -def CONST32GP_set : LDInst<(outs IntRegs:$dst), (ins
globaladdress:$global),
>> +def CONST32GP_set : LDInst2<(outs IntRegs:$dst), (ins
globaladdress:$global),
>> "$dst = CONST32(#$global)",
>> [(set IntRegs:$dst,
>> (HexagonCONST32_GP tglobaladdr:$global))]>;
>>
>> let isReMaterializable = 1, isMoveImm = 1 in
>> -def CONST32_Int_Real : LDInst<(outs IntRegs:$dst), (ins
i32imm:$global),
>> +def CONST32_Int_Real : LDInst2<(outs IntRegs:$dst), (ins
i32imm:$global),
>> "$dst = CONST32(#$global)",
>> [(set IntRegs:$dst, imm:$global) ]>;
>>
>> let isReMaterializable = 1, isMoveImm = 1 in
>> -def CONST32_Label : LDInst<(outs IntRegs:$dst), (ins
bblabel:$label),
>> +def CONST32_Label : LDInst2<(outs IntRegs:$dst), (ins
bblabel:$label),
>> "$dst = CONST32($label)",
>> [(set IntRegs:$dst, (HexagonCONST32
bbl:$label))]>;
>>
>> let isReMaterializable = 1, isMoveImm = 1 in
>> -def CONST64_Int_Real : LDInst<(outs DoubleRegs:$dst), (ins
i64imm:$global),
>> +def CONST64_Int_Real : LDInst2<(outs DoubleRegs:$dst), (ins
i64imm:$global),
>> "$dst = CONST64(#$global)",
>> [(set DoubleRegs:$dst, imm:$global) ]>;
>>
>> @@ -3046,3 +3046,7 @@ include "HexagonInstrInfoV3.td"
>> //===----------------------------------------------------------------------===//
>>
>> include "HexagonInstrInfoV4.td"
>> +
>> +//===----------------------------------------------------------------------===//
>> +// V4 Instructions -
>> +//===----------------------------------------------------------------------===//
>> diff --git a/lib/Target/Hexagon/HexagonInstrInfoV3.td b/lib/Target/Hexagon/HexagonInstrInfoV3.td
>> index a73897e..2bd6770 100644
>> --- a/lib/Target/Hexagon/HexagonInstrInfoV3.td
>> +++ b/lib/Target/Hexagon/HexagonInstrInfoV3.td
>> @@ -41,10 +41,11 @@ let isCall = 1, neverHasSideEffects = 1,
>> }
>>
>>
>> +// Jump to address from register
>> // if(p?.new) jumpr:t r?
>> let isReturn = 1, isTerminator = 1, isBarrier = 1,
>> Defs = [PC], Uses = [R31] in {
>> - def JMPR_cPnewt: JRInst<(outs), (ins PredRegs:$src1,
IntRegs:$src2),
>> + def JMPR_cdnPt_V3: JRInst<(outs), (ins PredRegs:$src1,
IntRegs:$src2),
>> "if ($src1.new) jumpr:t $src2",
>> []>, Requires<[HasV3T]>;
>> }
>> @@ -52,7 +53,7 @@ let isReturn = 1, isTerminator = 1, isBarrier = 1,
>> // if (!p?.new) jumpr:t r?
>> let isReturn = 1, isTerminator = 1, isBarrier = 1,
>> Defs = [PC], Uses = [R31] in {
>> - def JMPR_cNotPnewt: JRInst<(outs), (ins PredRegs:$src1,
IntRegs:$src2),
>> + def JMPR_cdnNotPt_V3: JRInst<(outs), (ins PredRegs:$src1,
IntRegs:$src2),
>> "if (!$src1.new) jumpr:t $src2",
>> []>, Requires<[HasV3T]>;
>> }
>> @@ -61,7 +62,7 @@ let isReturn = 1, isTerminator = 1, isBarrier = 1,
>> // if(p?.new) jumpr:nt r?
>> let isReturn = 1, isTerminator = 1, isBarrier = 1,
>> Defs = [PC], Uses = [R31] in {
>> - def JMPR_cPnewNt: JRInst<(outs), (ins PredRegs:$src1,
IntRegs:$src2),
>> + def JMPR_cdnPnt: JRInst<(outs), (ins PredRegs:$src1,
IntRegs:$src2),
>> "if ($src1.new) jumpr:nt $src2",
>> []>, Requires<[HasV3T]>;
>> }
>> @@ -69,7 +70,7 @@ let isReturn = 1, isTerminator = 1, isBarrier = 1,
>> // if (!p?.new) jumpr:nt r?
>> let isReturn = 1, isTerminator = 1, isBarrier = 1,
>> Defs = [PC], Uses = [R31] in {
>> - def JMPR_cNotPnewNt: JRInst<(outs), (ins PredRegs:$src1,
IntRegs:$src2),
>> + def JMPR_cdnNotPnt: JRInst<(outs), (ins PredRegs:$src1,
IntRegs:$src2),
>> "if (!$src1.new) jumpr:nt $src2",
>> []>, Requires<[HasV3T]>;
>> }
>> @@ -86,20 +87,22 @@ let AddedComplexity = 200 in
>> def MAXw_dd : ALU64_rr<(outs DoubleRegs:$dst), (ins
DoubleRegs:$src1,
>>
DoubleRegs:$src2),
>> "$dst = max($src2, $src1)",
>> - [(set DoubleRegs:$dst, (select (i1 (setlt
DoubleRegs:$src2,
>> -
DoubleRegs:$src1)),
>> - DoubleRegs:$src1,
>> - DoubleRegs:$src2))]>,
>> + [(set (i64 DoubleRegs:$dst),
>> + (i64 (select (i1 (setlt (i64 DoubleRegs:$src2),
>> + (i64 DoubleRegs:$src1))),
>> + (i64 DoubleRegs:$src1),
>> + (i64 DoubleRegs:$src2))))]>,
>> Requires<[HasV3T]>;
>>
>> let AddedComplexity = 200 in
>> def MINw_dd : ALU64_rr<(outs DoubleRegs:$dst), (ins
DoubleRegs:$src1,
>>
DoubleRegs:$src2),
>> "$dst = min($src2, $src1)",
>> - [(set DoubleRegs:$dst, (select (i1 (setgt
DoubleRegs:$src2,
>> -
DoubleRegs:$src1)),
>> - DoubleRegs:$src1,
>> - DoubleRegs:$src2))]>,
>> + [(set (i64 DoubleRegs:$dst),
>> + (i64 (select (i1 (setgt (i64 DoubleRegs:$src2),
>> + (i64 DoubleRegs:$src1))),
>> + (i64 DoubleRegs:$src1),
>> + (i64 DoubleRegs:$src2))))]>,
>> Requires<[HasV3T]>;
>>
>> //===----------------------------------------------------------------------===//
>> @@ -109,25 +112,25 @@ Requires<[HasV3T]>;
>>
>>
>>
>> -//def : Pat<(brcond (i1 (seteq IntRegs:$src1, 0)), bb:$offset),
>> -// (JMP_RegEzt IntRegs:$src1, bb:$offset)>,
Requires<[HasV3T]>;
>> +//def : Pat<(brcond (i1 (seteq (i32 IntRegs:$src1), 0)),
bb:$offset),
>> +// (JMP_RegEzt (i32 IntRegs:$src1), bb:$offset)>,
Requires<[HasV3T]>;
>>
>> -//def : Pat<(brcond (i1 (setne IntRegs:$src1, 0)), bb:$offset),
>> -// (JMP_RegNzt IntRegs:$src1, bb:$offset)>,
Requires<[HasV3T]>;
>> +//def : Pat<(brcond (i1 (setne (i32 IntRegs:$src1), 0)),
bb:$offset),
>> +// (JMP_RegNzt (i32 IntRegs:$src1), bb:$offset)>,
Requires<[HasV3T]>;
>>
>> -//def : Pat<(brcond (i1 (setle IntRegs:$src1, 0)), bb:$offset),
>> -// (JMP_RegLezt IntRegs:$src1, bb:$offset)>,
Requires<[HasV3T]>;
>> +//def : Pat<(brcond (i1 (setle (i32 IntRegs:$src1), 0)),
bb:$offset),
>> +// (JMP_RegLezt (i32 IntRegs:$src1), bb:$offset)>,
Requires<[HasV3T]>;
>>
>> -//def : Pat<(brcond (i1 (setge IntRegs:$src1, 0)), bb:$offset),
>> -// (JMP_RegGezt IntRegs:$src1, bb:$offset)>,
Requires<[HasV3T]>;
>> +//def : Pat<(brcond (i1 (setge (i32 IntRegs:$src1), 0)),
bb:$offset),
>> +// (JMP_RegGezt (i32 IntRegs:$src1), bb:$offset)>,
Requires<[HasV3T]>;
>>
>> -//def : Pat<(brcond (i1 (setgt IntRegs:$src1, -1)), bb:$offset),
>> -// (JMP_RegGezt IntRegs:$src1, bb:$offset)>,
Requires<[HasV3T]>;
>> +//def : Pat<(brcond (i1 (setgt (i32 IntRegs:$src1), -1)),
bb:$offset),
>> +// (JMP_RegGezt (i32 IntRegs:$src1), bb:$offset)>,
Requires<[HasV3T]>;
>>
>>
>> // Map call instruction
>> -def : Pat<(call IntRegs:$dst),
>> - (CALLRv3 IntRegs:$dst)>, Requires<[HasV3T]>;
>> +def : Pat<(call (i32 IntRegs:$dst)),
>> + (CALLRv3 (i32 IntRegs:$dst))>, Requires<[HasV3T]>;
>> def : Pat<(call tglobaladdr:$dst),
>> (CALLv3 tglobaladdr:$dst)>, Requires<[HasV3T]>;
>> def : Pat<(call texternalsym:$dst),
>> diff --git a/lib/Target/Hexagon/HexagonInstrInfoV4.td b/lib/Target/Hexagon/HexagonInstrInfoV4.td
>> index 9e60cf2..f507e4f 100644
>> --- a/lib/Target/Hexagon/HexagonInstrInfoV4.td
>> +++ b/lib/Target/Hexagon/HexagonInstrInfoV4.td
>> @@ -11,6 +11,11 @@
>> //
>> //===----------------------------------------------------------------------===//
>>
>> +def IMMEXT : Immext<(outs), (ins),
>> + "##immext //should never emit this",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> // Hexagon V4 Architecture spec defines 8 instruction classes:
>> +// LD ST ALU32 XTYPE J JR MEMOP NV CR SYSTEM(system is not implemented in
>> +// the compiler)
>> @@ -250,23 +255,151 @@ def ZXTH_cdnNotPt_V4 : ALU32_rr<(outs
IntRegs:$dst),
>> []>,
>> Requires<[HasV4T]>;
>>
>> +// Generate frame index addresses.
>> +let neverHasSideEffects = 1, isReMaterializable = 1 in
>> +def TFR_FI_immext_V4 : ALU32_ri<(outs IntRegs:$dst),
>> + (ins IntRegs:$src1, s32Imm:$offset),
>> + "$dst = add($src1, ##$offset)",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>>
>> //===----------------------------------------------------------------------===//
>> // ALU32 -
>> //===----------------------------------------------------------------------===//
>>
>>
>> +//===----------------------------------------------------------------------===//
>> +// ALU32/PERM +
>> +//===----------------------------------------------------------------------===//
>> +
>> +// Combine
>> +// Rdd=combine(Rs, #s8)
>> +let neverHasSideEffects = 1 in
>> +def COMBINE_ri_V4 : ALU32_ri<(outs DoubleRegs:$dst),
>> + (ins IntRegs:$src1, s8Imm:$src2),
>> + "$dst = combine($src1, #$src2)",
>> + []>,
>> + Requires<[HasV4T]>;
>> +// Rdd=combine(#s8, Rs)
>> +let neverHasSideEffects = 1 in
>> +def COMBINE_ir_V4 : ALU32_ir<(outs DoubleRegs:$dst),
>> + (ins s8Imm:$src1, IntRegs:$src2),
>> + "$dst = combine(#$src1, $src2)",
>> + []>,
>> + Requires<[HasV4T]>;
>> +//===----------------------------------------------------------------------===//
>> +// ALU32/PERM +
>> +//===----------------------------------------------------------------------===//
>>
>> //===----------------------------------------------------------------------===//
>> // LD +
>> //===----------------------------------------------------------------------===//
>> -///
>> -/// Make sure that in post increment load, the first operand is always the post
>> -/// increment operand.
>> -///
>> -//// Load doubleword.
>> -// Rdd=memd(Re=#U6)
>> +//
>> +// These absolute set addressing mode instructions accept an immediate as
>> +// an operand. We have duplicated these patterns to take a global address.
>> +
>> +let neverHasSideEffects = 1 in
>> +def LDrid_abs_setimm_V4 : LDInst<(outs DoubleRegs:$dst1,
IntRegs:$dst2),
>> + (ins u6Imm:$addr),
>> + "$dst1 = memd($dst2=#$addr)",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +// Rd=memb(Re=#U6)
>> +let neverHasSideEffects = 1 in
>> +def LDrib_abs_setimm_V4 : LDInst<(outs IntRegs:$dst1,
IntRegs:$dst2),
>> + (ins u6Imm:$addr),
>> + "$dst1 = memb($dst2=#$addr)",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +// Rd=memh(Re=#U6)
>> +let neverHasSideEffects = 1 in
>> +def LDrih_abs_setimm_V4 : LDInst<(outs IntRegs:$dst1,
IntRegs:$dst2),
>> + (ins u6Imm:$addr),
>> + "$dst1 = memh($dst2=#$addr)",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +// Rd=memub(Re=#U6)
>> +let neverHasSideEffects = 1 in
>> +def LDriub_abs_setimm_V4 : LDInst<(outs IntRegs:$dst1,
IntRegs:$dst2),
>> + (ins u6Imm:$addr),
>> + "$dst1 = memub($dst2=#$addr)",
>> + []>,
>> + Requires<[HasV4T]>;
>>
>> +// Rd=memuh(Re=#U6)
>> +let neverHasSideEffects = 1 in
>> +def LDriuh_abs_setimm_V4 : LDInst<(outs IntRegs:$dst1,
IntRegs:$dst2),
>> + (ins u6Imm:$addr),
>> + "$dst1 = memuh($dst2=#$addr)",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +// Rd=memw(Re=#U6)
>> +let neverHasSideEffects = 1 in
>> +def LDriw_abs_setimm_V4 : LDInst<(outs IntRegs:$dst1,
IntRegs:$dst2),
>> + (ins u6Imm:$addr),
>> + "$dst1 = memw($dst2=#$addr)",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +// The following patterns are defined for the absolute set addressing mode
>> +// instructions, which take a global address as the operand.
>> +let mayLoad = 1, neverHasSideEffects = 1 in
>> +def LDrid_abs_set_V4 : LDInst<(outs DoubleRegs:$dst1,
IntRegs:$dst2),
>> + (ins globaladdress:$addr),
>> + "$dst1 = memd($dst2=##$addr)",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +// Rd=memb(Re=#U6)
>> +let mayLoad = 1, neverHasSideEffects = 1 in
>> +def LDrib_abs_set_V4 : LDInst<(outs IntRegs:$dst1, IntRegs:$dst2),
>> + (ins globaladdress:$addr),
>> + "$dst1 = memb($dst2=##$addr)",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +// Rd=memh(Re=#U6)
>> +let mayLoad = 1, neverHasSideEffects = 1 in
>> +def LDrih_abs_set_V4 : LDInst<(outs IntRegs:$dst1, IntRegs:$dst2),
>> + (ins globaladdress:$addr),
>> + "$dst1 = memh($dst2=##$addr)",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +// Rd=memub(Re=#U6)
>> +let mayLoad = 1, neverHasSideEffects = 1 in
>> +def LDriub_abs_set_V4 : LDInst<(outs IntRegs:$dst1, IntRegs:$dst2),
>> + (ins globaladdress:$addr),
>> + "$dst1 = memub($dst2=##$addr)",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +// Rd=memuh(Re=#U6)
>> +let mayLoad = 1, neverHasSideEffects = 1 in
>> +def LDriuh_abs_set_V4 : LDInst<(outs IntRegs:$dst1, IntRegs:$dst2),
>> + (ins globaladdress:$addr),
>> + "$dst1 = memuh($dst2=##$addr)",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +// Rd=memw(Re=#U6)
>> +let mayLoad = 1, neverHasSideEffects = 1 in
>> +def LDriw_abs_set_V4 : LDInst<(outs IntRegs:$dst1, IntRegs:$dst2),
>> + (ins globaladdress:$addr),
>> + "$dst1 = memw($dst2=##$addr)",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +// Load doubleword.
>> +//
>> +// Make sure that in post increment load, the first operand is always the post
>> +// increment operand.
>> +//
>> // Rdd=memd(Rs+Rt<<#u2)
>> // Special case pattern for indexed load without offset which is easier to
>> // match. AddedComplexity of this pattern should be lower than base+offset load
>> @@ -276,17 +409,19 @@ let AddedComplexity = 10, isPredicable = 1 in
>> def LDrid_indexed_V4 : LDInst<(outs DoubleRegs:$dst),
>> (ins IntRegs:$src1, IntRegs:$src2),
>> "$dst=memd($src1+$src2<<#0)",
>> -                    [(set DoubleRegs:$dst, (load (add IntRegs:$src1,
>> -                                                      IntRegs:$src2)))]>,
>> +                    [(set (i64 DoubleRegs:$dst),
>> +                          (i64 (load (add (i32 IntRegs:$src1),
>> +                                          (i32 IntRegs:$src2)))))]>,
>> Requires<[HasV4T]>;
>>
>> let AddedComplexity = 40, isPredicable = 1 in
>> def LDrid_indexed_shl_V4 : LDInst<(outs DoubleRegs:$dst),
>>                     (ins IntRegs:$src1, IntRegs:$src2, u2Imm:$offset),
>>                     "$dst=memd($src1+$src2<<#$offset)",
>> - [(set DoubleRegs:$dst, (load (add IntRegs:$src1,
>> - (shl IntRegs:$src2,
>> -                                                   u2ImmPred:$offset))))]>,
>> + [(set (i64 DoubleRegs:$dst),
>> + (i64 (load (add (i32 IntRegs:$src1),
>> + (shl (i32 IntRegs:$src2),
>> +                                               u2ImmPred:$offset)))))]>,
>> Requires<[HasV4T]>;
>>
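As a hypothetical source-level illustration of the two forms above (not taken from the patch): indexing a 64-bit array scales the index by 8, i.e. the shifted <<#3 form, while a byte-sized element needs no scaling, which is the "without offset" special case mentioned in the comment; the byte variants appear in the load-byte section further down.

    /* Illustration only; names are made up. */
    long long load_elem64(long long *base, int i) {
      return base[i];           /* addr = base + (i << 3)  ->  memd(Rs+Rt<<#3) */
    }

    signed char load_elem8(signed char *base, int i) {
      return base[i];           /* addr = base + i         ->  memb(Rs+Rt<<#0) */
    }
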
>> //// Load doubleword conditionally.
>> @@ -362,60 +497,62 @@ def LDrid_indexed_shl_cdnNotPt_V4 : LDInst<(outs DoubleRegs:$dst),
>> // Rdd=memd(Rt<<#u2+#U6)
>>
>> //// Load byte.
>> -// Rd=memb(Re=#U6)
>> -
>> // Rd=memb(Rs+Rt<<#u2)
>> let AddedComplexity = 10, isPredicable = 1 in
>> def LDrib_indexed_V4 : LDInst<(outs IntRegs:$dst),
>> (ins IntRegs:$src1, IntRegs:$src2),
>> "$dst=memb($src1+$src2<<#0)",
>> -                    [(set IntRegs:$dst, (sextloadi8 (add IntRegs:$src1,
>> -                                                         IntRegs:$src2)))]>,
>> + [(set (i32 IntRegs:$dst),
>> + (i32 (sextloadi8 (add (i32 IntRegs:$src1),
>> +                                              (i32 IntRegs:$src2)))))]>,
>> Requires<[HasV4T]>;
>>
>> let AddedComplexity = 10, isPredicable = 1 in
>> def LDriub_indexed_V4 : LDInst<(outs IntRegs:$dst),
>> (ins IntRegs:$src1, IntRegs:$src2),
>> "$dst=memub($src1+$src2<<#0)",
>> -                    [(set IntRegs:$dst, (zextloadi8 (add IntRegs:$src1,
>> -                                                         IntRegs:$src2)))]>,
>> + [(set (i32 IntRegs:$dst),
>> + (i32 (zextloadi8 (add (i32 IntRegs:$src1),
>> +                                              (i32 IntRegs:$src2)))))]>,
>> Requires<[HasV4T]>;
>>
>> let AddedComplexity = 10, isPredicable = 1 in
>> def LDriub_ae_indexed_V4 : LDInst<(outs IntRegs:$dst),
>> (ins IntRegs:$src1, IntRegs:$src2),
>> "$dst=memub($src1+$src2<<#0)",
>> - [(set IntRegs:$dst, (extloadi8 (add IntRegs:$src1,
>> -                                                        IntRegs:$src2)))]>,
>> + [(set (i32 IntRegs:$dst),
>> + (i32 (extloadi8 (add (i32 IntRegs:$src1),
>> +                                             (i32 IntRegs:$src2)))))]>,
>> Requires<[HasV4T]>;
>>
>> let AddedComplexity = 40, isPredicable = 1 in
>> def LDrib_indexed_shl_V4 : LDInst<(outs IntRegs:$dst),
>> (ins IntRegs:$src1, IntRegs:$src2,
u2Imm:$offset),
>>
"$dst=memb($src1+$src2<<#$offset)",
>> - [(set IntRegs:$dst,
>> - (sextloadi8 (add IntRegs:$src1,
>> - (shl IntRegs:$src2,
>> -
u2ImmPred:$offset))))]>,
>> + [(set (i32 IntRegs:$dst),
>> + (i32 (sextloadi8 (add (i32 IntRegs:$src1),
>> + (shl (i32
IntRegs:$src2),
>> +
u2ImmPred:$offset)))))]>,
>> Requires<[HasV4T]>;
>>
>> let AddedComplexity = 40, isPredicable = 1 in
>> def LDriub_indexed_shl_V4 : LDInst<(outs IntRegs:$dst),
>> (ins IntRegs:$src1, IntRegs:$src2,
u2Imm:$offset),
>>
"$dst=memub($src1+$src2<<#$offset)",
>> - [(set IntRegs:$dst,
>> - (zextloadi8 (add IntRegs:$src1,
>> - (shl IntRegs:$src2,
>> -
u2ImmPred:$offset))))]>,
>> + [(set (i32 IntRegs:$dst),
>> + (i32 (zextloadi8 (add (i32 IntRegs:$src1),
>> + (shl (i32
IntRegs:$src2),
>> +
u2ImmPred:$offset)))))]>,
>> Requires<[HasV4T]>;
>>
>> let AddedComplexity = 40, isPredicable = 1 in
>> def LDriub_ae_indexed_shl_V4 : LDInst<(outs IntRegs:$dst),
>> (ins IntRegs:$src1, IntRegs:$src2,
u2Imm:$offset),
>>
"$dst=memub($src1+$src2<<#$offset)",
>> - [(set IntRegs:$dst, (extloadi8 (add IntRegs:$src1,
>> - (shl IntRegs:$src2,
>> -
u2ImmPred:$offset))))]>,
>> + [(set (i32 IntRegs:$dst),
>> + (i32 (extloadi8 (add (i32 IntRegs:$src1),
>> + (shl (i32
IntRegs:$src2),
>> +
u2ImmPred:$offset)))))]>,
>> Requires<[HasV4T]>;
>>
>> //// Load byte conditionally.
>> @@ -561,31 +698,32 @@ def LDriub_indexed_shl_cdnNotPt_V4 : LDInst<(outs IntRegs:$dst),
>> // Rd=memb(Rt<<#u2+#U6)
>>
>> //// Load halfword
>> -// Rd=memh(Re=#U6)
>> -
>> // Rd=memh(Rs+Rt<<#u2)
>> let AddedComplexity = 10, isPredicable = 1 in
>> def LDrih_indexed_V4 : LDInst<(outs IntRegs:$dst),
>> (ins IntRegs:$src1, IntRegs:$src2),
>> "$dst=memh($src1+$src2<<#0)",
>> -                    [(set IntRegs:$dst, (sextloadi16 (add IntRegs:$src1,
>> -                                                          IntRegs:$src2)))]>,
>> + [(set (i32 IntRegs:$dst),
>> + (i32 (sextloadi16 (add (i32 IntRegs:$src1),
>> +                                               (i32 IntRegs:$src2)))))]>,
>> Requires<[HasV4T]>;
>>
>> let AddedComplexity = 10, isPredicable = 1 in
>> def LDriuh_indexed_V4 : LDInst<(outs IntRegs:$dst),
>> (ins IntRegs:$src1, IntRegs:$src2),
>> "$dst=memuh($src1+$src2<<#0)",
>> -                    [(set IntRegs:$dst, (zextloadi16 (add IntRegs:$src1,
>> -                                                          IntRegs:$src2)))]>,
>> + [(set (i32 IntRegs:$dst),
>> + (i32 (zextloadi16 (add (i32 IntRegs:$src1),
>> +                                               (i32 IntRegs:$src2)))))]>,
>> Requires<[HasV4T]>;
>>
>> let AddedComplexity = 10, isPredicable = 1 in
>> def LDriuh_ae_indexed_V4 : LDInst<(outs IntRegs:$dst),
>> (ins IntRegs:$src1, IntRegs:$src2),
>> "$dst=memuh($src1+$src2<<#0)",
>> -                    [(set IntRegs:$dst, (extloadi16 (add IntRegs:$src1,
>> -                                                         IntRegs:$src2)))]>,
>> + [(set (i32 IntRegs:$dst),
>> + (i32 (extloadi16 (add (i32 IntRegs:$src1),
>> +                                              (i32 IntRegs:$src2)))))]>,
>> Requires<[HasV4T]>;
>>
>> // Rd=memh(Rs+Rt<<#u2)
>> @@ -593,30 +731,30 @@ let AddedComplexity = 40, isPredicable = 1 in
>> def LDrih_indexed_shl_V4 : LDInst<(outs IntRegs:$dst),
>> (ins IntRegs:$src1, IntRegs:$src2,
u2Imm:$offset),
>>
"$dst=memh($src1+$src2<<#$offset)",
>> - [(set IntRegs:$dst,
>> - (sextloadi16 (add IntRegs:$src1,
>> - (shl IntRegs:$src2,
>> -
u2ImmPred:$offset))))]>,
>> + [(set (i32 IntRegs:$dst),
>> + (i32 (sextloadi16 (add (i32 IntRegs:$src1),
>> + (shl (i32
IntRegs:$src2),
>> +
u2ImmPred:$offset)))))]>,
>> Requires<[HasV4T]>;
>>
>> let AddedComplexity = 40, isPredicable = 1 in
>> def LDriuh_indexed_shl_V4 : LDInst<(outs IntRegs:$dst),
>> (ins IntRegs:$src1, IntRegs:$src2,
u2Imm:$offset),
>>
"$dst=memuh($src1+$src2<<#$offset)",
>> - [(set IntRegs:$dst,
>> - (zextloadi16 (add IntRegs:$src1,
>> - (shl IntRegs:$src2,
>> -
u2ImmPred:$offset))))]>,
>> + [(set (i32 IntRegs:$dst),
>> + (i32 (zextloadi16 (add (i32 IntRegs:$src1),
>> + (shl (i32
IntRegs:$src2),
>> +
u2ImmPred:$offset)))))]>,
>> Requires<[HasV4T]>;
>>
>> let AddedComplexity = 40, isPredicable = 1 in
>> def LDriuh_ae_indexed_shl_V4 : LDInst<(outs IntRegs:$dst),
>> (ins IntRegs:$src1, IntRegs:$src2,
u2Imm:$offset),
>>
"$dst=memuh($src1+$src2<<#$offset)",
>> - [(set IntRegs:$dst,
>> - (extloadi16 (add IntRegs:$src1,
>> - (shl IntRegs:$src2,
>> -
u2ImmPred:$offset))))]>,
>> + [(set (i32 IntRegs:$dst),
>> + (i32 (extloadi16 (add (i32 IntRegs:$src1),
>> + (shl (i32
IntRegs:$src2),
>> +
u2ImmPred:$offset)))))]>,
>> Requires<[HasV4T]>;
>>
>> //// Load halfword conditionally.
>> @@ -762,6 +900,14 @@ def LDriuh_indexed_shl_cdnNotPt_V4 : LDInst<(outs IntRegs:$dst),
>> // Rd=memh(Rt<<#u2+#U6)
>>
>> //// Load word.
>> +// Load predicate: Fix for bug 5279.
>> +let mayLoad = 1, neverHasSideEffects = 1 in
>> +def LDriw_pred_V4 : LDInst<(outs PredRegs:$dst),
>> + (ins MEMri:$addr),
>> + "Error; should not emit",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> // Rd=memw(Re=#U6)
>>
>> // Rd=memw(Rs+Rt<<#u2)
>> @@ -769,8 +915,9 @@ let AddedComplexity = 10, isPredicable = 1 in
>> def LDriw_indexed_V4 : LDInst<(outs IntRegs:$dst),
>> (ins IntRegs:$src1, IntRegs:$src2),
>> "$dst=memw($src1+$src2<<#0)",
>> - [(set IntRegs:$dst, (load (add IntRegs:$src1,
>> -                                             IntRegs:$src2)))]>,
>> + [(set (i32 IntRegs:$dst),
>> + (i32 (load (add (i32 IntRegs:$src1),
>> +                                        (i32 IntRegs:$src2)))))]>,
>> Requires<[HasV4T]>;
>>
>> // Rd=memw(Rs+Rt<<#u2)
>> @@ -778,9 +925,10 @@ let AddedComplexity = 40, isPredicable = 1 in
>> def LDriw_indexed_shl_V4 : LDInst<(outs IntRegs:$dst),
>>                     (ins IntRegs:$src1, IntRegs:$src2, u2Imm:$offset),
>>                     "$dst=memw($src1+$src2<<#$offset)",
>> - [(set IntRegs:$dst, (load (add IntRegs:$src1,
>> - (shl IntRegs:$src2,
>> -                                                  u2ImmPred:$offset))))]>,
>> + [(set (i32 IntRegs:$dst),
>> + (i32 (load (add (i32 IntRegs:$src1),
>> + (shl (i32 IntRegs:$src2),
>> +                                            u2ImmPred:$offset)))))]>,
>> Requires<[HasV4T]>;
>>
>> //// Load word conditionally.
>> @@ -955,261 +1103,956 @@ def POST_LDriw_cdnNotPt_V4 : LDInstPI<(outs IntRegs:$dst1, IntRegs:$dst2),
>> "$src2 = $dst2">,
>> Requires<[HasV4T]>;
>>
>> +/// Load from global offset
>>
>> -//===----------------------------------------------------------------------===//
>> -// LD -
>> -//===----------------------------------------------------------------------===//
>> -
>> -//===----------------------------------------------------------------------===//
>> -// ST +
>> -//===----------------------------------------------------------------------===//
>> -///
>> -/// Assumptions::: ****** DO NOT IGNORE ********
>> -/// 1. Make sure that in post increment store, the zero'th operand is always the
>> -/// post increment operand.
>> -/// 2. Make sure that the store value operand(Rt/Rtt) in a store is always the
>> -/// last operand.
>> -///
>> +let isPredicable = 1, mayLoad = 1, neverHasSideEffects = 1 in
>> +def LDrid_GP_V4 : LDInst<(outs DoubleRegs:$dst),
>> + (ins globaladdress:$global, u16Imm:$offset),
>> + "$dst=memd(#$global+$offset)",
>> + []>,
>> + Requires<[HasV4T]>;
>>
>> -// Store doubleword.
>> -// memd(Re=#U6)=Rtt
>> -// TODO: needs to be implemented
>> +let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in
>> +def LDrid_GP_cPt_V4 : LDInst<(outs DoubleRegs:$dst),
>> + (ins PredRegs:$src1, globaladdress:$global,
u16Imm:$offset),
>> + "if ($src1) $dst=memd(##$global+$offset)",
>> + []>,
>> + Requires<[HasV4T]>;
>>
>> -// memd(Rs+#s11:3)=Rtt
>> -// memd(Rs+Ru<<#u2)=Rtt
>> -let AddedComplexity = 10, isPredicable = 1 in
>> -def STrid_indexed_shl_V4 : STInst<(outs),
>> - (ins IntRegs:$src1, IntRegs:$src2, u2Imm:$src3,
DoubleRegs:$src4),
>> - "memd($src1+$src2<<#$src3) = $src4",
>> - [(store DoubleRegs:$src4, (add IntRegs:$src1,
>> - (shl IntRegs:$src2,
u2ImmPred:$src3)))]>,
>> +let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in
>> +def LDrid_GP_cNotPt_V4 : LDInst<(outs DoubleRegs:$dst),
>> + (ins PredRegs:$src1, globaladdress:$global,
u16Imm:$offset),
>> + "if (!$src1) $dst=memd(##$global+$offset)",
>> + []>,
>> Requires<[HasV4T]>;
>>
>> -// memd(Ru<<#u2+#U6)=Rtt
>> -let AddedComplexity = 10 in
>> -def STrid_shl_V4 : STInst<(outs),
>> - (ins IntRegs:$src1, u2Imm:$src2, u6Imm:$src3,
DoubleRegs:$src4),
>> - "memd($src1<<#$src2+#$src3) = $src4",
>> - [(store DoubleRegs:$src4, (shl IntRegs:$src1,
>> - (add u2ImmPred:$src2,
>> - u6ImmPred:$src3)))]>,
>> +let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in
>> +def LDrid_GP_cdnPt_V4 : LDInst<(outs DoubleRegs:$dst),
>> + (ins PredRegs:$src1, globaladdress:$global,
u16Imm:$offset),
>> + "if ($src1.new) $dst=memd(##$global+$offset)",
>> + []>,
>> Requires<[HasV4T]>;
>>
>> -// memd(Rx++#s4:3)=Rtt
>> -// memd(Rx++#s4:3:circ(Mu))=Rtt
>> -// memd(Rx++I:circ(Mu))=Rtt
>> -// memd(Rx++Mu)=Rtt
>> -// memd(Rx++Mu:brev)=Rtt
>> -// memd(gp+#u16:3)=Rtt
>> +let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in
>> +def LDrid_GP_cdnNotPt_V4 : LDInst<(outs DoubleRegs:$dst),
>> + (ins PredRegs:$src1, globaladdress:$global,
u16Imm:$offset),
>> + "if (!$src1.new) $dst=memd(##$global+$offset)",
>> + []>,
>> + Requires<[HasV4T]>;
>>
>> -// Store doubleword conditionally.
>> -// if ([!]Pv[.new]) memd(#u6)=Rtt
>> -// TODO: needs to be implemented.
>> +let isPredicable = 1, mayLoad = 1, neverHasSideEffects = 1 in
>> +def LDrib_GP_V4 : LDInst<(outs IntRegs:$dst),
>> + (ins globaladdress:$global, u16Imm:$offset),
>> + "$dst=memb(#$global+$offset)",
>> + []>,
>> + Requires<[HasV4T]>;
>>
>> -// if ([!]Pv[.new]) memd(Rs+#u6:3)=Rtt
>> -// if (Pv) memd(Rs+#u6:3)=Rtt
>> -// if (Pv.new) memd(Rs+#u6:3)=Rtt
>> -let AddedComplexity = 10, mayStore = 1, neverHasSideEffects = 1 in
>> -def STrid_cdnPt_V4 : STInst<(outs),
>> - (ins PredRegs:$src1, MEMri:$addr, DoubleRegs:$src2),
>> - "if ($src1.new) memd($addr) = $src2",
>> +let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in
>> +def LDrib_GP_cPt_V4 : LDInst<(outs IntRegs:$dst),
>> + (ins PredRegs:$src1, globaladdress:$global,
u16Imm:$offset),
>> + "if ($src1) $dst=memb(##$global+$offset)",
>> []>,
>> Requires<[HasV4T]>;
>>
>> -// if (!Pv) memd(Rs+#u6:3)=Rtt
>> -// if (!Pv.new) memd(Rs+#u6:3)=Rtt
>> -let AddedComplexity = 10, mayStore = 1, neverHasSideEffects = 1 in
>> -def STrid_cdnNotPt_V4 : STInst<(outs),
>> - (ins PredRegs:$src1, MEMri:$addr, DoubleRegs:$src2),
>> - "if (!$src1.new) memd($addr) = $src2",
>> +let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in
>> +def LDrib_GP_cNotPt_V4 : LDInst<(outs IntRegs:$dst),
>> + (ins PredRegs:$src1, globaladdress:$global,
u16Imm:$offset),
>> + "if (!$src1) $dst=memb(##$global+$offset)",
>> []>,
>> Requires<[HasV4T]>;
>>
>> -// if (Pv) memd(Rs+#u6:3)=Rtt
>> -// if (Pv.new) memd(Rs+#u6:3)=Rtt
>> -let AddedComplexity = 10, mayStore = 1, neverHasSideEffects = 1 in
>> -def STrid_indexed_cdnPt_V4 : STInst<(outs),
>> - (ins PredRegs:$src1, IntRegs:$src2, u6_3Imm:$src3,
>> - DoubleRegs:$src4),
>> - "if ($src1.new) memd($src2+#$src3) = $src4",
>> +let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in
>> +def LDrib_GP_cdnPt_V4 : LDInst<(outs IntRegs:$dst),
>> + (ins PredRegs:$src1, globaladdress:$global,
u16Imm:$offset),
>> + "if ($src1.new) $dst=memb(##$global+$offset)",
>> []>,
>> Requires<[HasV4T]>;
>>
>> -// if (!Pv) memd(Rs+#u6:3)=Rtt
>> -// if (!Pv.new) memd(Rs+#u6:3)=Rtt
>> -let AddedComplexity = 10, mayStore = 1, neverHasSideEffects = 1 in
>> -def STrid_indexed_cdnNotPt_V4 : STInst<(outs),
>> - (ins PredRegs:$src1, IntRegs:$src2, u6_3Imm:$src3,
>> - DoubleRegs:$src4),
>> - "if (!$src1.new) memd($src2+#$src3) = $src4",
>> +let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in
>> +def LDrib_GP_cdnNotPt_V4 : LDInst<(outs IntRegs:$dst),
>> + (ins PredRegs:$src1, globaladdress:$global,
u16Imm:$offset),
>> + "if (!$src1.new) $dst=memb(##$global+$offset)",
>> []>,
>> Requires<[HasV4T]>;
>>
>> -// if ([!]Pv[.new]) memd(Rs+Ru<<#u2)=Rtt
>> -// if (Pv) memd(Rs+Ru<<#u2)=Rtt
>> -let AddedComplexity = 10, mayStore = 1, neverHasSideEffects = 1 in
>> -def STrid_indexed_shl_cPt_V4 : STInst<(outs),
>> - (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
u2Imm:$src4,
>> - DoubleRegs:$src5),
>> - "if ($src1) memd($src2+$src3<<#$src4) =
$src5",
>> +
>> +let isPredicable = 1, mayLoad = 1, neverHasSideEffects = 1 in
>> +def LDriub_GP_V4 : LDInst<(outs IntRegs:$dst),
>> + (ins globaladdress:$global, u16Imm:$offset),
>> + "$dst=memub(#$global+$offset)",
>> []>,
>> Requires<[HasV4T]>;
>>
>> -// if (Pv.new) memd(Rs+Ru<<#u2)=Rtt
>> -let AddedComplexity = 10, mayStore = 1, neverHasSideEffects = 1 in
>> -def STrid_indexed_shl_cdnPt_V4 : STInst<(outs),
>> - (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
u2Imm:$src4,
>> - DoubleRegs:$src5),
>> - "if ($src1) memd($src2+$src3<<#$src4) =
$src5",
>> +
>> +let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in
>> +def LDriub_GP_cPt_V4 : LDInst<(outs IntRegs:$dst),
>> + (ins PredRegs:$src1, globaladdress:$global,
u16Imm:$offset),
>> + "if ($src1) $dst=memub(##$global+$offset)",
>> []>,
>> Requires<[HasV4T]>;
>> -// if (!Pv) memd(Rs+Ru<<#u2)=Rtt
>> -let AddedComplexity = 10, mayStore = 1, neverHasSideEffects = 1 in
>> -def STrid_indexed_shl_cNotPt_V4 : STInst<(outs),
>> - (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
u2Imm:$src4,
>> - DoubleRegs:$src5),
>> - "if (!$src1) memd($src2+$src3<<#$src4) =
$src5",
>> +
>> +let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in
>> +def LDriub_GP_cNotPt_V4 : LDInst<(outs IntRegs:$dst),
>> + (ins PredRegs:$src1, globaladdress:$global,
u16Imm:$offset),
>> + "if (!$src1) $dst=memub(##$global+$offset)",
>> []>,
>> Requires<[HasV4T]>;
>> -// if (!Pv.new) memd(Rs+Ru<<#u2)=Rtt
>> -let AddedComplexity = 10, mayStore = 1, neverHasSideEffects = 1 in
>> -def STrid_indexed_shl_cdnNotPt_V4 : STInst<(outs),
>> - (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
u2Imm:$src4,
>> - DoubleRegs:$src5),
>> - "if (!$src1.new) memd($src2+$src3<<#$src4) =
$src5",
>> +
>> +let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in
>> +def LDriub_GP_cdnPt_V4 : LDInst<(outs IntRegs:$dst),
>> + (ins PredRegs:$src1, globaladdress:$global,
u16Imm:$offset),
>> + "if ($src1.new) $dst=memub(##$global+$offset)",
>> []>,
>> Requires<[HasV4T]>;
>>
>> -// if ([!]Pv[.new]) memd(Rx++#s4:3)=Rtt
>> -// if (Pv) memd(Rx++#s4:3)=Rtt
>> -// if (Pv.new) memd(Rx++#s4:3)=Rtt
>> -let AddedComplexity = 10, mayStore = 1, neverHasSideEffects = 1 in
>> -def POST_STdri_cdnPt_V4 : STInstPI<(outs IntRegs:$dst),
>> - (ins PredRegs:$src1, DoubleRegs:$src2, IntRegs:$src3,
>> - s4_3Imm:$offset),
>> - "if ($src1.new) memd($src3++#$offset) = $src2",
>> - [],
>> - "$src3 = $dst">,
>> +let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in
>> +def LDriub_GP_cdnNotPt_V4 : LDInst<(outs IntRegs:$dst),
>> + (ins PredRegs:$src1, globaladdress:$global,
u16Imm:$offset),
>> + "if (!$src1.new) $dst=memub(##$global+$offset)",
>> + []>,
>> Requires<[HasV4T]>;
>>
>> -// if (!Pv) memd(Rx++#s4:3)=Rtt
>> -// if (!Pv.new) memd(Rx++#s4:3)=Rtt
>> -let AddedComplexity = 10, mayStore = 1, neverHasSideEffects = 1 in
>> -def POST_STdri_cdnNotPt_V4 : STInstPI<(outs IntRegs:$dst),
>> - (ins PredRegs:$src1, DoubleRegs:$src2, IntRegs:$src3,
>> - s4_3Imm:$offset),
>> - "if (!$src1.new) memd($src3++#$offset) = $src2",
>> - [],
>> - "$src3 = $dst">,
>> +
>> +let isPredicable = 1, mayLoad = 1, neverHasSideEffects = 1 in
>> +def LDrih_GP_V4 : LDInst<(outs IntRegs:$dst),
>> + (ins globaladdress:$global, u16Imm:$offset),
>> + "$dst=memh(#$global+$offset)",
>> + []>,
>> Requires<[HasV4T]>;
>>
>>
>> -// Store byte.
>> -// memb(Re=#U6)=Rt
>> -// TODO: needs to be implemented.
>> -// memb(Rs+#s11:0)=Rt
>> -// memb(Rs+#u6:0)=#S8
>> -let AddedComplexity = 10, isPredicable = 1 in
>> -def STrib_imm_V4 : STInst<(outs),
>> - (ins IntRegs:$src1, u6_0Imm:$src2, s8Imm:$src3),
>> - "memb($src1+#$src2) = #$src3",
>> - [(truncstorei8 s8ImmPred:$src3, (add IntRegs:$src1,
>> -
u6_0ImmPred:$src2))]>,
>> +let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in
>> +def LDrih_GP_cPt_V4 : LDInst<(outs IntRegs:$dst),
>> + (ins PredRegs:$src1, globaladdress:$global,
u16Imm:$offset),
>> + "if ($src1) $dst=memh(##$global+$offset)",
>> + []>,
>> Requires<[HasV4T]>;
>>
>> -// memb(Rs+Ru<<#u2)=Rt
>> -let AddedComplexity = 10, isPredicable = 1 in
>> -def STrib_indexed_shl_V4 : STInst<(outs),
>> - (ins IntRegs:$src1, IntRegs:$src2, u2Imm:$src3,
IntRegs:$src4),
>> - "memb($src1+$src2<<#$src3) = $src4",
>> - [(truncstorei8 IntRegs:$src4, (add IntRegs:$src1,
>> - (shl IntRegs:$src2,
>> -
u2ImmPred:$src3)))]>,
>> +let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in
>> +def LDrih_GP_cNotPt_V4 : LDInst<(outs IntRegs:$dst),
>> + (ins PredRegs:$src1, globaladdress:$global,
u16Imm:$offset),
>> + "if (!$src1) $dst=memh(##$global+$offset)",
>> + []>,
>> Requires<[HasV4T]>;
>>
>> -// memb(Ru<<#u2+#U6)=Rt
>> -let AddedComplexity = 10 in
>> -def STrib_shl_V4 : STInst<(outs),
>> - (ins IntRegs:$src1, u2Imm:$src2, u6Imm:$src3,
IntRegs:$src4),
>> - "memb($src1<<#$src2+#$src3) = $src4",
>> - [(truncstorei8 IntRegs:$src4, (shl IntRegs:$src1,
>> - (add u2ImmPred:$src2,
>> -
u6ImmPred:$src3)))]>,
>> +let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in
>> +def LDrih_GP_cdnPt_V4 : LDInst<(outs IntRegs:$dst),
>> + (ins PredRegs:$src1, globaladdress:$global,
u16Imm:$offset),
>> + "if ($src1.new) $dst=memh(##$global+$offset)",
>> + []>,
>> Requires<[HasV4T]>;
>>
>> -// memb(Rx++#s4:0:circ(Mu))=Rt
>> -// memb(Rx++I:circ(Mu))=Rt
>> -// memb(Rx++Mu)=Rt
>> -// memb(Rx++Mu:brev)=Rt
>> -// memb(gp+#u16:0)=Rt
>> +let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in
>> +def LDrih_GP_cdnNotPt_V4 : LDInst<(outs IntRegs:$dst),
>> + (ins PredRegs:$src1, globaladdress:$global,
u16Imm:$offset),
>> + "if (!$src1.new) $dst=memh(##$global+$offset)",
>> + []>,
>> + Requires<[HasV4T]>;
>>
>>
>> -// Store byte conditionally.
>> -// if ([!]Pv[.new]) memb(#u6)=Rt
>> -// if ([!]Pv[.new]) memb(Rs+#u6:0)=#S6
>> -// if (Pv) memb(Rs+#u6:0)=#S6
>> -let mayStore = 1, neverHasSideEffects = 1 in
>> -def STrib_imm_cPt_V4 : STInst<(outs),
>> - (ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3,
s6Imm:$src4),
>> - "if ($src1) memb($src2+#$src3) = #$src4",
>> +let isPredicable = 1, mayLoad = 1, neverHasSideEffects = 1 in
>> +def LDriuh_GP_V4 : LDInst<(outs IntRegs:$dst),
>> + (ins globaladdress:$global, u16Imm:$offset),
>> + "$dst=memuh(#$global+$offset)",
>> []>,
>> Requires<[HasV4T]>;
>>
>> -// if (Pv.new) memb(Rs+#u6:0)=#S6
>> -let mayStore = 1, neverHasSideEffects = 1 in
>> -def STrib_imm_cdnPt_V4 : STInst<(outs),
>> - (ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3,
s6Imm:$src4),
>> - "if ($src1.new) memb($src2+#$src3) = #$src4",
>> +let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in
>> +def LDriuh_GP_cPt_V4 : LDInst<(outs IntRegs:$dst),
>> + (ins PredRegs:$src1, globaladdress:$global,
u16Imm:$offset),
>> + "if ($src1) $dst=memuh(##$global+$offset)",
>> []>,
>> Requires<[HasV4T]>;
>>
>> -// if (!Pv) memb(Rs+#u6:0)=#S6
>> -let mayStore = 1, neverHasSideEffects = 1 in
>> -def STrib_imm_cNotPt_V4 : STInst<(outs),
>> - (ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3,
s6Imm:$src4),
>> - "if (!$src1) memb($src2+#$src3) = #$src4",
>> +let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in
>> +def LDriuh_GP_cNotPt_V4 : LDInst<(outs IntRegs:$dst),
>> + (ins PredRegs:$src1, globaladdress:$global,
u16Imm:$offset),
>> + "if (!$src1) $dst=memuh(##$global+$offset)",
>> []>,
>> Requires<[HasV4T]>;
>>
>> -// if (!Pv.new) memb(Rs+#u6:0)=#S6
>> -let mayStore = 1, neverHasSideEffects = 1 in
>> -def STrib_imm_cdnNotPt_V4 : STInst<(outs),
>> - (ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3,
s6Imm:$src4),
>> - "if (!$src1.new) memb($src2+#$src3) = #$src4",
>> +let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in
>> +def LDriuh_GP_cdnPt_V4 : LDInst<(outs IntRegs:$dst),
>> + (ins PredRegs:$src1, globaladdress:$global,
u16Imm:$offset),
>> + "if ($src1.new) $dst=memuh(##$global+$offset)",
>> []>,
>> Requires<[HasV4T]>;
>>
>> -// if ([!]Pv[.new]) memb(Rs+#u6:0)=Rt
>> -// if (Pv) memb(Rs+#u6:0)=Rt
>> -// if (Pv.new) memb(Rs+#u6:0)=Rt
>> -let mayStore = 1, neverHasSideEffects = 1 in
>> -def STrib_cdnPt_V4 : STInst<(outs),
>> - (ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2),
>> - "if ($src1.new) memb($addr) = $src2",
>> +let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in
>> +def LDriuh_GP_cdnNotPt_V4 : LDInst<(outs IntRegs:$dst),
>> + (ins PredRegs:$src1, globaladdress:$global,
u16Imm:$offset),
>> + "if (!$src1.new) $dst=memuh(##$global+$offset)",
>> []>,
>> Requires<[HasV4T]>;
>>
>> -// if (!Pv) memb(Rs+#u6:0)=Rt
>> -// if (!Pv.new) memb(Rs+#u6:0)=Rt
>> -let mayStore = 1, neverHasSideEffects = 1 in
>> -def STrib_cdnNotPt_V4 : STInst<(outs),
>> - (ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2),
>> - "if (!$src1.new) memb($addr) = $src2",
>> +let isPredicable = 1, mayLoad = 1, neverHasSideEffects = 1 in
>> +def LDriw_GP_V4 : LDInst<(outs IntRegs:$dst),
>> + (ins globaladdress:$global, u16Imm:$offset),
>> + "$dst=memw(#$global+$offset)",
>> []>,
>> Requires<[HasV4T]>;
>>
>> -// if (Pv) memb(Rs+#u6:0)=Rt
>> -// if (!Pv) memb(Rs+#u6:0)=Rt
>> -// if (Pv.new) memb(Rs+#u6:0)=Rt
>> -let mayStore = 1, neverHasSideEffects = 1 in
>> -def STrib_indexed_cdnPt_V4 : STInst<(outs),
>> - (ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3,
IntRegs:$src4),
>> - "if ($src1.new) memb($src2+#$src3) = $src4",
>> +
>> +let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in
>> +def LDriw_GP_cPt_V4 : LDInst<(outs IntRegs:$dst),
>> + (ins PredRegs:$src1, globaladdress:$global,
u16Imm:$offset),
>> + "if ($src1) $dst=memw(##$global+$offset)",
>> []>,
>> Requires<[HasV4T]>;
>>
>> -// if (!Pv.new) memb(Rs+#u6:0)=Rt
>> -let mayStore = 1, neverHasSideEffects = 1 in
>> +let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in
>> +def LDriw_GP_cNotPt_V4 : LDInst<(outs IntRegs:$dst),
>> + (ins PredRegs:$src1, globaladdress:$global,
u16Imm:$offset),
>> + "if (!$src1) $dst=memw(##$global+$offset)",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +
>> +let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in
>> +def LDriw_GP_cdnPt_V4 : LDInst<(outs IntRegs:$dst),
>> + (ins PredRegs:$src1, globaladdress:$global,
u16Imm:$offset),
>> + "if ($src1.new) $dst=memw(##$global+$offset)",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in
>> +def LDriw_GP_cdnNotPt_V4 : LDInst<(outs IntRegs:$dst),
>> + (ins PredRegs:$src1, globaladdress:$global,
u16Imm:$offset),
>> + "if (!$src1.new) $dst=memw(##$global+$offset)",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +
>> +let isPredicable = 1, mayLoad = 1, neverHasSideEffects = 1 in
>> +def LDd_GP_V4 : LDInst<(outs DoubleRegs:$dst),
>> + (ins globaladdress:$global),
>> + "$dst=memd(#$global)",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +// if (Pv) Rtt=memd(##global)
>> +let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in
>> +def LDd_GP_cPt_V4 : LDInst<(outs DoubleRegs:$dst),
>> + (ins PredRegs:$src1, globaladdress:$global),
>> + "if ($src1) $dst=memd(##$global)",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +
>> +// if (!Pv) Rtt=memd(##global)
>> +let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in
>> +def LDd_GP_cNotPt_V4 : LDInst<(outs DoubleRegs:$dst),
>> + (ins PredRegs:$src1, globaladdress:$global),
>> + "if (!$src1) $dst=memd(##$global)",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +// if (Pv) Rtt=memd(##global)
>> +let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in
>> +def LDd_GP_cdnPt_V4 : LDInst<(outs DoubleRegs:$dst),
>> + (ins PredRegs:$src1, globaladdress:$global),
>> + "if ($src1.new) $dst=memd(##$global)",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +
>> +// if (!Pv) Rtt=memd(##global)
>> +let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in
>> +def LDd_GP_cdnNotPt_V4 : LDInst<(outs DoubleRegs:$dst),
>> + (ins PredRegs:$src1, globaladdress:$global),
>> + "if (!$src1.new) $dst=memd(##$global)",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +let isPredicable = 1, mayLoad = 1, neverHasSideEffects = 1 in
>> +def LDb_GP_V4 : LDInst<(outs IntRegs:$dst),
>> + (ins globaladdress:$global),
>> + "$dst=memb(#$global)",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +// if (Pv) Rt=memb(##global)
>> +let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in
>> +def LDb_GP_cPt_V4 : LDInst<(outs IntRegs:$dst),
>> + (ins PredRegs:$src1, globaladdress:$global),
>> + "if ($src1) $dst=memb(##$global)",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +// if (!Pv) Rt=memb(##global)
>> +let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in
>> +def LDb_GP_cNotPt_V4 : LDInst<(outs IntRegs:$dst),
>> + (ins PredRegs:$src1, globaladdress:$global),
>> + "if (!$src1) $dst=memb(##$global)",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +// if (Pv) Rt=memb(##global)
>> +let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in
>> +def LDb_GP_cdnPt_V4 : LDInst<(outs IntRegs:$dst),
>> + (ins PredRegs:$src1, globaladdress:$global),
>> + "if ($src1.new) $dst=memb(##$global)",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +// if (!Pv) Rt=memb(##global)
>> +let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in
>> +def LDb_GP_cdnNotPt_V4 : LDInst<(outs IntRegs:$dst),
>> + (ins PredRegs:$src1, globaladdress:$global),
>> + "if (!$src1.new) $dst=memb(##$global)",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +let isPredicable = 1, mayLoad = 1, neverHasSideEffects = 1 in
>> +def LDub_GP_V4 : LDInst<(outs IntRegs:$dst),
>> + (ins globaladdress:$global),
>> + "$dst=memub(#$global)",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +// if (Pv) Rt=memub(##global)
>> +let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in
>> +def LDub_GP_cPt_V4 : LDInst<(outs IntRegs:$dst),
>> + (ins PredRegs:$src1, globaladdress:$global),
>> + "if ($src1) $dst=memub(##$global)",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +
>> +// if (!Pv) Rt=memub(##global)
>> +let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in
>> +def LDub_GP_cNotPt_V4 : LDInst<(outs IntRegs:$dst),
>> + (ins PredRegs:$src1, globaladdress:$global),
>> + "if (!$src1) $dst=memub(##$global)",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +// if (Pv) Rt=memub(##global)
>> +let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in
>> +def LDub_GP_cdnPt_V4 : LDInst<(outs IntRegs:$dst),
>> + (ins PredRegs:$src1, globaladdress:$global),
>> + "if ($src1.new) $dst=memub(##$global)",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +
>> +// if (!Pv) Rt=memub(##global)
>> +let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in
>> +def LDub_GP_cdnNotPt_V4 : LDInst<(outs IntRegs:$dst),
>> + (ins PredRegs:$src1, globaladdress:$global),
>> + "if (!$src1.new) $dst=memub(##$global)",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +let isPredicable = 1, mayLoad = 1, neverHasSideEffects = 1 in
>> +def LDh_GP_V4 : LDInst<(outs IntRegs:$dst),
>> + (ins globaladdress:$global),
>> + "$dst=memh(#$global)",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +// if (Pv) Rt=memh(##global)
>> +let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in
>> +def LDh_GP_cPt_V4 : LDInst<(outs IntRegs:$dst),
>> + (ins PredRegs:$src1, globaladdress:$global),
>> + "if ($src1) $dst=memh(##$global)",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +// if (!Pv) Rt=memh(##global)
>> +let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in
>> +def LDh_GP_cNotPt_V4 : LDInst<(outs IntRegs:$dst),
>> + (ins PredRegs:$src1, globaladdress:$global),
>> + "if (!$src1) $dst=memh(##$global)",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +// if (Pv) Rt=memh(##global)
>> +let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in
>> +def LDh_GP_cdnPt_V4 : LDInst<(outs IntRegs:$dst),
>> + (ins PredRegs:$src1, globaladdress:$global),
>> + "if ($src1.new) $dst=memh(##$global)",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +// if (!Pv) Rt=memh(##global)
>> +let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in
>> +def LDh_GP_cdnNotPt_V4 : LDInst<(outs IntRegs:$dst),
>> + (ins PredRegs:$src1, globaladdress:$global),
>> + "if (!$src1.new) $dst=memh(##$global)",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +let isPredicable = 1, mayLoad = 1, neverHasSideEffects = 1 in
>> +def LDuh_GP_V4 : LDInst<(outs IntRegs:$dst),
>> + (ins globaladdress:$global),
>> + "$dst=memuh(#$global)",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +// if (Pv) Rt=memuh(##global)
>> +let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in
>> +def LDuh_GP_cPt_V4 : LDInst<(outs IntRegs:$dst),
>> + (ins PredRegs:$src1, globaladdress:$global),
>> + "if ($src1) $dst=memuh(##$global)",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +// if (!Pv) Rt=memuh(##global)
>> +let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in
>> +def LDuh_GP_cNotPt_V4 : LDInst<(outs IntRegs:$dst),
>> + (ins PredRegs:$src1, globaladdress:$global),
>> + "if (!$src1) $dst=memuh(##$global)",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +// if (Pv) Rt=memuh(##global)
>> +let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in
>> +def LDuh_GP_cdnPt_V4 : LDInst<(outs IntRegs:$dst),
>> + (ins PredRegs:$src1, globaladdress:$global),
>> + "if ($src1.new) $dst=memuh(##$global)",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +// if (!Pv) Rt=memuh(##global)
>> +let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in
>> +def LDuh_GP_cdnNotPt_V4 : LDInst<(outs IntRegs:$dst),
>> + (ins PredRegs:$src1, globaladdress:$global),
>> + "if (!$src1.new) $dst=memuh(##$global)",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +let isPredicable = 1, mayLoad = 1, neverHasSideEffects = 1 in
>> +def LDw_GP_V4 : LDInst<(outs IntRegs:$dst),
>> + (ins globaladdress:$global),
>> + "$dst=memw(#$global)",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +// if (Pv) Rt=memw(##global)
>> +let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in
>> +def LDw_GP_cPt_V4 : LDInst<(outs IntRegs:$dst),
>> + (ins PredRegs:$src1, globaladdress:$global),
>> + "if ($src1) $dst=memw(##$global)",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +
>> +// if (!Pv) Rt=memw(##global)
>> +let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in
>> +def LDw_GP_cNotPt_V4 : LDInst<(outs IntRegs:$dst),
>> + (ins PredRegs:$src1, globaladdress:$global),
>> + "if (!$src1) $dst=memw(##$global)",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +// if (Pv) Rt=memw(##global)
>> +let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in
>> +def LDw_GP_cdnPt_V4 : LDInst<(outs IntRegs:$dst),
>> + (ins PredRegs:$src1, globaladdress:$global),
>> + "if ($src1.new) $dst=memw(##$global)",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +
>> +// if (!Pv) Rt=memw(##global)
>> +let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in
>> +def LDw_GP_cdnNotPt_V4 : LDInst<(outs IntRegs:$dst),
>> + (ins PredRegs:$src1, globaladdress:$global),
>> + "if (!$src1.new) $dst=memw(##$global)",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +
>> +
>> +def : Pat<(atomic_load_64 (HexagonCONST32_GP tglobaladdr:$global)),
>> + (i64 (LDd_GP_V4 tglobaladdr:$global))>,
>> + Requires<[HasV4T]>;
>> +
>> +def : Pat<(atomic_load_32 (HexagonCONST32_GP tglobaladdr:$global)),
>> + (i32 (LDw_GP_V4 tglobaladdr:$global))>,
>> + Requires<[HasV4T]>;
>> +
>> +def : Pat<(atomic_load_16 (HexagonCONST32_GP tglobaladdr:$global)),
>> + (i32 (LDuh_GP_V4 tglobaladdr:$global))>,
>> + Requires<[HasV4T]>;
>> +
>> +def : Pat<(atomic_load_8 (HexagonCONST32_GP tglobaladdr:$global)),
>> + (i32 (LDub_GP_V4 tglobaladdr:$global))>,
>> + Requires<[HasV4T]>;
>> +
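The four atomic_load_* mappings above lower atomic loads of globals to the plain GP load instructions, presumably because an aligned load is already a single access on Hexagon, so a relaxed atomic load needs no extra code. A hypothetical C11 sketch of code that would exercise them (names invented, not from the patch):

    /* Illustration only. */
    #include <stdatomic.h>

    atomic_int   counter;
    atomic_llong total;

    int read_counter(void) {
      /* atomic_load_32 of a global, lowered to a plain word load of the symbol */
      return atomic_load_explicit(&counter, memory_order_relaxed);
    }

    long long read_total(void) {
      /* atomic_load_64 of a global, lowered to a plain doubleword load */
      return atomic_load_explicit(&total, memory_order_relaxed);
    }
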
>> +// Map from load(globaladdress) -> memw(#foo + 0)
>> +let AddedComplexity = 100 in
>> +def : Pat<(i64 (load (HexagonCONST32_GP tglobaladdr:$global))),
>> + (i64 (LDd_GP_V4 tglobaladdr:$global))>,
>> + Requires<[HasV4T]>;
>> +
>> +// Map from Pd = load(globaladdress) -> Rd = memb(globaladdress), Pd = Rd
>> +let AddedComplexity = 100 in
>> +def : Pat<(i1 (load (HexagonCONST32_GP tglobaladdr:$global))),
>> + (i1 (TFR_PdRs (i32 (LDb_GP_V4 tglobaladdr:$global))))>,
>> + Requires<[HasV4T]>;
>> +
>> +// When the Interprocedural Global Variable optimizer realizes that a certain
>> +// global variable takes only two constant values, it shrinks the global to
>> +// a boolean. Catch those loads here in the following 3 patterns.
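A hypothetical source-level case (not from the patch) that produces such loads once the global optimizer shrinks the variable; the three patterns that follow then select memb/memub of the global:

    /* Illustration only: 'mode' is only ever stored the constants 0 and 1,
       so the interprocedural global optimizer may shrink it to an i1 global.
       Reads of it then appear as i1 / extending-i1 loads of a global address. */
    static int mode;

    void enable(void)     { mode = 1; }
    void disable(void)    { mode = 0; }
    int  is_enabled(void) { return mode; }
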
>> +let AddedComplexity = 100 in
>> +def : Pat<(i32 (extloadi1 (HexagonCONST32_GP
tglobaladdr:$global))),
>> + (i32 (LDb_GP_V4 tglobaladdr:$global))>,
>> + Requires<[HasV4T]>;
>> +
>> +let AddedComplexity = 100 in
>> +def : Pat<(i32 (sextloadi1 (HexagonCONST32_GP
tglobaladdr:$global))),
>> + (i32 (LDb_GP_V4 tglobaladdr:$global))>,
>> + Requires<[HasV4T]>;
>> +
>> +// Map from load(globaladdress) -> memb(#foo)
>> +let AddedComplexity = 100 in
>> +def : Pat<(i32 (extloadi8 (HexagonCONST32_GP
tglobaladdr:$global))),
>> + (i32 (LDb_GP_V4 tglobaladdr:$global))>,
>> + Requires<[HasV4T]>;
>> +
>> +// Map from load(globaladdress) -> memb(#foo)
>> +let AddedComplexity = 100 in
>> +def : Pat<(i32 (sextloadi8 (HexagonCONST32_GP
tglobaladdr:$global))),
>> + (i32 (LDb_GP_V4 tglobaladdr:$global))>,
>> + Requires<[HasV4T]>;
>> +
>> +let AddedComplexity = 100 in
>> +def : Pat<(i32 (zextloadi1 (HexagonCONST32_GP
tglobaladdr:$global))),
>> + (i32 (LDub_GP_V4 tglobaladdr:$global))>,
>> + Requires<[HasV4T]>;
>> +
>> +// Map from load(globaladdress) -> memub(#foo)
>> +let AddedComplexity = 100 in
>> +def : Pat<(i32 (zextloadi8 (HexagonCONST32_GP
tglobaladdr:$global))),
>> + (i32 (LDub_GP_V4 tglobaladdr:$global))>,
>> + Requires<[HasV4T]>;
>> +
>> +// Map from load(globaladdress) -> memh(#foo)
>> +let AddedComplexity = 100 in
>> +def : Pat<(i32 (extloadi16 (HexagonCONST32_GP
tglobaladdr:$global))),
>> + (i32 (LDh_GP_V4 tglobaladdr:$global))>,
>> + Requires<[HasV4T]>;
>> +
>> +// Map from load(globaladdress) -> memh(#foo)
>> +let AddedComplexity = 100 in
>> +def : Pat<(i32 (sextloadi16 (HexagonCONST32_GP
tglobaladdr:$global))),
>> + (i32 (LDh_GP_V4 tglobaladdr:$global))>,
>> + Requires<[HasV4T]>;
>> +
>> +// Map from load(globaladdress) -> memuh(#foo)
>> +let AddedComplexity = 100 in
>> +def : Pat<(i32 (zextloadi16 (HexagonCONST32_GP
tglobaladdr:$global))),
>> + (i32 (LDuh_GP_V4 tglobaladdr:$global))>,
>> + Requires<[HasV4T]>;
>> +
>> +// Map from load(globaladdress) -> memw(#foo)
>> +let AddedComplexity = 100 in
>> +def : Pat<(i32 (load (HexagonCONST32_GP tglobaladdr:$global))),
>> + (i32 (LDw_GP_V4 tglobaladdr:$global))>,
>> + Requires<[HasV4T]>;
>> +
>> +def : Pat<(atomic_load_64 (add (HexagonCONST32_GP
tglobaladdr:$global),
>> + u16ImmPred:$offset)),
>> + (i64 (LDrid_GP_V4 tglobaladdr:$global,
u16ImmPred:$offset))>,
>> + Requires<[HasV4T]>;
>> +
>> +def : Pat<(atomic_load_32 (add (HexagonCONST32_GP
tglobaladdr:$global),
>> + u16ImmPred:$offset)),
>> + (i32 (LDriw_GP_V4 tglobaladdr:$global,
u16ImmPred:$offset))>,
>> + Requires<[HasV4T]>;
>> +
>> +def : Pat<(atomic_load_16 (add (HexagonCONST32_GP
tglobaladdr:$global),
>> + u16ImmPred:$offset)),
>> + (i32 (LDriuh_GP_V4 tglobaladdr:$global,
u16ImmPred:$offset))>,
>> + Requires<[HasV4T]>;
>> +
>> +def : Pat<(atomic_load_8 (add (HexagonCONST32_GP
tglobaladdr:$global),
>> + u16ImmPred:$offset)),
>> + (i32 (LDriub_GP_V4 tglobaladdr:$global,
u16ImmPred:$offset))>,
>> + Requires<[HasV4T]>;
>> +
>> +// Map from load(globaladdress + x) -> memd(#foo + x)
>> +let AddedComplexity = 100 in
>> +def : Pat<(i64 (load (add (HexagonCONST32_GP tglobaladdr:$global),
>> + u16ImmPred:$offset))),
>> + (i64 (LDrid_GP_V4 tglobaladdr:$global,
u16ImmPred:$offset))>,
>> + Requires<[HasV4T]>;
>> +
>> +// Map from load(globaladdress + x) -> memb(#foo + x)
>> +let AddedComplexity = 100 in
>> +def : Pat<(i32 (extloadi8 (add (HexagonCONST32_GP
tglobaladdr:$global),
>> + u16ImmPred:$offset))),
>> + (i32 (LDrib_GP_V4 tglobaladdr:$global,
u16ImmPred:$offset))>,
>> + Requires<[HasV4T]>;
>> +
>> +// Map from load(globaladdress + x) -> memb(#foo + x)
>> +let AddedComplexity = 100 in
>> +def : Pat<(i32 (sextloadi8 (add (HexagonCONST32_GP
tglobaladdr:$global),
>> + u16ImmPred:$offset))),
>> + (i32 (LDrib_GP_V4 tglobaladdr:$global,
u16ImmPred:$offset))>,
>> + Requires<[HasV4T]>;
>> +
>> +// Map from load(globaladdress + x) -> memub(#foo + x)
>> +let AddedComplexity = 100 in
>> +def : Pat<(i32 (zextloadi8 (add (HexagonCONST32_GP
tglobaladdr:$global),
>> + u16ImmPred:$offset))),
>> + (i32 (LDriub_GP_V4 tglobaladdr:$global,
u16ImmPred:$offset))>,
>> + Requires<[HasV4T]>;
>> +
>> +// Map from load(globaladdress + x) -> memuh(#foo + x)
>> +let AddedComplexity = 100 in
>> +def : Pat<(i32 (extloadi16 (add (HexagonCONST32_GP
tglobaladdr:$global),
>> + u16ImmPred:$offset))),
>> + (i32 (LDrih_GP_V4 tglobaladdr:$global,
u16ImmPred:$offset))>,
>> + Requires<[HasV4T]>;
>> +
>> +// Map from load(globaladdress + x) -> memh(#foo + x)
>> +let AddedComplexity = 100 in
>> +def : Pat<(i32 (sextloadi16 (add (HexagonCONST32_GP
tglobaladdr:$global),
>> + u16ImmPred:$offset))),
>> + (i32 (LDrih_GP_V4 tglobaladdr:$global,
u16ImmPred:$offset))>,
>> + Requires<[HasV4T]>;
>> +
>> +
>> +// Map from load(globaladdress + x) -> memuh(#foo + x)
>> +let AddedComplexity = 100 in
>> +def : Pat<(i32 (zextloadi16 (add (HexagonCONST32_GP
tglobaladdr:$global),
>> + u16ImmPred:$offset))),
>> + (i32 (LDriuh_GP_V4 tglobaladdr:$global,
u16ImmPred:$offset))>,
>> + Requires<[HasV4T]>;
>> +
>> +// Map from load(globaladdress + x) -> memw(#foo + x)
>> +let AddedComplexity = 100 in
>> +def : Pat<(i32 (load (add (HexagonCONST32_GP tglobaladdr:$global),
>> + u16ImmPred:$offset))),
>> + (i32 (LDriw_GP_V4 tglobaladdr:$global,
u16ImmPred:$offset))>,
>> + Requires<[HasV4T]>;
>> +
>> +
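For context, a hypothetical C sketch of the accesses the "globaladdress + offset" mappings above cover: a field of a global aggregate is a load of the symbol plus a constant offset, which these patterns fold into a single mem*(#global+offset) access. The struct and names are invented; the offsets assume the usual field alignment.

    /* Illustration only. */
    struct config {
      char      tag;     /* offset 0 */
      short     id;      /* offset 2 */
      int       value;   /* offset 4 */
      long long stamp;   /* offset 8 */
    };

    struct config g_cfg;

    int get_value(void)       { return g_cfg.value; }   /* memw(#g_cfg+4) */
    short get_id(void)        { return g_cfg.id;    }   /* memh(#g_cfg+2) */
    long long get_stamp(void) { return g_cfg.stamp; }   /* memd(#g_cfg+8) */
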
>> +//===----------------------------------------------------------------------===//
>> +// LD -
>> +//===----------------------------------------------------------------------===//
>> +
>> +//===----------------------------------------------------------------------===//
>> +// ST +
>> +//===----------------------------------------------------------------------===//
>> +///
>> +/// Assumptions::: ****** DO NOT IGNORE ********
>> +/// 1. Make sure that in post increment store, the zero'th operand is always the
>> +/// post increment operand.
>> +/// 2. Make sure that the store value operand(Rt/Rtt) in a store is always the
>> +/// last operand.
>> +///
>> +
>> +// memd(Re=#U6)=Rtt
>> +def STrid_abs_setimm_V4 : STInst<(outs IntRegs:$dst1),
>> + (ins DoubleRegs:$src1, u6Imm:$src2),
>> + "memd($dst1=#$src2) = $src1",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +// memb(Re=#U6)=Rs
>> +def STrib_abs_setimm_V4 : STInst<(outs IntRegs:$dst1),
>> + (ins IntRegs:$src1, u6Imm:$src2),
>> + "memb($dst1=#$src2) = $src1",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +// memh(Re=#U6)=Rs
>> +def STrih_abs_setimm_V4 : STInst<(outs IntRegs:$dst1),
>> + (ins IntRegs:$src1, u6Imm:$src2),
>> + "memh($dst1=#$src2) = $src1",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +// memw(Re=#U6)=Rs
>> +def STriw_abs_setimm_V4 : STInst<(outs IntRegs:$dst1),
>> + (ins IntRegs:$src1, u6Imm:$src2),
>> + "memw($dst1=#$src2) = $src1",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +// memd(Re=#U6)=Rtt
>> +def STrid_abs_set_V4 : STInst<(outs IntRegs:$dst1),
>> + (ins DoubleRegs:$src1, globaladdress:$src2),
>> + "memd($dst1=##$src2) = $src1",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +// memb(Re=#U6)=Rs
>> +def STrib_abs_set_V4 : STInst<(outs IntRegs:$dst1),
>> + (ins IntRegs:$src1, globaladdress:$src2),
>> + "memb($dst1=##$src2) = $src1",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +// memh(Re=#U6)=Rs
>> +def STrih_abs_set_V4 : STInst<(outs IntRegs:$dst1),
>> + (ins IntRegs:$src1, globaladdress:$src2),
>> + "memh($dst1=##$src2) = $src1",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +// memw(Re=#U6)=Rs
>> +def STriw_abs_set_V4 : STInst<(outs IntRegs:$dst1),
>> + (ins IntRegs:$src1, globaladdress:$src2),
>> + "memw($dst1=##$src2) = $src1",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +// memd(Rs+Ru<<#u2)=Rtt
>> +let AddedComplexity = 10, isPredicable = 1 in
>> +def STrid_indexed_shl_V4 : STInst<(outs),
>> + (ins IntRegs:$src1, IntRegs:$src2, u2Imm:$src3,
DoubleRegs:$src4),
>> + "memd($src1+$src2<<#$src3) = $src4",
>> + [(store (i64 DoubleRegs:$src4),
>> + (add (i32 IntRegs:$src1),
>> + (shl (i32 IntRegs:$src2),
u2ImmPred:$src3)))]>,
>> + Requires<[HasV4T]>;
>> +
>> +// memd(Ru<<#u2+#U6)=Rtt
>> +let AddedComplexity = 10 in
>> +def STrid_shl_V4 : STInst<(outs),
>> + (ins IntRegs:$src1, u2Imm:$src2, u6Imm:$src3,
DoubleRegs:$src4),
>> + "memd($src1<<#$src2+#$src3) = $src4",
>> + [(store (i64 DoubleRegs:$src4),
>> + (add (shl (i32 IntRegs:$src1), u2ImmPred:$src2),
>> + u6ImmPred:$src3))]>,
>> + Requires<[HasV4T]>;
>> +
>> +// memd(Rx++#s4:3)=Rtt
>> +// memd(Rx++#s4:3:circ(Mu))=Rtt
>> +// memd(Rx++I:circ(Mu))=Rtt
>> +// memd(Rx++Mu)=Rtt
>> +// memd(Rx++Mu:brev)=Rtt
>> +// memd(gp+#u16:3)=Rtt
>> +
>> +// Store doubleword conditionally.
>> +// if ([!]Pv[.new]) memd(#u6)=Rtt
>> +// TODO: needs to be implemented.
>> +
>> +// if ([!]Pv[.new]) memd(Rs+#u6:3)=Rtt
>> +// if (Pv) memd(Rs+#u6:3)=Rtt
>> +// if (Pv.new) memd(Rs+#u6:3)=Rtt
>> +let AddedComplexity = 10, mayStore = 1, neverHasSideEffects = 1,
>> + isPredicated = 1 in
>> +def STrid_cdnPt_V4 : STInst<(outs),
>> + (ins PredRegs:$src1, MEMri:$addr, DoubleRegs:$src2),
>> + "if ($src1.new) memd($addr) = $src2",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +// if (!Pv) memd(Rs+#u6:3)=Rtt
>> +// if (!Pv.new) memd(Rs+#u6:3)=Rtt
>> +let AddedComplexity = 10, mayStore = 1, neverHasSideEffects = 1,
>> + isPredicated = 1 in
>> +def STrid_cdnNotPt_V4 : STInst<(outs),
>> + (ins PredRegs:$src1, MEMri:$addr, DoubleRegs:$src2),
>> + "if (!$src1.new) memd($addr) = $src2",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +// if (Pv) memd(Rs+#u6:3)=Rtt
>> +// if (Pv.new) memd(Rs+#u6:3)=Rtt
>> +let AddedComplexity = 10, mayStore = 1, neverHasSideEffects = 1,
>> + isPredicated = 1 in
>> +def STrid_indexed_cdnPt_V4 : STInst<(outs),
>> + (ins PredRegs:$src1, IntRegs:$src2, u6_3Imm:$src3,
>> + DoubleRegs:$src4),
>> + "if ($src1.new) memd($src2+#$src3) = $src4",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +// if (!Pv) memd(Rs+#u6:3)=Rtt
>> +// if (!Pv.new) memd(Rs+#u6:3)=Rtt
>> +let AddedComplexity = 10, mayStore = 1, neverHasSideEffects = 1,
>> + isPredicated = 1 in
>> +def STrid_indexed_cdnNotPt_V4 : STInst<(outs),
>> + (ins PredRegs:$src1, IntRegs:$src2, u6_3Imm:$src3,
>> + DoubleRegs:$src4),
>> + "if (!$src1.new) memd($src2+#$src3) = $src4",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +// if ([!]Pv[.new]) memd(Rs+Ru<<#u2)=Rtt
>> +// if (Pv) memd(Rs+Ru<<#u2)=Rtt
>> +let AddedComplexity = 10, mayStore = 1, neverHasSideEffects = 1,
>> + isPredicated = 1 in
>> +def STrid_indexed_shl_cPt_V4 : STInst<(outs),
>> +                    (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4,
>> +                         DoubleRegs:$src5),
>> +                    "if ($src1) memd($src2+$src3<<#$src4) = $src5",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +// if (Pv.new) memd(Rs+Ru<<#u2)=Rtt
>> +let AddedComplexity = 10, mayStore = 1, neverHasSideEffects = 1,
>> + isPredicated = 1 in
>> +def STrid_indexed_shl_cdnPt_V4 : STInst<(outs),
>> +                    (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4,
>> +                         DoubleRegs:$src5),
>> +                    "if ($src1.new) memd($src2+$src3<<#$src4) = $src5",
>> + []>,
>> + Requires<[HasV4T]>;
>> +// if (!Pv) memd(Rs+Ru<<#u2)=Rtt
>> +let AddedComplexity = 10, mayStore = 1, neverHasSideEffects = 1,
>> + isPredicated = 1 in
>> +def STrid_indexed_shl_cNotPt_V4 : STInst<(outs),
>> +                    (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4,
>> +                         DoubleRegs:$src5),
>> +                    "if (!$src1) memd($src2+$src3<<#$src4) = $src5",
>> + []>,
>> + Requires<[HasV4T]>;
>> +// if (!Pv.new) memd(Rs+Ru<<#u2)=Rtt
>> +let AddedComplexity = 10, mayStore = 1, neverHasSideEffects = 1,
>> + isPredicated = 1 in
>> +def STrid_indexed_shl_cdnNotPt_V4 : STInst<(outs),
>> +                    (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4,
>> +                         DoubleRegs:$src5),
>> +                    "if (!$src1.new) memd($src2+$src3<<#$src4) = $src5",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +// if ([!]Pv[.new]) memd(Rx++#s4:3)=Rtt
>> +// if (Pv) memd(Rx++#s4:3)=Rtt
>> +// if (Pv.new) memd(Rx++#s4:3)=Rtt
>> +let AddedComplexity = 10, mayStore = 1, neverHasSideEffects = 1,
>> + isPredicated = 1 in
>> +def POST_STdri_cdnPt_V4 : STInstPI<(outs IntRegs:$dst),
>> + (ins PredRegs:$src1, DoubleRegs:$src2, IntRegs:$src3,
>> + s4_3Imm:$offset),
>> + "if ($src1.new) memd($src3++#$offset) = $src2",
>> + [],
>> + "$src3 = $dst">,
>> + Requires<[HasV4T]>;
>> +
>> +// if (!Pv) memd(Rx++#s4:3)=Rtt
>> +// if (!Pv.new) memd(Rx++#s4:3)=Rtt
>> +let AddedComplexity = 10, mayStore = 1, neverHasSideEffects = 1,
>> + isPredicated = 1 in
>> +def POST_STdri_cdnNotPt_V4 : STInstPI<(outs IntRegs:$dst),
>> + (ins PredRegs:$src1, DoubleRegs:$src2, IntRegs:$src3,
>> + s4_3Imm:$offset),
>> + "if (!$src1.new) memd($src3++#$offset) = $src2",
>> + [],
>> + "$src3 = $dst">,
>> + Requires<[HasV4T]>;
>> +
>> +
>> +// Store byte.
>> +// memb(Rs+#u6:0)=#S8
>> +let AddedComplexity = 10, isPredicable = 1 in
>> +def STrib_imm_V4 : STInst<(outs),
>> + (ins IntRegs:$src1, u6_0Imm:$src2, s8Imm:$src3),
>> + "memb($src1+#$src2) = #$src3",
>> + [(truncstorei8 s8ImmPred:$src3, (add (i32 IntRegs:$src1),
>> +
u6_0ImmPred:$src2))]>,
>> + Requires<[HasV4T]>;
>> +
>> +// memb(Rs+Ru<<#u2)=Rt
>> +let AddedComplexity = 10, isPredicable = 1 in
>> +def STrib_indexed_shl_V4 : STInst<(outs),
>> + (ins IntRegs:$src1, IntRegs:$src2, u2Imm:$src3,
IntRegs:$src4),
>> + "memb($src1+$src2<<#$src3) = $src4",
>> + [(truncstorei8 (i32 IntRegs:$src4),
>> + (add (i32 IntRegs:$src1),
>> + (shl (i32 IntRegs:$src2),
>> + u2ImmPred:$src3)))]>,
>> + Requires<[HasV4T]>;
>> +
>> +// memb(Ru<<#u2+#U6)=Rt
>> +let AddedComplexity = 10 in
>> +def STrib_shl_V4 : STInst<(outs),
>> + (ins IntRegs:$src1, u2Imm:$src2, u6Imm:$src3,
IntRegs:$src4),
>> + "memb($src1<<#$src2+#$src3) = $src4",
>> + [(truncstorei8 (i32 IntRegs:$src4),
>> + (add (shl (i32 IntRegs:$src1),
u2ImmPred:$src2),
>> + u6ImmPred:$src3))]>,
>> + Requires<[HasV4T]>;
>> +
>> +// memb(Rx++#s4:0:circ(Mu))=Rt
>> +// memb(Rx++I:circ(Mu))=Rt
>> +// memb(Rx++Mu)=Rt
>> +// memb(Rx++Mu:brev)=Rt
>> +// memb(gp+#u16:0)=Rt
>> +
>> +
>> +// Store byte conditionally.
>> +// if ([!]Pv[.new]) memb(#u6)=Rt
>> +// if ([!]Pv[.new]) memb(Rs+#u6:0)=#S6
>> +// if (Pv) memb(Rs+#u6:0)=#S6
>> +let mayStore = 1, neverHasSideEffects = 1,
>> + isPredicated = 1 in
>> +def STrib_imm_cPt_V4 : STInst<(outs),
>> + (ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3,
s6Imm:$src4),
>> + "if ($src1) memb($src2+#$src3) = #$src4",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +// if (Pv.new) memb(Rs+#u6:0)=#S6
>> +let mayStore = 1, neverHasSideEffects = 1,
>> + isPredicated = 1 in
>> +def STrib_imm_cdnPt_V4 : STInst<(outs),
>> + (ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3,
s6Imm:$src4),
>> + "if ($src1.new) memb($src2+#$src3) = #$src4",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +// if (!Pv) memb(Rs+#u6:0)=#S6
>> +let mayStore = 1, neverHasSideEffects = 1,
>> + isPredicated = 1 in
>> +def STrib_imm_cNotPt_V4 : STInst<(outs),
>> + (ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3,
s6Imm:$src4),
>> + "if (!$src1) memb($src2+#$src3) = #$src4",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +// if (!Pv.new) memb(Rs+#u6:0)=#S6
>> +let mayStore = 1, neverHasSideEffects = 1,
>> + isPredicated = 1 in
>> +def STrib_imm_cdnNotPt_V4 : STInst<(outs),
>> + (ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3,
s6Imm:$src4),
>> + "if (!$src1.new) memb($src2+#$src3) = #$src4",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +// if ([!]Pv[.new]) memb(Rs+#u6:0)=Rt
>> +// if (Pv) memb(Rs+#u6:0)=Rt
>> +// if (Pv.new) memb(Rs+#u6:0)=Rt
>> +let mayStore = 1, neverHasSideEffects = 1,
>> + isPredicated = 1 in
>> +def STrib_cdnPt_V4 : STInst<(outs),
>> + (ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2),
>> + "if ($src1.new) memb($addr) = $src2",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +// if (!Pv) memb(Rs+#u6:0)=Rt
>> +// if (!Pv.new) memb(Rs+#u6:0)=Rt
>> +let mayStore = 1, neverHasSideEffects = 1,
>> + isPredicated = 1 in
>> +def STrib_cdnNotPt_V4 : STInst<(outs),
>> + (ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2),
>> + "if (!$src1.new) memb($addr) = $src2",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +// if (Pv) memb(Rs+#u6:0)=Rt
>> +// if (!Pv) memb(Rs+#u6:0)=Rt
>> +// if (Pv.new) memb(Rs+#u6:0)=Rt
>> +let mayStore = 1, neverHasSideEffects = 1,
>> + isPredicated = 1 in
>> +def STrib_indexed_cdnPt_V4 : STInst<(outs),
>> + (ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3,
IntRegs:$src4),
>> + "if ($src1.new) memb($src2+#$src3) = $src4",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +// if (!Pv.new) memb(Rs+#u6:0)=Rt
>> +let mayStore = 1, neverHasSideEffects = 1,
>> + isPredicated = 1 in
>> def STrib_indexed_cdnNotPt_V4 : STInst<(outs),
>> (ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3,
IntRegs:$src4),
>> "if (!$src1.new) memb($src2+#$src3) = $src4",
>> @@ -1218,7 +2061,8 @@ def STrib_indexed_cdnNotPt_V4 : STInst<(outs),
>>
>> // if ([!]Pv[.new]) memb(Rs+Ru<<#u2)=Rt
>> // if (Pv) memb(Rs+Ru<<#u2)=Rt
>> -let mayStore = 1, AddedComplexity = 10 in
>> +let mayStore = 1, AddedComplexity = 10,
>> + isPredicated = 1 in
>> def STrib_indexed_shl_cPt_V4 : STInst<(outs),
>> (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
u2Imm:$src4,
>> IntRegs:$src5),
>> @@ -1227,7 +2071,8 @@ def STrib_indexed_shl_cPt_V4 : STInst<(outs),
>> Requires<[HasV4T]>;
>>
>> // if (Pv.new) memb(Rs+Ru<<#u2)=Rt
>> -let mayStore = 1, AddedComplexity = 10 in
>> +let mayStore = 1, AddedComplexity = 10,
>> + isPredicated = 1 in
>> def STrib_indexed_shl_cdnPt_V4 : STInst<(outs),
>> (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
u2Imm:$src4,
>> IntRegs:$src5),
>> @@ -1236,7 +2081,8 @@ def STrib_indexed_shl_cdnPt_V4 : STInst<(outs),
>> Requires<[HasV4T]>;
>>
>> // if (!Pv) memb(Rs+Ru<<#u2)=Rt
>> -let mayStore = 1, AddedComplexity = 10 in
>> +let mayStore = 1, AddedComplexity = 10,
>> + isPredicated = 1 in
>> def STrib_indexed_shl_cNotPt_V4 : STInst<(outs),
>> (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
u2Imm:$src4,
>> IntRegs:$src5),
>> @@ -1245,7 +2091,8 @@ def STrib_indexed_shl_cNotPt_V4 : STInst<(outs),
>> Requires<[HasV4T]>;
>>
>> // if (!Pv.new) memb(Rs+Ru<<#u2)=Rt
>> -let mayStore = 1, AddedComplexity = 10 in
>> +let mayStore = 1, AddedComplexity = 10,
>> + isPredicated = 1 in
>> def STrib_indexed_shl_cdnNotPt_V4 : STInst<(outs),
>> (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
u2Imm:$src4,
>> IntRegs:$src5),
>> @@ -1256,7 +2103,8 @@ def STrib_indexed_shl_cdnNotPt_V4 : STInst<(outs),
>> // if ([!]Pv[.new]) memb(Rx++#s4:0)=Rt
>> // if (Pv) memb(Rx++#s4:0)=Rt
>> // if (Pv.new) memb(Rx++#s4:0)=Rt
>> -let mayStore = 1, hasCtrlDep = 1 in
>> +let mayStore = 1, hasCtrlDep = 1,
>> + isPredicated = 1 in
>> def POST_STbri_cdnPt_V4 : STInstPI<(outs IntRegs:$dst),
>> (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
s4_0Imm:$offset),
>> "if ($src1.new) memb($src3++#$offset) = $src2",
>> @@ -1265,7 +2113,8 @@ def POST_STbri_cdnPt_V4 : STInstPI<(outs IntRegs:$dst),
>>
>> // if (!Pv) memb(Rx++#s4:0)=Rt
>> // if (!Pv.new) memb(Rx++#s4:0)=Rt
>> -let mayStore = 1, hasCtrlDep = 1 in
>> +let mayStore = 1, hasCtrlDep = 1,
>> + isPredicated = 1 in
>> def POST_STbri_cdnNotPt_V4 : STInstPI<(outs IntRegs:$dst),
>> (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
s4_0Imm:$offset),
>> "if (!$src1.new) memb($src3++#$offset) =
$src2",
>> @@ -1274,20 +2123,15 @@ def POST_STbri_cdnNotPt_V4 : STInstPI<(outs IntRegs:$dst),
>>
>>
>> // Store halfword.
>> -// memh(Re=#U6)=Rt.H
>> -// TODO: needs to be implemented
>> -
>> -// memh(Re=#U6)=Rt
>> // TODO: needs to be implemented
>> -
>> +// memh(Re=#U6)=Rt.H
>> // memh(Rs+#s11:1)=Rt.H
>> -// memh(Rs+#s11:1)=Rt
>> // memh(Rs+#u6:1)=#S8
>> let AddedComplexity = 10, isPredicable = 1 in
>> def STrih_imm_V4 : STInst<(outs),
>> (ins IntRegs:$src1, u6_1Imm:$src2, s8Imm:$src3),
>> "memh($src1+#$src2) = #$src3",
>> - [(truncstorei16 s8ImmPred:$src3, (add IntRegs:$src1,
>> + [(truncstorei16 s8ImmPred:$src3, (add (i32 IntRegs:$src1),
>>
u6_1ImmPred:$src2))]>,
>> Requires<[HasV4T]>;
>>
>> @@ -1299,9 +2143,10 @@ let AddedComplexity = 10, isPredicable = 1 in
>> def STrih_indexed_shl_V4 : STInst<(outs),
>> (ins IntRegs:$src1, IntRegs:$src2, u2Imm:$src3,
IntRegs:$src4),
>> "memh($src1+$src2<<#$src3) = $src4",
>> - [(truncstorei16 IntRegs:$src4, (add IntRegs:$src1,
>> - (shl IntRegs:$src2,
>> -
u2ImmPred:$src3)))]>,
>> + [(truncstorei16 (i32 IntRegs:$src4),
>> + (add (i32 IntRegs:$src1),
>> + (shl (i32 IntRegs:$src2),
>> + u2ImmPred:$src3)))]>,
>> Requires<[HasV4T]>;
>>
>> // memh(Ru<<#u2+#U6)=Rt.H
>> @@ -1310,9 +2155,9 @@ let AddedComplexity = 10 in
>> def STrih_shl_V4 : STInst<(outs),
>> (ins IntRegs:$src1, u2Imm:$src2, u6Imm:$src3,
IntRegs:$src4),
>> "memh($src1<<#$src2+#$src3) = $src4",
>> - [(truncstorei16 IntRegs:$src4, (shl IntRegs:$src1,
>> - (add u2ImmPred:$src2,
>> -
u6ImmPred:$src3)))]>,
>> + [(truncstorei16 (i32 IntRegs:$src4),
>> + (add (shl (i32 IntRegs:$src1),
u2ImmPred:$src2),
>> + u6ImmPred:$src3))]>,
>> Requires<[HasV4T]>;
>>
>> // memh(Rx++#s4:1:circ(Mu))=Rt.H
>> @@ -1323,17 +2168,14 @@ def STrih_shl_V4 : STInst<(outs),
>> // memh(Rx++Mu)=Rt
>> // memh(Rx++Mu:brev)=Rt.H
>> // memh(Rx++Mu:brev)=Rt
>> -// memh(gp+#u16:1)=Rt.H
>> // memh(gp+#u16:1)=Rt
>> -
>> -
>> -// Store halfword conditionally.
>> // if ([!]Pv[.new]) memh(#u6)=Rt.H
>> // if ([!]Pv[.new]) memh(#u6)=Rt
>>
>> // if ([!]Pv[.new]) memh(Rs+#u6:1)=#S6
>> // if (Pv) memh(Rs+#u6:1)=#S6
>> -let mayStore = 1, neverHasSideEffects = 1 in
>> +let mayStore = 1, neverHasSideEffects = 1,
>> + isPredicated = 1 in
>> def STrih_imm_cPt_V4 : STInst<(outs),
>> (ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3,
s6Imm:$src4),
>> "if ($src1) memh($src2+#$src3) = #$src4",
>> @@ -1341,7 +2183,8 @@ def STrih_imm_cPt_V4 : STInst<(outs),
>> Requires<[HasV4T]>;
>>
>> // if (Pv.new) memh(Rs+#u6:1)=#S6
>> -let mayStore = 1, neverHasSideEffects = 1 in
>> +let mayStore = 1, neverHasSideEffects = 1,
>> + isPredicated = 1 in
>> def STrih_imm_cdnPt_V4 : STInst<(outs),
>> (ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3,
s6Imm:$src4),
>> "if ($src1.new) memh($src2+#$src3) = #$src4",
>> @@ -1349,7 +2192,8 @@ def STrih_imm_cdnPt_V4 : STInst<(outs),
>> Requires<[HasV4T]>;
>>
>> // if (!Pv) memh(Rs+#u6:1)=#S6
>> -let mayStore = 1, neverHasSideEffects = 1 in
>> +let mayStore = 1, neverHasSideEffects = 1,
>> + isPredicated = 1 in
>> def STrih_imm_cNotPt_V4 : STInst<(outs),
>> (ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3,
s6Imm:$src4),
>> "if (!$src1) memh($src2+#$src3) = #$src4",
>> @@ -1357,7 +2201,8 @@ def STrih_imm_cNotPt_V4 : STInst<(outs),
>> Requires<[HasV4T]>;
>>
>> // if (!Pv.new) memh(Rs+#u6:1)=#S6
>> -let mayStore = 1, neverHasSideEffects = 1 in
>> +let mayStore = 1, neverHasSideEffects = 1,
>> + isPredicated = 1 in
>> def STrih_imm_cdnNotPt_V4 : STInst<(outs),
>> (ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3,
s6Imm:$src4),
>> "if (!$src1.new) memh($src2+#$src3) = #$src4",
>> @@ -1370,7 +2215,8 @@ def STrih_imm_cdnNotPt_V4 : STInst<(outs),
>> // if ([!]Pv[.new]) memh(Rs+#u6:1)=Rt
>> // if (Pv) memh(Rs+#u6:1)=Rt
>> // if (Pv.new) memh(Rs+#u6:1)=Rt
>> -let mayStore = 1, neverHasSideEffects = 1 in
>> +let mayStore = 1, neverHasSideEffects = 1,
>> + isPredicated = 1 in
>> def STrih_cdnPt_V4 : STInst<(outs),
>> (ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2),
>> "if ($src1.new) memh($addr) = $src2",
>> @@ -1379,7 +2225,8 @@ def STrih_cdnPt_V4 : STInst<(outs),
>>
>> // if (!Pv) memh(Rs+#u6:1)=Rt
>> // if (!Pv.new) memh(Rs+#u6:1)=Rt
>> -let mayStore = 1, neverHasSideEffects = 1 in
>> +let mayStore = 1, neverHasSideEffects = 1,
>> + isPredicated = 1 in
>> def STrih_cdnNotPt_V4 : STInst<(outs),
>> (ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2),
>> "if (!$src1.new) memh($addr) = $src2",
>> @@ -1387,7 +2234,8 @@ def STrih_cdnNotPt_V4 : STInst<(outs),
>> Requires<[HasV4T]>;
>>
>> // if (Pv.new) memh(Rs+#u6:1)=Rt
>> -let mayStore = 1, neverHasSideEffects = 1 in
>> +let mayStore = 1, neverHasSideEffects = 1,
>> + isPredicated = 1 in
>> def STrih_indexed_cdnPt_V4 : STInst<(outs),
>> (ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3,
IntRegs:$src4),
>> "if ($src1.new) memh($src2+#$src3) = $src4",
>> @@ -1395,7 +2243,8 @@ def STrih_indexed_cdnPt_V4 : STInst<(outs),
>> Requires<[HasV4T]>;
>>
>> // if (!Pv.new) memh(Rs+#u6:1)=Rt
>> -let mayStore = 1, neverHasSideEffects = 1 in
>> +let mayStore = 1, neverHasSideEffects = 1,
>> + isPredicated = 1 in
>> def STrih_indexed_cdnNotPt_V4 : STInst<(outs),
>> (ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3,
IntRegs:$src4),
>> "if (!$src1.new) memh($src2+#$src3) = $src4",
>> @@ -1405,7 +2254,8 @@ def STrih_indexed_cdnNotPt_V4 : STInst<(outs),
>> // if ([!]Pv[.new]) memh(Rs+Ru<<#u2)=Rt.H
>> // if ([!]Pv[.new]) memh(Rs+Ru<<#u2)=Rt
>> // if (Pv) memh(Rs+Ru<<#u2)=Rt
>> -let mayStore = 1, AddedComplexity = 10 in
>> +let mayStore = 1, AddedComplexity = 10,
>> + isPredicated = 1 in
>> def STrih_indexed_shl_cPt_V4 : STInst<(outs),
>> (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
u2Imm:$src4,
>> IntRegs:$src5),
>> @@ -1414,6 +2264,8 @@ def STrih_indexed_shl_cPt_V4 : STInst<(outs),
>> Requires<[HasV4T]>;
>>
>> // if (Pv.new) memh(Rs+Ru<<#u2)=Rt
>> +let mayStore = 1, AddedComplexity = 10,
>> + isPredicated = 1 in
>> def STrih_indexed_shl_cdnPt_V4 : STInst<(outs),
>> (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
u2Imm:$src4,
>> IntRegs:$src5),
>> @@ -1422,7 +2274,8 @@ def STrih_indexed_shl_cdnPt_V4 :
STInst<(outs),
>> Requires<[HasV4T]>;
>>
>> // if (!Pv) memh(Rs+Ru<<#u2)=Rt
>> -let mayStore = 1, AddedComplexity = 10 in
>> +let mayStore = 1, AddedComplexity = 10,
>> + isPredicated = 1 in
>> def STrih_indexed_shl_cNotPt_V4 : STInst<(outs),
>> (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
u2Imm:$src4,
>> IntRegs:$src5),
>> @@ -1431,7 +2284,8 @@ def STrih_indexed_shl_cNotPt_V4 :
STInst<(outs),
>> Requires<[HasV4T]>;
>>
>> // if (!Pv.new) memh(Rs+Ru<<#u2)=Rt
>> -let mayStore = 1, AddedComplexity = 10 in
>> +let mayStore = 1, AddedComplexity = 10,
>> + isPredicated = 1 in
>> def STrih_indexed_shl_cdnNotPt_V4 : STInst<(outs),
>> (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
u2Imm:$src4,
>> IntRegs:$src5),
>> @@ -1445,7 +2299,8 @@ def STrih_indexed_shl_cdnNotPt_V4 :
STInst<(outs),
>> // if ([!]Pv[.new]) memh(Rx++#s4:1)=Rt
>> // if (Pv) memh(Rx++#s4:1)=Rt
>> // if (Pv.new) memh(Rx++#s4:1)=Rt
>> -let mayStore = 1, hasCtrlDep = 1 in
>> +let mayStore = 1, hasCtrlDep = 1,
>> + isPredicated = 1 in
>> def POST_SThri_cdnPt_V4 : STInstPI<(outs IntRegs:$dst),
>> (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
s4_1Imm:$offset),
>> "if ($src1.new) memh($src3++#$offset) = $src2",
>> @@ -1454,7 +2309,8 @@ def POST_SThri_cdnPt_V4 : STInstPI<(outs
IntRegs:$dst),
>>
>> // if (!Pv) memh(Rx++#s4:1)=Rt
>> // if (!Pv.new) memh(Rx++#s4:1)=Rt
>> -let mayStore = 1, hasCtrlDep = 1 in
>> +let mayStore = 1, hasCtrlDep = 1,
>> + isPredicated = 1 in
>> def POST_SThri_cdnNotPt_V4 : STInstPI<(outs IntRegs:$dst),
>> (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
s4_1Imm:$offset),
>> "if (!$src1.new) memh($src3++#$offset) =
$src2",
>> @@ -1466,13 +2322,22 @@ def POST_SThri_cdnNotPt_V4 : STInstPI<(outs
IntRegs:$dst),
>> // memw(Re=#U6)=Rt
>> // TODO: Needs to be implemented.
>>
>> -// memw(Rs+#s11:2)=Rt
>> +// Store predicate:
>> +let mayStore = 1, neverHasSideEffects = 1 in
>> +def STriw_pred_V4 : STInst<(outs),
>> + (ins MEMri:$addr, PredRegs:$src1),
>> + "Error; should not emit",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +
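The STriw_pred_V4 def above is deliberately given the asm string "Error; should not emit": it only exists so a predicate register can be spilled through memory and is expected to be rewritten before emission. Purely as a rough sketch (not taken from this patch), and assuming the MEMri operand expands to a frame index plus a zero offset, a storeRegToStackSlot()-style hook could create such a pseudo like this; spillPredReg and SpillOpc are illustrative names only:

// Sketch only: how a storeRegToStackSlot()-style hook might use a spill
// pseudo such as STriw_pred_V4 for a predicate register.  SpillOpc stands
// for the TableGen-generated opcode of that def; the frame-index + 0
// expansion of MEMri is an assumption for illustration.
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/Target/TargetInstrInfo.h"

using namespace llvm;

static void spillPredReg(MachineBasicBlock &MBB,
                         MachineBasicBlock::iterator MI, DebugLoc DL,
                         unsigned SrcReg, bool isKill, int FrameIdx,
                         unsigned SpillOpc, const TargetInstrInfo *TII) {
  BuildMI(MBB, MI, DL, TII->get(SpillOpc))
      .addFrameIndex(FrameIdx).addImm(0)         // MEMri:$addr (assumed FI+0)
      .addReg(SrcReg, getKillRegState(isKill));  // PredRegs:$src1
}

A later expansion pass would then replace the pseudo with real instructions before the packetizer and the emitter see it.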
>> // memw(Rs+#u6:2)=#S8
>> let AddedComplexity = 10, isPredicable = 1 in
>> def STriw_imm_V4 : STInst<(outs),
>> (ins IntRegs:$src1, u6_2Imm:$src2, s8Imm:$src3),
>> "memw($src1+#$src2) = #$src3",
>> - [(store s8ImmPred:$src3, (add IntRegs:$src1,
u6_2ImmPred:$src2))]>,
>> + [(store s8ImmPred:$src3, (add (i32 IntRegs:$src1),
>> + u6_2ImmPred:$src2))]>,
>> Requires<[HasV4T]>;
>>
>> // memw(Rs+Ru<<#u2)=Rt
>> @@ -1480,8 +2345,9 @@ let AddedComplexity = 10, isPredicable = 1 in
>> def STriw_indexed_shl_V4 : STInst<(outs),
>> (ins IntRegs:$src1, IntRegs:$src2, u2Imm:$src3,
IntRegs:$src4),
>> "memw($src1+$src2<<#$src3) = $src4",
>> - [(store IntRegs:$src4, (add IntRegs:$src1,
>> - (shl IntRegs:$src2,
u2ImmPred:$src3)))]>,
>> + [(store (i32 IntRegs:$src4), (add (i32 IntRegs:$src1),
>> + (shl (i32 IntRegs:$src2),
>> + u2ImmPred:$src3)))]>,
>> Requires<[HasV4T]>;
>>
>> // memw(Ru<<#u2+#U6)=Rt
>> @@ -1489,8 +2355,9 @@ let AddedComplexity = 10 in
>> def STriw_shl_V4 : STInst<(outs),
>> (ins IntRegs:$src1, u2Imm:$src2, u6Imm:$src3,
IntRegs:$src4),
>> "memw($src1<<#$src2+#$src3) = $src4",
>> - [(store IntRegs:$src4, (shl IntRegs:$src1,
>> - (add u2ImmPred:$src2,
u6ImmPred:$src3)))]>,
>> + [(store (i32 IntRegs:$src4),
>> + (add (shl (i32 IntRegs:$src1), u2ImmPred:$src2),
>> + u6ImmPred:$src3))]>,
>> Requires<[HasV4T]>;
>>
>> // memw(Rx++#s4:2)=Rt
>> @@ -1502,12 +2369,11 @@ def STriw_shl_V4 : STInst<(outs),
>>
>>
>> // Store word conditionally.
>> -// if ([!]Pv[.new]) memw(#u6)=Rt
>> -// TODO: Needs to be implemented.
>>
>> // if ([!]Pv[.new]) memw(Rs+#u6:2)=#S6
>> // if (Pv) memw(Rs+#u6:2)=#S6
>> -let mayStore = 1, neverHasSideEffects = 1 in
>> +let mayStore = 1, neverHasSideEffects = 1,
>> + isPredicated = 1 in
>> def STriw_imm_cPt_V4 : STInst<(outs),
>> (ins PredRegs:$src1, IntRegs:$src2, u6_2Imm:$src3,
s6Imm:$src4),
>> "if ($src1) memw($src2+#$src3) = #$src4",
>> @@ -1515,7 +2381,8 @@ def STriw_imm_cPt_V4 : STInst<(outs),
>> Requires<[HasV4T]>;
>>
>> // if (Pv.new) memw(Rs+#u6:2)=#S6
>> -let mayStore = 1, neverHasSideEffects = 1 in
>> +let mayStore = 1, neverHasSideEffects = 1,
>> + isPredicated = 1 in
>> def STriw_imm_cdnPt_V4 : STInst<(outs),
>> (ins PredRegs:$src1, IntRegs:$src2, u6_2Imm:$src3,
s6Imm:$src4),
>> "if ($src1.new) memw($src2+#$src3) = #$src4",
>> @@ -1523,7 +2390,8 @@ def STriw_imm_cdnPt_V4 : STInst<(outs),
>> Requires<[HasV4T]>;
>>
>> // if (!Pv) memw(Rs+#u6:2)=#S6
>> -let mayStore = 1, neverHasSideEffects = 1 in
>> +let mayStore = 1, neverHasSideEffects = 1,
>> + isPredicated = 1 in
>> def STriw_imm_cNotPt_V4 : STInst<(outs),
>> (ins PredRegs:$src1, IntRegs:$src2, u6_2Imm:$src3,
s6Imm:$src4),
>> "if (!$src1) memw($src2+#$src3) = #$src4",
>> @@ -1531,106 +2399,550 @@ def STriw_imm_cNotPt_V4 : STInst<(outs),
>> Requires<[HasV4T]>;
>>
>> // if (!Pv.new) memw(Rs+#u6:2)=#S6
>> -let mayStore = 1, neverHasSideEffects = 1 in
>> +let mayStore = 1, neverHasSideEffects = 1,
>> + isPredicated = 1 in
>> def STriw_imm_cdnNotPt_V4 : STInst<(outs),
>> (ins PredRegs:$src1, IntRegs:$src2, u6_2Imm:$src3,
s6Imm:$src4),
>> "if (!$src1.new) memw($src2+#$src3) = #$src4",
>> []>,
>> Requires<[HasV4T]>;
>>
>> -// if ([!]Pv[.new]) memw(Rs+#u6:2)=Rt
>> -// if (Pv) memw(Rs+#u6:2)=Rt
>> -// if (Pv.new) memw(Rs+#u6:2)=Rt
>> -let mayStore = 1, neverHasSideEffects = 1 in
>> -def STriw_cdnPt_V4 : STInst<(outs),
>> - (ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2),
>> - "if ($src1.new) memw($addr) = $src2",
>> +// if ([!]Pv[.new]) memw(Rs+#u6:2)=Rt
>> +// if (Pv) memw(Rs+#u6:2)=Rt
>> +// if (Pv.new) memw(Rs+#u6:2)=Rt
>> +let mayStore = 1, neverHasSideEffects = 1,
>> + isPredicated = 1 in
>> +def STriw_cdnPt_V4 : STInst<(outs),
>> + (ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2),
>> + "if ($src1.new) memw($addr) = $src2",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +// if (!Pv) memw(Rs+#u6:2)=Rt
>> +// if (!Pv.new) memw(Rs+#u6:2)=Rt
>> +let mayStore = 1, neverHasSideEffects = 1,
>> + isPredicated = 1 in
>> +def STriw_cdnNotPt_V4 : STInst<(outs),
>> + (ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2),
>> + "if (!$src1.new) memw($addr) = $src2",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +// if (Pv) memw(Rs+#u6:2)=Rt
>> +// if (!Pv) memw(Rs+#u6:2)=Rt
>> +// if (Pv.new) memw(Rs+#u6:2)=Rt
>> +let mayStore = 1, neverHasSideEffects = 1,
>> + isPredicated = 1 in
>> +def STriw_indexed_cdnPt_V4 : STInst<(outs),
>> + (ins PredRegs:$src1, IntRegs:$src2, u6_2Imm:$src3,
IntRegs:$src4),
>> + "if ($src1.new) memw($src2+#$src3) = $src4",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +// if (!Pv.new) memw(Rs+#u6:2)=Rt
>> +let mayStore = 1, neverHasSideEffects = 1,
>> + isPredicated = 1 in
>> +def STriw_indexed_cdnNotPt_V4 : STInst<(outs),
>> + (ins PredRegs:$src1, IntRegs:$src2, u6_2Imm:$src3,
IntRegs:$src4),
>> + "if (!$src1.new) memw($src2+#$src3) = $src4",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +// if ([!]Pv[.new]) memw(Rs+Ru<<#u2)=Rt
>> +// if (Pv) memw(Rs+Ru<<#u2)=Rt
>> +let mayStore = 1, AddedComplexity = 10,
>> + isPredicated = 1 in
>> +def STriw_indexed_shl_cPt_V4 : STInst<(outs),
>> + (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
u2Imm:$src4,
>> + IntRegs:$src5),
>> + "if ($src1) memw($src2+$src3<<#$src4) =
$src5",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +// if (Pv.new) memw(Rs+Ru<<#u2)=Rt
>> +let mayStore = 1, AddedComplexity = 10,
>> + isPredicated = 1 in
>> +def STriw_indexed_shl_cdnPt_V4 : STInst<(outs),
>> + (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
u2Imm:$src4,
>> + IntRegs:$src5),
>> + "if ($src1.new) memw($src2+$src3<<#$src4) =
$src5",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +// if (!Pv) memw(Rs+Ru<<#u2)=Rt
>> +let mayStore = 1, AddedComplexity = 10,
>> + isPredicated = 1 in
>> +def STriw_indexed_shl_cNotPt_V4 : STInst<(outs),
>> + (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
u2Imm:$src4,
>> + IntRegs:$src5),
>> + "if (!$src1) memw($src2+$src3<<#$src4) =
$src5",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +// if (!Pv.new) memw(Rs+Ru<<#u2)=Rt
>> +let mayStore = 1, AddedComplexity = 10,
>> + isPredicated = 1 in
>> +def STriw_indexed_shl_cdnNotPt_V4 : STInst<(outs),
>> + (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
u2Imm:$src4,
>> + IntRegs:$src5),
>> + "if (!$src1.new) memw($src2+$src3<<#$src4) =
$src5",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +// if ([!]Pv[.new]) memw(Rx++#s4:2)=Rt
>> +// if (Pv) memw(Rx++#s4:2)=Rt
>> +// if (Pv.new) memw(Rx++#s4:2)=Rt
>> +let mayStore = 1, hasCtrlDep = 1,
>> + isPredicated = 1 in
>> +def POST_STwri_cdnPt_V4 : STInstPI<(outs IntRegs:$dst),
>> + (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
s4_2Imm:$offset),
>> + "if ($src1.new) memw($src3++#$offset) = $src2",
>> + [],"$src3 = $dst">,
>> + Requires<[HasV4T]>;
>> +
>> +// if (!Pv) memw(Rx++#s4:2)=Rt
>> +// if (!Pv.new) memw(Rx++#s4:2)=Rt
>> +let mayStore = 1, hasCtrlDep = 1,
>> + isPredicated = 1 in
>> +def POST_STwri_cdnNotPt_V4 : STInstPI<(outs IntRegs:$dst),
>> + (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
s4_2Imm:$offset),
>> + "if (!$src1.new) memw($src3++#$offset) = $src2",
>> + [],"$src3 = $dst">,
>> + Requires<[HasV4T]>;
>> +
>> +
>> +/// store to global address
>> +
>> +let isPredicable = 1, mayStore = 1, neverHasSideEffects = 1 in
>> +def STrid_GP_V4 : STInst<(outs),
>> + (ins globaladdress:$global, u16Imm:$offset,
DoubleRegs:$src),
>> + "memd(#$global+$offset) = $src",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1 in
>> +def STrid_GP_cPt_V4 : STInst<(outs),
>> + (ins PredRegs:$src1, globaladdress:$global,
u16Imm:$offset,
>> +
DoubleRegs:$src2),
>> + "if ($src1) memd(##$global+$offset) = $src2",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1 in
>> +def STrid_GP_cNotPt_V4 : STInst<(outs),
>> + (ins PredRegs:$src1, globaladdress:$global,
u16Imm:$offset,
>> +
DoubleRegs:$src2),
>> + "if (!$src1) memd(##$global+$offset) = $src2",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1 in
>> +def STrid_GP_cdnPt_V4 : STInst<(outs),
>> + (ins PredRegs:$src1, globaladdress:$global,
u16Imm:$offset,
>> +
DoubleRegs:$src2),
>> + "if ($src1.new) memd(##$global+$offset) =
$src2",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1 in
>> +def STrid_GP_cdnNotPt_V4 : STInst<(outs),
>> + (ins PredRegs:$src1, globaladdress:$global,
u16Imm:$offset,
>> +
DoubleRegs:$src2),
>> + "if (!$src1.new) memd(##$global+$offset) =
$src2",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +let isPredicable = 1, mayStore = 1, neverHasSideEffects = 1 in
>> +def STrib_GP_V4 : STInst<(outs),
>> + (ins globaladdress:$global, u16Imm:$offset, IntRegs:$src),
>> + "memb(#$global+$offset) = $src",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1 in
>> +def STrib_GP_cPt_V4 : STInst<(outs),
>> + (ins PredRegs:$src1, globaladdress:$global,
u16Imm:$offset,
>> +
IntRegs:$src2),
>> + "if ($src1) memb(##$global+$offset) = $src2",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1 in
>> +def STrib_GP_cNotPt_V4 : STInst<(outs),
>> + (ins PredRegs:$src1, globaladdress:$global,
u16Imm:$offset,
>> +
IntRegs:$src2),
>> + "if (!$src1) memb(##$global+$offset) = $src2",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1 in
>> +def STrib_GP_cdnPt_V4 : STInst<(outs),
>> + (ins PredRegs:$src1, globaladdress:$global,
u16Imm:$offset,
>> +
IntRegs:$src2),
>> + "if ($src1.new) memb(##$global+$offset) =
$src2",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1 in
>> +def STrib_GP_cdnNotPt_V4 : STInst<(outs),
>> + (ins PredRegs:$src1, globaladdress:$global,
u16Imm:$offset,
>> +
IntRegs:$src2),
>> + "if (!$src1.new) memb(##$global+$offset) =
$src2",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +let isPredicable = 1, mayStore = 1, neverHasSideEffects = 1 in
>> +def STrih_GP_V4 : STInst<(outs),
>> + (ins globaladdress:$global, u16Imm:$offset, IntRegs:$src),
>> + "memh(#$global+$offset) = $src",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1 in
>> +def STrih_GP_cPt_V4 : STInst<(outs),
>> + (ins PredRegs:$src1, globaladdress:$global,
u16Imm:$offset,
>> +
IntRegs:$src2),
>> + "if ($src1) memh(##$global+$offset) = $src2",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1 in
>> +def STrih_GP_cNotPt_V4 : STInst<(outs),
>> + (ins PredRegs:$src1, globaladdress:$global,
u16Imm:$offset,
>> +
IntRegs:$src2),
>> + "if (!$src1) memh(##$global+$offset) = $src2",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1 in
>> +def STrih_GP_cdnPt_V4 : STInst<(outs),
>> + (ins PredRegs:$src1, globaladdress:$global,
u16Imm:$offset,
>> +
IntRegs:$src2),
>> + "if ($src1.new) memh(##$global+$offset) =
$src2",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1 in
>> +def STrih_GP_cdnNotPt_V4 : STInst<(outs),
>> + (ins PredRegs:$src1, globaladdress:$global,
u16Imm:$offset,
>> +
IntRegs:$src2),
>> + "if (!$src1.new) memh(##$global+$offset) =
$src2",
>> []>,
>> Requires<[HasV4T]>;
>>
>> -// if (!Pv) memw(Rs+#u6:2)=Rt
>> -// if (!Pv.new) memw(Rs+#u6:2)=Rt
>> -let mayStore = 1, neverHasSideEffects = 1 in
>> -def STriw_cdnNotPt_V4 : STInst<(outs),
>> - (ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2),
>> - "if (!$src1.new) memw($addr) = $src2",
>> +let isPredicable = 1, mayStore = 1, neverHasSideEffects = 1 in
>> +def STriw_GP_V4 : STInst<(outs),
>> + (ins globaladdress:$global, u16Imm:$offset, IntRegs:$src),
>> + "memw(#$global+$offset) = $src",
>> []>,
>> Requires<[HasV4T]>;
>>
>> -// if (Pv) memw(Rs+#u6:2)=Rt
>> -// if (!Pv) memw(Rs+#u6:2)=Rt
>> -// if (Pv.new) memw(Rs+#u6:2)=Rt
>> -let mayStore = 1, neverHasSideEffects = 1 in
>> -def STriw_indexed_cdnPt_V4 : STInst<(outs),
>> - (ins PredRegs:$src1, IntRegs:$src2, u6_2Imm:$src3,
IntRegs:$src4),
>> - "if ($src1.new) memw($src2+#$src3) = $src4",
>> +let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1 in
>> +def STriw_GP_cPt_V4 : STInst<(outs),
>> + (ins PredRegs:$src1, globaladdress:$global,
u16Imm:$offset,
>> +
IntRegs:$src2),
>> + "if ($src1) memw(##$global+$offset) = $src2",
>> []>,
>> Requires<[HasV4T]>;
>>
>> -// if (!Pv.new) memw(Rs+#u6:2)=Rt
>> -let mayStore = 1, neverHasSideEffects = 1 in
>> -def STriw_indexed_cdnNotPt_V4 : STInst<(outs),
>> - (ins PredRegs:$src1, IntRegs:$src2, u6_2Imm:$src3,
IntRegs:$src4),
>> - "if (!$src1.new) memw($src2+#$src3) = $src4",
>> +let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1 in
>> +def STriw_GP_cNotPt_V4 : STInst<(outs),
>> + (ins PredRegs:$src1, globaladdress:$global,
u16Imm:$offset,
>> +
IntRegs:$src2),
>> + "if (!$src1) memw(##$global+$offset) = $src2",
>> []>,
>> Requires<[HasV4T]>;
>>
>> -// if ([!]Pv[.new]) memw(Rs+Ru<<#u2)=Rt
>> -// if (Pv) memw(Rs+Ru<<#u2)=Rt
>> -let mayStore = 1, AddedComplexity = 10 in
>> -def STriw_indexed_shl_cPt_V4 : STInst<(outs),
>> - (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
u2Imm:$src4,
>> - IntRegs:$src5),
>> - "if ($src1) memw($src2+$src3<<#$src4) =
$src5",
>> +let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1 in
>> +def STriw_GP_cdnPt_V4 : STInst<(outs),
>> + (ins PredRegs:$src1, globaladdress:$global,
u16Imm:$offset,
>> +
IntRegs:$src2),
>> + "if ($src1.new) memw(##$global+$offset) =
$src2",
>> []>,
>> Requires<[HasV4T]>;
>>
>> -// if (Pv.new) memw(Rs+Ru<<#u2)=Rt
>> -let mayStore = 1, AddedComplexity = 10 in
>> -def STriw_indexed_shl_cdnPt_V4 : STInst<(outs),
>> - (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
u2Imm:$src4,
>> - IntRegs:$src5),
>> - "if ($src1.new) memw($src2+$src3<<#$src4) =
$src5",
>> +let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1 in
>> +def STriw_GP_cdnNotPt_V4 : STInst<(outs),
>> + (ins PredRegs:$src1, globaladdress:$global,
u16Imm:$offset,
>> +
IntRegs:$src2),
>> + "if (!$src1.new) memw(##$global+$offset) =
$src2",
>> []>,
>> Requires<[HasV4T]>;
>>
>> -// if (!Pv) memw(Rs+Ru<<#u2)=Rt
>> -let mayStore = 1, AddedComplexity = 10 in
>> -def STriw_indexed_shl_cNotPt_V4 : STInst<(outs),
>> - (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
u2Imm:$src4,
>> - IntRegs:$src5),
>> - "if (!$src1) memw($src2+$src3<<#$src4) =
$src5",
>> +// memd(#global)=Rtt
>> +let isPredicable = 1, mayStore = 1, neverHasSideEffects = 1 in
>> +def STd_GP_V4 : STInst<(outs),
>> + (ins globaladdress:$global, DoubleRegs:$src),
>> + "memd(#$global) = $src",
>> []>,
>> Requires<[HasV4T]>;
>>
>> -// if (!Pv.new) memw(Rs+Ru<<#u2)=Rt
>> -let mayStore = 1, AddedComplexity = 10 in
>> -def STriw_indexed_shl_cdnNotPt_V4 : STInst<(outs),
>> - (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
u2Imm:$src4,
>> - IntRegs:$src5),
>> - "if (!$src1.new) memw($src2+$src3<<#$src4) =
$src5",
>> +// if (Pv) memd(##global) = Rtt
>> +let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1 in
>> +def STd_GP_cPt_V4 : STInst<(outs),
>> + (ins PredRegs:$src1, globaladdress:$global,
DoubleRegs:$src2),
>> + "if ($src1) memd(##$global) = $src2",
>> []>,
>> Requires<[HasV4T]>;
>>
>> -// if ([!]Pv[.new]) memw(Rx++#s4:2)=Rt
>> -// if (Pv) memw(Rx++#s4:2)=Rt
>> -// if (Pv.new) memw(Rx++#s4:2)=Rt
>> -let mayStore = 1, hasCtrlDep = 1 in
>> -def POST_STwri_cdnPt_V4 : STInstPI<(outs IntRegs:$dst),
>> - (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
s4_2Imm:$offset),
>> - "if ($src1.new) memw($src3++#$offset) = $src2",
>> - [],"$src3 = $dst">,
>> +// if (!Pv) memd(##global) = Rtt
>> +let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1 in
>> +def STd_GP_cNotPt_V4 : STInst<(outs),
>> + (ins PredRegs:$src1, globaladdress:$global,
DoubleRegs:$src2),
>> + "if (!$src1) memd(##$global) = $src2",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +// if (Pv) memd(##global) = Rtt
>> +let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1 in
>> +def STd_GP_cdnPt_V4 : STInst<(outs),
>> + (ins PredRegs:$src1, globaladdress:$global,
DoubleRegs:$src2),
>> + "if ($src1.new) memd(##$global) = $src2",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +// if (!Pv) memd(##global) = Rtt
>> +let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1 in
>> +def STd_GP_cdnNotPt_V4 : STInst<(outs),
>> + (ins PredRegs:$src1, globaladdress:$global,
DoubleRegs:$src2),
>> + "if (!$src1.new) memd(##$global) = $src2",
>> + []>,
>> Requires<[HasV4T]>;
>>
>> -// if (!Pv) memw(Rx++#s4:2)=Rt
>> -// if (!Pv.new) memw(Rx++#s4:2)=Rt
>> -let mayStore = 1, hasCtrlDep = 1 in
>> -def POST_STwri_cdnNotPt_V4 : STInstPI<(outs IntRegs:$dst),
>> - (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
s4_2Imm:$offset),
>> - "if (!$src1.new) memw($src3++#$offset) = $src2",
>> - [],"$src3 = $dst">,
>> +// memb(#global)=Rt
>> +let isPredicable = 1, mayStore = 1, neverHasSideEffects = 1 in
>> +def STb_GP_V4 : STInst<(outs),
>> + (ins globaladdress:$global, IntRegs:$src),
>> + "memb(#$global) = $src",
>> + []>,
>> Requires<[HasV4T]>;
>>
>> +// if (Pv) memb(##global) = Rt
>> +let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1 in
>> +def STb_GP_cPt_V4 : STInst<(outs),
>> + (ins PredRegs:$src1, globaladdress:$global,
IntRegs:$src2),
>> + "if ($src1) memb(##$global) = $src2",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +// if (!Pv) memb(##global) = Rt
>> +let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1 in
>> +def STb_GP_cNotPt_V4 : STInst<(outs),
>> + (ins PredRegs:$src1, globaladdress:$global,
IntRegs:$src2),
>> + "if (!$src1) memb(##$global) = $src2",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +// if (Pv) memb(##global) = Rt
>> +let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1 in
>> +def STb_GP_cdnPt_V4 : STInst<(outs),
>> + (ins PredRegs:$src1, globaladdress:$global,
IntRegs:$src2),
>> + "if ($src1.new) memb(##$global) = $src2",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +// if (!Pv) memb(##global) = Rt
>> +let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1 in
>> +def STb_GP_cdnNotPt_V4 : STInst<(outs),
>> + (ins PredRegs:$src1, globaladdress:$global,
IntRegs:$src2),
>> + "if (!$src1.new) memb(##$global) = $src2",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +// memh(#global)=Rt
>> +let isPredicable = 1, mayStore = 1, neverHasSideEffects = 1 in
>> +def STh_GP_V4 : STInst<(outs),
>> + (ins globaladdress:$global, IntRegs:$src),
>> + "memh(#$global) = $src",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +// if (Pv) memh(##global) = Rt
>> +let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1 in
>> +def STh_GP_cPt_V4 : STInst<(outs),
>> + (ins PredRegs:$src1, globaladdress:$global,
IntRegs:$src2),
>> + "if ($src1) memh(##$global) = $src2",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +// if (!Pv) memh(##global) = Rt
>> +let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1 in
>> +def STh_GP_cNotPt_V4 : STInst<(outs),
>> + (ins PredRegs:$src1, globaladdress:$global,
IntRegs:$src2),
>> + "if (!$src1) memh(##$global) = $src2",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +// if (Pv) memh(##global) = Rt
>> +let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1 in
>> +def STh_GP_cdnPt_V4 : STInst<(outs),
>> + (ins PredRegs:$src1, globaladdress:$global,
IntRegs:$src2),
>> + "if ($src1.new) memh(##$global) = $src2",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +// if (!Pv) memh(##global) = Rt
>> +let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1 in
>> +def STh_GP_cdnNotPt_V4 : STInst<(outs),
>> + (ins PredRegs:$src1, globaladdress:$global,
IntRegs:$src2),
>> + "if (!$src1.new) memh(##$global) = $src2",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +// memw(#global)=Rt
>> +let isPredicable = 1, mayStore = 1, neverHasSideEffects = 1 in
>> +def STw_GP_V4 : STInst<(outs),
>> + (ins globaladdress:$global, IntRegs:$src),
>> + "memw(#$global) = $src",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +// if (Pv) memw(##global) = Rt
>> +let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1 in
>> +def STw_GP_cPt_V4 : STInst<(outs),
>> + (ins PredRegs:$src1, globaladdress:$global,
IntRegs:$src2),
>> + "if ($src1) memw(##$global) = $src2",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +// if (!Pv) memw(##global) = Rt
>> +let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1 in
>> +def STw_GP_cNotPt_V4 : STInst<(outs),
>> + (ins PredRegs:$src1, globaladdress:$global,
IntRegs:$src2),
>> + "if (!$src1) memw(##$global) = $src2",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +// if (Pv) memw(##global) = Rt
>> +let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1 in
>> +def STw_GP_cdnPt_V4 : STInst<(outs),
>> + (ins PredRegs:$src1, globaladdress:$global,
IntRegs:$src2),
>> + "if ($src1.new) memw(##$global) = $src2",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +// if (!Pv) memw(##global) = Rt
>> +let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1 in
>> +def STw_GP_cdnNotPt_V4 : STInst<(outs),
>> + (ins PredRegs:$src1, globaladdress:$global,
IntRegs:$src2),
>> + "if (!$src1.new) memw(##$global) = $src2",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +// 64 bit atomic store
>> +def : Pat<(atomic_store_64 (HexagonCONST32_GP tglobaladdr:$global),
>> + (i64 DoubleRegs:$src1)),
>> + (STd_GP_V4 tglobaladdr:$global, (i64
DoubleRegs:$src1))>,
>> + Requires<[HasV4T]>;
>> +
>> +// Map from store(globaladdress) -> memd(#foo)
>> +let AddedComplexity = 100 in
>> +def : Pat<(store (i64 DoubleRegs:$src1), (HexagonCONST32_GP
tglobaladdr:$global)),
>> + (STd_GP_V4 tglobaladdr:$global, (i64
DoubleRegs:$src1))>,
>> + Requires<[HasV4T]>;
>> +
>> +// 8 bit atomic store
>> +def : Pat< (atomic_store_8 (HexagonCONST32_GP
tglobaladdr:$global),
>> + (i32 IntRegs:$src1)),
>> + (STb_GP_V4 tglobaladdr:$global, (i32 IntRegs:$src1))>,
>> + Requires<[HasV4T]>;
>> +
>> +// Map from store(globaladdress) -> memb(#foo)
>> +let AddedComplexity = 100 in
>> +def : Pat<(truncstorei8 (i32 IntRegs:$src1),
>> + (HexagonCONST32_GP tglobaladdr:$global)),
>> + (STb_GP_V4 tglobaladdr:$global, (i32 IntRegs:$src1))>,
>> + Requires<[HasV4T]>;
>> +
>> +// Map from "i1 = constant<-1>; memw(CONST32(#foo)) =
i1"
>> +// to "r0 = 1; memw(#foo) = r0"
>> +let AddedComplexity = 100 in
>> +def : Pat<(store (i1 -1), (HexagonCONST32_GP tglobaladdr:$global)),
>> + (STb_GP_V4 tglobaladdr:$global, (TFRI 1))>,
>> + Requires<[HasV4T]>;
>> +
>> +def : Pat<(atomic_store_16 (HexagonCONST32_GP tglobaladdr:$global),
>> + (i32 IntRegs:$src1)),
>> + (STh_GP_V4 tglobaladdr:$global, (i32 IntRegs:$src1))>,
>> + Requires<[HasV4T]>;
>> +
>> +// Map from store(globaladdress) -> memh(#foo)
>> +let AddedComplexity = 100 in
>> +def : Pat<(truncstorei16 (i32 IntRegs:$src1),
>> + (HexagonCONST32_GP tglobaladdr:$global)),
>> + (STh_GP_V4 tglobaladdr:$global, (i32 IntRegs:$src1))>,
>> + Requires<[HasV4T]>;
>> +
>> +// 32 bit atomic store
>> +def : Pat<(atomic_store_32 (HexagonCONST32_GP tglobaladdr:$global),
>> + (i32 IntRegs:$src1)),
>> + (STw_GP_V4 tglobaladdr:$global, (i32 IntRegs:$src1))>,
>> + Requires<[HasV4T]>;
>> +
>> +// Map from store(globaladdress) -> memw(#foo)
>> +let AddedComplexity = 100 in
>> +def : Pat<(store (i32 IntRegs:$src1), (HexagonCONST32_GP
tglobaladdr:$global)),
>> + (STw_GP_V4 tglobaladdr:$global, (i32 IntRegs:$src1))>,
>> + Requires<[HasV4T]>;
>> +
>> +def : Pat<(atomic_store_64 (add (HexagonCONST32_GP
tglobaladdr:$global),
>> + u16ImmPred:$offset),
>> + (i64 DoubleRegs:$src1)),
>> + (STrid_GP_V4 tglobaladdr:$global, u16ImmPred:$offset,
>> + (i64
DoubleRegs:$src1))>,
>> + Requires<[HasV4T]>;
>> +
>> +def : Pat<(atomic_store_32 (add (HexagonCONST32_GP
tglobaladdr:$global),
>> + u16ImmPred:$offset),
>> + (i32 IntRegs:$src1)),
>> + (STriw_GP_V4 tglobaladdr:$global, u16ImmPred:$offset, (i32
IntRegs:$src1))>,
>> + Requires<[HasV4T]>;
>> +
>> +def : Pat<(atomic_store_16 (add (HexagonCONST32_GP
tglobaladdr:$global),
>> + u16ImmPred:$offset),
>> + (i32 IntRegs:$src1)),
>> + (STrih_GP_V4 tglobaladdr:$global, u16ImmPred:$offset, (i32
IntRegs:$src1))>,
>> + Requires<[HasV4T]>;
>> +
>> +def : Pat<(atomic_store_8 (add (HexagonCONST32_GP
tglobaladdr:$global),
>> + u16ImmPred:$offset),
>> + (i32 IntRegs:$src1)),
>> + (STrib_GP_V4 tglobaladdr:$global, u16ImmPred:$offset, (i32
IntRegs:$src1))>,
>> + Requires<[HasV4T]>;
>> +
>> +// Map from store(globaladdress + x) -> memd(#foo + x)
>> +let AddedComplexity = 100 in
>> +def : Pat<(store (i64 DoubleRegs:$src1), (add (HexagonCONST32_GP
tglobaladdr:$global),
>> + u16ImmPred:$offset)),
>> + (STrid_GP_V4 tglobaladdr:$global, u16ImmPred:$offset,
>> + (i64
DoubleRegs:$src1))>,
>> + Requires<[HasV4T]>;
>> +
>> +// Map from store(globaladdress + x) -> memb(#foo + x)
>> +let AddedComplexity = 100 in
>> +def : Pat<(truncstorei8 (i32 IntRegs:$src1),
>> + (add (HexagonCONST32_GP tglobaladdr:$global),
>> + u16ImmPred:$offset)),
>> + (STrib_GP_V4 tglobaladdr:$global, u16ImmPred:$offset, (i32
IntRegs:$src1))>,
>> + Requires<[HasV4T]>;
>> +
>> +// Map from store(globaladdress + x) -> memh(#foo + x)
>> +let AddedComplexity = 100 in
>> +def : Pat<(truncstorei16 (i32 IntRegs:$src1),
>> + (add (HexagonCONST32_GP tglobaladdr:$global),
>> + u16ImmPred:$offset)),
>> + (STrih_GP_V4 tglobaladdr:$global, u16ImmPred:$offset, (i32
IntRegs:$src1))>,
>> + Requires<[HasV4T]>;
>> +
>> +// Map from store(globaladdress + x) -> memw(#foo + x)
>> +let AddedComplexity = 100 in
>> +def : Pat<(store (i32 IntRegs:$src1),
>> + (add (HexagonCONST32_GP tglobaladdr:$global),
>> + u16ImmPred:$offset)),
>> + (STriw_GP_V4 tglobaladdr:$global, u16ImmPred:$offset, (i32
IntRegs:$src1))>,
>> + Requires<[HasV4T]>;
>> +
>> +
>>
>>
>> //===----------------------------------------------------------------------===//
>> // ST -
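Many of the hunks above only add isPredicated = 1 to the existing conditional store forms. As a minimal sketch, assuming the target surfaces that bit through the generic TargetInstrInfo::isPredicated() hook, a packetizer-style pass could pick out these stores roughly as follows; isPredicatedStore is a made-up helper, not part of the patch:

// Illustrative only -- not part of the patch.  Assumes the target's
// TargetInstrInfo::isPredicated() override reflects the isPredicated bit
// that the .td changes above set on the conditional store forms.
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/Target/TargetInstrInfo.h"

using namespace llvm;

// A packetizer would only consider promoting a store to its .new form
// when the instruction is both a store and predicated.
static bool isPredicatedStore(const MachineInstr *MI,
                              const TargetInstrInfo *TII) {
  return MI->getDesc().mayStore() && TII->isPredicated(MI);
}

In a VLIW packetizer this kind of check would typically gate whether a conditional store is even a candidate for .new promotion.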
>> @@ -1696,11 +3008,19 @@ def STrib_GP_nv_V4 : NVInst_V4<(outs),
>> []>,
>> Requires<[HasV4T]>;
>>
>> +// memb(#global)=Nt.new
>> +let mayStore = 1, neverHasSideEffects = 1 in
>> +def STb_GP_nv_V4 : NVInst_V4<(outs),
>> + (ins globaladdress:$global, IntRegs:$src),
>> + "memb(#$global) = $src.new",
>> + []>,
>> + Requires<[HasV4T]>;
>>
>> // Store new-value byte conditionally.
>> // if ([!]Pv[.new]) memb(#u6)=Nt.new
>> // if (Pv) memb(Rs+#u6:0)=Nt.new
>> -let mayStore = 1, neverHasSideEffects = 1 in
>> +let mayStore = 1, neverHasSideEffects = 1,
>> + isPredicated = 1 in
>> def STrib_cPt_nv_V4 : NVInst_V4<(outs),
>> (ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2),
>> "if ($src1) memb($addr) = $src2.new",
>> @@ -1708,7 +3028,8 @@ def STrib_cPt_nv_V4 : NVInst_V4<(outs),
>> Requires<[HasV4T]>;
>>
>> // if (Pv.new) memb(Rs+#u6:0)=Nt.new
>> -let mayStore = 1, neverHasSideEffects = 1 in
>> +let mayStore = 1, neverHasSideEffects = 1,
>> + isPredicated = 1 in
>> def STrib_cdnPt_nv_V4 : NVInst_V4<(outs),
>> (ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2),
>> "if ($src1.new) memb($addr) = $src2.new",
>> @@ -1716,7 +3037,8 @@ def STrib_cdnPt_nv_V4 : NVInst_V4<(outs),
>> Requires<[HasV4T]>;
>>
>> // if (!Pv) memb(Rs+#u6:0)=Nt.new
>> -let mayStore = 1, neverHasSideEffects = 1 in
>> +let mayStore = 1, neverHasSideEffects = 1,
>> + isPredicated = 1 in
>> def STrib_cNotPt_nv_V4 : NVInst_V4<(outs),
>> (ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2),
>> "if (!$src1) memb($addr) = $src2.new",
>> @@ -1724,7 +3046,8 @@ def STrib_cNotPt_nv_V4 : NVInst_V4<(outs),
>> Requires<[HasV4T]>;
>>
>> // if (!Pv.new) memb(Rs+#u6:0)=Nt.new
>> -let mayStore = 1, neverHasSideEffects = 1 in
>> +let mayStore = 1, neverHasSideEffects = 1,
>> + isPredicated = 1 in
>> def STrib_cdnNotPt_nv_V4 : NVInst_V4<(outs),
>> (ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2),
>> "if (!$src1.new) memb($addr) = $src2.new",
>> @@ -1732,7 +3055,8 @@ def STrib_cdnNotPt_nv_V4 : NVInst_V4<(outs),
>> Requires<[HasV4T]>;
>>
>> // if (Pv) memb(Rs+#u6:0)=Nt.new
>> -let mayStore = 1, neverHasSideEffects = 1 in
>> +let mayStore = 1, neverHasSideEffects = 1,
>> + isPredicated = 1 in
>> def STrib_indexed_cPt_nv_V4 : NVInst_V4<(outs),
>> (ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3,
IntRegs:$src4),
>> "if ($src1) memb($src2+#$src3) = $src4.new",
>> @@ -1740,7 +3064,8 @@ def STrib_indexed_cPt_nv_V4 :
NVInst_V4<(outs),
>> Requires<[HasV4T]>;
>>
>> // if (Pv.new) memb(Rs+#u6:0)=Nt.new
>> -let mayStore = 1, neverHasSideEffects = 1 in
>> +let mayStore = 1, neverHasSideEffects = 1,
>> + isPredicated = 1 in
>> def STrib_indexed_cdnPt_nv_V4 : NVInst_V4<(outs),
>> (ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3,
IntRegs:$src4),
>> "if ($src1.new) memb($src2+#$src3) =
$src4.new",
>> @@ -1748,7 +3073,8 @@ def STrib_indexed_cdnPt_nv_V4 :
NVInst_V4<(outs),
>> Requires<[HasV4T]>;
>>
>> // if (!Pv) memb(Rs+#u6:0)=Nt.new
>> -let mayStore = 1, neverHasSideEffects = 1 in
>> +let mayStore = 1, neverHasSideEffects = 1,
>> + isPredicated = 1 in
>> def STrib_indexed_cNotPt_nv_V4 : NVInst_V4<(outs),
>> (ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3,
IntRegs:$src4),
>> "if (!$src1) memb($src2+#$src3) = $src4.new",
>> @@ -1756,7 +3082,8 @@ def STrib_indexed_cNotPt_nv_V4 :
NVInst_V4<(outs),
>> Requires<[HasV4T]>;
>>
>> // if (!Pv.new) memb(Rs+#u6:0)=Nt.new
>> -let mayStore = 1, neverHasSideEffects = 1 in
>> +let mayStore = 1, neverHasSideEffects = 1,
>> + isPredicated = 1 in
>> def STrib_indexed_cdnNotPt_nv_V4 : NVInst_V4<(outs),
>> (ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3,
IntRegs:$src4),
>> "if (!$src1.new) memb($src2+#$src3) =
$src4.new",
>> @@ -1766,7 +3093,8 @@ def STrib_indexed_cdnNotPt_nv_V4 :
NVInst_V4<(outs),
>>
>> // if ([!]Pv[.new]) memb(Rs+Ru<<#u2)=Nt.new
>> // if (Pv) memb(Rs+Ru<<#u2)=Nt.new
>> -let mayStore = 1, AddedComplexity = 10 in
>> +let mayStore = 1, AddedComplexity = 10,
>> + isPredicated = 1 in
>> def STrib_indexed_shl_cPt_nv_V4 : NVInst_V4<(outs),
>> (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
u2Imm:$src4,
>> IntRegs:$src5),
>> @@ -1775,7 +3103,8 @@ def STrib_indexed_shl_cPt_nv_V4 :
NVInst_V4<(outs),
>> Requires<[HasV4T]>;
>>
>> // if (Pv.new) memb(Rs+Ru<<#u2)=Nt.new
>> -let mayStore = 1, AddedComplexity = 10 in
>> +let mayStore = 1, AddedComplexity = 10,
>> + isPredicated = 1 in
>> def STrib_indexed_shl_cdnPt_nv_V4 : NVInst_V4<(outs),
>> (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
u2Imm:$src4,
>> IntRegs:$src5),
>> @@ -1784,7 +3113,8 @@ def STrib_indexed_shl_cdnPt_nv_V4 :
NVInst_V4<(outs),
>> Requires<[HasV4T]>;
>>
>> // if (!Pv) memb(Rs+Ru<<#u2)=Nt.new
>> -let mayStore = 1, AddedComplexity = 10 in
>> +let mayStore = 1, AddedComplexity = 10,
>> + isPredicated = 1 in
>> def STrib_indexed_shl_cNotPt_nv_V4 : NVInst_V4<(outs),
>> (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
u2Imm:$src4,
>> IntRegs:$src5),
>> @@ -1793,7 +3123,8 @@ def STrib_indexed_shl_cNotPt_nv_V4 :
NVInst_V4<(outs),
>> Requires<[HasV4T]>;
>>
>> // if (!Pv.new) memb(Rs+Ru<<#u2)=Nt.new
>> -let mayStore = 1, AddedComplexity = 10 in
>> +let mayStore = 1, AddedComplexity = 10,
>> + isPredicated = 1 in
>> def STrib_indexed_shl_cdnNotPt_nv_V4 : NVInst_V4<(outs),
>> (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
u2Imm:$src4,
>> IntRegs:$src5),
>> @@ -1803,7 +3134,8 @@ def STrib_indexed_shl_cdnNotPt_nv_V4 :
NVInst_V4<(outs),
>>
>> // if ([!]Pv[.new]) memb(Rx++#s4:0)=Nt.new
>> // if (Pv) memb(Rx++#s4:0)=Nt.new
>> -let mayStore = 1, hasCtrlDep = 1 in
>> +let mayStore = 1, hasCtrlDep = 1,
>> + isPredicated = 1 in
>> def POST_STbri_cPt_nv_V4 : NVInstPI_V4<(outs IntRegs:$dst),
>> (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
s4_0Imm:$offset),
>> "if ($src1) memb($src3++#$offset) = $src2.new",
>> @@ -1811,7 +3143,8 @@ def POST_STbri_cPt_nv_V4 : NVInstPI_V4<(outs
IntRegs:$dst),
>> Requires<[HasV4T]>;
>>
>> // if (Pv.new) memb(Rx++#s4:0)=Nt.new
>> -let mayStore = 1, hasCtrlDep = 1 in
>> +let mayStore = 1, hasCtrlDep = 1,
>> + isPredicated = 1 in
>> def POST_STbri_cdnPt_nv_V4 : NVInstPI_V4<(outs IntRegs:$dst),
>> (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
s4_0Imm:$offset),
>> "if ($src1.new) memb($src3++#$offset) =
$src2.new",
>> @@ -1819,7 +3152,8 @@ def POST_STbri_cdnPt_nv_V4 : NVInstPI_V4<(outs
IntRegs:$dst),
>> Requires<[HasV4T]>;
>>
>> // if (!Pv) memb(Rx++#s4:0)=Nt.new
>> -let mayStore = 1, hasCtrlDep = 1 in
>> +let mayStore = 1, hasCtrlDep = 1,
>> + isPredicated = 1 in
>> def POST_STbri_cNotPt_nv_V4 : NVInstPI_V4<(outs IntRegs:$dst),
>> (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
s4_0Imm:$offset),
>> "if (!$src1) memb($src3++#$offset) =
$src2.new",
>> @@ -1827,7 +3161,8 @@ def POST_STbri_cNotPt_nv_V4 :
NVInstPI_V4<(outs IntRegs:$dst),
>> Requires<[HasV4T]>;
>>
>> // if (!Pv.new) memb(Rx++#s4:0)=Nt.new
>> -let mayStore = 1, hasCtrlDep = 1 in
>> +let mayStore = 1, hasCtrlDep = 1,
>> + isPredicated = 1 in
>> def POST_STbri_cdnNotPt_nv_V4 : NVInstPI_V4<(outs IntRegs:$dst),
>> (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
s4_0Imm:$offset),
>> "if (!$src1.new) memb($src3++#$offset) =
$src2.new",
>> @@ -1889,6 +3224,14 @@ def STrih_GP_nv_V4 : NVInst_V4<(outs),
>> []>,
>> Requires<[HasV4T]>;
>>
>> +// memh(#global)=Nt.new
>> +let mayStore = 1, neverHasSideEffects = 1 in
>> +def STh_GP_nv_V4 : NVInst_V4<(outs),
>> + (ins globaladdress:$global, IntRegs:$src),
>> + "memh(#$global) = $src.new",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>>
>> // Store new-value halfword conditionally.
>>
>> @@ -1896,7 +3239,8 @@ def STrih_GP_nv_V4 : NVInst_V4<(outs),
>>
>> // if ([!]Pv[.new]) memh(Rs+#u6:1)=Nt.new
>> // if (Pv) memh(Rs+#u6:1)=Nt.new
>> -let mayStore = 1, neverHasSideEffects = 1 in
>> +let mayStore = 1, neverHasSideEffects = 1,
>> + isPredicated = 1 in
>> def STrih_cPt_nv_V4 : NVInst_V4<(outs),
>> (ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2),
>> "if ($src1) memh($addr) = $src2.new",
>> @@ -1904,7 +3248,8 @@ def STrih_cPt_nv_V4 : NVInst_V4<(outs),
>> Requires<[HasV4T]>;
>>
>> // if (Pv.new) memh(Rs+#u6:1)=Nt.new
>> -let mayStore = 1, neverHasSideEffects = 1 in
>> +let mayStore = 1, neverHasSideEffects = 1,
>> + isPredicated = 1 in
>> def STrih_cdnPt_nv_V4 : NVInst_V4<(outs),
>> (ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2),
>> "if ($src1.new) memh($addr) = $src2.new",
>> @@ -1912,7 +3257,8 @@ def STrih_cdnPt_nv_V4 : NVInst_V4<(outs),
>> Requires<[HasV4T]>;
>>
>> // if (!Pv) memh(Rs+#u6:1)=Nt.new
>> -let mayStore = 1, neverHasSideEffects = 1 in
>> +let mayStore = 1, neverHasSideEffects = 1,
>> + isPredicated = 1 in
>> def STrih_cNotPt_nv_V4 : NVInst_V4<(outs),
>> (ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2),
>> "if (!$src1) memh($addr) = $src2.new",
>> @@ -1920,7 +3266,8 @@ def STrih_cNotPt_nv_V4 : NVInst_V4<(outs),
>> Requires<[HasV4T]>;
>>
>> // if (!Pv.new) memh(Rs+#u6:1)=Nt.new
>> -let mayStore = 1, neverHasSideEffects = 1 in
>> +let mayStore = 1, neverHasSideEffects = 1,
>> + isPredicated = 1 in
>> def STrih_cdnNotPt_nv_V4 : NVInst_V4<(outs),
>> (ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2),
>> "if (!$src1.new) memh($addr) = $src2.new",
>> @@ -1928,7 +3275,8 @@ def STrih_cdnNotPt_nv_V4 : NVInst_V4<(outs),
>> Requires<[HasV4T]>;
>>
>> // if (Pv) memh(Rs+#u6:1)=Nt.new
>> -let mayStore = 1, neverHasSideEffects = 1 in
>> +let mayStore = 1, neverHasSideEffects = 1,
>> + isPredicated = 1 in
>> def STrih_indexed_cPt_nv_V4 : NVInst_V4<(outs),
>> (ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3,
IntRegs:$src4),
>> "if ($src1) memh($src2+#$src3) = $src4.new",
>> @@ -1936,7 +3284,8 @@ def STrih_indexed_cPt_nv_V4 :
NVInst_V4<(outs),
>> Requires<[HasV4T]>;
>>
>> // if (Pv.new) memh(Rs+#u6:1)=Nt.new
>> -let mayStore = 1, neverHasSideEffects = 1 in
>> +let mayStore = 1, neverHasSideEffects = 1,
>> + isPredicated = 1 in
>> def STrih_indexed_cdnPt_nv_V4 : NVInst_V4<(outs),
>> (ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3,
IntRegs:$src4),
>> "if ($src1.new) memh($src2+#$src3) =
$src4.new",
>> @@ -1944,7 +3293,8 @@ def STrih_indexed_cdnPt_nv_V4 :
NVInst_V4<(outs),
>> Requires<[HasV4T]>;
>>
>> // if (!Pv) memh(Rs+#u6:1)=Nt.new
>> -let mayStore = 1, neverHasSideEffects = 1 in
>> +let mayStore = 1, neverHasSideEffects = 1,
>> + isPredicated = 1 in
>> def STrih_indexed_cNotPt_nv_V4 : NVInst_V4<(outs),
>> (ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3,
IntRegs:$src4),
>> "if (!$src1) memh($src2+#$src3) = $src4.new",
>> @@ -1952,7 +3302,8 @@ def STrih_indexed_cNotPt_nv_V4 :
NVInst_V4<(outs),
>> Requires<[HasV4T]>;
>>
>> // if (!Pv.new) memh(Rs+#u6:1)=Nt.new
>> -let mayStore = 1, neverHasSideEffects = 1 in
>> +let mayStore = 1, neverHasSideEffects = 1,
>> + isPredicated = 1 in
>> def STrih_indexed_cdnNotPt_nv_V4 : NVInst_V4<(outs),
>> (ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3,
IntRegs:$src4),
>> "if (!$src1.new) memh($src2+#$src3) =
$src4.new",
>> @@ -1961,7 +3312,8 @@ def STrih_indexed_cdnNotPt_nv_V4 :
NVInst_V4<(outs),
>>
>> // if ([!]Pv[.new]) memh(Rs+Ru<<#u2)=Nt.new
>> // if (Pv) memh(Rs+Ru<<#u2)=Nt.new
>> -let mayStore = 1, AddedComplexity = 10 in
>> +let mayStore = 1, AddedComplexity = 10,
>> + isPredicated = 1 in
>> def STrih_indexed_shl_cPt_nv_V4 : NVInst_V4<(outs),
>> (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
u2Imm:$src4,
>> IntRegs:$src5),
>> @@ -1970,7 +3322,8 @@ def STrih_indexed_shl_cPt_nv_V4 :
NVInst_V4<(outs),
>> Requires<[HasV4T]>;
>>
>> // if (Pv.new) memh(Rs+Ru<<#u2)=Nt.new
>> -let mayStore = 1, AddedComplexity = 10 in
>> +let mayStore = 1, AddedComplexity = 10,
>> + isPredicated = 1 in
>> def STrih_indexed_shl_cdnPt_nv_V4 : NVInst_V4<(outs),
>> (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
u2Imm:$src4,
>> IntRegs:$src5),
>> @@ -1979,7 +3332,8 @@ def STrih_indexed_shl_cdnPt_nv_V4 :
NVInst_V4<(outs),
>> Requires<[HasV4T]>;
>>
>> // if (!Pv) memh(Rs+Ru<<#u2)=Nt.new
>> -let mayStore = 1, AddedComplexity = 10 in
>> +let mayStore = 1, AddedComplexity = 10,
>> + isPredicated = 1 in
>> def STrih_indexed_shl_cNotPt_nv_V4 : NVInst_V4<(outs),
>> (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
u2Imm:$src4,
>> IntRegs:$src5),
>> @@ -1988,7 +3342,8 @@ def STrih_indexed_shl_cNotPt_nv_V4 :
NVInst_V4<(outs),
>> Requires<[HasV4T]>;
>>
>> // if (!Pv.new) memh(Rs+Ru<<#u2)=Nt.new
>> -let mayStore = 1, AddedComplexity = 10 in
>> +let mayStore = 1, AddedComplexity = 10,
>> + isPredicated = 1 in
>> def STrih_indexed_shl_cdnNotPt_nv_V4 : NVInst_V4<(outs),
>> (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
u2Imm:$src4,
>> IntRegs:$src5),
>> @@ -1998,7 +3353,8 @@ def STrih_indexed_shl_cdnNotPt_nv_V4 :
NVInst_V4<(outs),
>>
>> // if ([!]Pv[.new]) memh(Rx++#s4:1)=Nt.new
>> // if (Pv) memh(Rx++#s4:1)=Nt.new
>> -let mayStore = 1, hasCtrlDep = 1 in
>> +let mayStore = 1, hasCtrlDep = 1,
>> + isPredicated = 1 in
>> def POST_SThri_cPt_nv_V4 : NVInstPI_V4<(outs IntRegs:$dst),
>> (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
s4_1Imm:$offset),
>> "if ($src1) memh($src3++#$offset) = $src2.new",
>> @@ -2006,7 +3362,8 @@ def POST_SThri_cPt_nv_V4 : NVInstPI_V4<(outs
IntRegs:$dst),
>> Requires<[HasV4T]>;
>>
>> // if (Pv.new) memh(Rx++#s4:1)=Nt.new
>> -let mayStore = 1, hasCtrlDep = 1 in
>> +let mayStore = 1, hasCtrlDep = 1,
>> + isPredicated = 1 in
>> def POST_SThri_cdnPt_nv_V4 : NVInstPI_V4<(outs IntRegs:$dst),
>> (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
s4_1Imm:$offset),
>> "if ($src1.new) memh($src3++#$offset) =
$src2.new",
>> @@ -2014,7 +3371,8 @@ def POST_SThri_cdnPt_nv_V4 : NVInstPI_V4<(outs
IntRegs:$dst),
>> Requires<[HasV4T]>;
>>
>> // if (!Pv) memh(Rx++#s4:1)=Nt.new
>> -let mayStore = 1, hasCtrlDep = 1 in
>> +let mayStore = 1, hasCtrlDep = 1,
>> + isPredicated = 1 in
>> def POST_SThri_cNotPt_nv_V4 : NVInstPI_V4<(outs IntRegs:$dst),
>> (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
s4_1Imm:$offset),
>> "if (!$src1) memh($src3++#$offset) =
$src2.new",
>> @@ -2022,7 +3380,8 @@ def POST_SThri_cNotPt_nv_V4 :
NVInstPI_V4<(outs IntRegs:$dst),
>> Requires<[HasV4T]>;
>>
>> // if (!Pv.new) memh(Rx++#s4:1)=Nt.new
>> -let mayStore = 1, hasCtrlDep = 1 in
>> +let mayStore = 1, hasCtrlDep = 1,
>> + isPredicated = 1 in
>> def POST_SThri_cdnNotPt_nv_V4 : NVInstPI_V4<(outs IntRegs:$dst),
>> (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
s4_1Imm:$offset),
>> "if (!$src1.new) memh($src3++#$offset) =
$src2.new",
>> @@ -2085,6 +3444,12 @@ def STriw_GP_nv_V4 : NVInst_V4<(outs),
>> []>,
>> Requires<[HasV4T]>;
>>
>> +let mayStore = 1, neverHasSideEffects = 1 in
>> +def STw_GP_nv_V4 : NVInst_V4<(outs),
>> + (ins globaladdress:$global, IntRegs:$src),
>> + "memw(#$global) = $src.new",
>> + []>,
>> + Requires<[HasV4T]>;
>>
>> // Store new-value word conditionally.
>>
>> @@ -2092,7 +3457,8 @@ def STriw_GP_nv_V4 : NVInst_V4<(outs),
>>
>> // if ([!]Pv[.new]) memw(Rs+#u6:2)=Nt.new
>> // if (Pv) memw(Rs+#u6:2)=Nt.new
>> -let mayStore = 1, neverHasSideEffects = 1 in
>> +let mayStore = 1, neverHasSideEffects = 1,
>> + isPredicated = 1 in
>> def STriw_cPt_nv_V4 : NVInst_V4<(outs),
>> (ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2),
>> "if ($src1) memw($addr) = $src2.new",
>> @@ -2100,7 +3466,8 @@ def STriw_cPt_nv_V4 : NVInst_V4<(outs),
>> Requires<[HasV4T]>;
>>
>> // if (Pv.new) memw(Rs+#u6:2)=Nt.new
>> -let mayStore = 1, neverHasSideEffects = 1 in
>> +let mayStore = 1, neverHasSideEffects = 1,
>> + isPredicated = 1 in
>> def STriw_cdnPt_nv_V4 : NVInst_V4<(outs),
>> (ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2),
>> "if ($src1.new) memw($addr) = $src2.new",
>> @@ -2108,7 +3475,8 @@ def STriw_cdnPt_nv_V4 : NVInst_V4<(outs),
>> Requires<[HasV4T]>;
>>
>> // if (!Pv) memw(Rs+#u6:2)=Nt.new
>> -let mayStore = 1, neverHasSideEffects = 1 in
>> +let mayStore = 1, neverHasSideEffects = 1,
>> + isPredicated = 1 in
>> def STriw_cNotPt_nv_V4 : NVInst_V4<(outs),
>> (ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2),
>> "if (!$src1) memw($addr) = $src2.new",
>> @@ -2116,7 +3484,8 @@ def STriw_cNotPt_nv_V4 : NVInst_V4<(outs),
>> Requires<[HasV4T]>;
>>
>> // if (!Pv.new) memw(Rs+#u6:2)=Nt.new
>> -let mayStore = 1, neverHasSideEffects = 1 in
>> +let mayStore = 1, neverHasSideEffects = 1,
>> + isPredicated = 1 in
>> def STriw_cdnNotPt_nv_V4 : NVInst_V4<(outs),
>> (ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2),
>> "if (!$src1.new) memw($addr) = $src2.new",
>> @@ -2124,7 +3493,8 @@ def STriw_cdnNotPt_nv_V4 : NVInst_V4<(outs),
>> Requires<[HasV4T]>;
>>
>> // if (Pv) memw(Rs+#u6:2)=Nt.new
>> -let mayStore = 1, neverHasSideEffects = 1 in
>> +let mayStore = 1, neverHasSideEffects = 1,
>> + isPredicated = 1 in
>> def STriw_indexed_cPt_nv_V4 : NVInst_V4<(outs),
>> (ins PredRegs:$src1, IntRegs:$src2, u6_2Imm:$src3,
IntRegs:$src4),
>> "if ($src1) memw($src2+#$src3) = $src4.new",
>> @@ -2132,7 +3502,8 @@ def STriw_indexed_cPt_nv_V4 :
NVInst_V4<(outs),
>> Requires<[HasV4T]>;
>>
>> // if (Pv.new) memw(Rs+#u6:2)=Nt.new
>> -let mayStore = 1, neverHasSideEffects = 1 in
>> +let mayStore = 1, neverHasSideEffects = 1,
>> + isPredicated = 1 in
>> def STriw_indexed_cdnPt_nv_V4 : NVInst_V4<(outs),
>> (ins PredRegs:$src1, IntRegs:$src2, u6_2Imm:$src3,
IntRegs:$src4),
>> "if ($src1.new) memw($src2+#$src3) =
$src4.new",
>> @@ -2140,7 +3511,8 @@ def STriw_indexed_cdnPt_nv_V4 :
NVInst_V4<(outs),
>> Requires<[HasV4T]>;
>>
>> // if (!Pv) memw(Rs+#u6:2)=Nt.new
>> -let mayStore = 1, neverHasSideEffects = 1 in
>> +let mayStore = 1, neverHasSideEffects = 1,
>> + isPredicated = 1 in
>> def STriw_indexed_cNotPt_nv_V4 : NVInst_V4<(outs),
>> (ins PredRegs:$src1, IntRegs:$src2, u6_2Imm:$src3,
IntRegs:$src4),
>> "if (!$src1) memw($src2+#$src3) = $src4.new",
>> @@ -2148,7 +3520,8 @@ def STriw_indexed_cNotPt_nv_V4 :
NVInst_V4<(outs),
>> Requires<[HasV4T]>;
>>
>> // if (!Pv.new) memw(Rs+#u6:2)=Nt.new
>> -let mayStore = 1, neverHasSideEffects = 1 in
>> +let mayStore = 1, neverHasSideEffects = 1,
>> + isPredicated = 1 in
>> def STriw_indexed_cdnNotPt_nv_V4 : NVInst_V4<(outs),
>> (ins PredRegs:$src1, IntRegs:$src2, u6_2Imm:$src3,
IntRegs:$src4),
>> "if (!$src1.new) memw($src2+#$src3) =
$src4.new",
>> @@ -2158,7 +3531,8 @@ def STriw_indexed_cdnNotPt_nv_V4 :
NVInst_V4<(outs),
>>
>> // if ([!]Pv[.new]) memw(Rs+Ru<<#u2)=Nt.new
>> // if (Pv) memw(Rs+Ru<<#u2)=Nt.new
>> -let mayStore = 1, AddedComplexity = 10 in
>> +let mayStore = 1, AddedComplexity = 10,
>> + isPredicated = 1 in
>> def STriw_indexed_shl_cPt_nv_V4 : NVInst_V4<(outs),
>> (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
u2Imm:$src4,
>> IntRegs:$src5),
>> @@ -2167,7 +3541,8 @@ def STriw_indexed_shl_cPt_nv_V4 :
NVInst_V4<(outs),
>> Requires<[HasV4T]>;
>>
>> // if (Pv.new) memw(Rs+Ru<<#u2)=Nt.new
>> -let mayStore = 1, AddedComplexity = 10 in
>> +let mayStore = 1, AddedComplexity = 10,
>> + isPredicated = 1 in
>> def STriw_indexed_shl_cdnPt_nv_V4 : NVInst_V4<(outs),
>> (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
u2Imm:$src4,
>> IntRegs:$src5),
>> @@ -2175,57 +3550,256 @@ def STriw_indexed_shl_cdnPt_nv_V4 :
NVInst_V4<(outs),
>> []>,
>> Requires<[HasV4T]>;
>>
>> -// if (!Pv) memw(Rs+Ru<<#u2)=Nt.new
>> -let mayStore = 1, AddedComplexity = 10 in
>> -def STriw_indexed_shl_cNotPt_nv_V4 : NVInst_V4<(outs),
>> - (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
u2Imm:$src4,
>> - IntRegs:$src5),
>> - "if (!$src1) memw($src2+$src3<<#$src4) =
$src5.new",
>> +// if (!Pv) memw(Rs+Ru<<#u2)=Nt.new
>> +let mayStore = 1, AddedComplexity = 10,
>> + isPredicated = 1 in
>> +def STriw_indexed_shl_cNotPt_nv_V4 : NVInst_V4<(outs),
>> + (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
u2Imm:$src4,
>> + IntRegs:$src5),
>> + "if (!$src1) memw($src2+$src3<<#$src4) =
$src5.new",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +// if (!Pv.new) memw(Rs+Ru<<#u2)=Nt.new
>> +let mayStore = 1, AddedComplexity = 10,
>> + isPredicated = 1 in
>> +def STriw_indexed_shl_cdnNotPt_nv_V4 : NVInst_V4<(outs),
>> + (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
u2Imm:$src4,
>> + IntRegs:$src5),
>> + "if (!$src1.new) memw($src2+$src3<<#$src4) =
$src5.new",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +// if ([!]Pv[.new]) memw(Rx++#s4:2)=Nt.new
>> +// if (Pv) memw(Rx++#s4:2)=Nt.new
>> +let mayStore = 1, hasCtrlDep = 1,
>> + isPredicated = 1 in
>> +def POST_STwri_cPt_nv_V4 : NVInstPI_V4<(outs IntRegs:$dst),
>> + (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
s4_2Imm:$offset),
>> + "if ($src1) memw($src3++#$offset) = $src2.new",
>> + [],"$src3 = $dst">,
>> + Requires<[HasV4T]>;
>> +
>> +// if (Pv.new) memw(Rx++#s4:2)=Nt.new
>> +let mayStore = 1, hasCtrlDep = 1,
>> + isPredicated = 1 in
>> +def POST_STwri_cdnPt_nv_V4 : NVInstPI_V4<(outs IntRegs:$dst),
>> + (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
s4_2Imm:$offset),
>> + "if ($src1.new) memw($src3++#$offset) =
$src2.new",
>> + [],"$src3 = $dst">,
>> + Requires<[HasV4T]>;
>> +
>> +// if (!Pv) memw(Rx++#s4:2)=Nt.new
>> +let mayStore = 1, hasCtrlDep = 1,
>> + isPredicated = 1 in
>> +def POST_STwri_cNotPt_nv_V4 : NVInstPI_V4<(outs IntRegs:$dst),
>> + (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
s4_2Imm:$offset),
>> + "if (!$src1) memw($src3++#$offset) = $src2.new",
>> + [],"$src3 = $dst">,
>> + Requires<[HasV4T]>;
>> +
>> +// if (!Pv.new) memw(Rx++#s4:2)=Nt.new
>> +let mayStore = 1, hasCtrlDep = 1,
>> + isPredicated = 1 in
>> +def POST_STwri_cdnNotPt_nv_V4 : NVInstPI_V4<(outs IntRegs:$dst),
>> + (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
s4_2Imm:$offset),
>> + "if (!$src1.new) memw($src3++#$offset) =
$src2.new",
>> + [],"$src3 = $dst">,
>> + Requires<[HasV4T]>;
>> +
>> +
>> +
>> +// if (Pv) memb(##global) = Rt
>> +let mayStore = 1, neverHasSideEffects = 1 in
>> +def STb_GP_cPt_nv_V4 : NVInst_V4<(outs),
>> + (ins PredRegs:$src1, globaladdress:$global,
IntRegs:$src2),
>> + "if ($src1) memb(##$global) = $src2.new",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +// if (!Pv) memb(##global) = Rt
>> +let mayStore = 1, neverHasSideEffects = 1 in
>> +def STb_GP_cNotPt_nv_V4 : NVInst_V4<(outs),
>> + (ins PredRegs:$src1, globaladdress:$global,
IntRegs:$src2),
>> + "if (!$src1) memb(##$global) = $src2.new",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +// if (Pv) memb(##global) = Rt
>> +let mayStore = 1, neverHasSideEffects = 1 in
>> +def STb_GP_cdnPt_nv_V4 : NVInst_V4<(outs),
>> + (ins PredRegs:$src1, globaladdress:$global,
IntRegs:$src2),
>> + "if ($src1.new) memb(##$global) = $src2.new",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +// if (!Pv) memb(##global) = Rt
>> +let mayStore = 1, neverHasSideEffects = 1 in
>> +def STb_GP_cdnNotPt_nv_V4 : NVInst_V4<(outs),
>> + (ins PredRegs:$src1, globaladdress:$global,
IntRegs:$src2),
>> + "if (!$src1.new) memb(##$global) = $src2.new",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +// if (Pv) memh(##global) = Rt
>> +let mayStore = 1, neverHasSideEffects = 1 in
>> +def STh_GP_cPt_nv_V4 : NVInst_V4<(outs),
>> + (ins PredRegs:$src1, globaladdress:$global,
IntRegs:$src2),
>> + "if ($src1) memh(##$global) = $src2.new",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +// if (!Pv) memh(##global) = Rt
>> +let mayStore = 1, neverHasSideEffects = 1 in
>> +def STh_GP_cNotPt_nv_V4 : NVInst_V4<(outs),
>> + (ins PredRegs:$src1, globaladdress:$global,
IntRegs:$src2),
>> + "if (!$src1) memh(##$global) = $src2.new",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +// if (Pv) memh(##global) = Rt
>> +let mayStore = 1, neverHasSideEffects = 1 in
>> +def STh_GP_cdnPt_nv_V4 : NVInst_V4<(outs),
>> + (ins PredRegs:$src1, globaladdress:$global,
IntRegs:$src2),
>> + "if ($src1.new) memh(##$global) = $src2.new",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +// if (!Pv) memh(##global) = Rt
>> +let mayStore = 1, neverHasSideEffects = 1 in
>> +def STh_GP_cdnNotPt_nv_V4 : NVInst_V4<(outs),
>> + (ins PredRegs:$src1, globaladdress:$global,
IntRegs:$src2),
>> + "if (!$src1.new) memh(##$global) = $src2.new",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +// if (Pv) memw(##global) = Rt
>> +let mayStore = 1, neverHasSideEffects = 1 in
>> +def STw_GP_cPt_nv_V4 : NVInst_V4<(outs),
>> + (ins PredRegs:$src1, globaladdress:$global,
IntRegs:$src2),
>> + "if ($src1) memw(##$global) = $src2.new",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +// if (!Pv) memw(##global) = Rt
>> +let mayStore = 1, neverHasSideEffects = 1 in
>> +def STw_GP_cNotPt_nv_V4 : NVInst_V4<(outs),
>> + (ins PredRegs:$src1, globaladdress:$global,
IntRegs:$src2),
>> + "if (!$src1) memw(##$global) = $src2.new",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +// if (Pv) memw(##global) = Rt
>> +let mayStore = 1, neverHasSideEffects = 1 in
>> +def STw_GP_cdnPt_nv_V4 : NVInst_V4<(outs),
>> + (ins PredRegs:$src1, globaladdress:$global,
IntRegs:$src2),
>> + "if ($src1.new) memw(##$global) = $src2.new",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +// if (!Pv) memw(##global) = Rt
>> +let mayStore = 1, neverHasSideEffects = 1 in
>> +def STw_GP_cdnNotPt_nv_V4 : NVInst_V4<(outs),
>> + (ins PredRegs:$src1, globaladdress:$global,
IntRegs:$src2),
>> + "if (!$src1.new) memw(##$global) = $src2.new",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +let mayStore = 1, neverHasSideEffects = 1 in
>> +def STrib_GP_cPt_nv_V4 : NVInst_V4<(outs),
>> + (ins PredRegs:$src1, globaladdress:$global,
u16Imm:$offset,
>> +
IntRegs:$src2),
>> + "if ($src1) memb(##$global+$offset) =
$src2.new",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +let mayStore = 1, neverHasSideEffects = 1 in
>> +def STrib_GP_cNotPt_nv_V4 : NVInst_V4<(outs),
>> + (ins PredRegs:$src1, globaladdress:$global,
u16Imm:$offset,
>> +
IntRegs:$src2),
>> + "if (!$src1) memb(##$global+$offset) =
$src2.new",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +let mayStore = 1, neverHasSideEffects = 1 in
>> +def STrib_GP_cdnPt_nv_V4 : NVInst_V4<(outs),
>> + (ins PredRegs:$src1, globaladdress:$global,
u16Imm:$offset,
>> +
IntRegs:$src2),
>> + "if ($src1.new) memb(##$global+$offset) =
$src2.new",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +let mayStore = 1, neverHasSideEffects = 1 in
>> +def STrib_GP_cdnNotPt_nv_V4 : NVInst_V4<(outs),
>> + (ins PredRegs:$src1, globaladdress:$global,
u16Imm:$offset,
>> +
IntRegs:$src2),
>> + "if (!$src1.new) memb(##$global+$offset) =
$src2.new",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +let mayStore = 1, neverHasSideEffects = 1 in
>> +def STrih_GP_cPt_nv_V4 : NVInst_V4<(outs),
>> + (ins PredRegs:$src1, globaladdress:$global,
u16Imm:$offset,
>> +
IntRegs:$src2),
>> + "if ($src1) memh(##$global+$offset) =
$src2.new",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +let mayStore = 1, neverHasSideEffects = 1 in
>> +def STrih_GP_cNotPt_nv_V4 : NVInst_V4<(outs),
>> + (ins PredRegs:$src1, globaladdress:$global,
u16Imm:$offset,
>> +
IntRegs:$src2),
>> + "if (!$src1) memh(##$global+$offset) =
$src2.new",
>> []>,
>> Requires<[HasV4T]>;
>>
>> -// if (!Pv.new) memw(Rs+Ru<<#u2)=Nt.new
>> -let mayStore = 1, AddedComplexity = 10 in
>> -def STriw_indexed_shl_cdnNotPt_nv_V4 : NVInst_V4<(outs),
>> - (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
u2Imm:$src4,
>> - IntRegs:$src5),
>> - "if (!$src1.new) memw($src2+$src3<<#$src4) =
$src5.new",
>> +let mayStore = 1, neverHasSideEffects = 1 in
>> +def STrih_GP_cdnPt_nv_V4 : NVInst_V4<(outs),
>> + (ins PredRegs:$src1, globaladdress:$global,
u16Imm:$offset,
>> +
IntRegs:$src2),
>> + "if ($src1.new) memh(##$global+$offset) =
$src2.new",
>> []>,
>> Requires<[HasV4T]>;
>>
>> -// if ([!]Pv[.new]) memw(Rx++#s4:2)=Nt.new
>> -// if (Pv) memw(Rx++#s4:2)=Nt.new
>> -let mayStore = 1, hasCtrlDep = 1 in
>> -def POST_STwri_cPt_nv_V4 : NVInstPI_V4<(outs IntRegs:$dst),
>> - (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
s4_2Imm:$offset),
>> - "if ($src1) memw($src3++#$offset) = $src2.new",
>> - [],"$src3 = $dst">,
>> +let mayStore = 1, neverHasSideEffects = 1 in
>> +def STrih_GP_cdnNotPt_nv_V4 : NVInst_V4<(outs),
>> + (ins PredRegs:$src1, globaladdress:$global,
u16Imm:$offset,
>> +
IntRegs:$src2),
>> + "if (!$src1.new) memh(##$global+$offset) =
$src2.new",
>> + []>,
>> Requires<[HasV4T]>;
>>
>> -// if (Pv.new) memw(Rx++#s4:2)=Nt.new
>> -let mayStore = 1, hasCtrlDep = 1 in
>> -def POST_STwri_cdnPt_nv_V4 : NVInstPI_V4<(outs IntRegs:$dst),
>> - (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
s4_2Imm:$offset),
>> - "if ($src1.new) memw($src3++#$offset) =
$src2.new",
>> - [],"$src3 = $dst">,
>> +let mayStore = 1, neverHasSideEffects = 1 in
>> +def STriw_GP_cPt_nv_V4 : NVInst_V4<(outs),
>> + (ins PredRegs:$src1, globaladdress:$global,
u16Imm:$offset,
>> +
IntRegs:$src2),
>> + "if ($src1) memw(##$global+$offset) =
$src2.new",
>> + []>,
>> Requires<[HasV4T]>;
>>
>> -// if (!Pv) memw(Rx++#s4:2)=Nt.new
>> -let mayStore = 1, hasCtrlDep = 1 in
>> -def POST_STwri_cNotPt_nv_V4 : NVInstPI_V4<(outs IntRegs:$dst),
>> - (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
s4_2Imm:$offset),
>> - "if (!$src1) memw($src3++#$offset) = $src2.new",
>> - [],"$src3 = $dst">,
>> +let mayStore = 1, neverHasSideEffects = 1 in
>> +def STriw_GP_cNotPt_nv_V4 : NVInst_V4<(outs),
>> + (ins PredRegs:$src1, globaladdress:$global,
u16Imm:$offset,
>> +
IntRegs:$src2),
>> + "if (!$src1) memw(##$global+$offset) =
$src2.new",
>> + []>,
>> Requires<[HasV4T]>;
>>
>> -// if (!Pv.new) memw(Rx++#s4:2)=Nt.new
>> -let mayStore = 1, hasCtrlDep = 1 in
>> -def POST_STwri_cdnNotPt_nv_V4 : NVInstPI_V4<(outs IntRegs:$dst),
>> - (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3,
s4_2Imm:$offset),
>> - "if (!$src1.new) memw($src3++#$offset) =
$src2.new",
>> - [],"$src3 = $dst">,
>> +let mayStore = 1, neverHasSideEffects = 1 in
>> +def STriw_GP_cdnPt_nv_V4 : NVInst_V4<(outs),
>> + (ins PredRegs:$src1, globaladdress:$global,
u16Imm:$offset,
>> +
IntRegs:$src2),
>> + "if ($src1.new) memw(##$global+$offset) =
$src2.new",
>> + []>,
>> Requires<[HasV4T]>;
>>
>> +let mayStore = 1, neverHasSideEffects = 1 in
>> +def STriw_GP_cdnNotPt_nv_V4 : NVInst_V4<(outs),
>> + (ins PredRegs:$src1, globaladdress:$global,
u16Imm:$offset,
>> +
IntRegs:$src2),
>> + "if (!$src1.new) memw(##$global+$offset) =
$src2.new",
>> + []>,
>> + Requires<[HasV4T]>;
>>
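
(Not part of the patch -- just an illustrative sketch of what these _nv
global-store forms are for. Register/predicate assignments are hypothetical,
and whether the packetizer actually forms such a packet depends on the
dependences and resources it sees.)

    /* C source, illustrative only */
    int g;
    void set_g(int c, int x) {
      if (c)
        g = x + 1;
    }

    /* Schematically, on V4 the producer and the predicated store can land
       in the same packet and use the dot-new forms defined above:
         { p0 = cmp.gt(r0, #0)
           r3 = add(r1, #1)
           if (p0.new) memw(##g) = r3.new }                                */
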
>>
//===----------------------------------------------------------------------===//
>> // NV/ST -
>> @@ -2416,16 +3990,18 @@ let isBranch = 1, isTerminator=1,
neverHasSideEffects = 1, Defs = [PC] in {
>> def ADDr_ADDri_V4 : MInst<(outs IntRegs:$dst),
>> (ins IntRegs:$src1, IntRegs:$src2, s6Imm:$src3),
>> "$dst = add($src1, add($src2, #$src3))",
>> - [(set IntRegs:$dst,
>> - (add IntRegs:$src1, (add IntRegs:$src2,
s6ImmPred:$src3)))]>,
>> + [(set (i32 IntRegs:$dst),
>> + (add (i32 IntRegs:$src1), (add (i32 IntRegs:$src2),
>> + s6ImmPred:$src3)))]>,
>> Requires<[HasV4T]>;
>>
>> // Rd=add(Rs,sub(#s6,Ru))
>> def ADDr_SUBri_V4 : MInst<(outs IntRegs:$dst),
>> (ins IntRegs:$src1, s6Imm:$src2, IntRegs:$src3),
>> "$dst = add($src1, sub(#$src2, $src3))",
>> - [(set IntRegs:$dst,
>> - (add IntRegs:$src1, (sub s6ImmPred:$src2,
IntRegs:$src3)))]>,
>> + [(set (i32 IntRegs:$dst),
>> + (add (i32 IntRegs:$src1), (sub s6ImmPred:$src2,
>> + (i32 IntRegs:$src3))))]>,
>> Requires<[HasV4T]>;
>>
>> // Generates the same instruction as ADDr_SUBri_V4 but matches
different
>> @@ -2434,8 +4010,9 @@ def ADDr_SUBri_V4 : MInst<(outs IntRegs:$dst),
>> def ADDri_SUBr_V4 : MInst<(outs IntRegs:$dst),
>> (ins IntRegs:$src1, s6Imm:$src2, IntRegs:$src3),
>> "$dst = add($src1, sub(#$src2, $src3))",
>> - [(set IntRegs:$dst,
>> - (sub (add IntRegs:$src1, s6ImmPred:$src2),
IntRegs:$src3))]>,
>> + [(set (i32 IntRegs:$dst),
>> + (sub (add (i32 IntRegs:$src1), s6ImmPred:$src2),
>> + (i32 IntRegs:$src3)))]>,
>> Requires<[HasV4T]>;
>>
>>
>> @@ -2451,16 +4028,16 @@ def ADDri_SUBr_V4 : MInst<(outs
IntRegs:$dst),
>> def ANDd_NOTd_V4 : MInst<(outs DoubleRegs:$dst),
>> (ins DoubleRegs:$src1, DoubleRegs:$src2),
>> "$dst = and($src1, ~$src2)",
>> - [(set DoubleRegs:$dst, (and DoubleRegs:$src1,
>> - (not DoubleRegs:$src2)))]>,
>> + [(set (i64 DoubleRegs:$dst), (and (i64 DoubleRegs:$src1),
>> + (not (i64
DoubleRegs:$src2))))]>,
>> Requires<[HasV4T]>;
>>
>> // Rdd=or(Rtt,~Rss)
>> def ORd_NOTd_V4 : MInst<(outs DoubleRegs:$dst),
>> (ins DoubleRegs:$src1, DoubleRegs:$src2),
>> "$dst = or($src1, ~$src2)",
>> - [(set DoubleRegs:$dst,
>> - (or DoubleRegs:$src1, (not DoubleRegs:$src2)))]>,
>> + [(set (i64 DoubleRegs:$dst),
>> + (or (i64 DoubleRegs:$src1), (not (i64
DoubleRegs:$src2))))]>,
>> Requires<[HasV4T]>;
>>
>>
>> @@ -2469,8 +4046,9 @@ def ORd_NOTd_V4 : MInst<(outs
DoubleRegs:$dst),
>> def XORd_XORdd: MInst_acc<(outs DoubleRegs:$dst),
>> (ins DoubleRegs:$src1, DoubleRegs:$src2, DoubleRegs:$src3),
>> "$dst ^= xor($src2, $src3)",
>> - [(set DoubleRegs:$dst,
>> - (xor DoubleRegs:$src1, (xor DoubleRegs:$src2,
DoubleRegs:$src3)))],
>> + [(set (i64 DoubleRegs:$dst),
>> + (xor (i64 DoubleRegs:$src1), (xor (i64 DoubleRegs:$src2),
>> + (i64
DoubleRegs:$src3))))],
>> "$src1 = $dst">,
>> Requires<[HasV4T]>;
>>
>> @@ -2480,8 +4058,9 @@ def XORd_XORdd: MInst_acc<(outs
DoubleRegs:$dst),
>> def ORr_ANDri_V4 : MInst_acc<(outs IntRegs:$dst),
>> (ins IntRegs:$src1, IntRegs: $src2, s10Imm:$src3),
>> "$dst = or($src1, and($src2, #$src3))",
>> - [(set IntRegs:$dst,
>> - (or IntRegs:$src1, (and IntRegs:$src2,
s10ImmPred:$src3)))],
>> + [(set (i32 IntRegs:$dst),
>> + (or (i32 IntRegs:$src1), (and (i32 IntRegs:$src2),
>> + s10ImmPred:$src3)))],
>> "$src2 = $dst">,
>> Requires<[HasV4T]>;
>>
>> @@ -2490,8 +4069,9 @@ def ORr_ANDri_V4 : MInst_acc<(outs
IntRegs:$dst),
>> def ANDr_ANDrr_V4 : MInst_acc<(outs IntRegs:$dst),
>> (ins IntRegs:$src1, IntRegs: $src2, IntRegs:$src3),
>> "$dst&= and($src2, $src3)",
>> - [(set IntRegs:$dst,
>> - (and IntRegs:$src1, (and IntRegs:$src2,
IntRegs:$src3)))],
>> + [(set (i32 IntRegs:$dst),
>> + (and (i32 IntRegs:$src1), (and (i32 IntRegs:$src2),
>> + (i32
IntRegs:$src3))))],
>> "$src1 = $dst">,
>> Requires<[HasV4T]>;
>>
>> @@ -2499,8 +4079,9 @@ def ANDr_ANDrr_V4 : MInst_acc<(outs
IntRegs:$dst),
>> def ORr_ANDrr_V4 : MInst_acc<(outs IntRegs:$dst),
>> (ins IntRegs:$src1, IntRegs: $src2, IntRegs:$src3),
>> "$dst |= and($src2, $src3)",
>> - [(set IntRegs:$dst,
>> - (or IntRegs:$src1, (and IntRegs:$src2, IntRegs:$src3)))],
>> + [(set (i32 IntRegs:$dst),
>> + (or (i32 IntRegs:$src1), (and (i32 IntRegs:$src2),
>> + (i32
IntRegs:$src3))))],
>> "$src1 = $dst">,
>> Requires<[HasV4T]>;
>>
>> @@ -2508,8 +4089,9 @@ def ORr_ANDrr_V4 : MInst_acc<(outs
IntRegs:$dst),
>> def XORr_ANDrr_V4 : MInst_acc<(outs IntRegs:$dst),
>> (ins IntRegs:$src1, IntRegs: $src2, IntRegs:$src3),
>> "$dst ^= and($src2, $src3)",
>> - [(set IntRegs:$dst,
>> - (xor IntRegs:$src1, (and IntRegs:$src2,
IntRegs:$src3)))],
>> + [(set (i32 IntRegs:$dst),
>> + (xor (i32 IntRegs:$src1), (and (i32 IntRegs:$src2),
>> + (i32 IntRegs:$src3))))],
>> "$src1 = $dst">,
>> Requires<[HasV4T]>;
>>
>> @@ -2518,8 +4100,9 @@ def XORr_ANDrr_V4 : MInst_acc<(outs
IntRegs:$dst),
>> def ANDr_ANDr_NOTr_V4 : MInst_acc<(outs IntRegs:$dst),
>> (ins IntRegs:$src1, IntRegs: $src2, IntRegs:$src3),
>> "$dst&= and($src2, ~$src3)",
>> - [(set IntRegs:$dst,
>> - (and IntRegs:$src1, (and IntRegs:$src2, (not
IntRegs:$src3))))],
>> + [(set (i32 IntRegs:$dst),
>> + (and (i32 IntRegs:$src1), (and (i32 IntRegs:$src2),
>> + (not (i32
IntRegs:$src3)))))],
>> "$src1 = $dst">,
>> Requires<[HasV4T]>;
>>
>> @@ -2527,8 +4110,9 @@ def ANDr_ANDr_NOTr_V4 : MInst_acc<(outs
IntRegs:$dst),
>> def ORr_ANDr_NOTr_V4 : MInst_acc<(outs IntRegs:$dst),
>> (ins IntRegs:$src1, IntRegs: $src2, IntRegs:$src3),
>> "$dst |= and($src2, ~$src3)",
>> - [(set IntRegs:$dst,
>> - (or IntRegs:$src1, (and IntRegs:$src2, (not
IntRegs:$src3))))],
>> + [(set (i32 IntRegs:$dst),
>> + (or (i32 IntRegs:$src1), (and (i32 IntRegs:$src2),
>> + (not (i32
IntRegs:$src3)))))],
>> "$src1 = $dst">,
>> Requires<[HasV4T]>;
>>
>> @@ -2536,8 +4120,9 @@ def ORr_ANDr_NOTr_V4 : MInst_acc<(outs
IntRegs:$dst),
>> def XORr_ANDr_NOTr_V4 : MInst_acc<(outs IntRegs:$dst),
>> (ins IntRegs:$src1, IntRegs: $src2, IntRegs:$src3),
>> "$dst ^= and($src2, ~$src3)",
>> - [(set IntRegs:$dst,
>> - (xor IntRegs:$src1, (and IntRegs:$src2, (not
IntRegs:$src3))))],
>> + [(set (i32 IntRegs:$dst),
>> + (xor (i32 IntRegs:$src1), (and (i32 IntRegs:$src2),
>> + (not (i32
IntRegs:$src3)))))],
>> "$src1 = $dst">,
>> Requires<[HasV4T]>;
>>
>> @@ -2546,8 +4131,9 @@ def XORr_ANDr_NOTr_V4 : MInst_acc<(outs
IntRegs:$dst),
>> def ANDr_ORrr_V4 : MInst_acc<(outs IntRegs:$dst),
>> (ins IntRegs:$src1, IntRegs: $src2, IntRegs:$src3),
>> "$dst&= or($src2, $src3)",
>> - [(set IntRegs:$dst,
>> - (and IntRegs:$src1, (or IntRegs:$src2, IntRegs:$src3)))],
>> + [(set (i32 IntRegs:$dst),
>> + (and (i32 IntRegs:$src1), (or (i32 IntRegs:$src2),
>> + (i32
IntRegs:$src3))))],
>> "$src1 = $dst">,
>> Requires<[HasV4T]>;
>>
>> @@ -2555,8 +4141,9 @@ def ANDr_ORrr_V4 : MInst_acc<(outs
IntRegs:$dst),
>> def ORr_ORrr_V4 : MInst_acc<(outs IntRegs:$dst),
>> (ins IntRegs:$src1, IntRegs: $src2, IntRegs:$src3),
>> "$dst |= or($src2, $src3)",
>> - [(set IntRegs:$dst,
>> - (or IntRegs:$src1, (or IntRegs:$src2, IntRegs:$src3)))],
>> + [(set (i32 IntRegs:$dst),
>> + (or (i32 IntRegs:$src1), (or (i32 IntRegs:$src2),
>> + (i32
IntRegs:$src3))))],
>> "$src1 = $dst">,
>> Requires<[HasV4T]>;
>>
>> @@ -2564,8 +4151,9 @@ def ORr_ORrr_V4 : MInst_acc<(outs
IntRegs:$dst),
>> def XORr_ORrr_V4 : MInst_acc<(outs IntRegs:$dst),
>> (ins IntRegs:$src1, IntRegs: $src2, IntRegs:$src3),
>> "$dst ^= or($src2, $src3)",
>> - [(set IntRegs:$dst,
>> - (xor IntRegs:$src1, (or IntRegs:$src2, IntRegs:$src3)))],
>> + [(set (i32 IntRegs:$dst),
>> + (xor (i32 IntRegs:$src1), (or (i32 IntRegs:$src2),
>> + (i32 IntRegs:$src3))))],
>> "$src1 = $dst">,
>> Requires<[HasV4T]>;
>>
>> @@ -2574,8 +4162,9 @@ def XORr_ORrr_V4 : MInst_acc<(outs
IntRegs:$dst),
>> def ANDr_XORrr_V4 : MInst_acc<(outs IntRegs:$dst),
>> (ins IntRegs:$src1, IntRegs: $src2, IntRegs:$src3),
>> "$dst&= xor($src2, $src3)",
>> - [(set IntRegs:$dst,
>> - (and IntRegs:$src1, (xor IntRegs:$src2,
IntRegs:$src3)))],
>> + [(set (i32 IntRegs:$dst),
>> + (and (i32 IntRegs:$src1), (xor (i32 IntRegs:$src2),
>> + (i32
IntRegs:$src3))))],
>> "$src1 = $dst">,
>> Requires<[HasV4T]>;
>>
>> @@ -2583,8 +4172,9 @@ def ANDr_XORrr_V4 : MInst_acc<(outs
IntRegs:$dst),
>> def ORr_XORrr_V4 : MInst_acc<(outs IntRegs:$dst),
>> (ins IntRegs:$src1, IntRegs: $src2, IntRegs:$src3),
>> "$dst |= xor($src2, $src3)",
>> - [(set IntRegs:$dst,
>> - (and IntRegs:$src1, (xor IntRegs:$src2,
IntRegs:$src3)))],
>> + [(set (i32 IntRegs:$dst),
>> + (and (i32 IntRegs:$src1), (xor (i32 IntRegs:$src2),
>> + (i32
IntRegs:$src3))))],
>> "$src1 = $dst">,
>> Requires<[HasV4T]>;
>>
>> @@ -2592,8 +4182,9 @@ def ORr_XORrr_V4 : MInst_acc<(outs
IntRegs:$dst),
>> def XORr_XORrr_V4 : MInst_acc<(outs IntRegs:$dst),
>> (ins IntRegs:$src1, IntRegs: $src2, IntRegs:$src3),
>> "$dst ^= xor($src2, $src3)",
>> - [(set IntRegs:$dst,
>> - (and IntRegs:$src1, (xor IntRegs:$src2,
IntRegs:$src3)))],
>> + [(set (i32 IntRegs:$dst),
>> + (and (i32 IntRegs:$src1), (xor (i32 IntRegs:$src2),
>> + (i32 IntRegs:$src3))))],
>> "$src1 = $dst">,
>> Requires<[HasV4T]>;
>>
>> @@ -2601,8 +4192,9 @@ def XORr_XORrr_V4 : MInst_acc<(outs
IntRegs:$dst),
>> def ORr_ANDri2_V4 : MInst_acc<(outs IntRegs:$dst),
>> (ins IntRegs:$src1, IntRegs: $src2, s10Imm:$src3),
>> "$dst |= and($src2, #$src3)",
>> - [(set IntRegs:$dst,
>> - (or IntRegs:$src1, (and IntRegs:$src2,
s10ImmPred:$src3)))],
>> + [(set (i32 IntRegs:$dst),
>> + (or (i32 IntRegs:$src1), (and (i32 IntRegs:$src2),
>> + s10ImmPred:$src3)))],
>> "$src1 = $dst">,
>> Requires<[HasV4T]>;
>>
>> @@ -2610,8 +4202,9 @@ def ORr_ANDri2_V4 : MInst_acc<(outs
IntRegs:$dst),
>> def ORr_ORri_V4 : MInst_acc<(outs IntRegs:$dst),
>> (ins IntRegs:$src1, IntRegs: $src2, s10Imm:$src3),
>> "$dst |= or($src2, #$src3)",
>> - [(set IntRegs:$dst,
>> - (or IntRegs:$src1, (and IntRegs:$src2,
s10ImmPred:$src3)))],
>> + [(set (i32 IntRegs:$dst),
>> + (or (i32 IntRegs:$src1), (and (i32 IntRegs:$src2),
>> + s10ImmPred:$src3)))],
>> "$src1 = $dst">,
>> Requires<[HasV4T]>;
>>
>> @@ -2663,8 +4256,9 @@ def ORr_ORri_V4 : MInst_acc<(outs
IntRegs:$dst),
>> def ADDi_MPYri_V4 : MInst<(outs IntRegs:$dst),
>> (ins u6Imm:$src1, IntRegs:$src2, u6Imm:$src3),
>> "$dst = add(#$src1, mpyi($src2, #$src3))",
>> - [(set IntRegs:$dst,
>> - (add (mul IntRegs:$src2, u6ImmPred:$src3),
u6ImmPred:$src1))]>,
>> + [(set (i32 IntRegs:$dst),
>> + (add (mul (i32 IntRegs:$src2), u6ImmPred:$src3),
>> + u6ImmPred:$src1))]>,
>> Requires<[HasV4T]>;
>>
>> // Rd=add(#u6,mpyi(Rs,Rt))
>> @@ -2672,32 +4266,36 @@ def ADDi_MPYri_V4 : MInst<(outs
IntRegs:$dst),
>> def ADDi_MPYrr_V4 : MInst<(outs IntRegs:$dst),
>> (ins u6Imm:$src1, IntRegs:$src2, IntRegs:$src3),
>> "$dst = add(#$src1, mpyi($src2, $src3))",
>> - [(set IntRegs:$dst,
>> - (add (mul IntRegs:$src2, IntRegs:$src3),
u6ImmPred:$src1))]>,
>> + [(set (i32 IntRegs:$dst),
>> + (add (mul (i32 IntRegs:$src2), (i32 IntRegs:$src3)),
>> + u6ImmPred:$src1))]>,
>> Requires<[HasV4T]>;
>>
>> // Rd=add(Ru,mpyi(#u6:2,Rs))
>> def ADDr_MPYir_V4 : MInst<(outs IntRegs:$dst),
>> (ins IntRegs:$src1, u6Imm:$src2, IntRegs:$src3),
>> "$dst = add($src1, mpyi(#$src2, $src3))",
>> - [(set IntRegs:$dst,
>> - (add IntRegs:$src1, (mul IntRegs:$src3,
u6_2ImmPred:$src2)))]>,
>> + [(set (i32 IntRegs:$dst),
>> + (add (i32 IntRegs:$src1), (mul (i32 IntRegs:$src3),
>> + u6_2ImmPred:$src2)))]>,
>> Requires<[HasV4T]>;
>>
>> // Rd=add(Ru,mpyi(Rs,#u6))
>> def ADDr_MPYri_V4 : MInst<(outs IntRegs:$dst),
>> (ins IntRegs:$src1, IntRegs:$src2, u6Imm:$src3),
>> "$dst = add($src1, mpyi($src2, #$src3))",
>> - [(set IntRegs:$dst,
>> - (add IntRegs:$src1, (mul IntRegs:$src2,
u6ImmPred:$src3)))]>,
>> + [(set (i32 IntRegs:$dst),
>> + (add (i32 IntRegs:$src1), (mul (i32 IntRegs:$src2),
>> +
u6ImmPred:$src3)))]>,
>> Requires<[HasV4T]>;
>>
>> // Rx=add(Ru,mpyi(Rx,Rs))
>> def ADDr_MPYrr_V4 : MInst_acc<(outs IntRegs:$dst),
>> (ins IntRegs:$src1, IntRegs:$src2, IntRegs:$src3),
>> "$dst = add($src1, mpyi($src2, $src3))",
>> - [(set IntRegs:$dst,
>> - (add IntRegs:$src1, (mul IntRegs:$src2,
IntRegs:$src3)))],
>> + [(set (i32 IntRegs:$dst),
>> + (add (i32 IntRegs:$src1), (mul (i32 IntRegs:$src2),
>> + (i32 IntRegs:$src3))))],
>> "$src2 = $dst">,
>> Requires<[HasV4T]>;
>>
>> @@ -2745,8 +4343,9 @@ def ADDr_MPYrr_V4 : MInst_acc<(outs
IntRegs:$dst),
>> def ADDi_ASLri_V4 : MInst_acc<(outs IntRegs:$dst),
>> (ins u8Imm:$src1, IntRegs:$src2, u5Imm:$src3),
>> "$dst = add(#$src1, asl($src2, #$src3))",
>> - [(set IntRegs:$dst,
>> - (add (shl IntRegs:$src2, u5ImmPred:$src3),
u8ImmPred:$src1))],
>> + [(set (i32 IntRegs:$dst),
>> + (add (shl (i32 IntRegs:$src2), u5ImmPred:$src3),
>> + u8ImmPred:$src1))],
>> "$src2 = $dst">,
>> Requires<[HasV4T]>;
>>
>> @@ -2754,8 +4353,9 @@ def ADDi_ASLri_V4 : MInst_acc<(outs
IntRegs:$dst),
>> def ADDi_LSRri_V4 : MInst_acc<(outs IntRegs:$dst),
>> (ins u8Imm:$src1, IntRegs:$src2, u5Imm:$src3),
>> "$dst = add(#$src1, lsr($src2, #$src3))",
>> - [(set IntRegs:$dst,
>> - (add (srl IntRegs:$src2, u5ImmPred:$src3),
u8ImmPred:$src1))],
>> + [(set (i32 IntRegs:$dst),
>> + (add (srl (i32 IntRegs:$src2), u5ImmPred:$src3),
>> + u8ImmPred:$src1))],
>> "$src2 = $dst">,
>> Requires<[HasV4T]>;
>>
>> @@ -2763,8 +4363,9 @@ def ADDi_LSRri_V4 : MInst_acc<(outs
IntRegs:$dst),
>> def SUBi_ASLri_V4 : MInst_acc<(outs IntRegs:$dst),
>> (ins u8Imm:$src1, IntRegs:$src2, u5Imm:$src3),
>> "$dst = sub(#$src1, asl($src2, #$src3))",
>> - [(set IntRegs:$dst,
>> - (sub (shl IntRegs:$src2, u5ImmPred:$src3),
u8ImmPred:$src1))],
>> + [(set (i32 IntRegs:$dst),
>> + (sub (shl (i32 IntRegs:$src2), u5ImmPred:$src3),
>> + u8ImmPred:$src1))],
>> "$src2 = $dst">,
>> Requires<[HasV4T]>;
>>
>> @@ -2772,8 +4373,9 @@ def SUBi_ASLri_V4 : MInst_acc<(outs
IntRegs:$dst),
>> def SUBi_LSRri_V4 : MInst_acc<(outs IntRegs:$dst),
>> (ins u8Imm:$src1, IntRegs:$src2, u5Imm:$src3),
>> "$dst = sub(#$src1, lsr($src2, #$src3))",
>> - [(set IntRegs:$dst,
>> - (sub (srl IntRegs:$src2, u5ImmPred:$src3),
u8ImmPred:$src1))],
>> + [(set (i32 IntRegs:$dst),
>> + (sub (srl (i32 IntRegs:$src2), u5ImmPred:$src3),
>> + u8ImmPred:$src1))],
>> "$src2 = $dst">,
>> Requires<[HasV4T]>;
>>
>> @@ -2783,8 +4385,9 @@ def SUBi_LSRri_V4 : MInst_acc<(outs
IntRegs:$dst),
>> def ANDi_ASLri_V4 : MInst_acc<(outs IntRegs:$dst),
>> (ins u8Imm:$src1, IntRegs:$src2, u5Imm:$src3),
>> "$dst = and(#$src1, asl($src2, #$src3))",
>> - [(set IntRegs:$dst,
>> - (and (shl IntRegs:$src2, u5ImmPred:$src3),
u8ImmPred:$src1))],
>> + [(set (i32 IntRegs:$dst),
>> + (and (shl (i32 IntRegs:$src2), u5ImmPred:$src3),
>> + u8ImmPred:$src1))],
>> "$src2 = $dst">,
>> Requires<[HasV4T]>;
>>
>> @@ -2792,26 +4395,31 @@ def ANDi_ASLri_V4 : MInst_acc<(outs
IntRegs:$dst),
>> def ANDi_LSRri_V4 : MInst_acc<(outs IntRegs:$dst),
>> (ins u8Imm:$src1, IntRegs:$src2, u5Imm:$src3),
>> "$dst = and(#$src1, lsr($src2, #$src3))",
>> - [(set IntRegs:$dst,
>> - (and (srl IntRegs:$src2, u5ImmPred:$src3),
u8ImmPred:$src1))],
>> + [(set (i32 IntRegs:$dst),
>> + (and (srl (i32 IntRegs:$src2), u5ImmPred:$src3),
>> + u8ImmPred:$src1))],
>> "$src2 = $dst">,
>> Requires<[HasV4T]>;
>>
>> //Rx=or(#u8,asl(Rx,#U5))
>> +let AddedComplexity = 30 in
>> def ORi_ASLri_V4 : MInst_acc<(outs IntRegs:$dst),
>> (ins u8Imm:$src1, IntRegs:$src2, u5Imm:$src3),
>> "$dst = or(#$src1, asl($src2, #$src3))",
>> - [(set IntRegs:$dst,
>> - (or (shl IntRegs:$src2, u5ImmPred:$src3),
u8ImmPred:$src1))],
>> + [(set (i32 IntRegs:$dst),
>> + (or (shl (i32 IntRegs:$src2), u5ImmPred:$src3),
>> + u8ImmPred:$src1))],
>> "$src2 = $dst">,
>> Requires<[HasV4T]>;
>>
>> //Rx=or(#u8,lsr(Rx,#U5))
>> +let AddedComplexity = 30 in
>> def ORi_LSRri_V4 : MInst_acc<(outs IntRegs:$dst),
>> (ins u8Imm:$src1, IntRegs:$src2, u5Imm:$src3),
>> "$dst = or(#$src1, lsr($src2, #$src3))",
>> - [(set IntRegs:$dst,
>> - (or (srl IntRegs:$src2, u5ImmPred:$src3),
u8ImmPred:$src1))],
>> + [(set (i32 IntRegs:$dst),
>> + (or (srl (i32 IntRegs:$src2), u5ImmPred:$src3),
>> + u8ImmPred:$src1))],
>> "$src2 = $dst">,
>> Requires<[HasV4T]>;
>>
>> @@ -2820,7 +4428,8 @@ def ORi_LSRri_V4 : MInst_acc<(outs
IntRegs:$dst),
>> //Rd=lsl(#s6,Rt)
>> def LSLi_V4 : MInst<(outs IntRegs:$dst), (ins s6Imm:$src1,
IntRegs:$src2),
>> "$dst = lsl(#$src1, $src2)",
>> - [(set IntRegs:$dst, (shl s6ImmPred:$src1,
IntRegs:$src2))]>,
>> + [(set (i32 IntRegs:$dst), (shl s6ImmPred:$src1,
>> + (i32 IntRegs:$src2)))]>,
>> Requires<[HasV4T]>;
>>
>>
>> @@ -2829,8 +4438,9 @@ def LSLi_V4 : MInst<(outs IntRegs:$dst), (ins
s6Imm:$src1, IntRegs:$src2),
>> def ASLd_rr_xor_V4 : MInst_acc<(outs DoubleRegs:$dst),
>> (ins DoubleRegs:$src1, DoubleRegs:$src2, IntRegs:$src3),
>> "$dst ^= asl($src2, $src3)",
>> - [(set DoubleRegs:$dst,
>> - (xor DoubleRegs:$src1, (shl DoubleRegs:$src2,
IntRegs:$src3)))],
>> + [(set (i64 DoubleRegs:$dst),
>> + (xor (i64 DoubleRegs:$src1), (shl (i64
DoubleRegs:$src2),
>> + (i32
IntRegs:$src3))))],
>> "$src1 = $dst">,
>> Requires<[HasV4T]>;
>>
>> @@ -2838,8 +4448,9 @@ def ASLd_rr_xor_V4 : MInst_acc<(outs
DoubleRegs:$dst),
>> def ASRd_rr_xor_V4 : MInst_acc<(outs DoubleRegs:$dst),
>> (ins DoubleRegs:$src1, DoubleRegs:$src2, IntRegs:$src3),
>> "$dst ^= asr($src2, $src3)",
>> - [(set DoubleRegs:$dst,
>> - (xor DoubleRegs:$src1, (sra DoubleRegs:$src2,
IntRegs:$src3)))],
>> + [(set (i64 DoubleRegs:$dst),
>> + (xor (i64 DoubleRegs:$src1), (sra (i64
DoubleRegs:$src2),
>> + (i32
IntRegs:$src3))))],
>> "$src1 = $dst">,
>> Requires<[HasV4T]>;
>>
>> @@ -2847,8 +4458,9 @@ def ASRd_rr_xor_V4 : MInst_acc<(outs
DoubleRegs:$dst),
>> def LSLd_rr_xor_V4 : MInst_acc<(outs DoubleRegs:$dst),
>> (ins DoubleRegs:$src1, DoubleRegs:$src2, IntRegs:$src3),
>> "$dst ^= lsl($src2, $src3)",
>> - [(set DoubleRegs:$dst,
>> - (xor DoubleRegs:$src1, (shl DoubleRegs:$src2,
IntRegs:$src3)))],
>> + [(set (i64 DoubleRegs:$dst), (xor (i64 DoubleRegs:$src1),
>> + (shl (i64
DoubleRegs:$src2),
>> + (i32
IntRegs:$src3))))],
>> "$src1 = $dst">,
>> Requires<[HasV4T]>;
>>
>> @@ -2856,8 +4468,9 @@ def LSLd_rr_xor_V4 : MInst_acc<(outs
DoubleRegs:$dst),
>> def LSRd_rr_xor_V4 : MInst_acc<(outs DoubleRegs:$dst),
>> (ins DoubleRegs:$src1, DoubleRegs:$src2, IntRegs:$src3),
>> "$dst ^= lsr($src2, $src3)",
>> - [(set DoubleRegs:$dst,
>> - (xor DoubleRegs:$src1, (srl DoubleRegs:$src2,
IntRegs:$src3)))],
>> + [(set (i64 DoubleRegs:$dst),
>> + (xor (i64 DoubleRegs:$src1), (srl (i64
DoubleRegs:$src2),
>> + (i32
IntRegs:$src3))))],
>> "$src1 = $dst">,
>> Requires<[HasV4T]>;
>>
>> @@ -2903,16 +4516,16 @@ let AddedComplexity = 30 in
>> def MEMw_ADDSUBi_indexed_MEM_V4 : MEMInst_V4<(outs),
>> (ins IntRegs:$base, u6_2Imm:$offset, m6Imm:$addend),
>> "Error; should not emit",
>> - [(store (add (load (add IntRegs:$base,
u6_2ImmPred:$offset)),
>> -m6ImmPred:$addend),
>> - (add IntRegs:$base, u6_2ImmPred:$offset))]>,
>> + [(store (add (load (add (i32 IntRegs:$base),
u6_2ImmPred:$offset)),
>> + m6ImmPred:$addend),
>> + (add (i32 IntRegs:$base),
u6_2ImmPred:$offset))]>,
>> Requires<[HasV4T, UseMEMOP]>;
>>
>> // memw(Rs+#u6:2) += #U5
>> let AddedComplexity = 30 in
>> def MEMw_ADDi_indexed_MEM_V4 : MEMInst_V4<(outs),
>> (ins IntRegs:$base, u6_2Imm:$offset, u5Imm:$addend),
>> - "memw($base+#$offset) += $addend",
>> + "memw($base+#$offset) += #$addend",
>> []>,
>> Requires<[HasV4T, UseMEMOP]>;
>>
>> @@ -2920,7 +4533,7 @@ def MEMw_ADDi_indexed_MEM_V4 :
MEMInst_V4<(outs),
>> let AddedComplexity = 30 in
>> def MEMw_SUBi_indexed_MEM_V4 : MEMInst_V4<(outs),
>> (ins IntRegs:$base, u6_2Imm:$offset, u5Imm:$subend),
>> - "memw($base+#$offset) -= $subend",
>> + "memw($base+#$offset) -= #$subend",
>> []>,
>> Requires<[HasV4T, UseMEMOP]>;
>>
>> @@ -2929,9 +4542,9 @@ let AddedComplexity = 30 in
>> def MEMw_ADDr_indexed_MEM_V4 : MEMInst_V4<(outs),
>> (ins IntRegs:$base, u6_2Imm:$offset, IntRegs:$addend),
>> "memw($base+#$offset) += $addend",
>> - [(store (add (load (add IntRegs:$base,
u6_2ImmPred:$offset)),
>> -IntRegs:$addend),
>> - (add IntRegs:$base, u6_2ImmPred:$offset))]>,
>> + [(store (add (load (add (i32 IntRegs:$base),
u6_2ImmPred:$offset)),
>> + (i32 IntRegs:$addend)),
>> + (add (i32 IntRegs:$base),
u6_2ImmPred:$offset))]>,
>> Requires<[HasV4T, UseMEMOP]>;
>>
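
(Illustrative sketch only, not part of the patch: the kind of source these
UseMEMOP patterns are meant to catch. Actual selection also depends on the
UseMEMOP predicate and the AddedComplexity settings.)

    /* read-modify-write of a word at a small immediate offset */
    void bump(int *p, int x) {
      p[1] += x;               /* load, add and store of the same address */
    }

    /* intended V4 selection, schematically:
         memw(r0+#4) += r1
       rather than a separate load, add and store.                        */
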
>> // memw(Rs+#u6:2) -= Rt
>> @@ -2939,19 +4552,19 @@ let AddedComplexity = 30 in
>> def MEMw_SUBr_indexed_MEM_V4 : MEMInst_V4<(outs),
>> (ins IntRegs:$base, u6_2Imm:$offset, IntRegs:$subend),
>> "memw($base+#$offset) -= $subend",
>> - [(store (sub (load (add IntRegs:$base,
u6_2ImmPred:$offset)),
>> -IntRegs:$subend),
>> - (add IntRegs:$base, u6_2ImmPred:$offset))]>,
>> + [(store (sub (load (add (i32 IntRegs:$base),
u6_2ImmPred:$offset)),
>> + (i32 IntRegs:$subend)),
>> + (add (i32 IntRegs:$base),
u6_2ImmPred:$offset))]>,
>> Requires<[HasV4T, UseMEMOP]>;
>>
>> // memw(Rs+#u6:2)&= Rt
>> let AddedComplexity = 30 in
>> def MEMw_ANDr_indexed_MEM_V4 : MEMInst_V4<(outs),
>> (ins IntRegs:$base, u6_2Imm:$offset, IntRegs:$andend),
>> - "memw($base+#$offset) += $andend",
>> - [(store (and (load (add IntRegs:$base,
u6_2ImmPred:$offset)),
>> -IntRegs:$andend),
>> - (add IntRegs:$base, u6_2ImmPred:$offset))]>,
>> + "memw($base+#$offset)&= $andend",
>> + [(store (and (load (add (i32 IntRegs:$base),
u6_2ImmPred:$offset)),
>> + (i32 IntRegs:$andend)),
>> + (add (i32 IntRegs:$base),
u6_2ImmPred:$offset))]>,
>> Requires<[HasV4T, UseMEMOP]>;
>>
>> // memw(Rs+#u6:2) |= Rt
>> @@ -2959,9 +4572,9 @@ let AddedComplexity = 30 in
>> def MEMw_ORr_indexed_MEM_V4 : MEMInst_V4<(outs),
>> (ins IntRegs:$base, u6_2Imm:$offset, IntRegs:$orend),
>> "memw($base+#$offset) |= $orend",
>> - [(store (or (load (add IntRegs:$base,
u6_2ImmPred:$offset)),
>> - IntRegs:$orend),
>> - (add IntRegs:$base, u6_2ImmPred:$offset))]>,
>> + [(store (or (load (add (i32 IntRegs:$base),
u6_2ImmPred:$offset)),
>> + (i32 IntRegs:$orend)),
>> + (add (i32 IntRegs:$base),
u6_2ImmPred:$offset))]>,
>> Requires<[HasV4T, UseMEMOP]>;
>>
>> // MEMw_ADDSUBi_V4:
>> @@ -2996,7 +4609,7 @@ let AddedComplexity = 30 in
>> def MEMw_ADDr_MEM_V4 : MEMInst_V4<(outs),
>> (ins MEMri:$addr, IntRegs:$addend),
>> "memw($addr) += $addend",
>> - [(store (add (load ADDRriU6_2:$addr), IntRegs:$addend),
>> + [(store (add (load ADDRriU6_2:$addr), (i32
IntRegs:$addend)),
>> ADDRriU6_2:$addr)]>,
>> Requires<[HasV4T, UseMEMOP]>;
>>
>> @@ -3005,7 +4618,7 @@ let AddedComplexity = 30 in
>> def MEMw_SUBr_MEM_V4 : MEMInst_V4<(outs),
>> (ins MEMri:$addr, IntRegs:$subend),
>> "memw($addr) -= $subend",
>> - [(store (sub (load ADDRriU6_2:$addr), IntRegs:$subend),
>> + [(store (sub (load ADDRriU6_2:$addr), (i32
IntRegs:$subend)),
>> ADDRriU6_2:$addr)]>,
>> Requires<[HasV4T, UseMEMOP]>;
>>
>> @@ -3014,7 +4627,7 @@ let AddedComplexity = 30 in
>> def MEMw_ANDr_MEM_V4 : MEMInst_V4<(outs),
>> (ins MEMri:$addr, IntRegs:$andend),
>> "memw($addr)&= $andend",
>> - [(store (and (load ADDRriU6_2:$addr), IntRegs:$andend),
>> + [(store (and (load ADDRriU6_2:$addr), (i32
IntRegs:$andend)),
>> ADDRriU6_2:$addr)]>,
>> Requires<[HasV4T, UseMEMOP]>;
>>
>> @@ -3023,8 +4636,8 @@ let AddedComplexity = 30 in
>> def MEMw_ORr_MEM_V4 : MEMInst_V4<(outs),
>> (ins MEMri:$addr, IntRegs:$orend),
>> "memw($addr) |= $orend",
>> - [(store (or (load ADDRriU6_2:$addr), IntRegs:$orend),
>> -ADDRriU6_2:$addr)]>,
>> + [(store (or (load ADDRriU6_2:$addr), (i32
IntRegs:$orend)),
>> + ADDRriU6_2:$addr)]>,
>> Requires<[HasV4T, UseMEMOP]>;
>>
>>
//===----------------------------------------------------------------------===//
>> @@ -3060,10 +4673,10 @@ let AddedComplexity = 30 in
>> def MEMh_ADDSUBi_indexed_MEM_V4 : MEMInst_V4<(outs),
>> (ins IntRegs:$base, u6_1Imm:$offset, m6Imm:$addend),
>> "Error; should not emit",
>> - [(truncstorei16 (add (sextloadi16 (add IntRegs:$base,
>> + [(truncstorei16 (add (sextloadi16 (add (i32
IntRegs:$base),
>>
u6_1ImmPred:$offset)),
>> m6ImmPred:$addend),
>> - (add IntRegs:$base,
u6_1ImmPred:$offset))]>,
>> + (add (i32 IntRegs:$base),
u6_1ImmPred:$offset))]>,
>> Requires<[HasV4T, UseMEMOP]>;
>>
>> // memh(Rs+#u6:1) += #U5
>> @@ -3087,10 +4700,10 @@ let AddedComplexity = 30 in
>> def MEMh_ADDr_indexed_MEM_V4 : MEMInst_V4<(outs),
>> (ins IntRegs:$base, u6_1Imm:$offset, IntRegs:$addend),
>> "memh($base+#$offset) += $addend",
>> - [(truncstorei16 (add (sextloadi16 (add IntRegs:$base,
>> + [(truncstorei16 (add (sextloadi16 (add (i32
IntRegs:$base),
>>
u6_1ImmPred:$offset)),
>> - IntRegs:$addend),
>> - (add IntRegs:$base,
u6_1ImmPred:$offset))]>,
>> + (i32 IntRegs:$addend)),
>> + (add (i32 IntRegs:$base),
u6_1ImmPred:$offset))]>,
>> Requires<[HasV4T, UseMEMOP]>;
>>
>> // memh(Rs+#u6:1) -= Rt
>> @@ -3098,10 +4711,10 @@ let AddedComplexity = 30 in
>> def MEMh_SUBr_indexed_MEM_V4 : MEMInst_V4<(outs),
>> (ins IntRegs:$base, u6_1Imm:$offset, IntRegs:$subend),
>> "memh($base+#$offset) -= $subend",
>> - [(truncstorei16 (sub (sextloadi16 (add IntRegs:$base,
>> + [(truncstorei16 (sub (sextloadi16 (add (i32
IntRegs:$base),
>>
u6_1ImmPred:$offset)),
>> - IntRegs:$subend),
>> - (add IntRegs:$base,
u6_1ImmPred:$offset))]>,
>> + (i32 IntRegs:$subend)),
>> + (add (i32 IntRegs:$base),
u6_1ImmPred:$offset))]>,
>> Requires<[HasV4T, UseMEMOP]>;
>>
>> // memh(Rs+#u6:1)&= Rt
>> @@ -3109,10 +4722,10 @@ let AddedComplexity = 30 in
>> def MEMh_ANDr_indexed_MEM_V4 : MEMInst_V4<(outs),
>> (ins IntRegs:$base, u6_1Imm:$offset, IntRegs:$andend),
>> "memh($base+#$offset) += $andend",
>> - [(truncstorei16 (and (sextloadi16 (add IntRegs:$base,
>> + [(truncstorei16 (and (sextloadi16 (add (i32
IntRegs:$base),
>>
u6_1ImmPred:$offset)),
>> - IntRegs:$andend),
>> - (add IntRegs:$base,
u6_1ImmPred:$offset))]>,
>> + (i32 IntRegs:$andend)),
>> + (add (i32 IntRegs:$base),
u6_1ImmPred:$offset))]>,
>> Requires<[HasV4T, UseMEMOP]>;
>>
>> // memh(Rs+#u6:1) |= Rt
>> @@ -3120,10 +4733,10 @@ let AddedComplexity = 30 in
>> def MEMh_ORr_indexed_MEM_V4 : MEMInst_V4<(outs),
>> (ins IntRegs:$base, u6_1Imm:$offset, IntRegs:$orend),
>> "memh($base+#$offset) |= $orend",
>> - [(truncstorei16 (or (sextloadi16 (add IntRegs:$base,
>> + [(truncstorei16 (or (sextloadi16 (add (i32 IntRegs:$base),
>> u6_1ImmPred:$offset)),
>> - IntRegs:$orend),
>> - (add IntRegs:$base,
u6_1ImmPred:$offset))]>,
>> + (i32 IntRegs:$orend)),
>> + (add (i32 IntRegs:$base),
u6_1ImmPred:$offset))]>,
>> Requires<[HasV4T, UseMEMOP]>;
>>
>> // MEMh_ADDSUBi_V4:
>> @@ -3159,7 +4772,7 @@ def MEMh_ADDr_MEM_V4 : MEMInst_V4<(outs),
>> (ins MEMri:$addr, IntRegs:$addend),
>> "memh($addr) += $addend",
>> [(truncstorei16 (add (sextloadi16 ADDRriU6_1:$addr),
>> - IntRegs:$addend),
ADDRriU6_1:$addr)]>,
>> + (i32 IntRegs:$addend)),
ADDRriU6_1:$addr)]>,
>> Requires<[HasV4T, UseMEMOP]>;
>>
>> // memh(Rs+#u6:1) -= Rt
>> @@ -3168,7 +4781,7 @@ def MEMh_SUBr_MEM_V4 : MEMInst_V4<(outs),
>> (ins MEMri:$addr, IntRegs:$subend),
>> "memh($addr) -= $subend",
>> [(truncstorei16 (sub (sextloadi16 ADDRriU6_1:$addr),
>> - IntRegs:$subend),
ADDRriU6_1:$addr)]>,
>> + (i32 IntRegs:$subend)),
ADDRriU6_1:$addr)]>,
>> Requires<[HasV4T, UseMEMOP]>;
>>
>> // memh(Rs+#u6:1)&= Rt
>> @@ -3177,7 +4790,7 @@ def MEMh_ANDr_MEM_V4 : MEMInst_V4<(outs),
>> (ins MEMri:$addr, IntRegs:$andend),
>> "memh($addr)&= $andend",
>> [(truncstorei16 (and (sextloadi16 ADDRriU6_1:$addr),
>> - IntRegs:$andend),
ADDRriU6_1:$addr)]>,
>> + (i32 IntRegs:$andend)),
ADDRriU6_1:$addr)]>,
>> Requires<[HasV4T, UseMEMOP]>;
>>
>> // memh(Rs+#u6:1) |= Rt
>> @@ -3186,7 +4799,7 @@ def MEMh_ORr_MEM_V4 : MEMInst_V4<(outs),
>> (ins MEMri:$addr, IntRegs:$orend),
>> "memh($addr) |= $orend",
>> [(truncstorei16 (or (sextloadi16 ADDRriU6_1:$addr),
>> - IntRegs:$orend),
ADDRriU6_1:$addr)]>,
>> + (i32 IntRegs:$orend)),
ADDRriU6_1:$addr)]>,
>> Requires<[HasV4T, UseMEMOP]>;
>>
>>
>> @@ -3223,10 +4836,10 @@ let AddedComplexity = 30 in
>> def MEMb_ADDSUBi_indexed_MEM_V4 : MEMInst_V4<(outs),
>> (ins IntRegs:$base, u6_0Imm:$offset, m6Imm:$addend),
>> "Error; should not emit",
>> - [(truncstorei8 (add (sextloadi8 (add IntRegs:$base,
>> + [(truncstorei8 (add (sextloadi8 (add (i32 IntRegs:$base),
>>
u6_0ImmPred:$offset)),
>> m6ImmPred:$addend),
>> - (add IntRegs:$base,
u6_0ImmPred:$offset))]>,
>> + (add (i32 IntRegs:$base),
u6_0ImmPred:$offset))]>,
>> Requires<[HasV4T, UseMEMOP]>;
>>
>> // memb(Rs+#u6:0) += #U5
>> @@ -3250,10 +4863,10 @@ let AddedComplexity = 30 in
>> def MEMb_ADDr_indexed_MEM_V4 : MEMInst_V4<(outs),
>> (ins IntRegs:$base, u6_0Imm:$offset, IntRegs:$addend),
>> "memb($base+#$offset) += $addend",
>> - [(truncstorei8 (add (sextloadi8 (add IntRegs:$base,
>> + [(truncstorei8 (add (sextloadi8 (add (i32 IntRegs:$base),
>>
u6_0ImmPred:$offset)),
>> - IntRegs:$addend),
>> - (add IntRegs:$base,
u6_0ImmPred:$offset))]>,
>> + (i32 IntRegs:$addend)),
>> + (add (i32 IntRegs:$base),
u6_0ImmPred:$offset))]>,
>> Requires<[HasV4T, UseMEMOP]>;
>>
>> // memb(Rs+#u6:0) -= Rt
>> @@ -3261,10 +4874,10 @@ let AddedComplexity = 30 in
>> def MEMb_SUBr_indexed_MEM_V4 : MEMInst_V4<(outs),
>> (ins IntRegs:$base, u6_0Imm:$offset, IntRegs:$subend),
>> "memb($base+#$offset) -= $subend",
>> - [(truncstorei8 (sub (sextloadi8 (add IntRegs:$base,
>> + [(truncstorei8 (sub (sextloadi8 (add (i32 IntRegs:$base),
>>
u6_0ImmPred:$offset)),
>> - IntRegs:$subend),
>> - (add IntRegs:$base,
u6_0ImmPred:$offset))]>,
>> + (i32 IntRegs:$subend)),
>> + (add (i32 IntRegs:$base),
u6_0ImmPred:$offset))]>,
>> Requires<[HasV4T, UseMEMOP]>;
>>
>> // memb(Rs+#u6:0)&= Rt
>> @@ -3272,10 +4885,10 @@ let AddedComplexity = 30 in
>> def MEMb_ANDr_indexed_MEM_V4 : MEMInst_V4<(outs),
>> (ins IntRegs:$base, u6_0Imm:$offset, IntRegs:$andend),
>> "memb($base+#$offset) += $andend",
>> - [(truncstorei8 (and (sextloadi8 (add IntRegs:$base,
>> + [(truncstorei8 (and (sextloadi8 (add (i32 IntRegs:$base),
>>
u6_0ImmPred:$offset)),
>> - IntRegs:$andend),
>> - (add IntRegs:$base,
u6_0ImmPred:$offset))]>,
>> + (i32 IntRegs:$andend)),
>> + (add (i32 IntRegs:$base),
u6_0ImmPred:$offset))]>,
>> Requires<[HasV4T, UseMEMOP]>;
>>
>> // memb(Rs+#u6:0) |= Rt
>> @@ -3283,10 +4896,10 @@ let AddedComplexity = 30 in
>> def MEMb_ORr_indexed_MEM_V4 : MEMInst_V4<(outs),
>> (ins IntRegs:$base, u6_0Imm:$offset, IntRegs:$orend),
>> "memb($base+#$offset) |= $orend",
>> - [(truncstorei8 (or (sextloadi8 (add IntRegs:$base,
>> + [(truncstorei8 (or (sextloadi8 (add (i32 IntRegs:$base),
>>
u6_0ImmPred:$offset)),
>> - IntRegs:$orend),
>> - (add IntRegs:$base,
u6_0ImmPred:$offset))]>,
>> + (i32 IntRegs:$orend)),
>> + (add (i32 IntRegs:$base),
u6_0ImmPred:$offset))]>,
>> Requires<[HasV4T, UseMEMOP]>;
>>
>> // MEMb_ADDSUBi_V4:
>> @@ -3322,7 +4935,7 @@ def MEMb_ADDr_MEM_V4 : MEMInst_V4<(outs),
>> (ins MEMri:$addr, IntRegs:$addend),
>> "memb($addr) += $addend",
>> [(truncstorei8 (add (sextloadi8 ADDRriU6_0:$addr),
>> - IntRegs:$addend),
ADDRriU6_0:$addr)]>,
>> + (i32 IntRegs:$addend)),
ADDRriU6_0:$addr)]>,
>> Requires<[HasV4T, UseMEMOP]>;
>>
>> // memb(Rs+#u6:0) -= Rt
>> @@ -3331,7 +4944,7 @@ def MEMb_SUBr_MEM_V4 : MEMInst_V4<(outs),
>> (ins MEMri:$addr, IntRegs:$subend),
>> "memb($addr) -= $subend",
>> [(truncstorei8 (sub (sextloadi8 ADDRriU6_0:$addr),
>> - IntRegs:$subend),
ADDRriU6_0:$addr)]>,
>> + (i32 IntRegs:$subend)),
ADDRriU6_0:$addr)]>,
>> Requires<[HasV4T, UseMEMOP]>;
>>
>> // memb(Rs+#u6:0)&= Rt
>> @@ -3340,7 +4953,7 @@ def MEMb_ANDr_MEM_V4 : MEMInst_V4<(outs),
>> (ins MEMri:$addr, IntRegs:$andend),
>> "memb($addr)&= $andend",
>> [(truncstorei8 (and (sextloadi8 ADDRriU6_0:$addr),
>> - IntRegs:$andend),
ADDRriU6_0:$addr)]>,
>> + (i32 IntRegs:$andend)),
ADDRriU6_0:$addr)]>,
>> Requires<[HasV4T, UseMEMOP]>;
>>
>> // memb(Rs+#u6:0) |= Rt
>> @@ -3349,7 +4962,7 @@ def MEMb_ORr_MEM_V4 : MEMInst_V4<(outs),
>> (ins MEMri:$addr, IntRegs:$orend),
>> "memb($addr) |= $orend",
>> [(truncstorei8 (or (sextloadi8 ADDRriU6_0:$addr),
>> - IntRegs:$orend),
ADDRriU6_0:$addr)]>,
>> + (i32 IntRegs:$orend)),
ADDRriU6_0:$addr)]>,
>> Requires<[HasV4T, UseMEMOP]>;
>>
>>
>> @@ -3364,13 +4977,16 @@ def MEMb_ORr_MEM_V4 : MEMInst_V4<(outs),
>> // The implemented patterns are: EQ/GT/GTU.
>> // Missing patterns are: GE/GEU/LT/LTU/LE/LEU.
>>
>> +// The following instruction is not being extended, as that results in
>> +// incorrect code for negative numbers.
>> // Pd=cmpb.eq(Rs,#u8)
>> +
>> let isCompare = 1 in
>> def CMPbEQri_V4 : MInst<(outs PredRegs:$dst),
>> (ins IntRegs:$src1, u8Imm:$src2),
>> "$dst = cmpb.eq($src1, #$src2)",
>> - [(set PredRegs:$dst, (seteq (and IntRegs:$src1, 255),
>> - u8ImmPred:$src2))]>,
>> + [(set (i1 PredRegs:$dst),
>> + (seteq (and (i32 IntRegs:$src1), 255),
u8ImmPred:$src2))]>,
>> Requires<[HasV4T]>;
>>
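
(A sketch of the concern behind the comment above -- my reading, not part of
the patch.) The pattern computes (Rs & 255) == #u8, which can only be true
for immediates in 0..255; a negative constant shows up in the DAG as a full
sign-extended i32:

    int f(int x) {
      return (x & 255) == -1;   /* never true at the IR level */
    }

    /* If the immediate were constant-extended and a negative value were
       matched into cmpb.eq, the byte-sized immediate field would in effect
       fold it back into 0..255 and change the result; keeping the operand
       as u8ImmPred sidesteps that.                                        */
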
>> // Pd=cmpb.eq(Rs,Rt)
>> @@ -3378,10 +4994,9 @@ let isCompare = 1 in
>> def CMPbEQrr_ubub_V4 : MInst<(outs PredRegs:$dst),
>> (ins IntRegs:$src1, IntRegs:$src2),
>> "$dst = cmpb.eq($src1, $src2)",
>> - [(set PredRegs:$dst, (seteq (and (xor IntRegs:$src1,
>> - IntRegs:$src2),
>> - 255),
>> - 0))]>,
>> + [(set (i1 PredRegs:$dst),
>> + (seteq (and (xor (i32 IntRegs:$src1),
>> + (i32 IntRegs:$src2)), 255),
0))]>,
>> Requires<[HasV4T]>;
>>
>> // Pd=cmpb.eq(Rs,Rt)
>> @@ -3389,26 +5004,31 @@ let isCompare = 1 in
>> def CMPbEQrr_sbsb_V4 : MInst<(outs PredRegs:$dst),
>> (ins IntRegs:$src1, IntRegs:$src2),
>> "$dst = cmpb.eq($src1, $src2)",
>> - [(set PredRegs:$dst, (seteq (shl IntRegs:$src1, (i32 24)),
>> - (shl IntRegs:$src2, (i32
24))))]>,
>> + [(set (i1 PredRegs:$dst),
>> + (seteq (shl (i32 IntRegs:$src1), (i32 24)),
>> + (shl (i32 IntRegs:$src2), (i32 24))))]>,
>> Requires<[HasV4T]>;
>>
>> +/* Incorrect Pattern -- immediate should be right shifted before being
>> +used in the cmpb.gt instruction.
>> // Pd=cmpb.gt(Rs,#s8)
>> let isCompare = 1 in
>> def CMPbGTri_V4 : MInst<(outs PredRegs:$dst),
>> - (ins IntRegs:$src1, s32Imm:$src2),
>> + (ins IntRegs:$src1, s8Imm:$src2),
>> "$dst = cmpb.gt($src1, #$src2)",
>> - [(set PredRegs:$dst, (setgt (shl IntRegs:$src1, (i32 24)),
>> - s32_24ImmPred:$src2))]>,
>> + [(set (i1 PredRegs:$dst), (setgt (shl (i32 IntRegs:$src1),
(i32 24)),
>> + s8ImmPred:$src2))]>,
>> Requires<[HasV4T]>;
>> +*/
>>
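
(Again just my reading of the comment above, with a small sketch; not part of
the patch.) The DAG form compares (Rs << 24) as a signed 32-bit value, while
cmpb.gt compares the sign-extended low byte of Rs against #s8. The two agree
only when the 32-bit immediate equals #s8 << 24, i.e. the immediate has to be
arithmetically shifted right by 24 when it is encoded; using the raw value as
the #s8 field, as the commented-out pattern does, picks the wrong immediate.

    /* what the pattern matches: */
    int dag_form(int x, int imm) { return (int)((unsigned)x << 24) > imm; }

    /* roughly what cmpb.gt(Rs, #s8) computes: */
    int insn_form(int x, int s8) { return (signed char)x > (signed char)s8; }

    /* dag_form(x, imm) == insn_form(x, imm >> 24) whenever imm is a
       multiple of 1 << 24 -- which is what the old s32_24ImmPred operand
       guaranteed.                                                         */
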
>> // Pd=cmpb.gt(Rs,Rt)
>> let isCompare = 1 in
>> def CMPbGTrr_V4 : MInst<(outs PredRegs:$dst),
>> (ins IntRegs:$src1, IntRegs:$src2),
>> "$dst = cmpb.gt($src1, $src2)",
>> - [(set PredRegs:$dst, (setgt (shl IntRegs:$src1, (i32 24)),
>> - (shl IntRegs:$src2, (i32
24))))]>,
>> + [(set (i1 PredRegs:$dst),
>> + (setgt (shl (i32 IntRegs:$src1), (i32 24)),
>> + (shl (i32 IntRegs:$src2), (i32 24))))]>,
>> Requires<[HasV4T]>;
>>
>> // Pd=cmpb.gtu(Rs,#u7)
>> @@ -3416,8 +5036,8 @@ let isCompare = 1 in
>> def CMPbGTUri_V4 : MInst<(outs PredRegs:$dst),
>> (ins IntRegs:$src1, u7Imm:$src2),
>> "$dst = cmpb.gtu($src1, #$src2)",
>> - [(set PredRegs:$dst, (setugt (and IntRegs:$src1, 255),
>> - u7ImmPred:$src2))]>,
>> + [(set (i1 PredRegs:$dst), (setugt (and (i32
IntRegs:$src1), 255),
>> + u7ImmPred:$src2))]>,
>> Requires<[HasV4T]>;
>>
>> // Pd=cmpb.gtu(Rs,Rt)
>> @@ -3425,18 +5045,21 @@ let isCompare = 1 in
>> def CMPbGTUrr_V4 : MInst<(outs PredRegs:$dst),
>> (ins IntRegs:$src1, IntRegs:$src2),
>> "$dst = cmpb.gtu($src1, $src2)",
>> - [(set PredRegs:$dst, (setugt (and IntRegs:$src1, 255),
>> - (and IntRegs:$src2,
255)))]>,
>> + [(set (i1 PredRegs:$dst), (setugt (and (i32
IntRegs:$src1), 255),
>> + (and (i32
IntRegs:$src2), 255)))]>,
>> Requires<[HasV4T]>;
>>
>> +// The following instruction is not being extended, as that results in
>> +// incorrect code for negative numbers.
>> +
>> // Signed half compare(.eq) ri.
>> // Pd=cmph.eq(Rs,#s8)
>> let isCompare = 1 in
>> def CMPhEQri_V4 : MInst<(outs PredRegs:$dst),
>> - (ins IntRegs:$src1, u16Imm:$src2),
>> + (ins IntRegs:$src1, s8Imm:$src2),
>> "$dst = cmph.eq($src1, #$src2)",
>> - [(set PredRegs:$dst, (seteq (and IntRegs:$src1, 65535),
>> - u16_s8ImmPred:$src2))]>,
>> + [(set (i1 PredRegs:$dst), (seteq (and (i32 IntRegs:$src1),
65535),
>> + s8ImmPred:$src2))]>,
>> Requires<[HasV4T]>;
>>
>> // Signed half compare(.eq) rr.
>> @@ -3449,10 +5072,9 @@ let isCompare = 1 in
>> def CMPhEQrr_xor_V4 : MInst<(outs PredRegs:$dst),
>> (ins IntRegs:$src1, IntRegs:$src2),
>> "$dst = cmph.eq($src1, $src2)",
>> - [(set PredRegs:$dst, (seteq (and (xor IntRegs:$src1,
>> - IntRegs:$src2),
>> - 65535),
>> - 0))]>,
>> + [(set (i1 PredRegs:$dst), (seteq (and (xor (i32
IntRegs:$src1),
>> + (i32
IntRegs:$src2)),
>> + 65535), 0))]>,
>> Requires<[HasV4T]>;
>>
>> // Signed half compare(.eq) rr.
>> @@ -3465,19 +5087,25 @@ let isCompare = 1 in
>> def CMPhEQrr_shl_V4 : MInst<(outs PredRegs:$dst),
>> (ins IntRegs:$src1, IntRegs:$src2),
>> "$dst = cmph.eq($src1, $src2)",
>> - [(set PredRegs:$dst, (seteq (shl IntRegs:$src1, (i32 16)),
>> - (shl IntRegs:$src2, (i32
16))))]>,
>> + [(set (i1 PredRegs:$dst),
>> + (seteq (shl (i32 IntRegs:$src1), (i32 16)),
>> + (shl (i32 IntRegs:$src2), (i32 16))))]>,
>> Requires<[HasV4T]>;
>>
>> +/* Incorrect Pattern -- immediate should be right shifted before being
>> +used in the cmph.gt instruction.
>> // Signed half compare(.gt) ri.
>> // Pd=cmph.gt(Rs,#s8)
>> +
>> let isCompare = 1 in
>> def CMPhGTri_V4 : MInst<(outs PredRegs:$dst),
>> - (ins IntRegs:$src1, s32Imm:$src2),
>> + (ins IntRegs:$src1, s8Imm:$src2),
>> "$dst = cmph.gt($src1, #$src2)",
>> - [(set PredRegs:$dst, (setgt (shl IntRegs:$src1, (i32 16)),
>> - s32_16s8ImmPred:$src2))]>,
>> + [(set (i1 PredRegs:$dst),
>> + (setgt (shl (i32 IntRegs:$src1), (i32 16)),
>> + s8ImmPred:$src2))]>,
>> Requires<[HasV4T]>;
>> +*/
>>
>> // Signed half compare(.gt) rr.
>> // Pd=cmph.gt(Rs,Rt)
>> @@ -3485,8 +5113,9 @@ let isCompare = 1 in
>> def CMPhGTrr_shl_V4 : MInst<(outs PredRegs:$dst),
>> (ins IntRegs:$src1, IntRegs:$src2),
>> "$dst = cmph.gt($src1, $src2)",
>> - [(set PredRegs:$dst, (setgt (shl IntRegs:$src1, (i32 16)),
>> - (shl IntRegs:$src2, (i32
16))))]>,
>> + [(set (i1 PredRegs:$dst),
>> + (setgt (shl (i32 IntRegs:$src1), (i32 16)),
>> + (shl (i32 IntRegs:$src2), (i32 16))))]>,
>> Requires<[HasV4T]>;
>>
>> // Unsigned half compare rr (.gtu).
>> @@ -3495,8 +5124,9 @@ let isCompare = 1 in
>> def CMPhGTUrr_V4 : MInst<(outs PredRegs:$dst),
>> (ins IntRegs:$src1, IntRegs:$src2),
>> "$dst = cmph.gtu($src1, $src2)",
>> - [(set PredRegs:$dst, (setugt (and IntRegs:$src1, 65535),
>> - (and IntRegs:$src2,
65535)))]>,
>> + [(set (i1 PredRegs:$dst),
>> + (setugt (and (i32 IntRegs:$src1), 65535),
>> + (and (i32 IntRegs:$src2), 65535)))]>,
>> Requires<[HasV4T]>;
>>
>> // Unsigned half compare ri (.gtu).
>> @@ -3505,8 +5135,8 @@ let isCompare = 1 in
>> def CMPhGTUri_V4 : MInst<(outs PredRegs:$dst),
>> (ins IntRegs:$src1, u7Imm:$src2),
>> "$dst = cmph.gtu($src1, #$src2)",
>> - [(set PredRegs:$dst, (setugt (and IntRegs:$src1, 65535),
>> - u7ImmPred:$src2))]>,
>> + [(set (i1 PredRegs:$dst), (setugt (and (i32
IntRegs:$src1), 65535),
>> + u7ImmPred:$src2))]>,
>> Requires<[HasV4T]>;
>>
>>
//===----------------------------------------------------------------------===//
>> @@ -3523,9 +5153,37 @@ let isReturn = 1, isTerminator = 1, isBarrier =
1, isPredicable = 1,
>> Requires<[HasV4T]>;
>> }
>>
>> +// Restore registers and dealloc return function call.
>> +let isCall = 1, isBarrier = 1, isReturn = 1, isTerminator = 1,
>> + Defs = [R29, R30, R31, PC] in {
>> + def RESTORE_DEALLOC_RET_JMP_V4 : JInst<(outs), (ins
calltarget:$dst, variable_ops),
>> + "jump $dst // Restore_and_dealloc_return",
>> + []>,
>> + Requires<[HasV4T]>;
>> +}
>> +
>> +// Restore registers and dealloc frame before a tail call.
>> +let isCall = 1, isBarrier = 1,
>> + Defs = [R29, R30, R31, PC] in {
>> + def RESTORE_DEALLOC_BEFORE_TAILCALL_V4 : JInst<(outs), (ins
calltarget:$dst, variable_ops),
>> + "call $dst //
Restore_and_dealloc_before_tailcall",
>> + []>,
>> + Requires<[HasV4T]>;
>> +}
>> +
>> +// Save registers function call.
>> +let isCall = 1, isBarrier = 1,
>> + Uses = [R29, R31] in {
>> + def SAVE_REGISTERS_CALL_V4 : JInst<(outs), (ins calltarget:$dst,
variable_ops),
>> + "call $dst // Save_calle_saved_registers",
>> + []>,
>> + Requires<[HasV4T]>;
>> +}
>> +
>> // if (Ps) dealloc_return
>> let isReturn = 1, isTerminator = 1,
>> - Defs = [R29, R30, R31, PC], Uses = [R29, R31], neverHasSideEffects =
1 in {
>> + Defs = [R29, R30, R31, PC], Uses = [R29, R31], neverHasSideEffects
= 1,
>> + isPredicated = 1 in {
>> def DEALLOC_RET_cPt_V4 : NVInst_V4<(outs), (ins PredRegs:$src1,
i32imm:$amt1),
>> "if ($src1) dealloc_return",
>> []>,
>> @@ -3534,7 +5192,8 @@ let isReturn = 1, isTerminator = 1,
>>
>> // if (!Ps) dealloc_return
>> let isReturn = 1, isTerminator = 1,
>> - Defs = [R29, R30, R31, PC], Uses = [R29, R31], neverHasSideEffects =
1 in {
>> + Defs = [R29, R30, R31, PC], Uses = [R29, R31], neverHasSideEffects
= 1,
>> + isPredicated = 1 in {
>> def DEALLOC_RET_cNotPt_V4 : NVInst_V4<(outs), (ins
PredRegs:$src1,
>> i32imm:$amt1),
>> "if (!$src1) dealloc_return",
>> @@ -3544,7 +5203,8 @@ let isReturn = 1, isTerminator = 1,
>>
>> // if (Ps.new) dealloc_return:nt
>> let isReturn = 1, isTerminator = 1,
>> - Defs = [R29, R30, R31, PC], Uses = [R29, R31], neverHasSideEffects =
1 in {
>> + Defs = [R29, R30, R31, PC], Uses = [R29, R31], neverHasSideEffects
= 1,
>> + isPredicated = 1 in {
>> def DEALLOC_RET_cdnPnt_V4 : NVInst_V4<(outs), (ins
PredRegs:$src1,
>> i32imm:$amt1),
>> "if ($src1.new) dealloc_return:nt",
>> @@ -3554,7 +5214,8 @@ let isReturn = 1, isTerminator = 1,
>>
>> // if (!Ps.new) dealloc_return:nt
>> let isReturn = 1, isTerminator = 1,
>> - Defs = [R29, R30, R31, PC], Uses = [R29, R31], neverHasSideEffects =
1 in {
>> + Defs = [R29, R30, R31, PC], Uses = [R29, R31], neverHasSideEffects
= 1,
>> + isPredicated = 1 in {
>> def DEALLOC_RET_cNotdnPnt_V4 : NVInst_V4<(outs), (ins
PredRegs:$src1,
>>
i32imm:$amt1),
>> "if (!$src1.new) dealloc_return:nt",
>> @@ -3564,7 +5225,8 @@ let isReturn = 1, isTerminator = 1,
>>
>> // if (Ps.new) dealloc_return:t
>> let isReturn = 1, isTerminator = 1,
>> - Defs = [R29, R30, R31, PC], Uses = [R29, R31], neverHasSideEffects =
1 in {
>> + Defs = [R29, R30, R31, PC], Uses = [R29, R31], neverHasSideEffects
= 1,
>> + isPredicated = 1 in {
>> def DEALLOC_RET_cdnPt_V4 : NVInst_V4<(outs), (ins
PredRegs:$src1,
>> i32imm:$amt1),
>> "if ($src1.new) dealloc_return:t",
>> @@ -3574,10 +5236,511 @@ let isReturn = 1, isTerminator = 1,
>>
>> // if (!Ps.new) dealloc_return:nt
>> let isReturn = 1, isTerminator = 1,
>> - Defs = [R29, R30, R31, PC], Uses = [R29, R31], neverHasSideEffects =
1 in {
>> + Defs = [R29, R30, R31, PC], Uses = [R29, R31], neverHasSideEffects
= 1,
>> + isPredicated = 1 in {
>> def DEALLOC_RET_cNotdnPt_V4 : NVInst_V4<(outs), (ins
PredRegs:$src1,
>> i32imm:$amt1),
>> "if (!$src1.new) dealloc_return:t",
>> []>,
>> Requires<[HasV4T]>;
>> }
>> +
>> +
>> +// Load/Store with absolute addressing mode
>> +// memw(#u6)=Rt
>> +
>> +multiclass ST_abs<string OpcStr> {
>> + let isPredicable = 1 in
>> + def _abs_V4 : STInst<(outs),
>> + (ins globaladdress:$absaddr, IntRegs:$src),
>> + !strconcat(OpcStr, "(##$absaddr) = $src"),
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> + let isPredicated = 1 in
>> + def _abs_cPt_V4 : STInst<(outs),
>> + (ins PredRegs:$src1, globaladdress:$absaddr,
IntRegs:$src2),
>> + !strconcat("if ($src1)", !strconcat(OpcStr,
"(##$absaddr) = $src2")),
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> + let isPredicated = 1 in
>> + def _abs_cNotPt_V4 : STInst<(outs),
>> + (ins PredRegs:$src1, globaladdress:$absaddr,
IntRegs:$src2),
>> + !strconcat("if (!$src1)", !strconcat(OpcStr,
"(##$absaddr) = $src2")),
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> + let isPredicated = 1 in
>> + def _abs_cdnPt_V4 : STInst<(outs),
>> + (ins PredRegs:$src1, globaladdress:$absaddr,
IntRegs:$src2),
>> + !strconcat("if ($src1.new)", !strconcat(OpcStr,
"(##$absaddr) = $src2")),
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> + let isPredicated = 1 in
>> + def _abs_cdnNotPt_V4 : STInst<(outs),
>> + (ins PredRegs:$src1, globaladdress:$absaddr,
IntRegs:$src2),
>> + !strconcat("if (!$src1.new)", !strconcat(OpcStr,
"(##$absaddr) = $src2")),
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> + def _abs_nv_V4 : STInst<(outs),
>> + (ins globaladdress:$absaddr, IntRegs:$src),
>> + !strconcat(OpcStr, "(##$absaddr) = $src.new"),
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> + let isPredicated = 1 in
>> + def _abs_cPt_nv_V4 : STInst<(outs),
>> + (ins PredRegs:$src1, globaladdress:$absaddr,
IntRegs:$src2),
>> + !strconcat("if ($src1)", !strconcat(OpcStr,
"(##$absaddr) = $src2.new")),
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> + let isPredicated = 1 in
>> + def _abs_cNotPt_nv_V4 : STInst<(outs),
>> + (ins PredRegs:$src1, globaladdress:$absaddr,
IntRegs:$src2),
>> + !strconcat("if (!$src1)", !strconcat(OpcStr,
"(##$absaddr) = $src2.new")),
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> + let isPredicated = 1 in
>> + def _abs_cdnPt_nv_V4 : STInst<(outs),
>> + (ins PredRegs:$src1, globaladdress:$absaddr,
IntRegs:$src2),
>> + !strconcat("if ($src1.new)", !strconcat(OpcStr,
"(##$absaddr) = $src2.new")),
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> + let isPredicated = 1 in
>> + def _abs_cdnNotPt_nv_V4 : STInst<(outs),
>> + (ins PredRegs:$src1, globaladdress:$absaddr,
IntRegs:$src2),
>> + !strconcat("if (!$src1.new)", !strconcat(OpcStr,
"(##$absaddr) = $src2.new")),
>> + []>,
>> + Requires<[HasV4T]>;
>> +}
>> +
>> +let AddedComplexity = 30, isPredicable = 1 in
>> +def STrid_abs_V4 : STInst<(outs),
>> + (ins globaladdress:$absaddr, DoubleRegs:$src),
>> + "memd(##$absaddr) = $src",
>> + [(store (i64 DoubleRegs:$src), (HexagonCONST32
tglobaladdr:$absaddr))]>,
>> + Requires<[HasV4T]>;
>> +
>> +let AddedComplexity = 30, isPredicated = 1 in
>> +def STrid_abs_cPt_V4 : STInst<(outs),
>> + (ins PredRegs:$src1, globaladdress:$absaddr,
DoubleRegs:$src2),
>> + "if ($src1) memd(##$absaddr) = $src2",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +let AddedComplexity = 30, isPredicated = 1 in
>> +def STrid_abs_cNotPt_V4 : STInst<(outs),
>> + (ins PredRegs:$src1, globaladdress:$absaddr,
DoubleRegs:$src2),
>> + "if (!$src1) memd(##$absaddr) = $src2",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +let AddedComplexity = 30, isPredicated = 1 in
>> +def STrid_abs_cdnPt_V4 : STInst<(outs),
>> + (ins PredRegs:$src1, globaladdress:$absaddr,
DoubleRegs:$src2),
>> + "if ($src1.new) memd(##$absaddr) = $src2",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +let AddedComplexity = 30, isPredicated = 1 in
>> +def STrid_abs_cdnNotPt_V4 : STInst<(outs),
>> + (ins PredRegs:$src1, globaladdress:$absaddr,
DoubleRegs:$src2),
>> + "if (!$src1.new) memd(##$absaddr) = $src2",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +defm STrib : ST_abs<"memb">;
>> +defm STrih : ST_abs<"memh">;
>> +defm STriw : ST_abs<"memw">;
>> +
>> +let Predicates = [HasV4T], AddedComplexity = 30 in
>> +def : Pat<(truncstorei8 (i32 IntRegs:$src1), (HexagonCONST32
tglobaladdr:$absaddr)),
>> + (STrib_abs_V4 tglobaladdr: $absaddr, IntRegs: $src1)>;
>> +
>> +let Predicates = [HasV4T], AddedComplexity = 30 in
>> +def : Pat<(truncstorei16 (i32 IntRegs:$src1), (HexagonCONST32
tglobaladdr:$absaddr)),
>> + (STrih_abs_V4 tglobaladdr: $absaddr, IntRegs: $src1)>;
>> +
>> +let Predicates = [HasV4T], AddedComplexity = 30 in
>> +def : Pat<(store (i32 IntRegs:$src1), (HexagonCONST32
tglobaladdr:$absaddr)),
>> + (STriw_abs_V4 tglobaladdr: $absaddr, IntRegs: $src1)>;
>> +
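
(Illustrative only.) The three Pats above are what let ordinary stores to
globals pick the absolute forms, e.g.:

    char  gb;
    short gh;
    int   gw;

    void store_globals(int x) {
      gb = (char)x;    /* truncstorei8  ->  memb(##gb) = ...  */
      gh = (short)x;   /* truncstorei16 ->  memh(##gh) = ...  */
      gw = x;          /* store         ->  memw(##gw) = ...  */
    }

    /* the asm in the comments is schematic; register assignment is up to
       the allocator. */
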
>> +
>> +multiclass LD_abs<string OpcStr> {
>> + let isPredicable = 1 in
>> + def _abs_V4 : LDInst<(outs IntRegs:$dst),
>> + (ins globaladdress:$absaddr),
>> + !strconcat("$dst = ", !strconcat(OpcStr,
"(##$absaddr)")),
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> + let isPredicated = 1 in
>> + def _abs_cPt_V4 : LDInst<(outs IntRegs:$dst),
>> + (ins PredRegs:$src1, globaladdress:$absaddr),
>> + !strconcat("if ($src1) $dst = ",
!strconcat(OpcStr, "(##$absaddr)")),
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> + let isPredicated = 1 in
>> + def _abs_cNotPt_V4 : LDInst<(outs IntRegs:$dst),
>> + (ins PredRegs:$src1, globaladdress:$absaddr),
>> + !strconcat("if (!$src1) $dst = ",
!strconcat(OpcStr, "(##$absaddr)")),
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> + let isPredicated = 1 in
>> + def _abs_cdnPt_V4 : LDInst<(outs IntRegs:$dst),
>> + (ins PredRegs:$src1, globaladdress:$absaddr),
>> + !strconcat("if ($src1.new) $dst = ",
!strconcat(OpcStr, "(##$absaddr)")),
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> + let isPredicated = 1 in
>> + def _abs_cdnNotPt_V4 : LDInst<(outs IntRegs:$dst),
>> + (ins PredRegs:$src1, globaladdress:$absaddr),
>> + !strconcat("if (!$src1.new) $dst = ",
!strconcat(OpcStr, "(##$absaddr)")),
>> + []>,
>> + Requires<[HasV4T]>;
>> +}
>> +
>> +let AddedComplexity = 30 in
>> +def LDrid_abs_V4 : LDInst<(outs DoubleRegs:$dst),
>> + (ins globaladdress:$absaddr),
>> + "$dst = memd(##$absaddr)",
>> + [(set (i64 DoubleRegs:$dst), (load (HexagonCONST32
tglobaladdr:$absaddr)))]>,
>> + Requires<[HasV4T]>;
>> +
>> +let AddedComplexity = 30, isPredicated = 1 in
>> +def LDrid_abs_cPt_V4 : LDInst<(outs DoubleRegs:$dst),
>> + (ins PredRegs:$src1, globaladdress:$absaddr),
>> + "if ($src1) $dst = memd(##$absaddr)",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +let AddedComplexity = 30, isPredicated = 1 in
>> +def LDrid_abs_cNotPt_V4 : LDInst<(outs DoubleRegs:$dst),
>> + (ins PredRegs:$src1, globaladdress:$absaddr),
>> + "if (!$src1) $dst = memd(##$absaddr)",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +let AddedComplexity = 30, isPredicated = 1 in
>> +def LDrid_abs_cdnPt_V4 : LDInst<(outs DoubleRegs:$dst),
>> + (ins PredRegs:$src1, globaladdress:$absaddr),
>> + "if ($src1.new) $dst = memd(##$absaddr)",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +let AddedComplexity = 30, isPredicated = 1 in
>> +def LDrid_abs_cdnNotPt_V4 : LDInst<(outs DoubleRegs:$dst),
>> + (ins PredRegs:$src1, globaladdress:$absaddr),
>> + "if (!$src1.new) $dst = memd(##$absaddr)",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +defm LDrib : LD_abs<"memb">;
>> +defm LDriub : LD_abs<"memub">;
>> +defm LDrih : LD_abs<"memh">;
>> +defm LDriuh : LD_abs<"memuh">;
>> +defm LDriw : LD_abs<"memw">;
>> +
>> +
>> +let Predicates = [HasV4T], AddedComplexity = 30 in
>> +def : Pat<(i32 (load (HexagonCONST32 tglobaladdr:$absaddr))),
>> + (LDriw_abs_V4 tglobaladdr: $absaddr)>;
>> +
>> +let Predicates = [HasV4T], AddedComplexity=30 in
>> +def : Pat<(i32 (sextloadi8 (HexagonCONST32 tglobaladdr:$absaddr))),
>> + (LDrib_abs_V4 tglobaladdr:$absaddr)>;
>> +
>> +let Predicates = [HasV4T], AddedComplexity=30 in
>> +def : Pat<(i32 (zextloadi8 (HexagonCONST32 tglobaladdr:$absaddr))),
>> + (LDriub_abs_V4 tglobaladdr:$absaddr)>;
>> +
>> +let Predicates = [HasV4T], AddedComplexity=30 in
>> +def : Pat<(i32 (sextloadi16 (HexagonCONST32
tglobaladdr:$absaddr))),
>> + (LDrih_abs_V4 tglobaladdr:$absaddr)>;
>> +
>> +let Predicates = [HasV4T], AddedComplexity=30 in
>> +def : Pat<(i32 (zextloadi16 (HexagonCONST32
tglobaladdr:$absaddr))),
>> + (LDriuh_abs_V4 tglobaladdr:$absaddr)>;
>> +
>> +// Transfer global address into a register
>> +let AddedComplexity=50, isMoveImm = 1, isReMaterializable = 1 in
>> +def TFRI_V4 : ALU32_ri<(outs IntRegs:$dst), (ins
globaladdress:$src1),
>> + "$dst = ##$src1",
>> + [(set IntRegs:$dst, (HexagonCONST32
tglobaladdr:$src1))]>,
>> + Requires<[HasV4T]>;
>> +
>> +let AddedComplexity=50, neverHasSideEffects = 1, isPredicated = 1 in
>> +def TFRI_cPt_V4 : ALU32_ri<(outs IntRegs:$dst),
>> + (ins PredRegs:$src1, globaladdress:$src2),
>> + "if($src1) $dst = ##$src2",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +let AddedComplexity=50, neverHasSideEffects = 1, isPredicated = 1 in
>> +def TFRI_cNotPt_V4 : ALU32_ri<(outs IntRegs:$dst),
>> + (ins PredRegs:$src1,
globaladdress:$src2),
>> + "if(!$src1) $dst = ##$src2",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +let AddedComplexity=50, neverHasSideEffects = 1, isPredicated = 1 in
>> +def TFRI_cdnPt_V4 : ALU32_ri<(outs IntRegs:$dst),
>> + (ins PredRegs:$src1,
globaladdress:$src2),
>> + "if($src1.new) $dst = ##$src2",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +let AddedComplexity=50, neverHasSideEffects = 1, isPredicated = 1 in
>> +def TFRI_cdnNotPt_V4 : ALU32_ri<(outs IntRegs:$dst),
>> + (ins PredRegs:$src1,
globaladdress:$src2),
>> + "if(!$src1.new) $dst = ##$src2",
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> +let AddedComplexity = 50, Predicates = [HasV4T] in
>> +def : Pat<(HexagonCONST32_GP tglobaladdr:$src1),
>> + (TFRI_V4 tglobaladdr:$src1)>;
>> +
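
(Illustrative only.) With the HexagonCONST32_GP Pat above, taking the address
of a global becomes a single extended transfer:

    int gw;
    int *addr_of_gw(void) {
      return &gw;      /* roughly:  r0 = ##gw  */
    }
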
>> +
>> +// Load - Indirect with long offset: These instructions take a global
>> +// address as an operand.
>> +let AddedComplexity = 10 in
>> +def LDrid_ind_lo_V4 : LDInst<(outs DoubleRegs:$dst),
>> + (ins IntRegs:$src1, u2Imm:$src2, globaladdress:$offset),
>> + "$dst=memd($src1<<#$src2+##$offset)",
>> + [(set (i64 DoubleRegs:$dst),
>> + (load (add (shl IntRegs:$src1, u2ImmPred:$src2),
>> + (HexagonCONST32 tglobaladdr:$offset))))]>,
>> + Requires<[HasV4T]>;
>> +
>> +let AddedComplexity = 10 in
>> +multiclass LD_indirect_lo<string OpcStr, PatFrag OpNode> {
>> + def _lo_V4 : LDInst<(outs IntRegs:$dst),
>> + (ins IntRegs:$src1, u2Imm:$src2, globaladdress:$offset),
>> + !strconcat("$dst = ", !strconcat(OpcStr,
"($src1<<#$src2+##$offset)")),
>> + [(set IntRegs:$dst,
>> + (i32 (OpNode (add (shl IntRegs:$src1,
u2ImmPred:$src2),
>> + (HexagonCONST32
tglobaladdr:$offset)))))]>,
>> + Requires<[HasV4T]>;
>> +}
>> +
>> +defm LDrib_ind : LD_indirect_lo<"memb", sextloadi8>;
>> +defm LDriub_ind : LD_indirect_lo<"memub", zextloadi8>;
>> +defm LDrih_ind : LD_indirect_lo<"memh", sextloadi16>;
>> +defm LDriuh_ind : LD_indirect_lo<"memuh", zextloadi16>;
>> +defm LDriw_ind : LD_indirect_lo<"memw", load>;
>> +
>> +// Store - Indirect with long offset: These instructions take a global
>> +// address as an operand.
>> +let AddedComplexity = 10 in
>> +def STrid_ind_lo_V4 : STInst<(outs),
>> + (ins IntRegs:$src1, u2Imm:$src2, globaladdress:$src3,
>> + DoubleRegs:$src4),
>> + "memd($src1<<#$src2+#$src3) = $src4",
>> + [(store (i64 DoubleRegs:$src4),
>> + (add (shl IntRegs:$src1, u2ImmPred:$src2),
>> + (HexagonCONST32 tglobaladdr:$src3)))]>,
>> + Requires<[HasV4T]>;
>> +
>> +let AddedComplexity = 10 in
>> +multiclass ST_indirect_lo<string OpcStr, PatFrag OpNode> {
>> + def _lo_V4 : STInst<(outs),
>> + (ins IntRegs:$src1, u2Imm:$src2, globaladdress:$src3,
>> + IntRegs:$src4),
>> + !strconcat(OpcStr, "($src1<<#$src2+##$src3) =
$src4"),
>> + [(OpNode (i32 IntRegs:$src4),
>> + (add (shl IntRegs:$src1, u2ImmPred:$src2),
>> + (HexagonCONST32 tglobaladdr:$src3)))]>,
>> + Requires<[HasV4T]>;
>> +}
>> +
>> +defm STrib_ind : ST_indirect_lo<"memb", truncstorei8>;
>> +defm STrih_ind : ST_indirect_lo<"memh", truncstorei16>;
>> +defm STriw_ind : ST_indirect_lo<"memw", store>;
>> +
>> +// Store - absolute addressing mode: These instructions take a constant
>> +// value as the extended operand
>> +multiclass ST_absimm<string OpcStr> {
>> + let isPredicable = 1 in
>> + def _abs_V4 : STInst<(outs),
>> + (ins u6Imm:$src1, IntRegs:$src2),
>> + !strconcat(OpcStr, "(#$src1) = $src2"),
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> + let isPredicated = 1 in
>> + def _abs_cPt_V4 : STInst<(outs),
>> + (ins PredRegs:$src1, u6Imm:$src2, IntRegs:$src3),
>> + !strconcat("if ($src1)", !strconcat(OpcStr,
"(#$src2) = $src3")),
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> + let isPredicated = 1 in
>> + def _abs_cNotPt_V4 : STInst<(outs),
>> + (ins PredRegs:$src1, u6Imm:$src2, IntRegs:$src3),
>> + !strconcat("if (!$src1)", !strconcat(OpcStr,
"(#$src2) = $src3")),
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> + let isPredicated = 1 in
>> + def _abs_cdnPt_V4 : STInst<(outs),
>> + (ins PredRegs:$src1, u6Imm:$src2, IntRegs:$src3),
>> + !strconcat("if ($src1.new)", !strconcat(OpcStr,
"(#$src2) = $src3")),
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> + let isPredicated = 1 in
>> + def _abs_cdnNotPt_V4 : STInst<(outs),
>> + (ins PredRegs:$src1, u6Imm:$src2, IntRegs:$src3),
>> + !strconcat("if (!$src1.new)", !strconcat(OpcStr,
"(#$src2) = $src3")),
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> + def _abs_nv_V4 : STInst<(outs),
>> + (ins u6Imm:$src1, IntRegs:$src2),
>> + !strconcat(OpcStr, "(#$src1) = $src2.new"),
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> + let isPredicated = 1 in
>> + def _abs_cPt_nv_V4 : STInst<(outs),
>> + (ins PredRegs:$src1, u6Imm:$src2, IntRegs:$src3),
>> + !strconcat("if ($src1)", !strconcat(OpcStr,
"(#$src2) = $src3.new")),
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> + let isPredicated = 1 in
>> + def _abs_cNotPt_nv_V4 : STInst<(outs),
>> + (ins PredRegs:$src1, u6Imm:$src2, IntRegs:$src3),
>> + !strconcat("if (!$src1)", !strconcat(OpcStr,
"(#$src2) = $src3.new")),
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> + let isPredicated = 1 in
>> + def _abs_cdnPt_nv_V4 : STInst<(outs),
>> + (ins PredRegs:$src1, u6Imm:$src2, IntRegs:$src3),
>> + !strconcat("if ($src1.new)", !strconcat(OpcStr,
"(#$src2) = $src3.new")),
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> + let isPredicated = 1 in
>> + def _abs_cdnNotPt_nv_V4 : STInst<(outs),
>> + (ins PredRegs:$src1, u6Imm:$src2, IntRegs:$src3),
>> + !strconcat("if (!$src1.new)", !strconcat(OpcStr,
"(#$src2) = $src3.new")),
>> + []>,
>> + Requires<[HasV4T]>;
>> +}
>> +
>> +defm STrib_imm : ST_absimm<"memb">;
>> +defm STrih_imm : ST_absimm<"memh">;
>> +defm STriw_imm : ST_absimm<"memw">;
>> +
>> +let Predicates = [HasV4T], AddedComplexity = 30 in
>> +def : Pat<(truncstorei8 (i32 IntRegs:$src1), u6ImmPred:$src2),
>> + (STrib_imm_abs_V4 u6ImmPred:$src2, IntRegs: $src1)>;
>> +
>> +let Predicates = [HasV4T], AddedComplexity = 30 in
>> +def : Pat<(truncstorei16 (i32 IntRegs:$src1), u6ImmPred:$src2),
>> + (STrih_imm_abs_V4 u6ImmPred:$src2, IntRegs: $src1)>;
>> +
>> +let Predicates = [HasV4T], AddedComplexity = 30 in
>> +def : Pat<(store (i32 IntRegs:$src1), u6ImmPred:$src2),
>> + (STriw_imm_abs_V4 u6ImmPred:$src2, IntRegs: $src1)>;
>> +
>> +
>> +// Load - absolute addressing mode: These instructions take a constant
>> +// value as the extended operand
>> +
>> +multiclass LD_absimm<string OpcStr> {
>> + let isPredicable = 1 in
>> + def _abs_V4 : LDInst<(outs IntRegs:$dst),
>> + (ins u6Imm:$src),
>> + !strconcat("$dst = ", !strconcat(OpcStr,
"(#$src)")),
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> + let isPredicated = 1 in
>> + def _abs_cPt_V4 : LDInst<(outs IntRegs:$dst),
>> + (ins PredRegs:$src1, u6Imm:$src2),
>> + !strconcat("if ($src1) $dst = ",
!strconcat(OpcStr, "(#$src2)")),
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> + let isPredicated = 1 in
>> + def _abs_cNotPt_V4 : LDInst<(outs IntRegs:$dst),
>> + (ins PredRegs:$src1, u6Imm:$src2),
>> + !strconcat("if (!$src1) $dst = ",
!strconcat(OpcStr, "(#$src2)")),
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> + let isPredicated = 1 in
>> + def _abs_cdnPt_V4 : LDInst<(outs IntRegs:$dst),
>> + (ins PredRegs:$src1, u6Imm:$src2),
>> + !strconcat("if ($src1.new) $dst = ",
!strconcat(OpcStr, "(#$src2)")),
>> + []>,
>> + Requires<[HasV4T]>;
>> +
>> + let isPredicated = 1 in
>> + def _abs_cdnNotPt_V4 : LDInst<(outs IntRegs:$dst),
>> + (ins PredRegs:$src1, u6Imm:$src2),
>> + !strconcat("if (!$src1.new) $dst = ",
!strconcat(OpcStr, "(#$src2)")),
>> + []>,
>> + Requires<[HasV4T]>;
>> +}
>> +
>> +defm LDrib_imm : LD_absimm<"memb">;
>> +defm LDriub_imm : LD_absimm<"memub">;
>> +defm LDrih_imm : LD_absimm<"memh">;
>> +defm LDriuh_imm : LD_absimm<"memuh">;
>> +defm LDriw_imm : LD_absimm<"memw">;
>> +
>> +let Predicates = [HasV4T], AddedComplexity = 30 in
>> +def : Pat<(i32 (load u6ImmPred:$src)),
>> + (LDriw_imm_abs_V4 u6ImmPred:$src)>;
>> +
>> +let Predicates = [HasV4T], AddedComplexity=30 in
>> +def : Pat<(i32 (sextloadi8 u6ImmPred:$src)),
>> + (LDrib_imm_abs_V4 u6ImmPred:$src)>;
>> +
>> +let Predicates = [HasV4T], AddedComplexity=30 in
>> +def : Pat<(i32 (zextloadi8 u6ImmPred:$src)),
>> + (LDriub_imm_abs_V4 u6ImmPred:$src)>;
>> +
>> +let Predicates = [HasV4T], AddedComplexity=30 in
>> +def : Pat<(i32 (sextloadi16 u6ImmPred:$src)),
>> + (LDrih_imm_abs_V4 u6ImmPred:$src)>;
>> +
>> +let Predicates = [HasV4T], AddedComplexity=30 in
>> +def : Pat<(i32 (zextloadi16 u6ImmPred:$src)),
>> + (LDriuh_imm_abs_V4 u6ImmPred:$src)>;
>> +
>> +
>> +// Indexed store word - global address.
>> +// memw(Rs+#u6:2)=##global
>> +let AddedComplexity = 10 in
>> +def STriw_offset_ext_V4 : STInst<(outs),
>> + (ins IntRegs:$src1, u6_2Imm:$src2, globaladdress:$src3),
>> + "memw($src1+#$src2) = ##$src3",
>> + [(store (HexagonCONST32 tglobaladdr:$src3),
>> + (add IntRegs:$src1, u6_2ImmPred:$src2))]>,
>> + Requires<[HasV4T]>;
>> +
>> +
>> +// Indexed store halfword - global address.
>> +// memh(Rs+#u6:1)=##global
>> +let AddedComplexity = 10 in
>> +def STrih_offset_ext_V4 : STInst<(outs),
>> + (ins IntRegs:$src1, u6_1Imm:$src2, globaladdress:$src3),
>> + "memh($src1+#$src2) = ##$src3",
>> + [(truncstorei16 (HexagonCONST32 tglobaladdr:$src3),
>> + (add IntRegs:$src1, u6_1ImmPred:$src2))]>,
>> + Requires<[HasV4T]>;
>> diff --git a/lib/Target/Hexagon/HexagonMCInst.h b/lib/Target/Hexagon/HexagonMCInst.h
>> new file mode 100644
>> index 0000000..16ea7cf
>> --- /dev/null
>> +++ b/lib/Target/Hexagon/HexagonMCInst.h
>> @@ -0,0 +1,41 @@
>> +//===- HexagonMCInst.h - Hexagon sub-class of MCInst ----------------------===//
>> +//
>> +// The LLVM Compiler Infrastructure
>> +//
>> +// This file is distributed under the University of Illinois Open Source
>> +// License. See LICENSE.TXT for details.
>> +//
>> +//===----------------------------------------------------------------------===//
>> +//
>> +// This class extends MCInst to allow some VLIW annotation.
>> +//
>> +//===----------------------------------------------------------------------===//
>> +
>> +#ifndef HEXAGONMCINST_H
>> +#define HEXAGONMCINST_H
>> +
>> +#include "llvm/MC/MCInst.h"
>> +#include "llvm/CodeGen/MachineInstr.h"
>> +
>> +namespace llvm {
>> + class HexagonMCInst: public MCInst {
>> + // Packet start and end markers
>> + unsigned startPacket: 1, endPacket: 1;
>> + const MachineInstr *MachineI;
>> + public:
>> + explicit HexagonMCInst(): MCInst(),
>> + startPacket(0), endPacket(0) {}
>> +
>> + const MachineInstr* getMI() const { return MachineI; };
>> +
>> + void setMI(const MachineInstr *MI) { MachineI = MI; };
>> +
>> + bool isStartPacket() const { return (startPacket); };
>> + bool isEndPacket() const { return (endPacket); };
>> +
>> + void setStartPacket(bool yes) { startPacket = yes; };
>> + void setEndPacket(bool yes) { endPacket = yes; };
>> + };
>> +}
>> +
>> +#endif
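
The start/end markers above are how packet boundaries survive into the MC layer. As a rough illustration of how an emitter might drive them (not code from this patch; emitPacket and the streamer hand-off are hypothetical):

    // Hypothetical emission loop (sketch only): mark the first and last
    // instruction of a bundle before handing the MCInsts to the streamer.
    static void emitPacket(const std::vector<const MachineInstr*> &Packet) {
      for (unsigned i = 0, e = Packet.size(); i != e; ++i) {
        HexagonMCInst MCI;
        MCI.setMI(Packet[i]);            // remember the originating MachineInstr
        MCI.setStartPacket(i == 0);      // "{" goes before the first instruction
        MCI.setEndPacket(i == e - 1);    // "}" goes after the last instruction
        // ... lower operands and emit MCI ...
      }
    }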
>> diff --git a/lib/Target/Hexagon/HexagonMCInstLower.cpp b/lib/Target/Hexagon/HexagonMCInstLower.cpp
>> index fbb331b..70bddcc 100644
>> --- a/lib/Target/Hexagon/HexagonMCInstLower.cpp
>> +++ b/lib/Target/Hexagon/HexagonMCInstLower.cpp
>> @@ -49,7 +49,7 @@ void llvm::HexagonLowerToMC(const MachineInstr* MI, MCInst& MCI,
>> switch (MO.getType()) {
>> default:
>> MI->dump();
>> - assert(0&& "unknown operand type");
>> + llvm_unreachable("unknown operand type");
>> case MachineOperand::MO_Register:
>> // Ignore all implicit register operands.
>> if (MO.isImplicit()) continue;
>> diff --git a/lib/Target/Hexagon/HexagonSchedule.td b/lib/Target/Hexagon/HexagonSchedule.td
>> index fbea445..c488796 100644
>> --- a/lib/Target/Hexagon/HexagonSchedule.td
>> +++ b/lib/Target/Hexagon/HexagonSchedule.td
>> @@ -13,7 +13,6 @@ def LSUNIT : FuncUnit;
>> def MUNIT : FuncUnit;
>> def SUNIT : FuncUnit;
>>
>> -
>> // Itinerary classes
>> def ALU32 : InstrItinClass;
>> def ALU64 : InstrItinClass;
>> @@ -24,23 +23,25 @@ def LD : InstrItinClass;
>> def M : InstrItinClass;
>> def ST : InstrItinClass;
>> def S : InstrItinClass;
>> +def SYS : InstrItinClass;
>> +def MARKER : InstrItinClass;
>> def PSEUDO : InstrItinClass;
>>
>> -
>> def HexagonItineraries :
>> - ProcessorItineraries<[LUNIT, LSUNIT, MUNIT, SUNIT], [], [
>> - InstrItinData<ALU32 , [InstrStage<1, [LUNIT, LSUNIT,
MUNIT, SUNIT]>]>,
>> - InstrItinData<ALU64 , [InstrStage<1, [MUNIT,
SUNIT]>]>,
>> - InstrItinData<CR , [InstrStage<1, [SUNIT]>]>,
>> - InstrItinData<J , [InstrStage<1, [SUNIT,
MUNIT]>]>,
>> - InstrItinData<JR , [InstrStage<1, [MUNIT]>]>,
>> - InstrItinData<LD , [InstrStage<1, [LUNIT,
LSUNIT]>]>,
>> - InstrItinData<M , [InstrStage<1, [MUNIT,
SUNIT]>]>,
>> - InstrItinData<ST , [InstrStage<1, [LSUNIT]>]>,
>> - InstrItinData<S , [InstrStage<1, [SUNIT,
MUNIT]>]>,
>> - InstrItinData<PSEUDO , [InstrStage<1, [LUNIT, LSUNIT,
MUNIT, SUNIT]>]>
>> -]>;
>> -
>> +  ProcessorItineraries<[LUNIT, LSUNIT, MUNIT, SUNIT], [], [
>> +    InstrItinData<ALU32  , [InstrStage<1, [LUNIT, LSUNIT, MUNIT, SUNIT]>]>,
>> +    InstrItinData<ALU64  , [InstrStage<1, [MUNIT, SUNIT]>]>,
>> +    InstrItinData<CR     , [InstrStage<1, [SUNIT]>]>,
>> +    InstrItinData<J      , [InstrStage<1, [SUNIT, MUNIT]>]>,
>> +    InstrItinData<JR     , [InstrStage<1, [MUNIT]>]>,
>> +    InstrItinData<LD     , [InstrStage<1, [LUNIT, LSUNIT]>]>,
>> +    InstrItinData<M      , [InstrStage<1, [MUNIT, SUNIT]>]>,
>> +    InstrItinData<ST     , [InstrStage<1, [LSUNIT]>]>,
>> +    InstrItinData<S      , [InstrStage<1, [SUNIT, MUNIT]>]>,
>> +    InstrItinData<SYS    , [InstrStage<1, [LSUNIT]>]>,
>> +    InstrItinData<MARKER , [InstrStage<1, [LUNIT, LSUNIT, MUNIT, SUNIT]>]>,
>> +    InstrItinData<PSEUDO , [InstrStage<1, [LUNIT, LSUNIT, MUNIT, SUNIT]>]>
>> +  ]>;
>>
>> //===----------------------------------------------------------------------===//
>> // V4 Machine Info +
>> diff --git a/lib/Target/Hexagon/HexagonScheduleV4.td b/lib/Target/Hexagon/HexagonScheduleV4.td
>> index 4cf66fe..1d82dbb 100644
>> --- a/lib/Target/Hexagon/HexagonScheduleV4.td
>> +++ b/lib/Target/Hexagon/HexagonScheduleV4.td
>> @@ -23,7 +23,6 @@
>> // | SLOT3      |  XTYPE ALU32 J CR                                |
>> // |===========|==================================================|
>>
>> -
>> // Functional Units.
>> def SLOT0 : FuncUnit;
>> def SLOT1 : FuncUnit;
>> @@ -34,22 +33,26 @@ def SLOT3 : FuncUnit;
>> def NV_V4 : InstrItinClass;
>> def MEM_V4 : InstrItinClass;
>> // ALU64/M/S Instruction classes of V2 are collectively known as XTYPE in V4.
>> +def PREFIX : InstrItinClass;
>>
>> -def HexagonItinerariesV4 : ProcessorItineraries<
>> - [SLOT0, SLOT1, SLOT2, SLOT3], [], [
>> - InstrItinData<LD , [InstrStage<1, [SLOT0,
SLOT1]>]>,
>> - InstrItinData<ST , [InstrStage<1, [SLOT0,
SLOT1]>]>,
>> - InstrItinData<ALU32 , [InstrStage<1, [SLOT0, SLOT1,
SLOT2, SLOT3]>]>,
>> - InstrItinData<NV_V4 , [InstrStage<1,
[SLOT0]>]>,
>> - InstrItinData<MEM_V4 , [InstrStage<1,
[SLOT0]>]>,
>> - InstrItinData<J , [InstrStage<1, [SLOT2,
SLOT3]>]>,
>> - InstrItinData<JR , [InstrStage<1,
[SLOT2]>]>,
>> - InstrItinData<CR , [InstrStage<1,
[SLOT3]>]>,
>> - InstrItinData<PSEUDO , [InstrStage<1, [SLOT0, SLOT1,
SLOT2, SLOT3]>]>,
>> - InstrItinData<ALU64 , [InstrStage<1, [SLOT2,
SLOT3]>]>,
>> - InstrItinData<M , [InstrStage<1, [SLOT2,
SLOT3]>]>,
>> - InstrItinData<S , [InstrStage<1, [SLOT2,
SLOT3]>]>
>> -]>;
>> +def HexagonItinerariesV4 :
>> +  ProcessorItineraries<[SLOT0, SLOT1, SLOT2, SLOT3], [], [
>> +    InstrItinData<ALU32  , [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
>> +    InstrItinData<ALU64  , [InstrStage<1, [SLOT2, SLOT3]>]>,
>> +    InstrItinData<CR     , [InstrStage<1, [SLOT3]>]>,
>> +    InstrItinData<J      , [InstrStage<1, [SLOT2, SLOT3]>]>,
>> +    InstrItinData<JR     , [InstrStage<1, [SLOT2]>]>,
>> +    InstrItinData<LD     , [InstrStage<1, [SLOT0, SLOT1]>]>,
>> +    InstrItinData<M      , [InstrStage<1, [SLOT2, SLOT3]>]>,
>> +    InstrItinData<ST     , [InstrStage<1, [SLOT0, SLOT1]>]>,
>> +    InstrItinData<S      , [InstrStage<1, [SLOT2, SLOT3]>]>,
>> +    InstrItinData<SYS    , [InstrStage<1, [SLOT0]>]>,
>> +    InstrItinData<NV_V4  , [InstrStage<1, [SLOT0]>]>,
>> +    InstrItinData<MEM_V4 , [InstrStage<1, [SLOT0]>]>,
>> +    InstrItinData<MARKER , [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
>> +    InstrItinData<PREFIX , [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
>> +    InstrItinData<PSEUDO , [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>
>> +  ]>;
>>
>> //===----------------------------------------------------------------------===//
>> // Hexagon V4 Resource Definitions -
>> diff --git a/lib/Target/Hexagon/HexagonTargetMachine.cpp b/lib/Target/Hexagon/HexagonTargetMachine.cpp
>> index b9e6894..411325b 100644
>> --- a/lib/Target/Hexagon/HexagonTargetMachine.cpp
>> +++ b/lib/Target/Hexagon/HexagonTargetMachine.cpp
>> @@ -138,5 +138,8 @@ bool HexagonPassConfig::addPreEmitPass() {
>> // Split up TFRcondsets into conditional transfers.
>> PM.add(createHexagonSplitTFRCondSets(getHexagonTargetMachine()));
>>
>> + // Create Packets.
>> + PM.add(createHexagonPacketizer());
>> +
>> return false;
>> }
>> diff --git a/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp b/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp
>> new file mode 100644
>> index 0000000..adc5a59
>> --- /dev/null
>> +++ b/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp
>> @@ -0,0 +1,3640 @@
>> +//===----- HexagonPacketizer.cpp - vliw packetizer ---------------------===//
>> +//
>> +// The LLVM Compiler Infrastructure
>> +//
>> +// This file is distributed under the University of Illinois Open Source
>> +// License. See LICENSE.TXT for details.
>> +//
>> +//===----------------------------------------------------------------------===//
>> +//
>> +// This implements a simple VLIW packetizer using DFA. The packetizer works
>> +// on machine basic blocks. For each instruction I in BB, the packetizer
>> +// consults the DFA to see if machine resources are available to execute I.
>> +// If so, the packetizer checks if I depends on any instruction J in the
>> +// current packet. If no dependency is found, I is added to the current
>> +// packet and machine resources are marked as taken. If any dependency is
>> +// found, a target API call is made to prune the dependence.
>> +//
>> +//===----------------------------------------------------------------------===//
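
To make the comment above concrete, the per-region flow is roughly the following (a simplified sketch of the generic loop, not the exact code in this patch; dependsOn() is a hypothetical stand-in for the scheduling-DAG query):

    // Sketch of DFA-driven packetization for one scheduling region.
    for (MachineBasicBlock::iterator MII = BeginItr; MII != EndItr; ++MII) {
      MachineInstr *MI = &*MII;
      bool Fits = ResourceTracker->canReserveResources(MI);
      for (unsigned i = 0; Fits && i < CurrentPacketMIs.size(); ++i)
        Fits = !dependsOn(MI, CurrentPacketMIs[i]) ||
               isLegalToPruneDependencies(MIToSUnit[MI],
                                          MIToSUnit[CurrentPacketMIs[i]]);
      if (!Fits)
        endPacket(MBB, MI);     // close the current packet; MI starts a new one
      MII = addToPacket(MI);    // reserves the DFA resources for MI
    }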
>> +#define DEBUG_TYPE "packets"
>> +#include "llvm/CodeGen/DFAPacketizer.h"
>> +#include "llvm/CodeGen/Passes.h"
>> +#include "llvm/CodeGen/MachineDominators.h"
>> +#include "llvm/CodeGen/MachineFunctionPass.h"
>> +#include "llvm/CodeGen/MachineLoopInfo.h"
>> +#include "llvm/CodeGen/ScheduleDAG.h"
>> +#include "llvm/CodeGen/ScheduleDAGInstrs.h"
>> +#include "llvm/CodeGen/LatencyPriorityQueue.h"
>> +#include "llvm/CodeGen/SchedulerRegistry.h"
>> +#include "llvm/CodeGen/MachineFrameInfo.h"
>> +#include "llvm/CodeGen/MachineInstrBuilder.h"
>> +#include "llvm/CodeGen/MachineRegisterInfo.h"
>> +#include "llvm/CodeGen/MachineFunctionAnalysis.h"
>> +#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
>> +#include "llvm/Target/TargetMachine.h"
>> +#include "llvm/Target/TargetInstrInfo.h"
>> +#include "llvm/Target/TargetRegisterInfo.h"
>> +#include "llvm/ADT/DenseMap.h"
>> +#include "llvm/ADT/Statistic.h"
>> +#include "llvm/Support/MathExtras.h"
>> +#include "llvm/MC/MCInstrItineraries.h"
>> +#include "llvm/Support/Compiler.h"
>> +#include "llvm/Support/CommandLine.h"
>> +#include "llvm/Support/Debug.h"
>> +#include "Hexagon.h"
>> +#include "HexagonTargetMachine.h"
>> +#include "HexagonRegisterInfo.h"
>> +#include "HexagonSubtarget.h"
>> +#include "HexagonMachineFunctionInfo.h"
>> +
>> +#include<map>
>> +
>> +using namespace llvm;
>> +
>> +namespace {
>> + class HexagonPacketizer : public MachineFunctionPass {
>> +
>> + public:
>> + static char ID;
>> + HexagonPacketizer() : MachineFunctionPass(ID) {}
>> +
>> + void getAnalysisUsage(AnalysisUsage&AU) const {
>> + AU.setPreservesCFG();
>> + AU.addRequired<MachineDominatorTree>();
>> + AU.addPreserved<MachineDominatorTree>();
>> + AU.addRequired<MachineLoopInfo>();
>> + AU.addPreserved<MachineLoopInfo>();
>> + MachineFunctionPass::getAnalysisUsage(AU);
>> + }
>> +
>> + const char *getPassName() const {
>> + return "Hexagon Packetizer";
>> + }
>> +
>> + bool runOnMachineFunction(MachineFunction&Fn);
>> + };
>> + char HexagonPacketizer::ID = 0;
>> +
>> + class HexagonPacketizerList : public VLIWPacketizerList {
>> +
>> + private:
>> +
>> + // Has the instruction been promoted to a dot-new instruction.
>> + bool PromotedToDotNew;
>> +
>> + // Has the instruction been glued to allocframe.
>> + bool GlueAllocframeStore;
>> +
>> + // Has the feeder instruction been glued to new value jump.
>> + bool GlueToNewValueJump;
>> +
>> +    // Check if there is a dependence between some instruction already in
>> +    // this packet and this instruction.
>> + bool Dependence;
>> +
>> + // Only check for dependence if there are resources available to
>> + // schedule this instruction.
>> + bool FoundSequentialDependence;
>> +
>> + public:
>> + // Ctor.
>> +    HexagonPacketizerList(MachineFunction &MF, MachineLoopInfo &MLI,
>> +                          MachineDominatorTree &MDT);
>> +
>> + // initPacketizerState - initialize some internal flags.
>> + void initPacketizerState(void);
>> +
>> +    // ignorePseudoInstruction - Ignore bundling of pseudo instructions.
>> +    bool ignorePseudoInstruction(MachineInstr *MI, MachineBasicBlock *MBB);
>> +
>> +    // isSoloInstruction - return true if instruction MI cannot be packetized
>> +    // with any other instruction, which means that MI itself is a packet.
>> + bool isSoloInstruction(MachineInstr *MI);
>> +
>> +    // isLegalToPacketizeTogether - Is it legal to packetize SUI and SUJ
>> +    // together.
>> + bool isLegalToPacketizeTogether(SUnit *SUI, SUnit *SUJ);
>> +
>> +    // isLegalToPruneDependencies - Is it legal to prune the dependence
>> +    // between SUI and SUJ.
>> + bool isLegalToPruneDependencies(SUnit *SUI, SUnit *SUJ);
>> +
>> + MachineBasicBlock::iterator addToPacket(MachineInstr *MI);
>> + private:
>> +    bool IsCallDependent(MachineInstr* MI, SDep::Kind DepType,
>> +                         unsigned DepReg);
>> + bool PromoteToDotNew(MachineInstr* MI, SDep::Kind DepType,
>> + MachineBasicBlock::iterator&MII,
>> + const TargetRegisterClass* RC);
>> + bool CanPromoteToDotNew(MachineInstr* MI, SUnit* PacketSU,
>> + unsigned DepReg,
>> + std::map<MachineInstr*, SUnit*> MIToSUnit,
>> + MachineBasicBlock::iterator&MII,
>> + const TargetRegisterClass* RC);
>> + bool CanPromoteToNewValue(MachineInstr* MI, SUnit* PacketSU,
>> + unsigned DepReg,
>> + std::map<MachineInstr*, SUnit*> MIToSUnit,
>> + MachineBasicBlock::iterator&MII);
>> +    bool CanPromoteToNewValueStore(MachineInstr* MI, MachineInstr* PacketMI,
>> + unsigned DepReg,
>> + std::map<MachineInstr*, SUnit*> MIToSUnit);
>> + bool DemoteToDotOld(MachineInstr* MI);
>> +    bool ArePredicatesComplements(MachineInstr* MI1, MachineInstr* MI2,
>> + std::map<MachineInstr*, SUnit*> MIToSUnit);
>> + bool RestrictingDepExistInPacket(MachineInstr*,
>> + unsigned, std::map<MachineInstr*, SUnit*>);
>> + bool isNewifiable(MachineInstr* MI);
>> + bool isCondInst(MachineInstr* MI);
>> + bool IsNewifyStore (MachineInstr* MI);
>> + bool tryAllocateResourcesForConstExt(MachineInstr* MI);
>> + bool canReserveResourcesForConstExt(MachineInstr *MI);
>> + void reserveResourcesForConstExt(MachineInstr* MI);
>> + bool isNewValueInst(MachineInstr* MI);
>> + bool isDotNewInst(MachineInstr* MI);
>> + };
>> +}
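
Since the class above only overrides a handful of hooks, another VLIW backend can reuse the generic VLIWPacketizerList the same way. A minimal sketch (illustrative only; "MyTarget" and the trivial hook bodies are hypothetical):

    // Minimal VLIWPacketizerList subclass for a hypothetical target.
    class MyTargetPacketizerList : public VLIWPacketizerList {
    public:
      MyTargetPacketizerList(MachineFunction &MF, MachineLoopInfo &MLI,
                             MachineDominatorTree &MDT)
        : VLIWPacketizerList(MF, MLI, MDT, true) {}

      // Only the legality hooks need target knowledge.
      bool isSoloInstruction(MachineInstr *MI) { return MI->isInlineAsm(); }
      bool isLegalToPacketizeTogether(SUnit *SUI, SUnit *SUJ) { return true; }
      bool isLegalToPruneDependencies(SUnit *SUI, SUnit *SUJ) { return false; }
    };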
>> +
>> +// HexagonPacketizerList Ctor.
>> +HexagonPacketizerList::HexagonPacketizerList(
>> +  MachineFunction &MF, MachineLoopInfo &MLI, MachineDominatorTree &MDT)
>> +  : VLIWPacketizerList(MF, MLI, MDT, true) {
>> +}
>> +
>> +bool HexagonPacketizer::runOnMachineFunction(MachineFunction&Fn) {
>> + const TargetInstrInfo *TII = Fn.getTarget().getInstrInfo();
>> +  MachineLoopInfo &MLI = getAnalysis<MachineLoopInfo>();
>> +  MachineDominatorTree &MDT = getAnalysis<MachineDominatorTree>();
>> +
>> + // Instantiate the packetizer.
>> + HexagonPacketizerList Packetizer(Fn, MLI, MDT);
>> +
>> + // DFA state table should not be empty.
>> +  assert(Packetizer.getResourceTracker() && "Empty DFA table!");
>> +
>> + //
>> + // Loop over all basic blocks and remove KILL pseudo-instructions
>> + // These instructions confuse the dependence analysis. Consider:
>> + // D0 = ... (Insn 0)
>> + // R0 = KILL R0, D0 (Insn 1)
>> + // R0 = ... (Insn 2)
>> +  // Here, Insn 1 will result in the dependence graph not emitting an output
>> +  // dependence between Insn 0 and Insn 2. This can lead to incorrect
>> +  // packetization.
>> + //
>> + for (MachineFunction::iterator MBB = Fn.begin(), MBBe = Fn.end();
>> + MBB != MBBe; ++MBB) {
>> + MachineBasicBlock::iterator End = MBB->end();
>> + MachineBasicBlock::iterator MI = MBB->begin();
>> + while (MI != End) {
>> + if (MI->isKill()) {
>> + MachineBasicBlock::iterator DeleteMI = MI;
>> + ++MI;
>> + MBB->erase(DeleteMI);
>> + End = MBB->end();
>> + continue;
>> + }
>> + ++MI;
>> + }
>> + }
>> +
>> + // Loop over all of the basic blocks.
>> + for (MachineFunction::iterator MBB = Fn.begin(), MBBe = Fn.end();
>> + MBB != MBBe; ++MBB) {
>> + // Find scheduling regions and schedule / packetize each region.
>> + unsigned RemainingCount = MBB->size();
>> + for(MachineBasicBlock::iterator RegionEnd = MBB->end();
>> + RegionEnd != MBB->begin();) {
>> +      // The next region starts above the previous region. Look backward in
>> +      // the instruction stream until we find the nearest boundary.
>> + MachineBasicBlock::iterator I = RegionEnd;
>> + for(;I != MBB->begin(); --I, --RemainingCount) {
>> + if (TII->isSchedulingBoundary(llvm::prior(I), MBB, Fn))
>> + break;
>> + }
>> + I = MBB->begin();
>> +
>> + // Skip empty scheduling regions.
>> + if (I == RegionEnd) {
>> + RegionEnd = llvm::prior(RegionEnd);
>> + --RemainingCount;
>> + continue;
>> + }
>> + // Skip regions with one instruction.
>> + if (I == llvm::prior(RegionEnd)) {
>> + RegionEnd = llvm::prior(RegionEnd);
>> + continue;
>> + }
>> +
>> + Packetizer.PacketizeMIs(MBB, I, RegionEnd);
>> + RegionEnd = I;
>> + }
>> + }
>> +
>> + return true;
>> +}
>> +
>> +
>> +static bool IsIndirectCall(MachineInstr* MI) {
>> + return ((MI->getOpcode() == Hexagon::CALLR) ||
>> + (MI->getOpcode() == Hexagon::CALLRv3));
>> +}
>> +
>> +// Reserve resources for a constant extender. Trigger an assertion if the
>> +// reservation fails.
>> +void HexagonPacketizerList::reserveResourcesForConstExt(MachineInstr* MI) {
>> +  const HexagonInstrInfo *QII = (const HexagonInstrInfo *) TII;
>> +  MachineInstr *PseudoMI = MI->getParent()->getParent()->CreateMachineInstr(
>> +                                  QII->get(Hexagon::IMMEXT), MI->getDebugLoc());
>> +
>> +  if (ResourceTracker->canReserveResources(PseudoMI)) {
>> +    ResourceTracker->reserveResources(PseudoMI);
>> +    MI->getParent()->getParent()->DeleteMachineInstr(PseudoMI);
>> +  } else {
>> +    MI->getParent()->getParent()->DeleteMachineInstr(PseudoMI);
>> +    llvm_unreachable("cannot reserve resources for constant extender.");
>> +  }
>> +  return;
>> +}
>> +
>> +bool
>> +HexagonPacketizerList::canReserveResourcesForConstExt(MachineInstr *MI) {
>> +  const HexagonInstrInfo *QII = (const HexagonInstrInfo *) TII;
>> +  assert(QII->isExtended(MI) &&
>> +         "Should only be called for constant extended instructions");
>> +  MachineFunction *MF = MI->getParent()->getParent();
>> +  MachineInstr *PseudoMI = MF->CreateMachineInstr(QII->get(Hexagon::IMMEXT),
>> +                                                  MI->getDebugLoc());
>> + bool CanReserve = ResourceTracker->canReserveResources(PseudoMI);
>> + MF->DeleteMachineInstr(PseudoMI);
>> + return CanReserve;
>> +}
>> +
>> +// Allocate resources (i.e. 4 bytes) for a constant extender. Return true
>> +// on success, false otherwise.
>> +bool
>> +HexagonPacketizerList::tryAllocateResourcesForConstExt(MachineInstr* MI) {
>> +  const HexagonInstrInfo *QII = (const HexagonInstrInfo *) TII;
>> +  MachineInstr *PseudoMI = MI->getParent()->getParent()->CreateMachineInstr(
>> +                                  QII->get(Hexagon::IMMEXT), MI->getDebugLoc());
>> +
>> + if (ResourceTracker->canReserveResources(PseudoMI)) {
>> + ResourceTracker->reserveResources(PseudoMI);
>> +    MI->getParent()->getParent()->DeleteMachineInstr(PseudoMI);
>> + return true;
>> + } else {
>> +    MI->getParent()->getParent()->DeleteMachineInstr(PseudoMI);
>> + return false;
>> + }
>> +}
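
All three helpers above use the same trick: build a throwaway IMMEXT instruction, ask the DFA whether it still fits, and delete it again. A plausible call site (illustration only, not code from this patch) would gate extended instructions on that extra slot:

    // Before packetizing an immediate-extended instruction, make sure the
    // packet still has room for the implicit constant-extender word.
    if (QII->isExtended(MI) && !tryAllocateResourcesForConstExt(MI)) {
      endPacket(MBB, MI);                 // current packet is full
      reserveResourcesForConstExt(MI);    // must succeed in a fresh packet
    }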
>> +
>> +
>> +bool HexagonPacketizerList::IsCallDependent(MachineInstr* MI,
>> + SDep::Kind DepType,
>> + unsigned DepReg) {
>> +
>> + const HexagonInstrInfo *QII = (const HexagonInstrInfo *) TII;
>> +  const HexagonRegisterInfo* QRI =
>> +    (const HexagonRegisterInfo *) TM.getRegisterInfo();
>> +
>> + // Check for lr dependence
>> + if (DepReg == QRI->getRARegister()) {
>> + return true;
>> + }
>> +
>> + if (QII->isDeallocRet(MI)) {
>> + if (DepReg == QRI->getFrameRegister() ||
>> + DepReg == QRI->getStackRegister())
>> + return true;
>> + }
>> +
>> + // Check if this is a predicate dependence
>> +  const TargetRegisterClass* RC = QRI->getMinimalPhysRegClass(DepReg);
>> + if (RC == Hexagon::PredRegsRegisterClass) {
>> + return true;
>> + }
>> +
>> + //
>> + // Lastly check for an operand used in an indirect call
>> +  // If we had an attribute for checking if an instruction is an indirect
>> +  // call, then we could have avoided this relatively brittle implementation
>> +  // of IsIndirectCall().
>> +  //
>> +  // Assumes that the first operand of the CALLr is the function address.
>> + //
>> +  if (IsIndirectCall(MI) && (DepType == SDep::Data)) {
>> +    MachineOperand MO = MI->getOperand(0);
>> +    if (MO.isReg() && MO.isUse() && (MO.getReg() == DepReg)) {
>> + return true;
>> + }
>> + }
>> +
>> + return false;
>> +}
>> +
>> +static bool IsRegDependence(const SDep::Kind DepType) {
>> + return (DepType == SDep::Data || DepType == SDep::Anti ||
>> + DepType == SDep::Output);
>> +}
>> +
>> +static bool IsDirectJump(MachineInstr* MI) {
>> + return (MI->getOpcode() == Hexagon::JMP);
>> +}
>> +
>> +static bool IsSchedBarrier(MachineInstr* MI) {
>> + switch (MI->getOpcode()) {
>> + case Hexagon::BARRIER:
>> + return true;
>> + }
>> + return false;
>> +}
>> +
>> +static bool IsControlFlow(MachineInstr* MI) {
>> +  return (MI->getDesc().isTerminator() || MI->getDesc().isCall());
>> +}
>> +
>> +bool HexagonPacketizerList::isNewValueInst(MachineInstr* MI) {
>> + const HexagonInstrInfo *QII = (const HexagonInstrInfo *) TII;
>> + if (QII->isNewValueJump(MI))
>> + return true;
>> +
>> + if (QII->isNewValueStore(MI))
>> + return true;
>> +
>> + return false;
>> +}
>> +
>> +// Returns true if an instruction can be promoted to a new-value store.
>> +// It always returns false for V2 and V3. It lists all the conditional and
>> +// unconditional stores that can be promoted to new-value stores.
>> +
>> +bool HexagonPacketizerList::IsNewifyStore (MachineInstr* MI) {
>> +  const HexagonRegisterInfo* QRI =
>> +    (const HexagonRegisterInfo *) TM.getRegisterInfo();
>> + switch (MI->getOpcode())
>> + {
>> + // store byte
>> + case Hexagon::STrib:
>> + case Hexagon::STrib_indexed:
>> + case Hexagon::STrib_indexed_shl_V4:
>> + case Hexagon::STrib_shl_V4:
>> + case Hexagon::STrib_GP_V4:
>> + case Hexagon::STb_GP_V4:
>> + case Hexagon::POST_STbri:
>> + case Hexagon::STrib_cPt:
>> + case Hexagon::STrib_cdnPt_V4:
>> + case Hexagon::STrib_cNotPt:
>> + case Hexagon::STrib_cdnNotPt_V4:
>> + case Hexagon::STrib_indexed_cPt:
>> + case Hexagon::STrib_indexed_cdnPt_V4:
>> + case Hexagon::STrib_indexed_cNotPt:
>> + case Hexagon::STrib_indexed_cdnNotPt_V4:
>> + case Hexagon::STrib_indexed_shl_cPt_V4:
>> + case Hexagon::STrib_indexed_shl_cdnPt_V4:
>> + case Hexagon::STrib_indexed_shl_cNotPt_V4:
>> + case Hexagon::STrib_indexed_shl_cdnNotPt_V4:
>> + case Hexagon::POST_STbri_cPt:
>> + case Hexagon::POST_STbri_cdnPt_V4:
>> + case Hexagon::POST_STbri_cNotPt:
>> + case Hexagon::POST_STbri_cdnNotPt_V4:
>> + case Hexagon::STb_GP_cPt_V4:
>> + case Hexagon::STb_GP_cNotPt_V4:
>> + case Hexagon::STb_GP_cdnPt_V4:
>> + case Hexagon::STb_GP_cdnNotPt_V4:
>> + case Hexagon::STrib_GP_cPt_V4:
>> + case Hexagon::STrib_GP_cNotPt_V4:
>> + case Hexagon::STrib_GP_cdnPt_V4:
>> + case Hexagon::STrib_GP_cdnNotPt_V4:
>> +
>> + // store halfword
>> + case Hexagon::STrih:
>> + case Hexagon::STrih_indexed:
>> + case Hexagon::STrih_indexed_shl_V4:
>> + case Hexagon::STrih_shl_V4:
>> + case Hexagon::STrih_GP_V4:
>> + case Hexagon::STh_GP_V4:
>> + case Hexagon::POST_SThri:
>> + case Hexagon::STrih_cPt:
>> + case Hexagon::STrih_cdnPt_V4:
>> + case Hexagon::STrih_cNotPt:
>> + case Hexagon::STrih_cdnNotPt_V4:
>> + case Hexagon::STrih_indexed_cPt:
>> + case Hexagon::STrih_indexed_cdnPt_V4:
>> + case Hexagon::STrih_indexed_cNotPt:
>> + case Hexagon::STrih_indexed_cdnNotPt_V4:
>> + case Hexagon::STrih_indexed_shl_cPt_V4:
>> + case Hexagon::STrih_indexed_shl_cdnPt_V4:
>> + case Hexagon::STrih_indexed_shl_cNotPt_V4:
>> + case Hexagon::STrih_indexed_shl_cdnNotPt_V4:
>> + case Hexagon::POST_SThri_cPt:
>> + case Hexagon::POST_SThri_cdnPt_V4:
>> + case Hexagon::POST_SThri_cNotPt:
>> + case Hexagon::POST_SThri_cdnNotPt_V4:
>> + case Hexagon::STh_GP_cPt_V4:
>> + case Hexagon::STh_GP_cNotPt_V4:
>> + case Hexagon::STh_GP_cdnPt_V4:
>> + case Hexagon::STh_GP_cdnNotPt_V4:
>> + case Hexagon::STrih_GP_cPt_V4:
>> + case Hexagon::STrih_GP_cNotPt_V4:
>> + case Hexagon::STrih_GP_cdnPt_V4:
>> + case Hexagon::STrih_GP_cdnNotPt_V4:
>> +
>> + // store word
>> + case Hexagon::STriw:
>> + case Hexagon::STriw_indexed:
>> + case Hexagon::STriw_indexed_shl_V4:
>> + case Hexagon::STriw_shl_V4:
>> + case Hexagon::STriw_GP_V4:
>> + case Hexagon::STw_GP_V4:
>> + case Hexagon::POST_STwri:
>> + case Hexagon::STriw_cPt:
>> + case Hexagon::STriw_cdnPt_V4:
>> + case Hexagon::STriw_cNotPt:
>> + case Hexagon::STriw_cdnNotPt_V4:
>> + case Hexagon::STriw_indexed_cPt:
>> + case Hexagon::STriw_indexed_cdnPt_V4:
>> + case Hexagon::STriw_indexed_cNotPt:
>> + case Hexagon::STriw_indexed_cdnNotPt_V4:
>> + case Hexagon::STriw_indexed_shl_cPt_V4:
>> + case Hexagon::STriw_indexed_shl_cdnPt_V4:
>> + case Hexagon::STriw_indexed_shl_cNotPt_V4:
>> + case Hexagon::STriw_indexed_shl_cdnNotPt_V4:
>> + case Hexagon::POST_STwri_cPt:
>> + case Hexagon::POST_STwri_cdnPt_V4:
>> + case Hexagon::POST_STwri_cNotPt:
>> + case Hexagon::POST_STwri_cdnNotPt_V4:
>> + case Hexagon::STw_GP_cPt_V4:
>> + case Hexagon::STw_GP_cNotPt_V4:
>> + case Hexagon::STw_GP_cdnPt_V4:
>> + case Hexagon::STw_GP_cdnNotPt_V4:
>> + case Hexagon::STriw_GP_cPt_V4:
>> + case Hexagon::STriw_GP_cNotPt_V4:
>> + case Hexagon::STriw_GP_cdnPt_V4:
>> + case Hexagon::STriw_GP_cdnNotPt_V4:
>> + return QRI->Subtarget.hasV4TOps();
>> + }
>> + return false;
>> +}
>> +
>> +static bool IsLoopN(MachineInstr *MI) {
>> + return (MI->getOpcode() == Hexagon::LOOP0_i ||
>> + MI->getOpcode() == Hexagon::LOOP0_r);
>> +}
>> +
>> +/// DoesModifyCalleeSavedReg - Returns true if the instruction modifies a
>> +/// callee-saved register.
>> +static bool DoesModifyCalleeSavedReg(MachineInstr *MI,
>> + const TargetRegisterInfo *TRI) {
>> +  for (const uint16_t *CSR = TRI->getCalleeSavedRegs(); *CSR; ++CSR) {
>> + unsigned CalleeSavedReg = *CSR;
>> + if (MI->modifiesRegister(CalleeSavedReg, TRI))
>> + return true;
>> + }
>> + return false;
>> +}
>> +
>> +// Return the new value instruction for a given store.
>> +static int GetDotNewOp(const int opc) {
>> + switch (opc) {
>> + default: llvm_unreachable("Unknown .new type");
>> + // store new value byte
>> + case Hexagon::STrib:
>> + return Hexagon::STrib_nv_V4;
>> +
>> + case Hexagon::STrib_indexed:
>> + return Hexagon::STrib_indexed_nv_V4;
>> +
>> + case Hexagon::STrib_indexed_shl_V4:
>> + return Hexagon::STrib_indexed_shl_nv_V4;
>> +
>> + case Hexagon::STrib_shl_V4:
>> + return Hexagon::STrib_shl_nv_V4;
>> +
>> + case Hexagon::STrib_GP_V4:
>> + return Hexagon::STrib_GP_nv_V4;
>> +
>> + case Hexagon::STb_GP_V4:
>> + return Hexagon::STb_GP_nv_V4;
>> +
>> + case Hexagon::POST_STbri:
>> + return Hexagon::POST_STbri_nv_V4;
>> +
>> + case Hexagon::STrib_cPt:
>> + return Hexagon::STrib_cPt_nv_V4;
>> +
>> + case Hexagon::STrib_cdnPt_V4:
>> + return Hexagon::STrib_cdnPt_nv_V4;
>> +
>> + case Hexagon::STrib_cNotPt:
>> + return Hexagon::STrib_cNotPt_nv_V4;
>> +
>> + case Hexagon::STrib_cdnNotPt_V4:
>> + return Hexagon::STrib_cdnNotPt_nv_V4;
>> +
>> + case Hexagon::STrib_indexed_cPt:
>> + return Hexagon::STrib_indexed_cPt_nv_V4;
>> +
>> + case Hexagon::STrib_indexed_cdnPt_V4:
>> + return Hexagon::STrib_indexed_cdnPt_nv_V4;
>> +
>> + case Hexagon::STrib_indexed_cNotPt:
>> + return Hexagon::STrib_indexed_cNotPt_nv_V4;
>> +
>> + case Hexagon::STrib_indexed_cdnNotPt_V4:
>> + return Hexagon::STrib_indexed_cdnNotPt_nv_V4;
>> +
>> + case Hexagon::STrib_indexed_shl_cPt_V4:
>> + return Hexagon::STrib_indexed_shl_cPt_nv_V4;
>> +
>> + case Hexagon::STrib_indexed_shl_cdnPt_V4:
>> + return Hexagon::STrib_indexed_shl_cdnPt_nv_V4;
>> +
>> + case Hexagon::STrib_indexed_shl_cNotPt_V4:
>> + return Hexagon::STrib_indexed_shl_cNotPt_nv_V4;
>> +
>> + case Hexagon::STrib_indexed_shl_cdnNotPt_V4:
>> + return Hexagon::STrib_indexed_shl_cdnNotPt_nv_V4;
>> +
>> + case Hexagon::POST_STbri_cPt:
>> + return Hexagon::POST_STbri_cPt_nv_V4;
>> +
>> + case Hexagon::POST_STbri_cdnPt_V4:
>> + return Hexagon::POST_STbri_cdnPt_nv_V4;
>> +
>> + case Hexagon::POST_STbri_cNotPt:
>> + return Hexagon::POST_STbri_cNotPt_nv_V4;
>> +
>> + case Hexagon::POST_STbri_cdnNotPt_V4:
>> + return Hexagon::POST_STbri_cdnNotPt_nv_V4;
>> +
>> + case Hexagon::STb_GP_cPt_V4:
>> + return Hexagon::STb_GP_cPt_nv_V4;
>> +
>> + case Hexagon::STb_GP_cNotPt_V4:
>> + return Hexagon::STb_GP_cNotPt_nv_V4;
>> +
>> + case Hexagon::STb_GP_cdnPt_V4:
>> + return Hexagon::STb_GP_cdnPt_nv_V4;
>> +
>> + case Hexagon::STb_GP_cdnNotPt_V4:
>> + return Hexagon::STb_GP_cdnNotPt_nv_V4;
>> +
>> + case Hexagon::STrib_GP_cPt_V4:
>> + return Hexagon::STrib_GP_cPt_nv_V4;
>> +
>> + case Hexagon::STrib_GP_cNotPt_V4:
>> + return Hexagon::STrib_GP_cNotPt_nv_V4;
>> +
>> + case Hexagon::STrib_GP_cdnPt_V4:
>> + return Hexagon::STrib_GP_cdnPt_nv_V4;
>> +
>> + case Hexagon::STrib_GP_cdnNotPt_V4:
>> + return Hexagon::STrib_GP_cdnNotPt_nv_V4;
>> +
>> + // store new value halfword
>> + case Hexagon::STrih:
>> + return Hexagon::STrih_nv_V4;
>> +
>> + case Hexagon::STrih_indexed:
>> + return Hexagon::STrih_indexed_nv_V4;
>> +
>> + case Hexagon::STrih_indexed_shl_V4:
>> + return Hexagon::STrih_indexed_shl_nv_V4;
>> +
>> + case Hexagon::STrih_shl_V4:
>> + return Hexagon::STrih_shl_nv_V4;
>> +
>> + case Hexagon::STrih_GP_V4:
>> + return Hexagon::STrih_GP_nv_V4;
>> +
>> + case Hexagon::STh_GP_V4:
>> + return Hexagon::STh_GP_nv_V4;
>> +
>> + case Hexagon::POST_SThri:
>> + return Hexagon::POST_SThri_nv_V4;
>> +
>> + case Hexagon::STrih_cPt:
>> + return Hexagon::STrih_cPt_nv_V4;
>> +
>> + case Hexagon::STrih_cdnPt_V4:
>> + return Hexagon::STrih_cdnPt_nv_V4;
>> +
>> + case Hexagon::STrih_cNotPt:
>> + return Hexagon::STrih_cNotPt_nv_V4;
>> +
>> + case Hexagon::STrih_cdnNotPt_V4:
>> + return Hexagon::STrih_cdnNotPt_nv_V4;
>> +
>> + case Hexagon::STrih_indexed_cPt:
>> + return Hexagon::STrih_indexed_cPt_nv_V4;
>> +
>> + case Hexagon::STrih_indexed_cdnPt_V4:
>> + return Hexagon::STrih_indexed_cdnPt_nv_V4;
>> +
>> + case Hexagon::STrih_indexed_cNotPt:
>> + return Hexagon::STrih_indexed_cNotPt_nv_V4;
>> +
>> + case Hexagon::STrih_indexed_cdnNotPt_V4:
>> + return Hexagon::STrih_indexed_cdnNotPt_nv_V4;
>> +
>> + case Hexagon::STrih_indexed_shl_cPt_V4:
>> + return Hexagon::STrih_indexed_shl_cPt_nv_V4;
>> +
>> + case Hexagon::STrih_indexed_shl_cdnPt_V4:
>> + return Hexagon::STrih_indexed_shl_cdnPt_nv_V4;
>> +
>> + case Hexagon::STrih_indexed_shl_cNotPt_V4:
>> + return Hexagon::STrih_indexed_shl_cNotPt_nv_V4;
>> +
>> + case Hexagon::STrih_indexed_shl_cdnNotPt_V4:
>> + return Hexagon::STrih_indexed_shl_cdnNotPt_nv_V4;
>> +
>> + case Hexagon::POST_SThri_cPt:
>> + return Hexagon::POST_SThri_cPt_nv_V4;
>> +
>> + case Hexagon::POST_SThri_cdnPt_V4:
>> + return Hexagon::POST_SThri_cdnPt_nv_V4;
>> +
>> + case Hexagon::POST_SThri_cNotPt:
>> + return Hexagon::POST_SThri_cNotPt_nv_V4;
>> +
>> + case Hexagon::POST_SThri_cdnNotPt_V4:
>> + return Hexagon::POST_SThri_cdnNotPt_nv_V4;
>> +
>> + case Hexagon::STh_GP_cPt_V4:
>> + return Hexagon::STh_GP_cPt_nv_V4;
>> +
>> + case Hexagon::STh_GP_cNotPt_V4:
>> + return Hexagon::STh_GP_cNotPt_nv_V4;
>> +
>> + case Hexagon::STh_GP_cdnPt_V4:
>> + return Hexagon::STh_GP_cdnPt_nv_V4;
>> +
>> + case Hexagon::STh_GP_cdnNotPt_V4:
>> + return Hexagon::STh_GP_cdnNotPt_nv_V4;
>> +
>> + case Hexagon::STrih_GP_cPt_V4:
>> + return Hexagon::STrih_GP_cPt_nv_V4;
>> +
>> + case Hexagon::STrih_GP_cNotPt_V4:
>> + return Hexagon::STrih_GP_cNotPt_nv_V4;
>> +
>> + case Hexagon::STrih_GP_cdnPt_V4:
>> + return Hexagon::STrih_GP_cdnPt_nv_V4;
>> +
>> + case Hexagon::STrih_GP_cdnNotPt_V4:
>> + return Hexagon::STrih_GP_cdnNotPt_nv_V4;
>> +
>> + // store new value word
>> + case Hexagon::STriw:
>> + return Hexagon::STriw_nv_V4;
>> +
>> + case Hexagon::STriw_indexed:
>> + return Hexagon::STriw_indexed_nv_V4;
>> +
>> + case Hexagon::STriw_indexed_shl_V4:
>> + return Hexagon::STriw_indexed_shl_nv_V4;
>> +
>> + case Hexagon::STriw_shl_V4:
>> + return Hexagon::STriw_shl_nv_V4;
>> +
>> + case Hexagon::STriw_GP_V4:
>> + return Hexagon::STriw_GP_nv_V4;
>> +
>> + case Hexagon::STw_GP_V4:
>> + return Hexagon::STw_GP_nv_V4;
>> +
>> + case Hexagon::POST_STwri:
>> + return Hexagon::POST_STwri_nv_V4;
>> +
>> + case Hexagon::STriw_cPt:
>> + return Hexagon::STriw_cPt_nv_V4;
>> +
>> + case Hexagon::STriw_cdnPt_V4:
>> + return Hexagon::STriw_cdnPt_nv_V4;
>> +
>> + case Hexagon::STriw_cNotPt:
>> + return Hexagon::STriw_cNotPt_nv_V4;
>> +
>> + case Hexagon::STriw_cdnNotPt_V4:
>> + return Hexagon::STriw_cdnNotPt_nv_V4;
>> +
>> + case Hexagon::STriw_indexed_cPt:
>> + return Hexagon::STriw_indexed_cPt_nv_V4;
>> +
>> + case Hexagon::STriw_indexed_cdnPt_V4:
>> + return Hexagon::STriw_indexed_cdnPt_nv_V4;
>> +
>> + case Hexagon::STriw_indexed_cNotPt:
>> + return Hexagon::STriw_indexed_cNotPt_nv_V4;
>> +
>> + case Hexagon::STriw_indexed_cdnNotPt_V4:
>> + return Hexagon::STriw_indexed_cdnNotPt_nv_V4;
>> +
>> + case Hexagon::STriw_indexed_shl_cPt_V4:
>> + return Hexagon::STriw_indexed_shl_cPt_nv_V4;
>> +
>> + case Hexagon::STriw_indexed_shl_cdnPt_V4:
>> + return Hexagon::STriw_indexed_shl_cdnPt_nv_V4;
>> +
>> + case Hexagon::STriw_indexed_shl_cNotPt_V4:
>> + return Hexagon::STriw_indexed_shl_cNotPt_nv_V4;
>> +
>> + case Hexagon::STriw_indexed_shl_cdnNotPt_V4:
>> + return Hexagon::STriw_indexed_shl_cdnNotPt_nv_V4;
>> +
>> + case Hexagon::POST_STwri_cPt:
>> + return Hexagon::POST_STwri_cPt_nv_V4;
>> +
>> + case Hexagon::POST_STwri_cdnPt_V4:
>> + return Hexagon::POST_STwri_cdnPt_nv_V4;
>> +
>> + case Hexagon::POST_STwri_cNotPt:
>> + return Hexagon::POST_STwri_cNotPt_nv_V4;
>> +
>> + case Hexagon::POST_STwri_cdnNotPt_V4:
>> + return Hexagon::POST_STwri_cdnNotPt_nv_V4;
>> +
>> + case Hexagon::STw_GP_cPt_V4:
>> + return Hexagon::STw_GP_cPt_nv_V4;
>> +
>> + case Hexagon::STw_GP_cNotPt_V4:
>> + return Hexagon::STw_GP_cNotPt_nv_V4;
>> +
>> + case Hexagon::STw_GP_cdnPt_V4:
>> + return Hexagon::STw_GP_cdnPt_nv_V4;
>> +
>> + case Hexagon::STw_GP_cdnNotPt_V4:
>> + return Hexagon::STw_GP_cdnNotPt_nv_V4;
>> +
>> + case Hexagon::STriw_GP_cPt_V4:
>> + return Hexagon::STriw_GP_cPt_nv_V4;
>> +
>> + case Hexagon::STriw_GP_cNotPt_V4:
>> + return Hexagon::STriw_GP_cNotPt_nv_V4;
>> +
>> + case Hexagon::STriw_GP_cdnPt_V4:
>> + return Hexagon::STriw_GP_cdnPt_nv_V4;
>> +
>> + case Hexagon::STriw_GP_cdnNotPt_V4:
>> + return Hexagon::STriw_GP_cdnNotPt_nv_V4;
>> + }
>> +}
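
For context, GetDotNewOp() is the lookup table used once the packetizer has proven that the store's data feeder lives in the same packet; the store is then rewritten to its new-value opcode. Roughly (illustrative sketch, not the exact promotion code in this patch):

    // Promote a store whose feeder is in the same packet to its .new form.
    if (IsNewifyStore(MI)) {
      const HexagonInstrInfo *QII = (const HexagonInstrInfo *) TII;
      MI->setDesc(QII->get(GetDotNewOp(MI->getOpcode())));
    }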
>> +
>> +// Return .new predicate version for an instruction
>> +static int GetDotNewPredOp(const int opc) {
>> + switch (opc) {
>> + default: llvm_unreachable("Unknown .new type");
>> + // Conditional stores
>> + // Store byte conditionally
>> + case Hexagon::STrib_cPt :
>> + return Hexagon::STrib_cdnPt_V4;
>> +
>> + case Hexagon::STrib_cNotPt :
>> + return Hexagon::STrib_cdnNotPt_V4;
>> +
>> + case Hexagon::STrib_indexed_cPt :
>> + return Hexagon::STrib_indexed_cdnPt_V4;
>> +
>> + case Hexagon::STrib_indexed_cNotPt :
>> + return Hexagon::STrib_indexed_cdnNotPt_V4;
>> +
>> + case Hexagon::STrib_imm_cPt_V4 :
>> + return Hexagon::STrib_imm_cdnPt_V4;
>> +
>> + case Hexagon::STrib_imm_cNotPt_V4 :
>> + return Hexagon::STrib_imm_cdnNotPt_V4;
>> +
>> + case Hexagon::POST_STbri_cPt :
>> + return Hexagon::POST_STbri_cdnPt_V4;
>> +
>> + case Hexagon::POST_STbri_cNotPt :
>> + return Hexagon::POST_STbri_cdnNotPt_V4;
>> +
>> + case Hexagon::STrib_indexed_shl_cPt_V4 :
>> + return Hexagon::STrib_indexed_shl_cdnPt_V4;
>> +
>> + case Hexagon::STrib_indexed_shl_cNotPt_V4 :
>> + return Hexagon::STrib_indexed_shl_cdnNotPt_V4;
>> +
>> + case Hexagon::STb_GP_cPt_V4 :
>> + return Hexagon::STb_GP_cdnPt_V4;
>> +
>> + case Hexagon::STb_GP_cNotPt_V4 :
>> + return Hexagon::STb_GP_cdnNotPt_V4;
>> +
>> + case Hexagon::STrib_GP_cPt_V4 :
>> + return Hexagon::STrib_GP_cdnPt_V4;
>> +
>> + case Hexagon::STrib_GP_cNotPt_V4 :
>> + return Hexagon::STrib_GP_cdnNotPt_V4;
>> +
>> + // Store doubleword conditionally
>> + case Hexagon::STrid_cPt :
>> + return Hexagon::STrid_cdnPt_V4;
>> +
>> + case Hexagon::STrid_cNotPt :
>> + return Hexagon::STrid_cdnNotPt_V4;
>> +
>> + case Hexagon::STrid_indexed_cPt :
>> + return Hexagon::STrid_indexed_cdnPt_V4;
>> +
>> + case Hexagon::STrid_indexed_cNotPt :
>> + return Hexagon::STrid_indexed_cdnNotPt_V4;
>> +
>> + case Hexagon::STrid_indexed_shl_cPt_V4 :
>> + return Hexagon::STrid_indexed_shl_cdnPt_V4;
>> +
>> + case Hexagon::STrid_indexed_shl_cNotPt_V4 :
>> + return Hexagon::STrid_indexed_shl_cdnNotPt_V4;
>> +
>> + case Hexagon::POST_STdri_cPt :
>> + return Hexagon::POST_STdri_cdnPt_V4;
>> +
>> + case Hexagon::POST_STdri_cNotPt :
>> + return Hexagon::POST_STdri_cdnNotPt_V4;
>> +
>> + case Hexagon::STd_GP_cPt_V4 :
>> + return Hexagon::STd_GP_cdnPt_V4;
>> +
>> + case Hexagon::STd_GP_cNotPt_V4 :
>> + return Hexagon::STd_GP_cdnNotPt_V4;
>> +
>> + case Hexagon::STrid_GP_cPt_V4 :
>> + return Hexagon::STrid_GP_cdnPt_V4;
>> +
>> + case Hexagon::STrid_GP_cNotPt_V4 :
>> + return Hexagon::STrid_GP_cdnNotPt_V4;
>> +
>> + // Store halfword conditionally
>> + case Hexagon::STrih_cPt :
>> + return Hexagon::STrih_cdnPt_V4;
>> +
>> + case Hexagon::STrih_cNotPt :
>> + return Hexagon::STrih_cdnNotPt_V4;
>> +
>> + case Hexagon::STrih_indexed_cPt :
>> + return Hexagon::STrih_indexed_cdnPt_V4;
>> +
>> + case Hexagon::STrih_indexed_cNotPt :
>> + return Hexagon::STrih_indexed_cdnNotPt_V4;
>> +
>> + case Hexagon::STrih_imm_cPt_V4 :
>> + return Hexagon::STrih_imm_cdnPt_V4;
>> +
>> + case Hexagon::STrih_imm_cNotPt_V4 :
>> + return Hexagon::STrih_imm_cdnNotPt_V4;
>> +
>> + case Hexagon::STrih_indexed_shl_cPt_V4 :
>> + return Hexagon::STrih_indexed_shl_cdnPt_V4;
>> +
>> + case Hexagon::STrih_indexed_shl_cNotPt_V4 :
>> + return Hexagon::STrih_indexed_shl_cdnNotPt_V4;
>> +
>> + case Hexagon::POST_SThri_cPt :
>> + return Hexagon::POST_SThri_cdnPt_V4;
>> +
>> + case Hexagon::POST_SThri_cNotPt :
>> + return Hexagon::POST_SThri_cdnNotPt_V4;
>> +
>> + case Hexagon::STh_GP_cPt_V4 :
>> + return Hexagon::STh_GP_cdnPt_V4;
>> +
>> + case Hexagon::STh_GP_cNotPt_V4 :
>> + return Hexagon::STh_GP_cdnNotPt_V4;
>> +
>> + case Hexagon::STrih_GP_cPt_V4 :
>> + return Hexagon::STrih_GP_cdnPt_V4;
>> +
>> + case Hexagon::STrih_GP_cNotPt_V4 :
>> + return Hexagon::STrih_GP_cdnNotPt_V4;
>> +
>> + // Store word conditionally
>> + case Hexagon::STriw_cPt :
>> + return Hexagon::STriw_cdnPt_V4;
>> +
>> + case Hexagon::STriw_cNotPt :
>> + return Hexagon::STriw_cdnNotPt_V4;
>> +
>> + case Hexagon::STriw_indexed_cPt :
>> + return Hexagon::STriw_indexed_cdnPt_V4;
>> +
>> + case Hexagon::STriw_indexed_cNotPt :
>> + return Hexagon::STriw_indexed_cdnNotPt_V4;
>> +
>> + case Hexagon::STriw_imm_cPt_V4 :
>> + return Hexagon::STriw_imm_cdnPt_V4;
>> +
>> + case Hexagon::STriw_imm_cNotPt_V4 :
>> + return Hexagon::STriw_imm_cdnNotPt_V4;
>> +
>> + case Hexagon::STriw_indexed_shl_cPt_V4 :
>> + return Hexagon::STriw_indexed_shl_cdnPt_V4;
>> +
>> + case Hexagon::STriw_indexed_shl_cNotPt_V4 :
>> + return Hexagon::STriw_indexed_shl_cdnNotPt_V4;
>> +
>> + case Hexagon::POST_STwri_cPt :
>> + return Hexagon::POST_STwri_cdnPt_V4;
>> +
>> + case Hexagon::POST_STwri_cNotPt :
>> + return Hexagon::POST_STwri_cdnNotPt_V4;
>> +
>> + case Hexagon::STw_GP_cPt_V4 :
>> + return Hexagon::STw_GP_cdnPt_V4;
>> +
>> + case Hexagon::STw_GP_cNotPt_V4 :
>> + return Hexagon::STw_GP_cdnNotPt_V4;
>> +
>> + case Hexagon::STriw_GP_cPt_V4 :
>> + return Hexagon::STriw_GP_cdnPt_V4;
>> +
>> + case Hexagon::STriw_GP_cNotPt_V4 :
>> + return Hexagon::STriw_GP_cdnNotPt_V4;
>> +
>> +  // Conditional Jumps
>> + case Hexagon::JMP_c:
>> + return Hexagon::JMP_cdnPt;
>> +
>> + case Hexagon::JMP_cNot:
>> + return Hexagon::JMP_cdnNotPt;
>> +
>> + case Hexagon::JMPR_cPt:
>> + return Hexagon::JMPR_cdnPt_V3;
>> +
>> + case Hexagon::JMPR_cNotPt:
>> + return Hexagon::JMPR_cdnNotPt_V3;
>> +
>> + // Conditional Transfers
>> + case Hexagon::TFR_cPt:
>> + return Hexagon::TFR_cdnPt;
>> +
>> + case Hexagon::TFR_cNotPt:
>> + return Hexagon::TFR_cdnNotPt;
>> +
>> + case Hexagon::TFRI_cPt:
>> + return Hexagon::TFRI_cdnPt;
>> +
>> + case Hexagon::TFRI_cNotPt:
>> + return Hexagon::TFRI_cdnNotPt;
>> +
>> + // Load double word
>> + case Hexagon::LDrid_cPt :
>> + return Hexagon::LDrid_cdnPt;
>> +
>> + case Hexagon::LDrid_cNotPt :
>> + return Hexagon::LDrid_cdnNotPt;
>> +
>> + case Hexagon::LDrid_indexed_cPt :
>> + return Hexagon::LDrid_indexed_cdnPt;
>> +
>> + case Hexagon::LDrid_indexed_cNotPt :
>> + return Hexagon::LDrid_indexed_cdnNotPt;
>> +
>> + case Hexagon::POST_LDrid_cPt :
>> + return Hexagon::POST_LDrid_cdnPt_V4;
>> +
>> + case Hexagon::POST_LDrid_cNotPt :
>> + return Hexagon::POST_LDrid_cdnNotPt_V4;
>> +
>> + // Load word
>> + case Hexagon::LDriw_cPt :
>> + return Hexagon::LDriw_cdnPt;
>> +
>> + case Hexagon::LDriw_cNotPt :
>> + return Hexagon::LDriw_cdnNotPt;
>> +
>> + case Hexagon::LDriw_indexed_cPt :
>> + return Hexagon::LDriw_indexed_cdnPt;
>> +
>> + case Hexagon::LDriw_indexed_cNotPt :
>> + return Hexagon::LDriw_indexed_cdnNotPt;
>> +
>> + case Hexagon::POST_LDriw_cPt :
>> + return Hexagon::POST_LDriw_cdnPt_V4;
>> +
>> + case Hexagon::POST_LDriw_cNotPt :
>> + return Hexagon::POST_LDriw_cdnNotPt_V4;
>> +
>> + // Load halfword
>> + case Hexagon::LDrih_cPt :
>> + return Hexagon::LDrih_cdnPt;
>> +
>> + case Hexagon::LDrih_cNotPt :
>> + return Hexagon::LDrih_cdnNotPt;
>> +
>> + case Hexagon::LDrih_indexed_cPt :
>> + return Hexagon::LDrih_indexed_cdnPt;
>> +
>> + case Hexagon::LDrih_indexed_cNotPt :
>> + return Hexagon::LDrih_indexed_cdnNotPt;
>> +
>> + case Hexagon::POST_LDrih_cPt :
>> + return Hexagon::POST_LDrih_cdnPt_V4;
>> +
>> + case Hexagon::POST_LDrih_cNotPt :
>> + return Hexagon::POST_LDrih_cdnNotPt_V4;
>> +
>> + // Load byte
>> + case Hexagon::LDrib_cPt :
>> + return Hexagon::LDrib_cdnPt;
>> +
>> + case Hexagon::LDrib_cNotPt :
>> + return Hexagon::LDrib_cdnNotPt;
>> +
>> + case Hexagon::LDrib_indexed_cPt :
>> + return Hexagon::LDrib_indexed_cdnPt;
>> +
>> + case Hexagon::LDrib_indexed_cNotPt :
>> + return Hexagon::LDrib_indexed_cdnNotPt;
>> +
>> + case Hexagon::POST_LDrib_cPt :
>> + return Hexagon::POST_LDrib_cdnPt_V4;
>> +
>> + case Hexagon::POST_LDrib_cNotPt :
>> + return Hexagon::POST_LDrib_cdnNotPt_V4;
>> +
>> + // Load unsigned halfword
>> + case Hexagon::LDriuh_cPt :
>> + return Hexagon::LDriuh_cdnPt;
>> +
>> + case Hexagon::LDriuh_cNotPt :
>> + return Hexagon::LDriuh_cdnNotPt;
>> +
>> + case Hexagon::LDriuh_indexed_cPt :
>> + return Hexagon::LDriuh_indexed_cdnPt;
>> +
>> + case Hexagon::LDriuh_indexed_cNotPt :
>> + return Hexagon::LDriuh_indexed_cdnNotPt;
>> +
>> + case Hexagon::POST_LDriuh_cPt :
>> + return Hexagon::POST_LDriuh_cdnPt_V4;
>> +
>> + case Hexagon::POST_LDriuh_cNotPt :
>> + return Hexagon::POST_LDriuh_cdnNotPt_V4;
>> +
>> + // Load unsigned byte
>> + case Hexagon::LDriub_cPt :
>> + return Hexagon::LDriub_cdnPt;
>> +
>> + case Hexagon::LDriub_cNotPt :
>> + return Hexagon::LDriub_cdnNotPt;
>> +
>> + case Hexagon::LDriub_indexed_cPt :
>> + return Hexagon::LDriub_indexed_cdnPt;
>> +
>> + case Hexagon::LDriub_indexed_cNotPt :
>> + return Hexagon::LDriub_indexed_cdnNotPt;
>> +
>> + case Hexagon::POST_LDriub_cPt :
>> + return Hexagon::POST_LDriub_cdnPt_V4;
>> +
>> + case Hexagon::POST_LDriub_cNotPt :
>> + return Hexagon::POST_LDriub_cdnNotPt_V4;
>> +
>> + // V4 indexed+scaled load
>> +
>> + case Hexagon::LDrid_indexed_cPt_V4 :
>> + return Hexagon::LDrid_indexed_cdnPt_V4;
>> +
>> + case Hexagon::LDrid_indexed_cNotPt_V4 :
>> + return Hexagon::LDrid_indexed_cdnNotPt_V4;
>> +
>> + case Hexagon::LDrid_indexed_shl_cPt_V4 :
>> + return Hexagon::LDrid_indexed_shl_cdnPt_V4;
>> +
>> + case Hexagon::LDrid_indexed_shl_cNotPt_V4 :
>> + return Hexagon::LDrid_indexed_shl_cdnNotPt_V4;
>> +
>> + case Hexagon::LDrib_indexed_cPt_V4 :
>> + return Hexagon::LDrib_indexed_cdnPt_V4;
>> +
>> + case Hexagon::LDrib_indexed_cNotPt_V4 :
>> + return Hexagon::LDrib_indexed_cdnNotPt_V4;
>> +
>> + case Hexagon::LDrib_indexed_shl_cPt_V4 :
>> + return Hexagon::LDrib_indexed_shl_cdnPt_V4;
>> +
>> + case Hexagon::LDrib_indexed_shl_cNotPt_V4 :
>> + return Hexagon::LDrib_indexed_shl_cdnNotPt_V4;
>> +
>> + case Hexagon::LDriub_indexed_cPt_V4 :
>> + return Hexagon::LDriub_indexed_cdnPt_V4;
>> +
>> + case Hexagon::LDriub_indexed_cNotPt_V4 :
>> + return Hexagon::LDriub_indexed_cdnNotPt_V4;
>> +
>> + case Hexagon::LDriub_indexed_shl_cPt_V4 :
>> + return Hexagon::LDriub_indexed_shl_cdnPt_V4;
>> +
>> + case Hexagon::LDriub_indexed_shl_cNotPt_V4 :
>> + return Hexagon::LDriub_indexed_shl_cdnNotPt_V4;
>> +
>> + case Hexagon::LDrih_indexed_cPt_V4 :
>> + return Hexagon::LDrih_indexed_cdnPt_V4;
>> +
>> + case Hexagon::LDrih_indexed_cNotPt_V4 :
>> + return Hexagon::LDrih_indexed_cdnNotPt_V4;
>> +
>> + case Hexagon::LDrih_indexed_shl_cPt_V4 :
>> + return Hexagon::LDrih_indexed_shl_cdnPt_V4;
>> +
>> + case Hexagon::LDrih_indexed_shl_cNotPt_V4 :
>> + return Hexagon::LDrih_indexed_shl_cdnNotPt_V4;
>> +
>> + case Hexagon::LDriuh_indexed_cPt_V4 :
>> + return Hexagon::LDriuh_indexed_cdnPt_V4;
>> +
>> + case Hexagon::LDriuh_indexed_cNotPt_V4 :
>> + return Hexagon::LDriuh_indexed_cdnNotPt_V4;
>> +
>> + case Hexagon::LDriuh_indexed_shl_cPt_V4 :
>> + return Hexagon::LDriuh_indexed_shl_cdnPt_V4;
>> +
>> + case Hexagon::LDriuh_indexed_shl_cNotPt_V4 :
>> + return Hexagon::LDriuh_indexed_shl_cdnNotPt_V4;
>> +
>> + case Hexagon::LDriw_indexed_cPt_V4 :
>> + return Hexagon::LDriw_indexed_cdnPt_V4;
>> +
>> + case Hexagon::LDriw_indexed_cNotPt_V4 :
>> + return Hexagon::LDriw_indexed_cdnNotPt_V4;
>> +
>> + case Hexagon::LDriw_indexed_shl_cPt_V4 :
>> + return Hexagon::LDriw_indexed_shl_cdnPt_V4;
>> +
>> + case Hexagon::LDriw_indexed_shl_cNotPt_V4 :
>> + return Hexagon::LDriw_indexed_shl_cdnNotPt_V4;
>> +
>> + // V4 global address load
>> +
>> + case Hexagon::LDd_GP_cPt_V4:
>> + return Hexagon::LDd_GP_cdnPt_V4;
>> +
>> + case Hexagon::LDd_GP_cNotPt_V4:
>> + return Hexagon::LDd_GP_cdnNotPt_V4;
>> +
>> + case Hexagon::LDb_GP_cPt_V4:
>> + return Hexagon::LDb_GP_cdnPt_V4;
>> +
>> + case Hexagon::LDb_GP_cNotPt_V4:
>> + return Hexagon::LDb_GP_cdnNotPt_V4;
>> +
>> + case Hexagon::LDub_GP_cPt_V4:
>> + return Hexagon::LDub_GP_cdnPt_V4;
>> +
>> + case Hexagon::LDub_GP_cNotPt_V4:
>> + return Hexagon::LDub_GP_cdnNotPt_V4;
>> +
>> + case Hexagon::LDh_GP_cPt_V4:
>> + return Hexagon::LDh_GP_cdnPt_V4;
>> +
>> + case Hexagon::LDh_GP_cNotPt_V4:
>> + return Hexagon::LDh_GP_cdnNotPt_V4;
>> +
>> + case Hexagon::LDuh_GP_cPt_V4:
>> + return Hexagon::LDuh_GP_cdnPt_V4;
>> +
>> + case Hexagon::LDuh_GP_cNotPt_V4:
>> + return Hexagon::LDuh_GP_cdnNotPt_V4;
>> +
>> + case Hexagon::LDw_GP_cPt_V4:
>> + return Hexagon::LDw_GP_cdnPt_V4;
>> +
>> + case Hexagon::LDw_GP_cNotPt_V4:
>> + return Hexagon::LDw_GP_cdnNotPt_V4;
>> +
>> + case Hexagon::LDrid_GP_cPt_V4:
>> + return Hexagon::LDrid_GP_cdnPt_V4;
>> +
>> + case Hexagon::LDrid_GP_cNotPt_V4:
>> + return Hexagon::LDrid_GP_cdnNotPt_V4;
>> +
>> + case Hexagon::LDrib_GP_cPt_V4:
>> + return Hexagon::LDrib_GP_cdnPt_V4;
>> +
>> + case Hexagon::LDrib_GP_cNotPt_V4:
>> + return Hexagon::LDrib_GP_cdnNotPt_V4;
>> +
>> + case Hexagon::LDriub_GP_cPt_V4:
>> + return Hexagon::LDriub_GP_cdnPt_V4;
>> +
>> + case Hexagon::LDriub_GP_cNotPt_V4:
>> + return Hexagon::LDriub_GP_cdnNotPt_V4;
>> +
>> + case Hexagon::LDrih_GP_cPt_V4:
>> + return Hexagon::LDrih_GP_cdnPt_V4;
>> +
>> + case Hexagon::LDrih_GP_cNotPt_V4:
>> + return Hexagon::LDrih_GP_cdnNotPt_V4;
>> +
>> + case Hexagon::LDriuh_GP_cPt_V4:
>> + return Hexagon::LDriuh_GP_cdnPt_V4;
>> +
>> + case Hexagon::LDriuh_GP_cNotPt_V4:
>> + return Hexagon::LDriuh_GP_cdnNotPt_V4;
>> +
>> + case Hexagon::LDriw_GP_cPt_V4:
>> + return Hexagon::LDriw_GP_cdnPt_V4;
>> +
>> + case Hexagon::LDriw_GP_cNotPt_V4:
>> + return Hexagon::LDriw_GP_cdnNotPt_V4;
>> +
>> + // Conditional store new-value byte
>> + case Hexagon::STrib_cPt_nv_V4 :
>> + return Hexagon::STrib_cdnPt_nv_V4;
>> + case Hexagon::STrib_cNotPt_nv_V4 :
>> + return Hexagon::STrib_cdnNotPt_nv_V4;
>> +
>> + case Hexagon::STrib_indexed_cPt_nv_V4 :
>> + return Hexagon::STrib_indexed_cdnPt_nv_V4;
>> + case Hexagon::STrib_indexed_cNotPt_nv_V4 :
>> + return Hexagon::STrib_indexed_cdnNotPt_nv_V4;
>> +
>> + case Hexagon::STrib_indexed_shl_cPt_nv_V4 :
>> + return Hexagon::STrib_indexed_shl_cdnPt_nv_V4;
>> + case Hexagon::STrib_indexed_shl_cNotPt_nv_V4 :
>> + return Hexagon::STrib_indexed_shl_cdnNotPt_nv_V4;
>> +
>> + case Hexagon::POST_STbri_cPt_nv_V4 :
>> + return Hexagon::POST_STbri_cdnPt_nv_V4;
>> + case Hexagon::POST_STbri_cNotPt_nv_V4 :
>> + return Hexagon::POST_STbri_cdnNotPt_nv_V4;
>> +
>> + case Hexagon::STb_GP_cPt_nv_V4 :
>> + return Hexagon::STb_GP_cdnPt_nv_V4;
>> +
>> + case Hexagon::STb_GP_cNotPt_nv_V4 :
>> + return Hexagon::STb_GP_cdnNotPt_nv_V4;
>> +
>> + case Hexagon::STrib_GP_cPt_nv_V4 :
>> + return Hexagon::STrib_GP_cdnPt_nv_V4;
>> +
>> + case Hexagon::STrib_GP_cNotPt_nv_V4 :
>> + return Hexagon::STrib_GP_cdnNotPt_nv_V4;
>> +
>> + // Conditional store new-value halfword
>> + case Hexagon::STrih_cPt_nv_V4 :
>> + return Hexagon::STrih_cdnPt_nv_V4;
>> + case Hexagon::STrih_cNotPt_nv_V4 :
>> + return Hexagon::STrih_cdnNotPt_nv_V4;
>> +
>> + case Hexagon::STrih_indexed_cPt_nv_V4 :
>> + return Hexagon::STrih_indexed_cdnPt_nv_V4;
>> + case Hexagon::STrih_indexed_cNotPt_nv_V4 :
>> + return Hexagon::STrih_indexed_cdnNotPt_nv_V4;
>> +
>> + case Hexagon::STrih_indexed_shl_cPt_nv_V4 :
>> + return Hexagon::STrih_indexed_shl_cdnPt_nv_V4;
>> + case Hexagon::STrih_indexed_shl_cNotPt_nv_V4 :
>> + return Hexagon::STrih_indexed_shl_cdnNotPt_nv_V4;
>> +
>> + case Hexagon::POST_SThri_cPt_nv_V4 :
>> + return Hexagon::POST_SThri_cdnPt_nv_V4;
>> + case Hexagon::POST_SThri_cNotPt_nv_V4 :
>> + return Hexagon::POST_SThri_cdnNotPt_nv_V4;
>> +
>> + case Hexagon::STh_GP_cPt_nv_V4 :
>> + return Hexagon::STh_GP_cdnPt_nv_V4;
>> +
>> + case Hexagon::STh_GP_cNotPt_nv_V4 :
>> + return Hexagon::STh_GP_cdnNotPt_nv_V4;
>> +
>> + case Hexagon::STrih_GP_cPt_nv_V4 :
>> + return Hexagon::STrih_GP_cdnPt_nv_V4;
>> +
>> + case Hexagon::STrih_GP_cNotPt_nv_V4 :
>> + return Hexagon::STrih_GP_cdnNotPt_nv_V4;
>> +
>> + // Conditional store new-value word
>> + case Hexagon::STriw_cPt_nv_V4 :
>> + return Hexagon::STriw_cdnPt_nv_V4;
>> + case Hexagon::STriw_cNotPt_nv_V4 :
>> + return Hexagon::STriw_cdnNotPt_nv_V4;
>> +
>> + case Hexagon::STriw_indexed_cPt_nv_V4 :
>> + return Hexagon::STriw_indexed_cdnPt_nv_V4;
>> + case Hexagon::STriw_indexed_cNotPt_nv_V4 :
>> + return Hexagon::STriw_indexed_cdnNotPt_nv_V4;
>> +
>> + case Hexagon::STriw_indexed_shl_cPt_nv_V4 :
>> + return Hexagon::STriw_indexed_shl_cdnPt_nv_V4;
>> + case Hexagon::STriw_indexed_shl_cNotPt_nv_V4 :
>> + return Hexagon::STriw_indexed_shl_cdnNotPt_nv_V4;
>> +
>> + case Hexagon::POST_STwri_cPt_nv_V4 :
>> + return Hexagon::POST_STwri_cdnPt_nv_V4;
>> + case Hexagon::POST_STwri_cNotPt_nv_V4:
>> + return Hexagon::POST_STwri_cdnNotPt_nv_V4;
>> +
>> + case Hexagon::STw_GP_cPt_nv_V4 :
>> + return Hexagon::STw_GP_cdnPt_nv_V4;
>> +
>> + case Hexagon::STw_GP_cNotPt_nv_V4 :
>> + return Hexagon::STw_GP_cdnNotPt_nv_V4;
>> +
>> + case Hexagon::STriw_GP_cPt_nv_V4 :
>> + return Hexagon::STriw_GP_cdnPt_nv_V4;
>> +
>> + case Hexagon::STriw_GP_cNotPt_nv_V4 :
>> + return Hexagon::STriw_GP_cdnNotPt_nv_V4;
>> +
>> + // Conditional add
>> + case Hexagon::ADD_ri_cPt :
>> + return Hexagon::ADD_ri_cdnPt;
>> + case Hexagon::ADD_ri_cNotPt :
>> + return Hexagon::ADD_ri_cdnNotPt;
>> +
>> + case Hexagon::ADD_rr_cPt :
>> + return Hexagon::ADD_rr_cdnPt;
>> + case Hexagon::ADD_rr_cNotPt :
>> + return Hexagon::ADD_rr_cdnNotPt;
>> +
>> + // Conditional logical Operations
>> + case Hexagon::XOR_rr_cPt :
>> + return Hexagon::XOR_rr_cdnPt;
>> + case Hexagon::XOR_rr_cNotPt :
>> + return Hexagon::XOR_rr_cdnNotPt;
>> +
>> + case Hexagon::AND_rr_cPt :
>> + return Hexagon::AND_rr_cdnPt;
>> + case Hexagon::AND_rr_cNotPt :
>> + return Hexagon::AND_rr_cdnNotPt;
>> +
>> + case Hexagon::OR_rr_cPt :
>> + return Hexagon::OR_rr_cdnPt;
>> + case Hexagon::OR_rr_cNotPt :
>> + return Hexagon::OR_rr_cdnNotPt;
>> +
>> + // Conditional Subtract
>> + case Hexagon::SUB_rr_cPt :
>> + return Hexagon::SUB_rr_cdnPt;
>> + case Hexagon::SUB_rr_cNotPt :
>> + return Hexagon::SUB_rr_cdnNotPt;
>> +
>> + // Conditional combine
>> + case Hexagon::COMBINE_rr_cPt :
>> + return Hexagon::COMBINE_rr_cdnPt;
>> + case Hexagon::COMBINE_rr_cNotPt :
>> + return Hexagon::COMBINE_rr_cdnNotPt;
>> +
>> + case Hexagon::ASLH_cPt_V4 :
>> + return Hexagon::ASLH_cdnPt_V4;
>> + case Hexagon::ASLH_cNotPt_V4 :
>> + return Hexagon::ASLH_cdnNotPt_V4;
>> +
>> + case Hexagon::ASRH_cPt_V4 :
>> + return Hexagon::ASRH_cdnPt_V4;
>> + case Hexagon::ASRH_cNotPt_V4 :
>> + return Hexagon::ASRH_cdnNotPt_V4;
>> +
>> + case Hexagon::SXTB_cPt_V4 :
>> + return Hexagon::SXTB_cdnPt_V4;
>> + case Hexagon::SXTB_cNotPt_V4 :
>> + return Hexagon::SXTB_cdnNotPt_V4;
>> +
>> + case Hexagon::SXTH_cPt_V4 :
>> + return Hexagon::SXTH_cdnPt_V4;
>> + case Hexagon::SXTH_cNotPt_V4 :
>> + return Hexagon::SXTH_cdnNotPt_V4;
>> +
>> + case Hexagon::ZXTB_cPt_V4 :
>> + return Hexagon::ZXTB_cdnPt_V4;
>> + case Hexagon::ZXTB_cNotPt_V4 :
>> + return Hexagon::ZXTB_cdnNotPt_V4;
>> +
>> + case Hexagon::ZXTH_cPt_V4 :
>> + return Hexagon::ZXTH_cdnPt_V4;
>> + case Hexagon::ZXTH_cNotPt_V4 :
>> + return Hexagon::ZXTH_cdnNotPt_V4;
>> + }
>> +}
>> +
>> +// Returns true if an instruction can be promoted to .new predicate
>> +// or new-value store.
>> +bool HexagonPacketizerList::isNewifiable(MachineInstr* MI) {
>> + return isCondInst(MI) || IsNewifyStore(MI);
>> +}
>> +
>> +bool HexagonPacketizerList::isCondInst (MachineInstr* MI) {
>> + const HexagonInstrInfo *QII = (const HexagonInstrInfo *) TII;
>> + const MCInstrDesc& TID = MI->getDesc();
>> + // bug 5670: until that is fixed,
>> + // this portion is disabled.
>> + if ( TID.isConditionalBranch() // && !IsRegisterJump(MI)) ||
>> + || QII->isConditionalTransfer(MI)
>> + || QII->isConditionalALU32(MI)
>> + || QII->isConditionalLoad(MI)
>> + || QII->isConditionalStore(MI)) {
>> + return true;
>> + }
>> + return false;
>> +}
>> +
>> +
>> +// Promote an instruction to its .new form.
>> +// At this time, we have already made a call to CanPromoteToDotNew
>> +// and made sure that it can *indeed* be promoted.
>> +bool HexagonPacketizerList::PromoteToDotNew(MachineInstr* MI,
>> + SDep::Kind DepType, MachineBasicBlock::iterator &MII,
>> + const TargetRegisterClass* RC) {
>> +
>> + assert (DepType == SDep::Data);
>> + const HexagonInstrInfo *QII = (const HexagonInstrInfo *) TII;
>> +
>> + int NewOpcode;
>> + if (RC == Hexagon::PredRegsRegisterClass)
>> + NewOpcode = GetDotNewPredOp(MI->getOpcode());
>> + else
>> + NewOpcode = GetDotNewOp(MI->getOpcode());
>> + MI->setDesc(QII->get(NewOpcode));
>> +
>> + return true;
>> +}
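
Side note for readers of the patch: the promotion above just picks one of two
opcode tables based on the register class of the dependence feeding MI. A
minimal standalone sketch of that decision, using made-up opcode names and
tables rather than the real Hexagon enums:

    // Toy illustration only -- the opcodes and maps are hypothetical.
    #include <cassert>
    #include <map>

    enum ToyOpcode { ADD_cPt, ADD_cdnPt, STw_cPt, STw_cdnPt_nv };
    enum ToyRegClass { PredRegs, IntRegs };

    // A dependence through a predicate register selects the ".new predicate"
    // table; any other dependence selects the new-value table.
    static int getDotNewOpcode(ToyOpcode Opc, ToyRegClass DepRC) {
      static const std::map<ToyOpcode, ToyOpcode> PredMap = {{ADD_cPt, ADD_cdnPt}};
      static const std::map<ToyOpcode, ToyOpcode> ValueMap = {{STw_cPt, STw_cdnPt_nv}};
      const std::map<ToyOpcode, ToyOpcode> &M =
          (DepRC == PredRegs) ? PredMap : ValueMap;
      std::map<ToyOpcode, ToyOpcode>::const_iterator It = M.find(Opc);
      assert(It != M.end() && "opcode has no .new form in this table");
      return It->second;
    }

    int main() {
      assert(getDotNewOpcode(ADD_cPt, PredRegs) == ADD_cdnPt);
      assert(getDotNewOpcode(STw_cPt, IntRegs) == STw_cdnPt_nv);
      return 0;
    }
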
>> +
>> +// Returns the most basic instruction for the .new predicated instructions
>> +// and new-value stores.
>> +// For example, all of the following instructions will be converted back to
>> +// the same instruction:
>> +// 1) if (p0.new) memw(R0+#0) = R1.new --->
>> +// 2) if (p0) memw(R0+#0) = R1.new -------> if (p0) memw(R0+#0) = R1
>> +// 3) if (p0.new) memw(R0+#0) = R1 --->
>> +//
>> +// To understand the translation of instruction 1 to its original form,
>> +// consider a packet with 3 instructions.
>> +// { p0 = cmp.eq(R0,R1)
>> +// if (p0.new) R2 = add(R3, R4)
>> +// R5 = add (R3, R1)
>> +// }
>> +// if (p0) memw(R5+#0) = R2 <--- trying to include it in the previous packet
>> +//
>> +// This instruction can be part of the previous packet only if both p0 and R2
>> +// are promoted to .new values. This promotion happens in steps: first the
>> +// predicate register is promoted to .new, and in the next iteration R2 is
>> +// promoted. Therefore, in case of a dependence check failure (due to R5)
>> +// during the next iteration, the instruction should be converted back to its
>> +// most basic form.
>> +
>> +static int GetDotOldOp(const int opc) {
>> + switch (opc) {
>> + default: llvm_unreachable("Unknown .old type");
>> + case Hexagon::TFR_cdnPt:
>> + return Hexagon::TFR_cPt;
>> +
>> + case Hexagon::TFR_cdnNotPt:
>> + return Hexagon::TFR_cNotPt;
>> +
>> + case Hexagon::TFRI_cdnPt:
>> + return Hexagon::TFRI_cPt;
>> +
>> + case Hexagon::TFRI_cdnNotPt:
>> + return Hexagon::TFRI_cNotPt;
>> +
>> + case Hexagon::JMP_cdnPt:
>> + return Hexagon::JMP_c;
>> +
>> + case Hexagon::JMP_cdnNotPt:
>> + return Hexagon::JMP_cNot;
>> +
>> + case Hexagon::JMPR_cdnPt_V3:
>> + return Hexagon::JMPR_cPt;
>> +
>> + case Hexagon::JMPR_cdnNotPt_V3:
>> + return Hexagon::JMPR_cNotPt;
>> +
>> + // Load double word
>> +
>> + case Hexagon::LDrid_cdnPt :
>> + return Hexagon::LDrid_cPt;
>> +
>> + case Hexagon::LDrid_cdnNotPt :
>> + return Hexagon::LDrid_cNotPt;
>> +
>> + case Hexagon::LDrid_indexed_cdnPt :
>> + return Hexagon::LDrid_indexed_cPt;
>> +
>> + case Hexagon::LDrid_indexed_cdnNotPt :
>> + return Hexagon::LDrid_indexed_cNotPt;
>> +
>> + case Hexagon::POST_LDrid_cdnPt_V4 :
>> + return Hexagon::POST_LDrid_cPt;
>> +
>> + case Hexagon::POST_LDrid_cdnNotPt_V4 :
>> + return Hexagon::POST_LDrid_cNotPt;
>> +
>> + // Load word
>> +
>> + case Hexagon::LDriw_cdnPt :
>> + return Hexagon::LDriw_cPt;
>> +
>> + case Hexagon::LDriw_cdnNotPt :
>> + return Hexagon::LDriw_cNotPt;
>> +
>> + case Hexagon::LDriw_indexed_cdnPt :
>> + return Hexagon::LDriw_indexed_cPt;
>> +
>> + case Hexagon::LDriw_indexed_cdnNotPt :
>> + return Hexagon::LDriw_indexed_cNotPt;
>> +
>> + case Hexagon::POST_LDriw_cdnPt_V4 :
>> + return Hexagon::POST_LDriw_cPt;
>> +
>> + case Hexagon::POST_LDriw_cdnNotPt_V4 :
>> + return Hexagon::POST_LDriw_cNotPt;
>> +
>> + // Load half
>> +
>> + case Hexagon::LDrih_cdnPt :
>> + return Hexagon::LDrih_cPt;
>> +
>> + case Hexagon::LDrih_cdnNotPt :
>> + return Hexagon::LDrih_cNotPt;
>> +
>> + case Hexagon::LDrih_indexed_cdnPt :
>> + return Hexagon::LDrih_indexed_cPt;
>> +
>> + case Hexagon::LDrih_indexed_cdnNotPt :
>> + return Hexagon::LDrih_indexed_cNotPt;
>> +
>> + case Hexagon::POST_LDrih_cdnPt_V4 :
>> + return Hexagon::POST_LDrih_cPt;
>> +
>> + case Hexagon::POST_LDrih_cdnNotPt_V4 :
>> + return Hexagon::POST_LDrih_cNotPt;
>> +
>> + // Load byte
>> +
>> + case Hexagon::LDrib_cdnPt :
>> + return Hexagon::LDrib_cPt;
>> +
>> + case Hexagon::LDrib_cdnNotPt :
>> + return Hexagon::LDrib_cNotPt;
>> +
>> + case Hexagon::LDrib_indexed_cdnPt :
>> + return Hexagon::LDrib_indexed_cPt;
>> +
>> + case Hexagon::LDrib_indexed_cdnNotPt :
>> + return Hexagon::LDrib_indexed_cNotPt;
>> +
>> + case Hexagon::POST_LDrib_cdnPt_V4 :
>> + return Hexagon::POST_LDrib_cPt;
>> +
>> + case Hexagon::POST_LDrib_cdnNotPt_V4 :
>> + return Hexagon::POST_LDrib_cNotPt;
>> +
>> + // Load unsigned half
>> +
>> + case Hexagon::LDriuh_cdnPt :
>> + return Hexagon::LDriuh_cPt;
>> +
>> + case Hexagon::LDriuh_cdnNotPt :
>> + return Hexagon::LDriuh_cNotPt;
>> +
>> + case Hexagon::LDriuh_indexed_cdnPt :
>> + return Hexagon::LDriuh_indexed_cPt;
>> +
>> + case Hexagon::LDriuh_indexed_cdnNotPt :
>> + return Hexagon::LDriuh_indexed_cNotPt;
>> +
>> + case Hexagon::POST_LDriuh_cdnPt_V4 :
>> + return Hexagon::POST_LDriuh_cPt;
>> +
>> + case Hexagon::POST_LDriuh_cdnNotPt_V4 :
>> + return Hexagon::POST_LDriuh_cNotPt;
>> +
>> + // Load unsigned byte
>> + case Hexagon::LDriub_cdnPt :
>> + return Hexagon::LDriub_cPt;
>> +
>> + case Hexagon::LDriub_cdnNotPt :
>> + return Hexagon::LDriub_cNotPt;
>> +
>> + case Hexagon::LDriub_indexed_cdnPt :
>> + return Hexagon::LDriub_indexed_cPt;
>> +
>> + case Hexagon::LDriub_indexed_cdnNotPt :
>> + return Hexagon::LDriub_indexed_cNotPt;
>> +
>> + case Hexagon::POST_LDriub_cdnPt_V4 :
>> + return Hexagon::POST_LDriub_cPt;
>> +
>> + case Hexagon::POST_LDriub_cdnNotPt_V4 :
>> + return Hexagon::POST_LDriub_cNotPt;
>> +
>> + // V4 indexed+scaled Load
>> +
>> + case Hexagon::LDrid_indexed_cdnPt_V4 :
>> + return Hexagon::LDrid_indexed_cPt_V4;
>> +
>> + case Hexagon::LDrid_indexed_cdnNotPt_V4 :
>> + return Hexagon::LDrid_indexed_cNotPt_V4;
>> +
>> + case Hexagon::LDrid_indexed_shl_cdnPt_V4 :
>> + return Hexagon::LDrid_indexed_shl_cPt_V4;
>> +
>> + case Hexagon::LDrid_indexed_shl_cdnNotPt_V4 :
>> + return Hexagon::LDrid_indexed_shl_cNotPt_V4;
>> +
>> + case Hexagon::LDrib_indexed_cdnPt_V4 :
>> + return Hexagon::LDrib_indexed_cPt_V4;
>> +
>> + case Hexagon::LDrib_indexed_cdnNotPt_V4 :
>> + return Hexagon::LDrib_indexed_cNotPt_V4;
>> +
>> + case Hexagon::LDrib_indexed_shl_cdnPt_V4 :
>> + return Hexagon::LDrib_indexed_shl_cPt_V4;
>> +
>> + case Hexagon::LDrib_indexed_shl_cdnNotPt_V4 :
>> + return Hexagon::LDrib_indexed_shl_cNotPt_V4;
>> +
>> + case Hexagon::LDriub_indexed_cdnPt_V4 :
>> + return Hexagon::LDriub_indexed_cPt_V4;
>> +
>> + case Hexagon::LDriub_indexed_cdnNotPt_V4 :
>> + return Hexagon::LDriub_indexed_cNotPt_V4;
>> +
>> + case Hexagon::LDriub_indexed_shl_cdnPt_V4 :
>> + return Hexagon::LDriub_indexed_shl_cPt_V4;
>> +
>> + case Hexagon::LDriub_indexed_shl_cdnNotPt_V4 :
>> + return Hexagon::LDriub_indexed_shl_cNotPt_V4;
>> +
>> + case Hexagon::LDrih_indexed_cdnPt_V4 :
>> + return Hexagon::LDrih_indexed_cPt_V4;
>> +
>> + case Hexagon::LDrih_indexed_cdnNotPt_V4 :
>> + return Hexagon::LDrih_indexed_cNotPt_V4;
>> +
>> + case Hexagon::LDrih_indexed_shl_cdnPt_V4 :
>> + return Hexagon::LDrih_indexed_shl_cPt_V4;
>> +
>> + case Hexagon::LDrih_indexed_shl_cdnNotPt_V4 :
>> + return Hexagon::LDrih_indexed_shl_cNotPt_V4;
>> +
>> + case Hexagon::LDriuh_indexed_cdnPt_V4 :
>> + return Hexagon::LDriuh_indexed_cPt_V4;
>> +
>> + case Hexagon::LDriuh_indexed_cdnNotPt_V4 :
>> + return Hexagon::LDriuh_indexed_cNotPt_V4;
>> +
>> + case Hexagon::LDriuh_indexed_shl_cdnPt_V4 :
>> + return Hexagon::LDriuh_indexed_shl_cPt_V4;
>> +
>> + case Hexagon::LDriuh_indexed_shl_cdnNotPt_V4 :
>> + return Hexagon::LDriuh_indexed_shl_cNotPt_V4;
>> +
>> + case Hexagon::LDriw_indexed_cdnPt_V4 :
>> + return Hexagon::LDriw_indexed_cPt_V4;
>> +
>> + case Hexagon::LDriw_indexed_cdnNotPt_V4 :
>> + return Hexagon::LDriw_indexed_cNotPt_V4;
>> +
>> + case Hexagon::LDriw_indexed_shl_cdnPt_V4 :
>> + return Hexagon::LDriw_indexed_shl_cPt_V4;
>> +
>> + case Hexagon::LDriw_indexed_shl_cdnNotPt_V4 :
>> + return Hexagon::LDriw_indexed_shl_cNotPt_V4;
>> +
>> + // V4 global address load
>> +
>> + case Hexagon::LDd_GP_cdnPt_V4:
>> + return Hexagon::LDd_GP_cPt_V4;
>> +
>> + case Hexagon::LDd_GP_cdnNotPt_V4:
>> + return Hexagon::LDd_GP_cNotPt_V4;
>> +
>> + case Hexagon::LDb_GP_cdnPt_V4:
>> + return Hexagon::LDb_GP_cPt_V4;
>> +
>> + case Hexagon::LDb_GP_cdnNotPt_V4:
>> + return Hexagon::LDb_GP_cNotPt_V4;
>> +
>> + case Hexagon::LDub_GP_cdnPt_V4:
>> + return Hexagon::LDub_GP_cPt_V4;
>> +
>> + case Hexagon::LDub_GP_cdnNotPt_V4:
>> + return Hexagon::LDub_GP_cNotPt_V4;
>> +
>> + case Hexagon::LDh_GP_cdnPt_V4:
>> + return Hexagon::LDh_GP_cPt_V4;
>> +
>> + case Hexagon::LDh_GP_cdnNotPt_V4:
>> + return Hexagon::LDh_GP_cNotPt_V4;
>> +
>> + case Hexagon::LDuh_GP_cdnPt_V4:
>> + return Hexagon::LDuh_GP_cPt_V4;
>> +
>> + case Hexagon::LDuh_GP_cdnNotPt_V4:
>> + return Hexagon::LDuh_GP_cNotPt_V4;
>> +
>> + case Hexagon::LDw_GP_cdnPt_V4:
>> + return Hexagon::LDw_GP_cPt_V4;
>> +
>> + case Hexagon::LDw_GP_cdnNotPt_V4:
>> + return Hexagon::LDw_GP_cNotPt_V4;
>> +
>> + case Hexagon::LDrid_GP_cdnPt_V4:
>> + return Hexagon::LDrid_GP_cPt_V4;
>> +
>> + case Hexagon::LDrid_GP_cdnNotPt_V4:
>> + return Hexagon::LDrid_GP_cNotPt_V4;
>> +
>> + case Hexagon::LDrib_GP_cdnPt_V4:
>> + return Hexagon::LDrib_GP_cPt_V4;
>> +
>> + case Hexagon::LDrib_GP_cdnNotPt_V4:
>> + return Hexagon::LDrib_GP_cNotPt_V4;
>> +
>> + case Hexagon::LDriub_GP_cdnPt_V4:
>> + return Hexagon::LDriub_GP_cPt_V4;
>> +
>> + case Hexagon::LDriub_GP_cdnNotPt_V4:
>> + return Hexagon::LDriub_GP_cNotPt_V4;
>> +
>> + case Hexagon::LDrih_GP_cdnPt_V4:
>> + return Hexagon::LDrih_GP_cPt_V4;
>> +
>> + case Hexagon::LDrih_GP_cdnNotPt_V4:
>> + return Hexagon::LDrih_GP_cNotPt_V4;
>> +
>> + case Hexagon::LDriuh_GP_cdnPt_V4:
>> + return Hexagon::LDriuh_GP_cPt_V4;
>> +
>> + case Hexagon::LDriuh_GP_cdnNotPt_V4:
>> + return Hexagon::LDriuh_GP_cNotPt_V4;
>> +
>> + case Hexagon::LDriw_GP_cdnPt_V4:
>> + return Hexagon::LDriw_GP_cPt_V4;
>> +
>> + case Hexagon::LDriw_GP_cdnNotPt_V4:
>> + return Hexagon::LDriw_GP_cNotPt_V4;
>> +
>> + // Conditional add
>> +
>> + case Hexagon::ADD_ri_cdnPt :
>> + return Hexagon::ADD_ri_cPt;
>> + case Hexagon::ADD_ri_cdnNotPt :
>> + return Hexagon::ADD_ri_cNotPt;
>> +
>> + case Hexagon::ADD_rr_cdnPt :
>> + return Hexagon::ADD_rr_cPt;
>> + case Hexagon::ADD_rr_cdnNotPt:
>> + return Hexagon::ADD_rr_cNotPt;
>> +
>> + // Conditional logical Operations
>> +
>> + case Hexagon::XOR_rr_cdnPt :
>> + return Hexagon::XOR_rr_cPt;
>> + case Hexagon::XOR_rr_cdnNotPt :
>> + return Hexagon::XOR_rr_cNotPt;
>> +
>> + case Hexagon::AND_rr_cdnPt :
>> + return Hexagon::AND_rr_cPt;
>> + case Hexagon::AND_rr_cdnNotPt :
>> + return Hexagon::AND_rr_cNotPt;
>> +
>> + case Hexagon::OR_rr_cdnPt :
>> + return Hexagon::OR_rr_cPt;
>> + case Hexagon::OR_rr_cdnNotPt :
>> + return Hexagon::OR_rr_cNotPt;
>> +
>> + // Conditional Subtract
>> +
>> + case Hexagon::SUB_rr_cdnPt :
>> + return Hexagon::SUB_rr_cPt;
>> + case Hexagon::SUB_rr_cdnNotPt :
>> + return Hexagon::SUB_rr_cNotPt;
>> +
>> + // Conditional combine
>> +
>> + case Hexagon::COMBINE_rr_cdnPt :
>> + return Hexagon::COMBINE_rr_cPt;
>> + case Hexagon::COMBINE_rr_cdnNotPt :
>> + return Hexagon::COMBINE_rr_cNotPt;
>> +
>> +// Conditional shift operations
>> +
>> + case Hexagon::ASLH_cdnPt_V4 :
>> + return Hexagon::ASLH_cPt_V4;
>> + case Hexagon::ASLH_cdnNotPt_V4 :
>> + return Hexagon::ASLH_cNotPt_V4;
>> +
>> + case Hexagon::ASRH_cdnPt_V4 :
>> + return Hexagon::ASRH_cPt_V4;
>> + case Hexagon::ASRH_cdnNotPt_V4 :
>> + return Hexagon::ASRH_cNotPt_V4;
>> +
>> + case Hexagon::SXTB_cdnPt_V4 :
>> + return Hexagon::SXTB_cPt_V4;
>> + case Hexagon::SXTB_cdnNotPt_V4 :
>> + return Hexagon::SXTB_cNotPt_V4;
>> +
>> + case Hexagon::SXTH_cdnPt_V4 :
>> + return Hexagon::SXTH_cPt_V4;
>> + case Hexagon::SXTH_cdnNotPt_V4 :
>> + return Hexagon::SXTH_cNotPt_V4;
>> +
>> + case Hexagon::ZXTB_cdnPt_V4 :
>> + return Hexagon::ZXTB_cPt_V4;
>> + case Hexagon::ZXTB_cdnNotPt_V4 :
>> + return Hexagon::ZXTB_cNotPt_V4;
>> +
>> + case Hexagon::ZXTH_cdnPt_V4 :
>> + return Hexagon::ZXTH_cPt_V4;
>> + case Hexagon::ZXTH_cdnNotPt_V4 :
>> + return Hexagon::ZXTH_cNotPt_V4;
>> +
>> + // Store byte
>> +
>> + case Hexagon::STrib_imm_cdnPt_V4 :
>> + return Hexagon::STrib_imm_cPt_V4;
>> +
>> + case Hexagon::STrib_imm_cdnNotPt_V4 :
>> + return Hexagon::STrib_imm_cNotPt_V4;
>> +
>> + case Hexagon::STrib_cdnPt_nv_V4 :
>> + case Hexagon::STrib_cPt_nv_V4 :
>> + case Hexagon::STrib_cdnPt_V4 :
>> + return Hexagon::STrib_cPt;
>> +
>> + case Hexagon::STrib_cdnNotPt_nv_V4 :
>> + case Hexagon::STrib_cNotPt_nv_V4 :
>> + case Hexagon::STrib_cdnNotPt_V4 :
>> + return Hexagon::STrib_cNotPt;
>> +
>> + case Hexagon::STrib_indexed_cdnPt_V4 :
>> + case Hexagon::STrib_indexed_cPt_nv_V4 :
>> + case Hexagon::STrib_indexed_cdnPt_nv_V4 :
>> + return Hexagon::STrib_indexed_cPt;
>> +
>> + case Hexagon::STrib_indexed_cdnNotPt_V4 :
>> + case Hexagon::STrib_indexed_cNotPt_nv_V4 :
>> + case Hexagon::STrib_indexed_cdnNotPt_nv_V4 :
>> + return Hexagon::STrib_indexed_cNotPt;
>> +
>> + case Hexagon::STrib_indexed_shl_cdnPt_nv_V4:
>> + case Hexagon::STrib_indexed_shl_cPt_nv_V4 :
>> + case Hexagon::STrib_indexed_shl_cdnPt_V4 :
>> + return Hexagon::STrib_indexed_shl_cPt_V4;
>> +
>> + case Hexagon::STrib_indexed_shl_cdnNotPt_nv_V4:
>> + case Hexagon::STrib_indexed_shl_cNotPt_nv_V4 :
>> + case Hexagon::STrib_indexed_shl_cdnNotPt_V4 :
>> + return Hexagon::STrib_indexed_shl_cNotPt_V4;
>> +
>> + case Hexagon::POST_STbri_cdnPt_nv_V4 :
>> + case Hexagon::POST_STbri_cPt_nv_V4 :
>> + case Hexagon::POST_STbri_cdnPt_V4 :
>> + return Hexagon::POST_STbri_cPt;
>> +
>> + case Hexagon::POST_STbri_cdnNotPt_nv_V4 :
>> + case Hexagon::POST_STbri_cNotPt_nv_V4:
>> + case Hexagon::POST_STbri_cdnNotPt_V4 :
>> + return Hexagon::POST_STbri_cNotPt;
>> +
>> + case Hexagon::STb_GP_cdnPt_nv_V4:
>> + case Hexagon::STb_GP_cdnPt_V4:
>> + case Hexagon::STb_GP_cPt_nv_V4:
>> + return Hexagon::STb_GP_cPt_V4;
>> +
>> + case Hexagon::STb_GP_cdnNotPt_nv_V4:
>> + case Hexagon::STb_GP_cdnNotPt_V4:
>> + case Hexagon::STb_GP_cNotPt_nv_V4:
>> + return Hexagon::STb_GP_cNotPt_V4;
>> +
>> + case Hexagon::STrib_GP_cdnPt_nv_V4:
>> + case Hexagon::STrib_GP_cdnPt_V4:
>> + case Hexagon::STrib_GP_cPt_nv_V4:
>> + return Hexagon::STrib_GP_cPt_V4;
>> +
>> + case Hexagon::STrib_GP_cdnNotPt_nv_V4:
>> + case Hexagon::STrib_GP_cdnNotPt_V4:
>> + case Hexagon::STrib_GP_cNotPt_nv_V4:
>> + return Hexagon::STrib_GP_cNotPt_V4;
>> +
>> + // Store new-value byte - unconditional
>> + case Hexagon::STrib_nv_V4:
>> + return Hexagon::STrib;
>> +
>> + case Hexagon::STrib_indexed_nv_V4:
>> + return Hexagon::STrib_indexed;
>> +
>> + case Hexagon::STrib_indexed_shl_nv_V4:
>> + return Hexagon::STrib_indexed_shl_V4;
>> +
>> + case Hexagon::STrib_shl_nv_V4:
>> + return Hexagon::STrib_shl_V4;
>> +
>> + case Hexagon::STrib_GP_nv_V4:
>> + return Hexagon::STrib_GP_V4;
>> +
>> + case Hexagon::STb_GP_nv_V4:
>> + return Hexagon::STb_GP_V4;
>> +
>> + case Hexagon::POST_STbri_nv_V4:
>> + return Hexagon::POST_STbri;
>> +
>> + // Store halfword
>> + case Hexagon::STrih_imm_cdnPt_V4 :
>> + return Hexagon::STrih_imm_cPt_V4;
>> +
>> + case Hexagon::STrih_imm_cdnNotPt_V4 :
>> + return Hexagon::STrih_imm_cNotPt_V4;
>> +
>> + case Hexagon::STrih_cdnPt_nv_V4 :
>> + case Hexagon::STrih_cPt_nv_V4 :
>> + case Hexagon::STrih_cdnPt_V4 :
>> + return Hexagon::STrih_cPt;
>> +
>> + case Hexagon::STrih_cdnNotPt_nv_V4 :
>> + case Hexagon::STrih_cNotPt_nv_V4 :
>> + case Hexagon::STrih_cdnNotPt_V4 :
>> + return Hexagon::STrih_cNotPt;
>> +
>> + case Hexagon::STrih_indexed_cdnPt_nv_V4:
>> + case Hexagon::STrih_indexed_cPt_nv_V4 :
>> + case Hexagon::STrih_indexed_cdnPt_V4 :
>> + return Hexagon::STrih_indexed_cPt;
>> +
>> + case Hexagon::STrih_indexed_cdnNotPt_nv_V4:
>> + case Hexagon::STrih_indexed_cNotPt_nv_V4 :
>> + case Hexagon::STrih_indexed_cdnNotPt_V4 :
>> + return Hexagon::STrih_indexed_cNotPt;
>> +
>> + case Hexagon::STrih_indexed_shl_cdnPt_nv_V4 :
>> + case Hexagon::STrih_indexed_shl_cPt_nv_V4 :
>> + case Hexagon::STrih_indexed_shl_cdnPt_V4 :
>> + return Hexagon::STrih_indexed_shl_cPt_V4;
>> +
>> + case Hexagon::STrih_indexed_shl_cdnNotPt_nv_V4 :
>> + case Hexagon::STrih_indexed_shl_cNotPt_nv_V4 :
>> + case Hexagon::STrih_indexed_shl_cdnNotPt_V4 :
>> + return Hexagon::STrih_indexed_shl_cNotPt_V4;
>> +
>> + case Hexagon::POST_SThri_cdnPt_nv_V4 :
>> + case Hexagon::POST_SThri_cPt_nv_V4 :
>> + case Hexagon::POST_SThri_cdnPt_V4 :
>> + return Hexagon::POST_SThri_cPt;
>> +
>> + case Hexagon::POST_SThri_cdnNotPt_nv_V4 :
>> + case Hexagon::POST_SThri_cNotPt_nv_V4 :
>> + case Hexagon::POST_SThri_cdnNotPt_V4 :
>> + return Hexagon::POST_SThri_cNotPt;
>> +
>> + case Hexagon::STh_GP_cdnPt_nv_V4:
>> + case Hexagon::STh_GP_cdnPt_V4:
>> + case Hexagon::STh_GP_cPt_nv_V4:
>> + return Hexagon::STh_GP_cPt_V4;
>> +
>> + case Hexagon::STh_GP_cdnNotPt_nv_V4:
>> + case Hexagon::STh_GP_cdnNotPt_V4:
>> + case Hexagon::STh_GP_cNotPt_nv_V4:
>> + return Hexagon::STh_GP_cNotPt_V4;
>> +
>> + case Hexagon::STrih_GP_cdnPt_nv_V4:
>> + case Hexagon::STrih_GP_cdnPt_V4:
>> + case Hexagon::STrih_GP_cPt_nv_V4:
>> + return Hexagon::STrih_GP_cPt_V4;
>> +
>> + case Hexagon::STrih_GP_cdnNotPt_nv_V4:
>> + case Hexagon::STrih_GP_cdnNotPt_V4:
>> + case Hexagon::STrih_GP_cNotPt_nv_V4:
>> + return Hexagon::STrih_GP_cNotPt_V4;
>> +
>> + // Store new-value halfword - unconditional
>> +
>> + case Hexagon::STrih_nv_V4:
>> + return Hexagon::STrih;
>> +
>> + case Hexagon::STrih_indexed_nv_V4:
>> + return Hexagon::STrih_indexed;
>> +
>> + case Hexagon::STrih_indexed_shl_nv_V4:
>> + return Hexagon::STrih_indexed_shl_V4;
>> +
>> + case Hexagon::STrih_shl_nv_V4:
>> + return Hexagon::STrih_shl_V4;
>> +
>> + case Hexagon::STrih_GP_nv_V4:
>> + return Hexagon::STrih_GP_V4;
>> +
>> + case Hexagon::STh_GP_nv_V4:
>> + return Hexagon::STh_GP_V4;
>> +
>> + case Hexagon::POST_SThri_nv_V4:
>> + return Hexagon::POST_SThri;
>> +
>> + // Store word
>> +
>> + case Hexagon::STriw_imm_cdnPt_V4 :
>> + return Hexagon::STriw_imm_cPt_V4;
>> +
>> + case Hexagon::STriw_imm_cdnNotPt_V4 :
>> + return Hexagon::STriw_imm_cNotPt_V4;
>> +
>> + case Hexagon::STriw_cdnPt_nv_V4 :
>> + case Hexagon::STriw_cPt_nv_V4 :
>> + case Hexagon::STriw_cdnPt_V4 :
>> + return Hexagon::STriw_cPt;
>> +
>> + case Hexagon::STriw_cdnNotPt_nv_V4 :
>> + case Hexagon::STriw_cNotPt_nv_V4 :
>> + case Hexagon::STriw_cdnNotPt_V4 :
>> + return Hexagon::STriw_cNotPt;
>> +
>> + case Hexagon::STriw_indexed_cdnPt_nv_V4 :
>> + case Hexagon::STriw_indexed_cPt_nv_V4 :
>> + case Hexagon::STriw_indexed_cdnPt_V4 :
>> + return Hexagon::STriw_indexed_cPt;
>> +
>> + case Hexagon::STriw_indexed_cdnNotPt_nv_V4 :
>> + case Hexagon::STriw_indexed_cNotPt_nv_V4 :
>> + case Hexagon::STriw_indexed_cdnNotPt_V4 :
>> + return Hexagon::STriw_indexed_cNotPt;
>> +
>> + case Hexagon::STriw_indexed_shl_cdnPt_nv_V4 :
>> + case Hexagon::STriw_indexed_shl_cPt_nv_V4 :
>> + case Hexagon::STriw_indexed_shl_cdnPt_V4 :
>> + return Hexagon::STriw_indexed_shl_cPt_V4;
>> +
>> + case Hexagon::STriw_indexed_shl_cdnNotPt_nv_V4 :
>> + case Hexagon::STriw_indexed_shl_cNotPt_nv_V4 :
>> + case Hexagon::STriw_indexed_shl_cdnNotPt_V4 :
>> + return Hexagon::STriw_indexed_shl_cNotPt_V4;
>> +
>> + case Hexagon::POST_STwri_cdnPt_nv_V4 :
>> + case Hexagon::POST_STwri_cPt_nv_V4 :
>> + case Hexagon::POST_STwri_cdnPt_V4 :
>> + return Hexagon::POST_STwri_cPt;
>> +
>> + case Hexagon::POST_STwri_cdnNotPt_nv_V4 :
>> + case Hexagon::POST_STwri_cNotPt_nv_V4 :
>> + case Hexagon::POST_STwri_cdnNotPt_V4 :
>> + return Hexagon::POST_STwri_cNotPt;
>> +
>> + case Hexagon::STw_GP_cdnPt_nv_V4:
>> + case Hexagon::STw_GP_cdnPt_V4:
>> + case Hexagon::STw_GP_cPt_nv_V4:
>> + return Hexagon::STw_GP_cPt_V4;
>> +
>> + case Hexagon::STw_GP_cdnNotPt_nv_V4:
>> + case Hexagon::STw_GP_cdnNotPt_V4:
>> + case Hexagon::STw_GP_cNotPt_nv_V4:
>> + return Hexagon::STw_GP_cNotPt_V4;
>> +
>> + case Hexagon::STriw_GP_cdnPt_nv_V4:
>> + case Hexagon::STriw_GP_cdnPt_V4:
>> + case Hexagon::STriw_GP_cPt_nv_V4:
>> + return Hexagon::STriw_GP_cPt_V4;
>> +
>> + case Hexagon::STriw_GP_cdnNotPt_nv_V4:
>> + case Hexagon::STriw_GP_cdnNotPt_V4:
>> + case Hexagon::STriw_GP_cNotPt_nv_V4:
>> + return Hexagon::STriw_GP_cNotPt_V4;
>> +
>> + // Store new-value word - unconditional
>> +
>> + case Hexagon::STriw_nv_V4:
>> + return Hexagon::STriw;
>> +
>> + case Hexagon::STriw_indexed_nv_V4:
>> + return Hexagon::STriw_indexed;
>> +
>> + case Hexagon::STriw_indexed_shl_nv_V4:
>> + return Hexagon::STriw_indexed_shl_V4;
>> +
>> + case Hexagon::STriw_shl_nv_V4:
>> + return Hexagon::STriw_shl_V4;
>> +
>> + case Hexagon::STriw_GP_nv_V4:
>> + return Hexagon::STriw_GP_V4;
>> +
>> + case Hexagon::STw_GP_nv_V4:
>> + return Hexagon::STw_GP_V4;
>> +
>> + case Hexagon::POST_STwri_nv_V4:
>> + return Hexagon::POST_STwri;
>> +
>> + // Store doubleword
>> +
>> + case Hexagon::STrid_cdnPt_V4 :
>> + return Hexagon::STrid_cPt;
>> +
>> + case Hexagon::STrid_cdnNotPt_V4 :
>> + return Hexagon::STrid_cNotPt;
>> +
>> + case Hexagon::STrid_indexed_cdnPt_V4 :
>> + return Hexagon::STrid_indexed_cPt;
>> +
>> + case Hexagon::STrid_indexed_cdnNotPt_V4 :
>> + return Hexagon::STrid_indexed_cNotPt;
>> +
>> + case Hexagon::STrid_indexed_shl_cdnPt_V4 :
>> + return Hexagon::STrid_indexed_shl_cPt_V4;
>> +
>> + case Hexagon::STrid_indexed_shl_cdnNotPt_V4 :
>> + return Hexagon::STrid_indexed_shl_cNotPt_V4;
>> +
>> + case Hexagon::POST_STdri_cdnPt_V4 :
>> + return Hexagon::POST_STdri_cPt;
>> +
>> + case Hexagon::POST_STdri_cdnNotPt_V4 :
>> + return Hexagon::POST_STdri_cNotPt;
>> +
>> + case Hexagon::STd_GP_cdnPt_V4 :
>> + return Hexagon::STd_GP_cPt_V4;
>> +
>> + case Hexagon::STd_GP_cdnNotPt_V4 :
>> + return Hexagon::STd_GP_cNotPt_V4;
>> +
>> + case Hexagon::STrid_GP_cdnPt_V4 :
>> + return Hexagon::STrid_GP_cPt_V4;
>> +
>> + case Hexagon::STrid_GP_cdnNotPt_V4 :
>> + return Hexagon::STrid_GP_cNotPt_V4;
>> + }
>> +}
>> +
>> +bool HexagonPacketizerList::DemoteToDotOld(MachineInstr* MI) {
>> + const HexagonInstrInfo *QII = (const HexagonInstrInfo *) TII;
>> + int NewOpcode = GetDotOldOp(MI->getOpcode());
>> + MI->setDesc(QII->get(NewOpcode));
>> + return true;
>> +}
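
One thing the long comment before GetDotOldOp implies but does not spell out:
the mapping is deliberately many-to-one, so the cdn, nv, and cdn+nv variants
of a store all collapse to the same base opcode and demotion is idempotent. A
tiny self-contained sketch of that collapse (toy opcodes, not the real enum):

    // Hypothetical opcodes; shows only the many-to-one demotion shape.
    #include <cassert>

    enum ToyOp { ST_cPt, ST_cdnPt, ST_cPt_nv, ST_cdnPt_nv };

    static ToyOp getDotOldOp(ToyOp Op) {
      switch (Op) {
      case ST_cdnPt:     // .new predicate
      case ST_cPt_nv:    // new-value store
      case ST_cdnPt_nv:  // both
      case ST_cPt:       // already the basic form
        return ST_cPt;   // every variant demotes to the plain conditional store
      }
      return Op;
    }

    int main() {
      assert(getDotOldOp(ST_cdnPt_nv) == ST_cPt);
      assert(getDotOldOp(getDotOldOp(ST_cdnPt)) == ST_cPt); // idempotent
      return 0;
    }
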
>> +
>> +// Returns true if an instruction is predicated on p0 and false if it's
>> +// predicated on !p0.
>> +
>> +static bool GetPredicateSense(MachineInstr* MI,
>> + const HexagonInstrInfo *QII) {
>> +
>> + switch (MI->getOpcode()) {
>> + case Hexagon::TFR_cPt:
>> + case Hexagon::TFR_cdnPt:
>> + case Hexagon::TFRI_cPt:
>> + case Hexagon::TFRI_cdnPt:
>> + case Hexagon::STrib_cPt :
>> + case Hexagon::STrib_cdnPt_V4 :
>> + case Hexagon::STrib_indexed_cPt :
>> + case Hexagon::STrib_indexed_cdnPt_V4 :
>> + case Hexagon::STrib_indexed_shl_cPt_V4 :
>> + case Hexagon::STrib_indexed_shl_cdnPt_V4 :
>> + case Hexagon::POST_STbri_cPt :
>> + case Hexagon::POST_STbri_cdnPt_V4 :
>> + case Hexagon::STrih_cPt :
>> + case Hexagon::STrih_cdnPt_V4 :
>> + case Hexagon::STrih_indexed_cPt :
>> + case Hexagon::STrih_indexed_cdnPt_V4 :
>> + case Hexagon::STrih_indexed_shl_cPt_V4 :
>> + case Hexagon::STrih_indexed_shl_cdnPt_V4 :
>> + case Hexagon::POST_SThri_cPt :
>> + case Hexagon::POST_SThri_cdnPt_V4 :
>> + case Hexagon::STriw_cPt :
>> + case Hexagon::STriw_cdnPt_V4 :
>> + case Hexagon::STriw_indexed_cPt :
>> + case Hexagon::STriw_indexed_cdnPt_V4 :
>> + case Hexagon::STriw_indexed_shl_cPt_V4 :
>> + case Hexagon::STriw_indexed_shl_cdnPt_V4 :
>> + case Hexagon::POST_STwri_cPt :
>> + case Hexagon::POST_STwri_cdnPt_V4 :
>> + case Hexagon::STrib_imm_cPt_V4 :
>> + case Hexagon::STrib_imm_cdnPt_V4 :
>> + case Hexagon::STrid_cPt :
>> + case Hexagon::STrid_cdnPt_V4 :
>> + case Hexagon::STrid_indexed_cPt :
>> + case Hexagon::STrid_indexed_cdnPt_V4 :
>> + case Hexagon::STrid_indexed_shl_cPt_V4 :
>> + case Hexagon::STrid_indexed_shl_cdnPt_V4 :
>> + case Hexagon::POST_STdri_cPt :
>> + case Hexagon::POST_STdri_cdnPt_V4 :
>> + case Hexagon::STrih_imm_cPt_V4 :
>> + case Hexagon::STrih_imm_cdnPt_V4 :
>> + case Hexagon::STriw_imm_cPt_V4 :
>> + case Hexagon::STriw_imm_cdnPt_V4 :
>> + case Hexagon::JMP_cdnPt :
>> + case Hexagon::LDrid_cPt :
>> + case Hexagon::LDrid_cdnPt :
>> + case Hexagon::LDrid_indexed_cPt :
>> + case Hexagon::LDrid_indexed_cdnPt :
>> + case Hexagon::POST_LDrid_cPt :
>> + case Hexagon::POST_LDrid_cdnPt_V4 :
>> + case Hexagon::LDriw_cPt :
>> + case Hexagon::LDriw_cdnPt :
>> + case Hexagon::LDriw_indexed_cPt :
>> + case Hexagon::LDriw_indexed_cdnPt :
>> + case Hexagon::POST_LDriw_cPt :
>> + case Hexagon::POST_LDriw_cdnPt_V4 :
>> + case Hexagon::LDrih_cPt :
>> + case Hexagon::LDrih_cdnPt :
>> + case Hexagon::LDrih_indexed_cPt :
>> + case Hexagon::LDrih_indexed_cdnPt :
>> + case Hexagon::POST_LDrih_cPt :
>> + case Hexagon::POST_LDrih_cdnPt_V4 :
>> + case Hexagon::LDrib_cPt :
>> + case Hexagon::LDrib_cdnPt :
>> + case Hexagon::LDrib_indexed_cPt :
>> + case Hexagon::LDrib_indexed_cdnPt :
>> + case Hexagon::POST_LDrib_cPt :
>> + case Hexagon::POST_LDrib_cdnPt_V4 :
>> + case Hexagon::LDriuh_cPt :
>> + case Hexagon::LDriuh_cdnPt :
>> + case Hexagon::LDriuh_indexed_cPt :
>> + case Hexagon::LDriuh_indexed_cdnPt :
>> + case Hexagon::POST_LDriuh_cPt :
>> + case Hexagon::POST_LDriuh_cdnPt_V4 :
>> + case Hexagon::LDriub_cPt :
>> + case Hexagon::LDriub_cdnPt :
>> + case Hexagon::LDriub_indexed_cPt :
>> + case Hexagon::LDriub_indexed_cdnPt :
>> + case Hexagon::POST_LDriub_cPt :
>> + case Hexagon::POST_LDriub_cdnPt_V4 :
>> + case Hexagon::LDrid_indexed_cPt_V4 :
>> + case Hexagon::LDrid_indexed_cdnPt_V4 :
>> + case Hexagon::LDrid_indexed_shl_cPt_V4 :
>> + case Hexagon::LDrid_indexed_shl_cdnPt_V4 :
>> + case Hexagon::LDrib_indexed_cPt_V4 :
>> + case Hexagon::LDrib_indexed_cdnPt_V4 :
>> + case Hexagon::LDrib_indexed_shl_cPt_V4 :
>> + case Hexagon::LDrib_indexed_shl_cdnPt_V4 :
>> + case Hexagon::LDriub_indexed_cPt_V4 :
>> + case Hexagon::LDriub_indexed_cdnPt_V4 :
>> + case Hexagon::LDriub_indexed_shl_cPt_V4 :
>> + case Hexagon::LDriub_indexed_shl_cdnPt_V4 :
>> + case Hexagon::LDrih_indexed_cPt_V4 :
>> + case Hexagon::LDrih_indexed_cdnPt_V4 :
>> + case Hexagon::LDrih_indexed_shl_cPt_V4 :
>> + case Hexagon::LDrih_indexed_shl_cdnPt_V4 :
>> + case Hexagon::LDriuh_indexed_cPt_V4 :
>> + case Hexagon::LDriuh_indexed_cdnPt_V4 :
>> + case Hexagon::LDriuh_indexed_shl_cPt_V4 :
>> + case Hexagon::LDriuh_indexed_shl_cdnPt_V4 :
>> + case Hexagon::LDriw_indexed_cPt_V4 :
>> + case Hexagon::LDriw_indexed_cdnPt_V4 :
>> + case Hexagon::LDriw_indexed_shl_cPt_V4 :
>> + case Hexagon::LDriw_indexed_shl_cdnPt_V4 :
>> + case Hexagon::ADD_ri_cPt :
>> + case Hexagon::ADD_ri_cdnPt :
>> + case Hexagon::ADD_rr_cPt :
>> + case Hexagon::ADD_rr_cdnPt :
>> + case Hexagon::XOR_rr_cPt :
>> + case Hexagon::XOR_rr_cdnPt :
>> + case Hexagon::AND_rr_cPt :
>> + case Hexagon::AND_rr_cdnPt :
>> + case Hexagon::OR_rr_cPt :
>> + case Hexagon::OR_rr_cdnPt :
>> + case Hexagon::SUB_rr_cPt :
>> + case Hexagon::SUB_rr_cdnPt :
>> + case Hexagon::COMBINE_rr_cPt :
>> + case Hexagon::COMBINE_rr_cdnPt :
>> + case Hexagon::ASLH_cPt_V4 :
>> + case Hexagon::ASLH_cdnPt_V4 :
>> + case Hexagon::ASRH_cPt_V4 :
>> + case Hexagon::ASRH_cdnPt_V4 :
>> + case Hexagon::SXTB_cPt_V4 :
>> + case Hexagon::SXTB_cdnPt_V4 :
>> + case Hexagon::SXTH_cPt_V4 :
>> + case Hexagon::SXTH_cdnPt_V4 :
>> + case Hexagon::ZXTB_cPt_V4 :
>> + case Hexagon::ZXTB_cdnPt_V4 :
>> + case Hexagon::ZXTH_cPt_V4 :
>> + case Hexagon::ZXTH_cdnPt_V4 :
>> + case Hexagon::LDrid_GP_cPt_V4 :
>> + case Hexagon::LDrib_GP_cPt_V4 :
>> + case Hexagon::LDriub_GP_cPt_V4 :
>> + case Hexagon::LDrih_GP_cPt_V4 :
>> + case Hexagon::LDriuh_GP_cPt_V4 :
>> + case Hexagon::LDriw_GP_cPt_V4 :
>> + case Hexagon::LDd_GP_cPt_V4 :
>> + case Hexagon::LDb_GP_cPt_V4 :
>> + case Hexagon::LDub_GP_cPt_V4 :
>> + case Hexagon::LDh_GP_cPt_V4 :
>> + case Hexagon::LDuh_GP_cPt_V4 :
>> + case Hexagon::LDw_GP_cPt_V4 :
>> + case Hexagon::STrid_GP_cPt_V4 :
>> + case Hexagon::STrib_GP_cPt_V4 :
>> + case Hexagon::STrih_GP_cPt_V4 :
>> + case Hexagon::STriw_GP_cPt_V4 :
>> + case Hexagon::STd_GP_cPt_V4 :
>> + case Hexagon::STb_GP_cPt_V4 :
>> + case Hexagon::STh_GP_cPt_V4 :
>> + case Hexagon::STw_GP_cPt_V4 :
>> + case Hexagon::LDrid_GP_cdnPt_V4 :
>> + case Hexagon::LDrib_GP_cdnPt_V4 :
>> + case Hexagon::LDriub_GP_cdnPt_V4 :
>> + case Hexagon::LDrih_GP_cdnPt_V4 :
>> + case Hexagon::LDriuh_GP_cdnPt_V4 :
>> + case Hexagon::LDriw_GP_cdnPt_V4 :
>> + case Hexagon::LDd_GP_cdnPt_V4 :
>> + case Hexagon::LDb_GP_cdnPt_V4 :
>> + case Hexagon::LDub_GP_cdnPt_V4 :
>> + case Hexagon::LDh_GP_cdnPt_V4 :
>> + case Hexagon::LDuh_GP_cdnPt_V4 :
>> + case Hexagon::LDw_GP_cdnPt_V4 :
>> + case Hexagon::STrid_GP_cdnPt_V4 :
>> + case Hexagon::STrib_GP_cdnPt_V4 :
>> + case Hexagon::STrih_GP_cdnPt_V4 :
>> + case Hexagon::STriw_GP_cdnPt_V4 :
>> + case Hexagon::STd_GP_cdnPt_V4 :
>> + case Hexagon::STb_GP_cdnPt_V4 :
>> + case Hexagon::STh_GP_cdnPt_V4 :
>> + case Hexagon::STw_GP_cdnPt_V4 :
>> + return true;
>> +
>> + case Hexagon::TFR_cNotPt:
>> + case Hexagon::TFR_cdnNotPt:
>> + case Hexagon::TFRI_cNotPt:
>> + case Hexagon::TFRI_cdnNotPt:
>> + case Hexagon::STrib_cNotPt :
>> + case Hexagon::STrib_cdnNotPt_V4 :
>> + case Hexagon::STrib_indexed_cNotPt :
>> + case Hexagon::STrib_indexed_cdnNotPt_V4 :
>> + case Hexagon::STrib_indexed_shl_cNotPt_V4 :
>> + case Hexagon::STrib_indexed_shl_cdnNotPt_V4 :
>> + case Hexagon::POST_STbri_cNotPt :
>> + case Hexagon::POST_STbri_cdnNotPt_V4 :
>> + case Hexagon::STrih_cNotPt :
>> + case Hexagon::STrih_cdnNotPt_V4 :
>> + case Hexagon::STrih_indexed_cNotPt :
>> + case Hexagon::STrih_indexed_cdnNotPt_V4 :
>> + case Hexagon::STrih_indexed_shl_cNotPt_V4 :
>> + case Hexagon::STrih_indexed_shl_cdnNotPt_V4 :
>> + case Hexagon::POST_SThri_cNotPt :
>> + case Hexagon::POST_SThri_cdnNotPt_V4 :
>> + case Hexagon::STriw_cNotPt :
>> + case Hexagon::STriw_cdnNotPt_V4 :
>> + case Hexagon::STriw_indexed_cNotPt :
>> + case Hexagon::STriw_indexed_cdnNotPt_V4 :
>> + case Hexagon::STriw_indexed_shl_cNotPt_V4 :
>> + case Hexagon::STriw_indexed_shl_cdnNotPt_V4 :
>> + case Hexagon::POST_STwri_cNotPt :
>> + case Hexagon::POST_STwri_cdnNotPt_V4 :
>> + case Hexagon::STrib_imm_cNotPt_V4 :
>> + case Hexagon::STrib_imm_cdnNotPt_V4 :
>> + case Hexagon::STrid_cNotPt :
>> + case Hexagon::STrid_cdnNotPt_V4 :
>> + case Hexagon::STrid_indexed_cdnNotPt_V4 :
>> + case Hexagon::STrid_indexed_cNotPt :
>> + case Hexagon::STrid_indexed_shl_cNotPt_V4 :
>> + case Hexagon::STrid_indexed_shl_cdnNotPt_V4 :
>> + case Hexagon::POST_STdri_cNotPt :
>> + case Hexagon::POST_STdri_cdnNotPt_V4 :
>> + case Hexagon::STrih_imm_cNotPt_V4 :
>> + case Hexagon::STrih_imm_cdnNotPt_V4 :
>> + case Hexagon::STriw_imm_cNotPt_V4 :
>> + case Hexagon::STriw_imm_cdnNotPt_V4 :
>> + case Hexagon::JMP_cdnNotPt :
>> + case Hexagon::LDrid_cNotPt :
>> + case Hexagon::LDrid_cdnNotPt :
>> + case Hexagon::LDrid_indexed_cNotPt :
>> + case Hexagon::LDrid_indexed_cdnNotPt :
>> + case Hexagon::POST_LDrid_cNotPt :
>> + case Hexagon::POST_LDrid_cdnNotPt_V4 :
>> + case Hexagon::LDriw_cNotPt :
>> + case Hexagon::LDriw_cdnNotPt :
>> + case Hexagon::LDriw_indexed_cNotPt :
>> + case Hexagon::LDriw_indexed_cdnNotPt :
>> + case Hexagon::POST_LDriw_cNotPt :
>> + case Hexagon::POST_LDriw_cdnNotPt_V4 :
>> + case Hexagon::LDrih_cNotPt :
>> + case Hexagon::LDrih_cdnNotPt :
>> + case Hexagon::LDrih_indexed_cNotPt :
>> + case Hexagon::LDrih_indexed_cdnNotPt :
>> + case Hexagon::POST_LDrih_cNotPt :
>> + case Hexagon::POST_LDrih_cdnNotPt_V4 :
>> + case Hexagon::LDrib_cNotPt :
>> + case Hexagon::LDrib_cdnNotPt :
>> + case Hexagon::LDrib_indexed_cNotPt :
>> + case Hexagon::LDrib_indexed_cdnNotPt :
>> + case Hexagon::POST_LDrib_cNotPt :
>> + case Hexagon::POST_LDrib_cdnNotPt_V4 :
>> + case Hexagon::LDriuh_cNotPt :
>> + case Hexagon::LDriuh_cdnNotPt :
>> + case Hexagon::LDriuh_indexed_cNotPt :
>> + case Hexagon::LDriuh_indexed_cdnNotPt :
>> + case Hexagon::POST_LDriuh_cNotPt :
>> + case Hexagon::POST_LDriuh_cdnNotPt_V4 :
>> + case Hexagon::LDriub_cNotPt :
>> + case Hexagon::LDriub_cdnNotPt :
>> + case Hexagon::LDriub_indexed_cNotPt :
>> + case Hexagon::LDriub_indexed_cdnNotPt :
>> + case Hexagon::POST_LDriub_cNotPt :
>> + case Hexagon::POST_LDriub_cdnNotPt_V4 :
>> + case Hexagon::LDrid_indexed_cNotPt_V4 :
>> + case Hexagon::LDrid_indexed_cdnNotPt_V4 :
>> + case Hexagon::LDrid_indexed_shl_cNotPt_V4 :
>> + case Hexagon::LDrid_indexed_shl_cdnNotPt_V4 :
>> + case Hexagon::LDrib_indexed_cNotPt_V4 :
>> + case Hexagon::LDrib_indexed_cdnNotPt_V4 :
>> + case Hexagon::LDrib_indexed_shl_cNotPt_V4 :
>> + case Hexagon::LDrib_indexed_shl_cdnNotPt_V4 :
>> + case Hexagon::LDriub_indexed_cNotPt_V4 :
>> + case Hexagon::LDriub_indexed_cdnNotPt_V4 :
>> + case Hexagon::LDriub_indexed_shl_cNotPt_V4 :
>> + case Hexagon::LDriub_indexed_shl_cdnNotPt_V4 :
>> + case Hexagon::LDrih_indexed_cNotPt_V4 :
>> + case Hexagon::LDrih_indexed_cdnNotPt_V4 :
>> + case Hexagon::LDrih_indexed_shl_cNotPt_V4 :
>> + case Hexagon::LDrih_indexed_shl_cdnNotPt_V4 :
>> + case Hexagon::LDriuh_indexed_cNotPt_V4 :
>> + case Hexagon::LDriuh_indexed_cdnNotPt_V4 :
>> + case Hexagon::LDriuh_indexed_shl_cNotPt_V4 :
>> + case Hexagon::LDriuh_indexed_shl_cdnNotPt_V4 :
>> + case Hexagon::LDriw_indexed_cNotPt_V4 :
>> + case Hexagon::LDriw_indexed_cdnNotPt_V4 :
>> + case Hexagon::LDriw_indexed_shl_cNotPt_V4 :
>> + case Hexagon::LDriw_indexed_shl_cdnNotPt_V4 :
>> + case Hexagon::ADD_ri_cNotPt :
>> + case Hexagon::ADD_ri_cdnNotPt :
>> + case Hexagon::ADD_rr_cNotPt :
>> + case Hexagon::ADD_rr_cdnNotPt :
>> + case Hexagon::XOR_rr_cNotPt :
>> + case Hexagon::XOR_rr_cdnNotPt :
>> + case Hexagon::AND_rr_cNotPt :
>> + case Hexagon::AND_rr_cdnNotPt :
>> + case Hexagon::OR_rr_cNotPt :
>> + case Hexagon::OR_rr_cdnNotPt :
>> + case Hexagon::SUB_rr_cNotPt :
>> + case Hexagon::SUB_rr_cdnNotPt :
>> + case Hexagon::COMBINE_rr_cNotPt :
>> + case Hexagon::COMBINE_rr_cdnNotPt :
>> + case Hexagon::ASLH_cNotPt_V4 :
>> + case Hexagon::ASLH_cdnNotPt_V4 :
>> + case Hexagon::ASRH_cNotPt_V4 :
>> + case Hexagon::ASRH_cdnNotPt_V4 :
>> + case Hexagon::SXTB_cNotPt_V4 :
>> + case Hexagon::SXTB_cdnNotPt_V4 :
>> + case Hexagon::SXTH_cNotPt_V4 :
>> + case Hexagon::SXTH_cdnNotPt_V4 :
>> + case Hexagon::ZXTB_cNotPt_V4 :
>> + case Hexagon::ZXTB_cdnNotPt_V4 :
>> + case Hexagon::ZXTH_cNotPt_V4 :
>> + case Hexagon::ZXTH_cdnNotPt_V4 :
>> +
>> + case Hexagon::LDrid_GP_cNotPt_V4 :
>> + case Hexagon::LDrib_GP_cNotPt_V4 :
>> + case Hexagon::LDriub_GP_cNotPt_V4 :
>> + case Hexagon::LDrih_GP_cNotPt_V4 :
>> + case Hexagon::LDriuh_GP_cNotPt_V4 :
>> + case Hexagon::LDriw_GP_cNotPt_V4 :
>> + case Hexagon::LDd_GP_cNotPt_V4 :
>> + case Hexagon::LDb_GP_cNotPt_V4 :
>> + case Hexagon::LDub_GP_cNotPt_V4 :
>> + case Hexagon::LDh_GP_cNotPt_V4 :
>> + case Hexagon::LDuh_GP_cNotPt_V4 :
>> + case Hexagon::LDw_GP_cNotPt_V4 :
>> + case Hexagon::STrid_GP_cNotPt_V4 :
>> + case Hexagon::STrib_GP_cNotPt_V4 :
>> + case Hexagon::STrih_GP_cNotPt_V4 :
>> + case Hexagon::STriw_GP_cNotPt_V4 :
>> + case Hexagon::STd_GP_cNotPt_V4 :
>> + case Hexagon::STb_GP_cNotPt_V4 :
>> + case Hexagon::STh_GP_cNotPt_V4 :
>> + case Hexagon::STw_GP_cNotPt_V4 :
>> + case Hexagon::LDrid_GP_cdnNotPt_V4 :
>> + case Hexagon::LDrib_GP_cdnNotPt_V4 :
>> + case Hexagon::LDriub_GP_cdnNotPt_V4 :
>> + case Hexagon::LDrih_GP_cdnNotPt_V4 :
>> + case Hexagon::LDriuh_GP_cdnNotPt_V4 :
>> + case Hexagon::LDriw_GP_cdnNotPt_V4 :
>> + case Hexagon::LDd_GP_cdnNotPt_V4 :
>> + case Hexagon::LDb_GP_cdnNotPt_V4 :
>> + case Hexagon::LDub_GP_cdnNotPt_V4 :
>> + case Hexagon::LDh_GP_cdnNotPt_V4 :
>> + case Hexagon::LDuh_GP_cdnNotPt_V4 :
>> + case Hexagon::LDw_GP_cdnNotPt_V4 :
>> + case Hexagon::STrid_GP_cdnNotPt_V4 :
>> + case Hexagon::STrib_GP_cdnNotPt_V4 :
>> + case Hexagon::STrih_GP_cdnNotPt_V4 :
>> + case Hexagon::STriw_GP_cdnNotPt_V4 :
>> + case Hexagon::STd_GP_cdnNotPt_V4 :
>> + case Hexagon::STb_GP_cdnNotPt_V4 :
>> + case Hexagon::STh_GP_cdnNotPt_V4 :
>> + case Hexagon::STw_GP_cdnNotPt_V4 :
>> + return false;
>> +
>> + default:
>> + assert (false && "Unknown predicate sense of the instruction");
>> + }
>> + // return *some value* to avoid compiler warning
>> + return false;
>> +}
>> +
>> +bool HexagonPacketizerList::isDotNewInst(MachineInstr* MI) {
>> + if (isNewValueInst(MI))
>> + return true;
>> +
>> + switch (MI->getOpcode()) {
>> + case Hexagon::TFR_cdnNotPt:
>> + case Hexagon::TFR_cdnPt:
>> + case Hexagon::TFRI_cdnNotPt:
>> + case Hexagon::TFRI_cdnPt:
>> + case Hexagon::LDrid_cdnPt :
>> + case Hexagon::LDrid_cdnNotPt :
>> + case Hexagon::LDrid_indexed_cdnPt :
>> + case Hexagon::LDrid_indexed_cdnNotPt :
>> + case Hexagon::POST_LDrid_cdnPt_V4 :
>> + case Hexagon::POST_LDrid_cdnNotPt_V4 :
>> + case Hexagon::LDriw_cdnPt :
>> + case Hexagon::LDriw_cdnNotPt :
>> + case Hexagon::LDriw_indexed_cdnPt :
>> + case Hexagon::LDriw_indexed_cdnNotPt :
>> + case Hexagon::POST_LDriw_cdnPt_V4 :
>> + case Hexagon::POST_LDriw_cdnNotPt_V4 :
>> + case Hexagon::LDrih_cdnPt :
>> + case Hexagon::LDrih_cdnNotPt :
>> + case Hexagon::LDrih_indexed_cdnPt :
>> + case Hexagon::LDrih_indexed_cdnNotPt :
>> + case Hexagon::POST_LDrih_cdnPt_V4 :
>> + case Hexagon::POST_LDrih_cdnNotPt_V4 :
>> + case Hexagon::LDrib_cdnPt :
>> + case Hexagon::LDrib_cdnNotPt :
>> + case Hexagon::LDrib_indexed_cdnPt :
>> + case Hexagon::LDrib_indexed_cdnNotPt :
>> + case Hexagon::POST_LDrib_cdnPt_V4 :
>> + case Hexagon::POST_LDrib_cdnNotPt_V4 :
>> + case Hexagon::LDriuh_cdnPt :
>> + case Hexagon::LDriuh_cdnNotPt :
>> + case Hexagon::LDriuh_indexed_cdnPt :
>> + case Hexagon::LDriuh_indexed_cdnNotPt :
>> + case Hexagon::POST_LDriuh_cdnPt_V4 :
>> + case Hexagon::POST_LDriuh_cdnNotPt_V4 :
>> + case Hexagon::LDriub_cdnPt :
>> + case Hexagon::LDriub_cdnNotPt :
>> + case Hexagon::LDriub_indexed_cdnPt :
>> + case Hexagon::LDriub_indexed_cdnNotPt :
>> + case Hexagon::POST_LDriub_cdnPt_V4 :
>> + case Hexagon::POST_LDriub_cdnNotPt_V4 :
>> +
>> + case Hexagon::LDrid_indexed_cdnPt_V4 :
>> + case Hexagon::LDrid_indexed_cdnNotPt_V4 :
>> + case Hexagon::LDrid_indexed_shl_cdnPt_V4 :
>> + case Hexagon::LDrid_indexed_shl_cdnNotPt_V4 :
>> + case Hexagon::LDrib_indexed_cdnPt_V4 :
>> + case Hexagon::LDrib_indexed_cdnNotPt_V4 :
>> + case Hexagon::LDrib_indexed_shl_cdnPt_V4 :
>> + case Hexagon::LDrib_indexed_shl_cdnNotPt_V4 :
>> + case Hexagon::LDriub_indexed_cdnPt_V4 :
>> + case Hexagon::LDriub_indexed_cdnNotPt_V4 :
>> + case Hexagon::LDriub_indexed_shl_cdnPt_V4 :
>> + case Hexagon::LDriub_indexed_shl_cdnNotPt_V4 :
>> + case Hexagon::LDrih_indexed_cdnPt_V4 :
>> + case Hexagon::LDrih_indexed_cdnNotPt_V4 :
>> + case Hexagon::LDrih_indexed_shl_cdnPt_V4 :
>> + case Hexagon::LDrih_indexed_shl_cdnNotPt_V4 :
>> + case Hexagon::LDriuh_indexed_cdnPt_V4 :
>> + case Hexagon::LDriuh_indexed_cdnNotPt_V4 :
>> + case Hexagon::LDriuh_indexed_shl_cdnPt_V4 :
>> + case Hexagon::LDriuh_indexed_shl_cdnNotPt_V4 :
>> + case Hexagon::LDriw_indexed_cdnPt_V4 :
>> + case Hexagon::LDriw_indexed_cdnNotPt_V4 :
>> + case Hexagon::LDriw_indexed_shl_cdnPt_V4 :
>> + case Hexagon::LDriw_indexed_shl_cdnNotPt_V4 :
>> +
>> + // Conditional add
>> + case Hexagon::ADD_ri_cdnPt:
>> + case Hexagon::ADD_ri_cdnNotPt:
>> + case Hexagon::ADD_rr_cdnPt:
>> + case Hexagon::ADD_rr_cdnNotPt:
>> +
>> + // Conditional logical operations
>> + case Hexagon::XOR_rr_cdnPt :
>> + case Hexagon::XOR_rr_cdnNotPt :
>> + case Hexagon::AND_rr_cdnPt :
>> + case Hexagon::AND_rr_cdnNotPt :
>> + case Hexagon::OR_rr_cdnPt :
>> + case Hexagon::OR_rr_cdnNotPt :
>> +
>> + // Conditional subtract
>> + case Hexagon::SUB_rr_cdnPt :
>> + case Hexagon::SUB_rr_cdnNotPt :
>> +
>> + // Conditional combine
>> + case Hexagon::COMBINE_rr_cdnPt :
>> + case Hexagon::COMBINE_rr_cdnNotPt :
>> +
>> + // Conditional shift operations
>> + case Hexagon::ASLH_cdnPt_V4:
>> + case Hexagon::ASLH_cdnNotPt_V4:
>> + case Hexagon::ASRH_cdnPt_V4:
>> + case Hexagon::ASRH_cdnNotPt_V4:
>> + case Hexagon::SXTB_cdnPt_V4:
>> + case Hexagon::SXTB_cdnNotPt_V4:
>> + case Hexagon::SXTH_cdnPt_V4:
>> + case Hexagon::SXTH_cdnNotPt_V4:
>> + case Hexagon::ZXTB_cdnPt_V4:
>> + case Hexagon::ZXTB_cdnNotPt_V4:
>> + case Hexagon::ZXTH_cdnPt_V4:
>> + case Hexagon::ZXTH_cdnNotPt_V4:
>> +
>> + // Conditional stores
>> + case Hexagon::STrib_imm_cdnPt_V4 :
>> + case Hexagon::STrib_imm_cdnNotPt_V4 :
>> + case Hexagon::STrib_cdnPt_V4 :
>> + case Hexagon::STrib_cdnNotPt_V4 :
>> + case Hexagon::STrib_indexed_cdnPt_V4 :
>> + case Hexagon::STrib_indexed_cdnNotPt_V4 :
>> + case Hexagon::POST_STbri_cdnPt_V4 :
>> + case Hexagon::POST_STbri_cdnNotPt_V4 :
>> + case Hexagon::STrib_indexed_shl_cdnPt_V4 :
>> + case Hexagon::STrib_indexed_shl_cdnNotPt_V4 :
>> +
>> + // Store doubleword conditionally
>> + case Hexagon::STrid_indexed_cdnPt_V4 :
>> + case Hexagon::STrid_indexed_cdnNotPt_V4 :
>> + case Hexagon::STrid_indexed_shl_cdnPt_V4 :
>> + case Hexagon::STrid_indexed_shl_cdnNotPt_V4 :
>> + case Hexagon::POST_STdri_cdnPt_V4 :
>> + case Hexagon::POST_STdri_cdnNotPt_V4 :
>> +
>> + // Store halfword conditionally
>> + case Hexagon::STrih_cdnPt_V4 :
>> + case Hexagon::STrih_cdnNotPt_V4 :
>> + case Hexagon::STrih_indexed_cdnPt_V4 :
>> + case Hexagon::STrih_indexed_cdnNotPt_V4 :
>> + case Hexagon::STrih_imm_cdnPt_V4 :
>> + case Hexagon::STrih_imm_cdnNotPt_V4 :
>> + case Hexagon::STrih_indexed_shl_cdnPt_V4 :
>> + case Hexagon::STrih_indexed_shl_cdnNotPt_V4 :
>> + case Hexagon::POST_SThri_cdnPt_V4 :
>> + case Hexagon::POST_SThri_cdnNotPt_V4 :
>> +
>> + // Store word conditionally
>> + case Hexagon::STriw_cdnPt_V4 :
>> + case Hexagon::STriw_cdnNotPt_V4 :
>> + case Hexagon::STriw_indexed_cdnPt_V4 :
>> + case Hexagon::STriw_indexed_cdnNotPt_V4 :
>> + case Hexagon::STriw_imm_cdnPt_V4 :
>> + case Hexagon::STriw_imm_cdnNotPt_V4 :
>> + case Hexagon::STriw_indexed_shl_cdnPt_V4 :
>> + case Hexagon::STriw_indexed_shl_cdnNotPt_V4 :
>> + case Hexagon::POST_STwri_cdnPt_V4 :
>> + case Hexagon::POST_STwri_cdnNotPt_V4 :
>> +
>> + case Hexagon::LDd_GP_cdnPt_V4:
>> + case Hexagon::LDd_GP_cdnNotPt_V4:
>> + case Hexagon::LDb_GP_cdnPt_V4:
>> + case Hexagon::LDb_GP_cdnNotPt_V4:
>> + case Hexagon::LDub_GP_cdnPt_V4:
>> + case Hexagon::LDub_GP_cdnNotPt_V4:
>> + case Hexagon::LDh_GP_cdnPt_V4:
>> + case Hexagon::LDh_GP_cdnNotPt_V4:
>> + case Hexagon::LDuh_GP_cdnPt_V4:
>> + case Hexagon::LDuh_GP_cdnNotPt_V4:
>> + case Hexagon::LDw_GP_cdnPt_V4:
>> + case Hexagon::LDw_GP_cdnNotPt_V4:
>> + case Hexagon::LDrid_GP_cdnPt_V4:
>> + case Hexagon::LDrid_GP_cdnNotPt_V4:
>> + case Hexagon::LDrib_GP_cdnPt_V4:
>> + case Hexagon::LDrib_GP_cdnNotPt_V4:
>> + case Hexagon::LDriub_GP_cdnPt_V4:
>> + case Hexagon::LDriub_GP_cdnNotPt_V4:
>> + case Hexagon::LDrih_GP_cdnPt_V4:
>> + case Hexagon::LDrih_GP_cdnNotPt_V4:
>> + case Hexagon::LDriuh_GP_cdnPt_V4:
>> + case Hexagon::LDriuh_GP_cdnNotPt_V4:
>> + case Hexagon::LDriw_GP_cdnPt_V4:
>> + case Hexagon::LDriw_GP_cdnNotPt_V4:
>> +
>> + case Hexagon::STrid_GP_cdnPt_V4:
>> + case Hexagon::STrid_GP_cdnNotPt_V4:
>> + case Hexagon::STrib_GP_cdnPt_V4:
>> + case Hexagon::STrib_GP_cdnNotPt_V4:
>> + case Hexagon::STrih_GP_cdnPt_V4:
>> + case Hexagon::STrih_GP_cdnNotPt_V4:
>> + case Hexagon::STriw_GP_cdnPt_V4:
>> + case Hexagon::STriw_GP_cdnNotPt_V4:
>> + case Hexagon::STd_GP_cdnPt_V4:
>> + case Hexagon::STd_GP_cdnNotPt_V4:
>> + case Hexagon::STb_GP_cdnPt_V4:
>> + case Hexagon::STb_GP_cdnNotPt_V4:
>> + case Hexagon::STh_GP_cdnPt_V4:
>> + case Hexagon::STh_GP_cdnNotPt_V4:
>> + case Hexagon::STw_GP_cdnPt_V4:
>> + case Hexagon::STw_GP_cdnNotPt_V4:
>> +
>> + return true;
>> + }
>> + return false;
>> +}
>> +
>> +static MachineOperand& GetPostIncrementOperand(MachineInstr *MI,
>> + const HexagonInstrInfo *QII) {
>> + assert(QII->isPostIncrement(MI) && "Not a post increment operation.");
>> +#ifndef NDEBUG
>> + // Post Increment means duplicates. Use a dense map to find duplicates in
>> + // the list. Caution: DenseMap initializes with a minimum of 64 buckets,
>> + // whereas there are at most 5 operands in the post increment.
>> + DenseMap<unsigned, unsigned> DefRegsSet;
>> + for(unsigned opNum = 0; opNum < MI->getNumOperands(); opNum++)
>> + if (MI->getOperand(opNum).isReg() &&
>> + MI->getOperand(opNum).isDef()) {
>> + DefRegsSet[MI->getOperand(opNum).getReg()] = 1;
>> + }
>> +
>> + for(unsigned opNum = 0; opNum < MI->getNumOperands(); opNum++)
>> + if (MI->getOperand(opNum).isReg() &&
>> + MI->getOperand(opNum).isUse()) {
>> + if (DefRegsSet[MI->getOperand(opNum).getReg()]) {
>> + return MI->getOperand(opNum);
>> + }
>> + }
>> +#else
>> + if (MI->getDesc().mayLoad()) {
>> + // The 2nd operand is always the post increment operand in load.
>> + assert(MI->getOperand(1).isReg() &&
>> + "Post increment operand has to be a register.");
>> + return (MI->getOperand(1));
>> + }
>> + if (MI->getDesc().mayStore()) {
>> + // The 1st operand is always the post increment operand in store.
>> + assert(MI->getOperand(0).isReg() &&
>> + "Post increment operand has to be a register.");
>> + return (MI->getOperand(0));
>> + }
>> +#endif
>> + // We should never come here.
>> + llvm_unreachable("mayLoad or mayStore not set for Post Increment operation");
>> +}
>> +
>> +// Get the value being stored.
>> +static MachineOperand& GetStoreValueOperand(MachineInstr *MI) {
>> + // The value being stored is always the last operand.
>> + return (MI->getOperand(MI->getNumOperands()-1));
>> +}
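
The NDEBUG branch of GetPostIncrementOperand leans on the fact that a
post-increment instruction names its address register twice, once as a def and
once as a use. A rough standalone sketch of that "find the operand that is
both defined and used" scan, with a toy operand struct instead of
MachineOperand:

    // Toy model of the def/use scan performed over MachineOperands.
    #include <cassert>
    #include <set>
    #include <vector>

    struct ToyOperand { unsigned Reg; bool IsReg; bool IsDef; };

    // Returns the index of the first use whose register is also defined by the
    // same instruction, i.e. the post-increment address register; -1 if none.
    static int findPostIncOperand(const std::vector<ToyOperand> &Ops) {
      std::set<unsigned> Defs;
      for (unsigned i = 0; i < Ops.size(); ++i)
        if (Ops[i].IsReg && Ops[i].IsDef)
          Defs.insert(Ops[i].Reg);
      for (unsigned i = 0; i < Ops.size(); ++i)
        if (Ops[i].IsReg && !Ops[i].IsDef && Defs.count(Ops[i].Reg))
          return (int)i;
      return -1;
    }

    int main() {
      // e.g. r1 = memw(r2++#4): defs {r1, r2}, use {r2}
      std::vector<ToyOperand> Ops;
      Ops.push_back({1, true, true});   // r1 def
      Ops.push_back({2, true, true});   // r2 def (incremented address)
      Ops.push_back({2, true, false});  // r2 use (address)
      assert(findPostIncOperand(Ops) == 2);
      return 0;
    }
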
>> +
>> +// Can this store be converted to a new value store?
>> +// The following restrictions must be respected when converting a store into
>> +// a new value store.
>> +// 1. If an instruction uses auto-increment, its address register cannot
>> +// be a new-value register. Arch Spec 5.4.2.1
>> +// 2. If an instruction uses absolute-set addressing mode, its address
>> +// register cannot be a new-value register. Arch Spec 5.4.2.1.
>> +// TODO: This is not enabled, as absolute-set address mode patterns are
>> +// not implemented.
>> +// 3. If an instruction produces a 64-bit result, its registers cannot be
>> +// used as new-value registers. Arch Spec 5.4.2.2.
>> +// 4. If the instruction that sets a new-value register is conditional, then
>> +// the instruction that uses the new-value register must also be conditional,
>> +// and both must always have their predicates evaluate identically.
>> +// Arch Spec 5.4.2.3.
>> +// 5. There is an implied restriction that a packet cannot have another store
>> +// if there is a new value store in the packet. Corollary: if there is
>> +// already a store in a packet, there cannot be a new value store.
>> +// Arch Spec: 3.4.4.2
>> +bool HexagonPacketizerList::CanPromoteToNewValueStore(MachineInstr *MI,
>> + MachineInstr *PacketMI, unsigned DepReg,
>> + std::map<MachineInstr*, SUnit*> MIToSUnit)
>> +{
>> + // Make sure we are looking at the store
>> + if (!IsNewifyStore(MI))
>> + return false;
>> +
>> + // Make sure there is a dependency and it can be new value'ed.
>> + if (GetStoreValueOperand(MI).isReg() &&
>> + GetStoreValueOperand(MI).getReg() != DepReg)
>> + return false;
>> +
>> + const HexagonRegisterInfo* QRI =
>> + (const HexagonRegisterInfo *) TM.getRegisterInfo();
>> + const MCInstrDesc& MCID = PacketMI->getDesc();
>> + // first operand is always the result
>> +
>> + const HexagonInstrInfo *QII = (const HexagonInstrInfo *) TII;
>> + const TargetRegisterClass* PacketRC = QII->getRegClass(MCID, 0, QRI);
>> +
>> + // If there is already a store in the packet, we cannot do a new value
>> + // store. Arch Spec 3.4.4.2.
>> + for (std::vector<MachineInstr*>::iterator VI = CurrentPacketMIs.begin(),
>> + VE = CurrentPacketMIs.end();
>> + (VI != VE); ++VI) {
>> + SUnit* PacketSU = MIToSUnit[*VI];
>> + if (PacketSU->getInstr()->getDesc().mayStore() ||
>> + // if we have mayStore = 1 set on ALLOCFRAME and DEALLOCFRAME,
>> + // then we don't need this
>> + PacketSU->getInstr()->getOpcode() == Hexagon::ALLOCFRAME ||
>> + PacketSU->getInstr()->getOpcode() == Hexagon::DEALLOCFRAME)
>> + return false;
>> + }
>> +
>> + if (PacketRC == Hexagon::DoubleRegsRegisterClass) {
>> + // New value store constraint: double regs cannot feed into a new value
>> + // store.
>> + // arch spec section: 5.4.2.2
>> + return false;
>> + }
>> +
>> + // Make sure it's NOT the post increment register that we are going to
>> + // new value.
>> + if (QII->isPostIncrement(MI) &&
>> + MI->getDesc().mayStore() &&
>> + GetPostIncrementOperand(MI, QII).getReg() == DepReg) {
>> + return false;
>> + }
>> +
>> + if (QII->isPostIncrement(PacketMI) &&
>> + PacketMI->getDesc().mayLoad() &&
>> + GetPostIncrementOperand(PacketMI, QII).getReg() == DepReg) {
>> + // if source is post_inc, or absolute-set addressing,
>> + // it can not feed into new value store
>> + // r3 = memw(r2++#4)
>> + // memw(r30 + #-1404) = r2.new -> can not be new value store
>> + // arch spec section: 5.4.2.1
>> + return false;
>> + }
>> +
>> + // If the source that feeds the store is predicated, the new value store
>> + // must also be predicated.
>> + if (QII->isPredicated(PacketMI)) {
>> + if (!QII->isPredicated(MI))
>> + return false;
>> +
>> + // Check to make sure that they both will have their predicates
>> + // evaluate identically
>> + unsigned predRegNumSrc;
>> + unsigned predRegNumDst;
>> + const TargetRegisterClass* predRegClass;
>> +
>> + // Get predicate register used in the source instruction
>> + for(unsigned opNum = 0; opNum < PacketMI->getNumOperands(); opNum++) {
>> + if ( PacketMI->getOperand(opNum).isReg())
>> + predRegNumSrc = PacketMI->getOperand(opNum).getReg();
>> + predRegClass = QRI->getMinimalPhysRegClass(predRegNumSrc);
>> + if (predRegClass == Hexagon::PredRegsRegisterClass) {
>> + break;
>> + }
>> + }
>> + assert ((predRegClass == Hexagon::PredRegsRegisterClass) &&
>> + ("predicate register not found in a predicated PacketMI instruction"));
>> +
>> + // Get predicate register used in new-value store instruction
>> + for(unsigned opNum = 0; opNum < MI->getNumOperands(); opNum++) {
>> + if ( MI->getOperand(opNum).isReg())
>> + predRegNumDst = MI->getOperand(opNum).getReg();
>> + predRegClass = QRI->getMinimalPhysRegClass(predRegNumDst);
>> + if (predRegClass == Hexagon::PredRegsRegisterClass) {
>> + break;
>> + }
>> + }
>> + assert ((predRegClass == Hexagon::PredRegsRegisterClass) &&
>> + ("predicate register not found in a predicated MI instruction"));
>> +
>> + // New-value register producer and user (store) need to satisfy these
>> + // constraints:
>> + // 1) Both instructions should be predicated on the same register.
>> + // 2) If the producer of the new-value register is .new predicated, then the
>> + // store should also be .new predicated, and if the producer is not .new
>> + // predicated, then the store should not be .new predicated.
>> + // 3) Both the new-value register producer and user should have the same
>> + // predicate sense, i.e., either both should be negated or both should be
>> + // non-negated.
>> +
>> + if (( predRegNumDst != predRegNumSrc) ||
>> + isDotNewInst(PacketMI) != isDotNewInst(MI) ||
>> + GetPredicateSense(MI, QII) != GetPredicateSense(PacketMI, QII)) {
>> + return false;
>> + }
>> + }
>> +
>> + // Make sure that other than the new-value register no other store
>> + // instruction register has been modified in the same packet. Predicate
>> + // registers can be modified, but they should not be modified between the
>> + // producer and the store instruction, as that would make them both
>> + // conditional on different values.
>> + // We already know this to be true for all the instructions before and
>> + // including PacketMI. However, we need to perform the check for the
>> + // remaining instructions in the packet.
>> +
>> + std::vector<MachineInstr*>::iterator VI;
>> + std::vector<MachineInstr*>::iterator VE;
>> + unsigned StartCheck = 0;
>> +
>> + for (VI=CurrentPacketMIs.begin(), VE = CurrentPacketMIs.end();
>> + (VI != VE); ++VI) {
>> + SUnit* TempSU = MIToSUnit[*VI];
>> + MachineInstr* TempMI = TempSU->getInstr();
>> +
>> + // The following condition is true for all the instructions until PacketMI
>> + // is reached (StartCheck is set to 0 before the for loop).
>> + // The StartCheck flag is 1 for all the instructions after PacketMI.
>> + if (TempMI != PacketMI && !StartCheck) // Start processing only after
>> + continue; // encountering PacketMI.
>> +
>> + StartCheck = 1;
>> + if (TempMI == PacketMI) // We don't want to check PacketMI for dependence
>> + continue;
>> +
>> + for(unsigned opNum = 0; opNum < MI->getNumOperands(); opNum++) {
>> + if (MI->getOperand(opNum).isReg() &&
>> + TempSU->getInstr()->modifiesRegister(MI->getOperand(opNum).getReg(), QRI))
>> + return false;
>> + }
>> + }
>> +
>> + // Make sure that for non POST_INC stores:
>> + // 1. The only use of reg is DepReg and no other registers.
>> + // This handles V4 base+index registers.
>> + // The following store can not be dot new.
>> + // Eg. r0 = add(r0, #3)
>> + // memw(r1+r0<<#2) = r0
>> + if (!QII->isPostIncrement(MI) &&
>> + GetStoreValueOperand(MI).isReg() &&
>> + GetStoreValueOperand(MI).getReg() == DepReg) {
>> + for(unsigned opNum = 0; opNum < MI->getNumOperands()-1; opNum++) {
>> + if (MI->getOperand(opNum).isReg()&&
>> + MI->getOperand(opNum).getReg() == DepReg) {
>> + return false;
>> + }
>> + }
>> +    // 2. If the data definition is due to an implicit definition of the
>> +    //    register, do not newify the store. Eg.
>> +    //    %R9<def> = ZXTH %R12, %D6<imp-use>, %R12<imp-def>
>> +    //    STrih_indexed %R8, 2, %R12<kill>; mem:ST2[%scevgep343]
>> +    for (unsigned opNum = 0; opNum < PacketMI->getNumOperands(); opNum++) {
>> +      if (PacketMI->getOperand(opNum).isReg() &&
>> +          PacketMI->getOperand(opNum).getReg() == DepReg &&
>> +          PacketMI->getOperand(opNum).isDef() &&
>> +          PacketMI->getOperand(opNum).isImplicit()) {
>> +        return false;
>> +      }
>> +    }
>> +  }
>> +
>> + // Can be dot new store.
>> + return true;
>> +}
>> +
>> +// Can this MI be promoted to either a new-value store or a new-value jump?
>> +bool HexagonPacketizerList::CanPromoteToNewValue( MachineInstr *MI,
>> +                      SUnit *PacketSU, unsigned DepReg,
>> +                      std::map<MachineInstr*, SUnit*> MIToSUnit,
>> +                      MachineBasicBlock::iterator &MII)
>> +{
>> +
>> +  const HexagonRegisterInfo *QRI =
>> +      (const HexagonRegisterInfo *) TM.getRegisterInfo();
>> +  if (!QRI->Subtarget.hasV4TOps() ||
>> +      !IsNewifyStore(MI))
>> +    return false;
>> +
>> + MachineInstr *PacketMI = PacketSU->getInstr();
>> +
>> +  // Check to see if the store can be new-value'ed.
>> +  if (CanPromoteToNewValueStore(MI, PacketMI, DepReg, MIToSUnit))
>> +    return true;
>> +
>> +  // Check to see if the compare/jump can be new-value'ed.
>> +  // This is done as a pass on its own. No need to check it here.
>> +  return false;
>> +}
>> +
>> +// Check to see if an instruction can be dot new
>> +// There are three kinds.
>> +// 1. dot new on predicate - V2/V3/V4
>> +// 2. dot new on stores NV/ST - V4
>> +// 3. dot new on jump NV/J - V4 -- This is generated in a pass.
>> +bool HexagonPacketizerList::CanPromoteToDotNew( MachineInstr *MI,
>> +                      SUnit *PacketSU, unsigned DepReg,
>> +                      std::map<MachineInstr*, SUnit*> MIToSUnit,
>> +                      MachineBasicBlock::iterator &MII,
>> +                      const TargetRegisterClass* RC )
>> +{
>> +  // already a dot new instruction
>> +  if (isDotNewInst(MI) && !IsNewifyStore(MI))
>> + return false;
>> +
>> + if (!isNewifiable(MI))
>> + return false;
>> +
>> +  // predicate .new
>> +  if (RC == Hexagon::PredRegsRegisterClass && isCondInst(MI))
>> +      return true;
>> +  else if (RC != Hexagon::PredRegsRegisterClass &&
>> +           !IsNewifyStore(MI)) // MI is not a new-value store.
>> +    return false;
>> +  else {
>> +    // Create a dot new machine instruction to see if resources can be
>> +    // allocated. If not, bail out now.
>> +    const HexagonInstrInfo *QII = (const HexagonInstrInfo *) TII;
>> +    int NewOpcode = GetDotNewOp(MI->getOpcode());
>> +    const MCInstrDesc &desc = QII->get(NewOpcode);
>> +    DebugLoc dl;
>> +    MachineInstr *NewMI =
>> +        MI->getParent()->getParent()->CreateMachineInstr(desc, dl);
>> +    bool ResourcesAvailable = ResourceTracker->canReserveResources(NewMI);
>> +    MI->getParent()->getParent()->DeleteMachineInstr(NewMI);
>> +
>> + if (!ResourcesAvailable)
>> + return false;
>> +
>> +    // New-value store only; the new-value jump is generated as a separate pass.
>> +    if (!CanPromoteToNewValue(MI, PacketSU, DepReg, MIToSUnit, MII)) {
>> + return false;
>> + }
>> + }
>> + return true;
>> +}
>> +
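A quick aside on the "probe" done inside CanPromoteToDotNew above: the code builds a throwaway .new-form instruction, asks the DFA tracker whether it would still fit in the current packet, and then deletes it regardless of the answer. The following is only a minimal standalone sketch of that probe-and-discard pattern with toy types (it is not the LLVM/Hexagon API used in the patch):

    #include <cassert>
    #include <iostream>

    // Toy stand-ins for the DFA resource tracker and an instruction.
    struct ToyInstr { int slotsNeeded; };

    class ToyResourceTracker {
      int SlotsLeft;
    public:
      explicit ToyResourceTracker(int Slots) : SlotsLeft(Slots) {}
      bool canReserveResources(const ToyInstr &I) const {
        return I.slotsNeeded <= SlotsLeft;
      }
      void reserveResources(const ToyInstr &I) {
        assert(canReserveResources(I));
        SlotsLeft -= I.slotsNeeded;
      }
    };

    // Probe whether a hypothetical .new form (which may need different
    // resources) would still fit, without committing anything.
    bool canPromote(const ToyResourceTracker &RT, int NewFormSlots) {
      ToyInstr Trial{NewFormSlots};               // build a throwaway instruction
      bool Fits = RT.canReserveResources(Trial);  // ask, but do not reserve
      return Fits;                                // Trial is discarded either way
    }

    int main() {
      ToyResourceTracker RT(2);
      std::cout << std::boolalpha << canPromote(RT, 1) << ' '
                << canPromote(RT, 3) << '\n'; // prints: true false
    }
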
>> +// Go through the packet instructions and search for anti dependency
>> +// between them and DepReg from MI
>> +// Consider this case:
>> +// Trying to add
>> +// a) %R1<def> = TFRI_cdNotPt %P3, 2
>> +// to this packet:
>> +// {
>> +// b) %P0<def> = OR_pp %P3<kill>, %P0<kill>
>> +// c) %P3<def> = TFR_PdRs %R23
>> +// d) %R1<def> = TFRI_cdnPt %P3, 4
>> +// }
>> +// The P3 from a) and d) will be complements after
>> +// a)'s P3 is converted to .new form
>> +// Anti Dep between c) and b) is irrelevant for this case
>> +bool HexagonPacketizerList::RestrictingDepExistInPacket (MachineInstr* MI,
>> +                      unsigned DepReg,
>> +                      std::map<MachineInstr*, SUnit*> MIToSUnit) {
>> +
>> + const HexagonInstrInfo *QII = (const HexagonInstrInfo *) TII;
>> + SUnit* PacketSUDep = MIToSUnit[MI];
>> +
>> +  for (std::vector<MachineInstr*>::iterator VIN = CurrentPacketMIs.begin(),
>> +       VEN = CurrentPacketMIs.end(); (VIN != VEN); ++VIN) {
>> +
>> + // We only care for dependencies to predicated instructions
>> + if(!QII->isPredicated(*VIN)) continue;
>> +
>> + // Scheduling Unit for current insn in the packet
>> + SUnit* PacketSU = MIToSUnit[*VIN];
>> +
>> + // Look at dependencies between current members of the packet
>> + // and predicate defining instruction MI.
>> + // Make sure that dependency is on the exact register
>> + // we care about.
>> + if (PacketSU->isSucc(PacketSUDep)) {
>> +      for (unsigned i = 0; i < PacketSU->Succs.size(); ++i) {
>> +        if ((PacketSU->Succs[i].getSUnit() == PacketSUDep) &&
>> +            (PacketSU->Succs[i].getKind() == SDep::Anti) &&
>> +            (PacketSU->Succs[i].getReg() == DepReg)) {
>> +          return true;
>> +        }
>> + }
>> + }
>> + }
>> +
>> + return false;
>> +}
>> +
>> +
>> +// Given two predicated instructions, this function detects whether
>> +// the predicates are complements
>> +bool HexagonPacketizerList::ArePredicatesComplements (MachineInstr* MI1,
>> +     MachineInstr* MI2, std::map<MachineInstr*, SUnit*> MIToSUnit) {
>> +
>> + const HexagonInstrInfo *QII = (const HexagonInstrInfo *) TII;
>> + // Currently can only reason about conditional transfers
>> +  if (!QII->isConditionalTransfer(MI1) || !QII->isConditionalTransfer(MI2)) {
>> +    return false;
>> +  }
>> +
>> + // Scheduling unit for candidate
>> + SUnit* SU = MIToSUnit[MI1];
>> +
>> + // One corner case deals with the following scenario:
>> + // Trying to add
>> + // a) %R24<def> = TFR_cPt %P0, %R25
>> + // to this packet:
>> + //
>> + // {
>> + // b) %R25<def> = TFR_cNotPt %P0, %R24
>> + // c) %P0<def> = CMPEQri %R26, 1
>> + // }
>> + //
>> + // On general check a) and b) are complements, but
>> + // presence of c) will convert a) to .new form, and
>> + // then it is not a complement
>> + // We attempt to detect it by analyzing existing
>> + // dependencies in the packet
>> +
>> +  // Analyze relationships between all existing members of the packet.
>> +  // Look for an anti dependency on the same predicate reg
>> +  // as used in the candidate.
>> +  for (std::vector<MachineInstr*>::iterator VIN = CurrentPacketMIs.begin(),
>> +       VEN = CurrentPacketMIs.end(); (VIN != VEN); ++VIN) {
>> +
>> + // Scheduling Unit for current insn in the packet
>> + SUnit* PacketSU = MIToSUnit[*VIN];
>> +
>> +    // If this instruction in the packet is succeeded by the candidate...
>> +    if (PacketSU->isSucc(SU)) {
>> +      for (unsigned i = 0; i < PacketSU->Succs.size(); ++i) {
>> +        // The corner case exists when there is a true data dependence
>> +        // between the candidate and one of the current packet members,
>> +        // the dependence is on a predicate reg, and there already exists
>> +        // an anti dependence on the same predicate reg in the packet.
>> +        if (PacketSU->Succs[i].getSUnit() == SU &&
>> +            Hexagon::PredRegsRegisterClass->contains(
>> +              PacketSU->Succs[i].getReg()) &&
>> +            PacketSU->Succs[i].getKind() == SDep::Data &&
>> +            // Here I know that *VIN is a predicate-setting instruction
>> +            // with a true data dependence to the candidate on the register
>> +            // we care about - c) in the above example.
>> +            // Now I need to see if there is an anti dependence
>> +            // from c) to any other instruction in the
>> +            // same packet on the pred reg of interest.
>> +            RestrictingDepExistInPacket(*VIN, PacketSU->Succs[i].getReg(),
>> +                                        MIToSUnit)) {
>> +          return false;
>> +        }
>> + }
>> + }
>> + }
>> +
>> +  // If the above case does not apply, check the regular
>> +  // complement condition:
>> +  // the predicate register must be the same and
>> +  // the predicate sense must be different.
>> +  // We also need to differentiate .old vs. .new:
>> +  // !p0 is not complementary to p0.new.
>> +  return ((MI1->getOperand(1).getReg() == MI2->getOperand(1).getReg()) &&
>> +          (GetPredicateSense(MI1, QII) != GetPredicateSense(MI2, QII)) &&
>> +          (isDotNewInst(MI1) == isDotNewInst(MI2)));
>> +}
>> +
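For what it's worth, the final return in ArePredicatesComplements boils down to three conditions: same predicate register, opposite predicate sense, same .old/.new form. A self-contained sketch of just that check, using a toy struct rather than MachineInstr (illustrative only):

    #include <iostream>

    // Toy description of a conditional transfer; not the real MachineInstr.
    struct ToyPredInfo {
      unsigned PredReg;   // predicate register number
      bool Negated;       // predicate sense, e.g. "if (!p0)"
      bool DotNew;        // predicated on p0.new rather than p0
    };

    // Two predicated instructions are complements only if they test the same
    // register, with opposite sense, and agree on .old vs .new.
    bool arePredicatesComplements(const ToyPredInfo &A, const ToyPredInfo &B) {
      return A.PredReg == B.PredReg &&
             A.Negated != B.Negated &&
             A.DotNew == B.DotNew;
    }

    int main() {
      ToyPredInfo IfP0{0, false, false}, IfNotP0{0, true, false};
      ToyPredInfo IfNotP0New{0, true, true};
      std::cout << std::boolalpha
                << arePredicatesComplements(IfP0, IfNotP0) << ' '      // true
                << arePredicatesComplements(IfP0, IfNotP0New) << '\n'; // false: !p0.new vs p0
    }
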
>> +// initPacketizerState - Initialize packetizer flags
>> +void HexagonPacketizerList::initPacketizerState(void) {
>> +
>> + Dependence = false;
>> + PromotedToDotNew = false;
>> + GlueToNewValueJump = false;
>> + GlueAllocframeStore = false;
>> + FoundSequentialDependence = false;
>> +
>> + return;
>> +}
>> +
>> +// ignorePseudoInstruction - Ignore bundling of pseudo instructions.
>> +bool HexagonPacketizerList::ignorePseudoInstruction(MachineInstr *MI,
>> +                                                    MachineBasicBlock *MBB) {
>> + if (MI->isDebugValue())
>> + return true;
>> +
>> + // We must print out inline assembly
>> + if (MI->isInlineAsm())
>> + return false;
>> +
>> + // We check if MI has any functional units mapped to it.
>> + // If it doesn't, we ignore the instruction.
>> +  const MCInstrDesc &TID = MI->getDesc();
>> +  unsigned SchedClass = TID.getSchedClass();
>> +  const InstrStage *IS =
>> +      ResourceTracker->getInstrItins()->beginStage(SchedClass);
>> +  unsigned FuncUnits = IS->getUnits();
>> + return !FuncUnits;
>> +}
>> +
>> +// isSoloInstruction: - Returns true for instructions that must be
>> +// scheduled in their own packet.
>> +bool HexagonPacketizerList::isSoloInstruction(MachineInstr *MI) {
>> +
>> + if (MI->isInlineAsm())
>> + return true;
>> +
>> + if (MI->isEHLabel())
>> + return true;
>> +
>> +  // From Hexagon V4 Programmer's Reference Manual 3.4.4 Grouping constraints:
>> +  // trap, pause, barrier, icinva, isync, and syncht are solo instructions.
>> + // They must not be grouped with other instructions in a packet.
>> + if (IsSchedBarrier(MI))
>> + return true;
>> +
>> + return false;
>> +}
>> +
>> +// isLegalToPacketizeTogether:
>> +// SUI is the current instruction that is outside of the current packet.
>> +// SUJ is the current instruction inside the current packet against which
>> +// SUI will be packetized.
>> +bool HexagonPacketizerList::isLegalToPacketizeTogether(SUnit *SUI,
>> +                                                       SUnit *SUJ) {
>> +  MachineInstr *I = SUI->getInstr();
>> +  MachineInstr *J = SUJ->getInstr();
>> +  assert(I && J && "Unable to packetize null instruction!");
>> +
>> +  const MCInstrDesc &MCIDI = I->getDesc();
>> +  const MCInstrDesc &MCIDJ = J->getDesc();
>> +
>> + MachineBasicBlock::iterator II = I;
>> +
>> + const unsigned FrameSize = MF.getFrameInfo()->getStackSize();
>> +  const HexagonRegisterInfo *QRI =
>> +      (const HexagonRegisterInfo *) TM.getRegisterInfo();
>> + const HexagonInstrInfo *QII = (const HexagonInstrInfo *) TII;
>> +
>> + // Inline asm cannot go in the packet.
>> + if (I->getOpcode() == Hexagon::INLINEASM)
>> + llvm_unreachable("Should not meet inline asm here!");
>> +
>> + if (isSoloInstruction(I))
>> + llvm_unreachable("Should not meet solo instr here!");
>> +
>> +  // A save callee-save register function call can only be in a packet
>> +  // with instructions that don't write to the callee-save registers.
>> +  if ((QII->isSaveCalleeSavedRegsCall(I) &&
>> +       DoesModifyCalleeSavedReg(J, QRI)) ||
>> +      (QII->isSaveCalleeSavedRegsCall(J) &&
>> +       DoesModifyCalleeSavedReg(I, QRI))) {
>> + Dependence = true;
>> + return false;
>> + }
>> +
>> + // Two control flow instructions cannot go in the same packet.
>> +  if (IsControlFlow(I) && IsControlFlow(J)) {
>> + Dependence = true;
>> + return false;
>> + }
>> +
>> +  // A LoopN instruction cannot appear in the same packet as a jump or call.
>> +  if (IsLoopN(I) && (IsDirectJump(J)
>> +                     || MCIDJ.isCall()
>> +                     || QII->isDeallocRet(J))) {
>> +    Dependence = true;
>> +    return false;
>> +  }
>> +  if (IsLoopN(J) && (IsDirectJump(I)
>> +                     || MCIDI.isCall()
>> +                     || QII->isDeallocRet(I))) {
>> +    Dependence = true;
>> +    return false;
>> +  }
>> +
>> +  // dealloc_return cannot appear in the same packet as a conditional or
>> +  // unconditional jump.
>> +  if (QII->isDeallocRet(I) && (MCIDJ.isBranch()
>> +                               || MCIDJ.isCall()
>> +                               || MCIDJ.isBarrier())) {
>> + Dependence = true;
>> + return false;
>> + }
>> +
>> +
>> +  // V4 allows dual stores, but it does not allow a second store if the
>> +  // first store is not in SLOT0. New value store, new value jump,
>> +  // dealloc_return and memop always take SLOT0.
>> +  // Arch spec 3.4.4.2
>> +  if (QRI->Subtarget.hasV4TOps()) {
>> +
>> +    if (MCIDI.mayStore() && MCIDJ.mayStore() && isNewValueInst(J)) {
>> +      Dependence = true;
>> +      return false;
>> +    }
>> +
>> +    if ((QII->isMemOp(J) && MCIDI.mayStore())
>> +        || (MCIDJ.mayStore() && QII->isMemOp(I))
>> +        || (QII->isMemOp(J) && QII->isMemOp(I))) {
>> +      Dependence = true;
>> +      return false;
>> +    }
>> +
>> +    // If dealloc_return.
>> +    if (MCIDJ.mayStore() && QII->isDeallocRet(I)) {
>> +      Dependence = true;
>> +      return false;
>> +    }
>> +
>> + // If an instruction feeds new value jump, glue it.
>> + MachineBasicBlock::iterator NextMII = I;
>> + ++NextMII;
>> + MachineInstr *NextMI = NextMII;
>> +
>> + if (QII->isNewValueJump(NextMI)) {
>> +
>> + bool secondRegMatch = false;
>> + bool maintainNewValueJump = false;
>> +
>> +      if (NextMI->getOperand(1).isReg() &&
>> +          I->getOperand(0).getReg() == NextMI->getOperand(1).getReg()) {
>> +        secondRegMatch = true;
>> +        maintainNewValueJump = true;
>> +      }
>> +
>> +      if (!secondRegMatch &&
>> +          I->getOperand(0).getReg() == NextMI->getOperand(0).getReg()) {
>> +        maintainNewValueJump = true;
>> +      }
>> +
>> +      for (std::vector<MachineInstr*>::iterator
>> +           VI = CurrentPacketMIs.begin(),
>> +           VE = CurrentPacketMIs.end();
>> +           (VI != VE && maintainNewValueJump); ++VI) {
>> +        SUnit *PacketSU = MIToSUnit[*VI];
>> +
>> +        // NVJ can not be part of the dual jump - Arch Spec: section 7.8.
>> +        if (PacketSU->getInstr()->getDesc().isCall()) {
>> +          Dependence = true;
>> +          break;
>> +        }
>> +        // Validate:
>> +        // 1. The packet does not have a store in it.
>> +        // 2. If the first operand of the nvj is newified, and the second
>> +        //    operand is also a reg, it (the second reg) is not defined in
>> +        //    the same packet.
>> +        // 3. If the second operand of the nvj is newified (which means the
>> +        //    first operand is also a reg), the first reg is not defined in
>> +        //    the same packet.
>> +        if (PacketSU->getInstr()->getDesc().mayStore() ||
>> +            PacketSU->getInstr()->getOpcode() == Hexagon::ALLOCFRAME ||
>> +            // Check #2.
>> +            (!secondRegMatch && NextMI->getOperand(1).isReg() &&
>> +             PacketSU->getInstr()->modifiesRegister(
>> +               NextMI->getOperand(1).getReg(), QRI)) ||
>> +            // Check #3.
>> +            (secondRegMatch &&
>> +             PacketSU->getInstr()->modifiesRegister(
>> +               NextMI->getOperand(0).getReg(), QRI))) {
>> +          Dependence = true;
>> +          break;
>> +        }
>> +      }
>> + if (!Dependence)
>> + GlueToNewValueJump = true;
>> + else
>> + return false;
>> + }
>> + }
>> +
>> +  if (SUJ->isSucc(SUI)) {
>> +    for (unsigned i = 0;
>> +         (i < SUJ->Succs.size()) && !FoundSequentialDependence;
>> +         ++i) {
>> +
>> + if (SUJ->Succs[i].getSUnit() != SUI) {
>> + continue;
>> + }
>> +
>> + SDep::Kind DepType = SUJ->Succs[i].getKind();
>> +
>> +      // For direct calls:
>> +      // Ignore register dependences for call instructions for
>> +      // packetization purposes except for those due to r31 and
>> +      // predicate registers.
>> +      //
>> +      // For indirect calls:
>> +      // Same as direct calls + check for true dependences to the register
>> +      // used in the indirect call.
>> +      //
>> +      // We completely ignore Order dependences for call instructions.
>> +      //
>> +      // For returns:
>> +      // Ignore register dependences for return instructions like jumpr,
>> +      // dealloc return unless we have dependencies on the explicit uses
>> +      // of the registers used by jumpr (like r31) or dealloc return
>> +      // (like r29 or r30).
>> +      //
>> +      // TODO: Currently, jumpr handles only a return of r31. So, the
>> +      // following logic (specifically IsCallDependent) is working fine.
>> +      // We need to enable jumpr for registers other than r31, and then
>> +      // we need to rework the last part, where it handles the indirect call
>> +      // of that (IsCallDependent) function. Bug 6216 is opened for this.
>> +      //
>> + unsigned DepReg = 0;
>> + const TargetRegisterClass* RC = NULL;
>> + if (DepType == SDep::Data) {
>> + DepReg = SUJ->Succs[i].getReg();
>> + RC = QRI->getMinimalPhysRegClass(DepReg);
>> + }
>> +      if ((MCIDI.isCall() || MCIDI.isReturn()) &&
>> +          (!IsRegDependence(DepType) ||
>> +           !IsCallDependent(I, DepType, SUJ->Succs[i].getReg()))) {
>> +        /* do nothing */
>> +      }
>> +
>> +      // For instructions that can be promoted to dot-new, try to promote.
>> +      else if ((DepType == SDep::Data) &&
>> +               CanPromoteToDotNew(I, SUJ, DepReg, MIToSUnit, II, RC) &&
>> +               PromoteToDotNew(I, DepType, II, RC)) {
>> +        PromotedToDotNew = true;
>> +        /* do nothing */
>> +      }
>> +
>> +      else if ((DepType == SDep::Data) &&
>> +               (QII->isNewValueJump(I))) {
>> +        /* do nothing */
>> +      }
>> +
>> +      // For predicated instructions, if the predicates are complements
>> +      // then there can be no dependence.
>> +      else if (QII->isPredicated(I) &&
>> +               QII->isPredicated(J) &&
>> +               ArePredicatesComplements(I, J, MIToSUnit)) {
>> +        /* do nothing */
>> +      }
>> +      else if (IsDirectJump(I) &&
>> +               !MCIDJ.isBranch() &&
>> +               !MCIDJ.isCall() &&
>> +               (DepType == SDep::Order)) {
>> +        // Ignore Order dependences between unconditional direct branches
>> +        // and non-control-flow instructions.
>> +        /* do nothing */
>> +      }
>> +      else if (MCIDI.isConditionalBranch() && (DepType != SDep::Data) &&
>> +               (DepType != SDep::Output)) {
>> +        // Ignore all dependences for jumps except for true and output
>> +        // dependences.
>> +        /* do nothing */
>> +      }
>> +
>> +      // Ignore output dependences due to superregs. We can
>> +      // write to two different subregisters of R1:0, for instance,
>> +      // in the same cycle.
>> +      //
>> +      // If neither I nor J defines DepReg, then this is a
>> +      // superfluous output dependence. The dependence must be of the
>> +      // form:
>> +      //   R0 = ...
>> +      //   R1 = ...
>> +      // and there is an output dependence between the two instructions
>> +      // with
>> +      //   DepReg = D0.
>> +      // We want to ignore these dependences.
>> +      // Ideally, the dependence constructor should annotate such
>> +      // dependences. We can then avoid this relatively expensive check.
>> +      //
>> +      else if (DepType == SDep::Output) {
>> +        // DepReg is the register that's responsible for the dependence.
>> +        unsigned DepReg = SUJ->Succs[i].getReg();
>> +
>> +        // Check if I and J really define DepReg.
>> +        if (I->definesRegister(DepReg) ||
>> +            J->definesRegister(DepReg)) {
>> +          FoundSequentialDependence = true;
>> +          break;
>> +        }
>> +      }
>> +
>> +      // We ignore Order dependences for:
>> +      // 1. Two loads unless they are volatile.
>> +      // 2. Two stores in V4 unless they are volatile.
>> +      else if ((DepType == SDep::Order) &&
>> +               !I->hasVolatileMemoryRef() &&
>> +               !J->hasVolatileMemoryRef()) {
>> +        if (QRI->Subtarget.hasV4TOps() &&
>> +            // hexagonv4 allows dual store.
>> +            MCIDI.mayStore() && MCIDJ.mayStore()) {
>> +          /* do nothing */
>> +        }
>> +        // store followed by store -- not OK on V2
>> +        // store followed by load  -- not OK on all (OK if addresses
>> +        //                            are not aliased)
>> +        // load followed by store  -- OK on all
>> +        // load followed by load   -- OK on all
>> +        else if (!MCIDJ.mayStore()) {
>> +          /* do nothing */
>> +        }
>> +        else {
>> +          FoundSequentialDependence = true;
>> +          break;
>> +        }
>> +      }
>> +
>> +      // For V4, special case ALLOCFRAME. Even though there is a dependency
>> +      // between ALLOCFRAME and the subsequent store, allow them to be
>> +      // packetized in the same packet. This implies that the store is using
>> +      // the caller's SP. Hence, the offset needs to be updated accordingly.
>> +      else if (DepType == SDep::Data
>> +               && QRI->Subtarget.hasV4TOps()
>> +               && J->getOpcode() == Hexagon::ALLOCFRAME
>> +               && (I->getOpcode() == Hexagon::STrid
>> +                   || I->getOpcode() == Hexagon::STriw
>> +                   || I->getOpcode() == Hexagon::STrib)
>> +               && I->getOperand(0).getReg() == QRI->getStackRegister()
>> +               && QII->isValidOffset(I->getOpcode(),
>> +                                     I->getOperand(1).getImm() -
>> +                                     (FrameSize + HEXAGON_LRFP_SIZE)))
>> +      {
>> +        GlueAllocframeStore = true;
>> +        // Since this store is to be glued with allocframe in the same
>> +        // packet, it will use the SP of the previous stack frame, i.e.
>> +        // the caller's SP. Therefore, we need to recalculate the offset
>> +        // according to this change.
>> +        I->getOperand(1).setImm(I->getOperand(1).getImm() -
>> +                                (FrameSize + HEXAGON_LRFP_SIZE));
>> +      }
>> +
>> +      //
>> +      // Skip over anti-dependences. Two instructions that are
>> +      // anti-dependent can share a packet.
>> +      //
>> +      else if (DepType != SDep::Anti) {
>> +        FoundSequentialDependence = true;
>> +        break;
>> +      }
>> + }
>> +
>> + if (FoundSequentialDependence) {
>> + Dependence = true;
>> + return false;
>> + }
>> + }
>> +
>> + return true;
>> +}
>> +
>> +// isLegalToPruneDependencies
>> +bool HexagonPacketizerList::isLegalToPruneDependencies(SUnit *SUI,
>> +                                                       SUnit *SUJ) {
>> +  MachineInstr *I = SUI->getInstr();
>> +  MachineInstr *J = SUJ->getInstr();
>> +  assert(I && J && "Unable to packetize null instruction!");
>> +
>> + const unsigned FrameSize = MF.getFrameInfo()->getStackSize();
>> +
>> + if (Dependence) {
>> +
>> +    // Check if the instruction was promoted to a dot-new. If so, demote it
>> +    // back into a dot-old.
>> +    if (PromotedToDotNew) {
>> +      DemoteToDotOld(I);
>> +    }
>> +
>> +    // Check if the instruction (must be a store) was glued with an Allocframe
>> +    // instruction. If so, restore its offset to its original value, i.e. use
>> +    // the current SP instead of the caller's SP.
>> +    if (GlueAllocframeStore) {
>> +      I->getOperand(1).setImm(I->getOperand(1).getImm() +
>> +                              FrameSize + HEXAGON_LRFP_SIZE);
>> +    }
>> +
>> + return false;
>> + }
>> + return true;
>> +}
>> +
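One more note on the GlueAllocframeStore handling: when the store is glued with allocframe, its immediate offset is rewritten to be relative to the caller's SP by subtracting (FrameSize + HEXAGON_LRFP_SIZE), provided the adjusted value still passes isValidOffset; if the packet is later rejected, isLegalToPruneDependencies adds the same amount back. A small sketch of that round trip with plain integers (HEXAGON_LRFP_SIZE is assumed to be 8 here, 4-byte LR plus 4-byte FP, and FrameSize is just an example value):

    #include <cassert>

    // Illustrative constants, not taken from the patch.
    const int HEXAGON_LRFP_SIZE = 8;
    const int FrameSize = 32;

    // Glue: make the store offset relative to the caller's SP.
    int glueWithAllocframe(int Offset) {
      return Offset - (FrameSize + HEXAGON_LRFP_SIZE);
    }

    // Un-glue: restore the offset relative to the current SP.
    int unglue(int Offset) {
      return Offset + FrameSize + HEXAGON_LRFP_SIZE;
    }

    int main() {
      int OriginalOffset = 16;
      int Adjusted = glueWithAllocframe(OriginalOffset); // 16 - 40 = -24
      assert(unglue(Adjusted) == OriginalOffset);        // round-trips exactly
    }
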
>> +MachineBasicBlock::iterator
>> +HexagonPacketizerList::addToPacket(MachineInstr *MI) {
>> +
>> + MachineBasicBlock::iterator MII = MI;
>> + MachineBasicBlock *MBB = MI->getParent();
>> +
>> + const HexagonInstrInfo *QII = (const HexagonInstrInfo *) TII;
>> +
>> + if (GlueToNewValueJump) {
>> +
>> + ++MII;
>> + MachineInstr *nvjMI = MII;
>> + assert(ResourceTracker->canReserveResources(MI));
>> + ResourceTracker->reserveResources(MI);
>> +    if (QII->isExtended(MI) &&
>> +        !tryAllocateResourcesForConstExt(MI)) {
>> +      endPacket(MBB, MI);
>> +      ResourceTracker->reserveResources(MI);
>> +      assert(canReserveResourcesForConstExt(MI) &&
>> +             "Ensure that there is a slot");
>> +      reserveResourcesForConstExt(MI);
>> +      // Reserve resources for the new value jump constant extender.
>> +      assert(canReserveResourcesForConstExt(MI) &&
>> +             "Ensure that there is a slot");
>> +      reserveResourcesForConstExt(nvjMI);
>> +
>> +      assert(ResourceTracker->canReserveResources(nvjMI) &&
>> +             "Ensure that there is a slot");
>> +
>> +    } else if ( // An extended instruction takes two slots in the packet.
>> +                // Try to reserve and allocate 4 bytes in the current packet first.
>> +                (QII->isExtended(nvjMI)
>> +                 && (!tryAllocateResourcesForConstExt(nvjMI)
>> +                     || !ResourceTracker->canReserveResources(nvjMI)))
>> +                || // For a non-extended instruction, no need to allocate extra 4 bytes.
>> +                (!QII->isExtended(nvjMI) &&
>> +                 !ResourceTracker->canReserveResources(nvjMI)))
>> +    {
>> +      endPacket(MBB, MI);
>> +      // A new and empty packet starts.
>> +      // We are sure that the resource requirements can be satisfied.
>> +      // Therefore, we do not need to call "canReserveResources" anymore.
>> +      ResourceTracker->reserveResources(MI);
>> +      if (QII->isExtended(nvjMI))
>> +        reserveResourcesForConstExt(nvjMI);
>> +    }
>> +    // Here, we are sure that "reserveResources" will succeed.
>> +    ResourceTracker->reserveResources(nvjMI);
>> + CurrentPacketMIs.push_back(MI);
>> + CurrentPacketMIs.push_back(nvjMI);
>> + } else {
>> +    if (QII->isExtended(MI)
>> +        && (!tryAllocateResourcesForConstExt(MI)
>> +            || !ResourceTracker->canReserveResources(MI)))
>> +    {
>> +      endPacket(MBB, MI);
>> +      // Check if the instruction was promoted to a dot-new. If so, demote it
>> +      // back into a dot-old.
>> +      if (PromotedToDotNew) {
>> +        DemoteToDotOld(MI);
>> +      }
>> +      reserveResourcesForConstExt(MI);
>> +    }
>> +    // In case "MI" is not an extended insn,
>> +    // the resource availability has already been checked.
>> +    ResourceTracker->reserveResources(MI);
>> +    CurrentPacketMIs.push_back(MI);
>> + }
>> + return MII;
>> +}
>> +
>> +//===----------------------------------------------------------------------===//
>> +// Public Constructor Functions
>> +//===----------------------------------------------------------------------===//
>> +
>> +FunctionPass *llvm::createHexagonPacketizer() {
>> + return new HexagonPacketizer();
>> +}
>> +
>> diff --git a/lib/Target/Hexagon/InstPrinter/HexagonInstPrinter.cpp b/lib/Target/Hexagon/InstPrinter/HexagonInstPrinter.cpp
>> index ef36881..e21e014 100644
>> --- a/lib/Target/Hexagon/InstPrinter/HexagonInstPrinter.cpp
>> +++ b/lib/Target/Hexagon/InstPrinter/HexagonInstPrinter.cpp
>> @@ -15,6 +15,7 @@
>> #include "Hexagon.h"
>> #include "HexagonAsmPrinter.h"
>> #include "HexagonInstPrinter.h"
>> +#include "HexagonMCInst.h"
>> #include "llvm/MC/MCInst.h"
>> #include "llvm/MC/MCAsmInfo.h"
>> #include "llvm/MC/MCExpr.h"
>> @@ -37,20 +38,50 @@ StringRef HexagonInstPrinter::getRegName(unsigned RegNo) const {
>>
>>  void HexagonInstPrinter::printInst(const MCInst *MI, raw_ostream &O,
>>                                     StringRef Annot) {
>> +  printInst((const HexagonMCInst*)(MI), O, Annot);
>> +}
>> +
>> +void HexagonInstPrinter::printInst(const HexagonMCInst *MI, raw_ostream &O,
>> +                                   StringRef Annot) {
>> const char packetPadding[] = " ";
>> const char startPacket = '{',
>> endPacket = '}';
>> // TODO: add outer HW loop when it's supported too.
>> if (MI->getOpcode() == Hexagon::ENDLOOP0) {
>> - MCInst Nop;
>> +    // Ending a hardware loop is different from ending a regular packet.
>> +    assert(MI->isEndPacket() && "Loop end must also end the packet");
>> +
>> +    if (MI->isStartPacket()) {
>> +      // There must be a packet to end a loop.
>> +      // FIXME: when shuffling is always run, this shouldn't be needed.
>> +      HexagonMCInst Nop;
>> +      StringRef NoAnnot;
>>
>> -    O << packetPadding << startPacket << '\n';
>> -    Nop.setOpcode(Hexagon::NOP);
>> -    printInstruction(&Nop, O);
>> -    O << packetPadding << endPacket;
>> +      Nop.setOpcode (Hexagon::NOP);
>> +      Nop.setStartPacket (MI->isStartPacket());
>> +      printInst (&Nop, O, NoAnnot);
>> + }
>> +
>> +    // Close the packet.
>> +    if (MI->isEndPacket())
>> +      O << packetPadding << endPacket;
>> +
>> +    printInstruction(MI, O);
>> +  }
>> +  else {
>> +    // Prefix the insn opening the packet.
>> +    if (MI->isStartPacket())
>> +      O << packetPadding << startPacket << '\n';
>> +
>> +    printInstruction(MI, O);
>> +
>> +    // Suffix the insn closing the packet.
>> +    if (MI->isEndPacket())
>> +      // Always suffix the packet on a new line, since the GNU assembler has
>> +      // issues with a closing brace on the same line as CONST{32,64}.
>> +      O << '\n' << packetPadding << endPacket;
>> }
>>
>> - printInstruction(MI, O);
>> printAnnotation(O, Annot);
>> }
>>
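To make the brace handling above easier to follow, here is a tiny standalone sketch of the same start/end-packet convention the printer implements (toy instruction type, not HexagonMCInst; the ENDLOOP0 special case is omitted):

    #include <iostream>
    #include <string>
    #include <vector>

    // Toy instruction: just text plus packet-boundary flags.
    struct ToyInst {
      std::string Text;
      bool StartPacket;
      bool EndPacket;
    };

    // Print '{' before a packet and '}' on its own line after it, mirroring
    // the convention used by HexagonInstPrinter::printInst above.
    void printPacketized(const std::vector<ToyInst> &Prog) {
      const char *Pad = "\t";
      for (const ToyInst &MI : Prog) {
        if (MI.StartPacket)
          std::cout << Pad << "{\n";
        std::cout << Pad << MI.Text << '\n';
        if (MI.EndPacket)
          std::cout << '\n' << Pad << "}\n";
      }
    }

    int main() {
      printPacketized({{"r1 = add(r2, r3)", true, false},
                       {"memw(r4+#0) = r1.new", false, true}});
    }
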
>> @@ -65,22 +96,22 @@ void HexagonInstPrinter::printOperand(const MCInst *MI, unsigned OpNo,
>> } else if(MO.isImm()) {
>> printImmOperand(MI, OpNo, O);
>> } else {
>> - assert(false&& "Unknown operand");
>> + llvm_unreachable("Unknown operand");
>> }
>> }
>>
>> -void HexagonInstPrinter::printImmOperand
>> -  (const MCInst *MI, unsigned OpNo, raw_ostream &O) const {
>> +void HexagonInstPrinter::printImmOperand(const MCInst *MI, unsigned OpNo,
>> +                                         raw_ostream &O) const {
>>    O << MI->getOperand(OpNo).getImm();
>>  }
>>
>> void HexagonInstPrinter::printExtOperand(const MCInst *MI, unsigned
OpNo,
>> - raw_ostream&O)
const {
>> + raw_ostream&O) const {
>> O<< MI->getOperand(OpNo).getImm();
>> }
>>
>> -void HexagonInstPrinter::printUnsignedImmOperand
>> - (const MCInst *MI, unsigned OpNo, raw_ostream&O) const {
>> +void HexagonInstPrinter::printUnsignedImmOperand(const MCInst *MI,
unsigned OpNo,
>> + raw_ostream&O)
const {
>> O<< MI->getOperand(OpNo).getImm();
>> }
>>
>> @@ -89,13 +120,13 @@ void HexagonInstPrinter::printNegImmOperand(const
MCInst *MI, unsigned OpNo,
>> O<< -MI->getOperand(OpNo).getImm();
>> }
>>
>> -void HexagonInstPrinter::printNOneImmOperand
>> - (const MCInst *MI, unsigned OpNo, raw_ostream&O) const {
>> +void HexagonInstPrinter::printNOneImmOperand(const MCInst *MI,
unsigned OpNo,
>> + raw_ostream&O) const
{
>> O<< -1;
>> }
>>
>> -void HexagonInstPrinter::printMEMriOperand
>> - (const MCInst *MI, unsigned OpNo, raw_ostream&O) const {
>> +void HexagonInstPrinter::printMEMriOperand(const MCInst *MI, unsigned
OpNo,
>> + raw_ostream&O) const {
>> const MCOperand& MO0 = MI->getOperand(OpNo);
>> const MCOperand& MO1 = MI->getOperand(OpNo + 1);
>>
>> @@ -103,8 +134,8 @@ void HexagonInstPrinter::printMEMriOperand
>> O<< " + #"<< MO1.getImm();
>> }
>>
>> -void HexagonInstPrinter::printFrameIndexOperand
>> - (const MCInst *MI, unsigned OpNo, raw_ostream&O) const {
>> +void HexagonInstPrinter::printFrameIndexOperand(const MCInst *MI,
unsigned OpNo,
>> + raw_ostream&O)
const {
>> const MCOperand& MO0 = MI->getOperand(OpNo);
>> const MCOperand& MO1 = MI->getOperand(OpNo + 1);
>>
>> @@ -113,24 +144,21 @@ void HexagonInstPrinter::printFrameIndexOperand
>>
>> void HexagonInstPrinter::printGlobalOperand(const MCInst *MI,
unsigned OpNo,
>> raw_ostream&O) const
{
>> - const MCOperand& MO = MI->getOperand(OpNo);
>> - assert(MO.isExpr()&& "Expecting expression");
>> + assert(MI->getOperand(OpNo).isExpr()&& "Expecting
expression");
>>
>> printOperand(MI, OpNo, O);
>> }
>>
>> void HexagonInstPrinter::printJumpTable(const MCInst *MI, unsigned
OpNo,
>> raw_ostream&O) const {
>> - const MCOperand& MO = MI->getOperand(OpNo);
>> - assert(MO.isExpr()&& "Expecting expression");
>> + assert(MI->getOperand(OpNo).isExpr()&& "Expecting
expression");
>>
>> printOperand(MI, OpNo, O);
>> }
>>
>> void HexagonInstPrinter::printConstantPool(const MCInst *MI, unsigned
OpNo,
>> raw_ostream&O) const {
>> - const MCOperand& MO = MI->getOperand(OpNo);
>> - assert(MO.isExpr()&& "Expecting expression");
>> + assert(MI->getOperand(OpNo).isExpr()&& "Expecting
expression");
>>
>> printOperand(MI, OpNo, O);
>> }
>> diff --git a/lib/Target/Hexagon/InstPrinter/HexagonInstPrinter.h
b/lib/Target/Hexagon/InstPrinter/HexagonInstPrinter.h
>> index dad4334..3ce7dfc 100644
>> --- a/lib/Target/Hexagon/InstPrinter/HexagonInstPrinter.h
>> +++ b/lib/Target/Hexagon/InstPrinter/HexagonInstPrinter.h
>> @@ -14,6 +14,7 @@
>> #ifndef HEXAGONINSTPRINTER_H
>> #define HEXAGONINSTPRINTER_H
>>
>> +#include "HexagonMCInst.h"
>> #include "llvm/MC/MCInstPrinter.h"
>>
>> namespace llvm {
>> @@ -25,6 +26,7 @@ namespace llvm {
>> : MCInstPrinter(MAI, MII, MRI) {}
>>
>> virtual void printInst(const MCInst *MI, raw_ostream&O,
StringRef Annot);
>> + void printInst(const HexagonMCInst *MI, raw_ostream&O,
StringRef Annot);
>> virtual StringRef getOpcodeName(unsigned Opcode) const;
>> void printInstruction(const MCInst *MI, raw_ostream&O);
>> StringRef getRegName(unsigned RegNo) const;
>> diff --git a/lib/Target/Hexagon/MCTargetDesc/HexagonBaseInfo.h
b/lib/Target/Hexagon/MCTargetDesc/HexagonBaseInfo.h
>> index ed55c3c..7221e90 100644
>> --- a/lib/Target/Hexagon/MCTargetDesc/HexagonBaseInfo.h
>> +++ b/lib/Target/Hexagon/MCTargetDesc/HexagonBaseInfo.h
>> @@ -23,14 +23,41 @@ namespace llvm {
>> /// instruction info tracks.
>> ///
>> namespace HexagonII {
>> -
>> // *** The code below must match HexagonInstrFormat*.td *** //
>>
>> + // Insn types.
>> + // *** Must match HexagonInstrFormat*.td ***
>> + enum Type {
>> + TypePSEUDO = 0,
>> + TypeALU32 = 1,
>> + TypeCR = 2,
>> + TypeJR = 3,
>> + TypeJ = 4,
>> + TypeLD = 5,
>> + TypeST = 6,
>> + TypeSYSTEM = 7,
>> + TypeXTYPE = 8,
>> + TypeMEMOP = 9,
>> + TypeNV = 10,
>> + TypePREFIX = 30, // Such as extenders.
>> + TypeMARKER = 31 // Such as end of a HW loop.
>> + };
>> +
>> +
>> +
>> // MCInstrDesc TSFlags
>> + // *** Must match HexagonInstrFormat*.td ***
>> enum {
>> + // This 5-bit field describes the insn type.
>> + TypePos = 0,
>> + TypeMask = 0x1f,
>> +
>> + // Solo instructions.
>> + SoloPos = 5,
>> + SoloMask = 0x1,
>>
>> // Predicated instructions.
>> - PredicatedPos = 1,
>> + PredicatedPos = 6,
>> PredicatedMask = 0x1
>> };
>>
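Since the new TSFlags layout above packs the type, solo and predicated bits into a single word, here is a quick standalone illustration of how such Pos/Mask pairs are meant to be decoded. This uses plain integers only; the real consumers of these fields live elsewhere in the backend and are not part of this hunk:

    #include <cstdint>
    #include <iostream>

    // Mirror of the Pos/Mask pairs above (values copied from the enum).
    enum : uint64_t {
      TypePos = 0,       TypeMask = 0x1f,
      SoloPos = 5,       SoloMask = 0x1,
      PredicatedPos = 6, PredicatedMask = 0x1
    };

    // Generic decode: shift down to the field, then mask it off.
    static uint64_t getField(uint64_t TSFlags, unsigned Pos, uint64_t Mask) {
      return (TSFlags >> Pos) & Mask;
    }

    int main() {
      // Example word: type 6 (TypeST), not solo, predicated.
      uint64_t TSFlags = (uint64_t(6) << TypePos) |
                         (uint64_t(0) << SoloPos) |
                         (uint64_t(1) << PredicatedPos);
      std::cout << "type="        << getField(TSFlags, TypePos, TypeMask)
                << " solo="       << getField(TSFlags, SoloPos, SoloMask)
                << " predicated=" << getField(TSFlags, PredicatedPos, PredicatedMask)
                << '\n'; // type=6 solo=0 predicated=1
    }
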
>> _______________________________________________
>> LLVM Developers mailing list
>> LLVMdev at cs.uiuc.edu http://llvm.cs.uiuc.edu
>> http://lists.cs.uiuc.edu/mailman/listinfo/llvmdev
>
--
Qualcomm Innovation Center, Inc is a member of Code Aurora Forum