Commit 6b044c26094a9f86da7d12945b00a47a5f07cf6d

Authored by Jim Grosbach
1 parent 13d2ba34

Add implied destination operand (two-operand) form aliases for ARM NEON VSHR.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@146192 91177308-0d34-0410-b5e6-96231b3b80d8
... ... @@ -201,21 +201,29 @@ def msr_mask : Operand<i32> {
201 201 // 16 imm6<5:4> = '01', 16 - <imm> is encoded in imm6<3:0>
202 202 // 32 imm6<5> = '1', 32 - <imm> is encoded in imm6<4:0>
203 203 // 64 64 - <imm> is encoded in imm6<5:0>
  204 +def shr_imm8_asm_operand : ImmAsmOperand { let Name = "ShrImm8"; }
204 205 def shr_imm8 : Operand<i32> {
205 206 let EncoderMethod = "getShiftRight8Imm";
206 207 let DecoderMethod = "DecodeShiftRight8Imm";
  208 + let ParserMatchClass = shr_imm8_asm_operand;
207 209 }
  210 +def shr_imm16_asm_operand : ImmAsmOperand { let Name = "ShrImm16"; }
208 211 def shr_imm16 : Operand<i32> {
209 212 let EncoderMethod = "getShiftRight16Imm";
210 213 let DecoderMethod = "DecodeShiftRight16Imm";
  214 + let ParserMatchClass = shr_imm16_asm_operand;
211 215 }
  216 +def shr_imm32_asm_operand : ImmAsmOperand { let Name = "ShrImm32"; }
212 217 def shr_imm32 : Operand<i32> {
213 218 let EncoderMethod = "getShiftRight32Imm";
214 219 let DecoderMethod = "DecodeShiftRight32Imm";
  220 + let ParserMatchClass = shr_imm32_asm_operand;
215 221 }
  222 +def shr_imm64_asm_operand : ImmAsmOperand { let Name = "ShrImm64"; }
216 223 def shr_imm64 : Operand<i32> {
217 224 let EncoderMethod = "getShiftRight64Imm";
218 225 let DecoderMethod = "DecodeShiftRight64Imm";
  226 + let ParserMatchClass = shr_imm64_asm_operand;
219 227 }
220 228  
221 229 //===----------------------------------------------------------------------===//
... ...
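The comment block at the top of this hunk describes how a shift-right amount is packed into the 6-bit imm6 field: the element size fixes the high marker bits, and size - imm fills the remaining low bits. A minimal C++ sketch of that arithmetic follows; encodeShrImm6 is a hypothetical name used for illustration (the in-tree getShiftRight*Imm encoder methods return only the size - imm payload, with the marker bits supplied by the instruction encoding itself).

#include <cassert>
#include <cstdint>

// Illustrative only: pack a shift-right immediate into the imm6 layout
// described in the comment block above. Valid shift amounts are 1..Size.
static uint32_t encodeShrImm6(unsigned Size, unsigned Imm) {
  assert(Imm >= 1 && Imm <= Size && "shift amount must be in [1, Size]");
  switch (Size) {
  case 8:  return 0x08 | (8 - Imm);  // imm6<5:3> = '001', payload in imm6<2:0>
  case 16: return 0x10 | (16 - Imm); // imm6<5:4> = '01',  payload in imm6<3:0>
  case 32: return 0x20 | (32 - Imm); // imm6<5>   = '1',   payload in imm6<4:0>
  default: return 64 - Imm;          // Size == 64: payload fills all of imm6
  }
}

For example, encodeShrImm6(8, 7) yields 0b001001, which is exactly the imm6 field that shows up in the test encodings further down.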
... ... @@ -5533,6 +5533,43 @@ def : NEONInstAlias<"vshl${p}.u32 $Vdn, $Vm",
5533 5533 def : NEONInstAlias<"vshl${p}.u64 $Vdn, $Vm",
5534 5534 (VSHLuv2i64 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
5535 5535  
  5536 +// VSHR (immediate) two-operand aliases.
  5537 +def : NEONInstAlias<"vshr${p}.s8 $Vdn, $imm",
  5538 + (VSHRsv8i8 DPR:$Vdn, DPR:$Vdn, shr_imm8:$imm, pred:$p)>;
  5539 +def : NEONInstAlias<"vshr${p}.s16 $Vdn, $imm",
  5540 + (VSHRsv4i16 DPR:$Vdn, DPR:$Vdn, shr_imm16:$imm, pred:$p)>;
  5541 +def : NEONInstAlias<"vshr${p}.s32 $Vdn, $imm",
  5542 + (VSHRsv2i32 DPR:$Vdn, DPR:$Vdn, shr_imm32:$imm, pred:$p)>;
  5543 +def : NEONInstAlias<"vshr${p}.s64 $Vdn, $imm",
  5544 + (VSHRsv1i64 DPR:$Vdn, DPR:$Vdn, shr_imm64:$imm, pred:$p)>;
  5545 +
  5546 +def : NEONInstAlias<"vshr${p}.s8 $Vdn, $imm",
  5547 + (VSHRsv16i8 QPR:$Vdn, QPR:$Vdn, shr_imm8:$imm, pred:$p)>;
  5548 +def : NEONInstAlias<"vshr${p}.s16 $Vdn, $imm",
  5549 + (VSHRsv8i16 QPR:$Vdn, QPR:$Vdn, shr_imm16:$imm, pred:$p)>;
  5550 +def : NEONInstAlias<"vshr${p}.s32 $Vdn, $imm",
  5551 + (VSHRsv4i32 QPR:$Vdn, QPR:$Vdn, shr_imm32:$imm, pred:$p)>;
  5552 +def : NEONInstAlias<"vshr${p}.s64 $Vdn, $imm",
  5553 + (VSHRsv2i64 QPR:$Vdn, QPR:$Vdn, shr_imm64:$imm, pred:$p)>;
  5554 +
  5555 +def : NEONInstAlias<"vshr${p}.u8 $Vdn, $imm",
  5556 + (VSHRuv8i8 DPR:$Vdn, DPR:$Vdn, shr_imm8:$imm, pred:$p)>;
  5557 +def : NEONInstAlias<"vshr${p}.u16 $Vdn, $imm",
  5558 + (VSHRuv4i16 DPR:$Vdn, DPR:$Vdn, shr_imm16:$imm, pred:$p)>;
  5559 +def : NEONInstAlias<"vshr${p}.u32 $Vdn, $imm",
  5560 + (VSHRuv2i32 DPR:$Vdn, DPR:$Vdn, shr_imm32:$imm, pred:$p)>;
  5561 +def : NEONInstAlias<"vshr${p}.u64 $Vdn, $imm",
  5562 + (VSHRuv1i64 DPR:$Vdn, DPR:$Vdn, shr_imm64:$imm, pred:$p)>;
  5563 +
  5564 +def : NEONInstAlias<"vshr${p}.u8 $Vdn, $imm",
  5565 + (VSHRuv16i8 QPR:$Vdn, QPR:$Vdn, shr_imm8:$imm, pred:$p)>;
  5566 +def : NEONInstAlias<"vshr${p}.u16 $Vdn, $imm",
  5567 + (VSHRuv8i16 QPR:$Vdn, QPR:$Vdn, shr_imm16:$imm, pred:$p)>;
  5568 +def : NEONInstAlias<"vshr${p}.u32 $Vdn, $imm",
  5569 + (VSHRuv4i32 QPR:$Vdn, QPR:$Vdn, shr_imm32:$imm, pred:$p)>;
  5570 +def : NEONInstAlias<"vshr${p}.u64 $Vdn, $imm",
  5571 + (VSHRuv2i64 QPR:$Vdn, QPR:$Vdn, shr_imm64:$imm, pred:$p)>;
  5572 +
5536 5573 // VLD1 single-lane pseudo-instructions. These need special handling for
5537 5574 // the lane index that an InstAlias can't handle, so we use these instead.
5538 5575 defm VLD1LNdAsm : NEONDT8AsmPseudoInst<"vld1${p}", "$list, $addr",
... ...
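Each alias above maps the two-operand spelling onto the existing three-operand VSHR instruction by repeating $Vdn as both the destination and the first source. The rewrite the assembler performs is trivial; a hand-written C++ sketch of the idea (in LLVM this is table-driven matcher code generated from the NEONInstAlias defs, not a function like this):

#include <string>
#include <vector>

// Illustrative only: "vshr.s8 d16, #7" becomes "vshr.s8 d16, d16, #7".
static std::vector<std::string>
expandTwoOperandVSHR(const std::string &Vdn, const std::string &Imm) {
  return {Vdn, Vdn, Imm}; // the destination doubles as the first source
}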
... ... @@ -643,6 +643,38 @@ public:
643 643 int64_t Value = CE->getValue();
644 644 return Value == 32;
645 645 }
  646 + bool isShrImm8() const {
  647 + if (Kind != k_Immediate)
  648 + return false;
  649 + const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
  650 + if (!CE) return false;
  651 + int64_t Value = CE->getValue();
  652 + return Value > 0 && Value <= 8;
  653 + }
  654 + bool isShrImm16() const {
  655 + if (Kind != k_Immediate)
  656 + return false;
  657 + const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
  658 + if (!CE) return false;
  659 + int64_t Value = CE->getValue();
  660 + return Value > 0 && Value <= 16;
  661 + }
  662 + bool isShrImm32() const {
  663 + if (Kind != k_Immediate)
  664 + return false;
  665 + const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
  666 + if (!CE) return false;
  667 + int64_t Value = CE->getValue();
  668 + return Value > 0 && Value <= 32;
  669 + }
  670 + bool isShrImm64() const {
  671 + if (Kind != k_Immediate)
  672 + return false;
  673 + const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
  674 + if (!CE) return false;
  675 + int64_t Value = CE->getValue();
  676 + return Value > 0 && Value <= 64;
  677 + }
646 678 bool isImm1_7() const {
647 679 if (Kind != k_Immediate)
648 680 return false;
... ...
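Each predicate above accepts shift amounts in the inclusive range 1..N, matching VSHR's valid shift counts: a shift by zero is not encodable in this form, while a shift by the full element width is. A standalone restatement with the boundaries spelled out (isValidShrImm and the test harness are illustrative, not in-tree code):

#include <cassert>
#include <cstdint>

// Same range check as isShrImm8/16/32/64: valid iff 1 <= Value <= Size.
static bool isValidShrImm(int64_t Value, unsigned Size) {
  return Value > 0 && Value <= Size;
}

int main() {
  assert(!isValidShrImm(0, 8));   // shift by zero is rejected
  assert( isValidShrImm(1, 8));   // lower bound
  assert( isValidShrImm(8, 8));   // a full-width shift is allowed
  assert(!isValidShrImm(9, 8));   // beyond the element size
  assert( isValidShrImm(64, 64)); // likewise for the widest elements
  return 0;
}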
... ... @@ -70,6 +70,41 @@ _foo:
70 70 @ CHECK: vshr.s32 q8, q8, #31 @ encoding: [0x70,0x00,0xe1,0xf2]
71 71 @ CHECK: vshr.s64 q8, q8, #63 @ encoding: [0xf0,0x00,0xc1,0xf2]
72 72  
  73 +@ implied destination operand variants.
  74 + vshr.u8 d16, #7
  75 + vshr.u16 d16, #15
  76 + vshr.u32 d16, #31
  77 + vshr.u64 d16, #63
  78 + vshr.u8 q8, #7
  79 + vshr.u16 q8, #15
  80 + vshr.u32 q8, #31
  81 + vshr.u64 q8, #63
  82 + vshr.s8 d16, #7
  83 + vshr.s16 d16, #15
  84 + vshr.s32 d16, #31
  85 + vshr.s64 d16, #63
  86 + vshr.s8 q8, #7
  87 + vshr.s16 q8, #15
  88 + vshr.s32 q8, #31
  89 + vshr.s64 q8, #63
  90 +
  91 +@ CHECK: vshr.u8 d16, d16, #7 @ encoding: [0x30,0x00,0xc9,0xf3]
  92 +@ CHECK: vshr.u16 d16, d16, #15 @ encoding: [0x30,0x00,0xd1,0xf3]
  93 +@ CHECK: vshr.u32 d16, d16, #31 @ encoding: [0x30,0x00,0xe1,0xf3]
  94 +@ CHECK: vshr.u64 d16, d16, #63 @ encoding: [0xb0,0x00,0xc1,0xf3]
  95 +@ CHECK: vshr.u8 q8, q8, #7 @ encoding: [0x70,0x00,0xc9,0xf3]
  96 +@ CHECK: vshr.u16 q8, q8, #15 @ encoding: [0x70,0x00,0xd1,0xf3]
  97 +@ CHECK: vshr.u32 q8, q8, #31 @ encoding: [0x70,0x00,0xe1,0xf3]
  98 +@ CHECK: vshr.u64 q8, q8, #63 @ encoding: [0xf0,0x00,0xc1,0xf3]
  99 +@ CHECK: vshr.s8 d16, d16, #7 @ encoding: [0x30,0x00,0xc9,0xf2]
  100 +@ CHECK: vshr.s16 d16, d16, #15 @ encoding: [0x30,0x00,0xd1,0xf2]
  101 +@ CHECK: vshr.s32 d16, d16, #31 @ encoding: [0x30,0x00,0xe1,0xf2]
  102 +@ CHECK: vshr.s64 d16, d16, #63 @ encoding: [0xb0,0x00,0xc1,0xf2]
  103 +@ CHECK: vshr.s8 q8, q8, #7 @ encoding: [0x70,0x00,0xc9,0xf2]
  104 +@ CHECK: vshr.s16 q8, q8, #15 @ encoding: [0x70,0x00,0xd1,0xf2]
  105 +@ CHECK: vshr.s32 q8, q8, #31 @ encoding: [0x70,0x00,0xe1,0xf2]
  106 +@ CHECK: vshr.s64 q8, q8, #63 @ encoding: [0xf0,0x00,0xc1,0xf2]
  107 +
73 108 @ CHECK: vsra.u8 d16, d16, #7 @ encoding: [0x30,0x01,0xc9,0xf3]
74 109 vsra.u8 d16, d16, #7
75 110 @ CHECK: vsra.u16 d16, d16, #15 @ encoding: [0x30,0x01,0xd1,0xf3]
... ...
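As a cross-check, the imm6 field can be read back out of the CHECK encodings above: the bytes are listed little-endian, and imm6 sits in bits 21-16 of the instruction word. A short illustrative verifier (imm6Of is a made-up helper, consistent with the encodeShrImm6 sketch earlier):

#include <cassert>
#include <cstdint>

// Reassemble a little-endian 4-byte NEON encoding and extract imm6
// (instruction bits 21-16).
static uint32_t imm6Of(const uint8_t B[4]) {
  uint32_t Word = B[0] | (B[1] << 8) | (B[2] << 16) | ((uint32_t)B[3] << 24);
  return (Word >> 16) & 0x3F;
}

int main() {
  const uint8_t U8[4]  = {0x30, 0x00, 0xc9, 0xf3}; // vshr.u8  d16, d16, #7
  const uint8_t U64[4] = {0xb0, 0x00, 0xc1, 0xf3}; // vshr.u64 d16, d16, #63
  assert(imm6Of(U8)  == 0x09);    // '001' marker ++ (8 - 7)
  assert(imm6Of(U64) == 64 - 63); // 64 - imm fills the whole field
  return 0;
}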