LLVM 20.0.0git
WebAssemblyISelLowering.cpp
Go to the documentation of this file.
1//=- WebAssemblyISelLowering.cpp - WebAssembly DAG Lowering Implementation -==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8///
9/// \file
10/// This file implements the WebAssemblyTargetLowering class.
11///
12//===----------------------------------------------------------------------===//
13
31#include "llvm/IR/Function.h"
32#include "llvm/IR/Intrinsics.h"
33#include "llvm/IR/IntrinsicsWebAssembly.h"
38using namespace llvm;
39
40#define DEBUG_TYPE "wasm-lower"
41
43 const TargetMachine &TM, const WebAssemblySubtarget &STI)
44 : TargetLowering(TM), Subtarget(&STI) {
45 auto MVTPtr = Subtarget->hasAddr64() ? MVT::i64 : MVT::i32;
46
47 // Booleans always contain 0 or 1.
49 // Except in SIMD vectors
51 // We don't know the microarchitecture here, so just reduce register pressure.
53 // Tell ISel that we have a stack pointer.
55 Subtarget->hasAddr64() ? WebAssembly::SP64 : WebAssembly::SP32);
56 // Set up the register classes.
57 addRegisterClass(MVT::i32, &WebAssembly::I32RegClass);
58 addRegisterClass(MVT::i64, &WebAssembly::I64RegClass);
59 addRegisterClass(MVT::f32, &WebAssembly::F32RegClass);
60 addRegisterClass(MVT::f64, &WebAssembly::F64RegClass);
61 if (Subtarget->hasSIMD128()) {
62 addRegisterClass(MVT::v16i8, &WebAssembly::V128RegClass);
63 addRegisterClass(MVT::v8i16, &WebAssembly::V128RegClass);
64 addRegisterClass(MVT::v4i32, &WebAssembly::V128RegClass);
65 addRegisterClass(MVT::v4f32, &WebAssembly::V128RegClass);
66 addRegisterClass(MVT::v2i64, &WebAssembly::V128RegClass);
67 addRegisterClass(MVT::v2f64, &WebAssembly::V128RegClass);
68 }
69 if (Subtarget->hasFP16()) {
70 addRegisterClass(MVT::v8f16, &WebAssembly::V128RegClass);
71 }
72 if (Subtarget->hasReferenceTypes()) {
73 addRegisterClass(MVT::externref, &WebAssembly::EXTERNREFRegClass);
74 addRegisterClass(MVT::funcref, &WebAssembly::FUNCREFRegClass);
75 if (Subtarget->hasExceptionHandling()) {
76 addRegisterClass(MVT::exnref, &WebAssembly::EXNREFRegClass);
77 }
78 }
79 // Compute derived properties from the register classes.
81
82 // Transform loads and stores to pointers in address space 1 to loads and
83 // stores to WebAssembly global variables, outside linear memory.
84 for (auto T : {MVT::i32, MVT::i64, MVT::f32, MVT::f64}) {
87 }
88 if (Subtarget->hasSIMD128()) {
89 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
90 MVT::v2f64}) {
93 }
94 }
95 if (Subtarget->hasFP16()) {
98 }
99 if (Subtarget->hasReferenceTypes()) {
100 // We need custom load and store lowering for both externref, funcref and
101 // Other. The MVT::Other here represents tables of reference types.
102 for (auto T : {MVT::externref, MVT::funcref, MVT::Other}) {
105 }
106 }
107
115
116 // Take the default expansion for va_arg, va_copy, and va_end. There is no
117 // default action for va_start, so we do that custom.
122
123 for (auto T : {MVT::f32, MVT::f64, MVT::v4f32, MVT::v2f64}) {
124 // Don't expand the floating-point types to constant pools.
126 // Expand floating-point comparisons.
130 // Expand floating-point library function operators.
131 for (auto Op :
134 // Note supported floating-point library function operators that otherwise
135 // default to expand.
139 // Support minimum and maximum, which otherwise default to expand.
142 // WebAssembly currently has no builtin f16 support.
146 setTruncStoreAction(T, MVT::f16, Expand);
147 }
148
149 if (Subtarget->hasFP16()) {
152 }
153
154 // Expand unavailable integer operations.
155 for (auto Op :
159 for (auto T : {MVT::i32, MVT::i64})
161 if (Subtarget->hasSIMD128())
162 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64})
164 }
165
166 if (Subtarget->hasWideArithmetic()) {
171 }
172
173 if (Subtarget->hasNontrappingFPToInt())
175 for (auto T : {MVT::i32, MVT::i64})
177
178 // SIMD-specific configuration
179 if (Subtarget->hasSIMD128()) {
180 // Combine vector mask reductions into alltrue/anytrue
182
183 // Convert vector to integer bitcasts to bitmask
185
186 // Hoist bitcasts out of shuffles
188
189 // Combine extends of extract_subvectors into widening ops
191
192 // Combine int_to_fp or fp_extend of extract_vectors and vice versa into
193 // conversions ops
196
197 // Combine fp_to_{s,u}int_sat or fp_round of concat_vectors or vice versa
198 // into conversion ops
201
203
204 // Support saturating add/sub for i8x16 and i16x8
206 for (auto T : {MVT::v16i8, MVT::v8i16})
208
209 // Support integer abs
210 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64})
212
213 // Custom lower BUILD_VECTORs to minimize number of replace_lanes
214 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
215 MVT::v2f64})
217
218 if (Subtarget->hasFP16())
220
221 // We have custom shuffle lowering to expose the shuffle mask
222 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
223 MVT::v2f64})
225
226 // Support splatting
227 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
228 MVT::v2f64})
230
231 // Custom lowering since wasm shifts must have a scalar shift amount
232 for (auto Op : {ISD::SHL, ISD::SRA, ISD::SRL})
233 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64})
235
236 // Custom lower lane accesses to expand out variable indices
238 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
239 MVT::v2f64})
241
242 // There is no i8x16.mul instruction
243 setOperationAction(ISD::MUL, MVT::v16i8, Expand);
244
245 // There is no vector conditional select instruction
246 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
247 MVT::v2f64})
249
250 // Expand integer operations supported for scalars but not SIMD
251 for (auto Op :
253 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64})
255
256 // But we do have integer min and max operations
257 for (auto Op : {ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX})
258 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32})
260
261 // And we have popcnt for i8x16. It can be used to expand ctlz/cttz.
262 setOperationAction(ISD::CTPOP, MVT::v16i8, Legal);
263 setOperationAction(ISD::CTLZ, MVT::v16i8, Expand);
264 setOperationAction(ISD::CTTZ, MVT::v16i8, Expand);
265
266 // Custom lower bit counting operations for other types to scalarize them.
267 for (auto Op : {ISD::CTLZ, ISD::CTTZ, ISD::CTPOP})
268 for (auto T : {MVT::v8i16, MVT::v4i32, MVT::v2i64})
270
271 // Expand float operations supported for scalars but not SIMD
274 for (auto T : {MVT::v4f32, MVT::v2f64})
276
277 // Unsigned comparison operations are unavailable for i64x2 vectors.
279 setCondCodeAction(CC, MVT::v2i64, Custom);
280
281 // 64x2 conversions are not in the spec
282 for (auto Op :
284 for (auto T : {MVT::v2i64, MVT::v2f64})
286
287 // But saturating fp_to_int converstions are
289 setOperationAction(Op, MVT::v4i32, Custom);
290 if (Subtarget->hasFP16()) {
291 setOperationAction(Op, MVT::v8i16, Custom);
292 }
293 }
294
295 // Support vector extending
299 }
300 }
301
302 // As a special case, these operators use the type to mean the type to
303 // sign-extend from.
305 if (!Subtarget->hasSignExt()) {
306 // Sign extends are legal only when extending a vector extract
307 auto Action = Subtarget->hasSIMD128() ? Custom : Expand;
308 for (auto T : {MVT::i8, MVT::i16, MVT::i32})
310 }
313
314 // Dynamic stack allocation: use the default expansion.
318
322
323 // Expand these forms; we pattern-match the forms that we can handle in isel.
324 for (auto T : {MVT::i32, MVT::i64, MVT::f32, MVT::f64})
325 for (auto Op : {ISD::BR_CC, ISD::SELECT_CC})
327
328 // We have custom switch handling.
330
331 // WebAssembly doesn't have:
332 // - Floating-point extending loads.
333 // - Floating-point truncating stores.
334 // - i1 extending loads.
335 // - truncating SIMD stores and most extending loads
336 setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
337 setTruncStoreAction(MVT::f64, MVT::f32, Expand);
338 for (auto T : MVT::integer_valuetypes())
339 for (auto Ext : {ISD::EXTLOAD, ISD::ZEXTLOAD, ISD::SEXTLOAD})
340 setLoadExtAction(Ext, T, MVT::i1, Promote);
341 if (Subtarget->hasSIMD128()) {
342 for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64, MVT::v4f32,
343 MVT::v2f64}) {
344 for (auto MemT : MVT::fixedlen_vector_valuetypes()) {
345 if (MVT(T) != MemT) {
347 for (auto Ext : {ISD::EXTLOAD, ISD::ZEXTLOAD, ISD::SEXTLOAD})
348 setLoadExtAction(Ext, T, MemT, Expand);
349 }
350 }
351 }
352 // But some vector extending loads are legal
353 for (auto Ext : {ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}) {
354 setLoadExtAction(Ext, MVT::v8i16, MVT::v8i8, Legal);
355 setLoadExtAction(Ext, MVT::v4i32, MVT::v4i16, Legal);
356 setLoadExtAction(Ext, MVT::v2i64, MVT::v2i32, Legal);
357 }
358 setLoadExtAction(ISD::EXTLOAD, MVT::v2f64, MVT::v2f32, Legal);
359 }
360
361 // Don't do anything clever with build_pairs
363
364 // Trap lowers to wasm unreachable
365 setOperationAction(ISD::TRAP, MVT::Other, Legal);
367
368 // Exception handling intrinsics
372
374
375 // Override the __gnu_f2h_ieee/__gnu_h2f_ieee names so that the f32 name is
376 // consistent with the f64 and f128 names.
377 setLibcallName(RTLIB::FPEXT_F16_F32, "__extendhfsf2");
378 setLibcallName(RTLIB::FPROUND_F32_F16, "__truncsfhf2");
379
380 // Define the emscripten name for return address helper.
381 // TODO: when implementing other Wasm backends, make this generic or only do
382 // this on emscripten depending on what they end up doing.
383 setLibcallName(RTLIB::RETURN_ADDRESS, "emscripten_return_address");
384
385 // Always convert switches to br_tables unless there is only one case, which
386 // is equivalent to a simple branch. This reduces code size for wasm, and we
387 // defer possible jump table optimizations to the VM.
389}
390
392 uint32_t AS) const {
394 return MVT::externref;
396 return MVT::funcref;
398}
399
401 uint32_t AS) const {
403 return MVT::externref;
405 return MVT::funcref;
407}
408
410WebAssemblyTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
411 // We have wasm instructions for these
412 switch (AI->getOperation()) {
420 default:
421 break;
422 }
424}
425
426bool WebAssemblyTargetLowering::shouldScalarizeBinop(SDValue VecOp) const {
427 // Implementation copied from X86TargetLowering.
428 unsigned Opc = VecOp.getOpcode();
429
430 // Assume target opcodes can't be scalarized.
431 // TODO - do we have any exceptions?
432 if (Opc >= ISD::BUILTIN_OP_END || !isBinOp(Opc))
433 return false;
434
435 // If the vector op is not supported, try to convert to scalar.
436 EVT VecVT = VecOp.getValueType();
437 if (!isOperationLegalOrCustomOrPromote(Opc, VecVT))
438 return true;
439
440 // If the vector op is supported, but the scalar op is not, the transform may
441 // not be worthwhile.
442 EVT ScalarVT = VecVT.getScalarType();
443 return isOperationLegalOrCustomOrPromote(Opc, ScalarVT);
444}
445
446FastISel *WebAssemblyTargetLowering::createFastISel(
447 FunctionLoweringInfo &FuncInfo, const TargetLibraryInfo *LibInfo) const {
448 return WebAssembly::createFastISel(FuncInfo, LibInfo);
449}
450
451MVT WebAssemblyTargetLowering::getScalarShiftAmountTy(const DataLayout & /*DL*/,
452 EVT VT) const {
453 unsigned BitWidth = NextPowerOf2(VT.getSizeInBits() - 1);
454 if (BitWidth > 1 && BitWidth < 8)
455 BitWidth = 8;
456
457 if (BitWidth > 64) {
458 // The shift will be lowered to a libcall, and compiler-rt libcalls expect
459 // the count to be an i32.
460 BitWidth = 32;
462 "32-bit shift counts ought to be enough for anyone");
463 }
464
467 "Unable to represent scalar shift amount type");
468 return Result;
469}
470
471// Lower an fp-to-int conversion operator from the LLVM opcode, which has an
472// undefined result on invalid/overflow, to the WebAssembly opcode, which
473// traps on invalid/overflow.
476 const TargetInstrInfo &TII,
477 bool IsUnsigned, bool Int64,
478 bool Float64, unsigned LoweredOpcode) {
480
481 Register OutReg = MI.getOperand(0).getReg();
482 Register InReg = MI.getOperand(1).getReg();
483
484 unsigned Abs = Float64 ? WebAssembly::ABS_F64 : WebAssembly::ABS_F32;
485 unsigned FConst = Float64 ? WebAssembly::CONST_F64 : WebAssembly::CONST_F32;
486 unsigned LT = Float64 ? WebAssembly::LT_F64 : WebAssembly::LT_F32;
487 unsigned GE = Float64 ? WebAssembly::GE_F64 : WebAssembly::GE_F32;
488 unsigned IConst = Int64 ? WebAssembly::CONST_I64 : WebAssembly::CONST_I32;
489 unsigned Eqz = WebAssembly::EQZ_I32;
490 unsigned And = WebAssembly::AND_I32;
491 int64_t Limit = Int64 ? INT64_MIN : INT32_MIN;
492 int64_t Substitute = IsUnsigned ? 0 : Limit;
493 double CmpVal = IsUnsigned ? -(double)Limit * 2.0 : -(double)Limit;
494 auto &Context = BB->getParent()->getFunction().getContext();
495 Type *Ty = Float64 ? Type::getDoubleTy(Context) : Type::getFloatTy(Context);
496
497 const BasicBlock *LLVMBB = BB->getBasicBlock();
498 MachineFunction *F = BB->getParent();
499 MachineBasicBlock *TrueMBB = F->CreateMachineBasicBlock(LLVMBB);
500 MachineBasicBlock *FalseMBB = F->CreateMachineBasicBlock(LLVMBB);
501 MachineBasicBlock *DoneMBB = F->CreateMachineBasicBlock(LLVMBB);
502
504 F->insert(It, FalseMBB);
505 F->insert(It, TrueMBB);
506 F->insert(It, DoneMBB);
507
508 // Transfer the remainder of BB and its successor edges to DoneMBB.
509 DoneMBB->splice(DoneMBB->begin(), BB, std::next(MI.getIterator()), BB->end());
511
512 BB->addSuccessor(TrueMBB);
513 BB->addSuccessor(FalseMBB);
514 TrueMBB->addSuccessor(DoneMBB);
515 FalseMBB->addSuccessor(DoneMBB);
516
517 unsigned Tmp0, Tmp1, CmpReg, EqzReg, FalseReg, TrueReg;
518 Tmp0 = MRI.createVirtualRegister(MRI.getRegClass(InReg));
519 Tmp1 = MRI.createVirtualRegister(MRI.getRegClass(InReg));
520 CmpReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
521 EqzReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
522 FalseReg = MRI.createVirtualRegister(MRI.getRegClass(OutReg));
523 TrueReg = MRI.createVirtualRegister(MRI.getRegClass(OutReg));
524
525 MI.eraseFromParent();
526 // For signed numbers, we can do a single comparison to determine whether
527 // fabs(x) is within range.
528 if (IsUnsigned) {
529 Tmp0 = InReg;
530 } else {
531 BuildMI(BB, DL, TII.get(Abs), Tmp0).addReg(InReg);
532 }
533 BuildMI(BB, DL, TII.get(FConst), Tmp1)
534 .addFPImm(cast<ConstantFP>(ConstantFP::get(Ty, CmpVal)));
535 BuildMI(BB, DL, TII.get(LT), CmpReg).addReg(Tmp0).addReg(Tmp1);
536
537 // For unsigned numbers, we have to do a separate comparison with zero.
538 if (IsUnsigned) {
539 Tmp1 = MRI.createVirtualRegister(MRI.getRegClass(InReg));
540 Register SecondCmpReg =
541 MRI.createVirtualRegister(&WebAssembly::I32RegClass);
542 Register AndReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
543 BuildMI(BB, DL, TII.get(FConst), Tmp1)
544 .addFPImm(cast<ConstantFP>(ConstantFP::get(Ty, 0.0)));
545 BuildMI(BB, DL, TII.get(GE), SecondCmpReg).addReg(Tmp0).addReg(Tmp1);
546 BuildMI(BB, DL, TII.get(And), AndReg).addReg(CmpReg).addReg(SecondCmpReg);
547 CmpReg = AndReg;
548 }
549
550 BuildMI(BB, DL, TII.get(Eqz), EqzReg).addReg(CmpReg);
551
552 // Create the CFG diamond to select between doing the conversion or using
553 // the substitute value.
554 BuildMI(BB, DL, TII.get(WebAssembly::BR_IF)).addMBB(TrueMBB).addReg(EqzReg);
555 BuildMI(FalseMBB, DL, TII.get(LoweredOpcode), FalseReg).addReg(InReg);
556 BuildMI(FalseMBB, DL, TII.get(WebAssembly::BR)).addMBB(DoneMBB);
557 BuildMI(TrueMBB, DL, TII.get(IConst), TrueReg).addImm(Substitute);
558 BuildMI(*DoneMBB, DoneMBB->begin(), DL, TII.get(TargetOpcode::PHI), OutReg)
559 .addReg(FalseReg)
560 .addMBB(FalseMBB)
561 .addReg(TrueReg)
562 .addMBB(TrueMBB);
563
564 return DoneMBB;
565}
566
567// Lower a `MEMCPY` instruction into a CFG triangle around a `MEMORY_COPY`
568// instuction to handle the zero-length case.
571 const TargetInstrInfo &TII, bool Int64) {
573
574 MachineOperand DstMem = MI.getOperand(0);
575 MachineOperand SrcMem = MI.getOperand(1);
576 MachineOperand Dst = MI.getOperand(2);
577 MachineOperand Src = MI.getOperand(3);
578 MachineOperand Len = MI.getOperand(4);
579
580 // We're going to add an extra use to `Len` to test if it's zero; that
581 // use shouldn't be a kill, even if the original use is.
582 MachineOperand NoKillLen = Len;
583 NoKillLen.setIsKill(false);
584
585 // Decide on which `MachineInstr` opcode we're going to use.
586 unsigned Eqz = Int64 ? WebAssembly::EQZ_I64 : WebAssembly::EQZ_I32;
587 unsigned MemoryCopy =
588 Int64 ? WebAssembly::MEMORY_COPY_A64 : WebAssembly::MEMORY_COPY_A32;
589
590 // Create two new basic blocks; one for the new `memory.fill` that we can
591 // branch over, and one for the rest of the instructions after the original
592 // `memory.fill`.
593 const BasicBlock *LLVMBB = BB->getBasicBlock();
594 MachineFunction *F = BB->getParent();
595 MachineBasicBlock *TrueMBB = F->CreateMachineBasicBlock(LLVMBB);
596 MachineBasicBlock *DoneMBB = F->CreateMachineBasicBlock(LLVMBB);
597
599 F->insert(It, TrueMBB);
600 F->insert(It, DoneMBB);
601
602 // Transfer the remainder of BB and its successor edges to DoneMBB.
603 DoneMBB->splice(DoneMBB->begin(), BB, std::next(MI.getIterator()), BB->end());
605
606 // Connect the CFG edges.
607 BB->addSuccessor(TrueMBB);
608 BB->addSuccessor(DoneMBB);
609 TrueMBB->addSuccessor(DoneMBB);
610
611 // Create a virtual register for the `Eqz` result.
612 unsigned EqzReg;
613 EqzReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
614
615 // Erase the original `memory.copy`.
616 MI.eraseFromParent();
617
618 // Test if `Len` is zero.
619 BuildMI(BB, DL, TII.get(Eqz), EqzReg).add(NoKillLen);
620
621 // Insert a new `memory.copy`.
622 BuildMI(TrueMBB, DL, TII.get(MemoryCopy))
623 .add(DstMem)
624 .add(SrcMem)
625 .add(Dst)
626 .add(Src)
627 .add(Len);
628
629 // Create the CFG triangle.
630 BuildMI(BB, DL, TII.get(WebAssembly::BR_IF)).addMBB(DoneMBB).addReg(EqzReg);
631 BuildMI(TrueMBB, DL, TII.get(WebAssembly::BR)).addMBB(DoneMBB);
632
633 return DoneMBB;
634}
635
636// Lower a `MEMSET` instruction into a CFG triangle around a `MEMORY_FILL`
637// instuction to handle the zero-length case.
640 const TargetInstrInfo &TII, bool Int64) {
642
643 MachineOperand Mem = MI.getOperand(0);
644 MachineOperand Dst = MI.getOperand(1);
645 MachineOperand Val = MI.getOperand(2);
646 MachineOperand Len = MI.getOperand(3);
647
648 // We're going to add an extra use to `Len` to test if it's zero; that
649 // use shouldn't be a kill, even if the original use is.
650 MachineOperand NoKillLen = Len;
651 NoKillLen.setIsKill(false);
652
653 // Decide on which `MachineInstr` opcode we're going to use.
654 unsigned Eqz = Int64 ? WebAssembly::EQZ_I64 : WebAssembly::EQZ_I32;
655 unsigned MemoryFill =
656 Int64 ? WebAssembly::MEMORY_FILL_A64 : WebAssembly::MEMORY_FILL_A32;
657
658 // Create two new basic blocks; one for the new `memory.fill` that we can
659 // branch over, and one for the rest of the instructions after the original
660 // `memory.fill`.
661 const BasicBlock *LLVMBB = BB->getBasicBlock();
662 MachineFunction *F = BB->getParent();
663 MachineBasicBlock *TrueMBB = F->CreateMachineBasicBlock(LLVMBB);
664 MachineBasicBlock *DoneMBB = F->CreateMachineBasicBlock(LLVMBB);
665
667 F->insert(It, TrueMBB);
668 F->insert(It, DoneMBB);
669
670 // Transfer the remainder of BB and its successor edges to DoneMBB.
671 DoneMBB->splice(DoneMBB->begin(), BB, std::next(MI.getIterator()), BB->end());
673
674 // Connect the CFG edges.
675 BB->addSuccessor(TrueMBB);
676 BB->addSuccessor(DoneMBB);
677 TrueMBB->addSuccessor(DoneMBB);
678
679 // Create a virtual register for the `Eqz` result.
680 unsigned EqzReg;
681 EqzReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
682
683 // Erase the original `memory.fill`.
684 MI.eraseFromParent();
685
686 // Test if `Len` is zero.
687 BuildMI(BB, DL, TII.get(Eqz), EqzReg).add(NoKillLen);
688
689 // Insert a new `memory.copy`.
690 BuildMI(TrueMBB, DL, TII.get(MemoryFill)).add(Mem).add(Dst).add(Val).add(Len);
691
692 // Create the CFG triangle.
693 BuildMI(BB, DL, TII.get(WebAssembly::BR_IF)).addMBB(DoneMBB).addReg(EqzReg);
694 BuildMI(TrueMBB, DL, TII.get(WebAssembly::BR)).addMBB(DoneMBB);
695
696 return DoneMBB;
697}
698
699static MachineBasicBlock *
701 const WebAssemblySubtarget *Subtarget,
702 const TargetInstrInfo &TII) {
703 MachineInstr &CallParams = *CallResults.getPrevNode();
704 assert(CallParams.getOpcode() == WebAssembly::CALL_PARAMS);
705 assert(CallResults.getOpcode() == WebAssembly::CALL_RESULTS ||
706 CallResults.getOpcode() == WebAssembly::RET_CALL_RESULTS);
707
708 bool IsIndirect =
709 CallParams.getOperand(0).isReg() || CallParams.getOperand(0).isFI();
710 bool IsRetCall = CallResults.getOpcode() == WebAssembly::RET_CALL_RESULTS;
711
712 bool IsFuncrefCall = false;
713 if (IsIndirect && CallParams.getOperand(0).isReg()) {
714 Register Reg = CallParams.getOperand(0).getReg();
715 const MachineFunction *MF = BB->getParent();
716 const MachineRegisterInfo &MRI = MF->getRegInfo();
717 const TargetRegisterClass *TRC = MRI.getRegClass(Reg);
718 IsFuncrefCall = (TRC == &WebAssembly::FUNCREFRegClass);
719 assert(!IsFuncrefCall || Subtarget->hasReferenceTypes());
720 }
721
722 unsigned CallOp;
723 if (IsIndirect && IsRetCall) {
724 CallOp = WebAssembly::RET_CALL_INDIRECT;
725 } else if (IsIndirect) {
726 CallOp = WebAssembly::CALL_INDIRECT;
727 } else if (IsRetCall) {
728 CallOp = WebAssembly::RET_CALL;
729 } else {
730 CallOp = WebAssembly::CALL;
731 }
732
733 MachineFunction &MF = *BB->getParent();
734 const MCInstrDesc &MCID = TII.get(CallOp);
735 MachineInstrBuilder MIB(MF, MF.CreateMachineInstr(MCID, DL));
736
737 // Move the function pointer to the end of the arguments for indirect calls
738 if (IsIndirect) {
739 auto FnPtr = CallParams.getOperand(0);
740 CallParams.removeOperand(0);
741
742 // For funcrefs, call_indirect is done through __funcref_call_table and the
743 // funcref is always installed in slot 0 of the table, therefore instead of
744 // having the function pointer added at the end of the params list, a zero
745 // (the index in
746 // __funcref_call_table is added).
747 if (IsFuncrefCall) {
748 Register RegZero =
749 MF.getRegInfo().createVirtualRegister(&WebAssembly::I32RegClass);
750 MachineInstrBuilder MIBC0 =
751 BuildMI(MF, DL, TII.get(WebAssembly::CONST_I32), RegZero).addImm(0);
752
753 BB->insert(CallResults.getIterator(), MIBC0);
754 MachineInstrBuilder(MF, CallParams).addReg(RegZero);
755 } else
756 CallParams.addOperand(FnPtr);
757 }
758
759 for (auto Def : CallResults.defs())
760 MIB.add(Def);
761
762 if (IsIndirect) {
763 // Placeholder for the type index.
764 MIB.addImm(0);
765 // The table into which this call_indirect indexes.
766 MCSymbolWasm *Table = IsFuncrefCall
768 MF.getContext(), Subtarget)
770 MF.getContext(), Subtarget);
771 if (Subtarget->hasCallIndirectOverlong()) {
772 MIB.addSym(Table);
773 } else {
774 // For the MVP there is at most one table whose number is 0, but we can't
775 // write a table symbol or issue relocations. Instead we just ensure the
776 // table is live and write a zero.
777 Table->setNoStrip();
778 MIB.addImm(0);
779 }
780 }
781
782 for (auto Use : CallParams.uses())
783 MIB.add(Use);
784
785 BB->insert(CallResults.getIterator(), MIB);
786 CallParams.eraseFromParent();
787 CallResults.eraseFromParent();
788
789 // If this is a funcref call, to avoid hidden GC roots, we need to clear the
790 // table slot with ref.null upon call_indirect return.
791 //
792 // This generates the following code, which comes right after a call_indirect
793 // of a funcref:
794 //
795 // i32.const 0
796 // ref.null func
797 // table.set __funcref_call_table
798 if (IsIndirect && IsFuncrefCall) {
800 MF.getContext(), Subtarget);
801 Register RegZero =
802 MF.getRegInfo().createVirtualRegister(&WebAssembly::I32RegClass);
803 MachineInstr *Const0 =
804 BuildMI(MF, DL, TII.get(WebAssembly::CONST_I32), RegZero).addImm(0);
805 BB->insertAfter(MIB.getInstr()->getIterator(), Const0);
806
807 Register RegFuncref =
808 MF.getRegInfo().createVirtualRegister(&WebAssembly::FUNCREFRegClass);
809 MachineInstr *RefNull =
810 BuildMI(MF, DL, TII.get(WebAssembly::REF_NULL_FUNCREF), RegFuncref);
811 BB->insertAfter(Const0->getIterator(), RefNull);
812
813 MachineInstr *TableSet =
814 BuildMI(MF, DL, TII.get(WebAssembly::TABLE_SET_FUNCREF))
815 .addSym(Table)
816 .addReg(RegZero)
817 .addReg(RegFuncref);
818 BB->insertAfter(RefNull->getIterator(), TableSet);
819 }
820
821 return BB;
822}
823
824MachineBasicBlock *WebAssemblyTargetLowering::EmitInstrWithCustomInserter(
825 MachineInstr &MI, MachineBasicBlock *BB) const {
826 const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
827 DebugLoc DL = MI.getDebugLoc();
828
829 switch (MI.getOpcode()) {
830 default:
831 llvm_unreachable("Unexpected instr type to insert");
832 case WebAssembly::FP_TO_SINT_I32_F32:
833 return LowerFPToInt(MI, DL, BB, TII, false, false, false,
834 WebAssembly::I32_TRUNC_S_F32);
835 case WebAssembly::FP_TO_UINT_I32_F32:
836 return LowerFPToInt(MI, DL, BB, TII, true, false, false,
837 WebAssembly::I32_TRUNC_U_F32);
838 case WebAssembly::FP_TO_SINT_I64_F32:
839 return LowerFPToInt(MI, DL, BB, TII, false, true, false,
840 WebAssembly::I64_TRUNC_S_F32);
841 case WebAssembly::FP_TO_UINT_I64_F32:
842 return LowerFPToInt(MI, DL, BB, TII, true, true, false,
843 WebAssembly::I64_TRUNC_U_F32);
844 case WebAssembly::FP_TO_SINT_I32_F64:
845 return LowerFPToInt(MI, DL, BB, TII, false, false, true,
846 WebAssembly::I32_TRUNC_S_F64);
847 case WebAssembly::FP_TO_UINT_I32_F64:
848 return LowerFPToInt(MI, DL, BB, TII, true, false, true,
849 WebAssembly::I32_TRUNC_U_F64);
850 case WebAssembly::FP_TO_SINT_I64_F64:
851 return LowerFPToInt(MI, DL, BB, TII, false, true, true,
852 WebAssembly::I64_TRUNC_S_F64);
853 case WebAssembly::FP_TO_UINT_I64_F64:
854 return LowerFPToInt(MI, DL, BB, TII, true, true, true,
855 WebAssembly::I64_TRUNC_U_F64);
856 case WebAssembly::MEMCPY_A32:
857 return LowerMemcpy(MI, DL, BB, TII, false);
858 case WebAssembly::MEMCPY_A64:
859 return LowerMemcpy(MI, DL, BB, TII, true);
860 case WebAssembly::MEMSET_A32:
861 return LowerMemset(MI, DL, BB, TII, false);
862 case WebAssembly::MEMSET_A64:
863 return LowerMemset(MI, DL, BB, TII, true);
864 case WebAssembly::CALL_RESULTS:
865 case WebAssembly::RET_CALL_RESULTS:
866 return LowerCallResults(MI, DL, BB, Subtarget, TII);
867 }
868}
869
870const char *
871WebAssemblyTargetLowering::getTargetNodeName(unsigned Opcode) const {
872 switch (static_cast<WebAssemblyISD::NodeType>(Opcode)) {
875 break;
876#define HANDLE_NODETYPE(NODE) \
877 case WebAssemblyISD::NODE: \
878 return "WebAssemblyISD::" #NODE;
879#define HANDLE_MEM_NODETYPE(NODE) HANDLE_NODETYPE(NODE)
880#include "WebAssemblyISD.def"
881#undef HANDLE_MEM_NODETYPE
882#undef HANDLE_NODETYPE
883 }
884 return nullptr;
885}
886
887std::pair<unsigned, const TargetRegisterClass *>
888WebAssemblyTargetLowering::getRegForInlineAsmConstraint(
889 const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const {
890 // First, see if this is a constraint that directly corresponds to a
891 // WebAssembly register class.
892 if (Constraint.size() == 1) {
893 switch (Constraint[0]) {
894 case 'r':
895 assert(VT != MVT::iPTR && "Pointer MVT not expected here");
896 if (Subtarget->hasSIMD128() && VT.isVector()) {
897 if (VT.getSizeInBits() == 128)
898 return std::make_pair(0U, &WebAssembly::V128RegClass);
899 }
900 if (VT.isInteger() && !VT.isVector()) {
901 if (VT.getSizeInBits() <= 32)
902 return std::make_pair(0U, &WebAssembly::I32RegClass);
903 if (VT.getSizeInBits() <= 64)
904 return std::make_pair(0U, &WebAssembly::I64RegClass);
905 }
906 if (VT.isFloatingPoint() && !VT.isVector()) {
907 switch (VT.getSizeInBits()) {
908 case 32:
909 return std::make_pair(0U, &WebAssembly::F32RegClass);
910 case 64:
911 return std::make_pair(0U, &WebAssembly::F64RegClass);
912 default:
913 break;
914 }
915 }
916 break;
917 default:
918 break;
919 }
920 }
921
923}
924
925bool WebAssemblyTargetLowering::isCheapToSpeculateCttz(Type *Ty) const {
926 // Assume ctz is a relatively cheap operation.
927 return true;
928}
929
930bool WebAssemblyTargetLowering::isCheapToSpeculateCtlz(Type *Ty) const {
931 // Assume clz is a relatively cheap operation.
932 return true;
933}
934
935bool WebAssemblyTargetLowering::isLegalAddressingMode(const DataLayout &DL,
936 const AddrMode &AM,
937 Type *Ty, unsigned AS,
938 Instruction *I) const {
939 // WebAssembly offsets are added as unsigned without wrapping. The
940 // isLegalAddressingMode gives us no way to determine if wrapping could be
941 // happening, so we approximate this by accepting only non-negative offsets.
942 if (AM.BaseOffs < 0)
943 return false;
944
945 // WebAssembly has no scale register operands.
946 if (AM.Scale != 0)
947 return false;
948
949 // Everything else is legal.
950 return true;
951}
952
953bool WebAssemblyTargetLowering::allowsMisalignedMemoryAccesses(
954 EVT /*VT*/, unsigned /*AddrSpace*/, Align /*Align*/,
955 MachineMemOperand::Flags /*Flags*/, unsigned *Fast) const {
956 // WebAssembly supports unaligned accesses, though it should be declared
957 // with the p2align attribute on loads and stores which do so, and there
958 // may be a performance impact. We tell LLVM they're "fast" because
959 // for the kinds of things that LLVM uses this for (merging adjacent stores
960 // of constants, etc.), WebAssembly implementations will either want the
961 // unaligned access or they'll split anyway.
962 if (Fast)
963 *Fast = 1;
964 return true;
965}
966
967bool WebAssemblyTargetLowering::isIntDivCheap(EVT VT,
968 AttributeList Attr) const {
969 // The current thinking is that wasm engines will perform this optimization,
970 // so we can save on code size.
971 return true;
972}
973
974bool WebAssemblyTargetLowering::isVectorLoadExtDesirable(SDValue ExtVal) const {
975 EVT ExtT = ExtVal.getValueType();
976 EVT MemT = cast<LoadSDNode>(ExtVal->getOperand(0))->getValueType(0);
977 return (ExtT == MVT::v8i16 && MemT == MVT::v8i8) ||
978 (ExtT == MVT::v4i32 && MemT == MVT::v4i16) ||
979 (ExtT == MVT::v2i64 && MemT == MVT::v2i32);
980}
981
982bool WebAssemblyTargetLowering::isOffsetFoldingLegal(
983 const GlobalAddressSDNode *GA) const {
984 // Wasm doesn't support function addresses with offsets
985 const GlobalValue *GV = GA->getGlobal();
986 return isa<Function>(GV) ? false : TargetLowering::isOffsetFoldingLegal(GA);
987}
988
989EVT WebAssemblyTargetLowering::getSetCCResultType(const DataLayout &DL,
990 LLVMContext &C,
991 EVT VT) const {
992 if (VT.isVector())
994
995 // So far, all branch instructions in Wasm take an I32 condition.
996 // The default TargetLowering::getSetCCResultType returns the pointer size,
997 // which would be useful to reduce instruction counts when testing
998 // against 64-bit pointers/values if at some point Wasm supports that.
999 return EVT::getIntegerVT(C, 32);
1000}
1001
1002bool WebAssemblyTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
1003 const CallInst &I,
1004 MachineFunction &MF,
1005 unsigned Intrinsic) const {
1006 switch (Intrinsic) {
1007 case Intrinsic::wasm_memory_atomic_notify:
1009 Info.memVT = MVT::i32;
1010 Info.ptrVal = I.getArgOperand(0);
1011 Info.offset = 0;
1012 Info.align = Align(4);
1013 // atomic.notify instruction does not really load the memory specified with
1014 // this argument, but MachineMemOperand should either be load or store, so
1015 // we set this to a load.
1016 // FIXME Volatile isn't really correct, but currently all LLVM atomic
1017 // instructions are treated as volatiles in the backend, so we should be
1018 // consistent. The same applies for wasm_atomic_wait intrinsics too.
1020 return true;
1021 case Intrinsic::wasm_memory_atomic_wait32:
1023 Info.memVT = MVT::i32;
1024 Info.ptrVal = I.getArgOperand(0);
1025 Info.offset = 0;
1026 Info.align = Align(4);
1028 return true;
1029 case Intrinsic::wasm_memory_atomic_wait64:
1031 Info.memVT = MVT::i64;
1032 Info.ptrVal = I.getArgOperand(0);
1033 Info.offset = 0;
1034 Info.align = Align(8);
1036 return true;
1037 case Intrinsic::wasm_loadf16_f32:
1039 Info.memVT = MVT::f16;
1040 Info.ptrVal = I.getArgOperand(0);
1041 Info.offset = 0;
1042 Info.align = Align(2);
1044 return true;
1045 case Intrinsic::wasm_storef16_f32:
1047 Info.memVT = MVT::f16;
1048 Info.ptrVal = I.getArgOperand(1);
1049 Info.offset = 0;
1050 Info.align = Align(2);
1052 return true;
1053 default:
1054 return false;
1055 }
1056}
1057
1058void WebAssemblyTargetLowering::computeKnownBitsForTargetNode(
1059 const SDValue Op, KnownBits &Known, const APInt &DemandedElts,
1060 const SelectionDAG &DAG, unsigned Depth) const {
1061 switch (Op.getOpcode()) {
1062 default:
1063 break;
1065 unsigned IntNo = Op.getConstantOperandVal(0);
1066 switch (IntNo) {
1067 default:
1068 break;
1069 case Intrinsic::wasm_bitmask: {
1070 unsigned BitWidth = Known.getBitWidth();
1071 EVT VT = Op.getOperand(1).getSimpleValueType();
1072 unsigned PossibleBits = VT.getVectorNumElements();
1073 APInt ZeroMask = APInt::getHighBitsSet(BitWidth, BitWidth - PossibleBits);
1074 Known.Zero |= ZeroMask;
1075 break;
1076 }
1077 }
1078 }
1079 }
1080}
1081
1083WebAssemblyTargetLowering::getPreferredVectorAction(MVT VT) const {
1084 if (VT.isFixedLengthVector()) {
1085 MVT EltVT = VT.getVectorElementType();
1086 // We have legal vector types with these lane types, so widening the
1087 // vector would let us use some of the lanes directly without having to
1088 // extend or truncate values.
1089 if (EltVT == MVT::i8 || EltVT == MVT::i16 || EltVT == MVT::i32 ||
1090 EltVT == MVT::i64 || EltVT == MVT::f32 || EltVT == MVT::f64)
1091 return TypeWidenVector;
1092 }
1093
1095}
1096
1097bool WebAssemblyTargetLowering::shouldSimplifyDemandedVectorElts(
1098 SDValue Op, const TargetLoweringOpt &TLO) const {
1099 // ISel process runs DAGCombiner after legalization; this step is called
1100 // SelectionDAG optimization phase. This post-legalization combining process
1101 // runs DAGCombiner on each node, and if there was a change to be made,
1102 // re-runs legalization again on it and its user nodes to make sure
1103 // everythiing is in a legalized state.
1104 //
1105 // The legalization calls lowering routines, and we do our custom lowering for
1106 // build_vectors (LowerBUILD_VECTOR), which converts undef vector elements
1107 // into zeros. But there is a set of routines in DAGCombiner that turns unused
1108 // (= not demanded) nodes into undef, among which SimplifyDemandedVectorElts
1109 // turns unused vector elements into undefs. But this routine does not work
1110 // with our custom LowerBUILD_VECTOR, which turns undefs into zeros. This
1111 // combination can result in a infinite loop, in which undefs are converted to
1112 // zeros in legalization and back to undefs in combining.
1113 //
1114 // So after DAG is legalized, we prevent SimplifyDemandedVectorElts from
1115 // running for build_vectors.
1116 if (Op.getOpcode() == ISD::BUILD_VECTOR && TLO.LegalOps && TLO.LegalTys)
1117 return false;
1118 return true;
1119}
1120
1121//===----------------------------------------------------------------------===//
1122// WebAssembly Lowering private implementation.
1123//===----------------------------------------------------------------------===//
1124
1125//===----------------------------------------------------------------------===//
1126// Lowering Code
1127//===----------------------------------------------------------------------===//
1128
1129static void fail(const SDLoc &DL, SelectionDAG &DAG, const char *Msg) {
1131 DAG.getContext()->diagnose(
1132 DiagnosticInfoUnsupported(MF.getFunction(), Msg, DL.getDebugLoc()));
1133}
1134
1135// Test whether the given calling convention is supported.
1137 // We currently support the language-independent target-independent
1138 // conventions. We don't yet have a way to annotate calls with properties like
1139 // "cold", and we don't have any call-clobbered registers, so these are mostly
1140 // all handled the same.
1141 return CallConv == CallingConv::C || CallConv == CallingConv::Fast ||
1142 CallConv == CallingConv::Cold ||
1143 CallConv == CallingConv::PreserveMost ||
1144 CallConv == CallingConv::PreserveAll ||
1145 CallConv == CallingConv::CXX_FAST_TLS ||
1147 CallConv == CallingConv::Swift;
1148}
1149
1150SDValue
1151WebAssemblyTargetLowering::LowerCall(CallLoweringInfo &CLI,
1152 SmallVectorImpl<SDValue> &InVals) const {
1153 SelectionDAG &DAG = CLI.DAG;
1154 SDLoc DL = CLI.DL;
1155 SDValue Chain = CLI.Chain;
1156 SDValue Callee = CLI.Callee;
1158 auto Layout = MF.getDataLayout();
1159
1160 CallingConv::ID CallConv = CLI.CallConv;
1161 if (!callingConvSupported(CallConv))
1162 fail(DL, DAG,
1163 "WebAssembly doesn't support language-specific or target-specific "
1164 "calling conventions yet");
1165 if (CLI.IsPatchPoint)
1166 fail(DL, DAG, "WebAssembly doesn't support patch point yet");
1167
1168 if (CLI.IsTailCall) {
1169 auto NoTail = [&](const char *Msg) {
1170 if (CLI.CB && CLI.CB->isMustTailCall())
1171 fail(DL, DAG, Msg);
1172 CLI.IsTailCall = false;
1173 };
1174
1175 if (!Subtarget->hasTailCall())
1176 NoTail("WebAssembly 'tail-call' feature not enabled");
1177
1178 // Varargs calls cannot be tail calls because the buffer is on the stack
1179 if (CLI.IsVarArg)
1180 NoTail("WebAssembly does not support varargs tail calls");
1181
1182 // Do not tail call unless caller and callee return types match
1183 const Function &F = MF.getFunction();
1185 Type *RetTy = F.getReturnType();
1186 SmallVector<MVT, 4> CallerRetTys;
1187 SmallVector<MVT, 4> CalleeRetTys;
1188 computeLegalValueVTs(F, TM, RetTy, CallerRetTys);
1189 computeLegalValueVTs(F, TM, CLI.RetTy, CalleeRetTys);
1190 bool TypesMatch = CallerRetTys.size() == CalleeRetTys.size() &&
1191 std::equal(CallerRetTys.begin(), CallerRetTys.end(),
1192 CalleeRetTys.begin());
1193 if (!TypesMatch)
1194 NoTail("WebAssembly tail call requires caller and callee return types to "
1195 "match");
1196
1197 // If pointers to local stack values are passed, we cannot tail call
1198 if (CLI.CB) {
1199 for (auto &Arg : CLI.CB->args()) {
1200 Value *Val = Arg.get();
1201 // Trace the value back through pointer operations
1202 while (true) {
1203 Value *Src = Val->stripPointerCastsAndAliases();
1204 if (auto *GEP = dyn_cast<GetElementPtrInst>(Src))
1205 Src = GEP->getPointerOperand();
1206 if (Val == Src)
1207 break;
1208 Val = Src;
1209 }
1210 if (isa<AllocaInst>(Val)) {
1211 NoTail(
1212 "WebAssembly does not support tail calling with stack arguments");
1213 break;
1214 }
1215 }
1216 }
1217 }
1218
1220 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
1221 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
1222
1223 // The generic code may have added an sret argument. If we're lowering an
1224 // invoke function, the ABI requires that the function pointer be the first
1225 // argument, so we may have to swap the arguments.
1226 if (CallConv == CallingConv::WASM_EmscriptenInvoke && Outs.size() >= 2 &&
1227 Outs[0].Flags.isSRet()) {
1228 std::swap(Outs[0], Outs[1]);
1229 std::swap(OutVals[0], OutVals[1]);
1230 }
1231
1232 bool HasSwiftSelfArg = false;
1233 bool HasSwiftErrorArg = false;
1234 unsigned NumFixedArgs = 0;
1235 for (unsigned I = 0; I < Outs.size(); ++I) {
1236 const ISD::OutputArg &Out = Outs[I];
1237 SDValue &OutVal = OutVals[I];
1238 HasSwiftSelfArg |= Out.Flags.isSwiftSelf();
1239 HasSwiftErrorArg |= Out.Flags.isSwiftError();
1240 if (Out.Flags.isNest())
1241 fail(DL, DAG, "WebAssembly hasn't implemented nest arguments");
1242 if (Out.Flags.isInAlloca())
1243 fail(DL, DAG, "WebAssembly hasn't implemented inalloca arguments");
1244 if (Out.Flags.isInConsecutiveRegs())
1245 fail(DL, DAG, "WebAssembly hasn't implemented cons regs arguments");
1247 fail(DL, DAG, "WebAssembly hasn't implemented cons regs last arguments");
1248 if (Out.Flags.isByVal() && Out.Flags.getByValSize() != 0) {
1249 auto &MFI = MF.getFrameInfo();
1250 int FI = MFI.CreateStackObject(Out.Flags.getByValSize(),
1252 /*isSS=*/false);
1253 SDValue SizeNode =
1254 DAG.getConstant(Out.Flags.getByValSize(), DL, MVT::i32);
1255 SDValue FINode = DAG.getFrameIndex(FI, getPointerTy(Layout));
1256 Chain = DAG.getMemcpy(Chain, DL, FINode, OutVal, SizeNode,
1258 /*isVolatile*/ false, /*AlwaysInline=*/false,
1259 /*CI=*/nullptr, std::nullopt, MachinePointerInfo(),
1261 OutVal = FINode;
1262 }
1263 // Count the number of fixed args *after* legalization.
1264 NumFixedArgs += Out.IsFixed;
1265 }
1266
1267 bool IsVarArg = CLI.IsVarArg;
1268 auto PtrVT = getPointerTy(Layout);
1269
1270 // For swiftcc, emit additional swiftself and swifterror arguments
1271 // if there aren't. These additional arguments are also added for callee
1272 // signature They are necessary to match callee and caller signature for
1273 // indirect call.
1274 if (CallConv == CallingConv::Swift) {
1275 if (!HasSwiftSelfArg) {
1276 NumFixedArgs++;
1277 ISD::OutputArg Arg;
1278 Arg.Flags.setSwiftSelf();
1279 CLI.Outs.push_back(Arg);
1280 SDValue ArgVal = DAG.getUNDEF(PtrVT);
1281 CLI.OutVals.push_back(ArgVal);
1282 }
1283 if (!HasSwiftErrorArg) {
1284 NumFixedArgs++;
1285 ISD::OutputArg Arg;
1286 Arg.Flags.setSwiftError();
1287 CLI.Outs.push_back(Arg);
1288 SDValue ArgVal = DAG.getUNDEF(PtrVT);
1289 CLI.OutVals.push_back(ArgVal);
1290 }
1291 }
1292
1293 // Analyze operands of the call, assigning locations to each operand.
1295 CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
1296
1297 if (IsVarArg) {
1298 // Outgoing non-fixed arguments are placed in a buffer. First
1299 // compute their offsets and the total amount of buffer space needed.
1300 for (unsigned I = NumFixedArgs; I < Outs.size(); ++I) {
1301 const ISD::OutputArg &Out = Outs[I];
1302 SDValue &Arg = OutVals[I];
1303 EVT VT = Arg.getValueType();
1304 assert(VT != MVT::iPTR && "Legalized args should be concrete");
1305 Type *Ty = VT.getTypeForEVT(*DAG.getContext());
1306 Align Alignment =
1307 std::max(Out.Flags.getNonZeroOrigAlign(), Layout.getABITypeAlign(Ty));
1308 unsigned Offset =
1309 CCInfo.AllocateStack(Layout.getTypeAllocSize(Ty), Alignment);
1310 CCInfo.addLoc(CCValAssign::getMem(ArgLocs.size(), VT.getSimpleVT(),
1311 Offset, VT.getSimpleVT(),
1313 }
1314 }
1315
1316 unsigned NumBytes = CCInfo.getAlignedCallFrameSize();
1317
1318 SDValue FINode;
1319 if (IsVarArg && NumBytes) {
1320 // For non-fixed arguments, next emit stores to store the argument values
1321 // to the stack buffer at the offsets computed above.
1322 MaybeAlign StackAlign = Layout.getStackAlignment();
1323 assert(StackAlign && "data layout string is missing stack alignment");
1324 int FI = MF.getFrameInfo().CreateStackObject(NumBytes, *StackAlign,
1325 /*isSS=*/false);
1326 unsigned ValNo = 0;
1328 for (SDValue Arg : drop_begin(OutVals, NumFixedArgs)) {
1329 assert(ArgLocs[ValNo].getValNo() == ValNo &&
1330 "ArgLocs should remain in order and only hold varargs args");
1331 unsigned Offset = ArgLocs[ValNo++].getLocMemOffset();
1332 FINode = DAG.getFrameIndex(FI, getPointerTy(Layout));
1333 SDValue Add = DAG.getNode(ISD::ADD, DL, PtrVT, FINode,
1334 DAG.getConstant(Offset, DL, PtrVT));
1335 Chains.push_back(
1336 DAG.getStore(Chain, DL, Arg, Add,
1338 }
1339 if (!Chains.empty())
1340 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
1341 } else if (IsVarArg) {
1342 FINode = DAG.getIntPtrConstant(0, DL);
1343 }
1344
1345 if (Callee->getOpcode() == ISD::GlobalAddress) {
1346 // If the callee is a GlobalAddress node (quite common, every direct call
1347 // is) turn it into a TargetGlobalAddress node so that LowerGlobalAddress
1348 // doesn't at MO_GOT which is not needed for direct calls.
1349 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Callee);
1352 GA->getOffset());
1353 Callee = DAG.getNode(WebAssemblyISD::Wrapper, DL,
1354 getPointerTy(DAG.getDataLayout()), Callee);
1355 }
1356
1357 // Compute the operands for the CALLn node.
1359 Ops.push_back(Chain);
1360 Ops.push_back(Callee);
1361
1362 // Add all fixed arguments. Note that for non-varargs calls, NumFixedArgs
1363 // isn't reliable.
1364 Ops.append(OutVals.begin(),
1365 IsVarArg ? OutVals.begin() + NumFixedArgs : OutVals.end());
1366 // Add a pointer to the vararg buffer.
1367 if (IsVarArg)
1368 Ops.push_back(FINode);
1369
1370 SmallVector<EVT, 8> InTys;
1371 for (const auto &In : Ins) {
1372 assert(!In.Flags.isByVal() && "byval is not valid for return values");
1373 assert(!In.Flags.isNest() && "nest is not valid for return values");
1374 if (In.Flags.isInAlloca())
1375 fail(DL, DAG, "WebAssembly hasn't implemented inalloca return values");
1376 if (In.Flags.isInConsecutiveRegs())
1377 fail(DL, DAG, "WebAssembly hasn't implemented cons regs return values");
1378 if (In.Flags.isInConsecutiveRegsLast())
1379 fail(DL, DAG,
1380 "WebAssembly hasn't implemented cons regs last return values");
1381 // Ignore In.getNonZeroOrigAlign() because all our arguments are passed in
1382 // registers.
1383 InTys.push_back(In.VT);
1384 }
1385
1386 // Lastly, if this is a call to a funcref we need to add an instruction
1387 // table.set to the chain and transform the call.
1389 CLI.CB->getCalledOperand()->getType())) {
1390 // In the absence of function references proposal where a funcref call is
1391 // lowered to call_ref, using reference types we generate a table.set to set
1392 // the funcref to a special table used solely for this purpose, followed by
1393 // a call_indirect. Here we just generate the table set, and return the
1394 // SDValue of the table.set so that LowerCall can finalize the lowering by
1395 // generating the call_indirect.
1396 SDValue Chain = Ops[0];
1397
1399 MF.getContext(), Subtarget);
1400 SDValue Sym = DAG.getMCSymbol(Table, PtrVT);
1401 SDValue TableSlot = DAG.getConstant(0, DL, MVT::i32);
1402 SDValue TableSetOps[] = {Chain, Sym, TableSlot, Callee};
1403 SDValue TableSet = DAG.getMemIntrinsicNode(
1404 WebAssemblyISD::TABLE_SET, DL, DAG.getVTList(MVT::Other), TableSetOps,
1405 MVT::funcref,
1406 // Machine Mem Operand args
1409 CLI.CB->getCalledOperand()->getPointerAlignment(DAG.getDataLayout()),
1411
1412 Ops[0] = TableSet; // The new chain is the TableSet itself
1413 }
1414
1415 if (CLI.IsTailCall) {
1416 // ret_calls do not return values to the current frame
1417 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
1418 return DAG.getNode(WebAssemblyISD::RET_CALL, DL, NodeTys, Ops);
1419 }
1420
1421 InTys.push_back(MVT::Other);
1422 SDVTList InTyList = DAG.getVTList(InTys);
1423 SDValue Res = DAG.getNode(WebAssemblyISD::CALL, DL, InTyList, Ops);
1424
1425 for (size_t I = 0; I < Ins.size(); ++I)
1426 InVals.push_back(Res.getValue(I));
1427
1428 // Return the chain
1429 return Res.getValue(Ins.size());
1430}
1431
1432bool WebAssemblyTargetLowering::CanLowerReturn(
1433 CallingConv::ID /*CallConv*/, MachineFunction & /*MF*/, bool /*IsVarArg*/,
1435 LLVMContext & /*Context*/) const {
1436 // WebAssembly can only handle returning tuples with multivalue enabled
1437 return WebAssembly::canLowerReturn(Outs.size(), Subtarget);
1438}
1439
1440SDValue WebAssemblyTargetLowering::LowerReturn(
1441 SDValue Chain, CallingConv::ID CallConv, bool /*IsVarArg*/,
1443 const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
1444 SelectionDAG &DAG) const {
1445 assert(WebAssembly::canLowerReturn(Outs.size(), Subtarget) &&
1446 "MVP WebAssembly can only return up to one value");
1447 if (!callingConvSupported(CallConv))
1448 fail(DL, DAG, "WebAssembly doesn't support non-C calling conventions");
1449
1450 SmallVector<SDValue, 4> RetOps(1, Chain);
1451 RetOps.append(OutVals.begin(), OutVals.end());
1452 Chain = DAG.getNode(WebAssemblyISD::RETURN, DL, MVT::Other, RetOps);
1453
1454 // Record the number and types of the return values.
1455 for (const ISD::OutputArg &Out : Outs) {
1456 assert(!Out.Flags.isByVal() && "byval is not valid for return values");
1457 assert(!Out.Flags.isNest() && "nest is not valid for return values");
1458 assert(Out.IsFixed && "non-fixed return value is not valid");
1459 if (Out.Flags.isInAlloca())
1460 fail(DL, DAG, "WebAssembly hasn't implemented inalloca results");
1461 if (Out.Flags.isInConsecutiveRegs())
1462 fail(DL, DAG, "WebAssembly hasn't implemented cons regs results");
1464 fail(DL, DAG, "WebAssembly hasn't implemented cons regs last results");
1465 }
1466
1467 return Chain;
1468}
1469
1470SDValue WebAssemblyTargetLowering::LowerFormalArguments(
1471 SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
1472 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
1473 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
1474 if (!callingConvSupported(CallConv))
1475 fail(DL, DAG, "WebAssembly doesn't support non-C calling conventions");
1476
1478 auto *MFI = MF.getInfo<WebAssemblyFunctionInfo>();
1479
1480 // Set up the incoming ARGUMENTS value, which serves to represent the liveness
1481 // of the incoming values before they're represented by virtual registers.
1482 MF.getRegInfo().addLiveIn(WebAssembly::ARGUMENTS);
1483
1484 bool HasSwiftErrorArg = false;
1485 bool HasSwiftSelfArg = false;
1486 for (const ISD::InputArg &In : Ins) {
1487 HasSwiftSelfArg |= In.Flags.isSwiftSelf();
1488 HasSwiftErrorArg |= In.Flags.isSwiftError();
1489 if (In.Flags.isInAlloca())
1490 fail(DL, DAG, "WebAssembly hasn't implemented inalloca arguments");
1491 if (In.Flags.isNest())
1492 fail(DL, DAG, "WebAssembly hasn't implemented nest arguments");
1493 if (In.Flags.isInConsecutiveRegs())
1494 fail(DL, DAG, "WebAssembly hasn't implemented cons regs arguments");
1495 if (In.Flags.isInConsecutiveRegsLast())
1496 fail(DL, DAG, "WebAssembly hasn't implemented cons regs last arguments");
1497 // Ignore In.getNonZeroOrigAlign() because all our arguments are passed in
1498 // registers.
1499 InVals.push_back(In.Used ? DAG.getNode(WebAssemblyISD::ARGUMENT, DL, In.VT,
1500 DAG.getTargetConstant(InVals.size(),
1501 DL, MVT::i32))
1502 : DAG.getUNDEF(In.VT));
1503
1504 // Record the number and types of arguments.
1505 MFI->addParam(In.VT);
1506 }
1507
1508 // For swiftcc, emit additional swiftself and swifterror arguments
1509 // if there aren't. These additional arguments are also added for callee
1510 // signature They are necessary to match callee and caller signature for
1511 // indirect call.
1512 auto PtrVT = getPointerTy(MF.getDataLayout());
1513 if (CallConv == CallingConv::Swift) {
1514 if (!HasSwiftSelfArg) {
1515 MFI->addParam(PtrVT);
1516 }
1517 if (!HasSwiftErrorArg) {
1518 MFI->addParam(PtrVT);
1519 }
1520 }
1521 // Varargs are copied into a buffer allocated by the caller, and a pointer to
1522 // the buffer is passed as an argument.
1523 if (IsVarArg) {
1524 MVT PtrVT = getPointerTy(MF.getDataLayout());
1525 Register VarargVreg =
1527 MFI->setVarargBufferVreg(VarargVreg);
1528 Chain = DAG.getCopyToReg(
1529 Chain, DL, VarargVreg,
1530 DAG.getNode(WebAssemblyISD::ARGUMENT, DL, PtrVT,
1531 DAG.getTargetConstant(Ins.size(), DL, MVT::i32)));
1532 MFI->addParam(PtrVT);
1533 }
1534
1535 // Record the number and types of arguments and results.
1536 SmallVector<MVT, 4> Params;
1539 MF.getFunction(), DAG.getTarget(), Params, Results);
1540 for (MVT VT : Results)
1541 MFI->addResult(VT);
1542 // TODO: Use signatures in WebAssemblyMachineFunctionInfo too and unify
1543 // the param logic here with ComputeSignatureVTs
1544 assert(MFI->getParams().size() == Params.size() &&
1545 std::equal(MFI->getParams().begin(), MFI->getParams().end(),
1546 Params.begin()));
1547
1548 return Chain;
1549}
1550
1551void WebAssemblyTargetLowering::ReplaceNodeResults(
1553 switch (N->getOpcode()) {
1555 // Do not add any results, signifying that N should not be custom lowered
1556 // after all. This happens because simd128 turns on custom lowering for
1557 // SIGN_EXTEND_INREG, but for non-vector sign extends the result might be an
1558 // illegal type.
1559 break;
1562 // Do not add any results, signifying that N should not be custom lowered.
1563 // EXTEND_VECTOR_INREG is implemented for some vectors, but not all.
1564 break;
1565 case ISD::ADD:
1566 case ISD::SUB:
1567 Results.push_back(Replace128Op(N, DAG));
1568 break;
1569 default:
1571 "ReplaceNodeResults not implemented for this op for WebAssembly!");
1572 }
1573}
1574
1575//===----------------------------------------------------------------------===//
1576// Custom lowering hooks.
1577//===----------------------------------------------------------------------===//
1578
1579SDValue WebAssemblyTargetLowering::LowerOperation(SDValue Op,
1580 SelectionDAG &DAG) const {
1581 SDLoc DL(Op);
1582 switch (Op.getOpcode()) {
1583 default:
1584 llvm_unreachable("unimplemented operation lowering");
1585 return SDValue();
1586 case ISD::FrameIndex:
1587 return LowerFrameIndex(Op, DAG);
1588 case ISD::GlobalAddress:
1589 return LowerGlobalAddress(Op, DAG);
1591 return LowerGlobalTLSAddress(Op, DAG);
1593 return LowerExternalSymbol(Op, DAG);
1594 case ISD::JumpTable:
1595 return LowerJumpTable(Op, DAG);
1596 case ISD::BR_JT:
1597 return LowerBR_JT(Op, DAG);
1598 case ISD::VASTART:
1599 return LowerVASTART(Op, DAG);
1600 case ISD::BlockAddress:
1601 case ISD::BRIND:
1602 fail(DL, DAG, "WebAssembly hasn't implemented computed gotos");
1603 return SDValue();
1604 case ISD::RETURNADDR:
1605 return LowerRETURNADDR(Op, DAG);
1606 case ISD::FRAMEADDR:
1607 return LowerFRAMEADDR(Op, DAG);
1608 case ISD::CopyToReg:
1609 return LowerCopyToReg(Op, DAG);
1612 return LowerAccessVectorElement(Op, DAG);
1616 return LowerIntrinsic(Op, DAG);
1618 return LowerSIGN_EXTEND_INREG(Op, DAG);
1621 return LowerEXTEND_VECTOR_INREG(Op, DAG);
1622 case ISD::BUILD_VECTOR:
1623 return LowerBUILD_VECTOR(Op, DAG);
1625 return LowerVECTOR_SHUFFLE(Op, DAG);
1626 case ISD::SETCC:
1627 return LowerSETCC(Op, DAG);
1628 case ISD::SHL:
1629 case ISD::SRA:
1630 case ISD::SRL:
1631 return LowerShift(Op, DAG);
1634 return LowerFP_TO_INT_SAT(Op, DAG);
1635 case ISD::LOAD:
1636 return LowerLoad(Op, DAG);
1637 case ISD::STORE:
1638 return LowerStore(Op, DAG);
1639 case ISD::CTPOP:
1640 case ISD::CTLZ:
1641 case ISD::CTTZ:
1642 return DAG.UnrollVectorOp(Op.getNode());
1643 case ISD::CLEAR_CACHE:
1644 report_fatal_error("llvm.clear_cache is not supported on wasm");
1645 case ISD::SMUL_LOHI:
1646 case ISD::UMUL_LOHI:
1647 return LowerMUL_LOHI(Op, DAG);
1648 }
1649}
1650
1652 if (const GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op))
1654
1655 return false;
1656}
1657
1658static std::optional<unsigned> IsWebAssemblyLocal(SDValue Op,
1659 SelectionDAG &DAG) {
1660 const FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Op);
1661 if (!FI)
1662 return std::nullopt;
1663
1664 auto &MF = DAG.getMachineFunction();
1666}
1667
1668SDValue WebAssemblyTargetLowering::LowerStore(SDValue Op,
1669 SelectionDAG &DAG) const {
1670 SDLoc DL(Op);
1671 StoreSDNode *SN = cast<StoreSDNode>(Op.getNode());
1672 const SDValue &Value = SN->getValue();
1673 const SDValue &Base = SN->getBasePtr();
1674 const SDValue &Offset = SN->getOffset();
1675
1677 if (!Offset->isUndef())
1678 report_fatal_error("unexpected offset when storing to webassembly global",
1679 false);
1680
1681 SDVTList Tys = DAG.getVTList(MVT::Other);
1682 SDValue Ops[] = {SN->getChain(), Value, Base};
1683 return DAG.getMemIntrinsicNode(WebAssemblyISD::GLOBAL_SET, DL, Tys, Ops,
1684 SN->getMemoryVT(), SN->getMemOperand());
1685 }
1686
1687 if (std::optional<unsigned> Local = IsWebAssemblyLocal(Base, DAG)) {
1688 if (!Offset->isUndef())
1689 report_fatal_error("unexpected offset when storing to webassembly local",
1690 false);
1691
1692 SDValue Idx = DAG.getTargetConstant(*Local, Base, MVT::i32);
1693 SDVTList Tys = DAG.getVTList(MVT::Other); // The chain.
1694 SDValue Ops[] = {SN->getChain(), Idx, Value};
1695 return DAG.getNode(WebAssemblyISD::LOCAL_SET, DL, Tys, Ops);
1696 }
1697
1700 "Encountered an unlowerable store to the wasm_var address space",
1701 false);
1702
1703 return Op;
1704}
1705
1706SDValue WebAssemblyTargetLowering::LowerLoad(SDValue Op,
1707 SelectionDAG &DAG) const {
1708 SDLoc DL(Op);
1709 LoadSDNode *LN = cast<LoadSDNode>(Op.getNode());
1710 const SDValue &Base = LN->getBasePtr();
1711 const SDValue &Offset = LN->getOffset();
1712
1714 if (!Offset->isUndef())
1716 "unexpected offset when loading from webassembly global", false);
1717
1718 SDVTList Tys = DAG.getVTList(LN->getValueType(0), MVT::Other);
1719 SDValue Ops[] = {LN->getChain(), Base};
1720 return DAG.getMemIntrinsicNode(WebAssemblyISD::GLOBAL_GET, DL, Tys, Ops,
1721 LN->getMemoryVT(), LN->getMemOperand());
1722 }
1723
1724 if (std::optional<unsigned> Local = IsWebAssemblyLocal(Base, DAG)) {
1725 if (!Offset->isUndef())
1727 "unexpected offset when loading from webassembly local", false);
1728
1729 SDValue Idx = DAG.getTargetConstant(*Local, Base, MVT::i32);
1730 EVT LocalVT = LN->getValueType(0);
1731 SDValue LocalGet = DAG.getNode(WebAssemblyISD::LOCAL_GET, DL, LocalVT,
1732 {LN->getChain(), Idx});
1733 SDValue Result = DAG.getMergeValues({LocalGet, LN->getChain()}, DL);
1734 assert(Result->getNumValues() == 2 && "Loads must carry a chain!");
1735 return Result;
1736 }
1737
1740 "Encountered an unlowerable load from the wasm_var address space",
1741 false);
1742
1743 return Op;
1744}
1745
1746SDValue WebAssemblyTargetLowering::LowerMUL_LOHI(SDValue Op,
1747 SelectionDAG &DAG) const {
1748 assert(Subtarget->hasWideArithmetic());
1749 assert(Op.getValueType() == MVT::i64);
1750 SDLoc DL(Op);
1751 unsigned Opcode;
1752 switch (Op.getOpcode()) {
1753 case ISD::UMUL_LOHI:
1754 Opcode = WebAssemblyISD::I64_MUL_WIDE_U;
1755 break;
1756 case ISD::SMUL_LOHI:
1757 Opcode = WebAssemblyISD::I64_MUL_WIDE_S;
1758 break;
1759 default:
1760 llvm_unreachable("unexpected opcode");
1761 }
1762 SDValue LHS = Op.getOperand(0);
1763 SDValue RHS = Op.getOperand(1);
1764 SDValue Hi =
1765 DAG.getNode(Opcode, DL, DAG.getVTList(MVT::i64, MVT::i64), LHS, RHS);
1766 SDValue Lo(Hi.getNode(), 1);
1767 SDValue Ops[] = {Hi, Lo};
1768 return DAG.getMergeValues(Ops, DL);
1769}
1770
1771SDValue WebAssemblyTargetLowering::Replace128Op(SDNode *N,
1772 SelectionDAG &DAG) const {
1773 assert(Subtarget->hasWideArithmetic());
1774 assert(N->getValueType(0) == MVT::i128);
1775 SDLoc DL(N);
1776 unsigned Opcode;
1777 switch (N->getOpcode()) {
1778 case ISD::ADD:
1779 Opcode = WebAssemblyISD::I64_ADD128;
1780 break;
1781 case ISD::SUB:
1782 Opcode = WebAssemblyISD::I64_SUB128;
1783 break;
1784 default:
1785 llvm_unreachable("unexpected opcode");
1786 }
1787 SDValue LHS = N->getOperand(0);
1788 SDValue RHS = N->getOperand(1);
1789
1790 SDValue C0 = DAG.getConstant(0, DL, MVT::i64);
1791 SDValue C1 = DAG.getConstant(1, DL, MVT::i64);
1792 SDValue LHS_0 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i64, LHS, C0);
1793 SDValue LHS_1 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i64, LHS, C1);
1794 SDValue RHS_0 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i64, RHS, C0);
1795 SDValue RHS_1 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i64, RHS, C1);
1796 SDValue Result_LO = DAG.getNode(Opcode, DL, DAG.getVTList(MVT::i64, MVT::i64),
1797 LHS_0, LHS_1, RHS_0, RHS_1);
1798 SDValue Result_HI(Result_LO.getNode(), 1);
1799 return DAG.getNode(ISD::BUILD_PAIR, DL, N->getVTList(), Result_LO, Result_HI);
1800}
1801
1802SDValue WebAssemblyTargetLowering::LowerCopyToReg(SDValue Op,
1803 SelectionDAG &DAG) const {
1804 SDValue Src = Op.getOperand(2);
1805 if (isa<FrameIndexSDNode>(Src.getNode())) {
1806 // CopyToReg nodes don't support FrameIndex operands. Other targets select
1807 // the FI to some LEA-like instruction, but since we don't have that, we
1808 // need to insert some kind of instruction that can take an FI operand and
1809 // produces a value usable by CopyToReg (i.e. in a vreg). So insert a dummy
1810 // local.copy between Op and its FI operand.
1811 SDValue Chain = Op.getOperand(0);
1812 SDLoc DL(Op);
1813 Register Reg = cast<RegisterSDNode>(Op.getOperand(1))->getReg();
1814 EVT VT = Src.getValueType();
1815 SDValue Copy(DAG.getMachineNode(VT == MVT::i32 ? WebAssembly::COPY_I32
1816 : WebAssembly::COPY_I64,
1817 DL, VT, Src),
1818 0);
1819 return Op.getNode()->getNumValues() == 1
1820 ? DAG.getCopyToReg(Chain, DL, Reg, Copy)
1821 : DAG.getCopyToReg(Chain, DL, Reg, Copy,
1822 Op.getNumOperands() == 4 ? Op.getOperand(3)
1823 : SDValue());
1824 }
1825 return SDValue();
1826}
1827
1828SDValue WebAssemblyTargetLowering::LowerFrameIndex(SDValue Op,
1829 SelectionDAG &DAG) const {
1830 int FI = cast<FrameIndexSDNode>(Op)->getIndex();
1831 return DAG.getTargetFrameIndex(FI, Op.getValueType());
1832}
1833
1834SDValue WebAssemblyTargetLowering::LowerRETURNADDR(SDValue Op,
1835 SelectionDAG &DAG) const {
1836 SDLoc DL(Op);
1837
1838 if (!Subtarget->getTargetTriple().isOSEmscripten()) {
1839 fail(DL, DAG,
1840 "Non-Emscripten WebAssembly hasn't implemented "
1841 "__builtin_return_address");
1842 return SDValue();
1843 }
1844
1846 return SDValue();
1847
1848 unsigned Depth = Op.getConstantOperandVal(0);
1849 MakeLibCallOptions CallOptions;
1850 return makeLibCall(DAG, RTLIB::RETURN_ADDRESS, Op.getValueType(),
1851 {DAG.getConstant(Depth, DL, MVT::i32)}, CallOptions, DL)
1852 .first;
1853}
1854
1855SDValue WebAssemblyTargetLowering::LowerFRAMEADDR(SDValue Op,
1856 SelectionDAG &DAG) const {
1857 // Non-zero depths are not supported by WebAssembly currently. Use the
1858 // legalizer's default expansion, which is to return 0 (what this function is
1859 // documented to do).
1860 if (Op.getConstantOperandVal(0) > 0)
1861 return SDValue();
1862
1864 EVT VT = Op.getValueType();
1865 Register FP =
1867 return DAG.getCopyFromReg(DAG.getEntryNode(), SDLoc(Op), FP, VT);
1868}
1869
1870SDValue
1871WebAssemblyTargetLowering::LowerGlobalTLSAddress(SDValue Op,
1872 SelectionDAG &DAG) const {
1873 SDLoc DL(Op);
1874 const auto *GA = cast<GlobalAddressSDNode>(Op);
1875
1878 report_fatal_error("cannot use thread-local storage without bulk memory",
1879 false);
1880
1881 const GlobalValue *GV = GA->getGlobal();
1882
1883 // Currently only Emscripten supports dynamic linking with threads. Therefore,
1884 // on other targets, if we have thread-local storage, only the local-exec
1885 // model is possible.
1886 auto model = Subtarget->getTargetTriple().isOSEmscripten()
1887 ? GV->getThreadLocalMode()
1889
1890 // Unsupported TLS modes
1893
1894 if (model == GlobalValue::LocalExecTLSModel ||
1897 getTargetMachine().shouldAssumeDSOLocal(GV))) {
1898 // For DSO-local TLS variables we use offset from __tls_base
1899
1900 MVT PtrVT = getPointerTy(DAG.getDataLayout());
1901 auto GlobalGet = PtrVT == MVT::i64 ? WebAssembly::GLOBAL_GET_I64
1902 : WebAssembly::GLOBAL_GET_I32;
1903 const char *BaseName = MF.createExternalSymbolName("__tls_base");
1904
1906 DAG.getMachineNode(GlobalGet, DL, PtrVT,
1907 DAG.getTargetExternalSymbol(BaseName, PtrVT)),
1908 0);
1909
1910 SDValue TLSOffset = DAG.getTargetGlobalAddress(
1911 GV, DL, PtrVT, GA->getOffset(), WebAssemblyII::MO_TLS_BASE_REL);
1912 SDValue SymOffset =
1913 DAG.getNode(WebAssemblyISD::WrapperREL, DL, PtrVT, TLSOffset);
1914
1915 return DAG.getNode(ISD::ADD, DL, PtrVT, BaseAddr, SymOffset);
1916 }
1917
1919
1920 EVT VT = Op.getValueType();
1921 return DAG.getNode(WebAssemblyISD::Wrapper, DL, VT,
1922 DAG.getTargetGlobalAddress(GA->getGlobal(), DL, VT,
1923 GA->getOffset(),
1925}
1926
1927SDValue WebAssemblyTargetLowering::LowerGlobalAddress(SDValue Op,
1928 SelectionDAG &DAG) const {
1929 SDLoc DL(Op);
1930 const auto *GA = cast<GlobalAddressSDNode>(Op);
1931 EVT VT = Op.getValueType();
1932 assert(GA->getTargetFlags() == 0 &&
1933 "Unexpected target flags on generic GlobalAddressSDNode");
1935 fail(DL, DAG, "Invalid address space for WebAssembly target");
1936
1937 unsigned OperandFlags = 0;
1938 const GlobalValue *GV = GA->getGlobal();
1939 // Since WebAssembly tables cannot yet be shared accross modules, we don't
1940 // need special treatment for tables in PIC mode.
1941 if (isPositionIndependent() &&
1943 if (getTargetMachine().shouldAssumeDSOLocal(GV)) {
1945 MVT PtrVT = getPointerTy(MF.getDataLayout());
1946 const char *BaseName;
1947 if (GV->getValueType()->isFunctionTy()) {
1948 BaseName = MF.createExternalSymbolName("__table_base");
1950 } else {
1951 BaseName = MF.createExternalSymbolName("__memory_base");
1953 }
1955 DAG.getNode(WebAssemblyISD::Wrapper, DL, PtrVT,
1956 DAG.getTargetExternalSymbol(BaseName, PtrVT));
1957
1958 SDValue SymAddr = DAG.getNode(
1959 WebAssemblyISD::WrapperREL, DL, VT,
1960 DAG.getTargetGlobalAddress(GA->getGlobal(), DL, VT, GA->getOffset(),
1961 OperandFlags));
1962
1963 return DAG.getNode(ISD::ADD, DL, VT, BaseAddr, SymAddr);
1964 }
1966 }
1967
1968 return DAG.getNode(WebAssemblyISD::Wrapper, DL, VT,
1969 DAG.getTargetGlobalAddress(GA->getGlobal(), DL, VT,
1970 GA->getOffset(), OperandFlags));
1971}
1972
1973SDValue
1974WebAssemblyTargetLowering::LowerExternalSymbol(SDValue Op,
1975 SelectionDAG &DAG) const {
1976 SDLoc DL(Op);
1977 const auto *ES = cast<ExternalSymbolSDNode>(Op);
1978 EVT VT = Op.getValueType();
1979 assert(ES->getTargetFlags() == 0 &&
1980 "Unexpected target flags on generic ExternalSymbolSDNode");
1981 return DAG.getNode(WebAssemblyISD::Wrapper, DL, VT,
1982 DAG.getTargetExternalSymbol(ES->getSymbol(), VT));
1983}
1984
1985SDValue WebAssemblyTargetLowering::LowerJumpTable(SDValue Op,
1986 SelectionDAG &DAG) const {
1987 // There's no need for a Wrapper node because we always incorporate a jump
1988 // table operand into a BR_TABLE instruction, rather than ever
1989 // materializing it in a register.
1990 const JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
1991 return DAG.getTargetJumpTable(JT->getIndex(), Op.getValueType(),
1992 JT->getTargetFlags());
1993}
1994
1995SDValue WebAssemblyTargetLowering::LowerBR_JT(SDValue Op,
1996 SelectionDAG &DAG) const {
1997 SDLoc DL(Op);
1998 SDValue Chain = Op.getOperand(0);
1999 const auto *JT = cast<JumpTableSDNode>(Op.getOperand(1));
2000 SDValue Index = Op.getOperand(2);
2001 assert(JT->getTargetFlags() == 0 && "WebAssembly doesn't set target flags");
2002
2004 Ops.push_back(Chain);
2005 Ops.push_back(Index);
2006
2008 const auto &MBBs = MJTI->getJumpTables()[JT->getIndex()].MBBs;
2009
2010 // Add an operand for each case.
2011 for (auto *MBB : MBBs)
2012 Ops.push_back(DAG.getBasicBlock(MBB));
2013
2014 // Add the first MBB as a dummy default target for now. This will be replaced
2015 // with the proper default target (and the preceding range check eliminated)
2016 // if possible by WebAssemblyFixBrTableDefaults.
2017 Ops.push_back(DAG.getBasicBlock(*MBBs.begin()));
2018 return DAG.getNode(WebAssemblyISD::BR_TABLE, DL, MVT::Other, Ops);
2019}
2020
2021SDValue WebAssemblyTargetLowering::LowerVASTART(SDValue Op,
2022 SelectionDAG &DAG) const {
2023 SDLoc DL(Op);
2025
2027 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
2028
2029 SDValue ArgN = DAG.getCopyFromReg(DAG.getEntryNode(), DL,
2030 MFI->getVarargBufferVreg(), PtrVT);
2031 return DAG.getStore(Op.getOperand(0), DL, ArgN, Op.getOperand(1),
2032 MachinePointerInfo(SV));
2033}
2034
2035SDValue WebAssemblyTargetLowering::LowerIntrinsic(SDValue Op,
2036 SelectionDAG &DAG) const {
2038 unsigned IntNo;
2039 switch (Op.getOpcode()) {
2042 IntNo = Op.getConstantOperandVal(1);
2043 break;
2045 IntNo = Op.getConstantOperandVal(0);
2046 break;
2047 default:
2048 llvm_unreachable("Invalid intrinsic");
2049 }
2050 SDLoc DL(Op);
2051
2052 switch (IntNo) {
2053 default:
2054 return SDValue(); // Don't custom lower most intrinsics.
2055
2056 case Intrinsic::wasm_lsda: {
2057 auto PtrVT = getPointerTy(MF.getDataLayout());
2058 const char *SymName = MF.createExternalSymbolName(
2059 "GCC_except_table" + std::to_string(MF.getFunctionNumber()));
2060 if (isPositionIndependent()) {
2062 SymName, PtrVT, WebAssemblyII::MO_MEMORY_BASE_REL);
2063 const char *BaseName = MF.createExternalSymbolName("__memory_base");
2065 DAG.getNode(WebAssemblyISD::Wrapper, DL, PtrVT,
2066 DAG.getTargetExternalSymbol(BaseName, PtrVT));
2067 SDValue SymAddr =
2068 DAG.getNode(WebAssemblyISD::WrapperREL, DL, PtrVT, Node);
2069 return DAG.getNode(ISD::ADD, DL, PtrVT, BaseAddr, SymAddr);
2070 }
2071 SDValue Node = DAG.getTargetExternalSymbol(SymName, PtrVT);
2072 return DAG.getNode(WebAssemblyISD::Wrapper, DL, PtrVT, Node);
2073 }
2074
2075 case Intrinsic::wasm_shuffle: {
2076 // Drop in-chain and replace undefs, but otherwise pass through unchanged
2077 SDValue Ops[18];
2078 size_t OpIdx = 0;
2079 Ops[OpIdx++] = Op.getOperand(1);
2080 Ops[OpIdx++] = Op.getOperand(2);
2081 while (OpIdx < 18) {
2082 const SDValue &MaskIdx = Op.getOperand(OpIdx + 1);
2083 if (MaskIdx.isUndef() || MaskIdx.getNode()->getAsZExtVal() >= 32) {
2084 bool isTarget = MaskIdx.getNode()->getOpcode() == ISD::TargetConstant;
2085 Ops[OpIdx++] = DAG.getConstant(0, DL, MVT::i32, isTarget);
2086 } else {
2087 Ops[OpIdx++] = MaskIdx;
2088 }
2089 }
2090 return DAG.getNode(WebAssemblyISD::SHUFFLE, DL, Op.getValueType(), Ops);
2091 }
2092
2093 case Intrinsic::thread_pointer: {
2094 MVT PtrVT = getPointerTy(DAG.getDataLayout());
2095 auto GlobalGet = PtrVT == MVT::i64 ? WebAssembly::GLOBAL_GET_I64
2096 : WebAssembly::GLOBAL_GET_I32;
2097 const char *TlsBase = MF.createExternalSymbolName("__tls_base");
2098 return SDValue(
2099 DAG.getMachineNode(GlobalGet, DL, PtrVT,
2100 DAG.getTargetExternalSymbol(TlsBase, PtrVT)),
2101 0);
2102 }
2103 }
2104}
2105
2106SDValue
2107WebAssemblyTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
2108 SelectionDAG &DAG) const {
2109 SDLoc DL(Op);
2110 // If sign extension operations are disabled, allow sext_inreg only if operand
2111 // is a vector extract of an i8 or i16 lane. SIMD does not depend on sign
2112 // extension operations, but allowing sext_inreg in this context lets us have
2113 // simple patterns to select extract_lane_s instructions. Expanding sext_inreg
2114 // everywhere would be simpler in this file, but would necessitate large and
2115 // brittle patterns to undo the expansion and select extract_lane_s
2116 // instructions.
2117 assert(!Subtarget->hasSignExt() && Subtarget->hasSIMD128());
2118 if (Op.getOperand(0).getOpcode() != ISD::EXTRACT_VECTOR_ELT)
2119 return SDValue();
2120
2121 const SDValue &Extract = Op.getOperand(0);
2122 MVT VecT = Extract.getOperand(0).getSimpleValueType();
2123 if (VecT.getVectorElementType().getSizeInBits() > 32)
2124 return SDValue();
2125 MVT ExtractedLaneT =
2126 cast<VTSDNode>(Op.getOperand(1).getNode())->getVT().getSimpleVT();
2127 MVT ExtractedVecT =
2128 MVT::getVectorVT(ExtractedLaneT, 128 / ExtractedLaneT.getSizeInBits());
2129 if (ExtractedVecT == VecT)
2130 return Op;
2131
2132 // Bitcast vector to appropriate type to ensure ISel pattern coverage
2133 const SDNode *Index = Extract.getOperand(1).getNode();
2134 if (!isa<ConstantSDNode>(Index))
2135 return SDValue();
2136 unsigned IndexVal = Index->getAsZExtVal();
2137 unsigned Scale =
2138 ExtractedVecT.getVectorNumElements() / VecT.getVectorNumElements();
2139 assert(Scale > 1);
2140 SDValue NewIndex =
2141 DAG.getConstant(IndexVal * Scale, DL, Index->getValueType(0));
2142 SDValue NewExtract = DAG.getNode(
2144 DAG.getBitcast(ExtractedVecT, Extract.getOperand(0)), NewIndex);
2145 return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, Op.getValueType(), NewExtract,
2146 Op.getOperand(1));
2147}
2148
2149SDValue
2150WebAssemblyTargetLowering::LowerEXTEND_VECTOR_INREG(SDValue Op,
2151 SelectionDAG &DAG) const {
2152 SDLoc DL(Op);
2153 EVT VT = Op.getValueType();
2154 SDValue Src = Op.getOperand(0);
2155 EVT SrcVT = Src.getValueType();
2156
2157 if (SrcVT.getVectorElementType() == MVT::i1 ||
2158 SrcVT.getVectorElementType() == MVT::i64)
2159 return SDValue();
2160
2161 assert(VT.getScalarSizeInBits() % SrcVT.getScalarSizeInBits() == 0 &&
2162 "Unexpected extension factor.");
2163 unsigned Scale = VT.getScalarSizeInBits() / SrcVT.getScalarSizeInBits();
2164
2165 if (Scale != 2 && Scale != 4 && Scale != 8)
2166 return SDValue();
2167
2168 unsigned Ext;
2169 switch (Op.getOpcode()) {
2171 Ext = WebAssemblyISD::EXTEND_LOW_U;
2172 break;
2174 Ext = WebAssemblyISD::EXTEND_LOW_S;
2175 break;
2176 }
2177
2178 SDValue Ret = Src;
2179 while (Scale != 1) {
2180 Ret = DAG.getNode(Ext, DL,
2181 Ret.getValueType()
2182 .widenIntegerVectorElementType(*DAG.getContext())
2183 .getHalfNumVectorElementsVT(*DAG.getContext()),
2184 Ret);
2185 Scale /= 2;
2186 }
2187 assert(Ret.getValueType() == VT);
2188 return Ret;
2189}
2190
2192 SDLoc DL(Op);
2193 if (Op.getValueType() != MVT::v2f64)
2194 return SDValue();
2195
2196 auto GetConvertedLane = [](SDValue Op, unsigned &Opcode, SDValue &SrcVec,
2197 unsigned &Index) -> bool {
2198 switch (Op.getOpcode()) {
2199 case ISD::SINT_TO_FP:
2200 Opcode = WebAssemblyISD::CONVERT_LOW_S;
2201 break;
2202 case ISD::UINT_TO_FP:
2203 Opcode = WebAssemblyISD::CONVERT_LOW_U;
2204 break;
2205 case ISD::FP_EXTEND:
2206 Opcode = WebAssemblyISD::PROMOTE_LOW;
2207 break;
2208 default:
2209 return false;
2210 }
2211
2212 auto ExtractVector = Op.getOperand(0);
2213 if (ExtractVector.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
2214 return false;
2215
2216 if (!isa<ConstantSDNode>(ExtractVector.getOperand(1).getNode()))
2217 return false;
2218
2219 SrcVec = ExtractVector.getOperand(0);
2220 Index = ExtractVector.getConstantOperandVal(1);
2221 return true;
2222 };
2223
2224 unsigned LHSOpcode, RHSOpcode, LHSIndex, RHSIndex;
2225 SDValue LHSSrcVec, RHSSrcVec;
2226 if (!GetConvertedLane(Op.getOperand(0), LHSOpcode, LHSSrcVec, LHSIndex) ||
2227 !GetConvertedLane(Op.getOperand(1), RHSOpcode, RHSSrcVec, RHSIndex))
2228 return SDValue();
2229
2230 if (LHSOpcode != RHSOpcode)
2231 return SDValue();
2232
2233 MVT ExpectedSrcVT;
2234 switch (LHSOpcode) {
2235 case WebAssemblyISD::CONVERT_LOW_S:
2236 case WebAssemblyISD::CONVERT_LOW_U:
2237 ExpectedSrcVT = MVT::v4i32;
2238 break;
2239 case WebAssemblyISD::PROMOTE_LOW:
2240 ExpectedSrcVT = MVT::v4f32;
2241 break;
2242 }
2243 if (LHSSrcVec.getValueType() != ExpectedSrcVT)
2244 return SDValue();
2245
2246 auto Src = LHSSrcVec;
2247 if (LHSIndex != 0 || RHSIndex != 1 || LHSSrcVec != RHSSrcVec) {
2248 // Shuffle the source vector so that the converted lanes are the low lanes.
2249 Src = DAG.getVectorShuffle(
2250 ExpectedSrcVT, DL, LHSSrcVec, RHSSrcVec,
2251 {static_cast<int>(LHSIndex), static_cast<int>(RHSIndex) + 4, -1, -1});
2252 }
2253 return DAG.getNode(LHSOpcode, DL, MVT::v2f64, Src);
2254}
2255
2256SDValue WebAssemblyTargetLowering::LowerBUILD_VECTOR(SDValue Op,
2257 SelectionDAG &DAG) const {
2258 MVT VT = Op.getSimpleValueType();
2259 if (VT == MVT::v8f16) {
2260 // BUILD_VECTOR can't handle FP16 operands since Wasm doesn't have a scaler
2261 // FP16 type, so cast them to I16s.
2262 MVT IVT = VT.changeVectorElementType(MVT::i16);
2264 for (unsigned I = 0, E = Op.getNumOperands(); I < E; ++I)
2265 NewOps.push_back(DAG.getBitcast(MVT::i16, Op.getOperand(I)));
2266 SDValue Res = DAG.getNode(ISD::BUILD_VECTOR, SDLoc(), IVT, NewOps);
2267 return DAG.getBitcast(VT, Res);
2268 }
2269
2270 if (auto ConvertLow = LowerConvertLow(Op, DAG))
2271 return ConvertLow;
2272
2273 SDLoc DL(Op);
2274 const EVT VecT = Op.getValueType();
2275 const EVT LaneT = Op.getOperand(0).getValueType();
2276 const size_t Lanes = Op.getNumOperands();
2277 bool CanSwizzle = VecT == MVT::v16i8;
2278
2279 // BUILD_VECTORs are lowered to the instruction that initializes the highest
2280 // possible number of lanes at once followed by a sequence of replace_lane
2281 // instructions to individually initialize any remaining lanes.
2282
2283 // TODO: Tune this. For example, lanewise swizzling is very expensive, so
2284 // swizzled lanes should be given greater weight.
2285
2286 // TODO: Investigate looping rather than always extracting/replacing specific
2287 // lanes to fill gaps.
2288
2289 auto IsConstant = [](const SDValue &V) {
2290 return V.getOpcode() == ISD::Constant || V.getOpcode() == ISD::ConstantFP;
2291 };
2292
2293 // Returns the source vector and index vector pair if they exist. Checks for:
2294 // (extract_vector_elt
2295 // $src,
2296 // (sign_extend_inreg (extract_vector_elt $indices, $i))
2297 // )
2298 auto GetSwizzleSrcs = [](size_t I, const SDValue &Lane) {
2299 auto Bail = std::make_pair(SDValue(), SDValue());
2300 if (Lane->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
2301 return Bail;
2302 const SDValue &SwizzleSrc = Lane->getOperand(0);
2303 const SDValue &IndexExt = Lane->getOperand(1);
2304 if (IndexExt->getOpcode() != ISD::SIGN_EXTEND_INREG)
2305 return Bail;
2306 const SDValue &Index = IndexExt->getOperand(0);
2307 if (Index->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
2308 return Bail;
2309 const SDValue &SwizzleIndices = Index->getOperand(0);
2310 if (SwizzleSrc.getValueType() != MVT::v16i8 ||
2311 SwizzleIndices.getValueType() != MVT::v16i8 ||
2312 Index->getOperand(1)->getOpcode() != ISD::Constant ||
2313 Index->getConstantOperandVal(1) != I)
2314 return Bail;
2315 return std::make_pair(SwizzleSrc, SwizzleIndices);
2316 };
2317
2318 // If the lane is extracted from another vector at a constant index, return
2319 // that vector. The source vector must not have more lanes than the dest
2320 // because the shufflevector indices are in terms of the destination lanes and
2321 // would not be able to address the smaller individual source lanes.
2322 auto GetShuffleSrc = [&](const SDValue &Lane) {
2323 if (Lane->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
2324 return SDValue();
2325 if (!isa<ConstantSDNode>(Lane->getOperand(1).getNode()))
2326 return SDValue();
2327 if (Lane->getOperand(0).getValueType().getVectorNumElements() >
2328 VecT.getVectorNumElements())
2329 return SDValue();
2330 return Lane->getOperand(0);
2331 };
2332
2333 using ValueEntry = std::pair<SDValue, size_t>;
2334 SmallVector<ValueEntry, 16> SplatValueCounts;
2335
2336 using SwizzleEntry = std::pair<std::pair<SDValue, SDValue>, size_t>;
2337 SmallVector<SwizzleEntry, 16> SwizzleCounts;
2338
2339 using ShuffleEntry = std::pair<SDValue, size_t>;
2340 SmallVector<ShuffleEntry, 16> ShuffleCounts;
2341
2342 auto AddCount = [](auto &Counts, const auto &Val) {
2343 auto CountIt =
2344 llvm::find_if(Counts, [&Val](auto E) { return E.first == Val; });
2345 if (CountIt == Counts.end()) {
2346 Counts.emplace_back(Val, 1);
2347 } else {
2348 CountIt->second++;
2349 }
2350 };
2351
2352 auto GetMostCommon = [](auto &Counts) {
2353 auto CommonIt =
2354 std::max_element(Counts.begin(), Counts.end(), llvm::less_second());
2355 assert(CommonIt != Counts.end() && "Unexpected all-undef build_vector");
2356 return *CommonIt;
2357 };
2358
2359 size_t NumConstantLanes = 0;
2360
2361 // Count eligible lanes for each type of vector creation op
2362 for (size_t I = 0; I < Lanes; ++I) {
2363 const SDValue &Lane = Op->getOperand(I);
2364 if (Lane.isUndef())
2365 continue;
2366
2367 AddCount(SplatValueCounts, Lane);
2368
2369 if (IsConstant(Lane))
2370 NumConstantLanes++;
2371 if (auto ShuffleSrc = GetShuffleSrc(Lane))
2372 AddCount(ShuffleCounts, ShuffleSrc);
2373 if (CanSwizzle) {
2374 auto SwizzleSrcs = GetSwizzleSrcs(I, Lane);
2375 if (SwizzleSrcs.first)
2376 AddCount(SwizzleCounts, SwizzleSrcs);
2377 }
2378 }
2379
2380 SDValue SplatValue;
2381 size_t NumSplatLanes;
2382 std::tie(SplatValue, NumSplatLanes) = GetMostCommon(SplatValueCounts);
2383
2384 SDValue SwizzleSrc;
2385 SDValue SwizzleIndices;
2386 size_t NumSwizzleLanes = 0;
2387 if (SwizzleCounts.size())
2388 std::forward_as_tuple(std::tie(SwizzleSrc, SwizzleIndices),
2389 NumSwizzleLanes) = GetMostCommon(SwizzleCounts);
2390
2391 // Shuffles can draw from up to two vectors, so find the two most common
2392 // sources.
2393 SDValue ShuffleSrc1, ShuffleSrc2;
2394 size_t NumShuffleLanes = 0;
2395 if (ShuffleCounts.size()) {
2396 std::tie(ShuffleSrc1, NumShuffleLanes) = GetMostCommon(ShuffleCounts);
2397 llvm::erase_if(ShuffleCounts,
2398 [&](const auto &Pair) { return Pair.first == ShuffleSrc1; });
2399 }
2400 if (ShuffleCounts.size()) {
2401 size_t AdditionalShuffleLanes;
2402 std::tie(ShuffleSrc2, AdditionalShuffleLanes) =
2403 GetMostCommon(ShuffleCounts);
2404 NumShuffleLanes += AdditionalShuffleLanes;
2405 }
2406
2407 // Predicate returning true if the lane is properly initialized by the
2408 // original instruction
2409 std::function<bool(size_t, const SDValue &)> IsLaneConstructed;
2411 // Prefer swizzles over shuffles over vector consts over splats
2412 if (NumSwizzleLanes >= NumShuffleLanes &&
2413 NumSwizzleLanes >= NumConstantLanes && NumSwizzleLanes >= NumSplatLanes) {
2414 Result = DAG.getNode(WebAssemblyISD::SWIZZLE, DL, VecT, SwizzleSrc,
2415 SwizzleIndices);
2416 auto Swizzled = std::make_pair(SwizzleSrc, SwizzleIndices);
2417 IsLaneConstructed = [&, Swizzled](size_t I, const SDValue &Lane) {
2418 return Swizzled == GetSwizzleSrcs(I, Lane);
2419 };
2420 } else if (NumShuffleLanes >= NumConstantLanes &&
2421 NumShuffleLanes >= NumSplatLanes) {
2422 size_t DestLaneSize = VecT.getVectorElementType().getFixedSizeInBits() / 8;
2423 size_t DestLaneCount = VecT.getVectorNumElements();
2424 size_t Scale1 = 1;
2425 size_t Scale2 = 1;
2426 SDValue Src1 = ShuffleSrc1;
2427 SDValue Src2 = ShuffleSrc2 ? ShuffleSrc2 : DAG.getUNDEF(VecT);
2428 if (Src1.getValueType() != VecT) {
2429 size_t LaneSize =
2431 assert(LaneSize > DestLaneSize);
2432 Scale1 = LaneSize / DestLaneSize;
2433 Src1 = DAG.getBitcast(VecT, Src1);
2434 }
2435 if (Src2.getValueType() != VecT) {
2436 size_t LaneSize =
2438 assert(LaneSize > DestLaneSize);
2439 Scale2 = LaneSize / DestLaneSize;
2440 Src2 = DAG.getBitcast(VecT, Src2);
2441 }
2442
2443 int Mask[16];
2444 assert(DestLaneCount <= 16);
2445 for (size_t I = 0; I < DestLaneCount; ++I) {
2446 const SDValue &Lane = Op->getOperand(I);
2447 SDValue Src = GetShuffleSrc(Lane);
2448 if (Src == ShuffleSrc1) {
2449 Mask[I] = Lane->getConstantOperandVal(1) * Scale1;
2450 } else if (Src && Src == ShuffleSrc2) {
2451 Mask[I] = DestLaneCount + Lane->getConstantOperandVal(1) * Scale2;
2452 } else {
2453 Mask[I] = -1;
2454 }
2455 }
2456 ArrayRef<int> MaskRef(Mask, DestLaneCount);
2457 Result = DAG.getVectorShuffle(VecT, DL, Src1, Src2, MaskRef);
2458 IsLaneConstructed = [&](size_t, const SDValue &Lane) {
2459 auto Src = GetShuffleSrc(Lane);
2460 return Src == ShuffleSrc1 || (Src && Src == ShuffleSrc2);
2461 };
2462 } else if (NumConstantLanes >= NumSplatLanes) {
2463 SmallVector<SDValue, 16> ConstLanes;
2464 for (const SDValue &Lane : Op->op_values()) {
2465 if (IsConstant(Lane)) {
2466 // Values may need to be fixed so that they will sign extend to be
2467 // within the expected range during ISel. Check whether the value is in
2468 // bounds based on the lane bit width and if it is out of bounds, lop
2469 // off the extra bits and subtract 2^n to reflect giving the high bit
2470 // value -2^(n-1) rather than +2^(n-1). Skip the i64 case because it
2471 // cannot possibly be out of range.
2472 auto *Const = dyn_cast<ConstantSDNode>(Lane.getNode());
2473 int64_t Val = Const ? Const->getSExtValue() : 0;
2474 uint64_t LaneBits = 128 / Lanes;
2475 assert((LaneBits == 64 || Val >= -(1ll << (LaneBits - 1))) &&
2476 "Unexpected out of bounds negative value");
2477 if (Const && LaneBits != 64 && Val > (1ll << (LaneBits - 1)) - 1) {
2478 uint64_t Mask = (1ll << LaneBits) - 1;
2479 auto NewVal = (((uint64_t)Val & Mask) - (1ll << LaneBits)) & Mask;
2480 ConstLanes.push_back(DAG.getConstant(NewVal, SDLoc(Lane), LaneT));
2481 } else {
2482 ConstLanes.push_back(Lane);
2483 }
2484 } else if (LaneT.isFloatingPoint()) {
2485 ConstLanes.push_back(DAG.getConstantFP(0, DL, LaneT));
2486 } else {
2487 ConstLanes.push_back(DAG.getConstant(0, DL, LaneT));
2488 }
2489 }
2490 Result = DAG.getBuildVector(VecT, DL, ConstLanes);
2491 IsLaneConstructed = [&IsConstant](size_t _, const SDValue &Lane) {
2492 return IsConstant(Lane);
2493 };
2494 } else {
2495 size_t DestLaneSize = VecT.getVectorElementType().getFixedSizeInBits();
2496 if (NumSplatLanes == 1 && Op->getOperand(0) == SplatValue &&
2497 (DestLaneSize == 32 || DestLaneSize == 64)) {
2498 // Could be selected to load_zero.
2499 Result = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecT, SplatValue);
2500 } else {
2501 // Use a splat (which might be selected as a load splat)
2502 Result = DAG.getSplatBuildVector(VecT, DL, SplatValue);
2503 }
2504 IsLaneConstructed = [&SplatValue](size_t _, const SDValue &Lane) {
2505 return Lane == SplatValue;
2506 };
2507 }
2508
2509 assert(Result);
2510 assert(IsLaneConstructed);
2511
2512 // Add replace_lane instructions for any unhandled values
2513 for (size_t I = 0; I < Lanes; ++I) {
2514 const SDValue &Lane = Op->getOperand(I);
2515 if (!Lane.isUndef() && !IsLaneConstructed(I, Lane))
2516 Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VecT, Result, Lane,
2517 DAG.getConstant(I, DL, MVT::i32));
2518 }
2519
2520 return Result;
2521}
2522
2523SDValue
2524WebAssemblyTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
2525 SelectionDAG &DAG) const {
2526 SDLoc DL(Op);
2527 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(Op.getNode())->getMask();
2528 MVT VecType = Op.getOperand(0).getSimpleValueType();
2529 assert(VecType.is128BitVector() && "Unexpected shuffle vector type");
2530 size_t LaneBytes = VecType.getVectorElementType().getSizeInBits() / 8;
2531
2532 // Space for two vector args and sixteen mask indices
2533 SDValue Ops[18];
2534 size_t OpIdx = 0;
2535 Ops[OpIdx++] = Op.getOperand(0);
2536 Ops[OpIdx++] = Op.getOperand(1);
2537
2538 // Expand mask indices to byte indices and materialize them as operands
2539 for (int M : Mask) {
2540 for (size_t J = 0; J < LaneBytes; ++J) {
2541 // Lower undefs (represented by -1 in mask) to {0..J}, which use a
2542 // whole lane of vector input, to allow further reduction at VM. E.g.
2543 // match an 8x16 byte shuffle to an equivalent cheaper 32x4 shuffle.
2544 uint64_t ByteIndex = M == -1 ? J : (uint64_t)M * LaneBytes + J;
2545 Ops[OpIdx++] = DAG.getConstant(ByteIndex, DL, MVT::i32);
2546 }
2547 }
2548
2549 return DAG.getNode(WebAssemblyISD::SHUFFLE, DL, Op.getValueType(), Ops);
2550}
2551
2552SDValue WebAssemblyTargetLowering::LowerSETCC(SDValue Op,
2553 SelectionDAG &DAG) const {
2554 SDLoc DL(Op);
2555 // The legalizer does not know how to expand the unsupported comparison modes
2556 // of i64x2 vectors, so we manually unroll them here.
2557 assert(Op->getOperand(0)->getSimpleValueType(0) == MVT::v2i64);
2559 DAG.ExtractVectorElements(Op->getOperand(0), LHS);
2560 DAG.ExtractVectorElements(Op->getOperand(1), RHS);
2561 const SDValue &CC = Op->getOperand(2);
2562 auto MakeLane = [&](unsigned I) {
2563 return DAG.getNode(ISD::SELECT_CC, DL, MVT::i64, LHS[I], RHS[I],
2564 DAG.getConstant(uint64_t(-1), DL, MVT::i64),
2565 DAG.getConstant(uint64_t(0), DL, MVT::i64), CC);
2566 };
2567 return DAG.getBuildVector(Op->getValueType(0), DL,
2568 {MakeLane(0), MakeLane(1)});
2569}
2570
2571SDValue
2572WebAssemblyTargetLowering::LowerAccessVectorElement(SDValue Op,
2573 SelectionDAG &DAG) const {
2574 // Allow constant lane indices, expand variable lane indices
2575 SDNode *IdxNode = Op.getOperand(Op.getNumOperands() - 1).getNode();
2576 if (isa<ConstantSDNode>(IdxNode)) {
2577 // Ensure the index type is i32 to match the tablegen patterns
2578 uint64_t Idx = IdxNode->getAsZExtVal();
2579 SmallVector<SDValue, 3> Ops(Op.getNode()->ops());
2580 Ops[Op.getNumOperands() - 1] =
2581 DAG.getConstant(Idx, SDLoc(IdxNode), MVT::i32);
2582 return DAG.getNode(Op.getOpcode(), SDLoc(Op), Op.getValueType(), Ops);
2583 }
2584 // Perform default expansion
2585 return SDValue();
2586}
2587
2589 EVT LaneT = Op.getSimpleValueType().getVectorElementType();
2590 // 32-bit and 64-bit unrolled shifts will have proper semantics
2591 if (LaneT.bitsGE(MVT::i32))
2592 return DAG.UnrollVectorOp(Op.getNode());
2593 // Otherwise mask the shift value to get proper semantics from 32-bit shift
2594 SDLoc DL(Op);
2595 size_t NumLanes = Op.getSimpleValueType().getVectorNumElements();
2596 SDValue Mask = DAG.getConstant(LaneT.getSizeInBits() - 1, DL, MVT::i32);
2597 unsigned ShiftOpcode = Op.getOpcode();
2598 SmallVector<SDValue, 16> ShiftedElements;
2599 DAG.ExtractVectorElements(Op.getOperand(0), ShiftedElements, 0, 0, MVT::i32);
2600 SmallVector<SDValue, 16> ShiftElements;
2601 DAG.ExtractVectorElements(Op.getOperand(1), ShiftElements, 0, 0, MVT::i32);
2602 SmallVector<SDValue, 16> UnrolledOps;
2603 for (size_t i = 0; i < NumLanes; ++i) {
2604 SDValue MaskedShiftValue =
2605 DAG.getNode(ISD::AND, DL, MVT::i32, ShiftElements[i], Mask);
2606 SDValue ShiftedValue = ShiftedElements[i];
2607 if (ShiftOpcode == ISD::SRA)
2608 ShiftedValue = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i32,
2609 ShiftedValue, DAG.getValueType(LaneT));
2610 UnrolledOps.push_back(
2611 DAG.getNode(ShiftOpcode, DL, MVT::i32, ShiftedValue, MaskedShiftValue));
2612 }
2613 return DAG.getBuildVector(Op.getValueType(), DL, UnrolledOps);
2614}
2615
2616SDValue WebAssemblyTargetLowering::LowerShift(SDValue Op,
2617 SelectionDAG &DAG) const {
2618 SDLoc DL(Op);
2619
2620 // Only manually lower vector shifts
2621 assert(Op.getSimpleValueType().isVector());
2622
2623 uint64_t LaneBits = Op.getValueType().getScalarSizeInBits();
2624 auto ShiftVal = Op.getOperand(1);
2625
2626 // Try to skip bitmask operation since it is implied inside shift instruction
2627 auto SkipImpliedMask = [](SDValue MaskOp, uint64_t MaskBits) {
2628 if (MaskOp.getOpcode() != ISD::AND)
2629 return MaskOp;
2630 SDValue LHS = MaskOp.getOperand(0);
2631 SDValue RHS = MaskOp.getOperand(1);
2632 if (MaskOp.getValueType().isVector()) {
2633 APInt MaskVal;
2634 if (!ISD::isConstantSplatVector(RHS.getNode(), MaskVal))
2635 std::swap(LHS, RHS);
2636
2637 if (ISD::isConstantSplatVector(RHS.getNode(), MaskVal) &&
2638 MaskVal == MaskBits)
2639 MaskOp = LHS;
2640 } else {
2641 if (!isa<ConstantSDNode>(RHS.getNode()))
2642 std::swap(LHS, RHS);
2643
2644 auto ConstantRHS = dyn_cast<ConstantSDNode>(RHS.getNode());
2645 if (ConstantRHS && ConstantRHS->getAPIntValue() == MaskBits)
2646 MaskOp = LHS;
2647 }
2648
2649 return MaskOp;
2650 };
2651
2652 // Skip vector and operation
2653 ShiftVal = SkipImpliedMask(ShiftVal, LaneBits - 1);
2654 ShiftVal = DAG.getSplatValue(ShiftVal);
2655 if (!ShiftVal)
2656 return unrollVectorShift(Op, DAG);
2657
2658 // Skip scalar and operation
2659 ShiftVal = SkipImpliedMask(ShiftVal, LaneBits - 1);
2660 // Use anyext because none of the high bits can affect the shift
2661 ShiftVal = DAG.getAnyExtOrTrunc(ShiftVal, DL, MVT::i32);
2662
2663 unsigned Opcode;
2664 switch (Op.getOpcode()) {
2665 case ISD::SHL:
2666 Opcode = WebAssemblyISD::VEC_SHL;
2667 break;
2668 case ISD::SRA:
2669 Opcode = WebAssemblyISD::VEC_SHR_S;
2670 break;
2671 case ISD::SRL:
2672 Opcode = WebAssemblyISD::VEC_SHR_U;
2673 break;
2674 default:
2675 llvm_unreachable("unexpected opcode");
2676 }
2677
2678 return DAG.getNode(Opcode, DL, Op.getValueType(), Op.getOperand(0), ShiftVal);
2679}
2680
2681SDValue WebAssemblyTargetLowering::LowerFP_TO_INT_SAT(SDValue Op,
2682 SelectionDAG &DAG) const {
2683 SDLoc DL(Op);
2684 EVT ResT = Op.getValueType();
2685 EVT SatVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
2686
2687 if ((ResT == MVT::i32 || ResT == MVT::i64) &&
2688 (SatVT == MVT::i32 || SatVT == MVT::i64))
2689 return Op;
2690
2691 if (ResT == MVT::v4i32 && SatVT == MVT::i32)
2692 return Op;
2693
2694 if (ResT == MVT::v8i16 && SatVT == MVT::i16)
2695 return Op;
2696
2697 return SDValue();
2698}
2699
2700//===----------------------------------------------------------------------===//
2701// Custom DAG combine hooks
2702//===----------------------------------------------------------------------===//
2703static SDValue
2705 auto &DAG = DCI.DAG;
2706 auto Shuffle = cast<ShuffleVectorSDNode>(N);
2707
2708 // Hoist vector bitcasts that don't change the number of lanes out of unary
2709 // shuffles, where they are less likely to get in the way of other combines.
2710 // (shuffle (vNxT1 (bitcast (vNxT0 x))), undef, mask) ->
2711 // (vNxT1 (bitcast (vNxT0 (shuffle x, undef, mask))))
2712 SDValue Bitcast = N->getOperand(0);
2713 if (Bitcast.getOpcode() != ISD::BITCAST)
2714 return SDValue();
2715 if (!N->getOperand(1).isUndef())
2716 return SDValue();
2717 SDValue CastOp = Bitcast.getOperand(0);
2718 EVT SrcType = CastOp.getValueType();
2719 EVT DstType = Bitcast.getValueType();
2720 if (!SrcType.is128BitVector() ||
2721 SrcType.getVectorNumElements() != DstType.getVectorNumElements())
2722 return SDValue();
2723 SDValue NewShuffle = DAG.getVectorShuffle(
2724 SrcType, SDLoc(N), CastOp, DAG.getUNDEF(SrcType), Shuffle->getMask());
2725 return DAG.getBitcast(DstType, NewShuffle);
2726}
2727
2728/// Convert ({u,s}itofp vec) --> ({u,s}itofp ({s,z}ext vec)) so it doesn't get
2729/// split up into scalar instructions during legalization, and the vector
2730/// extending instructions are selected in performVectorExtendCombine below.
2731static SDValue
2734 auto &DAG = DCI.DAG;
2735 assert(N->getOpcode() == ISD::UINT_TO_FP ||
2736 N->getOpcode() == ISD::SINT_TO_FP);
2737
2738 EVT InVT = N->getOperand(0)->getValueType(0);
2739 EVT ResVT = N->getValueType(0);
2740 MVT ExtVT;
2741 if (ResVT == MVT::v4f32 && (InVT == MVT::v4i16 || InVT == MVT::v4i8))
2742 ExtVT = MVT::v4i32;
2743 else if (ResVT == MVT::v2f64 && (InVT == MVT::v2i16 || InVT == MVT::v2i8))
2744 ExtVT = MVT::v2i32;
2745 else
2746 return SDValue();
2747
2748 unsigned Op =
2750 SDValue Conv = DAG.getNode(Op, SDLoc(N), ExtVT, N->getOperand(0));
2751 return DAG.getNode(N->getOpcode(), SDLoc(N), ResVT, Conv);
2752}
2753
2754static SDValue
2756 auto &DAG = DCI.DAG;
2757 assert(N->getOpcode() == ISD::SIGN_EXTEND ||
2758 N->getOpcode() == ISD::ZERO_EXTEND);
2759
2760 // Combine ({s,z}ext (extract_subvector src, i)) into a widening operation if
2761 // possible before the extract_subvector can be expanded.
2762 auto Extract = N->getOperand(0);
2763 if (Extract.getOpcode() != ISD::EXTRACT_SUBVECTOR)
2764 return SDValue();
2765 auto Source = Extract.getOperand(0);
2766 auto *IndexNode = dyn_cast<ConstantSDNode>(Extract.getOperand(1));
2767 if (IndexNode == nullptr)
2768 return SDValue();
2769 auto Index = IndexNode->getZExtValue();
2770
2771 // Only v8i8, v4i16, and v2i32 extracts can be widened, and only if the
2772 // extracted subvector is the low or high half of its source.
2773 EVT ResVT = N->getValueType(0);
2774 if (ResVT == MVT::v8i16) {
2775 if (Extract.getValueType() != MVT::v8i8 ||
2776 Source.getValueType() != MVT::v16i8 || (Index != 0 && Index != 8))
2777 return SDValue();
2778 } else if (ResVT == MVT::v4i32) {
2779 if (Extract.getValueType() != MVT::v4i16 ||
2780 Source.getValueType() != MVT::v8i16 || (Index != 0 && Index != 4))
2781 return SDValue();
2782 } else if (ResVT == MVT::v2i64) {
2783 if (Extract.getValueType() != MVT::v2i32 ||
2784 Source.getValueType() != MVT::v4i32 || (Index != 0 && Index != 2))
2785 return SDValue();
2786 } else {
2787 return SDValue();
2788 }
2789
2790 bool IsSext = N->getOpcode() == ISD::SIGN_EXTEND;
2791 bool IsLow = Index == 0;
2792
2793 unsigned Op = IsSext ? (IsLow ? WebAssemblyISD::EXTEND_LOW_S
2794 : WebAssemblyISD::EXTEND_HIGH_S)
2795 : (IsLow ? WebAssemblyISD::EXTEND_LOW_U
2796 : WebAssemblyISD::EXTEND_HIGH_U);
2797
2798 return DAG.getNode(Op, SDLoc(N), ResVT, Source);
2799}
2800
2801static SDValue
2803 auto &DAG = DCI.DAG;
2804
2805 auto GetWasmConversionOp = [](unsigned Op) {
2806 switch (Op) {
2808 return WebAssemblyISD::TRUNC_SAT_ZERO_S;
2810 return WebAssemblyISD::TRUNC_SAT_ZERO_U;
2811 case ISD::FP_ROUND:
2812 return WebAssemblyISD::DEMOTE_ZERO;
2813 }
2814 llvm_unreachable("unexpected op");
2815 };
2816
2817 auto IsZeroSplat = [](SDValue SplatVal) {
2818 auto *Splat = dyn_cast<BuildVectorSDNode>(SplatVal.getNode());
2819 APInt SplatValue, SplatUndef;
2820 unsigned SplatBitSize;
2821 bool HasAnyUndefs;
2822 // Endianness doesn't matter in this context because we are looking for
2823 // an all-zero value.
2824 return Splat &&
2825 Splat->isConstantSplat(SplatValue, SplatUndef, SplatBitSize,
2826 HasAnyUndefs) &&
2827 SplatValue == 0;
2828 };
2829
2830 if (N->getOpcode() == ISD::CONCAT_VECTORS) {
2831 // Combine this:
2832 //
2833 // (concat_vectors (v2i32 (fp_to_{s,u}int_sat $x, 32)), (v2i32 (splat 0)))
2834 //
2835 // into (i32x4.trunc_sat_f64x2_zero_{s,u} $x).
2836 //
2837 // Or this:
2838 //
2839 // (concat_vectors (v2f32 (fp_round (v2f64 $x))), (v2f32 (splat 0)))
2840 //
2841 // into (f32x4.demote_zero_f64x2 $x).
2842 EVT ResVT;
2843 EVT ExpectedConversionType;
2844 auto Conversion = N->getOperand(0);
2845 auto ConversionOp = Conversion.getOpcode();
2846 switch (ConversionOp) {
2849 ResVT = MVT::v4i32;
2850 ExpectedConversionType = MVT::v2i32;
2851 break;
2852 case ISD::FP_ROUND:
2853 ResVT = MVT::v4f32;
2854 ExpectedConversionType = MVT::v2f32;
2855 break;
2856 default:
2857 return SDValue();
2858 }
2859
2860 if (N->getValueType(0) != ResVT)
2861 return SDValue();
2862
2863 if (Conversion.getValueType() != ExpectedConversionType)
2864 return SDValue();
2865
2866 auto Source = Conversion.getOperand(0);
2867 if (Source.getValueType() != MVT::v2f64)
2868 return SDValue();
2869
2870 if (!IsZeroSplat(N->getOperand(1)) ||
2871 N->getOperand(1).getValueType() != ExpectedConversionType)
2872 return SDValue();
2873
2874 unsigned Op = GetWasmConversionOp(ConversionOp);
2875 return DAG.getNode(Op, SDLoc(N), ResVT, Source);
2876 }
2877
2878 // Combine this:
2879 //
2880 // (fp_to_{s,u}int_sat (concat_vectors $x, (v2f64 (splat 0))), 32)
2881 //
2882 // into (i32x4.trunc_sat_f64x2_zero_{s,u} $x).
2883 //
2884 // Or this:
2885 //
2886 // (v4f32 (fp_round (concat_vectors $x, (v2f64 (splat 0)))))
2887 //
2888 // into (f32x4.demote_zero_f64x2 $x).
2889 EVT ResVT;
2890 auto ConversionOp = N->getOpcode();
2891 switch (ConversionOp) {
2894 ResVT = MVT::v4i32;
2895 break;
2896 case ISD::FP_ROUND:
2897 ResVT = MVT::v4f32;
2898 break;
2899 default:
2900 llvm_unreachable("unexpected op");
2901 }
2902
2903 if (N->getValueType(0) != ResVT)
2904 return SDValue();
2905
2906 auto Concat = N->getOperand(0);
2907 if (Concat.getValueType() != MVT::v4f64)
2908 return SDValue();
2909
2910 auto Source = Concat.getOperand(0);
2911 if (Source.getValueType() != MVT::v2f64)
2912 return SDValue();
2913
2914 if (!IsZeroSplat(Concat.getOperand(1)) ||
2915 Concat.getOperand(1).getValueType() != MVT::v2f64)
2916 return SDValue();
2917
2918 unsigned Op = GetWasmConversionOp(ConversionOp);
2919 return DAG.getNode(Op, SDLoc(N), ResVT, Source);
2920}
2921
2922// Helper to extract VectorWidth bits from Vec, starting from IdxVal.
2923static SDValue extractSubVector(SDValue Vec, unsigned IdxVal, SelectionDAG &DAG,
2924 const SDLoc &DL, unsigned VectorWidth) {
2925 EVT VT = Vec.getValueType();
2926 EVT ElVT = VT.getVectorElementType();
2927 unsigned Factor = VT.getSizeInBits() / VectorWidth;
2928 EVT ResultVT = EVT::getVectorVT(*DAG.getContext(), ElVT,
2929 VT.getVectorNumElements() / Factor);
2930
2931 // Extract the relevant VectorWidth bits. Generate an EXTRACT_SUBVECTOR
2932 unsigned ElemsPerChunk = VectorWidth / ElVT.getSizeInBits();
2933 assert(isPowerOf2_32(ElemsPerChunk) && "Elements per chunk not power of 2");
2934
2935 // This is the index of the first element of the VectorWidth-bit chunk
2936 // we want. Since ElemsPerChunk is a power of 2 just need to clear bits.
2937 IdxVal &= ~(ElemsPerChunk - 1);
2938
2939 // If the input is a buildvector just emit a smaller one.
2940 if (Vec.getOpcode() == ISD::BUILD_VECTOR)
2941 return DAG.getBuildVector(ResultVT, DL,
2942 Vec->ops().slice(IdxVal, ElemsPerChunk));
2943
2944 SDValue VecIdx = DAG.getIntPtrConstant(IdxVal, DL);
2945 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ResultVT, Vec, VecIdx);
2946}
2947
2948// Helper to recursively truncate vector elements in half with NARROW_U. DstVT
2949// is the expected destination value type after recursion. In is the initial
2950// input. Note that the input should have enough leading zero bits to prevent
2951// NARROW_U from saturating results.
2953 SelectionDAG &DAG) {
2954 EVT SrcVT = In.getValueType();
2955
2956 // No truncation required, we might get here due to recursive calls.
2957 if (SrcVT == DstVT)
2958 return In;
2959
2960 unsigned SrcSizeInBits = SrcVT.getSizeInBits();
2961 unsigned NumElems = SrcVT.getVectorNumElements();
2962 if (!isPowerOf2_32(NumElems))
2963 return SDValue();
2964 assert(DstVT.getVectorNumElements() == NumElems && "Illegal truncation");
2965 assert(SrcSizeInBits > DstVT.getSizeInBits() && "Illegal truncation");
2966
2967 LLVMContext &Ctx = *DAG.getContext();
2968 EVT PackedSVT = EVT::getIntegerVT(Ctx, SrcVT.getScalarSizeInBits() / 2);
2969
2970 // Narrow to the largest type possible:
2971 // vXi64/vXi32 -> i16x8.narrow_i32x4_u and vXi16 -> i8x16.narrow_i16x8_u.
2972 EVT InVT = MVT::i16, OutVT = MVT::i8;
2973 if (SrcVT.getScalarSizeInBits() > 16) {
2974 InVT = MVT::i32;
2975 OutVT = MVT::i16;
2976 }
2977 unsigned SubSizeInBits = SrcSizeInBits / 2;
2978 InVT = EVT::getVectorVT(Ctx, InVT, SubSizeInBits / InVT.getSizeInBits());
2979 OutVT = EVT::getVectorVT(Ctx, OutVT, SubSizeInBits / OutVT.getSizeInBits());
2980
2981 // Split lower/upper subvectors.
2982 SDValue Lo = extractSubVector(In, 0, DAG, DL, SubSizeInBits);
2983 SDValue Hi = extractSubVector(In, NumElems / 2, DAG, DL, SubSizeInBits);
2984
2985 // 256bit -> 128bit truncate - Narrow lower/upper 128-bit subvectors.
2986 if (SrcVT.is256BitVector() && DstVT.is128BitVector()) {
2987 Lo = DAG.getBitcast(InVT, Lo);
2988 Hi = DAG.getBitcast(InVT, Hi);
2989 SDValue Res = DAG.getNode(WebAssemblyISD::NARROW_U, DL, OutVT, Lo, Hi);
2990 return DAG.getBitcast(DstVT, Res);
2991 }
2992
2993 // Recursively narrow lower/upper subvectors, concat result and narrow again.
2994 EVT PackedVT = EVT::getVectorVT(Ctx, PackedSVT, NumElems / 2);
2995 Lo = truncateVectorWithNARROW(PackedVT, Lo, DL, DAG);
2996 Hi = truncateVectorWithNARROW(PackedVT, Hi, DL, DAG);
2997
2998 PackedVT = EVT::getVectorVT(Ctx, PackedSVT, NumElems);
2999 SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, DL, PackedVT, Lo, Hi);
3000 return truncateVectorWithNARROW(DstVT, Res, DL, DAG);
3001}
3002
3005 auto &DAG = DCI.DAG;
3006
3007 SDValue In = N->getOperand(0);
3008 EVT InVT = In.getValueType();
3009 if (!InVT.isSimple())
3010 return SDValue();
3011
3012 EVT OutVT = N->getValueType(0);
3013 if (!OutVT.isVector())
3014 return SDValue();
3015
3016 EVT OutSVT = OutVT.getVectorElementType();
3017 EVT InSVT = InVT.getVectorElementType();
3018 // Currently only cover truncate to v16i8 or v8i16.
3019 if (!((InSVT == MVT::i16 || InSVT == MVT::i32 || InSVT == MVT::i64) &&
3020 (OutSVT == MVT::i8 || OutSVT == MVT::i16) && OutVT.is128BitVector()))
3021 return SDValue();
3022
3023 SDLoc DL(N);
3025 OutVT.getScalarSizeInBits());
3026 In = DAG.getNode(ISD::AND, DL, InVT, In, DAG.getConstant(Mask, DL, InVT));
3027 return truncateVectorWithNARROW(OutVT, In, DL, DAG);
3028}
3029
3032 auto &DAG = DCI.DAG;
3033 SDLoc DL(N);
3034 SDValue Src = N->getOperand(0);
3035 EVT VT = N->getValueType(0);
3036 EVT SrcVT = Src.getValueType();
3037
3038 // bitcast <N x i1> to iN
3039 // ==> bitmask
3040 if (DCI.isBeforeLegalize() && VT.isScalarInteger() &&
3041 SrcVT.isFixedLengthVector() && SrcVT.getScalarType() == MVT::i1) {
3042 unsigned NumElts = SrcVT.getVectorNumElements();
3043 if (NumElts != 2 && NumElts != 4 && NumElts != 8 && NumElts != 16)
3044 return SDValue();
3045 EVT Width = MVT::getIntegerVT(128 / NumElts);
3046 return DAG.getZExtOrTrunc(
3047 DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, MVT::i32,
3048 {DAG.getConstant(Intrinsic::wasm_bitmask, DL, MVT::i32),
3049 DAG.getSExtOrTrunc(N->getOperand(0), DL,
3050 SrcVT.changeVectorElementType(Width))}),
3051 DL, VT);
3052 }
3053
3054 return SDValue();
3055}
3056
3059 auto &DAG = DCI.DAG;
3060
3061 SDValue LHS = N->getOperand(0);
3062 SDValue RHS = N->getOperand(1);
3063 ISD::CondCode Cond = cast<CondCodeSDNode>(N->getOperand(2))->get();
3064 SDLoc DL(N);
3065 EVT VT = N->getValueType(0);
3066
3067 // setcc (iN (bitcast (vNi1 X))), 0, ne
3068 // ==> any_true (vNi1 X)
3069 // setcc (iN (bitcast (vNi1 X))), 0, eq
3070 // ==> xor (any_true (vNi1 X)), -1
3071 // setcc (iN (bitcast (vNi1 X))), -1, eq
3072 // ==> all_true (vNi1 X)
3073 // setcc (iN (bitcast (vNi1 X))), -1, ne
3074 // ==> xor (all_true (vNi1 X)), -1
3075 if (DCI.isBeforeLegalize() && VT.isScalarInteger() &&
3076 (Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
3078 LHS->getOpcode() == ISD::BITCAST) {
3079 EVT FromVT = LHS->getOperand(0).getValueType();
3080 if (FromVT.isFixedLengthVector() &&
3081 FromVT.getVectorElementType() == MVT::i1) {
3082 int Intrin = isNullConstant(RHS) ? Intrinsic::wasm_anytrue
3083 : Intrinsic::wasm_alltrue;
3084 unsigned NumElts = FromVT.getVectorNumElements();
3085 if (NumElts != 2 && NumElts != 4 && NumElts != 8 && NumElts != 16)
3086 return SDValue();
3087 EVT Width = MVT::getIntegerVT(128 / NumElts);
3088 SDValue Ret = DAG.getZExtOrTrunc(
3089 DAG.getNode(
3090 ISD::INTRINSIC_WO_CHAIN, DL, MVT::i32,
3091 {DAG.getConstant(Intrin, DL, MVT::i32),
3092 DAG.getSExtOrTrunc(LHS->getOperand(0), DL,
3093 FromVT.changeVectorElementType(Width))}),
3094 DL, MVT::i1);
3095 if ((isNullConstant(RHS) && (Cond == ISD::SETEQ)) ||
3096 (isAllOnesConstant(RHS) && (Cond == ISD::SETNE))) {
3097 Ret = DAG.getNOT(DL, Ret, MVT::i1);
3098 }
3099 return DAG.getZExtOrTrunc(Ret, DL, VT);
3100 }
3101 }
3102
3103 return SDValue();
3104}
3105
3106SDValue
3107WebAssemblyTargetLowering::PerformDAGCombine(SDNode *N,
3108 DAGCombinerInfo &DCI) const {
3109 switch (N->getOpcode()) {
3110 default:
3111 return SDValue();
3112 case ISD::BITCAST:
3113 return performBitcastCombine(N, DCI);
3114 case ISD::SETCC:
3115 return performSETCCCombine(N, DCI);
3117 return performVECTOR_SHUFFLECombine(N, DCI);
3118 case ISD::SIGN_EXTEND:
3119 case ISD::ZERO_EXTEND:
3120 return performVectorExtendCombine(N, DCI);
3121 case ISD::UINT_TO_FP:
3122 case ISD::SINT_TO_FP:
3123 return performVectorExtendToFPCombine(N, DCI);
3126 case ISD::FP_ROUND:
3128 return performVectorTruncZeroCombine(N, DCI);
3129 case ISD::TRUNCATE:
3130 return performTruncateCombine(N, DCI);
3131 }
3132}
unsigned const MachineRegisterInfo * MRI
static SDValue performTruncateCombine(SDNode *N, SelectionDAG &DAG)
static SDValue performSETCCCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG)
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Function Alias Analysis Results
static void fail(const SDLoc &DL, SelectionDAG &DAG, const Twine &Msg, SDValue Val={})
Analysis containing CSE Info
Definition: CSEInfo.cpp:27
return RetTy
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
Symbol * Sym
Definition: ELF_riscv.cpp:479
Hexagon Common GEP
const HexagonInstrInfo * TII
#define _
IRTranslator LLVM IR MI
static unsigned NumFixedArgs
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
unsigned const TargetRegisterInfo * TRI
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
static MachineBasicBlock * LowerFPToInt(MachineInstr &MI, DebugLoc DL, MachineBasicBlock *BB, const TargetInstrInfo &TII, bool IsUnsigned, bool Int64, bool Float64, unsigned LoweredOpcode)
static bool callingConvSupported(CallingConv::ID CallConv)
static MachineBasicBlock * LowerMemcpy(MachineInstr &MI, DebugLoc DL, MachineBasicBlock *BB, const TargetInstrInfo &TII, bool Int64)
static std::optional< unsigned > IsWebAssemblyLocal(SDValue Op, SelectionDAG &DAG)
static SDValue performVectorExtendCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
static SDValue unrollVectorShift(SDValue Op, SelectionDAG &DAG)
static MachineBasicBlock * LowerCallResults(MachineInstr &CallResults, DebugLoc DL, MachineBasicBlock *BB, const WebAssemblySubtarget *Subtarget, const TargetInstrInfo &TII)
static SDValue performVECTOR_SHUFFLECombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
static SDValue performVectorTruncZeroCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
static bool IsWebAssemblyGlobal(SDValue Op)
static MachineBasicBlock * LowerMemset(MachineInstr &MI, DebugLoc DL, MachineBasicBlock *BB, const TargetInstrInfo &TII, bool Int64)
static SDValue performVectorExtendToFPCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
Convert ({u,s}itofp vec) --> ({u,s}itofp ({s,z}ext vec)) so it doesn't get split up into scalar instr...
static SDValue LowerConvertLow(SDValue Op, SelectionDAG &DAG)
static SDValue extractSubVector(SDValue Vec, unsigned IdxVal, SelectionDAG &DAG, const SDLoc &DL, unsigned VectorWidth)
static SDValue performBitcastCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
static SDValue truncateVectorWithNARROW(EVT DstVT, SDValue In, const SDLoc &DL, SelectionDAG &DAG)
This file defines the interfaces that WebAssembly uses to lower LLVM code into a selection DAG.
This file provides WebAssembly-specific target descriptions.
This file declares WebAssembly-specific per-machine-function information.
This file declares the WebAssembly-specific subclass of TargetSubtarget.
This file declares the WebAssembly-specific subclass of TargetMachine.
This file contains the declaration of the WebAssembly-specific type parsing utility functions.
This file contains the declaration of the WebAssembly-specific utility functions.
X86 cmov Conversion
static constexpr int Concat[]
Value * RHS
Value * LHS
Class for arbitrary precision integers.
Definition: APInt.h:78
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
Definition: APInt.h:306
static APInt getHighBitsSet(unsigned numBits, unsigned hiBitsSet)
Constructs an APInt value that has the top hiBitsSet bits set.
Definition: APInt.h:296
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
an instruction that atomically reads a memory location, combines it with another value,...
Definition: Instructions.h:704
@ Add
*p = old + v
Definition: Instructions.h:720
@ Or
*p = old | v
Definition: Instructions.h:728
@ Sub
*p = old - v
Definition: Instructions.h:722
@ And
*p = old & v
Definition: Instructions.h:724
@ Xor
*p = old ^ v
Definition: Instructions.h:730
BinOp getOperation() const
Definition: Instructions.h:805
LLVM Basic Block Representation.
Definition: BasicBlock.h:61
CCState - This class holds information needed while lowering arguments and return values.
static CCValAssign getMem(unsigned ValNo, MVT ValVT, int64_t Offset, MVT LocVT, LocInfo HTP, bool IsCustom=false)
This class represents a function call, abstracting a target machine's calling convention.
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:63
A debug info location.
Definition: DebugLoc.h:33
Diagnostic information for unsupported feature in backend.
This is a fast-path instruction selection class that generates poor code and doesn't support illegal ...
Definition: FastISel.h:66
FunctionLoweringInfo - This contains information that is global to a function that is used when lower...
FunctionType * getFunctionType() const
Returns the FunctionType for me.
Definition: Function.h:216
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Definition: Function.cpp:369
unsigned getAddressSpace() const
const GlobalValue * getGlobal() const
ThreadLocalMode getThreadLocalMode() const
Definition: GlobalValue.h:271
Type * getValueType() const
Definition: GlobalValue.h:296
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:67
void diagnose(const DiagnosticInfo &DI)
Report a message to the currently installed diagnostic handler.
This class is used to represent ISD::LOAD nodes.
const SDValue & getBasePtr() const
const SDValue & getOffset() const
Describe properties that are true of each instruction in the target description file.
Definition: MCInstrDesc.h:198
void setNoStrip() const
Definition: MCSymbolWasm.h:66
Machine Value Type.
@ INVALID_SIMPLE_VALUE_TYPE
static auto integer_fixedlen_vector_valuetypes()
MVT changeVectorElementType(MVT EltVT) const
Return a VT for a vector type whose attributes match ourselves with the exception of the element type...
unsigned getVectorNumElements() const
bool isVector() const
Return true if this is a vector value type.
bool isInteger() const
Return true if this is an integer or a vector integer type.
static MVT getVT(Type *Ty, bool HandleUnknown=false)
Return the value type corresponding to the specified type.
Definition: ValueTypes.cpp:237
static auto integer_valuetypes()
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
static auto fixedlen_vector_valuetypes()
bool isFixedLengthVector() const
static MVT getVectorVT(MVT VT, unsigned NumElements)
MVT getVectorElementType() const
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
static MVT getIntegerVT(unsigned BitWidth)
void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *FromMBB)
Transfers all the successors, as in transferSuccessors, and update PHI operands in the successor bloc...
instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
iterator insertAfter(iterator I, MachineInstr *MI)
Insert MI into the instruction list after I.
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
int CreateStackObject(uint64_t Size, Align Alignment, bool isSpillSlot, const AllocaInst *Alloca=nullptr, uint8_t ID=0)
Create a new statically sized stack object, returning a nonnegative identifier to represent it.
void setFrameAddressIsTaken(bool T)
unsigned getFunctionNumber() const
getFunctionNumber - Return a unique ID for the current function.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineInstr * CreateMachineInstr(const MCInstrDesc &MCID, DebugLoc DL, bool NoImplicit=false)
CreateMachineInstr - Allocate a new MachineInstr.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
const char * createExternalSymbolName(StringRef Name)
Allocate a string and populate it with the given external symbol name.
MCContext & getContext() const
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
Function & getFunction()
Return the LLVM function that this machine code represents.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
const MachineJumpTableInfo * getJumpTableInfo() const
getJumpTableInfo - Return the jump table info object for the current function.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addSym(MCSymbol *Sym, unsigned char TargetFlags=0) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addFPImm(const ConstantFP *Val) const
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
MachineInstr * getInstr() const
If conversion operators fail, use this method to get the MachineInstr explicitly.
Representation of each machine instruction.
Definition: MachineInstr.h:69
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
Definition: MachineInstr.h:575
iterator_range< mop_iterator > uses()
Returns a range that includes all operands which may be register uses.
Definition: MachineInstr.h:739
void addOperand(MachineFunction &MF, const MachineOperand &Op)
Add the specified operand to the instruction.
iterator_range< mop_iterator > defs()
Returns a range over all explicit operands that are register definitions.
Definition: MachineInstr.h:728
void eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
void removeOperand(unsigned OpNo)
Erase an operand from an instruction, leaving it with one fewer operand than it started with.
const MachineOperand & getOperand(unsigned i) const
Definition: MachineInstr.h:585
const std::vector< MachineJumpTableEntry > & getJumpTables() const
Flags
Flags values. These may be or'd together.
@ MOVolatile
The memory access is volatile.
@ MOLoad
The memory access reads data.
@ MOStore
The memory access writes data.
MachineOperand class - Representation of each machine instruction operand.
bool isReg() const
isReg - Tests if this is a MO_Register operand.
void setIsKill(bool Val=true)
Register getReg() const
getReg - Returns the register number.
bool isFI() const
isFI - Tests if this is a MO_FrameIndex operand.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
void addLiveIn(MCRegister Reg, Register vreg=Register())
addLiveIn - Add the specified register as a live-in.
unsigned getAddressSpace() const
Return the address space for the associated pointer.
MachineMemOperand * getMemOperand() const
Return a MachineMemOperand object describing the memory reference performed by operation.
const SDValue & getChain() const
EVT getMemoryVT() const
Return the type of the in-memory value.
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
Represents one node in the SelectionDAG.
ArrayRef< SDUse > ops() const
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
uint64_t getAsZExtVal() const
Helper method returns the zero-extended integer value of a ConstantSDNode.
const SDValue & getOperand(unsigned Num) const
uint64_t getConstantOperandVal(unsigned Num) const
Helper method returns the integer value of a ConstantSDNode operand.
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
bool isUndef() const
SDNode * getNode() const
get the SDNode which holds the desired result
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
const SDValue & getOperand(unsigned i) const
MVT getSimpleValueType() const
Return the simple ValueType of the referenced return value.
unsigned getOpcode() const
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
Definition: SelectionDAG.h:228
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned TargetFlags=0)
Definition: SelectionDAG.h:748
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, Register Reg, SDValue N)
Definition: SelectionDAG.h:799
SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
SDValue getSplatValue(SDValue V, bool LegalTypes=false)
If V is a splat vector, return its scalar source operand by extracting that element from the source v...
MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s),...
void ExtractVectorElements(SDValue Op, SmallVectorImpl< SDValue > &Args, unsigned Start=0, unsigned Count=0, EVT EltVT=EVT())
Append the extracted elements from Start to Count out of the vector Op in Args.
SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Align Alignment, bool isVol, bool AlwaysInline, const CallInst *CI, std::optional< bool > OverrideTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo=AAMDNodes(), AAResults *AA=nullptr)
SDValue UnrollVectorOp(SDNode *N, unsigned ResNE=0)
Utility function used by legalize and lowering to "unroll" a vector operation by splitting out the sc...
SDValue getConstantFP(double Val, const SDLoc &DL, EVT VT, bool isTarget=false)
Create a ConstantFPSDNode wrapping a constant value.
SDValue getNOT(const SDLoc &DL, SDValue Val, EVT VT)
Create a bitwise NOT operation as (XOR Val, -1).
SDValue getTargetJumpTable(int JTI, EVT VT, unsigned TargetFlags=0)
Definition: SelectionDAG.h:758
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
Definition: SelectionDAG.h:854
SDValue getBitcast(EVT VT, SDValue V)
Return a bitcast using the SDLoc of the value operand, and casting to the provided type.
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, Register Reg, EVT VT)
Definition: SelectionDAG.h:825
const DataLayout & getDataLayout() const
Definition: SelectionDAG.h:495
SDValue getTargetFrameIndex(int FI, EVT VT)
Definition: SelectionDAG.h:753
SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
SDValue getBasicBlock(MachineBasicBlock *MBB)
const TargetMachine & getTarget() const
Definition: SelectionDAG.h:496
SDValue getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either any-extending or truncat...
SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
SDValue getValueType(EVT)
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
Definition: SelectionDAG.h:698
MachineFunction & getMachineFunction() const
Definition: SelectionDAG.h:490
SDValue getSplatBuildVector(EVT VT, const SDLoc &DL, SDValue Op)
Return a splat ISD::BUILD_VECTOR node, consisting of Op splatted to all elements.
Definition: SelectionDAG.h:871
SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
SDValue getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either zero-extending or trunca...
LLVMContext * getContext() const
Definition: SelectionDAG.h:508
SDValue getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef< SDValue > Ops, EVT MemVT, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags Flags=MachineMemOperand::MOLoad|MachineMemOperand::MOStore, LocationSize Size=0, const AAMDNodes &AAInfo=AAMDNodes())
Creates a MemIntrinsicNode that may produce a result and takes a list of operands.
SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned TargetFlags=0)
SDValue getMCSymbol(MCSymbol *Sym, EVT VT)
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
Definition: SelectionDAG.h:578
SDValue getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, SDValue N2, ArrayRef< int > Mask)
Return an ISD::VECTOR_SHUFFLE node.
bool empty() const
Definition: SmallVector.h:81
size_t size() const
Definition: SmallVector.h:78
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:573
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
Definition: SmallVector.h:683
void push_back(const T &Elt)
Definition: SmallVector.h:413
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1196
This class is used to represent ISD::STORE nodes.
const SDValue & getBasePtr() const
const SDValue & getOffset() const
const SDValue & getValue() const
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:51
constexpr size_t size() const
size - Get the string size.
Definition: StringRef.h:150
TargetInstrInfo - Interface to description of machine instruction set.
Provides information about what library functions are available for the current target.
void setBooleanVectorContents(BooleanContent Ty)
Specify how the target extends the result of a vector boolean value from a vector of i1 to a wider ty...
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do a...
virtual const TargetRegisterClass * getRegClassFor(MVT VT, bool isDivergent=false) const
Return the register class that should be used for the specified value type.
const TargetMachine & getTargetMachine() const
LegalizeTypeAction
This enum indicates whether a types are legal for a target, and if not, what action should be used to...
void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits)
Set the maximum atomic operation size supported by the backend.
virtual TargetLoweringBase::LegalizeTypeAction getPreferredVectorAction(MVT VT) const
Return the preferred vector type legalization action.
void setBooleanContents(BooleanContent Ty)
Specify how the target extends the result of integer and floating point boolean values from i1 to a w...
void computeRegisterProperties(const TargetRegisterInfo *TRI)
Once all of the register classes are added, this allows us to compute derived properties we expose.
void addRegisterClass(MVT VT, const TargetRegisterClass *RC)
Add the specified register class as an available regclass for the specified value type.
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
void setLibcallName(RTLIB::Libcall Call, const char *Name)
Rename the default libcall routine name for the specified libcall.
void setMinimumJumpTableEntries(unsigned Val)
Indicate the minimum number of blocks to generate jump tables.
void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified truncating store does not work with the specified type and indicate what ...
virtual bool isBinOp(unsigned Opcode) const
Return true if the node is a math/logic binary operator.
void setStackPointerRegisterToSaveRestore(Register R)
If set to a physical register, this specifies the register that llvm.savestack/llvm....
AtomicExpansionKind
Enum that specifies what an atomic load/AtomicRMWInst is expanded to, if at all.
void setCondCodeAction(ArrayRef< ISD::CondCode > CCs, MVT VT, LegalizeAction Action)
Indicate that the specified condition code is or isn't supported on the target and indicate what to d...
void setTargetDAGCombine(ArrayRef< ISD::NodeType > NTs)
Targets should invoke this method for each target independent node that they want to provide a custom...
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate wh...
virtual MVT getPointerMemTy(const DataLayout &DL, uint32_t AS=0) const
Return the in-memory pointer type for the given address space, defaults to the pointer type from the ...
void setSchedulingPreference(Sched::Preference Pref)
Specify the target scheduling preference.
bool isOperationLegalOrCustomOrPromote(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
std::pair< SDValue, SDValue > makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC, EVT RetVT, ArrayRef< SDValue > Ops, MakeLibCallOptions CallOptions, const SDLoc &dl, SDValue Chain=SDValue()) const
Returns a pair of (return value, chain).
bool isPositionIndependent() const
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g.
bool verifyReturnAddressArgumentIsConstant(SDValue Op, SelectionDAG &DAG) const
virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const
Return true if folding a constant offset with the given GlobalAddress is legal.
Primary interface to the complete machine description for the target machine.
Definition: TargetMachine.h:77
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
bool isOSEmscripten() const
Tests whether the OS is Emscripten.
Definition: Triple.h:709
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
static Type * getDoubleTy(LLVMContext &C)
bool isFunctionTy() const
True if this is an instance of FunctionType.
Definition: Type.h:255
static Type * getFloatTy(LLVMContext &C)
A Use represents the edge between a Value definition and its users.
Definition: Use.h:43
LLVM Value Representation.
Definition: Value.h:74
const Value * stripPointerCastsAndAliases() const
Strip off pointer casts, all-zero GEPs, address space casts, and aliases.
Definition: Value.cpp:698
static std::optional< unsigned > getLocalForStackObject(MachineFunction &MF, int FrameIndex)
This class is derived from MachineFunctionInfo and contains private WebAssembly-specific information ...
Register getFrameRegister(const MachineFunction &MF) const override
const Triple & getTargetTriple() const
const WebAssemblyInstrInfo * getInstrInfo() const override
const WebAssemblyRegisterInfo * getRegisterInfo() const override
WebAssemblyTargetLowering(const TargetMachine &TM, const WebAssemblySubtarget &STI)
MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const override
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
MVT getPointerMemTy(const DataLayout &DL, uint32_t AS=0) const override
Return the in-memory pointer type for the given address space, defaults to the pointer type from the ...
self_iterator getIterator()
Definition: ilist_node.h:132
#define INT64_MIN
Definition: DataTypes.h:74
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
Definition: BitmaskEnum.h:125
@ Swift
Calling convention for Swift.
Definition: CallingConv.h:69
@ PreserveMost
Used for runtime calls that preserves most registers.
Definition: CallingConv.h:63
@ CXX_FAST_TLS
Used for access functions.
Definition: CallingConv.h:72
@ WASM_EmscriptenInvoke
For emscripten __invoke_* functions.
Definition: CallingConv.h:229
@ Cold
Attempts to make code in the caller as efficient as possible under the assumption that the call is no...
Definition: CallingConv.h:47
@ PreserveAll
Used for runtime calls that preserves (almost) all registers.
Definition: CallingConv.h:66
@ Fast
Attempts to make calls as fast as possible (e.g.
Definition: CallingConv.h:41
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
Definition: ISDOpcodes.h:780
@ STACKRESTORE
STACKRESTORE has two operands, an input chain and a pointer to restore to it returns an output chain.
Definition: ISDOpcodes.h:1197
@ STACKSAVE
STACKSAVE - STACKSAVE has one operand, an input chain.
Definition: ISDOpcodes.h:1193
@ SMUL_LOHI
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2...
Definition: ISDOpcodes.h:257
@ BSWAP
Byte Swap and Counting operators.
Definition: ISDOpcodes.h:744
@ VAEND
VAEND, VASTART - VAEND and VASTART have three operands: an input chain, pointer, and a SRCVALUE.
Definition: ISDOpcodes.h:1226
@ ConstantFP
Definition: ISDOpcodes.h:77
@ ADDC
Carry-setting nodes for multiple precision addition and subtraction.
Definition: ISDOpcodes.h:276
@ ADD
Simple integer binary arithmetic operators.
Definition: ISDOpcodes.h:246
@ LOAD
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store...
Definition: ISDOpcodes.h:1102
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
Definition: ISDOpcodes.h:498
@ INTRINSIC_VOID
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
Definition: ISDOpcodes.h:205
@ GlobalAddress
Definition: ISDOpcodes.h:78
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
Definition: ISDOpcodes.h:841
@ CONCAT_VECTORS
CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of vector type with the same length ...
Definition: ISDOpcodes.h:558
@ ABS
ABS - Determine the unsigned absolute value of a signed integer value of the same bitwidth.
Definition: ISDOpcodes.h:717
@ SIGN_EXTEND_VECTOR_INREG
SIGN_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register sign-extension of the low ...
Definition: ISDOpcodes.h:871
@ SDIVREM
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
Definition: ISDOpcodes.h:262
@ FP16_TO_FP
FP16_TO_FP, FP_TO_FP16 - These operators are used to perform promotions and truncation for half-preci...
Definition: ISDOpcodes.h:964
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
Definition: ISDOpcodes.h:954
@ BUILD_PAIR
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
Definition: ISDOpcodes.h:236
@ BUILTIN_OP_END
BUILTIN_OP_END - This must be the last enum value in this list.
Definition: ISDOpcodes.h:1490
@ GlobalTLSAddress
Definition: ISDOpcodes.h:79
@ FrameIndex
Definition: ISDOpcodes.h:80
@ SIGN_EXTEND
Conversion operators.
Definition: ISDOpcodes.h:805
@ SCALAR_TO_VECTOR
SCALAR_TO_VECTOR(VAL) - This represents the operation of loading a scalar value into element 0 of the...
Definition: ISDOpcodes.h:635
@ FSINCOS
FSINCOS - Compute both fsin and fcos as a single operation.
Definition: ISDOpcodes.h:1059
@ BR_CC
BR_CC - Conditional branch.
Definition: ISDOpcodes.h:1148
@ BRIND
BRIND - Indirect branch.
Definition: ISDOpcodes.h:1123
@ BR_JT
BR_JT - Jumptable branch.
Definition: ISDOpcodes.h:1127
@ SSUBSAT
RESULT = [US]SUBSAT(LHS, RHS) - Perform saturation subtraction on 2 integers with the same bit width ...
Definition: ISDOpcodes.h:356
@ EXTRACT_ELEMENT
EXTRACT_ELEMENT - This is used to get the lower or upper (determined by a Constant,...
Definition: ISDOpcodes.h:229
@ SPLAT_VECTOR
SPLAT_VECTOR(VAL) - Returns a vector with the scalar value VAL duplicated in all lanes.
Definition: ISDOpcodes.h:642
@ VACOPY
VACOPY - VACOPY has 5 operands: an input chain, a destination pointer, a source pointer,...
Definition: ISDOpcodes.h:1222
@ MULHU
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of...
Definition: ISDOpcodes.h:674
@ SHL
Shift and rotation operations.
Definition: ISDOpcodes.h:735
@ VECTOR_SHUFFLE
VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as VEC1/VEC2.
Definition: ISDOpcodes.h:615
@ EXTRACT_SUBVECTOR
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR.
Definition: ISDOpcodes.h:588
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
Definition: ISDOpcodes.h:550
@ CopyToReg
CopyToReg - This node has three operands: a chain, a register number to set to this value,...
Definition: ISDOpcodes.h:209
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
Definition: ISDOpcodes.h:811
@ DEBUGTRAP
DEBUGTRAP - Trap intended to get the attention of a debugger.
Definition: ISDOpcodes.h:1282
@ FP_TO_UINT_SAT
Definition: ISDOpcodes.h:907
@ SELECT_CC
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
Definition: ISDOpcodes.h:772
@ DYNAMIC_STACKALLOC
DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned to a specified boundary.
Definition: ISDOpcodes.h:1112
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
Definition: ISDOpcodes.h:849
@ SMIN
[US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned integers.
Definition: ISDOpcodes.h:697
@ FP_EXTEND
X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
Definition: ISDOpcodes.h:939
@ FRAMEADDR
FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG.
Definition: ISDOpcodes.h:100
@ FMINIMUM
FMINIMUM/FMAXIMUM - NaN-propagating minimum/maximum that also treat -0.0 as less than 0....
Definition: ISDOpcodes.h:1050
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
Definition: ISDOpcodes.h:887
@ TargetConstant
TargetConstant* - Like Constant*, but the DAG does not do any folding, simplification,...
Definition: ISDOpcodes.h:164
@ AND
Bitwise operators - logical and, logical or, logical xor.
Definition: ISDOpcodes.h:709
@ TRAP
TRAP - Trapping instruction.
Definition: ISDOpcodes.h:1279
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
Definition: ISDOpcodes.h:190
@ ADDE
Carry-using nodes for multiple precision addition and subtraction.
Definition: ISDOpcodes.h:286
@ INSERT_VECTOR_ELT
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.
Definition: ISDOpcodes.h:539
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
Definition: ISDOpcodes.h:52
@ ExternalSymbol
Definition: ISDOpcodes.h:83
@ FP_ROUND
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the ...
Definition: ISDOpcodes.h:920
@ ZERO_EXTEND_VECTOR_INREG
ZERO_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register zero-extension of the low ...
Definition: ISDOpcodes.h:882
@ FP_TO_SINT_SAT
FP_TO_[US]INT_SAT - Convert floating point value in operand 0 to a signed or unsigned scalar integer ...
Definition: ISDOpcodes.h:906
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
Definition: ISDOpcodes.h:817
@ VAARG
VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE, and the alignment.
Definition: ISDOpcodes.h:1217
@ BlockAddress
Definition: ISDOpcodes.h:84
@ SHL_PARTS
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations.
Definition: ISDOpcodes.h:794
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
Definition: ISDOpcodes.h:508
@ SADDSAT
RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2 integers with the same bit width (W)...
Definition: ISDOpcodes.h:347
@ INTRINSIC_W_CHAIN
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...
Definition: ISDOpcodes.h:198
@ BUILD_VECTOR
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified,...
Definition: ISDOpcodes.h:530
bool isConstantSplatVector(const SDNode *N, APInt &SplatValue)
Node predicates.
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
Definition: ISDOpcodes.h:1613
ID ArrayRef< Type * > Tys
Definition: Intrinsics.h:102
OperandFlags
These are flags set on operands, but should be considered private, all access should go through the M...
Definition: MCInstrDesc.h:50
MCSymbolWasm * getOrCreateFunctionTableSymbol(MCContext &Ctx, const WebAssemblySubtarget *Subtarget)
Returns the __indirect_function_table, for use in call_indirect and in function bitcasts.
bool isWebAssemblyFuncrefType(const Type *Ty)
Return true if this is a WebAssembly Funcref Type.
bool isWebAssemblyTableType(const Type *Ty)
Return true if the table represents a WebAssembly table type.
MCSymbolWasm * getOrCreateFuncrefCallTableSymbol(MCContext &Ctx, const WebAssemblySubtarget *Subtarget)
Returns the __funcref_call_table, for use in funcref calls when lowered to table.set + call_indirect.
FastISel * createFastISel(FunctionLoweringInfo &funcInfo, const TargetLibraryInfo *libInfo)
bool isValidAddressSpace(unsigned AS)
bool canLowerReturn(size_t ResultSize, const WebAssemblySubtarget *Subtarget)
Returns true if the function's return value(s) can be lowered directly, i.e., not indirectly via a po...
bool isWasmVarAddressSpace(unsigned AS)
Reg
All possible values of the reg field in the ModR/M byte.
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition: STLExtras.h:329
unsigned Log2_32_Ceil(uint32_t Value)
Return the ceil log base 2 of the specified value, 32 if the value is zero.
Definition: MathExtras.h:353
@ Offset
Definition: DWP.cpp:480
void computeSignatureVTs(const FunctionType *Ty, const Function *TargetFunc, const Function &ContextFunc, const TargetMachine &TM, SmallVectorImpl< MVT > &Params, SmallVectorImpl< MVT > &Results)
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
bool isNullConstant(SDValue V)
Returns true if V is a constant integer zero.
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition: MathExtras.h:291
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:167
@ And
Bitwise or logical AND of integers.
@ Add
Sum of integers.
DWARFExpression::Operation Op
constexpr unsigned BitWidth
Definition: BitmaskEnum.h:217
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1766
void erase_if(Container &C, UnaryPredicate P)
Provide a container algorithm similar to C++ Library Fundamentals v2's erase_if which is equivalent t...
Definition: STLExtras.h:2099
void computeLegalValueVTs(const WebAssemblyTargetLowering &TLI, LLVMContext &Ctx, const DataLayout &DL, Type *Ty, SmallVectorImpl< MVT > &ValueVTs)
bool isAllOnesConstant(SDValue V)
Returns true if V is an integer constant with all bits set.
constexpr uint64_t NextPowerOf2(uint64_t A)
Returns the next power of two (in 64-bits) that is strictly greater than A.
Definition: MathExtras.h:382
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition: BitVector.h:860
#define N
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
Extended Value Type.
Definition: ValueTypes.h:35
EVT changeVectorElementTypeToInteger() const
Return a vector with the same number of elements as this vector, but with the element type converted ...
Definition: ValueTypes.h:94
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
Definition: ValueTypes.h:137
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements, bool IsScalable=false)
Returns the EVT that represents a vector NumElements in length, where each element is of type VT.
Definition: ValueTypes.h:74
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
Definition: ValueTypes.h:147
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
Definition: ValueTypes.h:368
uint64_t getScalarSizeInBits() const
Definition: ValueTypes.h:380
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
Definition: ValueTypes.h:311
bool is128BitVector() const
Return true if this is a 128-bit vector type.
Definition: ValueTypes.h:207
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
Returns the EVT that represents an integer with the given number of bits.
Definition: ValueTypes.h:65
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
Definition: ValueTypes.h:376
bool isFixedLengthVector() const
Definition: ValueTypes.h:181
bool isVector() const
Return true if this is a vector value type.
Definition: ValueTypes.h:168
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
Definition: ValueTypes.h:318
bool bitsGE(EVT VT) const
Return true if this has no less bits than VT.
Definition: ValueTypes.h:287
bool is256BitVector() const
Return true if this is a 256-bit vector type.
Definition: ValueTypes.h:212
Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
Definition: ValueTypes.cpp:210
EVT getVectorElementType() const
Given a vector type, return the type of each element.
Definition: ValueTypes.h:323
bool isScalarInteger() const
Return true if this is an integer, but not a vector.
Definition: ValueTypes.h:157
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
Definition: ValueTypes.h:331
bool isInConsecutiveRegs() const
Align getNonZeroOrigAlign() const
unsigned getByValSize() const
bool isInConsecutiveRegsLast() const
Align getNonZeroByValAlign() const
InputArg - This struct carries flags and type information about a single incoming (formal) argument o...
OutputArg - This struct carries flags and a value for a single outgoing (actual) argument or outgoing...
bool IsFixed
IsFixed - Is this a "fixed" value, ie not passed through a vararg "...".
unsigned getBitWidth() const
Get the bit width of this value.
Definition: KnownBits.h:43
This class contains a discriminated union of information about pointers in memory operands,...
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Definition: Alignment.h:117
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
Function object to check whether the second component of a container supported by std::get (like std:...
Definition: STLExtras.h:1476