LLVM 20.0.0git
GISelKnownBits.cpp
Go to the documentation of this file.
1//===- lib/CodeGen/GlobalISel/GISelKnownBits.cpp --------------*- C++ *-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9/// Provides analysis for querying information about KnownBits during GISel
10/// passes.
11//
12//===----------------------------------------------------------------------===//
23#include "llvm/IR/Module.h"
25
26#define DEBUG_TYPE "gisel-known-bits"
27
28using namespace llvm;
29
31
33 "Analysis for ComputingKnownBits", false, true)
34
36 : MF(MF), MRI(MF.getRegInfo()), TL(*MF.getSubtarget().getTargetLowering()),
37 DL(MF.getFunction().getDataLayout()), MaxDepth(MaxDepth) {}
38
40 const MachineInstr *MI = MRI.getVRegDef(R);
41 switch (MI->getOpcode()) {
42 case TargetOpcode::COPY:
43 return computeKnownAlignment(MI->getOperand(1).getReg(), Depth);
44 case TargetOpcode::G_ASSERT_ALIGN: {
45 // TODO: Min with source
46 return Align(MI->getOperand(2).getImm());
47 }
48 case TargetOpcode::G_FRAME_INDEX: {
49 int FrameIdx = MI->getOperand(1).getIndex();
50 return MF.getFrameInfo().getObjectAlign(FrameIdx);
51 }
52 case TargetOpcode::G_INTRINSIC:
53 case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
54 case TargetOpcode::G_INTRINSIC_CONVERGENT:
55 case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS:
56 default:
57 return TL.computeKnownAlignForTargetInstr(*this, R, MRI, Depth + 1);
58 }
59}
60
62 assert(MI.getNumExplicitDefs() == 1 &&
63 "expected single return generic instruction");
64 return getKnownBits(MI.getOperand(0).getReg());
65}
66
68 const LLT Ty = MRI.getType(R);
69 // Since the number of lanes in a scalable vector is unknown at compile time,
70 // we track one bit which is implicitly broadcast to all lanes. This means
71 // that all lanes in a scalable vector are considered demanded.
72 APInt DemandedElts =
74 return getKnownBits(R, DemandedElts);
75}
76
78 unsigned Depth) {
79 // For now, we only maintain the cache during one request.
80 assert(ComputeKnownBitsCache.empty() && "Cache should have been cleared");
81
82 KnownBits Known;
83 computeKnownBitsImpl(R, Known, DemandedElts, Depth);
84 ComputeKnownBitsCache.clear();
85 return Known;
86}
87
89 LLT Ty = MRI.getType(R);
90 unsigned BitWidth = Ty.getScalarSizeInBits();
92}
93
95 return getKnownBits(R).Zero;
96}
97
99
100LLVM_ATTRIBUTE_UNUSED static void
101dumpResult(const MachineInstr &MI, const KnownBits &Known, unsigned Depth) {
102 dbgs() << "[" << Depth << "] Compute known bits: " << MI << "[" << Depth
103 << "] Computed for: " << MI << "[" << Depth << "] Known: 0x"
104 << toString(Known.Zero | Known.One, 16, false) << "\n"
105 << "[" << Depth << "] Zero: 0x" << toString(Known.Zero, 16, false)
106 << "\n"
107 << "[" << Depth << "] One: 0x" << toString(Known.One, 16, false)
108 << "\n";
109}
110
111/// Compute known bits for the intersection of \p Src0 and \p Src1
112void GISelKnownBits::computeKnownBitsMin(Register Src0, Register Src1,
113 KnownBits &Known,
114 const APInt &DemandedElts,
115 unsigned Depth) {
116 // Test src1 first, since we canonicalize simpler expressions to the RHS.
117 computeKnownBitsImpl(Src1, Known, DemandedElts, Depth);
118
119 // If we don't know any bits, early out.
120 if (Known.isUnknown())
121 return;
122
123 KnownBits Known2;
124 computeKnownBitsImpl(Src0, Known2, DemandedElts, Depth);
125
126 // Only known if known in both the LHS and RHS.
127 Known = Known.intersectWith(Known2);
128}
129
130// Bitfield extract is computed as (Src >> Offset) & Mask, where Mask is
131// created using Width. Use this function when the inputs are KnownBits
132// objects. TODO: Move this KnownBits.h if this is usable in more cases.
133static KnownBits extractBits(unsigned BitWidth, const KnownBits &SrcOpKnown,
134 const KnownBits &OffsetKnown,
135 const KnownBits &WidthKnown) {
136 KnownBits Mask(BitWidth);
137 Mask.Zero = APInt::getBitsSetFrom(
139 Mask.One = APInt::getLowBitsSet(
141 return KnownBits::lshr(SrcOpKnown, OffsetKnown) & Mask;
142}
143
145 const APInt &DemandedElts,
146 unsigned Depth) {
147 MachineInstr &MI = *MRI.getVRegDef(R);
148 unsigned Opcode = MI.getOpcode();
149 LLT DstTy = MRI.getType(R);
150
151 // Handle the case where this is called on a register that does not have a
152 // type constraint (i.e. it has a register class constraint instead). This is
153 // unlikely to occur except by looking through copies but it is possible for
154 // the initial register being queried to be in this state.
155 if (!DstTy.isValid()) {
156 Known = KnownBits();
157 return;
158 }
159
160 unsigned BitWidth = DstTy.getScalarSizeInBits();
161 auto CacheEntry = ComputeKnownBitsCache.find(R);
162 if (CacheEntry != ComputeKnownBitsCache.end()) {
163 Known = CacheEntry->second;
164 LLVM_DEBUG(dbgs() << "Cache hit at ");
165 LLVM_DEBUG(dumpResult(MI, Known, Depth));
166 assert(Known.getBitWidth() == BitWidth && "Cache entry size doesn't match");
167 return;
168 }
169 Known = KnownBits(BitWidth); // Don't know anything
170
171 // Depth may get bigger than max depth if it gets passed to a different
172 // GISelKnownBits object.
173 // This may happen when say a generic part uses a GISelKnownBits object
174 // with some max depth, but then we hit TL.computeKnownBitsForTargetInstr
175 // which creates a new GISelKnownBits object with a different and smaller
176 // depth. If we just check for equality, we would never exit if the depth
177 // that is passed down to the target specific GISelKnownBits object is
178 // already bigger than its max depth.
179 if (Depth >= getMaxDepth())
180 return;
181
182 if (!DemandedElts)
183 return; // No demanded elts, better to assume we don't know anything.
184
185 KnownBits Known2;
186
187 switch (Opcode) {
188 default:
189 TL.computeKnownBitsForTargetInstr(*this, R, Known, DemandedElts, MRI,
190 Depth);
191 break;
192 case TargetOpcode::G_BUILD_VECTOR: {
193 // Collect the known bits that are shared by every demanded vector element.
194 Known.Zero.setAllBits(); Known.One.setAllBits();
195 for (unsigned i = 0, e = MI.getNumOperands() - 1; i < e; ++i) {
196 if (!DemandedElts[i])
197 continue;
198
199 computeKnownBitsImpl(MI.getOperand(i + 1).getReg(), Known2, DemandedElts,
200 Depth + 1);
201
202 // Known bits are the values that are shared by every demanded element.
203 Known = Known.intersectWith(Known2);
204
205 // If we don't know any bits, early out.
206 if (Known.isUnknown())
207 break;
208 }
209 break;
210 }
211 case TargetOpcode::COPY:
212 case TargetOpcode::G_PHI:
213 case TargetOpcode::PHI: {
216 // Destination registers should not have subregisters at this
217 // point of the pipeline, otherwise the main live-range will be
218 // defined more than once, which is against SSA.
219 assert(MI.getOperand(0).getSubReg() == 0 && "Is this code in SSA?");
220 // Record in the cache that we know nothing for MI.
221 // This will get updated later and in the meantime, if we reach that
222 // phi again, because of a loop, we will cut the search thanks to this
223 // cache entry.
224 // We could actually build up more information on the phi by not cutting
225 // the search, but that additional information is more a side effect
226 // than an intended choice.
227 // Therefore, for now, save on compile time until we derive a proper way
228 // to derive known bits for PHIs within loops.
229 ComputeKnownBitsCache[R] = KnownBits(BitWidth);
230 // PHI's operand are a mix of registers and basic blocks interleaved.
231 // We only care about the register ones.
232 for (unsigned Idx = 1; Idx < MI.getNumOperands(); Idx += 2) {
233 const MachineOperand &Src = MI.getOperand(Idx);
234 Register SrcReg = Src.getReg();
235 // Look through trivial copies and phis but don't look through trivial
236 // copies or phis of the form `%1:(s32) = OP %0:gpr32`, known-bits
237 // analysis is currently unable to determine the bit width of a
238 // register class.
239 //
240 // We can't use NoSubRegister by name as it's defined by each target but
241 // it's always defined to be 0 by tablegen.
242 if (SrcReg.isVirtual() && Src.getSubReg() == 0 /*NoSubRegister*/ &&
243 MRI.getType(SrcReg).isValid()) {
244 // For COPYs we don't do anything, don't increase the depth.
245 computeKnownBitsImpl(SrcReg, Known2, DemandedElts,
246 Depth + (Opcode != TargetOpcode::COPY));
247 Known = Known.intersectWith(Known2);
248 // If we reach a point where we don't know anything
249 // just stop looking through the operands.
250 if (Known.isUnknown())
251 break;
252 } else {
253 // We know nothing.
254 Known = KnownBits(BitWidth);
255 break;
256 }
257 }
258 break;
259 }
260 case TargetOpcode::G_CONSTANT: {
261 Known = KnownBits::makeConstant(MI.getOperand(1).getCImm()->getValue());
262 break;
263 }
264 case TargetOpcode::G_FRAME_INDEX: {
265 int FrameIdx = MI.getOperand(1).getIndex();
266 TL.computeKnownBitsForFrameIndex(FrameIdx, Known, MF);
267 break;
268 }
269 case TargetOpcode::G_SUB: {
270 computeKnownBitsImpl(MI.getOperand(1).getReg(), Known, DemandedElts,
271 Depth + 1);
272 computeKnownBitsImpl(MI.getOperand(2).getReg(), Known2, DemandedElts,
273 Depth + 1);
274 Known = KnownBits::sub(Known, Known2);
275 break;
276 }
277 case TargetOpcode::G_XOR: {
278 computeKnownBitsImpl(MI.getOperand(2).getReg(), Known, DemandedElts,
279 Depth + 1);
280 computeKnownBitsImpl(MI.getOperand(1).getReg(), Known2, DemandedElts,
281 Depth + 1);
282
283 Known ^= Known2;
284 break;
285 }
286 case TargetOpcode::G_PTR_ADD: {
287 if (DstTy.isVector())
288 break;
289 // G_PTR_ADD is like G_ADD. FIXME: Is this true for all targets?
290 LLT Ty = MRI.getType(MI.getOperand(1).getReg());
292 break;
293 [[fallthrough]];
294 }
295 case TargetOpcode::G_ADD: {
296 computeKnownBitsImpl(MI.getOperand(1).getReg(), Known, DemandedElts,
297 Depth + 1);
298 computeKnownBitsImpl(MI.getOperand(2).getReg(), Known2, DemandedElts,
299 Depth + 1);
300 Known = KnownBits::add(Known, Known2);
301 break;
302 }
303 case TargetOpcode::G_AND: {
304 // If either the LHS or the RHS are Zero, the result is zero.
305 computeKnownBitsImpl(MI.getOperand(2).getReg(), Known, DemandedElts,
306 Depth + 1);
307 computeKnownBitsImpl(MI.getOperand(1).getReg(), Known2, DemandedElts,
308 Depth + 1);
309
310 Known &= Known2;
311 break;
312 }
313 case TargetOpcode::G_OR: {
314 // If either the LHS or the RHS are Zero, the result is zero.
315 computeKnownBitsImpl(MI.getOperand(2).getReg(), Known, DemandedElts,
316 Depth + 1);
317 computeKnownBitsImpl(MI.getOperand(1).getReg(), Known2, DemandedElts,
318 Depth + 1);
319
320 Known |= Known2;
321 break;
322 }
323 case TargetOpcode::G_MUL: {
324 computeKnownBitsImpl(MI.getOperand(2).getReg(), Known, DemandedElts,
325 Depth + 1);
326 computeKnownBitsImpl(MI.getOperand(1).getReg(), Known2, DemandedElts,
327 Depth + 1);
328 Known = KnownBits::mul(Known, Known2);
329 break;
330 }
331 case TargetOpcode::G_SELECT: {
332 computeKnownBitsMin(MI.getOperand(2).getReg(), MI.getOperand(3).getReg(),
333 Known, DemandedElts, Depth + 1);
334 break;
335 }
336 case TargetOpcode::G_SMIN: {
337 // TODO: Handle clamp pattern with number of sign bits
338 KnownBits KnownRHS;
339 computeKnownBitsImpl(MI.getOperand(1).getReg(), Known, DemandedElts,
340 Depth + 1);
341 computeKnownBitsImpl(MI.getOperand(2).getReg(), KnownRHS, DemandedElts,
342 Depth + 1);
343 Known = KnownBits::smin(Known, KnownRHS);
344 break;
345 }
346 case TargetOpcode::G_SMAX: {
347 // TODO: Handle clamp pattern with number of sign bits
348 KnownBits KnownRHS;
349 computeKnownBitsImpl(MI.getOperand(1).getReg(), Known, DemandedElts,
350 Depth + 1);
351 computeKnownBitsImpl(MI.getOperand(2).getReg(), KnownRHS, DemandedElts,
352 Depth + 1);
353 Known = KnownBits::smax(Known, KnownRHS);
354 break;
355 }
356 case TargetOpcode::G_UMIN: {
357 KnownBits KnownRHS;
358 computeKnownBitsImpl(MI.getOperand(1).getReg(), Known,
359 DemandedElts, Depth + 1);
360 computeKnownBitsImpl(MI.getOperand(2).getReg(), KnownRHS,
361 DemandedElts, Depth + 1);
362 Known = KnownBits::umin(Known, KnownRHS);
363 break;
364 }
365 case TargetOpcode::G_UMAX: {
366 KnownBits KnownRHS;
367 computeKnownBitsImpl(MI.getOperand(1).getReg(), Known,
368 DemandedElts, Depth + 1);
369 computeKnownBitsImpl(MI.getOperand(2).getReg(), KnownRHS,
370 DemandedElts, Depth + 1);
371 Known = KnownBits::umax(Known, KnownRHS);
372 break;
373 }
374 case TargetOpcode::G_FCMP:
375 case TargetOpcode::G_ICMP: {
376 if (DstTy.isVector())
377 break;
378 if (TL.getBooleanContents(DstTy.isVector(),
379 Opcode == TargetOpcode::G_FCMP) ==
381 BitWidth > 1)
382 Known.Zero.setBitsFrom(1);
383 break;
384 }
385 case TargetOpcode::G_SEXT: {
386 computeKnownBitsImpl(MI.getOperand(1).getReg(), Known, DemandedElts,
387 Depth + 1);
388 // If the sign bit is known to be zero or one, then sext will extend
389 // it to the top bits, else it will just zext.
390 Known = Known.sext(BitWidth);
391 break;
392 }
393 case TargetOpcode::G_ASSERT_SEXT:
394 case TargetOpcode::G_SEXT_INREG: {
395 computeKnownBitsImpl(MI.getOperand(1).getReg(), Known, DemandedElts,
396 Depth + 1);
397 Known = Known.sextInReg(MI.getOperand(2).getImm());
398 break;
399 }
400 case TargetOpcode::G_ANYEXT: {
401 computeKnownBitsImpl(MI.getOperand(1).getReg(), Known, DemandedElts,
402 Depth + 1);
403 Known = Known.anyext(BitWidth);
404 break;
405 }
406 case TargetOpcode::G_LOAD: {
407 const MachineMemOperand *MMO = *MI.memoperands_begin();
408 KnownBits KnownRange(MMO->getMemoryType().getScalarSizeInBits());
409 if (const MDNode *Ranges = MMO->getRanges())
410 computeKnownBitsFromRangeMetadata(*Ranges, KnownRange);
411 Known = KnownRange.anyext(Known.getBitWidth());
412 break;
413 }
414 case TargetOpcode::G_SEXTLOAD:
415 case TargetOpcode::G_ZEXTLOAD: {
416 if (DstTy.isVector())
417 break;
418 const MachineMemOperand *MMO = *MI.memoperands_begin();
419 KnownBits KnownRange(MMO->getMemoryType().getScalarSizeInBits());
420 if (const MDNode *Ranges = MMO->getRanges())
421 computeKnownBitsFromRangeMetadata(*Ranges, KnownRange);
422 Known = Opcode == TargetOpcode::G_SEXTLOAD
423 ? KnownRange.sext(Known.getBitWidth())
424 : KnownRange.zext(Known.getBitWidth());
425 break;
426 }
427 case TargetOpcode::G_ASHR: {
428 KnownBits LHSKnown, RHSKnown;
429 computeKnownBitsImpl(MI.getOperand(1).getReg(), LHSKnown, DemandedElts,
430 Depth + 1);
431 computeKnownBitsImpl(MI.getOperand(2).getReg(), RHSKnown, DemandedElts,
432 Depth + 1);
433 Known = KnownBits::ashr(LHSKnown, RHSKnown);
434 break;
435 }
436 case TargetOpcode::G_LSHR: {
437 KnownBits LHSKnown, RHSKnown;
438 computeKnownBitsImpl(MI.getOperand(1).getReg(), LHSKnown, DemandedElts,
439 Depth + 1);
440 computeKnownBitsImpl(MI.getOperand(2).getReg(), RHSKnown, DemandedElts,
441 Depth + 1);
442 Known = KnownBits::lshr(LHSKnown, RHSKnown);
443 break;
444 }
445 case TargetOpcode::G_SHL: {
446 KnownBits LHSKnown, RHSKnown;
447 computeKnownBitsImpl(MI.getOperand(1).getReg(), LHSKnown, DemandedElts,
448 Depth + 1);
449 computeKnownBitsImpl(MI.getOperand(2).getReg(), RHSKnown, DemandedElts,
450 Depth + 1);
451 Known = KnownBits::shl(LHSKnown, RHSKnown);
452 break;
453 }
454 case TargetOpcode::G_INTTOPTR:
455 case TargetOpcode::G_PTRTOINT:
456 if (DstTy.isVector())
457 break;
458 // Fall through and handle them the same as zext/trunc.
459 [[fallthrough]];
460 case TargetOpcode::G_ASSERT_ZEXT:
461 case TargetOpcode::G_ZEXT:
462 case TargetOpcode::G_TRUNC: {
463 Register SrcReg = MI.getOperand(1).getReg();
464 LLT SrcTy = MRI.getType(SrcReg);
465 unsigned SrcBitWidth;
466
467 // G_ASSERT_ZEXT stores the original bitwidth in the immediate operand.
468 if (Opcode == TargetOpcode::G_ASSERT_ZEXT)
469 SrcBitWidth = MI.getOperand(2).getImm();
470 else {
471 SrcBitWidth = SrcTy.isPointer()
473 : SrcTy.getSizeInBits();
474 }
475 assert(SrcBitWidth && "SrcBitWidth can't be zero");
476 Known = Known.zextOrTrunc(SrcBitWidth);
477 computeKnownBitsImpl(SrcReg, Known, DemandedElts, Depth + 1);
478 Known = Known.zextOrTrunc(BitWidth);
479 if (BitWidth > SrcBitWidth)
480 Known.Zero.setBitsFrom(SrcBitWidth);
481 break;
482 }
483 case TargetOpcode::G_ASSERT_ALIGN: {
484 int64_t LogOfAlign = Log2_64(MI.getOperand(2).getImm());
485
486 // TODO: Should use maximum with source
487 // If a node is guaranteed to be aligned, set low zero bits accordingly as
488 // well as clearing one bits.
489 Known.Zero.setLowBits(LogOfAlign);
490 Known.One.clearLowBits(LogOfAlign);
491 break;
492 }
493 case TargetOpcode::G_MERGE_VALUES: {
494 unsigned NumOps = MI.getNumOperands();
495 unsigned OpSize = MRI.getType(MI.getOperand(1).getReg()).getSizeInBits();
496
497 for (unsigned I = 0; I != NumOps - 1; ++I) {
498 KnownBits SrcOpKnown;
499 computeKnownBitsImpl(MI.getOperand(I + 1).getReg(), SrcOpKnown,
500 DemandedElts, Depth + 1);
501 Known.insertBits(SrcOpKnown, I * OpSize);
502 }
503 break;
504 }
505 case TargetOpcode::G_UNMERGE_VALUES: {
506 if (DstTy.isVector())
507 break;
508 unsigned NumOps = MI.getNumOperands();
509 Register SrcReg = MI.getOperand(NumOps - 1).getReg();
510 if (MRI.getType(SrcReg).isVector())
511 return; // TODO: Handle vectors.
512
513 KnownBits SrcOpKnown;
514 computeKnownBitsImpl(SrcReg, SrcOpKnown, DemandedElts, Depth + 1);
515
516 // Figure out the result operand index
517 unsigned DstIdx = 0;
518 for (; DstIdx != NumOps - 1 && MI.getOperand(DstIdx).getReg() != R;
519 ++DstIdx)
520 ;
521
522 Known = SrcOpKnown.extractBits(BitWidth, BitWidth * DstIdx);
523 break;
524 }
525 case TargetOpcode::G_BSWAP: {
526 Register SrcReg = MI.getOperand(1).getReg();
527 computeKnownBitsImpl(SrcReg, Known, DemandedElts, Depth + 1);
528 Known = Known.byteSwap();
529 break;
530 }
531 case TargetOpcode::G_BITREVERSE: {
532 Register SrcReg = MI.getOperand(1).getReg();
533 computeKnownBitsImpl(SrcReg, Known, DemandedElts, Depth + 1);
534 Known = Known.reverseBits();
535 break;
536 }
537 case TargetOpcode::G_CTPOP: {
538 computeKnownBitsImpl(MI.getOperand(1).getReg(), Known2, DemandedElts,
539 Depth + 1);
540 // We can bound the space the count needs. Also, bits known to be zero can't
541 // contribute to the population.
542 unsigned BitsPossiblySet = Known2.countMaxPopulation();
543 unsigned LowBits = llvm::bit_width(BitsPossiblySet);
544 Known.Zero.setBitsFrom(LowBits);
545 // TODO: we could bound Known.One using the lower bound on the number of
546 // bits which might be set provided by popcnt KnownOne2.
547 break;
548 }
549 case TargetOpcode::G_UBFX: {
550 KnownBits SrcOpKnown, OffsetKnown, WidthKnown;
551 computeKnownBitsImpl(MI.getOperand(1).getReg(), SrcOpKnown, DemandedElts,
552 Depth + 1);
553 computeKnownBitsImpl(MI.getOperand(2).getReg(), OffsetKnown, DemandedElts,
554 Depth + 1);
555 computeKnownBitsImpl(MI.getOperand(3).getReg(), WidthKnown, DemandedElts,
556 Depth + 1);
557 Known = extractBits(BitWidth, SrcOpKnown, OffsetKnown, WidthKnown);
558 break;
559 }
560 case TargetOpcode::G_SBFX: {
561 KnownBits SrcOpKnown, OffsetKnown, WidthKnown;
562 computeKnownBitsImpl(MI.getOperand(1).getReg(), SrcOpKnown, DemandedElts,
563 Depth + 1);
564 computeKnownBitsImpl(MI.getOperand(2).getReg(), OffsetKnown, DemandedElts,
565 Depth + 1);
566 computeKnownBitsImpl(MI.getOperand(3).getReg(), WidthKnown, DemandedElts,
567 Depth + 1);
568 Known = extractBits(BitWidth, SrcOpKnown, OffsetKnown, WidthKnown);
569 // Sign extend the extracted value using shift left and arithmetic shift
570 // right.
572 KnownBits ShiftKnown = KnownBits::sub(ExtKnown, WidthKnown);
573 Known = KnownBits::ashr(KnownBits::shl(Known, ShiftKnown), ShiftKnown);
574 break;
575 }
576 case TargetOpcode::G_UADDO:
577 case TargetOpcode::G_UADDE:
578 case TargetOpcode::G_SADDO:
579 case TargetOpcode::G_SADDE:
580 case TargetOpcode::G_USUBO:
581 case TargetOpcode::G_USUBE:
582 case TargetOpcode::G_SSUBO:
583 case TargetOpcode::G_SSUBE:
584 case TargetOpcode::G_UMULO:
585 case TargetOpcode::G_SMULO: {
586 if (MI.getOperand(1).getReg() == R) {
587 // If we know the result of a compare has the top bits zero, use this
588 // info.
589 if (TL.getBooleanContents(DstTy.isVector(), false) ==
591 BitWidth > 1)
592 Known.Zero.setBitsFrom(1);
593 }
594 break;
595 }
596 case TargetOpcode::G_CTLZ:
597 case TargetOpcode::G_CTLZ_ZERO_UNDEF: {
598 KnownBits SrcOpKnown;
599 computeKnownBitsImpl(MI.getOperand(1).getReg(), SrcOpKnown, DemandedElts,
600 Depth + 1);
601 // If we have a known 1, its position is our upper bound.
602 unsigned PossibleLZ = SrcOpKnown.countMaxLeadingZeros();
603 unsigned LowBits = llvm::bit_width(PossibleLZ);
604 Known.Zero.setBitsFrom(LowBits);
605 break;
606 }
607 }
608
609 LLVM_DEBUG(dumpResult(MI, Known, Depth));
610
611 // Update the cache.
612 ComputeKnownBitsCache[R] = Known;
613}
614
615/// Compute number of sign bits for the intersection of \p Src0 and \p Src1
616unsigned GISelKnownBits::computeNumSignBitsMin(Register Src0, Register Src1,
617 const APInt &DemandedElts,
618 unsigned Depth) {
619 // Test src1 first, since we canonicalize simpler expressions to the RHS.
620 unsigned Src1SignBits = computeNumSignBits(Src1, DemandedElts, Depth);
621 if (Src1SignBits == 1)
622 return 1;
623 return std::min(computeNumSignBits(Src0, DemandedElts, Depth), Src1SignBits);
624}
625
626/// Compute the known number of sign bits with attached range metadata in the
627/// memory operand. If this is an extending load, accounts for the behavior of
628/// the high bits.
630 unsigned TyBits) {
631 const MDNode *Ranges = Ld->getRanges();
632 if (!Ranges)
633 return 1;
634
636 if (TyBits > CR.getBitWidth()) {
637 switch (Ld->getOpcode()) {
638 case TargetOpcode::G_SEXTLOAD:
639 CR = CR.signExtend(TyBits);
640 break;
641 case TargetOpcode::G_ZEXTLOAD:
642 CR = CR.zeroExtend(TyBits);
643 break;
644 default:
645 break;
646 }
647 }
648
649 return std::min(CR.getSignedMin().getNumSignBits(),
651}
652
654 const APInt &DemandedElts,
655 unsigned Depth) {
656 MachineInstr &MI = *MRI.getVRegDef(R);
657 unsigned Opcode = MI.getOpcode();
658
659 if (Opcode == TargetOpcode::G_CONSTANT)
660 return MI.getOperand(1).getCImm()->getValue().getNumSignBits();
661
662 if (Depth == getMaxDepth())
663 return 1;
664
665 if (!DemandedElts)
666 return 1; // No demanded elts, better to assume we don't know anything.
667
668 LLT DstTy = MRI.getType(R);
669 const unsigned TyBits = DstTy.getScalarSizeInBits();
670
671 // Handle the case where this is called on a register that does not have a
672 // type constraint. This is unlikely to occur except by looking through copies
673 // but it is possible for the initial register being queried to be in this
674 // state.
675 if (!DstTy.isValid())
676 return 1;
677
678 unsigned FirstAnswer = 1;
679 switch (Opcode) {
680 case TargetOpcode::COPY: {
681 MachineOperand &Src = MI.getOperand(1);
682 if (Src.getReg().isVirtual() && Src.getSubReg() == 0 &&
683 MRI.getType(Src.getReg()).isValid()) {
684 // Don't increment Depth for this one since we didn't do any work.
685 return computeNumSignBits(Src.getReg(), DemandedElts, Depth);
686 }
687
688 return 1;
689 }
690 case TargetOpcode::G_SEXT: {
691 Register Src = MI.getOperand(1).getReg();
692 LLT SrcTy = MRI.getType(Src);
693 unsigned Tmp = DstTy.getScalarSizeInBits() - SrcTy.getScalarSizeInBits();
694 return computeNumSignBits(Src, DemandedElts, Depth + 1) + Tmp;
695 }
696 case TargetOpcode::G_ASSERT_SEXT:
697 case TargetOpcode::G_SEXT_INREG: {
698 // Max of the input and what this extends.
699 Register Src = MI.getOperand(1).getReg();
700 unsigned SrcBits = MI.getOperand(2).getImm();
701 unsigned InRegBits = TyBits - SrcBits + 1;
702 return std::max(computeNumSignBits(Src, DemandedElts, Depth + 1), InRegBits);
703 }
704 case TargetOpcode::G_LOAD: {
705 GLoad *Ld = cast<GLoad>(&MI);
706 if (DemandedElts != 1 || !getDataLayout().isLittleEndian())
707 break;
708
709 return computeNumSignBitsFromRangeMetadata(Ld, TyBits);
710 }
711 case TargetOpcode::G_SEXTLOAD: {
712 GSExtLoad *Ld = cast<GSExtLoad>(&MI);
713
714 // FIXME: We need an in-memory type representation.
715 if (DstTy.isVector())
716 return 1;
717
718 unsigned NumBits = computeNumSignBitsFromRangeMetadata(Ld, TyBits);
719 if (NumBits != 1)
720 return NumBits;
721
722 // e.g. i16->i32 = '17' bits known.
723 const MachineMemOperand *MMO = *MI.memoperands_begin();
724 return TyBits - MMO->getSizeInBits().getValue() + 1;
725 }
726 case TargetOpcode::G_ZEXTLOAD: {
727 GZExtLoad *Ld = cast<GZExtLoad>(&MI);
728
729 // FIXME: We need an in-memory type representation.
730 if (DstTy.isVector())
731 return 1;
732
733 unsigned NumBits = computeNumSignBitsFromRangeMetadata(Ld, TyBits);
734 if (NumBits != 1)
735 return NumBits;
736
737 // e.g. i16->i32 = '16' bits known.
738 const MachineMemOperand *MMO = *MI.memoperands_begin();
739 return TyBits - MMO->getSizeInBits().getValue();
740 }
741 case TargetOpcode::G_AND:
742 case TargetOpcode::G_OR:
743 case TargetOpcode::G_XOR: {
744 Register Src1 = MI.getOperand(1).getReg();
745 unsigned Src1NumSignBits =
746 computeNumSignBits(Src1, DemandedElts, Depth + 1);
747 if (Src1NumSignBits != 1) {
748 Register Src2 = MI.getOperand(2).getReg();
749 unsigned Src2NumSignBits =
750 computeNumSignBits(Src2, DemandedElts, Depth + 1);
751 FirstAnswer = std::min(Src1NumSignBits, Src2NumSignBits);
752 }
753 break;
754 }
755 case TargetOpcode::G_TRUNC: {
756 Register Src = MI.getOperand(1).getReg();
757 LLT SrcTy = MRI.getType(Src);
758
759 // Check if the sign bits of source go down as far as the truncated value.
760 unsigned DstTyBits = DstTy.getScalarSizeInBits();
761 unsigned NumSrcBits = SrcTy.getScalarSizeInBits();
762 unsigned NumSrcSignBits = computeNumSignBits(Src, DemandedElts, Depth + 1);
763 if (NumSrcSignBits > (NumSrcBits - DstTyBits))
764 return NumSrcSignBits - (NumSrcBits - DstTyBits);
765 break;
766 }
767 case TargetOpcode::G_SELECT: {
768 return computeNumSignBitsMin(MI.getOperand(2).getReg(),
769 MI.getOperand(3).getReg(), DemandedElts,
770 Depth + 1);
771 }
772 case TargetOpcode::G_SADDO:
773 case TargetOpcode::G_SADDE:
774 case TargetOpcode::G_UADDO:
775 case TargetOpcode::G_UADDE:
776 case TargetOpcode::G_SSUBO:
777 case TargetOpcode::G_SSUBE:
778 case TargetOpcode::G_USUBO:
779 case TargetOpcode::G_USUBE:
780 case TargetOpcode::G_SMULO:
781 case TargetOpcode::G_UMULO: {
782 // If compares returns 0/-1, all bits are sign bits.
783 // We know that we have an integer-based boolean since these operations
784 // are only available for integer.
785 if (MI.getOperand(1).getReg() == R) {
786 if (TL.getBooleanContents(DstTy.isVector(), false) ==
788 return TyBits;
789 }
790
791 break;
792 }
793 case TargetOpcode::G_FCMP:
794 case TargetOpcode::G_ICMP: {
795 bool IsFP = Opcode == TargetOpcode::G_FCMP;
796 if (TyBits == 1)
797 break;
798 auto BC = TL.getBooleanContents(DstTy.isVector(), IsFP);
800 return TyBits; // All bits are sign bits.
802 return TyBits - 1; // Every always-zero bit is a sign bit.
803 break;
804 }
805 case TargetOpcode::G_INTRINSIC:
806 case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
807 case TargetOpcode::G_INTRINSIC_CONVERGENT:
808 case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS:
809 default: {
810 unsigned NumBits =
811 TL.computeNumSignBitsForTargetInstr(*this, R, DemandedElts, MRI, Depth);
812 if (NumBits > 1)
813 FirstAnswer = std::max(FirstAnswer, NumBits);
814 break;
815 }
816 }
817
818 // Finally, if we can prove that the top bits of the result are 0's or 1's,
819 // use this information.
820 KnownBits Known = getKnownBits(R, DemandedElts, Depth);
821 APInt Mask;
822 if (Known.isNonNegative()) { // sign bit is 0
823 Mask = Known.Zero;
824 } else if (Known.isNegative()) { // sign bit is 1;
825 Mask = Known.One;
826 } else {
827 // Nothing known.
828 return FirstAnswer;
829 }
830
831 // Okay, we know that the sign bit in Mask is set. Use CLO to determine
832 // the number of identical bits in the top of the input value.
833 Mask <<= Mask.getBitWidth() - TyBits;
834 return std::max(FirstAnswer, Mask.countl_one());
835}
836
838 LLT Ty = MRI.getType(R);
839 APInt DemandedElts =
840 Ty.isVector() ? APInt::getAllOnes(Ty.getNumElements()) : APInt(1, 1);
841 return computeNumSignBits(R, DemandedElts, Depth);
842}
843
845 AU.setPreservesAll();
847}
848
850 return false;
851}
852
854 if (!Info) {
855 unsigned MaxDepth =
857 Info = std::make_unique<GISelKnownBits>(MF, MaxDepth);
858 }
859 return *Info;
860}
unsigned const MachineRegisterInfo * MRI
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
#define LLVM_ATTRIBUTE_UNUSED
Definition: Compiler.h:199
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
#define LLVM_DEBUG(X)
Definition: Debug.h:101
static Function * getFunction(Constant *C)
Definition: Evaluator.cpp:236
static KnownBits extractBits(unsigned BitWidth, const KnownBits &SrcOpKnown, const KnownBits &OffsetKnown, const KnownBits &WidthKnown)
#define DEBUG_TYPE
Provides analysis for querying information about KnownBits during GISel passes.
static unsigned computeNumSignBitsFromRangeMetadata(const GAnyLoad *Ld, unsigned TyBits)
Compute the known number of sign bits with attached range metadata in the memory operand.
static LLVM_ATTRIBUTE_UNUSED void dumpResult(const MachineInstr &MI, const KnownBits &Known, unsigned Depth)
Provides analysis for querying information about KnownBits during GISel passes.
Declares convenience wrapper classes for interpreting MachineInstr instances as specific generic oper...
IRTranslator LLVM IR MI
static const unsigned MaxDepth
#define I(x, y, z)
Definition: MD5.cpp:58
static unsigned getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
Module.h This file contains the declarations for the Module class.
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
Definition: PassSupport.h:38
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file contains some functions that are useful when dealing with strings.
This file describes how to lower LLVM code to machine code.
Class for arbitrary precision integers.
Definition: APInt.h:78
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
Definition: APInt.h:212
static APInt getSignMask(unsigned BitWidth)
Get the SignMask for a specific bit width.
Definition: APInt.h:207
void setBitsFrom(unsigned loBit)
Set the top bits starting from loBit.
Definition: APInt.h:1364
unsigned getNumSignBits() const
Computes the number of leading bits of this APInt that are equal to its sign bit.
Definition: APInt.h:1585
void clearLowBits(unsigned loBits)
Set bottom loBits bits to 0.
Definition: APInt.h:1395
uint64_t getLimitedValue(uint64_t Limit=UINT64_MAX) const
If this value is smaller than the specified limit, return it, otherwise return the limit value.
Definition: APInt.h:453
void setAllBits()
Set every bit to 1.
Definition: APInt.h:1297
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
Definition: APInt.h:284
void setLowBits(unsigned loBits)
Set the bottom loBits bits.
Definition: APInt.h:1367
static APInt getBitsSetFrom(unsigned numBits, unsigned loBit)
Constructs an APInt value that has a contiguous range of bits set.
Definition: APInt.h:264
Represent the analysis usage information of a pass.
void setPreservesAll()
Set by analyses that do not transform their input at all.
This class represents a range of values.
Definition: ConstantRange.h:47
ConstantRange zeroExtend(uint32_t BitWidth) const
Return a new range in the specified integer type, which must be strictly larger than the current type...
APInt getSignedMin() const
Return the smallest signed value contained in the ConstantRange.
ConstantRange signExtend(uint32_t BitWidth) const
Return a new range in the specified integer type, which must be strictly larger than the current type...
APInt getSignedMax() const
Return the largest signed value contained in the ConstantRange.
uint32_t getBitWidth() const
Get the bit width of this ConstantRange.
bool isNonIntegralAddressSpace(unsigned AddrSpace) const
Definition: DataLayout.h:350
unsigned getIndexSizeInBits(unsigned AS) const
Size in bits of index used for address calculation in getelementptr.
Definition: DataLayout.h:377
Represents any generic load, including sign/zero extending variants.
const MDNode * getRanges() const
Returns the Ranges that describes the dereference.
To use KnownBitsInfo analysis in a pass, KnownBitsInfo &Info = getAnalysis<GISelKnownBitsInfoAnalysis...
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - This function should be overridden by passes that need analysis information to do t...
GISelKnownBits & get(MachineFunction &MF)
bool runOnMachineFunction(MachineFunction &MF) override
runOnMachineFunction - This method must be overloaded to perform the desired machine code transformat...
const DataLayout & getDataLayout() const
virtual void computeKnownBitsImpl(Register R, KnownBits &Known, const APInt &DemandedElts, unsigned Depth=0)
Align computeKnownAlignment(Register R, unsigned Depth=0)
APInt getKnownOnes(Register R)
unsigned computeNumSignBits(Register R, const APInt &DemandedElts, unsigned Depth=0)
KnownBits getKnownBits(Register R)
bool maskedValueIsZero(Register Val, const APInt &Mask)
unsigned getMaxDepth() const
bool signBitIsZero(Register Op)
APInt getKnownZeroes(Register R)
Represents a G_LOAD.
Represents a G_SEXTLOAD.
Represents a G_ZEXTLOAD.
constexpr unsigned getScalarSizeInBits() const
Definition: LowLevelType.h:267
constexpr bool isValid() const
Definition: LowLevelType.h:145
constexpr uint16_t getNumElements() const
Returns the number of elements in a vector LLT.
Definition: LowLevelType.h:159
constexpr bool isVector() const
Definition: LowLevelType.h:148
constexpr TypeSize getSizeInBits() const
Returns the total size of the type. Must only be called on sized types.
Definition: LowLevelType.h:193
constexpr bool isPointer() const
Definition: LowLevelType.h:149
constexpr unsigned getAddressSpace() const
Definition: LowLevelType.h:280
constexpr bool isFixedVector() const
Returns true if the LLT is a fixed vector.
Definition: LowLevelType.h:178
TypeSize getValue() const
Metadata node.
Definition: Metadata.h:1069
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - Subclasses that override getAnalysisUsage must call this.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
const LLVMTargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
Representation of each machine instruction.
Definition: MachineInstr.h:69
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
Definition: MachineInstr.h:569
A description of a memory reference used in the backend.
LLT getMemoryType() const
Return the memory type of the memory reference.
const MDNode * getRanges() const
Return the range tag for the memory reference.
LocationSize getSizeInBits() const
Return the size in bits of the memory reference.
MachineOperand class - Representation of each machine instruction operand.
MachineInstr * getVRegDef(Register Reg) const
getVRegDef - Return the machine instr that defines the specified virtual register or null if none is ...
LLT getType(Register Reg) const
Get the low-level type of Reg or LLT{} if Reg is not a generic (target independent) virtual register.
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
Definition: Register.h:91
BooleanContent getBooleanContents(bool isVec, bool isFloat) const
For targets without i1 registers, this gives the nature of the high-bits of boolean values held in ty...
virtual void computeKnownBitsForFrameIndex(int FIOp, KnownBits &Known, const MachineFunction &MF) const
Determine which of the bits of FrameIndex FIOp are known to be 0.
virtual unsigned computeNumSignBitsForTargetInstr(GISelKnownBits &Analysis, Register R, const APInt &DemandedElts, const MachineRegisterInfo &MRI, unsigned Depth=0) const
This method can be implemented by targets that want to expose additional information about sign bits ...
virtual Align computeKnownAlignForTargetInstr(GISelKnownBits &Analysis, Register R, const MachineRegisterInfo &MRI, unsigned Depth=0) const
Determine the known alignment for the pointer value R.
virtual void computeKnownBitsForTargetInstr(GISelKnownBits &Analysis, Register R, KnownBits &Known, const APInt &DemandedElts, const MachineRegisterInfo &MRI, unsigned Depth=0) const
Determine which of the bits specified in Mask are known to be either zero or one and return them in t...
CodeGenOptLevel getOptLevel() const
Returns the optimization level: None, Less, Default, or Aggressive.
std::optional< const char * > toString(const std::optional< DWARFFormValue > &V)
Take an optional DWARFFormValue and try to extract a string value from it.
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
int bit_width(T Value)
Returns the number of bits needed to represent Value if Value is nonzero.
Definition: bit.h:317
unsigned Log2_64(uint64_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
Definition: MathExtras.h:346
ConstantRange getConstantRangeFromMetadata(const MDNode &RangeMD)
Parse out a conservative ConstantRange from !range metadata.
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
constexpr unsigned BitWidth
Definition: BitmaskEnum.h:191
void computeKnownBitsFromRangeMetadata(const MDNode &Ranges, KnownBits &Known)
Compute known bits from the range metadata.
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
static KnownBits makeConstant(const APInt &C)
Create known bits from a known constant.
Definition: KnownBits.h:290
KnownBits sextInReg(unsigned SrcBitWidth) const
Return known bits for a in-register sign extension of the value we're tracking.
Definition: KnownBits.cpp:149
static KnownBits smax(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for smax(LHS, RHS).
Definition: KnownBits.cpp:202
bool isNonNegative() const
Returns true if this value is known to be non-negative.
Definition: KnownBits.h:97
static KnownBits ashr(const KnownBits &LHS, const KnownBits &RHS, bool ShAmtNonZero=false, bool Exact=false)
Compute known bits for ashr(LHS, RHS).
Definition: KnownBits.cpp:428
bool isUnknown() const
Returns true if we don't know any bits.
Definition: KnownBits.h:62
KnownBits byteSwap() const
Definition: KnownBits.h:468
unsigned countMaxPopulation() const
Returns the maximum number of bits that could be one.
Definition: KnownBits.h:278
KnownBits reverseBits() const
Definition: KnownBits.h:472
unsigned getBitWidth() const
Get the bit width of this value.
Definition: KnownBits.h:40
static KnownBits umax(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for umax(LHS, RHS).
Definition: KnownBits.cpp:178
KnownBits zext(unsigned BitWidth) const
Return known bits for a zero extension of the value we're tracking.
Definition: KnownBits.h:161
static KnownBits lshr(const KnownBits &LHS, const KnownBits &RHS, bool ShAmtNonZero=false, bool Exact=false)
Compute known bits for lshr(LHS, RHS).
Definition: KnownBits.cpp:370
KnownBits extractBits(unsigned NumBits, unsigned BitPosition) const
Return a subset of the known bits from [bitPosition,bitPosition+numBits).
Definition: KnownBits.h:214
KnownBits intersectWith(const KnownBits &RHS) const
Returns KnownBits information that is known to be true for both this and RHS.
Definition: KnownBits.h:300
KnownBits sext(unsigned BitWidth) const
Return known bits for a sign extension of the value we're tracking.
Definition: KnownBits.h:169
static KnownBits add(const KnownBits &LHS, const KnownBits &RHS, bool NSW=false, bool NUW=false)
Compute knownbits resulting from addition of LHS and RHS.
Definition: KnownBits.h:333
KnownBits zextOrTrunc(unsigned BitWidth) const
Return known bits for a zero extension or truncation of the value we're tracking.
Definition: KnownBits.h:185
APInt getMaxValue() const
Return the maximal unsigned value possible given these KnownBits.
Definition: KnownBits.h:134
static KnownBits smin(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for smin(LHS, RHS).
Definition: KnownBits.cpp:215
APInt getMinValue() const
Return the minimal unsigned value possible given these KnownBits.
Definition: KnownBits.h:118
bool isNegative() const
Returns true if this value is known to be negative.
Definition: KnownBits.h:94
static KnownBits sub(const KnownBits &LHS, const KnownBits &RHS, bool NSW=false, bool NUW=false)
Compute knownbits resulting from subtraction of LHS and RHS.
Definition: KnownBits.h:339
unsigned countMaxLeadingZeros() const
Returns the maximum number of leading zero bits possible.
Definition: KnownBits.h:269
void insertBits(const KnownBits &SubBits, unsigned BitPosition)
Insert the bits from a smaller known bits starting at bitPosition.
Definition: KnownBits.h:208
static KnownBits mul(const KnownBits &LHS, const KnownBits &RHS, bool NoUndefSelfMultiply=false)
Compute known bits resulting from multiplying LHS and RHS.
Definition: KnownBits.cpp:797
KnownBits anyext(unsigned BitWidth) const
Return known bits for an "any" extension of the value we're tracking, where we don't know anything ab...
Definition: KnownBits.h:156
static KnownBits shl(const KnownBits &LHS, const KnownBits &RHS, bool NUW=false, bool NSW=false, bool ShAmtNonZero=false)
Compute known bits for shl(LHS, RHS).
Definition: KnownBits.cpp:285
static KnownBits umin(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for umin(LHS, RHS).
Definition: KnownBits.cpp:196