/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/AlignmentMaskAnalysis.h"
#include "jit/MIR.h"
#include "jit/MIRGraph.h"

using namespace js;
using namespace jit;

static bool IsAlignmentMask(uint32_t m) {
  // Test whether m is just leading ones and trailing zeros.
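  // -m equals ~m + 1. When m is of the form 111...1000...0, ~m is a block
  // of trailing ones, and the +1 carries through all of them, so -m and ~m
  // have no set bits in common. Any other m leaves a set bit of ~m
  // untouched by the carry. E.g. m = 0xfffffff0: ~m = 0xf, -m = 0x10,
  // so (-m & ~m) == 0; but m = 0xffffff0f: ~m = 0xf0, -m = 0xf1,
  // so (-m & ~m) == 0xf0.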
  return (-m & ~m) == 0;
}

static void AnalyzeAsmHeapAddress(MDefinition* ptr, MIRGraph& graph) {
  // Fold (a+i)&m to (a&m)+i, provided that this doesn't change the result,
  // since the users of the BitAnd include heap accesses. This will expose
  // the redundancy for GVN when expressions like this:
  //   a&m
  //   (a+1)&m,
  //   (a+2)&m,
  // are transformed into this:
  //   a&m
  //   (a&m)+1
  //   (a&m)+2
  // and it will allow the constants to be folded by the
  // EffectiveAddressAnalysis pass.
  //
  // Putting the add on the outside might seem like it exposes other users of
  // the expression to the possibility of i32 overflow, if we aren't in wasm
  // and they aren't naturally truncating. However, since we use MAdd::New
  // with MIRType::Int32, we make sure that the value is truncated, just as it
  // would be by the MBitAnd.
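  //
  // For example, with m = 0xfffffff8 (an 8-byte alignment mask) and i = 8,
  // the condition (i & m) == i checked below holds, and (a+8)&m equals
  // (a&m)+8 for any a: the low bits masked out of a cannot carry into the
  // aligned part of the sum.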

  MOZ_ASSERT(IsCompilingWasm());

  if (!ptr->isBitAnd()) {
    return;
  }

  MDefinition* lhs = ptr->toBitAnd()->getOperand(0);
  MDefinition* rhs = ptr->toBitAnd()->getOperand(1);
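  // Canonicalize: if the BitAnd has a constant operand, move it to the rhs.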
  if (lhs->isConstant()) {
    mozilla::Swap(lhs, rhs);
  }
  if (!lhs->isAdd() || !rhs->isConstant()) {
    return;
  }

  MDefinition* op0 = lhs->toAdd()->getOperand(0);
  MDefinition* op1 = lhs->toAdd()->getOperand(1);
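  // Likewise canonicalize the add so that any constant operand is op1.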
  if (op0->isConstant()) {
    mozilla::Swap(op0, op1);
  }
  if (!op1->isConstant()) {
    return;
  }

  uint32_t i = op1->toConstant()->toInt32();
  uint32_t m = rhs->toConstant()->toInt32();
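  // The fold is only sound if masking cannot change i itself: require that
  // m be an alignment mask and that i have no bits in m's cleared positions.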
  if (!IsAlignmentMask(m) || (i & m) != i) {
    return;
  }

  // The pattern was matched! Produce the replacement expression.
  MInstruction* and_ = MBitAnd::New(graph.alloc(), op0, rhs, MIRType::Int32);
  ptr->block()->insertBefore(ptr->toBitAnd(), and_);
  MInstruction* add = MAdd::New(graph.alloc(), and_, op1, MIRType::Int32);
  ptr->block()->insertBefore(ptr->toBitAnd(), add);
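  // Redirect every use of the original (a+i)&m to the new (a&m)+i, then
  // drop the now-dead BitAnd.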
  ptr->replaceAllUsesWith(add);
  ptr->block()->discard(ptr->toBitAnd());
}

bool AlignmentMaskAnalysis::analyze() {
  for (ReversePostorderIterator block(graph_.rpoBegin());
       block != graph_.rpoEnd(); block++) {
    for (MInstructionIterator i = block->begin(); i != block->end(); i++) {
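      // Reserve ballast in the temp allocator so that the MIR nodes created
      // by AnalyzeAsmHeapAddress below can be allocated infallibly.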
      if (!graph_.alloc().ensureBallast()) {
        return false;
      }

      // Note that we don't check for MWasmCompareExchangeHeap
      // or MWasmAtomicBinopHeap, because the backend and the OOB
      // mechanism don't support non-zero offsets for them yet.
      if (i->isAsmJSLoadHeap()) {
        AnalyzeAsmHeapAddress(i->toAsmJSLoadHeap()->base(), graph_);
      } else if (i->isAsmJSStoreHeap()) {
        AnalyzeAsmHeapAddress(i->toAsmJSStoreHeap()->base(), graph_);
      }
    }
  }
  return true;
}