js/src/jit/EffectiveAddressAnalysis.cpp

author       Michael Schloh von Bennewitz <michael@schloh.com>
date         Sat, 03 Jan 2015 20:18:00 +0100
branch       TOR_BUG_3246
changeset    7:129ffea94266
permissions  -rw-r--r--

Conditionally enable double-key logic based on private browsing mode or the
privacy.thirdparty.isolate preference, and implement it in
GetCookieStringCommon and FindCookie where it counts...
With some reservations about how to convince FindCookie callers to test the
condition and pass a nullptr when double-key logic is disabled.

/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/EffectiveAddressAnalysis.h"
#include "jit/MIR.h"
#include "jit/MIRGraph.h"

using namespace js;
using namespace jit;

static void
AnalyzeLsh(TempAllocator &alloc, MLsh *lsh)
{
    if (lsh->specialization() != MIRType_Int32)
        return;

    MDefinition *index = lsh->lhs();
    JS_ASSERT(index->type() == MIRType_Int32);

    MDefinition *shift = lsh->rhs();
    if (!shift->isConstant())
        return;

    Value shiftValue = shift->toConstant()->value();
    if (!shiftValue.isInt32() || !IsShiftInScaleRange(shiftValue.toInt32()))
        return;

    Scale scale = ShiftToScale(shiftValue.toInt32());

    // Walk the chain of single-use, truncated int32 adds fed by the shift,
    // folding constants into a displacement and allowing at most one
    // non-constant base operand.
    int32_t displacement = 0;
    MInstruction *last = lsh;
    MDefinition *base = nullptr;
    while (true) {
        if (!last->hasOneUse())
            break;

        MUseIterator use = last->usesBegin();
        if (!use->consumer()->isDefinition() || !use->consumer()->toDefinition()->isAdd())
            break;

        MAdd *add = use->consumer()->toDefinition()->toAdd();
        if (add->specialization() != MIRType_Int32 || !add->isTruncated())
            break;

        MDefinition *other = add->getOperand(1 - use->index());

        if (other->isConstant()) {
            displacement += other->toConstant()->value().toInt32();
        } else {
            if (base)
                break;
            base = other;
        }

        last = add;
    }

    if (!base) {
        // No non-constant base was found. If the shifted value (plus an
        // element-aligned displacement) is only consumed by a BitAnd whose
        // mask clears nothing beyond the bits the shift already zeroes,
        // the BitAnd is redundant and can be eliminated.
        uint32_t elemSize = 1 << ScaleToShift(scale);
        if (displacement % elemSize != 0)
            return;

        if (!last->hasOneUse())
            return;

        MUseIterator use = last->usesBegin();
        if (!use->consumer()->isDefinition() || !use->consumer()->toDefinition()->isBitAnd())
            return;

        MBitAnd *bitAnd = use->consumer()->toDefinition()->toBitAnd();
        MDefinition *other = bitAnd->getOperand(1 - use->index());
        if (!other->isConstant() || !other->toConstant()->value().isInt32())
            return;

        uint32_t bitsClearedByShift = elemSize - 1;
        uint32_t bitsClearedByMask = ~uint32_t(other->toConstant()->value().toInt32());
        if ((bitsClearedByShift & bitsClearedByMask) != bitsClearedByMask)
            return;

        bitAnd->replaceAllUsesWith(last);
        return;
    }

    // Fold the whole base + (index << shift) + displacement chain into a
    // single effective-address computation.
    MEffectiveAddress *eaddr = MEffectiveAddress::New(alloc, base, index, scale, displacement);
    last->replaceAllUsesWith(eaddr);
    last->block()->insertAfter(last, eaddr);
}

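// Worked example of the mask check above (illustrative, not part of the
// original source): a shift of 2 gives elemSize == 4, so bitsClearedByShift
// is 0x3. A mask of 0xFFFFFFFC clears only bits 0-1 (bitsClearedByMask ==
// 0x3), and 0x3 & 0x3 == 0x3, so that BitAnd is dropped. A mask of
// 0xFFFFFFF8 also clears bit 2 (bitsClearedByMask == 0x7); since
// 0x3 & 0x7 != 0x7, that BitAnd is kept.
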
// This analysis converts patterns of the form:
//   truncate(x + (y << {0,1,2,3}))
//   truncate(x + (y << {0,1,2,3}) + imm32)
// into a single lea instruction, and patterns of the form:
//   asmload(x + imm32)
//   asmload(x << {0,1,2,3})
//   asmload((x << {0,1,2,3}) + imm32)
//   asmload((x << {0,1,2,3}) & mask)            (where mask is redundant with shift)
//   asmload(((x << {0,1,2,3}) + imm32) & mask)  (where mask is redundant with shift + imm32)
// into a single asmload instruction (and for asmstore too).
//
// Additionally, we should consider the general forms:
//   truncate(x + y + imm32)
//   truncate((y << {0,1,2,3}) + imm32)
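//
// For example (illustrative only), with a scale of 4 the pattern
//   truncate(base + (index << 2) + 16)
// corresponds to a single x86 addressing mode such as
//   lea dst, [base + index*4 + 16]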
bool
EffectiveAddressAnalysis::analyze()
{
    for (ReversePostorderIterator block(graph_.rpoBegin()); block != graph_.rpoEnd(); block++) {
        for (MInstructionIterator i = block->begin(); i != block->end(); i++) {
            if (i->isLsh())
                AnalyzeLsh(graph_.alloc(), i->toLsh());
        }
    }
    return true;
}
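
For context only: a pass like this is driven from Ion's compilation pipeline
rather than called directly. The sketch below shows one plausible call site,
assuming a constructor that takes just the MIRGraph (as the graph_ member
used above suggests); the wrapper name is hypothetical and not part of this
changeset.

    // Hypothetical wrapper; only EffectiveAddressAnalysis, MIRGraph and
    // analyze() come from the file above.
    static bool
    RunEffectiveAddressAnalysis(MIRGraph &graph)
    {
        EffectiveAddressAnalysis eaa(graph);
        return eaa.analyze();
    }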
