Sat, 03 Jan 2015 20:18:00 +0100
Conditionally enable double-key logic based on private browsing mode or the
privacy.thirdparty.isolate preference, and implement it in
GetCookieStringCommon and FindCookie, where it counts...
With some reservations about how to convince FindCookie callers to test the
condition and pass a nullptr when double-key logic is disabled.
michael@0 | 1 | /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- */ |
michael@0 | 2 | /* This Source Code Form is subject to the terms of the Mozilla Public |
michael@0 | 3 | * License, v. 2.0. If a copy of the MPL was not distributed with this |
michael@0 | 4 | * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ |
michael@0 | 5 | |
michael@0 | 6 | /* This variant of nsIPerfMeasurement uses the perf_event interface |
michael@0 | 7 | * added in Linux 2.6.31. We key compilation of this file off the |
michael@0 | 8 | * existence of <linux/perf_event.h>. |
michael@0 | 9 | */ |
michael@0 | 10 | |
michael@0 | 11 | #include <errno.h> |
michael@0 | 12 | #include <linux/perf_event.h> |
michael@0 | 13 | #include <string.h> |
michael@0 | 14 | #include <sys/ioctl.h> |
michael@0 | 15 | #include <sys/syscall.h> |
michael@0 | 16 | #include <unistd.h> |
michael@0 | 17 | |
michael@0 | 18 | #include "perf/jsperf.h" |
michael@0 | 19 | |
michael@0 | 20 | using namespace js; |
michael@0 | 21 | |
michael@0 | 22 | // As of July 2010, this system call has not been added to the |
michael@0 | 23 | // C library, so we have to provide our own wrapper function. |
michael@0 | 24 | // If this code runs on a kernel that does not implement the |
michael@0 | 25 | // system call (2.6.30 or older) nothing unpredictable will |
michael@0 | 26 | // happen - it will just always fail and return -1. |
michael@0 | 27 | static int |
michael@0 | 28 | sys_perf_event_open(struct perf_event_attr *attr, pid_t pid, int cpu, |
michael@0 | 29 | int group_fd, unsigned long flags) |
michael@0 | 30 | { |
michael@0 | 31 | return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags); |
michael@0 | 32 | } |
michael@0 | 33 | |
michael@0 | 34 | namespace { |
michael@0 | 35 | |
michael@0 | 36 | using JS::PerfMeasurement; |
michael@0 | 37 | typedef PerfMeasurement::EventMask EventMask; |
michael@0 | 38 | |
// Additional state required by this implementation.
struct Impl
{
    // Each active counter corresponds to an open file descriptor.
    // A value of -1 means the counter is not open / not being measured
    // (see the constructor and init()).
    int f_cpu_cycles;
    int f_instructions;
    int f_cache_references;
    int f_cache_misses;
    int f_branch_instructions;
    int f_branch_misses;
    int f_bus_cycles;
    int f_page_faults;
    int f_major_page_faults;
    int f_context_switches;
    int f_cpu_migrations;

    // Counter group leader, for Start and Stop.  -1 until the first
    // counter is successfully opened by init().
    int group_leader;

    // Whether counters are running.
    bool running;

    Impl();
    ~Impl();

    // Open a kernel counter for each event requested in toMeasure;
    // returns the subset of events that were actually enabled.
    EventMask init(EventMask toMeasure);
    // Enable the counter group (no-op if already running or if no
    // counter was ever opened).
    void start();
    // Disable the counter group, accumulate each counter's value into
    // the matching field of |counters|, and reset the kernel counts.
    void stop(PerfMeasurement* counters);
};
michael@0 | 68 | |
// Mapping from our event bitmask to codes passed into the kernel, and
// to fields in the PerfMeasurement and PerfMeasurement::impl structures.
static const struct
{
    EventMask bit;                       // bit in the EventMask for this event
    uint32_t type;                       // perf_event_attr.type (HW or SW)
    uint32_t config;                     // perf_event_attr.config (which event)
    uint64_t PerfMeasurement::* counter; // accumulated value, public side
    int Impl::* fd;                      // open descriptor, implementation side
} kSlots[PerfMeasurement::NUM_MEASURABLE_EVENTS] = {
// HW() and SW() each build one table row, tying together the mask bit,
// the kernel event code, and the two same-named fields by token pasting.
#define HW(mask, constant, fieldname) \
    { PerfMeasurement::mask, PERF_TYPE_HARDWARE, PERF_COUNT_HW_##constant, \
      &PerfMeasurement::fieldname, &Impl::f_##fieldname }
#define SW(mask, constant, fieldname) \
    { PerfMeasurement::mask, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_##constant, \
      &PerfMeasurement::fieldname, &Impl::f_##fieldname }

    HW(CPU_CYCLES, CPU_CYCLES, cpu_cycles),
    HW(INSTRUCTIONS, INSTRUCTIONS, instructions),
    HW(CACHE_REFERENCES, CACHE_REFERENCES, cache_references),
    HW(CACHE_MISSES, CACHE_MISSES, cache_misses),
    HW(BRANCH_INSTRUCTIONS, BRANCH_INSTRUCTIONS, branch_instructions),
    HW(BRANCH_MISSES, BRANCH_MISSES, branch_misses),
    HW(BUS_CYCLES, BUS_CYCLES, bus_cycles),
    SW(PAGE_FAULTS, PAGE_FAULTS, page_faults),
    SW(MAJOR_PAGE_FAULTS, PAGE_FAULTS_MAJ, major_page_faults),
    SW(CONTEXT_SWITCHES, CONTEXT_SWITCHES, context_switches),
    SW(CPU_MIGRATIONS, CPU_MIGRATIONS, cpu_migrations),

#undef HW
#undef SW
};
michael@0 | 101 | |
michael@0 | 102 | Impl::Impl() |
michael@0 | 103 | : f_cpu_cycles(-1), |
michael@0 | 104 | f_instructions(-1), |
michael@0 | 105 | f_cache_references(-1), |
michael@0 | 106 | f_cache_misses(-1), |
michael@0 | 107 | f_branch_instructions(-1), |
michael@0 | 108 | f_branch_misses(-1), |
michael@0 | 109 | f_bus_cycles(-1), |
michael@0 | 110 | f_page_faults(-1), |
michael@0 | 111 | f_major_page_faults(-1), |
michael@0 | 112 | f_context_switches(-1), |
michael@0 | 113 | f_cpu_migrations(-1), |
michael@0 | 114 | group_leader(-1), |
michael@0 | 115 | running(false) |
michael@0 | 116 | { |
michael@0 | 117 | } |
michael@0 | 118 | |
michael@0 | 119 | Impl::~Impl() |
michael@0 | 120 | { |
michael@0 | 121 | // Close all active counter descriptors. Take care to do the group |
michael@0 | 122 | // leader last (this may not be necessary, but it's unclear what |
michael@0 | 123 | // happens if you close the group leader out from under a group). |
michael@0 | 124 | for (int i = 0; i < PerfMeasurement::NUM_MEASURABLE_EVENTS; i++) { |
michael@0 | 125 | int fd = this->*(kSlots[i].fd); |
michael@0 | 126 | if (fd != -1 && fd != group_leader) |
michael@0 | 127 | close(fd); |
michael@0 | 128 | } |
michael@0 | 129 | |
michael@0 | 130 | if (group_leader != -1) |
michael@0 | 131 | close(group_leader); |
michael@0 | 132 | } |
michael@0 | 133 | |
michael@0 | 134 | EventMask |
michael@0 | 135 | Impl::init(EventMask toMeasure) |
michael@0 | 136 | { |
michael@0 | 137 | JS_ASSERT(group_leader == -1); |
michael@0 | 138 | if (!toMeasure) |
michael@0 | 139 | return EventMask(0); |
michael@0 | 140 | |
michael@0 | 141 | EventMask measured = EventMask(0); |
michael@0 | 142 | struct perf_event_attr attr; |
michael@0 | 143 | for (int i = 0; i < PerfMeasurement::NUM_MEASURABLE_EVENTS; i++) { |
michael@0 | 144 | if (!(toMeasure & kSlots[i].bit)) |
michael@0 | 145 | continue; |
michael@0 | 146 | |
michael@0 | 147 | memset(&attr, 0, sizeof(attr)); |
michael@0 | 148 | attr.size = sizeof(attr); |
michael@0 | 149 | |
michael@0 | 150 | // Set the type and config fields to indicate the counter we |
michael@0 | 151 | // want to enable. We want read format 0, and we're not using |
michael@0 | 152 | // sampling, so leave those fields unset. |
michael@0 | 153 | attr.type = kSlots[i].type; |
michael@0 | 154 | attr.config = kSlots[i].config; |
michael@0 | 155 | |
michael@0 | 156 | // If this will be the group leader it should start off |
michael@0 | 157 | // disabled. Otherwise it should start off enabled (but blocked |
michael@0 | 158 | // on the group leader). |
michael@0 | 159 | if (group_leader == -1) |
michael@0 | 160 | attr.disabled = 1; |
michael@0 | 161 | |
michael@0 | 162 | // The rest of the bit fields are really poorly documented. |
michael@0 | 163 | // For instance, I have *no idea* whether we should be setting |
michael@0 | 164 | // the inherit, inherit_stat, or task flags. I'm pretty sure |
michael@0 | 165 | // we do want to set mmap and comm, and not any of the ones I |
michael@0 | 166 | // haven't mentioned. |
michael@0 | 167 | attr.mmap = 1; |
michael@0 | 168 | attr.comm = 1; |
michael@0 | 169 | |
michael@0 | 170 | int fd = sys_perf_event_open(&attr, |
michael@0 | 171 | 0 /* trace self */, |
michael@0 | 172 | -1 /* on any cpu */, |
michael@0 | 173 | group_leader, |
michael@0 | 174 | 0 /* no flags presently defined */); |
michael@0 | 175 | if (fd == -1) |
michael@0 | 176 | continue; |
michael@0 | 177 | |
michael@0 | 178 | measured = EventMask(measured | kSlots[i].bit); |
michael@0 | 179 | this->*(kSlots[i].fd) = fd; |
michael@0 | 180 | if (group_leader == -1) |
michael@0 | 181 | group_leader = fd; |
michael@0 | 182 | } |
michael@0 | 183 | return measured; |
michael@0 | 184 | } |
michael@0 | 185 | |
michael@0 | 186 | void |
michael@0 | 187 | Impl::start() |
michael@0 | 188 | { |
michael@0 | 189 | if (running || group_leader == -1) |
michael@0 | 190 | return; |
michael@0 | 191 | |
michael@0 | 192 | running = true; |
michael@0 | 193 | ioctl(group_leader, PERF_EVENT_IOC_ENABLE, 0); |
michael@0 | 194 | } |
michael@0 | 195 | |
michael@0 | 196 | void |
michael@0 | 197 | Impl::stop(PerfMeasurement* counters) |
michael@0 | 198 | { |
michael@0 | 199 | // This scratch buffer is to ensure that we have read all the |
michael@0 | 200 | // available data, even if that's more than we expect. |
michael@0 | 201 | unsigned char buf[1024]; |
michael@0 | 202 | |
michael@0 | 203 | if (!running || group_leader == -1) |
michael@0 | 204 | return; |
michael@0 | 205 | |
michael@0 | 206 | ioctl(group_leader, PERF_EVENT_IOC_DISABLE, 0); |
michael@0 | 207 | running = false; |
michael@0 | 208 | |
michael@0 | 209 | // read out and reset all the counter values |
michael@0 | 210 | for (int i = 0; i < PerfMeasurement::NUM_MEASURABLE_EVENTS; i++) { |
michael@0 | 211 | int fd = this->*(kSlots[i].fd); |
michael@0 | 212 | if (fd == -1) |
michael@0 | 213 | continue; |
michael@0 | 214 | |
michael@0 | 215 | if (read(fd, buf, sizeof(buf)) == sizeof(uint64_t)) { |
michael@0 | 216 | uint64_t cur; |
michael@0 | 217 | memcpy(&cur, buf, sizeof(uint64_t)); |
michael@0 | 218 | counters->*(kSlots[i].counter) += cur; |
michael@0 | 219 | } |
michael@0 | 220 | |
michael@0 | 221 | // Reset the counter regardless of whether the read did what |
michael@0 | 222 | // we expected. |
michael@0 | 223 | ioctl(fd, PERF_EVENT_IOC_RESET, 0); |
michael@0 | 224 | } |
michael@0 | 225 | } |
michael@0 | 226 | |
michael@0 | 227 | } // anonymous namespace |
michael@0 | 228 | |
michael@0 | 229 | |
michael@0 | 230 | namespace JS { |
michael@0 | 231 | |
// initCtr: start a counter field at 0 if its event was successfully
// enabled, or at -1 (the "not measured" sentinel) otherwise.
#define initCtr(flag) ((eventsMeasured & flag) ? 0 : -1)

// Allocate the platform-specific Impl and try to enable the requested
// events; eventsMeasured records the subset that actually came up.
// NOTE(review): initCtr reads eventsMeasured, so this relies on
// eventsMeasured being declared before the counter fields in the
// PerfMeasurement class (jsperf.h) -- members initialize in
// declaration order, not the order written here.  Confirm against the
// header before reordering anything.
PerfMeasurement::PerfMeasurement(PerfMeasurement::EventMask toMeasure)
  : impl(js_new<Impl>()),
    eventsMeasured(impl ? static_cast<Impl*>(impl)->init(toMeasure)
                   : EventMask(0)),
    cpu_cycles(initCtr(CPU_CYCLES)),
    instructions(initCtr(INSTRUCTIONS)),
    cache_references(initCtr(CACHE_REFERENCES)),
    cache_misses(initCtr(CACHE_MISSES)),
    branch_instructions(initCtr(BRANCH_INSTRUCTIONS)),
    branch_misses(initCtr(BRANCH_MISSES)),
    bus_cycles(initCtr(BUS_CYCLES)),
    page_faults(initCtr(PAGE_FAULTS)),
    major_page_faults(initCtr(MAJOR_PAGE_FAULTS)),
    context_switches(initCtr(CONTEXT_SWITCHES)),
    cpu_migrations(initCtr(CPU_MIGRATIONS))
{
}

#undef initCtr
michael@0 | 253 | |
michael@0 | 254 | PerfMeasurement::~PerfMeasurement() |
michael@0 | 255 | { |
michael@0 | 256 | js_delete(static_cast<Impl*>(impl)); |
michael@0 | 257 | } |
michael@0 | 258 | |
michael@0 | 259 | void |
michael@0 | 260 | PerfMeasurement::start() |
michael@0 | 261 | { |
michael@0 | 262 | if (impl) |
michael@0 | 263 | static_cast<Impl*>(impl)->start(); |
michael@0 | 264 | } |
michael@0 | 265 | |
michael@0 | 266 | void |
michael@0 | 267 | PerfMeasurement::stop() |
michael@0 | 268 | { |
michael@0 | 269 | if (impl) |
michael@0 | 270 | static_cast<Impl*>(impl)->stop(this); |
michael@0 | 271 | } |
michael@0 | 272 | |
michael@0 | 273 | void |
michael@0 | 274 | PerfMeasurement::reset() |
michael@0 | 275 | { |
michael@0 | 276 | for (int i = 0; i < NUM_MEASURABLE_EVENTS; i++) { |
michael@0 | 277 | if (eventsMeasured & kSlots[i].bit) |
michael@0 | 278 | this->*(kSlots[i].counter) = 0; |
michael@0 | 279 | else |
michael@0 | 280 | this->*(kSlots[i].counter) = -1; |
michael@0 | 281 | } |
michael@0 | 282 | } |
michael@0 | 283 | |
michael@0 | 284 | bool |
michael@0 | 285 | PerfMeasurement::canMeasureSomething() |
michael@0 | 286 | { |
michael@0 | 287 | // Find out if the kernel implements the performance measurement |
michael@0 | 288 | // API. If it doesn't, syscall(__NR_perf_event_open, ...) is |
michael@0 | 289 | // guaranteed to return -1 and set errno to ENOSYS. |
michael@0 | 290 | // |
michael@0 | 291 | // We set up input parameters that should provoke an EINVAL error |
michael@0 | 292 | // from a kernel that does implement perf_event_open, but we can't |
michael@0 | 293 | // be sure it will (newer kernels might add more event types), so |
michael@0 | 294 | // we have to take care to close any valid fd it might return. |
michael@0 | 295 | |
michael@0 | 296 | struct perf_event_attr attr; |
michael@0 | 297 | memset(&attr, 0, sizeof(attr)); |
michael@0 | 298 | attr.size = sizeof(attr); |
michael@0 | 299 | attr.type = PERF_TYPE_MAX; |
michael@0 | 300 | |
michael@0 | 301 | int fd = sys_perf_event_open(&attr, 0, -1, -1, 0); |
michael@0 | 302 | if (fd >= 0) { |
michael@0 | 303 | close(fd); |
michael@0 | 304 | return true; |
michael@0 | 305 | } else { |
michael@0 | 306 | return errno != ENOSYS; |
michael@0 | 307 | } |
michael@0 | 308 | } |
michael@0 | 309 | |
michael@0 | 310 | } // namespace JS |