security/sandbox/win/src/sidestep/preamble_patcher_with_stub.cpp

changeset 0
6474c204b198
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/security/sandbox/win/src/sidestep/preamble_patcher_with_stub.cpp	Wed Dec 31 06:09:35 2014 +0100
     1.3 @@ -0,0 +1,179 @@
     1.4 +// Copyright (c) 2012 The Chromium Authors. All rights reserved.
     1.5 +// Use of this source code is governed by a BSD-style license that can be
     1.6 +// found in the LICENSE file.
     1.7 +
     1.8 +// Implementation of PreamblePatcher
     1.9 +
    1.10 +#include "sandbox/win/src/sidestep/preamble_patcher.h"
    1.11 +
    1.12 +#include "sandbox/win/src/sandbox_nt_util.h"
    1.13 +#include "sandbox/win/src/sidestep/mini_disassembler.h"
    1.14 +
    1.15 +// Definitions of assembly statements we need
    1.16 +#define ASM_JMP32REL 0xE9
    1.17 +#define ASM_INT3 0xCC
    1.18 +
    1.19 +namespace {
    1.20 +
    1.21 +// Very basic memcpy. We are copying 4 to 12 bytes most of the time, so there
    1.22 +// is no attempt to optimize this code or have a general purpose function.
    1.23 +// We don't want to call the crt from this code.
    1.24 +inline void* RawMemcpy(void* destination, const void* source, size_t bytes) {
    1.25 +  const char* from = reinterpret_cast<const char*>(source);
    1.26 +  char* to = reinterpret_cast<char*>(destination);
    1.27 +
    1.28 +  for (size_t i = 0; i < bytes ; i++)
    1.29 +    to[i] = from[i];
    1.30 +
    1.31 +  return destination;
    1.32 +}
    1.33 +
    1.34 +// Very basic memset. We are filling 1 to 7 bytes most of the time, so there
    1.35 +// is no attempt to optimize this code or have a general purpose function.
    1.36 +// We don't want to call the crt from this code.
    1.37 +inline void* RawMemset(void* destination, int value, size_t bytes) {
    1.38 +  char* to = reinterpret_cast<char*>(destination);
    1.39 +
    1.40 +  for (size_t i = 0; i < bytes ; i++)
    1.41 +    to[i] = static_cast<char>(value);
    1.42 +
    1.43 +  return destination;
    1.44 +}
    1.45 +
    1.46 +}  // namespace
    1.47 +
    1.48 +#define ASSERT(a, b) DCHECK_NT(a)
    1.49 +
    1.50 +namespace sidestep {
    1.51 +
// Hooks target_function so that all calls land in replacement_function,
// and builds a trampoline in preamble_stub that lets callers still invoke
// the original: the stub receives the displaced preamble instructions
// followed by a JMP rel32 back into the rest of the target.
//
// target_function:      function whose first bytes get overwritten in place.
// replacement_function: where the new 5-byte JMP at target_function goes.
// preamble_stub:        caller-provided buffer for the trampoline.
// stub_size:            capacity of preamble_stub in bytes.
// bytes_needed:         optional out-param; set to the stub size required
//                       (displaced preamble + 5-byte jump) even on the
//                       insufficient-buffer path.
//
// Returns SIDESTEP_SUCCESS, or a SIDESTEP_* error if the preamble cannot
// be safely displaced or the arguments are invalid.
//
// NOTE(review): this writes directly to target_function's code bytes;
// presumably the caller has already made those pages writable
// (e.g. via VirtualProtect) — confirm against the callers of this routine.
// x86 (32-bit) only: rel32 offsets are computed with plain pointer
// subtraction truncated to int (see the #pragma warning 4244 blocks).
SideStepError PreamblePatcher::RawPatchWithStub(
    void* target_function,
    void* replacement_function,
    unsigned char* preamble_stub,
    size_t stub_size,
    size_t* bytes_needed) {
  if ((NULL == target_function) ||
      (NULL == replacement_function) ||
      (NULL == preamble_stub)) {
    ASSERT(false, (L"Invalid parameters - either pTargetFunction or "
                   L"pReplacementFunction or pPreambleStub were NULL."));
    return SIDESTEP_INVALID_PARAMETER;
  }

  // TODO(V7:joi) Siggi and I just had a discussion and decided that both
  // patching and unpatching are actually unsafe.  We also discussed a
  // method of making it safe, which is to freeze all other threads in the
  // process, check their thread context to see if their eip is currently
  // inside the block of instructions we need to copy to the stub, and if so
  // wait a bit and try again, then unfreeze all threads once we've patched.
  // Not implementing this for now since we're only using SideStep for unit
  // testing, but if we ever use it for production code this is what we
  // should do.
  //
  // NOTE: Stoyan suggests we can write 8 or even 10 bytes atomically using
  // FPU instructions, and on newer processors we could use cmpxchg8b or
  // cmpxchg16b. So it might be possible to do the patching/unpatching
  // atomically and avoid having to freeze other threads.  Note though, that
  // doing it atomically does not help if one of the other threads happens
  // to have its eip in the middle of the bytes you change while you change
  // them.
  unsigned char* target = reinterpret_cast<unsigned char*>(target_function);

  // Let's disassemble the preamble of the target function to see if we can
  // patch, and to see how much of the preamble we need to take.  We need 5
  // bytes for our jmp instruction, so let's find the minimum number of
  // instructions to get 5 bytes.
  MiniDisassembler disassembler;
  unsigned int preamble_bytes = 0;
  while (preamble_bytes < 5) {
    // Disassemble() is expected to advance preamble_bytes by the length of
    // the instruction it decoded (it receives &preamble_bytes); otherwise
    // this loop would never terminate — confirm against MiniDisassembler.
    InstructionType instruction_type =
      disassembler.Disassemble(target + preamble_bytes, &preamble_bytes);
    if (IT_JUMP == instruction_type) {
      // A relative jump inside the displaced bytes would be relocated to the
      // stub with a now-wrong displacement, so refuse to patch.
      ASSERT(false, (L"Unable to patch because there is a jump instruction "
                     L"in the first 5 bytes."));
      return SIDESTEP_JUMP_INSTRUCTION;
    } else if (IT_RETURN == instruction_type) {
      ASSERT(false, (L"Unable to patch because function is too short"));
      return SIDESTEP_FUNCTION_TOO_SMALL;
    } else if (IT_GENERIC != instruction_type) {
      ASSERT(false, (L"Disassembler encountered unsupported instruction "
                     L"(either unused or unknown"));
      return SIDESTEP_UNSUPPORTED_INSTRUCTION;
    }
  }

  // Report the required stub size before the capacity check so callers can
  // retry with a big enough buffer.
  if (NULL != bytes_needed)
    *bytes_needed = preamble_bytes + 5;

  // Inv: preamble_bytes is the number of bytes (at least 5) that we need to
  // take from the preamble to have whole instructions that are 5 bytes or more
  // in size total. The size of the stub required is cbPreamble + size of
  // jmp (5)
  if (preamble_bytes + 5 > stub_size) {
    NOTREACHED_NT();
    return SIDESTEP_INSUFFICIENT_BUFFER;
  }

  // First, copy the preamble that we will overwrite.
  RawMemcpy(reinterpret_cast<void*>(preamble_stub),
            reinterpret_cast<void*>(target), preamble_bytes);

  // Now, make a jmp instruction to the rest of the target function (minus the
  // preamble bytes we moved into the stub) and copy it into our preamble-stub.
  // find address to jump to, relative to next address after jmp instruction
#pragma warning(push)
#pragma warning(disable:4244)
  // This assignment generates a warning because it is 32 bit specific.
  int relative_offset_to_target_rest
    = ((reinterpret_cast<unsigned char*>(target) + preamble_bytes) -
        (preamble_stub + preamble_bytes + 5));
#pragma warning(pop)
  // jmp (Jump near, relative, displacement relative to next instruction)
  preamble_stub[preamble_bytes] = ASM_JMP32REL;
  // copy the address (4-byte rel32 displacement follows the 0xE9 opcode)
  RawMemcpy(reinterpret_cast<void*>(preamble_stub + preamble_bytes + 1),
            reinterpret_cast<void*>(&relative_offset_to_target_rest), 4);

  // Inv: preamble_stub points to assembly code that will execute the
  // original function by first executing the first cbPreamble bytes of the
  // preamble, then jumping to the rest of the function.

  // Overwrite the first 5 bytes of the target function with a jump to our
  // replacement function.
  // (Jump near, relative, displacement relative to next instruction)
  target[0] = ASM_JMP32REL;

  // Find offset from instruction after jmp, to the replacement function.
#pragma warning(push)
#pragma warning(disable:4244)
  int offset_to_replacement_function =
    reinterpret_cast<unsigned char*>(replacement_function) -
    reinterpret_cast<unsigned char*>(target) - 5;
#pragma warning(pop)
  // complete the jmp instruction
  RawMemcpy(reinterpret_cast<void*>(target + 1),
            reinterpret_cast<void*>(&offset_to_replacement_function), 4);
  // Set any remaining bytes that were moved to the preamble-stub to INT3 so
  // as not to cause confusion (otherwise you might see some strange
  // instructions if you look at the disassembly, or even invalid
  // instructions). Also, by doing this, we will break into the debugger if
  // some code calls into this portion of the code.  If this happens, it
  // means that this function cannot be patched using this patcher without
  // further thought.
  if (preamble_bytes > 5) {
    RawMemset(reinterpret_cast<void*>(target + 5), ASM_INT3,
              preamble_bytes - 5);
  }

  // Inv: The memory pointed to by target_function now points to a relative
  // jump instruction that jumps over to the preamble_stub.  The preamble
  // stub contains the first stub_size bytes of the original target
  // function's preamble code, followed by a relative jump back to the next
  // instruction after the first cbPreamble bytes.

  return SIDESTEP_SUCCESS;
}
   1.179 +
   1.180 +};  // namespace sidestep
   1.181 +
   1.182 +#undef ASSERT

mercurial