ipc/glue/MessageChannel.h

Wed, 31 Dec 2014 13:27:57 +0100

author
Michael Schloh von Bennewitz <michael@schloh.com>
date
Wed, 31 Dec 2014 13:27:57 +0100
branch
TOR_BUG_3246
changeset 6
8bccb770b82d
permissions
-rw-r--r--

Ignore runtime configuration files generated during quality assurance.

michael@0 1 /* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
michael@0 2 * vim: sw=4 ts=4 et :
michael@0 3 */
michael@0 4 /* This Source Code Form is subject to the terms of the Mozilla Public
michael@0 5 * License, v. 2.0. If a copy of the MPL was not distributed with this
michael@0 6 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
michael@0 7
michael@0 8 #ifndef ipc_glue_MessageChannel_h
michael@0 9 #define ipc_glue_MessageChannel_h 1
michael@0 10
michael@0 11 #include "base/basictypes.h"
michael@0 12 #include "base/message_loop.h"
michael@0 13
michael@0 14 #include "mozilla/Monitor.h"
michael@0 15 #include "mozilla/Vector.h"
michael@0 16 #include "mozilla/WeakPtr.h"
michael@0 17 #include "mozilla/ipc/Transport.h"
michael@0 18 #include "MessageLink.h"
michael@0 19 #include "nsAutoPtr.h"
michael@0 20
michael@0 21 #include <deque>
michael@0 22 #include <stack>
michael@0 23 #include <math.h>
michael@0 24
michael@0 25 namespace mozilla {
michael@0 26 namespace ipc {
michael@0 27
michael@0 28 class MessageChannel;
michael@0 29
// A thread-safe, ref-counted wrapper around mozilla::Monitor so the
// monitor guarding a MessageChannel can be shared by both sides of a
// link and safely outlive any single owner.
class RefCountedMonitor : public Monitor
{
  public:
    RefCountedMonitor()
      : Monitor("mozilla.ipc.MessageChannel.mMonitor")
    {}

    NS_INLINE_DECL_THREADSAFE_REFCOUNTING(RefCountedMonitor)
};
michael@0 39
michael@0 40 class MessageChannel : HasResultCodes
michael@0 41 {
michael@0 42 friend class ProcessLink;
michael@0 43 friend class ThreadLink;
michael@0 44 friend class AutoEnterRPCTransaction;
michael@0 45
michael@0 46 class CxxStackFrame;
michael@0 47 class InterruptFrame;
michael@0 48
michael@0 49 typedef mozilla::Monitor Monitor;
michael@0 50
michael@0 51 public:
michael@0 52 static const int32_t kNoTimeout;
michael@0 53
michael@0 54 typedef IPC::Message Message;
michael@0 55 typedef mozilla::ipc::Transport Transport;
michael@0 56
michael@0 57 MessageChannel(MessageListener *aListener);
michael@0 58 ~MessageChannel();
michael@0 59
michael@0 60 // "Open" from the perspective of the transport layer; the underlying
michael@0 61 // socketpair/pipe should already be created.
michael@0 62 //
michael@0 63 // Returns true if the transport layer was successfully connected,
michael@0 64 // i.e., mChannelState == ChannelConnected.
michael@0 65 bool Open(Transport* aTransport, MessageLoop* aIOLoop=0, Side aSide=UnknownSide);
michael@0 66
michael@0 67 // "Open" a connection to another thread in the same process.
michael@0 68 //
michael@0 69 // Returns true if the transport layer was successfully connected,
michael@0 70 // i.e., mChannelState == ChannelConnected.
michael@0 71 //
michael@0 72 // For more details on the process of opening a channel between
michael@0 73 // threads, see the extended comment on this function
michael@0 74 // in MessageChannel.cpp.
michael@0 75 bool Open(MessageChannel *aTargetChan, MessageLoop *aTargetLoop, Side aSide);
michael@0 76
michael@0 77 // Close the underlying transport channel.
michael@0 78 void Close();
michael@0 79
michael@0 80 // Force the channel to behave as if a channel error occurred. Valid
michael@0 81 // for process links only, not thread links.
michael@0 82 void CloseWithError();
michael@0 83
michael@0 84 void SetAbortOnError(bool abort)
michael@0 85 {
michael@0 86 mAbortOnError = true;
michael@0 87 }
michael@0 88
michael@0 89 // Misc. behavioral traits consumers can request for this channel
michael@0 90 enum ChannelFlags {
michael@0 91 REQUIRE_DEFAULT = 0,
michael@0 92 // Windows: if this channel operates on the UI thread, indicates
michael@0 93 // WindowsMessageLoop code should enable deferred native message
michael@0 94 // handling to prevent deadlocks. Should only be used for protocols
michael@0 95 // that manage child processes which might create native UI, like
michael@0 96 // plugins.
michael@0 97 REQUIRE_DEFERRED_MESSAGE_PROTECTION = 1 << 0
michael@0 98 };
    // Record the behavioral traits requested for this channel (see the
    // ChannelFlags enum above).
    void SetChannelFlags(ChannelFlags aFlags) { mFlags = aFlags; }
    // Return the traits previously recorded with SetChannelFlags().
    ChannelFlags GetChannelFlags() { return mFlags; }
michael@0 101
michael@0 102 // Asynchronously send a message to the other side of the channel
michael@0 103 bool Send(Message* aMsg);
michael@0 104
michael@0 105 // Asynchronously deliver a message back to this side of the
michael@0 106 // channel
michael@0 107 bool Echo(Message* aMsg);
michael@0 108
michael@0 109 // Synchronously send |msg| (i.e., wait for |reply|)
michael@0 110 bool Send(Message* aMsg, Message* aReply);
michael@0 111
michael@0 112 // Make an Interrupt call to the other side of the channel
michael@0 113 bool Call(Message* aMsg, Message* aReply);
michael@0 114
michael@0 115 bool CanSend() const;
michael@0 116
michael@0 117 void SetReplyTimeoutMs(int32_t aTimeoutMs);
michael@0 118
michael@0 119 bool IsOnCxxStack() const {
michael@0 120 return !mCxxStackFrames.empty();
michael@0 121 }
michael@0 122
michael@0 123 void FlushPendingInterruptQueue();
michael@0 124
michael@0 125 // Unsound_IsClosed and Unsound_NumQueuedMessages are safe to call from any
michael@0 126 // thread, but they make no guarantees about whether you'll get an
michael@0 127 // up-to-date value; the values are written on one thread and read without
michael@0 128 // locking, on potentially different threads. Thus you should only use
michael@0 129 // them when you don't particularly care about getting a recent value (e.g.
michael@0 130 // in a memory report).
    bool Unsound_IsClosed() const {
        // A channel that never got (or already dropped) its link is
        // reported as closed.
        return mLink ? mLink->Unsound_IsClosed() : true;
    }
    uint32_t Unsound_NumQueuedMessages() const {
        // Without a link nothing can be queued.
        return mLink ? mLink->Unsound_NumQueuedMessages() : 0;
    }
michael@0 137
    // Accessors for the process-wide sIsPumpingMessages flag; its
    // semantics are defined by the message-pumping code that toggles it.
    static bool IsPumpingMessages() {
        return sIsPumpingMessages;
    }
    static void SetIsPumpingMessages(bool aIsPumping) {
        sIsPumpingMessages = aIsPumping;
    }
michael@0 144
michael@0 145 #ifdef OS_WIN
    // Bookkeeping for one nested sync/interrupt send (Windows only).
    // Frames form two intrusive stacks: a per-channel one (mPrev) and a
    // process-wide one (mStaticPrev, topped by sStaticTopFrame).
    struct MOZ_STACK_CLASS SyncStackFrame
    {
        SyncStackFrame(MessageChannel* channel, bool interrupt);
        ~SyncStackFrame();

        // True if this frame was pushed for an Interrupt call (per the
        // constructor's |interrupt| argument) rather than a sync send.
        bool mInterrupt;
        // True once this frame spins nested native events; queried by
        // IsSpinLoopActive().
        bool mSpinNestedEvents;
        // NOTE(review): presumably set once the listener has been
        // notified about the nested spin -- confirm in MessageChannel.cpp.
        bool mListenerNotified;
        MessageChannel* mChannel;

        // The previous stack frame for this channel.
        SyncStackFrame* mPrev;

        // The previous stack frame on any channel.
        SyncStackFrame* mStaticPrev;
    };
michael@0 162 friend struct MessageChannel::SyncStackFrame;
michael@0 163
michael@0 164 static bool IsSpinLoopActive() {
michael@0 165 for (SyncStackFrame* frame = sStaticTopFrame; frame; frame = frame->mPrev) {
michael@0 166 if (frame->mSpinNestedEvents)
michael@0 167 return true;
michael@0 168 }
michael@0 169 return false;
michael@0 170 }
michael@0 171
michael@0 172 protected:
michael@0 173 // The deepest sync stack frame for this channel.
michael@0 174 SyncStackFrame* mTopFrame;
michael@0 175
michael@0 176 bool mIsSyncWaitingOnNonMainThread;
michael@0 177
michael@0 178 // The deepest sync stack frame on any channel.
michael@0 179 static SyncStackFrame* sStaticTopFrame;
michael@0 180
michael@0 181 public:
michael@0 182 void ProcessNativeEventsInInterruptCall();
michael@0 183 static void NotifyGeckoEventDispatch();
michael@0 184
michael@0 185 private:
michael@0 186 void SpinInternalEventLoop();
michael@0 187 #endif
michael@0 188
michael@0 189 private:
michael@0 190 void CommonThreadOpenInit(MessageChannel *aTargetChan, Side aSide);
michael@0 191 void OnOpenAsSlave(MessageChannel *aTargetChan, Side aSide);
michael@0 192
michael@0 193 void PostErrorNotifyTask();
michael@0 194 void OnNotifyMaybeChannelError();
michael@0 195 void ReportConnectionError(const char* aChannelName) const;
michael@0 196 void ReportMessageRouteError(const char* channelName) const;
michael@0 197 bool MaybeHandleError(Result code, const char* channelName);
michael@0 198
michael@0 199 void Clear();
michael@0 200
michael@0 201 // Send OnChannelConnected notification to listeners.
michael@0 202 void DispatchOnChannelConnected(int32_t peer_pid);
michael@0 203
michael@0 204 // Any protocol that requires blocking until a reply arrives, will send its
michael@0 205 // outgoing message through this function. Currently, two protocols do this:
michael@0 206 //
michael@0 207 // sync, which can only initiate messages from child to parent.
michael@0 208 // urgent, which can only initiate messages from parent to child.
michael@0 209 //
michael@0 210 // SendAndWait() expects that the worker thread owns the monitor, and that
michael@0 211 // the message has been prepared to be sent over the link. It returns as
michael@0 212 // soon as a reply has been received, or an error has occurred.
michael@0 213 //
michael@0 214 // Note that while the child is blocked waiting for a sync reply, it can wake
michael@0 215 // up to process urgent calls from the parent.
michael@0 216 bool SendAndWait(Message* aMsg, Message* aReply);
michael@0 217
michael@0 218 bool RPCCall(Message* aMsg, Message* aReply);
michael@0 219 bool InterruptCall(Message* aMsg, Message* aReply);
michael@0 220 bool UrgentCall(Message* aMsg, Message* aReply);
michael@0 221
michael@0 222 bool InterruptEventOccurred();
michael@0 223
michael@0 224 bool ProcessPendingUrgentRequest();
michael@0 225 bool ProcessPendingRPCCall();
michael@0 226
michael@0 227 void MaybeUndeferIncall();
michael@0 228 void EnqueuePendingMessages();
michael@0 229
michael@0 230 // Executed on the worker thread. Dequeues one pending message.
michael@0 231 bool OnMaybeDequeueOne();
michael@0 232 bool DequeueOne(Message *recvd);
michael@0 233
michael@0 234 // Dispatches an incoming message to its appropriate handler.
michael@0 235 void DispatchMessage(const Message &aMsg);
michael@0 236
michael@0 237 // DispatchMessage will route to one of these functions depending on the
michael@0 238 // protocol type of the message.
michael@0 239 void DispatchSyncMessage(const Message &aMsg);
michael@0 240 void DispatchUrgentMessage(const Message &aMsg);
michael@0 241 void DispatchAsyncMessage(const Message &aMsg);
michael@0 242 void DispatchRPCMessage(const Message &aMsg);
michael@0 243 void DispatchInterruptMessage(const Message &aMsg, size_t aStackDepth);
michael@0 244
michael@0 245 // Return true if the wait ended because a notification was received.
michael@0 246 //
michael@0 247 // Return false if the time elapsed from when we started the process of
michael@0 248 // waiting until afterwards exceeded the currently allotted timeout.
michael@0 249 // That *DOES NOT* mean false => "no event" (== timeout); there are many
michael@0 250 // circumstances that could cause the measured elapsed time to exceed the
michael@0 251 // timeout EVEN WHEN we were notified.
michael@0 252 //
michael@0 253 // So in sum: true is a meaningful return value; false isn't,
michael@0 254 // necessarily.
michael@0 255 bool WaitForSyncNotify();
michael@0 256 bool WaitForInterruptNotify();
michael@0 257
michael@0 258 bool WaitResponse(bool aWaitTimedOut);
michael@0 259
michael@0 260 bool ShouldContinueFromTimeout();
michael@0 261
michael@0 262 // The "remote view of stack depth" can be different than the
michael@0 263 // actual stack depth when there are out-of-turn replies. When we
michael@0 264 // receive one, our actual Interrupt stack depth doesn't decrease, but
michael@0 265 // the other side (that sent the reply) thinks it has. So, the
michael@0 266 // "view" returned here is |stackDepth| minus the number of
michael@0 267 // out-of-turn replies.
michael@0 268 //
michael@0 269 // Only called from the worker thread.
michael@0 270 size_t RemoteViewOfStackDepth(size_t stackDepth) const {
michael@0 271 AssertWorkerThread();
michael@0 272 return stackDepth - mOutOfTurnReplies.size();
michael@0 273 }
michael@0 274
michael@0 275 int32_t NextSeqno() {
michael@0 276 AssertWorkerThread();
michael@0 277 return (mSide == ChildSide) ? --mNextSeqno : ++mNextSeqno;
michael@0 278 }
michael@0 279
michael@0 280 // This helper class manages mCxxStackDepth on behalf of MessageChannel.
michael@0 281 // When the stack depth is incremented from zero to non-zero, it invokes
michael@0 282 // a callback, and similarly for when the depth goes from non-zero to zero.
    // Invoked by CxxStackFrame when the stack depth goes 0 -> 1.
    void EnteredCxxStack() {
        mListener->OnEnteredCxxStack();
    }

    // Invoked when the depth returns to zero; defined out of line.
    void ExitedCxxStack();

    // Forward Interrupt-call entry/exit notifications to the listener.
    void EnteredCall() {
        mListener->OnEnteredCall();
    }

    void ExitedCall() {
        mListener->OnExitedCall();
    }

    // Non-owning pointer to the listener; mListener is a WeakPtr, so
    // this may be null once the listener has gone away.
    MessageListener *Listener() const {
        return mListener.get();
    }
michael@0 300
michael@0 301 void DebugAbort(const char* file, int line, const char* cond,
michael@0 302 const char* why,
michael@0 303 bool reply=false) const;
michael@0 304
michael@0 305 // This method is only safe to call on the worker thread, or in a
michael@0 306 // debugger with all threads paused.
michael@0 307 void DumpInterruptStack(const char* const pfx="") const;
michael@0 308
michael@0 309 private:
michael@0 310 // Called from both threads
    size_t InterruptStackDepth() const {
        mMonitor->AssertCurrentThreadOwns();
        return mInterruptStack.size();
    }

    // Returns true if we're blocking waiting for a reply.
    bool AwaitingSyncReply() const {
        mMonitor->AssertCurrentThreadOwns();
        return mPendingSyncReplies > 0;
    }
    // True while blocked on the reply to an urgent out-message.
    bool AwaitingUrgentReply() const {
        mMonitor->AssertCurrentThreadOwns();
        return mPendingUrgentReplies > 0;
    }
    // True while blocked on the reply to an RPC out-call.
    bool AwaitingRPCReply() const {
        mMonitor->AssertCurrentThreadOwns();
        return mPendingRPCReplies > 0;
    }
    // True while any Interrupt out-call is still awaiting its reply.
    bool AwaitingInterruptReply() const {
        mMonitor->AssertCurrentThreadOwns();
        return !mInterruptStack.empty();
    }
michael@0 333
michael@0 334 // Returns true if we're dispatching a sync message's callback.
    bool DispatchingSyncMessage() const {
        return mDispatchingSyncMessage;
    }

    // Returns true if we're dispatching an urgent message's callback.
    // Urgent dispatch can recur, hence a depth counter rather than a
    // simple flag (see mDispatchingUrgentMessageCount).
    bool DispatchingUrgentMessage() const {
        return mDispatchingUrgentMessageCount > 0;
    }
michael@0 343
michael@0 344 bool Connected() const;
michael@0 345
michael@0 346 private:
michael@0 347 // Executed on the IO thread.
michael@0 348 void NotifyWorkerThread();
michael@0 349
michael@0 350 // Return true if |aMsg| is a special message targeted at the IO
michael@0 351 // thread, in which case it shouldn't be delivered to the worker.
michael@0 352 bool MaybeInterceptSpecialIOMessage(const Message& aMsg);
michael@0 353
michael@0 354 void OnChannelConnected(int32_t peer_id);
michael@0 355
michael@0 356 // Tell the IO thread to close the channel and wait for it to ACK.
michael@0 357 void SynchronouslyClose();
michael@0 358
michael@0 359 void OnMessageReceivedFromLink(const Message& aMsg);
michael@0 360 void OnChannelErrorFromLink();
michael@0 361
michael@0 362 private:
michael@0 363 // Run on the not current thread.
michael@0 364 void NotifyChannelClosed();
michael@0 365 void NotifyMaybeChannelError();
michael@0 366
michael@0 367 private:
michael@0 368 // Can be run on either thread
    void AssertWorkerThread() const
    {
        // Compare loop IDs rather than MessageLoop pointers: the ID
        // persists even after mWorkerLoop is cleared during shutdown
        // (see mWorkerLoopID).
        NS_ABORT_IF_FALSE(mWorkerLoopID == MessageLoop::current()->id(),
                          "not on worker thread!");
    }

    // The "link" thread is either the I/O thread (ProcessLink) or the
    // other actor's work thread (ThreadLink). In either case, it is
    // NOT our worker thread.
    void AssertLinkThread() const
    {
        NS_ABORT_IF_FALSE(mWorkerLoopID != MessageLoop::current()->id(),
                          "on worker thread but should not be!");
    }
michael@0 383
michael@0 384 private:
michael@0 385 typedef IPC::Message::msgid_t msgid_t;
michael@0 386 typedef std::deque<Message> MessageQueue;
michael@0 387 typedef std::map<size_t, Message> MessageMap;
michael@0 388
michael@0 389 // All dequeuing tasks require a single point of cancellation,
michael@0 390 // which is handled via a reference-counted task.
michael@0 391 class RefCountedTask
michael@0 392 {
michael@0 393 public:
michael@0 394 RefCountedTask(CancelableTask* aTask)
michael@0 395 : mTask(aTask)
michael@0 396 { }
michael@0 397 ~RefCountedTask() { delete mTask; }
michael@0 398 void Run() { mTask->Run(); }
michael@0 399 void Cancel() { mTask->Cancel(); }
michael@0 400
michael@0 401 NS_INLINE_DECL_THREADSAFE_REFCOUNTING(RefCountedTask)
michael@0 402
michael@0 403 private:
michael@0 404 CancelableTask* mTask;
michael@0 405 };
michael@0 406
michael@0 407 // Wrap an existing task which can be cancelled at any time
michael@0 408 // without the wrapper's knowledge.
michael@0 409 class DequeueTask : public Task
michael@0 410 {
michael@0 411 public:
michael@0 412 DequeueTask(RefCountedTask* aTask)
michael@0 413 : mTask(aTask)
michael@0 414 { }
michael@0 415 void Run() { mTask->Run(); }
michael@0 416
michael@0 417 private:
michael@0 418 nsRefPtr<RefCountedTask> mTask;
michael@0 419 };
michael@0 420
michael@0 421 private:
michael@0 422 mozilla::WeakPtr<MessageListener> mListener;
michael@0 423 ChannelState mChannelState;
michael@0 424 nsRefPtr<RefCountedMonitor> mMonitor;
michael@0 425 Side mSide;
michael@0 426 MessageLink* mLink;
michael@0 427 MessageLoop* mWorkerLoop; // thread where work is done
michael@0 428 CancelableTask* mChannelErrorTask; // NotifyMaybeChannelError runnable
michael@0 429
michael@0 430 // id() of mWorkerLoop. This persists even after mWorkerLoop is cleared
michael@0 431 // during channel shutdown.
michael@0 432 int mWorkerLoopID;
michael@0 433
michael@0 434 // A task encapsulating dequeuing one pending message.
michael@0 435 nsRefPtr<RefCountedTask> mDequeueOneTask;
michael@0 436
michael@0 437 // Timeout periods are broken up in two to prevent system suspension from
michael@0 438 // triggering an abort. This method (called by WaitForEvent with a 'did
michael@0 439 // timeout' flag) decides if we should wait again for half of mTimeoutMs
michael@0 440 // or give up.
michael@0 441 int32_t mTimeoutMs;
michael@0 442 bool mInTimeoutSecondHalf;
michael@0 443
michael@0 444 // Worker-thread only; sequence numbers for messages that require
michael@0 445 // synchronous replies.
michael@0 446 int32_t mNextSeqno;
michael@0 447
michael@0 448 static bool sIsPumpingMessages;
michael@0 449
michael@0 450 class AutoEnterPendingReply {
michael@0 451 public:
michael@0 452 AutoEnterPendingReply(size_t &replyVar)
michael@0 453 : mReplyVar(replyVar)
michael@0 454 {
michael@0 455 mReplyVar++;
michael@0 456 }
michael@0 457 ~AutoEnterPendingReply() {
michael@0 458 mReplyVar--;
michael@0 459 }
michael@0 460 private:
michael@0 461 size_t& mReplyVar;
michael@0 462 };
michael@0 463
michael@0 464 // Worker-thread only; type we're expecting for the reply to a sync
michael@0 465 // out-message. This will never be greater than 1.
michael@0 466 size_t mPendingSyncReplies;
michael@0 467
michael@0 468 // Worker-thread only; Number of urgent and rpc replies we're waiting on.
michael@0 469 // These are mutually exclusive since one channel cannot have outcalls of
michael@0 470 // both kinds.
michael@0 471 size_t mPendingUrgentReplies;
michael@0 472 size_t mPendingRPCReplies;
michael@0 473
michael@0 474 // When we send an urgent request from the parent process, we could race
michael@0 475 // with an RPC message that was issued by the child beforehand. In this
michael@0 476 // case, if the parent were to wake up while waiting for the urgent reply,
michael@0 477 // and process the RPC, it could send an additional urgent message. The
michael@0 478 // child would wake up to process the urgent message (as it always will),
michael@0 479 // then send a reply, which could be received by the parent out-of-order
michael@0 480 // with respect to the first urgent reply.
michael@0 481 //
michael@0 482 // To address this problem, urgent or RPC requests are associated with a
michael@0 483 // "transaction". Whenever one side of the channel wishes to start a
michael@0 484 // chain of RPC/urgent messages, it allocates a new transaction ID. Any
michael@0 485 // messages the parent receives, not apart of this transaction, are
michael@0 486 // deferred. When issuing RPC/urgent requests on top of a started
michael@0 487 // transaction, the initiating transaction ID is used.
michael@0 488 //
michael@0 489 // To ensure IDs are unique, we use sequence numbers for transaction IDs,
michael@0 490 // which grow in opposite directions from child to parent.
michael@0 491
michael@0 492 // The current transaction ID.
michael@0 493 int32_t mCurrentRPCTransaction;
michael@0 494
    // RAII helper that maintains mCurrentRPCTransaction (see the long
    // comment above) and restores the previous transaction on exit.
    class AutoEnterRPCTransaction
    {
      public:
        // Outgoing path: begin (or stay in) a transaction. If none is
        // active, allocate a fresh ID from the sequence-number space.
        AutoEnterRPCTransaction(MessageChannel *aChan)
          : mChan(aChan),
            mOldTransaction(mChan->mCurrentRPCTransaction)
        {
            mChan->mMonitor->AssertCurrentThreadOwns();
            if (mChan->mCurrentRPCTransaction == 0)
                mChan->mCurrentRPCTransaction = mChan->NextSeqno();
        }
        // Incoming path: adopt the transaction carried by |message|.
        // Only RPC/urgent messages participate; any other message type
        // leaves the current transaction untouched.
        AutoEnterRPCTransaction(MessageChannel *aChan, Message *message)
          : mChan(aChan),
            mOldTransaction(mChan->mCurrentRPCTransaction)
        {
            mChan->mMonitor->AssertCurrentThreadOwns();

            if (!message->is_rpc() && !message->is_urgent())
                return;

            // On the parent, an incoming message must belong to the
            // transaction we are already in (or we must be in none).
            MOZ_ASSERT_IF(mChan->mSide == ParentSide,
                          !mOldTransaction || mOldTransaction == message->transaction_id());
            mChan->mCurrentRPCTransaction = message->transaction_id();
        }
        // Restore whatever transaction was current on entry.
        ~AutoEnterRPCTransaction() {
            mChan->mMonitor->AssertCurrentThreadOwns();
            mChan->mCurrentRPCTransaction = mOldTransaction;
        }

      private:
        MessageChannel *mChan;      // borrowed; outlives this guard
        int32_t mOldTransaction;    // transaction ID saved on entry
    };
michael@0 528
michael@0 529 // If waiting for the reply to a sync out-message, it will be saved here
michael@0 530 // on the I/O thread and then read and cleared by the worker thread.
michael@0 531 nsAutoPtr<Message> mRecvd;
michael@0 532
michael@0 533 // Set while we are dispatching a synchronous message.
michael@0 534 bool mDispatchingSyncMessage;
michael@0 535
michael@0 536 // Count of the recursion depth of dispatching urgent messages.
michael@0 537 size_t mDispatchingUrgentMessageCount;
michael@0 538
michael@0 539 // Queue of all incoming messages, except for replies to sync and urgent
michael@0 540 // messages, which are delivered directly to mRecvd, and any pending urgent
michael@0 541 // incall, which is stored in mPendingUrgentRequest.
michael@0 542 //
michael@0 543 // If both this side and the other side are functioning correctly, the queue
michael@0 544 // can only be in certain configurations. Let
michael@0 545 //
michael@0 546 // |A<| be an async in-message,
michael@0 547 // |S<| be a sync in-message,
michael@0 548 // |C<| be an Interrupt in-call,
michael@0 549 // |R<| be an Interrupt reply.
michael@0 550 //
michael@0 551 // The queue can only match this configuration
michael@0 552 //
michael@0 553 // A<* (S< | C< | R< (?{mStack.size() == 1} A<* (S< | C<)))
michael@0 554 //
michael@0 555 // The other side can send as many async messages |A<*| as it wants before
michael@0 556 // sending us a blocking message.
michael@0 557 //
michael@0 558 // The first case is |S<|, a sync in-msg. The other side must be blocked,
michael@0 559 // and thus can't send us any more messages until we process the sync
michael@0 560 // in-msg.
michael@0 561 //
michael@0 562 // The second case is |C<|, an Interrupt in-call; the other side must be blocked.
michael@0 563 // (There's a subtlety here: this in-call might have raced with an
michael@0 564 // out-call, but we detect that with the mechanism below,
michael@0 565 // |mRemoteStackDepth|, and races don't matter to the queue.)
michael@0 566 //
michael@0 567 // Final case, the other side replied to our most recent out-call |R<|.
michael@0 568 // If that was the *only* out-call on our stack, |?{mStack.size() == 1}|,
michael@0 569 // then other side "finished with us," and went back to its own business.
michael@0 570 // That business might have included sending any number of async message
michael@0 571 // |A<*| until sending a blocking message |(S< | C<)|. If we had more than
michael@0 572 // one Interrupt call on our stack, the other side *better* not have sent us
michael@0 573 // another blocking message, because it's blocked on a reply from us.
michael@0 574 //
michael@0 575 MessageQueue mPending;
michael@0 576
michael@0 577 // Note that these two pointers are mutually exclusive. One channel cannot
michael@0 578 // send both urgent requests (parent -> child) and RPC calls (child->parent).
michael@0 579 // Also note that since initiating either requires blocking, they cannot
michael@0 580 // queue up on the other side. One message slot is enough.
michael@0 581 //
michael@0 582 // Normally, all other message types are deferred into into mPending, and
michael@0 583 // only these two types have special treatment (since they wake up blocked
michael@0 584 // requests). However, when an RPC in-call races with an urgent out-call,
michael@0 585 // the RPC message will be put into mPending instead of its slot below.
michael@0 586 nsAutoPtr<Message> mPendingUrgentRequest;
michael@0 587 nsAutoPtr<Message> mPendingRPCCall;
michael@0 588
michael@0 589 // Stack of all the out-calls on which this channel is awaiting responses.
michael@0 590 // Each stack refers to a different protocol and the stacks are mutually
michael@0 591 // exclusive: multiple outcalls of the same kind cannot be initiated while
michael@0 592 // another is active.
michael@0 593 std::stack<Message> mInterruptStack;
michael@0 594
michael@0 595 // This is what we think the Interrupt stack depth is on the "other side" of this
michael@0 596 // Interrupt channel. We maintain this variable so that we can detect racy Interrupt
michael@0 597 // calls. With each Interrupt out-call sent, we send along what *we* think the
michael@0 598 // stack depth of the remote side is *before* it will receive the Interrupt call.
michael@0 599 //
michael@0 600 // After sending the out-call, our stack depth is "incremented" by pushing
michael@0 601 // that pending message onto mPending.
michael@0 602 //
michael@0 603 // Then when processing an in-call |c|, it must be true that
michael@0 604 //
michael@0 605 // mStack.size() == c.remoteDepth
michael@0 606 //
michael@0 607 // I.e., my depth is actually the same as what the other side thought it
michael@0 608 // was when it sent in-call |c|. If this fails to hold, we have detected
michael@0 609 // racy Interrupt calls.
michael@0 610 //
michael@0 611 // We then increment mRemoteStackDepth *just before* processing the
michael@0 612 // in-call, since we know the other side is waiting on it, and decrement
michael@0 613 // it *just after* finishing processing that in-call, since our response
michael@0 614 // will pop the top of the other side's |mPending|.
michael@0 615 //
michael@0 616 // One nice aspect of this race detection is that it is symmetric; if one
michael@0 617 // side detects a race, then the other side must also detect the same race.
michael@0 618 size_t mRemoteStackDepthGuess;
michael@0 619
michael@0 620 // Approximation of code frames on the C++ stack. It can only be
michael@0 621 // interpreted as the implication:
michael@0 622 //
michael@0 623 // !mCxxStackFrames.empty() => MessageChannel code on C++ stack
michael@0 624 //
michael@0 625 // This member is only accessed on the worker thread, and so is not
michael@0 626 // protected by mMonitor. It is managed exclusively by the helper
michael@0 627 // |class CxxStackFrame|.
michael@0 628 mozilla::Vector<InterruptFrame> mCxxStackFrames;
michael@0 629
michael@0 630 // Did we process an Interrupt out-call during this stack? Only meaningful in
michael@0 631 // ExitedCxxStack(), from which this variable is reset.
michael@0 632 bool mSawInterruptOutMsg;
michael@0 633
michael@0 634 // Map of replies received "out of turn", because of Interrupt
michael@0 635 // in-calls racing with replies to outstanding in-calls. See
michael@0 636 // https://bugzilla.mozilla.org/show_bug.cgi?id=521929.
michael@0 637 MessageMap mOutOfTurnReplies;
michael@0 638
michael@0 639 // Stack of Interrupt in-calls that were deferred because of race
michael@0 640 // conditions.
michael@0 641 std::stack<Message> mDeferred;
michael@0 642
michael@0 643 #ifdef OS_WIN
michael@0 644 HANDLE mEvent;
michael@0 645 #endif
michael@0 646
michael@0 647 // Should the channel abort the process from the I/O thread when
michael@0 648 // a channel error occurs?
michael@0 649 bool mAbortOnError;
michael@0 650
michael@0 651 // See SetChannelFlags
michael@0 652 ChannelFlags mFlags;
michael@0 653 };
michael@0 654
michael@0 655 } // namespace ipc
michael@0 656 } // namespace mozilla
michael@0 657
michael@0 658 #endif // ifndef ipc_glue_MessageChannel_h

mercurial