1.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 1.2 +++ b/ipc/glue/MessageChannel.h Wed Dec 31 06:09:35 2014 +0100 1.3 @@ -0,0 +1,658 @@ 1.4 +/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- 1.5 + * vim: sw=4 ts=4 et : 1.6 + */ 1.7 +/* This Source Code Form is subject to the terms of the Mozilla Public 1.8 + * License, v. 2.0. If a copy of the MPL was not distributed with this 1.9 + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ 1.10 + 1.11 +#ifndef ipc_glue_MessageChannel_h 1.12 +#define ipc_glue_MessageChannel_h 1 1.13 + 1.14 +#include "base/basictypes.h" 1.15 +#include "base/message_loop.h" 1.16 + 1.17 +#include "mozilla/Monitor.h" 1.18 +#include "mozilla/Vector.h" 1.19 +#include "mozilla/WeakPtr.h" 1.20 +#include "mozilla/ipc/Transport.h" 1.21 +#include "MessageLink.h" 1.22 +#include "nsAutoPtr.h" 1.23 + 1.24 +#include <deque> 1.25 +#include <stack> 1.26 +#include <math.h> 1.27 + 1.28 +namespace mozilla { 1.29 +namespace ipc { 1.30 + 1.31 +class MessageChannel; 1.32 + 1.33 +class RefCountedMonitor : public Monitor 1.34 +{ 1.35 + public: 1.36 + RefCountedMonitor() 1.37 + : Monitor("mozilla.ipc.MessageChannel.mMonitor") 1.38 + {} 1.39 + 1.40 + NS_INLINE_DECL_THREADSAFE_REFCOUNTING(RefCountedMonitor) 1.41 +}; 1.42 + 1.43 +class MessageChannel : HasResultCodes 1.44 +{ 1.45 + friend class ProcessLink; 1.46 + friend class ThreadLink; 1.47 + friend class AutoEnterRPCTransaction; 1.48 + 1.49 + class CxxStackFrame; 1.50 + class InterruptFrame; 1.51 + 1.52 + typedef mozilla::Monitor Monitor; 1.53 + 1.54 + public: 1.55 + static const int32_t kNoTimeout; 1.56 + 1.57 + typedef IPC::Message Message; 1.58 + typedef mozilla::ipc::Transport Transport; 1.59 + 1.60 + MessageChannel(MessageListener *aListener); 1.61 + ~MessageChannel(); 1.62 + 1.63 + // "Open" from the perspective of the transport layer; the underlying 1.64 + // socketpair/pipe should already be created. 
1.65 + // 1.66 + // Returns true if the transport layer was successfully connected, 1.67 + // i.e., mChannelState == ChannelConnected. 1.68 + bool Open(Transport* aTransport, MessageLoop* aIOLoop=0, Side aSide=UnknownSide); 1.69 + 1.70 + // "Open" a connection to another thread in the same process. 1.71 + // 1.72 + // Returns true if the transport layer was successfully connected, 1.73 + // i.e., mChannelState == ChannelConnected. 1.74 + // 1.75 + // For more details on the process of opening a channel between 1.76 + // threads, see the extended comment on this function 1.77 + // in MessageChannel.cpp. 1.78 + bool Open(MessageChannel *aTargetChan, MessageLoop *aTargetLoop, Side aSide); 1.79 + 1.80 + // Close the underlying transport channel. 1.81 + void Close(); 1.82 + 1.83 + // Force the channel to behave as if a channel error occurred. Valid 1.84 + // for process links only, not thread links. 1.85 + void CloseWithError(); 1.86 + 1.87 + void SetAbortOnError(bool abort) 1.88 + { 1.89 + mAbortOnError = true; 1.90 + } 1.91 + 1.92 + // Misc. behavioral traits consumers can request for this channel 1.93 + enum ChannelFlags { 1.94 + REQUIRE_DEFAULT = 0, 1.95 + // Windows: if this channel operates on the UI thread, indicates 1.96 + // WindowsMessageLoop code should enable deferred native message 1.97 + // handling to prevent deadlocks. Should only be used for protocols 1.98 + // that manage child processes which might create native UI, like 1.99 + // plugins. 
1.100 + REQUIRE_DEFERRED_MESSAGE_PROTECTION = 1 << 0 1.101 + }; 1.102 + void SetChannelFlags(ChannelFlags aFlags) { mFlags = aFlags; } 1.103 + ChannelFlags GetChannelFlags() { return mFlags; } 1.104 + 1.105 + // Asynchronously send a message to the other side of the channel 1.106 + bool Send(Message* aMsg); 1.107 + 1.108 + // Asynchronously deliver a message back to this side of the 1.109 + // channel 1.110 + bool Echo(Message* aMsg); 1.111 + 1.112 + // Synchronously send |msg| (i.e., wait for |reply|) 1.113 + bool Send(Message* aMsg, Message* aReply); 1.114 + 1.115 + // Make an Interrupt call to the other side of the channel 1.116 + bool Call(Message* aMsg, Message* aReply); 1.117 + 1.118 + bool CanSend() const; 1.119 + 1.120 + void SetReplyTimeoutMs(int32_t aTimeoutMs); 1.121 + 1.122 + bool IsOnCxxStack() const { 1.123 + return !mCxxStackFrames.empty(); 1.124 + } 1.125 + 1.126 + void FlushPendingInterruptQueue(); 1.127 + 1.128 + // Unsound_IsClosed and Unsound_NumQueuedMessages are safe to call from any 1.129 + // thread, but they make no guarantees about whether you'll get an 1.130 + // up-to-date value; the values are written on one thread and read without 1.131 + // locking, on potentially different threads. Thus you should only use 1.132 + // them when you don't particularly care about getting a recent value (e.g. 1.133 + // in a memory report). 1.134 + bool Unsound_IsClosed() const { 1.135 + return mLink ? mLink->Unsound_IsClosed() : true; 1.136 + } 1.137 + uint32_t Unsound_NumQueuedMessages() const { 1.138 + return mLink ? 
mLink->Unsound_NumQueuedMessages() : 0; 1.139 + } 1.140 + 1.141 + static bool IsPumpingMessages() { 1.142 + return sIsPumpingMessages; 1.143 + } 1.144 + static void SetIsPumpingMessages(bool aIsPumping) { 1.145 + sIsPumpingMessages = aIsPumping; 1.146 + } 1.147 + 1.148 +#ifdef OS_WIN 1.149 + struct MOZ_STACK_CLASS SyncStackFrame 1.150 + { 1.151 + SyncStackFrame(MessageChannel* channel, bool interrupt); 1.152 + ~SyncStackFrame(); 1.153 + 1.154 + bool mInterrupt; 1.155 + bool mSpinNestedEvents; 1.156 + bool mListenerNotified; 1.157 + MessageChannel* mChannel; 1.158 + 1.159 + // The previous stack frame for this channel. 1.160 + SyncStackFrame* mPrev; 1.161 + 1.162 + // The previous stack frame on any channel. 1.163 + SyncStackFrame* mStaticPrev; 1.164 + }; 1.165 + friend struct MessageChannel::SyncStackFrame; 1.166 + 1.167 + static bool IsSpinLoopActive() { 1.168 + for (SyncStackFrame* frame = sStaticTopFrame; frame; frame = frame->mPrev) { 1.169 + if (frame->mSpinNestedEvents) 1.170 + return true; 1.171 + } 1.172 + return false; 1.173 + } 1.174 + 1.175 + protected: 1.176 + // The deepest sync stack frame for this channel. 1.177 + SyncStackFrame* mTopFrame; 1.178 + 1.179 + bool mIsSyncWaitingOnNonMainThread; 1.180 + 1.181 + // The deepest sync stack frame on any channel. 
1.182 + static SyncStackFrame* sStaticTopFrame; 1.183 + 1.184 + public: 1.185 + void ProcessNativeEventsInInterruptCall(); 1.186 + static void NotifyGeckoEventDispatch(); 1.187 + 1.188 + private: 1.189 + void SpinInternalEventLoop(); 1.190 +#endif 1.191 + 1.192 + private: 1.193 + void CommonThreadOpenInit(MessageChannel *aTargetChan, Side aSide); 1.194 + void OnOpenAsSlave(MessageChannel *aTargetChan, Side aSide); 1.195 + 1.196 + void PostErrorNotifyTask(); 1.197 + void OnNotifyMaybeChannelError(); 1.198 + void ReportConnectionError(const char* aChannelName) const; 1.199 + void ReportMessageRouteError(const char* channelName) const; 1.200 + bool MaybeHandleError(Result code, const char* channelName); 1.201 + 1.202 + void Clear(); 1.203 + 1.204 + // Send OnChannelConnected notification to listeners. 1.205 + void DispatchOnChannelConnected(int32_t peer_pid); 1.206 + 1.207 + // Any protocol that requires blocking until a reply arrives, will send its 1.208 + // outgoing message through this function. Currently, two protocols do this: 1.209 + // 1.210 + // sync, which can only initiate messages from child to parent. 1.211 + // urgent, which can only initiate messages from parent to child. 1.212 + // 1.213 + // SendAndWait() expects that the worker thread owns the monitor, and that 1.214 + // the message has been prepared to be sent over the link. It returns as 1.215 + // soon as a reply has been received, or an error has occurred. 1.216 + // 1.217 + // Note that while the child is blocked waiting for a sync reply, it can wake 1.218 + // up to process urgent calls from the parent. 
1.219 + bool SendAndWait(Message* aMsg, Message* aReply); 1.220 + 1.221 + bool RPCCall(Message* aMsg, Message* aReply); 1.222 + bool InterruptCall(Message* aMsg, Message* aReply); 1.223 + bool UrgentCall(Message* aMsg, Message* aReply); 1.224 + 1.225 + bool InterruptEventOccurred(); 1.226 + 1.227 + bool ProcessPendingUrgentRequest(); 1.228 + bool ProcessPendingRPCCall(); 1.229 + 1.230 + void MaybeUndeferIncall(); 1.231 + void EnqueuePendingMessages(); 1.232 + 1.233 + // Executed on the worker thread. Dequeues one pending message. 1.234 + bool OnMaybeDequeueOne(); 1.235 + bool DequeueOne(Message *recvd); 1.236 + 1.237 + // Dispatches an incoming message to its appropriate handler. 1.238 + void DispatchMessage(const Message &aMsg); 1.239 + 1.240 + // DispatchMessage will route to one of these functions depending on the 1.241 + // protocol type of the message. 1.242 + void DispatchSyncMessage(const Message &aMsg); 1.243 + void DispatchUrgentMessage(const Message &aMsg); 1.244 + void DispatchAsyncMessage(const Message &aMsg); 1.245 + void DispatchRPCMessage(const Message &aMsg); 1.246 + void DispatchInterruptMessage(const Message &aMsg, size_t aStackDepth); 1.247 + 1.248 + // Return true if the wait ended because a notification was received. 1.249 + // 1.250 + // Return false if the time elapsed from when we started the process of 1.251 + // waiting until afterwards exceeded the currently allotted timeout. 1.252 + // That *DOES NOT* mean false => "no event" (== timeout); there are many 1.253 + // circumstances that could cause the measured elapsed time to exceed the 1.254 + // timeout EVEN WHEN we were notified. 1.255 + // 1.256 + // So in sum: true is a meaningful return value; false isn't, 1.257 + // necessarily. 
1.258 + bool WaitForSyncNotify(); 1.259 + bool WaitForInterruptNotify(); 1.260 + 1.261 + bool WaitResponse(bool aWaitTimedOut); 1.262 + 1.263 + bool ShouldContinueFromTimeout(); 1.264 + 1.265 + // The "remote view of stack depth" can be different than the 1.266 + // actual stack depth when there are out-of-turn replies. When we 1.267 + // receive one, our actual Interrupt stack depth doesn't decrease, but 1.268 + // the other side (that sent the reply) thinks it has. So, the 1.269 + // "view" returned here is |stackDepth| minus the number of 1.270 + // out-of-turn replies. 1.271 + // 1.272 + // Only called from the worker thread. 1.273 + size_t RemoteViewOfStackDepth(size_t stackDepth) const { 1.274 + AssertWorkerThread(); 1.275 + return stackDepth - mOutOfTurnReplies.size(); 1.276 + } 1.277 + 1.278 + int32_t NextSeqno() { 1.279 + AssertWorkerThread(); 1.280 + return (mSide == ChildSide) ? --mNextSeqno : ++mNextSeqno; 1.281 + } 1.282 + 1.283 + // This helper class manages mCxxStackDepth on behalf of MessageChannel. 1.284 + // When the stack depth is incremented from zero to non-zero, it invokes 1.285 + // a callback, and similarly for when the depth goes from non-zero to zero. 1.286 + void EnteredCxxStack() { 1.287 + mListener->OnEnteredCxxStack(); 1.288 + } 1.289 + 1.290 + void ExitedCxxStack(); 1.291 + 1.292 + void EnteredCall() { 1.293 + mListener->OnEnteredCall(); 1.294 + } 1.295 + 1.296 + void ExitedCall() { 1.297 + mListener->OnExitedCall(); 1.298 + } 1.299 + 1.300 + MessageListener *Listener() const { 1.301 + return mListener.get(); 1.302 + } 1.303 + 1.304 + void DebugAbort(const char* file, int line, const char* cond, 1.305 + const char* why, 1.306 + bool reply=false) const; 1.307 + 1.308 + // This method is only safe to call on the worker thread, or in a 1.309 + // debugger with all threads paused. 
1.310 + void DumpInterruptStack(const char* const pfx="") const; 1.311 + 1.312 + private: 1.313 + // Called from both threads 1.314 + size_t InterruptStackDepth() const { 1.315 + mMonitor->AssertCurrentThreadOwns(); 1.316 + return mInterruptStack.size(); 1.317 + } 1.318 + 1.319 + // Returns true if we're blocking waiting for a reply. 1.320 + bool AwaitingSyncReply() const { 1.321 + mMonitor->AssertCurrentThreadOwns(); 1.322 + return mPendingSyncReplies > 0; 1.323 + } 1.324 + bool AwaitingUrgentReply() const { 1.325 + mMonitor->AssertCurrentThreadOwns(); 1.326 + return mPendingUrgentReplies > 0; 1.327 + } 1.328 + bool AwaitingRPCReply() const { 1.329 + mMonitor->AssertCurrentThreadOwns(); 1.330 + return mPendingRPCReplies > 0; 1.331 + } 1.332 + bool AwaitingInterruptReply() const { 1.333 + mMonitor->AssertCurrentThreadOwns(); 1.334 + return !mInterruptStack.empty(); 1.335 + } 1.336 + 1.337 + // Returns true if we're dispatching a sync message's callback. 1.338 + bool DispatchingSyncMessage() const { 1.339 + return mDispatchingSyncMessage; 1.340 + } 1.341 + 1.342 + // Returns true if we're dispatching an urgent message's callback. 1.343 + bool DispatchingUrgentMessage() const { 1.344 + return mDispatchingUrgentMessageCount > 0; 1.345 + } 1.346 + 1.347 + bool Connected() const; 1.348 + 1.349 + private: 1.350 + // Executed on the IO thread. 1.351 + void NotifyWorkerThread(); 1.352 + 1.353 + // Return true if |aMsg| is a special message targeted at the IO 1.354 + // thread, in which case it shouldn't be delivered to the worker. 1.355 + bool MaybeInterceptSpecialIOMessage(const Message& aMsg); 1.356 + 1.357 + void OnChannelConnected(int32_t peer_id); 1.358 + 1.359 + // Tell the IO thread to close the channel and wait for it to ACK. 1.360 + void SynchronouslyClose(); 1.361 + 1.362 + void OnMessageReceivedFromLink(const Message& aMsg); 1.363 + void OnChannelErrorFromLink(); 1.364 + 1.365 + private: 1.366 + // Run on the not current thread. 
1.367 + void NotifyChannelClosed(); 1.368 + void NotifyMaybeChannelError(); 1.369 + 1.370 + private: 1.371 + // Can be run on either thread 1.372 + void AssertWorkerThread() const 1.373 + { 1.374 + NS_ABORT_IF_FALSE(mWorkerLoopID == MessageLoop::current()->id(), 1.375 + "not on worker thread!"); 1.376 + } 1.377 + 1.378 + // The "link" thread is either the I/O thread (ProcessLink) or the 1.379 + // other actor's work thread (ThreadLink). In either case, it is 1.380 + // NOT our worker thread. 1.381 + void AssertLinkThread() const 1.382 + { 1.383 + NS_ABORT_IF_FALSE(mWorkerLoopID != MessageLoop::current()->id(), 1.384 + "on worker thread but should not be!"); 1.385 + } 1.386 + 1.387 + private: 1.388 + typedef IPC::Message::msgid_t msgid_t; 1.389 + typedef std::deque<Message> MessageQueue; 1.390 + typedef std::map<size_t, Message> MessageMap; 1.391 + 1.392 + // All dequeuing tasks require a single point of cancellation, 1.393 + // which is handled via a reference-counted task. 1.394 + class RefCountedTask 1.395 + { 1.396 + public: 1.397 + RefCountedTask(CancelableTask* aTask) 1.398 + : mTask(aTask) 1.399 + { } 1.400 + ~RefCountedTask() { delete mTask; } 1.401 + void Run() { mTask->Run(); } 1.402 + void Cancel() { mTask->Cancel(); } 1.403 + 1.404 + NS_INLINE_DECL_THREADSAFE_REFCOUNTING(RefCountedTask) 1.405 + 1.406 + private: 1.407 + CancelableTask* mTask; 1.408 + }; 1.409 + 1.410 + // Wrap an existing task which can be cancelled at any time 1.411 + // without the wrapper's knowledge. 
1.412 + class DequeueTask : public Task 1.413 + { 1.414 + public: 1.415 + DequeueTask(RefCountedTask* aTask) 1.416 + : mTask(aTask) 1.417 + { } 1.418 + void Run() { mTask->Run(); } 1.419 + 1.420 + private: 1.421 + nsRefPtr<RefCountedTask> mTask; 1.422 + }; 1.423 + 1.424 + private: 1.425 + mozilla::WeakPtr<MessageListener> mListener; 1.426 + ChannelState mChannelState; 1.427 + nsRefPtr<RefCountedMonitor> mMonitor; 1.428 + Side mSide; 1.429 + MessageLink* mLink; 1.430 + MessageLoop* mWorkerLoop; // thread where work is done 1.431 + CancelableTask* mChannelErrorTask; // NotifyMaybeChannelError runnable 1.432 + 1.433 + // id() of mWorkerLoop. This persists even after mWorkerLoop is cleared 1.434 + // during channel shutdown. 1.435 + int mWorkerLoopID; 1.436 + 1.437 + // A task encapsulating dequeuing one pending message. 1.438 + nsRefPtr<RefCountedTask> mDequeueOneTask; 1.439 + 1.440 + // Timeout periods are broken up in two to prevent system suspension from 1.441 + // triggering an abort. This method (called by WaitForEvent with a 'did 1.442 + // timeout' flag) decides if we should wait again for half of mTimeoutMs 1.443 + // or give up. 1.444 + int32_t mTimeoutMs; 1.445 + bool mInTimeoutSecondHalf; 1.446 + 1.447 + // Worker-thread only; sequence numbers for messages that require 1.448 + // synchronous replies. 1.449 + int32_t mNextSeqno; 1.450 + 1.451 + static bool sIsPumpingMessages; 1.452 + 1.453 + class AutoEnterPendingReply { 1.454 + public: 1.455 + AutoEnterPendingReply(size_t &replyVar) 1.456 + : mReplyVar(replyVar) 1.457 + { 1.458 + mReplyVar++; 1.459 + } 1.460 + ~AutoEnterPendingReply() { 1.461 + mReplyVar--; 1.462 + } 1.463 + private: 1.464 + size_t& mReplyVar; 1.465 + }; 1.466 + 1.467 + // Worker-thread only; type we're expecting for the reply to a sync 1.468 + // out-message. This will never be greater than 1. 1.469 + size_t mPendingSyncReplies; 1.470 + 1.471 + // Worker-thread only; Number of urgent and rpc replies we're waiting on. 
1.472 + // These are mutually exclusive since one channel cannot have outcalls of 1.473 + // both kinds. 1.474 + size_t mPendingUrgentReplies; 1.475 + size_t mPendingRPCReplies; 1.476 + 1.477 + // When we send an urgent request from the parent process, we could race 1.478 + // with an RPC message that was issued by the child beforehand. In this 1.479 + // case, if the parent were to wake up while waiting for the urgent reply, 1.480 + // and process the RPC, it could send an additional urgent message. The 1.481 + // child would wake up to process the urgent message (as it always will), 1.482 + // then send a reply, which could be received by the parent out-of-order 1.483 + // with respect to the first urgent reply. 1.484 + // 1.485 + // To address this problem, urgent or RPC requests are associated with a 1.486 + // "transaction". Whenever one side of the channel wishes to start a 1.487 + // chain of RPC/urgent messages, it allocates a new transaction ID. Any 1.488 + // messages the parent receives, not a part of this transaction, are 1.489 + // deferred. When issuing RPC/urgent requests on top of a started 1.490 + // transaction, the initiating transaction ID is used. 1.491 + // 1.492 + // To ensure IDs are unique, we use sequence numbers for transaction IDs, 1.493 + // which grow in opposite directions from child to parent. 1.494 + 1.495 + // The current transaction ID.
1.496 + int32_t mCurrentRPCTransaction; 1.497 + 1.498 + class AutoEnterRPCTransaction 1.499 + { 1.500 + public: 1.501 + AutoEnterRPCTransaction(MessageChannel *aChan) 1.502 + : mChan(aChan), 1.503 + mOldTransaction(mChan->mCurrentRPCTransaction) 1.504 + { 1.505 + mChan->mMonitor->AssertCurrentThreadOwns(); 1.506 + if (mChan->mCurrentRPCTransaction == 0) 1.507 + mChan->mCurrentRPCTransaction = mChan->NextSeqno(); 1.508 + } 1.509 + AutoEnterRPCTransaction(MessageChannel *aChan, Message *message) 1.510 + : mChan(aChan), 1.511 + mOldTransaction(mChan->mCurrentRPCTransaction) 1.512 + { 1.513 + mChan->mMonitor->AssertCurrentThreadOwns(); 1.514 + 1.515 + if (!message->is_rpc() && !message->is_urgent()) 1.516 + return; 1.517 + 1.518 + MOZ_ASSERT_IF(mChan->mSide == ParentSide, 1.519 + !mOldTransaction || mOldTransaction == message->transaction_id()); 1.520 + mChan->mCurrentRPCTransaction = message->transaction_id(); 1.521 + } 1.522 + ~AutoEnterRPCTransaction() { 1.523 + mChan->mMonitor->AssertCurrentThreadOwns(); 1.524 + mChan->mCurrentRPCTransaction = mOldTransaction; 1.525 + } 1.526 + 1.527 + private: 1.528 + MessageChannel *mChan; 1.529 + int32_t mOldTransaction; 1.530 + }; 1.531 + 1.532 + // If waiting for the reply to a sync out-message, it will be saved here 1.533 + // on the I/O thread and then read and cleared by the worker thread. 1.534 + nsAutoPtr<Message> mRecvd; 1.535 + 1.536 + // Set while we are dispatching a synchronous message. 1.537 + bool mDispatchingSyncMessage; 1.538 + 1.539 + // Count of the recursion depth of dispatching urgent messages. 1.540 + size_t mDispatchingUrgentMessageCount; 1.541 + 1.542 + // Queue of all incoming messages, except for replies to sync and urgent 1.543 + // messages, which are delivered directly to mRecvd, and any pending urgent 1.544 + // incall, which is stored in mPendingUrgentRequest. 
1.545 + // 1.546 + // If both this side and the other side are functioning correctly, the queue 1.547 + // can only be in certain configurations. Let 1.548 + // 1.549 + // |A<| be an async in-message, 1.550 + // |S<| be a sync in-message, 1.551 + // |C<| be an Interrupt in-call, 1.552 + // |R<| be an Interrupt reply. 1.553 + // 1.554 + // The queue can only match this configuration 1.555 + // 1.556 + // A<* (S< | C< | R< (?{mStack.size() == 1} A<* (S< | C<))) 1.557 + // 1.558 + // The other side can send as many async messages |A<*| as it wants before 1.559 + // sending us a blocking message. 1.560 + // 1.561 + // The first case is |S<|, a sync in-msg. The other side must be blocked, 1.562 + // and thus can't send us any more messages until we process the sync 1.563 + // in-msg. 1.564 + // 1.565 + // The second case is |C<|, an Interrupt in-call; the other side must be blocked. 1.566 + // (There's a subtlety here: this in-call might have raced with an 1.567 + // out-call, but we detect that with the mechanism below, 1.568 + // |mRemoteStackDepth|, and races don't matter to the queue.) 1.569 + // 1.570 + // Final case, the other side replied to our most recent out-call |R<|. 1.571 + // If that was the *only* out-call on our stack, |?{mStack.size() == 1}|, 1.572 + // then other side "finished with us," and went back to its own business. 1.573 + // That business might have included sending any number of async message 1.574 + // |A<*| until sending a blocking message |(S< | C<)|. If we had more than 1.575 + // one Interrupt call on our stack, the other side *better* not have sent us 1.576 + // another blocking message, because it's blocked on a reply from us. 1.577 + // 1.578 + MessageQueue mPending; 1.579 + 1.580 + // Note that these two pointers are mutually exclusive. One channel cannot 1.581 + // send both urgent requests (parent -> child) and RPC calls (child->parent). 
1.582 + // Also note that since initiating either requires blocking, they cannot 1.583 + // queue up on the other side. One message slot is enough. 1.584 + // 1.585 + // Normally, all other message types are deferred into mPending, and 1.586 + // only these two types have special treatment (since they wake up blocked 1.587 + // requests). However, when an RPC in-call races with an urgent out-call, 1.588 + // the RPC message will be put into mPending instead of its slot below. 1.589 + nsAutoPtr<Message> mPendingUrgentRequest; 1.590 + nsAutoPtr<Message> mPendingRPCCall; 1.591 + 1.592 + // Stack of all the out-calls on which this channel is awaiting responses. 1.593 + // Each stack refers to a different protocol and the stacks are mutually 1.594 + // exclusive: multiple outcalls of the same kind cannot be initiated while 1.595 + // another is active. 1.596 + std::stack<Message> mInterruptStack; 1.597 + 1.598 + // This is what we think the Interrupt stack depth is on the "other side" of this 1.599 + // Interrupt channel. We maintain this variable so that we can detect racy Interrupt 1.600 + // calls. With each Interrupt out-call sent, we send along what *we* think the 1.601 + // stack depth of the remote side is *before* it will receive the Interrupt call. 1.602 + // 1.603 + // After sending the out-call, our stack depth is "incremented" by pushing 1.604 + // that pending message onto mPending. 1.605 + // 1.606 + // Then when processing an in-call |c|, it must be true that 1.607 + // 1.608 + // mStack.size() == c.remoteDepth 1.609 + // 1.610 + // I.e., my depth is actually the same as what the other side thought it 1.611 + // was when it sent in-call |c|. If this fails to hold, we have detected 1.612 + // racy Interrupt calls.
1.613 + // 1.614 + // We then increment mRemoteStackDepth *just before* processing the 1.615 + // in-call, since we know the other side is waiting on it, and decrement 1.616 + // it *just after* finishing processing that in-call, since our response 1.617 + // will pop the top of the other side's |mPending|. 1.618 + // 1.619 + // One nice aspect of this race detection is that it is symmetric; if one 1.620 + // side detects a race, then the other side must also detect the same race. 1.621 + size_t mRemoteStackDepthGuess; 1.622 + 1.623 + // Approximation of code frames on the C++ stack. It can only be 1.624 + // interpreted as the implication: 1.625 + // 1.626 + // !mCxxStackFrames.empty() => MessageChannel code on C++ stack 1.627 + // 1.628 + // This member is only accessed on the worker thread, and so is not 1.629 + // protected by mMonitor. It is managed exclusively by the helper 1.630 + // |class CxxStackFrame|. 1.631 + mozilla::Vector<InterruptFrame> mCxxStackFrames; 1.632 + 1.633 + // Did we process an Interrupt out-call during this stack? Only meaningful in 1.634 + // ExitedCxxStack(), from which this variable is reset. 1.635 + bool mSawInterruptOutMsg; 1.636 + 1.637 + // Map of replies received "out of turn", because of Interrupt 1.638 + // in-calls racing with replies to outstanding in-calls. See 1.639 + // https://bugzilla.mozilla.org/show_bug.cgi?id=521929. 1.640 + MessageMap mOutOfTurnReplies; 1.641 + 1.642 + // Stack of Interrupt in-calls that were deferred because of race 1.643 + // conditions. 1.644 + std::stack<Message> mDeferred; 1.645 + 1.646 +#ifdef OS_WIN 1.647 + HANDLE mEvent; 1.648 +#endif 1.649 + 1.650 + // Should the channel abort the process from the I/O thread when 1.651 + // a channel error occurs? 1.652 + bool mAbortOnError; 1.653 + 1.654 + // See SetChannelFlags 1.655 + ChannelFlags mFlags; 1.656 +}; 1.657 + 1.658 +} // namespace ipc 1.659 +} // namespace mozilla 1.660 + 1.661 +#endif // ifndef ipc_glue_MessageChannel_h