Thu, 15 Jan 2015 21:03:48 +0100
Integrate friendly tips from Tor colleagues to make (or not) 4.5 alpha 3;
this includes removal of overloaded (but unused) methods, and addition of
an overlooked call to DataStruct::SetData(nsISupports, uint32_t, bool).
michael@0 | 1 | /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- |
michael@0 | 2 | * vim: set sw=2 ts=8 et tw=80 : |
michael@0 | 3 | */ |
michael@0 | 4 | /* This Source Code Form is subject to the terms of the Mozilla Public |
michael@0 | 5 | * License, v. 2.0. If a copy of the MPL was not distributed with this |
michael@0 | 6 | * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ |
michael@0 | 7 | |
michael@0 | 8 | #ifndef mozilla_net_ChannelEventQueue_h |
michael@0 | 9 | #define mozilla_net_ChannelEventQueue_h |
michael@0 | 10 | |
michael@0 | 11 | #include <nsTArray.h> |
michael@0 | 12 | #include <nsAutoPtr.h> |
michael@0 | 13 | |
michael@0 | 14 | class nsISupports; |
michael@0 | 15 | class nsIEventTarget; |
michael@0 | 16 | class nsIThread; |
michael@0 | 17 | |
michael@0 | 18 | namespace mozilla { |
michael@0 | 19 | namespace net { |
michael@0 | 20 | |
michael@0 | 21 | class ChannelEvent |
michael@0 | 22 | { |
michael@0 | 23 | public: |
michael@0 | 24 | ChannelEvent() { MOZ_COUNT_CTOR(ChannelEvent); } |
michael@0 | 25 | virtual ~ChannelEvent() { MOZ_COUNT_DTOR(ChannelEvent); } |
michael@0 | 26 | virtual void Run() = 0; |
michael@0 | 27 | }; |
michael@0 | 28 | |
michael@0 | 29 | // Workaround for Necko re-entrancy dangers. We buffer IPDL messages in a |
michael@0 | 30 | // queue if still dispatching previous one(s) to listeners/observers. |
michael@0 | 31 | // Otherwise synchronous XMLHttpRequests and/or other code that spins the |
michael@0 | 32 | // event loop (ex: IPDL rpc) could cause listener->OnDataAvailable (for |
michael@0 | 33 | // instance) to be dispatched and called before mListener->OnStartRequest has |
michael@0 | 34 | // completed. |
michael@0 | 35 | |
michael@0 | 36 | class AutoEventEnqueuerBase; |
michael@0 | 37 | |
michael@0 | 38 | class ChannelEventQueue MOZ_FINAL |
michael@0 | 39 | { |
michael@0 | 40 | NS_INLINE_DECL_REFCOUNTING(ChannelEventQueue) |
michael@0 | 41 | |
michael@0 | 42 | public: |
michael@0 | 43 | ChannelEventQueue(nsISupports *owner) |
michael@0 | 44 | : mSuspendCount(0) |
michael@0 | 45 | , mSuspended(false) |
michael@0 | 46 | , mForced(false) |
michael@0 | 47 | , mFlushing(false) |
michael@0 | 48 | , mOwner(owner) {} |
michael@0 | 49 | |
michael@0 | 50 | // Checks to determine if an IPDL-generated channel event can be processed |
michael@0 | 51 | // immediately, or needs to be queued using Enqueue(). |
michael@0 | 52 | inline bool ShouldEnqueue(); |
michael@0 | 53 | |
michael@0 | 54 | // Puts IPDL-generated channel event into queue, to be run later |
michael@0 | 55 | // automatically when EndForcedQueueing and/or Resume is called. |
michael@0 | 56 | inline void Enqueue(ChannelEvent* callback); |
michael@0 | 57 | |
michael@0 | 58 | // After StartForcedQueueing is called, ShouldEnqueue() will return true and |
michael@0 | 59 | // no events will be run/flushed until EndForcedQueueing is called. |
michael@0 | 60 | // - Note: queueing may still be required after EndForcedQueueing() (if the |
michael@0 | 61 | // queue is suspended, etc): always call ShouldEnqueue() to determine |
michael@0 | 62 | // whether queueing is needed. |
michael@0 | 63 | inline void StartForcedQueueing(); |
michael@0 | 64 | inline void EndForcedQueueing(); |
michael@0 | 65 | |
michael@0 | 66 | // Suspend/resume event queue. ShouldEnqueue() will return true and no events |
michael@0 | 67 | // will be run/flushed until resume is called. These should be called when |
michael@0 | 68 | // the channel owning the event queue is suspended/resumed. |
michael@0 | 69 | inline void Suspend(); |
michael@0 | 70 | // Resume flushes the queue asynchronously, i.e. items in queue will be |
michael@0 | 71 | // dispatched in a new event on the current thread. |
michael@0 | 72 | void Resume(); |
michael@0 | 73 | |
michael@0 | 74 | // Retargets delivery of events to the target thread specified. |
michael@0 | 75 | nsresult RetargetDeliveryTo(nsIEventTarget* aTargetThread); |
michael@0 | 76 | |
michael@0 | 77 | private: |
michael@0 | 78 | // Private destructor, to discourage deletion outside of Release(): |
michael@0 | 79 | ~ChannelEventQueue() |
michael@0 | 80 | { |
michael@0 | 81 | } |
michael@0 | 82 | |
michael@0 | 83 | inline void MaybeFlushQueue(); |
michael@0 | 84 | void FlushQueue(); |
michael@0 | 85 | inline void CompleteResume(); |
michael@0 | 86 | |
michael@0 | 87 | nsTArray<nsAutoPtr<ChannelEvent> > mEventQueue; |
michael@0 | 88 | |
michael@0 | 89 | uint32_t mSuspendCount; |
michael@0 | 90 | bool mSuspended; |
michael@0 | 91 | bool mForced; |
michael@0 | 92 | bool mFlushing; |
michael@0 | 93 | |
michael@0 | 94 | // Keep ptr to avoid refcount cycle: only grab ref during flushing. |
michael@0 | 95 | nsISupports *mOwner; |
michael@0 | 96 | |
michael@0 | 97 | // Target thread for delivery of events. |
michael@0 | 98 | nsCOMPtr<nsIThread> mTargetThread; |
michael@0 | 99 | |
michael@0 | 100 | friend class AutoEventEnqueuer; |
michael@0 | 101 | }; |
michael@0 | 102 | |
michael@0 | 103 | inline bool |
michael@0 | 104 | ChannelEventQueue::ShouldEnqueue() |
michael@0 | 105 | { |
michael@0 | 106 | bool answer = mForced || mSuspended || mFlushing; |
michael@0 | 107 | |
michael@0 | 108 | NS_ABORT_IF_FALSE(answer == true || mEventQueue.IsEmpty(), |
michael@0 | 109 | "Should always enqueue if ChannelEventQueue not empty"); |
michael@0 | 110 | |
michael@0 | 111 | return answer; |
michael@0 | 112 | } |
michael@0 | 113 | |
michael@0 | 114 | inline void |
michael@0 | 115 | ChannelEventQueue::Enqueue(ChannelEvent* callback) |
michael@0 | 116 | { |
michael@0 | 117 | mEventQueue.AppendElement(callback); |
michael@0 | 118 | } |
michael@0 | 119 | |
michael@0 | 120 | inline void |
michael@0 | 121 | ChannelEventQueue::StartForcedQueueing() |
michael@0 | 122 | { |
michael@0 | 123 | mForced = true; |
michael@0 | 124 | } |
michael@0 | 125 | |
michael@0 | 126 | inline void |
michael@0 | 127 | ChannelEventQueue::EndForcedQueueing() |
michael@0 | 128 | { |
michael@0 | 129 | mForced = false; |
michael@0 | 130 | MaybeFlushQueue(); |
michael@0 | 131 | } |
michael@0 | 132 | |
michael@0 | 133 | inline void |
michael@0 | 134 | ChannelEventQueue::Suspend() |
michael@0 | 135 | { |
michael@0 | 136 | mSuspended = true; |
michael@0 | 137 | mSuspendCount++; |
michael@0 | 138 | } |
michael@0 | 139 | |
michael@0 | 140 | inline void |
michael@0 | 141 | ChannelEventQueue::CompleteResume() |
michael@0 | 142 | { |
michael@0 | 143 | // channel may have been suspended again since Resume fired event to call this. |
michael@0 | 144 | if (!mSuspendCount) { |
michael@0 | 145 | // we need to remain logically suspended (for purposes of queuing incoming |
michael@0 | 146 | // messages) until this point, else new incoming messages could run before |
michael@0 | 147 | // queued ones. |
michael@0 | 148 | mSuspended = false; |
michael@0 | 149 | MaybeFlushQueue(); |
michael@0 | 150 | } |
michael@0 | 151 | } |
michael@0 | 152 | |
michael@0 | 153 | inline void |
michael@0 | 154 | ChannelEventQueue::MaybeFlushQueue() |
michael@0 | 155 | { |
michael@0 | 156 | // Don't flush if forced queuing on, we're already being flushed, or |
michael@0 | 157 | // suspended, or there's nothing to flush |
michael@0 | 158 | if (!mForced && !mFlushing && !mSuspended && !mEventQueue.IsEmpty()) |
michael@0 | 159 | FlushQueue(); |
michael@0 | 160 | } |
michael@0 | 161 | |
michael@0 | 162 | // Ensures that ShouldEnqueue() will be true during its lifetime (letting |
michael@0 | 163 | // caller know incoming IPDL msgs should be queued). Flushes the queue when it |
michael@0 | 164 | // goes out of scope. |
michael@0 | 165 | class AutoEventEnqueuer |
michael@0 | 166 | { |
michael@0 | 167 | public: |
michael@0 | 168 | AutoEventEnqueuer(ChannelEventQueue *queue) : mEventQueue(queue) { |
michael@0 | 169 | mEventQueue->StartForcedQueueing(); |
michael@0 | 170 | } |
michael@0 | 171 | ~AutoEventEnqueuer() { |
michael@0 | 172 | mEventQueue->EndForcedQueueing(); |
michael@0 | 173 | } |
michael@0 | 174 | private: |
michael@0 | 175 | ChannelEventQueue* mEventQueue; |
michael@0 | 176 | }; |
michael@0 | 177 | |
michael@0 | 178 | } |
michael@0 | 179 | } |
michael@0 | 180 | |
michael@0 | 181 | #endif |