Fri, 16 Jan 2015 04:50:19 +0100
Replace the accessor implementation with direct member state manipulation, as
requested in https://trac.torproject.org/projects/tor/ticket/9701#comment:32
michael@0 | 1 | /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-*/ |
michael@0 | 2 | /* This Source Code Form is subject to the terms of the Mozilla Public |
michael@0 | 3 | * License, v. 2.0. If a copy of the MPL was not distributed with this file, |
michael@0 | 4 | * You can obtain one at http://mozilla.org/MPL/2.0/. */ |
michael@0 | 5 | #include "TrackEncoder.h" |
michael@0 | 6 | #include "AudioChannelFormat.h" |
michael@0 | 7 | #include "MediaStreamGraph.h" |
michael@0 | 8 | #include "prlog.h" |
michael@0 | 9 | #include "VideoUtils.h" |
michael@0 | 10 | |
#undef LOG
#ifdef MOZ_WIDGET_GONK
#include <android/log.h>
#define LOG(args...) __android_log_print(ANDROID_LOG_INFO, "MediaEncoder", ## args);
#else
// No-op logging on non-Gonk builds. Use the standard C++11 variadic form so
// a single-argument call such as LOG("msg") is well-formed: the previous
// definition, LOG(args, ...), required a named parameter and relied on the
// GNU extension that tolerates an empty __VA_ARGS__.
#define LOG(...)
#endif
michael@0 | 18 | |
michael@0 | 19 | namespace mozilla { |
michael@0 | 20 | |
#ifdef PR_LOGGING
// Shared NSPR log module for all track encoders; lazily created by the
// TrackEncoder constructor. Enable at runtime with
// NSPR_LOG_MODULES=TrackEncoder:<level>.
PRLogModuleInfo* gTrackEncoderLog;
#define TRACK_LOG(type, msg) PR_LOG(gTrackEncoderLog, type, msg)
#else
// Logging compiled out: TRACK_LOG expands to nothing.
#define TRACK_LOG(type, msg)
#endif
michael@0 | 27 | |
// Fallback encoder parameters, used when a track ends without ever having
// delivered a non-null chunk (see the NotifyEndOfStream implementations
// below), so the encoder can still be initialized and finalized.
static const int DEFAULT_CHANNELS = 1;
static const int DEFAULT_SAMPLING_RATE = 16000;
static const int DEFAULT_FRAME_WIDTH = 640;
static const int DEFAULT_FRAME_HEIGHT = 480;
static const int DEFAULT_TRACK_RATE = USECS_PER_S;
michael@0 | 33 | |
// Base-class constructor: every state flag starts false; state is advanced
// later by the NotifyQueuedTrackChanges/NotifyEndOfStream callbacks.
TrackEncoder::TrackEncoder()
  : mReentrantMonitor("media.TrackEncoder")
  , mEncodingComplete(false)
  , mEosSetInEncoder(false)
  , mInitialized(false)
  , mEndOfStream(false)
  , mCanceled(false)
#ifdef PR_LOGGING
  , mAudioInitCounter(0)
  , mVideoInitCounter(0)
#endif
{
#ifdef PR_LOGGING
  // Create the shared log module the first time any encoder is constructed.
  if (!gTrackEncoderLog) {
    gTrackEncoderLog = PR_NewLogModule("TrackEncoder");
  }
#endif
}
michael@0 | 52 | |
michael@0 | 53 | void |
michael@0 | 54 | AudioTrackEncoder::NotifyQueuedTrackChanges(MediaStreamGraph* aGraph, |
michael@0 | 55 | TrackID aID, |
michael@0 | 56 | TrackRate aTrackRate, |
michael@0 | 57 | TrackTicks aTrackOffset, |
michael@0 | 58 | uint32_t aTrackEvents, |
michael@0 | 59 | const MediaSegment& aQueuedMedia) |
michael@0 | 60 | { |
michael@0 | 61 | if (mCanceled) { |
michael@0 | 62 | return; |
michael@0 | 63 | } |
michael@0 | 64 | |
michael@0 | 65 | const AudioSegment& audio = static_cast<const AudioSegment&>(aQueuedMedia); |
michael@0 | 66 | |
michael@0 | 67 | // Check and initialize parameters for codec encoder. |
michael@0 | 68 | if (!mInitialized) { |
michael@0 | 69 | #ifdef PR_LOGGING |
michael@0 | 70 | mAudioInitCounter++; |
michael@0 | 71 | TRACK_LOG(PR_LOG_DEBUG, ("Init the audio encoder %d times", mAudioInitCounter)); |
michael@0 | 72 | #endif |
michael@0 | 73 | AudioSegment::ChunkIterator iter(const_cast<AudioSegment&>(audio)); |
michael@0 | 74 | while (!iter.IsEnded()) { |
michael@0 | 75 | AudioChunk chunk = *iter; |
michael@0 | 76 | |
michael@0 | 77 | // The number of channels is determined by the first non-null chunk, and |
michael@0 | 78 | // thus the audio encoder is initialized at this time. |
michael@0 | 79 | if (!chunk.IsNull()) { |
michael@0 | 80 | nsresult rv = Init(chunk.mChannelData.Length(), aTrackRate); |
michael@0 | 81 | if (NS_FAILED(rv)) { |
michael@0 | 82 | LOG("[AudioTrackEncoder]: Fail to initialize the encoder!"); |
michael@0 | 83 | NotifyCancel(); |
michael@0 | 84 | } |
michael@0 | 85 | break; |
michael@0 | 86 | } |
michael@0 | 87 | |
michael@0 | 88 | iter.Next(); |
michael@0 | 89 | } |
michael@0 | 90 | } |
michael@0 | 91 | |
michael@0 | 92 | // Append and consume this raw segment. |
michael@0 | 93 | AppendAudioSegment(audio); |
michael@0 | 94 | |
michael@0 | 95 | |
michael@0 | 96 | // The stream has stopped and reached the end of track. |
michael@0 | 97 | if (aTrackEvents == MediaStreamListener::TRACK_EVENT_ENDED) { |
michael@0 | 98 | LOG("[AudioTrackEncoder]: Receive TRACK_EVENT_ENDED ."); |
michael@0 | 99 | NotifyEndOfStream(); |
michael@0 | 100 | } |
michael@0 | 101 | } |
michael@0 | 102 | |
michael@0 | 103 | void |
michael@0 | 104 | AudioTrackEncoder::NotifyEndOfStream() |
michael@0 | 105 | { |
michael@0 | 106 | // If source audio track is completely silent till the end of encoding, |
michael@0 | 107 | // initialize the encoder with default channel counts and sampling rate. |
michael@0 | 108 | if (!mCanceled && !mInitialized) { |
michael@0 | 109 | Init(DEFAULT_CHANNELS, DEFAULT_SAMPLING_RATE); |
michael@0 | 110 | } |
michael@0 | 111 | |
michael@0 | 112 | ReentrantMonitorAutoEnter mon(mReentrantMonitor); |
michael@0 | 113 | mEndOfStream = true; |
michael@0 | 114 | mReentrantMonitor.NotifyAll(); |
michael@0 | 115 | } |
michael@0 | 116 | |
michael@0 | 117 | nsresult |
michael@0 | 118 | AudioTrackEncoder::AppendAudioSegment(const AudioSegment& aSegment) |
michael@0 | 119 | { |
michael@0 | 120 | ReentrantMonitorAutoEnter mon(mReentrantMonitor); |
michael@0 | 121 | |
michael@0 | 122 | AudioSegment::ChunkIterator iter(const_cast<AudioSegment&>(aSegment)); |
michael@0 | 123 | while (!iter.IsEnded()) { |
michael@0 | 124 | AudioChunk chunk = *iter; |
michael@0 | 125 | // Append and consume both non-null and null chunks. |
michael@0 | 126 | mRawSegment.AppendAndConsumeChunk(&chunk); |
michael@0 | 127 | iter.Next(); |
michael@0 | 128 | } |
michael@0 | 129 | |
michael@0 | 130 | if (mRawSegment.GetDuration() >= GetPacketDuration()) { |
michael@0 | 131 | mReentrantMonitor.NotifyAll(); |
michael@0 | 132 | } |
michael@0 | 133 | |
michael@0 | 134 | return NS_OK; |
michael@0 | 135 | } |
michael@0 | 136 | |
// Silent scratch channel handed to AudioChannelsUpMix as padding. Sized to
// hold one processing batch at the maximum per-sample size.
static const int AUDIO_PROCESSING_FRAMES = 640; /* > 10ms of 48KHz audio */
static const uint8_t gZeroChannel[MAX_AUDIO_SAMPLE_SIZE*AUDIO_PROCESSING_FRAMES] = {0};
michael@0 | 139 | |
michael@0 | 140 | /*static*/ |
michael@0 | 141 | void |
michael@0 | 142 | AudioTrackEncoder::InterleaveTrackData(AudioChunk& aChunk, |
michael@0 | 143 | int32_t aDuration, |
michael@0 | 144 | uint32_t aOutputChannels, |
michael@0 | 145 | AudioDataValue* aOutput) |
michael@0 | 146 | { |
michael@0 | 147 | if (aChunk.mChannelData.Length() < aOutputChannels) { |
michael@0 | 148 | // Up-mix. This might make the mChannelData have more than aChannels. |
michael@0 | 149 | AudioChannelsUpMix(&aChunk.mChannelData, aOutputChannels, gZeroChannel); |
michael@0 | 150 | } |
michael@0 | 151 | |
michael@0 | 152 | if (aChunk.mChannelData.Length() > aOutputChannels) { |
michael@0 | 153 | DownmixAndInterleave(aChunk.mChannelData, aChunk.mBufferFormat, aDuration, |
michael@0 | 154 | aChunk.mVolume, aOutputChannels, aOutput); |
michael@0 | 155 | } else { |
michael@0 | 156 | InterleaveAndConvertBuffer(aChunk.mChannelData.Elements(), |
michael@0 | 157 | aChunk.mBufferFormat, aDuration, aChunk.mVolume, |
michael@0 | 158 | aOutputChannels, aOutput); |
michael@0 | 159 | } |
michael@0 | 160 | } |
michael@0 | 161 | |
michael@0 | 162 | /*static*/ |
michael@0 | 163 | void |
michael@0 | 164 | AudioTrackEncoder::DeInterleaveTrackData(AudioDataValue* aInput, |
michael@0 | 165 | int32_t aDuration, |
michael@0 | 166 | int32_t aChannels, |
michael@0 | 167 | AudioDataValue* aOutput) |
michael@0 | 168 | { |
michael@0 | 169 | for (int32_t i = 0; i < aChannels; ++i) { |
michael@0 | 170 | for(int32_t j = 0; j < aDuration; ++j) { |
michael@0 | 171 | aOutput[i * aDuration + j] = aInput[i + j * aChannels]; |
michael@0 | 172 | } |
michael@0 | 173 | } |
michael@0 | 174 | } |
michael@0 | 175 | |
michael@0 | 176 | void |
michael@0 | 177 | VideoTrackEncoder::NotifyQueuedTrackChanges(MediaStreamGraph* aGraph, |
michael@0 | 178 | TrackID aID, |
michael@0 | 179 | TrackRate aTrackRate, |
michael@0 | 180 | TrackTicks aTrackOffset, |
michael@0 | 181 | uint32_t aTrackEvents, |
michael@0 | 182 | const MediaSegment& aQueuedMedia) |
michael@0 | 183 | { |
michael@0 | 184 | if (mCanceled) { |
michael@0 | 185 | return; |
michael@0 | 186 | } |
michael@0 | 187 | |
michael@0 | 188 | const VideoSegment& video = static_cast<const VideoSegment&>(aQueuedMedia); |
michael@0 | 189 | |
michael@0 | 190 | // Check and initialize parameters for codec encoder. |
michael@0 | 191 | if (!mInitialized) { |
michael@0 | 192 | #ifdef PR_LOGGING |
michael@0 | 193 | mVideoInitCounter++; |
michael@0 | 194 | TRACK_LOG(PR_LOG_DEBUG, ("Init the video encoder %d times", mVideoInitCounter)); |
michael@0 | 195 | #endif |
michael@0 | 196 | VideoSegment::ChunkIterator iter(const_cast<VideoSegment&>(video)); |
michael@0 | 197 | while (!iter.IsEnded()) { |
michael@0 | 198 | VideoChunk chunk = *iter; |
michael@0 | 199 | if (!chunk.IsNull()) { |
michael@0 | 200 | gfx::IntSize imgsize = chunk.mFrame.GetImage()->GetSize(); |
michael@0 | 201 | gfxIntSize intrinsicSize = chunk.mFrame.GetIntrinsicSize(); |
michael@0 | 202 | nsresult rv = Init(imgsize.width, imgsize.height, |
michael@0 | 203 | intrinsicSize.width, intrinsicSize.height, |
michael@0 | 204 | aTrackRate); |
michael@0 | 205 | if (NS_FAILED(rv)) { |
michael@0 | 206 | LOG("[VideoTrackEncoder]: Fail to initialize the encoder!"); |
michael@0 | 207 | NotifyCancel(); |
michael@0 | 208 | } |
michael@0 | 209 | break; |
michael@0 | 210 | } |
michael@0 | 211 | |
michael@0 | 212 | iter.Next(); |
michael@0 | 213 | } |
michael@0 | 214 | } |
michael@0 | 215 | |
michael@0 | 216 | AppendVideoSegment(video); |
michael@0 | 217 | |
michael@0 | 218 | // The stream has stopped and reached the end of track. |
michael@0 | 219 | if (aTrackEvents == MediaStreamListener::TRACK_EVENT_ENDED) { |
michael@0 | 220 | LOG("[VideoTrackEncoder]: Receive TRACK_EVENT_ENDED ."); |
michael@0 | 221 | NotifyEndOfStream(); |
michael@0 | 222 | } |
michael@0 | 223 | |
michael@0 | 224 | } |
michael@0 | 225 | |
michael@0 | 226 | nsresult |
michael@0 | 227 | VideoTrackEncoder::AppendVideoSegment(const VideoSegment& aSegment) |
michael@0 | 228 | { |
michael@0 | 229 | ReentrantMonitorAutoEnter mon(mReentrantMonitor); |
michael@0 | 230 | |
michael@0 | 231 | // Append all video segments from MediaStreamGraph, including null an |
michael@0 | 232 | // non-null frames. |
michael@0 | 233 | VideoSegment::ChunkIterator iter(const_cast<VideoSegment&>(aSegment)); |
michael@0 | 234 | while (!iter.IsEnded()) { |
michael@0 | 235 | VideoChunk chunk = *iter; |
michael@0 | 236 | nsRefPtr<layers::Image> image = chunk.mFrame.GetImage(); |
michael@0 | 237 | mRawSegment.AppendFrame(image.forget(), chunk.GetDuration(), |
michael@0 | 238 | chunk.mFrame.GetIntrinsicSize().ToIntSize()); |
michael@0 | 239 | iter.Next(); |
michael@0 | 240 | } |
michael@0 | 241 | |
michael@0 | 242 | if (mRawSegment.GetDuration() > 0) { |
michael@0 | 243 | mReentrantMonitor.NotifyAll(); |
michael@0 | 244 | } |
michael@0 | 245 | |
michael@0 | 246 | return NS_OK; |
michael@0 | 247 | } |
michael@0 | 248 | |
michael@0 | 249 | void |
michael@0 | 250 | VideoTrackEncoder::NotifyEndOfStream() |
michael@0 | 251 | { |
michael@0 | 252 | // If source video track is muted till the end of encoding, initialize the |
michael@0 | 253 | // encoder with default frame width, frame height, and track rate. |
michael@0 | 254 | if (!mCanceled && !mInitialized) { |
michael@0 | 255 | Init(DEFAULT_FRAME_WIDTH, DEFAULT_FRAME_HEIGHT, |
michael@0 | 256 | DEFAULT_FRAME_WIDTH, DEFAULT_FRAME_HEIGHT, DEFAULT_TRACK_RATE); |
michael@0 | 257 | } |
michael@0 | 258 | |
michael@0 | 259 | ReentrantMonitorAutoEnter mon(mReentrantMonitor); |
michael@0 | 260 | mEndOfStream = true; |
michael@0 | 261 | mReentrantMonitor.NotifyAll(); |
michael@0 | 262 | } |
michael@0 | 263 | |
michael@0 | 264 | void |
michael@0 | 265 | VideoTrackEncoder::CreateMutedFrame(nsTArray<uint8_t>* aOutputBuffer) |
michael@0 | 266 | { |
michael@0 | 267 | NS_ENSURE_TRUE_VOID(aOutputBuffer); |
michael@0 | 268 | |
michael@0 | 269 | // Supports YUV420 image format only. |
michael@0 | 270 | int yPlaneLen = mFrameWidth * mFrameHeight; |
michael@0 | 271 | int cbcrPlaneLen = yPlaneLen / 2; |
michael@0 | 272 | int frameLen = yPlaneLen + cbcrPlaneLen; |
michael@0 | 273 | |
michael@0 | 274 | aOutputBuffer->SetLength(frameLen); |
michael@0 | 275 | // Fill Y plane. |
michael@0 | 276 | memset(aOutputBuffer->Elements(), 0x10, yPlaneLen); |
michael@0 | 277 | // Fill Cb/Cr planes. |
michael@0 | 278 | memset(aOutputBuffer->Elements() + yPlaneLen, 0x80, cbcrPlaneLen); |
michael@0 | 279 | } |
michael@0 | 280 | |
michael@0 | 281 | } |