/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim:set ts=2 sw=2 sts=2 et cindent: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#if !defined(PlatformDecoderModule_h_)
# define PlatformDecoderModule_h_
# include "DecoderDoctorLogger.h"
# include "GMPCrashHelper.h"
# include "MediaEventSource.h"
# include "MediaInfo.h"
# include "MediaResult.h"
# include "mozilla/EnumSet.h"
# include "mozilla/EnumTypeTraits.h"
# include "mozilla/MozPromise.h"
# include "mozilla/RefPtr.h"
# include "mozilla/TaskQueue.h"
# include "mozilla/layers/KnowsCompositor.h"
# include "mozilla/layers/LayersTypes.h"
# include "nsTArray.h"
# include <queue>
namespace mozilla {
class TrackInfo;
class AudioInfo;
class VideoInfo;
class MediaRawData;
class DecoderDoctorDiagnostics;
namespace layers {
class ImageContainer;
} // namespace layers
class GpuDecoderModule;
class MediaDataDecoder;
class RemoteDecoderModule;
class TaskQueue;
class CDMProxy;
static LazyLogModule sPDMLog("PlatformDecoderModule");
struct MOZ_STACK_CLASS CreateDecoderParams final {
explicit CreateDecoderParams(const TrackInfo& aConfig) : mConfig(aConfig) {}
enum class Option {
Default,
LowLatency,
HardwareDecoderNotAllowed,
FullH264Parsing,
ErrorIfNoInitializationData, // By default frames delivered before
// initialization data are dropped. Pass this
// option to raise an error if frames are
// delivered before initialization data.
DefaultPlaybackDeviceMono, // Currently only used by Opus on RDD to avoid
// initialization of audio backends on RDD
SENTINEL // one past the last valid value
};
using OptionSet = EnumSet<Option>;
struct UseNullDecoder {
UseNullDecoder() = default;
explicit UseNullDecoder(bool aUseNullDecoder) : mUse(aUseNullDecoder) {}
bool mUse = false;
};
// Do not wrap the H264 decoder in an H264Converter.
struct NoWrapper {
NoWrapper() = default;
explicit NoWrapper(bool aDontUseWrapper)
: mDontUseWrapper(aDontUseWrapper) {}
bool mDontUseWrapper = false;
};
struct VideoFrameRate {
VideoFrameRate() = default;
explicit VideoFrameRate(float aFramerate) : mValue(aFramerate) {}
float mValue = 0.0f;
};
template <typename T1, typename... Ts>
CreateDecoderParams(const TrackInfo& aConfig, T1&& a1, Ts&&... args)
: mConfig(aConfig) {
Set(std::forward<T1>(a1), std::forward<Ts>(args)...);
}
const VideoInfo& VideoConfig() const {
MOZ_ASSERT(mConfig.IsVideo());
return *mConfig.GetAsVideoInfo();
}
const AudioInfo& AudioConfig() const {
MOZ_ASSERT(mConfig.IsAudio());
return *mConfig.GetAsAudioInfo();
}
layers::LayersBackend GetLayersBackend() const {
if (mKnowsCompositor) {
return mKnowsCompositor->GetCompositorBackendType();
}
return layers::LayersBackend::LAYERS_NONE;
}
const TrackInfo& mConfig;
TaskQueue* mTaskQueue = nullptr;
DecoderDoctorDiagnostics* mDiagnostics = nullptr;
layers::ImageContainer* mImageContainer = nullptr;
MediaResult* mError = nullptr;
RefPtr<layers::KnowsCompositor> mKnowsCompositor;
RefPtr<GMPCrashHelper> mCrashHelper;
UseNullDecoder mUseNullDecoder;
NoWrapper mNoWrapper;
TrackInfo::TrackType mType = TrackInfo::kUndefinedTrack;
MediaEventProducer<TrackInfo::TrackType>* mOnWaitingForKeyEvent = nullptr;
OptionSet mOptions = OptionSet(Option::Default);
VideoFrameRate mRate;
private:
void Set(TaskQueue* aTaskQueue) { mTaskQueue = aTaskQueue; }
void Set(DecoderDoctorDiagnostics* aDiagnostics) {
mDiagnostics = aDiagnostics;
}
void Set(layers::ImageContainer* aImageContainer) {
mImageContainer = aImageContainer;
}
void Set(MediaResult* aError) { mError = aError; }
void Set(GMPCrashHelper* aCrashHelper) { mCrashHelper = aCrashHelper; }
void Set(UseNullDecoder aUseNullDecoder) {
mUseNullDecoder = aUseNullDecoder;
}
void Set(NoWrapper aNoWrapper) { mNoWrapper = aNoWrapper; }
void Set(OptionSet aOptions) { mOptions = aOptions; }
void Set(VideoFrameRate aRate) { mRate = aRate; }
void Set(layers::KnowsCompositor* aKnowsCompositor) {
if (aKnowsCompositor) {
mKnowsCompositor = aKnowsCompositor;
MOZ_ASSERT(aKnowsCompositor->IsThreadSafe());
}
}
void Set(TrackInfo::TrackType aType) { mType = aType; }
void Set(MediaEventProducer<TrackInfo::TrackType>* aOnWaitingForKey) {
mOnWaitingForKeyEvent = aOnWaitingForKey;
}
template <typename T1, typename T2, typename... Ts>
void Set(T1&& a1, T2&& a2, Ts&&... args) {
Set(std::forward<T1>(a1));
Set(std::forward<T2>(a2), std::forward<Ts>(args)...);
}
};
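// Example (illustrative sketch, not part of this header): the variadic
// constructor lets callers pass any subset of the optional parameters in any
// order; each argument is routed to the matching Set() overload. The names
// `videoInfo`, `taskQueue`, `imageContainer`, and `pdm` are assumed to exist
// in the caller, and CreateVideoDecoder() is protected, so in practice only
// friend classes such as PDMFactory make that call:
//
//   CreateDecoderParams params(
//       videoInfo, taskQueue, imageContainer,
//       CreateDecoderParams::OptionSet(
//           CreateDecoderParams::Option::LowLatency),
//       CreateDecoderParams::VideoFrameRate(30.0f));
//   RefPtr<MediaDataDecoder> decoder = pdm->CreateVideoDecoder(params);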
// Used for IPDL serialization.
// The 'value' has to be the largest enum value in CreateDecoderParams::Option.
template <>
struct MaxEnumValue<::mozilla::CreateDecoderParams::Option> {
static constexpr unsigned int value =
static_cast<unsigned int>(CreateDecoderParams::Option::SENTINEL);
};
// The PlatformDecoderModule interface is used by the MediaFormatReader to
// abstract access to decoders provided by various
// platforms.
// Each platform (Windows, MacOSX, Linux, B2G etc) must implement a
// PlatformDecoderModule to provide access to its decoders in order to get
// decompressed H.264/AAC from the MediaFormatReader.
//
// Decoding is asynchronous, and should be performed on the task queue
// provided if the underlying platform isn't already exposing an async API.
//
// A cross-platform decoder module that discards input and produces "blank"
// output samples exists for testing, and is created when the pref
// "media.use-blank-decoder" is true.
class PlatformDecoderModule {
public:
NS_INLINE_DECL_THREADSAFE_REFCOUNTING(PlatformDecoderModule)
// Perform any per-instance initialization.
// This is called on the decode task queue.
virtual nsresult Startup() { return NS_OK; }
// Indicates if the PlatformDecoderModule supports decoding of aMimeType.
virtual bool SupportsMimeType(
const nsACString& aMimeType,
DecoderDoctorDiagnostics* aDiagnostics) const = 0;
virtual bool Supports(const TrackInfo& aTrackInfo,
DecoderDoctorDiagnostics* aDiagnostics) const {
if (!SupportsMimeType(aTrackInfo.mMimeType, aDiagnostics)) {
return false;
}
const auto videoInfo = aTrackInfo.GetAsVideoInfo();
return !videoInfo ||
SupportsColorDepth(videoInfo->mColorDepth, aDiagnostics);
}
protected:
PlatformDecoderModule() {}
virtual ~PlatformDecoderModule() {}
friend class MediaChangeMonitor;
friend class PDMFactory;
friend class GpuDecoderModule;
friend class EMEDecoderModule;
friend class RemoteDecoderModule;
// Indicates if the PlatformDecoderModule supports decoding of aColorDepth.
// Platforms that can support a color depth other than 8 bits should override
// this method.
virtual bool SupportsColorDepth(
gfx::ColorDepth aColorDepth,
DecoderDoctorDiagnostics* aDiagnostics) const {
return aColorDepth == gfx::ColorDepth::COLOR_8;
}
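// For example, a platform with 10-bit capable hardware might override it
// roughly like this (illustrative sketch only, not an existing override):
//
//   bool SupportsColorDepth(gfx::ColorDepth aColorDepth,
//                           DecoderDoctorDiagnostics*) const override {
//     return aColorDepth == gfx::ColorDepth::COLOR_8 ||
//            aColorDepth == gfx::ColorDepth::COLOR_10;
//   }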
// Creates a Video decoder. The layers backend is passed in so that
// decoders can determine whether hardware accelerated decoding can be used.
// Asynchronous decoding of video should be done in runnables dispatched
// to aVideoTaskQueue. If the task queue isn't needed, the decoder should
// not hold a reference to it.
// On Windows the task queue's threads will have MSCOM initialized with
// COINIT_MULTITHREADED.
// Returns nullptr if the decoder can't be created.
// It is safe to store a reference to aConfig.
// This is called on the decode task queue.
virtual already_AddRefed<MediaDataDecoder> CreateVideoDecoder(
const CreateDecoderParams& aParams) = 0;
// Creates an Audio decoder with the specified properties.
// Asynchronous decoding of audio should be done in runnables dispatched to
// aAudioTaskQueue. If the task queue isn't needed, the decoder should
// not hold a reference to it.
// Returns nullptr if the decoder can't be created.
// On Windows the task queue's threads will have MSCOM initialized with
// COINIT_MULTITHREADED.
// It is safe to store a reference to aConfig.
// This is called on the decode task queue.
virtual already_AddRefed<MediaDataDecoder> CreateAudioDecoder(
const CreateDecoderParams& aParams) = 0;
};
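// Illustrative sketch of a minimal module implementation. The names
// MyDecoderModule and MyVideoDecoder are hypothetical and only meant to show
// which members a backend typically overrides:
//
//   class MyDecoderModule : public PlatformDecoderModule {
//    public:
//     bool SupportsMimeType(const nsACString& aMimeType,
//                           DecoderDoctorDiagnostics*) const override {
//       return aMimeType.EqualsLiteral("video/avc");
//     }
//
//    protected:
//     already_AddRefed<MediaDataDecoder> CreateVideoDecoder(
//         const CreateDecoderParams& aParams) override {
//       RefPtr<MediaDataDecoder> decoder = new MyVideoDecoder(aParams);
//       return decoder.forget();
//     }
//     already_AddRefed<MediaDataDecoder> CreateAudioDecoder(
//         const CreateDecoderParams&) override {
//       return nullptr;  // This module only handles video.
//     }
//   };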
DDLoggedTypeDeclName(MediaDataDecoder);
// MediaDataDecoder is the interface exposed by decoders created by the
// PlatformDecoderModule's Create*Decoder() functions. The type of
// media data that the decoder accepts as valid input and produces as
// output is determined when the MediaDataDecoder is created.
//
// Unless otherwise noted, all functions are only called on the decode task
// queue. An exception is the MediaDataDecoder in
// MediaFormatReader::IsVideoAccelerated() for which all calls (Init(),
// IsHardwareAccelerated(), and Shutdown()) are from the main thread.
//
// Don't block inside these functions, unless it's explicitly noted that you
// should (like in Flush()).
//
// Decoding is done asynchronously. Any async work can be done on the
// TaskQueue passed into the PlatformDecoderModules's Create*Decoder()
// function. This may not be necessary for platforms with async APIs
// for decoding.
class MediaDataDecoder : public DecoderDoctorLifeLogger<MediaDataDecoder> {
protected:
virtual ~MediaDataDecoder() {}
public:
typedef TrackInfo::TrackType TrackType;
typedef nsTArray<RefPtr<MediaData>> DecodedData;
typedef MozPromise<TrackType, MediaResult, /* IsExclusive = */ true>
InitPromise;
typedef MozPromise<DecodedData, MediaResult, /* IsExclusive = */ true>
DecodePromise;
typedef MozPromise<bool, MediaResult, /* IsExclusive = */ true> FlushPromise;
NS_INLINE_DECL_THREADSAFE_REFCOUNTING(MediaDataDecoder)
// Initialize the decoder. The decoder should be ready to decode once the
// promise resolves. The decoder should do any initialization here, rather
// than in its constructor or PlatformDecoderModule::Create*Decoder(),
// so that if the MediaFormatReader needs to shutdown during initialization,
// it can call Shutdown() to cancel this operation. Any initialization
// that requires blocking the calling thread in this function *must*
// be done here so that it can be canceled by calling Shutdown()!
virtual RefPtr<InitPromise> Init() = 0;
// Inserts a sample into the decoder's decode pipeline. The DecodePromise will
// be resolved with the decoded MediaData. In case the decoder needs more
// input, the DecodePromise may be resolved with an empty array of samples to
// indicate that Decode should be called again before a MediaData is returned.
virtual RefPtr<DecodePromise> Decode(MediaRawData* aSample) = 0;
// Causes all complete samples in the pipeline that can be decoded to be
// output. If the decoder can't produce samples from the current input, it
// drops those input samples. The decoder may be holding onto samples that
// are required to decode samples that it expects to get in the future.
// This is called when the demuxer reaches end of stream.
// This function is asynchronous.
// The MediaDataDecoder shall resolve the pending DecodePromise with drained
// samples. Drain will be called multiple times until the resolved
// DecodePromise is empty, which indicates that there are no more samples to
// drain.
virtual RefPtr<DecodePromise> Drain() = 0;
// Causes all samples in the decoding pipeline to be discarded. When this
// promise resolves, the decoder must be ready to accept new data for
// decoding. This function is called when the demuxer seeks, before decoding
// resumes after the seek. The current DecodePromise, if any, shall be
// rejected with NS_ERROR_DOM_MEDIA_CANCELED.
virtual RefPtr<FlushPromise> Flush() = 0;
// Cancels all init/decode/drain operations, and shuts down the decoder. The
// platform decoder should clean up any resources it's using and release
// memory etc. The shutdown promise will be resolved once the decoder has
// completed shutdown. The reader calls Flush() before calling Shutdown(). The
// reader will delete the decoder once the promise is resolved.
// The ShutdownPromise must only ever be resolved.
virtual RefPtr<ShutdownPromise> Shutdown() = 0;
// Called from the state machine task queue or main thread. The decoder must
// be able to report whether hardware acceleration is supported right after it
// is created; the caller doesn't need to call Init() before calling this
// function.
virtual bool IsHardwareAccelerated(nsACString& aFailureReason) const {
return false;
}
// Return the name of the MediaDataDecoder, only used for decoding.
// May be accessed in a non thread-safe fashion.
virtual nsCString GetDescriptionName() const = 0;
// Set a hint of the seek target time on the decoder. The decoder will drop
// any decoded data whose pts is smaller than this value. This threshold needs
// to be cleared after the decoder is reset. To clear it explicitly, call this
// method with TimeUnit::Invalid().
// The decoder may not honor this value, but video decoders should implement
// this API to improve seek performance.
// Note: it should be called before Decode() or after Flush().
virtual void SetSeekThreshold(const media::TimeUnit& aTime) {}
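// For example (illustrative only; `decoder` and `seekTarget` are assumed to
// exist in the caller):
//
//   decoder->SetSeekThreshold(seekTarget);                  // hint a seek
//   decoder->SetSeekThreshold(media::TimeUnit::Invalid());  // clear the hint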
// During adaptive playback, recreating an Android video decoder makes the
// transition at a resolution change less smooth, so the decoder is reused
// when it supports recycling.
// Currently, only the Android video decoder returns true.
virtual bool SupportDecoderRecycling() const { return false; }
enum class ConversionRequired {
kNeedNone = 0,
kNeedAVCC = 1,
kNeedAnnexB = 2,
};
// Indicates that the decoder requires a specific format.
// The demuxed data will be converted accordingly before feeding it to
// Decode().
virtual ConversionRequired NeedsConversion() const {
return ConversionRequired::kNeedNone;
}
};
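// Illustrative sketch of how a reader-side caller typically drives a decoder
// via MozPromise chaining. `decoder`, `sample`, and `ownerThread` are assumed
// to exist in the caller; error handling is elided:
//
//   decoder->Init()->Then(
//       ownerThread, __func__,
//       [decoder, sample, ownerThread](TrackInfo::TrackType) {
//         decoder->Decode(sample)->Then(
//             ownerThread, __func__,
//             [](MediaDataDecoder::DecodedData&& aResults) {
//               // An empty array means the decoder needs more input before
//               // it can return a MediaData.
//             },
//             [](const MediaResult& aError) { /* decode error */ });
//       },
//       [](const MediaResult& aError) { /* init error */ });
//
// At end of stream, Drain() is called repeatedly until it resolves with an
// empty DecodedData array; Flush() is used across seeks, and Shutdown() ends
// the decoder's life cycle.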
} // namespace mozilla
#endif