You cannot select more than 25 topics
Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.
57046 lines
1.7 MiB
57046 lines
1.7 MiB
/** |
|
* @license |
|
* Video.js 7.5.5 <http://videojs.com/> |
|
* Copyright Brightcove, Inc. <https://www.brightcove.com/> |
|
* Available under Apache License Version 2.0 |
|
* <https://github.com/videojs/video.js/blob/master/LICENSE> |
|
* |
|
* Includes vtt.js <https://github.com/mozilla/vtt.js> |
|
* Available under Apache License Version 2.0 |
|
* <https://github.com/mozilla/vtt.js/blob/master/LICENSE> |
|
*/ |
|
|
|
(function (global, factory) { |
|
typeof exports === 'object' && typeof module !== 'undefined' ? module.exports = factory(require('global/window'), require('global/document')) : |
|
typeof define === 'function' && define.amd ? define(['global/window', 'global/document'], factory) : |
|
(global = global || self, global.videojs = factory(global.window, global.document)); |
|
}(this, function (window$1, document) { |
|
// Unwrap possible ES-module interop "default" exports from the
// `global/window` and `global/document` packages so the factory always
// holds the real window/document objects.
window$1 = window$1 && window$1.hasOwnProperty('default') ? window$1['default'] : window$1;

document = document && document.hasOwnProperty('default') ? document['default'] : document;

// Current Video.js release string, exposed as `videojs.VERSION`.
var version = "7.5.5";
|
|
|
// Babel helper: loose subclass wiring. Chains `subClass.prototype` to
// `superClass.prototype` (restoring `constructor`) and links the constructor
// functions themselves via `__proto__` so static members are inherited.
function _inheritsLoose(subClass, superClass) {
  subClass.prototype = Object.create(superClass.prototype);
  subClass.prototype.constructor = subClass;
  subClass.__proto__ = superClass;
}
|
|
|
/**
 * Babel helper: set the [[Prototype]] of `o` to `p`, preferring native
 * `Object.setPrototypeOf` and falling back to direct `__proto__` assignment.
 * Rebinds itself on first call so later calls skip the feature test.
 *
 * @param  {Object} o  Object whose prototype is replaced.
 * @param  {Object} p  The new prototype.
 * @return {Object}    `o`, for chaining.
 */
function _setPrototypeOf(o, p) {
  var impl = Object.setPrototypeOf || function _setPrototypeOf(obj, proto) {
    obj.__proto__ = proto;
    return obj;
  };

  _setPrototypeOf = impl;
  return impl(o, p);
}
|
|
|
/**
 * Babel helper: detect whether `Reflect.construct` is natively supported
 * (and not a polyfill/sham), so class construction can use the fast path.
 *
 * @return {boolean} `true` when native Reflect.construct is usable.
 */
function isNativeReflectConstruct() {
  // No Reflect.construct at all.
  if (typeof Reflect === "undefined" || !Reflect.construct) {
    return false;
  }

  // core-js marks its non-compliant polyfill with `sham`.
  if (Reflect.construct.sham) {
    return false;
  }

  // A native Proxy implies a native Reflect.construct.
  if (typeof Proxy === "function") {
    return true;
  }

  // Last resort: probe Reflect.construct with a newTarget argument.
  try {
    Date.prototype.toString.call(Reflect.construct(Date, [], function () {}));
    return true;
  } catch (e) {
    return false;
  }
}
|
|
|
/**
 * Babel helper: emulate `new Parent(...args)` with an optional `newTarget`
 * (`Class`). Picks the native or fallback implementation once, replaces
 * itself with it, then delegates.
 *
 * @param  {Function} Parent   Constructor to invoke.
 * @param  {Array}    args     Constructor arguments.
 * @param  {Function} [Class]  Optional newTarget whose prototype is used.
 * @return {Object}            The constructed instance.
 */
function _construct(Parent, args, Class) {
  if (isNativeReflectConstruct()) {
    // Native path: all future calls go straight to Reflect.construct.
    _construct = Reflect.construct;
  } else {
    // Fallback: bind the arguments onto Parent and `new` the bound function,
    // then repoint the prototype when a distinct newTarget was supplied.
    _construct = function _construct(Parent, args, Class) {
      var bindArgs = [null];
      bindArgs.push.apply(bindArgs, args);
      var BoundParent = Function.bind.apply(Parent, bindArgs);
      var instance = new BoundParent();

      if (Class) {
        _setPrototypeOf(instance, Class.prototype);
      }

      return instance;
    };
  }

  return _construct.apply(null, arguments);
}
|
|
|
/**
 * Babel helper: guard that `this` was initialised (i.e. `super()` ran)
 * before a derived-class body uses it.
 *
 * @param  {Object} self  The candidate `this` value.
 * @return {Object}       `self`, unchanged.
 * @throws {ReferenceError} When `self` is still undefined.
 */
function _assertThisInitialized(self) {
  if (self === undefined) {
    throw new ReferenceError("this hasn't been initialised - super() hasn't been called");
  }

  return self;
}
|
|
|
/**
 * Babel helper: turn a cooked-strings array into a tagged-template strings
 * object by attaching a `raw` property (defaulting to a copy of the cooked
 * strings when none is supplied).
 *
 * @param  {Array} strings  Cooked template chunks (mutated in place).
 * @param  {Array} [raw]    Raw template chunks.
 * @return {Array}          `strings`, now carrying `.raw`.
 */
function _taggedTemplateLiteralLoose(strings, raw) {
  strings.raw = raw || strings.slice(0);
  return strings;
}
|
|
|
/** |
|
* @file create-logger.js |
|
* @module create-logger |
|
*/ |
|
|
|
// Shared log-history buffer. Set to `null` when tracking is disabled via
// `log.history.disable()` and recreated by `log.history.enable()`.
var history = [];
|
/**
 * Build the function that routes a log call of a given `type` to the
 * matching console method, recording every message in the shared
 * `history` buffer first.
 *
 * @private
 * @param {string} name
 *        Prefix (logger name) prepended to every message.
 *
 * @param {Object} log
 *        The logger whose `levels` map is consulted.
 *
 * @return {Function}
 *         `(type, level, args) => void`
 */
var LogByTypeFactory = function LogByTypeFactory(name, log) {
  return function (type, level, args) {
    var allowed = log.levels[level];
    var allowedRE = new RegExp("^(" + allowed + ")$");

    // Non-"log" types get an upper-cased tag, e.g. "WARN:".
    if (type !== 'log') {
      args.unshift(type.toUpperCase() + ':');
    }

    // Prepend the logger name, then snapshot the args into history.
    args.unshift(name + ':');

    if (history) {
      history.push([].concat(args));
    }

    // Without a console we only record history; nothing to print.
    if (!window$1.console) {
      return;
    }

    // Resolved per call so tests can simulate a missing console.
    var fn = window$1.console[type];

    if (!fn && type === 'debug') {
      // Some browsers lack console.debug; use the closest comparable method.
      fn = window$1.console.info || window$1.console.log;
    }

    // Skip when there is no matching console method or the current logging
    // level does not permit this type.
    if (!fn || !allowed || !allowedRE.test(type)) {
      return;
    }

    fn[Array.isArray(args) ? 'apply' : 'call'](window$1.console, args);
  };
};
|
|
|
/**
 * Build a logger function with `error`, `warn`, `debug`, level control and
 * history helpers attached, all prefixed with `name`.
 *
 * Fix: the invalid-level error message read `"... in not a valid log level"`;
 * corrected to `"... is not a valid log level"`.
 *
 * @param  {string} name
 *         Prefix for every message this logger emits.
 * @return {Function}
 *         The logger function, also carrying the API described below.
 */
function createLogger(name) {
  // Private tracking variable for the current logging level.
  var level = 'info';

  // The curried logByType helper; assigned after `log` is created below
  // because the factory needs a reference to it.
  var logByType;

  /**
   * Logs plain messages. Similar to `console.log`.
   *
   * @param {Mixed[]} args
   *        One or more messages or objects that should be logged.
   */
  var log = function log() {
    for (var _len = arguments.length, args = new Array(_len), _key = 0; _key < _len; _key++) {
      args[_key] = arguments[_key];
    }

    logByType('log', level, args);
  };

  logByType = LogByTypeFactory(name, log);

  /**
   * Create a new sublogger which chains the old name to the new name.
   * e.g. `videojs.log.createLogger('player')` logs as "VIDEOJS: player: ...".
   *
   * @param  {string} subname  Name appended to the current logger name.
   * @return {Object}          A new logger.
   */
  log.createLogger = function (subname) {
    return createLogger(name + ': ' + subname);
  };

  /**
   * Available logging levels. Keys are level names; values are
   * `|`-separated method names allowed at that level (used to build a
   * RegExp matched against the called method).
   *
   * @type {Object}
   */
  log.levels = {
    all: 'debug|log|warn|error',
    off: '',
    debug: 'debug|log|warn|error',
    info: 'log|warn|error',
    warn: 'warn|error',
    error: 'error',
    DEFAULT: level
  };

  /**
   * Get or set the current logging level. Acts as a setter when given a
   * string matching a key of `log.levels`.
   *
   * @param  {string} [lvl]  A valid level name to switch to.
   * @return {string}        The current logging level.
   * @throws {Error}         If `lvl` is a string but not a known level.
   */
  log.level = function (lvl) {
    if (typeof lvl === 'string') {
      if (!log.levels.hasOwnProperty(lvl)) {
        // Fixed grammar: was "in not a valid log level".
        throw new Error("\"" + lvl + "\" is not a valid log level");
      }

      level = lvl;
    }

    return level;
  };

  /**
   * Returns a shallow clone of the internal history record. Items inside
   * it are NOT cloned; mutating them mutates the history.
   *
   * @return {Array}
   */
  log.history = function () {
    return history ? [].concat(history) : [];
  };

  /**
   * Filter the history by logger name.
   *
   * @param  {string} fname  The name to filter by.
   * @return {Array}         The matching history entries.
   */
  log.history.filter = function (fname) {
    return (history || []).filter(function (historyItem) {
      // The first item of each entry is the name prefix; match on it.
      return new RegExp(".*" + fname + ".*").test(historyItem[0]);
    });
  };

  /**
   * Clears the internal history tracking, but does not prevent further
   * history tracking.
   */
  log.history.clear = function () {
    if (history) {
      history.length = 0;
    }
  };

  /**
   * Disable history tracking if it is currently enabled.
   */
  log.history.disable = function () {
    if (history !== null) {
      history.length = 0;
      history = null;
    }
  };

  /**
   * Enable history tracking if it is currently disabled.
   */
  log.history.enable = function () {
    if (history === null) {
      history = [];
    }
  };

  /**
   * Logs error messages. Similar to `console.error`.
   *
   * @param {Mixed[]} args
   *        One or more messages or objects that should be logged as an error.
   */
  log.error = function () {
    for (var _len2 = arguments.length, args = new Array(_len2), _key2 = 0; _key2 < _len2; _key2++) {
      args[_key2] = arguments[_key2];
    }

    return logByType('error', level, args);
  };

  /**
   * Logs warning messages. Similar to `console.warn`.
   *
   * @param {Mixed[]} args
   *        One or more messages or objects that should be logged as a warning.
   */
  log.warn = function () {
    for (var _len3 = arguments.length, args = new Array(_len3), _key3 = 0; _key3 < _len3; _key3++) {
      args[_key3] = arguments[_key3];
    }

    return logByType('warn', level, args);
  };

  /**
   * Logs debug messages. Similar to `console.debug`, but falls back to a
   * comparable method if `console.debug` is not available.
   *
   * @param {Mixed[]} args
   *        One or more messages or objects that should be logged as debug.
   */
  log.debug = function () {
    for (var _len4 = arguments.length, args = new Array(_len4), _key4 = 0; _key4 < _len4; _key4++) {
      args[_key4] = arguments[_key4];
    }

    return logByType('debug', level, args);
  };

  return log;
}
|
|
|
/** |
|
* @file log.js |
|
* @module log |
|
*/ |
|
// The default "VIDEOJS"-prefixed logger instance used throughout the library.
var log = createLogger('VIDEOJS');
// Re-exported as `videojs.log.createLogger` for building namespaced loggers.
var createLogger$1 = log.createLogger;
|
|
|
/**
 * Strip a newline (with optional carriage return) plus any following
 * whitespace from a template-literal chunk.
 *
 * @param  {string} s  Raw string chunk.
 * @return {string}    The chunk with line breaks and indentation removed.
 */
function clean(s) {
  var lineBreaks = /\n\r?\s*/g;
  return s.replace(lineBreaks, '');
}
|
|
|
/**
 * Tagged-template helper that joins template chunks after stripping their
 * line breaks/indentation (via `clean`), interleaving the interpolated
 * values passed as the remaining arguments.
 *
 * @param  {Array} sa  Template string chunks.
 * @return {string}    The single-line result.
 */
var tsml = function tsml(sa) {
  var out = '';

  for (var i = 0; i < arguments.length; i++) {
    out += clean(sa[i]) + (arguments[i + 1] || '');
  }

  return out;
};
|
|
|
/** |
|
* @file obj.js |
|
* @module obj |
|
*/ |
|
|
|
/** |
|
* @callback obj:EachCallback |
|
* |
|
* @param {Mixed} value |
|
* The current key for the object that is being iterated over. |
|
* |
|
* @param {string} key |
|
* The current key-value for object that is being iterated over |
|
*/ |
|
|
|
/** |
|
* @callback obj:ReduceCallback |
|
* |
|
* @param {Mixed} accum |
|
* The value that is accumulating over the reduce loop. |
|
* |
|
* @param {Mixed} value |
|
* The current key for the object that is being iterated over. |
|
* |
|
* @param {string} key |
|
* The current key-value for object that is being iterated over |
|
* |
|
* @return {Mixed} |
|
* The new accumulated value. |
|
*/ |
|
// Cached Object.prototype.toString, used by isPlain() for a reliable
// [object Object] type check.
var toString = Object.prototype.toString;
|
/**
 * Get the keys of an Object.
 *
 * @param {Object} object
 *        The Object to get the keys from.
 *
 * @return {string[]}
 *         An array of the object's own keys; empty when the value is not
 *         an object (or has no keys).
 *
 * @private
 */
var keys = function keys(object) {
  if (!isObject(object)) {
    return [];
  }

  return Object.keys(object);
};
|
/**
 * Array-like iteration for objects.
 *
 * @param {Object} object
 *        The object to iterate over.
 *
 * @param {obj:EachCallback} fn
 *        Called once per key with `(value, key)`.
 */
function each(object, fn) {
  var objectKeys = keys(object);

  objectKeys.forEach(function (key) {
    fn(object[key], key);
  });
}
|
/**
 * Array-like reduce for objects.
 *
 * @param {Object} object
 *        The Object to reduce.
 *
 * @param {Function} fn
 *        Called per key with `(accumulated, value, key)`.
 *
 * @param {Mixed} [initial = 0]
 *        Starting value.
 *
 * @return {Mixed}
 *         The final accumulated value.
 */
function reduce(object, fn, initial) {
  if (initial === void 0) {
    initial = 0;
  }

  var step = function (accum, key) {
    return fn(accum, object[key], key);
  };

  return keys(object).reduce(step, initial);
}
|
/**
 * Object.assign-style shallow merge/extend, with a manual fallback for
 * environments lacking the native method.
 *
 * @param {Object} target
 * @param {Object} ...sources
 * @return {Object} The mutated `target`.
 */
function assign(target) {
  var sources = Array.prototype.slice.call(arguments, 1);

  // Prefer the native implementation when present.
  if (Object.assign) {
    return Object.assign.apply(Object, [target].concat(sources));
  }

  sources.forEach(function (source) {
    if (!source) {
      return;
    }

    each(source, function (value, key) {
      target[key] = value;
    });
  });

  return target;
}
|
/**
 * Returns whether a value is an object of any kind — including DOM nodes,
 * arrays, regular expressions, etc. — but not functions. Avoids the gotcha
 * where `typeof null === 'object'`.
 *
 * @param {Object} value
 * @return {boolean}
 */
function isObject(value) {
  if (!value) {
    return false;
  }

  return typeof value === 'object';
}
|
/**
 * Returns whether a value appears to be a "plain" object — a direct
 * instance of `Object` (not an array, DOM node, class instance, etc.).
 *
 * @param {Object} value
 * @return {boolean}
 */
function isPlain(value) {
  if (!isObject(value)) {
    return false;
  }

  return toString.call(value) === '[object Object]' && value.constructor === Object;
}
|
|
|
/** |
|
* @file computed-style.js |
|
* @module computed-style |
|
*/ |
|
/**
 * A safe getComputedStyle.
 *
 * Needed because in Firefox, when the player is loaded in an iframe with
 * `display:none`, `getComputedStyle` returns `null`; we null-check so the
 * player doesn't break in these cases.
 *
 * @function
 * @param {Element} el
 *        The element whose computed style is wanted.
 *
 * @param {string} prop
 *        The property name to read.
 *
 * @see https://bugzilla.mozilla.org/show_bug.cgi?id=548397
 */
function computedStyle(el, prop) {
  if (!el || !prop) {
    return '';
  }

  if (typeof window$1.getComputedStyle !== 'function') {
    return '';
  }

  var styles = window$1.getComputedStyle(el);
  return styles ? styles[prop] : '';
}
|
|
|
// Memoized template-strings object for createEl()'s deprecation warning.
// The first call builds `data`; the function then replaces itself so every
// later call returns the same object.
function _templateObject() {
  var data = _taggedTemplateLiteralLoose(["Setting attributes in the second argument of createEl()\n has been deprecated. Use the third argument instead.\n createEl(type, properties, attributes). Attempting to set ", " to ", "."]);

  _templateObject = function _templateObject() {
    return data;
  };

  return data;
}
|
/**
 * Detect whether a value is a string containing at least one
 * non-whitespace character.
 *
 * @private
 * @param {string} str
 *        The value to check.
 *
 * @return {boolean}
 *         `true` only for non-blank strings.
 */
function isNonBlankString(str) {
  if (typeof str !== 'string') {
    return false;
  }

  return /\S/.test(str);
}
|
/**
 * Throws if the passed string contains whitespace. Used by the class
 * helpers to stay consistent with the classList API.
 *
 * @private
 * @param {string} str
 *        The string to check.
 *
 * @throws {Error}
 *         When the string contains any whitespace.
 */
function throwIfWhitespace(str) {
  var hasWhitespace = /\s/.test(str);

  if (hasWhitespace) {
    throw new Error('class has illegal whitespace characters');
  }
}
|
/**
 * Produce a RegExp matching `className` as a whole word inside an
 * element's `className` string.
 *
 * @private
 * @param {string} className
 *        The class name to build the RegExp for.
 *
 * @return {RegExp}
 *         Matches the class only when bounded by start/end or whitespace.
 */
function classRegExp(className) {
  var pattern = '(^|\\s)' + className + '($|\\s)';
  return new RegExp(pattern);
}
|
/**
 * Whether the current DOM interface appears to be real (i.e. not simulated
 * by a server-side shim — compares the module's `document` against the one
 * attached to the window).
 *
 * @return {boolean}
 *         Will be `true` if the DOM appears to be real, `false` otherwise.
 */
function isReal() {
  // Both document and window will never be undefined thanks to `global`.
  return document === window$1.document;
}
|
/**
 * Determines, via duck typing, whether a value is a DOM element
 * (nodeType 1 = ELEMENT_NODE).
 *
 * @param {Mixed} value
 *        The value to check.
 *
 * @return {boolean}
 *         `true` when the value looks like a DOM element.
 */
function isEl(value) {
  if (!isObject(value)) {
    return false;
  }

  return value.nodeType === 1;
}
|
/**
 * Determines if the current DOM is embedded in an iframe.
 *
 * @return {boolean}
 *         `true` when embedded (or when the check itself is blocked).
 */
function isInFrame() {
  // Safari can throw when accessing `parent`/`self` across origins; treat
  // that as being framed.
  try {
    return window$1.parent !== window$1.self;
  } catch (x) {
    return true;
  }
}
|
/**
 * Creates functions to query the DOM using a given method.
 *
 * @private
 * @param {string} method
 *        The `document`/element method to query with (e.g. `querySelector`).
 *
 * @return {Function}
 *         A `(selector, context)` query function.
 */
function createQuerier(method) {
  return function (selector, context) {
    // Blank selector: delegate to the document method with null so the
    // method's own "no match" result shape is preserved.
    if (!isNonBlankString(selector)) {
      return document[method](null);
    }

    // A string context is itself resolved via querySelector first.
    if (isNonBlankString(context)) {
      context = document.querySelector(context);
    }

    // Fall back to the document when the context is not a real element.
    var ctx = isEl(context) ? context : document;
    return ctx[method] && ctx[method](selector);
  };
}
|
/**
 * Creates an element and applies properties, attributes, and inserts content.
 *
 * @param {string} [tagName='div']
 *        Name of tag to be created.
 *
 * @param {Object} [properties={}]
 *        Element properties to be applied.
 *
 * @param {Object} [attributes={}]
 *        Element attributes to be applied.
 *
 * @param {module:dom~ContentDescriptor} content
 *        A content descriptor object.
 *
 * @return {Element}
 *         The element that was created.
 */
function createEl(tagName, properties, attributes, content) {
  if (tagName === void 0) {
    tagName = 'div';
  }

  if (properties === void 0) {
    properties = {};
  }

  if (attributes === void 0) {
    attributes = {};
  }

  var el = document.createElement(tagName);

  Object.getOwnPropertyNames(properties).forEach(function (propName) {
    var val = properties[propName];

    // See #2176: aria-*, role and type must be set as attributes, not
    // properties. Passing them here is deprecated, so warn and forward.
    if (propName.indexOf('aria-') !== -1 || propName === 'role' || propName === 'type') {
      log.warn(tsml(_templateObject(), propName, val));
      el.setAttribute(propName, val);
    } else if (propName === 'textContent') {
      // textContent isn't supported everywhere; use our helper instead.
      textContent(el, val);
    } else {
      el[propName] = val;
    }
  });

  Object.getOwnPropertyNames(attributes).forEach(function (attrName) {
    el.setAttribute(attrName, attributes[attrName]);
  });

  if (content) {
    appendContent(el, content);
  }

  return el;
}
|
/**
 * Injects text into an element, replacing any existing contents entirely.
 *
 * @param {Element} el
 *        The element to add text content into.
 *
 * @param {string} text
 *        The text content to add.
 *
 * @return {Element}
 *         The element with added text content.
 */
function textContent(el, text) {
  if (typeof el.textContent !== 'undefined') {
    el.textContent = text;
  } else {
    // Older engines without textContent support fall back to innerText.
    el.innerText = text;
  }

  return el;
}
|
/**
 * Insert an element as the first child node of another.
 *
 * @param {Element} child
 *        Element to insert.
 *
 * @param {Element} parent
 *        Element to insert the child into.
 */
function prependTo(child, parent) {
  var first = parent.firstChild;

  if (first) {
    parent.insertBefore(child, first);
  } else {
    // Empty parent: a plain append puts the child first.
    parent.appendChild(child);
  }
}
|
/**
 * Check if an element has a class name.
 *
 * @param {Element} element
 *        Element to check.
 *
 * @param {string} classToCheck
 *        Class name to look for.
 *
 * @return {boolean}
 *         `true` when the element carries the class.
 *
 * @throws {Error}
 *         If `classToCheck` contains whitespace.
 */
function hasClass(element, classToCheck) {
  throwIfWhitespace(classToCheck);

  // Prefer the native classList API when available.
  if (element.classList) {
    return element.classList.contains(classToCheck);
  }

  // Fallback: match against the raw className string.
  return classRegExp(classToCheck).test(element.className);
}
|
/**
 * Add a class name to an element.
 *
 * @param {Element} element
 *        Element to add the class name to.
 *
 * @param {string} classToAdd
 *        Class name to add.
 *
 * @return {Element}
 *         The DOM element with the added class name.
 */
function addClass(element, classToAdd) {
  if (element.classList) {
    element.classList.add(classToAdd);
  } else if (!hasClass(element, classToAdd)) {
    // hasClass performs the whitespace check for this fallback path.
    element.className = (element.className + ' ' + classToAdd).trim();
  }

  return element;
}
|
/**
 * Remove a class name from an element.
 *
 * @param {Element} element
 *        Element to remove a class name from.
 *
 * @param {string} classToRemove
 *        Class name to remove.
 *
 * @return {Element}
 *         The DOM element with the class name removed.
 */
function removeClass(element, classToRemove) {
  if (element.classList) {
    element.classList.remove(classToRemove);
    return element;
  }

  throwIfWhitespace(classToRemove);

  // Rebuild className without the removed class.
  var kept = element.className.split(/\s+/).filter(function (c) {
    return c !== classToRemove;
  });

  element.className = kept.join(' ');
  return element;
}
|
/** |
|
* The callback definition for toggleClass. |
|
* |
|
* @callback module:dom~PredicateCallback |
|
* @param {Element} element |
|
* The DOM element of the Component. |
|
* |
|
* @param {string} classToToggle |
|
* The `className` that wants to be toggled |
|
* |
|
* @return {boolean|undefined} |
|
* If `true` is returned, the `classToToggle` will be added to the |
|
* `element`. If `false`, the `classToToggle` will be removed from |
|
* the `element`. If `undefined`, the callback will be ignored. |
|
*/ |
|
|
|
/**
 * Adds or removes a class name to/from an element depending on an optional
 * condition or the presence/absence of the class name.
 *
 * Fix: the early "no action required" branch previously returned
 * `undefined`, violating the documented `@return {Element}` contract; it
 * now returns the element in every case.
 *
 * @param {Element} element
 *        The element to toggle a class name on.
 *
 * @param {string} classToToggle
 *        The class that should be toggled.
 *
 * @param {boolean|module:dom~PredicateCallback} [predicate]
 *        See the return value for {@link module:dom~PredicateCallback}
 *
 * @return {Element}
 *         The element with a class that has been toggled.
 */
function toggleClass(element, classToToggle, predicate) {
  // This CANNOT use `classList` internally because IE11 does not support the
  // second parameter to the `classList.toggle()` method! Which is fine because
  // `classList` will be used by the add/remove functions.
  var has = hasClass(element, classToToggle);

  if (typeof predicate === 'function') {
    predicate = predicate(element, classToToggle);
  }

  if (typeof predicate !== 'boolean') {
    predicate = !has;
  }

  // If the necessary class operation matches the current state of the
  // element, no mutation is required.
  if (predicate === has) {
    return element;
  }

  if (predicate) {
    addClass(element, classToToggle);
  } else {
    removeClass(element, classToToggle);
  }

  return element;
}
|
/**
 * Apply attributes to an HTML element.
 *
 * `null`, `undefined` and `false` values remove the attribute; `true` sets
 * it to the empty string (boolean-attribute convention).
 *
 * @param {Element} el
 *        Element to add attributes to.
 *
 * @param {Object} [attributes]
 *        Attributes to be applied.
 */
function setAttributes(el, attributes) {
  Object.getOwnPropertyNames(attributes).forEach(function (attrName) {
    var attrValue = attributes[attrName];
    var shouldRemove = attrValue === null || typeof attrValue === 'undefined' || attrValue === false;

    if (shouldRemove) {
      el.removeAttribute(attrName);
    } else {
      el.setAttribute(attrName, attrValue === true ? '' : attrValue);
    }
  });
}
|
/**
 * Get an element's attribute values, as defined on the HTML tag.
 *
 * Attributes are not the same as properties; they are defined on the tag
 * or via setAttribute.
 *
 * @param {Element} tag
 *        Element from which to get tag attributes.
 *
 * @return {Object}
 *         All attributes of the element. Boolean attributes will be `true`
 *         or `false`; others will be strings.
 */
function getAttributes(tag) {
  var obj = {};

  // Attributes whose mere presence means `true`. Not every browser/tag
  // exposes a matching boolean property, so these are checked by name too.
  var knownBooleans = ',autoplay,controls,playsinline,loop,muted,default,defaultMuted,';

  if (tag && tag.attributes && tag.attributes.length > 0) {
    var attrs = tag.attributes;

    for (var i = attrs.length - 1; i >= 0; i--) {
      var attrName = attrs[i].name;
      var attrVal = attrs[i].value;
      var isBooleanAttr = typeof tag[attrName] === 'boolean' ||
        knownBooleans.indexOf(',' + attrName + ',') !== -1;

      if (isBooleanAttr) {
        // Presence (value !== null) means true — an included boolean attr is
        // typically '' and even autoplay="false" still counts as on.
        attrVal = attrVal !== null;
      }

      obj[attrName] = attrVal;
    }
  }

  return obj;
}
|
/**
 * Get the value of an element's attribute.
 *
 * @param {Element} el
 *        A DOM element.
 *
 * @param {string} attribute
 *        Attribute to get the value of.
 *
 * @return {string}
 *         The value of the attribute (per the DOM spec, `null` when the
 *         attribute is absent).
 */
function getAttribute(el, attribute) {
  return el.getAttribute(attribute);
}
|
/**
 * Set the value of an element's attribute.
 *
 * @param {Element} el
 *        A DOM element.
 *
 * @param {string} attribute
 *        Attribute to set.
 *
 * @param {string} value
 *        Value to set the attribute to.
 */
function setAttribute(el, attribute, value) {
  el.setAttribute(attribute, value);
}
|
/**
 * Remove an element's attribute.
 *
 * @param {Element} el
 *        A DOM element.
 *
 * @param {string} attribute
 *        Attribute to remove.
 */
function removeAttribute(el, attribute) {
  el.removeAttribute(attribute);
}
|
/**
 * Attempt to block the ability to select text.
 */
function blockTextSelection() {
  // Focusing <body> drops any current selection before blocking begins.
  document.body.focus();

  // Returning false from onselectstart cancels new selections.
  document.onselectstart = function () {
    return false;
  };
}
|
/**
 * Turn off text selection blocking.
 */
function unblockTextSelection() {
  // Allow selectstart again, undoing blockTextSelection().
  document.onselectstart = function () {
    return true;
  };
}
|
/**
 * Identical to the native `getBoundingClientRect` function, but ensures the
 * method exists and the element is attached to the DOM before continuing.
 *
 * Shallow-copies the rect into a plain object (without `x`/`y`) so callers
 * can add properties, and shims missing `height`/`width` from computed
 * styles for older browsers.
 *
 * @param {Element} el
 *        Element whose `ClientRect` we want.
 *
 * @return {Object|undefined}
 *         A plain rect object, or `undefined` if it cannot be produced.
 */
function getBoundingClientRect(el) {
  if (!el || !el.getBoundingClientRect || !el.parentNode) {
    return;
  }

  var rect = el.getBoundingClientRect();
  var result = {};

  ['bottom', 'height', 'left', 'right', 'top', 'width'].forEach(function (k) {
    if (rect[k] !== undefined) {
      result[k] = rect[k];
    }
  });

  // Fill in dimensions some implementations omit.
  if (!result.height) {
    result.height = parseFloat(computedStyle(el, 'height'));
  }

  if (!result.width) {
    result.width = parseFloat(computedStyle(el, 'width'));
  }

  return result;
}
|
/** |
|
* Represents the position of a DOM element on the page. |
|
* |
|
* @typedef {Object} module:dom~Position |
|
* |
|
* @property {number} left |
|
* Pixels to the left. |
|
* |
|
* @property {number} top |
|
* Pixels from the top. |
|
*/ |
|
|
|
/**
 * Get the position of an element in the DOM.
 *
 * Uses the `getBoundingClientRect` technique from John Resig.
 *
 * @see http://ejohn.org/blog/getboundingclientrect-is-awesome/
 *
 * @param {Element} el
 *        Element from which to get the offset.
 *
 * @return {module:dom~Position}
 *         The position of the element that was passed in.
 */
function findPosition(el) {
  var box;

  if (el.getBoundingClientRect && el.parentNode) {
    box = el.getBoundingClientRect();
  }

  // Detached or rect-less elements report the origin.
  if (!box) {
    return {
      left: 0,
      top: 0
    };
  }

  var docEl = document.documentElement;
  var body = document.body;
  var clientLeft = docEl.clientLeft || body.clientLeft || 0;
  var scrollLeft = window$1.pageXOffset || body.scrollLeft;
  var clientTop = docEl.clientTop || body.clientTop || 0;
  var scrollTop = window$1.pageYOffset || body.scrollTop;

  // Android sometimes returns slightly-off decimal values, so round.
  return {
    left: Math.round(box.left + scrollLeft - clientLeft),
    top: Math.round(box.top + scrollTop - clientTop)
  };
}
|
/** |
|
* Represents x and y coordinates for a DOM element or mouse pointer. |
|
* |
|
* @typedef {Object} module:dom~Coordinates |
|
* |
|
* @property {number} x |
|
* x coordinate in pixels |
|
* |
|
* @property {number} y |
|
* y coordinate in pixels |
|
*/ |
|
|
|
/** |
|
* Get the pointer position within an element. |
|
* |
|
* The base on the coordinates are the bottom left of the element. |
|
* |
|
* @param {Element} el |
|
* Element on which to get the pointer position on. |
|
* |
|
* @param {EventTarget~Event} event |
|
* Event object. |
|
* |
|
* @return {module:dom~Coordinates} |
|
* A coordinates object corresponding to the mouse position. |
|
* |
|
*/ |
|
|
|
function getPointerPosition(el, event) {
  var box = findPosition(el);
  var width = el.offsetWidth;
  var height = el.offsetHeight;
  var pageX = event.pageX;
  var pageY = event.pageY;

  // For touch interactions, read the pointer from the first changed touch.
  if (event.changedTouches) {
    pageX = event.changedTouches[0].pageX;
    pageY = event.changedTouches[0].pageY;
  }

  var position = {};

  // Both axes are normalized to [0, 1]. The y axis is measured upward
  // from the bottom edge of the element.
  position.y = Math.max(0, Math.min(1, (box.top - pageY + height) / height));
  position.x = Math.max(0, Math.min(1, (pageX - box.left) / width));
  return position;
}
|
/** |
|
* Determines, via duck typing, whether or not a value is a text node. |
|
* |
|
* @param {Mixed} value |
|
* Check if this value is a text node. |
|
* |
|
* @return {boolean} |
|
* Will be `true` if the value is a text node, `false` otherwise. |
|
*/ |
|
|
|
function isTextNode(value) {
  // Duck typing: a DOM text node is an object with `nodeType` 3.
  if (!isObject(value)) {
    return false;
  }

  return value.nodeType === 3;
}
|
/** |
|
* Empties the contents of an element. |
|
* |
|
* @param {Element} el |
|
* The element to empty children from |
|
* |
|
* @return {Element} |
|
* The element with no children |
|
*/ |
|
|
|
function emptyEl(el) {
  // Remove children one at a time until none remain; `firstChild`
  // becomes null once the element is empty.
  var child = el.firstChild;

  while (child) {
    el.removeChild(child);
    child = el.firstChild;
  }

  return el;
}
|
/** |
|
* This is a mixed value that describes content to be injected into the DOM |
|
* via some method. It can be of the following types: |
|
* |
|
* Type | Description |
|
* -----------|------------- |
|
* `string` | The value will be normalized into a text node. |
|
* `Element` | The value will be accepted as-is. |
|
* `TextNode` | The value will be accepted as-is. |
|
* `Array` | A one-dimensional array of strings, elements, text nodes, or functions. These functions should return a string, element, or text node (any other return value, like an array, will be ignored). |
|
* `Function` | A function, which is expected to return a string, element, text node, or array - any of the other possible values described above. This means that a content descriptor could be a function that returns an array of functions, but those second-level functions must return strings, elements, or text nodes. |
|
* |
|
* @typedef {string|Element|TextNode|Array|Function} module:dom~ContentDescriptor |
|
*/ |
|
|
|
/** |
|
* Normalizes content for eventual insertion into the DOM. |
|
* |
|
* This allows a wide range of content definition methods, but helps protect |
|
* from falling into the trap of simply writing to `innerHTML`, which could |
|
* be an XSS concern. |
|
* |
|
* The content for an element can be passed in multiple types and |
|
* combinations, whose behavior is as follows: |
|
* |
|
* @param {module:dom~ContentDescriptor} content |
|
* A content descriptor value. |
|
* |
|
* @return {Array} |
|
* All of the content that was passed in, normalized to an array of |
|
* elements or text nodes. |
|
*/ |
|
|
|
function normalizeContent(content) {
  // A top-level function may produce the array itself, so it must be
  // invoked before array normalization happens.
  if (typeof content === 'function') {
    content = content();
  }

  // Work on an array regardless of whether one or many items were given.
  var items = Array.isArray(content) ? content : [content];

  var nodes = items.map(function (item) {
    // A second-level function may produce a single node-like value.
    if (typeof item === 'function') {
      item = item();
    }

    // Elements and text nodes pass through untouched.
    if (isEl(item) || isTextNode(item)) {
      return item;
    }

    // Non-whitespace-only strings become text nodes; anything else
    // maps to `undefined` and is dropped by the filter below.
    if (typeof item === 'string' && /\S/.test(item)) {
      return document.createTextNode(item);
    }
  });

  return nodes.filter(function (node) {
    return node;
  });
}
|
/** |
|
* Normalizes and appends content to an element. |
|
* |
|
* @param {Element} el |
|
* Element to append normalized content to. |
|
* |
|
* @param {module:dom~ContentDescriptor} content |
|
* A content descriptor value. |
|
* |
|
* @return {Element} |
|
* The element with appended normalized content. |
|
*/ |
|
|
|
function appendContent(el, content) {
  // Normalize to an array of nodes, then append each in order.
  var nodes = normalizeContent(content);

  for (var i = 0; i < nodes.length; i++) {
    el.appendChild(nodes[i]);
  }

  return el;
}
|
/** |
|
* Normalizes and inserts content into an element; this is identical to |
|
* `appendContent()`, except it empties the element first. |
|
* |
|
* @param {Element} el |
|
* Element to insert normalized content into. |
|
* |
|
* @param {module:dom~ContentDescriptor} content |
|
* A content descriptor value. |
|
* |
|
* @return {Element} |
|
* The element with inserted normalized content. |
|
*/ |
|
|
|
function insertContent(el, content) {
  // Same contract as `appendContent`, but the element is emptied first.
  emptyEl(el);
  return appendContent(el, content);
}
|
/** |
|
* Check if an event was a single left click. |
|
* |
|
* @param {EventTarget~Event} event |
|
* Event object. |
|
* |
|
* @return {boolean} |
|
* Will be `true` if a single left click, `false` otherwise. |
|
*/ |
|
|
|
function isSingleLeftClick(event) {
  // Note: if you create something draggable, be sure to call this on
  // both `mousedown` and `mousemove`; for a plain button, `mousedown`
  // alone is enough.
  //
  // `buttons` is consulted in addition to `button` because a middle
  // click can report `button === 0` with `buttons === 4`, and a
  // combined press (e.g. middle held plus left) reports
  // `button === 0, buttons === 5` — `button` alone cannot tell these
  // apart from a real left click.
  if (event.button === undefined && event.buttons === undefined) {
    // Chrome's "simulate mobile devices" mode fires events with neither
    // property set; accept those as a left click.
    return true;
  }

  if (event.button === 0 && event.buttons === undefined) {
    // Some touch environments (Safari on iOS, BlackBerry, ...) populate
    // `button` but never `buttons`.
    return true;
  }

  // After the special cases above, only an exact primary-button press
  // (`button === 0` with only the primary button down) qualifies.
  return event.button === 0 && event.buttons === 1;
}
|
/** |
|
* Finds a single DOM element matching `selector` within the optional |
|
* `context` of another DOM element (defaulting to `document`). |
|
* |
|
* @param {string} selector |
|
* A valid CSS selector, which will be passed to `querySelector`. |
|
* |
|
* @param {Element|String} [context=document] |
|
* A DOM element within which to query. Can also be a selector |
|
* string in which case the first matching element will be used |
|
* as context. If missing (or no element matches selector), falls |
|
* back to `document`. |
|
* |
|
* @return {Element|null} |
|
* The element that was found or null. |
|
*/ |
|
|
|
var $ = createQuerier('querySelector');
/**
 * Finds all DOM elements matching `selector` within the optional
 * `context` of another DOM element (defaulting to `document`).
 *
 * @param {string} selector
 *        A valid CSS selector, which will be passed to `querySelectorAll`.
 *
 * @param {Element|String} [context=document]
 *        A DOM element within which to query. Can also be a selector
 *        string in which case the first matching element will be used
 *        as context. If missing (or no element matches selector), falls
 *        back to `document`.
 *
 * @return {NodeList}
 *         An element list of elements that were found. Will be empty if none
 *         were found.
 */

var $$ = createQuerier('querySelectorAll');

// Frozen namespace object collecting this module's DOM helpers so they
// can be re-exported as a unit.
var Dom = /*#__PURE__*/Object.freeze({
  isReal: isReal,
  isEl: isEl,
  isInFrame: isInFrame,
  createEl: createEl,
  textContent: textContent,
  prependTo: prependTo,
  hasClass: hasClass,
  addClass: addClass,
  removeClass: removeClass,
  toggleClass: toggleClass,
  setAttributes: setAttributes,
  getAttributes: getAttributes,
  getAttribute: getAttribute,
  setAttribute: setAttribute,
  removeAttribute: removeAttribute,
  blockTextSelection: blockTextSelection,
  unblockTextSelection: unblockTextSelection,
  getBoundingClientRect: getBoundingClientRect,
  findPosition: findPosition,
  getPointerPosition: getPointerPosition,
  isTextNode: isTextNode,
  emptyEl: emptyEl,
  normalizeContent: normalizeContent,
  appendContent: appendContent,
  insertContent: insertContent,
  isSingleLeftClick: isSingleLeftClick,
  $: $,
  $$: $$
});
|
|
|
/** |
|
* @file guid.js |
|
* @module guid |
|
*/ |
|
|
|
/** |
|
* Unique ID for an element or function |
|
* @type {Number} |
|
*/ |
|
var _guid = 1;
/**
 * Get a unique auto-incrementing ID by number that has not been returned before.
 *
 * @return {number}
 *         A new unique ID.
 */

function newGUID() {
  var id = _guid;

  _guid += 1;
  return id;
}
|
|
|
/** |
|
* @file dom-data.js |
|
* @module dom-data |
|
*/ |
|
/** |
|
* Element Data Store. |
|
* |
|
* Allows for binding data to an element without putting it directly on the |
|
* element. Ex. Event listeners are stored here. |
|
* (also from jsninja.com, slightly modified and updated for closure compiler) |
|
* |
|
* @type {Object} |
|
* @private |
|
*/ |
|
|
|
var elData = {};
/*
 * Unique attribute name under which an element's guid is stored. Includes
 * a timestamp so it will not collide with properties set by other code.
 *
 * @type {String}
 * @constant
 * @private
 */

var elIdAttr = 'vdata' + new Date().getTime();
/**
 * Returns the cache object where data for an element is stored.
 *
 * Lazily assigns the element a guid and creates its cache entry on
 * first access.
 *
 * @param {Element} el
 *        Element to store data for.
 *
 * @return {Object}
 *         The cache object for that el that was passed in.
 */

function getData(el) {
  var id = el[elIdAttr] || (el[elIdAttr] = newGUID());
  var cache = elData[id];

  if (!cache) {
    cache = {};
    elData[id] = cache;
  }

  return cache;
}
|
/** |
|
* Returns whether or not an element has cached data |
|
* |
|
* @param {Element} el |
|
* Check if this element has cached data. |
|
* |
|
* @return {boolean} |
|
* - True if the DOM element has cached data. |
|
* - False otherwise. |
|
*/ |
|
|
|
function hasData(el) {
  var id = el[elIdAttr];

  // An element without a guid was never given any data.
  if (!id) {
    return false;
  }

  // Otherwise, data exists if its cache object has any own properties.
  return Object.getOwnPropertyNames(elData[id]).length > 0;
}
|
/** |
|
* Delete data for the element from the cache and the guid attr from getElementById |
|
* |
|
* @param {Element} el |
|
* Remove cached data for this element. |
|
*/ |
|
|
|
function removeData(el) {
  var id = el[elIdAttr];

  // Nothing to do for elements that never received a guid.
  if (!id) {
    return;
  }

  // Drop all stored data for the element.
  delete elData[id];

  // Then remove the guid property from the DOM node itself. Some host
  // objects (older IE) throw on `delete`, so fall back accordingly.
  try {
    delete el[elIdAttr];
  } catch (e) {
    if (el.removeAttribute) {
      el.removeAttribute(elIdAttr);
    } else {
      // IE doesn't appear to support removeAttribute on the document element
      el[elIdAttr] = null;
    }
  }
}
|
|
|
/** |
|
* @file events.js. An Event System (John Resig - Secrets of a JS Ninja http://jsninja.com/) |
|
* (Original book version wasn't completely usable, so fixed some things and made Closure Compiler compatible) |
|
* This should work very similarly to jQuery's events, however it's based off the book version which isn't as |
|
* robust as jquery's, so there's probably some differences. |
|
* |
|
* @file events.js |
|
* @module events |
|
*/ |
|
/** |
|
* Clean up the listener cache and dispatchers |
|
* |
|
* @param {Element|Object} elem |
|
* Element to clean up |
|
* |
|
* @param {string} type |
|
* Type of event to clean up |
|
*/ |
|
|
|
function _cleanUpEvents(elem, type) {
  var data = getData(elem);

  // Remove the events of a particular type if there are none left.
  if (data.handlers[type].length === 0) {
    // `delete` is used rather than setting to null, because a null entry
    // was causing an error with data.handlers.
    delete data.handlers[type];

    // Detach the shared meta-handler (dispatcher) from the element for
    // this type, using the legacy IE API when needed.
    if (elem.removeEventListener) {
      elem.removeEventListener(type, data.dispatcher, false);
    } else if (elem.detachEvent) {
      elem.detachEvent('on' + type, data.dispatcher);
    }
  } // Remove the events object if there are no types left


  if (Object.getOwnPropertyNames(data.handlers).length <= 0) {
    delete data.handlers;
    delete data.dispatcher;
    delete data.disabled;
  } // Finally remove the element data if there is no data left


  if (Object.getOwnPropertyNames(data).length === 0) {
    removeData(elem);
  }
}
|
/**
 * Loops through an array of event types and calls the requested method for each type.
 *
 * @param {Function} fn
 *        The event method we want to use.
 *
 * @param {Element|Object} elem
 *        Element or object to bind listeners to
 *
 * @param {string[]} types
 *        Event types to operate on.
 *
 * @param {EventTarget~EventListener} callback
 *        Event listener.
 */

function _handleMultipleEvents(fn, elem, types, callback) {
  types.forEach(function (eventType) {
    // Delegate to the single-type event method for each entry.
    fn(elem, eventType, callback);
  });
}
|
/** |
|
* Fix a native event to have standard property values |
|
* |
|
* @param {Object} event |
|
* Event object to fix. |
|
* |
|
* @return {Object} |
|
* Fixed event object. |
|
*/ |
|
|
|
|
|
function fixEvent(event) {
  // Shared no-op predicates used for the is*Stopped flags below.
  function returnTrue() {
    return true;
  }

  function returnFalse() {
    return false;
  } // Test if fixing up is needed.
  // Used to check `!event.stopPropagation` instead of `isPropagationStopped`,
  // but native events return true for stopPropagation while lacking the
  // other expected methods like isPropagationStopped. Seems to be a problem
  // with the Javascript Ninja code. So we're just overriding all events now.


  if (!event || !event.isPropagationStopped) {
    var old = event || window$1.event;
    event = {}; // Clone the old object so that we can modify the values.
    // IE8 doesn't like when you mess with native event properties, and
    // Firefox returns false for event.hasOwnProperty('type') and other props,
    // which makes copying more difficult.
    // TODO: Probably best to create a whitelist of event props

    for (var key in old) {
      // Safari 6.0.3 warns you if you try to copy deprecated layerX/Y.
      // Chrome warns you if you try to copy deprecated keyboardEvent.keyLocation
      // and webkitMovementX/Y.
      if (key !== 'layerX' && key !== 'layerY' && key !== 'keyLocation' && key !== 'webkitMovementX' && key !== 'webkitMovementY') {
        // Chrome 32+ warns if you try to copy deprecated returnValue, but
        // we still want to if preventDefault isn't supported (IE8).
        if (!(key === 'returnValue' && old.preventDefault)) {
          event[key] = old[key];
        }
      }
    } // The event occurred on this element.


    if (!event.target) {
      event.target = event.srcElement || document;
    } // Handle which other element the event is related to.


    if (!event.relatedTarget) {
      event.relatedTarget = event.fromElement === event.target ? event.toElement : event.fromElement;
    } // Stop the default browser action, keeping the original event and the
    // legacy `returnValue` flag in sync.


    event.preventDefault = function () {
      if (old.preventDefault) {
        old.preventDefault();
      }

      event.returnValue = false;
      old.returnValue = false;
      event.defaultPrevented = true;
    };

    event.defaultPrevented = false; // Stop the event from bubbling.

    event.stopPropagation = function () {
      if (old.stopPropagation) {
        old.stopPropagation();
      }

      event.cancelBubble = true;
      old.cancelBubble = true;
      event.isPropagationStopped = returnTrue;
    };

    event.isPropagationStopped = returnFalse; // Stop the event from bubbling and executing other handlers.

    event.stopImmediatePropagation = function () {
      if (old.stopImmediatePropagation) {
        old.stopImmediatePropagation();
      }

      event.isImmediatePropagationStopped = returnTrue;
      event.stopPropagation();
    };

    event.isImmediatePropagationStopped = returnFalse; // Handle mouse position:
    // derive pageX/pageY from clientX/clientY plus the document scroll
    // offset, minus the root element border.

    if (event.clientX !== null && event.clientX !== undefined) {
      var doc = document.documentElement;
      var body = document.body;
      event.pageX = event.clientX + (doc && doc.scrollLeft || body && body.scrollLeft || 0) - (doc && doc.clientLeft || body && body.clientLeft || 0);
      event.pageY = event.clientY + (doc && doc.scrollTop || body && body.scrollTop || 0) - (doc && doc.clientTop || body && body.clientTop || 0);
    } // Handle key presses.


    event.which = event.charCode || event.keyCode; // Fix button for mouse clicks:
    // 0 == left; 1 == middle; 2 == right

    if (event.button !== null && event.button !== undefined) {
      // The following is disabled because it does not pass videojs-standard
      // and... yikes.

      /* eslint-disable */
      event.button = event.button & 1 ? 0 : event.button & 4 ? 1 : event.button & 2 ? 2 : 0;
      /* eslint-enable */
    }
  } // Returns fixed-up instance.


  return event;
}
|
/** |
|
* Whether passive event listeners are supported |
|
*/ |
|
|
|
var _supportsPassive = false;

// Feature-detect passive event listeners: a browser that supports the
// `passive` option will read it off the options object, firing this
// getter. Browsers that only accept a boolean capture flag never touch
// the property, leaving `_supportsPassive` false.
(function () {
  try {
    var opts = Object.defineProperty({}, 'passive', {
      get: function get() {
        _supportsPassive = true;
      }
    });
    window$1.addEventListener('test', null, opts);
    window$1.removeEventListener('test', null, opts);
  } catch (e) {// disregard
  }
})();
|
/** |
|
* Touch events Chrome expects to be passive |
|
*/ |
|
|
|
|
|
var passiveEvents = ['touchstart', 'touchmove'];
/**
 * Add an event listener to element.
 * It stores the handler function in a separate cache object
 * and adds a generic handler to the element's event,
 * along with a unique id (guid) to the element.
 *
 * @param {Element|Object} elem
 *        Element or object to bind listeners to
 *
 * @param {string|string[]} type
 *        Type of event to bind to.
 *
 * @param {EventTarget~EventListener} fn
 *        Event listener.
 */

function on(elem, type, fn) {
  if (Array.isArray(type)) {
    return _handleMultipleEvents(on, elem, type, fn);
  }

  var data = getData(elem); // We need a place to store all our handler data.

  if (!data.handlers) {
    data.handlers = {};
  }

  if (!data.handlers[type]) {
    data.handlers[type] = [];
  }

  // Tag the listener with a guid so `off` can identify it later.
  if (!fn.guid) {
    fn.guid = newGUID();
  }

  data.handlers[type].push(fn);

  // A single dispatcher per element fans events out to the cached
  // handlers; it is created lazily on the first listener.
  if (!data.dispatcher) {
    data.disabled = false;

    data.dispatcher = function (event, hash) {
      // `disabled` is toggled by `trigger` while running a default action,
      // so handlers are not executed twice.
      if (data.disabled) {
        return;
      }

      event = fixEvent(event);
      var handlers = data.handlers[event.type];

      if (handlers) {
        // Copy handlers so if handlers are added/removed during the process it doesn't throw everything off.
        var handlersCopy = handlers.slice(0);

        for (var m = 0, n = handlersCopy.length; m < n; m++) {
          if (event.isImmediatePropagationStopped()) {
            break;
          } else {
            // A throwing handler must not prevent the remaining handlers
            // from running; log and continue.
            try {
              handlersCopy[m].call(elem, event, hash);
            } catch (e) {
              log.error(e);
            }
          }
        }
      }
    };
  }

  // Only the first listener of a type attaches the dispatcher to the
  // element itself (legacy IE via attachEvent).
  if (data.handlers[type].length === 1) {
    if (elem.addEventListener) {
      var options = false;

      // Register touch events passively where supported, as Chrome expects.
      if (_supportsPassive && passiveEvents.indexOf(type) > -1) {
        options = {
          passive: true
        };
      }

      elem.addEventListener(type, data.dispatcher, options);
    } else if (elem.attachEvent) {
      elem.attachEvent('on' + type, data.dispatcher);
    }
  }
}
|
/** |
|
* Removes event listeners from an element |
|
* |
|
* @param {Element|Object} elem |
|
* Object to remove listeners from. |
|
* |
|
* @param {string|string[]} [type] |
|
* Type of listener to remove. Don't include to remove all events from element. |
|
* |
|
* @param {EventTarget~EventListener} [fn] |
|
* Specific listener to remove. Don't include to remove listeners for an event |
|
* type. |
|
*/ |
|
|
|
function off(elem, type, fn) {
  // Don't want to add a cache object through getData if not needed.
  if (!hasData(elem)) {
    return;
  }

  var data = getData(elem);

  // If no events exist, nothing to unbind.
  if (!data.handlers) {
    return;
  }

  if (Array.isArray(type)) {
    return _handleMultipleEvents(off, elem, type, fn);
  }

  // Clears every handler of a given type, then tears down the
  // dispatcher/cache entries if nothing remains.
  var removeType = function removeType(el, t) {
    data.handlers[t] = [];

    _cleanUpEvents(el, t);
  };

  // No type given: remove all bound events.
  if (type === undefined) {
    for (var t in data.handlers) {
      if (Object.prototype.hasOwnProperty.call(data.handlers || {}, t)) {
        removeType(elem, t);
      }
    }

    return;
  }

  var handlers = data.handlers[type];

  // If no handlers exist, nothing to unbind.
  if (!handlers) {
    return;
  }

  // No specific listener given: remove all listeners of this type.
  if (!fn) {
    removeType(elem, type);
    return;
  }

  // Remove only the handler(s) whose guid matches the given listener.
  // Iterating backwards keeps indices valid while splicing.
  if (fn.guid) {
    for (var i = handlers.length - 1; i >= 0; i--) {
      if (handlers[i].guid === fn.guid) {
        handlers.splice(i, 1);
      }
    }
  }

  _cleanUpEvents(elem, type);
}
|
/**
 * Trigger an event for an element
 *
 * @param {Element|Object} elem
 *        Element to trigger an event on
 *
 * @param {EventTarget~Event|string} event
 *        A string (the type) or an event object with a type attribute
 *
 * @param {Object} [hash]
 *        data hash to pass along with the event
 *
 * @return {boolean}
 *         The opposite of `defaultPrevented`: `true` when the default
 *         was not prevented, `false` when it was.
 */

function trigger(elem, event, hash) {
  // Fetches element data and a reference to the parent (for bubbling).
  // Don't want to add a data object to cache for every parent,
  // so checking hasData first.
  var elemData = hasData(elem) ? getData(elem) : {};
  var parent = elem.parentNode || elem.ownerDocument;

  // If an event name was passed as a string, creates an event out of it.
  if (typeof event === 'string') {
    event = {
      type: event,
      target: elem
    };
  } else if (!event.target) {
    event.target = elem;
  } // Normalizes the event properties.


  event = fixEvent(event); // If the passed element has a dispatcher, executes the established handlers.

  if (elemData.dispatcher) {
    elemData.dispatcher.call(elem, event, hash);
  } // Unless explicitly stopped or the event does not bubble (e.g. media events)
  // recursively calls this function to bubble the event up the DOM.


  if (parent && !event.isPropagationStopped() && event.bubbles === true) {
    trigger.call(null, parent, event, hash); // If at the top of the DOM, triggers the default action unless disabled.
  } else if (!parent && !event.defaultPrevented && event.target && event.target[event.type]) {
    var targetData = getData(event.target); // Checks if the target has a default action for this event.

    if (event.target[event.type]) {
      // Temporarily disables event dispatching on the target as we have already executed the handler.
      targetData.disabled = true; // Executes the default action.

      if (typeof event.target[event.type] === 'function') {
        event.target[event.type]();
      } // Re-enables event dispatching.


      targetData.disabled = false;
    }
  } // Inform the triggerer if the default was prevented by returning false.


  return !event.defaultPrevented;
}
|
/** |
|
* Trigger a listener only once for an event. |
|
* |
|
* @param {Element|Object} elem |
|
* Element or object to bind to. |
|
* |
|
* @param {string|string[]} type |
|
* Name/type of event |
|
* |
|
* @param {Event~EventListener} fn |
|
* Event listener function |
|
*/ |
|
|
|
function one(elem, type, fn) {
  if (Array.isArray(type)) {
    return _handleMultipleEvents(one, elem, type, fn);
  }

  var wrapper = function wrapper() {
    // Unbind before invoking, so the listener cannot fire twice even if
    // the event is re-triggered from inside `fn`.
    off(elem, type, wrapper);
    fn.apply(this, arguments);
  };

  // Share a single guid between the wrapper and the original function
  // so the listener can also be removed via the original function's ID.
  fn.guid = fn.guid || newGUID();
  wrapper.guid = fn.guid;
  on(elem, type, wrapper);
}
|
|
|
// Frozen namespace object collecting the event helpers of this module
// so they can be re-exported as a unit.
var Events = /*#__PURE__*/Object.freeze({
  fixEvent: fixEvent,
  on: on,
  off: off,
  trigger: trigger,
  one: one
});
|
|
|
/** |
|
* @file setup.js - Functions for setting up a player without |
|
* user interaction based on the data-setup `attribute` of the video tag. |
|
* |
|
* @module setup |
|
*/ |
|
// Flipped to true once the page has finished loading; autoSetup stops
// rescheduling itself after that point.
var _windowLoaded = false;

// The videojs library function, supplied later via autoSetupTimeout.
var videojs;
/**
 * Set up any tags that have a data-setup `attribute` when the player is started.
 */

var autoSetup = function autoSetup() {
  // Protect against breakage in non-browser environments and check global autoSetup option.
  if (!isReal() || videojs.options.autoSetup === false) {
    return;
  }

  // Collect all candidate media elements: <video>, <audio>, and <video-js>.
  var vids = Array.prototype.slice.call(document.getElementsByTagName('video'));
  var audios = Array.prototype.slice.call(document.getElementsByTagName('audio'));
  var divs = Array.prototype.slice.call(document.getElementsByTagName('video-js'));
  var mediaEls = vids.concat(audios, divs); // Check if any media elements exist

  if (mediaEls && mediaEls.length > 0) {
    for (var i = 0, e = mediaEls.length; i < e; i++) {
      var mediaEl = mediaEls[i]; // Check if element exists, has getAttribute func.

      if (mediaEl && mediaEl.getAttribute) {
        // Make sure this player hasn't already been set up.
        if (mediaEl.player === undefined) {
          var options = mediaEl.getAttribute('data-setup'); // Check if data-setup attr exists.
          // We only auto-setup if they've added the data-setup attr.

          if (options !== null) {
            // Create new video.js instance.
            videojs(mediaEl);
          }
        } // If getAttribute isn't defined, we need to wait for the DOM.

      } else {
        autoSetupTimeout(1);
        break;
      }
    } // No videos were found, so keep looping unless page is finished loading.

  } else if (!_windowLoaded) {
    autoSetupTimeout(1);
  }
};
|
/** |
|
* Wait until the page is loaded before running autoSetup. This will be called in |
|
* autoSetup if `hasLoaded` returns false. |
|
* |
|
* @param {number} wait |
|
* How long to wait in ms |
|
* |
|
* @param {module:videojs} [vjs] |
|
* The videojs library function |
|
*/ |
|
|
|
|
|
function autoSetupTimeout(wait, vjs) {
  // Remember the library function so autoSetup can create players later.
  if (vjs) {
    videojs = vjs;
  }

  window$1.setTimeout(autoSetup, wait);
}
|
|
|
// If the document has already finished loading when this module runs,
// record that immediately; otherwise wait for the window load event.
if (isReal() && document.readyState === 'complete') {
  _windowLoaded = true;
} else {
  /**
   * Listen for the load event on window, and set _windowLoaded to true.
   *
   * @listens load
   */
  one(window$1, 'load', function () {
    _windowLoaded = true;
  });
}
|
|
|
/** |
|
* @file stylesheet.js |
|
* @module stylesheet |
|
*/ |
|
/** |
|
* Create a DOM syle element given a className for it. |
|
* |
|
* @param {string} className |
|
* The className to add to the created style element. |
|
* |
|
* @return {Element} |
|
* The element that was created. |
|
*/ |
|
|
|
var createStyleElement = function createStyleElement(className) {
  // Build a bare <style> element tagged with the given class name.
  var styleEl = document.createElement('style');

  styleEl.className = className;
  return styleEl;
};
|
/** |
|
* Add text to a DOM element. |
|
* |
|
* @param {Element} el |
|
* The Element to add text content to. |
|
* |
|
* @param {string} content |
|
* The text to add to the element. |
|
*/ |
|
|
|
var setTextContent = function setTextContent(el, content) {
  // Old IE exposes stylesheet text through `styleSheet.cssText` on
  // <style> elements rather than `textContent`.
  if (el.styleSheet) {
    el.styleSheet.cssText = content;
    return;
  }

  el.textContent = content;
};
|
|
|
/** |
|
* @file fn.js |
|
* @module fn |
|
*/ |
|
/** |
|
* Bind (a.k.a proxy or context). A simple method for changing the context of |
|
* a function. |
|
* |
|
* It also stores a unique id on the function so it can be easily removed from |
|
* events. |
|
* |
|
* @function |
|
* @param {Mixed} context |
|
* The object to bind as scope. |
|
* |
|
* @param {Function} fn |
|
* The function to be bound to a scope. |
|
* |
|
* @param {number} [uid] |
|
* An optional unique ID for the function to be set |
|
* |
|
* @return {Function} |
|
* The new function that will be bound into the context given |
|
*/ |
|
|
|
var bind = function bind(context, fn, uid) {
  // Make sure the source function has a unique ID so listeners created
  // from it can be found and removed later.
  if (!fn.guid) {
    fn.guid = newGUID();
  }

  // The returned function simply forwards all arguments with the
  // requested `this` context.
  var bound = function bound() {
    return fn.apply(context, arguments);
  };

  // When a uid is supplied, prefix the guid with it. This distinguishes
  // bindings that share the same underlying function (e.g. a method on a
  // shared prototype bound by several objects), so removing one
  // listener does not remove them all. When using this, the same bind
  // call must be used when removing the listener as well; currently
  // used in text tracks.
  bound.guid = uid ? uid + '_' + fn.guid : fn.guid;
  return bound;
};
|
/** |
|
* Wraps the given function, `fn`, with a new function that only invokes `fn` |
|
* at most once per every `wait` milliseconds. |
|
* |
|
* @function |
|
* @param {Function} fn |
|
* The function to be throttled. |
|
* |
|
* @param {number} wait |
|
* The number of milliseconds by which to throttle. |
|
* |
|
* @return {Function} |
|
*/ |
|
|
|
var throttle = function throttle(fn, wait) {
  // The clock starts at creation time, so the very first call is also
  // subject to the `wait` window.
  var last = Date.now();

  return function throttled() {
    var now = Date.now();

    // Still inside the throttle window: drop this invocation.
    if (now - last < wait) {
      return;
    }

    fn.apply(void 0, arguments);
    last = now;
  };
};
|
/** |
|
* Creates a debounced function that delays invoking `func` until after `wait` |
|
* milliseconds have elapsed since the last time the debounced function was |
|
* invoked. |
|
* |
|
* Inspired by lodash and underscore implementations. |
|
* |
|
* @function |
|
* @param {Function} func |
|
* The function to wrap with debounce behavior. |
|
* |
|
* @param {number} wait |
|
* The number of milliseconds to wait after the last invocation. |
|
* |
|
* @param {boolean} [immediate] |
|
* Whether or not to invoke the function immediately upon creation. |
|
* |
|
* @param {Object} [context=window] |
|
* The "context" in which the debounced function should debounce. For |
|
* example, if this function should be tied to a Video.js player, |
|
* the player can be passed here. Alternatively, defaults to the |
|
* global `window` object. |
|
* |
|
* @return {Function} |
|
* A debounced function. |
|
*/ |
|
|
|
var debounce = function debounce(func, wait, immediate, context) {
  if (context === void 0) {
    context = window$1;
  }

  // Pending timer id; null when no invocation is scheduled.
  var timeout;

  // Cancels any pending trailing invocation.
  var cancel = function cancel() {
    context.clearTimeout(timeout);
    timeout = null;
  };
  /* eslint-disable consistent-this */


  var debounced = function debounced() {
    var self = this;
    var args = arguments;

    var _later = function later() {
      timeout = null;
      _later = null;

      // In trailing mode (the default), the wrapped function runs only
      // after the wait elapses with no further calls.
      if (!immediate) {
        func.apply(self, args);
      }
    };

    // In immediate mode, fire on the leading edge — but only when no
    // timer is currently pending (i.e. the debounce window has expired).
    if (!timeout && immediate) {
      func.apply(self, args);
    }

    // Each call resets the wait window.
    context.clearTimeout(timeout);
    timeout = context.setTimeout(_later, wait);
  };
  /* eslint-enable consistent-this */


  debounced.cancel = cancel;
  return debounced;
};
|
|
|
/**
 * @file src/js/event-target.js
 */

/**
 * `EventTarget` is a class that can have the same API as the DOM `EventTarget`. It
 * adds shorthand functions that wrap around lengthy functions. For example:
 * the `on` function is a wrapper around `addEventListener`.
 *
 * Instances hold no listener state of their own; listener bookkeeping is
 * delegated to the module-level Events helpers (`on`, `off`, `one`, `trigger`)
 * used by the prototype methods below.
 *
 * @see [EventTarget Spec]{@link https://www.w3.org/TR/DOM-Level-2-Events/events.html#Events-EventTarget}
 * @class EventTarget
 */
var EventTarget = function EventTarget() {};
/**
 * A Custom DOM event.
 *
 * @typedef {Object} EventTarget~Event
 * @see [Properties]{@link https://developer.mozilla.org/en-US/docs/Web/API/CustomEvent}
 */

/**
 * All event listeners should follow the following format.
 *
 * @callback EventTarget~EventListener
 * @this {EventTarget}
 *
 * @param {EventTarget~Event} event
 *        the event that triggered this function
 *
 * @param {Object} [hash]
 *        hash of data sent during the event
 */

/**
 * An object containing event names as keys and booleans as values.
 *
 * > NOTE: If an event name is set to a true value here {@link EventTarget#trigger}
 *   will have extra functionality. See that function for more information.
 *
 * Empty by default; subclasses are expected to override it.
 *
 * @property EventTarget.prototype.allowedEvents_
 * @private
 */
EventTarget.prototype.allowedEvents_ = {};
|
/**
 * Adds an `event listener` to an instance of an `EventTarget`. An `event listener` is a
 * function that will get called when an event with a certain name gets triggered.
 *
 * @param {string|string[]} type
 *        An event name or an array of event names.
 *
 * @param {EventTarget~EventListener} fn
 *        The function to call with `EventTarget`s
 */
EventTarget.prototype.on = function (type, fn) {
  // Temporarily replace the addEventListener alias with a no-op before
  // calling Events.on, so Events.on cannot re-enter this method and cause
  // an infinite loop. Restored below.
  var ael = this.addEventListener;

  this.addEventListener = function () {};

  on(this, type, fn);
  this.addEventListener = ael;
};
/**
 * An alias of {@link EventTarget#on}. Allows `EventTarget` to mimic
 * the standard DOM API.
 *
 * @function
 * @see {@link EventTarget#on}
 */
EventTarget.prototype.addEventListener = EventTarget.prototype.on;
|
/**
 * Removes an `event listener` for a specific event from an instance of `EventTarget`.
 * This makes it so that the `event listener` will no longer get called when the
 * named event happens.
 *
 * @param {string|string[]} type
 *        An event name or an array of event names.
 *
 * @param {EventTarget~EventListener} fn
 *        The function to remove.
 */
EventTarget.prototype.off = function (type, fn) {
  // Delegates directly to the module-level Events.off helper.
  off(this, type, fn);
};
/**
 * An alias of {@link EventTarget#off}. Allows `EventTarget` to mimic
 * the standard DOM API.
 *
 * @function
 * @see {@link EventTarget#off}
 */
EventTarget.prototype.removeEventListener = EventTarget.prototype.off;
|
/**
 * This function will add an `event listener` that gets triggered only once. After the
 * first trigger it will get removed. This is like adding an `event listener`
 * with {@link EventTarget#on} that calls {@link EventTarget#off} on itself.
 *
 * @param {string|string[]} type
 *        An event name or an array of event names.
 *
 * @param {EventTarget~EventListener} fn
 *        The function to be called once for each event name.
 */
EventTarget.prototype.one = function (type, fn) {
  // Temporarily no-op the addEventListener alias before calling Events.one
  // so we don't get into an infinite loop. Restored below.
  var ael = this.addEventListener;

  this.addEventListener = function () {};

  one(this, type, fn);
  this.addEventListener = ael;
};
|
/**
 * This function causes an event to happen. This will then cause any `event listeners`
 * that are waiting for that event, to get called. If there are no `event listeners`
 * for an event then nothing will happen.
 *
 * If the name of the `Event` that is being triggered is in `EventTarget.allowedEvents_`,
 * trigger will also call the `'on' + eventName` method (the event name is used
 * verbatim, without any case change).
 *
 * Example:
 * 'click' is in `EventTarget.allowedEvents_`, so trigger will attempt to call
 * `this['onclick']` if it exists.
 *
 * @param {string|EventTarget~Event|Object} event
 *        The name of the event, an `Event`, or an object with a key of type set to
 *        an event name.
 */
EventTarget.prototype.trigger = function (event) {
  var type = event.type || event; // deprecation
  // In a future version we should default target to `this`
  // similar to how we default the target to `elem` in
  // `Events.trigger`. Right now the default `target` will be
  // `document` due to the `Event.fixEvent` call.

  // Normalize a bare event-name string into an event-like object.
  if (typeof event === 'string') {
    event = {
      type: type
    };
  }

  event = fixEvent(event);

  // Extra hook: call the `on<type>` handler for allowed events (see jsdoc).
  if (this.allowedEvents_[type] && this['on' + type]) {
    this['on' + type](event);
  }

  trigger(this, event);
};
/**
 * An alias of {@link EventTarget#trigger}. Allows `EventTarget` to mimic
 * the standard DOM API.
 *
 * @function
 * @see {@link EventTarget#trigger}
 */
EventTarget.prototype.dispatchEvent = EventTarget.prototype.trigger;
|
/**
 * Lazily-created map of evented targets to per-event-type queued timeout ids.
 * Only allocated the first time `queueTrigger` is used.
 *
 * @type {Map|undefined}
 * @private
 */
var EVENT_MAP;

/**
 * Queues an event to be triggered on the next tick. If the same event type is
 * queued again before the timeout fires, the earlier queued trigger is
 * cancelled and replaced, so only the most recent one fires.
 *
 * @param {string|EventTarget~Event|Object} event
 *        The name of the event, an `Event`, or an object with a key of type
 *        set to an event name.
 */
EventTarget.prototype.queueTrigger = function (event) {
  var _this = this;

  // only set up EVENT_MAP if it'll be used
  if (!EVENT_MAP) {
    EVENT_MAP = new Map();
  }

  var type = event.type || event;
  var map = EVENT_MAP.get(this);

  if (!map) {
    map = new Map();
    EVENT_MAP.set(this, map);
  }

  // Replace any previously queued trigger for this type.
  var oldTimeout = map.get(type);
  map.delete(type);
  window$1.clearTimeout(oldTimeout);
  var timeout = window$1.setTimeout(function () {
    // Remove this type's own entry before checking emptiness. Without this
    // delete, entries accumulated forever, `map.size` could never reach 0,
    // and EVENT_MAP leaked every target it ever saw.
    map.delete(type);

    // if we cleared out all timeouts for the current target, delete its map
    if (map.size === 0) {
      map = null;
      EVENT_MAP.delete(_this);
    }

    _this.trigger(event);
  }, 0);
  map.set(type, timeout);
};
|
|
|
/**
 * @file mixins/evented.js
 * @module evented
 */

/**
 * Returns whether or not an object has had the evented mixin applied.
 *
 * An object qualifies if it is an `EventTarget` instance, or if it carries an
 * `eventBusEl_` plus all four event methods (`on`, `one`, `off`, `trigger`).
 *
 * @param {Object} object
 *        An object to test.
 *
 * @return {boolean}
 *         Whether or not the object appears to be evented.
 */
var isEvented = function isEvented(object) {
  if (object instanceof EventTarget) {
    return true;
  }

  if (!object.eventBusEl_) {
    return false;
  }

  return ['on', 'one', 'off', 'trigger'].every(function (method) {
    return typeof object[method] === 'function';
  });
};
|
/**
 * Adds a callback to run after the evented mixin is applied.
 *
 * If the target is already evented, the callback runs immediately; otherwise
 * it is queued on `target.eventedCallbacks` for `evented()` to run later.
 *
 * @param {Object} target
 *        The object the callback is registered against.
 * @param {Function} callback
 *        The callback to run.
 */
var addEventedCallback = function addEventedCallback(target, callback) {
  if (isEvented(target)) {
    callback();
    return;
  }

  if (!target.eventedCallbacks) {
    target.eventedCallbacks = [];
  }

  target.eventedCallbacks.push(callback);
};
|
/**
 * Whether a value is a valid event type - non-empty string or array.
 *
 * @private
 * @param {string|Array} type
 *        The type value to test.
 *
 * @return {boolean}
 *         Whether or not the type is a valid event type.
 */
var isValidEventType = function isValidEventType(type) {
  if (typeof type === 'string') {
    // A string must contain at least one non-whitespace character.
    return (/\S/).test(type);
  }

  return Array.isArray(type) && type.length > 0;
};
|
/**
 * Validates a value to determine if it is a valid event target. Throws if not.
 *
 * A valid target is either a DOM node (has a `nodeName`) or an evented object.
 *
 * @private
 * @throws {Error}
 *         If the target does not appear to be a valid event target.
 *
 * @param {Object} target
 *        The object to test.
 */
var validateTarget = function validateTarget(target) {
  if (target.nodeName || isEvented(target)) {
    return;
  }

  throw new Error('Invalid target; must be a DOM node or evented object.');
};
|
/**
 * Validates a value to determine if it is a valid event type. Throws if not.
 *
 * @private
 * @throws {Error}
 *         If the type does not appear to be a valid event type.
 *
 * @param {string|Array} type
 *        The type to test.
 */
var validateEventType = function validateEventType(type) {
  if (isValidEventType(type)) {
    return;
  }

  throw new Error('Invalid event type; must be a non-empty string or array.');
};
|
/**
 * Validates a value to determine if it is a valid listener. Throws if not.
 *
 * @private
 * @throws {Error}
 *         If the listener is not a function.
 *
 * @param {Function} listener
 *        The listener to test.
 */
var validateListener = function validateListener(listener) {
  if (typeof listener === 'function') {
    return;
  }

  throw new Error('Invalid listener; must be a function.');
};
|
/**
 * Takes an array of arguments given to `on()` or `one()`, validates them, and
 * normalizes them into an object.
 *
 * @private
 * @param {Object} self
 *        The evented object on which `on()` or `one()` was called. This
 *        object will be bound as the `this` value for the listener.
 *
 * @param {Array} args
 *        An array of arguments passed to `on()` or `one()`.
 *
 * @return {Object}
 *         An object containing useful values for `on()` or `one()` calls.
 */
var normalizeListenArgs = function normalizeListenArgs(self, args) {
  // With fewer than three arguments, the target is always the evented object
  // itself. It also is when the object (or its event bus) is passed first.
  var usesOwnBus = args.length < 3 || args[0] === self || args[0] === self.eventBusEl_;
  var target;
  var type;
  var listener;

  if (usesOwnBus) {
    target = self.eventBusEl_;

    // Three-plus arguments that still target the evented object itself:
    // drop the redundant leading target argument.
    if (args.length >= 3) {
      args.shift();
    }

    type = args[0];
    listener = args[1];
  } else {
    target = args[0];
    type = args[1];
    listener = args[2];
  }

  // Fail fast on malformed arguments.
  validateTarget(target);
  validateEventType(type);
  validateListener(listener);

  return {
    isTargetingSelf: usesOwnBus,
    target: target,
    type: type,
    listener: bind(self, listener)
  };
};
|
/**
 * Adds the listener to the event type(s) on the target, normalizing for
 * the type of target.
 *
 * @private
 * @param {Element|Object} target
 *        A DOM node or evented object.
 *
 * @param {string} method
 *        The event binding method to use ("on" or "one").
 *
 * @param {string|Array} type
 *        One or more event type(s).
 *
 * @param {Function} listener
 *        A listener function.
 */
var listen = function listen(target, method, type, listener) {
  validateTarget(target);

  // DOM nodes go through the Events module; evented objects expose the
  // binding method directly.
  if (target.nodeName) {
    Events[method](target, type, listener);
    return;
  }

  target[method](type, listener);
};
|
/**
 * Contains methods that provide event capabilities to an object which is passed
 * to {@link module:evented|evented}.
 *
 * @mixin EventedMixin
 */
var EventedMixin = {
  /**
   * Add a listener to an event (or events) on this object or another evented
   * object.
   *
   * @param {string|Array|Element|Object} targetOrType
   *        If this is a string or array, it represents the event type(s)
   *        that will trigger the listener.
   *
   *        Another evented object can be passed here instead, which will
   *        cause the listener to listen for events on _that_ object.
   *
   *        In either case, the listener's `this` value will be bound to
   *        this object.
   *
   * @param {string|Array|Function} typeOrListener
   *        If the first argument was a string or array, this should be the
   *        listener function. Otherwise, this is a string or array of event
   *        type(s).
   *
   * @param {Function} [listener]
   *        If the first argument was another evented object, this will be
   *        the listener function.
   */
  on: function on$$1() {
    var _this = this;

    for (var _len = arguments.length, args = new Array(_len), _key = 0; _key < _len; _key++) {
      args[_key] = arguments[_key];
    }

    var _normalizeListenArgs = normalizeListenArgs(this, args),
        isTargetingSelf = _normalizeListenArgs.isTargetingSelf,
        target = _normalizeListenArgs.target,
        type = _normalizeListenArgs.type,
        listener = _normalizeListenArgs.listener;

    listen(target, 'on', type, listener);

    // If this object is listening to another evented object, wire up
    // dispose-time cleanup in BOTH directions.
    if (!isTargetingSelf) {
      // If this object is disposed, remove the listener.
      var removeListenerOnDispose = function removeListenerOnDispose() {
        return _this.off(target, type, listener);
      };

      // Use the same function ID (guid) as the listener so we can remove it
      // later using the ID of the original listener.
      removeListenerOnDispose.guid = listener.guid;

      // Add a listener to the target's dispose event as well. This ensures
      // that if the target is disposed BEFORE this object, we remove the
      // removal listener that was just added. Otherwise, we create a memory leak.
      var removeRemoverOnTargetDispose = function removeRemoverOnTargetDispose() {
        return _this.off('dispose', removeListenerOnDispose);
      };

      // Use the same function ID (guid) as the listener so we can remove it
      // later using the ID of the original listener.
      removeRemoverOnTargetDispose.guid = listener.guid;
      listen(this, 'on', 'dispose', removeListenerOnDispose);
      listen(target, 'on', 'dispose', removeRemoverOnTargetDispose);
    }
  },

  /**
   * Add a listener to an event (or events) on this object or another evented
   * object. The listener will only be called once and then removed.
   *
   * @param {string|Array|Element|Object} targetOrType
   *        If this is a string or array, it represents the event type(s)
   *        that will trigger the listener.
   *
   *        Another evented object can be passed here instead, which will
   *        cause the listener to listen for events on _that_ object.
   *
   *        In either case, the listener's `this` value will be bound to
   *        this object.
   *
   * @param {string|Array|Function} typeOrListener
   *        If the first argument was a string or array, this should be the
   *        listener function. Otherwise, this is a string or array of event
   *        type(s).
   *
   * @param {Function} [listener]
   *        If the first argument was another evented object, this will be
   *        the listener function.
   */
  one: function one$$1() {
    var _this2 = this;

    for (var _len2 = arguments.length, args = new Array(_len2), _key2 = 0; _key2 < _len2; _key2++) {
      args[_key2] = arguments[_key2];
    }

    var _normalizeListenArgs2 = normalizeListenArgs(this, args),
        isTargetingSelf = _normalizeListenArgs2.isTargetingSelf,
        target = _normalizeListenArgs2.target,
        type = _normalizeListenArgs2.type,
        listener = _normalizeListenArgs2.listener;

    // Targeting this evented object: delegate directly.
    if (isTargetingSelf) {
      listen(target, 'one', type, listener);
    // Targeting another evented object: wrap the listener so that cross-object
    // bookkeeping (the `off` call) happens before the one-shot fires.
    } else {
      var wrapper = function wrapper() {
        _this2.off(target, type, wrapper);

        for (var _len3 = arguments.length, largs = new Array(_len3), _key3 = 0; _key3 < _len3; _key3++) {
          largs[_key3] = arguments[_key3];
        }

        // NOTE: `listener` was already bound in normalizeListenArgs, so
        // passing `null` as `this` here is safe.
        listener.apply(null, largs);
      };

      // Use the same function ID (guid) as the listener so we can remove it
      // later using the ID of the original listener.
      wrapper.guid = listener.guid;
      listen(target, 'one', type, wrapper);
    }
  },

  /**
   * Removes listener(s) from event(s) on an evented object.
   *
   * @param {string|Array|Element|Object} [targetOrType]
   *        If this is a string or array, it represents the event type(s).
   *
   *        Another evented object can be passed here instead, in which case
   *        ALL 3 arguments are _required_.
   *
   * @param {string|Array|Function} [typeOrListener]
   *        If the first argument was a string or array, this may be the
   *        listener function. Otherwise, this is a string or array of event
   *        type(s).
   *
   * @param {Function} [listener]
   *        If the first argument was another evented object, this will be
   *        the listener function; otherwise, _all_ listeners bound to the
   *        event type(s) will be removed.
   */
  off: function off$$1(targetOrType, typeOrListener, listener) {
    // Targeting this evented object (no args, or a valid event type first).
    if (!targetOrType || isValidEventType(targetOrType)) {
      off(this.eventBusEl_, targetOrType, typeOrListener);
    // Targeting another evented object.
    } else {
      var target = targetOrType;
      var type = typeOrListener;

      // Fail fast and in a meaningful way!
      validateTarget(target);
      validateEventType(type);
      validateListener(listener);

      // Ensure there's at least a guid, even if the function hasn't been used
      listener = bind(this, listener);

      // Remove the dispose listener on this evented object, which was given
      // the same guid as the event listener in on().
      this.off('dispose', listener);

      if (target.nodeName) {
        off(target, type, listener);
        off(target, 'dispose', listener);
      } else if (isEvented(target)) {
        target.off(type, listener);
        target.off('dispose', listener);
      }
    }
  },

  /**
   * Fire an event on this evented object, causing its listeners to be called.
   *
   * @param {string|Object} event
   *        An event type or an object with a type property.
   *
   * @param {Object} [hash]
   *        An additional object to pass along to listeners.
   *
   * @return {boolean}
   *         Whether or not the default behavior was prevented.
   */
  trigger: function trigger$$1(event, hash) {
    return trigger(this.eventBusEl_, event, hash);
  }
};
|
/**
 * Applies {@link module:evented~EventedMixin|EventedMixin} to a target object.
 *
 * @param {Object} target
 *        The object to which to add event methods.
 *
 * @param {Object} [options={}]
 *        Options for customizing the mixin behavior.
 *
 * @param {string} [options.eventBusKey]
 *        By default, adds a `eventBusEl_` DOM element to the target object,
 *        which is used as an event bus. If the target object already has a
 *        DOM element that should be used, pass its key here.
 *
 * @return {Object}
 *         The target object.
 */
function evented(target, options) {
  if (options === void 0) {
    options = {};
  }

  var _options = options,
      eventBusKey = _options.eventBusKey;

  // Set or create the eventBusEl_: either reuse the element stored under
  // `eventBusKey` (must be a DOM node) or create a detached <span>.
  if (eventBusKey) {
    if (!target[eventBusKey].nodeName) {
      throw new Error("The eventBusKey \"" + eventBusKey + "\" does not refer to an element.");
    }

    target.eventBusEl_ = target[eventBusKey];
  } else {
    target.eventBusEl_ = createEl('span', {
      className: 'vjs-event-bus'
    });
  }

  assign(target, EventedMixin);

  // Run any callbacks queued via addEventedCallback before the mixin existed.
  if (target.eventedCallbacks) {
    target.eventedCallbacks.forEach(function (callback) {
      callback();
    });
  }

  // When any evented object is disposed, it removes all its listeners.
  target.on('dispose', function () {
    target.off();
    // Clear the bus on the next tick so other dispose handlers that fire
    // during this tick can still use it.
    window$1.setTimeout(function () {
      target.eventBusEl_ = null;
    }, 0);
  });
  return target;
}
|
|
|
/** |
|
* @file mixins/stateful.js |
|
* @module stateful |
|
*/ |
|
/** |
|
* Contains methods that provide statefulness to an object which is passed |
|
* to {@link module:stateful}. |
|
* |
|
* @mixin StatefulMixin |
|
*/ |
|
|
|
var StatefulMixin = { |
|
/** |
|
* A hash containing arbitrary keys and values representing the state of |
|
* the object. |
|
* |
|
* @type {Object} |
|
*/ |
|
state: {}, |
|
|
|
/** |
|
* Set the state of an object by mutating its |
|
* {@link module:stateful~StatefulMixin.state|state} object in place. |
|
* |
|
* @fires module:stateful~StatefulMixin#statechanged |
|
* @param {Object|Function} stateUpdates |
|
* A new set of properties to shallow-merge into the plugin state. |
|
* Can be a plain object or a function returning a plain object. |
|
* |
|
* @return {Object|undefined} |
|
* An object containing changes that occurred. If no changes |
|
* occurred, returns `undefined`. |
|
*/ |
|
setState: function setState(stateUpdates) { |
|
var _this = this; |
|
|
|
// Support providing the `stateUpdates` state as a function. |
|
if (typeof stateUpdates === 'function') { |
|
stateUpdates = stateUpdates(); |
|
} |
|
|
|
var changes; |
|
each(stateUpdates, function (value, key) { |
|
// Record the change if the value is different from what's in the |
|
// current state. |
|
if (_this.state[key] !== value) { |
|
changes = changes || {}; |
|
changes[key] = { |
|
from: _this.state[key], |
|
to: value |
|
}; |
|
} |
|
|
|
_this.state[key] = value; |
|
}); // Only trigger "statechange" if there were changes AND we have a trigger |
|
// function. This allows us to not require that the target object be an |
|
// evented object. |
|
|
|
if (changes && isEvented(this)) { |
|
/** |
|
* An event triggered on an object that is both |
|
* {@link module:stateful|stateful} and {@link module:evented|evented} |
|
* indicating that its state has changed. |
|
* |
|
* @event module:stateful~StatefulMixin#statechanged |
|
* @type {Object} |
|
* @property {Object} changes |
|
* A hash containing the properties that were changed and |
|
* the values they were changed `from` and `to`. |
|
*/ |
|
this.trigger({ |
|
changes: changes, |
|
type: 'statechanged' |
|
}); |
|
} |
|
|
|
return changes; |
|
} |
|
}; |
|
/**
 * Applies {@link module:stateful~StatefulMixin|StatefulMixin} to a target
 * object.
 *
 * If the target object is {@link module:evented|evented} and has a
 * `handleStateChanged` method, that method will be automatically bound to the
 * `statechanged` event on itself.
 *
 * @param {Object} target
 *        The object to be made stateful.
 *
 * @param {Object} [defaultState]
 *        A default set of properties to populate the newly-stateful object's
 *        `state` property.
 *
 * @return {Object}
 *         Returns the `target`.
 */
function stateful(target, defaultState) {
  assign(target, StatefulMixin);

  // Replace the mixin's shared `state` object with a fresh per-target copy,
  // seeded from any provided defaults.
  target.state = assign({}, target.state, defaultState);

  // Auto-bind the `handleStateChanged` method of the target object if it exists.
  if (isEvented(target) && typeof target.handleStateChanged === 'function') {
    target.on('statechanged', target.handleStateChanged);
  }

  return target;
}
|
|
|
/** |
|
* @file to-title-case.js |
|
* @module to-title-case |
|
*/ |
|
|
|
/** |
|
* Uppercase the first letter of a string. |
|
* |
|
* @param {string} string |
|
* String to be uppercased |
|
* |
|
* @return {string} |
|
* The string with an uppercased first letter |
|
*/ |
|
function toTitleCase(string) { |
|
if (typeof string !== 'string') { |
|
return string; |
|
} |
|
|
|
return string.charAt(0).toUpperCase() + string.slice(1); |
|
} |
|
/** |
|
* Compares the TitleCase versions of the two strings for equality. |
|
* |
|
* @param {string} str1 |
|
* The first string to compare |
|
* |
|
* @param {string} str2 |
|
* The second string to compare |
|
* |
|
* @return {boolean} |
|
* Whether the TitleCase versions of the strings are equal |
|
*/ |
|
|
|
function titleCaseEquals(str1, str2) { |
|
return toTitleCase(str1) === toTitleCase(str2); |
|
} |
|
|
|
/**
 * @file merge-options.js
 * @module merge-options
 */

/**
 * Merge two objects recursively.
 *
 * Performs a deep merge like
 * {@link https://lodash.com/docs/4.17.10#merge|lodash.merge}, but only merges
 * plain objects (not arrays, elements, or anything else).
 *
 * Non-plain object values will be copied directly from the right-most
 * argument.
 *
 * @static
 * @param {Object[]} sources
 *        One or more objects to merge into a new object.
 *
 * @return {Object}
 *         A new object that is the merged result of all sources.
 */
function mergeOptions() {
  var result = {};

  for (var i = 0; i < arguments.length; i++) {
    var source = arguments[i];

    // Skip falsy sources (null, undefined, etc.).
    if (!source) {
      continue;
    }

    each(source, function (value, key) {
      // Non-plain values (arrays, elements, primitives, ...) are copied
      // directly; later sources win.
      if (!isPlain(value)) {
        result[key] = value;
        return;
      }

      // Both sides must be plain objects before recursing.
      if (!isPlain(result[key])) {
        result[key] = {};
      }

      result[key] = mergeOptions(result[key], value);
    });
  }

  return result;
}
|
|
|
/** |
|
* Player Component - Base class for all UI objects |
|
* |
|
* @file component.js |
|
*/ |
|
/** |
|
* Base class for all UI Components. |
|
* Components are UI objects which represent both a javascript object and an element |
|
* in the DOM. They can be children of other components, and can have |
|
* children themselves. |
|
* |
|
* Components can also use methods from {@link EventTarget} |
|
*/ |
|
|
|
var Component = |
|
/*#__PURE__*/ |
|
function () { |
|
/** |
|
* A callback that is called when a component is ready. Does not have any |
|
* paramters and any callback value will be ignored. |
|
* |
|
* @callback Component~ReadyCallback |
|
* @this Component |
|
*/ |
|
|
|
/** |
|
* Creates an instance of this class. |
|
* |
|
* @param {Player} player |
|
* The `Player` that this class should be attached to. |
|
* |
|
* @param {Object} [options] |
|
* The key/value store of player options. |
|
* |
|
* @param {Object[]} [options.children] |
|
* An array of children objects to intialize this component with. Children objects have |
|
* a name property that will be used if more than one component of the same type needs to be |
|
* added. |
|
* |
|
* @param {Component~ReadyCallback} [ready] |
|
* Function that gets called when the `Component` is ready. |
|
*/ |
|
function Component(player, options, ready) {
  // The component might be the player itself and we can't pass `this` to super
  if (!player && this.play) {
    this.player_ = player = this; // eslint-disable-line
  } else {
    this.player_ = player;
  }

  // Hold the reference to the parent component via `addChild` method
  this.parentComponent_ = null;

  // Make a copy of prototype.options_ to protect against overriding defaults
  this.options_ = mergeOptions({}, this.options_);

  // Updated options with supplied options
  options = this.options_ = mergeOptions(this.options_, options);

  // Get ID from options or options element if one is supplied
  this.id_ = options.id || options.el && options.el.id;

  // If there was no ID from the options, generate one
  if (!this.id_) {
    // Don't require the player ID function in the case of mock players
    var id = player && player.id && player.id() || 'no_player';
    this.id_ = id + "_component_" + newGUID();
  }

  this.name_ = options.name || null;

  // Create element if one wasn't provided in options
  if (options.el) {
    this.el_ = options.el;
  } else if (options.createEl !== false) {
    this.el_ = this.createEl();
  }

  // if evented is anything except false, we want to mix in evented
  if (options.evented !== false) {
    // Make this an evented object and use `el_`, if available, as its event bus
    evented(this, {
      eventBusKey: this.el_ ? 'el_' : null
    });
  }

  stateful(this, this.constructor.defaultState);
  this.children_ = [];
  this.childIndex_ = {};
  this.childNameIndex_ = {};

  // Add any child components in options
  if (options.initChildren !== false) {
    this.initChildren();
  }

  this.ready(ready);
  // Don't want to trigger ready here or it will fire before init is actually
  // finished for all children that run this constructor

  if (options.reportTouchActivity !== false) {
    this.enableTouchActivity();
  }
}
|
/**
 * Dispose of the `Component` and all child components.
 *
 * @fires Component#dispose
 */
var _proto = Component.prototype;

_proto.dispose = function dispose() {
  /**
   * Triggered when a `Component` is disposed.
   *
   * @event Component#dispose
   * @type {EventTarget~Event}
   *
   * @property {boolean} [bubbles=false]
   *           set to false so that the close event does not
   *           bubble up
   */
  this.trigger({
    type: 'dispose',
    bubbles: false
  });

  // Dispose all children, iterating in reverse so removals don't affect
  // indices not yet visited.
  if (this.children_) {
    for (var i = this.children_.length - 1; i >= 0; i--) {
      if (this.children_[i].dispose) {
        this.children_[i].dispose();
      }
    }
  }

  // Delete child references
  this.children_ = null;
  this.childIndex_ = null;
  this.childNameIndex_ = null;
  this.parentComponent_ = null;

  if (this.el_) {
    // Remove element from DOM
    if (this.el_.parentNode) {
      this.el_.parentNode.removeChild(this.el_);
    }

    removeData(this.el_);
    this.el_ = null;
  }

  // remove reference to the player after disposing of the element
  this.player_ = null;
}
/**
 * Return the {@link Player} that the `Component` has attached to.
 *
 * @return {Player}
 *         The player that this `Component` has attached to.
 */
;
|
|
|
// Simple getter for the player reference stored by the constructor.
_proto.player = function player() {
  return this.player_;
}
/**
 * Deep merge of options objects with new options.
 * > Note: When both `obj` and `options` contain properties whose values are objects.
 * The two properties get merged using {@link module:mergeOptions}
 *
 * @param {Object} obj
 *        The object that contains new options.
 *
 * @return {Object}
 *         A new object of `this.options_` and `obj` merged together.
 *
 * @deprecated since version 5
 */
;
|
|
|
_proto.options = function options(obj) {
  log.warn('this.options() has been deprecated and will be moved to the constructor in 6.0');

  // With no argument this acts as a getter.
  if (!obj) {
    return this.options_;
  }

  this.options_ = mergeOptions(this.options_, obj);
  return this.options_;
}
/**
 * Get the `Component`s DOM element
 *
 * @return {Element}
 *         The DOM element for this `Component`.
 */
;
|
|
|
// Simple getter for this component's root DOM element.
_proto.el = function el() {
  return this.el_;
}
/**
 * Create the `Component`s DOM element.
 *
 * @param {string} [tagName]
 *        Element's DOM node type. e.g. 'div'
 *
 * @param {Object} [properties]
 *        An object of properties that should be set.
 *
 * @param {Object} [attributes]
 *        An object of attributes that should be set.
 *
 * @return {Element}
 *         The element that gets created.
 */
;
|
|
|
// Thin wrapper around the module-level `createEl` DOM helper; subclasses
// override this to customize their root element.
_proto.createEl = function createEl$$1(tagName, properties, attributes) {
  return createEl(tagName, properties, attributes);
}
/**
 * Localize a string given the string in english.
 *
 * If tokens are provided, it'll try and run a simple token replacement on the provided string.
 * The tokens it looks for look like `{1}` with the index being 1-indexed into the tokens array.
 *
 * If a `defaultValue` is provided, it'll use that over `string`,
 * if a value isn't found in provided language files.
 * This is useful if you want to have a descriptive key for token replacement
 * but have a succinct localized string and not require `en.json` to be included.
 *
 * Currently, it is used for the progress bar timing.
 * ```js
 * {
 *   "progress bar timing: currentTime={1} duration={2}": "{1} of {2}"
 * }
 * ```
 * It is then used like so:
 * ```js
 * this.localize('progress bar timing: currentTime={1} duration{2}',
 *               [this.player_.currentTime(), this.player_.duration()],
 *               '{1} of {2}');
 * ```
 *
 * Which outputs something like: `01:23 of 24:56`.
 *
 * @param {string} string
 *        The string to localize and the key to lookup in the language files.
 * @param {string[]} [tokens]
 *        If the current item has token replacements, provide the tokens here.
 * @param {string} [defaultValue]
 *        Defaults to `string`. Can be a default value to use for token replacement
 *        if the lookup key is needed to be separate.
 *
 * @return {string}
 *         The localized string or if no localization exists the english string.
 */
;
|
|
|
    _proto.localize = function localize(string, tokens, defaultValue) {
      // The lookup key itself is the fallback text unless a defaultValue
      // was explicitly provided.
      if (defaultValue === void 0) {
        defaultValue = string;
      }

      // Resolve the player's current language code and its dictionaries.
      // Both accessors are guarded because a bare component may not have them.
      var code = this.player_.language && this.player_.language();
      var languages = this.player_.languages && this.player_.languages();
      var language = languages && languages[code];
      // e.g. for 'en-GB' the primary code is 'en', used as a second lookup.
      var primaryCode = code && code.split('-')[0];
      var primaryLang = languages && languages[primaryCode];
      var localizedString = defaultValue;

      // Prefer the exact language dictionary, then the primary-language one.
      if (language && language[string]) {
        localizedString = language[string];
      } else if (primaryLang && primaryLang[string]) {
        localizedString = primaryLang[string];
      }

      if (tokens) {
        // Replace `{1}`, `{2}`, ... with the matching (1-indexed) token.
        localizedString = localizedString.replace(/\{(\d+)\}/g, function (match, index) {
          var value = tokens[index - 1];
          var ret = value;

          // Leave the placeholder untouched when no token was supplied.
          if (typeof value === 'undefined') {
            ret = match;
          }

          return ret;
        });
      }

      return localizedString;
    }
|
/** |
|
* Return the `Component`s DOM element. This is where children get inserted. |
|
     * This will usually be the same as the element returned in {@link Component#el}.
|
* |
|
* @return {Element} |
|
* The content element for this `Component`. |
|
*/ |
|
; |
|
|
|
_proto.contentEl = function contentEl() { |
|
return this.contentEl_ || this.el_; |
|
} |
|
/** |
|
* Get this `Component`s ID |
|
* |
|
* @return {string} |
|
* The id of this `Component` |
|
*/ |
|
; |
|
|
|
_proto.id = function id() { |
|
return this.id_; |
|
} |
|
/** |
|
* Get the `Component`s name. The name gets used to reference the `Component` |
|
* and is set during registration. |
|
* |
|
* @return {string} |
|
* The name of this `Component`. |
|
*/ |
|
; |
|
|
|
_proto.name = function name() { |
|
return this.name_; |
|
} |
|
/** |
|
* Get an array of all child components |
|
* |
|
* @return {Array} |
|
* The children |
|
*/ |
|
; |
|
|
|
_proto.children = function children() { |
|
return this.children_; |
|
} |
|
/** |
|
* Returns the child `Component` with the given `id`. |
|
* |
|
* @param {string} id |
|
* The id of the child `Component` to get. |
|
* |
|
* @return {Component|undefined} |
|
* The child `Component` with the given `id` or undefined. |
|
*/ |
|
; |
|
|
|
_proto.getChildById = function getChildById(id) { |
|
return this.childIndex_[id]; |
|
} |
|
/** |
|
* Returns the child `Component` with the given `name`. |
|
* |
|
* @param {string} name |
|
* The name of the child `Component` to get. |
|
* |
|
* @return {Component|undefined} |
|
* The child `Component` with the given `name` or undefined. |
|
*/ |
|
; |
|
|
|
_proto.getChild = function getChild(name) { |
|
if (!name) { |
|
return; |
|
} |
|
|
|
name = toTitleCase(name); |
|
return this.childNameIndex_[name]; |
|
} |
|
/** |
|
* Add a child `Component` inside the current `Component`. |
|
* |
|
* |
|
* @param {string|Component} child |
|
* The name or instance of a child to add. |
|
* |
|
* @param {Object} [options={}] |
|
* The key/value store of options that will get passed to children of |
|
* the child. |
|
* |
|
* @param {number} [index=this.children_.length] |
|
* The index to attempt to add a child into. |
|
* |
|
* @return {Component} |
|
* The `Component` that gets added as a child. When using a string the |
|
* `Component` will get created by this process. |
|
*/ |
|
; |
|
|
|
    _proto.addChild = function addChild(child, options, index) {
      if (options === void 0) {
        options = {};
      }

      // Default to appending at the end of the child list.
      if (index === void 0) {
        index = this.children_.length;
      }

      var component;
      var componentName; // If child is a string, create component with options

      if (typeof child === 'string') {
        componentName = toTitleCase(child);
        var componentClassName = options.componentClass || componentName; // Set name through options

        options.name = componentName; // Create a new object & element for this controls set
        // If there's no .player_, this is a player

        var ComponentClass = Component.getComponent(componentClassName);

        if (!ComponentClass) {
          throw new Error("Component " + componentClassName + " does not exist");
        } // data stored directly on the videojs object may be
        // misidentified as a component to retain
        // backwards-compatibility with 4.x. check to make sure the
        // component class can be instantiated.


        if (typeof ComponentClass !== 'function') {
          return null;
        }

        component = new ComponentClass(this.player_ || this, options); // child is a component instance
      } else {
        component = child;
      }

      // Re-parenting: detach from any previous parent first so the child is
      // never indexed in two places at once.
      if (component.parentComponent_) {
        component.parentComponent_.removeChild(component);
      }

      this.children_.splice(index, 0, component);
      component.parentComponent_ = this;

      // Guarded because 4.x-style plain objects may lack an id() method.
      if (typeof component.id === 'function') {
        this.childIndex_[component.id()] = component;
      } // If a name wasn't used to create the component, check if we can use the
      // name function of the component


      componentName = componentName || component.name && toTitleCase(component.name());

      if (componentName) {
        this.childNameIndex_[componentName] = component;
      } // Add the UI object's element to the container div (box)
      // Having an element is not required


      if (typeof component.el === 'function' && component.el()) {
        var childNodes = this.contentEl().children;
        var refNode = childNodes[index] || null;
        this.contentEl().insertBefore(component.el(), refNode);
      } // Return so it can be stored on the parent object if desired.


      return component;
    }
|
/** |
|
* Remove a child `Component` from this `Component`s list of children. Also removes |
|
* the child `Component`s element from this `Component`s element. |
|
* |
|
* @param {Component} component |
|
* The child `Component` to remove. |
|
*/ |
|
; |
|
|
|
    _proto.removeChild = function removeChild(component) {
      // Accept a child name as well as an instance.
      if (typeof component === 'string') {
        component = this.getChild(component);
      }

      // Nothing to do if there is no such child or no child list at all.
      if (!component || !this.children_) {
        return;
      }

      var childFound = false;

      // Scan from the end; remove the matching entry and stop.
      for (var i = this.children_.length - 1; i >= 0; i--) {
        if (this.children_[i] === component) {
          childFound = true;
          this.children_.splice(i, 1);
          break;
        }
      }

      // Not actually our child — leave its state untouched.
      if (!childFound) {
        return;
      }

      // Clear the parent link and the id/name indexes (set to null rather
      // than deleted, matching how the indexes are used elsewhere).
      component.parentComponent_ = null;
      this.childIndex_[component.id()] = null;
      this.childNameIndex_[component.name()] = null;
      var compEl = component.el();

      // Only detach the DOM node if it is still parented under our content
      // element (it may already have been moved elsewhere).
      if (compEl && compEl.parentNode === this.contentEl()) {
        this.contentEl().removeChild(component.el());
      }
    }
|
/** |
|
* Add and initialize default child `Component`s based upon options. |
|
*/ |
|
; |
|
|
|
    _proto.initChildren = function initChildren() {
      var _this = this;

      var children = this.options_.children;

      if (children) {
        // `this` is `parent`
        var parentOptions = this.options_;

        // Adds one child described by a {name, opts} pair, honoring the
        // various option shorthands documented below.
        var handleAdd = function handleAdd(child) {
          var name = child.name;
          var opts = child.opts; // Allow options for children to be set at the parent options
          // e.g. videojs(id, { controlBar: false });
          // instead of videojs(id, { children: { controlBar: false }); 

          if (parentOptions[name] !== undefined) {
            opts = parentOptions[name];
          } // Allow for disabling default components
          // e.g. options['children']['posterImage'] = false


          if (opts === false) {
            return;
          } // Allow options to be passed as a simple boolean if no configuration
          // is necessary.


          if (opts === true) {
            opts = {};
          } // We also want to pass the original player options
          // to each component as well so they don't need to
          // reach back into the player for options later.


          opts.playerOptions = _this.options_.playerOptions; // Create and add the child component.
          // Add a direct reference to the child by name on the parent instance.
          // If two of the same component are used, different names should be supplied
          // for each


          var newChild = _this.addChild(name, opts);

          if (newChild) {
            _this[name] = newChild;
          }
        }; // Allow for an array of children details to be passed in the options


        var workingChildren;
        var Tech = Component.getComponent('Tech');

        if (Array.isArray(children)) {
          workingChildren = children;
        } else {
          workingChildren = Object.keys(children);
        }

        workingChildren // children that are in this.options_ but also in workingChildren would
        // give us extra children we do not want. So, we want to filter them out.
        .concat(Object.keys(this.options_).filter(function (child) {
          return !workingChildren.some(function (wchild) {
            if (typeof wchild === 'string') {
              return child === wchild;
            }

            return child === wchild.name;
          });
        })).map(function (child) {
          // Normalize every entry (string name or {name, ...} object) into a
          // uniform {name, opts} pair.
          var name;
          var opts;

          if (typeof child === 'string') {
            name = child;
            opts = children[name] || _this.options_[name] || {};
          } else {
            name = child.name;
            opts = child;
          }

          return {
            name: name,
            opts: opts
          };
        }).filter(function (child) {
          // we have to make sure that child.name isn't in the techOrder since
          // techs are registered as Components but aren't compatible
          // See https://github.com/videojs/video.js/issues/2772
          var c = Component.getComponent(child.opts.componentClass || toTitleCase(child.name));
          return c && !Tech.isTech(c);
        }).forEach(handleAdd);
      }
    }
|
/** |
|
* Builds the default DOM class name. Should be overriden by sub-components. |
|
* |
|
* @return {string} |
|
* The DOM class name for this object. |
|
* |
|
* @abstract |
|
*/ |
|
; |
|
|
|
_proto.buildCSSClass = function buildCSSClass() { |
|
// Child classes can include a function that does: |
|
// return 'CLASS NAME' + this._super(); |
|
return ''; |
|
} |
|
/** |
|
* Bind a listener to the component's ready state. |
|
* Different from event listeners in that if the ready event has already happened |
|
* it will trigger the function immediately. |
|
* |
|
* @return {Component} |
|
* Returns itself; method can be chained. |
|
*/ |
|
; |
|
|
|
_proto.ready = function ready(fn, sync) { |
|
if (sync === void 0) { |
|
sync = false; |
|
} |
|
|
|
if (!fn) { |
|
return; |
|
} |
|
|
|
if (!this.isReady_) { |
|
this.readyQueue_ = this.readyQueue_ || []; |
|
this.readyQueue_.push(fn); |
|
return; |
|
} |
|
|
|
if (sync) { |
|
fn.call(this); |
|
} else { |
|
// Call the function asynchronously by default for consistency |
|
this.setTimeout(fn, 1); |
|
} |
|
} |
|
/** |
|
* Trigger all the ready listeners for this `Component`. |
|
* |
|
* @fires Component#ready |
|
*/ |
|
; |
|
|
|
    _proto.triggerReady = function triggerReady() {
      this.isReady_ = true; // Ensure ready is triggered asynchronously

      this.setTimeout(function () {
        // Snapshot and reset the queue first so callbacks that call ready()
        // again are queued for a later pass rather than re-run here.
        var readyQueue = this.readyQueue_; // Reset Ready Queue

        this.readyQueue_ = [];

        if (readyQueue && readyQueue.length > 0) {
          // Invoke each queued callback bound to this component.
          readyQueue.forEach(function (fn) {
            fn.call(this);
          }, this);
        } // Allow for using event listeners also

        /**
         * Triggered when a `Component` is ready.
         *
         * @event Component#ready
         * @type {EventTarget~Event}
         */


        this.trigger('ready');
      }, 1);
    }
|
/** |
|
* Find a single DOM element matching a `selector`. This can be within the `Component`s |
|
* `contentEl()` or another custom context. |
|
* |
|
* @param {string} selector |
|
* A valid CSS selector, which will be passed to `querySelector`. |
|
* |
|
* @param {Element|string} [context=this.contentEl()] |
|
* A DOM element within which to query. Can also be a selector string in |
|
* which case the first matching element will get used as context. If |
|
* missing `this.contentEl()` gets used. If `this.contentEl()` returns |
|
* nothing it falls back to `document`. |
|
* |
|
* @return {Element|null} |
|
* the dom element that was found, or null |
|
* |
|
* @see [Information on CSS Selectors](https://developer.mozilla.org/en-US/docs/Web/Guide/CSS/Getting_Started/Selectors) |
|
*/ |
|
; |
|
|
|
_proto.$ = function $$$1(selector, context) { |
|
return $(selector, context || this.contentEl()); |
|
} |
|
/** |
|
* Finds all DOM element matching a `selector`. This can be within the `Component`s |
|
* `contentEl()` or another custom context. |
|
* |
|
* @param {string} selector |
|
* A valid CSS selector, which will be passed to `querySelectorAll`. |
|
* |
|
* @param {Element|string} [context=this.contentEl()] |
|
* A DOM element within which to query. Can also be a selector string in |
|
* which case the first matching element will get used as context. If |
|
* missing `this.contentEl()` gets used. If `this.contentEl()` returns |
|
* nothing it falls back to `document`. |
|
* |
|
* @return {NodeList} |
|
* a list of dom elements that were found |
|
* |
|
* @see [Information on CSS Selectors](https://developer.mozilla.org/en-US/docs/Web/Guide/CSS/Getting_Started/Selectors) |
|
*/ |
|
; |
|
|
|
_proto.$$ = function $$$$1(selector, context) { |
|
return $$(selector, context || this.contentEl()); |
|
} |
|
/** |
|
* Check if a component's element has a CSS class name. |
|
* |
|
* @param {string} classToCheck |
|
* CSS class name to check. |
|
* |
|
* @return {boolean} |
|
* - True if the `Component` has the class. |
|
     * - False if the `Component` does not have the class
|
*/ |
|
; |
|
|
|
_proto.hasClass = function hasClass$$1(classToCheck) { |
|
return hasClass(this.el_, classToCheck); |
|
} |
|
/** |
|
* Add a CSS class name to the `Component`s element. |
|
* |
|
* @param {string} classToAdd |
|
* CSS class name to add |
|
*/ |
|
; |
|
|
|
_proto.addClass = function addClass$$1(classToAdd) { |
|
addClass(this.el_, classToAdd); |
|
} |
|
/** |
|
* Remove a CSS class name from the `Component`s element. |
|
* |
|
* @param {string} classToRemove |
|
* CSS class name to remove |
|
*/ |
|
; |
|
|
|
_proto.removeClass = function removeClass$$1(classToRemove) { |
|
removeClass(this.el_, classToRemove); |
|
} |
|
/** |
|
* Add or remove a CSS class name from the component's element. |
|
* - `classToToggle` gets added when {@link Component#hasClass} would return false. |
|
* - `classToToggle` gets removed when {@link Component#hasClass} would return true. |
|
* |
|
* @param {string} classToToggle |
|
     * The class to add or remove based on {@link Component#hasClass}
|
* |
|
* @param {boolean|Dom~predicate} [predicate] |
|
* An {@link Dom~predicate} function or a boolean |
|
*/ |
|
; |
|
|
|
_proto.toggleClass = function toggleClass$$1(classToToggle, predicate) { |
|
toggleClass(this.el_, classToToggle, predicate); |
|
} |
|
/** |
|
* Show the `Component`s element if it is hidden by removing the |
|
* 'vjs-hidden' class name from it. |
|
*/ |
|
; |
|
|
|
_proto.show = function show() { |
|
this.removeClass('vjs-hidden'); |
|
} |
|
/** |
|
* Hide the `Component`s element if it is currently showing by adding the |
|
* 'vjs-hidden` class name to it. |
|
*/ |
|
; |
|
|
|
_proto.hide = function hide() { |
|
this.addClass('vjs-hidden'); |
|
} |
|
/** |
|
* Lock a `Component`s element in its visible state by adding the 'vjs-lock-showing' |
|
* class name to it. Used during fadeIn/fadeOut. |
|
* |
|
* @private |
|
*/ |
|
; |
|
|
|
_proto.lockShowing = function lockShowing() { |
|
this.addClass('vjs-lock-showing'); |
|
} |
|
/** |
|
* Unlock a `Component`s element from its visible state by removing the 'vjs-lock-showing' |
|
* class name from it. Used during fadeIn/fadeOut. |
|
* |
|
* @private |
|
*/ |
|
; |
|
|
|
_proto.unlockShowing = function unlockShowing() { |
|
this.removeClass('vjs-lock-showing'); |
|
} |
|
/** |
|
* Get the value of an attribute on the `Component`s element. |
|
* |
|
* @param {string} attribute |
|
* Name of the attribute to get the value from. |
|
* |
|
* @return {string|null} |
|
* - The value of the attribute that was asked for. |
|
* - Can be an empty string on some browsers if the attribute does not exist |
|
* or has no value |
|
     * - Most browsers will return null if the attribute does not exist or has
|
* no value. |
|
* |
|
* @see [DOM API]{@link https://developer.mozilla.org/en-US/docs/Web/API/Element/getAttribute} |
|
*/ |
|
; |
|
|
|
_proto.getAttribute = function getAttribute$$1(attribute) { |
|
return getAttribute(this.el_, attribute); |
|
} |
|
/** |
|
* Set the value of an attribute on the `Component`'s element |
|
* |
|
* @param {string} attribute |
|
* Name of the attribute to set. |
|
* |
|
* @param {string} value |
|
* Value to set the attribute to. |
|
* |
|
* @see [DOM API]{@link https://developer.mozilla.org/en-US/docs/Web/API/Element/setAttribute} |
|
*/ |
|
; |
|
|
|
_proto.setAttribute = function setAttribute$$1(attribute, value) { |
|
setAttribute(this.el_, attribute, value); |
|
} |
|
/** |
|
* Remove an attribute from the `Component`s element. |
|
* |
|
* @param {string} attribute |
|
* Name of the attribute to remove. |
|
* |
|
* @see [DOM API]{@link https://developer.mozilla.org/en-US/docs/Web/API/Element/removeAttribute} |
|
*/ |
|
; |
|
|
|
_proto.removeAttribute = function removeAttribute$$1(attribute) { |
|
removeAttribute(this.el_, attribute); |
|
} |
|
/** |
|
* Get or set the width of the component based upon the CSS styles. |
|
* See {@link Component#dimension} for more detailed information. |
|
* |
|
* @param {number|string} [num] |
|
* The width that you want to set postfixed with '%', 'px' or nothing. |
|
* |
|
* @param {boolean} [skipListeners] |
|
* Skip the componentresize event trigger |
|
* |
|
* @return {number|string} |
|
* The width when getting, zero if there is no width. Can be a string |
|
     * postfixed with '%' or 'px'.
|
*/ |
|
; |
|
|
|
_proto.width = function width(num, skipListeners) { |
|
return this.dimension('width', num, skipListeners); |
|
} |
|
/** |
|
* Get or set the height of the component based upon the CSS styles. |
|
* See {@link Component#dimension} for more detailed information. |
|
* |
|
* @param {number|string} [num] |
|
* The height that you want to set postfixed with '%', 'px' or nothing. |
|
* |
|
* @param {boolean} [skipListeners] |
|
* Skip the componentresize event trigger |
|
* |
|
* @return {number|string} |
|
* The width when getting, zero if there is no width. Can be a string |
|
     * postfixed with '%' or 'px'.
|
*/ |
|
; |
|
|
|
_proto.height = function height(num, skipListeners) { |
|
return this.dimension('height', num, skipListeners); |
|
} |
|
/** |
|
* Set both the width and height of the `Component` element at the same time. |
|
* |
|
* @param {number|string} width |
|
* Width to set the `Component`s element to. |
|
* |
|
* @param {number|string} height |
|
* Height to set the `Component`s element to. |
|
*/ |
|
; |
|
|
|
_proto.dimensions = function dimensions(width, height) { |
|
// Skip componentresize listeners on width for optimization |
|
this.width(width, true); |
|
this.height(height); |
|
} |
|
/** |
|
* Get or set width or height of the `Component` element. This is the shared code |
|
* for the {@link Component#width} and {@link Component#height}. |
|
* |
|
* Things to know: |
|
* - If the width or height in an number this will return the number postfixed with 'px'. |
|
* - If the width/height is a percent this will return the percent postfixed with '%' |
|
* - Hidden elements have a width of 0 with `window.getComputedStyle`. This function |
|
* defaults to the `Component`s `style.width` and falls back to `window.getComputedStyle`. |
|
* See [this]{@link http://www.foliotek.com/devblog/getting-the-width-of-a-hidden-element-with-jquery-using-width/} |
|
* for more information |
|
* - If you want the computed style of the component, use {@link Component#currentWidth} |
|
     * and {@link Component#currentHeight}
|
* |
|
* @fires Component#componentresize |
|
* |
|
* @param {string} widthOrHeight |
|
     * 'width' or 'height'
|
* |
|
* @param {number|string} [num] |
|
     * New dimension
|
* |
|
* @param {boolean} [skipListeners] |
|
* Skip componentresize event trigger |
|
* |
|
* @return {number} |
|
* The dimension when getting or 0 if unset |
|
*/ |
|
; |
|
|
|
    _proto.dimension = function dimension(widthOrHeight, num, skipListeners) {
      // Setter branch: any defined `num` (including null/NaN) means "set".
      if (num !== undefined) {
        // Set to zero if null or literally NaN (NaN !== NaN)
        if (num === null || num !== num) {
          num = 0;
        } // Check if using css width/height (% or px) and adjust


        if (('' + num).indexOf('%') !== -1 || ('' + num).indexOf('px') !== -1) {
          // Already a CSS length string — apply verbatim.
          this.el_.style[widthOrHeight] = num;
        } else if (num === 'auto') {
          // 'auto' clears the inline style so CSS rules take over.
          this.el_.style[widthOrHeight] = '';
        } else {
          // Bare numbers are treated as pixels.
          this.el_.style[widthOrHeight] = num + 'px';
        } // skipListeners allows us to avoid triggering the resize event when setting both width and height


        if (!skipListeners) {
          /**
           * Triggered when a component is resized.
           *
           * @event Component#componentresize
           * @type {EventTarget~Event}
           */
          this.trigger('componentresize');
        }

        return;
      } // Not setting a value, so getting it
      // Make sure element exists


      if (!this.el_) {
        return 0;
      } // Get dimension value from style


      var val = this.el_.style[widthOrHeight];
      var pxIndex = val.indexOf('px');

      if (pxIndex !== -1) {
        // Return the pixel value with no 'px'
        return parseInt(val.slice(0, pxIndex), 10);
      } // No px so using % or no style was set, so falling back to offsetWidth/height
      // If component has display:none, offset will return 0
      // TODO: handle display:none and no dimension style using px


      return parseInt(this.el_['offset' + toTitleCase(widthOrHeight)], 10);
    }
|
/** |
|
* Get the computed width or the height of the component's element. |
|
* |
|
* Uses `window.getComputedStyle`. |
|
* |
|
* @param {string} widthOrHeight |
|
* A string containing 'width' or 'height'. Whichever one you want to get. |
|
* |
|
* @return {number} |
|
* The dimension that gets asked for or 0 if nothing was set |
|
* for that dimension. |
|
*/ |
|
; |
|
|
|
_proto.currentDimension = function currentDimension(widthOrHeight) { |
|
var computedWidthOrHeight = 0; |
|
|
|
if (widthOrHeight !== 'width' && widthOrHeight !== 'height') { |
|
throw new Error('currentDimension only accepts width or height value'); |
|
} |
|
|
|
if (typeof window$1.getComputedStyle === 'function') { |
|
var computedStyle = window$1.getComputedStyle(this.el_); |
|
computedWidthOrHeight = computedStyle.getPropertyValue(widthOrHeight) || computedStyle[widthOrHeight]; |
|
} // remove 'px' from variable and parse as integer |
|
|
|
|
|
computedWidthOrHeight = parseFloat(computedWidthOrHeight); // if the computed value is still 0, it's possible that the browser is lying |
|
// and we want to check the offset values. |
|
// This code also runs wherever getComputedStyle doesn't exist. |
|
|
|
if (computedWidthOrHeight === 0) { |
|
var rule = "offset" + toTitleCase(widthOrHeight); |
|
computedWidthOrHeight = this.el_[rule]; |
|
} |
|
|
|
return computedWidthOrHeight; |
|
} |
|
/** |
|
* An object that contains width and height values of the `Component`s |
|
* computed style. Uses `window.getComputedStyle`. |
|
* |
|
* @typedef {Object} Component~DimensionObject |
|
* |
|
* @property {number} width |
|
* The width of the `Component`s computed style. |
|
* |
|
* @property {number} height |
|
* The height of the `Component`s computed style. |
|
*/ |
|
|
|
/** |
|
* Get an object that contains computed width and height values of the |
|
* component's element. |
|
* |
|
* Uses `window.getComputedStyle`. |
|
* |
|
* @return {Component~DimensionObject} |
|
* The computed dimensions of the component's element. |
|
*/ |
|
; |
|
|
|
_proto.currentDimensions = function currentDimensions() { |
|
return { |
|
width: this.currentDimension('width'), |
|
height: this.currentDimension('height') |
|
}; |
|
} |
|
/** |
|
* Get the computed width of the component's element. |
|
* |
|
* Uses `window.getComputedStyle`. |
|
* |
|
* @return {number} |
|
* The computed width of the component's element. |
|
*/ |
|
; |
|
|
|
_proto.currentWidth = function currentWidth() { |
|
return this.currentDimension('width'); |
|
} |
|
/** |
|
* Get the computed height of the component's element. |
|
* |
|
* Uses `window.getComputedStyle`. |
|
* |
|
* @return {number} |
|
* The computed height of the component's element. |
|
*/ |
|
; |
|
|
|
_proto.currentHeight = function currentHeight() { |
|
return this.currentDimension('height'); |
|
} |
|
/** |
|
* Set the focus to this component |
|
*/ |
|
; |
|
|
|
_proto.focus = function focus() { |
|
this.el_.focus(); |
|
} |
|
/** |
|
* Remove the focus from this component |
|
*/ |
|
; |
|
|
|
_proto.blur = function blur() { |
|
this.el_.blur(); |
|
} |
|
/** |
|
* When this Component receives a `keydown` event which it does not process, |
|
* it passes the event to the Player for handling. |
|
* |
|
* @param {EventTarget~Event} event |
|
* The `keydown` event that caused this function to be called. |
|
*/ |
|
; |
|
|
|
_proto.handleKeyDown = function handleKeyDown(event) { |
|
if (this.player_) { |
|
// We only stop propagation here because we want unhandled events to fall |
|
// back to the browser. |
|
event.stopPropagation(); |
|
this.player_.handleKeyDown(event); |
|
} |
|
} |
|
/** |
|
* Many components used to have a `handleKeyPress` method, which was poorly |
|
* named because it listened to a `keydown` event. This method name now |
|
* delegates to `handleKeyDown`. This means anyone calling `handleKeyPress` |
|
* will not see their method calls stop working. |
|
* |
|
* @param {EventTarget~Event} event |
|
* The event that caused this function to be called. |
|
*/ |
|
; |
|
|
|
_proto.handleKeyPress = function handleKeyPress(event) { |
|
this.handleKeyDown(event); |
|
} |
|
/** |
|
* Emit a 'tap' events when touch event support gets detected. This gets used to |
|
* support toggling the controls through a tap on the video. They get enabled |
|
* because every sub-component would have extra overhead otherwise. |
|
* |
|
* @private |
|
* @fires Component#tap |
|
* @listens Component#touchstart |
|
* @listens Component#touchmove |
|
* @listens Component#touchleave |
|
* @listens Component#touchcancel |
|
* @listens Component#touchend |
|
*/ |
|
; |
|
|
|
    _proto.emitTapEvents = function emitTapEvents() {
      // Track the start time so we can determine how long the touch lasted
      var touchStart = 0;
      var firstTouch = null; // Maximum movement allowed during a touch event to still be considered a tap
      // Other popular libs use anywhere from 2 (hammer.js) to 15,
      // so 10 seems like a nice, round number.


      var tapMovementThreshold = 10; // The maximum length a touch can be while still being considered a tap


      var touchTimeThreshold = 200;
      // Shared flag across the handlers below: set on touchstart and cleared
      // by any event that disqualifies the gesture from being a tap.
      var couldBeTap;
      this.on('touchstart', function (event) {
        // If more than one finger, don't consider treating this as a click
        if (event.touches.length === 1) {
          // Copy pageX/pageY from the object
          firstTouch = {
            pageX: event.touches[0].pageX,
            pageY: event.touches[0].pageY
          }; // Record start time so we can detect a tap vs. "touch and hold"


          touchStart = new Date().getTime(); // Reset couldBeTap tracking


          couldBeTap = true;
        }
      });
      this.on('touchmove', function (event) {
        // If more than one finger, don't consider treating this as a click
        if (event.touches.length > 1) {
          couldBeTap = false;
        } else if (firstTouch) {
          // Some devices will throw touchmoves for all but the slightest of taps.
          // So, if we moved only a small distance, this could still be a tap
          var xdiff = event.touches[0].pageX - firstTouch.pageX;
          var ydiff = event.touches[0].pageY - firstTouch.pageY;
          // Euclidean distance from the original touch point.
          var touchDistance = Math.sqrt(xdiff * xdiff + ydiff * ydiff);

          if (touchDistance > tapMovementThreshold) {
            couldBeTap = false;
          }
        }
      });

      var noTap = function noTap() {
        couldBeTap = false;
      }; // TODO: Listen to the original target. http://youtu.be/DujfpXOKUp8?t=13m8s


      this.on('touchleave', noTap);
      this.on('touchcancel', noTap); // When the touch ends, measure how long it took and trigger the appropriate
      // event


      this.on('touchend', function (event) {
        firstTouch = null; // Proceed only if the touchmove/leave/cancel event didn't happen


        if (couldBeTap === true) {
          // Measure how long the touch lasted
          var touchTime = new Date().getTime() - touchStart; // Make sure the touch was less than the threshold to be considered a tap


          if (touchTime < touchTimeThreshold) {
            // Don't let browser turn this into a click
            event.preventDefault();
            /**
             * Triggered when a `Component` is tapped.
             *
             * @event Component#tap
             * @type {EventTarget~Event}
             */


            this.trigger('tap'); // It may be good to copy the touchend event object and change the
            // type to tap, if the other event properties aren't exact after
            // Events.fixEvent runs (e.g. event.target)
          }
        }
      });
    }
|
/** |
|
* This function reports user activity whenever touch events happen. This can get |
|
* turned off by any sub-components that wants touch events to act another way. |
|
* |
|
* Report user touch activity when touch events occur. User activity gets used to |
|
* determine when controls should show/hide. It is simple when it comes to mouse |
|
* events, because any mouse event should show the controls. So we capture mouse |
|
* events that bubble up to the player and report activity when that happens. |
|
* With touch events it isn't as easy as `touchstart` and `touchend` toggle player |
|
* controls. So touch events can't help us at the player level either. |
|
* |
|
* User activity gets checked asynchronously. So what could happen is a tap event |
|
* on the video turns the controls off. Then the `touchend` event bubbles up to |
|
* the player. Which, if it reported user activity, would turn the controls right |
|
* back on. We also don't want to completely block touch events from bubbling up. |
|
* Furthermore a `touchmove` event and anything other than a tap, should not turn |
|
* controls back on. |
|
* |
|
* @listens Component#touchstart |
|
* @listens Component#touchmove |
|
* @listens Component#touchend |
|
* @listens Component#touchcancel |
|
*/ |
|
; |
|
|
|
    _proto.enableTouchActivity = function enableTouchActivity() {
      // Don't continue if the root player doesn't support reporting user activity
      if (!this.player() || !this.player().reportUserActivity) {
        return;
      } // listener for reporting that the user is active


      var report = bind(this.player(), this.player().reportUserActivity);
      // Interval id shared between the touchstart and touchend/cancel handlers.
      var touchHolding;
      this.on('touchstart', function () {
        report(); // For as long as they are touching the device or have their mouse down,
        // we consider them active even if they're not moving their finger or mouse.
        // So we want to continue to update that they are active


        this.clearInterval(touchHolding); // report at the same interval as activityCheck


        touchHolding = this.setInterval(report, 250);
      });

      var touchEnd = function touchEnd(event) {
        report(); // stop the interval that maintains activity if the touch is holding


        this.clearInterval(touchHolding);
      };

      this.on('touchmove', report);
      this.on('touchend', touchEnd);
      this.on('touchcancel', touchEnd);
    }
|
/** |
|
* A callback that has no parameters and is bound into `Component`s context. |
|
* |
|
* @callback Component~GenericCallback |
|
* @this Component |
|
*/ |
|
|
|
/** |
|
* Creates a function that runs after an `x` millisecond timeout. This function is a |
|
* wrapper around `window.setTimeout`. There are a few reasons to use this one |
|
* instead though: |
|
* 1. It gets cleared via {@link Component#clearTimeout} when |
|
* {@link Component#dispose} gets called. |
|
* 2. The function callback will gets turned into a {@link Component~GenericCallback} |
|
* |
|
* > Note: You can't use `window.clearTimeout` on the id returned by this function. This |
|
* will cause its dispose listener not to get cleaned up! Please use |
|
* {@link Component#clearTimeout} or {@link Component#dispose} instead. |
|
* |
|
* @param {Component~GenericCallback} fn |
|
* The function that will be run after `timeout`. |
|
* |
|
* @param {number} timeout |
|
* Timeout in milliseconds to delay before executing the specified function. |
|
* |
|
* @return {number} |
|
* Returns a timeout ID that gets used to identify the timeout. It can also |
|
* get used in {@link Component#clearTimeout} to clear the timeout that |
|
* was set. |
|
* |
|
* @listens Component#dispose |
|
* @see [Similar to]{@link https://developer.mozilla.org/en-US/docs/Web/API/WindowTimers/setTimeout} |
|
*/ |
|
; |
|
|
|
    _proto.setTimeout = function setTimeout(fn, timeout) {
      var _this2 = this;

      // declare as variables so they are properly available in timeout function
      // eslint-disable-next-line
      var timeoutId, disposeFn;
      // Bind the callback to this component so it runs as a GenericCallback.
      fn = bind(this, fn);
      timeoutId = window$1.setTimeout(function () {
        // The timeout fired, so the dispose cleanup is no longer needed.
        _this2.off('dispose', disposeFn);

        fn();
      }, timeout);

      // If the component is disposed before the timeout fires, clear it so
      // the callback never runs against a disposed component.
      disposeFn = function disposeFn() {
        return _this2.clearTimeout(timeoutId);
      };

      // The guid lets clearTimeout() detach this exact listener later.
      disposeFn.guid = "vjs-timeout-" + timeoutId;
      this.on('dispose', disposeFn);
      return timeoutId;
    }
|
/** |
|
* Clears a timeout that gets created via `window.setTimeout` or |
|
* {@link Component#setTimeout}. If you set a timeout via {@link Component#setTimeout} |
|
 * use this function instead of `window.clearTimeout`. If you don't, your dispose
|
* listener will not get cleaned up until {@link Component#dispose}! |
|
* |
|
* @param {number} timeoutId |
|
* The id of the timeout to clear. The return value of |
|
* {@link Component#setTimeout} or `window.setTimeout`. |
|
* |
|
* @return {number} |
|
* Returns the timeout id that was cleared. |
|
* |
|
* @see [Similar to]{@link https://developer.mozilla.org/en-US/docs/Web/API/WindowTimers/clearTimeout} |
|
*/ |
|
; |
|
|
|
_proto.clearTimeout = function clearTimeout(timeoutId) {
  window$1.clearTimeout(timeoutId);

  // Build a stand-in function carrying the same guid that setTimeout()
  // assigned; presumably off() matches listeners by guid, so this removes
  // the real 'dispose' cleanup listener registered there — TODO confirm
  // against the event system.
  var disposeFn = function disposeFn() {};

  disposeFn.guid = "vjs-timeout-" + timeoutId;
  this.off('dispose', disposeFn);
  return timeoutId;
}
|
/** |
|
* Creates a function that gets run every `x` milliseconds. This function is a wrapper |
|
* around `window.setInterval`. There are a few reasons to use this one instead though. |
|
* 1. It gets cleared via {@link Component#clearInterval} when |
|
* {@link Component#dispose} gets called. |
|
* 2. The function callback will be a {@link Component~GenericCallback} |
|
* |
|
* @param {Component~GenericCallback} fn |
|
* The function to run every `x` seconds. |
|
* |
|
* @param {number} interval |
|
* Execute the specified function every `x` milliseconds. |
|
* |
|
* @return {number} |
|
* Returns an id that can be used to identify the interval. It can also be be used in |
|
* {@link Component#clearInterval} to clear the interval. |
|
* |
|
* @listens Component#dispose |
|
* @see [Similar to]{@link https://developer.mozilla.org/en-US/docs/Web/API/WindowTimers/setInterval} |
|
*/ |
|
; |
|
|
|
_proto.setInterval = function setInterval(fn, interval) {
  var _this3 = this;

  // Bind the callback to this component so it runs as a GenericCallback.
  fn = bind(this, fn);
  var intervalId = window$1.setInterval(fn, interval);

  // Clear the interval automatically when this component is disposed.
  var disposeFn = function disposeFn() {
    return _this3.clearInterval(intervalId);
  };

  // The guid mirrors the one reconstructed in clearInterval(), which is how
  // that method removes this exact listener later.
  disposeFn.guid = "vjs-interval-" + intervalId;
  this.on('dispose', disposeFn);
  return intervalId;
}
|
/** |
|
* Clears an interval that gets created via `window.setInterval` or |
|
 * {@link Component#setInterval}. If you set an interval via {@link Component#setInterval}
|
 * use this function instead of `window.clearInterval`. If you don't, your dispose
|
* listener will not get cleaned up until {@link Component#dispose}! |
|
* |
|
* @param {number} intervalId |
|
* The id of the interval to clear. The return value of |
|
* {@link Component#setInterval} or `window.setInterval`. |
|
* |
|
* @return {number} |
|
* Returns the interval id that was cleared. |
|
* |
|
* @see [Similar to]{@link https://developer.mozilla.org/en-US/docs/Web/API/WindowTimers/clearInterval} |
|
*/ |
|
; |
|
|
|
_proto.clearInterval = function clearInterval(intervalId) {
  window$1.clearInterval(intervalId);

  // Stand-in function carrying the same guid assigned in setInterval();
  // presumably the event system matches listeners by guid, letting this call
  // remove the real 'dispose' cleanup listener — TODO confirm.
  var disposeFn = function disposeFn() {};

  disposeFn.guid = "vjs-interval-" + intervalId;
  this.off('dispose', disposeFn);
  return intervalId;
}
|
/** |
|
* Queues up a callback to be passed to requestAnimationFrame (rAF), but |
|
* with a few extra bonuses: |
|
* |
|
* - Supports browsers that do not support rAF by falling back to |
|
* {@link Component#setTimeout}. |
|
* |
|
* - The callback is turned into a {@link Component~GenericCallback} (i.e. |
|
* bound to the component). |
|
* |
|
* - Automatic cancellation of the rAF callback is handled if the component |
|
* is disposed before it is called. |
|
* |
|
* @param {Component~GenericCallback} fn |
|
* A function that will be bound to this component and executed just |
|
* before the browser's next repaint. |
|
* |
|
* @return {number} |
|
* Returns an rAF ID that gets used to identify the timeout. It can |
|
* also be used in {@link Component#cancelAnimationFrame} to cancel |
|
* the animation frame callback. |
|
* |
|
* @listens Component#dispose |
|
* @see [Similar to]{@link https://developer.mozilla.org/en-US/docs/Web/API/window/requestAnimationFrame} |
|
*/ |
|
; |
|
|
|
_proto.requestAnimationFrame = function requestAnimationFrame(fn) {
  var _this4 = this;

  // declare as variables so they are properly available in rAF function
  // eslint-disable-next-line
  var id, disposeFn;

  if (this.supportsRaf_) {
    // Bind the callback to this component so it runs as a GenericCallback.
    fn = bind(this, fn);
    id = window$1.requestAnimationFrame(function () {
      // The frame fired normally; the dispose cleanup listener is no longer
      // needed, so remove it before invoking the callback.
      _this4.off('dispose', disposeFn);

      fn();
    });

    // Cancel the pending frame if the component is disposed first.
    disposeFn = function disposeFn() {
      return _this4.cancelAnimationFrame(id);
    };

    // Guid lets cancelAnimationFrame() remove this exact listener later.
    disposeFn.guid = "vjs-raf-" + id;
    this.on('dispose', disposeFn);
    return id;
  } // Fall back to using a timer.


  // Approximate one frame at ~60fps when native rAF is unavailable.
  return this.setTimeout(fn, 1000 / 60);
}
|
/** |
|
* Cancels a queued callback passed to {@link Component#requestAnimationFrame} |
|
* (rAF). |
|
* |
|
* If you queue an rAF callback via {@link Component#requestAnimationFrame}, |
|
* use this function instead of `window.cancelAnimationFrame`. If you don't, |
|
* your dispose listener will not get cleaned up until {@link Component#dispose}! |
|
* |
|
* @param {number} id |
|
* The rAF ID to clear. The return value of {@link Component#requestAnimationFrame}. |
|
* |
|
* @return {number} |
|
* Returns the rAF ID that was cleared. |
|
* |
|
* @see [Similar to]{@link https://developer.mozilla.org/en-US/docs/Web/API/window/cancelAnimationFrame} |
|
*/ |
|
; |
|
|
|
_proto.cancelAnimationFrame = function cancelAnimationFrame(id) {
  if (this.supportsRaf_) {
    window$1.cancelAnimationFrame(id);

    // Stand-in carrying the same guid assigned in requestAnimationFrame();
    // presumably used by off() to locate and remove the real 'dispose'
    // listener — TODO confirm against the event system.
    var disposeFn = function disposeFn() {};

    disposeFn.guid = "vjs-raf-" + id;
    this.off('dispose', disposeFn);
    return id;
  } // Fall back to using a timer.


  // Without rAF support, requestAnimationFrame() used setTimeout(), so the
  // id is a timeout id and must be cleared as one.
  return this.clearTimeout(id);
}
|
/** |
|
* Register a `Component` with `videojs` given the name and the component. |
|
* |
|
* > NOTE: {@link Tech}s should not be registered as a `Component`. {@link Tech}s |
|
* should be registered using {@link Tech.registerTech} or |
|
* {@link videojs:videojs.registerTech}. |
|
* |
|
* > NOTE: This function can also be seen on videojs as |
|
* {@link videojs:videojs.registerComponent}. |
|
* |
|
* @param {string} name |
|
* The name of the `Component` to register. |
|
* |
|
* @param {Component} ComponentToRegister |
|
* The `Component` class to register. |
|
* |
|
* @return {Component} |
|
* The `Component` that was registered. |
|
*/ |
|
; |
|
|
|
Component.registerComponent = function registerComponent(name, ComponentToRegister) {
  if (typeof name !== 'string' || !name) {
    throw new Error("Illegal component name, \"" + name + "\"; must be a non-empty string.");
  }

  var Tech = Component.getComponent('Tech'); // We need to make sure this check is only done if Tech has been registered.

  var isTech = Tech && Tech.isTech(ComponentToRegister);
  // Accept Component itself or anything whose prototype chain includes it.
  var isComp = Component === ComponentToRegister || Component.prototype.isPrototypeOf(ComponentToRegister.prototype);

  if (isTech || !isComp) {
    var reason;

    if (isTech) {
      reason = 'techs must be registered using Tech.registerTech()';
    } else {
      reason = 'must be a Component subclass';
    }

    throw new Error("Illegal component, \"" + name + "\"; " + reason + ".");
  }

  // Registry keys are stored title-cased; getComponent() title-cases lookups
  // to match.
  name = toTitleCase(name);

  // Lazily create the registry on first registration.
  if (!Component.components_) {
    Component.components_ = {};
  }

  var Player = Component.getComponent('Player');

  if (name === 'Player' && Player && Player.players) {
    var players = Player.players;
    var playerNames = Object.keys(players); // If we have players that were disposed, then their name will still be
    // in Players.players. So, we must loop through and verify that the value
    // for each item is not null. This allows registration of the Player component
    // after all players have been disposed or before any were created.

    if (players && playerNames.length > 0 && playerNames.map(function (pname) {
      return players[pname];
    }).every(Boolean)) {
      throw new Error('Can not register Player component after player has been created.');
    }
  }

  Component.components_[name] = ComponentToRegister;
  return ComponentToRegister;
}
|
/** |
|
* Get a `Component` based on the name it was registered with. |
|
* |
|
* @param {string} name |
|
* The Name of the component to get. |
|
* |
|
* @return {Component} |
|
* The `Component` that got registered under the given name. |
|
* |
|
* @deprecated In `videojs` 6 this will not return `Component`s that were not |
|
* registered using {@link Component.registerComponent}. Currently we |
|
* check the global `videojs` object for a `Component` name and |
|
* return that if it exists. |
|
*/ |
|
; |
|
|
|
Component.getComponent = function getComponent(name) {
  if (!name) {
    return;
  }

  // Lookups are title-cased to match how registerComponent() stores keys.
  name = toTitleCase(name);

  if (Component.components_ && Component.components_[name]) {
    return Component.components_[name];
  }

  // Implicitly returns `undefined` when nothing was registered under `name`.
};
|
|
|
return Component; |
|
}(); |
|
/**
 * Whether or not this component supports `requestAnimationFrame`.
 *
 * This is exposed primarily for testing purposes.
 *
 * @private
 * @type {Boolean}
 */
Component.prototype.supportsRaf_ = typeof window$1.requestAnimationFrame === 'function' && typeof window$1.cancelAnimationFrame === 'function';
// Register the base class so it is retrievable via Component.getComponent('Component').
Component.registerComponent('Component', Component);
|
|
|
/** |
|
* @file browser.js |
|
* @module browser |
|
*/ |
|
// Raw user agent string; empty when navigator is unavailable (e.g. tests).
var USER_AGENT = window$1.navigator && window$1.navigator.userAgent || '';
var webkitVersionMap = /AppleWebKit\/([\d.]+)/i.exec(USER_AGENT);
// WebKit engine version as a number, or null for non-WebKit browsers.
var appleWebkitVersion = webkitVersionMap ? parseFloat(webkitVersionMap.pop()) : null;
|
/** |
|
* Whether or not this device is an iPad. |
|
* |
|
* @static |
|
* @const |
|
* @type {Boolean} |
|
*/ |
|
|
|
var IS_IPAD = /iPad/i.test(USER_AGENT);
/**
 * Whether or not this device is an iPhone.
 *
 * @static
 * @const
 * @type {Boolean}
 */
// The Facebook app's UIWebView identifies as both an iPhone and iPad, so
// to identify iPhones, we need to exclude iPads.
// http://artsy.github.io/blog/2012/10/18/the-perils-of-ios-user-agent-sniffing/

var IS_IPHONE = /iPhone/i.test(USER_AGENT) && !IS_IPAD;
/**
 * Whether or not this device is an iPod.
 *
 * @static
 * @const
 * @type {Boolean}
 */

var IS_IPOD = /iPod/i.test(USER_AGENT);
/**
 * Whether or not this is an iOS device.
 *
 * @static
 * @const
 * @type {Boolean}
 */

var IS_IOS = IS_IPHONE || IS_IPAD || IS_IPOD;
/**
 * The detected iOS version - or `null`.
 *
 * @static
 * @const
 * @type {string|null}
 */

var IOS_VERSION = function () {
  // e.g. "OS 12_1" yields "12" — the major version only, kept as a string.
  var match = USER_AGENT.match(/OS (\d+)_/i);

  if (match && match[1]) {
    return match[1];
  }

  return null;
}();
|
/** |
|
* Whether or not this is an Android device. |
|
* |
|
* @static |
|
* @const |
|
* @type {Boolean} |
|
*/ |
|
|
|
var IS_ANDROID = /Android/i.test(USER_AGENT);
/**
 * The detected Android version - or `null`.
 *
 * @static
 * @const
 * @type {number|string|null}
 */

var ANDROID_VERSION = function () {
  // This matches Android Major.Minor.Patch versions
  // ANDROID_VERSION is Major.Minor as a Number, if Minor isn't available, then only Major is returned
  var match = USER_AGENT.match(/Android (\d+)(?:\.(\d+))?(?:\.(\d+))*/i);

  if (!match) {
    return null;
  }

  var major = match[1] && parseFloat(match[1]);
  var minor = match[2] && parseFloat(match[2]);

  if (major && minor) {
    // Both parts present: combine into a single Major.Minor number.
    return parseFloat(match[1] + '.' + match[2]);
  } else if (major) {
    return major;
  }

  return null;
}();
|
/** |
|
* Whether or not this is a native Android browser. |
|
* |
|
* @static |
|
* @const |
|
* @type {Boolean} |
|
*/ |
|
|
|
// "Native" Android browser: pre-5 Android running the stock WebKit (< 537).
var IS_NATIVE_ANDROID = IS_ANDROID && ANDROID_VERSION < 5 && appleWebkitVersion < 537;
/**
 * Whether or not this is Mozilla Firefox.
 *
 * @static
 * @const
 * @type {Boolean}
 */

var IS_FIREFOX = /Firefox/i.test(USER_AGENT);
/**
 * Whether or not this is Microsoft Edge.
 *
 * @static
 * @const
 * @type {Boolean}
 */

var IS_EDGE = /Edge/i.test(USER_AGENT);
/**
 * Whether or not this is Google Chrome.
 *
 * This will also be `true` for Chrome on iOS, which will have different support
 * as it is actually Safari under the hood.
 *
 * @static
 * @const
 * @type {Boolean}
 */

// Edge also advertises "Chrome" in its UA, so exclude it first; "CriOS" is
// Chrome on iOS.
var IS_CHROME = !IS_EDGE && (/Chrome/i.test(USER_AGENT) || /CriOS/i.test(USER_AGENT));
/**
 * The detected Google Chrome version - or `null`.
 *
 * @static
 * @const
 * @type {number|null}
 */

var CHROME_VERSION = function () {
  var match = USER_AGENT.match(/(Chrome|CriOS)\/(\d+)/);

  if (match && match[2]) {
    return parseFloat(match[2]);
  }

  return null;
}();
|
/** |
|
* The detected Internet Explorer version - or `null`. |
|
* |
|
* @static |
|
* @const |
|
* @type {number|null} |
|
*/ |
|
|
|
/**
 * Derives the Internet Explorer major version from the user agent,
 * or `null`/falsy when the browser is not IE.
 */
var IE_VERSION = function () {
  // Classic IE user agents contain e.g. "MSIE 10.0".
  var result = /MSIE\s(\d+)\.\d/.exec(USER_AGENT);
  var version = result && parseFloat(result[1]);

  // IE 11 dropped the "MSIE" token; detect it via the Trident engine and
  // "rv:" tokens instead. Dots are escaped so "." matches literally rather
  // than any character (the previous patterns also matched e.g. "Trident/7X0").
  if (!version && /Trident\/7\.0/i.test(USER_AGENT) && /rv:11\.0/.test(USER_AGENT)) {
    // IE 11 has a different user agent string than other IE versions
    version = 11.0;
  }

  return version;
}();
|
/** |
|
* Whether or not this is desktop Safari. |
|
* |
|
* @static |
|
* @const |
|
* @type {Boolean} |
|
*/ |
|
|
|
// Chrome, Android stock, and Edge UAs all contain "Safari", so exclude them.
var IS_SAFARI = /Safari/i.test(USER_AGENT) && !IS_CHROME && !IS_ANDROID && !IS_EDGE;
/**
 * Whether or not this is any flavor of Safari - including iOS.
 *
 * @static
 * @const
 * @type {Boolean}
 */

var IS_ANY_SAFARI = (IS_SAFARI || IS_IOS) && !IS_CHROME;
/**
 * Whether or not this device is touch-enabled.
 *
 * NOTE(review): the expression below can evaluate to a number or an object
 * (truthy), not strictly a Boolean — confirm consumers only use it as truthy.
 *
 * @static
 * @const
 * @type {Boolean}
 */

var TOUCH_ENABLED = isReal() && ('ontouchstart' in window$1 || window$1.navigator.maxTouchPoints || window$1.DocumentTouch && window$1.document instanceof window$1.DocumentTouch);

// Frozen namespace re-exporting every browser/device detection flag above.
var browser = /*#__PURE__*/Object.freeze({
  IS_IPAD: IS_IPAD,
  IS_IPHONE: IS_IPHONE,
  IS_IPOD: IS_IPOD,
  IS_IOS: IS_IOS,
  IOS_VERSION: IOS_VERSION,
  IS_ANDROID: IS_ANDROID,
  ANDROID_VERSION: ANDROID_VERSION,
  IS_NATIVE_ANDROID: IS_NATIVE_ANDROID,
  IS_FIREFOX: IS_FIREFOX,
  IS_EDGE: IS_EDGE,
  IS_CHROME: IS_CHROME,
  CHROME_VERSION: CHROME_VERSION,
  IE_VERSION: IE_VERSION,
  IS_SAFARI: IS_SAFARI,
  IS_ANY_SAFARI: IS_ANY_SAFARI,
  TOUCH_ENABLED: TOUCH_ENABLED
});
|
|
|
/** |
|
* @file time-ranges.js |
|
* @module time-ranges |
|
*/ |
|
|
|
/** |
|
* Returns the time for the specified index at the start or end |
|
* of a TimeRange object. |
|
* |
|
* @typedef {Function} TimeRangeIndex |
|
* |
|
* @param {number} [index=0] |
|
* The range number to return the time for. |
|
* |
|
* @return {number} |
|
* The time offset at the specified index. |
|
* |
|
* @deprecated The index argument must be provided. |
|
* In the future, leaving it out will throw an error. |
|
*/ |
|
|
|
/** |
|
* An object that contains ranges of time. |
|
* |
|
* @typedef {Object} TimeRange |
|
* |
|
* @property {number} length |
|
* The number of time ranges represented by this object. |
|
* |
|
* @property {module:time-ranges~TimeRangeIndex} start |
|
* Returns the time offset at which a specified time range begins. |
|
* |
|
* @property {module:time-ranges~TimeRangeIndex} end |
|
* Returns the time offset at which a specified time range ends. |
|
* |
|
* @see https://developer.mozilla.org/en-US/docs/Web/API/TimeRanges |
|
*/ |
|
|
|
/** |
|
* Check if any of the time ranges are over the maximum index. |
|
* |
|
* @private |
|
* @param {string} fnName |
|
* The function name to use for logging |
|
* |
|
* @param {number} index |
|
* The index to check |
|
* |
|
* @param {number} maxIndex |
|
* The maximum possible index |
|
* |
|
* @throws {Error} if the timeRanges provided are over the maxIndex |
|
*/ |
|
// Mirrors the native TimeRanges behavior of throwing a descriptive error
// whenever an accessor is called with a non-numeric or out-of-range index.
function rangeCheck(fnName, index, maxIndex) {
  if (typeof index !== 'number' || index < 0 || index > maxIndex) {
    var message = "Failed to execute '" + fnName + "' on 'TimeRanges': " +
      "The index provided (" + index + ") is non-numeric or out of bounds (0-" + maxIndex + ").";

    throw new Error(message);
  }
}
|
/** |
|
* Get the time for the specified index at the start or end |
|
* of a TimeRange object. |
|
* |
|
* @private |
|
* @param {string} fnName |
|
* The function name to use for logging |
|
* |
|
* @param {string} valueIndex |
|
* The property that should be used to get the time. should be |
|
* 'start' or 'end' |
|
* |
|
* @param {Array} ranges |
|
* An array of time ranges |
|
* |
|
* @param {Array} [rangeIndex=0] |
|
* The index to start the search at |
|
* |
|
* @return {number} |
|
 * The time offset at the specified index.
|
* |
|
* @deprecated rangeIndex must be set to a value, in the future this will throw an error. |
|
* @throws {Error} if rangeIndex is more than the length of ranges |
|
*/ |
|
|
|
|
|
// Look up the 'start' (valueIndex 0) or 'end' (valueIndex 1) time of the
// range at `rangeIndex`, validating the index first via rangeCheck().
function getRange(fnName, valueIndex, ranges, rangeIndex) {
  rangeCheck(fnName, rangeIndex, ranges.length - 1);
  return ranges[rangeIndex][valueIndex];
}
|
/** |
|
* Create a time range object given ranges of time. |
|
* |
|
* @private |
|
* @param {Array} [ranges] |
|
* An array of time ranges. |
|
*/ |
|
|
|
|
|
function createTimeRangesObj(ranges) {
  // With no ranges, mimic an empty native TimeRanges object: zero length and
  // accessors that throw on any lookup.
  if (ranges === undefined || ranges.length === 0) {
    return {
      length: 0,
      start: function start() {
        throw new Error('This TimeRanges object is empty');
      },
      end: function end() {
        throw new Error('This TimeRanges object is empty');
      }
    };
  }

  return {
    length: ranges.length,
    // Each range is a [start, end] pair: index 0 holds the start time and
    // index 1 the end time (see getRange's valueIndex parameter).
    start: getRange.bind(null, 'start', 0, ranges),
    end: getRange.bind(null, 'end', 1, ranges)
  };
}
|
/** |
|
* Create a `TimeRange` object which mimics an |
|
* {@link https://developer.mozilla.org/en-US/docs/Web/API/TimeRanges|HTML5 TimeRanges instance}. |
|
* |
|
* @param {number|Array[]} start |
|
* The start of a single range (a number) or an array of ranges (an |
|
* array of arrays of two numbers each). |
|
* |
|
* @param {number} end |
|
* The end of a single range. Cannot be used with the array form of |
|
* the `start` argument. |
|
*/ |
|
|
|
|
|
function createTimeRanges(start, end) {
  if (Array.isArray(start)) {
    // Already an array of [start, end] pairs.
    return createTimeRangesObj(start);
  } else if (start === undefined || end === undefined) {
    // No usable arguments; produce an empty TimeRanges object.
    return createTimeRangesObj();
  }

  // Single range given as two numbers.
  return createTimeRangesObj([[start, end]]);
}
|
|
|
/** |
|
* @file buffer.js |
|
* @module buffer |
|
*/ |
|
/** |
|
* Compute the percentage of the media that has been buffered. |
|
* |
|
* @param {TimeRange} buffered |
|
* The current `TimeRange` object representing buffered time ranges |
|
* |
|
* @param {number} duration |
|
* Total duration of the media |
|
* |
|
* @return {number} |
|
* Percent buffered of the total duration in decimal form. |
|
*/ |
|
|
|
function bufferedPercent(buffered, duration) {
  // Without a known duration there is nothing meaningful to report.
  if (!duration) {
    return 0;
  }

  // Missing or empty TimeRanges: substitute a single zero-width range.
  if (!buffered || !buffered.length) {
    buffered = createTimeRanges(0, 0);
  }

  var bufferedDuration = 0;

  for (var rangeIndex = 0; rangeIndex < buffered.length; rangeIndex++) {
    var rangeStart = buffered.start(rangeIndex);
    // buffered end can be bigger than duration by a very small fraction
    var rangeEnd = Math.min(buffered.end(rangeIndex), duration);

    bufferedDuration += rangeEnd - rangeStart;
  }

  return bufferedDuration / duration;
}
|
|
|
/** |
|
* @file fullscreen-api.js |
|
* @module fullscreen-api |
|
* @private |
|
*/ |
|
/** |
|
* Store the browser-specific methods for the fullscreen API. |
|
* |
|
* @type {Object} |
|
* @see [Specification]{@link https://fullscreen.spec.whatwg.org} |
|
* @see [Map Approach From Screenfull.js]{@link https://github.com/sindresorhus/screenfull.js} |
|
*/ |
|
|
|
var FullscreenApi = {}; // browser API methods

// Each row lists one vendor's names for the seven fullscreen API members, in
// the same member order as the standard (spec) row, so an index-for-index
// copy can translate vendor names to spec names.
var apiMap = [['requestFullscreen', 'exitFullscreen', 'fullscreenElement', 'fullscreenEnabled', 'fullscreenchange', 'fullscreenerror', 'fullscreen'], // WebKit
['webkitRequestFullscreen', 'webkitExitFullscreen', 'webkitFullscreenElement', 'webkitFullscreenEnabled', 'webkitfullscreenchange', 'webkitfullscreenerror', '-webkit-full-screen'], // Mozilla
['mozRequestFullScreen', 'mozCancelFullScreen', 'mozFullScreenElement', 'mozFullScreenEnabled', 'mozfullscreenchange', 'mozfullscreenerror', '-moz-full-screen'], // Microsoft
['msRequestFullscreen', 'msExitFullscreen', 'msFullscreenElement', 'msFullscreenEnabled', 'MSFullscreenChange', 'MSFullscreenError', '-ms-fullscreen']];
var specApi = apiMap[0];
var browserApi;
var prefixedAPI = false; // determine the supported set of functions

for (var i = 0; i < apiMap.length; i++) {
  // check for exitFullscreen function
  if (apiMap[i][1] in document) {
    browserApi = apiMap[i];
    break;
  }
} // map the browser API names to the spec API names


if (browserApi) {
  for (var _i = 0; _i < browserApi.length; _i++) {
    FullscreenApi[specApi[_i]] = browserApi[_i];
  }

  // NOTE(review): this flag is true when the detected names equal the
  // unprefixed spec names, which reads as the opposite of "prefixed" —
  // confirm the intended meaning against this variable's consumers.
  prefixedAPI = browserApi[0] === specApi[0];
}
|
|
|
/** |
|
* @file media-error.js |
|
*/ |
|
/** |
|
* A Custom `MediaError` class which mimics the standard HTML5 `MediaError` class. |
|
* |
|
* @param {number|string|Object|MediaError} value |
|
* This can be of multiple types: |
|
* - number: should be a standard error code |
|
* - string: an error message (the code will be 0) |
|
* - Object: arbitrary properties |
|
* - `MediaError` (native): used to populate a video.js `MediaError` object |
|
* - `MediaError` (video.js): will return itself if it's already a |
|
* video.js `MediaError` object. |
|
* |
|
* @see [MediaError Spec]{@link https://dev.w3.org/html5/spec-author-view/video.html#mediaerror} |
|
* @see [Encrypted MediaError Spec]{@link https://www.w3.org/TR/2013/WD-encrypted-media-20130510/#error-codes} |
|
* |
|
* @class MediaError |
|
*/ |
|
|
|
function MediaError(value) {
  // Allow redundant calls to this constructor to avoid having `instanceof`
  // checks peppered around the code.
  if (value instanceof MediaError) {
    return value;
  }

  if (typeof value === 'number') {
    // A bare number is treated as a standard error code.
    this.code = value;
  } else if (typeof value === 'string') {
    // default code is zero, so this is a custom error
    this.message = value;
  } else if (isObject(value)) {
    // We assign the `code` property manually because native `MediaError` objects
    // do not expose it as an own/enumerable property of the object.
    if (typeof value.code === 'number') {
      this.code = value.code;
    }

    assign(this, value);
  }

  // Fall back to the stock message for this code ('' when none is defined,
  // e.g. for code 0 / MEDIA_ERR_CUSTOM).
  if (!this.message) {
    this.message = MediaError.defaultMessages[this.code] || '';
  }
}
|
/** |
|
 * The error code that refers to one of the defined `MediaError` types
|
* |
|
* @type {Number} |
|
*/ |
|
|
|
|
|
MediaError.prototype.code = 0;
/**
 * An optional message to show with the error. Message is not part of the HTML5
 * video spec but allows for more informative custom errors.
 *
 * @type {String}
 */

MediaError.prototype.message = '';
/**
 * An optional status code that can be set by plugins to allow even more detail about
 * the error. For example a plugin might provide a specific HTTP status code and an
 * error message for that code. Then when the plugin gets that error this class will
 * know how to display an error message for it. This allows a custom message to show
 * up on the `Player` error overlay.
 *
 * @type {Array}
 */

MediaError.prototype.status = null;
/**
 * Errors indexed by the W3C standard. The order **CANNOT CHANGE**! See the
 * specification listed under {@link MediaError} for more information.
 *
 * @enum {array}
 * @readonly
 * @property {string} 0 - MEDIA_ERR_CUSTOM
 * @property {string} 1 - MEDIA_ERR_ABORTED
 * @property {string} 2 - MEDIA_ERR_NETWORK
 * @property {string} 3 - MEDIA_ERR_DECODE
 * @property {string} 4 - MEDIA_ERR_SRC_NOT_SUPPORTED
 * @property {string} 5 - MEDIA_ERR_ENCRYPTED
 */

MediaError.errorTypes = ['MEDIA_ERR_CUSTOM', 'MEDIA_ERR_ABORTED', 'MEDIA_ERR_NETWORK', 'MEDIA_ERR_DECODE', 'MEDIA_ERR_SRC_NOT_SUPPORTED', 'MEDIA_ERR_ENCRYPTED'];
/**
 * The default `MediaError` messages based on the {@link MediaError.errorTypes}.
 * Code 0 (MEDIA_ERR_CUSTOM) intentionally has no default message.
 *
 * @type {Array}
 * @constant
 */

MediaError.defaultMessages = {
  1: 'You aborted the media playback',
  2: 'A network error caused the media download to fail part-way.',
  3: 'The media playback was aborted due to a corruption problem or because the media used features your browser did not support.',
  4: 'The media could not be loaded, either because the server or network failed or because the format is not supported.',
  5: 'The media is encrypted and we do not have the keys to decrypt it.'
}; // Add types as properties on MediaError
// e.g. MediaError.MEDIA_ERR_SRC_NOT_SUPPORTED = 4;

for (var errNum = 0; errNum < MediaError.errorTypes.length; errNum++) {
  MediaError[MediaError.errorTypes[errNum]] = errNum; // values should be accessible on both the class and instance

  MediaError.prototype[MediaError.errorTypes[errNum]] = errNum;
} // jsdocs for instance/static members added above
|
|
|
// Alias used by call sites that import the tuple-style parser by this name
// (function declarations hoist, so this assignment is safe here).
var tuple = SafeParseTuple;

/**
 * Parse JSON without throwing, returning an `[error, value]` tuple instead.
 * On success the error slot is null; on failure the value slot is undefined.
 */
function SafeParseTuple(obj, reviver) {
  try {
    return [null, JSON.parse(obj, reviver)];
  } catch (err) {
    return [err, undefined];
  }
}
|
|
|
/** |
|
* Returns whether an object is `Promise`-like (i.e. has a `then` method). |
|
* |
|
* @param {Object} value |
|
* An object that may or may not be `Promise`-like. |
|
* |
|
* @return {boolean} |
|
* Whether or not the object is `Promise`-like. |
|
*/ |
|
function isPromise(value) {
  // null/undefined cannot carry a `then` method; bail before property access.
  if (value === undefined || value === null) {
    return false;
  }

  return typeof value.then === 'function';
}
|
/** |
|
* Silence a Promise-like object. |
|
* |
|
* This is useful for avoiding non-harmful, but potentially confusing "uncaught |
|
* play promise" rejection error messages. |
|
* |
|
* @param {Object} value |
|
* An object that may or may not be `Promise`-like. |
|
*/ |
|
|
|
function silencePromise(value) {
  // Non-promises need no handling.
  if (!isPromise(value)) {
    return;
  }

  // Attach a no-op rejection handler so the rejection is never "uncaught".
  value.then(null, function (e) {});
}
|
|
|
/** |
|
* @file text-track-list-converter.js Utilities for capturing text track state and |
|
* re-creating tracks based on a capture. |
|
* |
|
* @module text-track-list-converter |
|
*/ |
|
|
|
/** |
|
* Examine a single {@link TextTrack} and return a JSON-compatible javascript object that |
|
* represents the {@link TextTrack}'s state. |
|
* |
|
* @param {TextTrack} track |
|
* The text track to query. |
|
* |
|
* @return {Object} |
|
* A serializable javascript representation of the TextTrack. |
|
* @private |
|
*/ |
|
var trackToJson_ = function trackToJson_(track) {
  // Start with the cues (each reduced to its serializable fields); `cues`
  // stays null/undefined when the track has none.
  var serialized = {
    cues: track.cues && Array.prototype.map.call(track.cues, function (cue) {
      return {
        startTime: cue.startTime,
        endTime: cue.endTime,
        text: cue.text,
        id: cue.id
      };
    })
  };

  // Copy over only the truthy scalar properties of the track.
  var props = ['kind', 'label', 'language', 'id', 'inBandMetadataTrackDispatchType', 'mode', 'src'];

  for (var j = 0; j < props.length; j++) {
    if (track[props[j]]) {
      serialized[props[j]] = track[props[j]];
    }
  }

  return serialized;
};
|
/** |
|
* Examine a {@link Tech} and return a JSON-compatible javascript array that represents the |
|
* state of all {@link TextTrack}s currently configured. The return array is compatible with |
|
* {@link text-track-list-converter:jsonToTextTracks}. |
|
* |
|
* @param {Tech} tech |
|
* The tech object to query |
|
* |
|
* @return {Array} |
|
* A serializable javascript representation of the {@link Tech}s |
|
* {@link TextTrackList}. |
|
*/ |
|
|
|
|
|
var textTracksToJson = function textTracksToJson(tech) {
  // Tracks backed by <track> elements, queried from the tech's DOM.
  var trackEls = tech.$$('track');
  var trackObjs = Array.prototype.map.call(trackEls, function (t) {
    return t.track;
  });
  var tracks = Array.prototype.map.call(trackEls, function (trackEl) {
    var json = trackToJson_(trackEl.track);

    // Prefer the element's src, since the serialized track may not carry one.
    if (trackEl.src) {
      json.src = trackEl.src;
    }

    return json;
  });
  // Append tracks that exist on the tech but are not backed by a <track>
  // element (i.e. not already captured above).
  return tracks.concat(Array.prototype.filter.call(tech.textTracks(), function (track) {
    return trackObjs.indexOf(track) === -1;
  }).map(trackToJson_));
};
|
/** |
|
* Create a set of remote {@link TextTrack}s on a {@link Tech} based on an array of javascript |
|
* object {@link TextTrack} representations. |
|
* |
|
* @param {Array} json |
|
* An array of `TextTrack` representation objects, like those that would be |
|
* produced by `textTracksToJson`. |
|
* |
|
* @param {Tech} tech |
|
* The `Tech` to create the `TextTrack`s on. |
|
*/ |
|
|
|
|
|
var jsonToTextTracks = function jsonToTextTracks(json, tech) {
  json.forEach(function (track) {
    var addedTrack = tech.addRemoteTextTrack(track).track;

    // Only add cues manually when there is no src to load them from.
    if (!track.src && track.cues) {
      track.cues.forEach(function (cue) {
        return addedTrack.addCue(cue);
      });
    }
  });
  return tech.textTracks();
};
|
|
|
// Public facade for the text-track-list-converter module.
var textTrackConverter = {
  textTracksToJson: textTracksToJson,
  jsonToTextTracks: jsonToTextTracks,
  trackToJson_: trackToJson_
};
|
|
|
// Best-effort reference to the global object (browser, Node, or worker).
var commonjsGlobal = typeof window !== 'undefined' ? window : typeof global !== 'undefined' ? global : typeof self !== 'undefined' ? self : {};

// Evaluates a CommonJS-style module factory against a fresh module object
// and returns whatever it leaves on `module.exports`.
function createCommonjsModule(fn, module) {
  module = { exports: {} };
  fn(module, module.exports);
  return module.exports;
}
|
|
|
var keycode = createCommonjsModule(function (module, exports) { |
|
// Source: http://jsfiddle.net/vWx8V/ |
|
// http://stackoverflow.com/questions/5603195/full-list-of-javascript-keycodes |
|
|
|
/** |
|
 * Convenience method returns corresponding value for given keyName or keyCode.
|
* |
|
* @param {Mixed} keyCode {Number} or keyName {String} |
|
* @return {Mixed} |
|
* @api public |
|
*/ |
|
function keyCode(searchInput) { |
|
// Keyboard Events |
|
if (searchInput && 'object' === typeof searchInput) { |
|
var hasKeyCode = searchInput.which || searchInput.keyCode || searchInput.charCode; |
|
if (hasKeyCode) searchInput = hasKeyCode; |
|
} // Numbers |
|
|
|
|
|
if ('number' === typeof searchInput) return names[searchInput]; // Everything else (cast to string) |
|
|
|
var search = String(searchInput); // check codes |
|
|
|
var foundNamedKey = codes[search.toLowerCase()]; |
|
if (foundNamedKey) return foundNamedKey; // check aliases |
|
|
|
var foundNamedKey = aliases[search.toLowerCase()]; |
|
if (foundNamedKey) return foundNamedKey; // weird character? |
|
|
|
if (search.length === 1) return search.charCodeAt(0); |
|
return undefined; |
|
} |
|
/** |
|
* Compares a keyboard event with a given keyCode or keyName. |
|
* |
|
* @param {Event} event Keyboard event that should be tested |
|
* @param {Mixed} keyCode {Number} or keyName {String} |
|
* @return {Boolean} |
|
* @api public |
|
*/ |
|
|
|
|
|
keyCode.isEventKey = function isEventKey(event, nameOrCode) { |
|
if (event && 'object' === typeof event) { |
|
var keyCode = event.which || event.keyCode || event.charCode; |
|
|
|
if (keyCode === null || keyCode === undefined) { |
|
return false; |
|
} |
|
|
|
if (typeof nameOrCode === 'string') { |
|
// check codes |
|
var foundNamedKey = codes[nameOrCode.toLowerCase()]; |
|
|
|
if (foundNamedKey) { |
|
return foundNamedKey === keyCode; |
|
} // check aliases |
|
|
|
|
|
var foundNamedKey = aliases[nameOrCode.toLowerCase()]; |
|
|
|
if (foundNamedKey) { |
|
return foundNamedKey === keyCode; |
|
} |
|
} else if (typeof nameOrCode === 'number') { |
|
return nameOrCode === keyCode; |
|
} |
|
|
|
return false; |
|
} |
|
}; |
|
|
|
exports = module.exports = keyCode; |
|
/** |
|
* Get by name |
|
* |
|
* exports.code['enter'] // => 13 |
|
*/ |
|
|
|
var codes = exports.code = exports.codes = { |
|
'backspace': 8, |
|
'tab': 9, |
|
'enter': 13, |
|
'shift': 16, |
|
'ctrl': 17, |
|
'alt': 18, |
|
'pause/break': 19, |
|
'caps lock': 20, |
|
'esc': 27, |
|
'space': 32, |
|
'page up': 33, |
|
'page down': 34, |
|
'end': 35, |
|
'home': 36, |
|
'left': 37, |
|
'up': 38, |
|
'right': 39, |
|
'down': 40, |
|
'insert': 45, |
|
'delete': 46, |
|
'command': 91, |
|
'left command': 91, |
|
'right command': 93, |
|
'numpad *': 106, |
|
'numpad +': 107, |
|
'numpad -': 109, |
|
'numpad .': 110, |
|
'numpad /': 111, |
|
'num lock': 144, |
|
'scroll lock': 145, |
|
'my computer': 182, |
|
'my calculator': 183, |
|
';': 186, |
|
'=': 187, |
|
',': 188, |
|
'-': 189, |
|
'.': 190, |
|
'/': 191, |
|
'`': 192, |
|
'[': 219, |
|
'\\': 220, |
|
']': 221, |
|
"'": 222 // Helper aliases |
|
|
|
}; |
|
var aliases = exports.aliases = { |
|
'windows': 91, |
|
'⇧': 16, |
|
'⌥': 18, |
|
'⌃': 17, |
|
'⌘': 91, |
|
'ctl': 17, |
|
'control': 17, |
|
'option': 18, |
|
'pause': 19, |
|
'break': 19, |
|
'caps': 20, |
|
'return': 13, |
|
'escape': 27, |
|
'spc': 32, |
|
'spacebar': 32, |
|
'pgup': 33, |
|
'pgdn': 34, |
|
'ins': 45, |
|
'del': 46, |
|
'cmd': 91 |
|
/*! |
|
* Programatically add the following |
|
*/ |
|
// lower case chars |
|
|
|
}; |
|
|
|
for (i = 97; i < 123; i++) { |
|
codes[String.fromCharCode(i)] = i - 32; |
|
} // numbers |
|
|
|
|
|
for (var i = 48; i < 58; i++) { |
|
codes[i - 48] = i; |
|
} // function keys |
|
|
|
|
|
for (i = 1; i < 13; i++) { |
|
codes['f' + i] = i + 111; |
|
} // numpad keys |
|
|
|
|
|
for (i = 0; i < 10; i++) { |
|
codes['numpad ' + i] = i + 96; |
|
} |
|
/** |
|
* Get by code |
|
* |
|
* exports.name[13] // => 'Enter' |
|
*/ |
|
|
|
|
|
var names = exports.names = exports.title = {}; // title for backward compat |
|
// Create reverse mapping |
|
|
|
for (i in codes) { |
|
names[codes[i]] = i; |
|
} // Add aliases |
|
|
|
|
|
for (var alias in aliases) { |
|
codes[alias] = aliases[alias]; |
|
} |
|
}); |
|
// Top-level aliases for the keycode module's public lookup tables,
// matching the rollup pattern used for other bundled modules.
var keycode_1 = keycode.code;
var keycode_2 = keycode.codes;
var keycode_3 = keycode.aliases;
var keycode_4 = keycode.names;
var keycode_5 = keycode.title;

// Base CSS class for the modal dialog root element; also used as the prefix
// for descendant element classes (e.g. "vjs-modal-dialog-content").
var MODAL_CLASS_NAME = 'vjs-modal-dialog';
|
/** |
|
* The `ModalDialog` displays over the video and its controls, which blocks |
|
* interaction with the player until it is closed. |
|
* |
|
* Modal dialogs include a "Close" button and will close when that button |
|
* is activated - or when ESC is pressed anywhere. |
|
* |
|
* @extends Component |
|
*/ |
|
|
|
var ModalDialog =
/*#__PURE__*/
function (_Component) {
  _inheritsLoose(ModalDialog, _Component);

  /**
   * Create an instance of this class.
   *
   * @param {Player} player
   *        The `Player` that this class should be attached to.
   *
   * @param {Object} [options]
   *        The key/value store of player options.
   *
   * @param {Mixed} [options.content=undefined]
   *        Provide customized content for this modal.
   *
   * @param {string} [options.description]
   *        A text description for the modal, primarily for accessibility.
   *
   * @param {boolean} [options.fillAlways=false]
   *        Normally, modals are automatically filled only the first time
   *        they open. This tells the modal to refresh its content
   *        every time it opens.
   *
   * @param {string} [options.label]
   *        A text label for the modal, primarily for accessibility.
   *
   * @param {boolean} [options.pauseOnOpen=true]
   *        If `true`, playback will be paused if playing when
   *        the modal opens, and resumed when it closes.
   *
   * @param {boolean} [options.temporary=true]
   *        If `true`, the modal can only be opened once; it will be
   *        disposed as soon as it's closed.
   *
   * @param {boolean} [options.uncloseable=false]
   *        If `true`, the user will not be able to close the modal
   *        through the UI in the normal ways. Programmatic closing is
   *        still possible.
   */
  function ModalDialog(player, options) {
    var _this;

    _this = _Component.call(this, player, options) || this;
    _this.opened_ = _this.hasBeenOpened_ = _this.hasBeenFilled_ = false;

    _this.closeable(!_this.options_.uncloseable);

    _this.content(_this.options_.content);

    // Make sure the contentEl is defined AFTER any children are initialized
    // because we only want the contents of the modal in the contentEl
    // (not the UI elements like the close button).
    _this.contentEl_ = createEl('div', {
      className: MODAL_CLASS_NAME + "-content"
    }, {
      role: 'document'
    });
    _this.descEl_ = createEl('p', {
      className: MODAL_CLASS_NAME + "-description vjs-control-text",
      id: _this.el().getAttribute('aria-describedby')
    });
    textContent(_this.descEl_, _this.description());

    _this.el_.appendChild(_this.descEl_);

    _this.el_.appendChild(_this.contentEl_);

    return _this;
  }

  var _proto = ModalDialog.prototype;

  /**
   * Create the `ModalDialog`'s DOM element
   *
   * @return {Element}
   *         The DOM element that gets created.
   */
  _proto.createEl = function createEl$$1() {
    return _Component.prototype.createEl.call(this, 'div', {
      className: this.buildCSSClass(),
      tabIndex: -1
    }, {
      'aria-describedby': this.id() + "_description",
      'aria-hidden': 'true',
      'aria-label': this.label(),
      'role': 'dialog'
    });
  };

  /**
   * Release element references and dispose of the component.
   */
  _proto.dispose = function dispose() {
    this.contentEl_ = null;
    this.descEl_ = null;
    this.previouslyActiveEl_ = null;

    _Component.prototype.dispose.call(this);
  };

  /**
   * Builds the default DOM `className`.
   *
   * @return {string}
   *         The DOM `className` for this object.
   */
  _proto.buildCSSClass = function buildCSSClass() {
    return MODAL_CLASS_NAME + " vjs-hidden " + _Component.prototype.buildCSSClass.call(this);
  };

  /**
   * Returns the label string for this modal. Primarily used for accessibility.
   *
   * @return {string}
   *         the localized or raw label of this modal.
   */
  _proto.label = function label() {
    return this.localize(this.options_.label || 'Modal Window');
  };

  /**
   * Returns the description string for this modal. Primarily used for
   * accessibility.
   *
   * @return {string}
   *         The localized or raw description of this modal.
   */
  _proto.description = function description() {
    var desc = this.options_.description || this.localize('This is a modal window.');

    // Append a universal closeability message if the modal is closeable.
    if (this.closeable()) {
      desc += ' ' + this.localize('This modal can be closed by pressing the Escape key or activating the close button.');
    }

    return desc;
  };

  /**
   * Opens the modal.
   *
   * @fires ModalDialog#beforemodalopen
   * @fires ModalDialog#modalopen
   */
  _proto.open = function open() {
    if (!this.opened_) {
      var player = this.player();

      /**
       * Fired just before a `ModalDialog` is opened.
       *
       * @event ModalDialog#beforemodalopen
       * @type {EventTarget~Event}
       */
      this.trigger('beforemodalopen');
      this.opened_ = true;

      // Fill content if the modal has never opened before and
      // never been filled.
      if (this.options_.fillAlways || !this.hasBeenOpened_ && !this.hasBeenFilled_) {
        this.fill();
      }

      // If the player was playing, pause it and take note of its previously
      // playing state.
      this.wasPlaying_ = !player.paused();

      if (this.options_.pauseOnOpen && this.wasPlaying_) {
        player.pause();
      }

      this.on('keydown', this.handleKeyDown);

      // Hide controls and note if they were enabled.
      this.hadControls_ = player.controls();
      player.controls(false);
      this.show();
      this.conditionalFocus_();
      this.el().setAttribute('aria-hidden', 'false');

      /**
       * Fired just after a `ModalDialog` is opened.
       *
       * @event ModalDialog#modalopen
       * @type {EventTarget~Event}
       */
      this.trigger('modalopen');
      this.hasBeenOpened_ = true;
    }
  };

  /**
   * If the `ModalDialog` is currently open or closed.
   *
   * @param {boolean} [value]
   *        If given, it will open (`true`) or close (`false`) the modal.
   *
   * @return {boolean}
   *         the current open state of the modaldialog
   */
  _proto.opened = function opened(value) {
    if (typeof value === 'boolean') {
      this[value ? 'open' : 'close']();
    }

    return this.opened_;
  };

  /**
   * Closes the modal, does nothing if the `ModalDialog` is
   * not open.
   *
   * @fires ModalDialog#beforemodalclose
   * @fires ModalDialog#modalclose
   */
  _proto.close = function close() {
    if (!this.opened_) {
      return;
    }

    var player = this.player();

    /**
     * Fired just before a `ModalDialog` is closed.
     *
     * @event ModalDialog#beforemodalclose
     * @type {EventTarget~Event}
     */
    this.trigger('beforemodalclose');
    this.opened_ = false;

    // Resume playback only if the modal paused it on open.
    if (this.wasPlaying_ && this.options_.pauseOnOpen) {
      player.play();
    }

    this.off('keydown', this.handleKeyDown);

    if (this.hadControls_) {
      player.controls(true);
    }

    this.hide();
    this.el().setAttribute('aria-hidden', 'true');

    /**
     * Fired just after a `ModalDialog` is closed.
     *
     * @event ModalDialog#modalclose
     * @type {EventTarget~Event}
     */
    this.trigger('modalclose');
    this.conditionalBlur_();

    // Temporary modals are single-use: tear down after closing.
    if (this.options_.temporary) {
      this.dispose();
    }
  };

  /**
   * Check to see if the `ModalDialog` is closeable via the UI.
   *
   * @param {boolean} [value]
   *        If given as a boolean, it will set the `closeable` option.
   *
   * @return {boolean}
   *         Returns the final value of the closable option.
   */
  _proto.closeable = function closeable(value) {
    if (typeof value === 'boolean') {
      var closeable = this.closeable_ = !!value;
      var close = this.getChild('closeButton');

      // If this is being made closeable and has no close button, add one.
      if (closeable && !close) {
        // The close button should be a child of the modal - not its
        // content element, so temporarily change the content element.
        var temp = this.contentEl_;
        this.contentEl_ = this.el_;
        close = this.addChild('closeButton', {
          controlText: 'Close Modal Dialog'
        });
        this.contentEl_ = temp;
        this.on(close, 'close', this.close);
      }

      // If this is being made uncloseable and has a close button, remove it.
      if (!closeable && close) {
        this.off(close, 'close', this.close);
        this.removeChild(close);
        close.dispose();
      }
    }

    return this.closeable_;
  };

  /**
   * Fill the modal's content element with the modal's "content" option.
   * The content element will be emptied before this change takes place.
   */
  _proto.fill = function fill() {
    this.fillWith(this.content());
  };

  /**
   * Fill the modal's content element with arbitrary content.
   * The content element will be emptied before this change takes place.
   *
   * @fires ModalDialog#beforemodalfill
   * @fires ModalDialog#modalfill
   *
   * @param {Mixed} [content]
   *        The same rules apply to this as apply to the `content` option.
   */
  _proto.fillWith = function fillWith(content) {
    var contentEl = this.contentEl();
    var parentEl = contentEl.parentNode;
    var nextSiblingEl = contentEl.nextSibling;

    /**
     * Fired just before a `ModalDialog` is filled with content.
     *
     * @event ModalDialog#beforemodalfill
     * @type {EventTarget~Event}
     */
    this.trigger('beforemodalfill');
    this.hasBeenFilled_ = true;

    // Detach the content element from the DOM before performing
    // manipulation to avoid modifying the live DOM multiple times.
    parentEl.removeChild(contentEl);
    this.empty();
    insertContent(contentEl, content);

    /**
     * Fired just after a `ModalDialog` is filled with content.
     *
     * @event ModalDialog#modalfill
     * @type {EventTarget~Event}
     */
    this.trigger('modalfill');

    // Re-inject the re-filled content element.
    if (nextSiblingEl) {
      parentEl.insertBefore(contentEl, nextSiblingEl);
    } else {
      parentEl.appendChild(contentEl);
    }

    // make sure that the close button is last in the dialog DOM
    var closeButton = this.getChild('closeButton');

    if (closeButton) {
      parentEl.appendChild(closeButton.el_);
    }
  };

  /**
   * Empties the content element. This happens anytime the modal is filled.
   *
   * @fires ModalDialog#beforemodalempty
   * @fires ModalDialog#modalempty
   */
  _proto.empty = function empty() {
    /**
     * Fired just before a `ModalDialog` is emptied.
     *
     * @event ModalDialog#beforemodalempty
     * @type {EventTarget~Event}
     */
    this.trigger('beforemodalempty');
    emptyEl(this.contentEl());

    /**
     * Fired just after a `ModalDialog` is emptied.
     *
     * @event ModalDialog#modalempty
     * @type {EventTarget~Event}
     */
    this.trigger('modalempty');
  };

  /**
   * Gets or sets the modal content, which gets normalized before being
   * rendered into the DOM.
   *
   * This does not update the DOM or fill the modal, but it is called during
   * that process.
   *
   * @param {Mixed} [value]
   *        If defined, sets the internal content value to be used on the
   *        next call(s) to `fill`. This value is normalized before being
   *        inserted. To "clear" the internal content value, pass `null`.
   *
   * @return {Mixed}
   *         The current content of the modal dialog
   */
  _proto.content = function content(value) {
    if (typeof value !== 'undefined') {
      this.content_ = value;
    }

    return this.content_;
  };

  /**
   * conditionally focus the modal dialog if focus was previously on the player.
   *
   * @private
   */
  _proto.conditionalFocus_ = function conditionalFocus_() {
    var activeEl = document.activeElement;
    var playerEl = this.player_.el_;
    this.previouslyActiveEl_ = null;

    if (playerEl.contains(activeEl) || playerEl === activeEl) {
      this.previouslyActiveEl_ = activeEl;
      this.focus();
    }
  };

  /**
   * conditionally blur the element and refocus the last focused element
   *
   * @private
   */
  _proto.conditionalBlur_ = function conditionalBlur_() {
    if (this.previouslyActiveEl_) {
      this.previouslyActiveEl_.focus();
      this.previouslyActiveEl_ = null;
    }
  };

  /**
   * Keydown handler. Attached when modal is focused. Closes on Escape and
   * traps Tab focus within the modal.
   *
   * @listens keydown
   */
  _proto.handleKeyDown = function handleKeyDown(event) {
    // Do not allow keydowns to reach out of the modal dialog.
    event.stopPropagation();

    if (keycode.isEventKey(event, 'Escape') && this.closeable()) {
      event.preventDefault();
      this.close();
      return;
    }

    // exit early if it isn't a tab key
    if (!keycode.isEventKey(event, 'Tab')) {
      return;
    }

    var focusableEls = this.focusableEls_();
    var activeEl = this.el_.querySelector(':focus');
    var focusIndex;

    for (var i = 0; i < focusableEls.length; i++) {
      if (activeEl === focusableEls[i]) {
        focusIndex = i;
        break;
      }
    }

    if (document.activeElement === this.el_) {
      focusIndex = 0;
    }

    // Wrap focus from the first element back to the last (Shift+Tab) and
    // from the last forward to the first (Tab).
    if (event.shiftKey && focusIndex === 0) {
      focusableEls[focusableEls.length - 1].focus();
      event.preventDefault();
    } else if (!event.shiftKey && focusIndex === focusableEls.length - 1) {
      focusableEls[0].focus();
      event.preventDefault();
    }
  };

  /**
   * get all focusable elements
   *
   * @private
   */
  _proto.focusableEls_ = function focusableEls_() {
    var allChildren = this.el_.querySelectorAll('*');
    return Array.prototype.filter.call(allChildren, function (child) {
      return (child instanceof window$1.HTMLAnchorElement || child instanceof window$1.HTMLAreaElement) && child.hasAttribute('href') ||
        (child instanceof window$1.HTMLInputElement || child instanceof window$1.HTMLSelectElement || child instanceof window$1.HTMLTextAreaElement || child instanceof window$1.HTMLButtonElement) && !child.hasAttribute('disabled') ||
        child instanceof window$1.HTMLIFrameElement || child instanceof window$1.HTMLObjectElement || child instanceof window$1.HTMLEmbedElement ||
        // BUGFIX: `getAttribute` returns a string, so the old comparison
        // `!== -1` (a number) was always true and tabindex="-1" elements
        // were wrongly treated as focusable. Compare against '-1' instead.
        child.hasAttribute('tabindex') && child.getAttribute('tabindex') !== '-1' ||
        child.hasAttribute('contenteditable');
    });
  };

  return ModalDialog;
}(Component);
|
/** |
|
* Default options for `ModalDialog` default options. |
|
* |
|
* @type {Object} |
|
* @private |
|
*/ |
|
|
|
|
|
// Defaults: pause playback while the modal is open, and dispose the modal
// as soon as it is closed (single-use dialog).
ModalDialog.prototype.options_ = {
  pauseOnOpen: true,
  temporary: true
};

// Register with the component framework so it can be created by name.
Component.registerComponent('ModalDialog', ModalDialog);
|
|
|
/** |
|
* Common functionaliy between {@link TextTrackList}, {@link AudioTrackList}, and |
|
* {@link VideoTrackList} |
|
* |
|
* @extends EventTarget |
|
*/ |
|
|
|
var TrackList =
/*#__PURE__*/
function (_super) {
  _inheritsLoose(TrackList, _super);

  /**
   * Common functionality between {@link TextTrackList}, {@link AudioTrackList},
   * and {@link VideoTrackList}. Create an instance of this class.
   *
   * @param {Track[]} tracks
   *        A list of tracks to initialize the list with.
   *
   * @abstract
   */
  function TrackList(tracks) {
    var _this;

    if (tracks === void 0) {
      tracks = [];
    }

    _this = _super.call(this) || this;
    _this.tracks_ = [];

    /**
     * @memberof TrackList
     * @member {number} length
     *         The current number of `Track`s in this `TrackList`.
     * @instance
     */
    Object.defineProperty(_assertThisInitialized(_this), 'length', {
      get: function get() {
        return this.tracks_.length;
      }
    });

    for (var idx = 0; idx < tracks.length; idx++) {
      _this.addTrack(tracks[idx]);
    }

    return _this;
  }

  var _proto = TrackList.prototype;

  /**
   * Add a {@link Track} to the `TrackList`
   *
   * @param {Track} track
   *        The audio, video, or text track to add to the list.
   *
   * @fires TrackList#addtrack
   */
  _proto.addTrack = function addTrack(track) {
    var index = this.tracks_.length;

    // Lazily define an indexed getter so the list is array-like.
    if (!('' + index in this)) {
      Object.defineProperty(this, index, {
        get: function get() {
          return this.tracks_[index];
        }
      });
    }

    // Do not add duplicate tracks.
    if (this.tracks_.indexOf(track) !== -1) {
      return;
    }

    this.tracks_.push(track);

    /**
     * Triggered when a track is added to a track list.
     *
     * @event TrackList#addtrack
     * @type {EventTarget~Event}
     * @property {Track} track
     *           A reference to track that was added.
     */
    this.trigger({
      track: track,
      type: 'addtrack',
      target: this
    });
  };

  /**
   * Remove a {@link Track} from the `TrackList`
   *
   * @param {Track} rtrack
   *        The audio, video, or text track to remove from the list.
   *
   * @fires TrackList#removetrack
   */
  _proto.removeTrack = function removeTrack(rtrack) {
    var removed;

    for (var idx = 0, len = this.length; idx < len; idx++) {
      if (this[idx] !== rtrack) {
        continue;
      }

      removed = this[idx];

      // Detach all of the track's own listeners, if it supports that.
      if (removed.off) {
        removed.off();
      }

      this.tracks_.splice(idx, 1);
      break;
    }

    // Nothing was removed; do not fire the event.
    if (!removed) {
      return;
    }

    /**
     * Triggered when a track is removed from track list.
     *
     * @event TrackList#removetrack
     * @type {EventTarget~Event}
     * @property {Track} track
     *           A reference to track that was removed.
     */
    this.trigger({
      track: removed,
      type: 'removetrack',
      target: this
    });
  };

  /**
   * Get a Track from the TrackList by a tracks id
   *
   * @param {string} id - the id of the track to get
   * @method getTrackById
   * @return {Track}
   * @private
   */
  _proto.getTrackById = function getTrackById(id) {
    for (var idx = 0, len = this.length; idx < len; idx++) {
      if (this[idx].id === id) {
        return this[idx];
      }
    }

    return null;
  };

  return TrackList;
}(EventTarget);
|
/** |
|
* Triggered when a different track is selected/enabled. |
|
* |
|
* @event TrackList#change |
|
* @type {EventTarget~Event} |
|
*/ |
|
|
|
/** |
|
* Events that can be called with on + eventName. See {@link EventHandler}. |
|
* |
|
* @property {Object} TrackList#allowedEvents_ |
|
* @private |
|
*/ |
|
|
|
|
|
// Event types a TrackList may dispatch; consumed by the on<event> loop below.
TrackList.prototype.allowedEvents_ = {
  change: 'change',
  addtrack: 'addtrack',
  removetrack: 'removetrack'
}; // emulate attribute EventHandler support to allow for feature detection

// Define `onchange`/`onaddtrack`/`onremovetrack` properties (initially null)
// so code can feature-detect them the way it does on native track lists.
for (var event in TrackList.prototype.allowedEvents_) {
  TrackList.prototype['on' + event] = null;
}
|
|
|
/** |
|
* Anywhere we call this function we diverge from the spec |
|
* as we only support one enabled audiotrack at a time |
|
* |
|
* @param {AudioTrackList} list |
|
* list to work on |
|
* |
|
* @param {AudioTrack} track |
|
* The track to skip |
|
* |
|
* @private |
|
*/ |
|
|
|
/**
 * Anywhere we call this function we diverge from the spec as we only
 * support one enabled audio track at a time: every track in `list` other
 * than `track` is disabled.
 *
 * @param {AudioTrackList} list
 *        list to work on
 *
 * @param {AudioTrack} track
 *        The track to skip
 *
 * @private
 */
var disableOthers = function disableOthers(list, track) {
  for (var idx = 0; idx < list.length; idx++) {
    var candidate = list[idx];

    // Skip placeholder entries with no own properties, and skip the track
    // that should stay enabled.
    if (!Object.keys(candidate).length || track.id === candidate.id) {
      continue;
    }

    // Another audio track is enabled; disable it.
    candidate.enabled = false;
  }
};
|
/** |
|
* The current list of {@link AudioTrack} for a media file. |
|
* |
|
* @see [Spec]{@link https://html.spec.whatwg.org/multipage/embedded-content.html#audiotracklist} |
|
* @extends TrackList |
|
*/ |
|
|
|
|
|
var AudioTrackList =
/*#__PURE__*/
function (_TrackList) {
  _inheritsLoose(AudioTrackList, _TrackList);

  /**
   * The current list of {@link AudioTrack} for a media file.
   * Create an instance of this class.
   *
   * @param {AudioTrack[]} [tracks=[]]
   *        A list of `AudioTrack` to instantiate the list with.
   */
  function AudioTrackList(tracks) {
    var _this;

    if (tracks === void 0) {
      tracks = [];
    }

    // Make sure only one track is enabled. Scan from the last index to the
    // first so that the last enabled track wins, then disable the rest.
    for (var idx = tracks.length - 1; idx >= 0; idx--) {
      if (tracks[idx].enabled) {
        disableOthers(tracks, tracks[idx]);
        break;
      }
    }

    _this = _TrackList.call(this, tracks) || this;
    _this.changing_ = false;
    return _this;
  }

  var _proto = AudioTrackList.prototype;

  /**
   * Add an {@link AudioTrack} to the `AudioTrackList`.
   *
   * @param {AudioTrack} track
   *        The AudioTrack to add to the list
   *
   * @fires TrackList#addtrack
   */
  _proto.addTrack = function addTrack(track) {
    var list = this;

    if (track.enabled) {
      disableOthers(this, track);
    }

    _TrackList.prototype.addTrack.call(this, track);

    // Native tracks don't implement addEventListener; nothing to wire up.
    if (!track.addEventListener) {
      return;
    }

    /**
     * @listens AudioTrack#enabledchange
     * @fires TrackList#change
     */
    track.enabledChange_ = function () {
      // While disabling the other tracks (we support only one enabled
      // track at a time) changing_ is set so we don't re-enter and fire
      // extra change events.
      if (list.changing_) {
        return;
      }

      list.changing_ = true;
      disableOthers(list, track);
      list.changing_ = false;

      list.trigger('change');
    };

    track.addEventListener('enabledchange', track.enabledChange_);
  };

  /**
   * Remove an {@link AudioTrack} and detach the listener added above.
   *
   * @param {AudioTrack} rtrack
   *        The AudioTrack to remove from the list
   */
  _proto.removeTrack = function removeTrack(rtrack) {
    _TrackList.prototype.removeTrack.call(this, rtrack);

    if (rtrack.removeEventListener && rtrack.enabledChange_) {
      rtrack.removeEventListener('enabledchange', rtrack.enabledChange_);
      rtrack.enabledChange_ = null;
    }
  };

  return AudioTrackList;
}(TrackList);
|
|
|
/** |
|
* Un-select all other {@link VideoTrack}s that are selected. |
|
* |
|
* @param {VideoTrackList} list |
|
* list to work on |
|
* |
|
* @param {VideoTrack} track |
|
* The track to skip |
|
* |
|
* @private |
|
*/ |
|
|
|
/**
 * Un-select all other {@link VideoTrack}s that are selected: every track in
 * `list` other than `track` has its `selected` flag cleared.
 *
 * @param {VideoTrackList} list
 *        list to work on
 *
 * @param {VideoTrack} track
 *        The track to skip
 *
 * @private
 */
var disableOthers$1 = function disableOthers(list, track) {
  for (var idx = 0; idx < list.length; idx++) {
    var candidate = list[idx];

    // Skip placeholder entries with no own properties, and skip the track
    // that should stay selected.
    if (!Object.keys(candidate).length || track.id === candidate.id) {
      continue;
    }

    // Another video track is selected; un-select it.
    candidate.selected = false;
  }
};
|
/** |
|
* The current list of {@link VideoTrack} for a video. |
|
* |
|
* @see [Spec]{@link https://html.spec.whatwg.org/multipage/embedded-content.html#videotracklist} |
|
* @extends TrackList |
|
*/ |
|
|
|
|
|
var VideoTrackList =
/*#__PURE__*/
function (_TrackList) {
  _inheritsLoose(VideoTrackList, _TrackList);

  /**
   * The current list of {@link VideoTrack} for a video.
   * Create an instance of this class.
   *
   * @param {VideoTrack[]} [tracks=[]]
   *        A list of `VideoTrack` to instantiate the list with.
   */
  function VideoTrackList(tracks) {
    var _this;

    if (tracks === void 0) {
      tracks = [];
    }

    // Make sure only one track is selected. Scan from the last index to the
    // first so that the last selected track wins, then un-select the rest.
    for (var idx = tracks.length - 1; idx >= 0; idx--) {
      if (tracks[idx].selected) {
        disableOthers$1(tracks, tracks[idx]);
        break;
      }
    }

    _this = _TrackList.call(this, tracks) || this;
    _this.changing_ = false;

    /**
     * @member {number} VideoTrackList#selectedIndex
     *         The current index of the selected {@link VideoTrack}, or -1
     *         when no track is selected. Assignment is ignored.
     */
    Object.defineProperty(_assertThisInitialized(_this), 'selectedIndex', {
      get: function get() {
        for (var pos = 0; pos < this.length; pos++) {
          if (this[pos].selected) {
            return pos;
          }
        }

        return -1;
      },
      set: function set() {}
    });

    return _this;
  }

  var _proto = VideoTrackList.prototype;

  /**
   * Add a {@link VideoTrack} to the `VideoTrackList`.
   *
   * @param {VideoTrack} track
   *        The VideoTrack to add to the list
   *
   * @fires TrackList#addtrack
   */
  _proto.addTrack = function addTrack(track) {
    var list = this;

    if (track.selected) {
      disableOthers$1(this, track);
    }

    _TrackList.prototype.addTrack.call(this, track);

    // Native tracks don't implement addEventListener; nothing to wire up.
    if (!track.addEventListener) {
      return;
    }

    /**
     * @listens VideoTrack#selectedchange
     * @fires TrackList#change
     */
    track.selectedChange_ = function () {
      // changing_ guards against re-entry while we un-select the others.
      if (list.changing_) {
        return;
      }

      list.changing_ = true;
      disableOthers$1(list, track);
      list.changing_ = false;

      list.trigger('change');
    };

    track.addEventListener('selectedchange', track.selectedChange_);
  };

  /**
   * Remove a {@link VideoTrack} and detach the listener added above.
   *
   * @param {VideoTrack} rtrack
   *        The VideoTrack to remove from the list
   */
  _proto.removeTrack = function removeTrack(rtrack) {
    _TrackList.prototype.removeTrack.call(this, rtrack);

    if (rtrack.removeEventListener && rtrack.selectedChange_) {
      rtrack.removeEventListener('selectedchange', rtrack.selectedChange_);
      rtrack.selectedChange_ = null;
    }
  };

  return VideoTrackList;
}(TrackList);
|
|
|
/** |
|
* The current list of {@link TextTrack} for a media file. |
|
* |
|
* @see [Spec]{@link https://html.spec.whatwg.org/multipage/embedded-content.html#texttracklist} |
|
* @extends TrackList |
|
*/ |
|
|
|
var TextTrackList =
/*#__PURE__*/
function (_TrackList) {
  _inheritsLoose(TextTrackList, _TrackList);

  function TextTrackList() {
    return _TrackList.apply(this, arguments) || this;
  }

  var _proto = TextTrackList.prototype;

  /**
   * Add a {@link TextTrack} to the `TextTrackList`
   *
   * @param {TextTrack} track
   *        The text track to add to the list.
   *
   * @fires TrackList#addtrack
   */
  _proto.addTrack = function addTrack(track) {
    var _this = this;

    _TrackList.prototype.addTrack.call(this, track);

    if (!this.queueChange_) {
      this.queueChange_ = function () {
        return _this.queueTrigger('change');
      };
    }

    // BUGFIX: the guard previously checked `this.triggerSelectedlanguagechange`
    // (no trailing underscore), a property that is never assigned, so a new
    // handler closure was created on every addTrack call and the closure
    // passed to removeEventListener later could not match. Guard on the
    // property that is actually assigned.
    if (!this.triggerSelectedlanguagechange_) {
      this.triggerSelectedlanguagechange_ = function () {
        return _this.trigger('selectedlanguagechange');
      };
    }

    /**
     * @listens TextTrack#modechange
     * @fires TrackList#change
     */
    track.addEventListener('modechange', this.queueChange_);

    // Metadata and chapters tracks do not represent a selectable language.
    var nonLanguageTextTrackKind = ['metadata', 'chapters'];

    if (nonLanguageTextTrackKind.indexOf(track.kind) === -1) {
      track.addEventListener('modechange', this.triggerSelectedlanguagechange_);
    }
  };

  /**
   * Remove a {@link TextTrack} and detach the listeners added in addTrack.
   *
   * @param {TextTrack} rtrack
   *        The text track to remove from the list.
   */
  _proto.removeTrack = function removeTrack(rtrack) {
    _TrackList.prototype.removeTrack.call(this, rtrack); // manually remove the event handlers we added

    if (rtrack.removeEventListener) {
      if (this.queueChange_) {
        rtrack.removeEventListener('modechange', this.queueChange_);
      }

      // BUGFIX: this previously checked `this.selectedlanguagechange_`,
      // which is never assigned anywhere, so the selectedlanguagechange
      // handler was never detached from removed tracks (listener leak).
      if (this.triggerSelectedlanguagechange_) {
        rtrack.removeEventListener('modechange', this.triggerSelectedlanguagechange_);
      }
    }
  };

  return TextTrackList;
}(TrackList);
|
|
|
/** |
|
* @file html-track-element-list.js |
|
*/ |
|
|
|
/** |
|
* The current list of {@link HtmlTrackElement}s. |
|
*/ |
|
var HtmlTrackElementList =
/*#__PURE__*/
function () {
  /**
   * The current list of {@link HtmlTrackElement}s.
   * Create an instance of this class.
   *
   * @param {HtmlTrackElement[]} [trackElements=[]]
   *        A list of `HtmlTrackElement` to instantiate the list with.
   */
  function HtmlTrackElementList(trackElements) {
    if (trackElements === void 0) {
      trackElements = [];
    }

    this.trackElements_ = [];

    /**
     * @memberof HtmlTrackElementList
     * @member {number} length
     *         The current number of track elements in this list.
     * @instance
     */
    Object.defineProperty(this, 'length', {
      get: function get() {
        return this.trackElements_.length;
      }
    });

    for (var idx = 0; idx < trackElements.length; idx++) {
      this.addTrackElement_(trackElements[idx]);
    }
  }

  var _proto = HtmlTrackElementList.prototype;

  /**
   * Add an {@link HtmlTrackElement} to the `HtmlTrackElementList`
   *
   * @param {HtmlTrackElement} trackElement
   *        The track element to add to the list.
   *
   * @private
   */
  _proto.addTrackElement_ = function addTrackElement_(trackElement) {
    var index = this.trackElements_.length;

    // Lazily define an indexed getter so the list is array-like.
    if (!('' + index in this)) {
      Object.defineProperty(this, index, {
        get: function get() {
          return this.trackElements_[index];
        }
      });
    }

    // Do not add duplicate elements.
    if (this.trackElements_.indexOf(trackElement) === -1) {
      this.trackElements_.push(trackElement);
    }
  };

  /**
   * Get an {@link HtmlTrackElement} from the `HtmlTrackElementList` given an
   * {@link TextTrack}.
   *
   * @param {TextTrack} track
   *        The track associated with a track element.
   *
   * @return {HtmlTrackElement|undefined}
   *         The track element that was found or undefined.
   *
   * @private
   */
  _proto.getTrackElementByTrack_ = function getTrackElementByTrack_(track) {
    for (var idx = 0; idx < this.trackElements_.length; idx++) {
      if (this.trackElements_[idx].track === track) {
        return this.trackElements_[idx];
      }
    }

    return undefined;
  };

  /**
   * Remove a {@link HtmlTrackElement} from the `HtmlTrackElementList`
   *
   * @param {HtmlTrackElement} trackElement
   *        The track element to remove from the list.
   *
   * @private
   */
  _proto.removeTrackElement_ = function removeTrackElement_(trackElement) {
    for (var idx = 0; idx < this.trackElements_.length; idx++) {
      var candidate = this.trackElements_[idx];

      if (candidate !== trackElement) {
        continue;
      }

      // Detach listeners from the associated track and the element itself,
      // when they support the emitter interface.
      if (candidate.track && typeof candidate.track.off === 'function') {
        candidate.track.off();
      }

      if (typeof candidate.off === 'function') {
        candidate.off();
      }

      this.trackElements_.splice(idx, 1);
      break;
    }
  };

  return HtmlTrackElementList;
}();
|
|
|
/** |
|
* @file text-track-cue-list.js |
|
*/ |
|
|
|
/** |
|
* @typedef {Object} TextTrackCueList~TextTrackCue |
|
* |
|
* @property {string} id |
|
* The unique id for this text track cue |
|
* |
|
* @property {number} startTime |
|
* The start time for this text track cue |
|
* |
|
* @property {number} endTime |
|
* The end time for this text track cue |
|
* |
|
* @property {boolean} pauseOnExit |
|
* Pause when the end time is reached if true. |
|
* |
|
* @see [Spec]{@link https://html.spec.whatwg.org/multipage/embedded-content.html#texttrackcue} |
|
*/ |
|
|
|
/** |
|
* A List of TextTrackCues. |
|
* |
|
* @see [Spec]{@link https://html.spec.whatwg.org/multipage/embedded-content.html#texttrackcuelist} |
|
*/ |
|
var TextTrackCueList =
/*#__PURE__*/
function () {
  /**
   * Create an instance of this class.
   *
   * @param {Array} cues
   *        A list of cues to be initialized with
   */
  function TextTrackCueList(cues) {
    TextTrackCueList.prototype.setCues_.call(this, cues);

    /**
     * @memberof TextTrackCueList
     * @member {number} length
     *         The current number of `TextTrackCue`s in the TextTrackCueList.
     * @instance
     */
    Object.defineProperty(this, 'length', {
      get: function get() {
        return this.length_;
      }
    });
  }

  var proto = TextTrackCueList.prototype;

  /**
   * A setter for cues in this list. Creates getters
   * and an index for the cues.
   *
   * @param {Array} cues
   *        An array of cues to set
   *
   * @private
   */
  proto.setCues_ = function (cues) {
    var oldLength = this.length || 0;

    this.cues_ = cues;
    this.length_ = cues.length;

    var defineProp = function (index) {
      if (!('' + index in this)) {
        Object.defineProperty(this, '' + index, {
          get: function get() {
            return this.cues_[index];
          }
        });
      }
    };

    // Only indices beyond the previous length can be missing a getter.
    for (var i = oldLength; i < cues.length; i++) {
      defineProp.call(this, i);
    }
  };

  /**
   * Get a `TextTrackCue` that is currently in the `TextTrackCueList` by id.
   *
   * @param {string} id
   *        The id of the cue that should be searched for.
   *
   * @return {TextTrackCueList~TextTrackCue|null}
   *         A single cue or null if none was found.
   */
  proto.getCueById = function (id) {
    for (var i = 0; i < this.length; i++) {
      var cue = this[i];

      if (cue.id === id) {
        return cue;
      }
    }

    return null;
  };

  return TextTrackCueList;
}();
|
|
|
/** |
|
* @file track-kinds.js |
|
*/ |
|
|
|
/** |
|
* All possible `VideoTrackKind`s |
|
* |
|
* @see https://html.spec.whatwg.org/multipage/embedded-content.html#dom-videotrack-kind |
|
* @typedef VideoTrack~Kind |
|
* @enum |
|
*/ |
|
// Maps each valid video track kind to itself (identity lookup table).
var VideoTrackKind = {
  'alternative': 'alternative',
  'captions': 'captions',
  'main': 'main',
  'sign': 'sign',
  'subtitles': 'subtitles',
  'commentary': 'commentary'
};
|
/** |
|
* All possible `AudioTrackKind`s |
|
* |
|
* @see https://html.spec.whatwg.org/multipage/embedded-content.html#dom-audiotrack-kind |
|
* @typedef AudioTrack~Kind |
|
* @enum |
|
*/ |
|
|
|
// Maps each valid audio track kind to itself (identity lookup table).
// 'main-desc' needs quoting because of the dash; the rest do not.
var AudioTrackKind = {
  alternative: 'alternative',
  descriptions: 'descriptions',
  main: 'main',
  'main-desc': 'main-desc',
  translation: 'translation',
  commentary: 'commentary'
};
|
/** |
|
* All possible `TextTrackKind`s |
|
* |
|
* @see https://html.spec.whatwg.org/multipage/embedded-content.html#dom-texttrack-kind |
|
* @typedef TextTrack~Kind |
|
* @enum |
|
*/ |
|
|
|
// Maps each valid text track kind to itself; used below as a validation
// lookup (unknown kinds yield undefined).
var TextTrackKind = {
  'subtitles': 'subtitles',
  'captions': 'captions',
  'descriptions': 'descriptions',
  'chapters': 'chapters',
  'metadata': 'metadata'
};
|
/** |
|
* All possible `TextTrackMode`s |
|
* |
|
* @see https://html.spec.whatwg.org/multipage/embedded-content.html#texttrackmode |
|
* @typedef TextTrack~Mode |
|
* @enum |
|
*/ |
|
|
|
// Maps each valid text track mode to itself; used below as a validation
// lookup (unknown modes yield undefined).
var TextTrackMode = {
  'disabled': 'disabled',
  'hidden': 'hidden',
  'showing': 'showing'
};
|
|
|
/** |
|
* A Track class that contains all of the common functionality for {@link AudioTrack}, |
|
* {@link VideoTrack}, and {@link TextTrack}. |
|
* |
|
* > Note: This class should not be used directly |
|
* |
|
* @see {@link https://html.spec.whatwg.org/multipage/embedded-content.html} |
|
* @extends EventTarget |
|
* @abstract |
|
*/ |
|
|
|
var Track =
/*#__PURE__*/
function (_EventTarget) {
  _inheritsLoose(Track, _EventTarget);

  /**
   * Create an instance of this class.
   *
   * @param {Object} [options={}]
   *        Object of option names and values
   *
   * @param {string} [options.kind='']
   *        A valid kind for the track type you are creating.
   *
   * @param {string} [options.id='vjs_track_' + Guid.newGUID()]
   *        A unique id for this Track.
   *
   * @param {string} [options.label='']
   *        The menu label for this track.
   *
   * @param {string} [options.language='']
   *        A valid two character language code.
   *
   * @abstract
   */
  function Track(options) {
    var _this;

    if (options === void 0) {
      options = {};
    }

    _this = _EventTarget.call(this) || this;

    // The canonical property values live in this closure-captured object;
    // the accessors defined below read from it and ignore writes, which is
    // what makes the properties immutable after construction.
    var trackProps = {
      id: options.id || 'vjs_track_' + newGUID(),
      kind: options.kind || '',
      label: options.label || '',
      language: options.language || ''
    };
    /**
     * @memberof Track
     * @member {string} id
     *         The id of this track. Cannot be changed after creation.
     * @instance
     *
     * @readonly
     */

    /**
     * @memberof Track
     * @member {string} kind
     *         The kind of track that this is. Cannot be changed after creation.
     * @instance
     *
     * @readonly
     */

    /**
     * @memberof Track
     * @member {string} label
     *         The label of this track. Cannot be changed after creation.
     * @instance
     *
     * @readonly
     */

    /**
     * @memberof Track
     * @member {string} language
     *         The two letter language code for this track. Cannot be changed after
     *         creation.
     * @instance
     *
     * @readonly
     */

    // `_loop` exists so each pass captures its own `key` binding; a plain
    // `for...in` body would share the single hoisted `var key`.
    var _loop = function _loop(key) {
      Object.defineProperty(_assertThisInitialized(_assertThisInitialized(_this)), key, {
        get: function get() {
          return trackProps[key];
        },
        // writes are silently ignored (read-only property)
        set: function set() {}
      });
    };

    for (var key in trackProps) {
      _loop(key);
    }

    return _this;
  }

  return Track;
}(EventTarget);
|
|
|
/** |
|
* @file url.js |
|
* @module url |
|
*/ |
|
/** |
|
* @typedef {Object} url:URLObject |
|
* |
|
* @property {string} protocol |
|
* The protocol of the url that was parsed. |
|
* |
|
* @property {string} hostname |
|
* The hostname of the url that was parsed. |
|
* |
|
* @property {string} port |
|
* The port of the url that was parsed. |
|
* |
|
* @property {string} pathname |
|
* The pathname of the url that was parsed. |
|
* |
|
* @property {string} search |
|
* The search query of the url that was parsed. |
|
* |
|
* @property {string} hash |
|
* The hash of the url that was parsed. |
|
* |
|
* @property {string} host |
|
* The host of the url that was parsed. |
|
*/ |
|
|
|
/** |
|
* Resolve and parse the elements of a URL. |
|
* |
|
* @function |
|
* @param {String} url |
|
* The url to parse |
|
* |
|
* @return {url:URLObject} |
|
* An object of url details |
|
*/ |
|
|
|
var parseUrl = function parseUrl(url) {
  var props = ['protocol', 'hostname', 'port', 'pathname', 'search', 'hash', 'host']; // add the url to an anchor and let the browser parse the URL

  var a = document.createElement('a');
  a.href = url; // IE8 (and 9?) Fix
  // ie8 doesn't parse the URL correctly until the anchor is actually
  // added to the body, and an innerHTML is needed to trigger the parsing

  var addToBody = a.host === '' && a.protocol !== 'file:';
  var div;

  if (addToBody) {
    div = document.createElement('div');
    div.innerHTML = "<a href=\"" + url + "\"></a>";
    a = div.firstChild; // prevent the div from affecting layout

    div.setAttribute('style', 'display:none; position:absolute;');
    document.body.appendChild(div);
  } // Copy the specific URL properties to a new object
  // This is also needed for IE8 because the anchor loses its
  // properties when it's removed from the dom

  var details = {};

  for (var i = 0; i < props.length; i++) {
    details[props[i]] = a[props[i]];
  } // IE9 adds the port to the host property unlike everyone else. If
  // a port identifier is added for standard ports, strip it.

  if (details.protocol === 'http:') {
    details.host = details.host.replace(/:80$/, '');
  }

  if (details.protocol === 'https:') {
    details.host = details.host.replace(/:443$/, '');
  }

  // Protocol-relative / protocol-less URLs inherit the page's protocol.
  if (!details.protocol) {
    details.protocol = window$1.location.protocol;
  }

  // Remove the temporary anchor container added for the IE8 case above.
  if (addToBody) {
    document.body.removeChild(div);
  }

  return details;
};
|
/** |
|
* Get absolute version of relative URL. Used to tell Flash the correct URL. |
|
* |
|
* @function |
|
* @param {string} url |
|
* URL to make absolute |
|
* |
|
* @return {string} |
|
* Absolute URL |
|
* |
|
* @see http://stackoverflow.com/questions/470832/getting-an-absolute-url-from-a-relative-one-ie6-issue |
|
*/ |
|
|
|
var getAbsoluteURL = function getAbsoluteURL(url) {
  // Already absolute; return it untouched.
  if (url.match(/^https?:\/\//)) {
    return url;
  }

  // Convert to absolute URL. Flash hosted off-site needs an absolute URL.
  // Parsing an anchor tag makes the browser resolve it against the page.
  var div = document.createElement('div');

  div.innerHTML = "<a href=\"" + url + "\">x</a>";
  return div.firstChild.href;
};
|
/** |
|
* Returns the extension of the passed file name. It will return an empty string |
|
* if passed an invalid path. |
|
* |
|
* @function |
|
* @param {string} path |
|
* The fileName path like '/path/to/file.mp4' |
|
* |
|
* @return {string} |
|
* The extension in lower case or an empty string if no |
|
* extension could be found. |
|
*/ |
|
|
|
var getFileExtension = function getFileExtension(path) {
  // Anything that is not a string has no extension.
  if (typeof path !== 'string') {
    return '';
  }

  // Last capture group is the extension without its leading dot; the
  // trailing alternation tolerates slashes or a query string after it.
  var splitPathRe = /^(\/?)([\s\S]*?)((?:\.{1,2}|[^\/]+?)(\.([^\.\/\?]+)))(?:[\/]*|[\?].*)$/i;
  var match = splitPathRe.exec(path);

  return match ? match.pop().toLowerCase() : '';
};
|
/** |
|
* Returns whether the url passed is a cross domain request or not. |
|
* |
|
* @function |
|
* @param {string} url |
|
* The url to check. |
|
* |
|
* @return {boolean} |
|
* Whether it is a cross domain request or not. |
|
*/ |
|
|
|
var isCrossOrigin = function isCrossOrigin(url) {
  var winLoc = window$1.location;
  var urlInfo = parseUrl(url);

  // IE8 protocol relative urls will return ':' for protocol
  var srcProtocol = urlInfo.protocol === ':' ? winLoc.protocol : urlInfo.protocol;

  // Check if url is for another domain/origin by comparing protocol+host.
  // IE8 doesn't know location.origin, so we won't rely on it here
  return srcProtocol + urlInfo.host !== winLoc.protocol + winLoc.host;
};
|
|
|
// Frozen namespace object bundling the URL helpers above (stands in for
// an ES module namespace in this single-file build).
var Url = /*#__PURE__*/Object.freeze({
  parseUrl: parseUrl,
  getAbsoluteURL: getAbsoluteURL,
  getFileExtension: getFileExtension,
  isCrossOrigin: isCrossOrigin
});
|
|
|
var isFunction_1 = isFunction;
var toString$1 = Object.prototype.toString;

/**
 * Determine whether `fn` is callable as a function, including host
 * functions that old IE reports with a non-function toString tag.
 */
function isFunction(fn) {
  var tag = toString$1.call(fn);

  if (tag === '[object Function]') {
    return true;
  }

  if (typeof fn === 'function' && tag !== '[object RegExp]') {
    return true;
  }

  // IE8 and below: a handful of host functions are misreported.
  if (typeof window !== 'undefined') {
    return fn === window.setTimeout || fn === window.alert || fn === window.confirm || fn === window.prompt;
  }

  return false;
}
|
|
|
// CommonJS-style module: `trim_1` is the two-sided trim function itself,
// with `left`/`right` one-sided variants attached as properties.
var trim_1 = createCommonjsModule(function (module, exports) {
  exports = module.exports = trim;

  // Strip whitespace from both ends (ES3-safe String#trim substitute).
  function trim(str) {
    return str.replace(/^\s*|\s*$/g, '');
  }

  // Strip leading whitespace only.
  exports.left = function (str) {
    return str.replace(/^\s*/, '');
  };

  // Strip trailing whitespace only.
  exports.right = function (str) {
    return str.replace(/\s*$/, '');
  };
});
var trim_2 = trim_1.left;
var trim_3 = trim_1.right;
|
|
|
var fnToStr = Function.prototype.toString;
var constructorRegex = /^\s*class\b/;

/**
 * Detect whether `value` is an ES6 `class` by inspecting its source text.
 * Non-functions make `Function.prototype.toString` throw, giving `false`.
 */
var isES6ClassFn = function isES6ClassFunction(value) {
  try {
    return constructorRegex.test(fnToStr.call(value));
  } catch (e) {
    return false; // not a function
  }
};
|
|
|
/**
 * Probe whether `value` is callable by attempting a `toString` on it,
 * while treating ES6 classes as not callable here.
 */
var tryFunctionObject = function tryFunctionToStr(value) {
  try {
    if (!isES6ClassFn(value)) {
      fnToStr.call(value);
      return true;
    }
  } catch (e) {// not a function; fall through
  }

  return false;
};
|
|
|
var toStr = Object.prototype.toString;
var fnClass = '[object Function]';
var genClass = '[object GeneratorFunction]';
var hasToStringTag = typeof Symbol === 'function' && typeof Symbol.toStringTag === 'symbol';

/**
 * Determine whether `value` can be invoked, treating ES6 classes as
 * not callable.
 */
var isCallable = function isCallable(value) {
  if (!value) {
    return false;
  }

  var type = typeof value;

  if (type !== 'function' && type !== 'object') {
    return false;
  }

  // Arrow functions and concise methods carry no `prototype` property.
  if (type === 'function' && !value.prototype) {
    return true;
  }

  // With Symbol.toStringTag the toString tag is spoofable, so probe by
  // calling Function.prototype.toString instead.
  if (hasToStringTag) {
    return tryFunctionObject(value);
  }

  if (isES6ClassFn(value)) {
    return false;
  }

  var strClass = toStr.call(value);

  return strClass === fnClass || strClass === genClass;
};
|
|
|
var toStr$1 = Object.prototype.toString;
var hasOwnProperty = Object.prototype.hasOwnProperty;

/**
 * Invoke `iterator(value, index, array)` for each present element,
 * skipping holes in sparse arrays. `receiver` becomes `this` when given.
 */
var forEachArray = function forEachArray(array, iterator, receiver) {
  for (var i = 0, len = array.length; i < len; i++) {
    // Skip holes in sparse arrays.
    if (!hasOwnProperty.call(array, i)) {
      continue;
    }

    if (receiver == null) {
      iterator(array[i], i, array);
    } else {
      iterator.call(receiver, array[i], i, array);
    }
  }
};
|
|
|
/**
 * Invoke `iterator(char, index, string)` for every character.
 * `receiver` becomes `this` when given.
 */
var forEachString = function forEachString(string, iterator, receiver) {
  var len = string.length;

  // no such thing as a sparse string.
  for (var i = 0; i < len; i++) {
    var ch = string.charAt(i);

    if (receiver == null) {
      iterator(ch, i, string);
    } else {
      iterator.call(receiver, ch, i, string);
    }
  }
};
|
|
|
/**
 * Invoke `iterator(value, key, object)` for each own enumerable
 * property. `receiver` becomes `this` when given.
 */
var forEachObject = function forEachObject(object, iterator, receiver) {
  for (var k in object) {
    // Own properties only; skip anything inherited.
    if (!hasOwnProperty.call(object, k)) {
      continue;
    }

    if (receiver == null) {
      iterator(object[k], k, object);
    } else {
      iterator.call(receiver, object[k], k, object);
    }
  }
};
|
|
|
/**
 * Generic forEach that dispatches to the array, string, or object
 * implementation based on the type of `list`.
 *
 * @throws {TypeError} when `iterator` is not callable.
 */
var forEach = function forEach(list, iterator, thisArg) {
  if (!isCallable(iterator)) {
    throw new TypeError('iterator must be a function');
  }

  // Only honor `thisArg` when explicitly passed, so an explicit
  // `undefined` receiver is indistinguishable from an omitted one here
  // but a third argument of any kind is forwarded as-is.
  var receiver;

  if (arguments.length >= 3) {
    receiver = thisArg;
  }

  if (toStr$1.call(list) === '[object Array]') {
    forEachArray(list, iterator, receiver);
    return;
  }

  if (typeof list === 'string') {
    forEachString(list, iterator, receiver);
    return;
  }

  forEachObject(list, iterator, receiver);
};

var forEach_1 = forEach;
|
|
|
// ES3-safe array check via the toString tag.
var isArray = function isArray(arg) {
  var tag = Object.prototype.toString.call(arg);

  return tag === '[object Array]';
};
|
|
|
/**
 * Parse a raw header blob (as returned by getAllResponseHeaders) into an
 * object keyed by lowercased header name. Repeated headers collect into
 * an array of values.
 */
var parseHeaders = function parseHeaders(headers) {
  var result = {};

  if (!headers) {
    return result;
  }

  forEach_1(trim_1(headers).split('\n'), function (row) {
    var splitAt = row.indexOf(':');
    var key = trim_1(row.slice(0, splitAt)).toLowerCase();
    var value = trim_1(row.slice(splitAt + 1));

    if (typeof result[key] === 'undefined') {
      // First occurrence of this header.
      result[key] = value;
    } else if (isArray(result[key])) {
      // Third or later occurrence: append to the existing list.
      result[key].push(value);
    } else {
      // Second occurrence: promote the single value to a list.
      result[key] = [result[key], value];
    }
  });

  return result;
};
|
|
|
var immutable = extend;
var hasOwnProperty$1 = Object.prototype.hasOwnProperty;

/**
 * Shallow-merge every argument object into a brand-new object, leaving
 * all inputs untouched. Later arguments win on key clashes.
 *
 * @return {Object} the merged copy
 */
function extend() {
  var merged = {};
  var argCount = arguments.length;

  for (var i = 0; i < argCount; i++) {
    var src = arguments[i];

    for (var prop in src) {
      if (hasOwnProperty$1.call(src, prop)) {
        merged[prop] = src[prop];
      }
    }
  }

  return merged;
}
|
|
|
var xhr = createXHR; // main export: the request function itself

// Prefer the environment's XMLHttpRequest; `noop` keeps the `new` below
// from throwing where no constructor exists at all.
createXHR.XMLHttpRequest = window$1.XMLHttpRequest || noop;
// Use XDomainRequest only where XHR lacks CORS support (old IE).
createXHR.XDomainRequest = "withCredentials" in new createXHR.XMLHttpRequest() ? createXHR.XMLHttpRequest : window$1.XDomainRequest;
// Attach verb helpers (xhr.get, xhr.put, ..., xhr.del) that preset
// `options.method`; "delete" is renamed "del" since it's a keyword.
forEachArray$1(["get", "put", "post", "patch", "head", "delete"], function (method) {
  createXHR[method === "delete" ? "del" : method] = function (uri, options, callback) {
    options = initParams(uri, options, callback);
    options.method = method.toUpperCase();
    return _createXHR(options);
  };
});

// Minimal array iteration used above (no receiver or sparse handling).
function forEachArray$1(array, iterator) {
  for (var i = 0; i < array.length; i++) {
    iterator(array[i]);
  }
}
|
|
|
/**
 * Report whether `obj` has no own enumerable properties.
 */
function isEmpty(obj) {
  for (var prop in obj) {
    if (obj.hasOwnProperty(prop)) {
      return false;
    }
  }

  return true;
}
|
|
|
/**
 * Normalize the flexible xhr call signatures into one params object:
 * (uri, callback), (uri, options, callback), or (optionsObject, callback).
 * The callback always ends up on `params.callback`.
 */
function initParams(uri, options, callback) {
  var params;

  if (!isFunction_1(options)) {
    // (uri, options[, callback]) form: merge uri into a copy of options.
    params = immutable(options, {
      uri: uri
    });
  } else {
    // (uri, callback) form: the options slot actually holds the callback.
    callback = options;
    params = typeof uri === "string" ? {
      uri: uri
    } : uri;
  }

  params.callback = callback;
  return params;
}
|
|
|
// Public entry point: normalize the flexible (uri, options, callback)
// argument forms via initParams, then delegate to _createXHR.
function createXHR(uri, options, callback) {
  options = initParams(uri, options, callback);
  return _createXHR(options);
}
|
|
|
/**
 * Build, configure, and send an XMLHttpRequest (or XDomainRequest) from
 * a normalized options object, invoking `options.callback(err, response,
 * body)` exactly once when the request settles.
 *
 * Note: the handler functions below close over `xhr`, `uri`, `method`,
 * etc., which are declared *after* them — that is safe because they only
 * run once the request is in flight.
 */
function _createXHR(options) {
  if (typeof options.callback === "undefined") {
    throw new Error("callback argument missing");
  }

  // Guarantee the user callback fires at most once, no matter how many
  // of the XHR event handlers end up invoked.
  var called = false;

  var callback = function cbOnce(err, response, body) {
    if (!called) {
      called = true;
      options.callback(err, response, body);
    }
  };

  function readystatechange() {
    if (xhr.readyState === 4) {
      // Defer so status/headers are fully populated before reading them.
      setTimeout(loadFunc, 0);
    }
  }

  // Extract the response body, preferring `response`, then text/XML,
  // and JSON-parsing it when requested (falling back to the raw body).
  function getBody() {
    // Chrome with requestType=blob throws errors arround when even testing access to responseText
    var body = undefined;

    if (xhr.response) {
      body = xhr.response;
    } else {
      body = xhr.responseText || getXml(xhr);
    }

    if (isJson) {
      try {
        body = JSON.parse(body);
      } catch (e) {} // keep the unparsed body if it isn't valid JSON
    }

    return body;
  }

  // Normalize any error-ish event into an Error with statusCode 0 and
  // settle the request with the failure response.
  function errorFunc(evt) {
    clearTimeout(timeoutTimer);

    if (!(evt instanceof Error)) {
      evt = new Error("" + (evt || "Unknown XMLHttpRequest Error"));
    }

    evt.statusCode = 0;
    return callback(evt, failureResponse);
  } // will load the data & process the response in a special response object

  function loadFunc() {
    if (aborted) return;
    var status;
    clearTimeout(timeoutTimer);

    if (options.useXDR && xhr.status === undefined) {
      //IE8 CORS GET successful response doesn't have a status field, but body is fine
      status = 200;
    } else {
      // 1223 is normalized to the standard 204
      status = xhr.status === 1223 ? 204 : xhr.status;
    }

    var response = failureResponse;
    var err = null;

    if (status !== 0) {
      response = {
        body: getBody(),
        statusCode: status,
        method: method,
        headers: {},
        url: uri,
        rawRequest: xhr
      };

      if (xhr.getAllResponseHeaders) {
        //remember xhr can in fact be XDR for CORS in IE
        response.headers = parseHeaders(xhr.getAllResponseHeaders());
      }
    } else {
      err = new Error("Internal XMLHttpRequest Error");
    }

    return callback(err, response, response.body);
  }

  // Reuse a caller-supplied xhr when given; otherwise pick XDR for CORS
  // requests (old IE) and plain XHR for everything else.
  var xhr = options.xhr || null;

  if (!xhr) {
    if (options.cors || options.useXDR) {
      xhr = new createXHR.XDomainRequest();
    } else {
      xhr = new createXHR.XMLHttpRequest();
    }
  }

  var key;
  var aborted;
  var uri = xhr.url = options.uri || options.url;
  var method = xhr.method = options.method || "GET";
  var body = options.body || options.data;
  var headers = xhr.headers = options.headers || {};
  var sync = !!options.sync;
  var isJson = false;
  var timeoutTimer;
  // Response object handed to the callback whenever the request never
  // produced a usable status (network error, abort, timeout).
  var failureResponse = {
    body: undefined,
    headers: {},
    statusCode: 0,
    method: method,
    url: uri,
    rawRequest: xhr
  };

  if ("json" in options && options.json !== false) {
    isJson = true;
    headers["accept"] || headers["Accept"] || (headers["Accept"] = "application/json"); //Don't override existing accept header declared by user

    if (method !== "GET" && method !== "HEAD") {
      headers["content-type"] || headers["Content-Type"] || (headers["Content-Type"] = "application/json"); //Don't override existing accept header declared by user

      // json === true means "serialize options.body"; anything else is
      // itself the payload to serialize.
      body = JSON.stringify(options.json === true ? body : options.json);
    }
  }

  xhr.onreadystatechange = readystatechange;
  xhr.onload = loadFunc;
  xhr.onerror = errorFunc; // IE9 must have onprogress be set to a unique function.

  xhr.onprogress = function () {// IE must die
  };

  xhr.onabort = function () {
    aborted = true;
  };

  xhr.ontimeout = errorFunc;
  xhr.open(method, uri, !sync, options.username, options.password); //has to be after open

  if (!sync) {
    xhr.withCredentials = !!options.withCredentials;
  } // Cannot set timeout with sync request
  // not setting timeout on the xhr object, because of old webkits etc. not handling that correctly
  // both npm's request and jquery 1.x use this kind of timeout, so this is being consistent

  if (!sync && options.timeout > 0) {
    timeoutTimer = setTimeout(function () {
      if (aborted) return;
      aborted = true; //IE9 may still call readystatechange

      xhr.abort("timeout");
      var e = new Error("XMLHttpRequest timeout");
      e.code = "ETIMEDOUT";
      errorFunc(e);
    }, options.timeout);
  }

  if (xhr.setRequestHeader) {
    for (key in headers) {
      if (headers.hasOwnProperty(key)) {
        xhr.setRequestHeader(key, headers[key]);
      }
    }
  } else if (options.headers && !isEmpty(options.headers)) {
    // XDomainRequest has no setRequestHeader at all.
    throw new Error("Headers cannot be set on an XDomainRequest object");
  }

  if ("responseType" in options) {
    xhr.responseType = options.responseType;
  }

  if ("beforeSend" in options && typeof options.beforeSend === "function") {
    options.beforeSend(xhr);
  } // Microsoft Edge browser sends "undefined" when send is called with undefined value.
  // XMLHttpRequest spec says to pass null as body to indicate no body
  // See https://github.com/naugtur/xhr/issues/100.

  xhr.send(body || null);
  return xhr;
}
|
|
|
/**
 * Pull the XML document off a finished request, or null when none is
 * usable.
 */
function getXml(xhr) {
  var responseType = xhr.responseType;

  // A document was explicitly requested; trust responseXML.
  if (responseType === "document") {
    return xhr.responseXML;
  }

  if (responseType === "") {
    var xml = xhr.responseXML;

    // Firefox reports XML parse failures via a <parsererror> document
    // rather than a null responseXML.
    var parseFailed = xml && xml.documentElement.nodeName === "parsererror";

    if (!parseFailed) {
      return xml;
    }
  }

  return null;
}
|
|
|
// Shared do-nothing placeholder (used above as the XMLHttpRequest fallback).
function noop() {}
|
|
|
/** |
|
* Takes a webvtt file contents and parses it into cues |
|
* |
|
* @param {string} srcContent |
|
* webVTT file contents |
|
* |
|
* @param {TextTrack} track |
|
* TextTrack to add cues to. Cues come from the srcContent. |
|
* |
|
* @private |
|
*/ |
|
|
|
var parseCues = function parseCues(srcContent, track) {
  var parser = new window$1.WebVTT.Parser(window$1, window$1.vttjs, window$1.WebVTT.StringDecoder());
  var errors = [];

  // Each successfully parsed cue is appended straight onto the track.
  parser.oncue = function (cue) {
    track.addCue(cue);
  };

  // Collect parse errors so they can be reported together below.
  parser.onparsingerror = function (error) {
    errors.push(error);
  };

  // Signal listeners that cue data finished loading once parsing flushes.
  parser.onflush = function () {
    track.trigger({
      type: 'loadeddata',
      target: track
    });
  };

  parser.parse(srcContent);

  // Report any parse errors as one collapsed console group per track.
  if (errors.length > 0) {
    if (window$1.console && window$1.console.groupCollapsed) {
      window$1.console.groupCollapsed("Text Track parsing errors for " + track.src);
    }

    errors.forEach(function (error) {
      return log.error(error);
    });

    if (window$1.console && window$1.console.groupEnd) {
      window$1.console.groupEnd();
    }
  }

  // NOTE(review): flush() runs after the error report above, so errors
  // raised during the flush itself are collected but never logged.
  parser.flush();
};
|
/** |
|
* Load a `TextTrack` from a specified url. |
|
* |
|
* @param {string} src |
|
* Url to load track from. |
|
* |
|
* @param {TextTrack} track |
|
* Track to add cues to. Comes from the content at the end of `url`. |
|
* |
|
* @private |
|
*/ |
|
|
|
|
|
var loadTrack = function loadTrack(src, track) {
  var opts = {
    uri: src
  };
  var crossOrigin = isCrossOrigin(src);

  // Cross-origin sources need the CORS code path in the xhr helper.
  if (crossOrigin) {
    opts.cors = crossOrigin;
  }

  xhr(opts, bind(this, function (err, response, responseBody) {
    if (err) {
      return log.error(err, response);
    }

    track.loaded_ = true; // Make sure that vttjs has loaded, otherwise, wait till it finished loading
    // NOTE: this is only used for the alt/video.novtt.js build

    if (typeof window$1.WebVTT !== 'function') {
      if (track.tech_) {
        // to prevent use before define eslint error, we define loadHandler
        // as a let here
        var loadHandler;

        // If vttjs never loads, stop waiting and unhook the success path.
        var errorHandler = function errorHandler() {
          log.error("vttjs failed to load, stopping trying to process " + track.src);
          track.tech_.off('vttjsloaded', loadHandler);
        };

        // Once vttjs is available, unhook the error path and parse.
        loadHandler = function loadHandler() {
          track.tech_.off('vttjserror', errorHandler);
          return parseCues(responseBody, track);
        };

        track.tech_.one('vttjsloaded', loadHandler);
        track.tech_.one('vttjserror', errorHandler);
      }
    } else {
      parseCues(responseBody, track);
    }
  }));
};
|
/** |
|
* A representation of a single `TextTrack`. |
|
* |
|
* @see [Spec]{@link https://html.spec.whatwg.org/multipage/embedded-content.html#texttrack} |
|
* @extends Track |
|
*/ |
|
|
|
|
|
var TextTrack = |
|
/*#__PURE__*/ |
|
function (_Track) { |
|
_inheritsLoose(TextTrack, _Track); |
|
|
|
/** |
|
* Create an instance of this class. |
|
* |
|
* @param {Object} options={} |
|
* Object of option names and values |
|
* |
|
* @param {Tech} options.tech |
|
* A reference to the tech that owns this TextTrack. |
|
* |
|
* @param {TextTrack~Kind} [options.kind='subtitles'] |
|
* A valid text track kind. |
|
* |
|
* @param {TextTrack~Mode} [options.mode='disabled'] |
|
* A valid text track mode. |
|
* |
|
* @param {string} [options.id='vjs_track_' + Guid.newGUID()] |
|
* A unique id for this TextTrack. |
|
* |
|
* @param {string} [options.label=''] |
|
* The menu label for this track. |
|
* |
|
* @param {string} [options.language=''] |
|
* A valid two character language code. |
|
* |
|
* @param {string} [options.srclang=''] |
|
* A valid two character language code. An alternative, but deprioritized |
|
* version of `options.language` |
|
* |
|
* @param {string} [options.src] |
|
* A url to TextTrack cues. |
|
* |
|
* @param {boolean} [options.default] |
|
* If this track should default to on or off. |
|
*/ |
|
function TextTrack(options) { |
|
var _this; |
|
|
|
if (options === void 0) { |
|
options = {}; |
|
} |
|
|
|
if (!options.tech) { |
|
throw new Error('A tech was not provided.'); |
|
} |
|
|
|
var settings = mergeOptions(options, { |
|
kind: TextTrackKind[options.kind] || 'subtitles', |
|
language: options.language || options.srclang || '' |
|
}); |
|
var mode = TextTrackMode[settings.mode] || 'disabled'; |
|
var default_ = settings.default; |
|
|
|
if (settings.kind === 'metadata' || settings.kind === 'chapters') { |
|
mode = 'hidden'; |
|
} |
|
|
|
_this = _Track.call(this, settings) || this; |
|
_this.tech_ = settings.tech; |
|
_this.cues_ = []; |
|
_this.activeCues_ = []; |
|
var cues = new TextTrackCueList(_this.cues_); |
|
var activeCues = new TextTrackCueList(_this.activeCues_); |
|
var changed = false; |
|
var timeupdateHandler = bind(_assertThisInitialized(_assertThisInitialized(_this)), function () { |
|
// Accessing this.activeCues for the side-effects of updating itself |
|
// due to its nature as a getter function. Do not remove or cues will |
|
// stop updating! |
|
// Use the setter to prevent deletion from uglify (pure_getters rule) |
|
this.activeCues = this.activeCues; |
|
|
|
if (changed) { |
|
this.trigger('cuechange'); |
|
changed = false; |
|
} |
|
}); |
|
|
|
if (mode !== 'disabled') { |
|
_this.tech_.ready(function () { |
|
_this.tech_.on('timeupdate', timeupdateHandler); |
|
}, true); |
|
} |
|
|
|
Object.defineProperties(_assertThisInitialized(_assertThisInitialized(_this)), { |
|
/** |
|
* @memberof TextTrack |
|
* @member {boolean} default |
|
* If this track was set to be on or off by default. Cannot be changed after |
|
* creation. |
|
* @instance |
|
* |
|
* @readonly |
|
*/ |
|
default: { |
|
get: function get() { |
|
return default_; |
|
}, |
|
set: function set() {} |
|
}, |
|
|
|
/** |
|
* @memberof TextTrack |
|
* @member {string} mode |
|
* Set the mode of this TextTrack to a valid {@link TextTrack~Mode}. Will |
|
* not be set if setting to an invalid mode. |
|
* @instance |
|
* |
|
* @fires TextTrack#modechange |
|
*/ |
|
mode: { |
|
get: function get() { |
|
return mode; |
|
}, |
|
set: function set(newMode) { |
|
var _this2 = this; |
|
|
|
if (!TextTrackMode[newMode]) { |
|
return; |
|
} |
|
|
|
mode = newMode; |
|
|
|
if (mode !== 'disabled') { |
|
this.tech_.ready(function () { |
|
_this2.tech_.on('timeupdate', timeupdateHandler); |
|
}, true); |
|
} else { |
|
this.tech_.off('timeupdate', timeupdateHandler); |
|
} |
|
/** |
|
* An event that fires when mode changes on this track. This allows |
|
* the TextTrackList that holds this track to act accordingly. |
|
* |
|
* > Note: This is not part of the spec! |
|
* |
|
* @event TextTrack#modechange |
|
* @type {EventTarget~Event} |
|
*/ |
|
|
|
|
|
this.trigger('modechange'); |
|
} |
|
}, |
|
|
|
/** |
|
* @memberof TextTrack |
|
* @member {TextTrackCueList} cues |
|
* The text track cue list for this TextTrack. |
|
* @instance |
|
*/ |
|
cues: { |
|
get: function get() { |
|
if (!this.loaded_) { |
|
return null; |
|
} |
|
|
|
return cues; |
|
}, |
|
set: function set() {} |
|
}, |
|
|
|
/** |
|
* @memberof TextTrack |
|
* @member {TextTrackCueList} activeCues |
|
* The list text track cues that are currently active for this TextTrack. |
|
* @instance |
|
*/ |
|
activeCues: { |
|
get: function get() { |
|
if (!this.loaded_) { |
|
return null; |
|
} // nothing to do |
|
|
|
|
|
if (this.cues.length === 0) { |
|
return activeCues; |
|
} |
|
|
|
var ct = this.tech_.currentTime(); |
|
var active = []; |
|
|
|
for (var i = 0, l = this.cues.length; i < l; i++) { |
|
var cue = this.cues[i]; |
|
|
|
if (cue.startTime <= ct && cue.endTime >= ct) { |
|
active.push(cue); |
|
} else if (cue.startTime === cue.endTime && cue.startTime <= ct && cue.startTime + 0.5 >= ct) { |
|
active.push(cue); |
|
} |
|
} |
|
|
|
changed = false; |
|
|
|
if (active.length !== this.activeCues_.length) { |
|
changed = true; |
|
} else { |
|
for (var _i = 0; _i < active.length; _i++) { |
|
if (this.activeCues_.indexOf(active[_i]) === -1) { |
|
changed = true; |
|
} |
|
} |
|
} |
|
|
|
this.activeCues_ = active; |
|
activeCues.setCues_(this.activeCues_); |
|
return activeCues; |
|
}, |
|
// /!\ Keep this setter empty (see the timeupdate handler above) |
|
set: function set() {} |
|
} |
|
}); |
|
|
|
if (settings.src) { |
|
_this.src = settings.src; |
|
loadTrack(settings.src, _assertThisInitialized(_assertThisInitialized(_this))); |
|
} else { |
|
_this.loaded_ = true; |
|
} |
|
|
|
return _this; |
|
} |
|
/** |
|
* Add a cue to the internal list of cues. |
|
* |
|
* @param {TextTrack~Cue} cue |
|
* The cue to add to our internal list |
|
*/ |
|
|
|
|
|
var _proto = TextTrack.prototype;

/**
 * Add a cue to the internal list of cues, converting it to a VTTCue when
 * vtt.js is available and evicting it from every other text track first.
 *
 * @param {TextTrack~Cue} originalCue
 *        The cue to add to our internal list
 */
_proto.addCue = function addCue(originalCue) {
  var cue = originalCue;

  // When vtt.js is loaded, normalize foreign cue objects into VTTCue
  // instances so every stored cue shares one implementation.
  if (window$1.vttjs && !(originalCue instanceof window$1.vttjs.VTTCue)) {
    cue = new window$1.vttjs.VTTCue(originalCue.startTime, originalCue.endTime, originalCue.text);

    // Copy over any extra properties the wrapper does not already have.
    for (var prop in originalCue) {
      if (!(prop in cue)) {
        cue[prop] = originalCue[prop];
      }
    }

    // make sure that `id` is copied over
    cue.id = originalCue.id;
    cue.originalCue_ = originalCue;
  }

  // A cue may only belong to one track at a time, so remove it from all
  // of the tech's other text tracks before adopting it here.
  var trackList = this.tech_.textTracks();

  for (var idx = 0; idx < trackList.length; idx++) {
    if (trackList[idx] !== this) {
      trackList[idx].removeCue(cue);
    }
  }

  this.cues_.push(cue);
  this.cues.setCues_(this.cues_);
}
|
/** |
|
* Remove a cue from our internal list |
|
* |
|
* @param {TextTrack~Cue} removeCue |
|
* The cue to remove from our internal list |
|
*/ |
|
; |
|
|
|
_proto.removeCue = function removeCue(_removeCue) {
  // Walk backwards so the splice below cannot disturb unvisited indices.
  for (var idx = this.cues_.length - 1; idx >= 0; idx--) {
    var candidate = this.cues_[idx];

    // Match either the stored cue itself or the VTTCue wrapper that
    // addCue may have created around the caller's original cue.
    if (candidate === _removeCue || (candidate.originalCue_ && candidate.originalCue_ === _removeCue)) {
      this.cues_.splice(idx, 1);
      this.cues.setCues_(this.cues_);
      break;
    }
  }
};
|
|
|
return TextTrack; |
|
}(Track); |
|
/** |
|
* cuechange - One or more cues in the track have become active or stopped being active. |
|
*/ |
|
|
|
|
|
// Events a TextTrack is allowed to fire (consumed by EventTarget).
TextTrack.prototype.allowedEvents_ = {
  cuechange: 'cuechange'
};
|
|
|
/** |
|
* A representation of a single `AudioTrack`. If it is part of an {@link AudioTrackList} |
|
* only one `AudioTrack` in the list will be enabled at a time. |
|
* |
|
* @see [Spec]{@link https://html.spec.whatwg.org/multipage/embedded-content.html#audiotrack} |
|
* @extends Track |
|
*/ |
|
|
|
var AudioTrack =
/*#__PURE__*/
function (_Track) {
  _inheritsLoose(AudioTrack, _Track);

  /**
   * Create an instance of this class.
   *
   * @param {Object} [options={}]
   *        Object of option names and values
   *
   * @param {AudioTrack~Kind} [options.kind='']
   *        A valid audio track kind
   *
   * @param {string} [options.id='vjs_track_' + Guid.newGUID()]
   *        A unique id for this AudioTrack.
   *
   * @param {string} [options.label='']
   *        The menu label for this track.
   *
   * @param {string} [options.language='']
   *        A valid two character language code.
   *
   * @param {boolean} [options.enabled]
   *        If this track is the one that is currently playing. If this track is part of
   *        an {@link AudioTrackList}, only one {@link AudioTrack} will be enabled.
   */
  function AudioTrack(options) {
    var _this;

    if (options === void 0) {
      options = {};
    }

    // Coerce the supplied kind to a known AudioTrack kind, or '' if invalid.
    var settings = mergeOptions(options, {
      kind: AudioTrackKind[options.kind] || ''
    });
    _this = _Track.call(this, settings) || this;
    // Closure-backed state for the `enabled` property defined below.
    var enabled = false;
    /**
     * @memberof AudioTrack
     * @member {boolean} enabled
     *         If this `AudioTrack` is enabled or not. When setting this will
     *         fire {@link AudioTrack#enabledchange} if the state of enabled is changed.
     * @instance
     *
     * @fires AudioTrack#enabledchange
     */

    Object.defineProperty(_assertThisInitialized(_assertThisInitialized(_this)), 'enabled', {
      get: function get() {
        return enabled;
      },
      set: function set(newEnabled) {
        // an invalid or unchanged value
        if (typeof newEnabled !== 'boolean' || newEnabled === enabled) {
          return;
        }

        enabled = newEnabled;
        /**
         * An event that fires when enabled changes on this track. This allows
         * the AudioTrackList that holds this track to act accordingly.
         *
         * > Note: This is not part of the spec! Native tracks will do
         * this internally without an event.
         *
         * @event AudioTrack#enabledchange
         * @type {EventTarget~Event}
         */

        this.trigger('enabledchange');
      }
    }); // if the user sets this track to selected then
    // set selected to that true value otherwise
    // we keep it false

    if (settings.enabled) {
      _this.enabled = settings.enabled;
    }

    // Audio tracks carry no out-of-band data, so they are loaded immediately.
    _this.loaded_ = true;
    return _this;
  }

  return AudioTrack;
}(Track);
|
|
|
/** |
|
* A representation of a single `VideoTrack`. |
|
* |
|
* @see [Spec]{@link https://html.spec.whatwg.org/multipage/embedded-content.html#videotrack} |
|
* @extends Track |
|
*/ |
|
|
|
var VideoTrack =
/*#__PURE__*/
function (_Track) {
  _inheritsLoose(VideoTrack, _Track);

  /**
   * Create an instance of this class.
   *
   * @param {Object} [options={}]
   *        Object of option names and values
   *
   * @param {string} [options.kind='']
   *        A valid {@link VideoTrack~Kind}
   *
   * @param {string} [options.id='vjs_track_' + Guid.newGUID()]
   *        A unique id for this VideoTrack.
   *
   * @param {string} [options.label='']
   *        The menu label for this track.
   *
   * @param {string} [options.language='']
   *        A valid two character language code.
   *
   * @param {boolean} [options.selected]
   *        If this track is the one that is currently playing.
   */
  function VideoTrack(options) {
    var _this;

    if (options === void 0) {
      options = {};
    }

    // Coerce the supplied kind to a known VideoTrack kind, or '' if invalid.
    var settings = mergeOptions(options, {
      kind: VideoTrackKind[options.kind] || ''
    });
    _this = _Track.call(this, settings) || this;
    // Closure-backed state for the `selected` property defined below.
    var selected = false;
    /**
     * @memberof VideoTrack
     * @member {boolean} selected
     *         If this `VideoTrack` is selected or not. When setting this will
     *         fire {@link VideoTrack#selectedchange} if the state of selected changed.
     * @instance
     *
     * @fires VideoTrack#selectedchange
     */

    Object.defineProperty(_assertThisInitialized(_assertThisInitialized(_this)), 'selected', {
      get: function get() {
        return selected;
      },
      set: function set(newSelected) {
        // an invalid or unchanged value
        if (typeof newSelected !== 'boolean' || newSelected === selected) {
          return;
        }

        selected = newSelected;
        /**
         * An event that fires when selected changes on this track. This allows
         * the VideoTrackList that holds this track to act accordingly.
         *
         * > Note: This is not part of the spec! Native tracks will do
         * this internally without an event.
         *
         * @event VideoTrack#selectedchange
         * @type {EventTarget~Event}
         */

        this.trigger('selectedchange');
      }
    }); // if the user sets this track to selected then
    // set selected to that true value otherwise
    // we keep it false

    if (settings.selected) {
      _this.selected = settings.selected;
    }

    return _this;
  }

  return VideoTrack;
}(Track);
|
|
|
/** |
|
* @memberof HTMLTrackElement |
|
* @typedef {HTMLTrackElement~ReadyState} |
|
* @enum {number} |
|
*/ |
|
|
|
// HTMLTrackElement~ReadyState values, mirroring the numeric constants
// defined for native track elements in the HTML spec.
var NONE = 0;
var LOADING = 1;
var LOADED = 2;
var ERROR = 3;
|
/** |
|
* A single track represented in the DOM. |
|
* |
|
* @see [Spec]{@link https://html.spec.whatwg.org/multipage/embedded-content.html#htmltrackelement} |
|
* @extends EventTarget |
|
*/ |
|
|
|
var HTMLTrackElement =
/*#__PURE__*/
function (_EventTarget) {
  _inheritsLoose(HTMLTrackElement, _EventTarget);

  /**
   * Create an instance of this class.
   *
   * @param {Object} options={}
   *        Object of option names and values
   *
   * @param {Tech} options.tech
   *        A reference to the tech that owns this HTMLTrackElement.
   *
   * @param {TextTrack~Kind} [options.kind='subtitles']
   *        A valid text track kind.
   *
   * @param {TextTrack~Mode} [options.mode='disabled']
   *        A valid text track mode.
   *
   * @param {string} [options.id='vjs_track_' + Guid.newGUID()]
   *        A unique id for this TextTrack.
   *
   * @param {string} [options.label='']
   *        The menu label for this track.
   *
   * @param {string} [options.language='']
   *        A valid two character language code.
   *
   * @param {string} [options.srclang='']
   *        A valid two character language code. An alternative, but deprioritized
   *        version of `options.language`
   *
   * @param {string} [options.src]
   *        A url to TextTrack cues.
   *
   * @param {boolean} [options.default]
   *        If this track should default to on or off.
   */
  function HTMLTrackElement(options) {
    var _this;

    if (options === void 0) {
      options = {};
    }

    _this = _EventTarget.call(this) || this;
    // Backing value for the read-only `readyState` property defined below.
    var readyState;
    // The emulated element wraps a real TextTrack and mirrors its fields.
    var track = new TextTrack(options);
    _this.kind = track.kind;
    _this.src = track.src;
    _this.srclang = track.language;
    _this.label = track.label;
    _this.default = track.default;
    Object.defineProperties(_assertThisInitialized(_assertThisInitialized(_this)), {
      /**
       * @memberof HTMLTrackElement
       * @member {HTMLTrackElement~ReadyState} readyState
       *         The current ready state of the track element.
       * @instance
       */
      readyState: {
        get: function get() {
          return readyState;
        }
      },

      /**
       * @memberof HTMLTrackElement
       * @member {TextTrack} track
       *         The underlying TextTrack object.
       * @instance
       *
       */
      track: {
        get: function get() {
          return track;
        }
      }
    });
    readyState = NONE;
    /**
     * @listens TextTrack#loadeddata
     * @fires HTMLTrackElement#load
     */

    // Once the wrapped track's cues are available, flip to LOADED and
    // re-emit the event as a `load` on this element.
    track.addEventListener('loadeddata', function () {
      readyState = LOADED;

      _this.trigger({
        type: 'load',
        target: _assertThisInitialized(_assertThisInitialized(_this))
      });
    });
    return _this;
  }

  return HTMLTrackElement;
}(EventTarget);
|
|
|
// Events the emulated HTMLTrackElement may fire.
HTMLTrackElement.prototype.allowedEvents_ = {
  load: 'load'
};

// Expose the ReadyState constants as statics on the class, matching the
// native HTMLTrackElement interface.
HTMLTrackElement.NONE = NONE;
HTMLTrackElement.LOADING = LOADING;
HTMLTrackElement.LOADED = LOADED;
HTMLTrackElement.ERROR = ERROR;
|
|
|
/* |
|
* This file contains all track properties that are used in |
|
* player.js, tech.js, html5.js and possibly other techs in the future. |
|
*/ |
|
|
|
// Table of the "normal" (spec-defined) track types and the list/track
// classes that implement each of them.
var NORMAL = {
  audio: {
    ListClass: AudioTrackList,
    TrackClass: AudioTrack,
    capitalName: 'Audio'
  },
  video: {
    ListClass: VideoTrackList,
    TrackClass: VideoTrack,
    capitalName: 'Video'
  },
  text: {
    ListClass: TextTrackList,
    TrackClass: TextTrack,
    capitalName: 'Text'
  }
};
// Derive the accessor names, e.g. audio -> audioTracks / audioTracks_.
Object.keys(NORMAL).forEach(function (type) {
  NORMAL[type].getterName = type + "Tracks";
  NORMAL[type].privateName = type + "Tracks_";
});
// Remote (user-added) text tracks and their DOM track elements; these
// spell out getter/private names explicitly since they don't follow the
// simple `<type>Tracks` pattern.
var REMOTE = {
  remoteText: {
    ListClass: TextTrackList,
    TrackClass: TextTrack,
    capitalName: 'RemoteText',
    getterName: 'remoteTextTracks',
    privateName: 'remoteTextTracks_'
  },
  remoteTextEl: {
    ListClass: HtmlTrackElementList,
    TrackClass: HTMLTrackElement,
    capitalName: 'RemoteTextTrackEls',
    getterName: 'remoteTextTrackEls',
    privateName: 'remoteTextTrackEls_'
  }
};
var ALL = mergeOptions(NORMAL, REMOTE);
// NOTE: the `names` arrays are attached after the merge above, so each
// one lists only that table's track-type keys; ALL.names is the union.
REMOTE.names = Object.keys(REMOTE);
NORMAL.names = Object.keys(NORMAL);
ALL.names = [].concat(REMOTE.names).concat(NORMAL.names);

// Placeholder that a bundled vtt.js build may populate; checked by
// Tech#addWebVttScript_ before loading vtt.js from the CDN.
var vtt = {};
|
|
|
/** |
|
* An Object containing a structure like: `{src: 'url', type: 'mimetype'}` or string |
|
* that just contains the src url alone. |
|
* * `var SourceObject = {src: 'http://ex.com/video.mp4', type: 'video/mp4'};` |
|
* `var SourceString = 'http://example.com/some-video.mp4';` |
|
* |
|
* @typedef {Object|string} Tech~SourceObject |
|
* |
|
* @property {string} src |
|
* The url to the source |
|
* |
|
* @property {string} type |
|
* The mime type of the source |
|
*/ |
|
|
|
/** |
|
* A function used by {@link Tech} to create a new {@link TextTrack}. |
|
* |
|
* @private |
|
* |
|
* @param {Tech} self |
|
* An instance of the Tech class. |
|
* |
|
* @param {string} kind |
|
* `TextTrack` kind (subtitles, captions, descriptions, chapters, or metadata) |
|
* |
|
* @param {string} [label] |
|
* Label to identify the text track |
|
* |
|
* @param {string} [language] |
|
* Two letter language abbreviation |
|
* |
|
* @param {Object} [options={}] |
|
* An object with additional text track options |
|
* |
|
* @return {TextTrack} |
|
* The text track that was created. |
|
*/ |
|
|
|
/**
 * A function used by {@link Tech} to create a new {@link TextTrack} and
 * register it on the tech's text track list.
 *
 * @private
 *
 * @param {Tech} self
 *        An instance of the Tech class.
 *
 * @param {string} kind
 *        `TextTrack` kind (subtitles, captions, descriptions, chapters, or metadata)
 *
 * @param {string} [label]
 *        Label to identify the text track
 *
 * @param {string} [language]
 *        Two letter language abbreviation
 *
 * @param {Object} [options={}]
 *        An object with additional text track options
 *
 * @return {TextTrack}
 *         The text track that was created.
 */
function createTrackHelper(self, kind, label, language, options) {
  if (options === void 0) {
    options = {};
  }

  var list = self.textTracks();

  // Fold the individual arguments into the track options; label and
  // language are only set when truthy so empty strings don't override
  // defaults supplied via `options`.
  options.kind = kind;

  if (label) {
    options.label = label;
  }

  if (language) {
    options.language = language;
  }

  options.tech = self;

  var track = new ALL.text.TrackClass(options);
  list.addTrack(track);
  return track;
}
|
/** |
|
* This is the base class for media playback technology controllers, such as |
|
* {@link Flash} and {@link HTML5} |
|
* |
|
* @extends Component |
|
*/ |
|
|
|
|
|
var Tech = |
|
/*#__PURE__*/ |
|
function (_Component) { |
|
_inheritsLoose(Tech, _Component); |
|
|
|
/** |
|
* Create an instance of this Tech. |
|
* |
|
* @param {Object} [options] |
|
* The key/value store of player options. |
|
* |
|
* @param {Component~ReadyCallback} ready |
|
* Callback function to call when the `HTML5` Tech is ready. |
|
*/ |
|
  function Tech(options, ready) {
    var _this;

    if (options === void 0) {
      options = {};
    }

    if (ready === void 0) {
      ready = function ready() {};
    }

    // we don't want the tech to report user activity automatically.
    // This is done manually in addControlsListeners
    options.reportTouchActivity = false;
    _this = _Component.call(this, null, options, ready) || this; // keep track of whether the current source has played at all to
    // implement a very limited played()

    _this.hasStarted_ = false;

    _this.on('playing', function () {
      this.hasStarted_ = true;
    });

    _this.on('loadstart', function () {
      this.hasStarted_ = false;
    });

    // Adopt any pre-built track lists that were handed in via options.
    ALL.names.forEach(function (name) {
      var props = ALL[name];

      if (options && options[props.getterName]) {
        _this[props.privateName] = options[props.getterName];
      }
    }); // Manually track progress in cases where the browser/flash player doesn't report it.

    if (!_this.featuresProgressEvents) {
      _this.manualProgressOn();
    } // Manually track timeupdates in cases where the browser/flash player doesn't report it.


    if (!_this.featuresTimeupdateEvents) {
      _this.manualTimeUpdatesOn();
    }

    // Options may explicitly disable native track support per track type.
    ['Text', 'Audio', 'Video'].forEach(function (track) {
      if (options["native" + track + "Tracks"] === false) {
        _this["featuresNative" + track + "Tracks"] = false;
      }
    });

    // nativeCaptions and nativeTextTracks are synonyms; an explicit
    // boolean on either one wins over the tech's default feature flag.
    if (options.nativeCaptions === false || options.nativeTextTracks === false) {
      _this.featuresNativeTextTracks = false;
    } else if (options.nativeCaptions === true || options.nativeTextTracks === true) {
      _this.featuresNativeTextTracks = true;
    }

    if (!_this.featuresNativeTextTracks) {
      _this.emulateTextTracks();
    }

    // List of remote text tracks flagged for automatic cleanup on source
    // change (see addRemoteTextTrack / cleanupAutoTextTracks).
    _this.autoRemoteTextTracks_ = new ALL.text.ListClass();

    _this.initTrackListeners(); // Turn on component tap events only if not using native controls

    if (!options.nativeControlsForTouch) {
      _this.emitTapEvents();
    }

    // NOTE(review): under minification constructor.name may be mangled or
    // missing, falling back to 'Unknown Tech'.
    if (_this.constructor) {
      _this.name_ = _this.constructor.name || 'Unknown Tech';
    }

    return _this;
  }
|
/** |
|
* A special function to trigger source set in a way that will allow player |
|
* to re-trigger if the player or tech are not ready yet. |
|
* |
|
* @fires Tech#sourceset |
|
* @param {string} src The source string at the time of the source changing. |
|
*/ |
|
|
|
|
|
var _proto = Tech.prototype;

/**
 * A special function to trigger source set in a way that will allow player
 * to re-trigger if the player or tech are not ready yet.
 *
 * @fires Tech#sourceset
 * @param {string} src The source string at the time of the source changing.
 */
_proto.triggerSourceset = function triggerSourceset(src) {
  var tech = this;

  // If the tech is not ready yet, schedule the event again 1ms after
  // `ready` so the player has a chance to attach its listener first.
  if (!this.isReady_) {
    this.one('ready', function () {
      return tech.setTimeout(function () {
        return tech.triggerSourceset(src);
      }, 1);
    });
  }

  /**
   * Fired when the source is set on the tech causing the media element
   * to reload.
   *
   * @see {@link Player#event:sourceset}
   * @event Tech#sourceset
   * @type {EventTarget~Event}
   */
  this.trigger({
    type: 'sourceset',
    src: src
  });
}
|
/* Fallbacks for unsupported event types |
|
================================================================================ */ |
|
|
|
/** |
|
* Polyfill the `progress` event for browsers that don't support it natively. |
|
* |
|
* @see {@link Tech#trackProgress} |
|
*/ |
|
; |
|
|
|
_proto.manualProgressOn = function manualProgressOn() {
  // Keep duration_ in sync so bufferedPercent() has a denominator.
  this.on('durationchange', this.onDurationChange);
  this.manualProgress = true; // Trigger progress watching when a source begins loading

  this.one('ready', this.trackProgress);
}
|
/** |
|
* Turn off the polyfill for `progress` events that was created in |
|
* {@link Tech#manualProgressOn} |
|
*/ |
|
; |
|
|
|
_proto.manualProgressOff = function manualProgressOff() {
  this.manualProgress = false;
  this.stopTrackingProgress();
  // Stop watching duration changes now that progress polling is off.
  this.off('durationchange', this.onDurationChange);
}
|
/** |
|
* This is used to trigger a `progress` event when the buffered percent changes. It |
|
* sets an interval function that will be called every 500 milliseconds to check if the |
|
* buffer end percent has changed. |
|
* |
|
* > This function is called by {@link Tech#manualProgressOn} |
|
* |
|
* @param {EventTarget~Event} event |
|
* The `ready` event that caused this to run. |
|
* |
|
* @listens Tech#ready |
|
* @fires Tech#progress |
|
*/ |
|
; |
|
|
|
_proto.trackProgress = function trackProgress(event) {
  this.stopTrackingProgress();

  // Poll the buffered percentage every 500ms; fire `progress` whenever it
  // changes and stop polling once the source is fully buffered.
  this.progressInterval = this.setInterval(bind(this, function () {
    var percent = this.bufferedPercent();

    if (percent !== this.bufferedPercent_) {
      /**
       * See {@link Player#progress}
       *
       * @event Tech#progress
       * @type {EventTarget~Event}
       */
      this.trigger('progress');
    }

    this.bufferedPercent_ = percent;

    if (percent === 1) {
      this.stopTrackingProgress();
    }
  }), 500);
}
|
/** |
|
* Update our internal duration on a `durationchange` event by calling |
|
* {@link Tech#duration}. |
|
* |
|
* @param {EventTarget~Event} event |
|
* The `durationchange` event that caused this to run. |
|
* |
|
* @listens Tech#durationchange |
|
*/ |
|
; |
|
|
|
_proto.onDurationChange = function onDurationChange(event) {
  // Cache the duration so bufferedPercent() can compute synchronously.
  this.duration_ = this.duration();
}
|
/** |
|
* Get and create a `TimeRange` object for buffering. |
|
* |
|
* @return {TimeRange} |
|
* The time range object that was created. |
|
*/ |
|
; |
|
|
|
_proto.buffered = function buffered() {
  // Base implementation reports nothing buffered; concrete techs override.
  return createTimeRanges(0, 0);
}
|
/** |
|
* Get the percentage of the current video that is currently buffered. |
|
* |
|
* @return {number} |
|
* A number from 0 to 1 that represents the decimal percentage of the |
|
* video that is buffered. |
|
* |
|
*/ |
|
; |
|
|
|
_proto.bufferedPercent = function bufferedPercent$$1() {
  // Delegates to the module-level bufferedPercent() helper using the
  // cached duration_; the function expression is named bufferedPercent$$1
  // so it does not shadow that helper.
  return bufferedPercent(this.buffered(), this.duration_);
}
|
/** |
|
* Turn off the polyfill for `progress` events that was created in |
|
* {@link Tech#manualProgressOn} |
|
* Stop manually tracking progress events by clearing the interval that was set in |
|
* {@link Tech#trackProgress}. |
|
*/ |
|
; |
|
|
|
_proto.stopTrackingProgress = function stopTrackingProgress() {
  // Cancel the 500ms polling loop started by trackProgress().
  this.clearInterval(this.progressInterval);
}
|
/** |
|
* Polyfill the `timeupdate` event for browsers that don't support it. |
|
* |
|
* @see {@link Tech#trackCurrentTime} |
|
*/ |
|
; |
|
|
|
_proto.manualTimeUpdatesOn = function manualTimeUpdatesOn() {
  this.manualTimeUpdates = true;
  // Drive `timeupdate` from play/pause since the tech will not fire it.
  this.on('play', this.trackCurrentTime);
  this.on('pause', this.stopTrackingCurrentTime);
}
|
/** |
|
* Turn off the polyfill for `timeupdate` events that was created in |
|
* {@link Tech#manualTimeUpdatesOn} |
|
*/ |
|
; |
|
|
|
_proto.manualTimeUpdatesOff = function manualTimeUpdatesOff() {
  this.manualTimeUpdates = false;
  this.stopTrackingCurrentTime();
  // Detach the play/pause listeners registered by manualTimeUpdatesOn.
  this.off('play', this.trackCurrentTime);
  this.off('pause', this.stopTrackingCurrentTime);
}
|
/** |
|
* Sets up an interval function to track current time and trigger `timeupdate` every |
|
* 250 milliseconds. |
|
* |
|
* @listens Tech#play |
|
* @triggers Tech#timeupdate |
|
*/ |
|
; |
|
|
|
_proto.trackCurrentTime = function trackCurrentTime() {
  // Never let two polling intervals run at once.
  if (this.currentTimeInterval) {
    this.stopTrackingCurrentTime();
  }

  this.currentTimeInterval = this.setInterval(function () {
    /**
     * Triggered at an interval of 250ms to indicated that time is passing in the video.
     *
     * @event Tech#timeupdate
     * @type {EventTarget~Event}
     */
    this.trigger({
      type: 'timeupdate',
      target: this,
      manuallyTriggered: true
    }); // 42 = 24 fps // 250 is what Webkit uses // FF uses 15
  }, 250);
}
|
/** |
|
* Stop the interval function created in {@link Tech#trackCurrentTime} so that the |
|
* `timeupdate` event is no longer triggered. |
|
* |
|
* @listens {Tech#pause} |
|
*/ |
|
; |
|
|
|
_proto.stopTrackingCurrentTime = function stopTrackingCurrentTime() {
  this.clearInterval(this.currentTimeInterval); // #1002 - if the video ends right before the next timeupdate would happen,
  // the progress bar won't make it all the way to the end

  // Fire one final manual timeupdate so the UI reflects the last position.
  this.trigger({
    type: 'timeupdate',
    target: this,
    manuallyTriggered: true
  });
}
|
/** |
|
* Turn off all event polyfills, clear the `Tech`s {@link AudioTrackList}, |
|
* {@link VideoTrackList}, and {@link TextTrackList}, and dispose of this Tech. |
|
* |
|
* @fires Component#dispose |
|
*/ |
|
; |
|
|
|
_proto.dispose = function dispose() {
  // clear out all tracks because we can't reuse them between techs
  this.clearTracks(NORMAL.names); // Turn off any manual progress or timeupdate tracking

  if (this.manualProgress) {
    this.manualProgressOff();
  }

  if (this.manualTimeUpdates) {
    this.manualTimeUpdatesOff();
  }

  // Let the Component base class finish tearing everything down.
  _Component.prototype.dispose.call(this);
}
|
/** |
|
* Clear out a single `TrackList` or an array of `TrackLists` given their names. |
|
* |
|
* > Note: Techs without source handlers should call this between sources for `video` |
|
* & `audio` tracks. You don't want to use them between tracks! |
|
* |
|
* @param {string[]|string} types |
|
* TrackList names to clear, valid names are `video`, `audio`, and |
|
* `text`. |
|
*/ |
|
; |
|
|
|
_proto.clearTracks = function clearTracks(types) {
  var tech = this;

  // Accept either a single list name or an array of names.
  types = [].concat(types);

  // clear out all tracks because we can't reuse them between techs
  types.forEach(function (type) {
    var list = tech[type + "Tracks"]() || [];

    // Remove from the end so indices stay valid while the list shrinks.
    for (var idx = list.length - 1; idx >= 0; idx--) {
      var track = list[idx];

      // Text tracks must also be dropped from the remote track lists.
      if (type === 'text') {
        tech.removeRemoteTextTrack(track);
      }

      list.removeTrack(track);
    }
  });
}
|
/** |
|
* Remove any TextTracks added via addRemoteTextTrack that are |
|
* flagged for automatic garbage collection |
|
*/ |
|
; |
|
|
|
_proto.cleanupAutoTextTracks = function cleanupAutoTextTracks() {
  var list = this.autoRemoteTextTracks_ || [];

  // Iterate backwards because removeRemoteTextTrack shrinks the list.
  for (var idx = list.length - 1; idx >= 0; idx--) {
    this.removeRemoteTextTrack(list[idx]);
  }
}
|
/** |
|
* Reset the tech, which will removes all sources and reset the internal readyState. |
|
* |
|
* @abstract |
|
*/ |
|
; |
|
|
|
// Intentionally a no-op in the base class; concrete techs override this.
_proto.reset = function reset() {}
|
/** |
|
* Get or set an error on the Tech. |
|
* |
|
* @param {MediaError} [err] |
|
* Error to set on the Tech |
|
* |
|
* @return {MediaError|null} |
|
* The current error object on the tech, or null if there isn't one. |
|
*/ |
|
; |
|
|
|
_proto.error = function error(err) {
  // With an argument this acts as a setter: wrap the value in a
  // MediaError and announce it; undefined means "just read".
  if (err !== undefined) {
    this.error_ = new MediaError(err);
    this.trigger('error');
  }

  return this.error_;
}
|
/** |
|
* Returns the `TimeRange`s that have been played through for the current source. |
|
* |
|
* > NOTE: This implementation is incomplete. It does not track the played `TimeRange`. |
|
* It only checks whether the source has played at all or not. |
|
* |
|
* @return {TimeRange} |
|
* - A single time range if this video has played |
|
* - An empty set of ranges if not. |
|
*/ |
|
; |
|
|
|
_proto.played = function played() {
  // A single [0, 0] range once playback has started; an empty set otherwise.
  // (Intentionally coarse: the base Tech only knows *whether* the source
  // played, not which ranges were played.)
  return this.hasStarted_ ? createTimeRanges(0, 0) : createTimeRanges();
}
|
/** |
|
* Causes a manual time update to occur if {@link Tech#manualTimeUpdatesOn} was |
|
* previously called. |
|
* |
|
* @fires Tech#timeupdate |
|
*/ |
|
; |
|
|
|
_proto.setCurrentTime = function setCurrentTime() {
  // improve the accuracy of manual timeupdates
  // (fires immediately after a seek instead of waiting for the next poll)
  if (this.manualTimeUpdates) {
    /**
     * A manual `timeupdate` event.
     *
     * @event Tech#timeupdate
     * @type {EventTarget~Event}
     */
    this.trigger({
      type: 'timeupdate',
      target: this,
      manuallyTriggered: true
    });
  }
}
|
/** |
|
* Turn on listeners for {@link VideoTrackList}, {@link {AudioTrackList}, and |
|
* {@link TextTrackList} events. |
|
* |
|
* This adds {@link EventTarget~EventListeners} for `addtrack`, and `removetrack`. |
|
* |
|
* @fires Tech#audiotrackchange |
|
* @fires Tech#videotrackchange |
|
* @fires Tech#texttrackchange |
|
*/ |
|
; |
|
|
|
_proto.initTrackListeners = function initTrackListeners() {
  var tech = this;

  /**
   * Triggered when tracks are added or removed on the Tech {@link AudioTrackList}
   *
   * @event Tech#audiotrackchange
   * @type {EventTarget~Event}
   */

  /**
   * Triggered when tracks are added or removed on the Tech {@link VideoTrackList}
   *
   * @event Tech#videotrackchange
   * @type {EventTarget~Event}
   */

  /**
   * Triggered when tracks are added or removed on the Tech {@link TextTrackList}
   *
   * @event Tech#texttrackchange
   * @type {EventTarget~Event}
   */
  NORMAL.names.forEach(function (name) {
    var props = NORMAL[name];
    var list = tech[props.getterName]();

    // Re-emit list mutations as `<type>trackchange` on the tech itself.
    var onListChange = function onListChange() {
      tech.trigger(name + "trackchange");
    };

    list.addEventListener('removetrack', onListChange);
    list.addEventListener('addtrack', onListChange);

    tech.on('dispose', function () {
      list.removeEventListener('removetrack', onListChange);
      list.removeEventListener('addtrack', onListChange);
    });
  });
}
|
/** |
|
* Emulate TextTracks using vtt.js if necessary |
|
* |
|
* @fires Tech#vttjsloaded |
|
* @fires Tech#vttjserror |
|
*/ |
|
; |
|
|
|
_proto.addWebVttScript_ = function addWebVttScript_() {
  var _this5 = this;

  // Nothing to do if vtt.js (or the sentinel set below) is already present.
  if (window$1.WebVTT) {
    return;
  } // Initially, Tech.el_ is a child of a dummy-div wait until the Component system
  // signals that the Tech is ready at which point Tech.el_ is part of the DOM
  // before inserting the WebVTT script


  if (document.body.contains(this.el())) {
    // load via require if available and vtt.js script location was not passed in
    // as an option. novtt builds will turn the above require call into an empty object
    // which will cause this if check to always fail.
    if (!this.options_['vtt.js'] && isPlain(vtt) && Object.keys(vtt).length > 0) {
      this.trigger('vttjsloaded');
      return;
    } // load vtt.js via the script location option or the cdn of no location was
    // passed in


    var script = document.createElement('script');
    script.src = this.options_['vtt.js'] || 'https://vjs.zencdn.net/vttjs/0.14.1/vtt.min.js';

    script.onload = function () {
      /**
       * Fired when vtt.js is loaded.
       *
       * @event Tech#vttjsloaded
       * @type {EventTarget~Event}
       */
      _this5.trigger('vttjsloaded');
    };

    script.onerror = function () {
      /**
       * Fired when vtt.js was not loaded due to an error
       *
       * @event Tech#vttjserror
       * @type {EventTarget~Event}
       */
      _this5.trigger('vttjserror');
    };

    // Drop the handler references on dispose so the detached script tag
    // cannot fire into a destroyed tech.
    this.on('dispose', function () {
      script.onload = null;
      script.onerror = null;
    }); // but have not loaded yet and we set it to true before the inject so that
    // we don't overwrite the injected window.WebVTT if it loads right away

    window$1.WebVTT = true;
    this.el().parentNode.appendChild(script);
  } else {
    // Element not in the DOM yet; retry once the tech is ready.
    this.ready(this.addWebVttScript_);
  }
}
|
/** |
|
* Emulate texttracks |
|
* |
|
*/ |
|
; |
|
|
|
_proto.emulateTextTracks = function emulateTextTracks() {
  var _this6 = this;

  var tracks = this.textTracks();
  var remoteTracks = this.remoteTextTracks();

  // Mirror remote-track additions/removals into the main text track list.
  var handleAddTrack = function handleAddTrack(e) {
    return tracks.addTrack(e.track);
  };

  var handleRemoveTrack = function handleRemoveTrack(e) {
    return tracks.removeTrack(e.track);
  };

  remoteTracks.on('addtrack', handleAddTrack);
  remoteTracks.on('removetrack', handleRemoveTrack);
  this.addWebVttScript_();

  var updateDisplay = function updateDisplay() {
    return _this6.trigger('texttrackchange');
  };

  // Refresh the display, and (re)subscribe to `cuechange` only on tracks
  // that are currently showing.
  var textTracksChanges = function textTracksChanges() {
    updateDisplay();

    for (var i = 0; i < tracks.length; i++) {
      var track = tracks[i];
      track.removeEventListener('cuechange', updateDisplay);

      if (track.mode === 'showing') {
        track.addEventListener('cuechange', updateDisplay);
      }
    }
  };

  textTracksChanges();
  tracks.addEventListener('change', textTracksChanges);
  tracks.addEventListener('addtrack', textTracksChanges);
  tracks.addEventListener('removetrack', textTracksChanges);

  // Tear down every listener wired above when the tech is disposed.
  this.on('dispose', function () {
    remoteTracks.off('addtrack', handleAddTrack);
    remoteTracks.off('removetrack', handleRemoveTrack);
    tracks.removeEventListener('change', textTracksChanges);
    tracks.removeEventListener('addtrack', textTracksChanges);
    tracks.removeEventListener('removetrack', textTracksChanges);

    for (var i = 0; i < tracks.length; i++) {
      var track = tracks[i];
      track.removeEventListener('cuechange', updateDisplay);
    }
  });
}
|
/** |
|
* Create and returns a remote {@link TextTrack} object. |
|
* |
|
* @param {string} kind |
|
* `TextTrack` kind (subtitles, captions, descriptions, chapters, or metadata) |
|
* |
|
* @param {string} [label] |
|
* Label to identify the text track |
|
* |
|
* @param {string} [language] |
|
* Two letter language abbreviation |
|
* |
|
* @return {TextTrack} |
|
* The TextTrack that gets created. |
|
*/ |
|
; |
|
|
|
_proto.addTextTrack = function addTextTrack(kind, label, language) {
  // `kind` is the only required argument.
  if (!kind) {
    throw new Error('TextTrack kind is required but was not provided');
  }

  return createTrackHelper(this, kind, label, language);
}
|
/** |
|
* Create an emulated TextTrack for use by addRemoteTextTrack |
|
* |
|
* This is intended to be overridden by classes that inherit from |
|
* Tech in order to create native or custom TextTracks. |
|
* |
|
* @param {Object} options |
|
* The object should contain the options to initialize the TextTrack with. |
|
* |
|
* @param {string} [options.kind] |
|
* `TextTrack` kind (subtitles, captions, descriptions, chapters, or metadata). |
|
* |
|
* @param {string} [options.label]. |
|
* Label to identify the text track |
|
* |
|
* @param {string} [options.language] |
|
* Two letter language abbreviation. |
|
* |
|
* @return {HTMLTrackElement} |
|
* The track element that gets created. |
|
*/ |
|
; |
|
|
|
_proto.createRemoteTextTrack = function createRemoteTextTrack(options) {
  // Tag the options with this tech before constructing the emulated
  // HTMLTrackElement (subclasses may return a native element instead).
  var track = mergeOptions(options, {
    tech: this
  });
  return new REMOTE.remoteTextEl.TrackClass(track);
}
|
/** |
|
* Creates a remote text track object and returns an html track element. |
|
* |
|
* > Note: This can be an emulated {@link HTMLTrackElement} or a native one. |
|
* |
|
* @param {Object} options |
|
* See {@link Tech#createRemoteTextTrack} for more detailed properties. |
|
* |
|
* @param {boolean} [manualCleanup=true] |
|
* - When false: the TextTrack will be automatically removed from the video |
|
* element whenever the source changes |
|
* - When True: The TextTrack will have to be cleaned up manually |
|
* |
|
* @return {HTMLTrackElement} |
|
* An Html Track Element. |
|
* |
|
* @deprecated The default functionality for this function will be equivalent |
|
* to "manualCleanup=false" in the future. The manualCleanup parameter will |
|
* also be removed. |
|
*/ |
|
; |
|
|
|
_proto.addRemoteTextTrack = function addRemoteTextTrack(options, manualCleanup) {
  var tech = this;

  if (options === void 0) {
    options = {};
  }

  var htmlTrackElement = this.createRemoteTextTrack(options);

  // Anything other than an explicit boolean triggers the deprecation path.
  if (manualCleanup !== true && manualCleanup !== false) {
    // deprecation warning
    log.warn('Calling addRemoteTextTrack without explicitly setting the "manualCleanup" parameter to `true` is deprecated and default to `false` in future version of video.js');
    manualCleanup = true;
  }

  // store HTMLTrackElement and TextTrack to remote list
  this.remoteTextTrackEls().addTrackElement_(htmlTrackElement);
  this.remoteTextTracks().addTrack(htmlTrackElement.track);

  if (manualCleanup !== true) {
    // also track it for automatic removal when the source changes
    this.ready(function () {
      return tech.autoRemoteTextTracks_.addTrack(htmlTrackElement.track);
    });
  }

  return htmlTrackElement;
}
|
/** |
|
* Remove a remote text track from the remote `TextTrackList`. |
|
* |
|
* @param {TextTrack} track |
|
* `TextTrack` to remove from the `TextTrackList` |
|
*/ |
|
; |
|
|
|
_proto.removeRemoteTextTrack = function removeRemoteTextTrack(track) {
  // Find the track element that wraps this TextTrack so both can be removed.
  var trackElement = this.remoteTextTrackEls().getTrackElementByTrack_(track);

  // remove HTMLTrackElement and TextTrack from remote list
  this.remoteTextTrackEls().removeTrackElement_(trackElement);
  this.remoteTextTracks().removeTrack(track);
  this.autoRemoteTextTracks_.removeTrack(track);
}
|
/** |
|
* Gets available media playback quality metrics as specified by the W3C's Media |
|
* Playback Quality API. |
|
* |
|
* @see [Spec]{@link https://wicg.github.io/media-playback-quality} |
|
* |
|
* @return {Object} |
|
* An object with supported media playback quality metrics |
|
* |
|
* @abstract |
|
*/ |
|
; |
|
|
|
_proto.getVideoPlaybackQuality = function getVideoPlaybackQuality() {
  // The base Tech reports no metrics; concrete techs override this.
  var emptyMetrics = {};

  return emptyMetrics;
}
|
/** |
|
* A method to set a poster from a `Tech`. |
|
* |
|
* @abstract |
|
*/ |
|
; |
|
|
|
_proto.setPoster = function setPoster() {} // abstract: intentionally a no-op in the base Tech
|
/** |
|
* A method to check for the presence of the 'playsinline' <video> attribute. |
|
* |
|
* @abstract |
|
*/ |
|
; |
|
|
|
_proto.playsinline = function playsinline() {} // abstract: techs that support 'playsinline' override this
|
/** |
|
* A method to set or unset the 'playsinline' <video> attribute. |
|
* |
|
* @abstract |
|
*/ |
|
; |
|
|
|
_proto.setPlaysinline = function setPlaysinline() {} // abstract: techs that support 'playsinline' override this
|
/** |
|
* Attempt to force override of native audio tracks. |
|
* |
|
* @param {boolean} override - If set to true native audio will be overridden, |
|
* otherwise native audio will potentially be used. |
|
* |
|
* @abstract |
|
*/ |
|
; |
|
|
|
_proto.overrideNativeAudioTracks = function overrideNativeAudioTracks() {} // abstract: no-op here; techs with native audio tracks override
|
/** |
|
* Attempt to force override of native video tracks. |
|
* |
|
* @param {boolean} override - If set to true native video will be overridden, |
|
* otherwise native video will potentially be used. |
|
* |
|
* @abstract |
|
*/ |
|
; |
|
|
|
_proto.overrideNativeVideoTracks = function overrideNativeVideoTracks() {} // abstract: no-op here; techs with native video tracks override
|
/* |
|
* Check if the tech can support the given mime-type. |
|
* |
|
* The base tech does not support any type, but source handlers might |
|
* overwrite this. |
|
* |
|
* @param {string} type |
|
* The mimetype to check for support |
|
* |
|
* @return {string} |
|
* 'probably', 'maybe', or empty string |
|
* |
|
* @see [Spec]{@link https://developer.mozilla.org/en-US/docs/Web/API/HTMLMediaElement/canPlayType} |
|
* |
|
* @abstract |
|
*/ |
|
; |
|
|
|
_proto.canPlayType = function canPlayType() {
  // The base tech supports no types; source handlers may overwrite this.
  var support = '';

  return support;
}
|
/** |
|
* Check if the type is supported by this tech. |
|
* |
|
* The base tech does not support any type, but source handlers might |
|
* overwrite this. |
|
* |
|
* @param {string} type |
|
* The media type to check |
|
* @return {string} Returns the native video element's response |
|
*/ |
|
; |
|
|
|
Tech.canPlayType = function canPlayType() {
  // The base Tech can play nothing; concrete techs override this static.
  var answer = '';

  return answer;
}
|
/** |
|
* Check if the tech can support the given source |
|
* |
|
* @param {Object} srcObj |
|
* The source object |
|
* @param {Object} options |
|
* The options passed to the tech |
|
* @return {string} 'probably', 'maybe', or '' (empty string) |
|
*/ |
|
; |
|
|
|
Tech.canPlaySource = function canPlaySource(srcObj, options) {
  // Delegate to the static type check, using only the source's MIME type.
  return Tech.canPlayType(srcObj.type);
}
|
/* |
|
* Return whether the argument is a Tech or not. |
|
* Can be passed either a Class like `Html5` or an instance like `player.tech_`
|
* |
|
* @param {Object} component |
|
* The item to check |
|
* |
|
* @return {boolean} |
|
* Whether it is a tech or not |
|
* - True if it is a tech |
|
* - False if it is not |
|
*/ |
|
; |
|
|
|
Tech.isTech = function isTech(component) {
  // Accept the Tech class itself, a Tech subclass, or a Tech instance.
  if (component === Tech) {
    return true;
  }

  return component instanceof Tech || component.prototype instanceof Tech;
}
|
/** |
|
* Registers a `Tech` into a shared list for videojs. |
|
* |
|
* @param {string} name |
|
* Name of the `Tech` to register. |
|
* |
|
* @param {Object} tech |
|
* The `Tech` class to register. |
|
*/ |
|
; |
|
|
|
Tech.registerTech = function registerTech(name, tech) {
  if (!Tech.techs_) {
    Tech.techs_ = {};
  }

  if (!Tech.isTech(tech)) {
    throw new Error("Tech " + name + " must be a Tech");
  }

  // Validate the tech being registered, not the base Tech class. The
  // previous checks tested `Tech.canPlayType`/`Tech.canPlaySource`, which
  // are always defined on the base class, so the validation never fired.
  if (!tech.canPlayType) {
    throw new Error('Techs must have a static canPlayType method on them');
  }

  if (!tech.canPlaySource) {
    throw new Error('Techs must have a static canPlaySource method on them');
  }

  name = toTitleCase(name);
  Tech.techs_[name] = tech;

  if (name !== 'Tech') {
    // camel case the techName for use in techOrder
    Tech.defaultTechOrder_.push(name);
  }

  return tech;
}
|
/** |
|
* Get a `Tech` from the shared list by name. |
|
* |
|
* @param {string} name |
|
* `camelCase` or `TitleCase` name of the Tech to get |
|
* |
|
* @return {Tech|undefined} |
|
* The `Tech` or undefined if there was no tech with the name requested. |
|
*/ |
|
; |
|
|
|
Tech.getTech = function getTech(name) {
  if (!name) {
    return;
  }

  name = toTitleCase(name);

  // Prefer the registered tech list.
  var registered = Tech.techs_ && Tech.techs_[name];

  if (registered) {
    return registered;
  }

  // Legacy fallback: techs attached straight to the global videojs object.
  if (window$1 && window$1.videojs && window$1.videojs[name]) {
    log.warn("The " + name + " tech was added to the videojs object when it should be registered using videojs.registerTech(name, tech)");
    return window$1.videojs[name];
  }
};
|
|
|
return Tech; |
|
}(Component); |
|
/** |
|
* Get the {@link VideoTrackList} |
|
* |
|
* @returns {VideoTrackList} |
|
* @method Tech.prototype.videoTracks |
|
*/ |
|
|
|
/** |
|
* Get the {@link AudioTrackList} |
|
* |
|
* @returns {AudioTrackList} |
|
* @method Tech.prototype.audioTracks |
|
*/ |
|
|
|
/** |
|
* Get the {@link TextTrackList} |
|
* |
|
* @returns {TextTrackList} |
|
* @method Tech.prototype.textTracks |
|
*/ |
|
|
|
/** |
|
* Get the remote element {@link TextTrackList} |
|
* |
|
* @returns {TextTrackList} |
|
* @method Tech.prototype.remoteTextTracks |
|
*/ |
|
|
|
/** |
|
* Get the remote element {@link HtmlTrackElementList} |
|
* |
|
* @returns {HtmlTrackElementList} |
|
* @method Tech.prototype.remoteTextTrackEls |
|
*/ |
|
|
|
|
|
ALL.names.forEach(function (name) {
  var props = ALL[name];

  // Lazily create each track list the first time its getter is called.
  Tech.prototype[props.getterName] = function () {
    if (!this[props.privateName]) {
      this[props.privateName] = new props.ListClass();
    }

    return this[props.privateName];
  };
});
|
/** |
|
* List of associated text tracks |
|
* |
|
* @type {TextTrackList} |
|
* @private |
|
* @property Tech#textTracks_ |
|
*/ |
|
|
|
/** |
|
* List of associated audio tracks. |
|
* |
|
* @type {AudioTrackList} |
|
* @private |
|
* @property Tech#audioTracks_ |
|
*/ |
|
|
|
/** |
|
* List of associated video tracks. |
|
* |
|
* @type {VideoTrackList} |
|
* @private |
|
* @property Tech#videoTracks_ |
|
*/ |
|
|
|
/** |
|
* Boolean indicating whether the `Tech` supports volume control. |
|
* |
|
* @type {boolean} |
|
* @default |
|
*/ |
|
|
|
Tech.prototype.featuresVolumeControl = true;
/**
 * Boolean indicating whether the `Tech` supports muting volume.
 *
 * @type {boolean}
 * @default
 */

Tech.prototype.featuresMuteControl = true;
/**
 * Boolean indicating whether the `Tech` supports fullscreen resize control.
 * Resizing plugins using request fullscreen reloads the plugin
 *
 * @type {boolean}
 * @default
 */

Tech.prototype.featuresFullscreenResize = false;
/**
 * Boolean indicating whether the `Tech` supports changing the speed at which the video
 * plays. Examples:
 *   - Set player to play 2x (twice) as fast
 *   - Set player to play 0.5x (half) as fast
 *
 * @type {boolean}
 * @default
 */

Tech.prototype.featuresPlaybackRate = false;
/**
 * Boolean indicating whether the `Tech` supports the `progress` event. This is currently
 * not triggered by video-js-swf. This will be used to determine if
 * {@link Tech#manualProgressOn} should be called.
 *
 * @type {boolean}
 * @default
 */

Tech.prototype.featuresProgressEvents = false;
/**
 * Boolean indicating whether the `Tech` supports the `sourceset` event.
 *
 * A tech should set this to `true` and then use {@link Tech#triggerSourceset}
 * to trigger a {@link Tech#event:sourceset} at the earliest time after getting
 * a new source.
 *
 * @type {boolean}
 * @default
 */

Tech.prototype.featuresSourceset = false;
/**
 * Boolean indicating whether the `Tech` supports the `timeupdate` event. This is currently
 * not triggered by video-js-swf. This will be used to determine if
 * {@link Tech#manualTimeUpdates} should be called.
 *
 * @type {boolean}
 * @default
 */

Tech.prototype.featuresTimeupdateEvents = false;
/**
 * Boolean indicating whether the `Tech` supports the native `TextTrack`s.
 * This will help us integrate with native `TextTrack`s if the browser supports them.
 *
 * @type {boolean}
 * @default
 */

Tech.prototype.featuresNativeTextTracks = false;
|
/** |
|
* A functional mixin for techs that want to use the Source Handler pattern. |
|
* Source handlers are scripts for handling specific formats. |
|
* The source handler pattern is used for adaptive formats (HLS, DASH) that |
|
* manually load video data and feed it into a Source Buffer (Media Source Extensions) |
|
* Example: `Tech.withSourceHandlers.call(MyTech);` |
|
* |
|
* @param {Tech} _Tech |
|
* The tech to add source handler functions to. |
|
* |
|
* @mixes Tech~SourceHandlerAdditions |
|
*/ |
|
|
|
Tech.withSourceHandlers = function (_Tech) { |
|
/** |
|
* Register a source handler |
|
* |
|
* @param {Function} handler |
|
* The source handler class |
|
* |
|
* @param {number} [index] |
|
* Register it at the following index |
|
*/ |
|
_Tech.registerSourceHandler = function (handler, index) {
  var registry = _Tech.sourceHandlers;

  // Lazily create the handler list on first registration.
  if (!registry) {
    registry = _Tech.sourceHandlers = [];
  }

  if (index === undefined) {
    // no position supplied: append to the end of the list
    index = registry.length;
  }

  registry.splice(index, 0, handler);
};
|
/** |
|
* Check if the tech can support the given type. Also checks the |
|
* Techs sourceHandlers. |
|
* |
|
* @param {string} type |
|
* The mimetype to check. |
|
* |
|
* @return {string} |
|
* 'probably', 'maybe', or '' (empty string) |
|
*/ |
|
|
|
|
|
_Tech.canPlayType = function (type) {
  var handlers = _Tech.sourceHandlers || [];

  // Return the first non-empty answer any handler gives.
  for (var i = 0; i < handlers.length; i++) {
    var answer = handlers[i].canPlayType(type);

    if (answer) {
      return answer;
    }
  }

  return '';
};
|
/** |
|
* Returns the first source handler that supports the source. |
|
* |
|
* TODO: Answer question: should 'probably' be prioritized over 'maybe' |
|
* |
|
* @param {Tech~SourceObject} source |
|
* The source object |
|
* |
|
* @param {Object} options |
|
* The options passed to the tech |
|
* |
|
* @return {SourceHandler|null} |
|
* The first source handler that supports the source or null if |
|
* no SourceHandler supports the source |
|
*/ |
|
|
|
|
|
_Tech.selectSourceHandler = function (source, options) {
  var handlers = _Tech.sourceHandlers || [];

  // The first handler claiming any level of support wins.
  for (var i = 0; i < handlers.length; i++) {
    if (handlers[i].canHandleSource(source, options)) {
      return handlers[i];
    }
  }

  return null;
};
|
/** |
|
* Check if the tech can support the given source. |
|
* |
|
* @param {Tech~SourceObject} srcObj |
|
* The source object |
|
* |
|
* @param {Object} options |
|
* The options passed to the tech |
|
* |
|
* @return {string} |
|
* 'probably', 'maybe', or '' (empty string) |
|
*/ |
|
|
|
|
|
_Tech.canPlaySource = function (srcObj, options) {
  // Ask the handler that would be selected for this source, if any.
  var handler = _Tech.selectSourceHandler(srcObj, options);

  return handler ? handler.canHandleSource(srcObj, options) : '';
};
|
/** |
|
* When using a source handler, prefer its implementation of |
|
* any function normally provided by the tech. |
|
*/ |
|
|
|
|
|
var deferrable = ['seekable', 'seeking', 'duration'];
/**
 * A wrapper around {@link Tech#seekable} that will call a `SourceHandler`s seekable
 * function if it exists, with a fallback to the Techs seekable function.
 *
 * @method _Tech.seekable
 */

/**
 * A wrapper around {@link Tech#duration} that will call a `SourceHandler`s duration
 * function if it exists, otherwise it will fallback to the techs duration function.
 *
 * @method _Tech.duration
 */

deferrable.forEach(function (fnName) {
  var techFn = this[fnName];

  if (typeof techFn !== 'function') {
    return;
  }

  // Prefer the source handler's implementation whenever it provides one.
  this[fnName] = function () {
    var handler = this.sourceHandler_;

    if (handler && handler[fnName]) {
      return handler[fnName].apply(handler, arguments);
    }

    return techFn.apply(this, arguments);
  };
}, _Tech.prototype);
|
/** |
|
* Create a function for setting the source using a source object |
|
* and source handlers. |
|
* Should never be called unless a source handler was found. |
|
* |
|
* @param {Tech~SourceObject} source |
|
* A source object with src and type keys |
|
*/ |
|
|
|
_Tech.prototype.setSource = function (source) {
  var handler = _Tech.selectSourceHandler(source, this.options_);

  if (!handler) {
    // Fall back to a native source handler when unsupported sources are
    // deliberately set
    if (_Tech.nativeSourceHandler) {
      handler = _Tech.nativeSourceHandler;
    } else {
      log.error('No source handler found for the current source.');
    }
  }

  // Dispose any existing source handler
  this.disposeSourceHandler();
  this.off('dispose', this.disposeSourceHandler);

  if (handler !== _Tech.nativeSourceHandler) {
    this.currentSource_ = source;
  }

  this.sourceHandler_ = handler.handleSource(source, this, this.options_);
  this.one('dispose', this.disposeSourceHandler);
};
|
/** |
|
* Clean up any existing SourceHandlers and listeners when the Tech is disposed. |
|
* |
|
* @listens Tech#dispose |
|
*/ |
|
|
|
|
|
_Tech.prototype.disposeSourceHandler = function () {
  // A new source is coming in, so clear the audio/video tracks that belong
  // to the currently loaded one.
  if (this.currentSource_) {
    this.clearTracks(['audio', 'video']);
    this.currentSource_ = null;
  }

  // always clean up auto-text tracks
  this.cleanupAutoTextTracks();

  if (this.sourceHandler_) {
    if (this.sourceHandler_.dispose) {
      this.sourceHandler_.dispose();
    }

    this.sourceHandler_ = null;
  }
};
|
}; // The base Tech class needs to be registered as a Component. It is the only |
|
// Tech that can be registered as a Component. |
|
|
|
|
|
Component.registerComponent('Tech', Tech);
// Registering before defaultTechOrder_ is assigned is safe: registerTech
// only pushes names other than 'Tech' onto that list.
Tech.registerTech('Tech', Tech);
/**
 * A list of techs that should be added to techOrder on Players
 *
 * @private
 */

Tech.defaultTechOrder_ = [];
|
|
|
/** |
|
* @file middleware.js |
|
* @module middleware |
|
*/ |
|
// Registered middleware factories, keyed by MIME type ('*' matches any type).
var middlewares = {};
// Per-player cache of instantiated middleware:
// { [playerId]: [[mwFactory, mwInstance], ...] }
var middlewareInstances = {};
// Sentinel a mediator middleware returns to terminate the chain (see mediate).
var TERMINATOR = {};
|
/** |
|
* A middleware object is a plain JavaScript object that has methods that |
|
* match the {@link Tech} methods found in the lists of allowed |
|
* {@link module:middleware.allowedGetters|getters}, |
|
* {@link module:middleware.allowedSetters|setters}, and |
|
* {@link module:middleware.allowedMediators|mediators}. |
|
* |
|
* @typedef {Object} MiddlewareObject |
|
*/ |
|
|
|
/** |
|
* A middleware factory function that should return a |
|
* {@link module:middleware~MiddlewareObject|MiddlewareObject}. |
|
* |
|
* This factory will be called for each player when needed, with the player |
|
* passed in as an argument. |
|
* |
|
* @callback MiddlewareFactory |
|
* @param {Player} player |
|
* A Video.js player. |
|
*/ |
|
|
|
/** |
|
* Define a middleware that the player should use by way of a factory function |
|
* that returns a middleware object. |
|
* |
|
* @param {string} type |
|
* The MIME type to match or `"*"` for all MIME types. |
|
* |
|
* @param {MiddlewareFactory} middleware |
|
* A middleware factory function that will be executed for |
|
* matching types. |
|
*/ |
|
|
|
function use(type, middleware) {
  // Start a new chain for this type if one does not exist yet.
  if (!middlewares[type]) {
    middlewares[type] = [];
  }

  middlewares[type].push(middleware);
}
|
/** |
|
* Asynchronously sets a source using middleware by recursing through any |
|
* matching middlewares and calling `setSource` on each, passing along the |
|
* previous returned value each time. |
|
* |
|
* @param {Player} player |
|
* A {@link Player} instance. |
|
* |
|
* @param {Tech~SourceObject} src |
|
* A source object. |
|
* |
|
* @param {Function} |
|
* The next middleware to run. |
|
*/ |
|
|
|
function setSource(player, src, next) {
  // Defer so the call is always asynchronous, then walk the middleware
  // chain registered for this source's type.
  player.setTimeout(function () {
    return setSourceHelper(src, middlewares[src.type], next, player);
  }, 1);
}
|
/** |
|
* When the tech is set, passes the tech to each middleware's `setTech` method. |
|
* |
|
* @param {Object[]} middleware |
|
* An array of middleware instances. |
|
* |
|
* @param {Tech} tech |
|
* A Video.js tech. |
|
*/ |
|
|
|
function setTech(middleware, tech) {
  // Hand the tech to every middleware instance that cares about it.
  middleware.forEach(function (mw) {
    if (mw.setTech) {
      mw.setTech(tech);
    }
  });
}
|
/** |
|
* Calls a getter on the tech first, through each middleware |
|
* from right to left to the player. |
|
* |
|
* @param {Object[]} middleware |
|
* An array of middleware instances. |
|
* |
|
* @param {Tech} tech |
|
* The current tech. |
|
* |
|
* @param {string} method |
|
* A method name. |
|
* |
|
* @return {Mixed} |
|
* The final value from the tech after middleware has intercepted it. |
|
*/ |
|
|
|
function get(middleware, tech, method) {
  // Start from the tech's value and let each middleware, right-to-left,
  // transform it on the way back to the player.
  var techValue = tech[method]();

  return middleware.reduceRight(middlewareIterator(method), techValue);
}
|
/** |
|
* Takes the argument given to the player and calls the setter method on each |
|
* middleware from left to right to the tech. |
|
* |
|
* @param {Object[]} middleware |
|
* An array of middleware instances. |
|
* |
|
* @param {Tech} tech |
|
* The current tech. |
|
* |
|
* @param {string} method |
|
* A method name. |
|
* |
|
* @param {Mixed} arg |
|
* The value to set on the tech. |
|
* |
|
* @return {Mixed} |
|
* The return value of the `method` of the `tech`. |
|
*/ |
|
|
|
function set$1(middleware, tech, method, arg) {
  // Run the player's argument left-to-right through the middleware, then
  // hand the final value to the tech.
  var techArg = middleware.reduce(middlewareIterator(method), arg);

  return tech[method](techArg);
}
|
/** |
|
* Takes the argument given to the player and calls the `call` version of the |
|
* method on each middleware from left to right. |
|
* |
|
* Then, call the passed in method on the tech and return the result unchanged |
|
* back to the player, through middleware, this time from right to left. |
|
* |
|
* @param {Object[]} middleware |
|
* An array of middleware instances. |
|
* |
|
* @param {Tech} tech |
|
* The current tech. |
|
* |
|
* @param {string} method |
|
* A method name. |
|
* |
|
* @param {Mixed} arg |
|
* The value to set on the tech. |
|
* |
|
* @return {Mixed} |
|
* The return value of the `method` of the `tech`, regardless of the |
|
* return values of middlewares. |
|
*/ |
|
|
|
function mediate(middleware, tech, method, arg) {
  if (arg === void 0) {
    arg = null;
  }

  // e.g. method 'play' -> middleware hook 'callPlay'
  var callMethod = 'call' + toTitleCase(method);
  var mediated = middleware.reduce(middlewareIterator(callMethod), arg);
  var terminated = mediated === TERMINATOR;

  // deprecated. The `null` return value should instead return TERMINATOR to
  // prevent confusion if a techs method actually returns null.
  var returnValue = terminated ? null : tech[method](mediated);

  executeRight(middleware, method, returnValue, terminated);
  return returnValue;
}
|
/** |
|
* Enumeration of allowed getters where the keys are method names. |
|
* |
|
* @type {Object} |
|
*/ |
|
|
|
// Getters whose results flow tech -> middleware -> player (see get()).
var allowedGetters = {
  buffered: 1,
  currentTime: 1,
  duration: 1,
  seekable: 1,
  played: 1,
  paused: 1
};
/**
 * Enumeration of allowed setters where the keys are method names.
 *
 * @type {Object}
 */

// Setters whose argument flows player -> middleware -> tech (see set$1()).
var allowedSetters = {
  setCurrentTime: 1
};
/**
 * Enumeration of allowed mediators where the keys are method names.
 *
 * @type {Object}
 */

// Methods routed through mediate(): middleware may intercept both directions.
var allowedMediators = {
  play: 1,
  pause: 1
};
|
|
|
function middlewareIterator(method) {
  return function (value, mw) {
    // if the previous middleware terminated, pass along the termination
    if (value === TERMINATOR) {
      return TERMINATOR;
    }

    // Middleware without this hook passes the value through unchanged.
    return mw[method] ? mw[method](value) : value;
  };
}
|
|
|
function executeRight(mws, method, value, terminated) {
  // Notify middleware right-to-left of the final value and termination state.
  var i = mws.length;

  while (i--) {
    var mw = mws[i];

    if (mw[method]) {
      mw[method](terminated, value);
    }
  }
}
|
/** |
|
* Clear the middleware cache for a player. |
|
* |
|
* @param {Player} player |
|
* A {@link Player} instance. |
|
*/ |
|
|
|
|
|
function clearCacheForPlayer(player) {
  // Drop every cached middleware instance for this player.
  middlewareInstances[player.id()] = null;
}
|
/** |
|
* { |
|
* [playerId]: [[mwFactory, mwInstance], ...] |
|
* } |
|
* |
|
* @private |
|
*/ |
|
|
|
function getOrCreateFactory(player, mwFactory) {
  var instances = middlewareInstances[player.id()];
  var found = null;

  // No cache for this player yet: create the instance and seed the cache.
  if (instances === undefined || instances === null) {
    found = mwFactory(player);
    middlewareInstances[player.id()] = [[mwFactory, found]];
    return found;
  }

  // Scan the cache for an instance built from this factory.
  for (var idx = 0; idx < instances.length; idx++) {
    var pair = instances[idx];

    if (pair[0] === mwFactory) {
      found = pair[1];
    }
  }

  // Cache miss: instantiate and remember it.
  if (found === null) {
    found = mwFactory(player);
    instances.push([mwFactory, found]);
  }

  return found;
}
|
|
|
// Recursively walks middleware chains for a source:
//  - a string entry forks into the chain registered under that key
//  - a factory entry is instantiated (cached per player) and offered the src
//  - once every chain is exhausted, the '*' chain runs one final pass
//    (lastRun) before `next(src, acc)` is invoked with the selected
//    middleware accumulated in `acc`.
function setSourceHelper(src, middleware, next, player, acc, lastRun) {
  if (src === void 0) {
    src = {};
  }

  if (middleware === void 0) {
    middleware = [];
  }

  if (acc === void 0) {
    acc = [];
  }

  if (lastRun === void 0) {
    lastRun = false;
  }

  var _middleware = middleware,
      mwFactory = _middleware[0],
      mwrest = _middleware.slice(1); // if mwFactory is a string, then we're at a fork in the road

  if (typeof mwFactory === 'string') {
    setSourceHelper(src, middlewares[mwFactory], next, player, acc, lastRun); // if we have an mwFactory, call it with the player to get the mw,
    // then call the mw's setSource method
  } else if (mwFactory) {
    var mw = getOrCreateFactory(player, mwFactory); // if setSource isn't present, implicitly select this middleware

    if (!mw.setSource) {
      acc.push(mw);
      return setSourceHelper(src, mwrest, next, player, acc, lastRun);
    }

    mw.setSource(assign({}, src), function (err, _src) {
      // something happened, try the next middleware on the current level
      // make sure to use the old src
      if (err) {
        return setSourceHelper(src, mwrest, next, player, acc, lastRun);
      } // we've succeeded, now we need to go deeper

      acc.push(mw); // if it's the same type, continue down the current chain
      // otherwise, we want to go down the new chain

      setSourceHelper(_src, src.type === _src.type ? mwrest : middlewares[_src.type], next, player, acc, lastRun);
    });
  } else if (mwrest.length) {
    setSourceHelper(src, mwrest, next, player, acc, lastRun);
  } else if (lastRun) {
    next(src, acc);
  } else {
    setSourceHelper(src, middlewares['*'], next, player, acc, true);
  }
}
|
|
|
/** |
|
* Mimetypes |
|
* |
|
* @see http://hul.harvard.edu/ois/////systems/wax/wax-public-help/mimetypes.htm |
|
* @typedef Mimetypes~Kind |
|
* @enum |
|
*/ |
|
|
|
// Lowercase file extension -> MIME type table consulted by getMimetype().
var MimetypesKind = {
  opus: 'video/ogg',
  ogv: 'video/ogg',
  mp4: 'video/mp4',
  mov: 'video/mp4',
  m4v: 'video/mp4',
  mkv: 'video/x-matroska',
  mp3: 'audio/mpeg',
  aac: 'audio/aac',
  oga: 'audio/ogg',
  m3u8: 'application/x-mpegURL',
  jpg: 'image/jpeg',
  jpeg: 'image/jpeg',
  gif: 'image/gif',
  png: 'image/png',
  svg: 'image/svg+xml',
  webp: 'image/webp'
};
|
/** |
|
* Get the mimetype of a given src url if possible |
|
* |
|
* @param {string} src |
|
* The url to the src |
|
* |
|
* @return {string} |
|
* return the mimetype if it was known or empty string otherwise |
|
*/ |
|
|
|
var getMimetype = function getMimetype(src) {
  if (src === void 0) {
    src = '';
  }

  // Look the (lowercased) file extension up in the known-types table.
  var extension = getFileExtension(src).toLowerCase();

  return MimetypesKind[extension] || '';
};
|
/** |
|
* Find the mime type of a given source string if possible. Uses the player |
|
* source cache. |
|
* |
|
* @param {Player} player |
|
* The player object |
|
* |
|
* @param {string} src |
|
* The source string |
|
* |
|
* @return {string} |
|
* The type that was found |
|
*/ |
|
|
|
var findMimetype = function findMimetype(player, src) {
  if (!src) {
    return '';
  }

  // 1. check for the type in the `source` cache
  var cachedSource = player.cache_.source;

  if (cachedSource.src === src && cachedSource.type) {
    return cachedSource.type;
  }

  // 2. see if we have this source in our `currentSources` cache
  var matching = player.cache_.sources.filter(function (s) {
    return s.src === src;
  });

  if (matching.length) {
    return matching[0].type;
  }

  // 3. look for the src url in source elements and use the type there
  var sourceEls = player.$$('source');

  for (var i = 0; i < sourceEls.length; i++) {
    var el = sourceEls[i];

    if (el.type && el.src && el.src === src) {
      return el.type;
    }
  }

  // 4. finally fallback to our list of mime types based on src url extension
  return getMimetype(src);
};
|
|
|
/** |
|
* @module filter-source |
|
*/ |
|
/** |
|
* Filter out single bad source objects or multiple source objects in an |
|
* array. Also flattens nested source object arrays into a 1 dimensional |
|
* array of source objects. |
|
* |
|
* @param {Tech~SourceObject|Tech~SourceObject[]} src |
|
* The src object to filter |
|
* |
|
* @return {Tech~SourceObject[]} |
|
* An array of sourceobjects containing only valid sources |
|
* |
|
* @private |
|
*/ |
|
|
|
var filterSource = function filterSource(src) {
  // Arrays are filtered recursively and flattened one level per pass.
  if (Array.isArray(src)) {
    var flattened = [];

    src.forEach(function (entry) {
      entry = filterSource(entry);

      if (Array.isArray(entry)) {
        flattened = flattened.concat(entry);
      } else if (isObject(entry)) {
        flattened.push(entry);
      }
    });

    return flattened;
  }

  // A non-empty string becomes a single-element source-object array.
  if (typeof src === 'string' && src.trim()) {
    return [fixSource({
      src: src
    })];
  }

  // A source object with a usable src string passes through as-is.
  if (isObject(src) && typeof src.src === 'string' && src.src && src.src.trim()) {
    return [fixSource(src)];
  }

  // Anything else is invalid and yields an empty array.
  return [];
};
|
/** |
|
* Checks src mimetype, adding it when possible |
|
* |
|
* @param {Tech~SourceObject} src |
|
* The src object to check |
|
* @return {Tech~SourceObject} |
|
* src Object with known type |
|
*/ |
|
|
|
|
|
function fixSource(src) {
  // Only fill in a type when none was provided and one can be guessed
  // from the src url's extension.
  if (!src.type) {
    var guessed = getMimetype(src.src);

    if (guessed) {
      src.type = guessed;
    }
  }

  return src;
}
|
|
|
/** |
|
* The `MediaLoader` is the `Component` that decides which playback technology to load |
|
* when a player is initialized. |
|
* |
|
* @extends Component |
|
*/ |
|
|
|
var MediaLoader =
/*#__PURE__*/
function (_Component) {
  _inheritsLoose(MediaLoader, _Component);

  /**
   * Create an instance of this class.
   *
   * @param {Player} player
   *        The `Player` that this class should attach to.
   *
   * @param {Object} [options]
   *        The key/value store of player options.
   *
   * @param {Component~ReadyCallback} [ready]
   *        The function that is run when this component is ready.
   */
  function MediaLoader(player, options, ready) {
    var _this;

    // MediaLoader has no element
    var options_ = mergeOptions({
      createEl: false
    }, options);
    _this = _Component.call(this, player, options_, ready) || this;

    // If there are no sources when the player is initialized,
    // load the first supported playback technology.
    if (!options.playerOptions.sources || options.playerOptions.sources.length === 0) {
      for (var i = 0, j = options.playerOptions.techOrder; i < j.length; i++) {
        var techName = toTitleCase(j[i]);
        var tech = Tech.getTech(techName);

        // Support old behavior of techs being registered as components.
        // Remove once that deprecated behavior is removed.
        // NOTE: this previously checked `!techName`, which is always truthy
        // here, so the component-registry fallback could never run.
        if (!tech) {
          tech = Component.getComponent(techName);
        }

        // Check if the browser supports this technology
        if (tech && tech.isSupported()) {
          player.loadTech_(techName);
          break;
        }
      }
    } else {
      // Loop through playback technologies (HTML5, Flash) and check for support.
      // Then load the best source.
      // A few assumptions here:
      //   All playback technologies respect preload false.
      player.src(options.playerOptions.sources);
    }

    return _this;
  }

  return MediaLoader;
}(Component);

Component.registerComponent('MediaLoader', MediaLoader);
|
|
|
/**
 * Component which is clickable or keyboard actionable, but is not a
 * native HTML button.
 *
 * @extends Component
 */
var ClickableComponent =
/*#__PURE__*/
function (_Component) {
  _inheritsLoose(ClickableComponent, _Component);

  /**
   * Creates an instance of this class.
   *
   * @param {Player} player
   *        The `Player` that this class should be attached to.
   *
   * @param {Object} [options]
   *        The key/value store of player options.
   */
  function ClickableComponent(player, options) {
    var _this;

    _this = _Component.call(this, player, options) || this;

    // Translate touch interactions into `tap` events so handleClick fires on touch too.
    _this.emitTapEvents();

    // Start out enabled: attaches the click/tap/keydown listeners.
    _this.enable();

    return _this;
  }
  /**
   * Create the `ClickableComponent`s DOM element.
   *
   * @param {string} [tag=div]
   *        The element's node type.
   *
   * @param {Object} [props={}]
   *        An object of properties that should be set on the element.
   *
   * @param {Object} [attributes={}]
   *        An object of attributes that should be set on the element.
   *
   * @return {Element}
   *         The element that gets created.
   */


  var _proto = ClickableComponent.prototype;

  _proto.createEl = function createEl$$1(tag, props, attributes) {
    if (tag === void 0) {
      tag = 'div';
    }

    if (props === void 0) {
      props = {};
    }

    if (attributes === void 0) {
      attributes = {};
    }

    props = assign({
      innerHTML: '<span aria-hidden="true" class="vjs-icon-placeholder"></span>',
      className: this.buildCSSClass(),
      tabIndex: 0
    }, props);

    if (tag === 'button') {
      log.error("Creating a ClickableComponent with an HTML element of " + tag + " is not supported; use a Button instead.");
    } // Add ARIA attributes for clickable element which is not a native HTML button


    attributes = assign({
      role: 'button'
    }, attributes);
    // Remember the tab index so enable()/disable() can toggle keyboard focusability.
    this.tabIndex_ = props.tabIndex;

    var el = _Component.prototype.createEl.call(this, tag, props, attributes);

    this.createControlTextEl(el);
    return el;
  };

  _proto.dispose = function dispose() {
    // remove controlTextEl_ on dispose
    this.controlTextEl_ = null;

    _Component.prototype.dispose.call(this);
  }
  /**
   * Create a control text element on this `ClickableComponent`
   *
   * @param {Element} [el]
   *        Parent element for the control text.
   *
   * @return {Element}
   *         The control text element that gets created.
   */
  ;

  _proto.createControlTextEl = function createControlTextEl(el) {
    this.controlTextEl_ = createEl('span', {
      className: 'vjs-control-text'
    }, {
      // let the screen reader user know that the text of the element may change
      'aria-live': 'polite'
    });

    if (el) {
      el.appendChild(this.controlTextEl_);
    }

    this.controlText(this.controlText_, el);
    return this.controlTextEl_;
  }
  /**
   * Get or set the localize text to use for the controls on the `ClickableComponent`.
   *
   * @param {string} [text]
   *        Control text for element.
   *
   * @param {Element} [el=this.el()]
   *        Element to set the title on.
   *
   * @return {string}
   *         - The control text when getting
   */
  ;

  _proto.controlText = function controlText(text, el) {
    if (el === void 0) {
      el = this.el();
    }

    // Getter form: with no text argument, return the current control text.
    if (text === undefined) {
      return this.controlText_ || 'Need Text';
    }

    var localizedText = this.localize(text);
    // Store the unlocalized text; the DOM gets the localized version.
    this.controlText_ = text;
    textContent(this.controlTextEl_, localizedText);

    if (!this.nonIconControl) {
      // Set title attribute if only an icon is shown
      el.setAttribute('title', localizedText);
    }
  }
  /**
   * Builds the default DOM `className`.
   *
   * @return {string}
   *         The DOM `className` for this object.
   */
  ;

  _proto.buildCSSClass = function buildCSSClass() {
    return "vjs-control vjs-button " + _Component.prototype.buildCSSClass.call(this);
  }
  /**
   * Enable this `ClickableComponent`
   */
  ;

  _proto.enable = function enable() {
    // Guarded so repeated calls do not attach duplicate listeners.
    if (!this.enabled_) {
      this.enabled_ = true;
      this.removeClass('vjs-disabled');
      this.el_.setAttribute('aria-disabled', 'false');

      if (typeof this.tabIndex_ !== 'undefined') {
        this.el_.setAttribute('tabIndex', this.tabIndex_);
      }

      this.on(['tap', 'click'], this.handleClick);
      this.on('keydown', this.handleKeyDown);
    }
  }
  /**
   * Disable this `ClickableComponent`
   */
  ;

  _proto.disable = function disable() {
    this.enabled_ = false;
    this.addClass('vjs-disabled');
    this.el_.setAttribute('aria-disabled', 'true');

    if (typeof this.tabIndex_ !== 'undefined') {
      this.el_.removeAttribute('tabIndex');
    }

    this.off(['tap', 'click'], this.handleClick);
    this.off('keydown', this.handleKeyDown);
  }
  /**
   * Event handler that is called when a `ClickableComponent` receives a
   * `click` or `tap` event.
   *
   * @param {EventTarget~Event} event
   *        The `tap` or `click` event that caused this function to be called.
   *
   * @listens tap
   * @listens click
   * @abstract
   */
  ;

  // Intentionally empty: subclasses override this with their click behavior.
  _proto.handleClick = function handleClick(event) {}
  /**
   * Event handler that is called when a `ClickableComponent` receives a
   * `keydown` event.
   *
   * By default, if the key is Space or Enter, it will trigger a `click` event.
   *
   * @param {EventTarget~Event} event
   *        The `keydown` event that caused this function to be called.
   *
   * @listens keydown
   */
  ;

  _proto.handleKeyDown = function handleKeyDown(event) {
    // Support Space or Enter key operation to fire a click event. Also,
    // prevent the event from propagating through the DOM and triggering
    // Player hotkeys.
    if (keycode.isEventKey(event, 'Space') || keycode.isEventKey(event, 'Enter')) {
      event.preventDefault();
      event.stopPropagation();
      this.trigger('click');
    } else {
      // Pass keypress handling up for unsupported keys
      _Component.prototype.handleKeyDown.call(this, event);
    }
  };

  return ClickableComponent;
}(Component);

Component.registerComponent('ClickableComponent', ClickableComponent);
|
|
|
/**
 * A `ClickableComponent` that handles showing the poster image for the player.
 *
 * @extends ClickableComponent
 */
var PosterImage =
/*#__PURE__*/
function (_ClickableComponent) {
  _inheritsLoose(PosterImage, _ClickableComponent);

  /**
   * Create an instance of this class.
   *
   * @param {Player} player
   *        The `Player` that this class should attach to.
   *
   * @param {Object} [options]
   *        The key/value store of player options.
   */
  function PosterImage(player, options) {
    var _this;

    _this = _ClickableComponent.call(this, player, options) || this;

    // Sync visibility/background with the player's current poster.
    _this.update();

    // Re-render whenever the player's poster changes.
    player.on('posterchange', bind(_assertThisInitialized(_assertThisInitialized(_this)), _this.update));
    return _this;
  }
  /**
   * Clean up and dispose of the `PosterImage`.
   */


  var _proto = PosterImage.prototype;

  _proto.dispose = function dispose() {
    this.player().off('posterchange', this.update);

    _ClickableComponent.prototype.dispose.call(this);
  }
  /**
   * Create the `PosterImage`s DOM element.
   *
   * @return {Element}
   *         The element that gets created.
   */
  ;

  _proto.createEl = function createEl$$1() {
    var el = createEl('div', {
      className: 'vjs-poster',
      // Don't want poster to be tabbable.
      tabIndex: -1
    });
    return el;
  }
  /**
   * An {@link EventTarget~EventListener} for {@link Player#posterchange} events.
   *
   * @listens Player#posterchange
   *
   * @param {EventTarget~Event} [event]
   *        The `Player#posterchange` event that triggered this function.
   */
  ;

  _proto.update = function update(event) {
    var url = this.player().poster();
    this.setSrc(url); // If there's no poster source we should display:none on this component
    // so it's not still clickable or right-clickable

    if (url) {
      this.show();
    } else {
      this.hide();
    }
  }
  /**
   * Set the source of the `PosterImage` depending on the display method.
   *
   * @param {string} url
   *        The URL to the source for the `PosterImage`.
   */
  ;

  _proto.setSrc = function setSrc(url) {
    var backgroundImage = ''; // Any falsy value should stay as an empty string, otherwise
    // this will throw an extra error

    if (url) {
      backgroundImage = "url(\"" + url + "\")";
    }

    this.el_.style.backgroundImage = backgroundImage;
  }
  /**
   * An {@link EventTarget~EventListener} for clicks on the `PosterImage`. See
   * {@link ClickableComponent#handleClick} for instances where this will be triggered.
   *
   * @listens tap
   * @listens click
   * @listens keydown
   *
   * @param {EventTarget~Event} event
   *        The `click`, `tap` or `keydown` event that caused this function to be called.
   */
  ;

  _proto.handleClick = function handleClick(event) {
    // We don't want a click to trigger playback when controls are disabled
    if (!this.player_.controls()) {
      return;
    }

    // Give the tech focus so keyboard interaction keeps working after the click.
    this.player_.tech(true).focus();

    // Clicking the poster toggles play/pause.
    if (this.player_.paused()) {
      silencePromise(this.player_.play());
    } else {
      this.player_.pause();
    }
  };

  return PosterImage;
}(ClickableComponent);

Component.registerComponent('PosterImage', PosterImage);
|
|
|
// Edge/shadow colors used below when building text-track cue textShadow rules.
var darkGray = '#222';
var lightGray = '#ccc';
// Maps the fontFamily keys coming out of textTrackSettings.getValues() to
// concrete CSS font-family values applied to cue text.
var fontMap = {
  monospace: 'monospace',
  sansSerif: 'sans-serif',
  serif: 'serif',
  monospaceSansSerif: '"Andale Mono", "Lucida Console", monospace',
  monospaceSerif: '"Courier New", monospace',
  proportionalSansSerif: 'sans-serif',
  proportionalSerif: 'serif',
  casual: '"Comic Sans MS", Impact, fantasy',
  script: '"Monotype Corsiva", cursive',
  smallcaps: '"Andale Mono", "Lucida Console", monospace, sans-serif'
};
|
/**
 * Construct an rgba color from a given hex color code.
 *
 * Accepts either the 3-digit shorthand form ("#f0e") or the full
 * 6-digit form ("#f604e2"); anything else throws.
 *
 * @param {number} color
 *        Hex number for color, like #f0e or #f604e2.
 *
 * @param {number} opacity
 *        Value for opacity, 0.0 - 1.0.
 *
 * @return {string}
 *         The rgba color that was created, like 'rgba(255, 0, 0, 0.3)'.
 */


function constructColor(color, opacity) {
  var expanded;

  if (color.length === 4) {
    // Shorthand "#f0e": duplicate each hex digit to get the 6-digit form.
    expanded = color.charAt(1) + color.charAt(1) + color.charAt(2) + color.charAt(2) + color.charAt(3) + color.charAt(3);
  } else if (color.length === 7) {
    // Full "#f604e2": just drop the leading "#".
    expanded = color.substring(1);
  } else {
    throw new Error('Invalid color code provided, ' + color + '; must be formatted as e.g. #f0e or #f604e2.');
  }

  // Decode each two-digit channel from hex.
  var red = parseInt(expanded.substring(0, 2), 16);
  var green = parseInt(expanded.substring(2, 4), 16);
  var blue = parseInt(expanded.substring(4, 6), 16);

  return 'rgba(' + red + ',' + green + ',' + blue + ',' + opacity + ')';
}
|
/**
 * Try to update the style of a DOM element. Some style changes will throw an error,
 * particularly in IE8. Those should be noops.
 *
 * @param {Element} el
 *        The DOM element to be styled.
 *
 * @param {string} style
 *        The CSS property on the element that should be styled.
 *
 * @param {string} rule
 *        The style rule that should be applied to the property.
 *
 * @private
 */


function tryUpdateStyle(el, style, rule) {
  try {
    el.style[style] = rule;
  } catch (e) {
    // Assignment rejected by the browser (notably IE8); intentionally a no-op.
  }
}
|
/**
 * The component for displaying text track cues.
 *
 * @extends Component
 */


var TextTrackDisplay =
/*#__PURE__*/
function (_Component) {
  _inheritsLoose(TextTrackDisplay, _Component);

  /**
   * Creates an instance of this class.
   *
   * @param {Player} player
   *        The `Player` that this class should be attached to.
   *
   * @param {Object} [options]
   *        The key/value store of player options.
   *
   * @param {Component~ReadyCallback} [ready]
   *        The function to call when `TextTrackDisplay` is ready.
   */
  function TextTrackDisplay(player, options, ready) {
    var _this;

    _this = _Component.call(this, player, options, ready) || this;
    var updateDisplayHandler = bind(_assertThisInitialized(_assertThisInitialized(_this)), _this.updateDisplay);
    player.on('loadstart', bind(_assertThisInitialized(_assertThisInitialized(_this)), _this.toggleDisplay));
    player.on('texttrackchange', updateDisplayHandler);
    player.on('loadedmetadata', bind(_assertThisInitialized(_assertThisInitialized(_this)), _this.preselectTrack)); // This used to be called during player init, but was causing an error
    // if a track should show by default and the display hadn't loaded yet.
    // Should probably be moved to an external track loader when we support
    // tracks that don't need a display.

    player.ready(bind(_assertThisInitialized(_assertThisInitialized(_this)), function () {
      // When the tech renders text tracks natively, this display stays hidden
      // and no resize/orientation listeners are needed.
      if (player.tech_ && player.tech_.featuresNativeTextTracks) {
        this.hide();
        return;
      }

      // Re-layout cues whenever the player geometry or device orientation changes.
      player.on('fullscreenchange', updateDisplayHandler);
      player.on('playerresize', updateDisplayHandler);
      window$1.addEventListener('orientationchange', updateDisplayHandler);
      player.on('dispose', function () {
        return window$1.removeEventListener('orientationchange', updateDisplayHandler);
      });
      var tracks = this.options_.playerOptions.tracks || [];

      for (var i = 0; i < tracks.length; i++) {
        this.player_.addRemoteTextTrack(tracks[i], true);
      }

      this.preselectTrack();
    }));
    return _this;
  }
  /**
   * Preselect a track following this precedence:
   * - matches the previously selected {@link TextTrack}'s language and kind
   * - matches the previously selected {@link TextTrack}'s language only
   * - is the first default captions track
   * - is the first default descriptions track
   *
   * @listens Player#loadstart
   */


  var _proto = TextTrackDisplay.prototype;

  _proto.preselectTrack = function preselectTrack() {
    // Kinds treated as captions-like for matching purposes.
    var modes = {
      captions: 1,
      subtitles: 1
    };
    var trackList = this.player_.textTracks();
    var userPref = this.player_.cache_.selectedLanguage;
    var firstDesc;
    var firstCaptions;
    var preferredTrack;

    for (var i = 0; i < trackList.length; i++) {
      var track = trackList[i];

      if (userPref && userPref.enabled && userPref.language && userPref.language === track.language && track.kind in modes) {
        // Always choose the track that matches both language and kind
        if (track.kind === userPref.kind) {
          preferredTrack = track; // or choose the first track that matches language
        } else if (!preferredTrack) {
          preferredTrack = track;
        } // clear everything if offTextTrackMenuItem was clicked

      } else if (userPref && !userPref.enabled) {
        preferredTrack = null;
        firstDesc = null;
        firstCaptions = null;
      } else if (track.default) {
        if (track.kind === 'descriptions' && !firstDesc) {
          firstDesc = track;
        } else if (track.kind in modes && !firstCaptions) {
          firstCaptions = track;
        }
      }
    } // The preferredTrack matches the user preference and takes
    // precedence over all the other tracks.
    // So, display the preferredTrack before the first default track
    // and the subtitles/captions track before the descriptions track


    if (preferredTrack) {
      preferredTrack.mode = 'showing';
    } else if (firstCaptions) {
      firstCaptions.mode = 'showing';
    } else if (firstDesc) {
      firstDesc.mode = 'showing';
    }
  }
  /**
   * Turn display of {@link TextTrack}'s from the current state into the other state.
   * There are only two states:
   * - 'shown'
   * - 'hidden'
   *
   * @listens Player#loadstart
   */
  ;

  _proto.toggleDisplay = function toggleDisplay() {
    if (this.player_.tech_ && this.player_.tech_.featuresNativeTextTracks) {
      this.hide();
    } else {
      this.show();
    }
  }
  /**
   * Create the {@link Component}'s DOM element.
   *
   * @return {Element}
   *         The element that was created.
   */
  ;

  _proto.createEl = function createEl() {
    return _Component.prototype.createEl.call(this, 'div', {
      className: 'vjs-text-track-display'
    }, {
      'aria-live': 'off',
      'aria-atomic': 'true'
    });
  }
  /**
   * Clear all displayed {@link TextTrack}s.
   */
  ;

  _proto.clearDisplay = function clearDisplay() {
    // Processing an empty cue list removes everything currently rendered.
    if (typeof window$1.WebVTT === 'function') {
      window$1.WebVTT.processCues(window$1, [], this.el_);
    }
  }
  /**
   * Update the displayed TextTrack when a {@link Player#texttrackchange} or
   * a {@link Player#fullscreenchange} is fired.
   *
   * @listens Player#texttrackchange
   * @listens Player#fullscreenchange
   */
  ;

  _proto.updateDisplay = function updateDisplay() {
    var tracks = this.player_.textTracks();
    this.clearDisplay(); // Track display prioritization model: if multiple tracks are 'showing',
    // display the first 'subtitles' or 'captions' track which is 'showing',
    // otherwise display the first 'descriptions' track which is 'showing'

    var descriptionsTrack = null;
    var captionsSubtitlesTrack = null;
    var i = tracks.length;

    // Iterate backwards so that, for multiple 'showing' tracks,
    // the lowest-index one ends up winning.
    while (i--) {
      var track = tracks[i];

      if (track.mode === 'showing') {
        if (track.kind === 'descriptions') {
          descriptionsTrack = track;
        } else {
          captionsSubtitlesTrack = track;
        }
      }
    }

    if (captionsSubtitlesTrack) {
      if (this.getAttribute('aria-live') !== 'off') {
        this.setAttribute('aria-live', 'off');
      }

      this.updateForTrack(captionsSubtitlesTrack);
    } else if (descriptionsTrack) {
      // Descriptions are meant to be announced, hence 'assertive'.
      if (this.getAttribute('aria-live') !== 'assertive') {
        this.setAttribute('aria-live', 'assertive');
      }

      this.updateForTrack(descriptionsTrack);
    }
  }
  /**
   * Render the active cues of a {@link TextTrack} into this display and apply
   * the user's text-track style overrides to each rendered cue.
   *
   * @param {TextTrack} track
   *        Text track whose active cues should be displayed.
   */
  ;

  _proto.updateForTrack = function updateForTrack(track) {
    if (typeof window$1.WebVTT !== 'function' || !track.activeCues) {
      return;
    }

    var cues = [];

    for (var _i = 0; _i < track.activeCues.length; _i++) {
      cues.push(track.activeCues[_i]);
    }

    window$1.WebVTT.processCues(window$1, cues, this.el_);

    // Without a TextTrackSettings component there are no user overrides to apply.
    if (!this.player_.textTrackSettings) {
      return;
    }

    var overrides = this.player_.textTrackSettings.getValues();
    var i = cues.length;

    while (i--) {
      var cue = cues[i];

      if (!cue) {
        continue;
      }

      // displayState is the DOM node WebVTT rendered for this cue.
      var cueDiv = cue.displayState;

      if (overrides.color) {
        cueDiv.firstChild.style.color = overrides.color;
      }

      if (overrides.textOpacity) {
        tryUpdateStyle(cueDiv.firstChild, 'color', constructColor(overrides.color || '#fff', overrides.textOpacity));
      }

      if (overrides.backgroundColor) {
        cueDiv.firstChild.style.backgroundColor = overrides.backgroundColor;
      }

      if (overrides.backgroundOpacity) {
        tryUpdateStyle(cueDiv.firstChild, 'backgroundColor', constructColor(overrides.backgroundColor || '#000', overrides.backgroundOpacity));
      }

      if (overrides.windowColor) {
        if (overrides.windowOpacity) {
          tryUpdateStyle(cueDiv, 'backgroundColor', constructColor(overrides.windowColor, overrides.windowOpacity));
        } else {
          cueDiv.style.backgroundColor = overrides.windowColor;
        }
      }

      if (overrides.edgeStyle) {
        if (overrides.edgeStyle === 'dropshadow') {
          cueDiv.firstChild.style.textShadow = "2px 2px 3px " + darkGray + ", 2px 2px 4px " + darkGray + ", 2px 2px 5px " + darkGray;
        } else if (overrides.edgeStyle === 'raised') {
          cueDiv.firstChild.style.textShadow = "1px 1px " + darkGray + ", 2px 2px " + darkGray + ", 3px 3px " + darkGray;
        } else if (overrides.edgeStyle === 'depressed') {
          cueDiv.firstChild.style.textShadow = "1px 1px " + lightGray + ", 0 1px " + lightGray + ", -1px -1px " + darkGray + ", 0 -1px " + darkGray;
        } else if (overrides.edgeStyle === 'uniform') {
          cueDiv.firstChild.style.textShadow = "0 0 4px " + darkGray + ", 0 0 4px " + darkGray + ", 0 0 4px " + darkGray + ", 0 0 4px " + darkGray;
        }
      }

      if (overrides.fontPercent && overrides.fontPercent !== 1) {
        var fontSize = window$1.parseFloat(cueDiv.style.fontSize);
        cueDiv.style.fontSize = fontSize * overrides.fontPercent + 'px';
        // Re-anchor the scaled cue to the bottom so it stays inside the player.
        cueDiv.style.height = 'auto';
        cueDiv.style.top = 'auto';
        cueDiv.style.bottom = '2px';
      }

      if (overrides.fontFamily && overrides.fontFamily !== 'default') {
        if (overrides.fontFamily === 'small-caps') {
          cueDiv.firstChild.style.fontVariant = 'small-caps';
        } else {
          cueDiv.firstChild.style.fontFamily = fontMap[overrides.fontFamily];
        }
      }
    }
  };

  return TextTrackDisplay;
}(Component);

Component.registerComponent('TextTrackDisplay', TextTrackDisplay);
|
|
|
/**
 * A loading spinner for use during waiting/loading events.
 *
 * @extends Component
 */


var LoadingSpinner =
/*#__PURE__*/
function (_Component) {
  _inheritsLoose(LoadingSpinner, _Component);

  function LoadingSpinner() {
    return _Component.apply(this, arguments) || this;
  }

  var _proto = LoadingSpinner.prototype;

  /**
   * Create the `LoadingSpinner`s DOM element.
   *
   * @return {Element}
   *         The dom element that gets created.
   */
  _proto.createEl = function createEl$$1() {
    var isAudio = this.player_.isAudio();
    var playerType = this.localize(isAudio ? 'Audio Player' : 'Video Player');
    // Screen-reader-only text describing what is loading; the spinner
    // itself is purely visual.
    var controlText = createEl('span', {
      className: 'vjs-control-text',
      innerHTML: this.localize('{1} is loading.', [playerType])
    });

    var el = _Component.prototype.createEl.call(this, 'div', {
      className: 'vjs-loading-spinner',
      dir: 'ltr'
    });

    el.appendChild(controlText);
    return el;
  };

  return LoadingSpinner;
}(Component);

Component.registerComponent('LoadingSpinner', LoadingSpinner);
|
|
|
/**
 * Base class for all buttons.
 *
 * @extends ClickableComponent
 */


var Button =
/*#__PURE__*/
function (_ClickableComponent) {
  _inheritsLoose(Button, _ClickableComponent);

  function Button() {
    return _ClickableComponent.apply(this, arguments) || this;
  }

  var _proto = Button.prototype;

  /**
   * Create the `Button`s DOM element.
   *
   * @param {string} [tag="button"]
   *        The element's node type. This argument is IGNORED: no matter what
   *        is passed, it will always create a `button` element.
   *
   * @param {Object} [props={}]
   *        An object of properties that should be set on the element.
   *
   * @param {Object} [attributes={}]
   *        An object of attributes that should be set on the element.
   *
   * @return {Element}
   *         The element that gets created.
   */
  _proto.createEl = function createEl(tag, props, attributes) {
    if (props === void 0) {
      props = {};
    }

    if (attributes === void 0) {
      attributes = {};
    }

    // Always a real <button>, regardless of what the caller asked for.
    tag = 'button';
    props = assign({
      innerHTML: '<span aria-hidden="true" class="vjs-icon-placeholder"></span>',
      className: this.buildCSSClass()
    }, props); // Add attributes for button element

    attributes = assign({
      // Necessary since the default button type is "submit"
      type: 'button'
    }, attributes);
    // Calls Component's createEl directly (not ClickableComponent's), skipping
    // the role/tabIndex handling that a native button does not need.
    var el = Component.prototype.createEl.call(this, tag, props, attributes);
    this.createControlTextEl(el);
    return el;
  }
  /**
   * Add a child `Component` inside of this `Button`.
   *
   * @param {string|Component} child
   *        The name or instance of a child to add.
   *
   * @param {Object} [options={}]
   *        The key/value store of options that will get passed to children of
   *        the child.
   *
   * @return {Component}
   *         The `Component` that gets added as a child. When using a string the
   *         `Component` will get created by this process.
   *
   * @deprecated since version 5
   */
  ;

  _proto.addChild = function addChild(child, options) {
    if (options === void 0) {
      options = {};
    }

    var className = this.constructor.name;
    log.warn("Adding an actionable (user controllable) child to a Button (" + className + ") is not supported; use a ClickableComponent instead."); // Avoid the error message generated by ClickableComponent's addChild method

    return Component.prototype.addChild.call(this, child, options);
  }
  /**
   * Enable the `Button` element so that it can be activated or clicked. Use this with
   * {@link Button#disable}.
   */
  ;

  _proto.enable = function enable() {
    _ClickableComponent.prototype.enable.call(this);

    // Native buttons are disabled via the `disabled` attribute as well.
    this.el_.removeAttribute('disabled');
  }
  /**
   * Disable the `Button` element so that it cannot be activated or clicked. Use this with
   * {@link Button#enable}.
   */
  ;

  _proto.disable = function disable() {
    _ClickableComponent.prototype.disable.call(this);

    this.el_.setAttribute('disabled', 'disabled');
  }
  /**
   * This gets called when a `Button` has focus and `keydown` is triggered via a key
   * press.
   *
   * @param {EventTarget~Event} event
   *        The event that caused this function to get called.
   *
   * @listens keydown
   */
  ;

  _proto.handleKeyDown = function handleKeyDown(event) {
    // Ignore Space or Enter key operation, which is handled by the browser for
    // a button - though not for its super class, ClickableComponent. Also,
    // prevent the event from propagating through the DOM and triggering Player
    // hotkeys. We do not preventDefault here because we _want_ the browser to
    // handle it.
    if (keycode.isEventKey(event, 'Space') || keycode.isEventKey(event, 'Enter')) {
      event.stopPropagation();
      return;
    } // Pass keypress handling up for unsupported keys


    _ClickableComponent.prototype.handleKeyDown.call(this, event);
  };

  return Button;
}(ClickableComponent);

Component.registerComponent('Button', Button);
|
|
|
/**
 * The initial play button that shows before the video has played. The hiding of the
 * `BigPlayButton` get done via CSS and `Player` states.
 *
 * @extends Button
 */


var BigPlayButton =
/*#__PURE__*/
function (_Button) {
  _inheritsLoose(BigPlayButton, _Button);

  function BigPlayButton(player, options) {
    var _this;

    _this = _Button.call(this, player, options) || this;
    // Tracks whether the most recent activation came from the mouse
    // (set in handleMouseDown, cleared in handleKeyDown).
    _this.mouseused_ = false;

    _this.on('mousedown', _this.handleMouseDown);

    return _this;
  }
  /**
   * Builds the default DOM `className`.
   *
   * @return {string}
   *         The DOM `className` for this object. Always returns 'vjs-big-play-button'.
   */


  var _proto = BigPlayButton.prototype;

  _proto.buildCSSClass = function buildCSSClass() {
    return 'vjs-big-play-button';
  }
  /**
   * This gets called when a `BigPlayButton` "clicked". See {@link ClickableComponent}
   * for more detailed information on what a click can be.
   *
   * @param {EventTarget~Event} event
   *        The `keydown`, `tap`, or `click` event that caused this function to be
   *        called.
   *
   * @listens tap
   * @listens click
   */
  ;

  _proto.handleClick = function handleClick(event) {
    var playPromise = this.player_.play(); // exit early if clicked via the mouse

    // NOTE(review): clientX/clientY are truthy-checked here, so a genuine click
    // at the exact (0, 0) viewport corner would fall through to the
    // keyboard-activation path below — confirm whether that is intended.
    if (this.mouseused_ && event.clientX && event.clientY) {
      silencePromise(playPromise);
      this.player_.tech(true).focus();
      return;
    }

    var cb = this.player_.getChild('controlBar');
    var playToggle = cb && cb.getChild('playToggle');

    // With no play toggle to move keyboard focus to, focus the tech instead.
    if (!playToggle) {
      this.player_.tech(true).focus();
      return;
    }

    var playFocus = function playFocus() {
      return playToggle.focus();
    };

    // Move focus once playback actually starts, or after a tick as a fallback
    // when play() did not return a promise.
    if (isPromise(playPromise)) {
      playPromise.then(playFocus, function () {});
    } else {
      this.setTimeout(playFocus, 1);
    }
  };

  _proto.handleKeyDown = function handleKeyDown(event) {
    // Keyboard activation: clear the mouse flag before delegating to Button.
    this.mouseused_ = false;

    _Button.prototype.handleKeyDown.call(this, event);
  };

  _proto.handleMouseDown = function handleMouseDown(event) {
    this.mouseused_ = true;
  };

  return BigPlayButton;
}(Button);
/**
 * The text that should display over the `BigPlayButton`s controls. Added to for localization.
 *
 * @type {string}
 * @private
 */


BigPlayButton.prototype.controlText_ = 'Play Video';
Component.registerComponent('BigPlayButton', BigPlayButton);
|
|
|
/**
 * The `CloseButton` is a `{@link Button}` that fires a `close` event when
 * it gets clicked.
 *
 * @extends Button
 */


var CloseButton =
/*#__PURE__*/
function (_Button) {
  _inheritsLoose(CloseButton, _Button);

  /**
   * Creates an instance of this class.
   *
   * @param {Player} player
   *        The `Player` that this class should be attached to.
   *
   * @param {Object} [options]
   *        The key/value store of player options.
   */
  function CloseButton(player, options) {
    var _this;

    _this = _Button.call(this, player, options) || this;

    // Allow the accessible label to be overridden via options.controlText;
    // defaults to the localized "Close".
    _this.controlText(options && options.controlText || _this.localize('Close'));

    return _this;
  }
  /**
   * Builds the default DOM `className`.
   *
   * @return {string}
   *         The DOM `className` for this object.
   */


  var _proto = CloseButton.prototype;

  _proto.buildCSSClass = function buildCSSClass() {
    return "vjs-close-button " + _Button.prototype.buildCSSClass.call(this);
  }
  /**
   * This gets called when a `CloseButton` gets clicked. See
   * {@link ClickableComponent#handleClick} for more information on when
   * this will be triggered
   *
   * @param {EventTarget~Event} event
   *        The `keydown`, `tap`, or `click` event that caused this function to be
   *        called.
   *
   * @listens tap
   * @listens click
   * @fires CloseButton#close
   */
  ;

  _proto.handleClick = function handleClick(event) {
    /**
     * Triggered when the a `CloseButton` is clicked.
     *
     * @event CloseButton#close
     * @type {EventTarget~Event}
     *
     * @property {boolean} [bubbles=false]
     *           set to false so that the close event does not
     *           bubble up to parents if there is no listener
     */
    this.trigger({
      type: 'close',
      bubbles: false
    });
  };

  return CloseButton;
}(Button);

Component.registerComponent('CloseButton', CloseButton);
|
|
|
/** |
|
* Button to toggle between play and pause. |
|
* |
|
* @extends Button |
|
*/ |
|
|
|
var PlayToggle = |
|
/*#__PURE__*/ |
|
function (_Button) { |
|
_inheritsLoose(PlayToggle, _Button); |
|
|
|
/** |
|
* Creates an instance of this class. |
|
* |
|
* @param {Player} player |
|
* The `Player` that this class should be attached to. |
|
* |
|
* @param {Object} [options={}] |
|
* The key/value store of player options. |
|
*/ |
|
function PlayToggle(player, options) {
  var _this;

  if (options === void 0) {
    options = {};
  }

  _this = _Button.call(this, player, options) || this; // show or hide replay icon

  // Default options.replay to true unless the caller explicitly set it.
  options.replay = options.replay === undefined || options.replay;

  _this.on(player, 'play', _this.handlePlay);

  _this.on(player, 'pause', _this.handlePause);

  if (options.replay) {
    _this.on(player, 'ended', _this.handleEnded);
  }

  return _this;
}
|
/** |
|
* Builds the default DOM `className`. |
|
* |
|
* @return {string} |
|
* The DOM `className` for this object. |
|
*/ |
|
|
|
|
|
var _proto = PlayToggle.prototype; |
|
|
|
_proto.buildCSSClass = function buildCSSClass() { |
|
return "vjs-play-control " + _Button.prototype.buildCSSClass.call(this); |
|
} |
|
/** |
|
* This gets called when an `PlayToggle` is "clicked". See |
|
* {@link ClickableComponent} for more detailed information on what a click can be. |
|
* |
|
* @param {EventTarget~Event} [event] |
|
* The `keydown`, `tap`, or `click` event that caused this function to be |
|
* called. |
|
* |
|
* @listens tap |
|
* @listens click |
|
*/ |
|
; |
|
|
|
_proto.handleClick = function handleClick(event) { |
|
if (this.player_.paused()) { |
|
this.player_.play(); |
|
} else { |
|
this.player_.pause(); |
|
} |
|
} |
|
/** |
|
* This gets called once after the video has ended and the user seeks so that |
|
* we can change the replay button back to a play button. |
|
* |
|
* @param {EventTarget~Event} [event] |
|
* The event that caused this function to run. |
|
* |
|
* @listens Player#seeked |
|
*/ |
|
; |
|
|
|
_proto.handleSeeked = function handleSeeked(event) { |
|
this.removeClass('vjs-ended'); |
|
|
|
if (this.player_.paused()) { |
|
this.handlePause(event); |
|
} else { |
|
this.handlePlay(event); |
|
} |
|
} |
|
/** |
|
* Add the vjs-playing class to the element so it can change appearance. |
|
* |
|
* @param {EventTarget~Event} [event] |
|
* The event that caused this function to run. |
|
* |
|
* @listens Player#play |
|
*/ |
|
; |
|
|
|
_proto.handlePlay = function handlePlay(event) { |
|
this.removeClass('vjs-ended'); |
|
this.removeClass('vjs-paused'); |
|
this.addClass('vjs-playing'); // change the button text to "Pause" |
|
|
|
this.controlText('Pause'); |
|
} |
|
/** |
|
* Add the vjs-paused class to the element so it can change appearance. |
|
* |
|
* @param {EventTarget~Event} [event] |
|
* The event that caused this function to run. |
|
* |
|
* @listens Player#pause |
|
*/ |
|
; |
|
|
|
_proto.handlePause = function handlePause(event) { |
|
this.removeClass('vjs-playing'); |
|
this.addClass('vjs-paused'); // change the button text to "Play" |
|
|
|
this.controlText('Play'); |
|
} |
|
/** |
|
* Add the vjs-ended class to the element so it can change appearance |
|
* |
|
* @param {EventTarget~Event} [event] |
|
* The event that caused this function to run. |
|
* |
|
* @listens Player#ended |
|
*/ |
|
; |
|
|
|
_proto.handleEnded = function handleEnded(event) { |
|
this.removeClass('vjs-playing'); |
|
this.addClass('vjs-ended'); // change the button text to "Replay" |
|
|
|
this.controlText('Replay'); // on the next seek remove the replay button |
|
|
|
this.one(this.player_, 'seeked', this.handleSeeked); |
|
}; |
|
|
|
return PlayToggle; |
|
}(Button); |
|
  /**
   * The text that should display over the `PlayToggle`s controls. Added for localization.
   *
   * @type {string}
   * @private
   */


  PlayToggle.prototype.controlText_ = 'Play';
  // Register so the toggle can be created by name in control-bar options.
  Component.registerComponent('PlayToggle', PlayToggle);
|
|
|
/** |
|
* @file format-time.js |
|
* @module format-time |
|
*/ |
|
|
|
/** |
|
* Format seconds as a time string, H:MM:SS or M:SS. Supplying a guide (in |
|
* seconds) will force a number of leading zeros to cover the length of the |
|
* guide. |
|
* |
|
* @private |
|
* @param {number} seconds |
|
* Number of seconds to be turned into a string |
|
* |
|
* @param {number} guide |
|
* Number (in seconds) to model the string after |
|
* |
|
* @return {string} |
|
* Time formatted as H:MM:SS or M:SS |
|
*/ |
|
var defaultImplementation = function defaultImplementation(seconds, guide) { |
|
seconds = seconds < 0 ? 0 : seconds; |
|
var s = Math.floor(seconds % 60); |
|
var m = Math.floor(seconds / 60 % 60); |
|
var h = Math.floor(seconds / 3600); |
|
var gm = Math.floor(guide / 60 % 60); |
|
var gh = Math.floor(guide / 3600); // handle invalid times |
|
|
|
if (isNaN(seconds) || seconds === Infinity) { |
|
// '-' is false for all relational operators (e.g. <, >=) so this setting |
|
// will add the minimum number of fields specified by the guide |
|
h = m = s = '-'; |
|
} // Check if we need to show hours |
|
|
|
|
|
h = h > 0 || gh > 0 ? h + ':' : ''; // If hours are showing, we may need to add a leading zero. |
|
// Always show at least one digit of minutes. |
|
|
|
m = ((h || gm >= 10) && m < 10 ? '0' + m : m) + ':'; // Check if leading zero is need for seconds |
|
|
|
s = s < 10 ? '0' + s : s; |
|
return h + m + s; |
|
}; // Internal pointer to the current implementation. |
|
|
|
|
|
var implementation = defaultImplementation; |
|
  /**
   * Replaces the default formatTime implementation with a custom implementation.
   * Use {@link resetFormatTime} to restore the default.
   *
   * @param {Function} customImplementation
   *        A function which will be used in place of the default formatTime
   *        implementation. Will receive the current time in seconds and the
   *        guide (in seconds) as arguments.
   */


  function setFormatTime(customImplementation) {
    implementation = customImplementation;
  }
|
  /**
   * Resets formatTime to the default implementation, undoing any
   * customization installed via {@link setFormatTime}.
   */


  function resetFormatTime() {
    implementation = defaultImplementation;
  }
|
/** |
|
* Delegates to either the default time formatting function or a custom |
|
* function supplied via `setFormatTime`. |
|
* |
|
* Formats seconds as a time string (H:MM:SS or M:SS). Supplying a |
|
* guide (in seconds) will force a number of leading zeros to cover the |
|
* length of the guide. |
|
* |
|
* @static |
|
* @example formatTime(125, 600) === "02:05" |
|
* @param {number} seconds |
|
* Number of seconds to be turned into a string |
|
* |
|
* @param {number} guide |
|
* Number (in seconds) to model the string after |
|
* |
|
* @return {string} |
|
* Time formatted as H:MM:SS or M:SS |
|
*/ |
|
|
|
function formatTime(seconds, guide) { |
|
if (guide === void 0) { |
|
guide = seconds; |
|
} |
|
|
|
return implementation(seconds, guide); |
|
} |
|
|
|
  /**
   * Displays time information about the video
   *
   * @extends Component
   */


  var TimeDisplay =
  /*#__PURE__*/
  function (_Component) {
    _inheritsLoose(TimeDisplay, _Component);

    /**
     * Creates an instance of this class.
     *
     * @param {Player} player
     *        The `Player` that this class should be attached to.
     *
     * @param {Object} [options]
     *        The key/value store of player options.
     */
    function TimeDisplay(player, options) {
      var _this;

      _this = _Component.call(this, player, options) || this;
      // Throttle updates to one per 25ms so rapid `timeupdate` events do not
      // thrash the DOM. (The doubled _assertThisInitialized is transpiler
      // output and harmless.)
      _this.throttledUpdateContent = throttle(bind(_assertThisInitialized(_assertThisInitialized(_this)), _this.updateContent), 25);

      _this.on(player, 'timeupdate', _this.throttledUpdateContent);

      return _this;
    }
    /**
     * Create the `Component`'s DOM element
     *
     * @return {Element}
     *         The element that was created.
     */


    var _proto = TimeDisplay.prototype;

    _proto.createEl = function createEl$$1() {
      var className = this.buildCSSClass();

      var el = _Component.prototype.createEl.call(this, 'div', {
        className: className + " vjs-time-control vjs-control",
        innerHTML: "<span class=\"vjs-control-text\" role=\"presentation\">" + this.localize(this.labelText_) + "\xA0</span>"
      });

      this.contentEl_ = createEl('span', {
        className: className + "-display"
      }, {
        // tell screen readers not to automatically read the time as it changes
        'aria-live': 'off',
        // span elements have no implicit role, but some screen readers (notably VoiceOver)
        // treat them as a break between items in the DOM when using arrow keys
        // (or left-to-right swipes on iOS) to read contents of a page. Using
        // role='presentation' causes VoiceOver to NOT treat this span as a break.
        'role': 'presentation'
      });
      this.updateTextNode_();
      el.appendChild(this.contentEl_);
      return el;
    };

    // Release DOM references so detached nodes can be garbage collected.
    _proto.dispose = function dispose() {
      this.contentEl_ = null;
      this.textNode_ = null;

      _Component.prototype.dispose.call(this);
    }
    /**
     * Updates the displayed text node with new content using the
     * contents of the `formattedTime_` property.
     *
     * @private
     */
    ;

    _proto.updateTextNode_ = function updateTextNode_() {
      // Nothing to update after dispose().
      if (!this.contentEl_) {
        return;
      }

      // Empty the container before appending the fresh text node.
      while (this.contentEl_.firstChild) {
        this.contentEl_.removeChild(this.contentEl_.firstChild);
      }

      this.textNode_ = document.createTextNode(this.formattedTime_ || this.formatTime_(0));
      this.contentEl_.appendChild(this.textNode_);
    }
    /**
     * Generates a formatted time for this component to use in display.
     *
     * @param {number} time
     *        A numeric time, in seconds.
     *
     * @return {string}
     *         A formatted time
     *
     * @private
     */
    ;

    _proto.formatTime_ = function formatTime_(time) {
      return formatTime(time);
    }
    /**
     * Updates the time display text node when the passed-in time produces a
     * formatted string different from the one currently shown.
     *
     * @param {number} time
     *        The time to update to
     *
     * @private
     */
    ;

    _proto.updateFormattedTime_ = function updateFormattedTime_(time) {
      var formattedTime = this.formatTime_(time);

      // Skip the DOM work entirely when the visible string would not change.
      if (formattedTime === this.formattedTime_) {
        return;
      }

      this.formattedTime_ = formattedTime;
      // Batch the actual DOM mutation into the next animation frame.
      this.requestAnimationFrame(this.updateTextNode_);
    }
    /**
     * To be filled out in the child class, should update the displayed time
     * in accordance with the fact that the current time has changed.
     *
     * @param {EventTarget~Event} [event]
     *        The `timeupdate` event that caused this to run.
     *
     * @listens Player#timeupdate
     */
    ;

    _proto.updateContent = function updateContent(event) {};

    return TimeDisplay;
  }(Component);
|
  /**
   * The text that is added to the `TimeDisplay` for screen reader users.
   *
   * @type {string}
   * @private
   */


  TimeDisplay.prototype.labelText_ = 'Time';
  /**
   * The text that should display over the `TimeDisplay`s controls. Added for localization.
   *
   * @type {string}
   * @private
   *
   * @deprecated in v7; controlText_ is not used in non-active display Components
   */

  TimeDisplay.prototype.controlText_ = 'Time';
  Component.registerComponent('TimeDisplay', TimeDisplay);
|
|
|
/** |
|
* Displays the current time |
|
* |
|
* @extends Component |
|
*/ |
|
|
|
var CurrentTimeDisplay = |
|
/*#__PURE__*/ |
|
function (_TimeDisplay) { |
|
_inheritsLoose(CurrentTimeDisplay, _TimeDisplay); |
|
|
|
/** |
|
* Creates an instance of this class. |
|
* |
|
* @param {Player} player |
|
* The `Player` that this class should be attached to. |
|
* |
|
* @param {Object} [options] |
|
* The key/value store of player options. |
|
*/ |
|
function CurrentTimeDisplay(player, options) { |
|
var _this; |
|
|
|
_this = _TimeDisplay.call(this, player, options) || this; |
|
|
|
_this.on(player, 'ended', _this.handleEnded); |
|
|
|
return _this; |
|
} |
|
/** |
|
* Builds the default DOM `className`. |
|
* |
|
* @return {string} |
|
* The DOM `className` for this object. |
|
*/ |
|
|
|
|
|
var _proto = CurrentTimeDisplay.prototype; |
|
|
|
_proto.buildCSSClass = function buildCSSClass() { |
|
return 'vjs-current-time'; |
|
} |
|
/** |
|
* Update current time display |
|
* |
|
* @param {EventTarget~Event} [event] |
|
* The `timeupdate` event that caused this function to run. |
|
* |
|
* @listens Player#timeupdate |
|
*/ |
|
; |
|
|
|
_proto.updateContent = function updateContent(event) { |
|
// Allows for smooth scrubbing, when player can't keep up. |
|
var time = this.player_.scrubbing() ? this.player_.getCache().currentTime : this.player_.currentTime(); |
|
this.updateFormattedTime_(time); |
|
} |
|
/** |
|
* When the player fires ended there should be no time left. Sadly |
|
* this is not always the case, lets make it seem like that is the case |
|
* for users. |
|
* |
|
* @param {EventTarget~Event} [event] |
|
* The `ended` event that caused this to run. |
|
* |
|
* @listens Player#ended |
|
*/ |
|
; |
|
|
|
_proto.handleEnded = function handleEnded(event) { |
|
if (!this.player_.duration()) { |
|
return; |
|
} |
|
|
|
this.updateFormattedTime_(this.player_.duration()); |
|
}; |
|
|
|
return CurrentTimeDisplay; |
|
}(TimeDisplay); |
|
  /**
   * The text that is added to the `CurrentTimeDisplay` for screen reader users.
   *
   * @type {string}
   * @private
   */


  CurrentTimeDisplay.prototype.labelText_ = 'Current Time';
  /**
   * The text that should display over the `CurrentTimeDisplay`s controls. Added for localization.
   *
   * @type {string}
   * @private
   *
   * @deprecated in v7; controlText_ is not used in non-active display Components
   */

  CurrentTimeDisplay.prototype.controlText_ = 'Current Time';
  Component.registerComponent('CurrentTimeDisplay', CurrentTimeDisplay);
|
|
|
/** |
|
* Displays the duration |
|
* |
|
* @extends Component |
|
*/ |
|
|
|
var DurationDisplay = |
|
/*#__PURE__*/ |
|
function (_TimeDisplay) { |
|
_inheritsLoose(DurationDisplay, _TimeDisplay); |
|
|
|
/** |
|
* Creates an instance of this class. |
|
* |
|
* @param {Player} player |
|
* The `Player` that this class should be attached to. |
|
* |
|
* @param {Object} [options] |
|
* The key/value store of player options. |
|
*/ |
|
function DurationDisplay(player, options) { |
|
var _this; |
|
|
|
_this = _TimeDisplay.call(this, player, options) || this; // we do not want to/need to throttle duration changes, |
|
// as they should always display the changed duration as |
|
// it has changed |
|
|
|
_this.on(player, 'durationchange', _this.updateContent); // Listen to loadstart because the player duration is reset when a new media element is loaded, |
|
// but the durationchange on the user agent will not fire. |
|
// @see [Spec]{@link https://www.w3.org/TR/2011/WD-html5-20110113/video.html#media-element-load-algorithm} |
|
|
|
|
|
_this.on(player, 'loadstart', _this.updateContent); // Also listen for timeupdate (in the parent) and loadedmetadata because removing those |
|
// listeners could have broken dependent applications/libraries. These |
|
// can likely be removed for 7.0. |
|
|
|
|
|
_this.on(player, 'loadedmetadata', _this.throttledUpdateContent); |
|
|
|
return _this; |
|
} |
|
/** |
|
* Builds the default DOM `className`. |
|
* |
|
* @return {string} |
|
* The DOM `className` for this object. |
|
*/ |
|
|
|
|
|
var _proto = DurationDisplay.prototype; |
|
|
|
_proto.buildCSSClass = function buildCSSClass() { |
|
return 'vjs-duration'; |
|
} |
|
/** |
|
* Update duration time display. |
|
* |
|
* @param {EventTarget~Event} [event] |
|
* The `durationchange`, `timeupdate`, or `loadedmetadata` event that caused |
|
* this function to be called. |
|
* |
|
* @listens Player#durationchange |
|
* @listens Player#timeupdate |
|
* @listens Player#loadedmetadata |
|
*/ |
|
; |
|
|
|
_proto.updateContent = function updateContent(event) { |
|
var duration = this.player_.duration(); |
|
|
|
if (this.duration_ !== duration) { |
|
this.duration_ = duration; |
|
this.updateFormattedTime_(duration); |
|
} |
|
}; |
|
|
|
return DurationDisplay; |
|
}(TimeDisplay); |
|
  /**
   * The text that is added to the `DurationDisplay` for screen reader users.
   *
   * @type {string}
   * @private
   */


  DurationDisplay.prototype.labelText_ = 'Duration';
  /**
   * The text that should display over the `DurationDisplay`s controls. Added for localization.
   *
   * @type {string}
   * @private
   *
   * @deprecated in v7; controlText_ is not used in non-active display Components
   */

  DurationDisplay.prototype.controlText_ = 'Duration';
  Component.registerComponent('DurationDisplay', DurationDisplay);
|
|
|
/** |
|
* The separator between the current time and duration. |
|
* Can be hidden if it's not needed in the design. |
|
* |
|
* @extends Component |
|
*/ |
|
|
|
var TimeDivider = |
|
/*#__PURE__*/ |
|
function (_Component) { |
|
_inheritsLoose(TimeDivider, _Component); |
|
|
|
function TimeDivider() { |
|
return _Component.apply(this, arguments) || this; |
|
} |
|
|
|
var _proto = TimeDivider.prototype; |
|
|
|
/** |
|
* Create the component's DOM element |
|
* |
|
* @return {Element} |
|
* The element that was created. |
|
*/ |
|
_proto.createEl = function createEl() { |
|
return _Component.prototype.createEl.call(this, 'div', { |
|
className: 'vjs-time-control vjs-time-divider', |
|
innerHTML: '<div><span>/</span></div>' |
|
}, { |
|
// this element and its contents can be hidden from assistive techs since |
|
// it is made extraneous by the announcement of the control text |
|
// for the current time and duration displays |
|
'aria-hidden': true |
|
}); |
|
}; |
|
|
|
return TimeDivider; |
|
}(Component); |
|
|
|
Component.registerComponent('TimeDivider', TimeDivider); |
|
|
|
/** |
|
* Displays the time left in the video |
|
* |
|
* @extends Component |
|
*/ |
|
|
|
var RemainingTimeDisplay = |
|
/*#__PURE__*/ |
|
function (_TimeDisplay) { |
|
_inheritsLoose(RemainingTimeDisplay, _TimeDisplay); |
|
|
|
/** |
|
* Creates an instance of this class. |
|
* |
|
* @param {Player} player |
|
* The `Player` that this class should be attached to. |
|
* |
|
* @param {Object} [options] |
|
* The key/value store of player options. |
|
*/ |
|
function RemainingTimeDisplay(player, options) { |
|
var _this; |
|
|
|
_this = _TimeDisplay.call(this, player, options) || this; |
|
|
|
_this.on(player, 'durationchange', _this.throttledUpdateContent); |
|
|
|
_this.on(player, 'ended', _this.handleEnded); |
|
|
|
return _this; |
|
} |
|
/** |
|
* Builds the default DOM `className`. |
|
* |
|
* @return {string} |
|
* The DOM `className` for this object. |
|
*/ |
|
|
|
|
|
var _proto = RemainingTimeDisplay.prototype; |
|
|
|
_proto.buildCSSClass = function buildCSSClass() { |
|
return 'vjs-remaining-time'; |
|
} |
|
/** |
|
* Create the `Component`'s DOM element with the "minus" characted prepend to the time |
|
* |
|
* @return {Element} |
|
* The element that was created. |
|
*/ |
|
; |
|
|
|
_proto.createEl = function createEl$$1() { |
|
var el = _TimeDisplay.prototype.createEl.call(this); |
|
|
|
el.insertBefore(createEl('span', {}, { |
|
'aria-hidden': true |
|
}, '-'), this.contentEl_); |
|
return el; |
|
} |
|
/** |
|
* Update remaining time display. |
|
* |
|
* @param {EventTarget~Event} [event] |
|
* The `timeupdate` or `durationchange` event that caused this to run. |
|
* |
|
* @listens Player#timeupdate |
|
* @listens Player#durationchange |
|
*/ |
|
; |
|
|
|
_proto.updateContent = function updateContent(event) { |
|
if (typeof this.player_.duration() !== 'number') { |
|
return; |
|
} // @deprecated We should only use remainingTimeDisplay |
|
// as of video.js 7 |
|
|
|
|
|
if (this.player_.remainingTimeDisplay) { |
|
this.updateFormattedTime_(this.player_.remainingTimeDisplay()); |
|
} else { |
|
this.updateFormattedTime_(this.player_.remainingTime()); |
|
} |
|
} |
|
/** |
|
* When the player fires ended there should be no time left. Sadly |
|
* this is not always the case, lets make it seem like that is the case |
|
* for users. |
|
* |
|
* @param {EventTarget~Event} [event] |
|
* The `ended` event that caused this to run. |
|
* |
|
* @listens Player#ended |
|
*/ |
|
; |
|
|
|
_proto.handleEnded = function handleEnded(event) { |
|
if (!this.player_.duration()) { |
|
return; |
|
} |
|
|
|
this.updateFormattedTime_(0); |
|
}; |
|
|
|
return RemainingTimeDisplay; |
|
}(TimeDisplay); |
|
/** |
|
* The text that is added to the `RemainingTimeDisplay` for screen reader users. |
|
* |
|
* @type {string} |
|
* @private |
|
*/ |
|
|
|
|
|
RemainingTimeDisplay.prototype.labelText_ = 'Remaining Time'; |
|
/** |
|
* The text that should display over the `RemainingTimeDisplay`s controls. Added to for localization. |
|
* |
|
* @type {string} |
|
* @private |
|
* |
|
* @deprecated in v7; controlText_ is not used in non-active display Components |
|
*/ |
|
|
|
RemainingTimeDisplay.prototype.controlText_ = 'Remaining Time'; |
|
Component.registerComponent('RemainingTimeDisplay', RemainingTimeDisplay); |
|
|
|
/** |
|
* Displays the live indicator when duration is Infinity. |
|
* |
|
* @extends Component |
|
*/ |
|
|
|
var LiveDisplay = |
|
/*#__PURE__*/ |
|
function (_Component) { |
|
_inheritsLoose(LiveDisplay, _Component); |
|
|
|
/** |
|
* Creates an instance of this class. |
|
* |
|
* @param {Player} player |
|
* The `Player` that this class should be attached to. |
|
* |
|
* @param {Object} [options] |
|
* The key/value store of player options. |
|
*/ |
|
function LiveDisplay(player, options) { |
|
var _this; |
|
|
|
_this = _Component.call(this, player, options) || this; |
|
|
|
_this.updateShowing(); |
|
|
|
_this.on(_this.player(), 'durationchange', _this.updateShowing); |
|
|
|
return _this; |
|
} |
|
/** |
|
* Create the `Component`'s DOM element |
|
* |
|
* @return {Element} |
|
* The element that was created. |
|
*/ |
|
|
|
|
|
var _proto = LiveDisplay.prototype; |
|
|
|
_proto.createEl = function createEl$$1() { |
|
var el = _Component.prototype.createEl.call(this, 'div', { |
|
className: 'vjs-live-control vjs-control' |
|
}); |
|
|
|
this.contentEl_ = createEl('div', { |
|
className: 'vjs-live-display', |
|
innerHTML: "<span class=\"vjs-control-text\">" + this.localize('Stream Type') + "\xA0</span>" + this.localize('LIVE') |
|
}, { |
|
'aria-live': 'off' |
|
}); |
|
el.appendChild(this.contentEl_); |
|
return el; |
|
}; |
|
|
|
_proto.dispose = function dispose() { |
|
this.contentEl_ = null; |
|
|
|
_Component.prototype.dispose.call(this); |
|
} |
|
/** |
|
* Check the duration to see if the LiveDisplay should be showing or not. Then show/hide |
|
* it accordingly |
|
* |
|
* @param {EventTarget~Event} [event] |
|
* The {@link Player#durationchange} event that caused this function to run. |
|
* |
|
* @listens Player#durationchange |
|
*/ |
|
; |
|
|
|
_proto.updateShowing = function updateShowing(event) { |
|
if (this.player().duration() === Infinity) { |
|
this.show(); |
|
} else { |
|
this.hide(); |
|
} |
|
}; |
|
|
|
return LiveDisplay; |
|
}(Component); |
|
|
|
Component.registerComponent('LiveDisplay', LiveDisplay); |
|
|
|
/** |
|
* Displays the live indicator when duration is Infinity. |
|
* |
|
* @extends Component |
|
*/ |
|
|
|
var SeekToLive = |
|
/*#__PURE__*/ |
|
function (_Button) { |
|
_inheritsLoose(SeekToLive, _Button); |
|
|
|
/** |
|
* Creates an instance of this class. |
|
* |
|
* @param {Player} player |
|
* The `Player` that this class should be attached to. |
|
* |
|
* @param {Object} [options] |
|
* The key/value store of player options. |
|
*/ |
|
function SeekToLive(player, options) { |
|
var _this; |
|
|
|
_this = _Button.call(this, player, options) || this; |
|
|
|
_this.updateLiveEdgeStatus(); |
|
|
|
if (_this.player_.liveTracker) { |
|
_this.on(_this.player_.liveTracker, 'liveedgechange', _this.updateLiveEdgeStatus); |
|
} |
|
|
|
return _this; |
|
} |
|
/** |
|
* Create the `Component`'s DOM element |
|
* |
|
* @return {Element} |
|
* The element that was created. |
|
*/ |
|
|
|
|
|
var _proto = SeekToLive.prototype; |
|
|
|
_proto.createEl = function createEl$$1() { |
|
var el = _Button.prototype.createEl.call(this, 'button', { |
|
className: 'vjs-seek-to-live-control vjs-control' |
|
}); |
|
|
|
this.textEl_ = createEl('span', { |
|
className: 'vjs-seek-to-live-text', |
|
innerHTML: this.localize('LIVE') |
|
}, { |
|
'aria-hidden': 'true' |
|
}); |
|
el.appendChild(this.textEl_); |
|
return el; |
|
} |
|
/** |
|
* Update the state of this button if we are at the live edge |
|
* or not |
|
*/ |
|
; |
|
|
|
_proto.updateLiveEdgeStatus = function updateLiveEdgeStatus(e) { |
|
// default to live edge |
|
if (!this.player_.liveTracker || this.player_.liveTracker.atLiveEdge()) { |
|
this.setAttribute('aria-disabled', true); |
|
this.addClass('vjs-at-live-edge'); |
|
this.controlText('Seek to live, currently playing live'); |
|
} else { |
|
this.setAttribute('aria-disabled', false); |
|
this.removeClass('vjs-at-live-edge'); |
|
this.controlText('Seek to live, currently behind live'); |
|
} |
|
} |
|
/** |
|
* On click bring us as near to the live point as possible. |
|
* This requires that we wait for the next `live-seekable-change` |
|
* event which will happen every segment length seconds. |
|
*/ |
|
; |
|
|
|
_proto.handleClick = function handleClick() { |
|
this.player_.liveTracker.seekToLiveEdge(); |
|
} |
|
/** |
|
* Dispose of the element and stop tracking |
|
*/ |
|
; |
|
|
|
_proto.dispose = function dispose() { |
|
if (this.player_.liveTracker) { |
|
this.off(this.player_.liveTracker, 'liveedgechange', this.updateLiveEdgeStatus); |
|
} |
|
|
|
this.textEl_ = null; |
|
|
|
_Button.prototype.dispose.call(this); |
|
}; |
|
|
|
return SeekToLive; |
|
}(Button); |
|
|
|
SeekToLive.prototype.controlText_ = 'Seek to live, currently playing live'; |
|
Component.registerComponent('SeekToLive', SeekToLive); |
|
|
|
/** |
|
* The base functionality for a slider. Can be vertical or horizontal. |
|
* For instance the volume bar or the seek bar on a video is a slider. |
|
* |
|
* @extends Component |
|
*/ |
|
|
|
var Slider = |
|
/*#__PURE__*/ |
|
function (_Component) { |
|
_inheritsLoose(Slider, _Component); |
|
|
|
    /**
     * Create an instance of this class
     *
     * @param {Player} player
     *        The `Player` that this class should be attached to.
     *
     * @param {Object} [options]
     *        The key/value store of player options. `options.barName` names
     *        the child component used as the fill bar; `options.vertical`
     *        selects the slider orientation.
     */
    function Slider(player, options) {
      var _this;

      _this = _Component.call(this, player, options) || this; // Set property names to bar to match with the child Slider class is looking for

      _this.bar = _this.getChild(_this.options_.barName); // Set a horizontal or vertical class on the slider depending on the slider type

      _this.vertical(!!_this.options_.vertical);

      _this.enable();

      return _this;
    }
    /**
     * Are controls currently enabled for this slider or not?
     *
     * @return {boolean}
     *         true if controls are enabled, false otherwise
     */


    var _proto = Slider.prototype;

    _proto.enabled = function enabled() {
      return this.enabled_;
    }
    /**
     * Enable controls for this slider if they are disabled. Attaches the
     * mouse/touch/key listeners and puts the slider in the tab order.
     */
    ;

    _proto.enable = function enable() {
      // Already enabled; attaching the listeners twice would double-fire them.
      if (this.enabled()) {
        return;
      }

      this.on('mousedown', this.handleMouseDown);
      this.on('touchstart', this.handleMouseDown);
      this.on('keydown', this.handleKeyDown);
      this.on('click', this.handleClick);
      this.on(this.player_, 'controlsvisible', this.update);

      if (this.playerEvent) {
        this.on(this.player_, this.playerEvent, this.update);
      }

      this.removeClass('disabled');
      this.setAttribute('tabindex', 0);
      this.enabled_ = true;
    }
    /**
     * Disable controls for this slider if they are enabled. Detaches all
     * listeners and removes the slider from the tab order.
     */
    ;

    _proto.disable = function disable() {
      if (!this.enabled()) {
        return;
      }

      // The document-level move/up listeners may still be attached if we are
      // disabled mid-drag, so remove them here as well.
      var doc = this.bar.el_.ownerDocument;
      this.off('mousedown', this.handleMouseDown);
      this.off('touchstart', this.handleMouseDown);
      this.off('keydown', this.handleKeyDown);
      this.off('click', this.handleClick);
      this.off(this.player_, 'controlsvisible', this.update);
      this.off(doc, 'mousemove', this.handleMouseMove);
      this.off(doc, 'mouseup', this.handleMouseUp);
      this.off(doc, 'touchmove', this.handleMouseMove);
      this.off(doc, 'touchend', this.handleMouseUp);
      this.removeAttribute('tabindex');
      this.addClass('disabled');

      if (this.playerEvent) {
        this.off(this.player_, this.playerEvent, this.update);
      }

      this.enabled_ = false;
    }
|
    /**
     * Create the `Slider`s DOM element.
     *
     * @param {string} type
     *        Type of element to create.
     *
     * @param {Object} [props={}]
     *        List of properties in Object form.
     *
     * @param {Object} [attributes={}]
     *        list of attributes in Object form.
     *
     * @return {Element}
     *         The element that gets created.
     */
    ;

    _proto.createEl = function createEl$$1(type, props, attributes) {
      if (props === void 0) {
        props = {};
      }

      if (attributes === void 0) {
        attributes = {};
      }

      // Add the slider element class to all sub classes
      props.className = props.className + ' vjs-slider';
      props = assign({
        tabIndex: 0
      }, props);
      attributes = assign({
        // expose the slider to assistive tech via the ARIA slider pattern
        'role': 'slider',
        'aria-valuenow': 0,
        'aria-valuemin': 0,
        'aria-valuemax': 100,
        'tabIndex': 0
      }, attributes);
      return _Component.prototype.createEl.call(this, type, props, attributes);
    }
    /**
     * Handle `mousedown` or `touchstart` events on the `Slider`.
     *
     * @param {EventTarget~Event} event
     *        `mousedown` or `touchstart` event that triggered this function
     *
     * @listens mousedown
     * @listens touchstart
     * @fires Slider#slideractive
     */
    ;

    _proto.handleMouseDown = function handleMouseDown(event) {
      var doc = this.bar.el_.ownerDocument;

      if (event.type === 'mousedown') {
        event.preventDefault();
      } // Do not call preventDefault() on touchstart in Chrome
      // to avoid console warnings. Use a 'touch-action: none' style
      // instead to prevent unintended scrolling.
      // https://developers.google.com/web/updates/2017/01/scrolling-intervention


      if (event.type === 'touchstart' && !IS_CHROME) {
        event.preventDefault();
      }

      blockTextSelection();
      this.addClass('vjs-sliding');
      /**
       * Triggered when the slider is in an active state
       *
       * @event Slider#slideractive
       * @type {EventTarget~Event}
       */

      this.trigger('slideractive');
      // Track the drag at the document level so it keeps working even when the
      // pointer leaves the slider element.
      this.on(doc, 'mousemove', this.handleMouseMove);
      this.on(doc, 'mouseup', this.handleMouseUp);
      this.on(doc, 'touchmove', this.handleMouseMove);
      this.on(doc, 'touchend', this.handleMouseUp);
      this.handleMouseMove(event);
    }
|
    /**
     * Handle the `mousemove`, `touchmove`, and `mousedown` events on this `Slider`.
     * The `mousemove` and `touchmove` events will only trigger this function during
     * `mousedown` and `touchstart`. This is due to {@link Slider#handleMouseDown} and
     * {@link Slider#handleMouseUp}.
     *
     * @param {EventTarget~Event} event
     *        `mousedown`, `mousemove`, `touchstart`, or `touchmove` event that triggered
     *        this function
     *
     * @listens mousemove
     * @listens touchmove
     */
    ;

    // Intentionally a no-op here; concrete sliders override this to move the bar.
    _proto.handleMouseMove = function handleMouseMove(event) {}
    /**
     * Handle `mouseup` or `touchend` events on the `Slider`.
     *
     * @param {EventTarget~Event} event
     *        `mouseup` or `touchend` event that triggered this function.
     *
     * @listens touchend
     * @listens mouseup
     * @fires Slider#sliderinactive
     */
    ;

    _proto.handleMouseUp = function handleMouseUp() {
      var doc = this.bar.el_.ownerDocument;
      unblockTextSelection();
      this.removeClass('vjs-sliding');
      /**
       * Triggered when the slider is no longer in an active state.
       *
       * @event Slider#sliderinactive
       * @type {EventTarget~Event}
       */

      this.trigger('sliderinactive');
      // The drag is over: stop listening at the document level.
      this.off(doc, 'mousemove', this.handleMouseMove);
      this.off(doc, 'mouseup', this.handleMouseUp);
      this.off(doc, 'touchmove', this.handleMouseMove);
      this.off(doc, 'touchend', this.handleMouseUp);
      this.update();
    }
    /**
     * Update the progress bar of the `Slider`.
     *
     * @return {number}
     *         The percentage of progress the progress bar represents as a
     *         number from 0 to 1.
     */
    ;

    _proto.update = function update() {
      // In VolumeBar init we have a setTimeout for update that pops and update
      // to the end of the execution stack. The player is destroyed before then
      // update will cause an error
      if (!this.el_) {
        return;
      } // If scrubbing, we could use a cached value to make the handle keep up
      // with the user's mouse. On HTML5 browsers scrubbing is really smooth, but
      // some flash players are slow, so we might want to utilize this later.
      // var progress = (this.player_.scrubbing()) ? this.player_.getCache().currentTime / this.player_.duration() : this.player_.currentTime() / this.player_.duration();


      var progress = this.getPercent();
      var bar = this.bar; // If there's no bar...

      if (!bar) {
        return;
      } // Protect against no duration and other division issues
      // (`progress !== progress` is a NaN check)


      if (typeof progress !== 'number' || progress !== progress || progress < 0 || progress === Infinity) {
        progress = 0;
      } // Convert to a percentage for setting


      var percentage = (progress * 100).toFixed(2) + '%';
      var style = bar.el().style; // Set the new bar width or height

      if (this.vertical()) {
        style.height = percentage;
      } else {
        style.width = percentage;
      }

      return progress;
    }
    /**
     * Calculate distance for slider
     *
     * @param {EventTarget~Event} event
     *        The event that caused this function to run.
     *
     * @return {number}
     *         The current position of the Slider.
     *         - position.y for vertical `Slider`s
     *         - position.x for horizontal `Slider`s
     */
    ;

    _proto.calculateDistance = function calculateDistance(event) {
      var position = getPointerPosition(this.el_, event);

      if (this.vertical()) {
        return position.y;
      }

      return position.x;
    }
|
/** |
|
* Handle a `keydown` event on the `Slider`. Watches for left, rigth, up, and down |
|
* arrow keys. This function will only be called when the slider has focus. See |
|
* {@link Slider#handleFocus} and {@link Slider#handleBlur}. |
|
* |
|
* @param {EventTarget~Event} event |
|
* the `keydown` event that caused this function to run. |
|
* |
|
* @listens keydown |
|
*/ |
|
; |
|
|
|
_proto.handleKeyDown = function handleKeyDown(event) { |
|
// Left and Down Arrows |
|
if (keycode.isEventKey(event, 'Left') || keycode.isEventKey(event, 'Down')) { |
|
event.preventDefault(); |
|
event.stopPropagation(); |
|
this.stepBack(); // Up and Right Arrows |
|
} else if (keycode.isEventKey(event, 'Right') || keycode.isEventKey(event, 'Up')) { |
|
event.preventDefault(); |
|
event.stopPropagation(); |
|
this.stepForward(); |
|
} else { |
|
// Pass keydown handling up for unsupported keys |
|
_Component.prototype.handleKeyDown.call(this, event); |
|
} |
|
} |
|
  /**
   * Listener for click events on slider, used to prevent clicks
   * from bubbling up to parent elements like button menus.
   *
   * @param {Object} event
   *        Event that caused this object to run
   */
  ;

  _proto.handleClick = function handleClick(event) {
    // Keep the click from reaching ancestors (e.g. a containing menu button)
    // and suppress any default browser behavior for it.
    event.stopPropagation();
    event.preventDefault();
  }
|
/** |
|
* Get/set if slider is horizontal for vertical |
|
* |
|
* @param {boolean} [bool] |
|
* - true if slider is vertical, |
|
* - false is horizontal |
|
* |
|
* @return {boolean} |
|
* - true if slider is vertical, and getting |
|
* - false if the slider is horizontal, and getting |
|
*/ |
|
; |
|
|
|
_proto.vertical = function vertical(bool) { |
|
if (bool === undefined) { |
|
return this.vertical_ || false; |
|
} |
|
|
|
this.vertical_ = !!bool; |
|
|
|
if (this.vertical_) { |
|
this.addClass('vjs-slider-vertical'); |
|
} else { |
|
this.addClass('vjs-slider-horizontal'); |
|
} |
|
}; |
|
|
|
return Slider; |
|
}(Component); |
|
|
|
Component.registerComponent('Slider', Slider); |
|
|
|
/**
 * Shows loading progress
 *
 * @extends Component
 */
var LoadProgressBar =
/*#__PURE__*/
function (_Component) {
  _inheritsLoose(LoadProgressBar, _Component);

  /**
   * Creates an instance of this class.
   *
   * @param {Player} player
   *        The `Player` that this class should be attached to.
   *
   * @param {Object} [options]
   *        The key/value store of player options.
   */
  function LoadProgressBar(player, options) {
    var _this = _Component.call(this, player, options) || this;

    // One DOM element per buffered TimeRange, created lazily in update().
    _this.partEls_ = [];

    _this.on(player, 'progress', _this.update);

    return _this;
  }

  var _proto = LoadProgressBar.prototype;

  /**
   * Create the `Component`'s DOM element.
   *
   * @return {Element}
   *         The element that was created.
   */
  _proto.createEl = function createEl$$1() {
    var markup = '<span class="vjs-control-text"><span>' + this.localize('Loaded') + '</span>: <span class="vjs-control-text-loaded-percentage">0%</span></span>';

    return _Component.prototype.createEl.call(this, 'div', {
      className: 'vjs-load-progress',
      innerHTML: markup
    });
  };

  // Drop references to the per-range elements before the base dispose runs.
  _proto.dispose = function dispose() {
    this.partEls_ = null;

    _Component.prototype.dispose.call(this);
  }

  /**
   * Update progress bar
   *
   * @param {EventTarget~Event} [event]
   *        The `progress` event that caused this function to run.
   *
   * @listens Player#progress
   */
  ;

  _proto.update = function update(event) {
    var tracker = this.player_.liveTracker;
    var ranges = this.player_.buffered();
    var total = tracker && tracker.isLive() ? tracker.seekableEnd() : this.player_.duration();
    var loadedEnd = this.player_.bufferedEnd();
    var parts = this.partEls_;
    var percentageEl = this.$('.vjs-control-text-loaded-percentage');

    // Express `time` as a percentage of `end`, guarding NaN and capping at 100%.
    var toPercent = function toPercent(time, end, rounded) {
      var fraction = time / end || 0;
      fraction = (fraction >= 1 ? 1 : fraction) * 100;

      if (rounded) {
        fraction = fraction.toFixed(2);
      }

      return fraction + '%';
    };

    // Overall width of the load-progress bar.
    this.el_.style.width = toPercent(loadedEnd, total);

    // Screen-reader text mirrors the same percentage, rounded.
    textContent(percentageEl, toPercent(loadedEnd, total, true));

    // One child element per buffered range, positioned relative to loadedEnd.
    for (var idx = 0; idx < ranges.length; idx++) {
      var rangeStart = ranges.start(idx);
      var rangeEnd = ranges.end(idx);
      var partEl = parts[idx];

      if (!partEl) {
        partEl = this.el_.appendChild(createEl());
        parts[idx] = partEl;
      }

      partEl.style.left = toPercent(rangeStart, loadedEnd);
      partEl.style.width = toPercent(rangeEnd - rangeStart, loadedEnd);
    }

    // Remove elements for ranges that no longer exist.
    for (var extra = parts.length; extra > ranges.length; extra--) {
      this.el_.removeChild(parts[extra - 1]);
    }

    parts.length = ranges.length;
  };

  return LoadProgressBar;
}(Component);

Component.registerComponent('LoadProgressBar', LoadProgressBar);
|
|
|
/**
 * Time tooltips display a time above the progress bar.
 *
 * @extends Component
 */
var TimeTooltip =
/*#__PURE__*/
function (_Component) {
  _inheritsLoose(TimeTooltip, _Component);

  function TimeTooltip() {
    return _Component.apply(this, arguments) || this;
  }

  var _proto = TimeTooltip.prototype;

  /**
   * Create the time tooltip DOM element.
   *
   * @return {Element}
   *         The element that was created.
   */
  _proto.createEl = function createEl$$1() {
    var attributes = {
      'aria-hidden': 'true'
    };

    return _Component.prototype.createEl.call(this, 'div', {
      className: 'vjs-time-tooltip'
    }, attributes);
  }

  /**
   * Updates the position of the time tooltip relative to the `SeekBar`.
   *
   * @param {Object} seekBarRect
   *        The `ClientRect` for the {@link SeekBar} element.
   *
   * @param {number} seekBarPoint
   *        A number from 0 to 1, representing a horizontal reference point
   *        from the left edge of the {@link SeekBar}
   *
   * @param {string} content
   *        The formatted time string to display inside the tooltip.
   */
  ;

  _proto.update = function update(seekBarRect, seekBarPoint, content) {
    var tooltipRect = getBoundingClientRect(this.el_);
    var playerRect = getBoundingClientRect(this.player_.el());
    var pointPx = seekBarRect.width * seekBarPoint;

    // Bail out when either rect is unavailable — for example, when the
    // player isn't in the DOM during tests.
    if (!playerRect || !tooltipRect) {
      return;
    }

    // Pixels available left of the point within the player's bounds: the gap
    // between the player's and the SeekBar's left edges plus the pixels into
    // the SeekBar before hitting the point.
    var spaceLeft = seekBarRect.left - playerRect.left + pointPx;

    // Pixels available right of the point: the rest of the SeekBar plus any
    // gap between the SeekBar's and the player's right edges.
    var spaceRight = seekBarRect.width - pointPx + (playerRect.right - seekBarRect.right);

    // Pull the tooltip right by half its width so it centers over the point,
    // then adjust when either side lacks the room.
    var pull = tooltipRect.width / 2;

    if (spaceLeft < pull) {
      pull += pull - spaceLeft;
    } else if (spaceRight < pull) {
      pull = spaceRight;
    }

    // Rounding of ratio-based math can leave the value a pixel or two out of
    // range; clamp as insurance.
    if (pull < 0) {
      pull = 0;
    } else if (pull > tooltipRect.width) {
      pull = tooltipRect.width;
    }

    this.el_.style.right = '-' + pull + 'px';
    textContent(this.el_, content);
  }

  /**
   * Queues an update of the tooltip's text and position on the next
   * animation frame.
   *
   * @param {Object} seekBarRect
   *        The `ClientRect` for the {@link SeekBar} element.
   *
   * @param {number} seekBarPoint
   *        A number from 0 to 1, representing a horizontal reference point
   *        from the left edge of the {@link SeekBar}
   *
   * @param {number} time
   *        The time to update the tooltip to, not used during live playback
   *
   * @param {Function} cb
   *        A function that will be called during the request animation frame
   *        for tooltips that need to do additional animations from the default
   */
  ;

  _proto.updateTime = function updateTime(seekBarRect, seekBarPoint, time, cb) {
    var self = this;

    // Collapse rapid calls into a single pending animation frame.
    if (this.rafId_) {
      this.cancelAnimationFrame(this.rafId_);
    }

    this.rafId_ = this.requestAnimationFrame(function () {
      var content;
      var duration = self.player_.duration();

      if (self.player_.liveTracker && self.player_.liveTracker.isLive()) {
        // During live playback, show how far behind the live edge we are.
        var liveWindow = self.player_.liveTracker.liveWindow();
        var secondsBehind = liveWindow - seekBarPoint * liveWindow;

        content = (secondsBehind < 1 ? '' : '-') + formatTime(secondsBehind, liveWindow);
      } else {
        content = formatTime(time, duration);
      }

      self.update(seekBarRect, seekBarPoint, content);

      if (cb) {
        cb();
      }
    });
  };

  return TimeTooltip;
}(Component);

Component.registerComponent('TimeTooltip', TimeTooltip);
|
|
|
/**
 * Used by {@link SeekBar} to display media playback progress as part of the
 * {@link ProgressControl}.
 *
 * @extends Component
 */
var PlayProgressBar =
/*#__PURE__*/
function (_Component) {
  _inheritsLoose(PlayProgressBar, _Component);

  function PlayProgressBar() {
    return _Component.apply(this, arguments) || this;
  }

  var _proto = PlayProgressBar.prototype;

  /**
   * Create the DOM element for this class.
   *
   * @return {Element}
   *         The element that was created.
   */
  _proto.createEl = function createEl() {
    var attributes = {
      'aria-hidden': 'true'
    };

    return _Component.prototype.createEl.call(this, 'div', {
      className: 'vjs-play-progress vjs-slider-bar'
    }, attributes);
  }

  /**
   * Enqueues updates to its own DOM as well as the DOM of its
   * {@link TimeTooltip} child.
   *
   * @param {Object} seekBarRect
   *        The `ClientRect` for the {@link SeekBar} element.
   *
   * @param {number} seekBarPoint
   *        A number from 0 to 1, representing a horizontal reference point
   *        from the left edge of the {@link SeekBar}
   */
  ;

  _proto.update = function update(seekBarRect, seekBarPoint) {
    var tooltip = this.getChild('timeTooltip');

    // The tooltip child is absent on mobile devices (see options below).
    if (!tooltip) {
      return;
    }

    // Use the cached time while scrubbing so the tooltip keeps pace with the
    // pointer even when the tech lags.
    var displayTime = this.player_.scrubbing() ? this.player_.getCache().currentTime : this.player_.currentTime();

    tooltip.updateTime(seekBarRect, seekBarPoint, displayTime);
  };

  return PlayProgressBar;
}(Component);

/**
 * Default options for {@link PlayProgressBar}.
 *
 * @type {Object}
 * @private
 */
PlayProgressBar.prototype.options_ = {
  children: []
};

// Time tooltips should not be added to a player on mobile devices
if (!IS_IOS && !IS_ANDROID) {
  PlayProgressBar.prototype.options_.children.push('timeTooltip');
}

Component.registerComponent('PlayProgressBar', PlayProgressBar);
|
|
|
/**
 * The {@link MouseTimeDisplay} component tracks mouse movement over the
 * {@link ProgressControl}. It displays an indicator and a {@link TimeTooltip}
 * indicating the time which is represented by a given point in the
 * {@link ProgressControl}.
 *
 * @extends Component
 */
var MouseTimeDisplay =
/*#__PURE__*/
function (_Component) {
  _inheritsLoose(MouseTimeDisplay, _Component);

  /**
   * Creates an instance of this class.
   *
   * @param {Player} player
   *        The {@link Player} that this class should be attached to.
   *
   * @param {Object} [options]
   *        The key/value store of player options.
   */
  function MouseTimeDisplay(player, options) {
    var _this = _Component.call(this, player, options) || this;

    // Throttle DOM updates to at most one per 25ms of mouse movement.
    _this.update = throttle(bind(_assertThisInitialized(_this), _this.update), 25);

    return _this;
  }

  var _proto = MouseTimeDisplay.prototype;

  /**
   * Create the DOM element for this class.
   *
   * @return {Element}
   *         The element that was created.
   */
  _proto.createEl = function createEl() {
    return _Component.prototype.createEl.call(this, 'div', {
      className: 'vjs-mouse-display'
    });
  }

  /**
   * Enqueues updates to its own DOM as well as the DOM of its
   * {@link TimeTooltip} child.
   *
   * @param {Object} seekBarRect
   *        The `ClientRect` for the {@link SeekBar} element.
   *
   * @param {number} seekBarPoint
   *        A number from 0 to 1, representing a horizontal reference point
   *        from the left edge of the {@link SeekBar}
   */
  ;

  _proto.update = function update(seekBarRect, seekBarPoint) {
    var self = this;
    var hoverTime = seekBarPoint * this.player_.duration();

    // Move the indicator only once the tooltip has rendered its text, so both
    // update within the same animation frame.
    this.getChild('timeTooltip').updateTime(seekBarRect, seekBarPoint, hoverTime, function () {
      self.el_.style.left = seekBarRect.width * seekBarPoint + "px";
    });
  };

  return MouseTimeDisplay;
}(Component);

/**
 * Default options for `MouseTimeDisplay`
 *
 * @type {Object}
 * @private
 */
MouseTimeDisplay.prototype.options_ = {
  children: ['timeTooltip']
};

Component.registerComponent('MouseTimeDisplay', MouseTimeDisplay);
|
|
|
// The number of seconds the SeekBar moves per arrow-key step.
var STEP_SECONDS = 5;

// The multiplier of STEP_SECONDS that PgUp/PgDown move the timeline.
var PAGE_KEY_MULTIPLIER = 12;

// The interval (in ms) at which the bar should update as it progresses.
var UPDATE_REFRESH_INTERVAL = 30;
|
/**
 * Seek bar and container for the progress bars. Uses {@link PlayProgressBar}
 * as its `bar`.
 *
 * @extends Slider
 */
var SeekBar =
/*#__PURE__*/
function (_Slider) {
  _inheritsLoose(SeekBar, _Slider);

  /**
   * Creates an instance of this class.
   *
   * @param {Player} player
   *        The `Player` that this class should be attached to.
   *
   * @param {Object} [options]
   *        The key/value store of player options.
   */
  function SeekBar(player, options) {
    var _this;

    _this = _Slider.call(this, player, options) || this;

    _this.setEventHandlers_();

    return _this;
  }
  /**
   * Sets the event handlers
   *
   * @private
   */


  var _proto = SeekBar.prototype;

  _proto.setEventHandlers_ = function setEventHandlers_() {
    // Throttle so bursts of timeupdate/durationchange coalesce into at most
    // one DOM update per UPDATE_REFRESH_INTERVAL ms.
    this.update = throttle(bind(this, this.update), UPDATE_REFRESH_INTERVAL);
    this.on(this.player_, 'timeupdate', this.update);
    this.on(this.player_, 'ended', this.handleEnded);
    this.on(this.player_, 'durationchange', this.update);

    if (this.player_.liveTracker) {
      this.on(this.player_.liveTracker, 'liveedgechange', this.update);
    } // when playing, let's ensure we smoothly update the play progress bar
    // via an interval


    this.updateInterval = null;
    this.on(this.player_, ['playing'], this.enableInterval_);
    this.on(this.player_, ['ended', 'pause', 'waiting'], this.disableInterval_); // we don't need to update the play progress if the document is hidden,
    // also, this causes the CPU to spike and eventually crash the page on IE11.

    if ('hidden' in document && 'visibilityState' in document) {
      this.on(document, 'visibilitychange', this.toggleVisibility_);
    }
  };

  // Suspend interval-driven updates while the page is hidden; catch up
  // immediately when it becomes visible again.
  _proto.toggleVisibility_ = function toggleVisibility_(e) {
    if (document.hidden) {
      this.disableInterval_(e);
    } else {
      this.enableInterval_(); // we just switched back to the page and someone may be looking, so, update ASAP

      this.requestAnimationFrame(this.update);
    }
  };

  _proto.enableInterval_ = function enableInterval_() {
    var _this2 = this;

    // Clear any previous interval so only one is ever running.
    this.clearInterval(this.updateInterval);
    this.updateInterval = this.setInterval(function () {
      _this2.requestAnimationFrame(_this2.update);
    }, UPDATE_REFRESH_INTERVAL);
  };

  _proto.disableInterval_ = function disableInterval_(e) {
    // Keep updating during live playback unless playback actually ended.
    if (this.player_.liveTracker && this.player_.liveTracker.isLive() && e.type !== 'ended') {
      return;
    }

    this.clearInterval(this.updateInterval);
  }
  /**
   * Create the `Component`'s DOM element
   *
   * @return {Element}
   *         The element that was created.
   */
  ;

  _proto.createEl = function createEl$$1() {
    return _Slider.prototype.createEl.call(this, 'div', {
      className: 'vjs-progress-holder'
    }, {
      'aria-label': this.localize('Progress Bar')
    });
  }
  /**
   * This function updates the play progress bar and accessibility
   * attributes to whatever is passed in.
   *
   * @param {number} currentTime
   *        The currentTime value that should be used for accessibility
   *
   * @param {number} percent
   *        The percentage as a decimal that the bar should be filled from 0-1.
   *
   * @private
   */
  ;

  _proto.update_ = function update_(currentTime, percent) {
    var liveTracker = this.player_.liveTracker;
    var duration = this.player_.duration();

    if (liveTracker && liveTracker.isLive()) {
      duration = this.player_.liveTracker.liveCurrentTime();
    } // machine readable value of progress bar (percentage complete)


    this.el_.setAttribute('aria-valuenow', (percent * 100).toFixed(2)); // human readable value of progress bar (time complete)

    this.el_.setAttribute('aria-valuetext', this.localize('progress bar timing: currentTime={1} duration={2}', [formatTime(currentTime, duration), formatTime(duration, duration)], '{1} of {2}')); // Update the `PlayProgressBar`.

    if (this.bar) {
      this.bar.update(getBoundingClientRect(this.el_), percent);
    }
  }
  /**
   * Update the seek bar's UI.
   *
   * @param {EventTarget~Event} [event]
   *        The `timeupdate` or `ended` event that caused this to run.
   *
   * @listens Player#timeupdate
   *
   * @return {number}
   *         The current percent at a number from 0-1
   */
  ;

  _proto.update = function update(event) {
    // if the offsetParent is null, then this element is hidden, in which case
    // we don't need to update it.
    if (this.el().offsetParent === null) {
      return;
    }

    var percent = _Slider.prototype.update.call(this);

    this.update_(this.getCurrentTime_(), percent);
    return percent;
  }
  /**
   * Get the value of current time but allows for smooth scrubbing,
   * when player can't keep up.
   *
   * @return {number}
   *         The current time value to display
   *
   * @private
   */
  ;

  _proto.getCurrentTime_ = function getCurrentTime_() {
    return this.player_.scrubbing() ? this.player_.getCache().currentTime : this.player_.currentTime();
  }
  /**
   * We want the seek bar to be full on ended
   * no matter what the actual internal values are. so we force it.
   *
   * @param {EventTarget~Event} [event]
   *        The `timeupdate` or `ended` event that caused this to run.
   *
   * @listens Player#ended
   */
  ;

  _proto.handleEnded = function handleEnded(event) {
    this.update_(this.player_.duration(), 1);
  }
  /**
   * Get the percentage of media played so far.
   *
   * @return {number}
   *         The percentage of media played so far (0 to 1).
   */
  ;

  _proto.getPercent = function getPercent() {
    var currentTime = this.getCurrentTime_();
    var percent;
    var liveTracker = this.player_.liveTracker;

    if (liveTracker && liveTracker.isLive()) {
      percent = (currentTime - liveTracker.seekableStart()) / liveTracker.liveWindow(); // prevent the percent from changing at the live edge

      if (liveTracker.atLiveEdge()) {
        percent = 1;
      }
    } else {
      percent = currentTime / this.player_.duration();
    }

    // Clamp to 1; coerce NaN (e.g. unknown duration) to 0.
    return percent >= 1 ? 1 : percent || 0;
  }
  /**
   * Handle mouse down on seek bar
   *
   * @param {EventTarget~Event} event
   *        The `mousedown` event that caused this to run.
   *
   * @listens mousedown
   */
  ;

  _proto.handleMouseDown = function handleMouseDown(event) {
    if (!isSingleLeftClick(event)) {
      return;
    } // Stop event propagation to prevent double fire in progress-control.js


    event.stopPropagation();
    this.player_.scrubbing(true);
    // Remember whether we were playing so handleMouseUp can resume playback.
    this.videoWasPlaying = !this.player_.paused();
    this.player_.pause();

    _Slider.prototype.handleMouseDown.call(this, event);
  }
  /**
   * Handle mouse move on seek bar
   *
   * @param {EventTarget~Event} event
   *        The `mousemove` event that caused this to run.
   *
   * @listens mousemove
   */
  ;

  _proto.handleMouseMove = function handleMouseMove(event) {
    if (!isSingleLeftClick(event)) {
      return;
    }

    var newTime;
    var distance = this.calculateDistance(event);
    var liveTracker = this.player_.liveTracker;

    if (!liveTracker || !liveTracker.isLive()) {
      newTime = distance * this.player_.duration(); // Don't let video end while scrubbing.

      if (newTime === this.player_.duration()) {
        newTime = newTime - 0.1;
      }
    } else {
      var seekableStart = liveTracker.seekableStart();
      var seekableEnd = liveTracker.liveCurrentTime();
      newTime = seekableStart + distance * liveTracker.liveWindow(); // Don't let video end while scrubbing.

      if (newTime >= seekableEnd) {
        newTime = seekableEnd;
      } // Compensate for precision differences so that currentTime is not less
      // than seekable start


      if (newTime <= seekableStart) {
        newTime = seekableStart + 0.1;
      } // On android seekableEnd can be Infinity sometimes,
      // this will cause newTime to be Infinity, which is
      // not a valid currentTime.


      if (newTime === Infinity) {
        return;
      }
    } // Set new time (tell player to seek to new time)


    this.player_.currentTime(newTime);
  };

  // Re-enable this control and show the mouse-time tooltip, when present
  // (it is not created on mobile devices).
  _proto.enable = function enable() {
    _Slider.prototype.enable.call(this);

    var mouseTimeDisplay = this.getChild('mouseTimeDisplay');

    if (!mouseTimeDisplay) {
      return;
    }

    mouseTimeDisplay.show();
  };

  // Disable this control and hide the mouse-time tooltip, when present.
  _proto.disable = function disable() {
    _Slider.prototype.disable.call(this);

    var mouseTimeDisplay = this.getChild('mouseTimeDisplay');

    if (!mouseTimeDisplay) {
      return;
    }

    mouseTimeDisplay.hide();
  }
  /**
   * Handle mouse up on seek bar
   *
   * @param {EventTarget~Event} event
   *        The `mouseup` event that caused this to run.
   *
   * @listens mouseup
   */
  ;

  _proto.handleMouseUp = function handleMouseUp(event) {
    _Slider.prototype.handleMouseUp.call(this, event); // Stop event propagation to prevent double fire in progress-control.js


    if (event) {
      event.stopPropagation();
    }

    this.player_.scrubbing(false);
    /**
     * Trigger timeupdate because we're done seeking and the time has changed.
     * This is particularly useful for if the player is paused to time the time displays.
     *
     * @event Tech#timeupdate
     * @type {EventTarget~Event}
     */

    this.player_.trigger({
      type: 'timeupdate',
      target: this,
      manuallyTriggered: true
    });

    if (this.videoWasPlaying) {
      // silencePromise avoids an unhandled rejection if play() is interrupted.
      silencePromise(this.player_.play());
    }
  }
  /**
   * Move more quickly fast forward for keyboard-only users
   */
  ;

  _proto.stepForward = function stepForward() {
    this.player_.currentTime(this.player_.currentTime() + STEP_SECONDS);
  }
  /**
   * Move more quickly rewind for keyboard-only users
   */
  ;

  _proto.stepBack = function stepBack() {
    this.player_.currentTime(this.player_.currentTime() - STEP_SECONDS);
  }
  /**
   * Toggles the playback state of the player
   * This gets called when enter or space is used on the seekbar
   *
   * @param {EventTarget~Event} event
   *        The `keydown` event that caused this function to be called
   *
   */
  ;

  _proto.handleAction = function handleAction(event) {
    if (this.player_.paused()) {
      this.player_.play();
    } else {
      this.player_.pause();
    }
  }
  /**
   * Called when this SeekBar has focus and a key gets pressed down.
   * Supports the following keys:
   *
   *   Space or Enter key fire a click event
   *   Home key moves to start of the timeline
   *   End key moves to end of the timeline
   *   Digit "0" through "9" keys move to 0%, 10% ... 80%, 90% of the timeline
   *   PageDown key moves back a larger step than ArrowDown
   *   PageUp key moves forward a large step
   *
   * @param {EventTarget~Event} event
   *        The `keydown` event that caused this function to be called.
   *
   * @listens keydown
   */
  ;

  _proto.handleKeyDown = function handleKeyDown(event) {
    if (keycode.isEventKey(event, 'Space') || keycode.isEventKey(event, 'Enter')) {
      event.preventDefault();
      event.stopPropagation();
      this.handleAction(event);
    } else if (keycode.isEventKey(event, 'Home')) {
      event.preventDefault();
      event.stopPropagation();
      this.player_.currentTime(0);
    } else if (keycode.isEventKey(event, 'End')) {
      event.preventDefault();
      event.stopPropagation();
      this.player_.currentTime(this.player_.duration());
    } else if (/^[0-9]$/.test(keycode(event))) {
      event.preventDefault();
      event.stopPropagation();
      // Map digit N onto N*10% of the duration.
      var gotoFraction = (keycode.codes[keycode(event)] - keycode.codes['0']) * 10.0 / 100.0;
      this.player_.currentTime(this.player_.duration() * gotoFraction);
    } else if (keycode.isEventKey(event, 'PgDn')) {
      event.preventDefault();
      event.stopPropagation();
      this.player_.currentTime(this.player_.currentTime() - STEP_SECONDS * PAGE_KEY_MULTIPLIER);
    } else if (keycode.isEventKey(event, 'PgUp')) {
      event.preventDefault();
      event.stopPropagation();
      this.player_.currentTime(this.player_.currentTime() + STEP_SECONDS * PAGE_KEY_MULTIPLIER);
    } else {
      // Pass keydown handling up for unsupported keys
      _Slider.prototype.handleKeyDown.call(this, event);
    }
  };

  return SeekBar;
}(Slider);
/**
 * Default options for the `SeekBar`
 *
 * @type {Object}
 * @private
 */


SeekBar.prototype.options_ = {
  children: ['loadProgressBar', 'playProgressBar'],
  barName: 'playProgressBar'
}; // MouseTimeDisplay tooltips should not be added to a player on mobile devices


if (!IS_IOS && !IS_ANDROID) {
  SeekBar.prototype.options_.children.splice(1, 0, 'mouseTimeDisplay');
}

Component.registerComponent('SeekBar', SeekBar);
|
|
|
/** |
|
* The Progress Control component contains the seek bar, load progress, |
|
* and play progress. |
|
* |
|
* @extends Component |
|
*/ |
|
|
|
var ProgressControl = |
|
/*#__PURE__*/ |
|
function (_Component) { |
|
_inheritsLoose(ProgressControl, _Component); |
|
|
|
/** |
|
* Creates an instance of this class. |
|
* |
|
* @param {Player} player |
|
* The `Player` that this class should be attached to. |
|
* |
|
* @param {Object} [options] |
|
* The key/value store of player options. |
|
*/ |
|
function ProgressControl(player, options) { |
|
var _this; |
|
|
|
_this = _Component.call(this, player, options) || this; |
|
_this.handleMouseMove = throttle(bind(_assertThisInitialized(_assertThisInitialized(_this)), _this.handleMouseMove), 25); |
|
_this.throttledHandleMouseSeek = throttle(bind(_assertThisInitialized(_assertThisInitialized(_this)), _this.handleMouseSeek), 25); |
|
|
|
_this.enable(); |
|
|
|
return _this; |
|
} |
|
/** |
|
* Create the `Component`'s DOM element |
|
* |
|
* @return {Element} |
|
* The element that was created. |
|
*/ |
|
|
|
|
|
var _proto = ProgressControl.prototype; |
|
|
|
_proto.createEl = function createEl$$1() { |
|
return _Component.prototype.createEl.call(this, 'div', { |
|
className: 'vjs-progress-control vjs-control' |
|
}); |
|
} |
|
/** |
|
* When the mouse moves over the `ProgressControl`, the pointer position |
|
* gets passed down to the `MouseTimeDisplay` component. |
|
* |
|
* @param {EventTarget~Event} event |
|
* The `mousemove` event that caused this function to run. |
|
* |
|
* @listen mousemove |
|
*/ |
|
; |
|
|
|
_proto.handleMouseMove = function handleMouseMove(event) { |
|
var seekBar = this.getChild('seekBar'); |
|
|
|
if (seekBar) { |
|
var mouseTimeDisplay = seekBar.getChild('mouseTimeDisplay'); |
|
var seekBarEl = seekBar.el(); |
|
var seekBarRect = getBoundingClientRect(seekBarEl); |
|
var seekBarPoint = getPointerPosition(seekBarEl, event).x; // The default skin has a gap on either side of the `SeekBar`. This means |
|
// that it's possible to trigger this behavior outside the boundaries of |
|
// the `SeekBar`. This ensures we stay within it at all times. |
|
|
|
if (seekBarPoint > 1) { |
|
seekBarPoint = 1; |
|
} else if (seekBarPoint < 0) { |
|
seekBarPoint = 0; |
|
} |
|
|
|
if (mouseTimeDisplay) { |
|
mouseTimeDisplay.update(seekBarRect, seekBarPoint); |
|
} |
|
} |
|
} |
|
/** |
|
* A throttled version of the {@link ProgressControl#handleMouseSeek} listener. |
|
* |
|
* @method ProgressControl#throttledHandleMouseSeek |
|
* @param {EventTarget~Event} event |
|
* The `mousemove` event that caused this function to run. |
|
* |
|
* @listen mousemove |
|
* @listen touchmove |
|
*/ |
|
|
|
/** |
|
* Handle `mousemove` or `touchmove` events on the `ProgressControl`. |
|
* |
|
* @param {EventTarget~Event} event |
|
* `mousedown` or `touchstart` event that triggered this function |
|
* |
|
* @listens mousemove |
|
* @listens touchmove |
|
*/ |
|
; |
|
|
|
_proto.handleMouseSeek = function handleMouseSeek(event) {
  var seekBar = this.getChild('seekBar');

  // Forward the pointer movement to the SeekBar, which performs the seek.
  if (seekBar) {
    seekBar.handleMouseMove(event);
  }
}
/**
 * Whether controls are currently enabled for this progress control.
 *
 * @return {boolean}
 *         true if controls are enabled, false otherwise
 */
;
|
|
|
_proto.enabled = function enabled() {
  return this.enabled_;
}
/**
 * Disable all controls on the progress control and its children.
 */
;
|
|
|
_proto.disable = function disable() {
  // Always propagate to children, even when this control is already disabled.
  this.children().forEach(function (child) {
    return child.disable && child.disable();
  });

  if (!this.enabled()) {
    return;
  }

  this.off(['mousedown', 'touchstart'], this.handleMouseDown);
  this.off(this.el_, 'mousemove', this.handleMouseMove);
  // End any in-progress drag and detach its document-level listeners.
  this.handleMouseUp();
  this.addClass('disabled');
  this.enabled_ = false;
}
/**
 * Enable all controls on the progress control and its children.
 */
;
|
|
|
_proto.enable = function enable() {
  // Always propagate to children, even when this control is already enabled.
  this.children().forEach(function (child) {
    return child.enable && child.enable();
  });

  if (this.enabled()) {
    return;
  }

  this.on(['mousedown', 'touchstart'], this.handleMouseDown);
  this.on(this.el_, 'mousemove', this.handleMouseMove);
  this.removeClass('disabled');
  this.enabled_ = true;
}
/**
 * Handle `mousedown` or `touchstart` events on the `ProgressControl`.
 *
 * @param {EventTarget~Event} event
 *        `mousedown` or `touchstart` event that triggered this function
 *
 * @listens mousedown
 * @listens touchstart
 */
;
|
|
|
_proto.handleMouseDown = function handleMouseDown(event) {
  var doc = this.el_.ownerDocument;
  var seekBar = this.getChild('seekBar');

  if (seekBar) {
    seekBar.handleMouseDown(event);
  }

  // Track the drag on the whole document so seeking continues even when the
  // pointer leaves the control; these are released in `handleMouseUp`.
  this.on(doc, 'mousemove', this.throttledHandleMouseSeek);
  this.on(doc, 'touchmove', this.throttledHandleMouseSeek);
  this.on(doc, 'mouseup', this.handleMouseUp);
  this.on(doc, 'touchend', this.handleMouseUp);
}
/**
 * Handle `mouseup` or `touchend` events on the `ProgressControl`.
 *
 * @param {EventTarget~Event} event
 *        `mouseup` or `touchend` event that triggered this function.
 *
 * @listens touchend
 * @listens mouseup
 */
;
|
|
|
_proto.handleMouseUp = function handleMouseUp(event) {
  var doc = this.el_.ownerDocument;
  var seekBar = this.getChild('seekBar');

  if (seekBar) {
    seekBar.handleMouseUp(event);
  }

  // Release the document-level listeners attached in `handleMouseDown`.
  this.off(doc, 'mousemove', this.throttledHandleMouseSeek);
  this.off(doc, 'touchmove', this.throttledHandleMouseSeek);
  this.off(doc, 'mouseup', this.handleMouseUp);
  this.off(doc, 'touchend', this.handleMouseUp);
};
|
|
|
return ProgressControl; |
|
}(Component); |
|
/**
 * Default options for `ProgressControl`
 *
 * @type {Object}
 * @private
 */
ProgressControl.prototype.options_ = {
  children: ['seekBar']
};

Component.registerComponent('ProgressControl', ProgressControl);
|
|
|
/** |
|
* Toggle fullscreen video |
|
* |
|
* @extends Button |
|
*/ |
|
|
|
var FullscreenToggle =
/*#__PURE__*/
function (_Button) {
  _inheritsLoose(FullscreenToggle, _Button);

  /**
   * Creates an instance of this class.
   *
   * @param {Player} player
   *        The `Player` that this class should be attached to.
   *
   * @param {Object} [options]
   *        The key/value store of player options.
   */
  function FullscreenToggle(player, options) {
    var _this = _Button.call(this, player, options) || this;

    // Keep the control text in sync with the player's fullscreen state.
    _this.on(player, 'fullscreenchange', _this.handleFullscreenChange);

    // Disable the button entirely when the browser reports that fullscreen
    // is unavailable.
    if (document[FullscreenApi.fullscreenEnabled] === false) {
      _this.disable();
    }

    return _this;
  }

  var _proto = FullscreenToggle.prototype;

  /**
   * Builds the default DOM `className`.
   *
   * @return {string}
   *         The DOM `className` for this object.
   */
  _proto.buildCSSClass = function buildCSSClass() {
    return "vjs-fullscreen-control " + _Button.prototype.buildCSSClass.call(this);
  };

  /**
   * Handles fullscreenchange on the player and change control text accordingly.
   *
   * @param {EventTarget~Event} [event]
   *        The {@link Player#fullscreenchange} event that caused this function to be
   *        called.
   *
   * @listens Player#fullscreenchange
   */
  _proto.handleFullscreenChange = function handleFullscreenChange(event) {
    // Label the button with the action a click would perform next.
    this.controlText(this.player_.isFullscreen() ? 'Non-Fullscreen' : 'Fullscreen');
  };

  /**
   * This gets called when an `FullscreenToggle` is "clicked". See
   * {@link ClickableComponent} for more detailed information on what a click can be.
   *
   * @param {EventTarget~Event} [event]
   *        The `keydown`, `tap`, or `click` event that caused this function to be
   *        called.
   *
   * @listens tap
   * @listens click
   */
  _proto.handleClick = function handleClick(event) {
    // Toggle: leave fullscreen when in it, enter it otherwise.
    if (this.player_.isFullscreen()) {
      this.player_.exitFullscreen();
    } else {
      this.player_.requestFullscreen();
    }
  };

  return FullscreenToggle;
}(Button);
|
/**
 * The text that should display over the `FullscreenToggle`s controls. Added for localization.
 *
 * @type {string}
 * @private
 */
FullscreenToggle.prototype.controlText_ = 'Fullscreen';
Component.registerComponent('FullscreenToggle', FullscreenToggle);
|
|
|
/** |
|
* Check if volume control is supported and if it isn't hide the |
|
* `Component` that was passed using the `vjs-hidden` class. |
|
* |
|
* @param {Component} self |
|
* The component that should be hidden if volume is unsupported |
|
* |
|
* @param {Player} player |
|
* A reference to the player |
|
* |
|
* @private |
|
*/ |
|
/**
 * Check if volume control is supported and if it isn't hide the
 * `Component` that was passed using the `vjs-hidden` class.
 *
 * @param {Component} self
 *        The component that should be hidden if volume is unsupported
 *
 * @param {Player} player
 *        A reference to the player
 *
 * @private
 */
var checkVolumeSupport = function checkVolumeSupport(self, player) {
  // Sync the component's visibility with the current tech's volume support.
  var syncVisibility = function syncVisibility() {
    if (player.tech_.featuresVolumeControl) {
      self.removeClass('vjs-hidden');
    } else {
      self.addClass('vjs-hidden');
    }
  };

  // Hide immediately when a tech is already loaded and lacks volume control.
  if (player.tech_ && !player.tech_.featuresVolumeControl) {
    self.addClass('vjs-hidden');
  }

  // Re-evaluate whenever a new source (and possibly a new tech) loads.
  self.on(player, 'loadstart', syncVisibility);
};
|
|
|
/** |
|
* Shows volume level |
|
* |
|
* @extends Component |
|
*/ |
|
|
|
var VolumeLevel =
/*#__PURE__*/
function (_Component) {
  _inheritsLoose(VolumeLevel, _Component);

  function VolumeLevel() {
    // No construction logic of its own; defer entirely to Component.
    return _Component.apply(this, arguments) || this;
  }

  var _proto = VolumeLevel.prototype;

  /**
   * Create the `Component`'s DOM element
   *
   * @return {Element}
   *         The element that was created.
   */
  _proto.createEl = function createEl() {
    return _Component.prototype.createEl.call(this, 'div', {
      className: 'vjs-volume-level',
      innerHTML: '<span class="vjs-control-text"></span>'
    });
  };

  return VolumeLevel;
}(Component);

Component.registerComponent('VolumeLevel', VolumeLevel);
|
|
|
/** |
|
* The bar that contains the volume level and can be clicked on to adjust the level |
|
* |
|
* @extends Slider |
|
*/ |
|
|
|
var VolumeBar =
/*#__PURE__*/
function (_Slider) {
  _inheritsLoose(VolumeBar, _Slider);

  /**
   * Creates an instance of this class.
   *
   * @param {Player} player
   *        The `Player` that this class should be attached to.
   *
   * @param {Object} [options]
   *        The key/value store of player options.
   */
  function VolumeBar(player, options) {
    var _this;

    _this = _Slider.call(this, player, options) || this;

    // Capture the volume at drag start so MuteToggle can restore it later.
    _this.on('slideractive', _this.updateLastVolume_);

    _this.on(player, 'volumechange', _this.updateARIAAttributes);

    player.ready(function () {
      return _this.updateARIAAttributes();
    });
    return _this;
  }

  var _proto = VolumeBar.prototype;

  /**
   * Create the `Component`'s DOM element
   *
   * @return {Element}
   *         The element that was created.
   */
  _proto.createEl = function createEl$$1() {
    return _Slider.prototype.createEl.call(this, 'div', {
      className: 'vjs-volume-bar vjs-slider-bar'
    }, {
      'aria-label': this.localize('Volume Level'),
      'aria-live': 'polite'
    });
  }

  /**
   * Handle mouse down on volume bar
   *
   * @param {EventTarget~Event} event
   *        The `mousedown` event that caused this to run.
   *
   * @listens mousedown
   */
  ;

  _proto.handleMouseDown = function handleMouseDown(event) {
    // Ignore right/middle clicks and multi-touch gestures.
    if (!isSingleLeftClick(event)) {
      return;
    }

    _Slider.prototype.handleMouseDown.call(this, event);
  }

  /**
   * Handle movement events on the {@link VolumeMenuButton}.
   *
   * @param {EventTarget~Event} event
   *        The event that caused this function to run.
   *
   * @listens mousemove
   */
  ;

  _proto.handleMouseMove = function handleMouseMove(event) {
    if (!isSingleLeftClick(event)) {
      return;
    }

    // Dragging the bar implies the user wants sound, so unmute first.
    this.checkMuted();
    this.player_.volume(this.calculateDistance(event));
  }

  /**
   * If the player is muted unmute it.
   */
  ;

  _proto.checkMuted = function checkMuted() {
    if (this.player_.muted()) {
      this.player_.muted(false);
    }
  }

  /**
   * Get percent of volume level
   *
   * @return {number}
   *         Volume level percent as a decimal number.
   */
  ;

  _proto.getPercent = function getPercent() {
    // Muted is presented as an empty bar regardless of the stored volume.
    if (this.player_.muted()) {
      return 0;
    }

    return this.player_.volume();
  }

  /**
   * Increase volume level for keyboard users
   */
  ;

  _proto.stepForward = function stepForward() {
    this.checkMuted();
    this.player_.volume(this.player_.volume() + 0.1);
  }

  /**
   * Decrease volume level for keyboard users
   */
  ;

  _proto.stepBack = function stepBack() {
    this.checkMuted();
    this.player_.volume(this.player_.volume() - 0.1);
  }

  /**
   * Update ARIA accessibility attributes
   *
   * @param {EventTarget~Event} [event]
   *        The `volumechange` event that caused this function to run.
   *
   * @listens Player#volumechange
   */
  ;

  _proto.updateARIAAttributes = function updateARIAAttributes(event) {
    // Report 0 while muted so assistive technology matches the visual state.
    var ariaValue = this.player_.muted() ? 0 : this.volumeAsPercentage_();
    this.el_.setAttribute('aria-valuenow', ariaValue);
    this.el_.setAttribute('aria-valuetext', ariaValue + '%');
  }

  /**
   * Returns the current value of the player volume as a percentage
   *
   * @private
   */
  ;

  _proto.volumeAsPercentage_ = function volumeAsPercentage_() {
    return Math.round(this.player_.volume() * 100);
  }

  /**
   * When user starts dragging the VolumeBar, store the volume and listen for
   * the end of the drag. When the drag ends, if the volume was set to zero,
   * set lastVolume to the stored volume.
   *
   * @listens slideractive
   * @private
   */
  ;

  _proto.updateLastVolume_ = function updateLastVolume_() {
    var _this2 = this;

    var volumeBeforeDrag = this.player_.volume();
    this.one('sliderinactive', function () {
      // Dragging down to zero acts like a mute; keep the pre-drag volume
      // so unmuting can restore it.
      if (_this2.player_.volume() === 0) {
        _this2.player_.lastVolume_(volumeBeforeDrag);
      }
    });
  };

  return VolumeBar;
}(Slider);

/**
 * Default options for the `VolumeBar`
 *
 * @type {Object}
 * @private
 */
VolumeBar.prototype.options_ = {
  children: ['volumeLevel'],
  barName: 'volumeLevel'
};

/**
 * Call the update event for this Slider when this event happens on the player.
 *
 * @type {string}
 */
VolumeBar.prototype.playerEvent = 'volumechange';
Component.registerComponent('VolumeBar', VolumeBar);
|
|
|
/** |
|
* The component for controlling the volume level |
|
* |
|
* @extends Component |
|
*/ |
|
|
|
var VolumeControl =
/*#__PURE__*/
function (_Component) {
  _inheritsLoose(VolumeControl, _Component);

  /**
   * Creates an instance of this class.
   *
   * @param {Player} player
   *        The `Player` that this class should be attached to.
   *
   * @param {Object} [options={}]
   *        The key/value store of player options.
   */
  function VolumeControl(player, options) {
    var _this;

    if (options === void 0) {
      options = {};
    }

    options.vertical = options.vertical || false;

    // Pass the vertical option down to the VolumeBar if
    // the VolumeBar is turned on.
    if (typeof options.volumeBar === 'undefined' || isPlain(options.volumeBar)) {
      options.volumeBar = options.volumeBar || {};
      options.volumeBar.vertical = options.vertical;
    }

    _this = _Component.call(this, player, options) || this;

    // hide this control if volume support is missing
    checkVolumeSupport(_assertThisInitialized(_assertThisInitialized(_this)), player);
    // Throttle document-level mousemove handling to one call per 25ms.
    _this.throttledHandleMouseMove = throttle(bind(_assertThisInitialized(_assertThisInitialized(_this)), _this.handleMouseMove), 25);

    _this.on('mousedown', _this.handleMouseDown);

    _this.on('touchstart', _this.handleMouseDown);

    // while the slider is active (the mouse has been pressed down and
    // is dragging) or in focus we do not want to hide the VolumeBar
    _this.on(_this.volumeBar, ['focus', 'slideractive'], function () {
      _this.volumeBar.addClass('vjs-slider-active');

      _this.addClass('vjs-slider-active');

      _this.trigger('slideractive');
    });

    _this.on(_this.volumeBar, ['blur', 'sliderinactive'], function () {
      _this.volumeBar.removeClass('vjs-slider-active');

      _this.removeClass('vjs-slider-active');

      _this.trigger('sliderinactive');
    });

    return _this;
  }

  var _proto = VolumeControl.prototype;

  /**
   * Create the `Component`'s DOM element
   *
   * @return {Element}
   *         The element that was created.
   */
  _proto.createEl = function createEl() {
    var orientationClass = 'vjs-volume-horizontal';

    if (this.options_.vertical) {
      orientationClass = 'vjs-volume-vertical';
    }

    return _Component.prototype.createEl.call(this, 'div', {
      className: "vjs-volume-control vjs-control " + orientationClass
    });
  }

  /**
   * Handle `mousedown` or `touchstart` events on the `VolumeControl`.
   *
   * @param {EventTarget~Event} event
   *        `mousedown` or `touchstart` event that triggered this function
   *
   * @listens mousedown
   * @listens touchstart
   */
  ;

  _proto.handleMouseDown = function handleMouseDown(event) {
    var doc = this.el_.ownerDocument;
    // Listen on the whole document so dragging outside the control still works;
    // these are released in `handleMouseUp`.
    this.on(doc, 'mousemove', this.throttledHandleMouseMove);
    this.on(doc, 'touchmove', this.throttledHandleMouseMove);
    this.on(doc, 'mouseup', this.handleMouseUp);
    this.on(doc, 'touchend', this.handleMouseUp);
  }

  /**
   * Handle `mouseup` or `touchend` events on the `VolumeControl`.
   *
   * @param {EventTarget~Event} event
   *        `mouseup` or `touchend` event that triggered this function.
   *
   * @listens touchend
   * @listens mouseup
   */
  ;

  _proto.handleMouseUp = function handleMouseUp(event) {
    var doc = this.el_.ownerDocument;
    this.off(doc, 'mousemove', this.throttledHandleMouseMove);
    this.off(doc, 'touchmove', this.throttledHandleMouseMove);
    this.off(doc, 'mouseup', this.handleMouseUp);
    this.off(doc, 'touchend', this.handleMouseUp);
  }

  /**
   * Handle `mousemove` or `touchmove` events on the `VolumeControl` by
   * forwarding them to the `VolumeBar`.
   *
   * @param {EventTarget~Event} event
   *        `mousemove` or `touchmove` event that triggered this function
   *
   * @listens mousemove
   * @listens touchmove
   */
  ;

  _proto.handleMouseMove = function handleMouseMove(event) {
    this.volumeBar.handleMouseMove(event);
  };

  return VolumeControl;
}(Component);

/**
 * Default options for the `VolumeControl`
 *
 * @type {Object}
 * @private
 */
VolumeControl.prototype.options_ = {
  children: ['volumeBar']
};
Component.registerComponent('VolumeControl', VolumeControl);
|
|
|
/** |
|
* Check if muting volume is supported and if it isn't hide the mute toggle |
|
* button. |
|
* |
|
* @param {Component} self |
|
* A reference to the mute toggle button |
|
* |
|
* @param {Player} player |
|
* A reference to the player |
|
* |
|
* @private |
|
*/ |
|
/**
 * Check if muting volume is supported and if it isn't hide the mute toggle
 * button.
 *
 * @param {Component} self
 *        A reference to the mute toggle button
 *
 * @param {Player} player
 *        A reference to the player
 *
 * @private
 */
var checkMuteSupport = function checkMuteSupport(self, player) {
  // Sync the button's visibility with the current tech's mute support.
  var syncVisibility = function syncVisibility() {
    if (player.tech_.featuresMuteControl) {
      self.removeClass('vjs-hidden');
    } else {
      self.addClass('vjs-hidden');
    }
  };

  // Hide immediately when a tech is already loaded and lacks mute control.
  if (player.tech_ && !player.tech_.featuresMuteControl) {
    self.addClass('vjs-hidden');
  }

  // Re-evaluate whenever a new source (and possibly a new tech) loads.
  self.on(player, 'loadstart', syncVisibility);
};
|
|
|
/** |
|
* A button component for muting the audio. |
|
* |
|
* @extends Button |
|
*/ |
|
|
|
var MuteToggle =
/*#__PURE__*/
function (_Button) {
  _inheritsLoose(MuteToggle, _Button);

  /**
   * Creates an instance of this class.
   *
   * @param {Player} player
   *        The `Player` that this class should be attached to.
   *
   * @param {Object} [options]
   *        The key/value store of player options.
   */
  function MuteToggle(player, options) {
    var _this;

    _this = _Button.call(this, player, options) || this;

    // hide this control if volume support is missing
    checkMuteSupport(_assertThisInitialized(_assertThisInitialized(_this)), player);

    _this.on(player, ['loadstart', 'volumechange'], _this.update);

    return _this;
  }

  var _proto = MuteToggle.prototype;

  /**
   * Builds the default DOM `className`.
   *
   * @return {string}
   *         The DOM `className` for this object.
   */
  _proto.buildCSSClass = function buildCSSClass() {
    return "vjs-mute-control " + _Button.prototype.buildCSSClass.call(this);
  }

  /**
   * This gets called when an `MuteToggle` is "clicked". See
   * {@link ClickableComponent} for more detailed information on what a click can be.
   *
   * @param {EventTarget~Event} [event]
   *        The `keydown`, `tap`, or `click` event that caused this function to be
   *        called.
   *
   * @listens tap
   * @listens click
   */
  ;

  _proto.handleClick = function handleClick(event) {
    var vol = this.player_.volume();
    var lastVolume = this.player_.lastVolume_();

    if (vol === 0) {
      // At zero volume, restore the volume held before it hit zero; never
      // restore below 0.1 so unmuting always produces audible output.
      var volumeToSet = lastVolume < 0.1 ? 0.1 : lastVolume;
      this.player_.volume(volumeToSet);
      this.player_.muted(false);
    } else {
      this.player_.muted(this.player_.muted() ? false : true);
    }
  }

  /**
   * Update the `MuteToggle` button based on the state of `volume` and `muted`
   * on the player.
   *
   * @param {EventTarget~Event} [event]
   *        The {@link Player#loadstart} event if this function was called
   *        through an event.
   *
   * @listens Player#loadstart
   * @listens Player#volumechange
   */
  ;

  _proto.update = function update(event) {
    this.updateIcon_();
    this.updateControlText_();
  }

  /**
   * Update the appearance of the `MuteToggle` icon.
   *
   * Possible states (given `level` variable below):
   * - 0: crossed out
   * - 1: zero bars of volume
   * - 2: one bar of volume
   * - 3: two bars of volume
   *
   * @private
   */
  ;

  _proto.updateIcon_ = function updateIcon_() {
    var vol = this.player_.volume();
    var level = 3;

    // in iOS when a player is loaded with muted attribute
    // and volume is changed with a native mute button
    // we want to make sure muted state is updated
    if (IS_IOS && this.player_.tech_ && this.player_.tech_.el_) {
      this.player_.muted(this.player_.tech_.el_.muted);
    }

    if (vol === 0 || this.player_.muted()) {
      level = 0;
    } else if (vol < 0.33) {
      level = 1;
    } else if (vol < 0.67) {
      level = 2;
    }

    // TODO improve muted icon classes
    for (var i = 0; i < 4; i++) {
      removeClass(this.el_, "vjs-vol-" + i);
    }

    addClass(this.el_, "vjs-vol-" + level);
  }

  /**
   * If `muted` has changed on the player, update the control text
   * (`title` attribute on `vjs-mute-control` element and content of
   * `vjs-control-text` element).
   *
   * @private
   */
  ;

  _proto.updateControlText_ = function updateControlText_() {
    var soundOff = this.player_.muted() || this.player_.volume() === 0;
    var text = soundOff ? 'Unmute' : 'Mute';

    if (this.controlText() !== text) {
      this.controlText(text);
    }
  };

  return MuteToggle;
}(Button);

/**
 * The text that should display over the `MuteToggle`s controls. Added for localization.
 *
 * @type {string}
 * @private
 */
MuteToggle.prototype.controlText_ = 'Mute';
Component.registerComponent('MuteToggle', MuteToggle);
|
|
|
/** |
|
* A Component to contain the MuteToggle and VolumeControl so that |
|
* they can work together. |
|
* |
|
* @extends Component |
|
*/ |
|
|
|
var VolumePanel =
/*#__PURE__*/
function (_Component) {
  _inheritsLoose(VolumePanel, _Component);

  /**
   * Creates an instance of this class.
   *
   * @param {Player} player
   *        The `Player` that this class should be attached to.
   *
   * @param {Object} [options={}]
   *        The key/value store of player options.
   */
  function VolumePanel(player, options) {
    var _this;

    if (options === void 0) {
      options = {};
    }

    // Default to the inline (horizontal) layout. The previous code assigned
    // `options.inline` to itself when it was defined; a plain default is
    // equivalent and clearer.
    if (typeof options.inline === 'undefined') {
      options.inline = true;
    }

    // pass the inline option down to the VolumeControl as vertical if
    // the VolumeControl is on.
    if (typeof options.volumeControl === 'undefined' || isPlain(options.volumeControl)) {
      options.volumeControl = options.volumeControl || {};
      options.volumeControl.vertical = !options.inline;
    }

    _this = _Component.call(this, player, options) || this;

    _this.on(player, ['loadstart'], _this.volumePanelState_);

    // while the slider is active (the mouse has been pressed down and
    // is dragging) we do not want to hide the VolumeBar
    _this.on(_this.volumeControl, ['slideractive'], _this.sliderActive_);

    _this.on(_this.volumeControl, ['sliderinactive'], _this.sliderInactive_);

    return _this;
  }

  var _proto = VolumePanel.prototype;

  /**
   * Add vjs-slider-active class to the VolumePanel
   *
   * @listens VolumeControl#slideractive
   * @private
   */
  _proto.sliderActive_ = function sliderActive_() {
    this.addClass('vjs-slider-active');
  };

  /**
   * Removes vjs-slider-active class from the VolumePanel
   *
   * @listens VolumeControl#sliderinactive
   * @private
   */
  _proto.sliderInactive_ = function sliderInactive_() {
    this.removeClass('vjs-slider-active');
  };

  /**
   * Adds vjs-hidden or vjs-mute-toggle-only to the VolumePanel
   * depending on MuteToggle and VolumeControl state
   *
   * @listens Player#loadstart
   * @private
   */
  _proto.volumePanelState_ = function volumePanelState_() {
    // hide volume panel if neither volume control or mute toggle
    // are displayed
    if (this.volumeControl.hasClass('vjs-hidden') && this.muteToggle.hasClass('vjs-hidden')) {
      this.addClass('vjs-hidden');
    }

    // if only mute toggle is visible we don't want
    // volume panel expanding when hovered or active
    if (this.volumeControl.hasClass('vjs-hidden') && !this.muteToggle.hasClass('vjs-hidden')) {
      this.addClass('vjs-mute-toggle-only');
    }
  };

  /**
   * Create the `Component`'s DOM element
   *
   * @return {Element}
   *         The element that was created.
   */
  _proto.createEl = function createEl() {
    var orientationClass = 'vjs-volume-panel-horizontal';

    if (!this.options_.inline) {
      orientationClass = 'vjs-volume-panel-vertical';
    }

    return _Component.prototype.createEl.call(this, 'div', {
      className: "vjs-volume-panel vjs-control " + orientationClass
    });
  };

  return VolumePanel;
}(Component);
|
/**
 * Default options for the `VolumePanel`
 *
 * @type {Object}
 * @private
 */
VolumePanel.prototype.options_ = {
  children: ['muteToggle', 'volumeControl']
};
Component.registerComponent('VolumePanel', VolumePanel);
|
|
|
/** |
|
* The Menu component is used to build popup menus, including subtitle and |
|
* captions selection menus. |
|
* |
|
* @extends Component |
|
*/ |
|
|
|
var Menu = |
|
/*#__PURE__*/ |
|
function (_Component) { |
|
_inheritsLoose(Menu, _Component); |
|
|
|
/** |
|
* Create an instance of this class. |
|
* |
|
* @param {Player} player |
|
* the player that this component should attach to |
|
* |
|
* @param {Object} [options] |
|
* Object of option names and values |
|
* |
|
*/ |
|
function Menu(player, options) { |
|
var _this; |
|
|
|
_this = _Component.call(this, player, options) || this; |
|
|
|
if (options) { |
|
_this.menuButton_ = options.menuButton; |
|
} |
|
|
|
_this.focusedChild_ = -1; |
|
|
|
_this.on('keydown', _this.handleKeyDown); // All the menu item instances share the same blur handler provided by the menu container. |
|
|
|
|
|
_this.boundHandleBlur_ = bind(_assertThisInitialized(_assertThisInitialized(_this)), _this.handleBlur); |
|
_this.boundHandleTapClick_ = bind(_assertThisInitialized(_assertThisInitialized(_this)), _this.handleTapClick); |
|
return _this; |
|
} |
|
/** |
|
* Add event listeners to the {@link MenuItem}. |
|
* |
|
* @param {Object} component |
|
* The instance of the `MenuItem` to add listeners to. |
|
* |
|
*/ |
|
|
|
|
|
var _proto = Menu.prototype; |
|
|
|
_proto.addEventListenerForItem = function addEventListenerForItem(component) { |
|
if (!(component instanceof Component)) { |
|
return; |
|
} |
|
|
|
this.on(component, 'blur', this.boundHandleBlur_); |
|
this.on(component, ['tap', 'click'], this.boundHandleTapClick_); |
|
} |
|
/** |
|
* Remove event listeners from the {@link MenuItem}. |
|
* |
|
* @param {Object} component |
|
* The instance of the `MenuItem` to remove listeners. |
|
* |
|
*/ |
|
; |
|
|
|
_proto.removeEventListenerForItem = function removeEventListenerForItem(component) { |
|
if (!(component instanceof Component)) { |
|
return; |
|
} |
|
|
|
this.off(component, 'blur', this.boundHandleBlur_); |
|
this.off(component, ['tap', 'click'], this.boundHandleTapClick_); |
|
} |
|
/** |
|
* This method will be called indirectly when the component has been added |
|
* before the component adds to the new menu instance by `addItem`. |
|
* In this case, the original menu instance will remove the component |
|
* by calling `removeChild`. |
|
* |
|
* @param {Object} component |
|
* The instance of the `MenuItem` |
|
*/ |
|
; |
|
|
|
_proto.removeChild = function removeChild(component) { |
|
if (typeof component === 'string') { |
|
component = this.getChild(component); |
|
} |
|
|
|
this.removeEventListenerForItem(component); |
|
|
|
_Component.prototype.removeChild.call(this, component); |
|
} |
|
/** |
|
* Add a {@link MenuItem} to the menu. |
|
* |
|
* @param {Object|string} component |
|
* The name or instance of the `MenuItem` to add. |
|
* |
|
*/ |
|
; |
|
|
|
_proto.addItem = function addItem(component) { |
|
var childComponent = this.addChild(component); |
|
|
|
if (childComponent) { |
|
this.addEventListenerForItem(childComponent); |
|
} |
|
} |
|
/** |
|
* Create the `Menu`s DOM element. |
|
* |
|
* @return {Element} |
|
* the element that was created |
|
*/ |
|
; |
|
|
|
_proto.createEl = function createEl$$1() { |
|
var contentElType = this.options_.contentElType || 'ul'; |
|
this.contentEl_ = createEl(contentElType, { |
|
className: 'vjs-menu-content' |
|
}); |
|
this.contentEl_.setAttribute('role', 'menu'); |
|
|
|
var el = _Component.prototype.createEl.call(this, 'div', { |
|
append: this.contentEl_, |
|
className: 'vjs-menu' |
|
}); |
|
|
|
el.appendChild(this.contentEl_); // Prevent clicks from bubbling up. Needed for Menu Buttons, |
|
// where a click on the parent is significant |
|
|
|
on(el, 'click', function (event) { |
|
event.preventDefault(); |
|
event.stopImmediatePropagation(); |
|
}); |
|
return el; |
|
}; |
|
|
|
_proto.dispose = function dispose() { |
|
this.contentEl_ = null; |
|
this.boundHandleBlur_ = null; |
|
this.boundHandleTapClick_ = null; |
|
|
|
_Component.prototype.dispose.call(this); |
|
} |
|
/** |
|
* Called when a `MenuItem` loses focus. |
|
* |
|
* @param {EventTarget~Event} event |
|
* The `blur` event that caused this function to be called. |
|
* |
|
* @listens blur |
|
*/ |
|
; |
|
|
|
_proto.handleBlur = function handleBlur(event) { |
|
var relatedTarget = event.relatedTarget || document.activeElement; // Close menu popup when a user clicks outside the menu |
|
|
|
if (!this.children().some(function (element) { |
|
return element.el() === relatedTarget; |
|
})) { |
|
var btn = this.menuButton_; |
|
|
|
if (btn && btn.buttonPressed_ && relatedTarget !== btn.el().firstChild) { |
|
btn.unpressButton(); |
|
} |
|
} |
|
} |
|
/** |
|
* Called when a `MenuItem` gets clicked or tapped. |
|
* |
|
* @param {EventTarget~Event} event |
|
* The `click` or `tap` event that caused this function to be called. |
|
* |
|
* @listens click,tap |
|
*/ |
|
; |
|
|
|
_proto.handleTapClick = function handleTapClick(event) { |
|
// Unpress the associated MenuButton, and move focus back to it |
|
if (this.menuButton_) { |
|
this.menuButton_.unpressButton(); |
|
var childComponents = this.children(); |
|
|
|
if (!Array.isArray(childComponents)) { |
|
return; |
|
} |
|
|
|
var foundComponent = childComponents.filter(function (component) { |
|
return component.el() === event.target; |
|
})[0]; |
|
|
|
if (!foundComponent) { |
|
return; |
|
} // don't focus menu button if item is a caption settings item |
|
// because focus will move elsewhere |
|
|
|
|
|
if (foundComponent.name() !== 'CaptionSettingsMenuItem') { |
|
this.menuButton_.focus(); |
|
} |
|
} |
|
} |
|
/** |
|
* Handle a `keydown` event on this menu. This listener is added in the constructor. |
|
* |
|
* @param {EventTarget~Event} event |
|
* A `keydown` event that happened on the menu. |
|
* |
|
* @listens keydown |
|
*/ |
|
; |
|
|
|
_proto.handleKeyDown = function handleKeyDown(event) { |
|
// Left and Down Arrows |
|
if (keycode.isEventKey(event, 'Left') || keycode.isEventKey(event, 'Down')) { |
|
event.preventDefault(); |
|
event.stopPropagation(); |
|
this.stepForward(); // Up and Right Arrows |
|
} else if (keycode.isEventKey(event, 'Right') || keycode.isEventKey(event, 'Up')) { |
|
event.preventDefault(); |
|
event.stopPropagation(); |
|
this.stepBack(); |
|
} |
|
} |
|
/** |
|
* Move to next (lower) menu item for keyboard users. |
|
*/ |
|
; |
|
|
|
_proto.stepForward = function stepForward() { |
|
var stepChild = 0; |
|
|
|
if (this.focusedChild_ !== undefined) { |
|
stepChild = this.focusedChild_ + 1; |
|
} |
|
|
|
this.focus(stepChild); |
|
} |
|
/** |
|
* Move to previous (higher) menu item for keyboard users. |
|
*/ |
|
; |
|
|
|
_proto.stepBack = function stepBack() { |
|
var stepChild = 0; |
|
|
|
if (this.focusedChild_ !== undefined) { |
|
stepChild = this.focusedChild_ - 1; |
|
} |
|
|
|
this.focus(stepChild); |
|
} |
|
/** |
|
* Set focus on a {@link MenuItem} in the `Menu`. |
|
* |
|
* @param {Object|string} [item=0] |
|
* Index of child item set focus on. |
|
*/ |
|
; |
|
|
|
_proto.focus = function focus(item) { |
|
if (item === void 0) { |
|
item = 0; |
|
} |
|
|
|
var children = this.children().slice(); |
|
var haveTitle = children.length && children[0].className && /vjs-menu-title/.test(children[0].className); |
|
|
|
if (haveTitle) { |
|
children.shift(); |
|
} |
|
|
|
if (children.length > 0) { |
|
if (item < 0) { |
|
item = 0; |
|
} else if (item >= children.length) { |
|
item = children.length - 1; |
|
} |
|
|
|
this.focusedChild_ = item; |
|
children[item].el_.focus(); |
|
} |
|
}; |
|
|
|
return Menu; |
|
}(Component); |
|
|
|
  // Make `Menu` creatable by name via `Component#addChild`/`videojs.getComponent`.
  Component.registerComponent('Menu', Menu);
|
|
|
/** |
|
* A `MenuButton` class for any popup {@link Menu}. |
|
* |
|
* @extends Component |
|
*/ |
|
|
|
var MenuButton = |
|
/*#__PURE__*/ |
|
function (_Component) { |
|
_inheritsLoose(MenuButton, _Component); |
|
|
|
/** |
|
* Creates an instance of this class. |
|
* |
|
* @param {Player} player |
|
* The `Player` that this class should be attached to. |
|
* |
|
* @param {Object} [options={}] |
|
* The key/value store of player options. |
|
*/ |
|
    function MenuButton(player, options) {
      var _this;

      if (options === void 0) {
        options = {};
      }

      _this = _Component.call(this, player, options) || this;
      // The wrapper (this component) holds a real Button that receives focus and clicks.
      _this.menuButton_ = new Button(player, options);

      _this.menuButton_.controlText(_this.controlText_);

      _this.menuButton_.el_.setAttribute('aria-haspopup', 'true'); // Add buildCSSClass values to the button, not the wrapper

      var buttonClass = Button.prototype.buildCSSClass();
      _this.menuButton_.el_.className = _this.buildCSSClass() + ' ' + buttonClass;

      // The inner button is not itself a control bar control; the wrapper is.
      _this.menuButton_.removeClass('vjs-control');

      _this.addChild(_this.menuButton_);

      // Build the initial menu (also initializes buttonPressed_/aria-expanded).
      _this.update();

      _this.enabled_ = true;

      _this.on(_this.menuButton_, 'tap', _this.handleClick);

      _this.on(_this.menuButton_, 'click', _this.handleClick);

      _this.on(_this.menuButton_, 'keydown', _this.handleKeyDown);

      // Hovering the button reveals the menu without "pressing" the button.
      _this.on(_this.menuButton_, 'mouseenter', function () {
        _this.menu.show();
      });

      // Key events from inside the open menu bubble up to this wrapper.
      _this.on('keydown', _this.handleSubmenuKeyDown);

      return _this;
    }
|
/** |
|
* Update the menu based on the current state of its items. |
|
*/ |
|
|
|
|
|
var _proto = MenuButton.prototype; |
|
|
|
_proto.update = function update() { |
|
var menu = this.createMenu(); |
|
|
|
if (this.menu) { |
|
this.menu.dispose(); |
|
this.removeChild(this.menu); |
|
} |
|
|
|
this.menu = menu; |
|
this.addChild(menu); |
|
/** |
|
* Track the state of the menu button |
|
* |
|
* @type {Boolean} |
|
* @private |
|
*/ |
|
|
|
this.buttonPressed_ = false; |
|
this.menuButton_.el_.setAttribute('aria-expanded', 'false'); |
|
|
|
if (this.items && this.items.length <= this.hideThreshold_) { |
|
this.hide(); |
|
} else { |
|
this.show(); |
|
} |
|
} |
|
/** |
|
* Create the menu and add all items to it. |
|
* |
|
* @return {Menu} |
|
* The constructed menu |
|
*/ |
|
; |
|
|
|
_proto.createMenu = function createMenu() { |
|
var menu = new Menu(this.player_, { |
|
menuButton: this |
|
}); |
|
/** |
|
* Hide the menu if the number of items is less than or equal to this threshold. This defaults |
|
* to 0 and whenever we add items which can be hidden to the menu we'll increment it. We list |
|
* it here because every time we run `createMenu` we need to reset the value. |
|
* |
|
* @protected |
|
* @type {Number} |
|
*/ |
|
|
|
this.hideThreshold_ = 0; // Add a title list item to the top |
|
|
|
if (this.options_.title) { |
|
var titleEl = createEl('li', { |
|
className: 'vjs-menu-title', |
|
innerHTML: toTitleCase(this.options_.title), |
|
tabIndex: -1 |
|
}); |
|
this.hideThreshold_ += 1; |
|
var titleComponent = new Component(this.player_, { |
|
el: titleEl |
|
}); |
|
menu.addItem(titleComponent); |
|
} |
|
|
|
this.items = this.createItems(); |
|
|
|
if (this.items) { |
|
// Add menu items to the menu |
|
for (var i = 0; i < this.items.length; i++) { |
|
menu.addItem(this.items[i]); |
|
} |
|
} |
|
|
|
return menu; |
|
} |
|
/** |
|
* Create the list of menu items. Specific to each subclass. |
|
* |
|
* @abstract |
|
*/ |
|
; |
|
|
|
    // Abstract method: subclasses (e.g. `TextTrackButton`) override this to
    // build and return the array of menu items.
    _proto.createItems = function createItems() {}
|
/** |
|
* Create the `MenuButtons`s DOM element. |
|
* |
|
* @return {Element} |
|
* The element that gets created. |
|
*/ |
|
; |
|
|
|
_proto.createEl = function createEl$$1() { |
|
return _Component.prototype.createEl.call(this, 'div', { |
|
className: this.buildWrapperCSSClass() |
|
}, {}); |
|
} |
|
/** |
|
* Allow sub components to stack CSS class names for the wrapper element |
|
* |
|
* @return {string} |
|
* The constructed wrapper DOM `className` |
|
*/ |
|
; |
|
|
|
_proto.buildWrapperCSSClass = function buildWrapperCSSClass() { |
|
var menuButtonClass = 'vjs-menu-button'; // If the inline option is passed, we want to use different styles altogether. |
|
|
|
if (this.options_.inline === true) { |
|
menuButtonClass += '-inline'; |
|
} else { |
|
menuButtonClass += '-popup'; |
|
} // TODO: Fix the CSS so that this isn't necessary |
|
|
|
|
|
var buttonClass = Button.prototype.buildCSSClass(); |
|
return "vjs-menu-button " + menuButtonClass + " " + buttonClass + " " + _Component.prototype.buildCSSClass.call(this); |
|
} |
|
/** |
|
* Builds the default DOM `className`. |
|
* |
|
* @return {string} |
|
* The DOM `className` for this object. |
|
*/ |
|
; |
|
|
|
_proto.buildCSSClass = function buildCSSClass() { |
|
var menuButtonClass = 'vjs-menu-button'; // If the inline option is passed, we want to use different styles altogether. |
|
|
|
if (this.options_.inline === true) { |
|
menuButtonClass += '-inline'; |
|
} else { |
|
menuButtonClass += '-popup'; |
|
} |
|
|
|
return "vjs-menu-button " + menuButtonClass + " " + _Component.prototype.buildCSSClass.call(this); |
|
} |
|
/** |
|
* Get or set the localized control text that will be used for accessibility. |
|
* |
|
* > NOTE: This will come from the internal `menuButton_` element. |
|
* |
|
* @param {string} [text] |
|
* Control text for element. |
|
* |
|
* @param {Element} [el=this.menuButton_.el()] |
|
* Element to set the title on. |
|
* |
|
* @return {string} |
|
* - The control text when getting |
|
*/ |
|
; |
|
|
|
_proto.controlText = function controlText(text, el) { |
|
if (el === void 0) { |
|
el = this.menuButton_.el(); |
|
} |
|
|
|
return this.menuButton_.controlText(text, el); |
|
} |
|
/** |
|
* Handle a click on a `MenuButton`. |
|
* See {@link ClickableComponent#handleClick} for instances where this is called. |
|
* |
|
* @param {EventTarget~Event} event |
|
* The `keydown`, `tap`, or `click` event that caused this function to be |
|
* called. |
|
* |
|
* @listens tap |
|
* @listens click |
|
*/ |
|
; |
|
|
|
_proto.handleClick = function handleClick(event) { |
|
if (this.buttonPressed_) { |
|
this.unpressButton(); |
|
} else { |
|
this.pressButton(); |
|
} |
|
} |
|
/** |
|
* Set the focus to the actual button, not to this element |
|
*/ |
|
; |
|
|
|
_proto.focus = function focus() { |
|
this.menuButton_.focus(); |
|
} |
|
/** |
|
* Remove the focus from the actual button, not this element |
|
*/ |
|
; |
|
|
|
_proto.blur = function blur() { |
|
this.menuButton_.blur(); |
|
} |
|
/** |
|
* Handle tab, escape, down arrow, and up arrow keys for `MenuButton`. See |
|
* {@link ClickableComponent#handleKeyDown} for instances where this is called. |
|
* |
|
* @param {EventTarget~Event} event |
|
* The `keydown` event that caused this function to be called. |
|
* |
|
* @listens keydown |
|
*/ |
|
; |
|
|
|
_proto.handleKeyDown = function handleKeyDown(event) { |
|
// Escape or Tab unpress the 'button' |
|
if (keycode.isEventKey(event, 'Esc') || keycode.isEventKey(event, 'Tab')) { |
|
if (this.buttonPressed_) { |
|
this.unpressButton(); |
|
} // Don't preventDefault for Tab key - we still want to lose focus |
|
|
|
|
|
if (!keycode.isEventKey(event, 'Tab')) { |
|
event.preventDefault(); // Set focus back to the menu button's button |
|
|
|
this.menuButton_.focus(); |
|
} // Up Arrow or Down Arrow also 'press' the button to open the menu |
|
|
|
} else if (keycode.isEventKey(event, 'Up') || keycode.isEventKey(event, 'Down')) { |
|
if (!this.buttonPressed_) { |
|
event.preventDefault(); |
|
this.pressButton(); |
|
} |
|
} |
|
} |
|
/** |
|
* This method name now delegates to `handleSubmenuKeyDown`. This means |
|
* anyone calling `handleSubmenuKeyPress` will not see their method calls |
|
* stop working. |
|
* |
|
* @param {EventTarget~Event} event |
|
* The event that caused this function to be called. |
|
*/ |
|
; |
|
|
|
_proto.handleSubmenuKeyPress = function handleSubmenuKeyPress(event) { |
|
this.handleSubmenuKeyDown(event); |
|
} |
|
/** |
|
* Handle a `keydown` event on a sub-menu. The listener for this is added in |
|
* the constructor. |
|
* |
|
* @param {EventTarget~Event} event |
|
* Key press event |
|
* |
|
* @listens keydown |
|
*/ |
|
; |
|
|
|
_proto.handleSubmenuKeyDown = function handleSubmenuKeyDown(event) { |
|
// Escape or Tab unpress the 'button' |
|
if (keycode.isEventKey(event, 'Esc') || keycode.isEventKey(event, 'Tab')) { |
|
if (this.buttonPressed_) { |
|
this.unpressButton(); |
|
} // Don't preventDefault for Tab key - we still want to lose focus |
|
|
|
|
|
if (!keycode.isEventKey(event, 'Tab')) { |
|
event.preventDefault(); // Set focus back to the menu button's button |
|
|
|
this.menuButton_.focus(); |
|
} |
|
} |
|
} |
|
/** |
|
* Put the current `MenuButton` into a pressed state. |
|
*/ |
|
; |
|
|
|
_proto.pressButton = function pressButton() { |
|
if (this.enabled_) { |
|
this.buttonPressed_ = true; |
|
this.menu.show(); |
|
this.menu.lockShowing(); |
|
this.menuButton_.el_.setAttribute('aria-expanded', 'true'); // set the focus into the submenu, except on iOS where it is resulting in |
|
// undesired scrolling behavior when the player is in an iframe |
|
|
|
if (IS_IOS && isInFrame()) { |
|
// Return early so that the menu isn't focused |
|
return; |
|
} |
|
|
|
this.menu.focus(); |
|
} |
|
} |
|
/** |
|
* Take the current `MenuButton` out of a pressed state. |
|
*/ |
|
; |
|
|
|
_proto.unpressButton = function unpressButton() { |
|
if (this.enabled_) { |
|
this.buttonPressed_ = false; |
|
this.menu.unlockShowing(); |
|
this.menu.hide(); |
|
this.menuButton_.el_.setAttribute('aria-expanded', 'false'); |
|
} |
|
} |
|
/** |
|
* Disable the `MenuButton`. Don't allow it to be clicked. |
|
*/ |
|
; |
|
|
|
_proto.disable = function disable() { |
|
this.unpressButton(); |
|
this.enabled_ = false; |
|
this.addClass('vjs-disabled'); |
|
this.menuButton_.disable(); |
|
} |
|
/** |
|
* Enable the `MenuButton`. Allow it to be clicked. |
|
*/ |
|
; |
|
|
|
_proto.enable = function enable() { |
|
this.enabled_ = true; |
|
this.removeClass('vjs-disabled'); |
|
this.menuButton_.enable(); |
|
}; |
|
|
|
return MenuButton; |
|
}(Component); |
|
|
|
  // Make `MenuButton` creatable by name via `Component#addChild`/`videojs.getComponent`.
  Component.registerComponent('MenuButton', MenuButton);
|
|
|
/** |
|
* The base class for buttons that toggle specific track types (e.g. subtitles). |
|
* |
|
* @extends MenuButton |
|
*/ |
|
|
|
var TrackButton = |
|
/*#__PURE__*/ |
|
function (_MenuButton) { |
|
_inheritsLoose(TrackButton, _MenuButton); |
|
|
|
/** |
|
* Creates an instance of this class. |
|
* |
|
* @param {Player} player |
|
* The `Player` that this class should be attached to. |
|
* |
|
* @param {Object} [options] |
|
* The key/value store of player options. |
|
*/ |
|
    function TrackButton(player, options) {
      var _this;

      // Grab the track list before the super call so the closures below can use it.
      var tracks = options.tracks;
      _this = _MenuButton.call(this, player, options) || this;

      // With one item or fewer there is nothing to choose from, so hide the button.
      if (_this.items.length <= 1) {
        _this.hide();
      }

      if (!tracks) {
        return _assertThisInitialized(_this);
      }

      // Rebuild the menu whenever tracks come or go, and once the player is ready.
      // (The doubled _assertThisInitialized call is a Babel transpilation artifact.)
      var updateHandler = bind(_assertThisInitialized(_assertThisInitialized(_this)), _this.update);
      tracks.addEventListener('removetrack', updateHandler);
      tracks.addEventListener('addtrack', updateHandler);

      _this.player_.on('ready', updateHandler);

      // Detach the track-list listeners when the player goes away to avoid leaks.
      _this.player_.on('dispose', function () {
        tracks.removeEventListener('removetrack', updateHandler);
        tracks.removeEventListener('addtrack', updateHandler);
      });

      return _this;
    }
|
|
|
return TrackButton; |
|
}(MenuButton); |
|
|
|
  // Make `TrackButton` creatable by name via `Component#addChild`/`videojs.getComponent`.
  Component.registerComponent('TrackButton', TrackButton);
|
|
|
/** |
|
* @file menu-keys.js |
|
*/ |
|
|
|
/** |
|
* All keys used for operation of a menu (`MenuButton`, `Menu`, and `MenuItem`) |
|
* Note that 'Enter' and 'Space' are not included here (otherwise they would |
|
* prevent the `MenuButton` and `MenuItem` from being keyboard-clickable) |
|
* @typedef MenuKeys |
|
* @array |
|
*/ |
|
  // Enter and Space are deliberately absent: they must keep activating items.
  var MenuKeys = ['Tab', 'Esc', 'Up', 'Down', 'Right', 'Left'];
|
|
|
/** |
|
* The component for a menu item. `<li>` |
|
* |
|
* @extends ClickableComponent |
|
*/ |
|
|
|
var MenuItem = |
|
/*#__PURE__*/ |
|
function (_ClickableComponent) { |
|
_inheritsLoose(MenuItem, _ClickableComponent); |
|
|
|
/** |
|
     * Creates an instance of this class.
|
* |
|
* @param {Player} player |
|
* The `Player` that this class should be attached to. |
|
* |
|
* @param {Object} [options={}] |
|
* The key/value store of player options. |
|
* |
|
*/ |
|
function MenuItem(player, options) { |
|
var _this; |
|
|
|
_this = _ClickableComponent.call(this, player, options) || this; |
|
_this.selectable = options.selectable; |
|
_this.isSelected_ = options.selected || false; |
|
_this.multiSelectable = options.multiSelectable; |
|
|
|
_this.selected(_this.isSelected_); |
|
|
|
if (_this.selectable) { |
|
if (_this.multiSelectable) { |
|
_this.el_.setAttribute('role', 'menuitemcheckbox'); |
|
} else { |
|
_this.el_.setAttribute('role', 'menuitemradio'); |
|
} |
|
} else { |
|
_this.el_.setAttribute('role', 'menuitem'); |
|
} |
|
|
|
return _this; |
|
} |
|
/** |
|
* Create the `MenuItem's DOM element |
|
* |
|
* @param {string} [type=li] |
|
* Element's node type, not actually used, always set to `li`. |
|
* |
|
* @param {Object} [props={}] |
|
* An object of properties that should be set on the element |
|
* |
|
* @param {Object} [attrs={}] |
|
* An object of attributes that should be set on the element |
|
* |
|
* @return {Element} |
|
* The element that gets created. |
|
*/ |
|
|
|
|
|
var _proto = MenuItem.prototype; |
|
|
|
_proto.createEl = function createEl(type, props, attrs) { |
|
// The control is textual, not just an icon |
|
this.nonIconControl = true; |
|
return _ClickableComponent.prototype.createEl.call(this, 'li', assign({ |
|
className: 'vjs-menu-item', |
|
innerHTML: "<span class=\"vjs-menu-item-text\">" + this.localize(this.options_.label) + "</span>", |
|
tabIndex: -1 |
|
}, props), attrs); |
|
} |
|
/** |
|
* Ignore keys which are used by the menu, but pass any other ones up. See |
|
* {@link ClickableComponent#handleKeyDown} for instances where this is called. |
|
* |
|
* @param {EventTarget~Event} event |
|
* The `keydown` event that caused this function to be called. |
|
* |
|
* @listens keydown |
|
*/ |
|
; |
|
|
|
_proto.handleKeyDown = function handleKeyDown(event) { |
|
if (!MenuKeys.some(function (key) { |
|
return keycode.isEventKey(event, key); |
|
})) { |
|
// Pass keydown handling up for unused keys |
|
_ClickableComponent.prototype.handleKeyDown.call(this, event); |
|
} |
|
} |
|
/** |
|
* Any click on a `MenuItem` puts it into the selected state. |
|
* See {@link ClickableComponent#handleClick} for instances where this is called. |
|
* |
|
* @param {EventTarget~Event} event |
|
* The `keydown`, `tap`, or `click` event that caused this function to be |
|
* called. |
|
* |
|
* @listens tap |
|
* @listens click |
|
*/ |
|
; |
|
|
|
_proto.handleClick = function handleClick(event) { |
|
this.selected(true); |
|
} |
|
/** |
|
* Set the state for this menu item as selected or not. |
|
* |
|
* @param {boolean} selected |
|
* if the menu item is selected or not |
|
*/ |
|
; |
|
|
|
_proto.selected = function selected(_selected) { |
|
if (this.selectable) { |
|
if (_selected) { |
|
this.addClass('vjs-selected'); |
|
this.el_.setAttribute('aria-checked', 'true'); // aria-checked isn't fully supported by browsers/screen readers, |
|
// so indicate selected state to screen reader in the control text. |
|
|
|
this.controlText(', selected'); |
|
this.isSelected_ = true; |
|
} else { |
|
this.removeClass('vjs-selected'); |
|
this.el_.setAttribute('aria-checked', 'false'); // Indicate un-selected state to screen reader |
|
|
|
this.controlText(''); |
|
this.isSelected_ = false; |
|
} |
|
} |
|
}; |
|
|
|
return MenuItem; |
|
}(ClickableComponent); |
|
|
|
  // Make `MenuItem` creatable by name via `Component#addChild`/`videojs.getComponent`.
  Component.registerComponent('MenuItem', MenuItem);
|
|
|
/** |
|
* The specific menu item type for selecting a language within a text track kind |
|
* |
|
* @extends MenuItem |
|
*/ |
|
|
|
var TextTrackMenuItem = |
|
/*#__PURE__*/ |
|
function (_MenuItem) { |
|
_inheritsLoose(TextTrackMenuItem, _MenuItem); |
|
|
|
/** |
|
* Creates an instance of this class. |
|
* |
|
* @param {Player} player |
|
* The `Player` that this class should be attached to. |
|
* |
|
* @param {Object} [options] |
|
* The key/value store of player options. |
|
*/ |
|
    function TextTrackMenuItem(player, options) {
      var _this;

      var track = options.track;
      var tracks = player.textTracks(); // Modify options for parent MenuItem class's init.

      options.label = track.label || track.language || 'Unknown';
      options.selected = track.mode === 'showing';
      _this = _MenuItem.call(this, player, options) || this;
      _this.track = track; // Determine the relevant kind(s) of tracks for this component and filter
      // out empty kinds.

      _this.kinds = (options.kinds || [options.kind || _this.track.kind]).filter(Boolean);

      // Forward all arguments to the instance handlers. (The doubled
      // _assertThisInitialized calls are Babel transpilation artifacts.)
      var changeHandler = function changeHandler() {
        for (var _len = arguments.length, args = new Array(_len), _key = 0; _key < _len; _key++) {
          args[_key] = arguments[_key];
        }

        _this.handleTracksChange.apply(_assertThisInitialized(_assertThisInitialized(_this)), args);
      };

      var selectedLanguageChangeHandler = function selectedLanguageChangeHandler() {
        for (var _len2 = arguments.length, args = new Array(_len2), _key2 = 0; _key2 < _len2; _key2++) {
          args[_key2] = arguments[_key2];
        }

        _this.handleSelectedLanguageChange.apply(_assertThisInitialized(_assertThisInitialized(_this)), args);
      };

      // Keep the item's selected state in sync with player/track-list events.
      player.on(['loadstart', 'texttrackchange'], changeHandler);
      tracks.addEventListener('change', changeHandler);
      tracks.addEventListener('selectedlanguagechange', selectedLanguageChangeHandler);

      // Remove every listener added above when this item is disposed.
      _this.on('dispose', function () {
        player.off(['loadstart', 'texttrackchange'], changeHandler);
        tracks.removeEventListener('change', changeHandler);
        tracks.removeEventListener('selectedlanguagechange', selectedLanguageChangeHandler);
      }); // iOS7 doesn't dispatch change events to TextTrackLists when an
      // associated track's mode changes. Without something like
      // Object.observe() (also not present on iOS7), it's not
      // possible to detect changes to the mode attribute and polyfill
      // the change event. As a poor substitute, we manually dispatch
      // change events whenever the controls modify the mode.

      if (tracks.onchange === undefined) {
        var event;

        _this.on(['tap', 'click'], function () {
          if (typeof window$1.Event !== 'object') {
            // Android 2.3 throws an Illegal Constructor error for window.Event
            try {
              event = new window$1.Event('change');
            } catch (err) {// continue regardless of error
            }
          }

          // Fall back to the legacy createEvent/initEvent API when the Event
          // constructor is unavailable or threw above.
          if (!event) {
            event = document.createEvent('Event');
            event.initEvent('change', true, true);
          }

          tracks.dispatchEvent(event);
        });
      } // set the default state based on current tracks

      _this.handleTracksChange();

      return _this;
    }
|
/** |
|
* This gets called when an `TextTrackMenuItem` is "clicked". See |
|
* {@link ClickableComponent} for more detailed information on what a click can be. |
|
* |
|
* @param {EventTarget~Event} event |
|
* The `keydown`, `tap`, or `click` event that caused this function to be |
|
* called. |
|
* |
|
* @listens tap |
|
* @listens click |
|
*/ |
|
|
|
|
|
var _proto = TextTrackMenuItem.prototype; |
|
|
|
_proto.handleClick = function handleClick(event) { |
|
var referenceTrack = this.track; |
|
var tracks = this.player_.textTracks(); |
|
|
|
_MenuItem.prototype.handleClick.call(this, event); |
|
|
|
if (!tracks) { |
|
return; |
|
} |
|
|
|
for (var i = 0; i < tracks.length; i++) { |
|
var track = tracks[i]; // If the track from the text tracks list is not of the right kind, |
|
// skip it. We do not want to affect tracks of incompatible kind(s). |
|
|
|
if (this.kinds.indexOf(track.kind) === -1) { |
|
continue; |
|
} // If this text track is the component's track and it is not showing, |
|
// set it to showing. |
|
|
|
|
|
if (track === referenceTrack) { |
|
if (track.mode !== 'showing') { |
|
track.mode = 'showing'; |
|
} // If this text track is not the component's track and it is not |
|
// disabled, set it to disabled. |
|
|
|
} else if (track.mode !== 'disabled') { |
|
track.mode = 'disabled'; |
|
} |
|
} |
|
} |
|
/** |
|
* Handle text track list change |
|
* |
|
* @param {EventTarget~Event} event |
|
* The `change` event that caused this function to be called. |
|
* |
|
* @listens TextTrackList#change |
|
*/ |
|
; |
|
|
|
_proto.handleTracksChange = function handleTracksChange(event) { |
|
var shouldBeSelected = this.track.mode === 'showing'; // Prevent redundant selected() calls because they may cause |
|
// screen readers to read the appended control text unnecessarily |
|
|
|
if (shouldBeSelected !== this.isSelected_) { |
|
this.selected(shouldBeSelected); |
|
} |
|
}; |
|
|
|
_proto.handleSelectedLanguageChange = function handleSelectedLanguageChange(event) { |
|
if (this.track.mode === 'showing') { |
|
var selectedLanguage = this.player_.cache_.selectedLanguage; // Don't replace the kind of track across the same language |
|
|
|
if (selectedLanguage && selectedLanguage.enabled && selectedLanguage.language === this.track.language && selectedLanguage.kind !== this.track.kind) { |
|
return; |
|
} |
|
|
|
this.player_.cache_.selectedLanguage = { |
|
enabled: true, |
|
language: this.track.language, |
|
kind: this.track.kind |
|
}; |
|
} |
|
}; |
|
|
|
_proto.dispose = function dispose() { |
|
// remove reference to track object on dispose |
|
this.track = null; |
|
|
|
_MenuItem.prototype.dispose.call(this); |
|
}; |
|
|
|
return TextTrackMenuItem; |
|
}(MenuItem); |
|
|
|
  // Make `TextTrackMenuItem` creatable by name via `Component#addChild`/`videojs.getComponent`.
  Component.registerComponent('TextTrackMenuItem', TextTrackMenuItem);
|
|
|
/** |
|
   * A special menu item for turning off a specific type of text track
|
* |
|
* @extends TextTrackMenuItem |
|
*/ |
|
|
|
var OffTextTrackMenuItem = |
|
/*#__PURE__*/ |
|
function (_TextTrackMenuItem) { |
|
_inheritsLoose(OffTextTrackMenuItem, _TextTrackMenuItem); |
|
|
|
/** |
|
* Creates an instance of this class. |
|
* |
|
* @param {Player} player |
|
* The `Player` that this class should be attached to. |
|
* |
|
* @param {Object} [options] |
|
* The key/value store of player options. |
|
*/ |
|
function OffTextTrackMenuItem(player, options) { |
|
// Create pseudo track info |
|
// Requires options['kind'] |
|
options.track = { |
|
player: player, |
|
// it is no longer necessary to store `kind` or `kinds` on the track itself |
|
// since they are now stored in the `kinds` property of all instances of |
|
// TextTrackMenuItem, but this will remain for backwards compatibility |
|
kind: options.kind, |
|
kinds: options.kinds, |
|
default: false, |
|
mode: 'disabled' |
|
}; |
|
|
|
if (!options.kinds) { |
|
options.kinds = [options.kind]; |
|
} |
|
|
|
if (options.label) { |
|
options.track.label = options.label; |
|
} else { |
|
options.track.label = options.kinds.join(' and ') + ' off'; |
|
} // MenuItem is selectable |
|
|
|
|
|
options.selectable = true; // MenuItem is NOT multiSelectable (i.e. only one can be marked "selected" at a time) |
|
|
|
options.multiSelectable = false; |
|
return _TextTrackMenuItem.call(this, player, options) || this; |
|
} |
|
/** |
|
* Handle text track change |
|
* |
|
* @param {EventTarget~Event} event |
|
* The event that caused this function to run |
|
*/ |
|
|
|
|
|
var _proto = OffTextTrackMenuItem.prototype; |
|
|
|
_proto.handleTracksChange = function handleTracksChange(event) { |
|
var tracks = this.player().textTracks(); |
|
var shouldBeSelected = true; |
|
|
|
for (var i = 0, l = tracks.length; i < l; i++) { |
|
var track = tracks[i]; |
|
|
|
if (this.options_.kinds.indexOf(track.kind) > -1 && track.mode === 'showing') { |
|
shouldBeSelected = false; |
|
break; |
|
} |
|
} // Prevent redundant selected() calls because they may cause |
|
// screen readers to read the appended control text unnecessarily |
|
|
|
|
|
if (shouldBeSelected !== this.isSelected_) { |
|
this.selected(shouldBeSelected); |
|
} |
|
}; |
|
|
|
_proto.handleSelectedLanguageChange = function handleSelectedLanguageChange(event) { |
|
var tracks = this.player().textTracks(); |
|
var allHidden = true; |
|
|
|
for (var i = 0, l = tracks.length; i < l; i++) { |
|
var track = tracks[i]; |
|
|
|
if (['captions', 'descriptions', 'subtitles'].indexOf(track.kind) > -1 && track.mode === 'showing') { |
|
allHidden = false; |
|
break; |
|
} |
|
} |
|
|
|
if (allHidden) { |
|
this.player_.cache_.selectedLanguage = { |
|
enabled: false |
|
}; |
|
} |
|
}; |
|
|
|
return OffTextTrackMenuItem; |
|
}(TextTrackMenuItem); |
|
|
|
  // Make `OffTextTrackMenuItem` creatable by name via `Component#addChild`/`videojs.getComponent`.
  Component.registerComponent('OffTextTrackMenuItem', OffTextTrackMenuItem);
|
|
|
/** |
|
* The base class for buttons that toggle specific text track types (e.g. subtitles) |
|
* |
|
* @extends MenuButton |
|
*/ |
|
|
|
var TextTrackButton = |
|
/*#__PURE__*/ |
|
function (_TrackButton) { |
|
_inheritsLoose(TextTrackButton, _TrackButton); |
|
|
|
/** |
|
* Creates an instance of this class. |
|
* |
|
* @param {Player} player |
|
* The `Player` that this class should be attached to. |
|
* |
|
* @param {Object} [options={}] |
|
* The key/value store of player options. |
|
*/ |
|
function TextTrackButton(player, options) { |
|
if (options === void 0) { |
|
options = {}; |
|
} |
|
|
|
options.tracks = player.textTracks(); |
|
return _TrackButton.call(this, player, options) || this; |
|
} |
|
/** |
|
* Create a menu item for each text track |
|
* |
|
* @param {TextTrackMenuItem[]} [items=[]] |
|
* Existing array of items to use during creation |
|
* |
|
* @return {TextTrackMenuItem[]} |
|
* Array of menu items that were created |
|
*/ |
|
|
|
|
|
var _proto = TextTrackButton.prototype; |
|
|
|
_proto.createItems = function createItems(items, TrackMenuItem) { |
|
if (items === void 0) { |
|
items = []; |
|
} |
|
|
|
if (TrackMenuItem === void 0) { |
|
TrackMenuItem = TextTrackMenuItem; |
|
} |
|
|
|
// Label is an override for the [track] off label |
|
// USed to localise captions/subtitles |
|
var label; |
|
|
|
if (this.label_) { |
|
label = this.label_ + " off"; |
|
} // Add an OFF menu item to turn all tracks off |
|
|
|
|
|
items.push(new OffTextTrackMenuItem(this.player_, { |
|
kinds: this.kinds_, |
|
kind: this.kind_, |
|
label: label |
|
})); |
|
this.hideThreshold_ += 1; |
|
var tracks = this.player_.textTracks(); |
|
|
|
if (!Array.isArray(this.kinds_)) { |
|
this.kinds_ = [this.kind_]; |
|
} |
|
|
|
for (var i = 0; i < tracks.length; i++) { |
|
var track = tracks[i]; // only add tracks that are of an appropriate kind and have a label |
|
|
|
if (this.kinds_.indexOf(track.kind) > -1) { |
|
var item = new TrackMenuItem(this.player_, { |
|
track: track, |
|
kinds: this.kinds_, |
|
kind: this.kind_, |
|
// MenuItem is selectable |
|
selectable: true, |
|
// MenuItem is NOT multiSelectable (i.e. only one can be marked "selected" at a time) |
|
multiSelectable: false |
|
}); |
|
item.addClass("vjs-" + track.kind + "-menu-item"); |
|
items.push(item); |
|
} |
|
} |
|
|
|
return items; |
|
}; |
|
|
|
return TextTrackButton; |
|
}(TrackButton); |
|
|
|
  // Make `TextTrackButton` creatable by name via `Component#addChild`/`videojs.getComponent`.
  Component.registerComponent('TextTrackButton', TextTrackButton);
|
|
|
/** |
|
* The chapter track menu item |
|
* |
|
* @extends MenuItem |
|
*/ |
|
|
|
var ChaptersTrackMenuItem = |
|
/*#__PURE__*/ |
|
function (_MenuItem) { |
|
_inheritsLoose(ChaptersTrackMenuItem, _MenuItem); |
|
|
|
/** |
|
* Creates an instance of this class. |
|
* |
|
* @param {Player} player |
|
* The `Player` that this class should be attached to. |
|
* |
|
* @param {Object} [options] |
|
* The key/value store of player options. |
|
*/ |
|
    function ChaptersTrackMenuItem(player, options) {
      var _this;

      var track = options.track;
      var cue = options.cue;
      var currentTime = player.currentTime(); // Modify options for parent MenuItem class's init.

      options.selectable = true;
      options.multiSelectable = false;
      options.label = cue.text;
      // Selected when playback is currently inside this cue's [start, end) range.
      options.selected = cue.startTime <= currentTime && currentTime < cue.endTime;
      _this = _MenuItem.call(this, player, options) || this;
      _this.track = track;
      _this.cue = cue;
      // NOTE(review): this 'cuechange' listener is never removed on dispose, so
      // the track retains a reference to this item — confirm the lifetime is
      // acceptable. (The doubled _assertThisInitialized call is a Babel artifact.)
      track.addEventListener('cuechange', bind(_assertThisInitialized(_assertThisInitialized(_this)), _this.update));
      return _this;
    }
|
/** |
|
* This gets called when an `ChaptersTrackMenuItem` is "clicked". See |
|
* {@link ClickableComponent} for more detailed information on what a click can be. |
|
* |
|
* @param {EventTarget~Event} [event] |
|
* The `keydown`, `tap`, or `click` event that caused this function to be |
|
* called. |
|
* |
|
* @listens tap |
|
* @listens click |
|
*/ |
|
|
|
|
|
var _proto = ChaptersTrackMenuItem.prototype; |
|
|
|
_proto.handleClick = function handleClick(event) { |
|
_MenuItem.prototype.handleClick.call(this); |
|
|
|
this.player_.currentTime(this.cue.startTime); |
|
this.update(this.cue.startTime); |
|
} |
|
/** |
|
* Update chapter menu item |
|
* |
|
* @param {EventTarget~Event} [event] |
|
* The `cuechange` event that caused this function to run. |
|
* |
|
* @listens TextTrack#cuechange |
|
*/ |
|
; |
|
|
|
_proto.update = function update(event) { |
|
var cue = this.cue; |
|
var currentTime = this.player_.currentTime(); // vjs.log(currentTime, cue.startTime); |
|
|
|
this.selected(cue.startTime <= currentTime && currentTime < cue.endTime); |
|
}; |
|
|
|
return ChaptersTrackMenuItem; |
|
}(MenuItem); |
|
|
|
  // Make `ChaptersTrackMenuItem` creatable by name via `Component#addChild`/`videojs.getComponent`.
  Component.registerComponent('ChaptersTrackMenuItem', ChaptersTrackMenuItem);
|
|
|
/** |
|
* The button component for toggling and selecting chapters |
|
* Chapters act much differently than other text tracks |
|
* Cues are navigation vs. other tracks of alternative languages |
|
* |
|
* @extends TextTrackButton |
|
*/ |
|
|
|
var ChaptersButton = |
|
/*#__PURE__*/ |
|
function (_TextTrackButton) { |
|
_inheritsLoose(ChaptersButton, _TextTrackButton); |
|
|
|
/** |
|
* Creates an instance of this class. |
|
* |
|
* @param {Player} player |
|
* The `Player` that this class should be attached to. |
|
* |
|
* @param {Object} [options] |
|
* The key/value store of player options. |
|
* |
|
* @param {Component~ReadyCallback} [ready] |
|
* The function to call when this function is ready. |
|
*/ |
|
    // No chapter-specific construction; everything is inherited from TextTrackButton.
    function ChaptersButton(player, options, ready) {
      return _TextTrackButton.call(this, player, options, ready) || this;
    }
|
/** |
|
* Builds the default DOM `className`. |
|
* |
|
* @return {string} |
|
* The DOM `className` for this object. |
|
*/ |
|
|
|
|
|
var _proto = ChaptersButton.prototype; |
|
|
|
_proto.buildCSSClass = function buildCSSClass() { |
|
return "vjs-chapters-button " + _TextTrackButton.prototype.buildCSSClass.call(this); |
|
}; |
|
|
|
_proto.buildWrapperCSSClass = function buildWrapperCSSClass() { |
|
return "vjs-chapters-button " + _TextTrackButton.prototype.buildWrapperCSSClass.call(this); |
|
} |
|
/** |
|
* Update the menu based on the current state of its items. |
|
* |
|
* @param {EventTarget~Event} [event] |
|
* An event that triggered this function to run. |
|
* |
|
* @listens TextTrackList#addtrack |
|
* @listens TextTrackList#removetrack |
|
* @listens TextTrackList#change |
|
*/ |
|
; |
|
|
|
_proto.update = function update(event) { |
|
if (!this.track_ || event && (event.type === 'addtrack' || event.type === 'removetrack')) { |
|
this.setTrack(this.findChaptersTrack()); |
|
} |
|
|
|
_TextTrackButton.prototype.update.call(this); |
|
} |
|
/** |
|
* Set the currently selected track for the chapters button. |
|
* |
|
* @param {TextTrack} track |
|
* The new track to select. Nothing will change if this is the currently selected |
|
* track. |
|
*/ |
|
; |
|
|
|
_proto.setTrack = function setTrack(track) { |
|
if (this.track_ === track) { |
|
return; |
|
} |
|
|
|
if (!this.updateHandler_) { |
|
this.updateHandler_ = this.update.bind(this); |
|
} // here this.track_ refers to the old track instance |
|
|
|
|
|
if (this.track_) { |
|
var remoteTextTrackEl = this.player_.remoteTextTrackEls().getTrackElementByTrack_(this.track_); |
|
|
|
if (remoteTextTrackEl) { |
|
remoteTextTrackEl.removeEventListener('load', this.updateHandler_); |
|
} |
|
|
|
this.track_ = null; |
|
} |
|
|
|
this.track_ = track; // here this.track_ refers to the new track instance |
|
|
|
if (this.track_) { |
|
this.track_.mode = 'hidden'; |
|
|
|
var _remoteTextTrackEl = this.player_.remoteTextTrackEls().getTrackElementByTrack_(this.track_); |
|
|
|
if (_remoteTextTrackEl) { |
|
_remoteTextTrackEl.addEventListener('load', this.updateHandler_); |
|
} |
|
} |
|
} |
|
/** |
|
* Find the track object that is currently in use by this ChaptersButton |
|
* |
|
* @return {TextTrack|undefined} |
|
* The current track or undefined if none was found. |
|
*/ |
|
; |
|
|
|
_proto.findChaptersTrack = function findChaptersTrack() { |
|
var tracks = this.player_.textTracks() || []; |
|
|
|
for (var i = tracks.length - 1; i >= 0; i--) { |
|
// We will always choose the last track as our chaptersTrack |
|
var track = tracks[i]; |
|
|
|
if (track.kind === this.kind_) { |
|
return track; |
|
} |
|
} |
|
} |
|
/** |
|
* Get the caption for the ChaptersButton based on the track label. This will also |
|
* use the current tracks localized kind as a fallback if a label does not exist. |
|
* |
|
* @return {string} |
|
* The tracks current label or the localized track kind. |
|
*/ |
|
; |
|
|
|
_proto.getMenuCaption = function getMenuCaption() { |
|
if (this.track_ && this.track_.label) { |
|
return this.track_.label; |
|
} |
|
|
|
return this.localize(toTitleCase(this.kind_)); |
|
} |
|
/** |
|
* Create menu from chapter track |
|
* |
|
* @return {Menu} |
|
* New menu for the chapter buttons |
|
*/ |
|
; |
|
|
|
_proto.createMenu = function createMenu() { |
|
this.options_.title = this.getMenuCaption(); |
|
return _TextTrackButton.prototype.createMenu.call(this); |
|
} |
|
/** |
|
* Create a menu item for each text track |
|
* |
|
* @return {TextTrackMenuItem[]} |
|
* Array of menu items |
|
*/ |
|
; |
|
|
|
_proto.createItems = function createItems() { |
|
var items = []; |
|
|
|
if (!this.track_) { |
|
return items; |
|
} |
|
|
|
var cues = this.track_.cues; |
|
|
|
if (!cues) { |
|
return items; |
|
} |
|
|
|
for (var i = 0, l = cues.length; i < l; i++) { |
|
var cue = cues[i]; |
|
var mi = new ChaptersTrackMenuItem(this.player_, { |
|
track: this.track_, |
|
cue: cue |
|
}); |
|
items.push(mi); |
|
} |
|
|
|
return items; |
|
}; |
|
|
|
return ChaptersButton; |
|
}(TextTrackButton); |
|
/** |
|
* `kind` of TextTrack to look for to associate it with this menu. |
|
* |
|
* @type {string} |
|
* @private |
|
*/ |
|
|
|
|
|
ChaptersButton.prototype.kind_ = 'chapters'; |
|
/** |
|
* The text that should display over the `ChaptersButton`s controls. Added for localization. |
|
* |
|
* @type {string} |
|
* @private |
|
*/ |
|
|
|
ChaptersButton.prototype.controlText_ = 'Chapters'; |
|
Component.registerComponent('ChaptersButton', ChaptersButton); |
|
|
|
  /**
   * The button component for toggling and selecting descriptions
   *
   * @extends TextTrackButton
   */

  var DescriptionsButton =
  /*#__PURE__*/
  function (_TextTrackButton) {
    _inheritsLoose(DescriptionsButton, _TextTrackButton);

    /**
     * Creates an instance of this class.
     *
     * @param {Player} player
     *        The `Player` that this class should be attached to.
     *
     * @param {Object} [options]
     *        The key/value store of player options.
     *
     * @param {Component~ReadyCallback} [ready]
     *        The function to call when this component is ready.
     */
    function DescriptionsButton(player, options, ready) {
      var _this;

      _this = _TextTrackButton.call(this, player, options, ready) || this;
      var tracks = player.textTracks();
      var changeHandler = bind(_assertThisInitialized(_assertThisInitialized(_this)), _this.handleTracksChange);
      tracks.addEventListener('change', changeHandler);

      // Detach the same handler reference on dispose so the track list does
      // not keep this button alive.
      _this.on('dispose', function () {
        tracks.removeEventListener('change', changeHandler);
      });

      return _this;
    }
    /**
     * Handle text track change
     *
     * @param {EventTarget~Event} event
     *        The event that caused this function to run
     *
     * @listens TextTrackList#change
     */


    var _proto = DescriptionsButton.prototype;

    _proto.handleTracksChange = function handleTracksChange(event) {
      var tracks = this.player().textTracks();
      var disabled = false; // Check whether a track of a different kind is showing

      for (var i = 0, l = tracks.length; i < l; i++) {
        var track = tracks[i];

        if (track.kind !== this.kind_ && track.mode === 'showing') {
          disabled = true;
          break;
        }
      } // If another track is showing, disable this menu button


      if (disabled) {
        this.disable();
      } else {
        this.enable();
      }
    }
    /**
     * Builds the default DOM `className`.
     *
     * @return {string}
     *         The DOM `className` for this object.
     */
    ;

    _proto.buildCSSClass = function buildCSSClass() {
      return "vjs-descriptions-button " + _TextTrackButton.prototype.buildCSSClass.call(this);
    };

    // Same class prefix for the menu button's wrapper element.
    _proto.buildWrapperCSSClass = function buildWrapperCSSClass() {
      return "vjs-descriptions-button " + _TextTrackButton.prototype.buildWrapperCSSClass.call(this);
    };

    return DescriptionsButton;
  }(TextTrackButton);
  /**
   * `kind` of TextTrack to look for to associate it with this menu.
   *
   * @type {string}
   * @private
   */


  DescriptionsButton.prototype.kind_ = 'descriptions';
  /**
   * The text that should display over the `DescriptionsButton`s controls. Added for localization.
   *
   * @type {string}
   * @private
   */

  DescriptionsButton.prototype.controlText_ = 'Descriptions';
  Component.registerComponent('DescriptionsButton', DescriptionsButton);
|
|
|
/** |
|
* The button component for toggling and selecting subtitles |
|
* |
|
* @extends TextTrackButton |
|
*/ |
|
|
|
var SubtitlesButton = |
|
/*#__PURE__*/ |
|
function (_TextTrackButton) { |
|
_inheritsLoose(SubtitlesButton, _TextTrackButton); |
|
|
|
/** |
|
* Creates an instance of this class. |
|
* |
|
* @param {Player} player |
|
* The `Player` that this class should be attached to. |
|
* |
|
* @param {Object} [options] |
|
* The key/value store of player options. |
|
* |
|
* @param {Component~ReadyCallback} [ready] |
|
* The function to call when this component is ready. |
|
*/ |
|
function SubtitlesButton(player, options, ready) { |
|
return _TextTrackButton.call(this, player, options, ready) || this; |
|
} |
|
/** |
|
* Builds the default DOM `className`. |
|
* |
|
* @return {string} |
|
* The DOM `className` for this object. |
|
*/ |
|
|
|
|
|
var _proto = SubtitlesButton.prototype; |
|
|
|
_proto.buildCSSClass = function buildCSSClass() { |
|
return "vjs-subtitles-button " + _TextTrackButton.prototype.buildCSSClass.call(this); |
|
}; |
|
|
|
_proto.buildWrapperCSSClass = function buildWrapperCSSClass() { |
|
return "vjs-subtitles-button " + _TextTrackButton.prototype.buildWrapperCSSClass.call(this); |
|
}; |
|
|
|
return SubtitlesButton; |
|
}(TextTrackButton); |
|
/** |
|
* `kind` of TextTrack to look for to associate it with this menu. |
|
* |
|
* @type {string} |
|
* @private |
|
*/ |
|
|
|
|
|
SubtitlesButton.prototype.kind_ = 'subtitles'; |
|
/** |
|
* The text that should display over the `SubtitlesButton`s controls. Added for localization. |
|
* |
|
* @type {string} |
|
* @private |
|
*/ |
|
|
|
SubtitlesButton.prototype.controlText_ = 'Subtitles'; |
|
Component.registerComponent('SubtitlesButton', SubtitlesButton); |
|
|
|
  /**
   * The menu item for caption track settings menu
   *
   * @extends TextTrackMenuItem
   */

  var CaptionSettingsMenuItem =
  /*#__PURE__*/
  function (_TextTrackMenuItem) {
    _inheritsLoose(CaptionSettingsMenuItem, _TextTrackMenuItem);

    /**
     * Creates an instance of this class.
     *
     * @param {Player} player
     *        The `Player` that this class should be attached to.
     *
     * @param {Object} [options]
     *        The key/value store of player options. `options.kind` names the
     *        track kind (e.g. 'captions') that this settings item belongs to.
     */
    function CaptionSettingsMenuItem(player, options) {
      var _this;

      // Build a stand-in "track" object so the parent TextTrackMenuItem has
      // something to render; it is never added to a real track list.
      options.track = {
        player: player,
        kind: options.kind,
        label: options.kind + ' settings',
        selectable: false,
        default: false,
        mode: 'disabled'
      }; // CaptionSettingsMenuItem has no concept of 'selected'

      options.selectable = false;
      options.name = 'CaptionSettingsMenuItem';
      _this = _TextTrackMenuItem.call(this, player, options) || this;

      _this.addClass('vjs-texttrack-settings');

      // Screen-reader text describing what activating this item does.
      _this.controlText(', opens ' + options.kind + ' settings dialog');

      return _this;
    }
    /**
     * This gets called when an `CaptionSettingsMenuItem` is "clicked". See
     * {@link ClickableComponent} for more detailed information on what a click can be.
     *
     * @param {EventTarget~Event} [event]
     *        The `keydown`, `tap`, or `click` event that caused this function to be
     *        called.
     *
     * @listens tap
     * @listens click
     */


    var _proto = CaptionSettingsMenuItem.prototype;

    _proto.handleClick = function handleClick(event) {
      // Open the player's text track settings dialog.
      this.player().getChild('textTrackSettings').open();
    };

    return CaptionSettingsMenuItem;
  }(TextTrackMenuItem);

  Component.registerComponent('CaptionSettingsMenuItem', CaptionSettingsMenuItem);
|
|
|
  /**
   * The button component for toggling and selecting captions
   *
   * @extends TextTrackButton
   */

  var CaptionsButton =
  /*#__PURE__*/
  function (_TextTrackButton) {
    _inheritsLoose(CaptionsButton, _TextTrackButton);

    /**
     * Creates an instance of this class.
     *
     * @param {Player} player
     *        The `Player` that this class should be attached to.
     *
     * @param {Object} [options]
     *        The key/value store of player options.
     *
     * @param {Component~ReadyCallback} [ready]
     *        The function to call when this component is ready.
     */
    function CaptionsButton(player, options, ready) {
      return _TextTrackButton.call(this, player, options, ready) || this;
    }
    /**
     * Builds the default DOM `className`.
     *
     * @return {string}
     *         The DOM `className` for this object.
     */


    var _proto = CaptionsButton.prototype;

    _proto.buildCSSClass = function buildCSSClass() {
      return "vjs-captions-button " + _TextTrackButton.prototype.buildCSSClass.call(this);
    };

    // Same class prefix for the menu button's wrapper element.
    _proto.buildWrapperCSSClass = function buildWrapperCSSClass() {
      return "vjs-captions-button " + _TextTrackButton.prototype.buildWrapperCSSClass.call(this);
    }
    /**
     * Create caption menu items
     *
     * @return {CaptionSettingsMenuItem[]}
     *         The array of current menu items.
     */
    ;

    _proto.createItems = function createItems() {
      var items = [];

      // Only offer the settings item when the tech renders tracks itself
      // (no native text tracks) and a settings dialog component exists.
      if (!(this.player().tech_ && this.player().tech_.featuresNativeTextTracks) && this.player().getChild('textTrackSettings')) {
        items.push(new CaptionSettingsMenuItem(this.player_, {
          kind: this.kind_
        }));
        // The settings item alone should not be enough to show the menu.
        this.hideThreshold_ += 1;
      }

      return _TextTrackButton.prototype.createItems.call(this, items);
    };

    return CaptionsButton;
  }(TextTrackButton);
  /**
   * `kind` of TextTrack to look for to associate it with this menu.
   *
   * @type {string}
   * @private
   */


  CaptionsButton.prototype.kind_ = 'captions';
  /**
   * The text that should display over the `CaptionsButton`s controls. Added for localization.
   *
   * @type {string}
   * @private
   */

  CaptionsButton.prototype.controlText_ = 'Captions';
  Component.registerComponent('CaptionsButton', CaptionsButton);
|
|
|
  /**
   * SubsCapsMenuItem has an [cc] icon to distinguish captions from subtitles
   * in the SubsCapsMenu.
   *
   * @extends TextTrackMenuItem
   */

  var SubsCapsMenuItem =
  /*#__PURE__*/
  function (_TextTrackMenuItem) {
    _inheritsLoose(SubsCapsMenuItem, _TextTrackMenuItem);

    function SubsCapsMenuItem() {
      return _TextTrackMenuItem.apply(this, arguments) || this;
    }

    var _proto = SubsCapsMenuItem.prototype;

    /**
     * Create the menu item's DOM element, appending an icon placeholder and
     * screen-reader text when the associated track is a captions track.
     *
     * @param {string} [type]
     *        Element's node type, passed through to the parent createEl.
     *
     * @param {Object} [props]
     *        An object of properties that should be set on the element.
     *
     * @param {Object} [attrs]
     *        An object of attributes that should be set on the element.
     *
     * @return {Element}
     *         The element that gets created.
     */
    _proto.createEl = function createEl(type, props, attrs) {
      var innerHTML = "<span class=\"vjs-menu-item-text\">" + this.localize(this.options_.label);

      if (this.options_.track.kind === 'captions') {
        innerHTML += "\n <span aria-hidden=\"true\" class=\"vjs-icon-placeholder\"></span>\n <span class=\"vjs-control-text\"> " + this.localize('Captions') + "</span>\n ";
      }

      innerHTML += '</span>';

      var el = _TextTrackMenuItem.prototype.createEl.call(this, type, assign({
        innerHTML: innerHTML
      }, props), attrs);

      return el;
    };

    return SubsCapsMenuItem;
  }(TextTrackMenuItem);

  Component.registerComponent('SubsCapsMenuItem', SubsCapsMenuItem);
|
|
|
  /**
   * The button component for toggling and selecting captions and/or subtitles
   *
   * @extends TextTrackButton
   */

  var SubsCapsButton =
  /*#__PURE__*/
  function (_TextTrackButton) {
    _inheritsLoose(SubsCapsButton, _TextTrackButton);

    /**
     * Creates an instance of this class.
     *
     * @param {Player} player
     *        The `Player` that this class should be attached to.
     *
     * @param {Object} [options={}]
     *        The key/value store of player options.
     */
    function SubsCapsButton(player, options) {
      var _this;

      if (options === void 0) {
        options = {};
      }

      _this = _TextTrackButton.call(this, player, options) || this; // Although North America uses "captions" in most cases for
      // "captions and subtitles" other locales use "subtitles"


      _this.label_ = 'subtitles';

      if (['en', 'en-us', 'en-ca', 'fr-ca'].indexOf(_this.player_.language_) > -1) {
        _this.label_ = 'captions';
      }

      _this.menuButton_.controlText(toTitleCase(_this.label_));

      return _this;
    }
    /**
     * Builds the default DOM `className`.
     *
     * @return {string}
     *         The DOM `className` for this object.
     */


    var _proto = SubsCapsButton.prototype;

    _proto.buildCSSClass = function buildCSSClass() {
      return "vjs-subs-caps-button " + _TextTrackButton.prototype.buildCSSClass.call(this);
    };

    // Same class prefix for the menu button's wrapper element.
    _proto.buildWrapperCSSClass = function buildWrapperCSSClass() {
      return "vjs-subs-caps-button " + _TextTrackButton.prototype.buildWrapperCSSClass.call(this);
    }
    /**
     * Create caption/subtitles menu items
     *
     * @return {CaptionSettingsMenuItem[]}
     *         The array of current menu items.
     */
    ;

    _proto.createItems = function createItems() {
      var items = [];

      // Only offer the settings item when the tech renders tracks itself
      // (no native text tracks) and a settings dialog component exists.
      if (!(this.player().tech_ && this.player().tech_.featuresNativeTextTracks) && this.player().getChild('textTrackSettings')) {
        items.push(new CaptionSettingsMenuItem(this.player_, {
          kind: this.label_
        }));
        // The settings item alone should not be enough to show the menu.
        this.hideThreshold_ += 1;
      }

      items = _TextTrackButton.prototype.createItems.call(this, items, SubsCapsMenuItem);
      return items;
    };

    return SubsCapsButton;
  }(TextTrackButton);
  /**
   * `kind`s of TextTrack to look for to associate it with this menu.
   *
   * @type {array}
   * @private
   */


  SubsCapsButton.prototype.kinds_ = ['captions', 'subtitles'];
  /**
   * The text that should display over the `SubsCapsButton`s controls.
   *
   * @type {string}
   * @private
   */

  SubsCapsButton.prototype.controlText_ = 'Subtitles';
  Component.registerComponent('SubsCapsButton', SubsCapsButton);
|
|
|
  /**
   * An {@link AudioTrack} {@link MenuItem}
   *
   * @extends MenuItem
   */

  var AudioTrackMenuItem =
  /*#__PURE__*/
  function (_MenuItem) {
    _inheritsLoose(AudioTrackMenuItem, _MenuItem);

    /**
     * Creates an instance of this class.
     *
     * @param {Player} player
     *        The `Player` that this class should be attached to.
     *
     * @param {Object} [options]
     *        The key/value store of player options. `options.track` is the
     *        {@link AudioTrack} this item represents.
     */
    function AudioTrackMenuItem(player, options) {
      var _this;

      var track = options.track;
      var tracks = player.audioTracks(); // Modify options for parent MenuItem class's init.

      options.label = track.label || track.language || 'Unknown';
      options.selected = track.enabled;
      _this = _MenuItem.call(this, player, options) || this;
      _this.track = track;

      _this.addClass("vjs-" + track.kind + "-menu-item");

      // Keep this item's selected state in sync with the track list.
      var changeHandler = function changeHandler() {
        for (var _len = arguments.length, args = new Array(_len), _key = 0; _key < _len; _key++) {
          args[_key] = arguments[_key];
        }

        // NOTE(review): the doubled _assertThisInitialized call is redundant
        // transpiler output; a single call would behave identically.
        _this.handleTracksChange.apply(_assertThisInitialized(_assertThisInitialized(_this)), args);
      };

      tracks.addEventListener('change', changeHandler);

      // Detach the same handler reference when this item is disposed.
      _this.on('dispose', function () {
        tracks.removeEventListener('change', changeHandler);
      });

      return _this;
    }

    var _proto = AudioTrackMenuItem.prototype;

    /**
     * Create the menu item's DOM element, appending an icon placeholder and
     * screen-reader text when the track kind is 'main-desc'.
     *
     * @param {string} [type]
     *        Element's node type, passed through to the parent createEl.
     *
     * @param {Object} [props]
     *        An object of properties that should be set on the element.
     *
     * @param {Object} [attrs]
     *        An object of attributes that should be set on the element.
     *
     * @return {Element}
     *         The element that gets created.
     */
    _proto.createEl = function createEl(type, props, attrs) {
      var innerHTML = "<span class=\"vjs-menu-item-text\">" + this.localize(this.options_.label);

      if (this.options_.track.kind === 'main-desc') {
        innerHTML += "\n <span aria-hidden=\"true\" class=\"vjs-icon-placeholder\"></span>\n <span class=\"vjs-control-text\"> " + this.localize('Descriptions') + "</span>\n ";
      }

      innerHTML += '</span>';

      var el = _MenuItem.prototype.createEl.call(this, type, assign({
        innerHTML: innerHTML
      }, props), attrs);

      return el;
    }
    /**
     * This gets called when an `AudioTrackMenuItem is "clicked". See {@link ClickableComponent}
     * for more detailed information on what a click can be.
     *
     * @param {EventTarget~Event} [event]
     *        The `keydown`, `tap`, or `click` event that caused this function to be
     *        called.
     *
     * @listens tap
     * @listens click
     */
    ;

    _proto.handleClick = function handleClick(event) {
      var tracks = this.player_.audioTracks();

      _MenuItem.prototype.handleClick.call(this, event);

      // Audio tracks are mutually exclusive: enable only the clicked track.
      for (var i = 0; i < tracks.length; i++) {
        var track = tracks[i];
        track.enabled = track === this.track;
      }
    }
    /**
     * Handle any {@link AudioTrack} change.
     *
     * @param {EventTarget~Event} [event]
     *        The {@link AudioTrackList#change} event that caused this to run.
     *
     * @listens AudioTrackList#change
     */
    ;

    _proto.handleTracksChange = function handleTracksChange(event) {
      this.selected(this.track.enabled);
    };

    return AudioTrackMenuItem;
  }(MenuItem);

  Component.registerComponent('AudioTrackMenuItem', AudioTrackMenuItem);
|
|
|
  /**
   * The base class for buttons that toggle specific {@link AudioTrack} types.
   *
   * @extends TrackButton
   */

  var AudioTrackButton =
  /*#__PURE__*/
  function (_TrackButton) {
    _inheritsLoose(AudioTrackButton, _TrackButton);

    /**
     * Creates an instance of this class.
     *
     * @param {Player} player
     *        The `Player` that this class should be attached to.
     *
     * @param {Object} [options={}]
     *        The key/value store of player options.
     */
    function AudioTrackButton(player, options) {
      if (options === void 0) {
        options = {};
      }

      // Have the parent TrackButton watch the player's audio track list.
      options.tracks = player.audioTracks();
      return _TrackButton.call(this, player, options) || this;
    }
    /**
     * Builds the default DOM `className`.
     *
     * @return {string}
     *         The DOM `className` for this object.
     */


    var _proto = AudioTrackButton.prototype;

    _proto.buildCSSClass = function buildCSSClass() {
      return "vjs-audio-button " + _TrackButton.prototype.buildCSSClass.call(this);
    };

    // Same class prefix for the menu button's wrapper element.
    _proto.buildWrapperCSSClass = function buildWrapperCSSClass() {
      return "vjs-audio-button " + _TrackButton.prototype.buildWrapperCSSClass.call(this);
    }
    /**
     * Create a menu item for each audio track
     *
     * @param {AudioTrackMenuItem[]} [items=[]]
     *        An array of existing menu items to use.
     *
     * @return {AudioTrackMenuItem[]}
     *         An array of menu items
     */
    ;

    _proto.createItems = function createItems(items) {
      if (items === void 0) {
        items = [];
      }

      // if there's only one audio track, there's no point in showing it
      this.hideThreshold_ = 1;
      var tracks = this.player_.audioTracks();

      for (var i = 0; i < tracks.length; i++) {
        var track = tracks[i];
        items.push(new AudioTrackMenuItem(this.player_, {
          track: track,
          // MenuItem is selectable
          selectable: true,
          // MenuItem is NOT multiSelectable (i.e. only one can be marked "selected" at a time)
          multiSelectable: false
        }));
      }

      return items;
    };

    return AudioTrackButton;
  }(TrackButton);
  /**
   * The text that should display over the `AudioTrackButton`s controls. Added for localization.
   *
   * @type {string}
   * @private
   */


  AudioTrackButton.prototype.controlText_ = 'Audio Track';
  Component.registerComponent('AudioTrackButton', AudioTrackButton);
|
|
|
/** |
|
* The specific menu item type for selecting a playback rate. |
|
* |
|
* @extends MenuItem |
|
*/ |
|
|
|
var PlaybackRateMenuItem = |
|
/*#__PURE__*/ |
|
function (_MenuItem) { |
|
_inheritsLoose(PlaybackRateMenuItem, _MenuItem); |
|
|
|
/** |
|
* Creates an instance of this class. |
|
* |
|
* @param {Player} player |
|
* The `Player` that this class should be attached to. |
|
* |
|
* @param {Object} [options] |
|
* The key/value store of player options. |
|
*/ |
|
function PlaybackRateMenuItem(player, options) { |
|
var _this; |
|
|
|
var label = options.rate; |
|
var rate = parseFloat(label, 10); // Modify options for parent MenuItem class's init. |
|
|
|
options.label = label; |
|
options.selected = rate === 1; |
|
options.selectable = true; |
|
options.multiSelectable = false; |
|
_this = _MenuItem.call(this, player, options) || this; |
|
_this.label = label; |
|
_this.rate = rate; |
|
|
|
_this.on(player, 'ratechange', _this.update); |
|
|
|
return _this; |
|
} |
|
/** |
|
* This gets called when an `PlaybackRateMenuItem` is "clicked". See |
|
* {@link ClickableComponent} for more detailed information on what a click can be. |
|
* |
|
* @param {EventTarget~Event} [event] |
|
* The `keydown`, `tap`, or `click` event that caused this function to be |
|
* called. |
|
* |
|
* @listens tap |
|
* @listens click |
|
*/ |
|
|
|
|
|
var _proto = PlaybackRateMenuItem.prototype; |
|
|
|
_proto.handleClick = function handleClick(event) { |
|
_MenuItem.prototype.handleClick.call(this); |
|
|
|
this.player().playbackRate(this.rate); |
|
} |
|
/** |
|
* Update the PlaybackRateMenuItem when the playbackrate changes. |
|
* |
|
* @param {EventTarget~Event} [event] |
|
* The `ratechange` event that caused this function to run. |
|
* |
|
* @listens Player#ratechange |
|
*/ |
|
; |
|
|
|
_proto.update = function update(event) { |
|
this.selected(this.player().playbackRate() === this.rate); |
|
}; |
|
|
|
return PlaybackRateMenuItem; |
|
}(MenuItem); |
|
/** |
|
* The text that should display over the `PlaybackRateMenuItem`s controls. Added for localization. |
|
* |
|
* @type {string} |
|
* @private |
|
*/ |
|
|
|
|
|
PlaybackRateMenuItem.prototype.contentElType = 'button'; |
|
Component.registerComponent('PlaybackRateMenuItem', PlaybackRateMenuItem); |
|
|
|
  /**
   * The component for controlling the playback rate.
   *
   * @extends MenuButton
   */

  var PlaybackRateMenuButton =
  /*#__PURE__*/
  function (_MenuButton) {
    _inheritsLoose(PlaybackRateMenuButton, _MenuButton);

    /**
     * Creates an instance of this class.
     *
     * @param {Player} player
     *        The `Player` that this class should be attached to.
     *
     * @param {Object} [options]
     *        The key/value store of player options.
     */
    function PlaybackRateMenuButton(player, options) {
      var _this;

      _this = _MenuButton.call(this, player, options) || this;

      _this.updateVisibility();

      _this.updateLabel();

      _this.on(player, 'loadstart', _this.updateVisibility);

      _this.on(player, 'ratechange', _this.updateLabel);

      return _this;
    }
    /**
     * Create the `Component`'s DOM element
     *
     * @return {Element}
     *         The element that was created.
     */


    var _proto = PlaybackRateMenuButton.prototype;

    _proto.createEl = function createEl$$1() {
      var el = _MenuButton.prototype.createEl.call(this);

      // Label element that shows the current rate, e.g. "1x".
      this.labelEl_ = createEl('div', {
        className: 'vjs-playback-rate-value',
        innerHTML: '1x'
      });
      el.appendChild(this.labelEl_);
      return el;
    };

    // Drop the label element reference before the parent tears down the DOM.
    _proto.dispose = function dispose() {
      this.labelEl_ = null;

      _MenuButton.prototype.dispose.call(this);
    }
    /**
     * Builds the default DOM `className`.
     *
     * @return {string}
     *         The DOM `className` for this object.
     */
    ;

    _proto.buildCSSClass = function buildCSSClass() {
      return "vjs-playback-rate " + _MenuButton.prototype.buildCSSClass.call(this);
    };

    // Same class prefix for the menu button's wrapper element.
    _proto.buildWrapperCSSClass = function buildWrapperCSSClass() {
      return "vjs-playback-rate " + _MenuButton.prototype.buildWrapperCSSClass.call(this);
    }
    /**
     * Create the playback rate menu
     *
     * @return {Menu}
     *         Menu object populated with {@link PlaybackRateMenuItem}s
     */
    ;

    _proto.createMenu = function createMenu() {
      var menu = new Menu(this.player());
      var rates = this.playbackRates();

      if (rates) {
        // Iterate backwards so the highest rate ends up first in the menu.
        for (var i = rates.length - 1; i >= 0; i--) {
          menu.addChild(new PlaybackRateMenuItem(this.player(), {
            rate: rates[i] + 'x'
          }));
        }
      }

      return menu;
    }
    /**
     * Updates ARIA accessibility attributes
     */
    ;

    _proto.updateARIAAttributes = function updateARIAAttributes() {
      // Current playback rate
      this.el().setAttribute('aria-valuenow', this.player().playbackRate());
    }
    /**
     * This gets called when an `PlaybackRateMenuButton` is "clicked". See
     * {@link ClickableComponent} for more detailed information on what a click can be.
     *
     * @param {EventTarget~Event} [event]
     *        The `keydown`, `tap`, or `click` event that caused this function to be
     *        called.
     *
     * @listens tap
     * @listens click
     */
    ;

    _proto.handleClick = function handleClick(event) {
      // select next rate option
      var currentRate = this.player().playbackRate();
      var rates = this.playbackRates(); // this will select first one if the last one currently selected

      var newRate = rates[0];

      for (var i = 0; i < rates.length; i++) {
        if (rates[i] > currentRate) {
          newRate = rates[i];
          break;
        }
      }

      this.player().playbackRate(newRate);
    }
    /**
     * Get possible playback rates
     *
     * @return {Array}
     *         All possible playback rates
     */
    ;

    _proto.playbackRates = function playbackRates() {
      // Component-level options take precedence over player-level options.
      return this.options_.playbackRates || this.options_.playerOptions && this.options_.playerOptions.playbackRates;
    }
    /**
     * Get whether playback rates is supported by the tech
     * and an array of playback rates exists
     *
     * @return {boolean}
     *         Whether changing playback rate is supported
     */
    ;

    _proto.playbackRateSupported = function playbackRateSupported() {
      return this.player().tech_ && this.player().tech_.featuresPlaybackRate && this.playbackRates() && this.playbackRates().length > 0;
    }
    /**
     * Hide playback rate controls when there are no playback rate options to select
     *
     * @param {EventTarget~Event} [event]
     *        The event that caused this function to run.
     *
     * @listens Player#loadstart
     */
    ;

    _proto.updateVisibility = function updateVisibility(event) {
      if (this.playbackRateSupported()) {
        this.removeClass('vjs-hidden');
      } else {
        this.addClass('vjs-hidden');
      }
    }
    /**
     * Update button label when rate changed
     *
     * @param {EventTarget~Event} [event]
     *        The event that caused this function to run.
     *
     * @listens Player#ratechange
     */
    ;

    _proto.updateLabel = function updateLabel(event) {
      if (this.playbackRateSupported()) {
        this.labelEl_.innerHTML = this.player().playbackRate() + 'x';
      }
    };

    return PlaybackRateMenuButton;
  }(MenuButton);
  /**
   * The text that should display over the `PlaybackRateMenuButton`s controls. Added for localization.
   *
   * @type {string}
   * @private
   */


  PlaybackRateMenuButton.prototype.controlText_ = 'Playback Rate';
  Component.registerComponent('PlaybackRateMenuButton', PlaybackRateMenuButton);
|
|
|
/** |
|
* Just an empty spacer element that can be used as an append point for plugins, etc. |
|
* Also can be used to create space between elements when necessary. |
|
* |
|
* @extends Component |
|
*/ |
|
|
|
var Spacer = |
|
/*#__PURE__*/ |
|
function (_Component) { |
|
_inheritsLoose(Spacer, _Component); |
|
|
|
function Spacer() { |
|
return _Component.apply(this, arguments) || this; |
|
} |
|
|
|
var _proto = Spacer.prototype; |
|
|
|
/** |
|
* Builds the default DOM `className`. |
|
* |
|
* @return {string} |
|
* The DOM `className` for this object. |
|
*/ |
|
_proto.buildCSSClass = function buildCSSClass() { |
|
return "vjs-spacer " + _Component.prototype.buildCSSClass.call(this); |
|
} |
|
/** |
|
* Create the `Component`'s DOM element |
|
* |
|
* @return {Element} |
|
* The element that was created. |
|
*/ |
|
; |
|
|
|
_proto.createEl = function createEl() { |
|
return _Component.prototype.createEl.call(this, 'div', { |
|
className: this.buildCSSClass() |
|
}); |
|
}; |
|
|
|
return Spacer; |
|
}(Component); |
|
|
|
Component.registerComponent('Spacer', Spacer); |
|
|
|
/**
 * Spacer specifically meant to be used as an insertion point for new plugins, etc.
 *
 * @extends Spacer
 */
var CustomControlSpacer =
/*#__PURE__*/
function (_Spacer) {
  _inheritsLoose(CustomControlSpacer, _Spacer);

  function CustomControlSpacer() {
    return _Spacer.apply(this, arguments) || this;
  }

  var proto = CustomControlSpacer.prototype;

  /**
   * Builds the default DOM `className`.
   *
   * @return {string}
   *         The DOM `className` for this object.
   */
  proto.buildCSSClass = function buildCSSClass() {
    var inherited = _Spacer.prototype.buildCSSClass.call(this);

    return 'vjs-custom-control-spacer ' + inherited;
  };

  /**
   * Create the `Component`'s DOM element.
   *
   * @return {Element}
   *         The element that was created.
   */
  proto.createEl = function createEl() {
    var spacerEl = _Spacer.prototype.createEl.call(this, {
      className: this.buildCSSClass()
    });

    // No-flex/table-cell mode requires there be some content in the cell to
    // fill the remaining space of the table.
    spacerEl.innerHTML = "\xA0";
    return spacerEl;
  };

  return CustomControlSpacer;
}(Spacer);

Component.registerComponent('CustomControlSpacer', CustomControlSpacer);
|
|
|
/**
 * Container of main controls.
 *
 * @extends Component
 */
var ControlBar =
/*#__PURE__*/
function (_Component) {
  _inheritsLoose(ControlBar, _Component);

  function ControlBar() {
    return _Component.apply(this, arguments) || this;
  }

  var proto = ControlBar.prototype;

  /**
   * Create the `Component`'s DOM element.
   *
   * @return {Element}
   *         The element that was created.
   */
  proto.createEl = function createEl() {
    // `dir: 'ltr'` keeps the control layout stable even in RTL documents.
    var props = {
      className: 'vjs-control-bar',
      dir: 'ltr'
    };

    return _Component.prototype.createEl.call(this, 'div', props);
  };

  return ControlBar;
}(Component);

/**
 * Default options for `ControlBar`
 *
 * @type {Object}
 * @private
 */
ControlBar.prototype.options_ = {
  children: ['playToggle', 'volumePanel', 'currentTimeDisplay', 'timeDivider', 'durationDisplay', 'progressControl', 'liveDisplay', 'seekToLive', 'remainingTimeDisplay', 'customControlSpacer', 'playbackRateMenuButton', 'chaptersButton', 'descriptionsButton', 'subsCapsButton', 'audioTrackButton', 'fullscreenToggle']
};

Component.registerComponent('ControlBar', ControlBar);
|
|
|
/**
 * A display that indicates an error has occurred. This means that the video
 * is unplayable.
 *
 * @extends ModalDialog
 */
var ErrorDisplay =
/*#__PURE__*/
function (_ModalDialog) {
  _inheritsLoose(ErrorDisplay, _ModalDialog);

  /**
   * Creates an instance of this class.
   *
   * @param {Player} player
   *        The `Player` that this class should be attached to.
   *
   * @param {Object} [options]
   *        The key/value store of player options.
   */
  function ErrorDisplay(player, options) {
    var _this = _ModalDialog.call(this, player, options) || this;

    // Open this modal whenever the player emits an error.
    _this.on(player, 'error', _this.open);

    return _this;
  }

  var proto = ErrorDisplay.prototype;

  /**
   * Builds the default DOM `className`.
   *
   * @return {string}
   *         The DOM `className` for this object.
   *
   * @deprecated Since version 5.
   */
  proto.buildCSSClass = function buildCSSClass() {
    var inherited = _ModalDialog.prototype.buildCSSClass.call(this);

    return 'vjs-error-display ' + inherited;
  };

  /**
   * Gets the localized error message based on the `Player`s error.
   *
   * @return {string}
   *         The `Player`s error message localized or an empty string.
   */
  proto.content = function content() {
    var error = this.player().error();

    if (!error) {
      return '';
    }

    return this.localize(error.message);
  };

  return ErrorDisplay;
}(ModalDialog);

/**
 * The default options for an `ErrorDisplay`.
 *
 * @private
 */
ErrorDisplay.prototype.options_ = mergeOptions(ModalDialog.prototype.options_, {
  pauseOnOpen: false,
  fillAlways: true,
  temporary: false,
  uncloseable: true
});

Component.registerComponent('ErrorDisplay', ErrorDisplay);
|
|
|
// localStorage key under which the user's caption settings are persisted
// (see TextTrackSettings#saveSettings / #restoreSettings below).
var LOCAL_STORAGE_KEY = 'vjs-text-track-settings';
// Each color/opacity constant is a [value, English label] pair; the value
// is what gets stored/applied and the label is run through localize().
var COLOR_BLACK = ['#000', 'Black'];
var COLOR_BLUE = ['#00F', 'Blue'];
var COLOR_CYAN = ['#0FF', 'Cyan'];
var COLOR_GREEN = ['#0F0', 'Green'];
var COLOR_MAGENTA = ['#F0F', 'Magenta'];
var COLOR_RED = ['#F00', 'Red'];
var COLOR_WHITE = ['#FFF', 'White'];
var COLOR_YELLOW = ['#FF0', 'Yellow'];
var OPACITY_OPAQUE = ['1', 'Opaque'];
var OPACITY_SEMI = ['0.5', 'Semi-Transparent'];
var OPACITY_TRANS = ['0', 'Transparent'];

// Configuration for the various <select> elements in the DOM of this component.
//
// Possible keys include:
//
// `default`:
//   The default option index. Only needs to be provided if not zero.
// `parser`:
//   A function which is used to parse the value from the selected option in
//   a customized way.
// `selector`:
//   The selector used to find the associated <select> element.
// `id`:
//   An id template; `%s` is replaced with the component's unique id when the
//   element is created (see TextTrackSettings#createElSelect_).
var selectConfigs = {
  backgroundColor: {
    selector: '.vjs-bg-color > select',
    id: 'captions-background-color-%s',
    label: 'Color',
    options: [COLOR_BLACK, COLOR_WHITE, COLOR_RED, COLOR_GREEN, COLOR_BLUE, COLOR_YELLOW, COLOR_MAGENTA, COLOR_CYAN]
  },
  backgroundOpacity: {
    selector: '.vjs-bg-opacity > select',
    id: 'captions-background-opacity-%s',
    label: 'Transparency',
    options: [OPACITY_OPAQUE, OPACITY_SEMI, OPACITY_TRANS]
  },
  color: {
    selector: '.vjs-fg-color > select',
    id: 'captions-foreground-color-%s',
    label: 'Color',
    options: [COLOR_WHITE, COLOR_BLACK, COLOR_RED, COLOR_GREEN, COLOR_BLUE, COLOR_YELLOW, COLOR_MAGENTA, COLOR_CYAN]
  },
  edgeStyle: {
    selector: '.vjs-edge-style > select',
    id: '%s',
    label: 'Text Edge Style',
    options: [['none', 'None'], ['raised', 'Raised'], ['depressed', 'Depressed'], ['uniform', 'Uniform'], ['dropshadow', 'Dropshadow']]
  },
  fontFamily: {
    selector: '.vjs-font-family > select',
    id: 'captions-font-family-%s',
    label: 'Font Family',
    options: [['proportionalSansSerif', 'Proportional Sans-Serif'], ['monospaceSansSerif', 'Monospace Sans-Serif'], ['proportionalSerif', 'Proportional Serif'], ['monospaceSerif', 'Monospace Serif'], ['casual', 'Casual'], ['script', 'Script'], ['small-caps', 'Small Caps']]
  },
  fontPercent: {
    selector: '.vjs-font-percent > select',
    id: 'captions-font-size-%s',
    label: 'Font Size',
    options: [['0.50', '50%'], ['0.75', '75%'], ['1.00', '100%'], ['1.25', '125%'], ['1.50', '150%'], ['1.75', '175%'], ['2.00', '200%'], ['3.00', '300%'], ['4.00', '400%']],
    // index 2 => '1.00' (100%) is the default font size
    default: 2,
    // 100% is treated as "no custom size" (null); everything else is numeric
    parser: function parser(v) {
      return v === '1.00' ? null : Number(v);
    }
  },
  textOpacity: {
    selector: '.vjs-text-opacity > select',
    id: 'captions-foreground-opacity-%s',
    label: 'Transparency',
    // note: fully transparent text is intentionally not offered
    options: [OPACITY_OPAQUE, OPACITY_SEMI]
  },
  // Options for this object are defined below.
  windowColor: {
    selector: '.vjs-window-color > select',
    id: 'captions-window-color-%s',
    label: 'Color'
  },
  // Options for this object are defined below.
  windowOpacity: {
    selector: '.vjs-window-opacity > select',
    id: 'captions-window-opacity-%s',
    label: 'Transparency',
    options: [OPACITY_TRANS, OPACITY_SEMI, OPACITY_OPAQUE]
  }
};
// The window color choices are identical to the background color choices,
// so share the same array rather than duplicating it.
selectConfigs.windowColor.options = selectConfigs.backgroundColor.options;
|
/**
 * Get the actual value of an option.
 *
 * @param {string} value
 *        The value to get
 *
 * @param {Function} [parser]
 *        Optional function to adjust the value.
 *
 * @return {Mixed}
 *         - Will be `undefined` if no value exists
 *         - Will be `undefined` if the given value is "none".
 *         - Will be the actual value otherwise.
 *
 * @private
 */
function parseOptionValue(value, parser) {
  var parsed = parser ? parser(value) : value;

  if (!parsed || parsed === 'none') {
    return;
  }

  return parsed;
}

/**
 * Gets the value of the selected <option> element within a <select> element.
 *
 * @param {Element} el
 *        the element to look in
 *
 * @param {Function} [parser]
 *        Optional function to adjust the value.
 *
 * @return {Mixed}
 *         - Will be `undefined` if no value exists
 *         - Will be `undefined` if the given value is "none".
 *         - Will be the actual value otherwise.
 *
 * @private
 */
function getSelectedOptionValue(el, parser) {
  var selected = el.options[el.options.selectedIndex];

  return parseOptionValue(selected.value, parser);
}

/**
 * Sets the selected <option> element within a <select> element based on a
 * given value.
 *
 * @param {Element} el
 *        The element to look in.
 *
 * @param {string} value
 *        the property to look on.
 *
 * @param {Function} [parser]
 *        Optional function to adjust the value before comparing.
 *
 * @private
 */
function setSelectedOption(el, value, parser) {
  // A falsy value means nothing to select; leave the element untouched.
  if (!value) {
    return;
  }

  var numOptions = el.options.length;

  // Select the first option whose parsed value matches, then stop looking.
  for (var i = 0; i < numOptions; i++) {
    if (parseOptionValue(el.options[i].value, parser) === value) {
      el.selectedIndex = i;
      return;
    }
  }
}
|
/**
 * Manipulate Text Tracks settings.
 *
 * A modal dialog containing one <select> per entry in `selectConfigs`.
 * Settings can optionally be persisted to localStorage (LOCAL_STORAGE_KEY).
 *
 * @extends ModalDialog
 */
var TextTrackSettings =
/*#__PURE__*/
function (_ModalDialog) {
  _inheritsLoose(TextTrackSettings, _ModalDialog);

  /**
   * Creates an instance of this class.
   *
   * @param {Player} player
   *        The `Player` that this class should be attached to.
   *
   * @param {Object} [options]
   *        The key/value store of player options.
   */
  function TextTrackSettings(player, options) {
    var _this;

    // This dialog is kept around and reopened, so never construct it as
    // a temporary modal.
    options.temporary = false;
    _this = _ModalDialog.call(this, player, options) || this;
    // Bind once so the exact same function reference is used for all of the
    // 'change' listeners registered below.
    _this.updateDisplay = bind(_assertThisInitialized(_assertThisInitialized(_this)), _this.updateDisplay); // fill the modal and pretend we have opened it

    _this.fill();

    _this.hasBeenOpened_ = _this.hasBeenFilled_ = true;
    // Screen-reader-only marker appended after the dialog content.
    _this.endDialog = createEl('p', {
      className: 'vjs-control-text',
      textContent: _this.localize('End of dialog window.')
    });

    _this.el().appendChild(_this.endDialog);

    _this.setDefaults(); // Grab `persistTextTrackSettings` from the player options if not passed in child options

    if (options.persistTextTrackSettings === undefined) {
      _this.options_.persistTextTrackSettings = _this.options_.playerOptions.persistTextTrackSettings;
    }

    // "Done" saves (when persistence is enabled) and closes the dialog.
    _this.on(_this.$('.vjs-done-button'), 'click', function () {
      _this.saveSettings();

      _this.close();
    });

    // "Reset" restores the defaults and refreshes the caption display.
    _this.on(_this.$('.vjs-default-button'), 'click', function () {
      _this.setDefaults();

      _this.updateDisplay();
    });

    // Any change on any configured <select> refreshes the caption display.
    each(selectConfigs, function (config) {
      _this.on(_this.$(config.selector), 'change', _this.updateDisplay);
    });

    if (_this.options_.persistTextTrackSettings) {
      _this.restoreSettings();
    }

    return _this;
  }

  var _proto = TextTrackSettings.prototype;

  /**
   * Drop the end-of-dialog element reference and tear down the modal.
   */
  _proto.dispose = function dispose() {
    this.endDialog = null;

    _ModalDialog.prototype.dispose.call(this);
  };

  /**
   * Create a <select> element with configured options.
   *
   * @param {string} key
   *        Configuration key (in `selectConfigs`) to use during creation.
   *
   * @param {string} [legendId='']
   *        Id of a <legend> element to include in aria-labelledby.
   *
   * @param {string} [type='label']
   *        Tag name for the label element ('label' or 'legend').
   *
   * @return {string}
   *         An HTML string.
   *
   * @private
   */
  _proto.createElSelect_ = function createElSelect_(key, legendId, type) {
    var _this2 = this;

    if (legendId === void 0) {
      legendId = '';
    }

    if (type === void 0) {
      type = 'label';
    }

    var config = selectConfigs[key];
    // `%s` in the configured id is replaced with this component's unique id
    // so ids stay unique across multiple players on one page.
    var id = config.id.replace('%s', this.id_);
    var selectLabelledbyIds = [legendId, id].join(' ').trim();
    return ["<" + type + " id=\"" + id + "\" class=\"" + (type === 'label' ? 'vjs-label' : '') + "\">", this.localize(config.label), "</" + type + ">", "<select aria-labelledby=\"" + selectLabelledbyIds + "\">"].concat(config.options.map(function (o) {
      // Option id = select id + label with all non-word characters removed.
      var optionId = id + '-' + o[1].replace(/\W+/g, '');
      return ["<option id=\"" + optionId + "\" value=\"" + o[0] + "\" ", "aria-labelledby=\"" + selectLabelledbyIds + " " + optionId + "\">", _this2.localize(o[1]), '</option>'].join('');
    })).concat('</select>').join('');
  };

  /**
   * Create foreground color element for the component
   *
   * @return {string}
   *         An HTML string.
   *
   * @private
   */
  _proto.createElFgColor_ = function createElFgColor_() {
    var legendId = "captions-text-legend-" + this.id_;
    return ['<fieldset class="vjs-fg-color vjs-track-setting">', "<legend id=\"" + legendId + "\">", this.localize('Text'), '</legend>', this.createElSelect_('color', legendId), '<span class="vjs-text-opacity vjs-opacity">', this.createElSelect_('textOpacity', legendId), '</span>', '</fieldset>'].join('');
  };

  /**
   * Create background color element for the component
   *
   * @return {string}
   *         An HTML string.
   *
   * @private
   */
  _proto.createElBgColor_ = function createElBgColor_() {
    var legendId = "captions-background-" + this.id_;
    return ['<fieldset class="vjs-bg-color vjs-track-setting">', "<legend id=\"" + legendId + "\">", this.localize('Background'), '</legend>', this.createElSelect_('backgroundColor', legendId), '<span class="vjs-bg-opacity vjs-opacity">', this.createElSelect_('backgroundOpacity', legendId), '</span>', '</fieldset>'].join('');
  };

  /**
   * Create window color element for the component
   *
   * @return {string}
   *         An HTML string.
   *
   * @private
   */
  _proto.createElWinColor_ = function createElWinColor_() {
    var legendId = "captions-window-" + this.id_;
    return ['<fieldset class="vjs-window-color vjs-track-setting">', "<legend id=\"" + legendId + "\">", this.localize('Window'), '</legend>', this.createElSelect_('windowColor', legendId), '<span class="vjs-window-opacity vjs-opacity">', this.createElSelect_('windowOpacity', legendId), '</span>', '</fieldset>'].join('');
  };

  /**
   * Create color elements for the component
   *
   * @return {Element}
   *         The element that was created
   *
   * @private
   */
  _proto.createElColors_ = function createElColors_() {
    return createEl('div', {
      className: 'vjs-track-settings-colors',
      innerHTML: [this.createElFgColor_(), this.createElBgColor_(), this.createElWinColor_()].join('')
    });
  };

  /**
   * Create font elements for the component
   *
   * @return {Element}
   *         The element that was created.
   *
   * @private
   */
  _proto.createElFont_ = function createElFont_() {
    return createEl('div', {
      className: 'vjs-track-settings-font',
      innerHTML: ['<fieldset class="vjs-font-percent vjs-track-setting">', this.createElSelect_('fontPercent', '', 'legend'), '</fieldset>', '<fieldset class="vjs-edge-style vjs-track-setting">', this.createElSelect_('edgeStyle', '', 'legend'), '</fieldset>', '<fieldset class="vjs-font-family vjs-track-setting">', this.createElSelect_('fontFamily', '', 'legend'), '</fieldset>'].join('')
    });
  };

  /**
   * Create controls (the Reset and Done buttons) for the component
   *
   * @return {Element}
   *         The element that was created.
   *
   * @private
   */
  _proto.createElControls_ = function createElControls_() {
    var defaultsDescription = this.localize('restore all settings to the default values');
    return createEl('div', {
      className: 'vjs-track-settings-controls',
      innerHTML: ["<button type=\"button\" class=\"vjs-default-button\" title=\"" + defaultsDescription + "\">", this.localize('Reset'), "<span class=\"vjs-control-text\"> " + defaultsDescription + "</span>", '</button>', "<button type=\"button\" class=\"vjs-done-button\">" + this.localize('Done') + "</button>"].join('')
    });
  };

  /**
   * The content elements of the modal: colors, font and control sections.
   *
   * @return {Element[]}
   */
  _proto.content = function content() {
    return [this.createElColors_(), this.createElFont_(), this.createElControls_()];
  };

  /**
   * The localized modal label.
   *
   * @return {string}
   */
  _proto.label = function label() {
    return this.localize('Caption Settings Dialog');
  };

  /**
   * The localized modal description (read by screen readers).
   *
   * @return {string}
   */
  _proto.description = function description() {
    return this.localize('Beginning of dialog window. Escape will cancel and close the window.');
  };

  /**
   * Builds the default DOM `className`.
   *
   * @return {string}
   *         The DOM `className` for this object.
   */
  _proto.buildCSSClass = function buildCSSClass() {
    return _ModalDialog.prototype.buildCSSClass.call(this) + ' vjs-text-track-settings';
  };

  /**
   * Gets an object of text track settings (or null).
   *
   * Keys whose parsed value is `undefined` (e.g. "none"/default choices)
   * are omitted from the result.
   *
   * @return {Object}
   *         An object with config values parsed from the DOM or localStorage.
   */
  _proto.getValues = function getValues() {
    var _this3 = this;

    return reduce(selectConfigs, function (accum, config, key) {
      var value = getSelectedOptionValue(_this3.$(config.selector), config.parser);

      if (value !== undefined) {
        accum[key] = value;
      }

      return accum;
    }, {});
  };

  /**
   * Sets text track settings from an object of values.
   *
   * @param {Object} values
   *        An object with config values parsed from the DOM or localStorage.
   */
  _proto.setValues = function setValues(values) {
    var _this4 = this;

    each(selectConfigs, function (config, key) {
      setSelectedOption(_this4.$(config.selector), values[key], config.parser);
    });
  };

  /**
   * Sets all `<select>` elements to their default values.
   */
  _proto.setDefaults = function setDefaults() {
    var _this5 = this;

    each(selectConfigs, function (config) {
      // index 0 unless the config declares a different `default` index
      var index = config.hasOwnProperty('default') ? config.default : 0;
      _this5.$(config.selector).selectedIndex = index;
    });
  };

  /**
   * Restore texttrack settings from localStorage.
   *
   * Parse errors and storage-access errors are logged, not thrown.
   */
  _proto.restoreSettings = function restoreSettings() {
    var values;

    try {
      values = JSON.parse(window$1.localStorage.getItem(LOCAL_STORAGE_KEY));
    } catch (err) {
      log.warn(err);
    }

    if (values) {
      this.setValues(values);
    }
  };

  /**
   * Save text track settings to localStorage.
   *
   * No-op unless `persistTextTrackSettings` is enabled. An all-default
   * settings object removes the stored entry instead of writing `{}`.
   */
  _proto.saveSettings = function saveSettings() {
    if (!this.options_.persistTextTrackSettings) {
      return;
    }

    var values = this.getValues();

    try {
      if (Object.keys(values).length) {
        window$1.localStorage.setItem(LOCAL_STORAGE_KEY, JSON.stringify(values));
      } else {
        window$1.localStorage.removeItem(LOCAL_STORAGE_KEY);
      }
    } catch (err) {
      // localStorage may be unavailable (private mode, quota); warn only
      log.warn(err);
    }
  };

  /**
   * Update display of text track settings.
   */
  _proto.updateDisplay = function updateDisplay() {
    var ttDisplay = this.player_.getChild('textTrackDisplay');

    if (ttDisplay) {
      ttDisplay.updateDisplay();
    }
  };

  /**
   * conditionally blur the element and refocus the captions button
   *
   * @private
   */
  _proto.conditionalBlur_ = function conditionalBlur_() {
    this.previouslyActiveEl_ = null;
    var cb = this.player_.controlBar;
    var subsCapsBtn = cb && cb.subsCapsButton;
    var ccBtn = cb && cb.captionsButton;

    // prefer the combined subtitles/captions button; fall back to captions
    if (subsCapsBtn) {
      subsCapsBtn.focus();
    } else if (ccBtn) {
      ccBtn.focus();
    }
  };

  return TextTrackSettings;
}(ModalDialog);

Component.registerComponent('TextTrackSettings', TextTrackSettings);
|
|
|
/**
 * A Resize Manager. It is in charge of triggering `playerresize` on the player in the right conditions.
 *
 * It'll either create an iframe and use a debounced resize handler on it or use the new {@link https://wicg.github.io/ResizeObserver/|ResizeObserver}.
 *
 * If the ResizeObserver is available natively, it will be used. A polyfill can be passed in as an option.
 * If a `playerresize` event is not needed, the ResizeManager component can be removed from the player, see the example below.
 * @example <caption>How to disable the resize manager</caption>
 * const player = videojs('#vid', {
 *   resizeManager: false
 * });
 *
 * @see {@link https://wicg.github.io/ResizeObserver/|ResizeObserver specification}
 *
 * @extends Component
 */
var ResizeManager =
/*#__PURE__*/
function (_Component) {
  _inheritsLoose(ResizeManager, _Component);

  /**
   * Create the ResizeManager.
   *
   * @param {Object} player
   *        The `Player` that this class should be attached to.
   *
   * @param {Object} [options]
   *        The key/value store of ResizeManager options.
   *
   * @param {Object} [options.ResizeObserver]
   *        A polyfill for ResizeObserver can be passed in here.
   *        If this is set to null it will ignore the native ResizeObserver and fall back to the iframe fallback.
   */
  function ResizeManager(player, options) {
    var _this;

    var RESIZE_OBSERVER_AVAILABLE = options.ResizeObserver || window$1.ResizeObserver;

    // if `null` was passed, we want to disable the ResizeObserver
    if (options.ResizeObserver === null) {
      RESIZE_OBSERVER_AVAILABLE = false;
    }

    // Only create an element (the iframe fallback) when ResizeObserver
    // isn't available.
    var options_ = mergeOptions({
      createEl: !RESIZE_OBSERVER_AVAILABLE,
      reportTouchActivity: false
    }, options);

    _this = _Component.call(this, player, options_) || this;
    _this.ResizeObserver = options.ResizeObserver || window$1.ResizeObserver;
    _this.loadListener_ = null;
    _this.resizeObserver_ = null;
    // Debounce so bursts of resize notifications collapse into a single
    // `playerresize` trigger (at most one per 100ms).
    _this.debouncedHandler_ = debounce(function () {
      _this.resizeHandler();
    }, 100, false, _assertThisInitialized(_assertThisInitialized(_this)));

    if (RESIZE_OBSERVER_AVAILABLE) {
      _this.resizeObserver_ = new _this.ResizeObserver(_this.debouncedHandler_);

      _this.resizeObserver_.observe(player.el());
    } else {
      // iframe fallback: listen for resize events on the iframe's window
      // once it has loaded.
      _this.loadListener_ = function () {
        if (!_this.el_ || !_this.el_.contentWindow) {
          return;
        }

        var debouncedHandler_ = _this.debouncedHandler_;

        var unloadListener_ = _this.unloadListener_ = function () {
          off(this, 'resize', debouncedHandler_);
          off(this, 'unload', unloadListener_);
          unloadListener_ = null;
        };

        // safari and edge can unload the iframe before resizemanager dispose
        // we have to dispose of event handlers correctly before that happens
        on(_this.el_.contentWindow, 'unload', unloadListener_);
        on(_this.el_.contentWindow, 'resize', debouncedHandler_);
      };

      _this.one('load', _this.loadListener_);
    }

    return _this;
  }

  var _proto = ResizeManager.prototype;

  /**
   * Create the hidden iframe used by the fallback resize-detection path.
   *
   * @return {Element}
   *         The iframe element that was created.
   */
  _proto.createEl = function createEl() {
    return _Component.prototype.createEl.call(this, 'iframe', {
      className: 'vjs-resize-manager',
      tabIndex: -1
    }, {
      'aria-hidden': 'true'
    });
  };

  /**
   * Called when a resize is triggered on the iframe or a resize is observed via the ResizeObserver
   *
   * @fires Player#playerresize
   */
  _proto.resizeHandler = function resizeHandler() {
    /**
     * Called when the player size has changed
     *
     * @event Player#playerresize
     * @type {EventTarget~Event}
     */
    // make sure player is still around to trigger
    // prevents this from causing an error after dispose
    if (!this.player_ || !this.player_.trigger) {
      return;
    }

    this.player_.trigger('playerresize');
  };

  /**
   * Cancel pending work, detach observers/listeners and tear down.
   */
  _proto.dispose = function dispose() {
    if (this.debouncedHandler_) {
      this.debouncedHandler_.cancel();
    }

    if (this.resizeObserver_) {
      if (this.player_.el()) {
        this.resizeObserver_.unobserve(this.player_.el());
      }

      this.resizeObserver_.disconnect();
    }

    if (this.loadListener_) {
      this.off('load', this.loadListener_);
    }

    if (this.el_ && this.el_.contentWindow && this.unloadListener_) {
      this.unloadListener_.call(this.el_.contentWindow);
    }

    this.ResizeObserver = null;
    // Fix: release the actual observer reference. The previous code assigned
    // `this.resizeObserver = null` (no underscore), which created a new
    // property and left `resizeObserver_` pointing at the disconnected
    // observer.
    this.resizeObserver_ = null;
    this.debouncedHandler_ = null;
    this.loadListener_ = null;

    _Component.prototype.dispose.call(this);
  };

  return ResizeManager;
}(Component);

Component.registerComponent('ResizeManager', ResizeManager);
|
|
|
/* track when we are at the live edge, and other helpers for live playback */

var LiveTracker =
/*#__PURE__*/
function (_Component) {
  _inheritsLoose(LiveTracker, _Component);

  /**
   * Create a LiveTracker for the given player.
   *
   * @param {Player} player
   *        The `Player` that this class should be attached to.
   *
   * @param {Object} [options]
   *        The key/value store of tracker options.
   */
  function LiveTracker(player, options) {
    var _this;

    // LiveTracker does not need an element
    var options_ = mergeOptions({
      createEl: false
    }, options);
    _this = _Component.call(this, player, options_) || this;

    _this.reset_();

    _this.on(_this.player_, 'durationchange', _this.handleDurationchange);

    // we don't need to track live playback if the document is hidden,
    // also, tracking when the document is hidden can
    // cause the CPU to spike and eventually crash the page on IE11.
    if (IE_VERSION && 'hidden' in document && 'visibilityState' in document) {
      _this.on(document, 'visibilitychange', _this.handleVisibilityChange);
    }

    return _this;
  }

  var _proto = LiveTracker.prototype;

  /**
   * Stop tracking while the tab is hidden, resume when visible again
   * (live streams only, i.e. when duration is Infinity).
   */
  _proto.handleVisibilityChange = function handleVisibilityChange() {
    if (this.player_.duration() !== Infinity) {
      return;
    }

    if (document.hidden) {
      this.stopTracking();
    } else {
      this.startTracking();
    }
  };

  /**
   * Whether currentTime has fallen outside of the live edge window.
   *
   * @return {boolean}
   *         `true` when playback is behind the live edge.
   */
  _proto.isBehind_ = function isBehind_() {
    // don't report that we are behind until a timeupdate has been seen
    if (!this.timeupdateSeen_) {
      return false;
    }

    var liveCurrentTime = this.liveCurrentTime();
    var currentTime = this.player_.currentTime();
    var seekableIncrement = this.seekableIncrement_;

    // the live edge window is the amount of seconds away from live
    // that a player can be, but still be considered live.
    // we add 0.07 because the live tracking happens every 30ms
    // and we want some wiggle room for short segment live playback
    var liveEdgeWindow = seekableIncrement * 2 + 0.07;

    // on Android liveCurrentTime can be Infinity, because seekableEnd
    // can be Infinity, so we handle that case.
    return liveCurrentTime !== Infinity && liveCurrentTime - liveEdgeWindow >= currentTime;
  };

  // all the functionality for tracking when seek end changes
  // and for tracking how far past seek end we should be
  _proto.trackLive_ = function trackLive_() {
    var seekable = this.player_.seekable();

    // skip undefined seekable
    if (!seekable || !seekable.length) {
      return;
    }

    var newSeekEnd = this.seekableEnd();

    // we can only tell if we are behind live, when seekable changes
    // once we detect that seekable has changed we check the new seek
    // end against current time, with a fudge value of half a second.
    if (newSeekEnd !== this.lastSeekEnd_) {
      if (this.lastSeekEnd_) {
        this.seekableIncrement_ = Math.abs(newSeekEnd - this.lastSeekEnd_);
      }

      this.pastSeekEnd_ = 0;
      this.lastSeekEnd_ = newSeekEnd;
      this.trigger('seekableendchange');
    }

    // this runs on a 30ms interval, so assume playback advanced ~0.03s
    // past the last observed seekable end.
    this.pastSeekEnd_ = this.pastSeekEnd() + 0.03;

    if (this.isBehind_() !== this.behindLiveEdge()) {
      this.behindLiveEdge_ = this.isBehind_();
      this.trigger('liveedgechange');
    }
  };

  /**
   * handle a durationchange event on the player
   * and start/stop tracking accordingly.
   */
  _proto.handleDurationchange = function handleDurationchange() {
    if (this.player_.duration() === Infinity) {
      this.startTracking();
    } else {
      this.stopTracking();
    }
  };

  /**
   * start tracking live playback
   */
  _proto.startTracking = function startTracking() {
    var _this2 = this;

    if (this.isTracking()) {
      return;
    }

    this.trackingInterval_ = this.setInterval(this.trackLive_, 30);
    this.trackLive_();
    this.on(this.player_, 'play', this.trackLive_);
    this.on(this.player_, 'pause', this.trackLive_);
    this.one(this.player_, 'play', this.handlePlay);

    // this is to prevent showing that we are not live
    // before a video starts to play
    if (!this.timeupdateSeen_) {
      this.handleTimeupdate = function () {
        _this2.timeupdateSeen_ = true;
        _this2.handleTimeupdate = null;
      };

      this.one(this.player_, 'timeupdate', this.handleTimeupdate);
    }
  };

  /**
   * On the first play, jump to the live edge at the next timeupdate.
   */
  _proto.handlePlay = function handlePlay() {
    this.one(this.player_, 'timeupdate', this.seekToLiveEdge);
  };

  /**
   * Stop tracking, and set all internal variables to
   * their initial value.
   */
  _proto.reset_ = function reset_() {
    this.pastSeekEnd_ = 0;
    this.lastSeekEnd_ = null;
    this.behindLiveEdge_ = null;
    this.timeupdateSeen_ = false;
    this.clearInterval(this.trackingInterval_);
    this.trackingInterval_ = null;
    this.seekableIncrement_ = 12;
    this.off(this.player_, 'play', this.trackLive_);
    this.off(this.player_, 'pause', this.trackLive_);
    this.off(this.player_, 'play', this.handlePlay);
    this.off(this.player_, 'timeupdate', this.seekToLiveEdge);

    if (this.handleTimeupdate) {
      this.off(this.player_, 'timeupdate', this.handleTimeupdate);
      this.handleTimeupdate = null;
    }
  };

  /**
   * stop tracking live playback
   */
  _proto.stopTracking = function stopTracking() {
    if (!this.isTracking()) {
      return;
    }

    this.reset_();
  };

  /**
   * A helper to get the player seekable end
   * so that we don't have to null check everywhere
   */
  _proto.seekableEnd = function seekableEnd() {
    var seekable = this.player_.seekable();
    var seekableEnds = [];
    var i = seekable ? seekable.length : 0;

    while (i--) {
      seekableEnds.push(seekable.end(i));
    }

    // grab the furthest seekable end after sorting, or if there are none
    // default to Infinity.
    // Fix: sort numerically — the default Array#sort compares elements as
    // strings, so e.g. [9, 10] would sort to [10, 9] and pick the wrong end.
    return seekableEnds.length ? seekableEnds.sort(function (a, b) {
      return a - b;
    })[seekableEnds.length - 1] : Infinity;
  };

  /**
   * A helper to get the player seekable start
   * so that we don't have to null check everywhere
   */
  _proto.seekableStart = function seekableStart() {
    var seekable = this.player_.seekable();
    var seekableStarts = [];
    var i = seekable ? seekable.length : 0;

    while (i--) {
      seekableStarts.push(seekable.start(i));
    }

    // grab the first seekable start after sorting, or if there are none
    // default to 0.
    // Fix: numeric comparator for the same reason as in seekableEnd().
    return seekableStarts.length ? seekableStarts.sort(function (a, b) {
      return a - b;
    })[0] : 0;
  };

  /**
   * Get the live time window
   */
  _proto.liveWindow = function liveWindow() {
    var liveCurrentTime = this.liveCurrentTime();

    if (liveCurrentTime === Infinity) {
      return Infinity;
    }

    return liveCurrentTime - this.seekableStart();
  };

  /**
   * Determines if the player is live, only checks if this component
   * is tracking live playback or not
   */
  _proto.isLive = function isLive() {
    return this.isTracking();
  };

  /**
   * Determines if currentTime is at the live edge and won't fall behind
   * on each seekableendchange
   */
  _proto.atLiveEdge = function atLiveEdge() {
    return !this.behindLiveEdge();
  };

  /**
   * get what we expect the live current time to be
   */
  _proto.liveCurrentTime = function liveCurrentTime() {
    return this.pastSeekEnd() + this.seekableEnd();
  };

  /**
   * Returns how far past seek end we expect current time to be
   */
  _proto.pastSeekEnd = function pastSeekEnd() {
    return this.pastSeekEnd_;
  };

  /**
   * If we are currently behind the live edge, aka currentTime will be
   * behind on a seekableendchange
   */
  _proto.behindLiveEdge = function behindLiveEdge() {
    return this.behindLiveEdge_;
  };

  /**
   * Whether the tracking interval is currently running.
   */
  _proto.isTracking = function isTracking() {
    return typeof this.trackingInterval_ === 'number';
  };

  /**
   * Seek to the live edge if we are behind the live edge
   */
  _proto.seekToLiveEdge = function seekToLiveEdge() {
    if (this.atLiveEdge()) {
      return;
    }

    this.player_.currentTime(this.liveCurrentTime());

    if (this.player_.paused()) {
      this.player_.play();
    }
  };

  /**
   * Stop tracking and tear the component down.
   */
  _proto.dispose = function dispose() {
    this.stopTracking();

    _Component.prototype.dispose.call(this);
  };

  return LiveTracker;
}(Component);

Component.registerComponent('LiveTracker', LiveTracker);
|
|
|
/** |
|
* This function is used to fire a sourceset when there is something |
|
* similar to `mediaEl.load()` being called. It will try to find the source via |
|
* the `src` attribute and then the `<source>` elements. It will then fire `sourceset` |
|
* with the source that was found or empty string if we cannot know. If it cannot |
|
* find a source then `sourceset` will not be fired. |
|
* |
|
* @param {Html5} tech |
|
* The tech object that sourceset was setup on |
|
* |
|
* @return {boolean} |
|
* returns false if the sourceset was not fired and true otherwise. |
|
*/ |
|
|
|
var sourcesetLoad = function sourcesetLoad(tech) {
  var el = tech.el();

  // if `el.src` is set, that source will be loaded
  if (el.hasAttribute('src')) {
    tech.triggerSourceset(el.src);
    return true;
  }

  // Without a `src` attribute the browser runs its source selection
  // algorithm over the `<source>` children asynchronously. We only handle
  // the cases where the outcome is knowable without re-implementing it:
  //   1. no valid sources          -> do not fire `sourceset`
  //   2. exactly one unique url    -> that url is the src
  //   3. several distinct urls     -> fire with '' since we cannot know
  var sources = tech.$$('source');
  var uniqueUrls = [];

  // collect valid, de-duplicated source urls
  for (var i = 0; i < sources.length; i++) {
    var url = sources[i].src;

    if (url && uniqueUrls.indexOf(url) === -1) {
      uniqueUrls.push(url);
    }
  }

  // no `<source>` children at all, or none with a usable url
  if (!uniqueUrls.length) {
    return false;
  }

  var src = uniqueUrls.length === 1 ? uniqueUrls[0] : '';

  tech.triggerSourceset(src);
  return true;
};
|
/** |
|
* our implementation of an `innerHTML` descriptor for browsers |
|
* that do not have one. |
|
*/ |
|
|
|
|
|
var innerHTMLDescriptorPolyfill = Object.defineProperty({}, 'innerHTML', {
  get: function get() {
    // serialize via a deep clone so reading cannot disturb the live element
    return this.cloneNode(true).innerHTML;
  },
  set: function set(v) {
    // make a dummy node of the same tag name to use innerHTML on
    var dummy = document.createElement(this.nodeName.toLowerCase()); // set innerHTML to the value provided

    dummy.innerHTML = v; // make a document fragment to hold the nodes from dummy

    var docFrag = document.createDocumentFragment(); // copy all of the nodes created by the innerHTML on dummy
    // to the document fragment

    while (dummy.childNodes.length) {
      docFrag.appendChild(dummy.childNodes[0]);
    } // remove the element's current content

    this.innerText = ''; // now we add all of that html in one by appending the
    // document fragment. This is how innerHTML does it.

    // call the unpatched appendChild so we don't recurse into any wrapper
    window$1.Element.prototype.appendChild.call(this, docFrag); // then return the result that innerHTML's setter would

    return this.innerHTML;
  }
});
|
/** |
|
* Get a property descriptor given a list of priorities and the |
|
* property to get. |
|
*/ |
|
|
|
var getDescriptor = function getDescriptor(priority, prop) {
  var descriptor = {};

  // Walk the priority list (highest priority first) and stop at the
  // first descriptor that has both a getter and a setter.
  for (var i = 0; i < priority.length; i++) {
    descriptor = Object.getOwnPropertyDescriptor(priority[i], prop);

    if (descriptor && descriptor.set && descriptor.get) {
      break;
    }
  }

  // `Object.getOwnPropertyDescriptor` returns `undefined` when the last
  // object checked does not define the property at all. Fall back to an
  // empty descriptor so the flag assignments below cannot throw.
  descriptor = descriptor || {};

  // make sure the descriptor can be enumerated and redefined later
  // (e.g. when sourceset patching is torn down)
  descriptor.enumerable = true;
  descriptor.configurable = true;
  return descriptor;
};
|
|
|
var getInnerHTMLDescriptor = function getInnerHTMLDescriptor(tech) {
  // highest priority first: the element itself, then the prototype chain,
  // finally our polyfill as a guaranteed fallback
  var priority = [tech.el(), window$1.HTMLMediaElement.prototype, window$1.Element.prototype, innerHTMLDescriptorPolyfill];

  return getDescriptor(priority, 'innerHTML');
};
|
/** |
|
* Patches browser internal functions so that we can tell synchronously |
|
* if a `<source>` was appended to the media element. For some reason this |
|
* causes a `sourceset` if the the media element is ready and has no source. |
|
* This happens when: |
|
* - The page has just loaded and the media element does not have a source. |
|
* - The media element was emptied of all sources, then `load()` was called. |
|
* |
|
* It does this by patching the following functions/properties when they are supported: |
|
* |
|
* - `append()` - can be used to add a `<source>` element to the media element |
|
* - `appendChild()` - can be used to add a `<source>` element to the media element |
|
* - `insertAdjacentHTML()` - can be used to add a `<source>` element to the media element |
|
* - `innerHTML` - can be used to add a `<source>` element to the media element |
|
* |
|
* @param {Html5} tech |
|
* The tech object that sourceset is being setup on. |
|
*/ |
|
|
|
|
|
var firstSourceWatch = function firstSourceWatch(tech) {
  var el = tech.el(); // make sure firstSourceWatch isn't setup twice.

  if (el.resetSourceWatch_) {
    return;
  }

  var old = {};
  var innerDescriptor = getInnerHTMLDescriptor(tech);

  // Wraps a DOM insertion function so that, after it runs, we re-check the
  // element for a usable source and fire `sourceset` if one was added.
  var appendWrapper = function appendWrapper(appendFn) {
    return function () {
      for (var _len = arguments.length, args = new Array(_len), _key = 0; _key < _len; _key++) {
        args[_key] = arguments[_key];
      }

      var retval = appendFn.apply(el, args);
      sourcesetLoad(tech);
      return retval;
    };
  };

  // patch every supported insertion function the browser provides
  ['append', 'appendChild', 'insertAdjacentHTML'].forEach(function (k) {
    if (!el[k]) {
      return;
    } // store the old function

    old[k] = el[k]; // call the old function with a sourceset if a source
    // was loaded

    el[k] = appendWrapper(old[k]);
  });
  // also patch the innerHTML setter, since markup assignment can add <source>
  Object.defineProperty(el, 'innerHTML', mergeOptions(innerDescriptor, {
    set: appendWrapper(innerDescriptor.set)
  }));

  // restores the original functions and innerHTML descriptor; self-clears
  // so the teardown only runs once
  el.resetSourceWatch_ = function () {
    el.resetSourceWatch_ = null;
    Object.keys(old).forEach(function (k) {
      el[k] = old[k];
    });
    Object.defineProperty(el, 'innerHTML', innerDescriptor);
  }; // on the first sourceset, we need to revert our changes

  tech.one('sourceset', el.resetSourceWatch_);
};
|
/** |
|
* our implementation of a `src` descriptor for browsers |
|
* that do not have one. |
|
*/ |
|
|
|
|
|
var srcDescriptorPolyfill = Object.defineProperty({}, 'src', {
  get: function get() {
    if (this.hasAttribute('src')) {
      // the `src` IDL attribute reflects as an absolute URL even when the
      // content attribute is relative; use the unpatched getAttribute
      return getAbsoluteURL(window$1.Element.prototype.getAttribute.call(this, 'src'));
    }

    // no attribute -> reflect as empty string
    return '';
  },
  set: function set(v) {
    // reflect straight onto the attribute via the unpatched setAttribute
    window$1.Element.prototype.setAttribute.call(this, 'src', v);
    return v;
  }
});
|
|
|
var getSrcDescriptor = function getSrcDescriptor(tech) {
  // highest priority first: the element itself, the media element
  // prototype, then our polyfill as a guaranteed fallback
  var priority = [tech.el(), window$1.HTMLMediaElement.prototype, srcDescriptorPolyfill];

  return getDescriptor(priority, 'src');
};
|
/** |
|
* setup `sourceset` handling on the `Html5` tech. This function |
|
* patches the following element properties/functions: |
|
* |
|
* - `src` - to determine when `src` is set |
|
* - `setAttribute()` - to determine when `src` is set |
|
* - `load()` - this re-triggers the source selection algorithm, and can |
|
* cause a sourceset. |
|
* |
|
* If there is no source when we are adding `sourceset` support or during a `load()` |
|
* we also patch the functions listed in `firstSourceWatch`. |
|
* |
|
* @param {Html5} tech |
|
* The tech to patch |
|
*/ |
|
|
|
|
|
var setupSourceset = function setupSourceset(tech) {
  if (!tech.featuresSourceset) {
    return;
  }

  var el = tech.el(); // make sure sourceset isn't setup twice.

  if (el.resetSourceset_) {
    return;
  }

  var srcDescriptor = getSrcDescriptor(tech);
  var oldSetAttribute = el.setAttribute;
  var oldLoad = el.load;
  // patch the `src` property so direct assignment fires `sourceset`
  Object.defineProperty(el, 'src', mergeOptions(srcDescriptor, {
    set: function set(v) {
      var retval = srcDescriptor.set.call(el, v); // we use the getter here to get the actual value set on src

      tech.triggerSourceset(el.src);
      return retval;
    }
  }));

  // patch `setAttribute` so `setAttribute('src', ...)` also fires `sourceset`
  el.setAttribute = function (n, v) {
    var retval = oldSetAttribute.call(el, n, v);

    if (/src/i.test(n)) {
      tech.triggerSourceset(el.src);
    }

    return retval;
  };

  el.load = function () {
    var retval = oldLoad.call(el); // if load was called, but there was no source to fire
    // sourceset on. We have to watch for a source append
    // as that can trigger a `sourceset` when the media element
    // has no source

    if (!sourcesetLoad(tech)) {
      tech.triggerSourceset('');
      firstSourceWatch(tech);
    }

    return retval;
  };

  // fire for a source that was already loading before we patched;
  // otherwise start watching for the first <source> append
  if (el.currentSrc) {
    tech.triggerSourceset(el.currentSrc);
  } else if (!sourcesetLoad(tech)) {
    firstSourceWatch(tech);
  }

  // restores every patched function/property (called from Html5#dispose);
  // self-clears so teardown only runs once
  el.resetSourceset_ = function () {
    el.resetSourceset_ = null;
    el.load = oldLoad;
    el.setAttribute = oldSetAttribute;
    Object.defineProperty(el, 'src', srcDescriptor);

    if (el.resetSourceWatch_) {
      el.resetSourceWatch_();
    }
  };
};
|
|
|
// Babel helper for the crossorigin text-track warning's tagged template.
// On first call it builds the strings array, then replaces itself with a
// closure that returns the cached array on subsequent calls.
function _templateObject$1() {
  var data = _taggedTemplateLiteralLoose(["Text Tracks are being loaded from another origin but the crossorigin attribute isn't used.\n This may prevent text tracks from loading."]);

  _templateObject$1 = function _templateObject() {
    return data;
  };

  return data;
}
|
/** |
|
* HTML5 Media Controller - Wrapper for HTML5 Media API |
|
* |
|
* @mixes Tech~SourceHandlerAdditions |
|
* @extends Tech |
|
*/ |
|
|
|
var Html5 = |
|
/*#__PURE__*/ |
|
function (_Tech) { |
|
_inheritsLoose(Html5, _Tech); |
|
|
|
/** |
|
* Create an instance of this Tech. |
|
* |
|
* @param {Object} [options] |
|
* The key/value store of player options. |
|
* |
|
* @param {Component~ReadyCallback} ready |
|
* Callback function to call when the `HTML5` Tech is ready. |
|
*/ |
|
function Html5(options, ready) {
  var _this;

  _this = _Tech.call(this, options, ready) || this;
  var source = options.source;
  var crossoriginTracks = false; // Set the source if one is provided
  // 1) Check if the source is new (if not, we want to keep the original so playback isn't interrupted)
  // 2) Check to see if the network state of the tag was failed at init, and if so, reset the source
  // anyway so the error gets fired.

  if (source && (_this.el_.currentSrc !== source.src || options.tag && options.tag.initNetworkState_ === 3)) {
    _this.setSource(source);
  } else {
    _this.handleLateInit_(_this.el_);
  } // setup sourceset after late sourceset/init


  if (options.enableSourceset) {
    _this.setupSourcesetHandling_();
  }

  // adopt any <track> children that were present on the original tag
  if (_this.el_.hasChildNodes()) {
    var nodes = _this.el_.childNodes;
    var nodesLength = nodes.length;
    var removeNodes = [];

    // iterate in reverse so queued removals cannot disturb the walk
    while (nodesLength--) {
      var node = nodes[nodesLength];
      var nodeName = node.nodeName.toLowerCase();

      if (nodeName === 'track') {
        if (!_this.featuresNativeTextTracks) {
          // Empty video tag tracks so the built-in player doesn't use them also.
          // This may not be fast enough to stop HTML5 browsers from reading the tags
          // so we'll need to turn off any default tracks if we're manually doing
          // captions and subtitles. videoElement.textTracks
          removeNodes.push(node);
        } else {
          // store HTMLTrackElement and TextTrack to remote list
          _this.remoteTextTrackEls().addTrackElement_(node);

          _this.remoteTextTracks().addTrack(node.track);

          _this.textTracks().addTrack(node.track);

          // remember if any track is cross-origin while the media element
          // lacks a crossorigin attribute, so we can warn below
          if (!crossoriginTracks && !_this.el_.hasAttribute('crossorigin') && isCrossOrigin(node.src)) {
            crossoriginTracks = true;
          }
        }
      }
    }

    for (var i = 0; i < removeNodes.length; i++) {
      _this.el_.removeChild(removeNodes[i]);
    }
  }

  _this.proxyNativeTracks_();

  if (_this.featuresNativeTextTracks && crossoriginTracks) {
    log.warn(tsml(_templateObject$1()));
  } // prevent iOS Safari from disabling metadata text tracks during native playback


  _this.restoreMetadataTracksInIOSNativePlayer_(); // Determine if native controls should be used
  // Our goal should be to get the custom controls on mobile solid everywhere
  // so we can remove this all together. Right now this will block custom
  // controls on touch enabled laptops like the Chrome Pixel


  if ((TOUCH_ENABLED || IS_IPHONE || IS_NATIVE_ANDROID) && options.nativeControlsForTouch === true) {
    _this.setControls(true);
  } // on iOS, we want to proxy `webkitbeginfullscreen` and `webkitendfullscreen`
  // into a `fullscreenchange` event


  _this.proxyWebkitFullscreen_();

  _this.triggerReady();

  return _this;
}
|
/** |
|
* Dispose of `HTML5` media element and remove all tracks. |
|
*/ |
|
|
|
|
|
// all Html5 instance methods below are defined on this prototype alias
var _proto = Html5.prototype;
|
|
|
_proto.dispose = function dispose() {
  var el = this.el_;

  // undo any sourceset patching before tearing the element down
  if (el && el.resetSourceset_) {
    el.resetSourceset_();
  }

  Html5.disposeMediaElement(el);
  this.options_ = null; // tech will handle clearing of the emulated track list

  _Tech.prototype.dispose.call(this);
}
|
/** |
|
* Modify the media element so that we can detect when |
|
* the source is changed. Fires `sourceset` just after the source has changed |
|
*/ |
|
; |
|
|
|
_proto.setupSourcesetHandling_ = function setupSourcesetHandling_() {
  // delegate to the module-level patcher; it is a no-op when this tech
  // does not advertise `featuresSourceset`
  setupSourceset(this);
}
|
/** |
|
* When a captions track is enabled in the iOS Safari native player, all other |
|
* tracks are disabled (including metadata tracks), which nulls all of their |
|
* associated cue points. This will restore metadata tracks to their pre-fullscreen |
|
* state in those cases so that cue points are not needlessly lost. |
|
* |
|
* @private |
|
*/ |
|
; |
|
|
|
_proto.restoreMetadataTracksInIOSNativePlayer_ = function restoreMetadataTracksInIOSNativePlayer_() {
  var textTracks = this.textTracks();
  var metadataTracksPreFullscreenState; // captures a snapshot of every metadata track's current state

  var takeMetadataTrackSnapshot = function takeMetadataTrackSnapshot() {
    metadataTracksPreFullscreenState = [];

    for (var i = 0; i < textTracks.length; i++) {
      var track = textTracks[i];

      if (track.kind === 'metadata') {
        metadataTracksPreFullscreenState.push({
          track: track,
          storedMode: track.mode
        });
      }
    }
  }; // snapshot each metadata track's initial state, and update the snapshot
  // each time there is a track 'change' event

  takeMetadataTrackSnapshot();
  textTracks.addEventListener('change', takeMetadataTrackSnapshot);
  this.on('dispose', function () {
    return textTracks.removeEventListener('change', takeMetadataTrackSnapshot);
  });

  // re-enables any metadata track that the native player disabled,
  // restoring the mode captured in the last snapshot
  var restoreTrackMode = function restoreTrackMode() {
    for (var i = 0; i < metadataTracksPreFullscreenState.length; i++) {
      var storedTrack = metadataTracksPreFullscreenState[i];

      if (storedTrack.track.mode === 'disabled' && storedTrack.track.mode !== storedTrack.storedMode) {
        storedTrack.track.mode = storedTrack.storedMode;
      }
    } // we only want this handler to be executed on the first 'change' event

    textTracks.removeEventListener('change', restoreTrackMode);
  }; // when we enter fullscreen playback, stop updating the snapshot and
  // restore all track modes to their pre-fullscreen state

  this.on('webkitbeginfullscreen', function () {
    textTracks.removeEventListener('change', takeMetadataTrackSnapshot); // remove the listener before adding it just in case it wasn't previously removed

    textTracks.removeEventListener('change', restoreTrackMode);
    textTracks.addEventListener('change', restoreTrackMode);
  }); // start updating the snapshot again after leaving fullscreen

  this.on('webkitendfullscreen', function () {
    // remove the listener before adding it just in case it wasn't previously removed
    textTracks.removeEventListener('change', takeMetadataTrackSnapshot);
    textTracks.addEventListener('change', takeMetadataTrackSnapshot); // remove the restoreTrackMode handler in case it wasn't triggered during fullscreen playback

    textTracks.removeEventListener('change', restoreTrackMode);
  });
}
|
/** |
|
* Attempt to force override of tracks for the given type |
|
* |
|
* @param {string} type - Track type to override, possible values include 'Audio', |
|
* 'Video', and 'Text'. |
|
* @param {boolean} override - If set to true native audio/video will be overridden, |
|
* otherwise native audio/video will potentially be used. |
|
* @private |
|
*/ |
|
; |
|
|
|
_proto.overrideNative_ = function overrideNative_(type, override) {
  var _this2 = this;

  // If there is no behavioral change don't add/remove listeners
  if (override !== this["featuresNative" + type + "Tracks"]) {
    return;
  }

  var lowerCaseType = type.toLowerCase();

  // detach any proxy listeners currently wired to the native track list
  // (stored by proxyNativeTracksForType_ under e.g. `audioTracksListeners_`)
  if (this[lowerCaseType + "TracksListeners_"]) {
    Object.keys(this[lowerCaseType + "TracksListeners_"]).forEach(function (eventName) {
      var elTracks = _this2.el()[lowerCaseType + "Tracks"];

      elTracks.removeEventListener(eventName, _this2[lowerCaseType + "TracksListeners_"][eventName]);
    });
  }

  // flip the feature flag and re-run proxy setup under the new setting
  this["featuresNative" + type + "Tracks"] = !override;
  this[lowerCaseType + "TracksListeners_"] = null;
  this.proxyNativeTracksForType_(lowerCaseType);
}
|
/** |
|
* Attempt to force override of native audio tracks. |
|
* |
|
* @param {boolean} override - If set to true native audio will be overridden, |
|
* otherwise native audio will potentially be used. |
|
*/ |
|
; |
|
|
|
// delegates to the shared overrideNative_ helper with the 'Audio' type
_proto.overrideNativeAudioTracks = function overrideNativeAudioTracks(override) {
  this.overrideNative_('Audio', override);
}
|
/** |
|
* Attempt to force override of native video tracks. |
|
* |
|
* @param {boolean} override - If set to true native video will be overridden, |
|
* otherwise native video will potentially be used. |
|
*/ |
|
; |
|
|
|
// delegates to the shared overrideNative_ helper with the 'Video' type
_proto.overrideNativeVideoTracks = function overrideNativeVideoTracks(override) {
  this.overrideNative_('Video', override);
}
|
/** |
|
* Proxy native track list events for the given type to our track |
|
* lists if the browser we are playing in supports that type of track list. |
|
* |
|
* @param {string} name - Track type; values include 'audio', 'video', and 'text' |
|
* @private |
|
*/ |
|
; |
|
|
|
_proto.proxyNativeTracksForType_ = function proxyNativeTracksForType_(name) {
  var _this3 = this;

  var props = NORMAL[name];
  var elTracks = this.el()[props.getterName];
  var techTracks = this[props.getterName]();

  // nothing to proxy when the feature is off, or the browser exposes no
  // native track list (or one we cannot listen to)
  if (!this["featuresNative" + props.capitalName + "Tracks"] || !elTracks || !elTracks.addEventListener) {
    return;
  }

  // forward native track-list events into the tech's emulated list
  var listeners = {
    change: function change(e) {
      techTracks.trigger({
        type: 'change',
        target: techTracks,
        currentTarget: techTracks,
        srcElement: techTracks
      });
    },
    addtrack: function addtrack(e) {
      techTracks.addTrack(e.track);
    },
    removetrack: function removetrack(e) {
      techTracks.removeTrack(e.track);
    }
  };

  // drops tech tracks that no longer exist in the native list
  var removeOldTracks = function removeOldTracks() {
    var removeTracks = [];

    for (var i = 0; i < techTracks.length; i++) {
      var found = false;

      for (var j = 0; j < elTracks.length; j++) {
        if (elTracks[j] === techTracks[i]) {
          found = true;
          break;
        }
      }

      if (!found) {
        removeTracks.push(techTracks[i]);
      }
    }

    // collect first, then remove, so removal doesn't shift the indices
    // while we are still scanning
    while (removeTracks.length) {
      techTracks.removeTrack(removeTracks.shift());
    }
  };

  // store listeners so overrideNative_ can detach them later
  this[props.getterName + 'Listeners_'] = listeners;
  Object.keys(listeners).forEach(function (eventName) {
    var listener = listeners[eventName];
    elTracks.addEventListener(eventName, listener);

    _this3.on('dispose', function (e) {
      return elTracks.removeEventListener(eventName, listener);
    });
  }); // Remove (native) tracks that are not used anymore

  this.on('loadstart', removeOldTracks);
  this.on('dispose', function (e) {
    return _this3.off('loadstart', removeOldTracks);
  });
}
|
/** |
|
* Proxy all native track list events to our track lists if the browser we are playing |
|
* in supports that type of track list. |
|
* |
|
* @private |
|
*/ |
|
; |
|
|
|
_proto.proxyNativeTracks_ = function proxyNativeTracks_() {
  // wire up proxies for every standard track type (audio, video, text)
  for (var i = 0; i < NORMAL.names.length; i++) {
    this.proxyNativeTracksForType_(NORMAL.names[i]);
  }
}
|
/** |
|
* Create the `Html5` Tech's DOM element. |
|
* |
|
* @return {Element} |
|
* The element that gets created. |
|
*/ |
|
; |
|
|
|
_proto.createEl = function createEl$$1() {
  var el = this.options_.tag; // Check if this browser supports moving the element into the box.
  // On the iPhone video will break if you move the element,
  // So we have to create a brand new element.
  // If we ingested the player div, we do not need to move the media element.

  if (!el || !(this.options_.playerElIngest || this.movingMediaElementInDOM)) {
    // If the original tag is still there, clone and remove it.
    if (el) {
      var clone = el.cloneNode(true);

      if (el.parentNode) {
        el.parentNode.insertBefore(clone, el);
      }

      Html5.disposeMediaElement(el);
      el = clone;
    } else {
      el = document.createElement('video'); // determine if native controls should be used

      var tagAttributes = this.options_.tag && getAttributes(this.options_.tag);
      var attributes = mergeOptions({}, tagAttributes);

      if (!TOUCH_ENABLED || this.options_.nativeControlsForTouch !== true) {
        delete attributes.controls;
      }

      setAttributes(el, assign(attributes, {
        id: this.options_.techId,
        class: 'vjs-tech'
      }));
    }

    el.playerId = this.options_.playerId;
  }

  if (typeof this.options_.preload !== 'undefined') {
    setAttribute(el, 'preload', this.options_.preload);
  } // Update specific tag settings, in case they were overridden
  // `autoplay` has to be *last* so that `muted` and `playsinline` are present
  // when iOS/Safari or other browsers attempt to autoplay.

  var settingsAttrs = ['loop', 'muted', 'playsinline', 'autoplay'];

  for (var i = 0; i < settingsAttrs.length; i++) {
    var attr = settingsAttrs[i];
    var value = this.options_[attr];

    if (typeof value !== 'undefined') {
      // keep both the content attribute and the IDL property in sync
      // with the player option
      if (value) {
        setAttribute(el, attr, attr);
      } else {
        removeAttribute(el, attr);
      }

      el[attr] = value;
    }
  }

  return el;
}
|
/** |
|
* This will be triggered if the loadstart event has already fired, before videojs was |
|
* ready. Two known examples of when this can happen are: |
|
* 1. If we're loading the playback object after it has started loading |
|
* 2. The media is already playing the (often with autoplay on) then |
|
* |
|
* This function will fire another loadstart so that videojs can catchup. |
|
* |
|
* @fires Tech#loadstart |
|
* |
|
* @return {undefined} |
|
* returns nothing. |
|
*/ |
|
; |
|
|
|
_proto.handleLateInit_ = function handleLateInit_(el) {
  // networkState 0 (NETWORK_EMPTY) or 3 (NETWORK_NO_SOURCE):
  // loading hasn't begun or no usable source was found — nothing missed
  if (el.networkState === 0 || el.networkState === 3) {
    // The video element hasn't started loading the source yet
    // or didn't find a source
    return;
  }

  if (el.readyState === 0) {
    // NetworkState is set synchronously BUT loadstart is fired at the
    // end of the current stack, usually before setInterval(fn, 0).
    // So at this point we know loadstart may have already fired or is
    // about to fire, and either way the player hasn't seen it yet.
    // We don't want to fire loadstart prematurely here and cause a
    // double loadstart so we'll wait and see if it happens between now
    // and the next loop, and fire it if not.
    // HOWEVER, we also want to make sure it fires before loadedmetadata
    // which could also happen between now and the next loop, so we'll
    // watch for that also.
    var loadstartFired = false;

    var setLoadstartFired = function setLoadstartFired() {
      loadstartFired = true;
    };

    this.on('loadstart', setLoadstartFired);

    var triggerLoadstart = function triggerLoadstart() {
      // We did miss the original loadstart. Make sure the player
      // sees loadstart before loadedmetadata
      if (!loadstartFired) {
        this.trigger('loadstart');
      }
    };

    this.on('loadedmetadata', triggerLoadstart);
    this.ready(function () {
      this.off('loadstart', setLoadstartFired);
      this.off('loadedmetadata', triggerLoadstart);

      if (!loadstartFired) {
        // We did miss the original native loadstart. Fire it now.
        this.trigger('loadstart');
      }
    });
    return;
  } // From here on we know that loadstart already fired and we missed it.
  // The other readyState events aren't as much of a problem if we double
  // them, so not going to go to as much trouble as loadstart to prevent
  // that unless we find reason to.

  var eventsToTrigger = ['loadstart']; // loadedmetadata: newly equal to HAVE_METADATA (1) or greater

  eventsToTrigger.push('loadedmetadata'); // loadeddata: newly increased to HAVE_CURRENT_DATA (2) or greater

  if (el.readyState >= 2) {
    eventsToTrigger.push('loadeddata');
  } // canplay: newly increased to HAVE_FUTURE_DATA (3) or greater

  if (el.readyState >= 3) {
    eventsToTrigger.push('canplay');
  } // canplaythrough: newly equal to HAVE_ENOUGH_DATA (4)

  if (el.readyState >= 4) {
    eventsToTrigger.push('canplaythrough');
  } // We still need to give the player time to add event listeners

  this.ready(function () {
    eventsToTrigger.forEach(function (type) {
      this.trigger(type);
    }, this);
  });
}
|
/** |
|
* Set current time for the `HTML5` tech. |
|
* |
|
* @param {number} seconds |
|
* Set the current time of the media to this. |
|
*/ |
|
; |
|
|
|
_proto.setCurrentTime = function setCurrentTime(seconds) {
  var el = this.el_;

  try {
    el.currentTime = seconds;
  } catch (e) {
    // seeking before the element has media data throws; warn instead
    log(e, 'Video is not ready. (Video.js)'); // this.warning(VideoJS.warnings.videoNotReady);
  }
}
|
/** |
|
* Get the current duration of the HTML5 media element. |
|
* |
|
* @return {number} |
|
* The duration of the media or 0 if there is no duration. |
|
*/ |
|
; |
|
|
|
_proto.duration = function duration() {
  var _this5 = this;

  // Android Chrome will report duration as Infinity for VOD HLS until after
  // playback has started, which triggers the live display erroneously.
  // Return NaN if playback has not started and trigger a durationupdate once
  // the duration can be reliably known.
  if (this.el_.duration === Infinity && IS_ANDROID && IS_CHROME && this.el_.currentTime === 0) {
    // Wait for the first `timeupdate` with currentTime > 0 - there may be
    // several with 0
    var checkProgress = function checkProgress() {
      if (_this5.el_.currentTime > 0) {
        // Trigger durationchange for genuinely live video
        if (_this5.el_.duration === Infinity) {
          _this5.trigger('durationchange');
        }

        // one-shot: stop listening once we have seen progress
        _this5.off('timeupdate', checkProgress);
      }
    };

    this.on('timeupdate', checkProgress);
    return NaN;
  }

  // NaN (not 0) signals "duration unknown" to the player
  return this.el_.duration || NaN;
}
|
/** |
|
* Get the current width of the HTML5 media element. |
|
* |
|
* @return {number} |
|
* The width of the HTML5 media element. |
|
*/ |
|
; |
|
|
|
_proto.width = function width() {
  // rendered width of the media element, in pixels
  var el = this.el_;

  return el.offsetWidth;
}
|
/** |
|
* Get the current height of the HTML5 media element. |
|
* |
|
* @return {number} |
|
* The height of the HTML5 media element. |
|
*/ |
|
; |
|
|
|
_proto.height = function height() {
  // rendered height of the media element, in pixels
  var el = this.el_;

  return el.offsetHeight;
}
|
/** |
|
* Proxy iOS `webkitbeginfullscreen` and `webkitendfullscreen` into |
|
* `fullscreenchange` event. |
|
* |
|
* @private |
|
* @fires fullscreenchange |
|
* @listens webkitendfullscreen |
|
* @listens webkitbeginfullscreen |
|
* @listens webkitbeginfullscreen |
|
*/ |
|
; |
|
|
|
_proto.proxyWebkitFullscreen_ = function proxyWebkitFullscreen_() {
  var _this6 = this;

  // only webkit-style browsers expose this property; bail elsewhere
  if (!('webkitDisplayingFullscreen' in this.el_)) {
    return;
  }

  // NOTE(review): endFn/beginFn are plain functions, so `this` inside them
  // is whatever the event system binds when dispatching the webkit
  // fullscreen events — presumably this tech; confirm against the
  // event dispatcher.
  var endFn = function endFn() {
    this.trigger('fullscreenchange', {
      isFullscreen: false
    });
  };

  var beginFn = function beginFn() {
    // ignore picture-in-picture, which also fires webkitbeginfullscreen
    if ('webkitPresentationMode' in this.el_ && this.el_.webkitPresentationMode !== 'picture-in-picture') {
      this.one('webkitendfullscreen', endFn);
      this.trigger('fullscreenchange', {
        isFullscreen: true
      });
    }
  };

  this.on('webkitbeginfullscreen', beginFn);
  this.on('dispose', function () {
    _this6.off('webkitbeginfullscreen', beginFn);

    _this6.off('webkitendfullscreen', endFn);
  });
}
|
/** |
|
* Check if fullscreen is supported on the current playback device. |
|
* |
|
* @return {boolean} |
|
* - True if fullscreen is supported. |
|
* - False if fullscreen is not supported. |
|
*/ |
|
; |
|
|
|
_proto.supportsFullScreen = function supportsFullScreen() {
  // without the webkit entry point there is nothing we can call
  if (typeof this.el_.webkitEnterFullScreen !== 'function') {
    return false;
  }

  var userAgent = window$1.navigator && window$1.navigator.userAgent || ''; // Seems to be broken in Chromium/Chrome && Safari in Leopard

  return /Android/.test(userAgent) || !/Chrome|Mac OS X 10.5/.test(userAgent);
}
|
/** |
|
* Request that the `HTML5` Tech enter fullscreen. |
|
*/ |
|
; |
|
|
|
_proto.enterFullScreen = function enterFullScreen() {
  var video = this.el_;

  // already playing or already has metadata: go fullscreen directly
  if (!video.paused || video.networkState > video.HAVE_METADATA) {
    video.webkitEnterFullScreen();
    return;
  }

  // attempt to prime the video element for programmatic access
  // this isn't necessary on the desktop but shouldn't hurt
  this.el_.play(); // playing and pausing synchronously during the transition to fullscreen
  // can get iOS ~6.1 devices into a play/pause loop

  this.setTimeout(function () {
    video.pause();
    video.webkitEnterFullScreen();
  }, 0);
}
|
/** |
|
* Request that the `HTML5` Tech exit fullscreen. |
|
*/ |
|
; |
|
|
|
_proto.exitFullScreen = function exitFullScreen() {
  // hand control back from the native fullscreen player
  this.el_.webkitExitFullScreen();
}
|
/** |
|
* A getter/setter for the `Html5` Tech's source object. |
|
* > Note: Please use {@link Html5#setSource} |
|
* |
|
* @param {Tech~SourceObject} [src] |
|
* The source object you want to set on the `HTML5` techs element. |
|
* |
|
* @return {Tech~SourceObject|undefined} |
|
* - The current source object when a source is not passed in. |
|
* - undefined when setting |
|
* |
|
* @deprecated Since version 5. |
|
*/ |
|
; |
|
|
|
_proto.src = function src(_src) {
  if (_src !== undefined) {
    // Setting src through `src` instead of `setSrc` will be deprecated
    this.setSrc(_src);
    return;
  }

  // with no argument this acts as a getter for the element's src
  return this.el_.src;
}
|
/** |
|
* Reset the tech by removing all sources and then calling |
|
* {@link Html5.resetMediaElement}. |
|
*/ |
|
; |
|
|
|
_proto.reset = function reset() {
  // delegate source removal and element cleanup to the static helper
  Html5.resetMediaElement(this.el_);
}
|
/** |
|
* Get the current source on the `HTML5` Tech. Falls back to returning the source from |
|
* the HTML5 media element. |
|
* |
|
* @return {Tech~SourceObject} |
|
* The current source object from the HTML5 tech. With a fallback to the |
|
* elements source. |
|
*/ |
|
; |
|
|
|
_proto.currentSrc = function currentSrc() {
  // prefer the source object the tech is tracking; fall back to the element
  var source = this.currentSource_;

  return source ? source.src : this.el_.currentSrc;
}
|
/** |
|
* Set controls attribute for the HTML5 media Element. |
|
* |
|
* @param {string} val |
|
* Value to set the controls attribute to |
|
*/ |
|
; |
|
|
|
_proto.setControls = function setControls(val) {
  // coerce to a strict boolean before reflecting onto the element
  this.el_.controls = Boolean(val);
}
|
/** |
|
* Create and returns a remote {@link TextTrack} object. |
|
* |
|
* @param {string} kind |
|
* `TextTrack` kind (subtitles, captions, descriptions, chapters, or metadata) |
|
* |
|
* @param {string} [label] |
|
* Label to identify the text track |
|
* |
|
* @param {string} [language] |
|
* Two letter language abbreviation |
|
* |
|
* @return {TextTrack} |
|
* The TextTrack that gets created. |
|
*/ |
|
; |
|
|
|
_proto.addTextTrack = function addTextTrack(kind, label, language) {
  // with native support, create the track on the media element itself
  if (this.featuresNativeTextTracks) {
    return this.el_.addTextTrack(kind, label, language);
  }

  // otherwise fall back to the base Tech's emulated implementation
  return _Tech.prototype.addTextTrack.call(this, kind, label, language);
}
|
/** |
|
* Creates either native TextTrack or an emulated TextTrack depending |
|
* on the value of `featuresNativeTextTracks` |
|
* |
|
* @param {Object} options |
|
* The object should contain the options to initialize the TextTrack with. |
|
* |
|
* @param {string} [options.kind] |
|
* `TextTrack` kind (subtitles, captions, descriptions, chapters, or metadata). |
|
* |
|
* @param {string} [options.label] |
|
* Label to identify the text track |
|
* |
|
* @param {string} [options.language] |
|
* Two letter language abbreviation. |
|
* |
|
* @param {boolean} [options.default] |
|
* Default this track to on. |
|
* |
|
* @param {string} [options.id] |
|
* The internal id to assign this track. |
|
* |
|
* @param {string} [options.src] |
|
* A source url for the track. |
|
* |
|
* @return {HTMLTrackElement} |
|
* The track element that gets created. |
|
*/ |
|
; |
|
|
|
_proto.createRemoteTextTrack = function createRemoteTextTrack(options) {
  // without native text track support, use the emulated track element
  if (!this.featuresNativeTextTracks) {
    return _Tech.prototype.createRemoteTextTrack.call(this, options);
  }

  var htmlTrackElement = document.createElement('track');

  // copy a property onto the <track> element only when a truthy value
  // was provided in the options
  var copyIfSet = function copyIfSet(prop, value) {
    if (value) {
      htmlTrackElement[prop] = value;
    }
  };

  copyIfSet('kind', options.kind);
  copyIfSet('label', options.label);
  copyIfSet('srclang', options.language || options.srclang);
  copyIfSet('default', options.default);
  copyIfSet('id', options.id);
  copyIfSet('src', options.src);

  return htmlTrackElement;
}
|
/** |
|
* Creates a remote text track object and returns an html track element. |
|
* |
|
* @param {Object} options The object should contain values for |
|
* kind, language, label, and src (location of the WebVTT file) |
|
* @param {boolean} [manualCleanup=true] if set to false, the TextTrack will be |
|
* automatically removed from the video element whenever the source changes |
|
* @return {HTMLTrackElement} An Html Track Element. |
|
* This can be an emulated {@link HTMLTrackElement} or a native one. |
|
* @deprecated The default value of the "manualCleanup" parameter will default |
|
* to "false" in upcoming versions of Video.js |
|
*/ |
|
; |
|
|
|
_proto.addRemoteTextTrack = function addRemoteTextTrack(options, manualCleanup) {
  // The base Tech creates (or emulates) the <track> element and registers it.
  var trackEl = _Tech.prototype.addRemoteTextTrack.call(this, options, manualCleanup);

  // With native text tracks the element must be in the DOM for it to load.
  if (this.featuresNativeTextTracks) {
    this.el().appendChild(trackEl);
  }

  return trackEl;
}
|
/** |
|
* Remove remote `TextTrack` from `TextTrackList` object |
|
* |
|
* @param {TextTrack} track |
|
* `TextTrack` object to remove |
|
*/ |
|
; |
|
|
|
_proto.removeRemoteTextTrack = function removeRemoteTextTrack(track) {
  // Always let the base Tech drop the track from its lists first.
  _Tech.prototype.removeRemoteTextTrack.call(this, track);

  if (!this.featuresNativeTextTracks) {
    return;
  }

  // Walk the <track> elements backwards so removals don't disturb the scan,
  // and detach any element matching either the node or its TextTrack.
  var trackEls = this.$$('track');

  for (var idx = trackEls.length - 1; idx >= 0; idx--) {
    var trackEl = trackEls[idx];

    if (track === trackEl || track === trackEl.track) {
      this.el().removeChild(trackEl);
    }
  }
}
|
/** |
|
* Gets available media playback quality metrics as specified by the W3C's Media |
|
* Playback Quality API. |
|
* |
|
* @see [Spec]{@link https://wicg.github.io/media-playback-quality} |
|
* |
|
* @return {Object} |
|
* An object with supported media playback quality metrics |
|
*/ |
|
; |
|
|
|
_proto.getVideoPlaybackQuality = function getVideoPlaybackQuality() {
  var media = this.el();

  // Prefer the standardized Media Playback Quality API when available.
  if (typeof media.getVideoPlaybackQuality === 'function') {
    return media.getVideoPlaybackQuality();
  }

  var quality = {};

  // WebKit fallback: only report frame counters when both are exposed.
  if (typeof media.webkitDroppedFrameCount !== 'undefined' && typeof media.webkitDecodedFrameCount !== 'undefined') {
    quality.droppedVideoFrames = media.webkitDroppedFrameCount;
    quality.totalVideoFrames = media.webkitDecodedFrameCount;
  }

  // creationTime: use the high-resolution clock when present, otherwise
  // derive it from navigation timing.
  if (window$1.performance && typeof window$1.performance.now === 'function') {
    quality.creationTime = window$1.performance.now();
  } else if (window$1.performance && window$1.performance.timing && typeof window$1.performance.timing.navigationStart === 'number') {
    quality.creationTime = window$1.Date.now() - window$1.performance.timing.navigationStart;
  }

  return quality;
};
|
|
|
return Html5; |
|
}(Tech); |
|
/* HTML5 Support Testing ---------------------------------------------------- */ |
|
|
|
|
|
if (isReal()) {
  /**
   * Element for testing browser HTML5 media capabilities
   *
   * @type {Element}
   * @constant
   * @private
   */
  Html5.TEST_VID = document.createElement('video');

  // Attach a sample captions track so text-track probes have something to see.
  var track = document.createElement('track');
  var trackSettings = {
    kind: 'captions',
    srclang: 'en',
    label: 'English'
  };

  Object.keys(trackSettings).forEach(function (key) {
    track[key] = trackSettings[key];
  });

  Html5.TEST_VID.appendChild(track);
}
|
/** |
|
* Check if HTML5 media is supported by this browser/device. |
|
* |
|
* @return {boolean} |
|
* - True if HTML5 media is supported. |
|
* - False if HTML5 media is not supported. |
|
*/ |
|
|
|
|
|
Html5.isSupported = function () {
  // Probe by assigning volume: IE without Windows Media Player throws (#984).
  try {
    Html5.TEST_VID.volume = 0.5;
  } catch (e) {
    return false;
  }

  return Boolean(Html5.TEST_VID && Html5.TEST_VID.canPlayType);
};
|
/** |
|
* Check if the tech can support the given type |
|
* |
|
* @param {string} type |
|
* The mimetype to check |
|
* @return {string} 'probably', 'maybe', or '' (empty string) |
|
*/ |
|
|
|
|
|
Html5.canPlayType = function (type) {
  // Defer to the test element's native canPlayType for the verdict.
  var vid = Html5.TEST_VID;

  return vid.canPlayType(type);
};
|
/** |
|
* Check if the tech can support the given source |
|
* |
|
* @param {Object} srcObj |
|
* The source object |
|
* @param {Object} options |
|
* The options passed to the tech |
|
* @return {string} 'probably', 'maybe', or '' (empty string) |
|
*/ |
|
|
|
|
|
Html5.canPlaySource = function (srcObj, options) {
  // Only the MIME type matters here; `options` exists for tech API parity.
  var mimeType = srcObj.type;

  return Html5.canPlayType(mimeType);
};
|
/** |
|
* Check if the volume can be changed in this browser/device. |
|
* Volume cannot be changed in a lot of mobile devices. |
|
* Specifically, it can't be changed from 1 on iOS. |
|
* |
|
* @return {boolean} |
|
* - True if volume can be controlled |
|
* - False otherwise |
|
*/ |
|
|
|
|
|
Html5.canControlVolume = function () {
  try {
    var initial = Html5.TEST_VID.volume;

    // Nudge the volume; platforms with a fixed volume silently keep the
    // old value, so a readback comparison detects real control.
    Html5.TEST_VID.volume = initial / 2 + 0.1;
    return initial !== Html5.TEST_VID.volume;
  } catch (e) {
    // IE throws here when Windows Media Player is not installed (#3315).
    return false;
  }
};
|
/** |
|
* Check if the volume can be muted in this browser/device. |
|
* Some devices, e.g. iOS, don't allow changing volume |
|
 * but do permit muting/unmuting.
|
* |
|
 * @return {boolean}
|
* - True if volume can be muted |
|
* - False otherwise |
|
*/ |
|
|
|
|
|
/**
 * Check if the volume can be muted in this browser/device.
 * Some devices (e.g. iOS) don't allow changing volume but do permit
 * muting/unmuting.
 *
 * @return {boolean}
 *         - True if volume can be muted
 *         - False otherwise
 */
Html5.canMuteVolume = function () {
  try {
    var muted = Html5.TEST_VID.muted; // in some versions of iOS the muted property doesn't always
    // work, so we want to set both property and attribute

    Html5.TEST_VID.muted = !muted;

    if (Html5.TEST_VID.muted) {
      setAttribute(Html5.TEST_VID, 'muted', 'muted');
    } else {
      // Fix: removeAttribute(el, attribute) takes two arguments; the old
      // third 'muted' argument was spurious and silently ignored.
      removeAttribute(Html5.TEST_VID, 'muted');
    }

    return muted !== Html5.TEST_VID.muted;
  } catch (e) {
    return false;
  }
};
|
/** |
|
* Check if the playback rate can be changed in this browser/device. |
|
* |
|
* @return {boolean} |
|
* - True if playback rate can be controlled |
|
* - False otherwise |
|
*/ |
|
|
|
|
|
Html5.canControlPlaybackRate = function () {
  // Android Chrome before 58 exposes the playback rate API but it is a
  // no-op (https://github.com/videojs/video.js/issues/3180).
  if (IS_ANDROID && IS_CHROME && CHROME_VERSION < 58) {
    return false;
  }

  try {
    var initialRate = Html5.TEST_VID.playbackRate;

    // Nudge the rate and compare the readback to detect real support.
    Html5.TEST_VID.playbackRate = initialRate / 2 + 0.1;
    return initialRate !== Html5.TEST_VID.playbackRate;
  } catch (e) {
    // IE throws here when Windows Media Player is not installed (#3315).
    return false;
  }
};
|
/** |
|
* Check if we can override a video/audio elements attributes, with |
|
* Object.defineProperty. |
|
* |
|
* @return {boolean} |
|
* - True if builtin attributes can be overridden |
|
* - False otherwise |
|
*/ |
|
|
|
|
|
Html5.canOverrideAttributes = function () {
  // If `src`/`innerHTML` cannot be redefined on media elements there is no
  // support — iOS 7 Safari, for instance, throws on defineProperty here.
  var noop = function noop() {};

  try {
    ['video', 'audio'].forEach(function (tagName) {
      ['src', 'innerHTML'].forEach(function (propName) {
        Object.defineProperty(document.createElement(tagName), propName, {
          get: noop,
          set: noop
        });
      });
    });
  } catch (e) {
    return false;
  }

  return true;
};
|
/** |
|
* Check to see if native `TextTrack`s are supported by this browser/device. |
|
* |
|
* @return {boolean} |
|
* - True if native `TextTrack`s are supported. |
|
* - False otherwise |
|
*/ |
|
|
|
|
|
Html5.supportsNativeTextTracks = function () {
  // Safari (all variants) handles text tracks natively, as does Chrome on iOS.
  if (IS_ANY_SAFARI) {
    return IS_ANY_SAFARI;
  }

  return IS_IOS && IS_CHROME;
};
|
/** |
|
* Check to see if native `VideoTrack`s are supported by this browser/device |
|
* |
|
* @return {boolean} |
|
* - True if native `VideoTrack`s are supported. |
|
* - False otherwise |
|
*/ |
|
|
|
|
|
Html5.supportsNativeVideoTracks = function () {
  // Presence of a `videoTracks` list on the test element signals support.
  var vid = Html5.TEST_VID;

  return Boolean(vid && vid.videoTracks);
};
|
/** |
|
* Check to see if native `AudioTrack`s are supported by this browser/device |
|
* |
|
* @return {boolean} |
|
* - True if native `AudioTrack`s are supported. |
|
* - False otherwise |
|
*/ |
|
|
|
|
|
Html5.supportsNativeAudioTracks = function () {
  // Presence of an `audioTracks` list on the test element signals support.
  var vid = Html5.TEST_VID;

  return Boolean(vid && vid.audioTracks);
};
|
/** |
|
* An array of events available on the Html5 tech. |
|
* |
|
* @private |
|
* @type {Array} |
|
*/ |
|
|
|
|
|
// Native media events the Html5 tech re-triggers on the player.
Html5.Events = [
  'loadstart', 'suspend', 'abort', 'error', 'emptied', 'stalled',
  'loadedmetadata', 'loadeddata', 'canplay', 'canplaythrough', 'playing',
  'waiting', 'seeking', 'seeked', 'ended', 'durationchange', 'timeupdate',
  'progress', 'play', 'pause', 'ratechange', 'resize', 'volumechange'
];
|
/**
 * Boolean indicating whether the `Tech` supports volume control.
 *
 * @type {boolean}
 * @default {@link Html5.canControlVolume}
 */
Html5.prototype.featuresVolumeControl = Html5.canControlVolume();
/**
 * Boolean indicating whether the `Tech` supports muting volume.
 *
 * @type {boolean}
 * @default {@link Html5.canMuteVolume}
 */
Html5.prototype.featuresMuteControl = Html5.canMuteVolume();
/**
 * Boolean indicating whether the `Tech` supports changing the speed at which the media
 * plays. Examples:
 *   - Set player to play 2x (twice) as fast
 *   - Set player to play 0.5x (half) as fast
 *
 * @type {boolean}
 * @default {@link Html5.canControlPlaybackRate}
 */
Html5.prototype.featuresPlaybackRate = Html5.canControlPlaybackRate();
/**
 * Boolean indicating whether the `Tech` supports the `sourceset` event.
 *
 * @type {boolean}
 * @default
 */
Html5.prototype.featuresSourceset = Html5.canOverrideAttributes();
/**
 * Boolean indicating whether the `HTML5` tech currently supports the media element
 * moving in the DOM. iOS breaks if you move the media element, so this is set to
 * false there. Everywhere else this should be true.
 *
 * @type {boolean}
 * @default
 */
Html5.prototype.movingMediaElementInDOM = !IS_IOS; // TODO: Previous comment: No longer appears to be used. Can probably be removed.
// Is this true?

/**
 * Boolean indicating whether the `HTML5` tech currently supports automatic media resize
 * when going into fullscreen.
 *
 * @type {boolean}
 * @default
 */
Html5.prototype.featuresFullscreenResize = true;
/**
 * Boolean indicating whether the `HTML5` tech currently supports the progress event.
 * If this is false, manual `progress` events will be triggered instead.
 *
 * @type {boolean}
 * @default
 */
Html5.prototype.featuresProgressEvents = true;
/**
 * Boolean indicating whether the `HTML5` tech currently supports the timeupdate event.
 * If this is false, manual `timeupdate` events will be triggered instead.
 *
 * @type {boolean}
 * @default
 */
Html5.prototype.featuresTimeupdateEvents = true;
/**
 * Boolean indicating whether the `HTML5` tech currently supports native `TextTrack`s.
 *
 * @type {boolean}
 * @default {@link Html5.supportsNativeTextTracks}
 */
Html5.prototype.featuresNativeTextTracks = Html5.supportsNativeTextTracks();
/**
 * Boolean indicating whether the `HTML5` tech currently supports native `VideoTrack`s.
 *
 * @type {boolean}
 * @default {@link Html5.supportsNativeVideoTracks}
 */
Html5.prototype.featuresNativeVideoTracks = Html5.supportsNativeVideoTracks();
/**
 * Boolean indicating whether the `HTML5` tech currently supports native `AudioTrack`s.
 *
 * @type {boolean}
 * @default {@link Html5.supportsNativeAudioTracks}
 */
Html5.prototype.featuresNativeAudioTracks = Html5.supportsNativeAudioTracks(); // HTML5 Feature detection and Device Fixes --------------------------------- //
|
|
|
// Cache the unpatched native canPlayType so patchCanPlayType() can be undone
// later by unpatchCanPlayType().
var canPlayType = Html5.TEST_VID && Html5.TEST_VID.constructor.prototype.canPlayType;
// Matches HLS MIME types: application/x-mpegurl and application/vnd.apple.mpegurl.
var mpegurlRE = /^application\/(?:x-|vnd\.apple\.)mpegurl/i;
|
|
|
Html5.patchCanPlayType = function () {
  // Android 4.0+ can play HLS to some extent but reports being unable to;
  // Firefox and Chrome report correctly, so they are left untouched.
  if (!(ANDROID_VERSION >= 4.0) || IS_FIREFOX || IS_CHROME) {
    return;
  }

  Html5.TEST_VID.constructor.prototype.canPlayType = function (type) {
    if (type && mpegurlRE.test(type)) {
      return 'maybe';
    }

    return canPlayType.call(this, type);
  };
};
|
|
|
Html5.unpatchCanPlayType = function () {
  var proto = Html5.TEST_VID.constructor.prototype;
  var patched = proto.canPlayType;

  // Restore the cached native implementation and hand back the patched one.
  proto.canPlayType = canPlayType;
  return patched;
}; // by default, patch the media element
|
|
|
|
|
// Apply the HLS canPlayType patch at load time; Html5.unpatchCanPlayType()
// reverses it.
Html5.patchCanPlayType();
|
|
|
Html5.disposeMediaElement = function (el) {
  if (!el) {
    return;
  }

  if (el.parentNode) {
    el.parentNode.removeChild(el);
  }

  // Strip child <source>/<track> nodes so they stop loading.
  while (el.firstChild) {
    el.removeChild(el.firstChild);
  }

  // Drop the src reference; assigning `src = ''` would log a Firefox warning.
  el.removeAttribute('src');

  // load() forces the element to refresh its loading state, but IE on
  // Windows 7N throws here, so it needs a try/catch (#793). The IIFE keeps
  // the try block from deoptimizing the enclosing function (#1060).
  if (typeof el.load === 'function') {
    (function () {
      try {
        el.load();
      } catch (e) {
        // not supported
      }
    })();
  }
};
|
|
|
Html5.resetMediaElement = function (el) {
  if (!el) {
    return;
  }

  // Remove every <source> child, iterating backwards so removals do not
  // disturb the remaining indices.
  var sources = el.querySelectorAll('source');

  for (var i = sources.length - 1; i >= 0; i--) {
    el.removeChild(sources[i]);
  }

  // Drop the src reference; assigning `src = ''` would throw here.
  el.removeAttribute('src');

  // Refresh the element's loading state; wrapped in an IIFE so the try
  // block does not deoptimize the enclosing function (#1060).
  if (typeof el.load === 'function') {
    (function () {
      try {
        el.load();
      } catch (e) {
        // satisfy linter
      }
    })();
  }
};
|
/* Native HTML5 element property wrapping ----------------------------------- */ |
|
// Wrap native boolean attributes with getters that check both property and attribute |
|
// The list is as follows:
|
// muted, defaultMuted, autoplay, controls, loop, playsinline |
|
|
|
|
|
[
/**
 * Get `muted` from the media element: true when audio output should be
 * silent. Reading it does not affect the `volume` value.
 *
 * @method Html5#muted
 * @return {boolean}
 * @see [Spec]{@link https://www.w3.org/TR/html5/embedded-content-0.html#dom-media-muted}
 */
'muted',

/**
 * Get `defaultMuted` from the media element: whether the media should start
 * out muted. {@link Html5#muted} reflects the current state instead.
 *
 * @method Html5#defaultMuted
 * @return {boolean}
 * @see [Spec]{@link https://www.w3.org/TR/html5/embedded-content-0.html#dom-media-defaultmuted}
 */
'defaultMuted',

/**
 * Get `autoplay` from the media element: whether playback should begin as
 * soon as the page is ready.
 *
 * @method Html5#autoplay
 * @return {boolean}
 * @see [Spec]{@link https://www.w3.org/TR/html5/embedded-content-0.html#attr-media-autoplay}
 */
'autoplay',

/**
 * Get `controls` from the media element: whether the native media controls
 * should be shown.
 *
 * @method Html5#controls
 * @return {boolean}
 * @see [Spec]{@link https://www.w3.org/TR/html5/embedded-content-0.html#attr-media-controls}
 */
'controls',

/**
 * Get `loop` from the media element: whether playback should restart from
 * the beginning once the end is reached.
 *
 * @method Html5#loop
 * @return {boolean}
 * @see [Spec]{@link https://www.w3.org/TR/html5/embedded-content-0.html#attr-media-loop}
 */
'loop',

/**
 * Get `playsinline` from the media element: hints to the browser that
 * non-fullscreen playback is preferred where fullscreen playback is the
 * native default, such as in iOS Safari.
 *
 * @method Html5#playsinline
 * @return {boolean}
 * @see [Spec]{@link https://html.spec.whatwg.org/#attr-video-playsinline}
 */
'playsinline'].forEach(function (prop) {
  // Report true when either the DOM property or the content attribute is set.
  Html5.prototype[prop] = function () {
    var el = this.el_;

    return el[prop] || el.hasAttribute(prop);
  };
}); // Wrap native boolean attributes with setters that set both property and attribute
|
// The list is as follows:
|
// setMuted, setDefaultMuted, setAutoplay, setLoop, setPlaysinline |
|
// setControls is special-cased above |
|
|
|
[
/**
 * Set `muted` on the media element: true silences the current audio output.
 *
 * @method Html5#setMuted
 * @param {boolean} muted
 * @see [Spec]{@link https://www.w3.org/TR/html5/embedded-content-0.html#dom-media-muted}
 */
'muted',

/**
 * Set `defaultMuted` on the media element: true makes the media start out
 * muted; it only affects the muted state on initial playback.
 *
 * @method Html5.prototype.setDefaultMuted
 * @param {boolean} defaultMuted
 * @see [Spec]{@link https://www.w3.org/TR/html5/embedded-content-0.html#dom-media-defaultmuted}
 */
'defaultMuted',

/**
 * Set `autoplay` on the media element: true requests that playback begin as
 * soon as the page is ready.
 *
 * @method Html5#setAutoplay
 * @param {boolean} autoplay
 * @see [Spec]{@link https://www.w3.org/TR/html5/embedded-content-0.html#attr-media-autoplay}
 */
'autoplay',

/**
 * Set `loop` on the media element: true restarts playback from the beginning
 * once the end is reached.
 *
 * @method Html5#setLoop
 * @param {boolean} loop
 * @see [Spec]{@link https://www.w3.org/TR/html5/embedded-content-0.html#attr-media-loop}
 */
'loop',

/**
 * Set `playsinline` on the media element: hints to the browser that
 * non-fullscreen playback is preferred where fullscreen playback is the
 * native default, such as in iOS Safari.
 *
 * @method Html5#setPlaysinline
 * @param {boolean} playsinline
 * @see [Spec]{@link https://html.spec.whatwg.org/#attr-video-playsinline}
 */
'playsinline'].forEach(function (prop) {
  Html5.prototype['set' + toTitleCase(prop)] = function (value) {
    // Mirror the value onto both the DOM property and the content attribute
    // so either form of inspection stays consistent.
    this.el_[prop] = value;

    if (value) {
      this.el_.setAttribute(prop, prop);
    } else {
      this.el_.removeAttribute(prop);
    }
  };
}); // Wrap native properties with a getter
|
// The list is as follows
|
// paused, currentTime, buffered, volume, poster, preload, error, seeking |
|
// seekable, ended, playbackRate, defaultPlaybackRate, played, networkState |
|
// readyState, videoWidth, videoHeight |
|
|
|
[
/**
 * Get `paused` from the media element: whether the element is currently paused.
 *
 * @method Html5#paused
 * @return {boolean}
 * @see [Spec]{@link https://www.w3.org/TR/html5/embedded-content-0.html#dom-media-paused}
 */
'paused',

/**
 * Get `currentTime` from the media element: the current playback position in
 * seconds.
 *
 * @method Html5#currentTime
 * @return {number}
 * @see [Spec]{@link https://www.w3.org/TR/html5/embedded-content-0.html#dom-media-currenttime}
 */
'currentTime',

/**
 * Get `buffered` from the media element: a `TimeRange` of the parts of the
 * media that have already been downloaded and are available for playback.
 *
 * @method Html5#buffered
 * @return {TimeRange}
 * @see [Spec]{@link https://www.w3.org/TR/html5/embedded-content-0.html#dom-media-buffered}
 */
'buffered',

/**
 * Get `volume` from the media element: the current audio level from 0
 * (silent) to 1 (loudest, the default).
 *
 * @method Html5#volume
 * @return {number}
 * @see [Spec]{@link https://www.w3.org/TR/html5/embedded-content-0.html#dom-a-volume}
 */
'volume',

/**
 * Get `poster` from the media element: the url of an image shown while no
 * media data is available.
 *
 * @method Html5#poster
 * @return {string}
 * @see [Spec]{@link https://www.w3.org/TR/html5/embedded-content-0.html#attr-video-poster}
 */
'poster',

/**
 * Get `preload` from the media element: what should download before the
 * media is interacted with — 'none', 'metadata', or 'auto'.
 *
 * @method Html5#preload
 * @return {string}
 * @see [Spec]{@link https://www.w3.org/TR/html5/embedded-content-0.html#attr-media-preload}
 */
'preload',

/**
 * Get `error` from the media element: the current `MediaError`, or null when
 * there is none.
 *
 * @method Html5#error
 * @return {MediaError|null}
 * @see [Spec]{@link https://www.w3.org/TR/html5/embedded-content-0.html#dom-media-error}
 */
'error',

/**
 * Get `seeking` from the media element: whether the media is currently
 * seeking to a new position.
 *
 * @method Html5#seeking
 * @return {boolean}
 * @see [Spec]{@link https://www.w3.org/TR/html5/embedded-content-0.html#dom-media-seeking}
 */
'seeking',

/**
 * Get `seekable` from the media element: a `TimeRange` of the ranges of time
 * that can currently be seeked to.
 *
 * @method Html5#seekable
 * @return {TimeRange}
 * @see [Spec]{@link https://www.w3.org/TR/html5/embedded-content-0.html#dom-media-seekable}
 */
'seekable',

/**
 * Get `ended` from the media element: whether the media has reached the end.
 *
 * @method Html5#ended
 * @return {boolean}
 * @see [Spec]{@link https://www.w3.org/TR/html5/embedded-content-0.html#dom-media-ended}
 */
'ended',

/**
 * Get `playbackRate` from the media element: the current playback speed,
 * where 1 is normal speed (2 is twice as fast, 0.5 half as fast).
 *
 * @method Html5#playbackRate
 * @return {number}
 * @see [Spec]{@link https://www.w3.org/TR/html5/embedded-content-0.html#dom-media-playbackrate}
 */
'playbackRate',

/**
 * Get `defaultPlaybackRate` from the media element: the default playback
 * speed. Unlike {@link Html5#playbackRate}, it does not track changes made
 * after playback has started.
 *
 * @method Html5.prototype.defaultPlaybackRate
 * @return {number}
 * @see [Spec]{@link https://www.w3.org/TR/html5/embedded-content-0.html#dom-media-playbackrate}
 */
'defaultPlaybackRate',

/**
 * Get `played` from the media element: a `TimeRange` of the points in the
 * media timeline that have been played.
 *
 * @method Html5#played
 * @return {TimeRange}
 * @see [Spec]{@link https://www.w3.org/TR/html5/embedded-content-0.html#dom-media-played}
 */
'played',

/**
 * Get `networkState` from the media element: 0 NETWORK_EMPTY,
 * 1 NETWORK_IDLE, 2 NETWORK_LOADING, 3 NETWORK_NO_SOURCE.
 *
 * @method Html5#networkState
 * @return {number}
 * @see [Spec] {@link https://www.w3.org/TR/html5/embedded-content-0.html#dom-media-networkstate}
 */
'networkState',

/**
 * Get `readyState` from the media element: 0 HAVE_NOTHING, 1 HAVE_METADATA,
 * 2 HAVE_CURRENT_DATA, 3 HAVE_FUTURE_DATA, 4 HAVE_ENOUGH_DATA.
 *
 * @method Html5#readyState
 * @return {number}
 * @see [Spec] {@link https://www.w3.org/TR/html5/embedded-content-0.html#ready-states}
 */
'readyState',

/**
 * Get `videoWidth` from the video element: the current width of the video in
 * css pixels.
 *
 * @method Html5#videoWidth
 * @return {number}
 * @see [Spec] {@link https://www.w3.org/TR/html5/embedded-content-0.html#dom-video-videowidth}
 */
'videoWidth',

/**
 * Get `videoHeight` from the video element: the current height of the video
 * in css pixels.
 *
 * @method Html5#videoHeight
 * @return {number}
 * @see [Spec] {@link https://www.w3.org/TR/html5/embedded-content-0.html#dom-video-videoheight}
 */
'videoHeight'].forEach(function (prop) {
  // Each getter simply forwards to the underlying media element's property.
  Html5.prototype[prop] = function () {
    return this.el_[prop];
  };
}); // Wrap native properties with a setter in this format:
|
// set + toTitleCase(name) |
|
// The list is as follows: |
|
// setVolume, setSrc, setPoster, setPreload, setPlaybackRate, setDefaultPlaybackRate |
|
|
|
[
/**
 * Set the value of `volume` on the media element. `volume` indicates the current
 * audio level as a percentage in decimal form. This means that 1 is 100%, 0.5 is 50%, and
 * so on.
 *
 * @method Html5#setVolume
 * @param {number} percentAsDecimal
 *        The volume percent as a decimal. Valid range is from 0-1.
 *
 * @see [Spec]{@link https://www.w3.org/TR/html5/embedded-content-0.html#dom-a-volume}
 */
'volume',

/**
 * Set the value of `src` on the media element. `src` indicates the current
 * {@link Tech~SourceObject} for the media.
 *
 * @method Html5#setSrc
 * @param {Tech~SourceObject} src
 *        The source object to set as the current source.
 *
 * @see [Spec]{@link https://www.w3.org/TR/html5/embedded-content-0.html#dom-media-src}
 */
'src',

/**
 * Set the value of `poster` on the media element. `poster` is the url to
 * an image file that can/will be shown when no media data is available.
 *
 * @method Html5#setPoster
 * @param {string} poster
 *        The url to an image that should be used as the `poster` for the media
 *        element.
 *
 * @see [Spec]{@link https://www.w3.org/TR/html5/embedded-content-0.html#attr-media-poster}
 */
'poster',

/**
 * Set the value of `preload` on the media element. `preload` indicates
 * what should download before the media is interacted with. It can have the following
 * values:
 * - none: nothing should be downloaded
 * - metadata: poster and the first few frames of the media may be downloaded to get
 *   media dimensions and other metadata
 * - auto: allow the media and metadata for the media to be downloaded before
 *   interaction
 *
 * @method Html5#setPreload
 * @param {string} preload
 *        The value of `preload` to set on the media element. Must be 'none', 'metadata',
 *        or 'auto'.
 *
 * @see [Spec]{@link https://www.w3.org/TR/html5/embedded-content-0.html#attr-media-preload}
 */
'preload',

/**
 * Set the value of `playbackRate` on the media element. `playbackRate` indicates
 * the rate at which the media should play back. Examples:
 * - if playbackRate is set to 2, media will play twice as fast.
 * - if playbackRate is set to 0.5, media will play half as fast.
 *
 * @method Html5#setPlaybackRate
 * @param {number} rate
 *        The rate at which the media should play back, where 1 is normal speed.
 *
 * @see [Spec]{@link https://www.w3.org/TR/html5/embedded-content-0.html#dom-media-playbackrate}
 */
'playbackRate',

/**
 * Set the value of `defaultPlaybackRate` on the media element. `defaultPlaybackRate` indicates
 * the rate at which the media should play back upon initial startup. Changing this value
 * after a video has started will do nothing. Instead you should use {@link Html5#setPlaybackRate}.
 *
 * Example Values:
 * - if playbackRate is set to 2, media will play twice as fast.
 * - if playbackRate is set to 0.5, media will play half as fast.
 *
 * @method Html5#setDefaultPlaybackRate
 * @param {number} rate
 *        The default rate at which the media should play back, where 1 is normal speed.
 *
 * @see [Spec]{@link https://www.w3.org/TR/html5/embedded-content-0.html#dom-media-defaultplaybackrate}
 */
'defaultPlaybackRate'].forEach(function (prop) {
  // For each property above, generate a setter named `set` + TitleCase(prop)
  // (e.g. `setVolume`) that assigns directly to the underlying media element.
  Html5.prototype['set' + toTitleCase(prop)] = function (v) {
    this.el_[prop] = v;
  };
}); // wrap native functions with a function
|
// The list is as follows: |
|
// pause, load, play |
|
|
|
[
/**
 * A wrapper around the media element's `pause` function. This will call the `HTML5`
 * media element's `pause` function.
 *
 * @method Html5#pause
 * @see [Spec]{@link https://www.w3.org/TR/html5/embedded-content-0.html#dom-media-pause}
 */
'pause',

/**
 * A wrapper around the media element's `load` function. This will call the `HTML5`
 * media element's `load` function.
 *
 * @method Html5#load
 * @see [Spec]{@link https://www.w3.org/TR/html5/embedded-content-0.html#dom-media-load}
 */
'load',

/**
 * A wrapper around the media element's `play` function. This will call the `HTML5`
 * media element's `play` function.
 *
 * @method Html5#play
 * @see [Spec]{@link https://www.w3.org/TR/html5/embedded-content-0.html#dom-media-play}
 */
'play'].forEach(function (prop) {
  // Forward the call straight to the media element and return its result
  // (e.g. the value returned by the native `play()` call).
  Html5.prototype[prop] = function () {
    return this.el_[prop]();
  };
});
|
// Mix source-handler support into the Html5 tech so handlers can be
// registered against it below.
Tech.withSourceHandlers(Html5);

/**
 * Native source handler for Html5, simply passes the source to the media element.
 *
 * @property {Tech~SourceObject} source
 *           The source object
 *
 * @property {Html5} tech
 *           The instance of the HTML5 tech.
 */
Html5.nativeSourceHandler = {};
|
/**
 * Check if the media element can play the given mime type.
 *
 * @param {string} type
 *        The mimetype to check
 *
 * @return {string}
 *         'probably', 'maybe', or '' (empty string)
 */
Html5.nativeSourceHandler.canPlayType = function (type) {
  // Probe the shared test video element. On IE without MediaPlayer
  // installed, calling canPlayType throws (#519); treat that the same
  // as "cannot play".
  var result;

  try {
    result = Html5.TEST_VID.canPlayType(type);
  } catch (e) {
    result = '';
  }

  return result;
};
|
/**
 * Check if the media element can handle a source natively.
 *
 * @param {Tech~SourceObject} source
 *        The source object
 *
 * @param {Object} [options]
 *        Options to be passed to the tech.
 *
 * @return {string}
 *         'probably', 'maybe', or '' (empty string).
 */
Html5.nativeSourceHandler.canHandleSource = function (source, options) {
  // A declared MIME type is the most reliable signal, so prefer it.
  if (source.type) {
    return Html5.nativeSourceHandler.canPlayType(source.type);
  }

  // No type given: guess a video MIME type from the URL's file extension.
  if (source.src) {
    return Html5.nativeSourceHandler.canPlayType('video/' + getFileExtension(source.src));
  }

  // Neither a type nor a src to inspect.
  return '';
};
|
/**
 * Pass the source to the native media element.
 *
 * @param {Tech~SourceObject} source
 *        The source object
 *
 * @param {Html5} tech
 *        The instance of the Html5 tech
 *
 * @param {Object} [options]
 *        The options to pass to the source
 */
Html5.nativeSourceHandler.handleSource = function (source, tech, options) {
  // Native playback needs no translation; hand the URL straight to the tech.
  tech.setSrc(source.src);
};
|
/**
 * A noop for the native dispose function, as cleanup is not needed.
 */
Html5.nativeSourceHandler.dispose = function () {}; // Register the native source handler

Html5.registerSourceHandler(Html5.nativeSourceHandler);

// Register the Html5 tech so it can be looked up by name (e.g. in a
// player's techOrder).
Tech.registerTech('Html5', Html5);
|
|
|
// Lazily builds (and caches) the tagged-template-literal data for the
// "using the tech directly" warning. After the first call, the function
// replaces itself with one that simply returns the cached array, so the
// helper is only invoked once.
function _templateObject$2() {
  var data = _taggedTemplateLiteralLoose(["\n Using the tech directly can be dangerous. I hope you know what you're doing.\n See https://github.com/videojs/video.js/issues/2617 for more info.\n "]);

  _templateObject$2 = function _templateObject() {
    return data;
  };

  return data;
}
|
// Tech events that are simply re-triggered
// on the player when they happen

var TECH_EVENTS_RETRIGGER = [
/**
 * Fired while the user agent is downloading media data.
 *
 * @event Player#progress
 * @type {EventTarget~Event}
 */

/**
 * Retrigger the `progress` event that was triggered by the {@link Tech}.
 *
 * @private
 * @method Player#handleTechProgress_
 * @fires Player#progress
 * @listens Tech#progress
 */
'progress',

/**
 * Fires when the loading of an audio/video is aborted.
 *
 * @event Player#abort
 * @type {EventTarget~Event}
 */

/**
 * Retrigger the `abort` event that was triggered by the {@link Tech}.
 *
 * @private
 * @method Player#handleTechAbort_
 * @fires Player#abort
 * @listens Tech#abort
 */
'abort',

/**
 * Fires when the browser is intentionally not getting media data.
 *
 * @event Player#suspend
 * @type {EventTarget~Event}
 */

/**
 * Retrigger the `suspend` event that was triggered by the {@link Tech}.
 *
 * @private
 * @method Player#handleTechSuspend_
 * @fires Player#suspend
 * @listens Tech#suspend
 */
'suspend',

/**
 * Fires when the current playlist is empty.
 *
 * @event Player#emptied
 * @type {EventTarget~Event}
 */

/**
 * Retrigger the `emptied` event that was triggered by the {@link Tech}.
 *
 * @private
 * @method Player#handleTechEmptied_
 * @fires Player#emptied
 * @listens Tech#emptied
 */
'emptied',

/**
 * Fires when the browser is trying to get media data, but data is not available.
 *
 * @event Player#stalled
 * @type {EventTarget~Event}
 */

/**
 * Retrigger the `stalled` event that was triggered by the {@link Tech}.
 *
 * @private
 * @method Player#handleTechStalled_
 * @fires Player#stalled
 * @listens Tech#stalled
 */
'stalled',

/**
 * Fires when the browser has loaded meta data for the audio/video.
 *
 * @event Player#loadedmetadata
 * @type {EventTarget~Event}
 */

/**
 * Retrigger the `loadedmetadata` event that was triggered by the {@link Tech}.
 *
 * @private
 * @method Player#handleTechLoadedmetadata_
 * @fires Player#loadedmetadata
 * @listens Tech#loadedmetadata
 */
'loadedmetadata',

/**
 * Fires when the browser has loaded the current frame of the audio/video.
 *
 * @event Player#loadeddata
 * @type {EventTarget~Event}
 */

/**
 * Retrigger the `loadeddata` event that was triggered by the {@link Tech}.
 *
 * @private
 * @method Player#handleTechLoadeddata_
 * @fires Player#loadeddata
 * @listens Tech#loadeddata
 */
'loadeddata',

/**
 * Fires when the current playback position has changed.
 *
 * @event Player#timeupdate
 * @type {EventTarget~Event}
 */

/**
 * Retrigger the `timeupdate` event that was triggered by the {@link Tech}.
 *
 * @private
 * @method Player#handleTechTimeUpdate_
 * @fires Player#timeupdate
 * @listens Tech#timeupdate
 */
'timeupdate',

/**
 * Fires when the video's intrinsic dimensions change
 *
 * @event Player#resize
 * @type {EventTarget~Event}
 */

/**
 * Retrigger the `resize` event that was triggered by the {@link Tech}.
 *
 * @private
 * @method Player#handleTechResize_
 * @fires Player#resize
 * @listens Tech#resize
 */
'resize',

/**
 * Fires when the volume has been changed
 *
 * @event Player#volumechange
 * @type {EventTarget~Event}
 */

/**
 * Retrigger the `volumechange` event that was triggered by the {@link Tech}.
 *
 * @private
 * @method Player#handleTechVolumechange_
 * @fires Player#volumechange
 * @listens Tech#volumechange
 */
'volumechange',

/**
 * Fires when the text track has been changed
 *
 * @event Player#texttrackchange
 * @type {EventTarget~Event}
 */

/**
 * Retrigger the `texttrackchange` event that was triggered by the {@link Tech}.
 *
 * @private
 * @method Player#handleTechTexttrackchange_
 * @fires Player#texttrackchange
 * @listens Tech#texttrackchange
 */
'texttrackchange']; // events to queue when playback rate is zero
// this is a hash for the sole purpose of mapping non-camel-cased event names
// to camel-cased function names
|
|
|
// Tech events that get queued while playback rate is zero. Keys are the raw
// event names; values are the camel-cased forms used when building the
// corresponding handler method names.
var TECH_EVENTS_QUEUE = {
  canplay: 'CanPlay',
  canplaythrough: 'CanPlayThrough',
  playing: 'Playing',
  seeked: 'Seeked'
};
|
// Breakpoint names, ordered from narrowest to widest layout.
var BREAKPOINT_ORDER = ['tiny', 'xsmall', 'small', 'medium', 'large', 'xlarge', 'huge'];
var BREAKPOINT_CLASSES = {}; // grep: vjs-layout-tiny
// grep: vjs-layout-x-small
// grep: vjs-layout-small
// grep: vjs-layout-medium
// grep: vjs-layout-large
// grep: vjs-layout-x-large
// grep: vjs-layout-huge

BREAKPOINT_ORDER.forEach(function (breakpoint) {
  // Names beginning with "x" (xsmall/xlarge) become hyphenated class
  // suffixes ("x-small"/"x-large"); all other names are used verbatim.
  var suffix = breakpoint;

  if (breakpoint.charAt(0) === 'x') {
    suffix = 'x-' + breakpoint.substring(1);
  }

  BREAKPOINT_CLASSES[breakpoint] = 'vjs-layout-' + suffix;
});

// Default width values for each breakpoint name — presumably pixel upper
// bounds consumed by Player#breakpoints; verify against that method.
var DEFAULT_BREAKPOINTS = {
  tiny: 210,
  xsmall: 320,
  small: 425,
  medium: 768,
  large: 1440,
  xlarge: 2560,
  huge: Infinity
};
|
/** |
|
* An instance of the `Player` class is created when any of the Video.js setup methods |
|
* are used to initialize a video. |
|
* |
|
* After an instance has been created it can be accessed globally in two ways: |
|
* 1. By calling `videojs('example_video_1');` |
|
* 2. By using it directly via `videojs.players.example_video_1;` |
|
* |
|
* @extends Component |
|
*/ |
|
|
|
var Player = |
|
/*#__PURE__*/ |
|
function (_Component) { |
|
_inheritsLoose(Player, _Component); |
|
|
|
/** |
|
* Create an instance of this class. |
|
* |
|
* @param {Element} tag |
|
* The original video DOM element used for configuring options. |
|
* |
|
* @param {Object} [options] |
|
* Object of option names and values. |
|
* |
|
* @param {Component~ReadyCallback} [ready] |
|
* Ready callback function. |
|
*/ |
|
function Player(tag, options, ready) {
  var _this;

  // Make sure tag ID exists
  tag.id = tag.id || options.id || "vjs_video_" + newGUID(); // Set Options
  // The options argument overrides options set in the video tag
  // which overrides globally set options.
  // This latter part coincides with the load order
  // (tag must exist before Player)

  options = assign(Player.getTagSettings(tag), options); // Delay the initialization of children because we need to set up
  // player properties first, and can't use `this` before `super()`

  options.initChildren = false; // Same with creating the element

  options.createEl = false; // don't auto mixin the evented mixin

  options.evented = false; // we don't want the player to report touch activity on itself
  // see enableTouchActivity in Component

  options.reportTouchActivity = false; // If language is not set, get the closest lang attribute

  if (!options.language) {
    if (typeof tag.closest === 'function') {
      // Modern path: Element#closest finds the nearest ancestor (or self)
      // with a `lang` attribute.
      var closest = tag.closest('[lang]');

      if (closest && closest.getAttribute) {
        options.language = closest.getAttribute('lang');
      }
    } else {
      // Fallback for browsers without Element#closest: walk up the tree
      // manually until an element with a `lang` attribute is found.
      var element = tag;

      while (element && element.nodeType === 1) {
        if (getAttributes(element).hasOwnProperty('lang')) {
          options.language = element.getAttribute('lang');
          break;
        }

        element = element.parentNode;
      }
    }
  } // Run base component initializing with new options


  _this = _Component.call(this, null, options, ready) || this; // Create bound methods for document listeners.
  // (The doubled _assertThisInitialized call is transpiler output; both
  // calls appear to be identity checks — NOTE(review): confirm harmless.)

  _this.boundDocumentFullscreenChange_ = bind(_assertThisInitialized(_assertThisInitialized(_this)), _this.documentFullscreenChange_);
  _this.boundFullWindowOnEscKey_ = bind(_assertThisInitialized(_assertThisInitialized(_this)), _this.fullWindowOnEscKey); // create logger

  _this.log = createLogger$1(_this.id_); // Tracks when a tech changes the poster

  _this.isPosterFromTech_ = false; // Holds callback info that gets queued when playback rate is zero
  // and a seek is happening

  _this.queuedCallbacks_ = []; // Turn off API access because we're loading a new tech that might load asynchronously

  _this.isReady_ = false; // Init state hasStarted_

  _this.hasStarted_ = false; // Init state userActive_

  _this.userActive_ = false; // if the global option object was accidentally blown away by
  // someone, bail early with an informative error

  if (!_this.options_ || !_this.options_.techOrder || !_this.options_.techOrder.length) {
    throw new Error('No techOrder specified. Did you overwrite ' + 'videojs.options instead of just changing the ' + 'properties you want to override?');
  } // Store the original tag used to set options


  _this.tag = tag; // Store the tag attributes used to restore html5 element

  _this.tagAttributes = tag && getAttributes(tag); // Update current language

  _this.language(_this.options_.language); // Update Supported Languages

  if (options.languages) {
    // Normalise player option languages to lowercase
    var languagesToLower = {};
    Object.getOwnPropertyNames(options.languages).forEach(function (name$$1) {
      languagesToLower[name$$1.toLowerCase()] = options.languages[name$$1];
    });
    _this.languages_ = languagesToLower;
  } else {
    _this.languages_ = Player.prototype.options_.languages;
  }

  _this.resetCache_(); // Set poster

  _this.poster_ = options.poster || ''; // Set controls

  _this.controls_ = !!options.controls; // Original tag settings stored in options
  // now remove immediately so native controls don't flash.
  // May be turned back on by HTML5 tech if nativeControlsForTouch is true

  tag.controls = false;
  tag.removeAttribute('controls');
  _this.changingSrc_ = false;
  _this.playCallbacks_ = [];
  _this.playTerminatedQueue_ = []; // the attribute overrides the option

  if (tag.hasAttribute('autoplay')) {
    _this.autoplay(true);
  } else {
    // otherwise use the setter to validate and
    // set the correct value.
    _this.autoplay(_this.options_.autoplay);
  } // check plugins
  // Validate that every configured plugin name exists before any of them
  // run (fail fast with a clear error).


  if (options.plugins) {
    Object.keys(options.plugins).forEach(function (name$$1) {
      if (typeof _this[name$$1] !== 'function') {
        throw new Error("plugin \"" + name$$1 + "\" does not exist");
      }
    });
  }
  /*
   * Store the internal state of scrubbing
   *
   * @private
   * @return {Boolean} True if the user is scrubbing
   */


  _this.scrubbing_ = false;
  _this.el_ = _this.createEl(); // Make this an evented object and use `el_` as its event bus.

  evented(_assertThisInitialized(_assertThisInitialized(_this)), {
    eventBusKey: 'el_'
  });

  if (_this.fluid_) {
    _this.on('playerreset', _this.updateStyleEl_);
  } // We also want to pass the original player options to each component and plugin
  // as well so they don't need to reach back into the player for options later.
  // We also need to do another copy of this.options_ so we don't end up with
  // an infinite loop.


  var playerOptionsCopy = mergeOptions(_this.options_); // Load plugins

  if (options.plugins) {
    Object.keys(options.plugins).forEach(function (name$$1) {
      _this[name$$1](options.plugins[name$$1]);
    });
  }

  _this.options_.playerOptions = playerOptionsCopy;
  _this.middleware_ = [];

  _this.initChildren(); // Set isAudio based on whether or not an audio tag was used

  _this.isAudio(tag.nodeName.toLowerCase() === 'audio'); // Update controls className. Can't do this when the controls are initially
  // set because the element doesn't exist yet.

  if (_this.controls()) {
    _this.addClass('vjs-controls-enabled');
  } else {
    _this.addClass('vjs-controls-disabled');
  } // Set ARIA label and region role depending on player type


  _this.el_.setAttribute('role', 'region');

  if (_this.isAudio()) {
    _this.el_.setAttribute('aria-label', _this.localize('Audio Player'));
  } else {
    _this.el_.setAttribute('aria-label', _this.localize('Video Player'));
  }

  if (_this.isAudio()) {
    _this.addClass('vjs-audio');
  }

  if (_this.flexNotSupported_()) {
    _this.addClass('vjs-no-flex');
  } // TODO: Make this smarter. Toggle user state between touching/mousing
  // using events, since devices can have both touch and mouse events.
  // TODO: Make this check be performed again when the window switches between monitors
  // (See https://github.com/videojs/video.js/issues/5683)


  if (TOUCH_ENABLED) {
    _this.addClass('vjs-touch-enabled');
  } // iOS Safari has broken hover handling


  if (!IS_IOS) {
    _this.addClass('vjs-workinghover');
  } // Make player easily findable by ID


  Player.players[_this.id_] = _assertThisInitialized(_assertThisInitialized(_this)); // Add a major version class to aid css in plugins

  var majorVersion = version.split('.')[0];

  _this.addClass("vjs-v" + majorVersion); // When the player is first initialized, trigger activity so components
  // like the control bar show themselves if needed

  _this.userActive(true);

  _this.reportUserActivity();

  _this.one('play', _this.listenForUserActivity_);

  _this.on('stageclick', _this.handleStageClick_);

  _this.on('keydown', _this.handleKeyDown);

  _this.breakpoints(_this.options_.breakpoints);

  _this.responsive(_this.options_.responsive);

  return _this;
}
|
/** |
|
* Destroys the video player and does any necessary cleanup. |
|
* |
|
* This is especially helpful if you are dynamically adding and removing videos |
|
* to/from the DOM. |
|
* |
|
* @fires Player#dispose |
|
*/ |
|
|
|
|
|
var _proto = Player.prototype; |
|
|
|
_proto.dispose = function dispose() {
  var _this2 = this;

  /**
   * Called when the player is being disposed of.
   *
   * @event Player#dispose
   * @type {EventTarget~Event}
   */
  this.trigger('dispose'); // prevent dispose from being called twice

  this.off('dispose'); // Make sure all player-specific document listeners are unbound. These
  // were attached to `document` (not the player element), so they would
  // otherwise outlive the player.

  off(document, FullscreenApi.fullscreenchange, this.boundDocumentFullscreenChange_);
  off(document, 'keydown', this.boundFullWindowOnEscKey_);

  // Remove the dynamically-created <style> element for player dimensions.
  if (this.styleEl_ && this.styleEl_.parentNode) {
    this.styleEl_.parentNode.removeChild(this.styleEl_);
    this.styleEl_ = null;
  } // Kill reference to this player

  Player.players[this.id_] = null;

  // Break back-references from DOM elements to the player so they don't
  // keep the player alive.
  if (this.tag && this.tag.player) {
    this.tag.player = null;
  }

  if (this.el_ && this.el_.player) {
    this.el_.player = null;
  }

  if (this.tech_) {
    this.tech_.dispose();
    this.isPosterFromTech_ = false;
    this.poster_ = '';
  }

  if (this.playerElIngest_) {
    this.playerElIngest_ = null;
  }

  if (this.tag) {
    this.tag = null;
  }

  clearCacheForPlayer(this); // remove all event handlers for track lists
  // all tracks and track listeners are removed on
  // tech dispose

  ALL.names.forEach(function (name$$1) {
    var props = ALL[name$$1];

    var list = _this2[props.getterName](); // if it is not a native list
    // we have to manually remove event listeners

    if (list && list.off) {
      list.off();
    }
  }); // the actual .el_ is removed here

  _Component.prototype.dispose.call(this);
}
|
/** |
|
* Create the `Player`'s DOM element. |
|
* |
|
* @return {Element} |
|
* The DOM element that gets created. |
|
*/ |
|
; |
|
|
|
_proto.createEl = function createEl$$1() {
  var tag = this.tag;
  var el;
  // "El ingest" mode: the tag is already wrapped in a <div data-vjs-player>,
  // so reuse that wrapper as the player element instead of creating one.
  var playerElIngest = this.playerElIngest_ = tag.parentNode && tag.parentNode.hasAttribute && tag.parentNode.hasAttribute('data-vjs-player');
  var divEmbed = this.tag.tagName.toLowerCase() === 'video-js';

  if (playerElIngest) {
    el = this.el_ = tag.parentNode;
  } else if (!divEmbed) {
    el = this.el_ = _Component.prototype.createEl.call(this, 'div');
  } // Copy over all the attributes from the tag, including ID and class
  // ID will now reference player box, not the video tag

  var attrs = getAttributes(tag);

  if (divEmbed) {
    // <video-js> custom-element embed: the custom element becomes the
    // player box, and a fresh <video> element becomes the tech tag.
    el = this.el_ = tag;
    tag = this.tag = document.createElement('video');

    // Move all existing children (sources, tracks, etc.) into the new
    // <video> element.
    // NOTE(review): the condition counts element children while firstChild
    // may be a text node; each iteration still makes progress, but confirm
    // text nodes between elements are moved as intended.
    while (el.children.length) {
      tag.appendChild(el.firstChild);
    }

    if (!hasClass(el, 'video-js')) {
      addClass(el, 'video-js');
    }

    el.appendChild(tag);
    playerElIngest = this.playerElIngest_ = el; // move properties over from our custom `video-js` element
    // to our new `video` element. This will move things like
    // `src` or `controls` that were set via js before the player
    // was initialized.

    Object.keys(el).forEach(function (k) {
      tag[k] = el[k];
    });
  } // set tabindex to -1 to remove the video element from the focus order

  tag.setAttribute('tabindex', '-1');
  attrs.tabindex = '-1'; // Workaround for #4583 (JAWS+IE doesn't announce BPB or play button)
  // See https://github.com/FreedomScientific/VFO-standards-support/issues/78
  // Note that we can't detect if JAWS is being used, but this ARIA attribute
  // doesn't change behavior of IE11 if JAWS is not being used

  if (IE_VERSION) {
    tag.setAttribute('role', 'application');
    attrs.role = 'application';
  } // Remove width/height attrs from tag so CSS can make it 100% width/height

  tag.removeAttribute('width');
  tag.removeAttribute('height');

  if ('width' in attrs) {
    delete attrs.width;
  }

  if ('height' in attrs) {
    delete attrs.height;
  }

  Object.getOwnPropertyNames(attrs).forEach(function (attr) {
    // don't copy over the class attribute to the player element when we're in a div embed
    // the class is already set up properly in the divEmbed case
    // and we want to make sure that the `video-js` class doesn't get lost
    if (!(divEmbed && attr === 'class')) {
      el.setAttribute(attr, attrs[attr]);
    }

    if (divEmbed) {
      tag.setAttribute(attr, attrs[attr]);
    }
  }); // Update tag id/class for use as HTML5 playback tech
  // Might think we should do this after embedding in container so .vjs-tech class
  // doesn't flash 100% width/height, but class only applies with .video-js parent

  tag.playerId = tag.id;
  tag.id += '_html5_api';
  tag.className = 'vjs-tech'; // Make player findable on elements

  tag.player = el.player = this; // Default state of video is paused

  this.addClass('vjs-paused'); // Add a style element in the player that we'll use to set the width/height
  // of the player in a way that's still overrideable by CSS, just like the
  // video element

  if (window$1.VIDEOJS_NO_DYNAMIC_STYLE !== true) {
    this.styleEl_ = createStyleElement('vjs-styles-dimensions');
    var defaultsStyleEl = $('.vjs-styles-defaults');
    var head = $('head');
    // Insert after the defaults style element if present, otherwise at the
    // top of <head>, so player-specific rules can override the defaults.
    head.insertBefore(this.styleEl_, defaultsStyleEl ? defaultsStyleEl.nextSibling : head.firstChild);
  }

  this.fill_ = false;
  this.fluid_ = false; // Pass in the width/height/aspectRatio options which will update the style el

  this.width(this.options_.width);
  this.height(this.options_.height);
  this.fill(this.options_.fill);
  this.fluid(this.options_.fluid);
  this.aspectRatio(this.options_.aspectRatio); // Hide any links within the video/audio tag,
  // because IE doesn't hide them completely from screen readers.

  var links = tag.getElementsByTagName('a');

  for (var i = 0; i < links.length; i++) {
    var linkEl = links.item(i);
    addClass(linkEl, 'vjs-hidden');
    linkEl.setAttribute('hidden', 'hidden');
  } // insertElFirst seems to cause the networkState to flicker from 3 to 2, so
  // keep track of the original for later so we can know if the source originally failed

  tag.initNetworkState_ = tag.networkState; // Wrap video tag in div (el/box) container

  if (tag.parentNode && !playerElIngest) {
    tag.parentNode.insertBefore(el, tag);
  } // insert the tag as the first child of the player element
  // then manually add it to the children array so that this.addChild
  // will work properly for other components
  //
  // Breaks iPhone, fixed in HTML5 setup.

  prependTo(tag, el);
  this.children_.unshift(tag); // Set lang attr on player to ensure CSS :lang() is consistent with player
  // if it's been set to something different to the doc

  this.el_.setAttribute('lang', this.language_);
  this.el_ = el;
  return el;
}
|
/** |
|
* A getter/setter for the `Player`'s width. Returns the player's configured value. |
|
* To get the current width use `currentWidth()`. |
|
* |
|
* @param {number} [value] |
|
* The value to set the `Player`'s width to. |
|
* |
|
* @return {number} |
|
* The current width of the `Player` when getting. |
|
*/ |
|
; |
|
|
|
_proto.width = function width(value) {
  // Thin convenience wrapper; validation and style updates happen in
  // `dimension()`.
  return this.dimension('width', value);
}
|
/** |
|
* A getter/setter for the `Player`'s height. Returns the player's configured value. |
|
 * To get the current height use `currentHeight()`.
|
* |
|
* @param {number} [value] |
|
 * The value to set the `Player`'s height to.
|
* |
|
* @return {number} |
|
* The current height of the `Player` when getting. |
|
*/ |
|
; |
|
|
|
_proto.height = function height(value) {
  // Thin convenience wrapper; validation and style updates happen in
  // `dimension()`.
  return this.dimension('height', value);
}
|
/** |
|
* A getter/setter for the `Player`'s width & height. |
|
* |
|
* @param {string} dimension |
|
* This string can be: |
|
* - 'width' |
|
* - 'height' |
|
* |
|
* @param {number} [value] |
|
* Value for dimension specified in the first argument. |
|
* |
|
* @return {number} |
|
* The dimension arguments value when getting (width/height). |
|
*/ |
|
; |
|
|
|
_proto.dimension = function dimension(_dimension, value) {
  // The backing fields are stored as `width_` / `height_`.
  var privDimension = _dimension + '_';

  // No value supplied: act as a getter, defaulting to 0 when unset.
  if (value === undefined) {
    return this[privDimension] || 0;
  }

  // An empty string resets the dimension to automatic sizing.
  if (value === '') {
    this[privDimension] = undefined;
    this.updateStyleEl_();
    return;
  }

  var parsedVal = parseFloat(value);

  if (isNaN(parsedVal)) {
    // Fix: the message previously read "supplied for for" (doubled word).
    log.error("Improper value \"" + value + "\" supplied for " + _dimension);
    return;
  }

  this[privDimension] = parsedVal;
  this.updateStyleEl_();
}
|
/** |
|
* A getter/setter/toggler for the vjs-fluid `className` on the `Player`. |
|
* |
|
* Turning this on will turn off fill mode. |
|
* |
|
* @param {boolean} [bool] |
|
* - A value of true adds the class. |
|
* - A value of false removes the class. |
|
* - No value will be a getter. |
|
* |
|
* @return {boolean|undefined} |
|
* - The value of fluid when getting. |
|
* - `undefined` when setting. |
|
*/ |
|
; |
|
|
|
_proto.fluid = function fluid(bool) {
  // Getter form: return the current fluid state as a boolean.
  if (bool === undefined) {
    return !!this.fluid_;
  }

  this.fluid_ = !!bool;

  // Remove any previously-registered reset listener before (possibly)
  // re-adding it below.
  if (isEvented(this)) {
    this.off('playerreset', this.updateStyleEl_);
  }

  if (bool) {
    this.addClass('vjs-fluid');
    // Fluid mode and fill mode are mutually exclusive.
    this.fill(false);
    // NOTE(review): the callback is a plain function, so the `this` it sees
    // depends on how `addEventedCallback` invokes it — presumably bound to
    // this player; verify against addEventedCallback's definition.
    addEventedCallback(function () {
      this.on('playerreset', this.updateStyleEl_);
    });
  } else {
    this.removeClass('vjs-fluid');
  }

  this.updateStyleEl_();
}
|
/** |
|
* A getter/setter/toggler for the vjs-fill `className` on the `Player`. |
|
* |
|
* Turning this on will turn off fluid mode. |
|
* |
|
* @param {boolean} [bool] |
|
* - A value of true adds the class. |
|
* - A value of false removes the class. |
|
* - No value will be a getter. |
|
* |
|
* @return {boolean|undefined} |
|
 * - The value of fill when getting.
|
* - `undefined` when setting. |
|
*/ |
|
; |
|
|
|
_proto.fill = function fill(bool) {
  // Getter form: no argument supplied, report the current state.
  if (bool === undefined) {
    return !!this.fill_;
  }

  var enable = !!bool;
  this.fill_ = enable;

  if (!enable) {
    this.removeClass('vjs-fill');
    return;
  }

  // Fill mode and fluid mode are mutually exclusive, so enabling one
  // disables the other.
  this.addClass('vjs-fill');
  this.fluid(false);
}
|
/** |
|
* Get/Set the aspect ratio |
|
* |
|
* @param {string} [ratio] |
|
* Aspect ratio for player |
|
* |
|
* @return {string|undefined} |
|
* returns the current aspect ratio when getting |
|
*/ |
|
|
|
/** |
|
* A getter/setter for the `Player`'s aspect ratio. |
|
* |
|
* @param {string} [ratio] |
|
 * The value to set the `Player`'s aspect ratio to.
|
* |
|
* @return {string|undefined} |
|
* - The current aspect ratio of the `Player` when getting. |
|
* - undefined when setting |
|
*/ |
|
; |
|
|
|
_proto.aspectRatio = function aspectRatio(ratio) {
  // Getter form: no argument supplied.
  if (ratio === undefined) {
    return this.aspectRatio_;
  }

  // Only accept the "width:height" format, e.g. "16:9".
  var isValidFormat = /^\d+\:\d+$/.test(ratio);

  if (!isValidFormat) {
    throw new Error('Improper value supplied for aspect ratio. The format should be width:height, for example 16:9.');
  }

  this.aspectRatio_ = ratio; // We're assuming if you set an aspect ratio you want fluid mode,
  // because in fixed mode you could calculate width and height yourself.

  this.fluid(true);
  this.updateStyleEl_();
}
|
/** |
|
* Update styles of the `Player` element (height, width and aspect ratio). |
|
* |
|
* @private |
|
* @listens Tech#loadedmetadata |
|
*/ |
|
; |
|
|
|
_proto.updateStyleEl_ = function updateStyleEl_() { |
|
if (window$1.VIDEOJS_NO_DYNAMIC_STYLE === true) { |
|
var _width = typeof this.width_ === 'number' ? this.width_ : this.options_.width; |
|
|
|
var _height = typeof this.height_ === 'number' ? this.height_ : this.options_.height; |
|
|
|
var techEl = this.tech_ && this.tech_.el(); |
|
|
|
if (techEl) { |
|
if (_width >= 0) { |
|
techEl.width = _width; |
|
} |
|
|
|
if (_height >= 0) { |
|
techEl.height = _height; |
|
} |
|
} |
|
|
|
return; |
|
} |
|
|
|
var width; |
|
var height; |
|
var aspectRatio; |
|
var idClass; // The aspect ratio is either used directly or to calculate width and height. |
|
|
|
if (this.aspectRatio_ !== undefined && this.aspectRatio_ !== 'auto') { |
|
// Use any aspectRatio that's been specifically set |
|
aspectRatio = this.aspectRatio_; |
|
} else if (this.videoWidth() > 0) { |
|
// Otherwise try to get the aspect ratio from the video metadata |
|
aspectRatio = this.videoWidth() + ':' + this.videoHeight(); |
|
} else { |
|
// Or use a default. The video element's is 2:1, but 16:9 is more common. |
|
aspectRatio = '16:9'; |
|
} // Get the ratio as a decimal we can use to calculate dimensions |
|
|
|
|
|
var ratioParts = aspectRatio.split(':'); |
|
var ratioMultiplier = ratioParts[1] / ratioParts[0]; |
|
|
|
if (this.width_ !== undefined) { |
|
// Use any width that's been specifically set |
|
width = this.width_; |
|
} else if (this.height_ !== undefined) { |
|
// Or calulate the width from the aspect ratio if a height has been set |
|
width = this.height_ / ratioMultiplier; |
|
} else { |
|
// Or use the video's metadata, or use the video el's default of 300 |
|
width = this.videoWidth() || 300; |
|
} |
|
|
|
if (this.height_ !== undefined) { |
|
// Use any height that's been specifically set |
|
height = this.height_; |
|
} else { |
|
// Otherwise calculate the height from the ratio and the width |
|
height = width * ratioMultiplier; |
|
} // Ensure the CSS class is valid by starting with an alpha character |
|
|
|
|
|
if (/^[^a-zA-Z]/.test(this.id())) { |
|
idClass = 'dimensions-' + this.id(); |
|
} else { |
|
idClass = this.id() + '-dimensions'; |
|
} // Ensure the right class is still on the player for the style element |
|
|
|
|
|
this.addClass(idClass); |
|
setTextContent(this.styleEl_, "\n ." + idClass + " {\n width: " + width + "px;\n height: " + height + "px;\n }\n\n ." + idClass + ".vjs-fluid {\n padding-top: " + ratioMultiplier * 100 + "%;\n }\n "); |
|
} |
|
/** |
|
* Load/Create an instance of playback {@link Tech} including element |
|
* and API methods. Then append the `Tech` element in `Player` as a child. |
|
* |
|
* @param {string} techName |
|
* name of the playback technology |
|
* |
|
* @param {string} source |
|
* video source |
|
* |
|
* @private |
|
*/ |
|
; |
|
|
|
  _proto.loadTech_ = function loadTech_(techName, source) {
    var _this3 = this;

    // Pause and remove current playback technology
    if (this.tech_) {
      this.unloadTech_();
    }

    // Tech names are looked up in several casings below.
    var titleTechName = toTitleCase(techName);
    var camelTechName = techName.charAt(0).toLowerCase() + techName.slice(1); // get rid of the HTML5 video tag as soon as we are using another tech

    if (titleTechName !== 'Html5' && this.tag) {
      Tech.getTech('Html5').disposeMediaElement(this.tag);
      this.tag.player = null;
      this.tag = null;
    }

    this.techName_ = titleTechName; // Turn off API access because we're loading a new tech that might load asynchronously

    this.isReady_ = false; // if autoplay is a string we pass false to the tech
    // because the player is going to handle autoplay on `loadstart`

    var autoplay = typeof this.autoplay() === 'string' ? false : this.autoplay(); // Grab tech-specific options from player options and add source and parent element to use.

    var techOptions = {
      source: source,
      autoplay: autoplay,
      'nativeControlsForTouch': this.options_.nativeControlsForTouch,
      'playerId': this.id(),
      'techId': this.id() + "_" + camelTechName + "_api",
      'playsinline': this.options_.playsinline,
      'preload': this.options_.preload,
      'loop': this.options_.loop,
      'muted': this.options_.muted,
      'poster': this.poster(),
      'language': this.language(),
      'playerElIngest': this.playerElIngest_ || false,
      'vtt.js': this.options_['vtt.js'],
      'canOverridePoster': !!this.options_.techCanOverridePoster,
      'enableSourceset': this.options_.enableSourceset
    };
    // Forward the player's cached track lists to the new tech.
    ALL.names.forEach(function (name$$1) {
      var props = ALL[name$$1];
      techOptions[props.getterName] = _this3[props.privateName];
    });
    // Layer tech-specific player options on top, accepting any of the
    // Title/camel/lower casings of the tech name.
    assign(techOptions, this.options_[titleTechName]);
    assign(techOptions, this.options_[camelTechName]);
    assign(techOptions, this.options_[techName.toLowerCase()]);

    if (this.tag) {
      techOptions.tag = this.tag;
    }

    // Resume from the cached position when reloading the same source.
    if (source && source.src === this.cache_.src && this.cache_.currentTime > 0) {
      techOptions.startTime = this.cache_.currentTime;
    } // Initialize tech instance

    var TechClass = Tech.getTech(techName);

    if (!TechClass) {
      throw new Error("No Tech named '" + titleTechName + "' exists! '" + titleTechName + "' should be registered using videojs.registerTech()'");
    }

    this.tech_ = new TechClass(techOptions); // player.triggerReady is always async, so don't need this to be async

    this.tech_.ready(bind(this, this.handleTechReady_), true);
    // Restore the text tracks that unloadTech_ serialized from the old tech.
    textTrackConverter.jsonToTextTracks(this.textTracksJson_ || [], this.tech_); // Listen to all HTML5-defined events and trigger them on the player

    TECH_EVENTS_RETRIGGER.forEach(function (event) {
      _this3.on(_this3.tech_, event, _this3["handleTech" + toTitleCase(event) + "_"]);
    });
    // While the playback rate is 0 and a seek is in progress, queue these
    // events rather than handling them; handleTechRateChange_ flushes the
    // queue once the rate becomes non-zero again.
    Object.keys(TECH_EVENTS_QUEUE).forEach(function (event) {
      _this3.on(_this3.tech_, event, function (eventObj) {
        if (_this3.tech_.playbackRate() === 0 && _this3.tech_.seeking()) {
          _this3.queuedCallbacks_.push({
            callback: _this3["handleTech" + TECH_EVENTS_QUEUE[event] + "_"].bind(_this3),
            event: eventObj
          });

          return;
        }

        _this3["handleTech" + TECH_EVENTS_QUEUE[event] + "_"](eventObj);
      });
    });
    // Player-level handlers for the remaining tech events.
    this.on(this.tech_, 'loadstart', this.handleTechLoadStart_);
    this.on(this.tech_, 'sourceset', this.handleTechSourceset_);
    this.on(this.tech_, 'waiting', this.handleTechWaiting_);
    this.on(this.tech_, 'ended', this.handleTechEnded_);
    this.on(this.tech_, 'seeking', this.handleTechSeeking_);
    this.on(this.tech_, 'play', this.handleTechPlay_);
    this.on(this.tech_, 'firstplay', this.handleTechFirstPlay_);
    this.on(this.tech_, 'pause', this.handleTechPause_);
    this.on(this.tech_, 'durationchange', this.handleTechDurationChange_);
    this.on(this.tech_, 'fullscreenchange', this.handleTechFullscreenChange_);
    this.on(this.tech_, 'error', this.handleTechError_);
    this.on(this.tech_, 'loadedmetadata', this.updateStyleEl_);
    this.on(this.tech_, 'posterchange', this.handleTechPosterChange_);
    this.on(this.tech_, 'textdata', this.handleTechTextData_);
    this.on(this.tech_, 'ratechange', this.handleTechRateChange_);
    this.usingNativeControls(this.techGet_('controls'));

    if (this.controls() && !this.usingNativeControls()) {
      this.addTechControlsListeners_();
    } // Add the tech element in the DOM if it was not already there
    // Make sure to not insert the original video element if using Html5

    if (this.tech_.el().parentNode !== this.el() && (titleTechName !== 'Html5' || !this.tag)) {
      prependTo(this.tech_.el(), this.el());
    } // Get rid of the original video tag reference after the first tech is loaded

    if (this.tag) {
      this.tag.player = null;
      this.tag = null;
    }
  }
|
/** |
|
* Unload and dispose of the current playback {@link Tech}. |
|
* |
|
* @private |
|
*/ |
|
; |
|
|
|
_proto.unloadTech_ = function unloadTech_() { |
|
var _this4 = this; |
|
|
|
// Save the current text tracks so that we can reuse the same text tracks with the next tech |
|
ALL.names.forEach(function (name$$1) { |
|
var props = ALL[name$$1]; |
|
_this4[props.privateName] = _this4[props.getterName](); |
|
}); |
|
this.textTracksJson_ = textTrackConverter.textTracksToJson(this.tech_); |
|
this.isReady_ = false; |
|
this.tech_.dispose(); |
|
this.tech_ = false; |
|
|
|
if (this.isPosterFromTech_) { |
|
this.poster_ = ''; |
|
this.trigger('posterchange'); |
|
} |
|
|
|
this.isPosterFromTech_ = false; |
|
} |
|
/** |
|
* Return a reference to the current {@link Tech}. |
|
* It will print a warning by default about the danger of using the tech directly |
|
* but any argument that is passed in will silence the warning. |
|
* |
|
* @param {*} [safety] |
|
* Anything passed in to silence the warning |
|
* |
|
* @return {Tech} |
|
* The Tech |
|
*/ |
|
; |
|
|
|
_proto.tech = function tech(safety) { |
|
if (safety === undefined) { |
|
log.warn(tsml(_templateObject$2())); |
|
} |
|
|
|
return this.tech_; |
|
} |
|
/** |
|
* Set up click and touch listeners for the playback element |
|
* |
|
* - On desktops: a click on the video itself will toggle playback |
|
* - On mobile devices: a click on the video toggles controls |
|
* which is done by toggling the user state between active and |
|
* inactive |
|
* - A tap can signal that a user has become active or has become inactive |
|
* e.g. a quick tap on an iPhone movie should reveal the controls. Another |
|
* quick tap should hide them again (signaling the user is in an inactive |
|
* viewing state) |
|
* - In addition to this, we still want the user to be considered inactive after |
|
* a few seconds of inactivity. |
|
* |
|
* > Note: the only part of iOS interaction we can't mimic with this setup |
|
* is a touch and hold on the video element counting as activity in order to |
|
* keep the controls showing, but that shouldn't be an issue. A touch and hold |
|
* on any controls will still keep the user active |
|
* |
|
* @private |
|
*/ |
|
; |
|
|
|
_proto.addTechControlsListeners_ = function addTechControlsListeners_() { |
|
// Make sure to remove all the previous listeners in case we are called multiple times. |
|
this.removeTechControlsListeners_(); // Some browsers (Chrome & IE) don't trigger a click on a flash swf, but do |
|
// trigger mousedown/up. |
|
// http://stackoverflow.com/questions/1444562/javascript-onclick-event-over-flash-object |
|
// Any touch events are set to block the mousedown event from happening |
|
|
|
this.on(this.tech_, 'mousedown', this.handleTechClick_); |
|
this.on(this.tech_, 'dblclick', this.handleTechDoubleClick_); // If the controls were hidden we don't want that to change without a tap event |
|
// so we'll check if the controls were already showing before reporting user |
|
// activity |
|
|
|
this.on(this.tech_, 'touchstart', this.handleTechTouchStart_); |
|
this.on(this.tech_, 'touchmove', this.handleTechTouchMove_); |
|
this.on(this.tech_, 'touchend', this.handleTechTouchEnd_); // The tap listener needs to come after the touchend listener because the tap |
|
// listener cancels out any reportedUserActivity when setting userActive(false) |
|
|
|
this.on(this.tech_, 'tap', this.handleTechTap_); |
|
} |
|
/** |
|
* Remove the listeners used for click and tap controls. This is needed for |
|
* toggling to controls disabled, where a tap/touch should do nothing. |
|
* |
|
* @private |
|
*/ |
|
; |
|
|
|
_proto.removeTechControlsListeners_ = function removeTechControlsListeners_() { |
|
// We don't want to just use `this.off()` because there might be other needed |
|
// listeners added by techs that extend this. |
|
this.off(this.tech_, 'tap', this.handleTechTap_); |
|
this.off(this.tech_, 'touchstart', this.handleTechTouchStart_); |
|
this.off(this.tech_, 'touchmove', this.handleTechTouchMove_); |
|
this.off(this.tech_, 'touchend', this.handleTechTouchEnd_); |
|
this.off(this.tech_, 'mousedown', this.handleTechClick_); |
|
this.off(this.tech_, 'dblclick', this.handleTechDoubleClick_); |
|
} |
|
/** |
|
* Player waits for the tech to be ready |
|
* |
|
* @private |
|
*/ |
|
; |
|
|
|
_proto.handleTechReady_ = function handleTechReady_() { |
|
this.triggerReady(); // Keep the same volume as before |
|
|
|
if (this.cache_.volume) { |
|
this.techCall_('setVolume', this.cache_.volume); |
|
} // Look if the tech found a higher resolution poster while loading |
|
|
|
|
|
this.handleTechPosterChange_(); // Update the duration if available |
|
|
|
this.handleTechDurationChange_(); |
|
} |
|
/** |
|
* Retrigger the `loadstart` event that was triggered by the {@link Tech}. This |
|
* function will also trigger {@link Player#firstplay} if it is the first loadstart |
|
* for a video. |
|
* |
|
* @fires Player#loadstart |
|
* @fires Player#firstplay |
|
* @listens Tech#loadstart |
|
* @private |
|
*/ |
|
; |
|
|
|
_proto.handleTechLoadStart_ = function handleTechLoadStart_() { |
|
// TODO: Update to use `emptied` event instead. See #1277. |
|
this.removeClass('vjs-ended'); |
|
this.removeClass('vjs-seeking'); // reset the error state |
|
|
|
this.error(null); // Update the duration |
|
|
|
this.handleTechDurationChange_(); // If it's already playing we want to trigger a firstplay event now. |
|
// The firstplay event relies on both the play and loadstart events |
|
// which can happen in any order for a new source |
|
|
|
if (!this.paused()) { |
|
/** |
|
* Fired when the user agent begins looking for media data |
|
* |
|
* @event Player#loadstart |
|
* @type {EventTarget~Event} |
|
*/ |
|
this.trigger('loadstart'); |
|
this.trigger('firstplay'); |
|
} else { |
|
// reset the hasStarted state |
|
this.hasStarted(false); |
|
this.trigger('loadstart'); |
|
} // autoplay happens after loadstart for the browser, |
|
// so we mimic that behavior |
|
|
|
|
|
this.manualAutoplay_(this.autoplay()); |
|
} |
|
/** |
|
* Handle autoplay string values, rather than the typical boolean |
|
* values that should be handled by the tech. Note that this is not |
|
* part of any specification. Valid values and what they do can be |
|
* found on the autoplay getter at Player#autoplay() |
|
*/ |
|
; |
|
|
|
  _proto.manualAutoplay_ = function manualAutoplay_(type) {
    var _this5 = this;

    // Only string autoplay values are handled here; boolean autoplay is
    // delegated to the tech. Bail when there is no tech to play with.
    if (!this.tech_ || typeof type !== 'string') {
      return;
    }

    // Mute the player, attempt playback, and restore the previous muted
    // state if the play attempt is rejected or terminated.
    var muted = function muted() {
      var previouslyMuted = _this5.muted();

      _this5.muted(true);

      var restoreMuted = function restoreMuted() {
        _this5.muted(previouslyMuted);
      }; // restore muted on play termination

      _this5.playTerminatedQueue_.push(restoreMuted);

      var mutedPromise = _this5.play();

      // Some techs/browsers return undefined from play() instead of a Promise.
      if (!isPromise(mutedPromise)) {
        return;
      }

      return mutedPromise.catch(restoreMuted);
    };

    var promise; // if muted defaults to true
    // the only thing we can do is call play

    if (type === 'any' && this.muted() !== true) {
      promise = this.play();

      // 'any': when unmuted playback is blocked, retry muted.
      if (isPromise(promise)) {
        promise = promise.catch(muted);
      }
    } else if (type === 'muted' && this.muted() !== true) {
      promise = muted();
    } else {
      promise = this.play();
    }

    if (!isPromise(promise)) {
      return;
    }

    // Report the outcome so integrations can react to the autoplay attempt.
    return promise.then(function () {
      _this5.trigger({
        type: 'autoplay-success',
        autoplay: type
      });
    }).catch(function (e) {
      _this5.trigger({
        type: 'autoplay-failure',
        autoplay: type
      });
    });
  }
|
/** |
|
* Update the internal source caches so that we return the correct source from |
|
* `src()`, `currentSource()`, and `currentSources()`. |
|
* |
|
* > Note: `currentSources` will not be updated if the source that is passed in exists |
|
* in the current `currentSources` cache. |
|
* |
|
* |
|
* @param {Tech~SourceObject} srcObj |
|
* A string or object source to update our caches to. |
|
*/ |
|
; |
|
|
|
_proto.updateSourceCaches_ = function updateSourceCaches_(srcObj) { |
|
if (srcObj === void 0) { |
|
srcObj = ''; |
|
} |
|
|
|
var src = srcObj; |
|
var type = ''; |
|
|
|
if (typeof src !== 'string') { |
|
src = srcObj.src; |
|
type = srcObj.type; |
|
} // make sure all the caches are set to default values |
|
// to prevent null checking |
|
|
|
|
|
this.cache_.source = this.cache_.source || {}; |
|
this.cache_.sources = this.cache_.sources || []; // try to get the type of the src that was passed in |
|
|
|
if (src && !type) { |
|
type = findMimetype(this, src); |
|
} // update `currentSource` cache always |
|
|
|
|
|
this.cache_.source = mergeOptions({}, srcObj, { |
|
src: src, |
|
type: type |
|
}); |
|
var matchingSources = this.cache_.sources.filter(function (s) { |
|
return s.src && s.src === src; |
|
}); |
|
var sourceElSources = []; |
|
var sourceEls = this.$$('source'); |
|
var matchingSourceEls = []; |
|
|
|
for (var i = 0; i < sourceEls.length; i++) { |
|
var sourceObj = getAttributes(sourceEls[i]); |
|
sourceElSources.push(sourceObj); |
|
|
|
if (sourceObj.src && sourceObj.src === src) { |
|
matchingSourceEls.push(sourceObj.src); |
|
} |
|
} // if we have matching source els but not matching sources |
|
// the current source cache is not up to date |
|
|
|
|
|
if (matchingSourceEls.length && !matchingSources.length) { |
|
this.cache_.sources = sourceElSources; // if we don't have matching source or source els set the |
|
// sources cache to the `currentSource` cache |
|
} else if (!matchingSources.length) { |
|
this.cache_.sources = [this.cache_.source]; |
|
} // update the tech `src` cache |
|
|
|
|
|
this.cache_.src = src; |
|
} |
|
/** |
|
* *EXPERIMENTAL* Fired when the source is set or changed on the {@link Tech} |
|
* causing the media element to reload. |
|
* |
|
* It will fire for the initial source and each subsequent source. |
|
* This event is a custom event from Video.js and is triggered by the {@link Tech}. |
|
* |
|
* The event object for this event contains a `src` property that will contain the source |
|
* that was available when the event was triggered. This is generally only necessary if Video.js |
|
* is switching techs while the source was being changed. |
|
* |
|
* It is also fired when `load` is called on the player (or media element) |
|
* because the {@link https://html.spec.whatwg.org/multipage/media.html#dom-media-load|specification for `load`} |
|
* says that the resource selection algorithm needs to be aborted and restarted. |
|
* In this case, it is very likely that the `src` property will be set to the |
|
* empty string `""` to indicate we do not know what the source will be but |
|
* that it is changing. |
|
* |
|
* *This event is currently still experimental and may change in minor releases.* |
|
* __To use this, pass `enableSourceset` option to the player.__ |
|
* |
|
* @event Player#sourceset |
|
* @type {EventTarget~Event} |
|
* @prop {string} src |
|
* The source url available when the `sourceset` was triggered. |
|
* It will be an empty string if we cannot know what the source is |
|
* but know that the source will change. |
|
*/ |
|
|
|
/** |
|
* Retrigger the `sourceset` event that was triggered by the {@link Tech}. |
|
* |
|
* @fires Player#sourceset |
|
* @listens Tech#sourceset |
|
* @private |
|
*/ |
|
; |
|
|
|
_proto.handleTechSourceset_ = function handleTechSourceset_(event) { |
|
var _this6 = this; |
|
|
|
// only update the source cache when the source |
|
// was not updated using the player api |
|
if (!this.changingSrc_) { |
|
var updateSourceCaches = function updateSourceCaches(src) { |
|
return _this6.updateSourceCaches_(src); |
|
}; |
|
|
|
var playerSrc = this.currentSource().src; |
|
var eventSrc = event.src; // if we have a playerSrc that is not a blob, and a tech src that is a blob |
|
|
|
if (playerSrc && !/^blob:/.test(playerSrc) && /^blob:/.test(eventSrc)) { |
|
// if both the tech source and the player source were updated we assume |
|
// something like @videojs/http-streaming did the sourceset and skip updating the source cache. |
|
if (!this.lastSource_ || this.lastSource_.tech !== eventSrc && this.lastSource_.player !== playerSrc) { |
|
updateSourceCaches = function updateSourceCaches() {}; |
|
} |
|
} // update the source to the intial source right away |
|
// in some cases this will be empty string |
|
|
|
|
|
updateSourceCaches(eventSrc); // if the `sourceset` `src` was an empty string |
|
// wait for a `loadstart` to update the cache to `currentSrc`. |
|
// If a sourceset happens before a `loadstart`, we reset the state |
|
// as this function will be called again. |
|
|
|
if (!event.src) { |
|
var updateCache = function updateCache(e) { |
|
if (e.type !== 'sourceset') { |
|
var techSrc = _this6.techGet('currentSrc'); |
|
|
|
_this6.lastSource_.tech = techSrc; |
|
|
|
_this6.updateSourceCaches_(techSrc); |
|
} |
|
|
|
_this6.tech_.off(['sourceset', 'loadstart'], updateCache); |
|
}; |
|
|
|
this.tech_.one(['sourceset', 'loadstart'], updateCache); |
|
} |
|
} |
|
|
|
this.lastSource_ = { |
|
player: this.currentSource().src, |
|
tech: event.src |
|
}; |
|
this.trigger({ |
|
src: event.src, |
|
type: 'sourceset' |
|
}); |
|
} |
|
/** |
|
* Add/remove the vjs-has-started class |
|
* |
|
* @fires Player#firstplay |
|
* |
|
* @param {boolean} request |
|
* - true: adds the class |
|
* - false: remove the class |
|
* |
|
* @return {boolean} |
|
* the boolean value of hasStarted_ |
|
*/ |
|
; |
|
|
|
_proto.hasStarted = function hasStarted(request) { |
|
if (request === undefined) { |
|
// act as getter, if we have no request to change |
|
return this.hasStarted_; |
|
} |
|
|
|
if (request === this.hasStarted_) { |
|
return; |
|
} |
|
|
|
this.hasStarted_ = request; |
|
|
|
if (this.hasStarted_) { |
|
this.addClass('vjs-has-started'); |
|
this.trigger('firstplay'); |
|
} else { |
|
this.removeClass('vjs-has-started'); |
|
} |
|
} |
|
/** |
|
* Fired whenever the media begins or resumes playback |
|
* |
|
* @see [Spec]{@link https://html.spec.whatwg.org/multipage/embedded-content.html#dom-media-play} |
|
* @fires Player#play |
|
* @listens Tech#play |
|
* @private |
|
*/ |
|
; |
|
|
|
_proto.handleTechPlay_ = function handleTechPlay_() { |
|
this.removeClass('vjs-ended'); |
|
this.removeClass('vjs-paused'); |
|
this.addClass('vjs-playing'); // hide the poster when the user hits play |
|
|
|
this.hasStarted(true); |
|
/** |
|
* Triggered whenever an {@link Tech#play} event happens. Indicates that |
|
* playback has started or resumed. |
|
* |
|
* @event Player#play |
|
* @type {EventTarget~Event} |
|
*/ |
|
|
|
this.trigger('play'); |
|
} |
|
/** |
|
* Retrigger the `ratechange` event that was triggered by the {@link Tech}. |
|
* |
|
* If there were any events queued while the playback rate was zero, fire |
|
* those events now. |
|
* |
|
* @private |
|
* @method Player#handleTechRateChange_ |
|
* @fires Player#ratechange |
|
* @listens Tech#ratechange |
|
*/ |
|
; |
|
|
|
_proto.handleTechRateChange_ = function handleTechRateChange_() { |
|
if (this.tech_.playbackRate() > 0 && this.cache_.lastPlaybackRate === 0) { |
|
this.queuedCallbacks_.forEach(function (queued) { |
|
return queued.callback(queued.event); |
|
}); |
|
this.queuedCallbacks_ = []; |
|
} |
|
|
|
this.cache_.lastPlaybackRate = this.tech_.playbackRate(); |
|
/** |
|
* Fires when the playing speed of the audio/video is changed |
|
* |
|
* @event Player#ratechange |
|
* @type {event} |
|
*/ |
|
|
|
this.trigger('ratechange'); |
|
} |
|
/** |
|
* Retrigger the `waiting` event that was triggered by the {@link Tech}. |
|
* |
|
* @fires Player#waiting |
|
* @listens Tech#waiting |
|
* @private |
|
*/ |
|
; |
|
|
|
_proto.handleTechWaiting_ = function handleTechWaiting_() { |
|
var _this7 = this; |
|
|
|
this.addClass('vjs-waiting'); |
|
/** |
|
* A readyState change on the DOM element has caused playback to stop. |
|
* |
|
* @event Player#waiting |
|
* @type {EventTarget~Event} |
|
*/ |
|
|
|
this.trigger('waiting'); // Browsers may emit a timeupdate event after a waiting event. In order to prevent |
|
// premature removal of the waiting class, wait for the time to change. |
|
|
|
var timeWhenWaiting = this.currentTime(); |
|
|
|
var timeUpdateListener = function timeUpdateListener() { |
|
if (timeWhenWaiting !== _this7.currentTime()) { |
|
_this7.removeClass('vjs-waiting'); |
|
|
|
_this7.off('timeupdate', timeUpdateListener); |
|
} |
|
}; |
|
|
|
this.on('timeupdate', timeUpdateListener); |
|
} |
|
/** |
|
* Retrigger the `canplay` event that was triggered by the {@link Tech}. |
|
* > Note: This is not consistent between browsers. See #1351 |
|
* |
|
* @fires Player#canplay |
|
* @listens Tech#canplay |
|
* @private |
|
*/ |
|
; |
|
|
|
_proto.handleTechCanPlay_ = function handleTechCanPlay_() { |
|
this.removeClass('vjs-waiting'); |
|
/** |
|
* The media has a readyState of HAVE_FUTURE_DATA or greater. |
|
* |
|
* @event Player#canplay |
|
* @type {EventTarget~Event} |
|
*/ |
|
|
|
this.trigger('canplay'); |
|
} |
|
/** |
|
* Retrigger the `canplaythrough` event that was triggered by the {@link Tech}. |
|
* |
|
* @fires Player#canplaythrough |
|
* @listens Tech#canplaythrough |
|
* @private |
|
*/ |
|
; |
|
|
|
_proto.handleTechCanPlayThrough_ = function handleTechCanPlayThrough_() { |
|
this.removeClass('vjs-waiting'); |
|
/** |
|
* The media has a readyState of HAVE_ENOUGH_DATA or greater. This means that the |
|
* entire media file can be played without buffering. |
|
* |
|
* @event Player#canplaythrough |
|
* @type {EventTarget~Event} |
|
*/ |
|
|
|
this.trigger('canplaythrough'); |
|
} |
|
/** |
|
* Retrigger the `playing` event that was triggered by the {@link Tech}. |
|
* |
|
* @fires Player#playing |
|
* @listens Tech#playing |
|
* @private |
|
*/ |
|
; |
|
|
|
_proto.handleTechPlaying_ = function handleTechPlaying_() { |
|
this.removeClass('vjs-waiting'); |
|
/** |
|
* The media is no longer blocked from playback, and has started playing. |
|
* |
|
* @event Player#playing |
|
* @type {EventTarget~Event} |
|
*/ |
|
|
|
this.trigger('playing'); |
|
} |
|
/** |
|
* Retrigger the `seeking` event that was triggered by the {@link Tech}. |
|
* |
|
* @fires Player#seeking |
|
* @listens Tech#seeking |
|
* @private |
|
*/ |
|
; |
|
|
|
_proto.handleTechSeeking_ = function handleTechSeeking_() { |
|
this.addClass('vjs-seeking'); |
|
/** |
|
* Fired whenever the player is jumping to a new time |
|
* |
|
* @event Player#seeking |
|
* @type {EventTarget~Event} |
|
*/ |
|
|
|
this.trigger('seeking'); |
|
} |
|
/** |
|
* Retrigger the `seeked` event that was triggered by the {@link Tech}. |
|
* |
|
* @fires Player#seeked |
|
* @listens Tech#seeked |
|
* @private |
|
*/ |
|
; |
|
|
|
_proto.handleTechSeeked_ = function handleTechSeeked_() { |
|
this.removeClass('vjs-seeking'); |
|
this.removeClass('vjs-ended'); |
|
/** |
|
* Fired when the player has finished jumping to a new time |
|
* |
|
* @event Player#seeked |
|
* @type {EventTarget~Event} |
|
*/ |
|
|
|
this.trigger('seeked'); |
|
} |
|
/** |
|
* Retrigger the `firstplay` event that was triggered by the {@link Tech}. |
|
* |
|
* @fires Player#firstplay |
|
* @listens Tech#firstplay |
|
* @deprecated As of 6.0 firstplay event is deprecated. |
|
* As of 6.0 passing the `starttime` option to the player and the firstplay event are deprecated. |
|
* @private |
|
*/ |
|
; |
|
|
|
_proto.handleTechFirstPlay_ = function handleTechFirstPlay_() { |
|
// If the first starttime attribute is specified |
|
// then we will start at the given offset in seconds |
|
if (this.options_.starttime) { |
|
log.warn('Passing the `starttime` option to the player will be deprecated in 6.0'); |
|
this.currentTime(this.options_.starttime); |
|
} |
|
|
|
this.addClass('vjs-has-started'); |
|
/** |
|
* Fired the first time a video is played. Not part of the HLS spec, and this is |
|
* probably not the best implementation yet, so use sparingly. If you don't have a |
|
* reason to prevent playback, use `myPlayer.one('play');` instead. |
|
* |
|
* @event Player#firstplay |
|
* @deprecated As of 6.0 firstplay event is deprecated. |
|
* @type {EventTarget~Event} |
|
*/ |
|
|
|
this.trigger('firstplay'); |
|
} |
|
/** |
|
* Retrigger the `pause` event that was triggered by the {@link Tech}. |
|
* |
|
* @fires Player#pause |
|
* @listens Tech#pause |
|
* @private |
|
*/ |
|
; |
|
|
|
_proto.handleTechPause_ = function handleTechPause_() { |
|
this.removeClass('vjs-playing'); |
|
this.addClass('vjs-paused'); |
|
/** |
|
* Fired whenever the media has been paused |
|
* |
|
* @event Player#pause |
|
* @type {EventTarget~Event} |
|
*/ |
|
|
|
this.trigger('pause'); |
|
} |
|
/** |
|
* Retrigger the `ended` event that was triggered by the {@link Tech}. |
|
* |
|
* @fires Player#ended |
|
* @listens Tech#ended |
|
* @private |
|
*/ |
|
; |
|
|
|
_proto.handleTechEnded_ = function handleTechEnded_() { |
|
this.addClass('vjs-ended'); |
|
|
|
if (this.options_.loop) { |
|
this.currentTime(0); |
|
this.play(); |
|
} else if (!this.paused()) { |
|
this.pause(); |
|
} |
|
/** |
|
* Fired when the end of the media resource is reached (currentTime == duration) |
|
* |
|
* @event Player#ended |
|
* @type {EventTarget~Event} |
|
*/ |
|
|
|
|
|
this.trigger('ended'); |
|
} |
|
/** |
|
* Fired when the duration of the media resource is first known or changed |
|
* |
|
* @listens Tech#durationchange |
|
* @private |
|
*/ |
|
; |
|
|
|
_proto.handleTechDurationChange_ = function handleTechDurationChange_() { |
|
this.duration(this.techGet_('duration')); |
|
} |
|
/** |
|
* Handle a click on the media element to play/pause |
|
* |
|
* @param {EventTarget~Event} event |
|
* the event that caused this function to trigger |
|
* |
|
* @listens Tech#mousedown |
|
* @private |
|
*/ |
|
; |
|
|
|
_proto.handleTechClick_ = function handleTechClick_(event) { |
|
if (!isSingleLeftClick(event)) { |
|
return; |
|
} // When controls are disabled a click should not toggle playback because |
|
// the click is considered a control |
|
|
|
|
|
if (!this.controls_) { |
|
return; |
|
} |
|
|
|
if (this.paused()) { |
|
silencePromise(this.play()); |
|
} else { |
|
this.pause(); |
|
} |
|
} |
|
/** |
|
* Handle a double-click on the media element to enter/exit fullscreen |
|
* |
|
* @param {EventTarget~Event} event |
|
* the event that caused this function to trigger |
|
* |
|
* @listens Tech#dblclick |
|
* @private |
|
*/ |
|
; |
|
|
|
_proto.handleTechDoubleClick_ = function handleTechDoubleClick_(event) {
  // Double-click only toggles fullscreen while player controls are enabled.
  if (!this.controls_) {
    return;
  } // we do not want to toggle fullscreen state
  // when double-clicking inside a control bar or a modal


  // NOTE: despite the name, `inAllowedEls` is true when the double-click
  // landed inside an element that should NOT toggle fullscreen.
  var inAllowedEls = Array.prototype.some.call(this.$$('.vjs-control-bar, .vjs-modal-dialog'), function (el) {
    return el.contains(event.target);
  });

  if (!inAllowedEls) {
    /*
     * options.userActions.doubleClick
     *
     * If `undefined` or `true`, double-click toggles fullscreen if controls are present
     * Set to `false` to disable double-click handling
     * Set to a function to substitute an external double-click handler
     */
    if (this.options_ === undefined || this.options_.userActions === undefined || this.options_.userActions.doubleClick === undefined || this.options_.userActions.doubleClick !== false) {
      if (this.options_ !== undefined && this.options_.userActions !== undefined && typeof this.options_.userActions.doubleClick === 'function') {
        // A custom handler replaces the default fullscreen toggle entirely.
        this.options_.userActions.doubleClick.call(this, event);
      } else if (this.isFullscreen()) {
        this.exitFullscreen();
      } else {
        this.requestFullscreen();
      }
    }
  }
}
|
/** |
|
* Handle a tap on the media element. It will toggle the user |
|
* activity state, which hides and shows the controls. |
|
* |
|
* @listens Tech#tap |
|
* @private |
|
*/ |
|
; |
|
|
|
_proto.handleTechTap_ = function handleTechTap_() {
  // A tap flips the user-activity state, which shows or hides the controls.
  var currentlyActive = this.userActive();
  this.userActive(!currentlyActive);
}
|
/** |
|
* Handle touch to start |
|
* |
|
* @listens Tech#touchstart |
|
* @private |
|
*/ |
|
; |
|
|
|
_proto.handleTechTouchStart_ = function handleTechTouchStart_() {
  // Remember whether the user was active when the touch began; the
  // touchmove handler uses this to decide whether to keep reporting activity.
  this.userWasActive = this.userActive();
}
|
/** |
|
* Handle touch to move |
|
* |
|
* @listens Tech#touchmove |
|
* @private |
|
*/ |
|
; |
|
|
|
_proto.handleTechTouchMove_ = function handleTechTouchMove_() {
  if (!this.userWasActive) {
    return;
  }

  // Keep the user-activity timer alive while the touch is moving.
  this.reportUserActivity();
}
|
/** |
|
* Handle touch to end |
|
* |
|
* @param {EventTarget~Event} event |
|
* the touchend event that triggered |
|
* this function |
|
* |
|
* @listens Tech#touchend |
|
* @private |
|
*/ |
|
; |
|
|
|
_proto.handleTechTouchEnd_ = function handleTechTouchEnd_(event) {
  // Suppress the synthesized mouse events that would otherwise follow the touch.
  event.preventDefault();
}
|
/** |
|
* native click events on the SWF aren't triggered on IE11, Win8.1RT |
|
* use stageclick events triggered from inside the SWF instead |
|
* |
|
* @private |
|
* @listens stageclick |
|
*/ |
|
; |
|
|
|
_proto.handleStageClick_ = function handleStageClick_() {
  // Flash `stageclick` substitutes for native clicks on IE11 / Win8.1RT;
  // treat it as user activity just like a real click.
  this.reportUserActivity();
}
|
/** |
|
* @private |
|
*/ |
|
; |
|
|
|
_proto.toggleFullscreenClass_ = function toggleFullscreenClass_() {
  // Keep the `vjs-fullscreen` root class in sync with the fullscreen state.
  if (!this.isFullscreen()) {
    this.removeClass('vjs-fullscreen');
    return;
  }

  this.addClass('vjs-fullscreen');
}
|
/** |
|
* when the document fschange event triggers it calls this |
|
*/ |
|
; |
|
|
|
_proto.documentFullscreenChange_ = function documentFullscreenChange_(e) {
  var fsApi = FullscreenApi;
  var el = this.el();
  // The player is fullscreen when its root element is the document's
  // current fullscreen element...
  var isFs = document[fsApi.fullscreenElement] === el;

  // ...or, failing that, when the element matches the (possibly prefixed)
  // fullscreen pseudo-class. msMatchesSelector is the IE11 fallback for
  // Element#matches.
  if (!isFs && el.matches) {
    isFs = el.matches(':' + fsApi.fullscreen);
  } else if (!isFs && el.msMatchesSelector) {
    isFs = el.msMatchesSelector(':' + fsApi.fullscreen);
  }

  this.isFullscreen(isFs); // If cancelling fullscreen, remove event listener.

  // The document-level listener was added in requestFullscreen(); tear it
  // down on exit so multiple players don't react to each other's events.
  if (this.isFullscreen() === false) {
    off(document, fsApi.fullscreenchange, this.boundDocumentFullscreenChange_);
  }

  if (!prefixedAPI) {
    /**
     * @event Player#fullscreenchange
     * @type {EventTarget~Event}
     */
    this.trigger('fullscreenchange');
  }
}
|
/** |
|
* Handle Tech Fullscreen Change |
|
* |
|
* @param {EventTarget~Event} event |
|
* the fullscreenchange event that triggered this function |
|
* |
|
* @param {Object} data |
|
* the data that was sent with the event |
|
* |
|
* @private |
|
* @listens Tech#fullscreenchange |
|
* @fires Player#fullscreenchange |
|
*/ |
|
; |
|
|
|
_proto.handleTechFullscreenChange_ = function handleTechFullscreenChange_(event, data) {
  // Sync the player's fullscreen flag with the state reported by the tech,
  // when the tech supplied one.
  if (data) {
    var techIsFullscreen = data.isFullscreen;
    this.isFullscreen(techIsFullscreen);
  }

  /**
   * Fired when going in and out of fullscreen.
   *
   * @event Player#fullscreenchange
   * @type {EventTarget~Event}
   */
  this.trigger('fullscreenchange');
}
|
/** |
|
* Fires when an error occurred during the loading of an audio/video. |
|
* |
|
* @private |
|
* @listens Tech#error |
|
*/ |
|
; |
|
|
|
_proto.handleTechError_ = function handleTechError_() {
  // Propagate the tech's current error object to the player's error handler.
  this.error(this.tech_.error());
}
|
/** |
|
* Retrigger the `textdata` event that was triggered by the {@link Tech}. |
|
* |
|
* @fires Player#textdata |
|
* @listens Tech#textdata |
|
* @private |
|
*/ |
|
; |
|
|
|
_proto.handleTechTextData_ = function handleTechTextData_() {
  // The tech passes the payload as the second argument (after the event);
  // forward null when no payload was provided.
  var data = arguments.length > 1 ? arguments[1] : null;

  /**
   * Fires when we get a textdata event from tech
   *
   * @event Player#textdata
   * @type {EventTarget~Event}
   */
  this.trigger('textdata', data);
}
|
/** |
|
* Get object for cached values. |
|
* |
|
* @return {Object} |
|
* get the current object cache |
|
*/ |
|
; |
|
|
|
_proto.getCache = function getCache() {
  // Expose the internal cache of player state values.
  return this.cache_;
}
|
/** |
|
* Resets the internal cache object. |
|
* |
|
* Using this function outside the player constructor or reset method may |
|
* have unintended side-effects. |
|
* |
|
* @private |
|
*/ |
|
; |
|
|
|
_proto.resetCache_ = function resetCache_() {
  var freshCache = {
    // currentTime is not truly cached (it is always re-read from the tech),
    // but it is reset here for completeness in case caching is added later.
    currentTime: 0,
    duration: NaN,
    inactivityTimeout: this.options_.inactivityTimeout,
    lastPlaybackRate: this.defaultPlaybackRate(),
    lastVolume: 1,
    media: null,
    source: {},
    sources: [],
    src: '',
    volume: 1
  };

  this.cache_ = freshCache;
}
|
/** |
|
* Pass values to the playback tech |
|
* |
|
* @param {string} [method] |
|
* the method to call |
|
* |
|
* @param {Object} arg |
|
* the argument to pass |
|
* |
|
* @private |
|
*/ |
|
; |
|
|
|
_proto.techCall_ = function techCall_(method, arg) {
  // If it's not ready yet, call method when it is
  this.ready(function () {
    // Recognized setters and mediators are routed through the middleware
    // chain so middleware can intercept or transform the value first.
    if (method in allowedSetters) {
      return set$1(this.middleware_, this.tech_, method, arg);
    } else if (method in allowedMediators) {
      return mediate(this.middleware_, this.tech_, method, arg);
    }

    try {
      if (this.tech_) {
        this.tech_[method](arg);
      }
    } catch (e) {
      // Log for visibility, then rethrow so callers still see the failure.
      log(e);
      throw e;
    }
  }, true);
}
|
/** |
|
* Get calls can't wait for the tech, and sometimes don't need to. |
|
* |
|
* @param {string} method |
|
* Tech method |
|
* |
|
* @return {Function|undefined} |
|
* the method or undefined |
|
* |
|
* @private |
|
*/ |
|
; |
|
|
|
_proto.techGet_ = function techGet_(method) {
  // Getters cannot be queued like setters, so bail out (returning undefined)
  // while the tech is missing or not yet ready.
  if (!this.tech_ || !this.tech_.isReady_) {
    return;
  }

  // Recognized getters/mediators are routed through the middleware chain.
  if (method in allowedGetters) {
    return get(this.middleware_, this.tech_, method);
  } else if (method in allowedMediators) {
    return mediate(this.middleware_, this.tech_, method);
  } // Flash likes to die and reload when you hide or reposition it.
  // In these cases the object methods go away and we get errors.
  // When that happens we'll catch the errors and inform tech that it's not ready any more.


  try {
    return this.tech_[method]();
  } catch (e) {
    // When building additional tech libs, an expected method may not be defined yet
    if (this.tech_[method] === undefined) {
      log("Video.js: " + method + " method not defined for " + this.techName_ + " playback technology.", e);
      throw e;
    } // When a method isn't available on the object it throws a TypeError


    if (e.name === 'TypeError') {
      log("Video.js: " + method + " unavailable on " + this.techName_ + " playback technology element.", e);
      // Mark the tech not-ready so subsequent calls short-circuit above.
      this.tech_.isReady_ = false;
      throw e;
    } // If error unknown, just log and throw


    log(e);
    throw e;
  }
}
|
/** |
|
* Attempt to begin playback at the first opportunity. |
|
* |
|
* @return {Promise|undefined} |
|
* Returns a promise if the browser supports Promises (or one |
|
* was passed in as an option). This promise will be resolved on |
|
* the return value of play. If this is undefined it will fulfill the |
|
* promise chain otherwise the promise chain will be fulfilled when |
|
* the promise from play is fulfilled. |
|
*/ |
|
; |
|
|
|
_proto.play = function play() {
  var self = this;
  var PromiseClass = this.options_.Promise || window$1.Promise;

  if (!PromiseClass) {
    // No Promise implementation available; return play_'s raw result.
    return this.play_();
  }

  // Wrap play_ so the promise resolves with whatever value the tech's play
  // eventually produces.
  return new PromiseClass(function (resolve) {
    self.play_(resolve);
  });
}
|
/** |
|
* The actual logic for play, takes a callback that will be resolved on the |
|
* return value of play. This allows us to resolve to the play promise if there |
|
* is one on modern browsers. |
|
* |
|
* @private |
|
* @param {Function} [callback] |
|
* The callback that should be called when the techs play is actually called |
|
*/ |
|
; |
|
|
|
_proto.play_ = function play_(callback) {
  var _this9 = this;

  // Default the callback to a promise-silencer so an ignored play() result
  // does not surface as an unhandled rejection.
  if (callback === void 0) {
    callback = silencePromise;
  }

  this.playCallbacks_.push(callback);
  var isSrcReady = Boolean(!this.changingSrc_ && (this.src() || this.currentSrc())); // treat calls to play_ somewhat like the `one` event function

  // Drop any previously queued wait-to-play handler so only one is pending.
  if (this.waitToPlay_) {
    this.off(['ready', 'loadstart'], this.waitToPlay_);
    this.waitToPlay_ = null;
  } // if the player/tech is not ready or the src itself is not ready
  // queue up a call to play on `ready` or `loadstart`


  if (!this.isReady_ || !isSrcReady) {
    this.waitToPlay_ = function (e) {
      _this9.play_();
    };

    this.one(['ready', 'loadstart'], this.waitToPlay_); // if we are in Safari, there is a high chance that loadstart will trigger after the gesture timeperiod
    // in that case, we need to prime the video element by calling load so it'll be ready in time

    if (!isSrcReady && (IS_ANY_SAFARI || IS_IOS)) {
      this.load();
    }

    return;
  } // If the player/tech is ready and we have a source, we can attempt playback.


  var val = this.techGet_('play'); // play was terminated if the returned value is null

  if (val === null) {
    // A null return means play was terminated (e.g. by middleware); run the
    // terminated queue so callers can distinguish this from a real play.
    this.runPlayTerminatedQueue_();
  } else {
    this.runPlayCallbacks_(val);
  }
}
|
/** |
|
* These functions will be run when if play is terminated. If play |
|
* runPlayCallbacks_ is run these function will not be run. This allows us |
|
 * to differentiate between a terminated play and an actual call to play. |
|
*/ |
|
; |
|
|
|
_proto.runPlayTerminatedQueue_ = function runPlayTerminatedQueue_() {
  // Snapshot and clear the queue before running it, so callbacks that
  // enqueue new work are not executed in this pass.
  var pending = this.playTerminatedQueue_.slice(0);
  this.playTerminatedQueue_ = [];

  for (var i = 0; i < pending.length; i++) {
    pending[i]();
  }
}
|
/** |
|
* When a callback to play is delayed we have to run these |
|
* callbacks when play is actually called on the tech. This function |
|
* runs the callbacks that were delayed and accepts the return value |
|
* from the tech. |
|
* |
|
* @param {undefined|Promise} val |
|
* The return value from the tech. |
|
*/ |
|
; |
|
|
|
_proto.runPlayCallbacks_ = function runPlayCallbacks_(val) {
  // Snapshot and clear the callbacks before running them.
  var pending = this.playCallbacks_.slice(0);
  this.playCallbacks_ = [];

  // A real play happened, so any queued "play terminated" handlers are stale.
  this.playTerminatedQueue_ = [];

  for (var i = 0; i < pending.length; i++) {
    pending[i](val);
  }
}
|
/** |
|
* Pause the video playback |
|
* |
|
* @return {Player} |
|
* A reference to the player object this function was called on |
|
*/ |
|
; |
|
|
|
_proto.pause = function pause() {
  // Delegate to the tech; player state updates arrive via the tech's events.
  this.techCall_('pause');
}
|
/** |
|
* Check if the player is paused or has yet to play |
|
* |
|
* @return {boolean} |
|
* - false: if the media is currently playing |
|
* - true: if media is not currently playing |
|
*/ |
|
; |
|
|
|
_proto.paused = function paused() {
  // Default to true: until the tech explicitly reports false, the player is
  // considered paused (Safari's initial native state is actually false).
  var techPaused = this.techGet_('paused');
  return techPaused === false ? false : true;
}
|
/** |
|
* Get a TimeRange object representing the current ranges of time that the user |
|
* has played. |
|
* |
|
* @return {TimeRange} |
|
* A time range object that represents all the increments of time that have |
|
* been played. |
|
*/ |
|
; |
|
|
|
_proto.played = function played() {
  var ranges = this.techGet_('played');

  // Fall back to an empty (0, 0) range when the tech has nothing to report.
  return ranges || createTimeRanges(0, 0);
}
|
/** |
|
* Returns whether or not the user is "scrubbing". Scrubbing is |
|
* when the user has clicked the progress bar handle and is |
|
* dragging it along the progress bar. |
|
* |
|
* @param {boolean} [isScrubbing] |
|
* whether the user is or is not scrubbing |
|
* |
|
* @return {boolean} |
|
* The value of scrubbing when getting |
|
*/ |
|
; |
|
|
|
_proto.scrubbing = function scrubbing(isScrubbing) {
  // Getter form: no argument supplied.
  if (typeof isScrubbing === 'undefined') {
    return this.scrubbing_;
  }

  this.scrubbing_ = !!isScrubbing;

  // Reflect the scrubbing state on the player's root class list.
  if (this.scrubbing_) {
    this.addClass('vjs-scrubbing');
  } else {
    this.removeClass('vjs-scrubbing');
  }
}
|
/** |
|
* Get or set the current time (in seconds) |
|
* |
|
* @param {number|string} [seconds] |
|
* The time to seek to in seconds |
|
* |
|
* @return {number} |
|
* - the current time in seconds when getting |
|
*/ |
|
; |
|
|
|
_proto.currentTime = function currentTime(seconds) {
  if (typeof seconds !== 'undefined') {
    // Setter: clamp negative values to the start of the media.
    var target = seconds < 0 ? 0 : seconds;
    this.techCall_('setCurrentTime', target);
    return;
  }

  // Getter: read from the tech (defaulting to 0) and cache the value. The
  // cache mainly limits tech reads while scrubbing; something must still
  // read the real time here or the cached value would never update.
  this.cache_.currentTime = this.techGet_('currentTime') || 0;
  return this.cache_.currentTime;
}
|
/** |
|
* Normally gets the length in time of the video in seconds; |
|
* in all but the rarest use cases an argument will NOT be passed to the method |
|
* |
|
* > **NOTE**: The video must have started loading before the duration can be |
|
* known, and in the case of Flash, may not be known until the video starts |
|
* playing. |
|
* |
|
* @fires Player#durationchange |
|
* |
|
* @param {number} [seconds] |
|
* The duration of the video to set in seconds |
|
* |
|
* @return {number} |
|
* - The duration of the video in seconds when getting |
|
*/ |
|
; |
|
|
|
_proto.duration = function duration(seconds) {
  if (seconds === undefined) {
    // return NaN if the duration is not known
    return this.cache_.duration !== undefined ? this.cache_.duration : NaN;
  }

  seconds = parseFloat(seconds); // Standardize on Infinity for signaling video is live

  if (seconds < 0) {
    seconds = Infinity;
  }

  if (seconds !== this.cache_.duration) {
    // Cache the last set value for optimized scrubbing (esp. Flash)
    this.cache_.duration = seconds;

    if (seconds === Infinity) {
      // Infinite duration means a live stream.
      this.addClass('vjs-live');

      // NOTE(review): `this.player_` is read on the Player itself here —
      // presumably assigned by the Component base class; confirm it can
      // never be undefined at this point.
      if (this.options_.liveui && this.player_.liveTracker) {
        this.addClass('vjs-liveui');
      }
    } else {
      this.removeClass('vjs-live');
      this.removeClass('vjs-liveui');
    }

    if (!isNaN(seconds)) {
      // Do not fire durationchange unless the duration value is known.
      // @see [Spec]{@link https://www.w3.org/TR/2011/WD-html5-20110113/video.html#media-element-load-algorithm}

      /**
       * @event Player#durationchange
       * @type {EventTarget~Event}
       */
      this.trigger('durationchange');
    }
  }
}
|
/** |
|
* Calculates how much time is left in the video. Not part |
|
* of the native video API. |
|
* |
|
* @return {number} |
|
* The time remaining in seconds |
|
*/ |
|
; |
|
|
|
_proto.remainingTime = function remainingTime() {
  // Time left is simply total duration minus the playhead position.
  var total = this.duration();
  var elapsed = this.currentTime();
  return total - elapsed;
}
|
/** |
|
 * A remaining time function that is intended to be used when |
|
* the time is to be displayed directly to the user. |
|
* |
|
* @return {number} |
|
* The rounded time remaining in seconds |
|
*/ |
|
; |
|
|
|
_proto.remainingTimeDisplay = function remainingTimeDisplay() {
  // Floor each value independently so the displayed countdown ticks in
  // whole seconds.
  var wholeDuration = Math.floor(this.duration());
  var wholeCurrent = Math.floor(this.currentTime());
  return wholeDuration - wholeCurrent;
}
|
// Kind of like an array of portions of the video that have been downloaded. |
|
|
|
/** |
|
* Get a TimeRange object with an array of the times of the video |
|
* that have been downloaded. If you just want the percent of the |
|
* video that's been downloaded, use bufferedPercent. |
|
* |
|
* @see [Buffered Spec]{@link http://dev.w3.org/html5/spec/video.html#dom-media-buffered} |
|
* |
|
* @return {TimeRange} |
|
* A mock TimeRange object (following HTML spec) |
|
*/ |
|
; |
|
|
|
_proto.buffered = function buffered() {
  var ranges = this.techGet_('buffered');

  // Normalize a missing or empty result to a single (0, 0) range.
  if (!ranges || !ranges.length) {
    return createTimeRanges(0, 0);
  }

  return ranges;
}
|
/** |
|
* Get the percent (as a decimal) of the video that's been downloaded. |
|
* This method is not a part of the native HTML video API. |
|
* |
|
* @return {number} |
|
* A decimal between 0 and 1 representing the percent |
|
* that is buffered 0 being 0% and 1 being 100% |
|
*/ |
|
; |
|
|
|
_proto.bufferedPercent = function bufferedPercent$$1() {
  // Delegate to the module-level helper with the player's current state.
  return bufferedPercent(this.buffered(), this.duration());
}
|
/** |
|
* Get the ending time of the last buffered time range |
|
* This is used in the progress bar to encapsulate all time ranges. |
|
* |
|
* @return {number} |
|
* The end of the last buffered time range |
|
*/ |
|
; |
|
|
|
_proto.bufferedEnd = function bufferedEnd() {
  var ranges = this.buffered();
  var total = this.duration();

  // End of the last buffered range, clamped so it never exceeds the duration.
  var end = ranges.end(ranges.length - 1);
  return end > total ? total : end;
}
|
/** |
|
* Get or set the current volume of the media |
|
* |
|
* @param {number} [percentAsDecimal] |
|
* The new volume as a decimal percent: |
|
* - 0 is muted/0%/off |
|
* - 1.0 is 100%/full |
|
* - 0.5 is half volume or 50% |
|
* |
|
* @return {number} |
|
* The current volume as a percent when getting |
|
*/ |
|
; |
|
|
|
_proto.volume = function volume(percentAsDecimal) {
  if (percentAsDecimal !== undefined) {
    // Setter: clamp the requested value into [0, 1].
    var vol = Math.max(0, Math.min(1, parseFloat(percentAsDecimal)));
    this.cache_.volume = vol;
    this.techCall_('setVolume', vol);

    if (vol > 0) {
      // Remember the last audible level so unmuting can restore it.
      this.lastVolume_(vol);
    }

    return;
  }

  // Getter: default to full volume when the tech reports nothing usable.
  var current = parseFloat(this.techGet_('volume'));
  return isNaN(current) ? 1 : current;
}
|
/** |
|
* Get the current muted state, or turn mute on or off |
|
* |
|
* @param {boolean} [muted] |
|
* - true to mute |
|
* - false to unmute |
|
* |
|
* @return {boolean} |
|
* - true if mute is on and getting |
|
* - false if mute is off and getting |
|
*/ |
|
; |
|
|
|
_proto.muted = function muted(_muted) {
  if (_muted === undefined) {
    // Getter: coerce a missing tech value to false.
    return this.techGet_('muted') || false;
  }

  // Setter: pass straight through to the tech.
  this.techCall_('setMuted', _muted);
}
|
/** |
|
* Get the current defaultMuted state, or turn defaultMuted on or off. defaultMuted |
|
* indicates the state of muted on initial playback. |
|
* |
|
* ```js |
|
* var myPlayer = videojs('some-player-id'); |
|
* |
|
* myPlayer.src("http://www.example.com/path/to/video.mp4"); |
|
* |
|
* // get, should be false |
|
* console.log(myPlayer.defaultMuted()); |
|
* // set to true |
|
* myPlayer.defaultMuted(true); |
|
* // get should be true |
|
* console.log(myPlayer.defaultMuted()); |
|
* ``` |
|
* |
|
* @param {boolean} [defaultMuted] |
|
* - true to mute |
|
* - false to unmute |
|
* |
|
* @return {boolean|Player} |
|
* - true if defaultMuted is on and getting |
|
* - false if defaultMuted is off and getting |
|
* - A reference to the current player when setting |
|
*/ |
|
; |
|
|
|
_proto.defaultMuted = function defaultMuted(_defaultMuted) {
  if (_defaultMuted === undefined) {
    // Getter: coerce a missing tech value to false.
    return this.techGet_('defaultMuted') || false;
  }

  // Setter: pass straight through to the tech.
  return this.techCall_('setDefaultMuted', _defaultMuted);
}
|
/** |
|
* Get the last volume, or set it |
|
* |
|
* @param {number} [percentAsDecimal] |
|
* The new last volume as a decimal percent: |
|
* - 0 is muted/0%/off |
|
* - 1.0 is 100%/full |
|
* - 0.5 is half volume or 50% |
|
* |
|
* @return {number} |
|
* the current value of lastVolume as a percent when getting |
|
* |
|
* @private |
|
*/ |
|
; |
|
|
|
_proto.lastVolume_ = function lastVolume_(percentAsDecimal) {
  // Zero is deliberately ignored on set: muting must not clobber the
  // remembered audible level.
  if (percentAsDecimal === undefined || percentAsDecimal === 0) {
    return this.cache_.lastVolume;
  }

  this.cache_.lastVolume = percentAsDecimal;
}
|
/** |
|
* Check if current tech can support native fullscreen |
|
* (e.g. with built in controls like iOS, so not our flash swf) |
|
* |
|
* @return {boolean} |
|
* if native fullscreen is supported |
|
*/ |
|
; |
|
|
|
_proto.supportsFullScreen = function supportsFullScreen() {
  // Ask the tech; coerce a missing answer to false.
  var supported = this.techGet_('supportsFullScreen');
  return supported || false;
}
|
/** |
|
* Check if the player is in fullscreen mode or tell the player that it |
|
* is or is not in fullscreen mode. |
|
* |
|
* > NOTE: As of the latest HTML5 spec, isFullscreen is no longer an official |
|
* property and instead document.fullscreenElement is used. But isFullscreen is |
|
* still a valuable property for internal player workings. |
|
* |
|
* @param {boolean} [isFS] |
|
* Set the players current fullscreen state |
|
* |
|
* @return {boolean} |
|
* - true if fullscreen is on and getting |
|
* - false if fullscreen is off and getting |
|
*/ |
|
; |
|
|
|
_proto.isFullscreen = function isFullscreen(isFS) {
  if (isFS !== undefined) {
    // Setter: store the flag and sync the `vjs-fullscreen` root class.
    this.isFullscreen_ = !!isFS;
    this.toggleFullscreenClass_();
    return;
  }

  if (prefixedAPI) {
    // With a prefixed Fullscreen API, query the document/element directly
    // instead of trusting the internal flag (same probe as
    // documentFullscreenChange_).
    var fsApi = FullscreenApi;
    var el = this.el();
    var isFs = document[fsApi.fullscreenElement] === el;

    if (!isFs && el.matches) {
      isFs = el.matches(':' + fsApi.fullscreen);
    } else if (!isFs && el.msMatchesSelector) {
      // IE11 fallback for Element#matches.
      isFs = el.msMatchesSelector(':' + fsApi.fullscreen);
    }

    return isFs;
  }

  return !!this.isFullscreen_;
}
|
/** |
|
* Increase the size of the video to full screen |
|
* In some browsers, full screen is not supported natively, so it enters |
|
* "full window mode", where the video fills the browser window. |
|
* In browsers and devices that support native full screen, sometimes the |
|
* browser's default controls will be shown, and not the Video.js custom skin. |
|
* This includes most mobile devices (iOS, Android) and older versions of |
|
* Safari. |
|
* |
|
* @fires Player#fullscreenchange |
|
*/ |
|
; |
|
|
|
_proto.requestFullscreen = function requestFullscreen() {
  var fsApi = FullscreenApi;
  // Mark the player fullscreen up front; documentFullscreenChange_ re-syncs
  // the flag from the document's actual state afterwards.
  this.isFullscreen(true);

  if (fsApi.requestFullscreen) {
    // the browser supports going fullscreen at the element level so we can
    // take the controls fullscreen as well as the video
    // Trigger fullscreenchange event after change
    // We have to specifically add this each time, and remove
    // when canceling fullscreen. Otherwise if there's multiple
    // players on a page, they would all be reacting to the same fullscreen
    // events
    on(document, fsApi.fullscreenchange, this.boundDocumentFullscreenChange_);
    this.el_[fsApi.requestFullscreen]();
  } else if (this.tech_.supportsFullScreen()) {
    // we can't take the video.js controls fullscreen but we can go fullscreen
    // with native controls
    this.techCall_('enterFullScreen');
  } else {
    // fullscreen isn't supported so we'll just stretch the video element to
    // fill the viewport
    this.enterFullWindow();
    /**
     * @event Player#fullscreenchange
     * @type {EventTarget~Event}
     */
    this.trigger('fullscreenchange');
  }
}
|
/** |
|
* Return the video to its normal size after having been in full screen mode |
|
* |
|
* @fires Player#fullscreenchange |
|
*/ |
|
; |
|
|
|
_proto.exitFullscreen = function exitFullscreen() {
  var fsApi = FullscreenApi;
  this.isFullscreen(false); // Check for browser element fullscreen support

  if (fsApi.requestFullscreen) {
    // Element-level fullscreen is available; exit through the document API.
    document[fsApi.exitFullscreen]();
    return;
  }

  if (this.tech_.supportsFullScreen()) {
    // Fall back to the tech's native fullscreen handling.
    this.techCall_('exitFullScreen');
    return;
  }

  // No fullscreen support at all; leave full-window mode instead.
  this.exitFullWindow();

  /**
   * @event Player#fullscreenchange
   * @type {EventTarget~Event}
   */
  this.trigger('fullscreenchange');
}
|
/** |
|
* When fullscreen isn't supported we can stretch the |
|
* video container to as wide as the browser will let us. |
|
* |
|
* @fires Player#enterFullWindow |
|
*/ |
|
; |
|
|
|
_proto.enterFullWindow = function enterFullWindow() {
  this.isFullWindow = true;

  // Save the document's overflow value so it can be restored on exit.
  this.docOrigOverflow = document.documentElement.style.overflow;

  // Exit full-window when the user presses the Esc key.
  on(document, 'keydown', this.boundFullWindowOnEscKey_);

  // Hide scroll bars and apply the full-window styling.
  document.documentElement.style.overflow = 'hidden';
  addClass(document.body, 'vjs-full-window');

  /**
   * @event Player#enterFullWindow
   * @type {EventTarget~Event}
   */
  this.trigger('enterFullWindow');
}
|
/** |
|
* Check for call to either exit full window or |
|
* full screen on ESC key |
|
* |
|
* @param {string} event |
|
* Event to check for key press |
|
*/ |
|
; |
|
|
|
_proto.fullWindowOnEscKey = function fullWindowOnEscKey(event) {
  // Only the Esc key is handled here.
  if (!keycode.isEventKey(event, 'Esc')) {
    return;
  }

  if (this.isFullscreen() === true) {
    this.exitFullscreen();
  } else {
    this.exitFullWindow();
  }
}
|
/** |
|
* Exit full window |
|
* |
|
* @fires Player#exitFullWindow |
|
*/ |
|
; |
|
|
|
_proto.exitFullWindow = function exitFullWindow() {
  this.isFullWindow = false;
  off(document, 'keydown', this.boundFullWindowOnEscKey_);

  // Restore the document's original overflow (un-hides the scroll bars).
  document.documentElement.style.overflow = this.docOrigOverflow;

  // Drop the full-window styling. Resize the box, controller, and poster
  // to original sizes
  // this.positionAll();
  removeClass(document.body, 'vjs-full-window');

  /**
   * @event Player#exitFullWindow
   * @type {EventTarget~Event}
   */
  this.trigger('exitFullWindow');
}
|
/** |
|
* Called when this Player has focus and a key gets pressed down, or when |
|
* any Component of this player receives a key press that it doesn't handle. |
|
* This allows player-wide hotkeys (either as defined below, or optionally |
|
* by an external function). |
|
* |
|
* @param {EventTarget~Event} event |
|
* The `keydown` event that caused this function to be called. |
|
* |
|
* @listens keydown |
|
*/ |
|
; |
|
|
|
_proto.handleKeyDown = function handleKeyDown(event) {
  var userActions = this.options_.userActions;

  // Hotkeys must be explicitly enabled via options.
  if (!userActions || !userActions.hotkeys) {
    return;
  }

  // Decide whether the currently focused element should swallow hotkeys:
  // text-entry elements do; button-like inputs do not.
  var excludeElement = function excludeElement(el) {
    var tagName = el.tagName.toLowerCase();

    if (tagName === 'textarea') {
      return true;
    }

    if (tagName === 'input') {
      // Inputs matching these types are not text inputs, so they still
      // allow hotkey handling.
      var allowedInputTypes = ['button', 'checkbox', 'hidden', 'radio', 'reset', 'submit'];
      return allowedInputTypes.indexOf(el.type) === -1;
    }

    return false;
  };

  // Bail out if the user is focused on an interactive form element.
  if (excludeElement(this.el_.ownerDocument.activeElement)) {
    return;
  }

  if (typeof userActions.hotkeys === 'function') {
    // The integrator supplied a custom hotkey handler.
    userActions.hotkeys.call(this, event);
  } else {
    this.handleHotkeys(event);
  }
}
|
/** |
|
* Called when this Player receives a hotkey keydown event. |
|
* Supported player-wide hotkeys are: |
|
* |
|
* f - toggle fullscreen |
|
* m - toggle mute |
|
* k or Space - toggle play/pause |
|
* |
|
* @param {EventTarget~Event} event |
|
* The `keydown` event that caused this function to be called. |
|
*/ |
|
; |
|
|
|
_proto.handleHotkeys = function handleHotkeys(event) {
  var hotkeys = this.options_.userActions ? this.options_.userActions.hotkeys : {}; // set fullscreenKey, muteKey, playPauseKey from `hotkeys`, use defaults if not set

  // Each key predicate may be overridden through options; the defaults are
  // f (fullscreen), m (mute) and k/Space (play-pause).
  var _hotkeys$fullscreenKe = hotkeys.fullscreenKey,
      fullscreenKey = _hotkeys$fullscreenKe === void 0 ? function (keydownEvent) {
    return keycode.isEventKey(keydownEvent, 'f');
  } : _hotkeys$fullscreenKe,
      _hotkeys$muteKey = hotkeys.muteKey,
      muteKey = _hotkeys$muteKey === void 0 ? function (keydownEvent) {
    return keycode.isEventKey(keydownEvent, 'm');
  } : _hotkeys$muteKey,
      _hotkeys$playPauseKey = hotkeys.playPauseKey,
      playPauseKey = _hotkeys$playPauseKey === void 0 ? function (keydownEvent) {
    return keycode.isEventKey(keydownEvent, 'k') || keycode.isEventKey(keydownEvent, 'Space');
  } : _hotkeys$playPauseKey;

  if (fullscreenKey.call(this, event)) {
    event.preventDefault();
    event.stopPropagation();
    var FSToggle = Component.getComponent('FullscreenToggle');

    // Only toggle when the document currently permits fullscreen.
    if (document[FullscreenApi.fullscreenEnabled] !== false) {
      FSToggle.prototype.handleClick.call(this);
    }
  } else if (muteKey.call(this, event)) {
    event.preventDefault();
    event.stopPropagation();
    var MuteToggle = Component.getComponent('MuteToggle');
    MuteToggle.prototype.handleClick.call(this);
  } else if (playPauseKey.call(this, event)) {
    event.preventDefault();
    event.stopPropagation();
    var PlayToggle = Component.getComponent('PlayToggle');
    PlayToggle.prototype.handleClick.call(this);
  }
}
|
/** |
|
* Check whether the player can play a given mimetype |
|
* |
|
* @see https://www.w3.org/TR/2011/WD-html5-20110113/video.html#dom-navigator-canplaytype |
|
* |
|
* @param {string} type |
|
* The mimetype to check |
|
* |
|
* @return {string} |
|
* 'probably', 'maybe', or '' (empty string) |
|
*/ |
|
; |
|
|
|
_proto.canPlayType = function canPlayType(type) {
  var techOrder = this.options_.techOrder;

  // Walk the configured techs in priority order; return the first
  // non-empty answer ('probably' or 'maybe').
  for (var i = 0; i < techOrder.length; i++) {
    var techName = techOrder[i];
    var tech = Tech.getTech(techName);

    // Support old behavior of techs being registered as components.
    // Remove once that deprecated behavior is removed.
    if (!tech) {
      tech = Component.getComponent(techName);
    }

    // Check if the current tech is defined before continuing.
    if (!tech) {
      log.error("The \"" + techName + "\" tech is undefined. Skipped browser support check for that tech.");
      continue;
    }

    // Check if the browser supports this technology.
    if (tech.isSupported()) {
      var can = tech.canPlayType(type);

      if (can) {
        return can;
      }
    }
  }

  return '';
}
|
/** |
|
* Select source based on tech-order or source-order |
|
* Uses source-order selection if `options.sourceOrder` is truthy. Otherwise, |
|
* defaults to tech-order selection |
|
* |
|
* @param {Array} sources |
|
* The sources for a media asset |
|
* |
|
* @return {Object|boolean} |
|
* Object of source and tech order or false |
|
*/ |
|
; |
|
|
|
_proto.selectSource = function selectSource(sources) {
  var _this10 = this;

  // Get only the techs specified in `techOrder` that exist and are supported by the
  // current platform
  var techs = this.options_.techOrder.map(function (techName) {
    return [techName, Tech.getTech(techName)];
  }).filter(function (_ref) {
    var techName = _ref[0],
        tech = _ref[1];

    // Check if the current tech is defined before continuing
    if (tech) {
      // Check if the browser supports this technology
      return tech.isSupported();
    }

    log.error("The \"" + techName + "\" tech is undefined. Skipped browser support check for that tech.");
    return false;
  }); // Iterate over each `innerArray` element once per `outerArray` element and execute
  // `tester` with both. If `tester` returns a non-falsy value, exit early and return
  // that value.

  var findFirstPassingTechSourcePair = function findFirstPassingTechSourcePair(outerArray, innerArray, tester) {
    var found;
    // `some` is used purely for its early-exit behavior; the result of
    // interest is captured in `found`, not the boolean `some` returns.
    outerArray.some(function (outerChoice) {
      return innerArray.some(function (innerChoice) {
        found = tester(outerChoice, innerChoice);

        if (found) {
          return true;
        }
      });
    });
    return found;
  };

  var foundSourceAndTech;

  // Swap the argument order of a two-argument function (used so the same
  // `finder` works for both tech-first and source-first iteration).
  var flip = function flip(fn) {
    return function (a, b) {
      return fn(b, a);
    };
  };

  // Returns a `{source, tech}` pair when the given tech reports it can play
  // the given source; returns undefined (falsy) otherwise.
  var finder = function finder(_ref2, source) {
    var techName = _ref2[0],
        tech = _ref2[1];

    if (tech.canPlaySource(source, _this10.options_[techName.toLowerCase()])) {
      return {
        source: source,
        tech: techName
      };
    }
  }; // Depending on the truthiness of `options.sourceOrder`, we swap the order of techs and sources
  // to select from them based on their priority.

  if (this.options_.sourceOrder) {
    // Source-first ordering
    foundSourceAndTech = findFirstPassingTechSourcePair(sources, techs, flip(finder));
  } else {
    // Tech-first ordering
    foundSourceAndTech = findFirstPassingTechSourcePair(techs, sources, finder);
  }

  return foundSourceAndTech || false;
}
|
/** |
|
* Get or set the video source. |
|
* |
|
* @param {Tech~SourceObject|Tech~SourceObject[]|string} [source] |
|
* A SourceObject, an array of SourceObjects, or a string referencing |
|
* a URL to a media source. It is _highly recommended_ that an object |
|
* or array of objects is used here, so that source selection |
|
* algorithms can take the `type` into account. |
|
* |
|
* If not provided, this method acts as a getter. |
|
* |
|
* @return {string|undefined} |
|
* If the `source` argument is missing, returns the current source |
|
* URL. Otherwise, returns nothing/undefined. |
|
*/ |
|
; |
|
|
|
_proto.src = function src(source) {
  var _this11 = this;

  // getter usage
  if (typeof source === 'undefined') {
    return this.cache_.src || '';
  } // filter out invalid sources and turn our source into
  // an array of source objects

  var sources = filterSource(source); // if a source was passed in then it is invalid because
  // it was filtered to a zero length Array. So we have to
  // show an error

  if (!sources.length) {
    // The timeout gives callers a chance to attach error handlers first.
    this.setTimeout(function () {
      this.error({
        code: 4,
        message: this.localize(this.options_.notSupportedMessage)
      });
    }, 0);
    return;
  } // initial sources

  this.changingSrc_ = true;
  this.cache_.sources = sources;
  this.updateSourceCaches_(sources[0]); // middlewareSource is the source after it has been changed by middleware

  setSource(this, sources[0], function (middlewareSource, mws) {
    _this11.middleware_ = mws; // since sourceSet is async we have to update the cache again after we select a source since
    // the source that is selected could be out of order from the cache update above this callback.

    _this11.cache_.sources = sources;

    _this11.updateSourceCaches_(middlewareSource);

    var err = _this11.src_(middlewareSource);

    if (err) {
      // src_ could not find a tech for this source; retry with the
      // remaining sources, if any.
      if (sources.length > 1) {
        return _this11.src(sources.slice(1));
      }

      _this11.changingSrc_ = false; // We need to wrap this in a timeout to give folks a chance to add error event handlers

      _this11.setTimeout(function () {
        this.error({
          code: 4,
          message: this.localize(this.options_.notSupportedMessage)
        });
      }, 0); // we could not find an appropriate tech, but let's still notify the delegate that this is it
      // this needs a better comment about why this is needed

      _this11.triggerReady();

      return;
    }

    setTech(mws, _this11.tech_);
  });
}
|
/** |
|
* Set the source object on the tech, returns a boolean that indicates whether |
|
* there is a tech that can play the source or not |
|
* |
|
* @param {Tech~SourceObject} source |
|
* The source object to set on the Tech |
|
* |
|
* @return {boolean} |
|
* - True if there is no Tech to playback this source |
|
* - False otherwise |
|
* |
|
* @private |
|
*/ |
|
; |
|
|
|
_proto.src_ = function src_(source) {
  var _this12 = this;

  // Ask the selection algorithm whether any tech can play this one source.
  var sourceTech = this.selectSource([source]);

  if (!sourceTech) {
    // `true` signals "no tech could play this source" to the caller.
    return true;
  }

  // A different tech was chosen than the one currently loaded; swap techs.
  if (!titleCaseEquals(sourceTech.tech, this.techName_)) {
    this.changingSrc_ = true; // load this technology with the chosen source

    this.loadTech_(sourceTech.tech, sourceTech.source);
    this.tech_.ready(function () {
      _this12.changingSrc_ = false;
    });
    return false;
  } // wait until the tech is ready to set the source
  // and set it synchronously if possible (#2326)

  this.ready(function () {
    // The setSource tech method was added with source handlers
    // so older techs won't support it
    // We need to check the direct prototype for the case where subclasses
    // of the tech do not support source handlers
    if (this.tech_.constructor.prototype.hasOwnProperty('setSource')) {
      this.techCall_('setSource', source);
    } else {
      this.techCall_('src', source.src);
    }

    this.changingSrc_ = false;
  }, true);
  return false;
}
|
/** |
|
* Begin loading the src data. |
|
*/ |
|
; |
|
|
|
_proto.load = function load() {
  // Delegate straight to the tech's native load().
  this.techCall_('load');
}
|
/** |
|
* Reset the player. Loads the first tech in the techOrder, |
|
* removes all the text tracks in the existing `tech`, |
|
* and calls `reset` on the `tech`. |
|
*/ |
|
; |
|
|
|
_proto.reset = function reset() {
  var player = this;

  // Use the injected Promise implementation when one was provided,
  // otherwise fall back to the global one (which may be absent).
  var PromiseImpl = this.options_.Promise || window$1.Promise;

  // While paused (or without any Promise implementation) reset
  // synchronously; otherwise wait for play() to settle first.
  if (this.paused() || !PromiseImpl) {
    this.doReset_();
    return;
  }

  silencePromise(this.play().then(function () {
    return player.doReset_();
  }));
};
|
|
|
_proto.doReset_ = function doReset_() {
  // Drop any text tracks attached to the outgoing tech.
  if (this.tech_) {
    this.tech_.clearTracks('text');
  }

  // Clear cached state, poster, and reload the first tech in techOrder.
  this.resetCache_();
  this.poster('');
  this.loadTech_(this.options_.techOrder[0], null);
  this.techCall_('reset');
  this.resetControlBarUI_();

  // Only announce the reset when the player is set up for events.
  if (isEvented(this)) {
    this.trigger('playerreset');
  }
}
|
/** |
|
* Reset Control Bar's UI by calling sub-methods that reset |
|
* all of Control Bar's components |
|
*/ |
|
; |
|
|
|
_proto.resetControlBarUI_ = function resetControlBarUI_() {
  // Restore each control-bar widget to its initial state.
  this.resetProgressBar_();
  this.resetPlaybackRate_();
  this.resetVolumeBar_();
}
|
/** |
|
* Reset tech's progress so progress bar is reset in the UI |
|
*/ |
|
; |
|
|
|
_proto.resetProgressBar_ = function resetProgressBar_() {
  // Rewind to the start so the seek bar renders at zero.
  this.currentTime(0);

  // Refresh the time readouts, when the control bar includes them.
  var durationDisplay = this.controlBar.durationDisplay;
  var remainingTimeDisplay = this.controlBar.remainingTimeDisplay;

  if (durationDisplay) {
    durationDisplay.updateContent();
  }

  if (remainingTimeDisplay) {
    remainingTimeDisplay.updateContent();
  }
}
|
/** |
|
* Reset Playback ratio |
|
*/ |
|
; |
|
|
|
_proto.resetPlaybackRate_ = function resetPlaybackRate_() {
  // Restore the rate to its default, then run the rate-change handler so
  // the UI updates as if the tech itself had fired a ratechange.
  this.playbackRate(this.defaultPlaybackRate());
  this.handleTechRateChange_();
}
|
/** |
|
* Reset Volume bar |
|
*/ |
|
; |
|
|
|
_proto.resetVolumeBar_ = function resetVolumeBar_() {
  // Full volume, then a volumechange so the volume UI re-renders.
  this.volume(1.0);
  this.trigger('volumechange');
}
|
/** |
|
* Returns all of the current source objects. |
|
* |
|
* @return {Tech~SourceObject[]} |
|
* The current source objects |
|
*/ |
|
; |
|
|
|
_proto.currentSources = function currentSources() {
  var activeSource = this.currentSource();

  // Build a fallback list from the single active source;
  // an empty object means "no source".
  var fallback = [];

  if (Object.keys(activeSource).length !== 0) {
    fallback.push(activeSource);
  }

  // Prefer the cached list recorded by src(), when present.
  return this.cache_.sources || fallback;
}
|
/** |
|
* Returns the current source object. |
|
* |
|
* @return {Tech~SourceObject} |
|
* The current source object |
|
*/ |
|
; |
|
|
|
_proto.currentSource = function currentSource() {
  // Default to an empty object so callers can safely read `.src`/`.type`.
  var cached = this.cache_.source;
  return cached || {};
}
|
/** |
|
* Returns the fully qualified URL of the current source value e.g. http://mysite.com/video.mp4 |
|
* Can be used in conjunction with `currentType` to assist in rebuilding the current source object. |
|
* |
|
* @return {string} |
|
* The current source |
|
*/ |
|
; |
|
|
|
_proto.currentSrc = function currentSrc() {
  // Resolve the source object once instead of calling currentSource()
  // twice as before. The getter always yields an object, but `.src` may
  // be absent, in which case we fall back to an empty string.
  var source = this.currentSource();
  return source && source.src || '';
}
|
/** |
|
* Get the current source type e.g. video/mp4 |
|
* This can allow you rebuild the current source object so that you could load the same |
|
* source and tech later |
|
* |
|
* @return {string} |
|
* The source MIME type |
|
*/ |
|
; |
|
|
|
_proto.currentType = function currentType() {
  // Resolve the source object once instead of calling currentSource()
  // twice as before. `.type` may be absent, in which case we fall back
  // to an empty string.
  var source = this.currentSource();
  return source && source.type || '';
}
|
/** |
|
* Get or set the preload attribute |
|
* |
|
* @param {boolean} [value] |
|
* - true means that we should preload |
|
* - false means that we should not preload |
|
* |
|
* @return {string} |
|
* The preload attribute value when getting |
|
*/ |
|
; |
|
|
|
_proto.preload = function preload(value) {
  // Getter: ask the tech for the current preload attribute.
  if (value === undefined) {
    return this.techGet_('preload');
  }

  // Setter: forward to the tech and remember the value so any
  // future tech is constructed with it.
  this.techCall_('setPreload', value);
  this.options_.preload = value;
}
|
/** |
|
* Get or set the autoplay option. When this is a boolean it will |
|
* modify the attribute on the tech. When this is a string the attribute on |
|
* the tech will be removed and `Player` will handle autoplay on loadstarts. |
|
* |
|
* @param {boolean|string} [value] |
|
* - true: autoplay using the browser behavior |
|
* - false: do not autoplay |
|
* - 'play': call play() on every loadstart |
|
* - 'muted': call muted() then play() on every loadstart |
|
* - 'any': call play() on every loadstart. if that fails call muted() then play(). |
|
* - *: values other than those listed here will set `autoplay` to true
|
* |
|
* @return {boolean|string} |
|
* The current value of autoplay when getting |
|
*/ |
|
; |
|
|
|
_proto.autoplay = function autoplay(value) {
  // getter usage
  if (value === undefined) {
    return this.options_.autoplay || false;
  }

  var techAutoplay; // if the value is a valid string set it to that

  if (typeof value === 'string' && /(any|play|muted)/.test(value)) {
    // String modes ('any'|'play'|'muted'): the player handles autoplay
    // itself on loadstart, so the tech attribute is forced off below.
    this.options_.autoplay = value;
    this.manualAutoplay_(value);
    techAutoplay = false; // any falsy value sets autoplay to false in the browser,
    // lets do the same
  } else if (!value) {
    this.options_.autoplay = false; // any other value (ie truthy) sets autoplay to true
  } else {
    this.options_.autoplay = true;
  }

  // For the boolean branches, mirror options_.autoplay on the tech.
  techAutoplay = typeof techAutoplay === 'undefined' ? this.options_.autoplay : techAutoplay; // if we don't have a tech then we do not queue up
  // a setAutoplay call on tech ready. We do this because the
  // autoplay option will be passed in the constructor and we
  // do not need to set it twice

  if (this.tech_) {
    this.techCall_('setAutoplay', techAutoplay);
  }
}
|
/** |
|
* Set or unset the playsinline attribute. |
|
* Playsinline tells the browser that non-fullscreen playback is preferred. |
|
* |
|
* @param {boolean} [value] |
|
* - true means that we should try to play inline by default |
|
* - false means that we should use the browser's default playback mode, |
|
* which in most cases is inline. iOS Safari is a notable exception |
|
* and plays fullscreen by default. |
|
* |
|
* @return {string|Player} |
|
* - the current value of playsinline |
|
* - the player when setting |
|
* |
|
* @see [Spec]{@link https://html.spec.whatwg.org/#attr-video-playsinline} |
|
*/ |
|
; |
|
|
|
_proto.playsinline = function playsinline(value) {
  // Getter: report the tech's current playsinline value.
  if (value === undefined) {
    return this.techGet_('playsinline');
  }

  // Setter: mirror the attribute on the tech, remember it for future
  // techs, and return the player for chaining.
  this.techCall_('setPlaysinline', value);
  this.options_.playsinline = value;
  return this;
}
|
/** |
|
* Get or set the loop attribute on the video element. |
|
* |
|
* @param {boolean} [value] |
|
* - true means that we should loop the video |
|
* - false means that we should not loop the video |
|
* |
|
* @return {boolean} |
|
* The current value of loop when getting |
|
*/ |
|
; |
|
|
|
_proto.loop = function loop(value) {
  // Getter: report the tech's current loop value.
  if (value === undefined) {
    return this.techGet_('loop');
  }

  // Setter: forward to the tech and remember the value so any
  // future tech is constructed with it.
  this.techCall_('setLoop', value);
  this.options_.loop = value;
}
|
/** |
|
* Get or set the poster image source url |
|
* |
|
* @fires Player#posterchange |
|
* |
|
* @param {string} [src] |
|
* Poster image source URL |
|
* |
|
* @return {string} |
|
* The current value of poster when getting |
|
*/ |
|
; |
|
|
|
_proto.poster = function poster(src) {
  if (src === undefined) {
    return this.poster_;
  } // The correct way to remove a poster is to set as an empty string
  // other falsy values will throw errors

  if (!src) {
    src = '';
  }

  // No-op when unchanged, so posterchange only fires on real changes.
  if (src === this.poster_) {
    return;
  } // update the internal poster variable

  this.poster_ = src; // update the tech's poster

  this.techCall_('setPoster', src);
  this.isPosterFromTech_ = false; // alert components that the poster has been set

  /**
   * This event fires when the poster image is changed on the player.
   *
   * @event Player#posterchange
   * @type {EventTarget~Event}
   */

  this.trigger('posterchange');
}
|
/** |
|
* Some techs (e.g. YouTube) can provide a poster source in an |
|
* asynchronous way. We want the poster component to use this |
|
* poster source so that it covers up the tech's controls. |
|
* (YouTube's play button). However we only want to use this |
|
* source if the player user hasn't set a poster through |
|
* the normal APIs. |
|
* |
|
* @fires Player#posterchange |
|
* @listens Tech#posterchange |
|
* @private |
|
*/ |
|
; |
|
|
|
_proto.handleTechPosterChange_ = function handleTechPosterChange_() {
  // A poster set through the normal API wins unless the
  // techCanOverridePoster option explicitly allows the tech to replace it.
  if (this.poster_ && !this.options_.techCanOverridePoster) {
    return;
  }

  // Nothing to do without a tech that can report a poster.
  if (!this.tech_ || !this.tech_.poster) {
    return;
  }

  var techPoster = this.tech_.poster() || '';

  if (techPoster === this.poster_) {
    return;
  }

  this.poster_ = techPoster;
  this.isPosterFromTech_ = true;

  // Let components know the poster has changed
  this.trigger('posterchange');
}
|
/** |
|
* Get or set whether or not the controls are showing. |
|
* |
|
* @fires Player#controlsenabled |
|
* |
|
* @param {boolean} [bool] |
|
* - true to turn controls on |
|
* - false to turn controls off |
|
* |
|
* @return {boolean} |
|
* The current value of controls when getting |
|
*/ |
|
; |
|
|
|
_proto.controls = function controls(bool) {
  if (bool === undefined) {
    return !!this.controls_;
  }

  bool = !!bool; // Don't trigger a change event unless it actually changed

  if (this.controls_ === bool) {
    return;
  }

  this.controls_ = bool;

  // Native controls are managed by the tech; custom controls by the player.
  if (this.usingNativeControls()) {
    this.techCall_('setControls', bool);
  }

  if (this.controls_) {
    this.removeClass('vjs-controls-disabled');
    this.addClass('vjs-controls-enabled');
    /**
     * @event Player#controlsenabled
     * @type {EventTarget~Event}
     */

    this.trigger('controlsenabled');

    // Custom controls need the tech's mouse/touch listeners wired up.
    if (!this.usingNativeControls()) {
      this.addTechControlsListeners_();
    }
  } else {
    this.removeClass('vjs-controls-enabled');
    this.addClass('vjs-controls-disabled');
    /**
     * @event Player#controlsdisabled
     * @type {EventTarget~Event}
     */

    this.trigger('controlsdisabled');

    if (!this.usingNativeControls()) {
      this.removeTechControlsListeners_();
    }
  }
}
|
/** |
|
* Toggle native controls on/off. Native controls are the controls built into |
|
* devices (e.g. default iPhone controls), Flash, or other techs |
|
* (e.g. Vimeo Controls) |
|
* **This should only be set by the current tech, because only the tech knows |
|
* if it can support native controls** |
|
* |
|
* @fires Player#usingnativecontrols |
|
* @fires Player#usingcustomcontrols |
|
* |
|
* @param {boolean} [bool] |
|
* - true to turn native controls on |
|
* - false to turn native controls off |
|
* |
|
* @return {boolean} |
|
* The current value of native controls when getting |
|
*/ |
|
; |
|
|
|
_proto.usingNativeControls = function usingNativeControls(bool) {
  if (bool === undefined) {
    return !!this.usingNativeControls_;
  }

  bool = !!bool; // Don't trigger a change event unless it actually changed

  if (this.usingNativeControls_ === bool) {
    return;
  }

  this.usingNativeControls_ = bool;

  // Toggle the styling hook and announce which control set is in use.
  if (this.usingNativeControls_) {
    this.addClass('vjs-using-native-controls');
    /**
     * player is using the native device controls
     *
     * @event Player#usingnativecontrols
     * @type {EventTarget~Event}
     */

    this.trigger('usingnativecontrols');
  } else {
    this.removeClass('vjs-using-native-controls');
    /**
     * player is using the custom HTML controls
     *
     * @event Player#usingcustomcontrols
     * @type {EventTarget~Event}
     */

    this.trigger('usingcustomcontrols');
  }
}
|
/** |
|
* Set or get the current MediaError |
|
* |
|
* @fires Player#error |
|
* |
|
* @param {MediaError|string|number} [err] |
|
* A MediaError or a string/number to be turned |
|
* into a MediaError |
|
* |
|
* @return {MediaError|null} |
|
* The current MediaError when getting (or null) |
|
*/ |
|
; |
|
|
|
_proto.error = function error(err) {
  if (err === undefined) {
    return this.error_ || null;
  } // restoring to default

  if (err === null) {
    // Clearing the error also removes the error styling and closes the
    // error display, when present.
    this.error_ = err;
    this.removeClass('vjs-error');

    if (this.errorDisplay) {
      this.errorDisplay.close();
    }

    return;
  }

  this.error_ = new MediaError(err); // add the vjs-error classname to the player

  this.addClass('vjs-error'); // log the name of the error type and any message
  // IE11 logs "[object object]" and required you to expand message to see error object

  log.error("(CODE:" + this.error_.code + " " + MediaError.errorTypes[this.error_.code] + ")", this.error_.message, this.error_);
  /**
   * @event Player#error
   * @type {EventTarget~Event}
   */

  this.trigger('error');
  return;
}
|
/** |
|
* Report user activity |
|
* |
|
* @param {Object} event |
|
* Event object |
|
*/ |
|
; |
|
|
|
_proto.reportUserActivity = function reportUserActivity(event) {
  // Just raise the flag; the 250ms activity-check interval installed by
  // listenForUserActivity_ picks it up and updates userActive().
  this.userActivity_ = true;
}
|
/** |
|
* Get/set if user is active |
|
* |
|
* @fires Player#useractive |
|
* @fires Player#userinactive |
|
* |
|
* @param {boolean} [bool] |
|
* - true if the user is active |
|
* - false if the user is inactive |
|
* |
|
* @return {boolean} |
|
* The current value of userActive when getting |
|
*/ |
|
; |
|
|
|
_proto.userActive = function userActive(bool) {
  if (bool === undefined) {
    return this.userActive_;
  }

  bool = !!bool;

  // Only fire events on a real state change.
  if (bool === this.userActive_) {
    return;
  }

  this.userActive_ = bool;

  if (this.userActive_) {
    this.userActivity_ = true;
    this.removeClass('vjs-user-inactive');
    this.addClass('vjs-user-active');
    /**
     * @event Player#useractive
     * @type {EventTarget~Event}
     */

    this.trigger('useractive');
    return;
  } // Chrome/Safari/IE have bugs where when you change the cursor it can
  // trigger a mousemove event. This causes an issue when you're hiding
  // the cursor when the user is inactive, and a mousemove signals user
  // activity. Making it impossible to go into inactive mode. Specifically
  // this happens in fullscreen when we really need to hide the cursor.
  //
  // When this gets resolved in ALL browsers it can be removed
  // https://code.google.com/p/chromium/issues/detail?id=103041

  if (this.tech_) {
    // Swallow the very next synthetic mousemove caused by the cursor change.
    this.tech_.one('mousemove', function (e) {
      e.stopPropagation();
      e.preventDefault();
    });
  }

  this.userActivity_ = false;
  this.removeClass('vjs-user-active');
  this.addClass('vjs-user-inactive');
  /**
   * @event Player#userinactive
   * @type {EventTarget~Event}
   */

  this.trigger('userinactive');
}
|
/** |
|
* Listen for user activity based on timeout value |
|
* |
|
* @private |
|
*/ |
|
; |
|
|
|
_proto.listenForUserActivity_ = function listenForUserActivity_() {
  var mouseInProgress;
  var lastMoveX;
  var lastMoveY;
  var handleActivity = bind(this, this.reportUserActivity);

  var handleMouseMove = function handleMouseMove(e) {
    // #1068 - Prevent mousemove spamming
    // Chrome Bug: https://code.google.com/p/chromium/issues/detail?id=366970
    if (e.screenX !== lastMoveX || e.screenY !== lastMoveY) {
      lastMoveX = e.screenX;
      lastMoveY = e.screenY;
      handleActivity();
    }
  };

  var handleMouseDown = function handleMouseDown() {
    handleActivity(); // For as long as they are touching the device or have their mouse down,
    // we consider them active even if they're not moving their finger or mouse.
    // So we want to continue to update that they are active

    this.clearInterval(mouseInProgress); // Setting userActivity=true now and setting the interval to the same time
    // as the activityCheck interval (250) should ensure we never miss the
    // next activityCheck

    mouseInProgress = this.setInterval(handleActivity, 250);
  };

  var handleMouseUp = function handleMouseUp(event) {
    handleActivity(); // Stop the interval that maintains activity if the mouse/touch is down

    this.clearInterval(mouseInProgress);
  }; // Any mouse movement will be considered user activity

  this.on('mousedown', handleMouseDown);
  this.on('mousemove', handleMouseMove);
  this.on('mouseup', handleMouseUp);
  var controlBar = this.getChild('controlBar'); // Fixes bug on Android & iOS where when tapping progressBar (when control bar is displayed)
  // controlBar would no longer be hidden by default timeout.

  if (controlBar && !IS_IOS && !IS_ANDROID) {
    // Suspend the inactivity timeout while the pointer is over the control
    // bar, restoring the configured value on leave.
    controlBar.on('mouseenter', function (event) {
      this.player().cache_.inactivityTimeout = this.player().options_.inactivityTimeout;
      this.player().options_.inactivityTimeout = 0;
    });
    controlBar.on('mouseleave', function (event) {
      this.player().options_.inactivityTimeout = this.player().cache_.inactivityTimeout;
    });
  } // Listen for keyboard navigation
  // Shouldn't need to use inProgress interval because of key repeat

  this.on('keydown', handleActivity);
  this.on('keyup', handleActivity); // Run an interval every 250 milliseconds instead of stuffing everything into
  // the mousemove/touchmove function itself, to prevent performance degradation.
  // `this.reportUserActivity` simply sets this.userActivity_ to true, which
  // then gets picked up by this loop
  // http://ejohn.org/blog/learning-from-twitter/

  var inactivityTimeout;
  this.setInterval(function () {
    // Check to see if mouse/touch activity has happened
    if (!this.userActivity_) {
      return;
    } // Reset the activity tracker

    this.userActivity_ = false; // If the user state was inactive, set the state to active

    this.userActive(true); // Clear any existing inactivity timeout to start the timer over

    this.clearTimeout(inactivityTimeout);
    var timeout = this.options_.inactivityTimeout;

    // A timeout of 0 (or less) means "never go inactive".
    if (timeout <= 0) {
      return;
    } // In <timeout> milliseconds, if no more activity has occurred the
    // user will be considered inactive

    inactivityTimeout = this.setTimeout(function () {
      // Protect against the case where the inactivityTimeout can trigger just
      // before the next user activity is picked up by the activity check loop
      // causing a flicker
      if (!this.userActivity_) {
        this.userActive(false);
      }
    }, timeout);
  }, 250);
}
|
/** |
|
* Gets or sets the current playback rate. A playback rate of |
|
* 1.0 represents normal speed and 0.5 would indicate half-speed |
|
* playback, for instance. |
|
* |
|
* @see https://html.spec.whatwg.org/multipage/embedded-content.html#dom-media-playbackrate |
|
* |
|
* @param {number} [rate] |
|
* New playback rate to set. |
|
* |
|
* @return {number} |
|
* The current playback rate when getting or 1.0 |
|
*/ |
|
; |
|
|
|
_proto.playbackRate = function playbackRate(rate) {
  // Setter: cache_.lastPlaybackRate is refreshed by the tech's ratechange
  // handler registered elsewhere, so only the tech needs to be told here.
  if (rate !== undefined) {
    this.techCall_('setPlaybackRate', rate);
    return;
  }

  // Getter: techs that cannot vary the rate always report normal speed.
  if (!this.tech_ || !this.tech_.featuresPlaybackRate) {
    return 1.0;
  }

  return this.cache_.lastPlaybackRate || this.techGet_('playbackRate');
}
|
/** |
|
* Gets or sets the current default playback rate. A default playback rate of |
|
* 1.0 represents normal speed and 0.5 would indicate half-speed playback, for instance. |
|
* defaultPlaybackRate will only represent what the initial playbackRate of a video was, not

* the current playbackRate.
|
* |
|
* @see https://html.spec.whatwg.org/multipage/embedded-content.html#dom-media-defaultplaybackrate |
|
* |
|
* @param {number} [rate] |
|
* New default playback rate to set. |
|
* |
|
* @return {number|Player} |
|
* - The default playback rate when getting or 1.0 |
|
* - the player when setting |
|
*/ |
|
; |
|
|
|
_proto.defaultPlaybackRate = function defaultPlaybackRate(rate) {
  // Setter: forward to the tech (returning the techCall_ result as before).
  if (rate !== undefined) {
    return this.techCall_('setDefaultPlaybackRate', rate);
  }

  // Getter: techs that cannot vary the rate always report normal speed.
  if (!this.tech_ || !this.tech_.featuresPlaybackRate) {
    return 1.0;
  }

  return this.techGet_('defaultPlaybackRate');
}
|
/** |
|
* Gets or sets the audio flag |
|
* |
|
* @param {boolean} bool |
|
* - true signals that this is an audio player |
|
* - false signals that this is not an audio player |
|
* |
|
* @return {boolean} |
|
* The current value of isAudio when getting |
|
*/ |
|
; |
|
|
|
_proto.isAudio = function isAudio(bool) {
  // Getter: coerce to a strict boolean.
  if (bool === undefined) {
    return !!this.isAudio_;
  }

  // Setter: store a normalized boolean.
  this.isAudio_ = !!bool;
}
|
/** |
|
* A helper method for adding a {@link TextTrack} to our |
|
* {@link TextTrackList}. |
|
* |
|
* In addition to the W3C settings we allow adding additional info through options. |
|
* |
|
* @see http://www.w3.org/html/wg/drafts/html/master/embedded-content-0.html#dom-media-addtexttrack |
|
* |
|
* @param {string} [kind] |
|
* the kind of TextTrack you are adding |
|
* |
|
* @param {string} [label] |
|
* the label to give the TextTrack label |
|
* |
|
* @param {string} [language] |
|
* the language to set on the TextTrack |
|
* |
|
* @return {TextTrack|undefined} |
|
* the TextTrack that was added or undefined |
|
* if there is no tech |
|
*/ |
|
; |
|
|
|
_proto.addTextTrack = function addTextTrack(kind, label, language) {
  // Without a tech there is nowhere to attach the track; yield undefined.
  if (!this.tech_) {
    return;
  }

  return this.tech_.addTextTrack(kind, label, language);
}
|
/** |
|
* Create a remote {@link TextTrack} and an {@link HTMLTrackElement}. It will |
|
* be automatically removed from the video element whenever the source changes, unless
|
* manualCleanup is set to false. |
|
* |
|
* @param {Object} options |
|
* Options to pass to {@link HTMLTrackElement} during creation. See |
|
* {@link HTMLTrackElement} for object properties that you should use. |
|
* |
|
* @param {boolean} [manualCleanup=true] if set to false, the TextTrack will be removed when the source changes
|
* |
|
* @return {HtmlTrackElement} |
|
* the HTMLTrackElement that was created and added |
|
* to the HtmlTrackElementList and the remote |
|
* TextTrackList |
|
* |
|
* @deprecated The default value of the "manualCleanup" parameter will default |
|
* to "false" in upcoming versions of Video.js |
|
*/ |
|
; |
|
|
|
_proto.addRemoteTextTrack = function addRemoteTextTrack(options, manualCleanup) {
  // Without a tech there is nowhere to attach the track; yield undefined.
  if (!this.tech_) {
    return;
  }

  return this.tech_.addRemoteTextTrack(options, manualCleanup);
}
|
/** |
|
* Remove a remote {@link TextTrack} from the respective |
|
* {@link TextTrackList} and {@link HtmlTrackElementList}. |
|
* |
|
* @param {Object} track |
|
* Remote {@link TextTrack} to remove |
|
* |
|
* @return {undefined} |
|
* does not return anything |
|
*/ |
|
; |
|
|
|
_proto.removeRemoteTextTrack = function removeRemoteTextTrack(obj) {
  if (obj === void 0) {
    obj = {};
  }

  // Accept either a `{track}` wrapper or a bare track object.
  var track = obj.track || obj;

  if (this.tech_) {
    return this.tech_.removeRemoteTextTrack(track);
  }
}
|
/** |
|
* Gets available media playback quality metrics as specified by the W3C's Media |
|
* Playback Quality API. |
|
* |
|
* @see [Spec]{@link https://wicg.github.io/media-playback-quality} |
|
* |
|
* @return {Object|undefined} |
|
* An object with supported media playback quality metrics or undefined if there |
|
* is no tech or the tech does not support it. |
|
*/ |
|
; |
|
|
|
_proto.getVideoPlaybackQuality = function getVideoPlaybackQuality() {
  // Delegates entirely to the tech; undefined when there is no tech or
  // the tech does not implement it.
  return this.techGet_('getVideoPlaybackQuality');
}
|
/** |
|
* Get video width |
|
* |
|
* @return {number} |
|
* current video width |
|
*/ |
|
; |
|
|
|
_proto.videoWidth = function videoWidth() {
  var tech = this.tech_;

  // Fall back to 0 when there is no tech or it lacks the query method.
  if (tech && tech.videoWidth) {
    return tech.videoWidth() || 0;
  }

  return 0;
}
|
/** |
|
* Get video height |
|
* |
|
* @return {number} |
|
* current video height |
|
*/ |
|
; |
|
|
|
_proto.videoHeight = function videoHeight() {
  var tech = this.tech_;

  // Fall back to 0 when there is no tech or it lacks the query method.
  if (tech && tech.videoHeight) {
    return tech.videoHeight() || 0;
  }

  return 0;
}
|
/** |
|
* The player's language code |
|
* NOTE: The language should be set in the player options if you want the |
|
* controls to be built with a specific language. Changing the language
|
* later will not update controls text. |
|
* |
|
* @param {string} [code] |
|
* the language code to set the player to |
|
* |
|
* @return {string} |
|
* The current language code when getting |
|
*/ |
|
; |
|
|
|
_proto.language = function language(code) {
  // Setter: store the lowercased code. Note this does not rebuild
  // already-created controls in the new language.
  if (code !== undefined) {
    this.language_ = String(code).toLowerCase();
    return;
  }

  return this.language_;
}
|
/** |
|
* Get the player's language dictionary |
|
* Merge every time, because a newly added plugin might call videojs.addLanguage() at any time |
|
* Languages specified directly in the player options have precedence |
|
* |
|
* @return {Array} |
|
* An array of supported languages
|
*/ |
|
; |
|
|
|
_proto.languages = function languages() {
  // Re-merge on every call because plugins may add languages at any time;
  // per-player languages take precedence over the global defaults.
  return mergeOptions(Player.prototype.options_.languages, this.languages_);
}
|
/** |
|
* returns a JavaScript object representing the current track
|
* information. **DOES not return it as JSON** |
|
* |
|
* @return {Object} |
|
* Object representing the current of track info |
|
*/ |
|
; |
|
|
|
_proto.toJSON = function toJSON() {
  // Copy the options; tracks get special handling below.
  var options = mergeOptions(this.options_);
  var tracks = options.tracks;

  // Clone every track and drop its player reference so the result
  // contains no circular references.
  options.tracks = tracks.map(function (track) {
    var trackCopy = mergeOptions(track);
    trackCopy.player = undefined;
    return trackCopy;
  });

  return options;
}
|
/** |
|
* Creates a simple modal dialog (an instance of the {@link ModalDialog} |
|
* component) that immediately overlays the player with arbitrary |
|
* content and removes itself when closed. |
|
* |
|
* @param {string|Function|Element|Array|null} content |
|
* Same as {@link ModalDialog#content}'s param of the same name. |
|
* The most straight-forward usage is to provide a string or DOM |
|
* element. |
|
* |
|
* @param {Object} [options] |
|
* Extra options which will be passed on to the {@link ModalDialog}. |
|
* |
|
* @return {ModalDialog} |
|
* the {@link ModalDialog} that was created |
|
*/ |
|
; |
|
|
|
_proto.createModal = function createModal(content, options) {
  var player = this;

  // Default the options object and feed the caller's content through to
  // the ModalDialog.
  var settings = options || {};
  settings.content = content || '';

  var modal = new ModalDialog(this, settings);
  this.addChild(modal);

  // The modal removes itself from the player's children once disposed.
  modal.on('dispose', function () {
    player.removeChild(modal);
  });

  modal.open();
  return modal;
}
|
/** |
|
* Change breakpoint classes when the player resizes. |
|
* |
|
* @private |
|
*/ |
|
; |
|
|
|
_proto.updateCurrentBreakpoint_ = function updateCurrentBreakpoint_() {
  // Breakpoint classes only apply while the player is responsive.
  if (!this.responsive()) {
    return;
  }

  var currentBreakpoint = this.currentBreakpoint();
  var currentWidth = this.currentWidth();

  // Walk the breakpoints in BREAKPOINT_ORDER (presumably smallest max-width
  // first — confirm against the definition) and apply the first one whose
  // max-width accommodates the current player width.
  for (var i = 0; i < BREAKPOINT_ORDER.length; i++) {
    var candidateBreakpoint = BREAKPOINT_ORDER[i];
    var maxWidth = this.breakpoints_[candidateBreakpoint];

    if (currentWidth <= maxWidth) {
      // The current breakpoint did not change, nothing to do.
      if (currentBreakpoint === candidateBreakpoint) {
        return;
      } // Only remove a class if there is a current breakpoint.


      if (currentBreakpoint) {
        this.removeClass(BREAKPOINT_CLASSES[currentBreakpoint]);
      }

      this.addClass(BREAKPOINT_CLASSES[candidateBreakpoint]);
      this.breakpoint_ = candidateBreakpoint;
      break;
    }
  }
}
|
/** |
|
* Removes the current breakpoint. |
|
* |
|
* @private |
|
*/ |
|
; |
|
|
|
_proto.removeCurrentBreakpoint_ = function removeCurrentBreakpoint_() {
  // Capture the class for the breakpoint being cleared before resetting.
  var classToRemove = this.currentBreakpointClass();
  this.breakpoint_ = '';

  if (!classToRemove) {
    return;
  }

  this.removeClass(classToRemove);
}
|
/** |
|
* Get or set breakpoints on the player. |
|
* |
|
* Calling this method with an object or `true` will remove any previous |
|
* custom breakpoints and start from the defaults again. |
|
* |
|
* @param {Object|boolean} [breakpoints] |
|
* If an object is given, it can be used to provide custom |
|
* breakpoints. If `true` is given, will set default breakpoints. |
|
* If this argument is not given, will simply return the current |
|
* breakpoints. |
|
* |
|
* @param {number} [breakpoints.tiny] |
|
* The maximum width for the "vjs-layout-tiny" class. |
|
* |
|
* @param {number} [breakpoints.xsmall] |
|
* The maximum width for the "vjs-layout-x-small" class. |
|
* |
|
* @param {number} [breakpoints.small] |
|
* The maximum width for the "vjs-layout-small" class. |
|
* |
|
* @param {number} [breakpoints.medium] |
|
* The maximum width for the "vjs-layout-medium" class. |
|
* |
|
* @param {number} [breakpoints.large] |
|
* The maximum width for the "vjs-layout-large" class. |
|
* |
|
* @param {number} [breakpoints.xlarge] |
|
* The maximum width for the "vjs-layout-x-large" class. |
|
* |
|
* @param {number} [breakpoints.huge] |
|
* The maximum width for the "vjs-layout-huge" class. |
|
* |
|
* @return {Object} |
|
* An object mapping breakpoint names to maximum width values. |
|
*/ |
|
; |
|
|
|
_proto.breakpoints = function breakpoints(_breakpoints) {
  // Used as a getter.
  if (_breakpoints === undefined) {
    // Copy into a fresh object before returning: `assign(target)` with a
    // single argument returns `target` itself, which previously handed
    // callers a live reference to internal state.
    return assign({}, this.breakpoints_);
  }

  this.breakpoint_ = '';
  this.breakpoints_ = assign({}, DEFAULT_BREAKPOINTS, _breakpoints);

  // When breakpoint definitions change, we need to update the currently
  // selected breakpoint.
  this.updateCurrentBreakpoint_();

  // Clone the breakpoints before returning (see note above on single-arg
  // assign).
  return assign({}, this.breakpoints_);
}
|
/** |
|
* Get or set a flag indicating whether or not this player should adjust |
|
* its UI based on its dimensions. |
|
* |
|
* @param {boolean} value |
|
* Should be `true` if the player should adjust its UI based on its |
|
* dimensions; otherwise, should be `false`. |
|
* |
|
* @return {boolean} |
|
* Will be `true` if this player should adjust its UI based on its |
|
* dimensions; otherwise, will be `false`. |
|
*/ |
|
; |
|
|
|
_proto.responsive = function responsive(value) {
  // No argument: act as a getter.
  if (value === undefined) {
    return this.responsive_;
  }

  var newValue = Boolean(value);

  // Nothing changed; keep the original early-out (returns undefined).
  if (newValue === this.responsive_) {
    return;
  }

  this.responsive_ = newValue;

  if (newValue) {
    // Now responsive: track resizes and apply the initial breakpoint.
    this.on('playerresize', this.updateCurrentBreakpoint_);
    this.updateCurrentBreakpoint_();
  } else {
    // No longer responsive: stop tracking and clear any breakpoint class.
    this.off('playerresize', this.updateCurrentBreakpoint_);
    this.removeCurrentBreakpoint_();
  }

  return newValue;
}
|
/** |
|
* Get current breakpoint name, if any. |
|
* |
|
* @return {string} |
|
* If there is currently a breakpoint set, returns a the key from the |
|
* breakpoints object matching it. Otherwise, returns an empty string. |
|
*/ |
|
; |
|
|
|
_proto.currentBreakpoint = function currentBreakpoint() {
  // Empty string when not responsive or no breakpoint has matched yet.
  return this.breakpoint_;
}
|
/** |
|
* Get the current breakpoint class name. |
|
* |
|
* @return {string} |
|
* The matching class name (e.g. `"vjs-layout-tiny"` or |
|
* `"vjs-layout-large"`) for the current breakpoint. Empty string if |
|
* there is no current breakpoint. |
|
*/ |
|
; |
|
|
|
_proto.currentBreakpointClass = function currentBreakpointClass() {
  // Map the breakpoint name to its vjs-layout-* class; '' when unset.
  return BREAKPOINT_CLASSES[this.breakpoint_] || '';
}
|
/** |
|
* An object that describes a single piece of media. |
|
* |
|
* Properties that are not part of this type description will be retained; so, |
|
* this can be viewed as a generic metadata storage mechanism as well. |
|
* |
|
* @see {@link https://wicg.github.io/mediasession/#the-mediametadata-interface} |
|
* @typedef {Object} Player~MediaObject |
|
* |
|
* @property {string} [album] |
|
* Unused, except if this object is passed to the `MediaSession` |
|
* API. |
|
* |
|
* @property {string} [artist] |
|
* Unused, except if this object is passed to the `MediaSession` |
|
* API. |
|
* |
|
* @property {Object[]} [artwork] |
|
* Unused, except if this object is passed to the `MediaSession` |
|
* API. If not specified, will be populated via the `poster`, if |
|
* available. |
|
* |
|
* @property {string} [poster] |
|
* URL to an image that will display before playback. |
|
* |
|
* @property {Tech~SourceObject|Tech~SourceObject[]|string} [src] |
|
* A single source object, an array of source objects, or a string |
|
* referencing a URL to a media source. It is _highly recommended_ |
|
* that an object or array of objects is used here, so that source |
|
* selection algorithms can take the `type` into account. |
|
* |
|
* @property {string} [title] |
|
* Unused, except if this object is passed to the `MediaSession` |
|
* API. |
|
* |
|
* @property {Object[]} [textTracks] |
|
* An array of objects to be used to create text tracks, following |
|
* the {@link https://www.w3.org/TR/html50/embedded-content-0.html#the-track-element|native track element format}. |
|
* For ease of removal, these will be created as "remote" text |
|
* tracks and set to automatically clean up on source changes. |
|
* |
|
* These objects may have properties like `src`, `kind`, `label`, |
|
* and `language`, see {@link Tech#createRemoteTextTrack}. |
|
*/ |
|
|
|
/** |
|
* Populate the player using a {@link Player~MediaObject|MediaObject}. |
|
* |
|
* @param {Player~MediaObject} media |
|
* A media object. |
|
* |
|
* @param {Function} ready |
|
* A callback to be called when the player is ready. |
|
*/ |
|
; |
|
|
|
_proto.loadMedia = function loadMedia(media, ready) {
  var _this15 = this;

  // Ignore anything that is not a media object.
  if (!media || typeof media !== 'object') {
    return;
  }

  this.reset(); // Clone the media object so it cannot be mutated from outside.

  this.cache_.media = mergeOptions(media);
  var _this$cache_$media = this.cache_.media,
      artwork = _this$cache_$media.artwork,
      poster = _this$cache_$media.poster,
      src = _this$cache_$media.src,
      textTracks = _this$cache_$media.textTracks; // If `artwork` is not given, create it using `poster`.


  if (!artwork && poster) {
    this.cache_.media.artwork = [{
      src: poster,
      type: getMimetype(poster)
    }];
  }

  if (src) {
    this.src(src);
  }

  if (poster) {
    this.poster(poster);
  }

  // Tracks are added as "remote" text tracks; per the MediaObject typedef
  // above, they are created so they clean up automatically on source
  // changes (the `false` second argument).
  if (Array.isArray(textTracks)) {
    textTracks.forEach(function (tt) {
      return _this15.addRemoteTextTrack(tt, false);
    });
  }

  this.ready(ready);
}
|
/** |
|
* Get a clone of the current {@link Player~MediaObject} for this player. |
|
* |
|
* If the `loadMedia` method has not been used, will attempt to return a |
|
* {@link Player~MediaObject} based on the current state of the player. |
|
* |
|
* @return {Player~MediaObject} |
|
*/ |
|
; |
|
|
|
_proto.getMedia = function getMedia() {
  // If loadMedia() populated the cache, hand back a clone of it.
  if (this.cache_.media) {
    return mergeOptions(this.cache_.media);
  }

  // Otherwise, synthesize a media object from the player's current state.
  var poster = this.poster();
  var textTracks = Array.prototype.map.call(this.remoteTextTracks(), function (tt) {
    return {
      kind: tt.kind,
      label: tt.label,
      language: tt.language,
      src: tt.src
    };
  });
  var media = {
    src: this.currentSources(),
    textTracks: textTracks
  };

  if (poster) {
    media.poster = poster;
    media.artwork = [{
      src: poster,
      type: getMimetype(poster)
    }];
  }

  return media;
}
|
/** |
|
* Gets tag settings |
|
* |
|
* @param {Element} tag |
|
* The player tag |
|
* |
|
* @return {Object} |
|
* An object containing all of the settings |
|
* for a player tag |
|
*/ |
|
; |
|
|
|
Player.getTagSettings = function getTagSettings(tag) {
  var baseOptions = {
    sources: [],
    tracks: []
  };
  var tagOptions = getAttributes(tag);
  var dataSetup = tagOptions['data-setup'];

  // The vjs-fill / vjs-fluid classes act as shorthand for the
  // corresponding boolean options.
  if (hasClass(tag, 'vjs-fill')) {
    tagOptions.fill = true;
  }

  if (hasClass(tag, 'vjs-fluid')) {
    tagOptions.fluid = true;
  } // Check if data-setup attr exists.


  if (dataSetup !== null) {
    // Parse options JSON
    // If empty string, make it a parsable json object.
    var _safeParseTuple = tuple(dataSetup || '{}'),
        err = _safeParseTuple[0],
        data = _safeParseTuple[1];

    // Parse errors are logged but non-fatal.
    if (err) {
      log.error(err);
    }

    assign(tagOptions, data);
  }

  assign(baseOptions, tagOptions); // Get tag children settings

  if (tag.hasChildNodes()) {
    var children = tag.childNodes;

    for (var i = 0, j = children.length; i < j; i++) {
      var child = children[i]; // Change case needed: http://ejohn.org/blog/nodename-case-sensitivity/

      var childName = child.nodeName.toLowerCase();

      // <source> and <track> child elements become entries in the
      // sources/tracks arrays.
      if (childName === 'source') {
        baseOptions.sources.push(getAttributes(child));
      } else if (childName === 'track') {
        baseOptions.tracks.push(getAttributes(child));
      }
    }
  }

  return baseOptions;
}
|
/** |
|
* Determine whether or not flexbox is supported |
|
* |
|
* @return {boolean} |
|
* - true if flexbox is supported |
|
* - false if flexbox is not supported |
|
*/ |
|
; |
|
|
|
_proto.flexNotSupported_ = function flexNotSupported_() {
  // Feature-detect flexbox by probing a detached element's style object.
  var elem = document.createElement('i'); // Note: We don't actually use flexBasis (or flexOrder), but it's one of the more
  // common flex features that we can rely on when checking for flex support.


  return !('flexBasis' in elem.style || 'webkitFlexBasis' in elem.style || 'mozFlexBasis' in elem.style || 'msFlexBasis' in elem.style || // IE10-specific (2012 flex spec), available for completeness
  'msFlexOrder' in elem.style);
};
|
|
|
return Player; |
|
}(Component); |
|
/** |
|
* Get the {@link VideoTrackList} |
|
* @link https://html.spec.whatwg.org/multipage/embedded-content.html#videotracklist |
|
* |
|
* @return {VideoTrackList} |
|
* the current video track list |
|
* |
|
* @method Player.prototype.videoTracks |
|
*/ |
|
|
|
/** |
|
* Get the {@link AudioTrackList} |
|
* @link https://html.spec.whatwg.org/multipage/embedded-content.html#audiotracklist |
|
* |
|
* @return {AudioTrackList} |
|
* the current audio track list |
|
* |
|
* @method Player.prototype.audioTracks |
|
*/ |
|
|
|
/** |
|
* Get the {@link TextTrackList} |
|
* |
|
* @link http://www.w3.org/html/wg/drafts/html/master/embedded-content-0.html#dom-media-texttracks |
|
* |
|
* @return {TextTrackList} |
|
* the current text track list |
|
* |
|
* @method Player.prototype.textTracks |
|
*/ |
|
|
|
/** |
|
* Get the remote {@link TextTrackList} |
|
* |
|
* @return {TextTrackList} |
|
* The current remote text track list |
|
* |
|
* @method Player.prototype.remoteTextTracks |
|
*/ |
|
|
|
/** |
|
* Get the remote {@link HtmlTrackElementList} tracks. |
|
* |
|
* @return {HtmlTrackElementList} |
|
* The current remote text track element list |
|
* |
|
* @method Player.prototype.remoteTextTrackEls |
|
*/ |
|
|
|
|
|
// For each track-list type described in ALL, add a getter to the Player
// prototype that proxies to the current tech when one exists.
ALL.names.forEach(function (name$$1) {
  var props = ALL[name$$1];

  Player.prototype[props.getterName] = function () {
    if (this.tech_) {
      return this.tech_[props.getterName]();
    } // if we have not yet loadTech_, we create {video,audio,text}Tracks_
    // these will be passed to the tech during loading


    this[props.privateName] = this[props.privateName] || new props.ListClass();
    return this[props.privateName];
  };
});
|
/** |
|
* Global enumeration of players. |
|
* |
|
* The keys are the player IDs and the values are either the {@link Player} |
|
* instance or `null` for disposed players. |
|
* |
|
* @type {Object} |
|
*/ |
|
|
|
Player.players = {};
// Reference the environment's navigator (from the window shim); may be
// undefined outside a browser — the option defaults below guard for that.
var navigator = window$1.navigator;
|
/* |
|
* Player instance options, surfaced using options |
|
* options = Player.prototype.options_ |
|
* Make changes in options, not here. |
|
* |
|
* @type {Object} |
|
* @private |
|
*/ |
|
|
|
Player.prototype.options_ = {
  // Default order of fallback technology
  techOrder: Tech.defaultTechOrder_,
  html5: {},
  flash: {},
  // default inactivity timeout
  inactivityTimeout: 2000,
  // default playback rates
  playbackRates: [],
  // Add playback rate selection by adding rates
  // 'playbackRates': [0.5, 1, 1.5, 2],
  liveui: false,
  // Included control sets
  children: ['mediaLoader', 'posterImage', 'textTrackDisplay', 'loadingSpinner', 'bigPlayButton', 'liveTracker', 'controlBar', 'errorDisplay', 'textTrackSettings', 'resizeManager'],
  // Default to the browser's preferred language, falling back to English.
  language: navigator && (navigator.languages && navigator.languages[0] || navigator.userLanguage || navigator.language) || 'en',
  // locales and their language translations
  languages: {},
  // Default message to show when a video cannot be played.
  notSupportedMessage: 'No compatible source was found for this media.',
  // Custom breakpoints and responsive mode are off by default; see
  // Player#breakpoints() and Player#responsive().
  breakpoints: {},
  responsive: false
};
|
// Each of the following Player methods is a read-only pass-through that
// simply queries the current tech via techGet_.
[
/**
 * Returns whether or not the player is in the "ended" state.
 *
 * @return {Boolean} True if the player is in the ended state, false if not.
 * @method Player#ended
 */
'ended',
/**
 * Returns whether or not the player is in the "seeking" state.
 *
 * @return {Boolean} True if the player is in the seeking state, false if not.
 * @method Player#seeking
 */
'seeking',
/**
 * Returns the TimeRanges of the media that are currently available
 * for seeking to.
 *
 * @return {TimeRanges} the seekable intervals of the media timeline
 * @method Player#seekable
 */
'seekable',
/**
 * Returns the current state of network activity for the element, from
 * the codes in the list below.
 * - NETWORK_EMPTY (numeric value 0)
 *   The element has not yet been initialised. All attributes are in
 *   their initial states.
 * - NETWORK_IDLE (numeric value 1)
 *   The element's resource selection algorithm is active and has
 *   selected a resource, but it is not actually using the network at
 *   this time.
 * - NETWORK_LOADING (numeric value 2)
 *   The user agent is actively trying to download data.
 * - NETWORK_NO_SOURCE (numeric value 3)
 *   The element's resource selection algorithm is active, but it has
 *   not yet found a resource to use.
 *
 * @see https://html.spec.whatwg.org/multipage/embedded-content.html#network-states
 * @return {number} the current network activity state
 * @method Player#networkState
 */
'networkState',
/**
 * Returns a value that expresses the current state of the element
 * with respect to rendering the current playback position, from the
 * codes in the list below.
 * - HAVE_NOTHING (numeric value 0)
 *   No information regarding the media resource is available.
 * - HAVE_METADATA (numeric value 1)
 *   Enough of the resource has been obtained that the duration of the
 *   resource is available.
 * - HAVE_CURRENT_DATA (numeric value 2)
 *   Data for the immediate current playback position is available.
 * - HAVE_FUTURE_DATA (numeric value 3)
 *   Data for the immediate current playback position is available, as
 *   well as enough data for the user agent to advance the current
 *   playback position in the direction of playback.
 * - HAVE_ENOUGH_DATA (numeric value 4)
 *   The user agent estimates that enough data is available for
 *   playback to proceed uninterrupted.
 *
 * @see https://html.spec.whatwg.org/multipage/embedded-content.html#dom-media-readystate
 * @return {number} the current playback rendering state
 * @method Player#readyState
 */
'readyState'].forEach(function (fn) {
  Player.prototype[fn] = function () {
    return this.techGet_(fn);
  };
});
|
// For each tech event in TECH_EVENTS_RETRIGGER, add a handleTech<Event>_
// handler that re-triggers the same event on the player.
TECH_EVENTS_RETRIGGER.forEach(function (event) {
  Player.prototype["handleTech" + toTitleCase(event) + "_"] = function () {
    return this.trigger(event);
  };
});
|
/** |
|
* Fired when the player has initial duration and dimension information |
|
* |
|
* @event Player#loadedmetadata |
|
* @type {EventTarget~Event} |
|
*/ |
|
|
|
/** |
|
* Fired when the player has downloaded data at the current playback position |
|
* |
|
* @event Player#loadeddata |
|
* @type {EventTarget~Event} |
|
*/ |
|
|
|
/** |
|
* Fired when the current playback position has changed * |
|
* During playback this is fired every 15-250 milliseconds, depending on the |
|
* playback technology in use. |
|
* |
|
* @event Player#timeupdate |
|
* @type {EventTarget~Event} |
|
*/ |
|
|
|
/** |
|
* Fired when the volume changes |
|
* |
|
* @event Player#volumechange |
|
* @type {EventTarget~Event} |
|
*/ |
|
|
|
/** |
|
* Reports whether or not a player has a plugin available. |
|
* |
|
* This does not report whether or not the plugin has ever been initialized |
|
* on this player. For that, [usingPlugin]{@link Player#usingPlugin}. |
|
* |
|
* @method Player#hasPlugin |
|
* @param {string} name |
|
* The name of a plugin. |
|
* |
|
* @return {boolean} |
|
* Whether or not this player has the requested plugin available. |
|
*/ |
|
|
|
/** |
|
* Reports whether or not a player is using a plugin by name. |
|
* |
|
* For basic plugins, this only reports whether the plugin has _ever_ been |
|
* initialized on this player. |
|
* |
|
* @method Player#usingPlugin |
|
* @param {string} name |
|
* The name of a plugin. |
|
* |
|
* @return {boolean} |
|
* Whether or not this player is using the requested plugin. |
|
*/ |
|
|
|
Component.registerComponent('Player', Player); // Register Player in the Component registry.
|
|
|
/** |
|
* The base plugin name. |
|
* |
|
* @private |
|
* @constant |
|
* @type {string} |
|
*/ |
|
|
|
// Name reserved for the base Plugin class itself; registerPlugin skips
// creating a Player method for it and deregisterPlugin refuses to remove it.
var BASE_PLUGIN_NAME = 'plugin';
/**
 * The key on which a player's active plugins cache is stored.
 *
 * @private
 * @constant
 * @type {string}
 */

var PLUGIN_CACHE_KEY = 'activePlugins_';
/**
 * Stores registered plugins in a private space.
 *
 * @private
 * @type {Object}
 */

var pluginStorage = {};
|
/** |
|
* Reports whether or not a plugin has been registered. |
|
* |
|
* @private |
|
* @param {string} name |
|
* The name of a plugin. |
|
* |
|
* @return {boolean} |
|
* Whether or not the plugin has been registered. |
|
*/ |
|
|
|
var pluginExists = function pluginExists(name) {
  // Use Object.prototype.hasOwnProperty.call rather than calling
  // pluginStorage.hasOwnProperty directly: pluginStorage is a plain object,
  // so registering a plugin named "hasOwnProperty" would shadow the method
  // and break every subsequent lookup.
  return Object.prototype.hasOwnProperty.call(pluginStorage, name);
};
|
/** |
|
* Get a single registered plugin by name. |
|
* |
|
* @private |
|
* @param {string} name |
|
* The name of a plugin. |
|
* |
|
* @return {Function|undefined} |
|
* The plugin (or undefined). |
|
*/ |
|
|
|
|
|
var getPlugin = function getPlugin(name) {
  // Only registered names resolve; anything else yields undefined.
  if (pluginExists(name)) {
    return pluginStorage[name];
  }
  return undefined;
};
|
/** |
|
* Marks a plugin as "active" on a player. |
|
* |
|
* Also, ensures that the player has an object for tracking active plugins. |
|
* |
|
* @private |
|
* @param {Player} player |
|
* A Video.js player instance. |
|
* |
|
* @param {string} name |
|
* The name of a plugin. |
|
*/ |
|
|
|
|
|
var markPluginAsActive = function markPluginAsActive(player, name) {
  // Lazily create the per-player active-plugins cache, then flag the
  // named plugin as active in it.
  if (!player[PLUGIN_CACHE_KEY]) {
    player[PLUGIN_CACHE_KEY] = {};
  }

  player[PLUGIN_CACHE_KEY][name] = true;
};
|
/** |
|
* Triggers a pair of plugin setup events. |
|
* |
|
* @private |
|
* @param {Player} player |
|
* A Video.js player instance. |
|
* |
|
* @param {Plugin~PluginEventHash} hash |
|
* A plugin event hash. |
|
* |
|
* @param {boolean} [before] |
|
* If true, prefixes the event name with "before". In other words, |
|
* use this to trigger "beforepluginsetup" instead of "pluginsetup". |
|
*/ |
|
|
|
|
|
var triggerSetupEvent = function triggerSetupEvent(player, hash, before) {
  // Fire "(before)pluginsetup" generically, then again suffixed with the
  // plugin's name, passing the same event hash both times.
  var prefix = before ? 'before' : '';
  var eventName = prefix + 'pluginsetup';
  player.trigger(eventName, hash);
  player.trigger(eventName + ':' + hash.name, hash);
};
|
/** |
|
* Takes a basic plugin function and returns a wrapper function which marks |
|
* on the player that the plugin has been activated. |
|
* |
|
* @private |
|
* @param {string} name |
|
* The name of the plugin. |
|
* |
|
* @param {Function} plugin |
|
* The basic plugin. |
|
* |
|
* @return {Function} |
|
* A wrapper function for the given plugin. |
|
*/ |
|
|
|
|
|
var createBasicPlugin = function createBasicPlugin(name, plugin) {
  var basicPluginWrapper = function basicPluginWrapper() {
    // We trigger the "beforepluginsetup" and "pluginsetup" events on the player
    // regardless, but we want the hash to be consistent with the hash provided
    // for advanced plugins.
    //
    // The only potentially counter-intuitive thing here is the `instance` in
    // the "pluginsetup" event is the value returned by the `plugin` function.
    triggerSetupEvent(this, {
      name: name,
      plugin: plugin,
      instance: null
    }, true);
    // Invoke the basic plugin with the player as `this`, forwarding all
    // arguments unchanged.
    var instance = plugin.apply(this, arguments);
    markPluginAsActive(this, name);
    triggerSetupEvent(this, {
      name: name,
      plugin: plugin,
      instance: instance
    });
    return instance;
  };

  // Copy the plugin function's own enumerable properties (e.g. VERSION)
  // onto the wrapper so static data stays reachable.
  Object.keys(plugin).forEach(function (prop) {
    basicPluginWrapper[prop] = plugin[prop];
  });
  return basicPluginWrapper;
};
|
/** |
|
* Takes a plugin sub-class and returns a factory function for generating |
|
* instances of it. |
|
* |
|
* This factory function will replace itself with an instance of the requested |
|
* sub-class of Plugin. |
|
* |
|
* @private |
|
* @param {string} name |
|
* The name of the plugin. |
|
* |
|
* @param {Plugin} PluginSubClass |
|
* The advanced plugin. |
|
* |
|
* @return {Function} |
|
*/ |
|
|
|
|
|
var createPluginFactory = function createPluginFactory(name, PluginSubClass) {
  // Add a `name` property to the plugin prototype so that each plugin can
  // refer to itself by name.
  PluginSubClass.prototype.name = name;
  return function () {
    triggerSetupEvent(this, {
      name: name,
      plugin: PluginSubClass,
      instance: null
    }, true);

    // Collect call arguments (transpiled rest-parameter pattern).
    for (var _len = arguments.length, args = new Array(_len), _key = 0; _key < _len; _key++) {
      args[_key] = arguments[_key];
    }

    // Construct the plugin subclass with the player (`this`) prepended to
    // the caller's arguments.
    var instance = _construct(PluginSubClass, [this].concat(args)); // The plugin is replaced by a function that returns the current instance.


    this[name] = function () {
      return instance;
    };

    triggerSetupEvent(this, instance.getEventHash());
    return instance;
  };
};
|
/** |
|
* Parent class for all advanced plugins. |
|
* |
|
* @mixes module:evented~EventedMixin |
|
* @mixes module:stateful~StatefulMixin |
|
* @fires Player#beforepluginsetup |
|
* @fires Player#beforepluginsetup:$name |
|
* @fires Player#pluginsetup |
|
* @fires Player#pluginsetup:$name |
|
* @listens Player#dispose |
|
* @throws {Error} |
|
* If attempting to instantiate the base {@link Plugin} class |
|
* directly instead of via a sub-class. |
|
*/ |
|
|
|
|
|
var Plugin = |
|
/*#__PURE__*/ |
|
function () { |
|
/** |
|
* Creates an instance of this class. |
|
* |
|
* Sub-classes should call `super` to ensure plugins are properly initialized. |
|
* |
|
* @param {Player} player |
|
* A Video.js player instance. |
|
*/ |
|
function Plugin(player) {
  // The base class is abstract; only sub-classes may be constructed.
  if (this.constructor === Plugin) {
    throw new Error('Plugin must be sub-classed; not directly instantiated.');
  }

  this.player = player; // Make this object evented, but remove the added `trigger` method so we
  // use the prototype version instead.


  evented(this);
  delete this.trigger;
  stateful(this, this.constructor.defaultState);
  markPluginAsActive(player, this.name); // Auto-bind the dispose method so we can use it as a listener and unbind
  // it later easily.


  this.dispose = bind(this, this.dispose); // If the player is disposed, dispose the plugin.


  player.on('dispose', this.dispose);
}
|
/** |
|
* Get the version of the plugin that was set on <pluginName>.VERSION |
|
*/ |
|
|
|
|
|
var _proto = Plugin.prototype; // Shorthand used by all instance-method assignments below.
|
|
|
_proto.version = function version() {
  // VERSION is a static that plugin authors may set on their subclass
  // (see the JSDoc above); undefined when absent.
  return this.constructor.VERSION;
}
|
/** |
|
* Each event triggered by plugins includes a hash of additional data with |
|
* conventional properties. |
|
* |
|
* This returns that object or mutates an existing hash. |
|
* |
|
* @param {Object} [hash={}] |
|
* An object to be used as event an event hash. |
|
* |
|
* @return {Plugin~PluginEventHash} |
|
* An event hash object with provided properties mixed-in. |
|
*/ |
|
; |
|
|
|
_proto.getEventHash = function getEventHash(hash) {
  // Default to a fresh object; otherwise mutate the caller's hash in place.
  var eventHash = hash === void 0 ? {} : hash;

  // Stamp the conventional plugin-event properties onto the hash.
  eventHash.name = this.name;
  eventHash.plugin = this.constructor;
  eventHash.instance = this;
  return eventHash;
}
|
/** |
|
* Triggers an event on the plugin object and overrides |
|
* {@link module:evented~EventedMixin.trigger|EventedMixin.trigger}. |
|
* |
|
* @param {string|Object} event |
|
* An event type or an object with a type property. |
|
* |
|
* @param {Object} [hash={}] |
|
* Additional data hash to merge with a |
|
* {@link Plugin~PluginEventHash|PluginEventHash}. |
|
* |
|
* @return {boolean} |
|
* Whether or not default was prevented. |
|
*/ |
|
; |
|
|
|
_proto.trigger = function trigger$$1(event, hash) {
  if (hash === void 0) {
    hash = {};
  }

  // Dispatch on the plugin's own event bus via the module-level `trigger`
  // helper, mixing the conventional name/plugin/instance properties into
  // the hash first.
  return trigger(this.eventBusEl_, event, this.getEventHash(hash));
}
|
/** |
|
* Handles "statechanged" events on the plugin. No-op by default, override by |
|
* subclassing. |
|
* |
|
* @abstract |
|
* @param {Event} e |
|
* An event object provided by a "statechanged" event. |
|
* |
|
* @param {Object} e.changes |
|
* An object describing changes that occurred with the "statechanged" |
|
* event. |
|
*/ |
|
; |
|
|
|
_proto.handleStateChanged = function handleStateChanged(e) {} // Intentionally empty; subclasses override to react to "statechanged".
|
/** |
|
* Disposes a plugin. |
|
* |
|
* Subclasses can override this if they want, but for the sake of safety, |
|
* it's probably best to subscribe the "dispose" event. |
|
* |
|
* @fires Plugin#dispose |
|
*/ |
|
; |
|
|
|
_proto.dispose = function dispose() {
  var name = this.name,
      player = this.player;
  /**
   * Signals that a advanced plugin is about to be disposed.
   *
   * @event Plugin#dispose
   * @type {EventTarget~Event}
   */


  this.trigger('dispose');
  this.off();
  player.off('dispose', this.dispose); // Eliminate any possible sources of leaking memory by clearing up
  // references between the player and the plugin instance and nulling out
  // the plugin's state and replacing methods with a function that throws.


  // Mark inactive (set to false rather than deleted) in the per-player
  // plugin cache.
  player[PLUGIN_CACHE_KEY][name] = false;
  this.player = this.state = null; // Finally, replace the plugin name on the player with a new factory
  // function, so that the plugin is ready to be set up again.


  player[name] = createPluginFactory(name, pluginStorage[name]);
}
|
/** |
|
* Determines if a plugin is a basic plugin (i.e. not a sub-class of `Plugin`). |
|
* |
|
* @param {string|Function} plugin |
|
* If a string, matches the name of a plugin. If a function, will be |
|
* tested directly. |
|
* |
|
* @return {boolean} |
|
* Whether or not a plugin is a basic plugin. |
|
*/ |
|
; |
|
|
|
Plugin.isBasic = function isBasic(plugin) {
  // Accept either a registered plugin name or a function directly.
  var candidate = typeof plugin === 'string' ? getPlugin(plugin) : plugin;

  if (typeof candidate !== 'function') {
    return false;
  }

  // Basic plugins are plain functions whose prototype chain does not
  // include Plugin.prototype.
  return !Plugin.prototype.isPrototypeOf(candidate.prototype);
}
|
/** |
|
* Register a Video.js plugin. |
|
* |
|
* @param {string} name |
|
* The name of the plugin to be registered. Must be a string and |
|
* must not match an existing plugin or a method on the `Player` |
|
* prototype. |
|
* |
|
* @param {Function} plugin |
|
* A sub-class of `Plugin` or a function for basic plugins. |
|
* |
|
* @return {Function} |
|
* For advanced plugins, a factory function for that plugin. For |
|
* basic plugins, a wrapper function that initializes the plugin. |
|
*/ |
|
; |
|
|
|
Plugin.registerPlugin = function registerPlugin(name, plugin) {
  if (typeof name !== 'string') {
    throw new Error("Illegal plugin name, \"" + name + "\", must be a string, was " + typeof name + ".");
  }

  // Re-registering an existing name is allowed (last registration wins,
  // see the unconditional assignment below) but warned about; clashing
  // with an existing Player method is a hard error.
  if (pluginExists(name)) {
    log.warn("A plugin named \"" + name + "\" already exists. You may want to avoid re-registering plugins!");
  } else if (Player.prototype.hasOwnProperty(name)) {
    throw new Error("Illegal plugin name, \"" + name + "\", cannot share a name with an existing player method!");
  }

  if (typeof plugin !== 'function') {
    throw new Error("Illegal plugin for \"" + name + "\", must be a function, was " + typeof plugin + ".");
  }

  pluginStorage[name] = plugin; // Add a player prototype method for all sub-classed plugins (but not for
  // the base Plugin class).


  if (name !== BASE_PLUGIN_NAME) {
    if (Plugin.isBasic(plugin)) {
      Player.prototype[name] = createBasicPlugin(name, plugin);
    } else {
      Player.prototype[name] = createPluginFactory(name, plugin);
    }
  }

  return plugin;
}
|
/** |
|
* De-register a Video.js plugin. |
|
* |
|
* @param {string} name |
|
* The name of the plugin to be de-registered. Must be a string that |
|
* matches an existing plugin. |
|
* |
|
* @throws {Error} |
|
* If an attempt is made to de-register the base plugin. |
|
*/ |
|
; |
|
|
|
Plugin.deregisterPlugin = function deregisterPlugin(name) {
  // The base plugin must always remain registered.
  if (name === BASE_PLUGIN_NAME) {
    throw new Error('Cannot de-register base plugin.');
  }

  // Unknown names are a silent no-op.
  if (!pluginExists(name)) {
    return;
  }

  delete pluginStorage[name];
  delete Player.prototype[name];
}
|
/** |
|
* Gets an object containing multiple Video.js plugins. |
|
* |
|
* @param {Array} [names] |
|
* If provided, should be an array of plugin names. Defaults to _all_ |
|
* plugin names. |
|
* |
|
* @return {Object|undefined} |
|
* An object containing plugin(s) associated with their name(s) or |
|
* `undefined` if no matching plugins exist). |
|
*/ |
|
; |
|
|
|
Plugin.getPlugins = function getPlugins(names) {
  // Default to every registered plugin name; do not mutate the argument.
  var list = names === void 0 ? Object.keys(pluginStorage) : names;
  var result;

  for (var i = 0; i < list.length; i++) {
    var plugin = getPlugin(list[i]);

    if (plugin) {
      // The result object is created lazily so that no matches yields
      // `undefined` rather than an empty object.
      if (!result) {
        result = {};
      }

      result[list[i]] = plugin;
    }
  }

  return result;
}
|
/** |
|
* Gets a plugin's version, if available |
|
* |
|
* @param {string} name |
|
* The name of a plugin. |
|
* |
|
* @return {string} |
|
* The plugin's version or an empty string. |
|
*/ |
|
; |
|
|
|
Plugin.getPluginVersion = function getPluginVersion(name) {
  // Look up the plugin; report its VERSION when both exist, else ''.
  var registered = getPlugin(name);

  if (registered && registered.VERSION) {
    return registered.VERSION;
  }

  return '';
};
|
|
|
return Plugin; |
|
}(); |
|
/** |
|
* Gets a plugin by name if it exists. |
|
* |
|
* @static |
|
* @method getPlugin |
|
* @memberOf Plugin |
|
* @param {string} name |
|
* The name of a plugin. |
|
* |
|
* @returns {Function|undefined} |
|
* The plugin (or `undefined`). |
|
*/ |
|
|
|
|
|
// Expose the module-level lookup helper as a static method on the class.
Plugin.getPlugin = getPlugin;
/**
 * The name of the base plugin class as it is registered.
 *
 * @type {string}
 */

Plugin.BASE_PLUGIN_NAME = BASE_PLUGIN_NAME;
// Register the base Plugin class itself so lookups/existence checks know
// about it (it is deliberately not exposed on the Player prototype).
Plugin.registerPlugin(BASE_PLUGIN_NAME, Plugin);
|
/** |
|
* Documented in player.js |
|
* |
|
* @ignore |
|
*/ |
|
|
|
Player.prototype.usingPlugin = function (name) {
  // With no plugin cache on this player, nothing has been set up yet.
  var cache = this[PLUGIN_CACHE_KEY];

  if (!cache) {
    return false;
  }

  return cache[name] === true;
};
|
/** |
|
* Documented in player.js |
|
* |
|
* @ignore |
|
*/ |
|
|
|
|
|
Player.prototype.hasPlugin = function (name) {
  // Coerce the registry lookup into a strict boolean.
  return Boolean(pluginExists(name));
};
|
/** |
|
* Signals that a plugin is about to be set up on a player. |
|
* |
|
* @event Player#beforepluginsetup |
|
* @type {Plugin~PluginEventHash} |
|
*/ |
|
|
|
/** |
|
* Signals that a plugin is about to be set up on a player - by name. The name |
|
* is the name of the plugin. |
|
* |
|
* @event Player#beforepluginsetup:$name |
|
* @type {Plugin~PluginEventHash} |
|
*/ |
|
|
|
/** |
|
* Signals that a plugin has just been set up on a player. |
|
* |
|
* @event Player#pluginsetup |
|
* @type {Plugin~PluginEventHash} |
|
*/ |
|
|
|
/** |
|
* Signals that a plugin has just been set up on a player - by name. The name |
|
* is the name of the plugin. |
|
* |
|
* @event Player#pluginsetup:$name |
|
* @type {Plugin~PluginEventHash} |
|
*/ |
|
|
|
/** |
|
* @typedef {Object} Plugin~PluginEventHash |
|
* |
|
* @property {string} instance |
|
* For basic plugins, the return value of the plugin function. For |
|
* advanced plugins, the plugin instance on which the event is fired. |
|
* |
|
* @property {string} name |
|
* The name of the plugin. |
|
* |
|
* @property {string} plugin |
|
* For basic plugins, the plugin function. For advanced plugins, the |
|
* plugin class/constructor. |
|
*/ |
|
|
|
/** |
|
* @file extend.js |
|
* @module extend |
|
*/ |
|
|
|
/** |
|
* A combination of node inherits and babel's inherits (after transpile). |
|
* Both work the same but node adds `super_` to the subClass |
|
* and Bable adds the superClass as __proto__. Both seem useful. |
|
* |
|
* @param {Object} subClass |
|
* The class to inherit to |
|
* |
|
* @param {Object} superClass |
|
* The class to inherit from |
|
* |
|
* @private |
|
*/ |
|
var _inherits$1 = function _inherits(subClass, superClass) {
  // Only a function or `null` is a legal super expression.
  var superIsUsable = typeof superClass === 'function' || superClass === null;

  if (!superIsUsable) {
    throw new TypeError('Super expression must either be null or a function, not ' + typeof superClass);
  }

  // Babel-style: fresh prototype object with a non-enumerable constructor.
  var parentProto = superClass && superClass.prototype;

  subClass.prototype = Object.create(parentProto, {
    constructor: {
      value: subClass,
      enumerable: false,
      writable: true,
      configurable: true
    }
  });

  // Node-style marker so both inheritance conventions are satisfied.
  if (superClass) {
    subClass.super_ = superClass;
  }
};
|
/** |
|
* Used to subclass an existing class by emulating ES subclassing using the |
|
* `extends` keyword. |
|
* |
|
* @function |
|
* @example |
|
* var MyComponent = videojs.extend(videojs.getComponent('Component'), { |
|
* myCustomMethod: function() { |
|
* // Do things in my method. |
|
* } |
|
* }); |
|
* |
|
* @param {Function} superClass |
|
* The class to inherit from |
|
* |
|
* @param {Object} [subClassMethods={}] |
|
* Methods of the new class |
|
* |
|
* @return {Function} |
|
* The new class with subClassMethods that inherited superClass. |
|
*/ |
|
|
|
|
|
var extend$1 = function extend(superClass, subClassMethods) {
  // Default constructor simply delegates to the super class.
  var subClass = function subClass() {
    superClass.apply(this, arguments);
  };

  var methods = {};

  if (subClassMethods === void 0) {
    subClassMethods = {};
  }

  if (typeof subClassMethods === 'function') {
    // A bare function is used directly as the constructor.
    subClass = subClassMethods;
  } else if (typeof subClassMethods === 'object') {
    // An object of methods may carry its own `constructor` override.
    if (subClassMethods.constructor !== Object.prototype.constructor) {
      subClass = subClassMethods.constructor;
    }

    methods = subClassMethods;
  }

  _inherits$1(subClass, superClass);

  // Copy the supplied own methods/properties onto the new prototype.
  Object.keys(methods).forEach(function (key) {
    subClass.prototype[key] = methods[key];
  });

  return subClass;
};
|
|
|
/** |
|
* @file video.js |
|
* @module videojs |
|
*/ |
|
/** |
|
* Normalize an `id` value by trimming off a leading `#` |
|
* |
|
* @private |
|
* @param {string} id |
|
* A string, maybe with a leading `#`. |
|
* |
|
* @return {string} |
|
* The string, without any leading `#`. |
|
*/ |
|
|
|
var normalizeId = function normalizeId(id) {
  // Strip a single leading '#' (CSS-selector style IDs); otherwise pass through.
  if (id.charAt(0) === '#') {
    return id.slice(1);
  }

  return id;
};
|
/** |
|
* The `videojs()` function doubles as the main function for users to create a |
|
* {@link Player} instance as well as the main library namespace. |
|
* |
|
* It can also be used as a getter for a pre-existing {@link Player} instance. |
|
* However, we _strongly_ recommend using `videojs.getPlayer()` for this |
|
* purpose because it avoids any potential for unintended initialization. |
|
* |
|
* Due to [limitations](https://github.com/jsdoc3/jsdoc/issues/955#issuecomment-313829149) |
|
* of our JSDoc template, we cannot properly document this as both a function |
|
* and a namespace, so its function signature is documented here. |
|
* |
|
* #### Arguments |
|
* ##### id |
|
* string|Element, **required** |
|
* |
|
* Video element or video element ID. |
|
* |
|
* ##### options |
|
* Object, optional |
|
* |
|
* Options object for providing settings. |
|
* See: [Options Guide](https://docs.videojs.com/tutorial-options.html). |
|
* |
|
* ##### ready |
|
* {@link Component~ReadyCallback}, optional |
|
* |
|
* A function to be called when the {@link Player} and {@link Tech} are ready. |
|
* |
|
* #### Return Value |
|
* |
|
* The `videojs()` function returns a {@link Player} instance. |
|
* |
|
* @namespace |
|
* |
|
* @borrows AudioTrack as AudioTrack |
|
* @borrows Component.getComponent as getComponent |
|
* @borrows module:computed-style~computedStyle as computedStyle |
|
* @borrows module:events.on as on |
|
* @borrows module:events.one as one |
|
* @borrows module:events.off as off |
|
* @borrows module:events.trigger as trigger |
|
* @borrows EventTarget as EventTarget |
|
* @borrows module:extend~extend as extend |
|
* @borrows module:fn.bind as bind |
|
* @borrows module:format-time.formatTime as formatTime |
|
* @borrows module:format-time.resetFormatTime as resetFormatTime |
|
* @borrows module:format-time.setFormatTime as setFormatTime |
|
* @borrows module:merge-options.mergeOptions as mergeOptions |
|
* @borrows module:middleware.use as use |
|
* @borrows Player.players as players |
|
* @borrows Plugin.registerPlugin as registerPlugin |
|
* @borrows Plugin.deregisterPlugin as deregisterPlugin |
|
* @borrows Plugin.getPlugins as getPlugins |
|
* @borrows Plugin.getPlugin as getPlugin |
|
* @borrows Plugin.getPluginVersion as getPluginVersion |
|
* @borrows Tech.getTech as getTech |
|
* @borrows Tech.registerTech as registerTech |
|
* @borrows TextTrack as TextTrack |
|
* @borrows module:time-ranges.createTimeRanges as createTimeRange |
|
* @borrows module:time-ranges.createTimeRanges as createTimeRanges |
|
* @borrows module:url.isCrossOrigin as isCrossOrigin |
|
* @borrows module:url.parseUrl as parseUrl |
|
* @borrows VideoTrack as VideoTrack |
|
* |
|
* @param {string|Element} id |
|
* Video element or video element ID. |
|
* |
|
* @param {Object} [options] |
|
* Options object for providing settings. |
|
* See: [Options Guide](https://docs.videojs.com/tutorial-options.html). |
|
* |
|
* @param {Component~ReadyCallback} [ready] |
|
* A function to be called when the {@link Player} and {@link Tech} are |
|
* ready. |
|
* |
|
* @return {Player} |
|
* The `videojs()` function returns a {@link Player|Player} instance. |
|
*/ |
|
|
|
|
|
function videojs$1(id, options, ready) {
  // Reuse an existing player for this element/ID rather than constructing a
  // second instance on the same element.
  var player = videojs$1.getPlayer(id);

  if (player) {
    // Options cannot be re-applied to an already-initialised player.
    if (options) {
      log.warn("Player \"" + id + "\" is already initialised. Options will not be applied.");
    }

    if (ready) {
      player.ready(ready);
    }

    return player;
  }

  var el = typeof id === 'string' ? $('#' + normalizeId(id)) : id;

  if (!isEl(el)) {
    throw new TypeError('The element or ID supplied is not valid. (videojs)');
  } // document.body.contains(el) will only check if el is contained within that one document.
  // This causes problems for elements in iframes.
  // Instead, use the element's ownerDocument instead of the global document.
  // This will make sure that the element is indeed in the dom of that document.
  // Additionally, check that the document in question has a default view.
  // If the document is no longer attached to the dom, the defaultView of the document will be null.

  if (!el.ownerDocument.defaultView || !el.ownerDocument.body.contains(el)) {
    log.warn('The element supplied is not included in the DOM');
  }

  options = options || {};
  // Give every `beforesetup` hook a chance to amend the options. Each hook
  // receives a copy of the current options and must return a plain object;
  // any other return value is rejected with an error and ignored.
  videojs$1.hooks('beforesetup').forEach(function (hookFunction) {
    var opts = hookFunction(el, mergeOptions(options));

    if (!isObject(opts) || Array.isArray(opts)) {
      log.error('please return an object in beforesetup hooks');
      return;
    }

    options = mergeOptions(options, opts);
  }); // We get the current "Player" component here in case an integration has
  // replaced it with a custom player.

  var PlayerComponent = Component.getComponent('Player');
  player = new PlayerComponent(el, options, ready);
  // Notify `setup` hooks with the freshly created player.
  videojs$1.hooks('setup').forEach(function (hookFunction) {
    return hookFunction(player);
  });
  return player;
}
|
/** |
|
* An Object that contains lifecycle hooks as keys which point to an array |
|
* of functions that are run when a lifecycle is triggered |
|
* |
|
* @private |
|
*/ |
|
|
|
|
|
// Lifecycle name -> array of hook functions.
videojs$1.hooks_ = {};
/**
 * Get a list of hooks for a specific lifecycle
 *
 * @param {string} type
 *        the lifecyle to get hooks from
 *
 * @param {Function|Function[]} [fn]
 *        Optionally add a hook (or hooks) to the lifecycle that your are getting.
 *
 * @return {Array}
 *         an array of hooks, or an empty array if there are none.
 */

videojs$1.hooks = function (type, fn) {
  // Lazily create the bucket for this lifecycle.
  if (!videojs$1.hooks_[type]) {
    videojs$1.hooks_[type] = [];
  }

  // `concat` accepts a single function or an array of functions, and returns
  // a fresh array so existing references are never mutated.
  if (fn) {
    videojs$1.hooks_[type] = videojs$1.hooks_[type].concat(fn);
  }

  return videojs$1.hooks_[type];
};
|
/** |
|
* Add a function hook to a specific videojs lifecycle. |
|
* |
|
* @param {string} type |
|
* the lifecycle to hook the function to. |
|
* |
|
* @param {Function|Function[]} |
|
* The function or array of functions to attach. |
|
*/ |
|
|
|
|
|
videojs$1.hook = function (type, fn) {
  // `hooks` both registers and returns; this wrapper discards the return.
  videojs$1.hooks(type, fn);
};
/**
 * Add a function hook that will only run once to a specific videojs lifecycle.
 *
 * @param {string} type
 *        the lifecycle to hook the function to.
 *
 * @param {Function|Function[]}
 *        The function or array of functions to attach.
 */


videojs$1.hookOnce = function (type, fn) {
  // Wrap each hook so it removes itself on first invocation.
  var wrapped = [].concat(fn).map(function (original) {
    var wrapper = function wrapper() {
      videojs$1.removeHook(type, wrapper);
      return original.apply(void 0, arguments);
    };

    return wrapper;
  });

  videojs$1.hooks(type, wrapped);
};
/**
 * Remove a hook from a specific videojs lifecycle.
 *
 * @param {string} type
 *        the lifecycle that the function hooked to
 *
 * @param {Function} fn
 *        The hooked function to remove
 *
 * @return {boolean}
 *         Whether a hook was found and removed.
 */


videojs$1.removeHook = function (type, fn) {
  var index = videojs$1.hooks(type).indexOf(fn);

  if (index === -1) {
    return false;
  }

  // Replace the stored array with a pruned copy so callers holding the old
  // array are not affected mid-iteration.
  var pruned = videojs$1.hooks_[type].slice();
  pruned.splice(index, 1);
  videojs$1.hooks_[type] = pruned;
  return true;
}; // Add default styles
|
|
|
|
|
// Inject the default width/height/fluid styles once per page, unless the
// embedder opted out via `window.VIDEOJS_NO_DYNAMIC_STYLE` or we are not in
// a real DOM (e.g. server-side rendering).
if (window$1.VIDEOJS_NO_DYNAMIC_STYLE !== true && isReal()) {
  var style$1 = $('.vjs-styles-defaults');

  if (!style$1) {
    style$1 = createStyleElement('vjs-styles-defaults');
    var head = $('head');

    // Insert first so author stylesheets loaded later can override these defaults.
    if (head) {
      head.insertBefore(style$1, head.firstChild);
    }

    setTextContent(style$1, "\n .video-js {\n width: 300px;\n height: 150px;\n }\n\n .vjs-fluid {\n padding-top: 56.25%\n }\n ");
  }
} // Run Auto-load players
|
// You have to wait at least once in case this script is loaded after your |
|
// video in the DOM (weird behavior only with minified version) |
|
|
|
|
|
// Auto-setup players already in the DOM; delayed one tick in case this script
// is loaded after the video element (only observed with the minified build).
autoSetupTimeout(1, videojs$1);
/**
 * Current Video.js version. Follows [semantic versioning](https://semver.org/).
 *
 * @type {string}
 */

videojs$1.VERSION = version;
/**
 * The global options object. These are the settings that take effect
 * if no overrides are specified when the player is created.
 *
 * @type {Object}
 */

videojs$1.options = Player.prototype.options_;
/**
 * Get an object with the currently created players, keyed by player ID
 *
 * @return {Object}
 *         The created players
 */

videojs$1.getPlayers = function () {
  return Player.players;
};
|
/** |
|
* Get a single player based on an ID or DOM element. |
|
* |
|
* This is useful if you want to check if an element or ID has an associated |
|
* Video.js player, but not create one if it doesn't. |
|
* |
|
* @param {string|Element} id |
|
* An HTML element - `<video>`, `<audio>`, or `<video-js>` - |
|
* or a string matching the `id` of such an element. |
|
* |
|
* @return {Player|undefined} |
|
* A player instance or `undefined` if there is no player instance |
|
* matching the argument. |
|
*/ |
|
|
|
|
|
videojs$1.getPlayer = function (id) {
  var players = Player.players;
  var tag;

  if (typeof id === 'string') {
    var nId = normalizeId(id);

    // Fast path: the registry is keyed by player ID.
    if (players[nId]) {
      return players[nId];
    }

    tag = $('#' + nId);
  } else {
    tag = id;
  }

  // Non-elements (including a failed lookup above) yield `undefined`.
  if (!isEl(tag)) {
    return;
  }

  // Element may have a `player` property referring to an already created
  // player instance. If so, return that.
  var player = tag.player;
  var playerId = tag.playerId;

  if (player || players[playerId]) {
    return player || players[playerId];
  }
};
|
/** |
|
* Returns an array of all current players. |
|
* |
|
* @return {Array} |
|
* An array of all players. The array will be in the order that |
|
* `Object.keys` provides, which could potentially vary between |
|
* JavaScript engines. |
|
* |
|
*/ |
|
|
|
|
|
videojs$1.getAllPlayers = function () {
  var all = [];

  // Disposed players leave a key with a `null` value in the registry, so
  // falsy entries are skipped while collecting.
  Object.keys(Player.players).forEach(function (id) {
    var player = Player.players[id];

    if (player) {
      all.push(player);
    }
  });

  return all;
};
|
|
|
// Live reference to the player registry and the component lookup helper.
videojs$1.players = Player.players;
videojs$1.getComponent = Component.getComponent;
|
/** |
|
* Register a component so it can referred to by name. Used when adding to other |
|
* components, either through addChild `component.addChild('myComponent')` or through |
|
* default children options `{ children: ['myComponent'] }`. |
|
* |
|
* > NOTE: You could also just initialize the component before adding. |
|
* `component.addChild(new MyComponent());` |
|
* |
|
* @param {string} name |
|
* The class name of the component |
|
* |
|
* @param {Component} comp |
|
* The component class |
|
* |
|
* @return {Component} |
|
* The newly registered component |
|
*/ |
|
|
|
videojs$1.registerComponent = function (name$$1, comp) {
  // Techs have their own registry; warn when one is registered here by mistake.
  if (Tech.isTech(comp)) {
    log.warn("The " + name$$1 + " tech was registered as a component. It should instead be registered using videojs.registerTech(name, tech)");
  }

  // Return the registration result so the documented `@return {Component}`
  // contract holds (previously this returned `undefined`).
  return Component.registerComponent.call(Component, name$$1, comp);
};
|
|
|
// Tech registration/lookup and middleware installation, re-exported.
videojs$1.getTech = Tech.getTech;
videojs$1.registerTech = Tech.registerTech;
videojs$1.use = use;
/**
 * An object that can be returned by a middleware to signify
 * that the middleware is being terminated.
 *
 * @type {object}
 * @property {object} middleware.TERMINATOR
 */

// NOTE: the descriptor key was previously misspelled `writeable`, which
// `Object.defineProperty` silently ignores; the properties were non-writable
// only because `writable` defaults to false. Spelled correctly here so the
// intent is explicit (runtime behavior is unchanged).
Object.defineProperty(videojs$1, 'middleware', {
  value: {},
  writable: false,
  enumerable: true
});
Object.defineProperty(videojs$1.middleware, 'TERMINATOR', {
  value: TERMINATOR,
  writable: false,
  enumerable: true
});
|
/** |
|
* A reference to the {@link module:browser|browser utility module} as an object. |
|
* |
|
* @type {Object} |
|
* @see {@link module:browser|browser} |
|
*/ |
|
|
|
// Browser-detection utility module, exposed for integrations.
videojs$1.browser = browser;
/**
 * Use {@link module:browser.TOUCH_ENABLED|browser.TOUCH_ENABLED} instead; only
 * included for backward-compatibility with 4.x.
 *
 * @deprecated Since version 5.0, use {@link module:browser.TOUCH_ENABLED|browser.TOUCH_ENABLED instead.
 * @type {boolean}
 */

videojs$1.TOUCH_ENABLED = TOUCH_ENABLED;
// Utility helpers and the plugin registration API, re-exported on the namespace.
videojs$1.extend = extend$1;
videojs$1.mergeOptions = mergeOptions;
videojs$1.bind = bind;
videojs$1.registerPlugin = Plugin.registerPlugin;
videojs$1.deregisterPlugin = Plugin.deregisterPlugin;
|
/** |
|
* Deprecated method to register a plugin with Video.js |
|
* |
|
* @deprecated videojs.plugin() is deprecated; use videojs.registerPlugin() instead |
|
* |
|
* @param {string} name |
|
* The plugin name |
|
* |
|
* @param {Plugin|Function} plugin |
|
* The plugin sub-class or function |
|
*/ |
|
|
|
videojs$1.plugin = function (name$$1, plugin) {
  // Deprecated alias: warn, then delegate to the current registration API.
  log.warn('videojs.plugin() is deprecated; use videojs.registerPlugin() instead');

  var registered = Plugin.registerPlugin(name$$1, plugin);

  return registered;
};
|
|
|
// Plugin query helpers, re-exported on the main namespace.
videojs$1.getPlugins = Plugin.getPlugins;
videojs$1.getPlugin = Plugin.getPlugin;
videojs$1.getPluginVersion = Plugin.getPluginVersion;
|
/** |
|
* Adding languages so that they're available to all players. |
|
* Example: `videojs.addLanguage('es', { 'Hello': 'Hola' });` |
|
* |
|
* @param {string} code |
|
* The language code or dictionary property |
|
* |
|
* @param {Object} data |
|
* The data values to be translated |
|
* |
|
* @return {Object} |
|
* The resulting language dictionary object |
|
*/ |
|
|
|
videojs$1.addLanguage = function (code, data) {
  // Language codes are stored lower-cased.
  var normalized = ('' + code).toLowerCase();

  // Merge the new dictionary into the global options under its code.
  var addition = {};
  addition[normalized] = data;
  videojs$1.options.languages = mergeOptions(videojs$1.options.languages, addition);
  return videojs$1.options.languages[normalized];
};
|
/** |
|
* A reference to the {@link module:log|log utility module} as an object. |
|
* |
|
* @type {Function} |
|
* @see {@link module:log|log} |
|
*/ |
|
|
|
|
|
// Logging, time formatting, URL and event utilities, re-exported.
videojs$1.log = log;
videojs$1.createLogger = createLogger$1;
// `createTimeRange` is an alias of `createTimeRanges` kept for compatibility.
videojs$1.createTimeRange = videojs$1.createTimeRanges = createTimeRanges;
videojs$1.formatTime = formatTime;
videojs$1.setFormatTime = setFormatTime;
videojs$1.resetFormatTime = resetFormatTime;
videojs$1.parseUrl = parseUrl;
videojs$1.isCrossOrigin = isCrossOrigin;
videojs$1.EventTarget = EventTarget;
videojs$1.on = on;
videojs$1.one = one;
videojs$1.off = off;
videojs$1.trigger = trigger;
/**
 * A cross-browser XMLHttpRequest wrapper.
 *
 * @function
 * @param {Object} options
 *           Settings for the request.
 *
 * @return {XMLHttpRequest|XDomainRequest}
 *           The request object.
 *
 * @see https://github.com/Raynos/xhr
 */

videojs$1.xhr = xhr;
// Track classes, exposed so integrations can construct tracks directly.
videojs$1.TextTrack = TextTrack;
videojs$1.AudioTrack = AudioTrack;
videojs$1.VideoTrack = VideoTrack;
|
// Legacy top-level DOM helpers: each one warns once per call and then
// delegates to the equivalent method on `videojs.dom`.
['isEl', 'isTextNode', 'createEl', 'hasClass', 'addClass', 'removeClass', 'toggleClass', 'setAttributes', 'getAttributes', 'emptyEl', 'appendContent', 'insertContent'].forEach(function (k) {
  var deprecated = function () {
    log.warn("videojs." + k + "() is deprecated; use videojs.dom." + k + "() instead");
    return Dom[k].apply(null, arguments);
  };

  videojs$1[k] = deprecated;
});
|
// Cross-browser computed-style helper.
videojs$1.computedStyle = computedStyle;
/**
 * A reference to the {@link module:dom|DOM utility module} as an object.
 *
 * @type {Object}
 * @see {@link module:dom|dom}
 */

videojs$1.dom = Dom;
/**
 * A reference to the {@link module:url|URL utility module} as an object.
 *
 * @type {Object}
 * @see {@link module:url|url}
 */

videojs$1.url = Url;
|
|
|
// Bundled url-toolkit: resolves relative URLs against a base URL following
// the RFC 1808 algorithm (used by the HLS/DASH source handlers).
var urlToolkit = createCommonjsModule(function (module, exports) {
  // see https://tools.ietf.org/html/rfc1808

  /* jshint ignore:start */
  (function (root) {
    /* jshint ignore:end */
    // Capture groups: 1=scheme, 2=netLoc ("//host"), 3=path, 4=";params",
    // 5="?query", 6="#fragment".
    var URL_REGEX = /^((?:[a-zA-Z0-9+\-.]+:)?)(\/\/[^\/?#]*)?((?:[^\/\?#]*\/)*.*?)??(;.*?)?(\?.*?)?(#.*?)?$/;
    var FIRST_SEGMENT_REGEX = /^([^\/?#]*)(.*)$/;
    // NOTE(review): the two regexes below are applied to a *reversed* copy of
    // the path inside `normalizePath` — read that function before editing them.
    var SLASH_DOT_REGEX = /(?:\/|^)\.(?=\/)/g;
    var SLASH_DOT_DOT_REGEX = /(?:\/|^)\.\.\/(?!\.\.\/).*?(?=\/)/g;
    var URLToolkit = {
      // jshint ignore:line
      // If opts.alwaysNormalize is true then the path will always be normalized even when it starts with / or //
      // E.g
      // With opts.alwaysNormalize = false (default, spec compliant)
      // http://a.com/b/cd + /e/f/../g => http://a.com/e/f/../g
      // With opts.alwaysNormalize = true (not spec compliant)
      // http://a.com/b/cd + /e/f/../g => http://a.com/e/g
      buildAbsoluteURL: function buildAbsoluteURL(baseURL, relativeURL, opts) {
        opts = opts || {}; // remove any remaining space and CRLF

        baseURL = baseURL.trim();
        relativeURL = relativeURL.trim();

        if (!relativeURL) {
          // 2a) If the embedded URL is entirely empty, it inherits the
          // entire base URL (i.e., is set equal to the base URL)
          // and we are done.
          if (!opts.alwaysNormalize) {
            return baseURL;
          }

          var basePartsForNormalise = URLToolkit.parseURL(baseURL);

          if (!basePartsForNormalise) {
            throw new Error('Error trying to parse base URL.');
          }

          basePartsForNormalise.path = URLToolkit.normalizePath(basePartsForNormalise.path);
          return URLToolkit.buildURLFromParts(basePartsForNormalise);
        }

        var relativeParts = URLToolkit.parseURL(relativeURL);

        if (!relativeParts) {
          throw new Error('Error trying to parse relative URL.');
        }

        if (relativeParts.scheme) {
          // 2b) If the embedded URL starts with a scheme name, it is
          // interpreted as an absolute URL and we are done.
          if (!opts.alwaysNormalize) {
            return relativeURL;
          }

          relativeParts.path = URLToolkit.normalizePath(relativeParts.path);
          return URLToolkit.buildURLFromParts(relativeParts);
        }

        var baseParts = URLToolkit.parseURL(baseURL);

        if (!baseParts) {
          throw new Error('Error trying to parse base URL.');
        }

        if (!baseParts.netLoc && baseParts.path && baseParts.path[0] !== '/') {
          // If netLoc missing and path doesn't start with '/', assume everthing before the first '/' is the netLoc
          // This causes 'example.com/a' to be handled as '//example.com/a' instead of '/example.com/a'
          var pathParts = FIRST_SEGMENT_REGEX.exec(baseParts.path);
          baseParts.netLoc = pathParts[1];
          baseParts.path = pathParts[2];
        }

        if (baseParts.netLoc && !baseParts.path) {
          baseParts.path = '/';
        }

        var builtParts = {
          // 2c) Otherwise, the embedded URL inherits the scheme of
          // the base URL.
          scheme: baseParts.scheme,
          netLoc: relativeParts.netLoc,
          path: null,
          params: relativeParts.params,
          query: relativeParts.query,
          fragment: relativeParts.fragment
        };

        if (!relativeParts.netLoc) {
          // 3) If the embedded URL's <net_loc> is non-empty, we skip to
          // Step 7.  Otherwise, the embedded URL inherits the <net_loc>
          // (if any) of the base URL.
          builtParts.netLoc = baseParts.netLoc; // 4) If the embedded URL path is preceded by a slash "/", the
          // path is not relative and we skip to Step 7.

          if (relativeParts.path[0] !== '/') {
            if (!relativeParts.path) {
              // 5) If the embedded URL path is empty (and not preceded by a
              // slash), then the embedded URL inherits the base URL path
              builtParts.path = baseParts.path; // 5a) if the embedded URL's <params> is non-empty, we skip to
              // step 7; otherwise, it inherits the <params> of the base
              // URL (if any) and

              if (!relativeParts.params) {
                builtParts.params = baseParts.params; // 5b) if the embedded URL's <query> is non-empty, we skip to
                // step 7; otherwise, it inherits the <query> of the base
                // URL (if any) and we skip to step 7.

                if (!relativeParts.query) {
                  builtParts.query = baseParts.query;
                }
              }
            } else {
              // 6) The last segment of the base URL's path (anything
              // following the rightmost slash "/", or the entire path if no
              // slash is present) is removed and the embedded URL's path is
              // appended in its place.
              var baseURLPath = baseParts.path;
              var newPath = baseURLPath.substring(0, baseURLPath.lastIndexOf('/') + 1) + relativeParts.path;
              builtParts.path = URLToolkit.normalizePath(newPath);
            }
          }
        }

        if (builtParts.path === null) {
          builtParts.path = opts.alwaysNormalize ? URLToolkit.normalizePath(relativeParts.path) : relativeParts.path;
        }

        return URLToolkit.buildURLFromParts(builtParts);
      },
      // Split a URL string into its RFC 1808 components; missing parts become
      // empty strings. Returns null only when the regex fails to match.
      parseURL: function parseURL(url) {
        var parts = URL_REGEX.exec(url);

        if (!parts) {
          return null;
        }

        return {
          scheme: parts[1] || '',
          netLoc: parts[2] || '',
          path: parts[3] || '',
          params: parts[4] || '',
          query: parts[5] || '',
          fragment: parts[6] || ''
        };
      },
      normalizePath: function normalizePath(path) {
        // The following operations are
        // then applied, in order, to the new path:
        // 6a) All occurrences of "./", where "." is a complete path
        // segment, are removed.
        // 6b) If the path ends with "." as a complete path segment,
        // that "." is removed.
        // (The path is reversed first so the regexes can anchor on '/'.)
        path = path.split('').reverse().join('').replace(SLASH_DOT_REGEX, ''); // 6c) All occurrences of "<segment>/../", where <segment> is a
        // complete path segment not equal to "..", are removed.
        // Removal of these path segments is performed iteratively,
        // removing the leftmost matching pattern on each iteration,
        // until no matching pattern remains.
        // 6d) If the path ends with "<segment>/..", where <segment> is a
        // complete path segment not equal to "..", that
        // "<segment>/.." is removed.

        while (path.length !== (path = path.replace(SLASH_DOT_DOT_REGEX, '')).length) {} // jshint ignore:line

        // Undo the initial reversal before returning.
        return path.split('').reverse().join('');
      },
      buildURLFromParts: function buildURLFromParts(parts) {
        return parts.scheme + parts.netLoc + parts.path + parts.params + parts.query + parts.fragment;
      }
    };
    /* jshint ignore:start */

    module.exports = URLToolkit;
  })(commonjsGlobal);
  /* jshint ignore:end */

});
|
|
|
/*! @name m3u8-parser @version 4.3.0 @license Apache-2.0 */ |
|
function _extends$1() {
  // Lazily pick the implementation (native Object.assign when available),
  // replacing this function so subsequent calls skip the check.
  _extends$1 = Object.assign || function (target) {
    var sources = Array.prototype.slice.call(arguments, 1);

    sources.forEach(function (source) {
      // Copy own enumerable string-keyed properties only.
      for (var key in source) {
        if (Object.prototype.hasOwnProperty.call(source, key)) {
          target[key] = source[key];
        }
      }
    });

    return target;
  };

  return _extends$1.apply(this, arguments);
}
|
|
|
function _inheritsLoose$1(subClass, superClass) {
  // Build the child prototype from the parent's, restoring `constructor`.
  var childProto = Object.create(superClass.prototype);
  childProto.constructor = subClass;
  subClass.prototype = childProto;
  // Link the constructors so static members are inherited.
  subClass.__proto__ = superClass;
}
|
|
|
function _assertThisInitialized$1(self) {
  // Anything other than `undefined` (including null) passes through unchanged.
  if (self !== void 0) {
    return self;
  }

  throw new ReferenceError("this hasn't been initialised - super() hasn't been called");
}
|
/** |
|
* @file stream.js |
|
*/ |
|
|
|
/** |
|
* A lightweight readable stream implementation that handles event dispatching. |
|
* |
|
* @class Stream |
|
*/ |
|
|
|
|
|
var Stream =
/*#__PURE__*/
function () {
  /**
   * A lightweight readable stream implementation that handles event dispatching.
   *
   * @class Stream
   */
  function Stream() {
    this.listeners = {};
  }
  /**
   * Add a listener for a specified event type.
   *
   * @param {string} type the event name
   * @param {Function} listener the callback to be invoked when an event of
   * the specified type occurs
   */


  var _proto = Stream.prototype;

  _proto.on = function on(type, listener) {
    if (!this.listeners[type]) {
      this.listeners[type] = [];
    }

    this.listeners[type].push(listener);
  };
  /**
   * Remove a listener for a specified event type.
   *
   * @param {string} type the event name
   * @param {Function} listener a function previously registered for this
   * type of event through `on`
   * @return {boolean} if we could turn it off or not
   */


  _proto.off = function off(type, listener) {
    if (!this.listeners[type]) {
      return false;
    }

    var index = this.listeners[type].indexOf(listener); // Only splice when the listener was actually found: the previous
    // unconditional `splice(index, 1)` ran `splice(-1, 1)` for unknown
    // listeners, silently removing the *last* registered callback.

    if (index === -1) {
      return false;
    }

    this.listeners[type].splice(index, 1);
    return true;
  };
  /**
   * Trigger an event of the specified type on this stream. Any additional
   * arguments to this function are passed as parameters to event listeners.
   *
   * @param {string} type the event name
   */


  _proto.trigger = function trigger(type) {
    var callbacks = this.listeners[type];
    var i;
    var length;
    var args;

    if (!callbacks) {
      return;
    } // Slicing the arguments on every invocation of this method
    // can add a significant amount of overhead. Avoid the
    // intermediate object creation for the common case of a
    // single callback argument


    if (arguments.length === 2) {
      length = callbacks.length;

      for (i = 0; i < length; ++i) {
        callbacks[i].call(this, arguments[1]);
      }
    } else {
      args = Array.prototype.slice.call(arguments, 1);
      length = callbacks.length;

      for (i = 0; i < length; ++i) {
        callbacks[i].apply(this, args);
      }
    }
  };
  /**
   * Destroys the stream and cleans up.
   */


  _proto.dispose = function dispose() {
    this.listeners = {};
  };
  /**
   * Forwards all `data` events on this stream to the destination stream. The
   * destination stream should provide a method `push` to receive the data
   * events as they arrive.
   *
   * @param {Stream} destination the stream that will receive all `data` events
   * @see http://nodejs.org/api/stream.html#stream_readable_pipe_destination_options
   */


  _proto.pipe = function pipe(destination) {
    this.on('data', function (data) {
      destination.push(data);
    });
  };

  return Stream;
}();
|
/** |
|
* A stream that buffers string input and generates a `data` event for each |
|
* line. |
|
* |
|
* @class LineStream |
|
* @extends Stream |
|
*/ |
|
|
|
|
|
var LineStream =
/*#__PURE__*/
function (_Stream) {
  _inheritsLoose$1(LineStream, _Stream);

  function LineStream() {
    // buffer holds any trailing partial line until more input arrives
    var _this = _Stream.call(this) || this;

    _this.buffer = '';
    return _this;
  }

  /**
   * Add new data to be parsed. Emits one `data` event per complete line
   * (newline excluded); any partial trailing line is retained.
   *
   * @param {string} data the text to process
   */
  var _proto = LineStream.prototype;

  _proto.push = function push(data) {
    this.buffer += data;
    var nextNewline = this.buffer.indexOf('\n');

    while (nextNewline > -1) {
      this.trigger('data', this.buffer.substring(0, nextNewline));
      this.buffer = this.buffer.substring(nextNewline + 1);
      nextNewline = this.buffer.indexOf('\n');
    }
  };

  return LineStream;
}(Stream);
|
/** |
|
* "forgiving" attribute list psuedo-grammar: |
|
* attributes -> keyvalue (',' keyvalue)* |
|
* keyvalue -> key '=' value |
|
* key -> [^=]* |
|
* value -> '"' [^"]* '"' | [^,]* |
|
*/ |
|
|
|
|
|
var attributeSeparator = function attributeSeparator() {
  // a key is anything up to '='; a value is either a quoted string (which
  // may contain commas) or a run of characters up to the next comma
  var keyvalue = '(?:[^=]*)=(?:"[^"]*"|[^,]*)';

  return new RegExp('(?:^|,)(' + keyvalue + ')');
};
|
/** |
|
* Parse attributes from a line given the separator |
|
* |
|
* @param {string} attributes the attribute line to parse |
|
*/ |
|
|
|
|
|
/**
 * Parse attributes from a line given the separator.
 *
 * @param {string} attributes the attribute line to parse
 */
var parseAttributes = function parseAttributes(attributes) {
  // split the string using attributes as the separator
  var attrs = attributes.split(attributeSeparator());
  var result = {};

  // iterate from the end so that, for duplicate keys, the FIRST
  // occurrence in the input is the one that ends up in the result
  for (var i = attrs.length - 1; i >= 0; i--) {
    // filter out unmatched portions of the string
    if (attrs[i] === '') {
      continue;
    }

    // split the key and value
    var pair = /([^=]*)=(.*)/.exec(attrs[i]).slice(1);

    // trim whitespace and remove optional quotes around the value
    var key = pair[0].replace(/^\s+|\s+$/g, '');
    var value = pair[1].replace(/^\s+|\s+$/g, '').replace(/^['"](.*)['"]$/g, '$1');

    result[key] = value;
  }

  return result;
};
|
/** |
|
* A line-level M3U8 parser event stream. It expects to receive input one |
|
* line at a time and performs a context-free parse of its contents. A stream |
|
* interpretation of a manifest can be useful if the manifest is expected to |
|
* be too large to fit comfortably into memory or the entirety of the input |
|
* is not immediately available. Otherwise, it's probably much easier to work |
|
* with a regular `Parser` object. |
|
* |
|
* Produces `data` events with an object that captures the parser's |
|
* interpretation of the input. That object has a property `tag` that is one |
|
* of `uri`, `comment`, or `tag`. URIs only have a single additional |
|
* property, `line`, which captures the entirety of the input without |
|
* interpretation. Comments similarly have a single additional property |
|
* `text` which is the input without the leading `#`. |
|
* |
|
* Tags always have a property `tagType` which is the lower-cased version of |
|
* the M3U8 directive without the `#EXT` or `#EXT-X-` prefix. For instance, |
|
* `#EXT-X-MEDIA-SEQUENCE` becomes `media-sequence` when parsed. Unrecognized |
|
* tags are given the tag type `unknown` and a single additional property |
|
* `data` with the remainder of the input. |
|
* |
|
* @class ParseStream |
|
* @extends Stream |
|
*/ |
|
|
|
|
|
var ParseStream =
/*#__PURE__*/
function (_Stream) {
  _inheritsLoose$1(ParseStream, _Stream);

  function ParseStream() {
    var _this = _Stream.call(this) || this;

    _this.customParsers = [];
    _this.tagMappers = [];
    return _this;
  }

  // Ordered table of [expression, handler] pairs for the standard HLS tags.
  // The FIRST expression that matches wins, so the ordering here mirrors the
  // original cascade of if-blocks exactly. Each handler receives the regex
  // match and returns the event object to emit.
  var TAG_PARSERS = [
    [/^#EXTM3U/, function () {
      return { type: 'tag', tagType: 'm3u' };
    }],
    [/^#EXTINF:?([0-9\.]*)?,?(.*)?$/, function (match) {
      var event = { type: 'tag', tagType: 'inf' };

      if (match[1]) {
        event.duration = parseFloat(match[1]);
      }

      if (match[2]) {
        event.title = match[2];
      }

      return event;
    }],
    [/^#EXT-X-TARGETDURATION:?([0-9.]*)?/, function (match) {
      var event = { type: 'tag', tagType: 'targetduration' };

      if (match[1]) {
        event.duration = parseInt(match[1], 10);
      }

      return event;
    }],
    [/^#ZEN-TOTAL-DURATION:?([0-9.]*)?/, function (match) {
      var event = { type: 'tag', tagType: 'totalduration' };

      if (match[1]) {
        event.duration = parseInt(match[1], 10);
      }

      return event;
    }],
    [/^#EXT-X-VERSION:?([0-9.]*)?/, function (match) {
      var event = { type: 'tag', tagType: 'version' };

      if (match[1]) {
        event.version = parseInt(match[1], 10);
      }

      return event;
    }],
    [/^#EXT-X-MEDIA-SEQUENCE:?(\-?[0-9.]*)?/, function (match) {
      var event = { type: 'tag', tagType: 'media-sequence' };

      if (match[1]) {
        event.number = parseInt(match[1], 10);
      }

      return event;
    }],
    [/^#EXT-X-DISCONTINUITY-SEQUENCE:?(\-?[0-9.]*)?/, function (match) {
      var event = { type: 'tag', tagType: 'discontinuity-sequence' };

      if (match[1]) {
        event.number = parseInt(match[1], 10);
      }

      return event;
    }],
    [/^#EXT-X-PLAYLIST-TYPE:?(.*)?$/, function (match) {
      var event = { type: 'tag', tagType: 'playlist-type' };

      if (match[1]) {
        event.playlistType = match[1];
      }

      return event;
    }],
    [/^#EXT-X-BYTERANGE:?([0-9.]*)?@?([0-9.]*)?/, function (match) {
      var event = { type: 'tag', tagType: 'byterange' };

      if (match[1]) {
        event.length = parseInt(match[1], 10);
      }

      if (match[2]) {
        event.offset = parseInt(match[2], 10);
      }

      return event;
    }],
    [/^#EXT-X-ALLOW-CACHE:?(YES|NO)?/, function (match) {
      var event = { type: 'tag', tagType: 'allow-cache' };

      if (match[1]) {
        event.allowed = !/NO/.test(match[1]);
      }

      return event;
    }],
    [/^#EXT-X-MAP:?(.*)$/, function (match) {
      var event = { type: 'tag', tagType: 'map' };

      if (match[1]) {
        var attributes = parseAttributes(match[1]);

        if (attributes.URI) {
          event.uri = attributes.URI;
        }

        if (attributes.BYTERANGE) {
          var parts = attributes.BYTERANGE.split('@');
          event.byterange = {};

          if (parts[0]) {
            event.byterange.length = parseInt(parts[0], 10);
          }

          if (parts[1]) {
            event.byterange.offset = parseInt(parts[1], 10);
          }
        }
      }

      return event;
    }],
    [/^#EXT-X-STREAM-INF:?(.*)$/, function (match) {
      var event = { type: 'tag', tagType: 'stream-inf' };

      if (match[1]) {
        event.attributes = parseAttributes(match[1]);

        if (event.attributes.RESOLUTION) {
          var split = event.attributes.RESOLUTION.split('x');
          var resolution = {};

          if (split[0]) {
            resolution.width = parseInt(split[0], 10);
          }

          if (split[1]) {
            resolution.height = parseInt(split[1], 10);
          }

          event.attributes.RESOLUTION = resolution;
        }

        if (event.attributes.BANDWIDTH) {
          event.attributes.BANDWIDTH = parseInt(event.attributes.BANDWIDTH, 10);
        }

        if (event.attributes['PROGRAM-ID']) {
          event.attributes['PROGRAM-ID'] = parseInt(event.attributes['PROGRAM-ID'], 10);
        }
      }

      return event;
    }],
    [/^#EXT-X-MEDIA:?(.*)$/, function (match) {
      var event = { type: 'tag', tagType: 'media' };

      if (match[1]) {
        event.attributes = parseAttributes(match[1]);
      }

      return event;
    }],
    [/^#EXT-X-ENDLIST/, function () {
      return { type: 'tag', tagType: 'endlist' };
    }],
    [/^#EXT-X-DISCONTINUITY/, function () {
      return { type: 'tag', tagType: 'discontinuity' };
    }],
    [/^#EXT-X-PROGRAM-DATE-TIME:?(.*)$/, function (match) {
      var event = { type: 'tag', tagType: 'program-date-time' };

      if (match[1]) {
        event.dateTimeString = match[1];
        event.dateTimeObject = new Date(match[1]);
      }

      return event;
    }],
    [/^#EXT-X-KEY:?(.*)$/, function (match) {
      var event = { type: 'tag', tagType: 'key' };

      if (match[1]) {
        event.attributes = parseAttributes(match[1]);

        // parse the IV string into a Uint32Array
        if (event.attributes.IV) {
          if (event.attributes.IV.substring(0, 2).toLowerCase() === '0x') {
            event.attributes.IV = event.attributes.IV.substring(2);
          }

          event.attributes.IV = event.attributes.IV.match(/.{8}/g);
          event.attributes.IV[0] = parseInt(event.attributes.IV[0], 16);
          event.attributes.IV[1] = parseInt(event.attributes.IV[1], 16);
          event.attributes.IV[2] = parseInt(event.attributes.IV[2], 16);
          event.attributes.IV[3] = parseInt(event.attributes.IV[3], 16);
          event.attributes.IV = new Uint32Array(event.attributes.IV);
        }
      }

      return event;
    }],
    [/^#EXT-X-START:?(.*)$/, function (match) {
      var event = { type: 'tag', tagType: 'start' };

      if (match[1]) {
        event.attributes = parseAttributes(match[1]);
        event.attributes['TIME-OFFSET'] = parseFloat(event.attributes['TIME-OFFSET']);
        event.attributes.PRECISE = /YES/.test(event.attributes.PRECISE);
      }

      return event;
    }],
    [/^#EXT-X-CUE-OUT-CONT:?(.*)?$/, function (match) {
      return { type: 'tag', tagType: 'cue-out-cont', data: match[1] ? match[1] : '' };
    }],
    [/^#EXT-X-CUE-OUT:?(.*)?$/, function (match) {
      return { type: 'tag', tagType: 'cue-out', data: match[1] ? match[1] : '' };
    }],
    [/^#EXT-X-CUE-IN:?(.*)?$/, function (match) {
      return { type: 'tag', tagType: 'cue-in', data: match[1] ? match[1] : '' };
    }]
  ];

  /**
   * Parses an additional line of input.
   *
   * @param {string} line a single line of an M3U8 file to parse
   */
  var _proto = ParseStream.prototype;

  _proto.push = function push(line) {
    var _this2 = this;

    // strip whitespace; ignore empty lines
    line = line.trim();

    if (line.length === 0) {
      return;
    }

    // URIs
    if (line[0] !== '#') {
      this.trigger('data', {
        type: 'uri',
        uri: line
      });
      return;
    }

    // map tags: every mapper that changes the line contributes an
    // additional line to process, after the original
    var newLines = this.tagMappers.reduce(function (acc, mapper) {
      var mappedLine = mapper(line);

      // skip if unchanged
      return mappedLine === line ? acc : acc.concat([mappedLine]);
    }, [line]);

    newLines.forEach(function (newLine) {
      // custom parsers get first crack at every line
      for (var i = 0; i < _this2.customParsers.length; i++) {
        if (_this2.customParsers[i].call(_this2, newLine)) {
          return;
        }
      }

      // Comments
      if (newLine.indexOf('#EXT') !== 0) {
        _this2.trigger('data', {
          type: 'comment',
          text: newLine.slice(1)
        });

        return;
      }

      // strip off any carriage returns here so the regex matching
      // doesn't have to account for them
      newLine = newLine.replace('\r', '');

      // Tags: first matching entry in the ordered table wins
      for (var j = 0; j < TAG_PARSERS.length; j++) {
        var match = TAG_PARSERS[j][0].exec(newLine);

        if (match) {
          _this2.trigger('data', TAG_PARSERS[j][1](match));

          return;
        }
      }

      // unknown tag type
      _this2.trigger('data', {
        type: 'tag',
        data: newLine.slice(4)
      });
    });
  };

  /**
   * Add a parser for custom headers
   *
   * @param {Object} options a map of options for the added parser
   * @param {RegExp} options.expression a regular expression to match the custom header
   * @param {string} options.customType the custom type to register to the output
   * @param {Function} [options.dataParser] function to parse the line into an object
   * @param {boolean} [options.segment] should tag data be attached to the segment object
   */
  _proto.addParser = function addParser(_ref) {
    var _this3 = this;

    var expression = _ref.expression,
        customType = _ref.customType,
        dataParser = _ref.dataParser,
        segment = _ref.segment;

    // default to passing the raw line through untouched
    var parseData = typeof dataParser === 'function' ? dataParser : function (line) {
      return line;
    };

    this.customParsers.push(function (line) {
      if (!expression.exec(line)) {
        return;
      }

      _this3.trigger('data', {
        type: 'custom',
        data: parseData(line),
        customType: customType,
        segment: segment
      });

      return true;
    });
  };

  /**
   * Add a custom header mapper
   *
   * @param {Object} options
   * @param {RegExp} options.expression a regular expression to match the custom header
   * @param {Function} options.map function to translate tag into a different tag
   */
  _proto.addTagMapper = function addTagMapper(_ref2) {
    var expression = _ref2.expression,
        map = _ref2.map;

    this.tagMappers.push(function (line) {
      return expression.test(line) ? map(line) : line;
    });
  };

  return ParseStream;
}(Stream);
|
/** |
|
* A parser for M3U8 files. The current interpretation of the input is |
|
* exposed as a property `manifest` on parser objects. It's just two lines to |
|
* create and parse a manifest once you have the contents available as a string: |
|
* |
|
* ```js |
|
* var parser = new m3u8.Parser(); |
|
* parser.push(xhr.responseText); |
|
* ``` |
|
* |
|
* New input can later be applied to update the manifest object by calling |
|
* `push` again. |
|
* |
|
* The parser attempts to create a usable manifest object even if the |
|
* underlying input is somewhat nonsensical. It emits `info` and `warning` |
|
* events during the parse if it encounters input that seems invalid or |
|
* requires some property of the manifest object to be defaulted. |
|
* |
|
* @class Parser |
|
* @extends Stream |
|
*/ |
|
|
|
|
|
var Parser =
/*#__PURE__*/
function (_Stream) {
  _inheritsLoose$1(Parser, _Stream);

  function Parser() {
    var _this;

    _this = _Stream.call(this) || this;
    _this.lineStream = new LineStream();
    _this.parseStream = new ParseStream();

    _this.lineStream.pipe(_this.parseStream);
    /* eslint-disable consistent-this */
    // the handlers below are invoked with `.call(self)`; _assertThisInitialized$1
    // is idempotent, so the previous doubled call was redundant

    var self = _assertThisInitialized$1(_this);
    /* eslint-enable consistent-this */

    // segment entries collected so far; becomes manifest.segments/playlists
    var uris = [];
    var currentUri = {}; // if specified, the active EXT-X-MAP definition

    var currentMap; // if specified, the active decryption key

    var _key;

    var noop = function noop() {};

    var defaultMediaGroups = {
      'AUDIO': {},
      'VIDEO': {},
      'CLOSED-CAPTIONS': {},
      'SUBTITLES': {}
    }; // group segments into numbered timelines delineated by discontinuities

    var currentTimeline = 0; // the manifest is empty until the parse stream begins delivering data

    _this.manifest = {
      allowCache: true,
      discontinuityStarts: [],
      segments: []
    }; // update the manifest with the m3u8 entry from the parse stream

    _this.parseStream.on('data', function (entry) {
      var mediaGroup;
      var rendition;
      ({
        tag: function tag() {
          // switch based on the tag type
          (({
            'allow-cache': function allowCache() {
              this.manifest.allowCache = entry.allowed;

              if (!('allowed' in entry)) {
                this.trigger('info', {
                  message: 'defaulting allowCache to YES'
                });
                this.manifest.allowCache = true;
              }
            },
            byterange: function byterange() {
              var byterange = {};

              if ('length' in entry) {
                currentUri.byterange = byterange;
                byterange.length = entry.length;

                if (!('offset' in entry)) {
                  this.trigger('info', {
                    message: 'defaulting offset to zero'
                  });
                  entry.offset = 0;
                }
              }

              if ('offset' in entry) {
                currentUri.byterange = byterange;
                byterange.offset = entry.offset;
              }
            },
            endlist: function endlist() {
              this.manifest.endList = true;
            },
            inf: function inf() {
              if (!('mediaSequence' in this.manifest)) {
                this.manifest.mediaSequence = 0;
                this.trigger('info', {
                  message: 'defaulting media sequence to zero'
                });
              }

              if (!('discontinuitySequence' in this.manifest)) {
                this.manifest.discontinuitySequence = 0;
                this.trigger('info', {
                  message: 'defaulting discontinuity sequence to zero'
                });
              }

              if (entry.duration > 0) {
                currentUri.duration = entry.duration;
              }

              if (entry.duration === 0) {
                currentUri.duration = 0.01;
                this.trigger('info', {
                  message: 'updating zero segment duration to a small value'
                });
              }

              this.manifest.segments = uris;
            },
            key: function key() {
              if (!entry.attributes) {
                this.trigger('warn', {
                  message: 'ignoring key declaration without attribute list'
                });
                return;
              } // clear the active encryption key


              if (entry.attributes.METHOD === 'NONE') {
                _key = null;
                return;
              }

              if (!entry.attributes.URI) {
                this.trigger('warn', {
                  message: 'ignoring key declaration without URI'
                });
                return;
              }

              if (!entry.attributes.METHOD) {
                this.trigger('warn', {
                  message: 'defaulting key method to AES-128'
                });
              } // setup an encryption key for upcoming segments


              _key = {
                method: entry.attributes.METHOD || 'AES-128',
                uri: entry.attributes.URI
              };

              if (typeof entry.attributes.IV !== 'undefined') {
                _key.iv = entry.attributes.IV;
              }
            },
            'media-sequence': function mediaSequence() {
              if (!isFinite(entry.number)) {
                this.trigger('warn', {
                  message: 'ignoring invalid media sequence: ' + entry.number
                });
                return;
              }

              this.manifest.mediaSequence = entry.number;
            },
            'discontinuity-sequence': function discontinuitySequence() {
              if (!isFinite(entry.number)) {
                this.trigger('warn', {
                  message: 'ignoring invalid discontinuity sequence: ' + entry.number
                });
                return;
              }

              this.manifest.discontinuitySequence = entry.number;
              currentTimeline = entry.number;
            },
            'playlist-type': function playlistType() {
              if (!/VOD|EVENT/.test(entry.playlistType)) {
                this.trigger('warn', {
                  // FIX: the event property is `playlistType`; the previous
                  // `entry.playlist` does not exist, so this warning always
                  // printed "undefined" for the rejected value
                  message: 'ignoring unknown playlist type: ' + entry.playlistType
                });
                return;
              }

              this.manifest.playlistType = entry.playlistType;
            },
            map: function map() {
              currentMap = {};

              if (entry.uri) {
                currentMap.uri = entry.uri;
              }

              if (entry.byterange) {
                currentMap.byterange = entry.byterange;
              }
            },
            'stream-inf': function streamInf() {
              this.manifest.playlists = uris;
              this.manifest.mediaGroups = this.manifest.mediaGroups || defaultMediaGroups;

              if (!entry.attributes) {
                this.trigger('warn', {
                  message: 'ignoring empty stream-inf attributes'
                });
                return;
              }

              if (!currentUri.attributes) {
                currentUri.attributes = {};
              }

              _extends$1(currentUri.attributes, entry.attributes);
            },
            media: function media() {
              this.manifest.mediaGroups = this.manifest.mediaGroups || defaultMediaGroups;

              if (!(entry.attributes && entry.attributes.TYPE && entry.attributes['GROUP-ID'] && entry.attributes.NAME)) {
                this.trigger('warn', {
                  message: 'ignoring incomplete or missing media group'
                });
                return;
              } // find the media group, creating defaults as necessary


              var mediaGroupType = this.manifest.mediaGroups[entry.attributes.TYPE];
              mediaGroupType[entry.attributes['GROUP-ID']] = mediaGroupType[entry.attributes['GROUP-ID']] || {};
              mediaGroup = mediaGroupType[entry.attributes['GROUP-ID']]; // collect the rendition metadata

              rendition = {
                default: /yes/i.test(entry.attributes.DEFAULT)
              };

              if (rendition.default) {
                rendition.autoselect = true;
              } else {
                rendition.autoselect = /yes/i.test(entry.attributes.AUTOSELECT);
              }

              if (entry.attributes.LANGUAGE) {
                rendition.language = entry.attributes.LANGUAGE;
              }

              if (entry.attributes.URI) {
                rendition.uri = entry.attributes.URI;
              }

              if (entry.attributes['INSTREAM-ID']) {
                rendition.instreamId = entry.attributes['INSTREAM-ID'];
              }

              if (entry.attributes.CHARACTERISTICS) {
                rendition.characteristics = entry.attributes.CHARACTERISTICS;
              }

              if (entry.attributes.FORCED) {
                rendition.forced = /yes/i.test(entry.attributes.FORCED);
              } // insert the new rendition


              mediaGroup[entry.attributes.NAME] = rendition;
            },
            discontinuity: function discontinuity() {
              currentTimeline += 1;
              currentUri.discontinuity = true;
              this.manifest.discontinuityStarts.push(uris.length);
            },
            'program-date-time': function programDateTime() {
              if (typeof this.manifest.dateTimeString === 'undefined') {
                // PROGRAM-DATE-TIME is a media-segment tag, but for backwards
                // compatibility, we add the first occurence of the PROGRAM-DATE-TIME tag
                // to the manifest object
                // TODO: Consider removing this in future major version
                this.manifest.dateTimeString = entry.dateTimeString;
                this.manifest.dateTimeObject = entry.dateTimeObject;
              }

              currentUri.dateTimeString = entry.dateTimeString;
              currentUri.dateTimeObject = entry.dateTimeObject;
            },
            targetduration: function targetduration() {
              if (!isFinite(entry.duration) || entry.duration < 0) {
                this.trigger('warn', {
                  message: 'ignoring invalid target duration: ' + entry.duration
                });
                return;
              }

              this.manifest.targetDuration = entry.duration;
            },
            totalduration: function totalduration() {
              if (!isFinite(entry.duration) || entry.duration < 0) {
                this.trigger('warn', {
                  message: 'ignoring invalid total duration: ' + entry.duration
                });
                return;
              }

              this.manifest.totalDuration = entry.duration;
            },
            start: function start() {
              if (!entry.attributes || isNaN(entry.attributes['TIME-OFFSET'])) {
                this.trigger('warn', {
                  message: 'ignoring start declaration without appropriate attribute list'
                });
                return;
              }

              this.manifest.start = {
                timeOffset: entry.attributes['TIME-OFFSET'],
                precise: entry.attributes.PRECISE
              };
            },
            'cue-out': function cueOut() {
              currentUri.cueOut = entry.data;
            },
            'cue-out-cont': function cueOutCont() {
              currentUri.cueOutCont = entry.data;
            },
            'cue-in': function cueIn() {
              currentUri.cueIn = entry.data;
            }
          })[entry.tagType] || noop).call(self);
        },
        uri: function uri() {
          currentUri.uri = entry.uri;
          uris.push(currentUri); // if no explicit duration was declared, use the target duration

          if (this.manifest.targetDuration && !('duration' in currentUri)) {
            this.trigger('warn', {
              message: 'defaulting segment duration to the target duration'
            });
            currentUri.duration = this.manifest.targetDuration;
          } // annotate with encryption information, if necessary


          if (_key) {
            currentUri.key = _key;
          }

          currentUri.timeline = currentTimeline; // annotate with initialization segment information, if necessary

          if (currentMap) {
            currentUri.map = currentMap;
          } // prepare for the next URI


          currentUri = {};
        },
        comment: function comment() {// comments are not important for playback
        },
        custom: function custom() {
          // if this is segment-level data attach the output to the segment
          if (entry.segment) {
            currentUri.custom = currentUri.custom || {};
            currentUri.custom[entry.customType] = entry.data; // if this is manifest-level data attach to the top level manifest object
          } else {
            this.manifest.custom = this.manifest.custom || {};
            this.manifest.custom[entry.customType] = entry.data;
          }
        }
      })[entry.type].call(self);
    });

    return _this;
  }
  /**
   * Parse the input string and update the manifest object.
   *
   * @param {string} chunk a potentially incomplete portion of the manifest
   */


  var _proto = Parser.prototype;

  _proto.push = function push(chunk) {
    this.lineStream.push(chunk);
  };
  /**
   * Flush any remaining input. This can be handy if the last line of an M3U8
   * manifest did not contain a trailing newline but the file has been
   * completely received.
   */


  _proto.end = function end() {
    // flush any buffered input
    this.lineStream.push('\n');
  };
  /**
   * Add an additional parser for non-standard tags
   *
   * @param {Object} options a map of options for the added parser
   * @param {RegExp} options.expression a regular expression to match the custom header
   * @param {string} options.type the type to register to the output
   * @param {Function} [options.dataParser] function to parse the line into an object
   * @param {boolean} [options.segment] should tag data be attached to the segment object
   */


  _proto.addParser = function addParser(options) {
    this.parseStream.addParser(options);
  };
  /**
   * Add a custom header mapper
   *
   * @param {Object} options
   * @param {RegExp} options.expression a regular expression to match the custom header
   * @param {Function} options.map function to translate tag into a different tag
   */


  _proto.addTagMapper = function addTagMapper(options) {
    this.parseStream.addTagMapper(options);
  };

  return Parser;
}(Stream);
|
|
|
/*! @name mpd-parser @version 0.7.0 @license Apache-2.0 */ |
|
|
|
// true for any non-null object (arrays included); false for primitives/null
var isObject$1 = function isObject(obj) {
  if (!obj) {
    return false;
  }

  return typeof obj === 'object';
};
|
|
|
// Deep-merge any number of plain objects into a fresh result. Arrays are
// concatenated, nested objects are merged recursively, everything else is
// overwritten by the later source.
var merge = function merge() {
  var sources = Array.prototype.slice.call(arguments);

  return sources.reduce(function (result, source) {
    Object.keys(source).forEach(function (key) {
      var left = result[key];
      var right = source[key];

      if (Array.isArray(left) && Array.isArray(right)) {
        result[key] = left.concat(right);
      } else if (isObject$1(left) && isObject$1(right)) {
        result[key] = merge(left, right);
      } else {
        result[key] = right;
      }
    });
    return result;
  }, {});
};
|
|
|
// own-enumerable property values of `o`, in key order
var values = function values(o) {
  var result = [];

  Object.keys(o).forEach(function (k) {
    result.push(o[k]);
  });
  return result;
};
|
|
|
// integers from `start` (inclusive) up to `end` (exclusive)
var range = function range(start, end) {
  var result = [];
  var i = start;

  while (i < end) {
    result.push(i);
    i++;
  }

  return result;
};
|
|
|
// flatten one level of nesting: [[a], [b, c]] -> [a, b, c]
var flatten = function flatten(lists) {
  var flat = [];

  for (var i = 0; i < lists.length; i++) {
    flat = flat.concat(lists[i]);
  }

  return flat;
};
|
|
|
// copy an array-like into a real array (poly-filled Array.from subset)
var from = function from(list) {
  var result = [];

  if (!list.length) {
    return result;
  }

  for (var i = 0; i < list.length; i++) {
    result.push(list[i]);
  }

  return result;
};
|
|
|
// indexes of every element in `l` whose `key` property is truthy
var findIndexes = function findIndexes(l, key) {
  var indexes = [];

  l.forEach(function (element, index) {
    if (element[key]) {
      indexes.push(index);
    }
  });
  return indexes;
};
|
|
|
var mergeDiscontiguousPlaylists = function mergeDiscontiguousPlaylists(playlists) {
  var byName = playlists.reduce(function (acc, playlist) {
    // assuming playlist IDs are the same across periods
    // TODO: handle multiperiod where representation sets are not the same
    // across periods
    var name = playlist.attributes.id + (playlist.attributes.lang || '');
    var existing = acc[name];

    if (!existing) {
      // first Period
      acc[name] = playlist;
      return acc;
    }

    // Periods after first: the first segment of each subsequent period
    // signals a discontinuity
    playlist.segments[0].discontinuity = true;
    existing.segments.push.apply(existing.segments, playlist.segments);

    // bubble up contentProtection, this assumes all DRM content
    // has the same contentProtection
    if (playlist.attributes.contentProtection) {
      existing.attributes.contentProtection = playlist.attributes.contentProtection;
    }

    return acc;
  }, {});

  return values(byName).map(function (playlist) {
    playlist.discontinuityStarts = findIndexes(playlist.segments, 'discontinuity');
    return playlist;
  });
};
|
|
|
var formatAudioPlaylist = function formatAudioPlaylist(_ref) {
  var attributes = _ref.attributes,
      segments = _ref.segments;

  // HLS-style attribute bag for the audio rendition
  var playlistAttributes = {
    NAME: attributes.id,
    BANDWIDTH: attributes.bandwidth,
    CODECS: attributes.codecs
  };

  playlistAttributes['PROGRAM-ID'] = 1;

  var playlist = {
    attributes: playlistAttributes,
    uri: '',
    endList: (attributes.type || 'static') === 'static',
    timeline: attributes.periodIndex,
    resolvedUri: '',
    targetDuration: attributes.duration,
    segments: segments,
    mediaSequence: segments.length ? segments[0].number : 1
  };

  if (attributes.contentProtection) {
    playlist.contentProtection = attributes.contentProtection;
  }

  return playlist;
};
|
|
|
var formatVttPlaylist = function formatVttPlaylist(_ref2) {
  var attributes = _ref2.attributes,
      segments = _ref2.segments;

  if (typeof segments === 'undefined') {
    // vtt tracks may use a single file in BaseURL
    segments = [{
      uri: attributes.baseUrl,
      timeline: attributes.periodIndex,
      resolvedUri: attributes.baseUrl || '',
      duration: attributes.sourceDuration,
      number: 0
    }];

    // targetDuration should be the same duration as the only segment
    attributes.duration = attributes.sourceDuration;
  }

  var playlistAttributes = {
    NAME: attributes.id,
    BANDWIDTH: attributes.bandwidth
  };

  playlistAttributes['PROGRAM-ID'] = 1;

  return {
    attributes: playlistAttributes,
    uri: '',
    endList: (attributes.type || 'static') === 'static',
    timeline: attributes.periodIndex,
    resolvedUri: attributes.baseUrl || '',
    targetDuration: attributes.duration,
    segments: segments,
    mediaSequence: segments.length ? segments[0].number : 1
  };
};
|
|
|
var organizeAudioPlaylists = function organizeAudioPlaylists(playlists) {
  // Group audio playlists by label ("<lang> (<role>)" or 'main'), keeping
  // only the highest-bandwidth rendition for each label.
  return playlists.reduce(function (groups, playlist) {
    var attrs = playlist.attributes;
    var role = attrs.role && attrs.role.value || 'main';
    var language = attrs.lang || '';
    var label = language ? attrs.lang + " (" + role + ")" : 'main';
    var existing = groups[label];

    // skip if we already have higher quality audio for this language
    if (existing && existing.playlists[0].attributes.BANDWIDTH > attrs.bandwidth) {
      return groups;
    }

    groups[label] = {
      language: language,
      autoselect: true,
      default: role === 'main',
      playlists: [formatAudioPlaylist(playlist)],
      uri: ''
    };
    return groups;
  }, {});
};
|
|
|
var organizeVttPlaylists = function organizeVttPlaylists(playlists) {
  // Group subtitle playlists by language label; the first playlist seen
  // for a label wins and later duplicates are ignored.
  return playlists.reduce(function (groups, playlist) {
    var label = playlist.attributes.lang || 'text';

    if (!groups[label]) {
      groups[label] = {
        language: label,
        default: false,
        autoselect: false,
        playlists: [formatVttPlaylist(playlist)],
        uri: ''
      };
    }

    return groups;
  }, {});
};
|
|
|
var formatVideoPlaylist = function formatVideoPlaylist(_ref3) {
  var attributes = _ref3.attributes;
  var segments = _ref3.segments;

  // Translate the DASH representation attributes into the uppercase
  // attribute names m3u8-parser produces, wiring the playlist to the
  // 'audio' and 'subs' media groups built elsewhere in toM3u8.
  var m3u8Attributes = {
    NAME: attributes.id,
    AUDIO: 'audio',
    SUBTITLES: 'subs',
    RESOLUTION: {
      width: attributes.width,
      height: attributes.height
    },
    CODECS: attributes.codecs,
    BANDWIDTH: attributes.bandwidth,
    'PROGRAM-ID': 1
  };

  var playlist = {
    attributes: m3u8Attributes,
    uri: '',
    // a missing @type defaults to 'static' (VOD); only 'static' ends the list
    endList: (attributes.type || 'static') === 'static',
    timeline: attributes.periodIndex,
    resolvedUri: '',
    targetDuration: attributes.duration,
    segments: segments,
    mediaSequence: segments.length ? segments[0].number : 1
  };

  if (attributes.contentProtection) {
    playlist.contentProtection = attributes.contentProtection;
  }

  return playlist;
};
|
|
|
var toM3u8 = function toM3u8(dashPlaylists) {
  // nothing to convert — return an empty master object
  if (!dashPlaylists.length) {
    return {};
  }

  // master-level attributes are inherited from the first playlist
  var masterAttributes = dashPlaylists[0].attributes;
  var duration = masterAttributes.sourceDuration;
  var minimumUpdatePeriod = masterAttributes.minimumUpdatePeriod === undefined ? 0 : masterAttributes.minimumUpdatePeriod;

  // predicates for splitting the flat playlist list by content type
  var isVideo = function isVideo(playlist) {
    var attributes = playlist.attributes;
    return attributes.mimeType === 'video/mp4' || attributes.contentType === 'video';
  };

  var isAudio = function isAudio(playlist) {
    var attributes = playlist.attributes;
    return attributes.mimeType === 'audio/mp4' || attributes.contentType === 'audio';
  };

  var isVtt = function isVtt(playlist) {
    var attributes = playlist.attributes;
    return attributes.mimeType === 'text/vtt' || attributes.contentType === 'text';
  };

  var videoPlaylists = mergeDiscontiguousPlaylists(dashPlaylists.filter(isVideo)).map(formatVideoPlaylist);
  var audioPlaylists = mergeDiscontiguousPlaylists(dashPlaylists.filter(isAudio));
  var vttPlaylists = dashPlaylists.filter(isVtt);

  var mediaGroups = {
    AUDIO: {},
    VIDEO: {},
    'CLOSED-CAPTIONS': {},
    SUBTITLES: {}
  };

  var master = {
    allowCache: true,
    discontinuityStarts: [],
    segments: [],
    endList: true,
    mediaGroups: mediaGroups,
    uri: '',
    duration: duration,
    playlists: videoPlaylists,
    // MPD expresses minimumUpdatePeriod in seconds; master uses milliseconds
    minimumUpdatePeriod: minimumUpdatePeriod * 1000
  };

  if (audioPlaylists.length) {
    master.mediaGroups.AUDIO.audio = organizeAudioPlaylists(audioPlaylists);
  }

  if (vttPlaylists.length) {
    master.mediaGroups.SUBTITLES.subs = organizeVttPlaylists(vttPlaylists);
  }

  return master;
};
|
|
|
// Resolve the global object across environments: browser `window`, Node
// `global`, or worker `self`; fall back to an empty object so property
// reads never throw.
var commonjsGlobal$1 = typeof window !== 'undefined' ? window : typeof global !== 'undefined' ? global : typeof self !== 'undefined' ? self : {};
|
|
|
/**
 * Executes a CommonJS-style module factory and returns its exports.
 *
 * @param {Function} fn - factory invoked with (module, module.exports)
 * @param {Object} [module] - placeholder parameter; always overwritten with
 *        a fresh module object before the factory runs
 * @return {*} whatever the factory left on module.exports
 */
function createCommonjsModule$1(fn, module) {
  module = {
    exports: {}
  };
  fn(module, module.exports);
  return module.exports;
}
|
|
|
// Vendored copy of url-toolkit: relative URL resolution per RFC 1808.
// see https://tools.ietf.org/html/rfc1808
var urlToolkit$1 = createCommonjsModule$1(function (module, exports) {
  /* jshint ignore:start */
  (function (root) {
    /* jshint ignore:end */
    // Capture groups: 1=scheme, 2=netLoc (//host), 3=path, 4=;params,
    // 5=?query, 6=#fragment
    var URL_REGEX = /^((?:[a-zA-Z0-9+\-.]+:)?)(\/\/[^\/?#]*)?((?:[^\/\?#]*\/)*.*?)??(;.*?)?(\?.*?)?(#.*?)?$/;
    var FIRST_SEGMENT_REGEX = /^([^\/?#]*)(.*)$/;
    // NOTE: the two regexes below are applied to a REVERSED path string
    // (see normalizePath), which lets "<segment>/.." pairs be collapsed
    // with a simple global replace.
    var SLASH_DOT_REGEX = /(?:\/|^)\.(?=\/)/g;
    var SLASH_DOT_DOT_REGEX = /(?:\/|^)\.\.\/(?!\.\.\/).*?(?=\/)/g;
    var URLToolkit = {
      // jshint ignore:line
      // If opts.alwaysNormalize is true then the path will always be normalized even when it starts with / or //
      // E.g
      // With opts.alwaysNormalize = false (default, spec compliant)
      // http://a.com/b/cd + /e/f/../g => http://a.com/e/f/../g
      // With opts.alwaysNormalize = true (not spec compliant)
      // http://a.com/b/cd + /e/f/../g => http://a.com/e/g
      buildAbsoluteURL: function buildAbsoluteURL(baseURL, relativeURL, opts) {
        opts = opts || {}; // remove any remaining space and CRLF

        baseURL = baseURL.trim();
        relativeURL = relativeURL.trim();

        if (!relativeURL) {
          // 2a) If the embedded URL is entirely empty, it inherits the
          // entire base URL (i.e., is set equal to the base URL)
          // and we are done.
          if (!opts.alwaysNormalize) {
            return baseURL;
          }

          var basePartsForNormalise = URLToolkit.parseURL(baseURL);

          if (!basePartsForNormalise) {
            throw new Error('Error trying to parse base URL.');
          }

          basePartsForNormalise.path = URLToolkit.normalizePath(basePartsForNormalise.path);
          return URLToolkit.buildURLFromParts(basePartsForNormalise);
        }

        var relativeParts = URLToolkit.parseURL(relativeURL);

        if (!relativeParts) {
          throw new Error('Error trying to parse relative URL.');
        }

        if (relativeParts.scheme) {
          // 2b) If the embedded URL starts with a scheme name, it is
          // interpreted as an absolute URL and we are done.
          if (!opts.alwaysNormalize) {
            return relativeURL;
          }

          relativeParts.path = URLToolkit.normalizePath(relativeParts.path);
          return URLToolkit.buildURLFromParts(relativeParts);
        }

        var baseParts = URLToolkit.parseURL(baseURL);

        if (!baseParts) {
          throw new Error('Error trying to parse base URL.');
        }

        if (!baseParts.netLoc && baseParts.path && baseParts.path[0] !== '/') {
          // If netLoc missing and path doesn't start with '/', assume everything before the first '/' is the netLoc
          // This causes 'example.com/a' to be handled as '//example.com/a' instead of '/example.com/a'
          var pathParts = FIRST_SEGMENT_REGEX.exec(baseParts.path);
          baseParts.netLoc = pathParts[1];
          baseParts.path = pathParts[2];
        }

        if (baseParts.netLoc && !baseParts.path) {
          baseParts.path = '/';
        }

        var builtParts = {
          // 2c) Otherwise, the embedded URL inherits the scheme of
          // the base URL.
          scheme: baseParts.scheme,
          netLoc: relativeParts.netLoc,
          path: null,
          params: relativeParts.params,
          query: relativeParts.query,
          fragment: relativeParts.fragment
        };

        if (!relativeParts.netLoc) {
          // 3) If the embedded URL's <net_loc> is non-empty, we skip to
          // Step 7.  Otherwise, the embedded URL inherits the <net_loc>
          // (if any) of the base URL.
          builtParts.netLoc = baseParts.netLoc; // 4) If the embedded URL path is preceded by a slash "/", the
          // path is not relative and we skip to Step 7.

          if (relativeParts.path[0] !== '/') {
            if (!relativeParts.path) {
              // 5) If the embedded URL path is empty (and not preceded by a
              // slash), then the embedded URL inherits the base URL path
              builtParts.path = baseParts.path; // 5a) if the embedded URL's <params> is non-empty, we skip to
              // step 7; otherwise, it inherits the <params> of the base
              // URL (if any) and

              if (!relativeParts.params) {
                builtParts.params = baseParts.params; // 5b) if the embedded URL's <query> is non-empty, we skip to
                // step 7; otherwise, it inherits the <query> of the base
                // URL (if any) and we skip to step 7.

                if (!relativeParts.query) {
                  builtParts.query = baseParts.query;
                }
              }
            } else {
              // 6) The last segment of the base URL's path (anything
              // following the rightmost slash "/", or the entire path if no
              // slash is present) is removed and the embedded URL's path is
              // appended in its place.
              var baseURLPath = baseParts.path;
              var newPath = baseURLPath.substring(0, baseURLPath.lastIndexOf('/') + 1) + relativeParts.path;
              builtParts.path = URLToolkit.normalizePath(newPath);
            }
          }
        }

        if (builtParts.path === null) {
          builtParts.path = opts.alwaysNormalize ? URLToolkit.normalizePath(relativeParts.path) : relativeParts.path;
        }

        return URLToolkit.buildURLFromParts(builtParts);
      },
      // Splits a URL string into its RFC 1808 components; returns null when
      // the URL cannot be parsed at all.
      parseURL: function parseURL(url) {
        var parts = URL_REGEX.exec(url);

        if (!parts) {
          return null;
        }

        return {
          scheme: parts[1] || '',
          netLoc: parts[2] || '',
          path: parts[3] || '',
          params: parts[4] || '',
          query: parts[5] || '',
          fragment: parts[6] || ''
        };
      },
      // Collapses "." and "<segment>/.." sequences in a path (RFC 1808
      // step 6). The path is reversed first so the regexes above can match
      // the pairs with a global replace, then reversed back at the end.
      normalizePath: function normalizePath(path) {
        // The following operations are
        // then applied, in order, to the new path:
        // 6a) All occurrences of "./", where "." is a complete path
        // segment, are removed.
        // 6b) If the path ends with "." as a complete path segment,
        // that "." is removed.
        path = path.split('').reverse().join('').replace(SLASH_DOT_REGEX, ''); // 6c) All occurrences of "<segment>/../", where <segment> is a
        // complete path segment not equal to "..", are removed.
        // Removal of these path segments is performed iteratively,
        // removing the leftmost matching pattern on each iteration,
        // until no matching pattern remains.
        // 6d) If the path ends with "<segment>/..", where <segment> is a
        // complete path segment not equal to "..", that
        // "<segment>/.." is removed.

        while (path.length !== (path = path.replace(SLASH_DOT_DOT_REGEX, '')).length) {} // jshint ignore:line

        // restore the original (un-reversed) character order
        return path.split('').reverse().join('');
      },
      // Each component keeps its own delimiter (':', '//', ';', '?', '#'),
      // so plain concatenation reassembles the URL.
      buildURLFromParts: function buildURLFromParts(parts) {
        return parts.scheme + parts.netLoc + parts.path + parts.params + parts.query + parts.fragment;
      }
    };
    /* jshint ignore:start */

    module.exports = URLToolkit;
  })(commonjsGlobal$1);
  /* jshint ignore:end */
});
|
|
|
var resolveUrl = function resolveUrl(baseUrl, relativeUrl) {
  // already absolute (has a scheme)? nothing to resolve
  if (/^[a-z]+:/i.test(relativeUrl)) {
    return relativeUrl;
  }

  // a relative base URL must first be resolved against the page location
  var absoluteBase = /\/\//i.test(baseUrl) ? baseUrl : urlToolkit$1.buildAbsoluteURL(window$1.location.href, baseUrl);

  return urlToolkit$1.buildAbsoluteURL(absoluteBase, relativeUrl);
};
|
/** |
|
* @typedef {Object} SingleUri |
|
* @property {string} uri - relative location of segment |
|
* @property {string} resolvedUri - resolved location of segment |
|
* @property {Object} byterange - Object containing information on how to make byte range |
|
* requests following byte-range-spec per RFC2616. |
|
* @property {String} byterange.length - length of range request |
|
* @property {String} byterange.offset - byte offset of range request |
|
* |
|
* @see https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35.1 |
|
*/ |
|
|
|
/** |
|
* Converts a URLType node (5.3.9.2.3 Table 13) to a segment object |
|
* that conforms to how m3u8-parser is structured |
|
* |
|
* @see https://github.com/videojs/m3u8-parser |
|
* |
|
* @param {string} baseUrl - baseUrl provided by <BaseUrl> nodes |
|
* @param {string} source - source url for segment |
|
 * @param {string} range - optional byte range used for range requests, following the
|
* @return {SingleUri} full segment information transformed into a format similar |
|
* to m3u8-parser |
|
*/ |
|
|
|
|
|
var urlTypeToSegment = function urlTypeToSegment(_ref) {
  var _ref$baseUrl = _ref.baseUrl,
      baseUrl = _ref$baseUrl === void 0 ? '' : _ref$baseUrl,
      _ref$source = _ref.source,
      source = _ref$source === void 0 ? '' : _ref$source,
      _ref$range = _ref.range,
      range = _ref$range === void 0 ? '' : _ref$range;
  // segment skeleton: relative uri plus its fully resolved location
  var init = {
    uri: source,
    resolvedUri: resolveUrl(baseUrl || '', source)
  };

  if (range) {
    // byte-range-spec (RFC 2616 14.35.1) positions are INCLUSIVE on both
    // ends, so "0-599" covers 600 bytes; length must add 1 to the
    // difference of the endpoints
    var ranges = range.split('-');
    var startRange = parseInt(ranges[0], 10);
    var endRange = parseInt(ranges[1], 10);
    init.byterange = {
      length: endRange - startRange + 1,
      offset: startRange
    };
  }

  return init;
};
|
/** |
|
* Calculates the R (repetition) value for a live stream (for the final segment |
|
* in a manifest where the r value is negative 1) |
|
* |
|
* @param {Object} attributes |
|
* Object containing all inherited attributes from parent elements with attribute |
|
* names as keys |
|
* @param {number} time |
|
* current time (typically the total time up until the final segment) |
|
* @param {number} duration |
|
* duration property for the given <S /> |
|
* |
|
* @return {number} |
|
* R value to reach the end of the given period |
|
*/ |
|
|
|
|
|
var getLiveRValue = function getLiveRValue(attributes, time, duration) {
  var NOW = attributes.NOW;
  var clientOffset = attributes.clientOffset;
  var availabilityStartTime = attributes.availabilityStartTime;
  var timescale = attributes.timescale === undefined ? 1 : attributes.timescale;
  var start = attributes.start === undefined ? 0 : attributes.start;
  var minimumUpdatePeriod = attributes.minimumUpdatePeriod === undefined ? 0 : attributes.minimumUpdatePeriod;

  // wall-clock "now" in seconds, corrected for client/server clock drift
  var nowSeconds = (NOW + clientOffset) / 1000;
  // wall-clock start and projected end of the current period
  var periodStartWC = availabilityStartTime + start;
  var periodEndWC = nowSeconds + minimumUpdatePeriod;
  var periodDuration = periodEndWC - periodStartWC;

  // number of segments of `duration` (timescale units) remaining after `time`
  return Math.ceil((periodDuration * timescale - time) / duration);
};
|
/** |
|
* Uses information provided by SegmentTemplate.SegmentTimeline to determine segment |
|
* timing and duration |
|
* |
|
* @param {Object} attributes |
|
* Object containing all inherited attributes from parent elements with attribute |
|
* names as keys |
|
* @param {Object[]} segmentTimeline |
|
* List of objects representing the attributes of each S element contained within |
|
* |
|
* @return {{number: number, duration: number, time: number, timeline: number}[]} |
|
* List of Objects with segment timing and duration info |
|
*/ |
|
|
|
|
|
var parseByTimeline = function parseByTimeline(attributes, segmentTimeline) {
  var _attributes$type = attributes.type,
      type = _attributes$type === void 0 ? 'static' : _attributes$type,
      _attributes$minimumUp2 = attributes.minimumUpdatePeriod,
      minimumUpdatePeriod = _attributes$minimumUp2 === void 0 ? 0 : _attributes$minimumUp2,
      _attributes$media = attributes.media,
      media = _attributes$media === void 0 ? '' : _attributes$media,
      sourceDuration = attributes.sourceDuration,
      _attributes$timescale2 = attributes.timescale,
      timescale = _attributes$timescale2 === void 0 ? 1 : _attributes$timescale2,
      _attributes$startNumb = attributes.startNumber,
      startNumber = _attributes$startNumb === void 0 ? 1 : _attributes$startNumb,
      timeline = attributes.periodIndex;
  var segments = [];
  // `time` tracks the presentation time (in timescale units) of the next
  // segment to emit; -1 means "not yet initialized by the first <S>"
  var time = -1;

  for (var sIndex = 0; sIndex < segmentTimeline.length; sIndex++) {
    // Each S element carries: d (duration), optional r (repeat count) and
    // optional t (explicit start time).
    var S = segmentTimeline[sIndex];
    var duration = S.d;
    var repeat = S.r || 0;
    var segmentTime = S.t || 0;

    if (time < 0) {
      // first segment
      time = segmentTime;
    }

    if (segmentTime && segmentTime > time) {
      // discontinuity
      // TODO: How to handle this type of discontinuity
      // timeline++ here would treat it like HLS discontinuity and content would
      // get appended without gap
      // E.G.
      //  <S t="0" d="1" />
      //  <S d="1" />
      //  <S d="1" />
      //  <S t="5" d="1" />
      // would have $Time$ values of [0, 1, 2, 5]
      // should this be appended at time positions [0, 1, 2, 3],(#EXT-X-DISCONTINUITY)
      // or [0, 1, 2, gap, gap, 5]? (#EXT-X-GAP)
      // does the value of sourceDuration consider this when calculating arbitrary
      // negative @r repeat value?
      // E.G. Same elements as above with this added at the end
      //  <S d="1" r="-1" />
      //  with a sourceDuration of 10
      // Would the 2 gaps be included in the time duration calculations resulting in
      // 8 segments with $Time$ values of [0, 1, 2, 5, 6, 7, 8, 9] or 10 segments
      // with $Time$ values of [0, 1, 2, 5, 6, 7, 8, 9, 10, 11] ?
      time = segmentTime;
    }

    // how many segments this <S> expands to
    var count = void 0;

    if (repeat < 0) {
      // A negative @r means "repeat until the start of the next S element,
      // or to the end of the period if this is the last S".
      var nextS = sIndex + 1;

      if (nextS === segmentTimeline.length) {
        // last segment
        if (type === 'dynamic' && minimumUpdatePeriod > 0 && media.indexOf('$Number$') > 0) {
          // live: project the repeat count out to the next manifest refresh
          count = getLiveRValue(attributes, time, duration);
        } else {
          // TODO: This may be incorrect depending on conclusion of TODO above
          count = (sourceDuration * timescale - time) / duration;
        }
      } else {
        count = (segmentTimeline[nextS].t - time) / duration;
      }
    } else {
      count = repeat + 1;
    }

    // emit `count` segments with consecutive numbers and advancing times
    var end = startNumber + segments.length + count;
    var number = startNumber + segments.length;

    while (number < end) {
      segments.push({
        number: number,
        duration: duration / timescale,
        time: time,
        timeline: timeline
      });
      time += duration;
      number++;
    }
  }

  return segments;
};
|
/** |
|
* Functions for calculating the range of available segments in static and dynamic |
|
* manifests. |
|
*/ |
|
|
|
|
|
var segmentRange = {
  /**
   * Returns the entire range of available segments for a static MPD.
   *
   * @param {Object} attributes
   *        Inherited MPD attributes
   * @return {{ start: number, end: number }}
   *         The start and end numbers for available segments
   */
  static: function _static(attributes) {
    var duration = attributes.duration;
    var timescale = attributes.timescale === undefined ? 1 : attributes.timescale;
    var sourceDuration = attributes.sourceDuration;
    // segment duration in seconds
    var segmentSeconds = duration / timescale;

    return {
      start: 0,
      end: Math.ceil(sourceDuration / segmentSeconds)
    };
  },

  /**
   * Returns the current live window range of available segments for a
   * dynamic MPD.
   *
   * @param {Object} attributes
   *        Inherited MPD attributes
   * @return {{ start: number, end: number }}
   *         The start and end numbers for available segments
   */
  dynamic: function dynamic(attributes) {
    var NOW = attributes.NOW;
    var clientOffset = attributes.clientOffset;
    var availabilityStartTime = attributes.availabilityStartTime;
    var timescale = attributes.timescale === undefined ? 1 : attributes.timescale;
    var duration = attributes.duration;
    var start = attributes.start === undefined ? 0 : attributes.start;
    var minimumUpdatePeriod = attributes.minimumUpdatePeriod === undefined ? 0 : attributes.minimumUpdatePeriod;
    var timeShiftBufferDepth = attributes.timeShiftBufferDepth === undefined ? Infinity : attributes.timeShiftBufferDepth;

    // wall-clock "now" corrected for client/server clock drift
    var now = (NOW + clientOffset) / 1000;
    var periodStartWC = availabilityStartTime + start;
    var periodEndWC = now + minimumUpdatePeriod;
    var periodDuration = periodEndWC - periodStartWC;
    var segmentCount = Math.ceil(periodDuration * timescale / duration);
    // oldest segment still inside the DVR window
    var availableStart = Math.floor((now - periodStartWC - timeShiftBufferDepth) * timescale / duration);
    // newest segment that has finished being produced
    var availableEnd = Math.floor((now - periodStartWC) * timescale / duration);

    return {
      start: Math.max(0, availableStart),
      end: Math.min(segmentCount, availableEnd)
    };
  }
};
|
/** |
|
* Maps a range of numbers to objects with information needed to build the corresponding |
|
* segment list |
|
* |
|
* @name toSegmentsCallback |
|
* @function |
|
* @param {number} number |
|
* Number of the segment |
|
* @param {number} index |
|
* Index of the number in the range list |
|
* @return {{ number: Number, duration: Number, timeline: Number, time: Number }} |
|
* Object with segment timing and duration info |
|
*/ |
|
|
|
/** |
|
* Returns a callback for Array.prototype.map for mapping a range of numbers to |
|
* information needed to build the segment list. |
|
* |
|
* @param {Object} attributes |
|
* Inherited MPD attributes |
|
* @return {toSegmentsCallback} |
|
* Callback map function |
|
*/ |
|
|
|
var toSegments = function toSegments(attributes) {
  // Returns a map callback that converts a segment number + its index in
  // the range list into the segment timing object.
  return function (number, index) {
    var duration = attributes.duration;
    var timescale = attributes.timescale === undefined ? 1 : attributes.timescale;
    var periodIndex = attributes.periodIndex;
    var startNumber = attributes.startNumber === undefined ? 1 : attributes.startNumber;

    return {
      number: startNumber + number,
      duration: duration / timescale,
      timeline: periodIndex,
      // time is in timescale units, derived from the position in the range
      time: index * duration
    };
  };
};
|
/** |
|
* Returns a list of objects containing segment timing and duration info used for |
|
* building the list of segments. This uses the @duration attribute specified |
|
* in the MPD manifest to derive the range of segments. |
|
* |
|
* @param {Object} attributes |
|
* Inherited MPD attributes |
|
* @return {{number: number, duration: number, time: number, timeline: number}[]} |
|
* List of Objects with segment timing and duration info |
|
*/ |
|
|
|
|
|
var parseByDuration = function parseByDuration(attributes) {
  var type = attributes.type === undefined ? 'static' : attributes.type;
  var duration = attributes.duration;
  var timescale = attributes.timescale === undefined ? 1 : attributes.timescale;
  var sourceDuration = attributes.sourceDuration;

  // pick the static or dynamic segment-number range, then expand it into
  // segment timing objects
  var bounds = segmentRange[type](attributes);
  var segments = range(bounds.start, bounds.end).map(toSegments(attributes));

  if (type === 'static') {
    // the final segment of a static presentation may be shorter than the
    // nominal segment duration; trim it to the remaining source duration
    var lastIndex = segments.length - 1;
    segments[lastIndex].duration = sourceDuration - duration / timescale * lastIndex;
  }

  return segments;
};
|
|
|
// Matches DASH SegmentTemplate identifiers such as $Number$, $Time%05d$ and
// the escape sequence $$ (captured groups: identifier, format tag, width).
// The identifier class is [A-Za-z]: the previous [A-z] range accidentally
// also matched the ASCII characters between 'Z' and 'a' ([ \ ] ^ _ `).
// All recognized identifiers (RepresentationID, Number, Bandwidth, Time)
// are pure letters, and unknown identifiers are left untouched by the
// replacement callback, so output is unchanged for valid templates.
var identifierPattern = /\$([A-Za-z]*)(?:(%0)([0-9]+)d)?\$/g;
|
/** |
|
* Replaces template identifiers with corresponding values. To be used as the callback |
|
* for String.prototype.replace |
|
* |
|
* @name replaceCallback |
|
* @function |
|
* @param {string} match |
|
* Entire match of identifier |
|
* @param {string} identifier |
|
* Name of matched identifier |
|
* @param {string} format |
|
* Format tag string. Its presence indicates that padding is expected |
|
* @param {string} width |
|
* Desired length of the replaced value. Values less than this width shall be left |
|
* zero padded |
|
* @return {string} |
|
* Replacement for the matched identifier |
|
*/ |
|
|
|
/** |
|
* Returns a function to be used as a callback for String.prototype.replace to replace |
|
* template identifiers |
|
* |
|
 * @param {Object} values
|
* Object containing values that shall be used to replace known identifiers |
|
* @param {number} values.RepresentationID |
|
* Value of the Representation@id attribute |
|
* @param {number} values.Number |
|
* Number of the corresponding segment |
|
* @param {number} values.Bandwidth |
|
* Value of the Representation@bandwidth attribute. |
|
* @param {number} values.Time |
|
* Timestamp value of the corresponding segment |
|
* @return {replaceCallback} |
|
* Callback to be used with String.prototype.replace to replace identifiers |
|
*/ |
|
|
|
var identifierReplacement = function identifierReplacement(values) {
  return function (match, identifier, format, width) {
    // "$$" is the escape sequence for a literal dollar sign
    if (match === '$$') {
      return '$';
    }

    // leave unknown identifiers untouched
    if (typeof values[identifier] === 'undefined') {
      return match;
    }

    var value = '' + values[identifier];

    // Format tag shall not be present with RepresentationID
    if (identifier === 'RepresentationID') {
      return value;
    }

    // no format tag means no padding (minimum width of 1)
    var padWidth = format ? parseInt(width, 10) : 1;

    if (value.length >= padWidth) {
      return value;
    }

    // left-pad with zeros up to the requested width
    var zeros = new Array(padWidth - value.length + 1).join('0');
    return "" + zeros + value;
  };
};
|
/** |
|
* Constructs a segment url from a template string |
|
* |
|
* @param {string} url |
|
* Template string to construct url from |
|
 * @param {Object} values
|
* Object containing values that shall be used to replace known identifiers |
|
* @param {number} values.RepresentationID |
|
* Value of the Representation@id attribute |
|
* @param {number} values.Number |
|
* Number of the corresponding segment |
|
* @param {number} values.Bandwidth |
|
* Value of the Representation@bandwidth attribute. |
|
* @param {number} values.Time |
|
* Timestamp value of the corresponding segment |
|
* @return {string} |
|
* Segment url with identifiers replaced |
|
*/ |
|
|
|
|
|
var constructTemplateUrl = function constructTemplateUrl(url, values) {
  // expand every $Identifier$ (with optional %0<width>d padding) in the
  // template using the supplied values
  var replaceIdentifier = identifierReplacement(values);

  return url.replace(identifierPattern, replaceIdentifier);
};
|
/** |
|
* Generates a list of objects containing timing and duration information about each |
|
* segment needed to generate segment uris and the complete segment object |
|
* |
|
* @param {Object} attributes |
|
* Object containing all inherited attributes from parent elements with attribute |
|
* names as keys |
|
* @param {Object[]|undefined} segmentTimeline |
|
* List of objects representing the attributes of each S element contained within |
|
* the SegmentTimeline element |
|
* @return {{number: number, duration: number, time: number, timeline: number}[]} |
|
* List of Objects with segment timing and duration info |
|
*/ |
|
|
|
|
|
var parseTemplateInfo = function parseTemplateInfo(attributes, segmentTimeline) {
  if (attributes.duration) {
    // an explicit @duration attribute describes evenly sized segments
    return parseByDuration(attributes);
  }

  if (segmentTimeline) {
    // a SegmentTimeline lists each segment's timing explicitly
    return parseByTimeline(attributes, segmentTimeline);
  }

  // if neither @duration or SegmentTimeline are present, then there shall
  // be exactly one media segment spanning the whole source
  return [{
    number: attributes.startNumber || 1,
    duration: attributes.sourceDuration,
    time: 0,
    timeline: attributes.periodIndex
  }];
};
|
/** |
|
* Generates a list of segments using information provided by the SegmentTemplate element |
|
* |
|
* @param {Object} attributes |
|
* Object containing all inherited attributes from parent elements with attribute |
|
* names as keys |
|
* @param {Object[]|undefined} segmentTimeline |
|
* List of objects representing the attributes of each S element contained within |
|
* the SegmentTimeline element |
|
* @return {Object[]} |
|
* List of segment objects |
|
*/ |
|
|
|
|
|
var segmentsFromTemplate = function segmentsFromTemplate(attributes, segmentTimeline) {
  // values that are constant for every segment in this representation
  var templateValues = {
    RepresentationID: attributes.id,
    Bandwidth: attributes.bandwidth || 0
  };

  var initialization = attributes.initialization === undefined ? {
    sourceURL: '',
    range: ''
  } : attributes.initialization;

  // the init segment is shared by every media segment via `map`
  var mapSegment = urlTypeToSegment({
    baseUrl: attributes.baseUrl,
    source: constructTemplateUrl(initialization.sourceURL, templateValues),
    range: initialization.range
  });

  var segments = parseTemplateInfo(attributes, segmentTimeline);

  return segments.map(function (segment) {
    // $Number$ and $Time$ vary per segment
    templateValues.Number = segment.number;
    templateValues.Time = segment.time;
    var uri = constructTemplateUrl(attributes.media || '', templateValues);

    return {
      uri: uri,
      timeline: segment.timeline,
      duration: segment.duration,
      resolvedUri: resolveUrl(attributes.baseUrl || '', uri),
      map: mapSegment,
      number: segment.number
    };
  });
};
|
|
|
// Error codes surfaced while parsing a DASH MPD manifest; each value
// doubles as the Error message so callers can match on it.
var errors = {
  INVALID_NUMBER_OF_PERIOD: 'INVALID_NUMBER_OF_PERIOD',
  DASH_EMPTY_MANIFEST: 'DASH_EMPTY_MANIFEST',
  DASH_INVALID_XML: 'DASH_INVALID_XML',
  NO_BASE_URL: 'NO_BASE_URL',
  MISSING_SEGMENT_INFORMATION: 'MISSING_SEGMENT_INFORMATION',
  SEGMENT_TIME_UNSPECIFIED: 'SEGMENT_TIME_UNSPECIFIED',
  UNSUPPORTED_UTC_TIMING_SCHEME: 'UNSUPPORTED_UTC_TIMING_SCHEME'
};
|
/** |
|
* Converts a <SegmentUrl> (of type URLType from the DASH spec 5.3.9.2 Table 14) |
|
* to an object that matches the output of a segment in videojs/mpd-parser |
|
* |
|
* @param {Object} attributes |
|
* Object containing all inherited attributes from parent elements with attribute |
|
* names as keys |
|
* @param {Object} segmentUrl |
|
* <SegmentURL> node to translate into a segment object |
|
* @return {Object} translated segment object |
|
*/ |
|
|
|
var SegmentURLToSegmentObject = function SegmentURLToSegmentObject(attributes, segmentUrl) {
  var baseUrl = attributes.baseUrl;
  var initialization = attributes.initialization === undefined ? {} : attributes.initialization;

  // initialization segment, attached to the media segment via `map`
  var initSegment = urlTypeToSegment({
    baseUrl: baseUrl,
    source: initialization.sourceURL,
    range: initialization.range
  });

  // the media segment itself, described by the <SegmentURL> node
  var segment = urlTypeToSegment({
    baseUrl: baseUrl,
    source: segmentUrl.media,
    range: segmentUrl.mediaRange
  });

  segment.map = initSegment;
  return segment;
};
|
/** |
|
* Generates a list of segments using information provided by the SegmentList element |
|
* SegmentList (DASH SPEC Section 5.3.9.3.2) contains a set of <SegmentURL> nodes. Each |
|
* node should be translated into segment. |
|
* |
|
* @param {Object} attributes |
|
* Object containing all inherited attributes from parent elements with attribute |
|
* names as keys |
|
* @param {Object[]|undefined} segmentTimeline |
|
* List of objects representing the attributes of each S element contained within |
|
* the SegmentTimeline element |
|
* @return {Object.<Array>} list of segments |
|
*/ |
|
|
|
|
|
var segmentsFromList = function segmentsFromList(attributes, segmentTimeline) {
  var duration = attributes.duration;
  var segmentUrls = attributes.segmentUrls === undefined ? [] : attributes.segmentUrls;

  // Per spec (5.3.9.2.1) exactly one of @duration or SegmentTimeline must
  // be present; neither (no way to determine duration) and both (outside
  // of spec) are errors.
  var hasDuration = Boolean(duration);
  var hasTimeline = Boolean(segmentTimeline);

  if (hasDuration === hasTimeline) {
    throw new Error(errors.SEGMENT_TIME_UNSPECIFIED);
  }

  var segmentUrlMap = segmentUrls.map(function (segmentUrlObject) {
    return SegmentURLToSegmentObject(attributes, segmentUrlObject);
  });

  var segmentTimeInfo = hasDuration ? parseByDuration(attributes) : parseByTimeline(attributes, segmentTimeline);

  // Attach timing info to each <SegmentURL>; timeline entries without a
  // corresponding URL (the timeline described more elements than we have
  // SegmentURLs for) are dropped by the trailing filter.
  return segmentTimeInfo.map(function (segmentTime, index) {
    var segment = segmentUrlMap[index];

    if (!segment) {
      return segment;
    }

    segment.timeline = segmentTime.timeline;
    segment.duration = segmentTime.duration;
    segment.number = segmentTime.number;
    return segment;
  }).filter(function (segment) {
    return segment;
  });
};
|
/**
 * Translates a SegmentBase element into a single-segment list.
 * SegmentBase addresses media through the Representation's BaseURL itself
 * (optionally limited to a byte range), so at most one media segment is
 * produced.
 *
 * @param {Object} attributes
 *        Object containing all inherited attributes from parent elements with attribute
 *        names as keys
 * @return {Object.<Array>} list containing exactly one segment
 */
var segmentsFromBase = function segmentsFromBase(attributes) {
  var baseUrl = attributes.baseUrl,
      _attributes$initializ = attributes.initialization,
      initialization = _attributes$initializ === void 0 ? {} : _attributes$initializ,
      sourceDuration = attributes.sourceDuration,
      _attributes$timescale = attributes.timescale,
      timescale = _attributes$timescale === void 0 ? 1 : _attributes$timescale,
      _attributes$indexRang = attributes.indexRange,
      indexRange = _attributes$indexRang === void 0 ? '' : _attributes$indexRang,
      duration = attributes.duration; // base url is required for SegmentBase to work, per spec (Section 5.3.9.2.1)

  if (!baseUrl) {
    throw new Error(errors.NO_BASE_URL);
  }

  // Initialization segment described by the <Initialization> child (or an
  // empty/default segment object when none was provided).
  var initSegment = urlTypeToSegment({
    baseUrl: baseUrl,
    source: initialization.sourceURL,
    range: initialization.range
  });
  // The single media segment: the BaseURL itself, restricted to @indexRange
  // when one was specified.
  var segment = urlTypeToSegment({
    baseUrl: baseUrl,
    source: baseUrl,
    range: indexRange
  });
  segment.map = initSegment; // If there is a duration, use it, otherwise use the given duration of the source
  // (since SegmentBase is only for one total segment)

  if (duration) {
    var segmentTimeInfo = parseByDuration(attributes);

    if (segmentTimeInfo.length) {
      segment.duration = segmentTimeInfo[0].duration;
      segment.timeline = segmentTimeInfo[0].timeline;
    }
  } else if (sourceDuration) {
    // Fall back to the whole presentation duration, converted from
    // @timescale units to seconds.
    segment.duration = sourceDuration / timescale;
    segment.timeline = 0;
  } // This is used for mediaSequence

  segment.number = 0;
  return [segment];
};
|
|
|
/**
 * Builds the segment list for a single representation, dispatching to the
 * appropriate generator for the segment information present (SegmentTemplate,
 * SegmentBase or SegmentList), and normalizes the resulting @duration
 * attribute to seconds.
 *
 * @param {Object} _ref
 *        Object with the representation's inherited `attributes` and its
 *        `segmentInfo` (see getSegmentInformation)
 * @return {Object}
 *         Object with merged `attributes` and, when segment information was
 *         available, the generated `segments` array
 */
var generateSegments = function generateSegments(_ref) {
  var attributes = _ref.attributes,
      segmentInfo = _ref.segmentInfo;
  var segmentsFn;
  var segmentAttributes;

  // Pick the generator matching whichever segment description is present.
  if (segmentInfo.template) {
    segmentsFn = segmentsFromTemplate;
    segmentAttributes = merge(attributes, segmentInfo.template);
  } else if (segmentInfo.base) {
    segmentsFn = segmentsFromBase;
    segmentAttributes = merge(attributes, segmentInfo.base);
  } else if (segmentInfo.list) {
    segmentsFn = segmentsFromList;
    segmentAttributes = merge(attributes, segmentInfo.list);
  }

  // No segment description at all: only the attributes can be returned.
  if (!segmentsFn) {
    return {
      attributes: attributes
    };
  }

  var segments = segmentsFn(segmentAttributes, segmentInfo.timeline);

  // The @duration attribute feeds the playlist's targetDuration, which must
  // be in seconds. The segment list is already generated, so @duration no
  // longer needs to stay in @timescale units.
  if (segmentAttributes.duration) {
    var scale = segmentAttributes.timescale === undefined ? 1 : segmentAttributes.timescale;
    segmentAttributes.duration = segmentAttributes.duration / scale;
  } else if (segments.length) {
    // No @duration attribute: use the largest (rounded-up) segment duration
    // as the target duration.
    segmentAttributes.duration = segments.reduce(function (max, segment) {
      return Math.max(max, Math.ceil(segment.duration));
    }, 0);
  } else {
    segmentAttributes.duration = 0;
  }

  return {
    attributes: segmentAttributes,
    segments: segments
  };
};
|
|
|
/**
 * Maps every representation-information object to a playlist-shaped object
 * via generateSegments.
 *
 * @param {Object[]} representations
 *        List of representation information objects
 * @return {Object[]} playlist objects with attributes and segments
 */
var toPlaylists = function toPlaylists(representations) {
  return representations.map(function (representation) {
    return generateSegments(representation);
  });
};
|
|
|
/**
 * Returns the direct children of `element` whose tag name matches `name`.
 *
 * @param {Node} element
 *        Parent node to search
 * @param {string} name
 *        Tag name to match exactly
 * @return {Node[]} matching child nodes
 */
var findChildren = function findChildren(element, name) {
  return from(element.childNodes).filter(function (child) {
    return child.tagName === name;
  });
};
|
|
|
/**
 * Returns the whitespace-trimmed text content of a node.
 *
 * @param {Node} element
 *        Node whose text content should be read
 * @return {string} trimmed text content
 */
var getContent = function getContent(element) {
  var text = element.textContent;
  return text.trim();
};
|
|
|
/**
 * Parses an ISO 8601 duration string (e.g. "P10Y10M10DT10H10M10.1S") into
 * seconds, using the nominal 365-day year and 30-day month.
 *
 * @param {string} str
 *        ISO 8601 duration string
 * @return {number} the duration in seconds, or 0 when the string does not
 *         contain a parseable duration
 */
var parseDuration = function parseDuration(str) {
  // Seconds per designator, ordered to match the capture groups of the
  // regex below: years, months, days, hours, minutes, seconds.
  var UNIT_SECONDS = [
    365 * 24 * 60 * 60, // year
    30 * 24 * 60 * 60,  // month
    24 * 60 * 60,       // day
    60 * 60,            // hour
    60,                 // minute
    1                   // second
  ];
  // e.g. P10Y10M10DT10H10M10.1S
  var durationRegex = /P(?:(\d*)Y)?(?:(\d*)M)?(?:(\d*)D)?(?:T(?:(\d*)H)?(?:(\d*)M)?(?:([\d.]*)S)?)?/;
  var match = durationRegex.exec(str);

  if (!match) {
    return 0;
  }

  // Sum each captured component scaled by its unit; missing components
  // contribute zero.
  return match.slice(1).reduce(function (total, value, index) {
    return total + parseFloat(value || 0) * UNIT_SECONDS[index];
  }, 0);
};
|
|
|
/**
 * Parses an ISO 8601 date string into milliseconds since the unix epoch,
 * treating timezone-less timestamps as UTC.
 *
 * @param {string} str
 *        ISO 8601 date string
 * @return {number} milliseconds since the epoch (NaN when unparseable)
 */
var parseDate = function parseDate(str) {
  // Date-time with no timezone designator: YYYY-MM-DDThh:mm:ss[.s+]
  var dateRegex = /^\d+-\d+-\d+T\d+:\d+:\d+(\.\d+)?$/;
  // Per ISO 8601 a missing timezone would mean local time, but DASH expects
  // UTC, so append 'Z' to force Date.parse to interpret the value as UTC.
  var normalized = dateRegex.test(str) ? str + 'Z' : str;
  return Date.parse(normalized);
};
|
|
|
// Typed parsers for known MPD attribute names; any attribute without an
// entry here falls through to DEFAULT (identity).
var parsers = {
  /**
   * Specifies the duration of the entire Media Presentation. Format is a duration string
   * as specified in ISO 8601
   *
   * @param {string} value
   *        value of attribute as a string
   * @return {number}
   *         The duration in seconds
   */
  mediaPresentationDuration: function mediaPresentationDuration(value) {
    return parseDuration(value);
  },

  /**
   * Specifies the Segment availability start time for all Segments referred to in this
   * MPD. For a dynamic manifest, it specifies the anchor for the earliest availability
   * time. Format is a date string as specified in ISO 8601
   *
   * @param {string} value
   *        value of attribute as a string
   * @return {number}
   *         The date as seconds from the unix epoch
   */
  availabilityStartTime: function availabilityStartTime(value) {
    // parseDate yields milliseconds; callers expect seconds.
    return parseDate(value) / 1000;
  },

  /**
   * Specifies the smallest period between potential changes to the MPD. Format is a
   * duration string as specified in ISO 8601
   *
   * @param {string} value
   *        value of attribute as a string
   * @return {number}
   *         The duration in seconds
   */
  minimumUpdatePeriod: function minimumUpdatePeriod(value) {
    return parseDuration(value);
  },

  /**
   * Specifies the duration of the smallest time shifting buffer for any Representation
   * in the MPD. Format is a duration string as specified in ISO 8601
   *
   * @param {string} value
   *        value of attribute as a string
   * @return {number}
   *         The duration in seconds
   */
  timeShiftBufferDepth: function timeShiftBufferDepth(value) {
    return parseDuration(value);
  },

  /**
   * Specifies the PeriodStart time of the Period relative to the availabilityStartTime.
   * Format is a duration string as specified in ISO 8601
   *
   * @param {string} value
   *        value of attribute as a string
   * @return {number}
   *         The duration in seconds
   */
  start: function start(value) {
    return parseDuration(value);
  },

  /**
   * Specifies the width of the visual presentation
   *
   * @param {string} value
   *        value of attribute as a string
   * @return {number}
   *         The parsed width
   */
  width: function width(value) {
    return parseInt(value, 10);
  },

  /**
   * Specifies the height of the visual presentation
   *
   * @param {string} value
   *        value of attribute as a string
   * @return {number}
   *         The parsed height
   */
  height: function height(value) {
    return parseInt(value, 10);
  },

  /**
   * Specifies the bitrate of the representation
   *
   * @param {string} value
   *        value of attribute as a string
   * @return {number}
   *         The parsed bandwidth
   */
  bandwidth: function bandwidth(value) {
    return parseInt(value, 10);
  },

  /**
   * Specifies the number of the first Media Segment in this Representation in the Period
   *
   * @param {string} value
   *        value of attribute as a string
   * @return {number}
   *         The parsed number
   */
  startNumber: function startNumber(value) {
    return parseInt(value, 10);
  },

  /**
   * Specifies the timescale in units per seconds
   *
   * @param {string} value
   *        value of attribute as a string
   * @return {number}
   *         The parsed timescale
   */
  timescale: function timescale(value) {
    return parseInt(value, 10);
  },

  /**
   * Specifies the constant approximate Segment duration
   * NOTE: The <Period> element also contains an @duration attribute. This duration
   *       specifies the duration of the Period. This attribute is currently not
   *       supported by the rest of the parser, however we still check for it to prevent
   *       errors.
   *
   * @param {string} value
   *        value of attribute as a string
   * @return {number}
   *         The parsed duration
   */
  duration: function duration(value) {
    var parsedValue = parseInt(value, 10);

    // A Period's @duration is an ISO 8601 string rather than an integer;
    // fall back to duration parsing when the value isn't numeric.
    if (isNaN(parsedValue)) {
      return parseDuration(value);
    }

    return parsedValue;
  },

  /**
   * Specifies the Segment duration, in units of the value of the @timescale.
   *
   * @param {string} value
   *        value of attribute as a string
   * @return {number}
   *         The parsed duration
   */
  d: function d(value) {
    return parseInt(value, 10);
  },

  /**
   * Specifies the MPD start time, in @timescale units, the first Segment in the series
   * starts relative to the beginning of the Period
   *
   * @param {string} value
   *        value of attribute as a string
   * @return {number}
   *         The parsed time
   */
  t: function t(value) {
    return parseInt(value, 10);
  },

  /**
   * Specifies the repeat count of the number of following contiguous Segments with the
   * same duration expressed by the value of @d
   *
   * @param {string} value
   *        value of attribute as a string
   * @return {number}
   *         The parsed number
   */
  r: function r(value) {
    return parseInt(value, 10);
  },

  /**
   * Default parser for all other attributes. Acts as a no-op and just returns the value
   * as a string
   *
   * @param {string} value
   *        value of attribute as a string
   * @return {string}
   *         Unparsed value
   */
  DEFAULT: function DEFAULT(value) {
    return value;
  }
};
|
/**
 * Reads every attribute off the provided node, runs each value through the
 * matching typed parser (falling back to the identity DEFAULT parser), and
 * returns them as a plain object keyed by attribute name.
 *
 * @param {Node} el
 *        The node to parse attributes from
 * @return {Object}
 *         Object with all attributes of el parsed
 */
var parseAttributes$1 = function parseAttributes(el) {
  if (!(el && el.attributes)) {
    return {};
  }

  var parsed = {};

  from(el.attributes).forEach(function (attribute) {
    var parseFn = parsers[attribute.name] || parsers.DEFAULT;
    parsed[attribute.name] = parseFn(attribute.value);
  });

  return parsed;
};
|
|
|
/**
 * Decodes a base64 string into a Uint8Array of its raw bytes.
 *
 * @param {string} b64Text
 *        base64-encoded input
 * @return {Uint8Array} decoded bytes
 */
function decodeB64ToUint8Array(b64Text) {
  // atob yields a "binary string": one character per byte.
  var binaryString = window$1.atob(b64Text);
  var bytes = new Uint8Array(binaryString.length);

  for (var i = 0; i < binaryString.length; i++) {
    bytes[i] = binaryString.charCodeAt(i);
  }

  return bytes;
}
|
|
|
// Maps DASH ContentProtection @schemeIdUri UUID urns to the corresponding
// EME key-system identifier strings.
var keySystemsMap = {
  'urn:uuid:1077efec-c0b2-4d02-ace3-3c1e52e2fb4b': 'org.w3.clearkey',
  'urn:uuid:edef8ba9-79d6-4ace-a3c8-27dcd51d21ed': 'com.widevine.alpha',
  'urn:uuid:9a04f079-9840-4286-ab92-e65be0885f95': 'com.microsoft.playready',
  'urn:uuid:f239e769-efa3-4850-9c16-a903c6932efb': 'com.adobe.primetime'
};
|
/**
 * Builds a list of urls that is the product of the reference urls and BaseURL
 * values.
 *
 * @param {string[]} referenceUrls
 *        List of reference urls to resolve against
 * @param {Node[]} baseUrlElements
 *        List of BaseURL nodes from the mpd
 * @return {string[]}
 *         List of resolved urls
 */
var buildBaseUrls = function buildBaseUrls(referenceUrls, baseUrlElements) {
  // With no BaseURL elements, the references pass through unchanged.
  if (!baseUrlElements.length) {
    return referenceUrls;
  }

  var resolved = referenceUrls.map(function (reference) {
    return baseUrlElements.map(function (baseUrlElement) {
      return resolveUrl(reference, getContent(baseUrlElement));
    });
  });

  return flatten(resolved);
};
|
/**
 * Contains all Segment information for its containing AdaptationSet
 *
 * @typedef {Object} SegmentInformation
 * @property {Object|undefined} template
 *           Contains the attributes for the SegmentTemplate node
 * @property {Object[]|undefined} timeline
 *           Contains a list of attributes for each S node within the SegmentTimeline node
 * @property {Object|undefined} list
 *           Contains the attributes for the SegmentList node
 * @property {Object|undefined} base
 *           Contains the attributes for the SegmentBase node
 */

/**
 * Returns all available Segment information contained within the AdaptationSet node
 *
 * @param {Node} adaptationSet
 *        The AdaptationSet node to get Segment information from
 * @return {SegmentInformation}
 *         The Segment information contained within the provided AdaptationSet
 */
var getSegmentInformation = function getSegmentInformation(adaptationSet) {
  var segmentTemplate = findChildren(adaptationSet, 'SegmentTemplate')[0];
  var segmentList = findChildren(adaptationSet, 'SegmentList')[0];
  // Tag each <SegmentURL> so later stages can identify its origin.
  var segmentUrls = segmentList && findChildren(segmentList, 'SegmentURL').map(function (s) {
    return merge({
      tag: 'SegmentURL'
    }, parseAttributes$1(s));
  });
  var segmentBase = findChildren(adaptationSet, 'SegmentBase')[0];
  // A SegmentTimeline may appear under either SegmentList or SegmentTemplate.
  var segmentTimelineParentNode = segmentList || segmentTemplate;
  var segmentTimeline = segmentTimelineParentNode && findChildren(segmentTimelineParentNode, 'SegmentTimeline')[0];
  // An <Initialization> node may appear under SegmentList, SegmentBase or
  // SegmentTemplate.
  var segmentInitializationParentNode = segmentList || segmentBase || segmentTemplate;
  var segmentInitialization = segmentInitializationParentNode && findChildren(segmentInitializationParentNode, 'Initialization')[0]; // SegmentTemplate is handled slightly differently, since it can have both
  // @initialization and an <Initialization> node. @initialization can be templated,
  // while the node can have a url and range specified. If the <SegmentTemplate> has
  // both @initialization and an <Initialization> subelement we opt to override with
  // the node, as this interaction is not defined in the spec.

  var template = segmentTemplate && parseAttributes$1(segmentTemplate);

  if (template && segmentInitialization) {
    template.initialization = segmentInitialization && parseAttributes$1(segmentInitialization);
  } else if (template && template.initialization) {
    // If it is @initialization we convert it to an object since this is the format that
    // later functions will rely on for the initialization segment. This is only valid
    // for <SegmentTemplate>
    template.initialization = {
      sourceURL: template.initialization
    };
  }

  var segmentInfo = {
    template: template,
    timeline: segmentTimeline && findChildren(segmentTimeline, 'S').map(function (s) {
      return parseAttributes$1(s);
    }),
    list: segmentList && merge(parseAttributes$1(segmentList), {
      segmentUrls: segmentUrls,
      initialization: parseAttributes$1(segmentInitialization)
    }),
    base: segmentBase && merge(parseAttributes$1(segmentBase), {
      initialization: parseAttributes$1(segmentInitialization)
    })
  };

  // Remove keys for segment types that weren't present so consumers can
  // simply test for key existence.
  Object.keys(segmentInfo).forEach(function (key) {
    if (!segmentInfo[key]) {
      delete segmentInfo[key];
    }
  });
  return segmentInfo;
};
|
/**
 * Contains Segment information and attributes needed to construct a Playlist
 * object from a Representation
 *
 * @typedef {Object} RepresentationInformation
 * @property {SegmentInformation} segmentInfo
 *           Segment information for this Representation
 * @property {Object} attributes
 *           Inherited attributes for this Representation
 */

/**
 * Maps a Representation node to an object containing Segment information and
 * attributes
 *
 * @name inheritBaseUrlsCallback
 * @function
 * @param {Node} representation
 *        Representation node from the mpd
 * @return {RepresentationInformation}
 *         Representation information needed to construct a Playlist object
 */

/**
 * Returns a callback for Array.prototype.map for mapping Representation nodes
 * to Segment information and attributes using inherited BaseURL nodes.
 *
 * @param {Object} adaptationSetAttributes
 *        Contains attributes inherited by the AdaptationSet
 * @param {string[]} adaptationSetBaseUrls
 *        Contains list of resolved base urls inherited by the AdaptationSet
 * @param {SegmentInformation} adaptationSetSegmentInfo
 *        Contains Segment information for the AdaptationSet
 * @return {inheritBaseUrlsCallback}
 *         Callback map function
 */
var inheritBaseUrls = function inheritBaseUrls(adaptationSetAttributes, adaptationSetBaseUrls, adaptationSetSegmentInfo) {
  return function (representation) {
    var repBaseUrlElements = findChildren(representation, 'BaseURL');
    var repBaseUrls = buildBaseUrls(adaptationSetBaseUrls, repBaseUrlElements);
    var attributes = merge(adaptationSetAttributes, parseAttributes$1(representation));
    var representationSegmentInfo = getSegmentInformation(representation);

    // One entry per resolved base url, each carrying the merged segment info
    // and the attributes specialized with that base url.
    return repBaseUrls.map(function (baseUrl) {
      var segmentInfo = merge(adaptationSetSegmentInfo, representationSegmentInfo);
      var attributesWithBaseUrl = merge(attributes, {
        baseUrl: baseUrl
      });

      return {
        segmentInfo: segmentInfo,
        attributes: attributesWithBaseUrl
      };
    });
  };
};
|
/**
 * Transforms a series of content protection nodes into an object containing
 * pssh data keyed by recognized key system.
 *
 * @param {Node[]} contentProtectionNodes
 *        Content protection nodes
 * @return {Object}
 *         Object containing pssh data by key system
 */
var generateKeySystemInformation = function generateKeySystemInformation(contentProtectionNodes) {
  return contentProtectionNodes.reduce(function (acc, node) {
    var attributes = parseAttributes$1(node);
    var keySystem = keySystemsMap[attributes.schemeIdUri];

    // Unrecognized schemes are skipped entirely.
    if (!keySystem) {
      return acc;
    }

    acc[keySystem] = {
      attributes: attributes
    };
    var psshNode = findChildren(node, 'cenc:pssh')[0];

    if (psshNode) {
      var pssh = getContent(psshNode);
      acc[keySystem].pssh = pssh && decodeB64ToUint8Array(pssh);
    }

    return acc;
  }, {});
};
|
/**
 * Maps an AdaptationSet node to a list of Representation information objects
 *
 * @name toRepresentationsCallback
 * @function
 * @param {Node} adaptationSet
 *        AdaptationSet node from the mpd
 * @return {RepresentationInformation[]}
 *         List of objects containing Representation information
 */

/**
 * Returns a callback for Array.prototype.map for mapping AdaptationSet nodes
 * to a list of Representation information objects.
 *
 * @param {Object} periodAttributes
 *        Contains attributes inherited by the Period
 * @param {string[]} periodBaseUrls
 *        Contains list of resolved base urls inherited by the Period
 * @param {string[]} periodSegmentInfo
 *        Contains Segment Information at the period level
 * @return {toRepresentationsCallback}
 *         Callback map function
 */
var toRepresentations = function toRepresentations(periodAttributes, periodBaseUrls, periodSegmentInfo) {
  return function (adaptationSet) {
    var adaptationSetAttributes = parseAttributes$1(adaptationSet);
    var adaptationSetBaseUrls = buildBaseUrls(periodBaseUrls, findChildren(adaptationSet, 'BaseURL'));
    var roleAttributes = {
      role: parseAttributes$1(findChildren(adaptationSet, 'Role')[0])
    };
    var attrs = merge(periodAttributes, adaptationSetAttributes, roleAttributes);
    var contentProtection = generateKeySystemInformation(findChildren(adaptationSet, 'ContentProtection'));

    // Only attach contentProtection when at least one key system was found.
    if (Object.keys(contentProtection).length) {
      attrs = merge(attrs, {
        contentProtection: contentProtection
      });
    }

    var adaptationSetSegmentInfo = merge(periodSegmentInfo, getSegmentInformation(adaptationSet));
    var representationCallback = inheritBaseUrls(attrs, adaptationSetBaseUrls, adaptationSetSegmentInfo);
    var representations = findChildren(adaptationSet, 'Representation');

    return flatten(representations.map(representationCallback));
  };
};
|
/**
 * Maps a Period node to a list of Representation information objects for all
 * AdaptationSet nodes contained within the Period
 *
 * @name toAdaptationSetsCallback
 * @function
 * @param {Node} period
 *        Period node from the mpd
 * @param {number} periodIndex
 *        Index of the Period within the mpd
 * @return {RepresentationInformation[]}
 *         List of objects containing Representation information
 */

/**
 * Returns a callback for Array.prototype.map for mapping Period nodes to a
 * list of Representation information objects.
 *
 * @param {Object} mpdAttributes
 *        Contains attributes inherited by the mpd
 * @param {string[]} mpdBaseUrls
 *        Contains list of resolved base urls inherited by the mpd
 * @return {toAdaptationSetsCallback}
 *         Callback map function
 */
var toAdaptationSets = function toAdaptationSets(mpdAttributes, mpdBaseUrls) {
  return function (period, index) {
    var periodBaseUrls = buildBaseUrls(mpdBaseUrls, findChildren(period, 'BaseURL'));
    var parsedPeriodId = parseInt(parseAttributes$1(period).id, 10);
    // Period@id is not guaranteed to be numeric; when it isn't, fall back to
    // the Period's position within the mpd.
    var periodIndex = window$1.isNaN(parsedPeriodId) ? index : parsedPeriodId;
    var periodAttributes = merge(mpdAttributes, {
      periodIndex: periodIndex
    });
    var periodSegmentInfo = getSegmentInformation(period);
    var representationsPerSet = findChildren(period, 'AdaptationSet').map(toRepresentations(periodAttributes, periodBaseUrls, periodSegmentInfo));

    return flatten(representationsPerSet);
  };
};
|
/**
 * Traverses the mpd xml tree to generate a list of Representation information
 * objects that have inherited attributes from parent nodes.
 *
 * @param {Node} mpd
 *        The root node of the mpd
 * @param {Object} options
 *        Available options for inheritAttributes
 * @param {string} options.manifestUri
 *        The uri source of the mpd
 * @param {number} options.NOW
 *        Current time per DASH IOP. Default is current time in ms since epoch
 * @param {number} options.clientOffset
 *        Client time difference from NOW (in milliseconds)
 * @return {RepresentationInformation[]}
 *         List of objects containing Representation information
 */
var inheritAttributes = function inheritAttributes(mpd, options) {
  if (options === void 0) {
    options = {};
  }

  var manifestUri = options.manifestUri === void 0 ? '' : options.manifestUri;
  var NOW = options.NOW === void 0 ? Date.now() : options.NOW;
  var clientOffset = options.clientOffset === void 0 ? 0 : options.clientOffset;
  var periods = findChildren(mpd, 'Period');

  // An MPD without at least one Period is unusable.
  if (!periods.length) {
    throw new Error(errors.INVALID_NUMBER_OF_PERIOD);
  }

  var mpdAttributes = parseAttributes$1(mpd);
  var mpdBaseUrls = buildBaseUrls([manifestUri], findChildren(mpd, 'BaseURL'));
  mpdAttributes.sourceDuration = mpdAttributes.mediaPresentationDuration || 0;
  mpdAttributes.NOW = NOW;
  mpdAttributes.clientOffset = clientOffset;
  return flatten(periods.map(toAdaptationSets(mpdAttributes, mpdBaseUrls)));
};
|
|
|
/**
 * Parses an MPD manifest string into its root <MPD> DOM node.
 *
 * @param {string} manifestString
 *        XML string of the MPD manifest
 * @return {Node} the documentElement of the parsed manifest
 * @throws {Error} DASH_EMPTY_MANIFEST when the string is empty,
 *         DASH_INVALID_XML when the root isn't <MPD> or parsing failed
 */
var stringToMpdXml = function stringToMpdXml(manifestString) {
  if (manifestString === '') {
    throw new Error(errors.DASH_EMPTY_MANIFEST);
  }

  var xml = new window$1.DOMParser().parseFromString(manifestString, 'application/xml');
  var mpd = null;

  if (xml && xml.documentElement.tagName === 'MPD') {
    mpd = xml.documentElement;
  }

  // Reject documents whose root isn't <MPD> as well as documents the DOM
  // parser flagged with an embedded <parsererror> element.
  if (!mpd || mpd.getElementsByTagName('parsererror').length > 0) {
    throw new Error(errors.DASH_INVALID_XML);
  }

  return mpd;
};
|
/**
 * Looks for a UTCTiming node in the mpd, returning its attributes (augmented
 * with a `method` describing how the clock should be fetched) if found.
 *
 * @param {Node} mpd
 *        The root <MPD> node of the parsed manifest
 * @return {Object|null}
 *         Attributes of the UTCTiming node specified in the manifest, or
 *         null when none is present
 * @throws {Error} UNSUPPORTED_UTC_TIMING_SCHEME for ntp/sntp and unknown
 *         schemes
 */
var parseUTCTimingScheme = function parseUTCTimingScheme(mpd) {
  var UTCTimingNode = findChildren(mpd, 'UTCTiming')[0];

  if (!UTCTimingNode) {
    return null;
  }

  var attributes = parseAttributes$1(UTCTimingNode);
  var HEAD_SCHEMES = [
    'urn:mpeg:dash:utc:http-head:2014',
    'urn:mpeg:dash:utc:http-head:2012'
  ];
  var GET_SCHEMES = [
    'urn:mpeg:dash:utc:http-xsdate:2014',
    'urn:mpeg:dash:utc:http-iso:2014',
    'urn:mpeg:dash:utc:http-xsdate:2012',
    'urn:mpeg:dash:utc:http-iso:2012'
  ];
  var DIRECT_SCHEMES = [
    'urn:mpeg:dash:utc:direct:2014',
    'urn:mpeg:dash:utc:direct:2012'
  ];

  if (HEAD_SCHEMES.indexOf(attributes.schemeIdUri) !== -1) {
    attributes.method = 'HEAD';
  } else if (GET_SCHEMES.indexOf(attributes.schemeIdUri) !== -1) {
    attributes.method = 'GET';
  } else if (DIRECT_SCHEMES.indexOf(attributes.schemeIdUri) !== -1) {
    // Direct schemes embed the clock value in the manifest itself.
    attributes.method = 'DIRECT';
    attributes.value = Date.parse(attributes.value);
  } else {
    // Covers the ntp/sntp schemes as well as anything unrecognized.
    throw new Error(errors.UNSUPPORTED_UTC_TIMING_SCHEME);
  }

  return attributes;
};
|
|
|
/**
 * Parses an MPD manifest string into an m3u8-style manifest object.
 *
 * @param {string} manifestString
 *        XML string of the MPD manifest
 * @param {Object} options
 *        Options forwarded to inheritAttributes
 * @return {Object} the manifest object
 */
var parse = function parse(manifestString, options) {
  var representations = inheritAttributes(stringToMpdXml(manifestString), options);
  return toM3u8(toPlaylists(representations));
};
|
/**
 * Parses the manifest for a UTCTiming node, returning the node's attributes
 * if found.
 *
 * @param {string} manifestString
 *        XML string of the MPD manifest
 * @return {Object|null}
 *         Attributes of UTCTiming node specified in the manifest. Null if
 *         none found
 */
var parseUTCTiming = function parseUTCTiming(manifestString) {
  var mpd = stringToMpdXml(manifestString);
  return parseUTCTimingScheme(mpd);
};
|
|
|
// Reinterprets a (possibly negative) 32-bit value as an unsigned integer
// via the zero-fill right shift.
var toUnsigned = function toUnsigned(value) {
  return value >>> 0;
};

// Namespace object mirroring the mux.js `bin` utility module.
var bin = {
  toUnsigned: toUnsigned
};
|
|
|
var toUnsigned$1 = bin.toUnsigned;

// Forward declarations for the MP4 probe helpers assigned below; declared
// up-front so the helpers can reference one another.
var _findBox, parseType, timescale, startTime, getVideoTrackIds; // Find the data for a box specified by its path
|
|
|
|
|
// Find the data (payloads) for every box matching the given path of box
// types, searching recursively from the top level of `data`.
_findBox = function findBox(data, path) {
  var results = [],
      i,
      size,
      type,
      end,
      subresults;

  if (!path.length) {
    // short-circuit the search for empty paths
    return null;
  }

  // Walk sibling boxes: each box is [4-byte size][4-byte type][payload].
  for (i = 0; i < data.byteLength;) {
    size = toUnsigned$1(data[i] << 24 | data[i + 1] << 16 | data[i + 2] << 8 | data[i + 3]);
    type = parseType(data.subarray(i + 4, i + 8));
    // Sizes of 0 or 1 fall back to scanning to the end of the data.
    end = size > 1 ? i + size : data.byteLength;

    if (type === path[0]) {
      if (path.length === 1) {
        // this is the end of the path and we've found the box we were
        // looking for
        results.push(data.subarray(i + 8, end));
      } else {
        // recursively search for the next box along the path
        subresults = _findBox(data.subarray(i + 8, end), path.slice(1));

        if (subresults.length) {
          results = results.concat(subresults);
        }
      }
    }

    i = end;
  } // we've finished searching all of data

  return results;
};
|
/**
 * Returns the string representation of an ASCII encoded four byte buffer.
 *
 * @param buffer {Uint8Array} a four-byte buffer to translate
 * @return {string} the corresponding string
 */
parseType = function parseType(buffer) {
  return String.fromCharCode(buffer[0], buffer[1], buffer[2], buffer[3]);
};
|
/**
 * Parses an MP4 initialization segment and extracts the timescale
 * values for any declared tracks. Timescale values indicate the
 * number of clock ticks per second to assume for time-based values
 * elsewhere in the MP4.
 *
 * To determine the start time of an MP4, you need two pieces of
 * information: the timescale unit and the earliest base media decode
 * time. Multiple timescales can be specified within an MP4 but the
 * base media decode time is always expressed in the timescale from
 * the media header box for the track:
 * ```
 * moov > trak > mdia > mdhd.timescale
 * ```
 * @param init {Uint8Array} the bytes of the init segment
 * @return {object} a hash of track ids to timescale values or null if
 * the init segment is malformed.
 */
timescale = function timescale(init) {
  var result = {},
      traks = _findBox(init, ['moov', 'trak']); // mdhd timescale

  return traks.reduce(function (result, trak) {
    var tkhd, version, index, id, mdhd;
    tkhd = _findBox(trak, ['tkhd'])[0];

    if (!tkhd) {
      // NOTE(review): returning null mid-reduce means a later iteration
      // would assign into null and throw — presumably every trak in a valid
      // init segment carries a tkhd; confirm upstream.
      return null;
    }

    // Read the track id; its byte offset depends on the tkhd version
    // (version 0 uses 32-bit times, version 1 uses 64-bit times).
    version = tkhd[0];
    index = version === 0 ? 12 : 20;
    id = toUnsigned$1(tkhd[index] << 24 | tkhd[index + 1] << 16 | tkhd[index + 2] << 8 | tkhd[index + 3]);
    mdhd = _findBox(trak, ['mdia', 'mdhd'])[0];

    if (!mdhd) {
      // Same caveat as the missing-tkhd case above.
      return null;
    }

    // Read the timescale from the mdhd; offset again depends on version.
    version = mdhd[0];
    index = version === 0 ? 12 : 20;
    result[id] = toUnsigned$1(mdhd[index] << 24 | mdhd[index + 1] << 16 | mdhd[index + 2] << 8 | mdhd[index + 3]);
    return result;
  }, result);
};
|
/**
 * Determine the base media decode start time, in seconds, for an MP4
 * fragment. If multiple fragments are specified, the earliest time is
 * returned.
 *
 * The base media decode time can be parsed from track fragment
 * metadata:
 * ```
 * moof > traf > tfdt.baseMediaDecodeTime
 * ```
 * It requires the timescale value from the mdhd to interpret.
 *
 * @param timescale {object} a hash of track ids to timescale values.
 * @param fragment {Uint8Array} the bytes of the media fragment
 * @return {number} the earliest base media decode start time for the
 * fragment, in seconds
 */
startTime = function startTime(timescale, fragment) {
  var trafs, baseTimes, result; // we need info from two childrend of each track fragment box

  trafs = _findBox(fragment, ['moof', 'traf']); // determine the start times for each track

  baseTimes = [].concat.apply([], trafs.map(function (traf) {
    return _findBox(traf, ['tfhd']).map(function (tfhd) {
      var id, scale, baseTime; // get the track id from the tfhd

      id = toUnsigned$1(tfhd[4] << 24 | tfhd[5] << 16 | tfhd[6] << 8 | tfhd[7]); // assume a 90kHz clock if no timescale was specified

      scale = timescale[id] || 90e3; // get the base media decode time from the tfdt

      baseTime = _findBox(traf, ['tfdt']).map(function (tfdt) {
        var version, result;
        version = tfdt[0];
        result = toUnsigned$1(tfdt[4] << 24 | tfdt[5] << 16 | tfdt[6] << 8 | tfdt[7]);

        if (version === 1) {
          // version 1 carries a 64-bit time: shift the high word up and add
          // the low word (Math.pow keeps precision beyond 32-bit shifts).
          result *= Math.pow(2, 32);
          result += toUnsigned$1(tfdt[8] << 24 | tfdt[9] << 16 | tfdt[10] << 8 | tfdt[11]);
        }

        return result;
      })[0];
      // Missing tfdt: use Infinity so this track never wins the minimum.
      baseTime = baseTime || Infinity; // convert base time to seconds

      return baseTime / scale;
    });
  })); // return the minimum

  result = Math.min.apply(null, baseTimes);
  return isFinite(result) ? result : 0;
};
|
/** |
|
* Find the trackIds of the video tracks in this source. |
|
* Found by parsing the Handler Reference and Track Header Boxes: |
|
* moov > trak > mdia > hdlr |
|
* moov > trak > tkhd |
|
* |
|
* @param {Uint8Array} init - The bytes of the init segment for this source |
|
* @return {Number[]} A list of trackIds |
|
* |
|
* @see ISO-BMFF-12/2015, Section 8.4.3 |
|
**/ |
|
|
|
|
|
getVideoTrackIds = function getVideoTrackIds(init) {
  var videoTrackIds = [];

  _findBox(init, ['moov', 'trak']).forEach(function (trak) {
    var hdlrs = _findBox(trak, ['mdia', 'hdlr']);

    var tkhds = _findBox(trak, ['tkhd']);

    hdlrs.forEach(function (hdlr, i) {
      // only tracks whose handler type is 'vide' carry video samples
      if (parseType(hdlr.subarray(8, 12)) !== 'vide') {
        return;
      }

      var tkhd = tkhds[i];
      var view = new DataView(tkhd.buffer, tkhd.byteOffset, tkhd.byteLength);

      // version 0 tkhd stores 32-bit times (track_ID at byte 12);
      // version 1 stores 64-bit times (track_ID at byte 20)
      var offset = view.getUint8(0) === 0 ? 12 : 20;
      videoTrackIds.push(view.getUint32(offset));
    });
  });

  return videoTrackIds;
};
|
|
|
// Public probing API: a thin facade over the mp4 inspection helpers above.
var probe = {
  findBox: _findBox,
  parseType: parseType,
  timescale: timescale,
  startTime: startTime,
  videoTrackIds: getVideoTrackIds
};
|
|
|
/** |
|
* mux.js |
|
* |
|
* Copyright (c) 2015 Brightcove |
|
* All rights reserved. |
|
* |
|
* Functions that generate fragmented MP4s suitable for use with Media |
|
* Source Extensions. |
|
*/ |
|
|
|
// largest value representable in an unsigned 32-bit field (0xFFFFFFFF)
var UINT32_MAX = Math.pow(2, 32) - 1;
// box-builder functions and shared byte-table constants, assigned below
var box, dinf, esds, ftyp, mdat, mfhd, minf, moof, moov, mvex, mvhd, trak, tkhd, mdia, mdhd, hdlr, sdtp, stbl, stsd, traf, trex, trun, types, MAJOR_BRAND, MINOR_VERSION, AVC1_BRAND, VIDEO_HDLR, AUDIO_HDLR, HDLR_TYPES, VMHD, SMHD, DREF, STCO, STSC, STSZ, STTS; // pre-calculate constants
|
|
|
// Pre-compute the 4-byte type codes and the invariant payloads shared by
// every generated box, so the hot mp4-generation path only concatenates.
(function () {
  var i;
  // map of box name -> 4-byte big-endian type code (filled in below)
  types = {
    avc1: [],
    // codingname
    avcC: [],
    btrt: [],
    dinf: [],
    dref: [],
    esds: [],
    ftyp: [],
    hdlr: [],
    mdat: [],
    mdhd: [],
    mdia: [],
    mfhd: [],
    minf: [],
    moof: [],
    moov: [],
    mp4a: [],
    // codingname
    mvex: [],
    mvhd: [],
    sdtp: [],
    smhd: [],
    stbl: [],
    stco: [],
    stsc: [],
    stsd: [],
    stsz: [],
    stts: [],
    styp: [],
    tfdt: [],
    tfhd: [],
    traf: [],
    trak: [],
    trun: [],
    trex: [],
    tkhd: [],
    vmhd: []
  }; // In environments where Uint8Array is undefined (e.g., IE8), skip set up so that we
  // don't throw an error

  if (typeof Uint8Array === 'undefined') {
    return;
  }

  // convert each box name into its four ASCII char codes
  for (i in types) {
    if (types.hasOwnProperty(i)) {
      types[i] = [i.charCodeAt(0), i.charCodeAt(1), i.charCodeAt(2), i.charCodeAt(3)];
    }
  }

  MAJOR_BRAND = new Uint8Array(['i'.charCodeAt(0), 's'.charCodeAt(0), 'o'.charCodeAt(0), 'm'.charCodeAt(0)]);
  AVC1_BRAND = new Uint8Array(['a'.charCodeAt(0), 'v'.charCodeAt(0), 'c'.charCodeAt(0), '1'.charCodeAt(0)]);
  MINOR_VERSION = new Uint8Array([0, 0, 0, 1]);
  VIDEO_HDLR = new Uint8Array([0x00, // version 0
  0x00, 0x00, 0x00, // flags
  0x00, 0x00, 0x00, 0x00, // pre_defined
  0x76, 0x69, 0x64, 0x65, // handler_type: 'vide'
  0x00, 0x00, 0x00, 0x00, // reserved
  0x00, 0x00, 0x00, 0x00, // reserved
  0x00, 0x00, 0x00, 0x00, // reserved
  0x56, 0x69, 0x64, 0x65, 0x6f, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x00 // name: 'VideoHandler'
  ]);
  AUDIO_HDLR = new Uint8Array([0x00, // version 0
  0x00, 0x00, 0x00, // flags
  0x00, 0x00, 0x00, 0x00, // pre_defined
  0x73, 0x6f, 0x75, 0x6e, // handler_type: 'soun'
  0x00, 0x00, 0x00, 0x00, // reserved
  0x00, 0x00, 0x00, 0x00, // reserved
  0x00, 0x00, 0x00, 0x00, // reserved
  0x53, 0x6f, 0x75, 0x6e, 0x64, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x00 // name: 'SoundHandler'
  ]);
  HDLR_TYPES = {
    video: VIDEO_HDLR,
    audio: AUDIO_HDLR
  };
  DREF = new Uint8Array([0x00, // version 0
  0x00, 0x00, 0x00, // flags
  0x00, 0x00, 0x00, 0x01, // entry_count
  0x00, 0x00, 0x00, 0x0c, // entry_size
  0x75, 0x72, 0x6c, 0x20, // 'url' type
  0x00, // version 0
  0x00, 0x00, 0x01 // entry_flags
  ]);
  SMHD = new Uint8Array([0x00, // version
  0x00, 0x00, 0x00, // flags
  0x00, 0x00, // balance, 0 means centered
  0x00, 0x00 // reserved
  ]);
  STCO = new Uint8Array([0x00, // version
  0x00, 0x00, 0x00, // flags
  0x00, 0x00, 0x00, 0x00 // entry_count
  ]);
  // stsc/stts share stco's empty version/flags/entry_count layout
  STSC = STCO;
  STSZ = new Uint8Array([0x00, // version
  0x00, 0x00, 0x00, // flags
  0x00, 0x00, 0x00, 0x00, // sample_size
  0x00, 0x00, 0x00, 0x00 // sample_count
  ]);
  STTS = STCO;
  VMHD = new Uint8Array([0x00, // version
  0x00, 0x00, 0x01, // flags
  0x00, 0x00, // graphicsmode
  0x00, 0x00, 0x00, 0x00, 0x00, 0x00 // opcolor
  ]);
})();
|
|
|
// Serialize an ISO BMFF box: a 32-bit big-endian size, the 4-byte type
// code, then each payload concatenated in order.
box = function box(type) {
  var payload = Array.prototype.slice.call(arguments, 1);

  // total bytes needed for all payloads
  var size = payload.reduce(function (total, bytes) {
    return total + bytes.byteLength;
  }, 0);

  var result = new Uint8Array(size + 8);
  var view = new DataView(result.buffer, result.byteOffset, result.byteLength);

  // 8-byte header: overall length followed by the type code
  view.setUint32(0, result.byteLength);
  result.set(type, 4);

  // lay the payloads out back-to-back after the header
  var offset = 8;
  payload.forEach(function (bytes) {
    result.set(bytes, offset);
    offset += bytes.byteLength;
  });

  return result;
};
|
|
|
// Data information box: wraps a single default 'url ' data reference (DREF).
dinf = function dinf() {
  return box(types.dinf, box(types.dref, DREF));
};
|
|
|
// Elementary stream descriptor box for an AAC audio track. Only the final
// AudioSpecificConfig bytes vary per track; the descriptor lengths and
// bitrate hints are fixed.
esds = function esds(track) {
  return box(types.esds, new Uint8Array([0x00, // version
  0x00, 0x00, 0x00, // flags
  // ES_Descriptor
  0x03, // tag, ES_DescrTag
  0x19, // length
  0x00, 0x00, // ES_ID
  0x00, // streamDependenceFlag, URL_flag, reserved, streamPriority
  // DecoderConfigDescriptor
  0x04, // tag, DecoderConfigDescrTag
  0x11, // length
  0x40, // object type
  0x15, // streamType
  0x00, 0x06, 0x00, // bufferSizeDB
  0x00, 0x00, 0xda, 0xc0, // maxBitrate
  0x00, 0x00, 0xda, 0xc0, // avgBitrate
  // DecoderSpecificInfo
  0x05, // tag, DecoderSpecificInfoTag
  0x02, // length
  // ISO/IEC 14496-3, AudioSpecificConfig
  // for samplingFrequencyIndex see ISO/IEC 13818-7:2006, 8.1.3.2.2, Table 35
  track.audioobjecttype << 3 | track.samplingfrequencyindex >>> 1, track.samplingfrequencyindex << 7 | track.channelcount << 3, 0x06, 0x01, 0x02 // GASpecificConfig
  ]));
};
|
|
|
// File type box: declares the 'isom' major brand with 'avc1' compatibility.
ftyp = function ftyp() {
  return box(types.ftyp, MAJOR_BRAND, MINOR_VERSION, MAJOR_BRAND, AVC1_BRAND);
};
|
|
|
// Handler reference box for 'video' or 'audio' (the only keys in HDLR_TYPES).
hdlr = function hdlr(type) {
  return box(types.hdlr, HDLR_TYPES[type]);
};
|
|
|
// Media data box: wraps raw sample bytes with an mdat header.
mdat = function mdat(data) {
  return box(types.mdat, data);
};
|
|
|
// Media header box. Defaults to a 90kHz timescale; audio tracks override
// the timescale with their sample rate when one is known.
mdhd = function mdhd(track) {
  var result = new Uint8Array([0x00, // version 0
  0x00, 0x00, 0x00, // flags
  0x00, 0x00, 0x00, 0x02, // creation_time
  0x00, 0x00, 0x00, 0x03, // modification_time
  0x00, 0x01, 0x5f, 0x90, // timescale, 90,000 "ticks" per second
  track.duration >>> 24 & 0xFF, track.duration >>> 16 & 0xFF, track.duration >>> 8 & 0xFF, track.duration & 0xFF, // duration
  0x55, 0xc4, // 'und' language (undetermined)
  0x00, 0x00]);

  if (track.samplerate) {
    // Use the sample rate from the track metadata when it is defined (it
    // can be parsed out of an ADTS header, for instance): write it
    // big-endian over the default timescale at bytes 12-15.
    for (var shift = 0; shift < 4; shift++) {
      result[12 + shift] = track.samplerate >>> (24 - 8 * shift) & 0xFF;
    }
  }

  return box(types.mdhd, result);
};
|
|
|
// Media box: media header + handler reference + media information.
mdia = function mdia(track) {
  return box(types.mdia, mdhd(track), hdlr(track.type), minf(track));
};
|
|
|
// Movie fragment header box: carries the fragment's sequence number,
// written big-endian into the last four bytes.
mfhd = function mfhd(sequenceNumber) {
  return box(types.mfhd, new Uint8Array([0x00, 0x00, 0x00, 0x00, // flags
  (sequenceNumber & 0xFF000000) >> 24, (sequenceNumber & 0xFF0000) >> 16, (sequenceNumber & 0xFF00) >> 8, sequenceNumber & 0xFF // sequence_number
  ]));
};
|
|
|
// Media information box: a video media header (vmhd) or sound media
// header (smhd) depending on track type, plus data info and sample table.
minf = function minf(track) {
  return box(types.minf, track.type === 'video' ? box(types.vmhd, VMHD) : box(types.smhd, SMHD), dinf(), stbl(track));
};
|
|
|
// Movie fragment box: a movie fragment header followed by one track
// fragment (traf) per track.
moof = function moof(sequenceNumber, tracks) {
  var trackFragments = tracks.map(function (track) {
    return traf(track);
  });
  return box.apply(null, [types.moof, mfhd(sequenceNumber)].concat(trackFragments));
};
|
/** |
|
* Returns a movie box. |
|
* @param tracks {array} the tracks associated with this movie |
|
* @see ISO/IEC 14496-12:2012(E), section 8.2.1 |
|
*/ |
|
|
|
|
|
/**
 * Returns a movie box: movie header, one trak per track, and the movie
 * extends (mvex) box that enables fragmenting.
 * @param tracks {array} the tracks associated with this movie
 * @see ISO/IEC 14496-12:2012(E), section 8.2.1
 */
moov = function moov(tracks) {
  var boxes = tracks.map(function (track) {
    return trak(track);
  });
  return box.apply(null, [types.moov, mvhd(0xffffffff)].concat(boxes).concat(mvex(tracks)));
};
|
|
|
// Movie extends box: one track extends (trex) entry per track, signalling
// that the movie may contain fragments.
mvex = function mvex(tracks) {
  var boxes = tracks.map(function (track) {
    return trex(track);
  });
  return box.apply(null, [types.mvex].concat(boxes));
};
|
|
|
// Movie header box. Fixed 90kHz timescale; only the 32-bit duration field
// varies. next_track_ID is pinned to 0xffffffff (no more tracks addable).
mvhd = function mvhd(duration) {
  var bytes = new Uint8Array([0x00, // version 0
  0x00, 0x00, 0x00, // flags
  0x00, 0x00, 0x00, 0x01, // creation_time
  0x00, 0x00, 0x00, 0x02, // modification_time
  0x00, 0x01, 0x5f, 0x90, // timescale, 90,000 "ticks" per second
  (duration & 0xFF000000) >> 24, (duration & 0xFF0000) >> 16, (duration & 0xFF00) >> 8, duration & 0xFF, // duration
  0x00, 0x01, 0x00, 0x00, // 1.0 rate
  0x01, 0x00, // 1.0 volume
  0x00, 0x00, // reserved
  0x00, 0x00, 0x00, 0x00, // reserved
  0x00, 0x00, 0x00, 0x00, // reserved
  0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, // transformation: unity matrix
  0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // pre_defined
  0xff, 0xff, 0xff, 0xff // next_track_ID
  ]);
  return box(types.mvhd, bytes);
};
|
|
|
// Independent-and-disposable-samples box: one byte of dependency flags per
// sample, after a 4-byte version/flags header that stays all zero.
sdtp = function sdtp(track) {
  var samples = track.samples || [];
  var bytes = new Uint8Array(4 + samples.length);

  samples.forEach(function (sample, i) {
    var flags = sample.flags;
    bytes[i + 4] = flags.dependsOn << 4 | flags.isDependedOn << 2 | flags.hasRedundancy;
  });

  return box(types.sdtp, bytes);
};
|
|
|
// Sample table box. For fragmented MP4 the stts/stsc/stsz/stco tables are
// empty constants; only the sample description (stsd) carries track data.
stbl = function stbl(track) {
  return box(types.stbl, stsd(track), box(types.stts, STTS), box(types.stsc, STSC), box(types.stsz, STSZ), box(types.stco, STCO));
};
|
|
|
// Sample description (stsd) builders. videoSample/audioSample are kept
// private to this closure; stsd dispatches on track.type.
(function () {
  var videoSample, audioSample;

  // Sample description box: a single entry, avc1 for video or mp4a for audio.
  stsd = function stsd(track) {
    return box(types.stsd, new Uint8Array([0x00, // version 0
    0x00, 0x00, 0x00, // flags
    0x00, 0x00, 0x00, 0x01]), track.type === 'video' ? videoSample(track) : audioSample(track));
  };

  // AVC sample entry: visual dimensions plus an avcC decoder configuration
  // record built from the track's SPS/PPS nal units, and a btrt hint box.
  videoSample = function videoSample(track) {
    var sps = track.sps || [],
        pps = track.pps || [],
        sequenceParameterSets = [],
        pictureParameterSets = [],
        i; // assemble the SPSs

    for (i = 0; i < sps.length; i++) {
      sequenceParameterSets.push((sps[i].byteLength & 0xFF00) >>> 8);
      sequenceParameterSets.push(sps[i].byteLength & 0xFF); // sequenceParameterSetLength

      sequenceParameterSets = sequenceParameterSets.concat(Array.prototype.slice.call(sps[i])); // SPS
    } // assemble the PPSs

    // each PPS is prefixed with its 16-bit big-endian length, like the SPSs
    for (i = 0; i < pps.length; i++) {
      pictureParameterSets.push((pps[i].byteLength & 0xFF00) >>> 8);
      pictureParameterSets.push(pps[i].byteLength & 0xFF);
      pictureParameterSets = pictureParameterSets.concat(Array.prototype.slice.call(pps[i]));
    }

    return box(types.avc1, new Uint8Array([0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // reserved
    0x00, 0x01, // data_reference_index
    0x00, 0x00, // pre_defined
    0x00, 0x00, // reserved
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // pre_defined
    (track.width & 0xff00) >> 8, track.width & 0xff, // width
    (track.height & 0xff00) >> 8, track.height & 0xff, // height
    0x00, 0x48, 0x00, 0x00, // horizresolution
    0x00, 0x48, 0x00, 0x00, // vertresolution
    0x00, 0x00, 0x00, 0x00, // reserved
    0x00, 0x01, // frame_count
    0x13, 0x76, 0x69, 0x64, 0x65, 0x6f, 0x6a, 0x73, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x69, 0x62, 0x2d, 0x68, 0x6c, 0x73, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // compressorname
    0x00, 0x18, // depth = 24
    0x11, 0x11 // pre_defined = -1
    ]), box(types.avcC, new Uint8Array([0x01, // configurationVersion
    track.profileIdc, // AVCProfileIndication
    track.profileCompatibility, // profile_compatibility
    track.levelIdc, // AVCLevelIndication
    0xff // lengthSizeMinusOne, hard-coded to 4 bytes
    ].concat([sps.length // numOfSequenceParameterSets
    ]).concat(sequenceParameterSets).concat([pps.length // numOfPictureParameterSets
    ]).concat(pictureParameterSets))), // "PPS"
    box(types.btrt, new Uint8Array([0x00, 0x1c, 0x9c, 0x80, // bufferSizeDB
    0x00, 0x2d, 0xc6, 0xc0, // maxBitrate
    0x00, 0x2d, 0xc6, 0xc0])) // avgBitrate
    );
  };

  // MP4 audio sample entry: channel count, sample size and a 16.16
  // fixed-point sample rate, followed by the esds descriptor.
  audioSample = function audioSample(track) {
    return box(types.mp4a, new Uint8Array([// SampleEntry, ISO/IEC 14496-12
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // reserved
    0x00, 0x01, // data_reference_index
    // AudioSampleEntry, ISO/IEC 14496-12
    0x00, 0x00, 0x00, 0x00, // reserved
    0x00, 0x00, 0x00, 0x00, // reserved
    (track.channelcount & 0xff00) >> 8, track.channelcount & 0xff, // channelcount
    (track.samplesize & 0xff00) >> 8, track.samplesize & 0xff, // samplesize
    0x00, 0x00, // pre_defined
    0x00, 0x00, // reserved
    (track.samplerate & 0xff00) >> 8, track.samplerate & 0xff, 0x00, 0x00 // samplerate, 16.16
    // MP4AudioSampleEntry, ISO/IEC 14496-14
    ]), esds(track));
  };
})();
|
|
|
// Track header box: track id, duration, and visual dimensions (audio
// tracks simply carry zero width/height). Flags 0x7 = enabled, in movie,
// in preview.
tkhd = function tkhd(track) {
  var result = new Uint8Array([0x00, // version 0
  0x00, 0x00, 0x07, // flags
  0x00, 0x00, 0x00, 0x00, // creation_time
  0x00, 0x00, 0x00, 0x00, // modification_time
  (track.id & 0xFF000000) >> 24, (track.id & 0xFF0000) >> 16, (track.id & 0xFF00) >> 8, track.id & 0xFF, // track_ID
  0x00, 0x00, 0x00, 0x00, // reserved
  (track.duration & 0xFF000000) >> 24, (track.duration & 0xFF0000) >> 16, (track.duration & 0xFF00) >> 8, track.duration & 0xFF, // duration
  0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // reserved
  0x00, 0x00, // layer
  0x00, 0x00, // alternate_group
  0x01, 0x00, // non-audio track volume
  0x00, 0x00, // reserved
  0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, // transformation: unity matrix
  (track.width & 0xFF00) >> 8, track.width & 0xFF, 0x00, 0x00, // width
  (track.height & 0xFF00) >> 8, track.height & 0xFF, 0x00, 0x00 // height
  ]);
  return box(types.tkhd, result);
};
|
/** |
|
* Generate a track fragment (traf) box. A traf box collects metadata |
|
* about tracks in a movie fragment (moof) box. |
|
*/ |
|
|
|
|
|
/**
 * Generate a track fragment (traf) box. A traf box collects metadata
 * about tracks in a movie fragment (moof) box.
 */
traf = function traf(track) {
  var trackFragmentHeader, trackFragmentDecodeTime, trackFragmentRun, sampleDependencyTable, dataOffset, upperWordBaseMediaDecodeTime, lowerWordBaseMediaDecodeTime;
  // flags 0x3a: default sample duration/size/flags and description index present
  trackFragmentHeader = box(types.tfhd, new Uint8Array([0x00, // version 0
  0x00, 0x00, 0x3a, // flags
  (track.id & 0xFF000000) >> 24, (track.id & 0xFF0000) >> 16, (track.id & 0xFF00) >> 8, track.id & 0xFF, // track_ID
  0x00, 0x00, 0x00, 0x01, // sample_description_index
  0x00, 0x00, 0x00, 0x00, // default_sample_duration
  0x00, 0x00, 0x00, 0x00, // default_sample_size
  0x00, 0x00, 0x00, 0x00 // default_sample_flags
  ]));
  // split the 64-bit baseMediaDecodeTime into two 32-bit words for the
  // version 1 tfdt below
  upperWordBaseMediaDecodeTime = Math.floor(track.baseMediaDecodeTime / (UINT32_MAX + 1));
  lowerWordBaseMediaDecodeTime = Math.floor(track.baseMediaDecodeTime % (UINT32_MAX + 1));
  trackFragmentDecodeTime = box(types.tfdt, new Uint8Array([0x01, // version 1
  0x00, 0x00, 0x00, // flags
  // baseMediaDecodeTime
  upperWordBaseMediaDecodeTime >>> 24 & 0xFF, upperWordBaseMediaDecodeTime >>> 16 & 0xFF, upperWordBaseMediaDecodeTime >>> 8 & 0xFF, upperWordBaseMediaDecodeTime & 0xFF, lowerWordBaseMediaDecodeTime >>> 24 & 0xFF, lowerWordBaseMediaDecodeTime >>> 16 & 0xFF, lowerWordBaseMediaDecodeTime >>> 8 & 0xFF, lowerWordBaseMediaDecodeTime & 0xFF])); // the data offset specifies the number of bytes from the start of
  // the containing moof to the first payload byte of the associated
  // mdat

  dataOffset = 32 + // tfhd
  20 + // tfdt
  8 + // traf header
  16 + // mfhd
  8 + // moof header
  8; // mdat header
  // audio tracks require less metadata

  if (track.type === 'audio') {
    trackFragmentRun = trun(track, dataOffset);
    return box(types.traf, trackFragmentHeader, trackFragmentDecodeTime, trackFragmentRun);
  } // video tracks should contain an independent and disposable samples
  // box (sdtp)
  // generate one and adjust offsets to match

  sampleDependencyTable = sdtp(track);
  trackFragmentRun = trun(track, sampleDependencyTable.length + dataOffset);
  return box(types.traf, trackFragmentHeader, trackFragmentDecodeTime, trackFragmentRun, sampleDependencyTable);
};
|
/** |
|
* Generate a track box. |
|
* @param track {object} a track definition |
|
* @return {Uint8Array} the track box |
|
*/ |
|
|
|
|
|
/**
 * Generate a track box (track header + media box).
 * @param track {object} a track definition; a missing/zero duration is
 * coerced to 0xffffffff ("unknown") as a side effect on the track object
 * @return {Uint8Array} the track box
 */
trak = function trak(track) {
  track.duration = track.duration || 0xffffffff;
  return box(types.trak, tkhd(track), mdia(track));
};
|
|
|
// Track extends box: per-track defaults used by movie fragments.
trex = function trex(track) {
  var result = new Uint8Array([0x00, // version 0
  0x00, 0x00, 0x00, // flags
  (track.id & 0xFF000000) >> 24, (track.id & 0xFF0000) >> 16, (track.id & 0xFF00) >> 8, track.id & 0xFF, // track_ID
  0x00, 0x00, 0x00, 0x01, // default_sample_description_index
  0x00, 0x00, 0x00, 0x00, // default_sample_duration
  0x00, 0x00, 0x00, 0x00, // default_sample_size
  0x00, 0x01, 0x00, 0x01 // default_sample_flags
  ]); // the last two bytes of default_sample_flags is the sample
  // degradation priority, a hint about the importance of this sample
  // relative to others. Lower the degradation priority for all sample
  // types other than video.

  if (track.type !== 'video') {
    result[result.length - 1] = 0x00;
  }

  return box(types.trex, result);
};
|
|
|
// Track fragment run (trun) builders, private to this closure.
(function () {
  var audioTrun, videoTrun, trunHeader; // This method assumes all samples are uniform. That is, if a
  // duration is present for the first sample, it will be present for
  // all subsequent samples.
  // see ISO/IEC 14496-12:2012, Section 8.8.8.1

  // Shared trun prefix: version/flags, sample_count and data_offset.
  // Which per-sample fields follow is advertised in the flags, probed
  // from the first sample only (hence the uniformity assumption above).
  trunHeader = function trunHeader(samples, offset) {
    var durationPresent = 0,
        sizePresent = 0,
        flagsPresent = 0,
        compositionTimeOffset = 0; // trun flag constants

    if (samples.length) {
      if (samples[0].duration !== undefined) {
        durationPresent = 0x1;
      }

      if (samples[0].size !== undefined) {
        sizePresent = 0x2;
      }

      if (samples[0].flags !== undefined) {
        flagsPresent = 0x4;
      }

      if (samples[0].compositionTimeOffset !== undefined) {
        compositionTimeOffset = 0x8;
      }
    }

    return [0x00, // version 0
    0x00, durationPresent | sizePresent | flagsPresent | compositionTimeOffset, 0x01, // flags
    (samples.length & 0xFF000000) >>> 24, (samples.length & 0xFF0000) >>> 16, (samples.length & 0xFF00) >>> 8, samples.length & 0xFF, // sample_count
    (offset & 0xFF000000) >>> 24, (offset & 0xFF0000) >>> 16, (offset & 0xFF00) >>> 8, offset & 0xFF // data_offset
    ];
  };

  // Video trun: 16 bytes per sample (duration, size, flags, composition
  // time offset). The offset is advanced past this trun's own length.
  videoTrun = function videoTrun(track, offset) {
    var bytes, samples, sample, i;
    samples = track.samples || [];
    offset += 8 + 12 + 16 * samples.length; // box header + trun header + samples
    bytes = trunHeader(samples, offset);

    for (i = 0; i < samples.length; i++) {
      sample = samples[i];
      bytes = bytes.concat([(sample.duration & 0xFF000000) >>> 24, (sample.duration & 0xFF0000) >>> 16, (sample.duration & 0xFF00) >>> 8, sample.duration & 0xFF, // sample_duration
      (sample.size & 0xFF000000) >>> 24, (sample.size & 0xFF0000) >>> 16, (sample.size & 0xFF00) >>> 8, sample.size & 0xFF, // sample_size
      // NOTE(review): `&` binds looser than `<<`, so the next expression is
      // `degradationPriority & 0xF000`, not `(degradationPriority & 0xF0) << 8`.
      // Harmless while degradationPriority is always 0 (see createDefaultSample),
      // but worth confirming before relying on non-zero priorities.
      sample.flags.isLeading << 2 | sample.flags.dependsOn, sample.flags.isDependedOn << 6 | sample.flags.hasRedundancy << 4 | sample.flags.paddingValue << 1 | sample.flags.isNonSyncSample, sample.flags.degradationPriority & 0xF0 << 8, sample.flags.degradationPriority & 0x0F, // sample_flags
      (sample.compositionTimeOffset & 0xFF000000) >>> 24, (sample.compositionTimeOffset & 0xFF0000) >>> 16, (sample.compositionTimeOffset & 0xFF00) >>> 8, sample.compositionTimeOffset & 0xFF // sample_composition_time_offset
      ]);
    }

    return box(types.trun, new Uint8Array(bytes));
  };

  // Audio trun: 8 bytes per sample (duration and size only).
  audioTrun = function audioTrun(track, offset) {
    var bytes, samples, sample, i;
    samples = track.samples || [];
    offset += 8 + 12 + 8 * samples.length; // box header + trun header + samples
    bytes = trunHeader(samples, offset);

    for (i = 0; i < samples.length; i++) {
      sample = samples[i];
      bytes = bytes.concat([(sample.duration & 0xFF000000) >>> 24, (sample.duration & 0xFF0000) >>> 16, (sample.duration & 0xFF00) >>> 8, sample.duration & 0xFF, // sample_duration
      (sample.size & 0xFF000000) >>> 24, (sample.size & 0xFF0000) >>> 16, (sample.size & 0xFF00) >>> 8, sample.size & 0xFF]); // sample_size
    }

    return box(types.trun, new Uint8Array(bytes));
  };

  // Public entry point: dispatch on track type.
  trun = function trun(track, offset) {
    if (track.type === 'audio') {
      return audioTrun(track, offset);
    }

    return videoTrun(track, offset);
  };
})();
|
|
|
// Public fMP4 generator API.
var mp4Generator = {
  ftyp: ftyp,
  mdat: mdat,
  moof: moof,
  moov: moov,
  // Build a complete initialization segment: an ftyp box immediately
  // followed by a moov box describing `tracks`.
  initSegment: function initSegment(tracks) {
    var fileType = ftyp(),
        movie = moov(tracks),
        result;
    result = new Uint8Array(fileType.byteLength + movie.byteLength);
    result.set(fileType);
    result.set(movie, fileType.byteLength);
    return result;
  }
};
|
|
|
/** |
|
* mux.js |
|
* |
|
* Copyright (c) 2014 Brightcove |
|
* All rights reserved. |
|
* |
|
* A lightweight readable stream implemention that handles event dispatching. |
|
* Objects that inherit from streams should call init in their constructors. |
|
*/ |
|
|
|
/**
 * A lightweight readable stream implementation that handles event
 * dispatching. Objects that inherit from streams should call init in
 * their constructors.
 */
var Stream$1 = function Stream() {
  this.init = function () {
    // per-instance listener registry, kept private via closure
    var listeners = {};
    /**
     * Add a listener for a specified event type.
     * @param type {string} the event name
     * @param listener {function} the callback to be invoked when an event of
     * the specified type occurs
     */

    this.on = function (type, listener) {
      if (!listeners[type]) {
        listeners[type] = [];
      }

      listeners[type] = listeners[type].concat(listener);
    };
    /**
     * Remove a listener for a specified event type.
     * @param type {string} the event name
     * @param listener {function} a function previously registered for this
     * type of event through `on`
     * @return {boolean} true if the listener was found and removed
     */

    this.off = function (type, listener) {
      var index;

      if (!listeners[type]) {
        return false;
      }

      index = listeners[type].indexOf(listener);

      // Bug fix: splice(-1, 1) would remove the LAST listener when the
      // given listener was never registered; bail out instead.
      if (index === -1) {
        return false;
      }

      // copy-on-write so a removal during trigger() does not disturb an
      // in-flight iteration over the previous array
      listeners[type] = listeners[type].slice();
      listeners[type].splice(index, 1);
      return true;
    };
    /**
     * Trigger an event of the specified type on this stream. Any additional
     * arguments to this function are passed as parameters to event listeners.
     * @param type {string} the event name
     */

    this.trigger = function (type) {
      var callbacks, i, length, args;
      callbacks = listeners[type];

      if (!callbacks) {
        return;
      } // Slicing the arguments on every invocation of this method
      // can add a significant amount of overhead. Avoid the
      // intermediate object creation for the common case of a
      // single callback argument

      if (arguments.length === 2) {
        length = callbacks.length;

        for (i = 0; i < length; ++i) {
          callbacks[i].call(this, arguments[1]);
        }
      } else {
        args = [];

        for (i = 1; i < arguments.length; ++i) {
          args.push(arguments[i]);
        }

        length = callbacks.length;

        for (i = 0; i < length; ++i) {
          callbacks[i].apply(this, args);
        }
      }
    };
    /**
     * Destroys the stream and cleans up.
     */

    this.dispose = function () {
      listeners = {};
    };
  };
};
|
/** |
|
* Forwards all `data` events on this stream to the destination stream. The |
|
* destination stream should provide a method `push` to receive the data |
|
* events as they arrive. |
|
* @param destination {stream} the stream that will receive all `data` events |
|
* @param autoFlush {boolean} if false, we will not call `flush` on the destination |
|
* when the current stream emits a 'done' event |
|
* @see http://nodejs.org/api/stream.html#stream_readable_pipe_destination_options |
|
*/ |
|
|
|
|
|
/**
 * Forwards all `data` events on this stream to the destination stream. The
 * destination stream should provide a method `push` to receive the data
 * events as they arrive, and `flush` to be notified when this stream is done.
 * @param destination {stream} the stream that will receive all `data` events
 * @return {stream} the destination stream, to allow chaining
 * @see http://nodejs.org/api/stream.html#stream_readable_pipe_destination_options
 */
Stream$1.prototype.pipe = function (destination) {
  this.on('data', function (data) {
    destination.push(data);
  });
  this.on('done', function (flushSource) {
    destination.flush(flushSource);
  });
  return destination;
};
|
// actual work. These are provided by the prototype as a sort of no-op |
|
// implementation so that we don't have to check for their existence in the |
|
// `pipe` function above. |
|
|
|
|
|
// Default stream functions that are expected to be overridden to perform
// actual work. These are provided by the prototype as a sort of no-op
// implementation so that we don't have to check for their existence in the
// `pipe` function above.

// Emit `data` so piped destinations receive it via their `push`.
Stream$1.prototype.push = function (data) {
  this.trigger('data', data);
};

// Emit `done` so piped destinations receive it via their `flush`.
Stream$1.prototype.flush = function (flushSource) {
  this.trigger('done', flushSource);
};

var stream = Stream$1;
|
|
|
// Convert an array of nal units into an array of frames with each frame being |
|
// composed of the nal units that make up that frame |
|
// Also keep track of cummulative data about the frame from the nal units such |
|
// as the frame duration, starting pts, etc. |
|
// Convert an array of nal units into an array of frames, splitting on
// 'aud'-type (access unit delimiter) nal units. Each frame is an array of
// nal units annotated with cumulative byteLength, pts, dts and duration.
var groupNalsIntoFrames = function groupNalsIntoFrames(nalUnits) {
  var frames = [];
  var currentFrame = [];
  currentFrame.byteLength = 0;

  for (var n = 0; n < nalUnits.length; n++) {
    var nalUnit = nalUnits[n];

    if (nalUnit.nalUnitType === 'access_unit_delimiter_rbsp') {
      // An AUD opens a new frame. The very first nal is expected to be an
      // AUD, so only emit when something has actually been collected.
      if (currentFrame.length) {
        currentFrame.duration = nalUnit.dts - currentFrame.dts;
        frames.push(currentFrame);
      }

      currentFrame = [nalUnit];
      currentFrame.byteLength = nalUnit.data.byteLength;
      currentFrame.pts = nalUnit.pts;
      currentFrame.dts = nalUnit.dts;
      continue;
    }

    // flag IDR slices so key frames are easy to find later
    if (nalUnit.nalUnitType === 'slice_layer_without_partitioning_rbsp_idr') {
      currentFrame.keyFrame = true;
    }

    currentFrame.duration = nalUnit.dts - currentFrame.dts;
    currentFrame.byteLength += nalUnit.data.byteLength;
    currentFrame.push(nalUnit);
  }

  // For the last frame, borrow the previous frame's duration if we have
  // nothing better to go on.
  if (frames.length && (!currentFrame.duration || currentFrame.duration <= 0)) {
    currentFrame.duration = frames[frames.length - 1].duration;
  }

  frames.push(currentFrame);
  return frames;
};
|
// of the frames that make up that Gop |
|
// Also keep track of cummulative data about the Gop from the frames such as the |
|
// Gop duration, starting pts, etc. |
|
|
|
|
|
// Convert an array of frames into an array of GOPs, splitting on key
// frames. Both the gop-in-progress and the outer list carry running
// totals of byteLength / nalCount / duration plus starting pts/dts.
var groupFramesIntoGops = function groupFramesIntoGops(frames) {
  var gops = [];
  var currentGop = [];

  // seed the running totals before iterating
  currentGop.byteLength = 0;
  currentGop.nalCount = 0;
  currentGop.duration = 0;
  currentGop.pts = frames[0].pts;
  currentGop.dts = frames[0].dts;
  gops.byteLength = 0;
  gops.nalCount = 0;
  gops.duration = 0;
  gops.pts = frames[0].pts;
  gops.dts = frames[0].dts;

  frames.forEach(function (frame) {
    if (frame.keyFrame) {
      // A keyframe opens a new gop. The very first frame is expected to
      // be a keyframe, so only flush when the gop is non-empty.
      if (currentGop.length) {
        gops.push(currentGop);
        gops.byteLength += currentGop.byteLength;
        gops.nalCount += currentGop.nalCount;
        gops.duration += currentGop.duration;
      }

      currentGop = [frame];
      currentGop.nalCount = frame.length;
      currentGop.byteLength = frame.byteLength;
      currentGop.pts = frame.pts;
      currentGop.dts = frame.dts;
      currentGop.duration = frame.duration;
    } else {
      currentGop.duration += frame.duration;
      currentGop.nalCount += frame.length;
      currentGop.byteLength += frame.byteLength;
      currentGop.push(frame);
    }
  });

  // borrow the previous gop's duration when the final one has none
  if (gops.length && currentGop.duration <= 0) {
    currentGop.duration = gops[gops.length - 1].duration;
  }

  // fold the final gop into the totals and emit it
  gops.byteLength += currentGop.byteLength;
  gops.nalCount += currentGop.nalCount;
  gops.duration += currentGop.duration;
  gops.push(currentGop);
  return gops;
};
|
/* |
|
* Search for the first keyframe in the GOPs and throw away all frames |
|
* until that keyframe. Then extend the duration of the pulled keyframe |
|
* and pull the PTS and DTS of the keyframe so that it covers the time |
|
* range of the frames that were disposed. |
|
* |
|
* @param {Array} gops video GOPs |
|
* @returns {Array} modified video GOPs |
|
*/ |
|
|
|
|
|
/*
 * If the stream does not start on a keyframe, drop the leading GOP and
 * stretch the first frame of the next GOP backwards (pts/dts/duration)
 * so it covers the time range of the frames that were disposed.
 *
 * @param {Array} gops video GOPs
 * @returns {Array} modified video GOPs
 */
var extendFirstKeyFrame = function extendFirstKeyFrame(gops) {
  // nothing to do when we already start on a keyframe, or when there is
  // only a single gop to work with
  if (gops[0][0].keyFrame || gops.length <= 1) {
    return gops;
  }

  var removedGop = gops.shift();
  gops.byteLength -= removedGop.byteLength;
  gops.nalCount -= removedGop.nalCount;

  // stretch the now-first frame backwards over the dropped span
  gops[0][0].dts = removedGop.dts;
  gops[0][0].pts = removedGop.pts;
  gops[0][0].duration += removedGop.duration;

  return gops;
};
|
/** |
|
* Default sample object |
|
* see ISO/IEC 14496-12:2012, section 8.6.4.3 |
|
*/ |
|
|
|
|
|
/**
 * Build a fresh sample object populated with the default flag values.
 * see ISO/IEC 14496-12:2012, section 8.6.4.3
 */
var createDefaultSample = function createDefaultSample() {
  var defaultFlags = {
    isLeading: 0,
    dependsOn: 1,
    isDependedOn: 0,
    hasRedundancy: 0,
    degradationPriority: 0,
    isNonSyncSample: 1
  };

  return {
    size: 0,
    flags: defaultFlags
  };
};
|
/* |
|
* Collates information from a video frame into an object for eventual |
|
* entry into an MP4 sample table. |
|
* |
|
* @param {Object} frame the video frame |
|
* @param {Number} dataOffset the byte offset to position the sample |
|
* @return {Object} object containing sample table info for a frame |
|
*/ |
|
|
|
|
|
/*
 * Collates information from a video frame into an object for eventual
 * entry into an MP4 sample table.
 *
 * @param {Object} frame the video frame
 * @param {Number} dataOffset the byte offset to position the sample
 * @return {Object} object containing sample table info for a frame
 */
var sampleForFrame = function sampleForFrame(frame, dataOffset) {
  var sample = createDefaultSample();

  sample.dataOffset = dataOffset;
  sample.compositionTimeOffset = frame.pts - frame.dts;
  sample.duration = frame.duration;
  // four bytes of length prefix per NAL unit, plus the NAL payloads
  sample.size = 4 * frame.length + frame.byteLength;

  if (frame.keyFrame) {
    // keyframes may depend on other frames, but no frame depends on them
    sample.flags.dependsOn = 2;
    sample.flags.isNonSyncSample = 0;
  }

  return sample;
}; // generate the track's sample table from an array of gops
|
|
|
|
|
var generateSampleTable = function generateSampleTable(gops, baseDataOffset) {
  var samples = [];
  var dataOffset = baseDataOffset || 0;
  var gopIndex;
  var frameIndex;
  var gop;
  var sample;

  for (gopIndex = 0; gopIndex < gops.length; gopIndex++) {
    gop = gops[gopIndex];

    for (frameIndex = 0; frameIndex < gop.length; frameIndex++) {
      // each frame becomes one sample; samples are laid out back-to-back
      sample = sampleForFrame(gop[frameIndex], dataOffset);
      dataOffset += sample.size;
      samples.push(sample);
    }
  }

  return samples;
}; // generate the track's raw mdat data from an array of gops
|
|
|
|
|
/*
 * Concatenate every NAL unit from an array of GOPs into a single
 * Uint8Array, prefixing each NAL with its 32-bit big-endian byte length
 * (the framing used inside an mdat box).
 *
 * @param {Array} gops video GOPs carrying byteLength/nalCount bookkeeping
 * @return {Uint8Array} the concatenated, length-prefixed NAL data
 */
var concatenateNalData = function concatenateNalData(gops) {
  var totalByteLength = gops.byteLength + 4 * gops.nalCount;
  var data = new Uint8Array(totalByteLength);
  var view = new DataView(data.buffer);
  var offset = 0;

  gops.forEach(function (gop) {
    gop.forEach(function (frame) {
      frame.forEach(function (nal) {
        // 32-bit length prefix, then the NAL payload itself
        view.setUint32(offset, nal.data.byteLength);
        offset += 4;
        data.set(nal.data, offset);
        offset += nal.data.byteLength;
      });
    });
  });

  return data;
};
|
|
|
// Public API: helpers for turning parsed H.264 NAL units into the
// frame/GOP/sample structures used to build MP4 video fragments
var frameUtils = {
  groupNalsIntoFrames: groupNalsIntoFrames,
  groupFramesIntoGops: groupFramesIntoGops,
  extendFirstKeyFrame: extendFirstKeyFrame,
  generateSampleTable: generateSampleTable,
  concatenateNalData: concatenateNalData
};
|
|
|
// Byte prefixes shared by the pre-rendered silent AAC frames below; the
// remainder of each frame is mostly zero-filled
var highPrefix = [33, 16, 5, 32, 164, 27];
var lowPrefix = [33, 65, 108, 84, 1, 2, 4, 8, 168, 2, 4, 8, 17, 191, 252];
|
|
|
// Build an array containing `count` zeros.
var zeroFill = function zeroFill(count) {
  var zeros = [];
  var remaining = count;

  while (remaining--) {
    zeros.push(0);
  }

  return zeros;
};
|
|
|
// Flatten each entry of `metaTable` (an array of byte arrays) into a
// single Uint8Array, keyed by the original property names.
var makeTable = function makeTable(metaTable) {
  var table = {};

  Object.keys(metaTable).forEach(function (key) {
    var flattened = metaTable[key].reduce(function (bytes, part) {
      return bytes.concat(part);
    }, []);

    table[key] = new Uint8Array(flattened);
  });

  return table;
}; // Frames-of-silence to use for filling in missing AAC frames
|
|
|
|
|
var coneOfSilence = {
  96000: [highPrefix, [227, 64], zeroFill(154), [56]],
  88200: [highPrefix, [231], zeroFill(170), [56]],
  64000: [highPrefix, [248, 192], zeroFill(240), [56]],
  48000: [highPrefix, [255, 192], zeroFill(268), [55, 148, 128], zeroFill(54), [112]],
  44100: [highPrefix, [255, 192], zeroFill(268), [55, 163, 128], zeroFill(84), [112]],
  32000: [highPrefix, [255, 192], zeroFill(268), [55, 234], zeroFill(226), [112]],
  24000: [highPrefix, [255, 192], zeroFill(268), [55, 255, 128], zeroFill(268), [111, 112], zeroFill(126), [224]],
  16000: [highPrefix, [255, 192], zeroFill(268), [55, 255, 128], zeroFill(268), [111, 255], zeroFill(269), [223, 108], zeroFill(195), [1, 192]],
  12000: [lowPrefix, zeroFill(268), [3, 127, 248], zeroFill(268), [6, 255, 240], zeroFill(268), [13, 255, 224], zeroFill(268), [27, 253, 128], zeroFill(259), [56]],
  11025: [lowPrefix, zeroFill(268), [3, 127, 248], zeroFill(268), [6, 255, 240], zeroFill(268), [13, 255, 224], zeroFill(268), [27, 255, 192], zeroFill(268), [55, 175, 128], zeroFill(108), [112]],
  8000: [lowPrefix, zeroFill(268), [3, 121, 16], zeroFill(47), [7]]
};
// Silent AAC frames flattened into Uint8Arrays, keyed by sample rate
var silence = makeTable(coneOfSilence);
|
|
|
var ONE_SECOND_IN_TS = 90000; // 90kHz clock

// Convert a value in seconds to 90kHz MPEG-TS clock ticks.
function secondsToVideoTs(seconds) {
  return seconds * ONE_SECOND_IN_TS;
}

// Convert a value in seconds to audio clock ticks (one tick per sample).
function secondsToAudioTs(seconds, sampleRate) {
  return seconds * sampleRate;
}

// Convert 90kHz MPEG-TS clock ticks to seconds.
function videoTsToSeconds(timestamp) {
  return timestamp / ONE_SECOND_IN_TS;
}

// Convert audio clock ticks to seconds.
function audioTsToSeconds(timestamp, sampleRate) {
  return timestamp / sampleRate;
}

// Convert audio clock ticks to 90kHz MPEG-TS clock ticks.
function audioTsToVideoTs(timestamp, sampleRate) {
  return secondsToVideoTs(audioTsToSeconds(timestamp, sampleRate));
}

// Convert 90kHz MPEG-TS clock ticks to audio clock ticks.
function videoTsToAudioTs(timestamp, sampleRate) {
  return secondsToAudioTs(videoTsToSeconds(timestamp), sampleRate);
}

var clock = {
  secondsToVideoTs: secondsToVideoTs,
  secondsToAudioTs: secondsToAudioTs,
  videoTsToSeconds: videoTsToSeconds,
  audioTsToSeconds: audioTsToSeconds,
  audioTsToVideoTs: audioTsToVideoTs,
  videoTsToAudioTs: videoTsToAudioTs
};
|
|
|
var ONE_SECOND_IN_TS$1 = 90000; // 90kHz clock (MPEG-TS timestamp units per second)
|
|
|
/**
 * Sum the `byteLength` properties of the data in each AAC frame
 *
 * @param {Array} array AAC frames, each carrying a `data` byte array
 * @return {Number} total payload bytes across all frames
 */
var sumFrameByteLengths = function sumFrameByteLengths(array) {
  return array.reduce(function (total, frame) {
    return total + frame.data.byteLength;
  }, 0);
}; // Possibly pad (prefix) the audio track with silence if appending this track
|
// would lead to the introduction of a gap in the audio buffer |
|
|
|
|
|
var prefixWithSilence = function prefixWithSilence(track, frames, audioAppendStartTs, videoBaseMediaDecodeTime) {
  var baseMediaDecodeTimeTs,
      frameDuration = 0,
      audioGapDuration = 0,
      audioFillFrameCount = 0,
      audioFillDuration = 0,
      silentFrame,
      i;

  // nothing to pad
  if (!frames.length) {
    return;
  }

  // the track's start time expressed on the 90kHz video clock
  baseMediaDecodeTimeTs = clock.audioTsToVideoTs(track.baseMediaDecodeTime, track.samplerate); // determine frame clock duration based on sample rate, round up to avoid overfills

  // each AAC frame holds 1024 samples
  frameDuration = Math.ceil(ONE_SECOND_IN_TS$1 / (track.samplerate / 1024));

  if (audioAppendStartTs && videoBaseMediaDecodeTime) {
    // insert the shortest possible amount (audio gap or audio to video gap)
    audioGapDuration = baseMediaDecodeTimeTs - Math.max(audioAppendStartTs, videoBaseMediaDecodeTime); // number of full frames in the audio gap

    audioFillFrameCount = Math.floor(audioGapDuration / frameDuration);
    audioFillDuration = audioFillFrameCount * frameDuration;
  } // don't attempt to fill gaps smaller than a single frame or larger
  // than a half second

  if (audioFillFrameCount < 1 || audioFillDuration > ONE_SECOND_IN_TS$1 / 2) {
    return;
  }

  // use a pregenerated silent frame matching the track's sample rate
  silentFrame = silence[track.samplerate];

  if (!silentFrame) {
    // we don't have a silent frame pregenerated for the sample rate, so use a frame
    // from the content instead
    silentFrame = frames[0].data;
  }

  // prepend audioFillFrameCount frames of silence
  for (i = 0; i < audioFillFrameCount; i++) {
    frames.splice(i, 0, {
      data: silentFrame
    });
  }

  // move the track start back (in audio clock units) to account for the
  // silence that was just inserted
  track.baseMediaDecodeTime -= Math.floor(clock.videoTsToAudioTs(audioFillDuration, track.samplerate));
}; // If the audio segment extends before the earliest allowed dts
|
// value, remove AAC frames until starts at or after the earliest |
|
// allowed DTS so that we don't end up with a negative baseMedia- |
|
// DecodeTime for the audio track |
|
|
|
|
|
var trimAdtsFramesByEarliestDts = function trimAdtsFramesByEarliestDts(adtsFrames, track, earliestAllowedDts) {
  // nothing extends too early; keep every frame
  if (track.minSegmentDts >= earliestAllowedDts) {
    return adtsFrames;
  }

  // the earliest segment DTS must be recalculated from the frames we keep
  track.minSegmentDts = Infinity;

  return adtsFrames.filter(function (frame) {
    if (frame.dts < earliestAllowedDts) {
      // too early; discard
      return false;
    }

    // allowed frame: keep it and record its dts
    track.minSegmentDts = Math.min(track.minSegmentDts, frame.dts);
    track.minSegmentPts = track.minSegmentDts;
    return true;
  });
}; // generate the track's sample table from an array of frames
|
|
|
|
|
/*
 * Build MP4 sample-table entries (size/duration) for an array of AAC
 * frames.
 */
var generateSampleTable$1 = function generateSampleTable(frames) {
  return frames.map(function (frame) {
    return {
      size: frame.data.byteLength,
      // For AAC audio, every frame contains 1024 audio samples
      duration: 1024
    };
  });
}; // generate the track's raw mdat data from an array of frames
|
|
|
|
|
/*
 * Concatenate the data of every AAC frame into one Uint8Array — the raw
 * mdat payload for the audio track.
 */
var concatenateFrameData = function concatenateFrameData(frames) {
  var data = new Uint8Array(sumFrameByteLengths(frames));
  var offset = 0;

  frames.forEach(function (frame) {
    data.set(frame.data, offset);
    offset += frame.data.byteLength;
  });

  return data;
};
|
|
|
// Public API: helpers for turning parsed AAC (ADTS) frames into the
// sample structures and raw data used to build MP4 audio fragments
var audioFrameUtils = {
  prefixWithSilence: prefixWithSilence,
  trimAdtsFramesByEarliestDts: trimAdtsFramesByEarliestDts,
  generateSampleTable: generateSampleTable$1,
  concatenateFrameData: concatenateFrameData
};
|
|
|
var ONE_SECOND_IN_TS$2 = 90000; // 90kHz clock (MPEG-TS timestamp units per second)
|
|
|
/**
 * Store information about the start and end of the track and the
 * duration for each frame/sample we process in order to calculate
 * the baseMediaDecodeTime
 */
var collectDtsInfo = function collectDtsInfo(track, data) {
  if (typeof data.pts === 'number') {
    // remember the very first pts ever seen for this track
    if (track.timelineStartInfo.pts === undefined) {
      track.timelineStartInfo.pts = data.pts;
    }

    // running min/max pts for the current segment
    track.minSegmentPts = track.minSegmentPts === undefined ? data.pts : Math.min(track.minSegmentPts, data.pts);
    track.maxSegmentPts = track.maxSegmentPts === undefined ? data.pts : Math.max(track.maxSegmentPts, data.pts);
  }

  if (typeof data.dts === 'number') {
    // remember the very first dts ever seen for this track
    if (track.timelineStartInfo.dts === undefined) {
      track.timelineStartInfo.dts = data.dts;
    }

    // running min/max dts for the current segment
    track.minSegmentDts = track.minSegmentDts === undefined ? data.dts : Math.min(track.minSegmentDts, data.dts);
    track.maxSegmentDts = track.maxSegmentDts === undefined ? data.dts : Math.max(track.maxSegmentDts, data.dts);
  }
};
|
/**
 * Clear values used to calculate the baseMediaDecodeTime between
 * tracks
 */
var clearDtsInfo = function clearDtsInfo(track) {
  ['minSegmentDts', 'maxSegmentDts', 'minSegmentPts', 'maxSegmentPts'].forEach(function (property) {
    delete track[property];
  });
};
|
/**
 * Calculate the track's baseMediaDecodeTime based on the earliest
 * DTS the transmuxer has ever seen and the minimum DTS for the
 * current track
 * @param track {object} track metadata configuration
 * @param keepOriginalTimestamps {boolean} If true, keep the timestamps
 *        in the source; false to adjust the first segment to start at 0.
 */
var calculateTrackBaseMediaDecodeTime = function calculateTrackBaseMediaDecodeTime(track, keepOriginalTimestamps) {
  var minSegmentDts = track.minSegmentDts;
  var baseMediaDecodeTime;

  // Optionally adjust the time so the first segment starts at zero.
  if (!keepOriginalTimestamps) {
    minSegmentDts -= track.timelineStartInfo.dts;
  }

  // timelineStartInfo.baseMediaDecodeTime is the location, in time, where
  // we want the start of the first segment to be placed; add the distance
  // this segment is from the very first, clamping so the result is never
  // negative
  baseMediaDecodeTime = Math.max(0, track.timelineStartInfo.baseMediaDecodeTime + minSegmentDts);

  if (track.type === 'audio') {
    // Audio has a different clock equal to the sampling_rate, so rescale
    // from the 90kHz clock into the clock rate of the track
    baseMediaDecodeTime = Math.floor(baseMediaDecodeTime * (track.samplerate / ONE_SECOND_IN_TS$2));
  }

  return baseMediaDecodeTime;
};
|
|
|
// Public API: helpers for tracking pts/dts extents per track and deriving
// the MP4 baseMediaDecodeTime from them
var trackDecodeInfo = {
  clearDtsInfo: clearDtsInfo,
  calculateTrackBaseMediaDecodeTime: calculateTrackBaseMediaDecodeTime,
  collectDtsInfo: collectDtsInfo
};
|
|
|
/** |
|
* mux.js |
|
* |
|
* Copyright (c) 2015 Brightcove |
|
* All rights reserved. |
|
* |
|
* Reads in-band caption information from a video elementary |
|
* stream. Captions must follow the CEA-708 standard for injection |
|
* into an MPEG-2 transport streams. |
|
* @see https://en.wikipedia.org/wiki/CEA-708 |
|
* @see https://www.gpo.gov/fdsys/pkg/CFR-2007-title47-vol1/pdf/CFR-2007-title47-vol1-sec15-119.pdf |
|
*/ |
|
// Supplemental enhancement information (SEI) NAL units have a
// payload type field to indicate how they are to be
// interpreted. CEA-708 caption content is always transmitted with
// payload type 0x04.
|
|
|
// SEI payload type carrying registered ITU-T T.35 user data (captions),
// and the rbsp_trailing_bits byte that terminates a SEI RBSP
var USER_DATA_REGISTERED_ITU_T_T35 = 4,
    RBSP_TRAILING_BITS = 128;
|
/**
 * Parse a supplemental enhancement information (SEI) NAL unit.
 * Stops parsing once a message of type ITU T T35 has been found.
 *
 * @param bytes {Uint8Array} the bytes of a SEI NAL unit
 * @return {object} the parsed SEI payload
 * @see Rec. ITU-T H.264, 7.3.2.3.1
 */
var parseSei = function parseSei(bytes) {
  var result = {
    payloadType: -1,
    payloadSize: 0
  };
  var i = 0;
  var payloadType;
  var payloadSize;

  // walk the sei_rbsp, decoding one sei_message per iteration
  while (i < bytes.byteLength) {
    // rbsp_trailing_bits marks the end of the sei_rbsp
    if (bytes[i] === RBSP_TRAILING_BITS) {
      break;
    }

    // payload type is coded as a run of 0xFF bytes plus a final byte
    payloadType = 0;

    while (bytes[i] === 0xFF) {
      payloadType += 255;
      i++;
    }

    payloadType += bytes[i++];

    // payload size uses the same run-of-0xFF coding
    payloadSize = 0;

    while (bytes[i] === 0xFF) {
      payloadSize += 255;
      i++;
    }

    payloadSize += bytes[i++];

    // this sei_message is a 608/708 caption, so save it and stop; there
    // can only ever be one caption message in a frame's sei
    if (!result.payload && payloadType === USER_DATA_REGISTERED_ITU_T_T35) {
      result.payloadType = payloadType;
      result.payloadSize = payloadSize;
      result.payload = bytes.subarray(i, i + payloadSize);
      break;
    }

    // otherwise skip this payload and move on to the next message
    i += payloadSize;
  }

  return result;
}; // see ANSI/SCTE 128-1 (2013), section 8.1
|
|
|
|
|
var parseUserData = function parseUserData(sei) {
  var payload = sei.payload;

  // itu_t_t35_country_code must be 181 (United States) for captions
  if (payload[0] !== 181) {
    return null;
  }

  // itu_t_t35_provider_code should be 49 (ATSC) for captions
  if ((payload[1] << 8 | payload[2]) !== 49) {
    return null;
  }

  // the user_identifier should be "GA94" to indicate ATSC1 data
  if (String.fromCharCode(payload[3], payload[4], payload[5], payload[6]) !== 'GA94') {
    return null;
  }

  // finally, user_data_type_code should be 0x03 for caption data
  if (payload[7] !== 0x03) {
    return null;
  }

  // return the user_data_type_structure, stripping the trailing
  // marker bits
  return payload.subarray(8, payload.length - 1);
}; // see CEA-708-D, section 4.4
|
|
|
|
|
var parseCaptionPackets = function parseCaptionPackets(pts, userData) {
  var results = [];
  var count;
  var i;
  var offset;
  var packet;

  // process_cc_data_flag unset means this is just filler
  if (!(userData[0] & 0x40)) {
    return results;
  }

  // cc_count tells us how many cc_data_1/cc_data_2 triples follow
  count = userData[0] & 0x1f;

  for (i = 0; i < count; i++) {
    offset = i * 3;
    packet = {
      type: userData[offset + 2] & 0x03,
      pts: pts
    };

    // capture cc data only when cc_valid is 1
    if (userData[offset + 2] & 0x04) {
      packet.ccData = userData[offset + 3] << 8 | userData[offset + 4];
      results.push(packet);
    }
  }

  return results;
};
|
|
|
/**
 * Remove H.264 emulation prevention bytes (the 0x03 inserted after a
 * 0x00 0x00 pair) from a NAL unit so the raw RBSP can be parsed.
 *
 * @param {Uint8Array} data the escaped NAL unit bytes
 * @return {Uint8Array} the unescaped bytes; the input array itself when
 * no emulation prevention bytes are present
 */
var discardEmulationPreventionBytes = function discardEmulationPreventionBytes(data) {
  var length = data.byteLength;
  var epbPositions = [];
  var scan = 1;
  var stripped;
  var sourceIndex;
  var destIndex;

  // locate every emulation prevention byte
  while (scan < length - 2) {
    if (data[scan] === 0 && data[scan + 1] === 0 && data[scan + 2] === 0x03) {
      epbPositions.push(scan + 2);
      scan += 2;
    } else {
      scan++;
    }
  }

  // nothing to strip; hand back the original array untouched
  if (epbPositions.length === 0) {
    return data;
  }

  // copy everything except the emulation prevention bytes
  stripped = new Uint8Array(length - epbPositions.length);
  sourceIndex = 0;

  for (destIndex = 0; destIndex < stripped.length; destIndex++, sourceIndex++) {
    if (sourceIndex === epbPositions[0]) {
      // skip the emulation prevention byte and retire its position
      sourceIndex++;
      epbPositions.shift();
    }

    stripped[destIndex] = data[sourceIndex];
  }

  return stripped;
}; // exports
|
|
|
|
|
// Public API: low-level parsers for extracting CEA-608/708 caption
// packets from H.264 SEI NAL units
var captionPacketParser = {
  parseSei: parseSei,
  parseUserData: parseUserData,
  parseCaptionPackets: parseCaptionPackets,
  discardEmulationPreventionBytes: discardEmulationPreventionBytes,
  USER_DATA_REGISTERED_ITU_T_T35: USER_DATA_REGISTERED_ITU_T_T35
};
|
|
|
// Link To Transport |
|
// ----------------- |
|
|
|
|
|
/**
 * Accepts SEI NAL units from a video elementary stream, extracts the
 * CEA-608 caption packets they carry, and routes each packet to one of
 * four Cea608Stream instances (CC1-CC4).
 */
var CaptionStream = function CaptionStream() {
  CaptionStream.prototype.init.call(this);
  this.captionPackets_ = [];
  // one stream per (field, data channel) pair: CC1, CC2, CC3, CC4
  this.ccStreams_ = [new Cea608Stream(0, 0), // eslint-disable-line no-use-before-define
  new Cea608Stream(0, 1), // eslint-disable-line no-use-before-define
  new Cea608Stream(1, 0), // eslint-disable-line no-use-before-define
  new Cea608Stream(1, 1) // eslint-disable-line no-use-before-define
  ];
  this.reset(); // forward data and done events from CCs to this CaptionStream

  this.ccStreams_.forEach(function (cc) {
    cc.on('data', this.trigger.bind(this, 'data'));
    cc.on('done', this.trigger.bind(this, 'done'));
  }, this);
};

// CaptionStream emits 'data'/'done' events like the other pipeline streams
CaptionStream.prototype = new stream();
|
|
|
/**
 * Accept a parsed NAL unit event, extract any caption packets it carries,
 * and buffer them (sorted and dispatched later by flush()). Duplicate
 * data from re-downloaded segments is detected via dts and skipped.
 */
CaptionStream.prototype.push = function (event) {
  var sei, userData, newCaptionPackets; // only examine SEI NALs

  if (event.nalUnitType !== 'sei_rbsp') {
    return;
  } // parse the sei

  sei = captionPacketParser.parseSei(event.escapedRBSP); // ignore everything but user_data_registered_itu_t_t35

  if (sei.payloadType !== captionPacketParser.USER_DATA_REGISTERED_ITU_T_T35) {
    return;
  } // parse out the user data payload

  userData = captionPacketParser.parseUserData(sei); // ignore unrecognized userData

  if (!userData) {
    return;
  } // Sometimes, the same segment # will be downloaded twice. To stop the
  // caption data from being processed twice, we track the latest dts we've
  // received and ignore everything with a dts before that. However, since
  // data for a specific dts can be split across packets on either side of
  // a segment boundary, we need to make sure we *don't* ignore the packets
  // from the *next* segment that have dts === this.latestDts_. By constantly
  // tracking the number of packets received with dts === this.latestDts_, we
  // know how many should be ignored once we start receiving duplicates.

  if (event.dts < this.latestDts_) {
    // We've started getting older data, so set the flag.
    this.ignoreNextEqualDts_ = true;
    return;
  } else if (event.dts === this.latestDts_ && this.ignoreNextEqualDts_) {
    this.numSameDts_--;

    if (!this.numSameDts_) {
      // We've received the last duplicate packet, time to start processing again
      this.ignoreNextEqualDts_ = false;
    }

    return;
  } // parse out CC data packets and save them for later

  newCaptionPackets = captionPacketParser.parseCaptionPackets(event.pts, userData);
  this.captionPackets_ = this.captionPackets_.concat(newCaptionPackets);

  // count how many consecutive packets share the current dts so that the
  // same number of duplicates can be skipped if the segment re-appears
  if (this.latestDts_ !== event.dts) {
    this.numSameDts_ = 0;
  }

  this.numSameDts_++;
  this.latestDts_ = event.dts;
};
|
|
|
// Sort and dispatch every buffered caption packet, then flush the
// downstream Cea608Streams.
CaptionStream.prototype.flush = function () {
  // nothing was parsed; just flush the downstream 608 streams
  if (!this.captionPackets_.length) {
    this.ccStreams_.forEach(function (cc) {
      cc.flush();
    }, this);
    return;
  }

  // Array#sort is not guaranteed stable (e.g. in Chrome), so tag each
  // packet with its arrival position to break PTS ties deterministically
  this.captionPackets_.forEach(function (packet, index) {
    packet.presortIndex = index;
  });

  // order caption byte-pairs by PTS, preserving arrival order on ties
  this.captionPackets_.sort(function (a, b) {
    return a.pts === b.pts ? a.presortIndex - b.presortIndex : a.pts - b.pts;
  });

  this.captionPackets_.forEach(function (packet) {
    // types 0 and 1 are the CEA-608 fields; dispatch to the right stream.
    // (an 'else' here would dispatch packets to a theoretical
    // Cea708Stream that handles SERVICEn data)
    if (packet.type < 2) {
      this.dispatchCea608Packet(packet);
    }
  }, this);

  this.captionPackets_.length = 0;
  this.ccStreams_.forEach(function (cc) {
    cc.flush();
  }, this);
};
|
|
|
// Discard all buffered caption state, e.g. when switching timelines
CaptionStream.prototype.reset = function () {
  this.latestDts_ = null;
  this.ignoreNextEqualDts_ = false;
  this.numSameDts_ = 0;
  // no data channel is active for either field until a control code selects one
  this.activeCea608Channel_ = [null, null];
  this.ccStreams_.forEach(function (ccStream) {
    ccStream.reset();
  });
};
|
|
|
// Route a CEA-608 packet to the Cea608Stream for the currently active
// data channel of its field; packet.type carries the CEA608 field number.
CaptionStream.prototype.dispatchCea608Packet = function (packet) {
  var field = packet.type;

  // certain control codes switch the active data channel for the field
  if (this.setsChannel1Active(packet)) {
    this.activeCea608Channel_[field] = 0;
  } else if (this.setsChannel2Active(packet)) {
    this.activeCea608Channel_[field] = 1;
  }

  if (this.activeCea608Channel_[field] === null) {
    // no channel has been activated yet; discard the data rather than
    // risk jumbled captions
    return;
  }

  this.ccStreams_[(field << 1) + this.activeCea608Channel_[field]].push(packet);
};
|
|
|
// After masking the parity bits, control codes of the form 0x10xx select
// data channel 1 and 0x18xx select data channel 2.
CaptionStream.prototype.setsChannel1Active = function (packet) {
  var maskedData = packet.ccData & 0x7800;
  return maskedData === 0x1000;
};

CaptionStream.prototype.setsChannel2Active = function (packet) {
  var maskedData = packet.ccData & 0x7800;
  return maskedData === 0x1800;
}; // ----------------------
|
// Session to Application |
|
// ---------------------- |
|
// This hash maps non-ASCII, special, and extended character codes to their |
|
// proper Unicode equivalent. The first keys that are only a single byte |
|
// are the non-standard ASCII characters, which simply map the CEA608 byte |
|
// to the standard ASCII/Unicode. The two-byte keys that follow are the CEA608 |
|
// character codes, but have their MSB bitmasked with 0x03 so that a lookup |
|
// can be performed regardless of the field and data channel on which the |
|
// character code was received. |
|
|
|
|
|
var CHARACTER_TRANSLATION = {
  // non-standard single-byte ASCII replacements
  0x2a: 0xe1, // á
  0x5c: 0xe9, // é
  0x5e: 0xed, // í
  0x5f: 0xf3, // ó
  0x60: 0xfa, // ú
  0x7b: 0xe7, // ç
  0x7c: 0xf7, // ÷
  0x7d: 0xd1, // Ñ
  0x7e: 0xf1, // ñ
  0x7f: 0x2588, // █
  // special characters (0x01 prefix)
  0x0130: 0xae, // ®
  0x0131: 0xb0, // °
  0x0132: 0xbd, // ½
  0x0133: 0xbf, // ¿
  0x0134: 0x2122, // ™
  0x0135: 0xa2, // ¢
  0x0136: 0xa3, // £
  0x0137: 0x266a, // ♪
  0x0138: 0xe0, // à
  0x0139: 0xa0, // non-breaking space
  0x013a: 0xe8, // è
  0x013b: 0xe2, // â
  0x013c: 0xea, // ê
  0x013d: 0xee, // î
  0x013e: 0xf4, // ô
  0x013f: 0xfb, // û
  // extended characters (0x02 prefix)
  0x0220: 0xc1, // Á
  0x0221: 0xc9, // É
  0x0222: 0xd3, // Ó
  0x0223: 0xda, // Ú
  0x0224: 0xdc, // Ü
  0x0225: 0xfc, // ü
  0x0226: 0x2018, // ‘
  0x0227: 0xa1, // ¡
  0x0228: 0x2a, // *
  0x0229: 0x27, // '
  0x022a: 0x2014, // —
  0x022b: 0xa9, // ©
  0x022c: 0x2120, // ℠
  0x022d: 0x2022, // •
  0x022e: 0x201c, // “
  0x022f: 0x201d, // ”
  0x0230: 0xc0, // À
  0x0231: 0xc2, // Â
  0x0232: 0xc7, // Ç
  0x0233: 0xc8, // È
  0x0234: 0xca, // Ê
  0x0235: 0xcb, // Ë
  0x0236: 0xeb, // ë
  0x0237: 0xce, // Î
  0x0238: 0xcf, // Ï
  0x0239: 0xef, // ï
  0x023a: 0xd4, // Ô
  0x023b: 0xd9, // Ù
  0x023c: 0xf9, // ù
  0x023d: 0xdb, // Û
  0x023e: 0xab, // «
  0x023f: 0xbb, // »
  // extended characters (0x03 prefix)
  0x0320: 0xc3, // Ã
  0x0321: 0xe3, // ã
  0x0322: 0xcd, // Í
  0x0323: 0xcc, // Ì
  0x0324: 0xec, // ì
  0x0325: 0xd2, // Ò
  0x0326: 0xf2, // ò
  0x0327: 0xd5, // Õ
  0x0328: 0xf5, // õ
  0x0329: 0x7b, // {
  0x032a: 0x7d, // }
  0x032b: 0x5c, // \
  0x032c: 0x5e, // ^
  0x032d: 0x5f, // _
  0x032e: 0x7c, // |
  0x032f: 0x7e, // ~
  0x0330: 0xc4, // Ä
  0x0331: 0xe4, // ä
  0x0332: 0xd6, // Ö
  0x0333: 0xf6, // ö
  0x0334: 0xdf, // ß
  0x0335: 0xa5, // ¥
  0x0336: 0xa4, // ¤
  0x0337: 0x2502, // │
  0x0338: 0xc5, // Å
  0x0339: 0xe5, // å
  0x033a: 0xd8, // Ø
  0x033b: 0xf8, // ø
  0x033c: 0x250c, // ┌
  0x033d: 0x2510, // ┐
  0x033e: 0x2514, // └
  0x033f: 0x2518 // ┘
};
|
|
|
// Translate a CEA-608 character code to its Unicode equivalent and
// return it as a one-character string; a null code yields ''.
var getCharFromCode = function getCharFromCode(code) {
  if (code === null) {
    return '';
  }

  var translated = CHARACTER_TRANSLATION[code] || code;
  return String.fromCharCode(translated);
}; // the index of the last row in a CEA-608 display buffer
|
|
|
|
|
var BOTTOM_ROW = 14; // This array is used for mapping PACs -> row #, since there's no way of
// getting it through bit logic; the index of a row code within ROWS is
// the caption row number it addresses.

var ROWS = [0x1100, 0x1120, 0x1200, 0x1220, 0x1500, 0x1520, 0x1600, 0x1620, 0x1700, 0x1720, 0x1000, 0x1300, 0x1320, 0x1400, 0x1420]; // CEA-608 captions are rendered onto a 34x15 matrix of character
|
// cells. The "bottom" row is the last element in the outer array. |
|
|
|
// Build a blank display buffer: one empty string per caption row.
var createDisplayBuffer = function createDisplayBuffer() {
  var rows = [];
  var remaining = BOTTOM_ROW + 1;

  while (remaining--) {
    rows.push('');
  }

  return rows;
};
|
|
|
var Cea608Stream = function Cea608Stream(field, dataChannel) { |
|
Cea608Stream.prototype.init.call(this); |
|
this.field_ = field || 0; |
|
this.dataChannel_ = dataChannel || 0; |
|
this.name_ = 'CC' + ((this.field_ << 1 | this.dataChannel_) + 1); |
|
this.setConstants(); |
|
this.reset(); |
|
|
|
this.push = function (packet) { |
|
var data, swap, char0, char1, text; // remove the parity bits |
|
|
|
data = packet.ccData & 0x7f7f; // ignore duplicate control codes; the spec demands they're sent twice |
|
|
|
if (data === this.lastControlCode_) { |
|
this.lastControlCode_ = null; |
|
return; |
|
} // Store control codes |
|
|
|
|
|
if ((data & 0xf000) === 0x1000) { |
|
this.lastControlCode_ = data; |
|
} else if (data !== this.PADDING_) { |
|
this.lastControlCode_ = null; |
|
} |
|
|
|
char0 = data >>> 8; |
|
char1 = data & 0xff; |
|
|
|
if (data === this.PADDING_) { |
|
return; |
|
} else if (data === this.RESUME_CAPTION_LOADING_) { |
|
this.mode_ = 'popOn'; |
|
} else if (data === this.END_OF_CAPTION_) { |
|
// If an EOC is received while in paint-on mode, the displayed caption |
|
// text should be swapped to non-displayed memory as if it was a pop-on |
|
// caption. Because of that, we should explicitly switch back to pop-on |
|
// mode |
|
this.mode_ = 'popOn'; |
|
this.clearFormatting(packet.pts); // if a caption was being displayed, it's gone now |
|
|
|
this.flushDisplayed(packet.pts); // flip memory |
|
|
|
swap = this.displayed_; |
|
this.displayed_ = this.nonDisplayed_; |
|
this.nonDisplayed_ = swap; // start measuring the time to display the caption |
|
|
|
this.startPts_ = packet.pts; |
|
} else if (data === this.ROLL_UP_2_ROWS_) { |
|
this.rollUpRows_ = 2; |
|
this.setRollUp(packet.pts); |
|
} else if (data === this.ROLL_UP_3_ROWS_) { |
|
this.rollUpRows_ = 3; |
|
this.setRollUp(packet.pts); |
|
} else if (data === this.ROLL_UP_4_ROWS_) { |
|
this.rollUpRows_ = 4; |
|
this.setRollUp(packet.pts); |
|
} else if (data === this.CARRIAGE_RETURN_) { |
|
this.clearFormatting(packet.pts); |
|
this.flushDisplayed(packet.pts); |
|
this.shiftRowsUp_(); |
|
this.startPts_ = packet.pts; |
|
} else if (data === this.BACKSPACE_) { |
|
if (this.mode_ === 'popOn') { |
|
this.nonDisplayed_[this.row_] = this.nonDisplayed_[this.row_].slice(0, -1); |
|
} else { |
|
this.displayed_[this.row_] = this.displayed_[this.row_].slice(0, -1); |
|
} |
|
} else if (data === this.ERASE_DISPLAYED_MEMORY_) { |
|
this.flushDisplayed(packet.pts); |
|
this.displayed_ = createDisplayBuffer(); |
|
} else if (data === this.ERASE_NON_DISPLAYED_MEMORY_) { |
|
this.nonDisplayed_ = createDisplayBuffer(); |
|
} else if (data === this.RESUME_DIRECT_CAPTIONING_) { |
|
if (this.mode_ !== 'paintOn') { |
|
// NOTE: This should be removed when proper caption positioning is |
|
// implemented |
|
this.flushDisplayed(packet.pts); |
|
this.displayed_ = createDisplayBuffer(); |
|
} |
|
|
|
this.mode_ = 'paintOn'; |
|
this.startPts_ = packet.pts; // Append special characters to caption text |
|
} else if (this.isSpecialCharacter(char0, char1)) { |
|
// Bitmask char0 so that we can apply character transformations |
|
// regardless of field and data channel. |
|
// Then byte-shift to the left and OR with char1 so we can pass the |
|
// entire character code to `getCharFromCode`. |
|
char0 = (char0 & 0x03) << 8; |
|
text = getCharFromCode(char0 | char1); |
|
this[this.mode_](packet.pts, text); |
|
this.column_++; // Append extended characters to caption text |
|
} else if (this.isExtCharacter(char0, char1)) { |
|
// Extended characters always follow their "non-extended" equivalents. |
|
// IE if a "è" is desired, you'll always receive "eè"; non-compliant |
|
// decoders are supposed to drop the "è", while compliant decoders |
|
// backspace the "e" and insert "è". |
|
// Delete the previous character |
|
if (this.mode_ === 'popOn') { |
|
this.nonDisplayed_[this.row_] = this.nonDisplayed_[this.row_].slice(0, -1); |
|
} else { |
|
this.displayed_[this.row_] = this.displayed_[this.row_].slice(0, -1); |
|
} // Bitmask char0 so that we can apply character transformations |
|
// regardless of field and data channel. |
|
// Then byte-shift to the left and OR with char1 so we can pass the |
|
// entire character code to `getCharFromCode`. |
|
|
|
|
|
char0 = (char0 & 0x03) << 8; |
|
text = getCharFromCode(char0 | char1); |
|
this[this.mode_](packet.pts, text); |
|
this.column_++; // Process mid-row codes |
|
} else if (this.isMidRowCode(char0, char1)) { |
|
// Attributes are not additive, so clear all formatting |
|
this.clearFormatting(packet.pts); // According to the standard, mid-row codes |
|
// should be replaced with spaces, so add one now |
|
|
|
this[this.mode_](packet.pts, ' '); |
|
this.column_++; |
|
|
|
if ((char1 & 0xe) === 0xe) { |
|
this.addFormatting(packet.pts, ['i']); |
|
} |
|
|
|
if ((char1 & 0x1) === 0x1) { |
|
this.addFormatting(packet.pts, ['u']); |
|
} // Detect offset control codes and adjust cursor |
|
|
|
} else if (this.isOffsetControlCode(char0, char1)) { |
|
// Cursor position is set by indent PAC (see below) in 4-column |
|
// increments, with an additional offset code of 1-3 to reach any |
|
// of the 32 columns specified by CEA-608. So all we need to do |
|
// here is increment the column cursor by the given offset. |
|
this.column_ += char1 & 0x03; // Detect PACs (Preamble Address Codes) |
|
} else if (this.isPAC(char0, char1)) { |
|
// There's no logic for PAC -> row mapping, so we have to just |
|
// find the row code in an array and use its index :( |
|
var row = ROWS.indexOf(data & 0x1f20); // Configure the caption window if we're in roll-up mode |
|
|
|
if (this.mode_ === 'rollUp') { |
|
// This implies that the base row is incorrectly set. |
|
// As per the recommendation in CEA-608(Base Row Implementation), defer to the number |
|
// of roll-up rows set. |
|
if (row - this.rollUpRows_ + 1 < 0) { |
|
row = this.rollUpRows_ - 1; |
|
} |
|
|
|
this.setRollUp(packet.pts, row); |
|
} |
|
|
|
if (row !== this.row_) { |
|
// formatting is only persistent for current row |
|
this.clearFormatting(packet.pts); |
|
this.row_ = row; |
|
} // All PACs can apply underline, so detect and apply |
|
// (All odd-numbered second bytes set underline) |
|
|
|
|
|
if (char1 & 0x1 && this.formatting_.indexOf('u') === -1) { |
|
this.addFormatting(packet.pts, ['u']); |
|
} |
|
|
|
if ((data & 0x10) === 0x10) { |
|
// We've got an indent level code. Each successive even number |
|
// increments the column cursor by 4, so we can get the desired |
|
// column position by bit-shifting to the right (to get n/2) |
|
// and multiplying by 4. |
|
this.column_ = ((data & 0xe) >> 1) * 4; |
|
} |
|
|
|
if (this.isColorPAC(char1)) { |
|
// it's a color code, though we only support white, which |
|
// can be either normal or italicized. white italics can be |
|
// either 0x4e or 0x6e depending on the row, so we just |
|
// bitwise-and with 0xe to see if italics should be turned on |
|
if ((char1 & 0xe) === 0xe) { |
|
this.addFormatting(packet.pts, ['i']); |
|
} |
|
} // We have a normal character in char0, and possibly one in char1 |
|
|
|
} else if (this.isNormalChar(char0)) { |
|
if (char1 === 0x00) { |
|
char1 = null; |
|
} |
|
|
|
text = getCharFromCode(char0); |
|
text += getCharFromCode(char1); |
|
this[this.mode_](packet.pts, text); |
|
this.column_ += text.length; |
|
} // finish data processing |
|
|
|
}; |
|
}; |
|
|
|
// Inherit event dispatch (on/off/trigger) from the base Stream.
Cea608Stream.prototype = new stream();

// Trigger a cue point that captures the current state of the
// display buffer
Cea608Stream.prototype.flushDisplayed = function (pts) {
  var rows = [];

  this.displayed_.forEach(function (row) {
    var trimmed;

    try {
      // remove spaces from the start and end of the string
      trimmed = row.trim();
    } catch (e) {
      // Ordinarily, this shouldn't happen. However, caption
      // parsing errors should not throw exceptions and
      // break playback.
      // eslint-disable-next-line no-console
      console.error('Skipping malformed caption.');
      trimmed = '';
    }

    rows.push(trimmed);
  });

  // combine all text rows to display in one cue, then drop blank rows
  // from the start and end -- but not the middle
  var content = rows.join('\n').replace(/^\n+|\n+$/g, '');

  if (content.length) {
    this.trigger('data', {
      startPts: this.startPts_,
      endPts: pts,
      text: content,
      stream: this.name_
    });
  }
};
|
/** |
|
* Zero out the data, used for startup and on seek |
|
*/ |
|
|
|
|
|
Cea608Stream.prototype.reset = function () {
  this.mode_ = 'popOn';
  this.startPts_ = 0;

  // wipe both caption memories
  this.displayed_ = createDisplayBuffer();
  this.nonDisplayed_ = createDisplayBuffer();
  this.lastControlCode_ = null;

  // cursor state used for proper line-breaking and spacing
  this.row_ = BOTTOM_ROW;
  this.column_ = 0;

  // When in roll-up mode, the index of the last row that will
  // actually display captions. If a caption is shifted to a row
  // with a lower index than this, it is cleared from the display
  // buffer
  this.topRow_ = 0;
  this.rollUpRows_ = 2;

  // currently-applied formatting tags (e.g. 'i', 'u')
  this.formatting_ = [];
};
|
/** |
|
* Sets up control code and related constants for this instance |
|
*/ |
|
|
|
|
|
Cea608Stream.prototype.setConstants = function () {
  // The following attributes have these uses:
  //  ext_    : char0 for mid-row codes, and the base for extended
  //            chars (ext_+0, ext_+1, and ext_+2 are char0s for
  //            extended codes)
  //  control_: char0 for control codes, except byte-shifted to the
  //            left so that we can do this.control_ | CONTROL_CODE
  //  offset_ : char0 for tab offset codes
  //
  // Control codes -- and _only_ control codes -- differ between field 1
  // and field 2: field 2 codes are always the field 1 value plus 1.
  // That's why there's the "| field" on the control value.
  if (this.dataChannel_ === 0) {
    this.BASE_ = 0x10;
    this.EXT_ = 0x11;
    this.CONTROL_ = (0x14 | this.field_) << 8;
    this.OFFSET_ = 0x17;
  } else if (this.dataChannel_ === 1) {
    this.BASE_ = 0x18;
    this.EXT_ = 0x19;
    this.CONTROL_ = (0x1c | this.field_) << 8;
    this.OFFSET_ = 0x1f;
  }

  // Constants for the LSByte command codes recognized by Cea608Stream.
  // This list is not exhaustive. For a more comprehensive listing and
  // semantics see
  // http://www.gpo.gov/fdsys/pkg/CFR-2010-title47-vol1/pdf/CFR-2010-title47-vol1-sec15-119.pdf
  var control = this.CONTROL_;

  // Padding
  this.PADDING_ = 0x0000;

  // Pop-on Mode
  this.RESUME_CAPTION_LOADING_ = control | 0x20;
  this.END_OF_CAPTION_ = control | 0x2f;

  // Roll-up Mode
  this.ROLL_UP_2_ROWS_ = control | 0x25;
  this.ROLL_UP_3_ROWS_ = control | 0x26;
  this.ROLL_UP_4_ROWS_ = control | 0x27;
  this.CARRIAGE_RETURN_ = control | 0x2d;

  // paint-on mode
  this.RESUME_DIRECT_CAPTIONING_ = control | 0x29;

  // Erasure
  this.BACKSPACE_ = control | 0x21;
  this.ERASE_DISPLAYED_MEMORY_ = control | 0x2c;
  this.ERASE_NON_DISPLAYED_MEMORY_ = control | 0x2e;
};
|
/** |
|
* Detects if the 2-byte packet data is a special character |
|
* |
|
* Special characters have a second byte in the range 0x30 to 0x3f, |
|
* with the first byte being 0x11 (for data channel 1) or 0x19 (for |
|
* data channel 2). |
|
* |
|
* @param {Integer} char0 The first byte |
|
* @param {Integer} char1 The second byte |
|
* @return {Boolean} Whether the 2 bytes are an special character |
|
*/ |
|
|
|
|
|
Cea608Stream.prototype.isSpecialCharacter = function (char0, char1) {
  // special chars: char0 is the EXT_ base, char1 in 0x30-0x3f
  var secondByteInRange = char1 >= 0x30 && char1 <= 0x3f;
  return secondByteInRange && char0 === this.EXT_;
};
|
/** |
|
* Detects if the 2-byte packet data is an extended character |
|
* |
|
* Extended characters have a second byte in the range 0x20 to 0x3f, |
|
* with the first byte being 0x12 or 0x13 (for data channel 1) or |
|
* 0x1a or 0x1b (for data channel 2). |
|
* |
|
* @param {Integer} char0 The first byte |
|
* @param {Integer} char1 The second byte |
|
* @return {Boolean} Whether the 2 bytes are an extended character |
|
*/ |
|
|
|
|
|
Cea608Stream.prototype.isExtCharacter = function (char0, char1) {
  // extended chars: char0 is EXT_+1 or EXT_+2, char1 in 0x20-0x3f
  var firstByteMatches = char0 === this.EXT_ + 1 || char0 === this.EXT_ + 2;
  return firstByteMatches && char1 >= 0x20 && char1 <= 0x3f;
};
|
/** |
|
* Detects if the 2-byte packet is a mid-row code |
|
* |
|
* Mid-row codes have a second byte in the range 0x20 to 0x2f, with |
|
* the first byte being 0x11 (for data channel 1) or 0x19 (for data |
|
* channel 2). |
|
* |
|
* @param {Integer} char0 The first byte |
|
* @param {Integer} char1 The second byte |
|
* @return {Boolean} Whether the 2 bytes are a mid-row code |
|
*/ |
|
|
|
|
|
Cea608Stream.prototype.isMidRowCode = function (char0, char1) {
  // mid-row codes: char0 is the EXT_ base, char1 in 0x20-0x2f
  var secondByteInRange = char1 >= 0x20 && char1 <= 0x2f;
  return secondByteInRange && char0 === this.EXT_;
};
|
/** |
|
* Detects if the 2-byte packet is an offset control code |
|
* |
|
* Offset control codes have a second byte in the range 0x21 to 0x23, |
|
* with the first byte being 0x17 (for data channel 1) or 0x1f (for |
|
* data channel 2). |
|
* |
|
* @param {Integer} char0 The first byte |
|
* @param {Integer} char1 The second byte |
|
* @return {Boolean} Whether the 2 bytes are an offset control code |
|
*/ |
|
|
|
|
|
Cea608Stream.prototype.isOffsetControlCode = function (char0, char1) {
  // tab offset codes: char0 is the OFFSET_ base, char1 in 0x21-0x23
  var secondByteInRange = char1 >= 0x21 && char1 <= 0x23;
  return secondByteInRange && char0 === this.OFFSET_;
};
|
/** |
|
* Detects if the 2-byte packet is a Preamble Address Code |
|
* |
|
* PACs have a first byte in the range 0x10 to 0x17 (for data channel 1) |
|
* or 0x18 to 0x1f (for data channel 2), with the second byte in the |
|
* range 0x40 to 0x7f. |
|
* |
|
* @param {Integer} char0 The first byte |
|
* @param {Integer} char1 The second byte |
|
* @return {Boolean} Whether the 2 bytes are a PAC |
|
*/ |
|
|
|
|
|
Cea608Stream.prototype.isPAC = function (char0, char1) {
  // PACs: char1 in 0x40-0x7f with char0 inside the 8-value BASE_ window
  var firstByteInWindow = char0 >= this.BASE_ && char0 < this.BASE_ + 8;
  var secondByteInRange = char1 >= 0x40 && char1 <= 0x7f;
  return firstByteInWindow && secondByteInRange;
};
|
/** |
|
* Detects if a packet's second byte is in the range of a PAC color code |
|
* |
|
* PAC color codes have the second byte be in the range 0x40 to 0x4f, or |
|
 * 0x60 to 0x7f (the implementation accepts the full 0x60-0x7f band).
|
* |
|
* @param {Integer} char1 The second byte |
|
* @return {Boolean} Whether the byte is a color PAC |
|
*/ |
|
|
|
|
|
Cea608Stream.prototype.isColorPAC = function (char1) {
  // two accepted bands for the second byte of a color PAC
  var lowBand = char1 >= 0x40 && char1 <= 0x4f;
  var highBand = char1 >= 0x60 && char1 <= 0x7f;
  return lowBand || highBand;
};
|
/** |
|
* Detects if a single byte is in the range of a normal character |
|
* |
|
* Normal text bytes are in the range 0x20 to 0x7f. |
|
* |
|
* @param {Integer} char The byte |
|
* @return {Boolean} Whether the byte is a normal character |
|
*/ |
|
|
|
|
|
Cea608Stream.prototype.isNormalChar = function (charCode) {
  // printable text bytes occupy 0x20-0x7f
  return charCode >= 0x20 && charCode <= 0x7f;
};
|
/** |
|
* Configures roll-up |
|
* |
|
* @param {Integer} pts Current PTS |
|
* @param {Integer} newBaseRow Used by PACs to slide the current window to |
|
* a new position |
|
*/ |
|
|
|
|
|
Cea608Stream.prototype.setRollUp = function (pts, newBaseRow) {
  if (this.mode_ !== 'rollUp') {
    // Reset the base row to the bottom row when switching modes
    this.row_ = BOTTOM_ROW;
    this.mode_ = 'rollUp';

    // Spec says to wipe memories when switching to roll-up
    this.flushDisplayed(pts);
    this.nonDisplayed_ = createDisplayBuffer();
    this.displayed_ = createDisplayBuffer();
  }

  if (newBaseRow !== undefined && newBaseRow !== this.row_) {
    // move currently displayed captions (up or down) to the new base row
    for (var offset = 0; offset < this.rollUpRows_; offset++) {
      this.displayed_[newBaseRow - offset] = this.displayed_[this.row_ - offset];
      this.displayed_[this.row_ - offset] = '';
    }
  }

  // with no explicit base row, keep the current one
  var baseRow = newBaseRow === undefined ? this.row_ : newBaseRow;
  this.topRow_ = baseRow - this.rollUpRows_ + 1;
}; // Adds the opening HTML tag for the passed character to the caption text,
|
// and keeps track of it for later closing |
|
|
|
|
|
Cea608Stream.prototype.addFormatting = function (pts, format) {
  // remember the tags so clearFormatting can close them later
  this.formatting_ = this.formatting_.concat(format);

  var openTags = format.map(function (tag) {
    return '<' + tag + '>';
  }).join('');

  this[this.mode_](pts, openTags);
}; // Adds HTML closing tags for current formatting to caption text and
|
// clears remembered formatting |
|
|
|
|
|
Cea608Stream.prototype.clearFormatting = function (pts) {
  if (!this.formatting_.length) {
    return;
  }

  // close tags in the reverse of the order they were opened
  var closeTags = '';

  for (var i = this.formatting_.length - 1; i >= 0; i--) {
    closeTags += '</' + this.formatting_[i] + '>';
  }

  this.formatting_ = [];
  this[this.mode_](pts, closeTags);
}; // Mode Implementations
|
|
|
|
|
Cea608Stream.prototype.popOn = function (pts, text) {
  // buffer characters into the off-screen (non-displayed) memory
  this.nonDisplayed_[this.row_] += text;
};
|
|
|
Cea608Stream.prototype.rollUp = function (pts, text) {
  // roll-up captions write directly to the displayed buffer
  this.displayed_[this.row_] += text;
};
|
|
|
Cea608Stream.prototype.shiftRowsUp_ = function () {
  var row;

  // clear out rows above the caption window
  for (row = 0; row < this.topRow_; row++) {
    this.displayed_[row] = '';
  }

  // clear out rows below the base row
  for (row = this.row_ + 1; row <= BOTTOM_ROW; row++) {
    this.displayed_[row] = '';
  }

  // shift each row in the window up by one
  for (row = this.topRow_; row < this.row_; row++) {
    this.displayed_[row] = this.displayed_[row + 1];
  }

  // clear out the bottom row
  this.displayed_[this.row_] = '';
};
|
|
|
Cea608Stream.prototype.paintOn = function (pts, text) {
  // paint-on captions also append directly to the displayed buffer
  this.displayed_[this.row_] += text;
}; // exports
|
|
|
|
|
// public interface of this module: the packet-level caption extractor and
// the CEA-608 channel decoder
var captionStream = {
  CaptionStream: CaptionStream,
  Cea608Stream: Cea608Stream
};
|
|
|
// MP2T stream_type values used below to identify the elementary streams
// this transmuxer handles (H.264 video, ADTS audio, timed metadata)
var streamTypes = {
  H264_STREAM_TYPE: 0x1B,
  ADTS_STREAM_TYPE: 0x0F,
  METADATA_STREAM_TYPE: 0x15
};
|
|
|
// MPEG-2 TS timestamps are 33-bit counters, so they wrap at 2^33
var MAX_TS = 8589934592;

// jumps larger than 2^32 ticks are treated as a rollover, not a seek
var RO_THRESH = 4294967296;

/**
 * Adjust a timestamp for 33-bit rollover relative to a reference value.
 *
 * If the current timestamp is greater than the reference and a rollover is
 * detected, the wrap happened in the opposite direction. Example scenario:
 * enter a long stream just after a rollover occurred -- the reference point
 * is a small number (e.g. 1) and the user seeks backwards over the rollover
 * point, producing very large values (e.g. 2^33 - 1). Since that data comes
 * before what was loaded previously, the timestamp is adjusted to
 * `value - 2^33`.
 *
 * Note: a seek forwards or back that is greater than RO_THRESH
 * (2^32, ~13 hours) will cause an incorrect adjustment.
 */
var handleRollover = function handleRollover(value, reference) {
  var direction = value > reference ? -1 : 1;

  while (Math.abs(reference - value) > RO_THRESH) {
    value += direction * MAX_TS;
  }

  return value;
};

var TimestampRolloverStream = function TimestampRolloverStream(type) {
  var lastDTS;
  var referenceDTS;

  TimestampRolloverStream.prototype.init.call(this);
  this.type_ = type;

  this.push = function (data) {
    // only operate on the stream type this instance was created for
    if (data.type !== this.type_) {
      return;
    }

    // the first DTS seen becomes the rollover reference point
    if (referenceDTS === undefined) {
      referenceDTS = data.dts;
    }

    data.dts = handleRollover(data.dts, referenceDTS);
    data.pts = handleRollover(data.pts, referenceDTS);
    lastDTS = data.dts;
    this.trigger('data', data);
  };

  this.flush = function () {
    // carry the last DTS forward as the reference for the next segment
    referenceDTS = lastDTS;
    this.trigger('done');
  };

  this.discontinuity = function () {
    referenceDTS = undefined;
    lastDTS = undefined;
  };
};
|
|
|
// inherit event handling from the base Stream
TimestampRolloverStream.prototype = new stream();
// module exports: the stream plus the bare rollover helper
var timestampRolloverStream = {
  TimestampRolloverStream: TimestampRolloverStream,
  handleRollover: handleRollover
};
|
|
|
// Percent-encode the specified byte range (e.g. "%49%44%33") so it can be
// handed to a percent-decoder below.
var percentEncode = function percentEncode(bytes, start, end) {
  var result = '';

  for (var i = start; i < end; i++) {
    result += '%' + ('00' + bytes[i].toString(16)).slice(-2);
  }

  return result;
};

// return the string representation of the specified byte range,
// interpreted as UTF-8.
var parseUtf8 = function parseUtf8(bytes, start, end) {
  return decodeURIComponent(percentEncode(bytes, start, end));
};

// return the string representation of the specified byte range,
// interpreted as ISO-8859-1. Every ISO-8859-1 byte maps to the Unicode
// code point of the same value, so String.fromCharCode is an exact
// decoder. (This replaces the deprecated global `unescape`, which
// produced identical output for percent-encoded two-digit sequences.)
var parseIso88591 = function parseIso88591(bytes, start, end) {
  var result = '';

  for (var i = start; i < end; i++) {
    result += String.fromCharCode(bytes[i]);
  }

  return result;
};

// decode an ID3 "syncsafe" integer: 4 bytes, 7 significant bits each
var parseSyncSafeInteger = function parseSyncSafeInteger(data) {
  return data[0] << 21 | data[1] << 14 | data[2] << 7 | data[3];
};

// parsers for the ID3 frame types this stream cares about; each mutates
// the passed frame object in place
var tagParsers = {
  // user-defined text frame: null-separated description and value
  TXXX: function TXXX(tag) {
    var i;

    if (tag.data[0] !== 3) {
      // ignore frames with unrecognized character encodings
      // (only encoding byte 3 is handled; it is decoded as UTF-8 below)
      return;
    }

    for (i = 1; i < tag.data.length; i++) {
      if (tag.data[i] === 0) {
        // parse the text fields
        tag.description = parseUtf8(tag.data, 1, i);

        // do not include the null terminator in the tag value
        tag.value = parseUtf8(tag.data, i + 1, tag.data.length).replace(/\0*$/, '');
        break;
      }
    }

    // NOTE: if no null separator is found, tag.value stays undefined and
    // tag.data becomes undefined as well (preserved historical behavior)
    tag.data = tag.value;
  },
  // user-defined URL frame: null-separated description and URL
  WXXX: function WXXX(tag) {
    var i;

    if (tag.data[0] !== 3) {
      // ignore frames with unrecognized character encodings
      return;
    }

    for (i = 1; i < tag.data.length; i++) {
      if (tag.data[i] === 0) {
        // parse the description and URL fields
        tag.description = parseUtf8(tag.data, 1, i);
        tag.url = parseUtf8(tag.data, i + 1, tag.data.length);
        break;
      }
    }
  },
  // private frame: null-terminated owner identifier followed by raw data
  PRIV: function PRIV(tag) {
    var i;

    for (i = 0; i < tag.data.length; i++) {
      if (tag.data[i] === 0) {
        // parse the owner identifier (ISO-8859-1 per the ID3 spec)
        tag.owner = parseIso88591(tag.data, 0, i);
        break;
      }
    }

    tag.privateData = tag.data.subarray(i + 1);
    tag.data = tag.privateData;
  }
};

var _MetadataStream;
|
|
|
// Reassembles ID3 tags from timed-metadata PES packets and emits one
// 'data' event per complete, parsed tag.
_MetadataStream = function MetadataStream(options) {
  var settings = {
    debug: !!(options && options.debug),
    // the bytes of the program-level descriptor field in MP2T
    // see ISO/IEC 13818-1:2013 (E), section 2.6 "Program and
    // program element descriptors"
    descriptor: options && options.descriptor
  },
      // the total size in bytes of the ID3 tag being parsed
      tagSize = 0,
      // tag data that is not complete enough to be parsed
      buffer = [],
      // the total number of bytes currently in the buffer
      bufferSize = 0,
      i;

  _MetadataStream.prototype.init.call(this);

  // calculate the text track in-band metadata track dispatch type
  // https://html.spec.whatwg.org/multipage/embedded-content.html#steps-to-expose-a-media-resource-specific-text-track
  this.dispatchType = streamTypes.METADATA_STREAM_TYPE.toString(16);

  if (settings.descriptor) {
    // append each descriptor byte as two lowercase hex digits
    for (i = 0; i < settings.descriptor.length; i++) {
      this.dispatchType += ('00' + settings.descriptor[i].toString(16)).slice(-2);
    }
  }

  // Accumulate chunks until a whole ID3 tag is buffered, then parse its
  // frames and emit the tag.
  this.push = function (chunk) {
    var tag, frameStart, frameSize, frame, i, frameHeader;

    if (chunk.type !== 'timed-metadata') {
      return;
    }

    // if data_alignment_indicator is set in the PES header,
    // we must have the start of a new ID3 tag. Assume anything
    // remaining in the buffer was malformed and throw it out
    if (chunk.dataAlignmentIndicator) {
      bufferSize = 0;
      buffer.length = 0;
    }

    // ignore events that don't look like ID3 data
    // (a fresh tag must start with the "ID3" magic and a 10-byte header)
    if (buffer.length === 0 && (chunk.data.length < 10 || chunk.data[0] !== 'I'.charCodeAt(0) || chunk.data[1] !== 'D'.charCodeAt(0) || chunk.data[2] !== '3'.charCodeAt(0))) {
      if (settings.debug) {
        // eslint-disable-next-line no-console
        console.log('Skipping unrecognized metadata packet');
      }

      return;
    }

    // add this chunk to the data we've collected so far
    buffer.push(chunk);
    bufferSize += chunk.data.byteLength;

    // grab the size of the entire frame from the ID3 header
    if (buffer.length === 1) {
      // the frame size is transmitted as a 28-bit integer in the
      // last four bytes of the ID3 header.
      // The most significant bit of each byte is dropped and the
      // results concatenated to recover the actual value.
      tagSize = parseSyncSafeInteger(chunk.data.subarray(6, 10));

      // ID3 reports the tag size excluding the header but it's more
      // convenient for our comparisons to include it
      tagSize += 10;
    }

    // if the entire frame has not arrived, wait for more data
    if (bufferSize < tagSize) {
      return;
    }

    // collect the entire frame so it can be parsed
    tag = {
      data: new Uint8Array(tagSize),
      frames: [],
      pts: buffer[0].pts,
      dts: buffer[0].dts
    };

    // copy the buffered chunks into one contiguous byte array, consuming
    // the buffer as we go
    for (i = 0; i < tagSize;) {
      tag.data.set(buffer[0].data.subarray(0, tagSize - i), i);
      i += buffer[0].data.byteLength;
      bufferSize -= buffer[0].data.byteLength;
      buffer.shift();
    }

    // find the start of the first frame and the end of the tag
    frameStart = 10;

    // flag bit 0x40 in the header indicates an extended header is present
    if (tag.data[5] & 0x40) {
      // advance the frame start past the extended header
      frameStart += 4; // header size field

      frameStart += parseSyncSafeInteger(tag.data.subarray(10, 14));

      // clip any padding off the end
      tagSize -= parseSyncSafeInteger(tag.data.subarray(16, 20));
    }

    // parse one or more ID3 frames
    // http://id3.org/id3v2.3.0#ID3v2_frame_overview
    do {
      // determine the number of bytes in this frame
      frameSize = parseSyncSafeInteger(tag.data.subarray(frameStart + 4, frameStart + 8));

      if (frameSize < 1) {
        // eslint-disable-next-line no-console
        return console.log('Malformed ID3 frame encountered. Skipping metadata parsing.');
      }

      // the frame id is the first four bytes of the frame header, as text
      frameHeader = String.fromCharCode(tag.data[frameStart], tag.data[frameStart + 1], tag.data[frameStart + 2], tag.data[frameStart + 3]);
      frame = {
        id: frameHeader,
        data: tag.data.subarray(frameStart + 10, frameStart + frameSize + 10)
      };
      frame.key = frame.id;

      if (tagParsers[frame.id]) {
        tagParsers[frame.id](frame);

        // handle the special PRIV frame used to indicate the start
        // time for raw AAC data
        if (frame.owner === 'com.apple.streaming.transportStreamTimestamp') {
          // reconstruct the 33-bit timestamp spread across bytes 3-7
          var d = frame.data,
              size = (d[3] & 0x01) << 30 | d[4] << 22 | d[5] << 14 | d[6] << 6 | d[7] >>> 2;
          size *= 4;
          size += d[7] & 0x03;
          frame.timeStamp = size;

          // in raw AAC, all subsequent data will be timestamped based
          // on the value of this frame
          // we couldn't have known the appropriate pts and dts before
          // parsing this ID3 tag so set those values now
          if (tag.pts === undefined && tag.dts === undefined) {
            tag.pts = frame.timeStamp;
            tag.dts = frame.timeStamp;
          }

          this.trigger('timestamp', frame);
        }
      }

      tag.frames.push(frame);

      frameStart += 10; // advance past the frame header

      frameStart += frameSize; // advance past the frame body
    } while (frameStart < tagSize);

    this.trigger('data', tag);
  };
};
|
|
|
// inherit event handling from the base Stream
_MetadataStream.prototype = new stream();
var metadataStream = _MetadataStream;

var TimestampRolloverStream$1 = timestampRolloverStream.TimestampRolloverStream; // object types

var _TransportPacketStream, _TransportParseStream, _ElementaryStream; // constants

var MP2T_PACKET_LENGTH = 188,
    // bytes
SYNC_BYTE = 0x47; // every MP2T packet begins with this sync byte ('G')
|
/** |
|
* Splits an incoming stream of binary data into MPEG-2 Transport |
|
* Stream packets. |
|
*/ |
|
|
|
_TransportPacketStream = function TransportPacketStream() {
  var buffer = new Uint8Array(MP2T_PACKET_LENGTH);
  var bytesInBuffer = 0;

  _TransportPacketStream.prototype.init.call(this);

  /**
   * Split a stream of data into M2TS packets
   **/
  this.push = function (bytes) {
    var everything;

    // If there are bytes remaining from the last segment, prepend them to the
    // bytes that were pushed in
    if (bytesInBuffer) {
      everything = new Uint8Array(bytes.byteLength + bytesInBuffer);
      everything.set(buffer.subarray(0, bytesInBuffer));
      everything.set(bytes, bytesInBuffer);
      bytesInBuffer = 0;
    } else {
      everything = bytes;
    }

    var startIndex = 0;
    var endIndex = MP2T_PACKET_LENGTH;

    // While we have enough data for a packet
    while (endIndex < everything.byteLength) {
      // Look for a pair of start and end sync bytes in the data..
      if (everything[startIndex] === SYNC_BYTE && everything[endIndex] === SYNC_BYTE) {
        // We found a packet so emit it and jump one whole packet forward in
        // the stream
        this.trigger('data', everything.subarray(startIndex, endIndex));
        startIndex += MP2T_PACKET_LENGTH;
        endIndex += MP2T_PACKET_LENGTH;
      } else {
        // We have somehow become de-synchronized: step forward one byte at
        // a time until we find a pair of sync bytes that denote a packet
        startIndex++;
        endIndex++;
      }
    }

    // If there was some data left over at the end of the segment that couldn't
    // possibly be a whole packet, keep it because it might be the start of a
    // packet that continues in the next segment
    if (startIndex < everything.byteLength) {
      buffer.set(everything.subarray(startIndex), 0);
      bytesInBuffer = everything.byteLength - startIndex;
    }
  };

  /**
   * Passes identified M2TS packets to the TransportParseStream to be parsed
   **/
  this.flush = function () {
    // If the buffer contains a whole packet when we are being flushed, emit it
    // and empty the buffer. Otherwise hold onto the data because it may be
    // important for decoding the next segment
    if (bytesInBuffer === MP2T_PACKET_LENGTH && buffer[0] === SYNC_BYTE) {
      this.trigger('data', buffer);
      bytesInBuffer = 0;
    }

    this.trigger('done');
  };
};

_TransportPacketStream.prototype = new stream();
|
/** |
|
* Accepts an MP2T TransportPacketStream and emits data events with parsed |
|
* forms of the individual transport stream packets. |
|
*/ |
|
|
|
_TransportParseStream = function TransportParseStream() { |
|
var parsePsi, parsePat, parsePmt, self; |
|
|
|
_TransportParseStream.prototype.init.call(this); |
|
|
|
self = this; |
|
this.packetsWaitingForPmt = []; |
|
this.programMapTable = undefined; |
|
|
|
parsePsi = function parsePsi(payload, psi) { |
|
var offset = 0; // PSI packets may be split into multiple sections and those |
|
// sections may be split into multiple packets. If a PSI |
|
// section starts in this packet, the payload_unit_start_indicator |
|
// will be true and the first byte of the payload will indicate |
|
// the offset from the current position to the start of the |
|
// section. |
|
|
|
if (psi.payloadUnitStartIndicator) { |
|
offset += payload[offset] + 1; |
|
} |
|
|
|
if (psi.type === 'pat') { |
|
parsePat(payload.subarray(offset), psi); |
|
} else { |
|
parsePmt(payload.subarray(offset), psi); |
|
} |
|
}; |
|
|
|
parsePat = function parsePat(payload, pat) { |
|
pat.section_number = payload[7]; // eslint-disable-line camelcase |
|
|
|
pat.last_section_number = payload[8]; // eslint-disable-line camelcase |
|
// skip the PSI header and parse the first PMT entry |
|
|
|
self.pmtPid = (payload[10] & 0x1F) << 8 | payload[11]; |
|
pat.pmtPid = self.pmtPid; |
|
}; |
|
/** |
|
* Parse out the relevant fields of a Program Map Table (PMT). |
|
* @param payload {Uint8Array} the PMT-specific portion of an MP2T |
|
* packet. The first byte in this array should be the table_id |
|
* field. |
|
* @param pmt {object} the object that should be decorated with |
|
* fields parsed from the PMT. |
|
*/ |
|
|
|
|
|
parsePmt = function parsePmt(payload, pmt) { |
|
var sectionLength, tableEnd, programInfoLength, offset; // PMTs can be sent ahead of the time when they should actually |
|
// take effect. We don't believe this should ever be the case |
|
// for HLS but we'll ignore "forward" PMT declarations if we see |
|
// them. Future PMT declarations have the current_next_indicator |
|
// set to zero. |
|
|
|
if (!(payload[5] & 0x01)) { |
|
return; |
|
} // overwrite any existing program map table |
|
|
|
|
|
self.programMapTable = { |
|
video: null, |
|
audio: null, |
|
'timed-metadata': {} |
|
}; // the mapping table ends at the end of the current section |
|
|
|
sectionLength = (payload[1] & 0x0f) << 8 | payload[2]; |
|
tableEnd = 3 + sectionLength - 4; // to determine where the table is, we have to figure out how |
|
// long the program info descriptors are |
|
|
|
programInfoLength = (payload[10] & 0x0f) << 8 | payload[11]; // advance the offset to the first entry in the mapping table |
|
|
|
offset = 12 + programInfoLength; |
|
|
|
while (offset < tableEnd) { |
|
var streamType = payload[offset]; |
|
var pid = (payload[offset + 1] & 0x1F) << 8 | payload[offset + 2]; // only map a single elementary_pid for audio and video stream types |
|
// TODO: should this be done for metadata too? for now maintain behavior of |
|
// multiple metadata streams |
|
|
|
if (streamType === streamTypes.H264_STREAM_TYPE && self.programMapTable.video === null) { |
|
self.programMapTable.video = pid; |
|
} else if (streamType === streamTypes.ADTS_STREAM_TYPE && self.programMapTable.audio === null) { |
|
self.programMapTable.audio = pid; |
|
} else if (streamType === streamTypes.METADATA_STREAM_TYPE) { |
|
// map pid to stream type for metadata streams |
|
self.programMapTable['timed-metadata'][pid] = streamType; |
|
} // move to the next table entry |
|
// skip past the elementary stream descriptors, if present |
|
|
|
|
|
offset += ((payload[offset + 3] & 0x0F) << 8 | payload[offset + 4]) + 5; |
|
} // record the map on the packet as well |
|
|
|
|
|
pmt.programMapTable = self.programMapTable; |
|
}; |
|
/** |
|
* Deliver a new MP2T packet to the next stream in the pipeline. |
|
*/ |
|
|
|
|
|
this.push = function (packet) { |
|
var result = {}, |
|
offset = 4; |
|
result.payloadUnitStartIndicator = !!(packet[1] & 0x40); // pid is a 13-bit field starting at the last bit of packet[1] |
|
|
|
result.pid = packet[1] & 0x1f; |
|
result.pid <<= 8; |
|
result.pid |= packet[2]; // if an adaption field is present, its length is specified by the |
|
// fifth byte of the TS packet header. The adaptation field is |
|
// used to add stuffing to PES packets that don't fill a complete |
|
// TS packet, and to specify some forms of timing and control data |
|
// that we do not currently use. |
|
|
|
if ((packet[3] & 0x30) >>> 4 > 0x01) { |
|
offset += packet[offset] + 1; |
|
} // parse the rest of the packet based on the type |
|
|
|
|
|
if (result.pid === 0) { |
|
result.type = 'pat'; |
|
parsePsi(packet.subarray(offset), result); |
|
this.trigger('data', result); |
|
} else if (result.pid === this.pmtPid) { |
|
result.type = 'pmt'; |
|
parsePsi(packet.subarray(offset), result); |
|
this.trigger('data', result); // if there are any packets waiting for a PMT to be found, process them now |
|
|
|
while (this.packetsWaitingForPmt.length) { |
|
this.processPes_.apply(this, this.packetsWaitingForPmt.shift()); |
|
} |
|
} else if (this.programMapTable === undefined) { |
|
// When we have not seen a PMT yet, defer further processing of |
|
// PES packets until one has been parsed |
|
this.packetsWaitingForPmt.push([packet, offset, result]); |
|
} else { |
|
this.processPes_(packet, offset, result); |
|
} |
|
}; |
|
|
|
/**
 * Annotate a transport packet as a PES fragment and emit it downstream.
 *
 * Uses the parsed program map table to translate the packet's PID into
 * a stream type before triggering a 'data' event.
 *
 * @param {Uint8Array} packet - the raw transport packet
 * @param {Number} offset - byte offset where the payload begins
 * @param {Object} result - event object already carrying pid/flags
 */
this.processPes_ = function (packet, offset, result) {
  var pmt = this.programMapTable;

  // map the PID onto a stream type via the PMT
  if (result.pid === pmt.video) {
    result.streamType = streamTypes.H264_STREAM_TYPE;
  } else if (result.pid === pmt.audio) {
    result.streamType = streamTypes.ADTS_STREAM_TYPE;
  } else {
    // neither video nor audio: timed-metadata, or undefined when the
    // PID is not listed in the PMT at all
    result.streamType = pmt['timed-metadata'][result.pid];
  }

  result.type = 'pes';
  result.data = packet.subarray(offset);
  this.trigger('data', result);
};
|
}; |
|
|
|
// inherit the base Stream behaviour (on/off/trigger/pipe)
_TransportParseStream.prototype = new stream();
// MP2T stream_type values recognised in the PMT (ISO/IEC 13818-1):
// 0x1b = H.264 video, 0x0f = ADTS AAC audio
_TransportParseStream.STREAM_TYPES = {
h264: 0x1b,
adts: 0x0f
};
|
/** |
|
* Reconsistutes program elementary stream (PES) packets from parsed |
|
* transport stream packets. That is, if you pipe an |
|
* mp2t.TransportParseStream into a mp2t.ElementaryStream, the output |
|
* events will be events which capture the bytes for individual PES |
|
* packets plus relevant metadata that has been extracted from the |
|
* container. |
|
*/ |
|
|
|
_ElementaryStream = function ElementaryStream() {
  var self = this,
      // PES packet fragments, one accumulator per elementary stream
      video = {
        data: [],
        size: 0
      },
      audio = {
        data: [],
        size: 0
      },
      timedMetadata = {
        data: [],
        size: 0
      },

      /**
       * Parse the header of a reassembled PES packet.
       * @param {Uint8Array} payload - the complete PES packet bytes
       * @param {Object} pes - event object annotated with packetLength,
       * dataAlignmentIndicator, pts/dts (when signalled) and the payload
       */
      parsePes = function parsePes(payload, pes) {
        var ptsDtsFlags; // get the packet length, this will be 0 for video

        pes.packetLength = 6 + (payload[4] << 8 | payload[5]); // find out if this packet starts a new keyframe

        pes.dataAlignmentIndicator = (payload[6] & 0x04) !== 0; // PES packets may be annotated with a PTS value, or a PTS value
        // and a DTS value. Determine what combination of values is
        // available to work with.

        ptsDtsFlags = payload[7]; // PTS and DTS are normally stored as a 33-bit number. Javascript
        // performs all bitwise operations on 32-bit integers but javascript
        // supports a much greater range (52-bits) of integer using standard
        // mathematical operations.
        // We construct a 31-bit value using bitwise operators over the 31
        // most significant bits and then multiply by 4 (equal to a left-shift
        // of 2) before we add the final 2 least significant bits of the
        // timestamp (equal to an OR.)

        if (ptsDtsFlags & 0xC0) {
          // the PTS and DTS are not written out directly. For information
          // on how they are encoded, see
          // http://dvd.sourceforge.net/dvdinfo/pes-hdr.html
          pes.pts = (payload[9] & 0x0E) << 27 | (payload[10] & 0xFF) << 20 | (payload[11] & 0xFE) << 12 | (payload[12] & 0xFF) << 5 | (payload[13] & 0xFE) >>> 3;
          pes.pts *= 4; // Left shift by 2

          pes.pts += (payload[13] & 0x06) >>> 1; // OR by the two LSBs

          pes.dts = pes.pts;

          if (ptsDtsFlags & 0x40) {
            pes.dts = (payload[14] & 0x0E) << 27 | (payload[15] & 0xFF) << 20 | (payload[16] & 0xFE) << 12 | (payload[17] & 0xFF) << 5 | (payload[18] & 0xFE) >>> 3;
            pes.dts *= 4; // Left shift by 2

            pes.dts += (payload[18] & 0x06) >>> 1; // OR by the two LSBs
          }
        } // the data section starts immediately after the PES header.
        // pes_header_data_length specifies the number of header bytes
        // that follow the last byte of the field.

        pes.data = payload.subarray(9 + payload[8]);
      },

      /**
       * Pass completely parsed PES packets to the next stream in the pipeline
       * @param {Object} stream$$1 - fragment accumulator (video/audio/timedMetadata)
       * @param {('video'|'audio'|'timed-metadata')} type - emitted event type
       * @param {Boolean} [forceFlush] - clear buffered fragments even when a
       * complete packet cannot be emitted
       **/
      flushStream = function flushStream(stream$$1, type, forceFlush) {
        var packetData = new Uint8Array(stream$$1.size),
            event = {
              type: type
            },
            i = 0,
            offset = 0,
            packetFlushable = false,
            fragment; // do nothing if there is not enough buffered data for a complete
        // PES header

        if (!stream$$1.data.length || stream$$1.size < 9) {
          return;
        }

        event.trackId = stream$$1.data[0].pid; // reassemble the packet

        for (i = 0; i < stream$$1.data.length; i++) {
          fragment = stream$$1.data[i];
          packetData.set(fragment.data, offset);
          offset += fragment.data.byteLength;
        } // parse assembled packet's PES header

        parsePes(packetData, event); // non-video PES packets MUST have a non-zero PES_packet_length
        // check that there is enough stream data to fill the packet

        packetFlushable = type === 'video' || event.packetLength <= stream$$1.size; // flush pending packets if the conditions are right

        if (forceFlush || packetFlushable) {
          stream$$1.size = 0;
          stream$$1.data.length = 0;
        } // only emit packets that are complete. this is to avoid assembling
        // incomplete PES packets due to poor segmentation

        if (packetFlushable) {
          self.trigger('data', event);
        }
      };

  _ElementaryStream.prototype.init.call(this);
  /**
   * Identifies M2TS packet types and parses PES packets using metadata
   * parsed from the PMT
   **/

  this.push = function (data) {
    ({
      pat: function pat() {// we have to wait for the PMT to arrive as well before we
        // have any meaningful metadata
      },
      pes: function pes() {
        var stream$$1, streamType;

        switch (data.streamType) {
          // BUGFIX: this case label was duplicated in the original; the
          // second (dead) `case streamTypes.H264_STREAM_TYPE:` has been
          // removed. Behavior is unchanged.
          case streamTypes.H264_STREAM_TYPE:
            stream$$1 = video;
            streamType = 'video';
            break;

          case streamTypes.ADTS_STREAM_TYPE:
            stream$$1 = audio;
            streamType = 'audio';
            break;

          case streamTypes.METADATA_STREAM_TYPE:
            stream$$1 = timedMetadata;
            streamType = 'timed-metadata';
            break;

          default:
            // ignore unknown stream types
            return;
        } // if a new packet is starting, we can flush the completed
        // packet

        if (data.payloadUnitStartIndicator) {
          flushStream(stream$$1, streamType, true);
        } // buffer this fragment until we are sure we've received the
        // complete payload

        stream$$1.data.push(data);
        stream$$1.size += data.data.byteLength;
      },
      pmt: function pmt() {
        var event = {
          type: 'metadata',
          tracks: []
        },
            programMapTable = data.programMapTable; // translate audio and video streams to tracks

        if (programMapTable.video !== null) {
          event.tracks.push({
            timelineStartInfo: {
              baseMediaDecodeTime: 0
            },
            id: +programMapTable.video,
            codec: 'avc',
            type: 'video'
          });
        }

        if (programMapTable.audio !== null) {
          event.tracks.push({
            timelineStartInfo: {
              baseMediaDecodeTime: 0
            },
            id: +programMapTable.audio,
            codec: 'adts',
            type: 'audio'
          });
        }

        self.trigger('data', event);
      }
    })[data.type]();
  };
  /**
   * Flush any remaining input. Video PES packets may be of variable
   * length. Normally, the start of a new video packet can trigger the
   * finalization of the previous packet. That is not possible if no
   * more video is forthcoming, however. In that case, some other
   * mechanism (like the end of the file) has to be employed. When it is
   * clear that no additional data is forthcoming, calling this method
   * will flush the buffered packets.
   */

  this.flush = function () {
    // !!THIS ORDER IS IMPORTANT!!
    // video first then audio
    flushStream(video, 'video');
    flushStream(audio, 'audio');
    flushStream(timedMetadata, 'timed-metadata');
    this.trigger('done');
  };
};
|
|
|
// inherit the base Stream behaviour (on/off/trigger/pipe)
_ElementaryStream.prototype = new stream();
// public surface of the m2ts module
var m2ts = {
PAT_PID: 0x0000,
MP2T_PACKET_LENGTH: MP2T_PACKET_LENGTH,
TransportPacketStream: _TransportPacketStream,
TransportParseStream: _TransportParseStream,
ElementaryStream: _ElementaryStream,
TimestampRolloverStream: TimestampRolloverStream$1,
CaptionStream: captionStream.CaptionStream,
Cea608Stream: captionStream.Cea608Stream,
MetadataStream: metadataStream
};

// re-export every stream-type constant (e.g. H264_STREAM_TYPE) on m2ts
for (var type$1 in streamTypes) {
if (streamTypes.hasOwnProperty(type$1)) {
m2ts[type$1] = streamTypes[type$1];
}
}

var m2ts_1 = m2ts;
|
|
|
var _AdtsStream;

// ADTS sampling_frequency_index -> sample rate in Hz, indices 0-12
// (ISO/IEC 14496-3); reserved/escape indices are not represented
var ADTS_SAMPLING_FREQUENCIES = [96000, 88200, 64000, 48000, 44100, 32000, 24000, 22050, 16000, 12000, 11025, 8000, 7350];
|
/* |
|
* Accepts a ElementaryStream and emits data events with parsed |
|
* AAC Audio Frames of the individual packets. Input audio in ADTS |
|
* format is unpacked and re-emitted as AAC frames. |
|
* |
|
* @see http://wiki.multimedia.cx/index.php?title=ADTS |
|
* @see http://wiki.multimedia.cx/?title=Understanding_AAC |
|
*/ |
|
|
|
_AdtsStream = function AdtsStream() {
// partial ADTS data carried over between push() calls so frames that
// straddle a PES packet boundary can still be parsed
var buffer;

_AdtsStream.prototype.init.call(this);

/**
 * Unpack complete ADTS frames from an audio PES packet and emit one
 * 'data' event per AAC frame. Trailing bytes of an incomplete frame
 * are buffered until the next push.
 * @param {Object} packet - PES packet ({type, data, pts, dts})
 */
this.push = function (packet) {
var i = 0,
frameNum = 0,
frameLength,
protectionSkipBytes,
frameEnd,
oldBuffer,
sampleCount,
adtsFrameDuration;

if (packet.type !== 'audio') {
// ignore non-audio data
return;
} // Prepend any data in the buffer to the input data so that we can parse
// aac frames the cross a PES packet boundary

if (buffer) {
oldBuffer = buffer;
buffer = new Uint8Array(oldBuffer.byteLength + packet.data.byteLength);
buffer.set(oldBuffer);
buffer.set(packet.data, oldBuffer.byteLength);
} else {
buffer = packet.data;
} // unpack any ADTS frames which have been fully received
// for details on the ADTS header, see http://wiki.multimedia.cx/index.php?title=ADTS

while (i + 5 < buffer.length) {
// Loook for the start of an ADTS header..
if (buffer[i] !== 0xFF || (buffer[i + 1] & 0xF6) !== 0xF0) {
// If a valid header was not found, jump one forward and attempt to
// find a valid ADTS header starting at the next byte
i++;
continue;
} // The protection skip bit tells us if we have 2 bytes of CRC data at the
// end of the ADTS header

protectionSkipBytes = (~buffer[i + 1] & 0x01) * 2; // Frame length is a 13 bit integer starting 16 bits from the
// end of the sync sequence

frameLength = (buffer[i + 3] & 0x03) << 11 | buffer[i + 4] << 3 | (buffer[i + 5] & 0xe0) >> 5;
// ((number of raw data blocks) + 1) * 1024 samples per frame
sampleCount = ((buffer[i + 6] & 0x03) + 1) * 1024;
// frame duration expressed on the 90kHz MPEG-2 timestamp clock
adtsFrameDuration = sampleCount * 90000 / ADTS_SAMPLING_FREQUENCIES[(buffer[i + 2] & 0x3c) >>> 2];
frameEnd = i + frameLength; // If we don't have enough data to actually finish this ADTS frame, return
// and wait for more data

if (buffer.byteLength < frameEnd) {
return;
} // Otherwise, deliver the complete AAC frame

this.trigger('data', {
pts: packet.pts + frameNum * adtsFrameDuration,
dts: packet.dts + frameNum * adtsFrameDuration,
sampleCount: sampleCount,
audioobjecttype: (buffer[i + 2] >>> 6 & 0x03) + 1,
channelcount: (buffer[i + 2] & 1) << 2 | (buffer[i + 3] & 0xc0) >>> 6,
samplerate: ADTS_SAMPLING_FREQUENCIES[(buffer[i + 2] & 0x3c) >>> 2],
samplingfrequencyindex: (buffer[i + 2] & 0x3c) >>> 2,
// assume ISO/IEC 14496-12 AudioSampleEntry default of 16
samplesize: 16,
data: buffer.subarray(i + 7 + protectionSkipBytes, frameEnd)
}); // If the buffer is empty, clear it and return

if (buffer.byteLength === frameEnd) {
buffer = undefined;
return;
}

frameNum++; // Remove the finished frame from the buffer and start the process again

buffer = buffer.subarray(frameEnd);
}
};

// Signal end-of-stream downstream. Any buffered partial frame is kept
// (it can never complete without more data).
this.flush = function () {
this.trigger('done');
};
};
|
|
|
// inherit the base Stream behaviour (on/off/trigger/pipe)
_AdtsStream.prototype = new stream();
var adts = _AdtsStream;
|
|
|
var ExpGolomb;
/**
 * Parser for exponential Golomb codes, a variable-bitwidth number encoding
 * scheme used by h264. Bits are consumed most-significant first from a
 * 32-bit working word that is refilled from `workingData` on demand.
 */

ExpGolomb = function ExpGolomb(workingData) {
  // number of bytes of workingData not yet loaded into the working word
  var bytesLeft = workingData.byteLength;
  // the 32-bit word currently being consumed, MSB first — :uint
  var word = 0;
  // number of unread bits remaining in `word` — :uint
  var bitsLeft = 0;

  // ():uint — bit length of the data not yet loaded into the word
  this.length = function () {
    return 8 * bytesLeft;
  };

  // ():uint — total unread bits, including those already loaded
  this.bitsAvailable = function () {
    return 8 * bytesLeft + bitsLeft;
  };

  // ():void — refill the working word with up to 4 unread bytes,
  // zero-padded on the right when fewer than 4 remain
  this.loadWord = function () {
    var start = workingData.byteLength - bytesLeft;
    var scratch = new Uint8Array(4);
    var count = Math.min(4, bytesLeft);

    if (count === 0) {
      throw new Error('no bytes available');
    }

    scratch.set(workingData.subarray(start, start + count));
    word = new DataView(scratch.buffer).getUint32(0);

    // track the amount of workingData that has been processed
    bitsLeft = count * 8;
    bytesLeft -= count;
  };

  // (count:int):void — discard `count` bits
  this.skipBits = function (count) {
    if (bitsLeft > count) {
      word <<= count;
      bitsLeft -= count;
      return;
    }

    // consume the rest of the current word, skip whole bytes, then
    // reload and drop the remaining bits
    count -= bitsLeft;
    var wholeBytes = Math.floor(count / 8); // :int
    count -= wholeBytes * 8;
    bytesLeft -= wholeBytes;
    this.loadWord();
    word <<= count;
    bitsLeft -= count;
  };

  // (size:int):uint — read `size` bits as an unsigned integer
  // (if size > 31, handle error)
  this.readBits = function (size) {
    var bits = Math.min(bitsLeft, size); // :uint
    var result = word >>> 32 - bits; // :uint

    bitsLeft -= bits;

    if (bitsLeft > 0) {
      word <<= bits;
    } else if (bytesLeft > 0) {
      this.loadWord();
    }

    // recurse when the request straddled a word boundary
    bits = size - bits;
    return bits > 0 ? result << bits | this.readBits(bits) : result;
  };

  // ():uint — count and consume zero bits up to (not including) the
  // first set bit
  this.skipLeadingZeros = function () {
    var i; // :uint

    for (i = 0; i < bitsLeft; ++i) {
      if ((word & 0x80000000 >>> i) !== 0) {
        // the first bit of the working word is now 1
        word <<= i;
        bitsLeft -= i;
        return i;
      }
    }

    // we exhausted the working word and still have not found a 1
    this.loadWord();
    return i + this.skipLeadingZeros();
  };

  // ():void
  this.skipUnsignedExpGolomb = function () {
    this.skipBits(1 + this.skipLeadingZeros());
  };

  // ():void
  this.skipExpGolomb = function () {
    this.skipBits(1 + this.skipLeadingZeros());
  };

  // ():uint — decode an unsigned exp-Golomb (ue(v)) value
  this.readUnsignedExpGolomb = function () {
    var clz = this.skipLeadingZeros(); // :uint
    return this.readBits(clz + 1) - 1;
  };

  // ():int — decode a signed exp-Golomb (se(v)) value
  this.readExpGolomb = function () {
    var valu = this.readUnsignedExpGolomb(); // :int

    if (0x01 & valu) {
      // the number is odd if the low order bit is set
      return 1 + valu >>> 1; // add 1 to make it even, and divide by 2
    }

    return -1 * (valu >>> 1); // divide by two then make it negative
  };

  // :Boolean
  this.readBoolean = function () {
    return this.readBits(1) === 1;
  };

  // ():int
  this.readUnsignedByte = function () {
    return this.readBits(8);
  };

  this.loadWord();
};

var expGolomb = ExpGolomb;
|
|
|
var _H264Stream, _NalByteStream;

// populated below: profile_idc values whose SPS carries additional fields
var PROFILES_WITH_OPTIONAL_SPS_DATA;
|
/** |
|
* Accepts a NAL unit byte stream and unpacks the embedded NAL units. |
|
*/ |
|
|
|
_NalByteStream = function NalByteStream() {
// byte offset of the most recently located NAL start code; kept across
// pushes via this closure
var syncPoint = 0,
// scan position; also persists between pushes (see `i -= syncPoint` below)
i,
// unparsed bytes carried over from previous pushes
buffer;

_NalByteStream.prototype.init.call(this);
/*
* Scans a byte stream and triggers a data event with the NAL units found.
* @param {Object} data Event received from H264Stream
* @param {Uint8Array} data.data The h264 byte stream to be scanned
*
* @see H264Stream.push
*/

this.push = function (data) {
var swapBuffer;

// append the new bytes to any carried-over remainder
if (!buffer) {
buffer = data.data;
} else {
swapBuffer = new Uint8Array(buffer.byteLength + data.data.byteLength);
swapBuffer.set(buffer);
swapBuffer.set(data.data, buffer.byteLength);
buffer = swapBuffer;
} // Rec. ITU-T H.264, Annex B
// scan for NAL unit boundaries
// a match looks like this:
// 0 0 1 .. NAL .. 0 0 1
// ^ sync point ^ i
// or this:
// 0 0 1 .. NAL .. 0 0 0
// ^ sync point ^ i
// advance the sync point to a NAL start, if necessary

for (; syncPoint < buffer.byteLength - 3; syncPoint++) {
if (buffer[syncPoint + 2] === 1) {
// the sync point is properly aligned
i = syncPoint + 5;
break;
}
}

// step through the buffer three bytes at a time looking for the next
// 0 0 1 (or trailing 0 0 0) sequence that terminates the current NAL
while (i < buffer.byteLength) {
// look at the current byte to determine if we've hit the end of
// a NAL unit boundary
switch (buffer[i]) {
case 0:
// skip past non-sync sequences
if (buffer[i - 1] !== 0) {
i += 2;
break;
} else if (buffer[i - 2] !== 0) {
i++;
break;
} // deliver the NAL unit if it isn't empty

if (syncPoint + 3 !== i - 2) {
this.trigger('data', buffer.subarray(syncPoint + 3, i - 2));
} // drop trailing zeroes

do {
i++;
} while (buffer[i] !== 1 && i < buffer.length);

syncPoint = i - 2;
i += 3;
break;

case 1:
// skip past non-sync sequences
if (buffer[i - 1] !== 0 || buffer[i - 2] !== 0) {
i += 3;
break;
} // deliver the NAL unit

this.trigger('data', buffer.subarray(syncPoint + 3, i - 2));
syncPoint = i - 2;
i += 3;
break;

default:
// the current byte isn't a one or zero, so it cannot be part
// of a sync sequence
i += 3;
break;
}
} // filter out the NAL units that were delivered

// keep only the undelivered tail and rebase the scan state onto it
buffer = buffer.subarray(syncPoint);
i -= syncPoint;
syncPoint = 0;
};

this.flush = function () {
// deliver the last buffered NAL unit
if (buffer && buffer.byteLength > 3) {
this.trigger('data', buffer.subarray(syncPoint + 3));
} // reset the stream state

buffer = null;
syncPoint = 0;
this.trigger('done');
};
};
|
|
|
// inherit the base Stream behaviour (on/off/trigger/pipe)
_NalByteStream.prototype = new stream(); // values of profile_idc that indicate additional fields are included in the SPS
// see Recommendation ITU-T H.264 (4/2013),
// 7.3.2.1.1 Sequence parameter set data syntax

PROFILES_WITH_OPTIONAL_SPS_DATA = {
100: true,
110: true,
122: true,
244: true,
44: true,
83: true,
86: true,
118: true,
128: true,
138: true,
139: true,
134: true
};
|
/** |
|
* Accepts input from a ElementaryStream and produces H.264 NAL unit data |
|
* events. |
|
*/ |
|
|
|
_H264Stream = function H264Stream() {
var nalByteStream = new _NalByteStream(),
self,
// timing/track metadata of the packet most recently pushed; NAL units
// emitted by nalByteStream are stamped with these values
trackId,
currentPts,
currentDts,
// helpers assigned further down in this constructor
discardEmulationPreventionBytes,
readSequenceParameterSet,
skipScalingList;

_H264Stream.prototype.init.call(this);

self = this;
/*
* Pushes a packet from a stream onto the NalByteStream
*
* @param {Object} packet - A packet received from a stream
* @param {Uint8Array} packet.data - The raw bytes of the packet
* @param {Number} packet.dts - Decode timestamp of the packet
* @param {Number} packet.pts - Presentation timestamp of the packet
* @param {Number} packet.trackId - The id of the h264 track this packet came from
* @param {('video'|'audio')} packet.type - The type of packet
*
*/

this.push = function (packet) {
if (packet.type !== 'video') {
// only video PES packets carry H.264 data; ignore everything else
return;
}

// remember this packet's metadata so the NAL units parsed out of it
// (delivered via the nalByteStream 'data' handler) can be annotated
trackId = packet.trackId;
currentPts = packet.pts;
currentDts = packet.dts;
nalByteStream.push(packet);
};
|
/* |
|
* Identify NAL unit types and pass on the NALU, trackId, presentation and decode timestamps |
|
* for the NALUs to the next stream component. |
|
* Also, preprocess caption and sequence parameter NALUs. |
|
* |
|
* @param {Uint8Array} data - A NAL unit identified by `NalByteStream.push` |
|
* @see NalByteStream.push |
|
*/ |
|
|
|
|
|
nalByteStream.on('data', function (data) {
var event = {
trackId: trackId,
pts: currentPts,
dts: currentDts,
data: data
};

// nal_unit_type is the low 5 bits of the first NAL byte
switch (data[0] & 0x1f) {
case 0x05:
event.nalUnitType = 'slice_layer_without_partitioning_rbsp_idr';
break;

case 0x06:
event.nalUnitType = 'sei_rbsp';
event.escapedRBSP = discardEmulationPreventionBytes(data.subarray(1));
break;

case 0x07:
// sequence parameter set: also parse out the video configuration
event.nalUnitType = 'seq_parameter_set_rbsp';
event.escapedRBSP = discardEmulationPreventionBytes(data.subarray(1));
event.config = readSequenceParameterSet(event.escapedRBSP);
break;

case 0x08:
event.nalUnitType = 'pic_parameter_set_rbsp';
break;

case 0x09:
event.nalUnitType = 'access_unit_delimiter_rbsp';
break;

default:
// other NAL unit types are forwarded without a nalUnitType label
break;
} // This triggers data on the H264Stream

self.trigger('data', event);
});
nalByteStream.on('done', function () {
self.trigger('done');
});

// flushing the inner NAL byte stream emits any final buffered NAL unit,
// which in turn fires the handlers chained above
this.flush = function () {
nalByteStream.flush();
};
|
/** |
|
* Advance the ExpGolomb decoder past a scaling list. The scaling |
|
* list is optionally transmitted as part of a sequence parameter |
|
* set and is not relevant to transmuxing. |
|
* @param count {number} the number of entries in this scaling list |
|
* @param expGolombDecoder {object} an ExpGolomb pointed to the |
|
* start of a scaling list |
|
* @see Recommendation ITU-T H.264, Section 7.3.2.1.1.1 |
|
*/ |
|
|
|
|
|
/**
 * Advance the ExpGolomb decoder past a scaling list. The scaling
 * list is optionally transmitted as part of a sequence parameter
 * set and is not relevant to transmuxing.
 * @param count {number} the number of entries in this scaling list
 * @param expGolombDecoder {object} an ExpGolomb pointed to the
 * start of a scaling list
 * @see Recommendation ITU-T H.264, Section 7.3.2.1.1.1
 */
skipScalingList = function skipScalingList(count, expGolombDecoder) {
  var lastScale = 8;
  var nextScale = 8;
  var deltaScale;

  for (var j = 0; j < count; j++) {
    // once nextScale hits zero, no further deltas are coded
    if (nextScale !== 0) {
      deltaScale = expGolombDecoder.readExpGolomb();
      nextScale = (lastScale + deltaScale + 256) % 256;
    }

    if (nextScale !== 0) {
      lastScale = nextScale;
    }
  }
};
|
/** |
|
* Expunge any "Emulation Prevention" bytes from a "Raw Byte |
|
* Sequence Payload" |
|
* @param data {Uint8Array} the bytes of a RBSP from a NAL |
|
* unit |
|
* @return {Uint8Array} the RBSP without any Emulation |
|
* Prevention Bytes |
|
*/ |
|
|
|
|
|
/**
 * Expunge any "Emulation Prevention" bytes from a "Raw Byte
 * Sequence Payload"
 * @param data {Uint8Array} the bytes of a RBSP from a NAL
 * unit
 * @return {Uint8Array} the RBSP without any Emulation
 * Prevention Bytes
 */
discardEmulationPreventionBytes = function discardEmulationPreventionBytes(data) {
  var length = data.byteLength;
  var epbPositions = [];
  var i = 1;

  // locate every 0x03 byte that immediately follows a 0x00 0x00 pair
  while (i < length - 2) {
    if (data[i] === 0 && data[i + 1] === 0 && data[i + 2] === 0x03) {
      epbPositions.push(i + 2);
      i += 2;
    } else {
      i++;
    }
  }

  // nothing to strip: return the original view untouched
  if (epbPositions.length === 0) {
    return data;
  }

  // copy every byte except the emulation prevention bytes
  var newLength = length - epbPositions.length;
  var newData = new Uint8Array(newLength);
  var sourceIndex = 0;

  for (i = 0; i < newLength; sourceIndex++, i++) {
    if (sourceIndex === epbPositions[0]) {
      // skip the 0x03 byte and retire its recorded position
      sourceIndex++;
      epbPositions.shift();
    }

    newData[i] = data[sourceIndex];
  }

  return newData;
};
|
/** |
|
* Read a sequence parameter set and return some interesting video |
|
* properties. A sequence parameter set is the H264 metadata that |
|
* describes the properties of upcoming video frames. |
|
* @param data {Uint8Array} the bytes of a sequence parameter set |
|
* @return {object} an object with configuration parsed from the |
|
* sequence parameter set, including the dimensions of the |
|
* associated video frames. |
|
*/ |
|
|
|
|
|
/**
 * Read a sequence parameter set and return some interesting video
 * properties. A sequence parameter set is the H264 metadata that
 * describes the properties of upcoming video frames.
 * @param data {Uint8Array} the bytes of a sequence parameter set
 * @return {object} an object with configuration parsed from the
 * sequence parameter set, including the dimensions of the
 * associated video frames.
 */
readSequenceParameterSet = function readSequenceParameterSet(data) {
  var frameCropLeftOffset = 0,
      frameCropRightOffset = 0,
      frameCropTopOffset = 0,
      frameCropBottomOffset = 0,
      sarScale = 1,
      decoder = new expGolomb(data),
      profileIdc,
      levelIdc,
      profileCompatibility,
      chromaFormatIdc,
      picOrderCntType,
      numRefFramesInPicOrderCntCycle,
      picWidthInMbsMinus1,
      picHeightInMapUnitsMinus1,
      frameMbsOnlyFlag,
      scalingListCount,
      sarRatio,
      aspectRatioIdc,
      i;

  profileIdc = decoder.readUnsignedByte(); // profile_idc
  profileCompatibility = decoder.readUnsignedByte(); // constraint_set[0-5]_flag
  levelIdc = decoder.readUnsignedByte(); // level_idc u(8)
  decoder.skipUnsignedExpGolomb(); // seq_parameter_set_id

  // some profiles carry extra chroma/bit-depth/scaling fields that must be
  // consumed (but are not otherwise needed) before the fields we care about
  if (PROFILES_WITH_OPTIONAL_SPS_DATA[profileIdc]) {
    chromaFormatIdc = decoder.readUnsignedExpGolomb();

    if (chromaFormatIdc === 3) {
      decoder.skipBits(1); // separate_colour_plane_flag
    }

    decoder.skipUnsignedExpGolomb(); // bit_depth_luma_minus8
    decoder.skipUnsignedExpGolomb(); // bit_depth_chroma_minus8
    decoder.skipBits(1); // qpprime_y_zero_transform_bypass_flag

    if (decoder.readBoolean()) {
      // seq_scaling_matrix_present_flag
      scalingListCount = chromaFormatIdc !== 3 ? 8 : 12;

      for (i = 0; i < scalingListCount; i++) {
        if (decoder.readBoolean()) {
          // seq_scaling_list_present_flag[ i ]
          skipScalingList(i < 6 ? 16 : 64, decoder);
        }
      }
    }
  }

  decoder.skipUnsignedExpGolomb(); // log2_max_frame_num_minus4
  picOrderCntType = decoder.readUnsignedExpGolomb();

  if (picOrderCntType === 0) {
    decoder.readUnsignedExpGolomb(); // log2_max_pic_order_cnt_lsb_minus4
  } else if (picOrderCntType === 1) {
    decoder.skipBits(1); // delta_pic_order_always_zero_flag
    decoder.skipExpGolomb(); // offset_for_non_ref_pic
    decoder.skipExpGolomb(); // offset_for_top_to_bottom_field
    numRefFramesInPicOrderCntCycle = decoder.readUnsignedExpGolomb();

    for (i = 0; i < numRefFramesInPicOrderCntCycle; i++) {
      decoder.skipExpGolomb(); // offset_for_ref_frame[ i ]
    }
  }

  decoder.skipUnsignedExpGolomb(); // max_num_ref_frames
  decoder.skipBits(1); // gaps_in_frame_num_value_allowed_flag
  picWidthInMbsMinus1 = decoder.readUnsignedExpGolomb();
  picHeightInMapUnitsMinus1 = decoder.readUnsignedExpGolomb();
  frameMbsOnlyFlag = decoder.readBits(1);

  if (frameMbsOnlyFlag === 0) {
    decoder.skipBits(1); // mb_adaptive_frame_field_flag
  }

  decoder.skipBits(1); // direct_8x8_inference_flag

  if (decoder.readBoolean()) {
    // frame_cropping_flag
    frameCropLeftOffset = decoder.readUnsignedExpGolomb();
    frameCropRightOffset = decoder.readUnsignedExpGolomb();
    frameCropTopOffset = decoder.readUnsignedExpGolomb();
    frameCropBottomOffset = decoder.readUnsignedExpGolomb();
  }

  if (decoder.readBoolean()) {
    // vui_parameters_present_flag
    if (decoder.readBoolean()) {
      // aspect_ratio_info_present_flag
      // predefined sample aspect ratios keyed by aspect_ratio_idc
      // (Recommendation ITU-T H.264, Table E-1)
      var predefinedSars = {
        1: [1, 1],
        2: [12, 11],
        3: [10, 11],
        4: [16, 11],
        5: [40, 33],
        6: [24, 11],
        7: [20, 11],
        8: [32, 11],
        9: [80, 33],
        10: [18, 11],
        11: [15, 11],
        12: [64, 33],
        13: [160, 99],
        14: [4, 3],
        15: [3, 2],
        16: [2, 1]
      };

      aspectRatioIdc = decoder.readUnsignedByte();

      if (aspectRatioIdc === 255) {
        // Extended_SAR: the ratio is carried explicitly as two 16-bit values
        sarRatio = [decoder.readUnsignedByte() << 8 | decoder.readUnsignedByte(), decoder.readUnsignedByte() << 8 | decoder.readUnsignedByte()];
      } else {
        // undefined for reserved idc values, leaving sarScale at 1
        sarRatio = predefinedSars[aspectRatioIdc];
      }

      if (sarRatio) {
        sarScale = sarRatio[0] / sarRatio[1];
      }
    }
  }

  return {
    profileIdc: profileIdc,
    levelIdc: levelIdc,
    profileCompatibility: profileCompatibility,
    width: Math.ceil(((picWidthInMbsMinus1 + 1) * 16 - frameCropLeftOffset * 2 - frameCropRightOffset * 2) * sarScale),
    height: (2 - frameMbsOnlyFlag) * (picHeightInMapUnitsMinus1 + 1) * 16 - frameCropTopOffset * 2 - frameCropBottomOffset * 2
  };
};
|
}; |
|
|
|
// inherit the base Stream behaviour (on/off/trigger/pipe)
_H264Stream.prototype = new stream();
// public surface of the h264 module
var h264 = {
H264Stream: _H264Stream,
NalByteStream: _NalByteStream
};
|
|
|
/** |
|
* mux.js |
|
* |
|
* Copyright (c) 2016 Brightcove |
|
* All rights reserved. |
|
* |
|
* Utilities to detect basic properties and metadata about Aac data. |
|
*/ |
|
|
|
// ADTS sampling_frequency_index -> sample rate in Hz, indices 0-12
var ADTS_SAMPLING_FREQUENCIES$1 = [96000, 88200, 64000, 48000, 44100, 32000, 24000, 22050, 16000, 12000, 11025, 8000, 7350];
|
|
|
/**
 * Heuristic check for raw AAC segment data: such segments begin with an
 * ID3 tag, so look for the 'ID3' magic bytes at the start of the buffer.
 * @param {Uint8Array} data - the first bytes of a segment
 * @return {Boolean} true when the data starts with an ID3 tag
 */
var isLikelyAacData = function isLikelyAacData(data) {
  return data[0] === 'I'.charCodeAt(0) &&
    data[1] === 'D'.charCodeAt(0) &&
    data[2] === '3'.charCodeAt(0);
};
|
|
|
/**
 * Decode an ID3 "sync-safe" integer: 28 bits of data packed 7 bits per
 * byte so no byte of the encoded value can look like a sync byte.
 * @param {Uint8Array|Array} data - the four encoded bytes
 * @return {Number} the decoded integer
 */
var parseSyncSafeInteger$1 = function parseSyncSafeInteger(data) {
  return (data[0] << 21) | (data[1] << 14) | (data[2] << 7) | data[3];
};
|
// @see http://en.wikipedia.org/wiki/Percent-encoding |
|
|
|
|
|
// return a percent-encoded representation of the specified byte range
// @see http://en.wikipedia.org/wiki/Percent-encoding
var percentEncode$1 = function percentEncode(bytes, start, end) {
  var encoded = '';

  for (var i = start; i < end; i++) {
    // each byte becomes '%' plus its zero-padded two-digit hex value
    encoded += '%' + ('00' + bytes[i].toString(16)).slice(-2);
  }

  return encoded;
};
|
// interpreted as ISO-8859-1. |
|
|
|
|
|
// return the string representation of the specified byte range,
// interpreted as ISO-8859-1.
var parseIso88591$1 = function parseIso88591(bytes, start, end) {
  // unescape() decodes the percent-encoding back into Latin-1 characters
  var encoded = percentEncode$1(bytes, start, end);
  return unescape(encoded); // jshint ignore:line
};
|
|
|
/**
 * Compute the total byte length of an ID3 tag starting at `byteIndex`.
 * The sync-safe size field in bytes 6-9 excludes the 10-byte header and
 * the optional 10-byte footer, so both are added back here.
 * @param {Uint8Array|Array} header - buffer containing the ID3 header
 * @param {Number} byteIndex - offset of the tag within `header`
 * @return {Number} the full tag size in bytes
 */
var parseId3TagSize = function parseId3TagSize(header, byteIndex) {
  var tagSize = header[byteIndex + 6] << 21 | header[byteIndex + 7] << 14 | header[byteIndex + 8] << 7 | header[byteIndex + 9];
  var flags = header[byteIndex + 5];
  var hasFooter = (flags & 16) >> 4;

  // header is always 10 bytes; the footer, when flagged, adds 10 more
  return hasFooter ? tagSize + 20 : tagSize + 10;
};
|
|
|
/**
 * Read the frame length out of an ADTS header.
 *
 * aac_frame_length is a 13-bit field spanning the low 2 bits of header
 * byte 3, all of byte 4, and the high 3 bits of byte 5.
 *
 * BUGFIX: the original computed `header[byteIndex + 3] & 0x3 << 11`,
 * which JavaScript parses as `header[byteIndex + 3] & (0x3 << 11)` —
 * always 0 for a byte value — silently dropping the two most
 * significant bits of the length. The mask must be applied before the
 * shift: `(header[byteIndex + 3] & 0x3) << 11`.
 *
 * @param {Uint8Array|Array} header - buffer containing the ADTS header
 * @param {Number} byteIndex - offset of the header within `header`
 * @return {Number} the frame length in bytes, including the header
 */
var parseAdtsSize = function parseAdtsSize(header, byteIndex) {
  var lowThree = (header[byteIndex + 5] & 0xE0) >> 5,
      middle = header[byteIndex + 4] << 3,
      highTwo = (header[byteIndex + 3] & 0x3) << 11;
  return highTwo | middle | lowThree;
};
|
|
|
/**
 * Classify the data starting at `byteIndex` as ID3 timed metadata,
 * ADTS audio, or unknown.
 *
 * BUGFIX: the original audio check `header[byteIndex] & 0xff === 0xff`
 * parses as `header[byteIndex] & (0xff === 0xff)`, i.e.
 * `header[byteIndex] & 1`, so any odd byte followed by 0xFn was
 * misclassified as audio. The mask must be grouped before comparison:
 * `(header[byteIndex] & 0xff) === 0xff`.
 *
 * @param {Uint8Array|Array} header - buffer to inspect
 * @param {Number} byteIndex - offset at which to test
 * @return {('timed-metadata'|'audio'|null)} the detected packet type
 */
var parseType$1 = function parseType(header, byteIndex) {
  // 'I', 'D', '3' — the magic prefix of an ID3v2 tag
  if (header[byteIndex] === 'I'.charCodeAt(0) && header[byteIndex + 1] === 'D'.charCodeAt(0) && header[byteIndex + 2] === '3'.charCodeAt(0)) {
    return 'timed-metadata';
  } else if ((header[byteIndex] & 0xff) === 0xff && (header[byteIndex + 1] & 0xf0) === 0xf0) {
    // twelve set bits in a row: the ADTS frame sync word
    return 'audio';
  }

  return null;
};
|
|
|
/**
 * Scan an ADTS payload for the first valid frame header and return the
 * sample rate it declares.
 * @param {Uint8Array} packet - raw ADTS bytes
 * @return {Number|null} the sample rate in Hz, or null when no header
 * is found
 */
var parseSampleRate = function parseSampleRate(packet) {
  for (var i = 0; i + 5 < packet.length;) {
    // an ADTS header begins with the 0xFFF sync word (layer bits zero)
    if (packet[i] !== 0xFF || (packet[i + 1] & 0xF6) !== 0xF0) {
      // If a valid header was not found, jump one forward and attempt to
      // find a valid ADTS header starting at the next byte
      i++;
      continue;
    }

    // sampling_frequency_index occupies bits 2-5 of the third header byte
    return ADTS_SAMPLING_FREQUENCIES$1[(packet[i + 2] & 0x3c) >>> 2];
  }

  return null;
};
|
|
|
// Extract the MPEG-2 transport stream timestamp carried in an Apple
// 'com.apple.streaming.transportStreamTimestamp' PRIV frame from the
// ID3v2 tag at the start of `packet`. Returns the timestamp (90kHz
// clock) or null when no such frame is present or the tag is malformed.
var parseAacTimestamp = function parseAacTimestamp(packet) {
  var frameStart, frameSize, frame, frameHeader; // find the start of the first frame and the end of the tag

  // ID3v2 header is 10 bytes; frames start right after it.
  frameStart = 10;

  // Bit 6 of the flags byte indicates an extended header.
  if (packet[5] & 0x40) {
    // advance the frame start past the extended header
    frameStart += 4; // header size field

    frameStart += parseSyncSafeInteger$1(packet.subarray(10, 14));
  } // parse one or more ID3 frames
  // http://id3.org/id3v2.3.0#ID3v2_frame_overview


  do {
    // determine the number of bytes in this frame
    frameSize = parseSyncSafeInteger$1(packet.subarray(frameStart + 4, frameStart + 8));

    // Malformed or empty frame: bail rather than loop forever.
    if (frameSize < 1) {
      return null;
    }

    frameHeader = String.fromCharCode(packet[frameStart], packet[frameStart + 1], packet[frameStart + 2], packet[frameStart + 3]);

    if (frameHeader === 'PRIV') {
      frame = packet.subarray(frameStart + 10, frameStart + frameSize + 10);

      // The PRIV body is a NUL-terminated owner string followed by data.
      for (var i = 0; i < frame.byteLength; i++) {
        if (frame[i] === 0) {
          var owner = parseIso88591$1(frame, 0, i);

          if (owner === 'com.apple.streaming.transportStreamTimestamp') {
            var d = frame.subarray(i + 1);
            // Reassemble the 33-bit timestamp from the 8-byte payload:
            // the top bit comes from d[3]'s LSB, the rest from d[4..7].
            // The low two bits are added after a *4 (rather than one
            // 33-bit shift) — presumably to stay within safe integer
            // arithmetic for bitwise ops, which are 32-bit in JS.
            var size = (d[3] & 0x01) << 30 | d[4] << 22 | d[5] << 14 | d[6] << 6 | d[7] >>> 2;
            size *= 4;
            size += d[7] & 0x03;
            return size;
          }

          break;
        }
      }
    }

    frameStart += 10; // advance past the frame header

    frameStart += frameSize; // advance past the frame body
  } while (frameStart < packet.byteLength);

  return null;
};
|
|
|
// Public surface of the AAC utility helpers consumed by AacStream and
// the transmuxer below. `isLikelyAacData` is defined earlier in the
// file (outside this chunk).
var utils = {
  isLikelyAacData: isLikelyAacData,
  parseId3TagSize: parseId3TagSize,
  parseAdtsSize: parseAdtsSize,
  parseType: parseType$1,
  parseSampleRate: parseSampleRate,
  parseAacTimestamp: parseAacTimestamp
};
|
|
|
var _AacStream;
/**
 * Splits an incoming stream of binary data into ADTS and ID3 Frames.
 */

_AacStream = function AacStream() {
  var everything = new Uint8Array(),
      timeStamp = 0;

  _AacStream.prototype.init.call(this);

  /**
   * Record a timestamp (from an ID3 tag) to stamp onto subsequently
   * emitted audio packets.
   */
  this.setTimestamp = function (timestamp) {
    timeStamp = timestamp;
  };

  /**
   * Scan the pushed bytes for complete ID3 tags and ADTS frames and
   * emit each as a 'data' event. Any incomplete trailing bytes are
   * retained and prepended to the next push.
   * @param {Uint8Array} bytes raw AAC/ID3 byte stream
   */
  this.push = function (bytes) {
    var frameSize = 0,
        byteIndex = 0,
        bytesLeft,
        chunk,
        packet,
        carryover; // If there are bytes remaining from the last segment, prepend them to the
    // bytes that were pushed in

    if (everything.length) {
      // BUGFIX: keep a reference to the previous buffer BEFORE
      // reallocating `everything`. The original code copied
      // `everything.subarray(0, tempLength)` of the freshly-allocated
      // (all-zero) buffer into itself, silently discarding the bytes
      // carried over from the previous push.
      carryover = everything;
      everything = new Uint8Array(bytes.byteLength + carryover.length);
      everything.set(carryover);
      everything.set(bytes, carryover.length);
    } else {
      everything = bytes;
    }

    // Need at least 3 bytes to recognize either header type at the
    // current offset.
    while (everything.length - byteIndex >= 3) {
      if (everything[byteIndex] === 'I'.charCodeAt(0) && everything[byteIndex + 1] === 'D'.charCodeAt(0) && everything[byteIndex + 2] === '3'.charCodeAt(0)) {
        // Exit early because we don't have enough to parse
        // the ID3 tag header
        if (everything.length - byteIndex < 10) {
          break;
        } // check framesize


        frameSize = utils.parseId3TagSize(everything, byteIndex); // Exit early if we don't have enough in the buffer
        // to emit a full packet
        // Add to byteIndex to support multiple ID3 tags in sequence

        if (byteIndex + frameSize > everything.length) {
          break;
        }

        chunk = {
          type: 'timed-metadata',
          data: everything.subarray(byteIndex, byteIndex + frameSize)
        };
        this.trigger('data', chunk);
        byteIndex += frameSize;
        continue;
      } else if ((everything[byteIndex] & 0xff) === 0xff && (everything[byteIndex + 1] & 0xf0) === 0xf0) {
        // Exit early because we don't have enough to parse
        // the ADTS frame header
        if (everything.length - byteIndex < 7) {
          break;
        }

        frameSize = utils.parseAdtsSize(everything, byteIndex); // Exit early if we don't have enough in the buffer
        // to emit a full packet

        if (byteIndex + frameSize > everything.length) {
          break;
        }

        packet = {
          type: 'audio',
          data: everything.subarray(byteIndex, byteIndex + frameSize),
          pts: timeStamp,
          dts: timeStamp
        };
        this.trigger('data', packet);
        byteIndex += frameSize;
        continue;
      }

      byteIndex++;
    }

    bytesLeft = everything.length - byteIndex;

    // Retain any unconsumed tail for the next push.
    if (bytesLeft > 0) {
      everything = everything.subarray(byteIndex);
    } else {
      everything = new Uint8Array();
    }
  };
};

_AacStream.prototype = new stream();
var aac = _AacStream;
|
|
|
var H264Stream = h264.H264Stream;
var isLikelyAacData$1 = utils.isLikelyAacData; // constants

// Track properties that the segment streams below copy from parsed
// ADTS/H264 metadata onto the mp4 track objects, and that
// CoalesceStream.flush mirrors into the emitted event's `info`.
var AUDIO_PROPERTIES = ['audioobjecttype', 'channelcount', 'samplerate', 'samplingfrequencyindex', 'samplesize'];
var VIDEO_PROPERTIES = ['width', 'height', 'profileIdc', 'levelIdc', 'profileCompatibility']; // object types

// Forward declarations for the stream constructors defined below.
var _VideoSegmentStream, _AudioSegmentStream, _Transmuxer, _CoalesceStream;
|
/** |
|
* Compare two arrays (even typed) for same-ness |
|
*/ |
|
|
|
|
|
/**
 * Compare two arrays (even typed) for same-ness: equal length and
 * strictly-equal elements at every index.
 */
var arrayEquals = function arrayEquals(a, b) {
  // Differing lengths can never compare equal.
  if (a.length !== b.length) {
    return false;
  }

  // Element-wise strict comparison; works on plain and typed arrays.
  for (var idx = a.length - 1; idx >= 0; idx--) {
    if (a[idx] !== b[idx]) {
      return false;
    }
  }

  return true;
};
|
|
|
var generateVideoSegmentTimingInfo = function generateVideoSegmentTimingInfo(baseMediaDecodeTime, startDts, startPts, endDts, endPts, prependedContentDuration) { |
|
var ptsOffsetFromDts = startPts - startDts, |
|
decodeDuration = endDts - startDts, |
|
presentationDuration = endPts - startPts; // The PTS and DTS values are based on the actual stream times from the segment, |
|
// however, the player time values will reflect a start from the baseMediaDecodeTime. |
|
// In order to provide relevant values for the player times, base timing info on the |
|
// baseMediaDecodeTime and the DTS and PTS durations of the segment. |
|
|
|
return { |
|
start: { |
|
dts: baseMediaDecodeTime, |
|
pts: baseMediaDecodeTime + ptsOffsetFromDts |
|
}, |
|
end: { |
|
dts: baseMediaDecodeTime + decodeDuration, |
|
pts: baseMediaDecodeTime + presentationDuration |
|
}, |
|
prependedContentDuration: prependedContentDuration, |
|
baseMediaDecodeTime: baseMediaDecodeTime |
|
}; |
|
}; |
|
/** |
|
* Constructs a single-track, ISO BMFF media segment from AAC data |
|
* events. The output of this stream can be fed to a SourceBuffer |
|
* configured with a suitable initialization segment. |
|
* @param track {object} track metadata configuration |
|
* @param options {object} transmuxer options object |
|
* @param options.keepOriginalTimestamps {boolean} If true, keep the timestamps |
|
* in the source; false to adjust the first segment to start at 0. |
|
*/ |
|
|
|
|
|
_AudioSegmentStream = function AudioSegmentStream(track, options) {
  var adtsFrames = [],
      sequenceNumber = 0,
      earliestAllowedDts = 0,
      audioAppendStartTs = 0,
      videoBaseMediaDecodeTime = Infinity;
  options = options || {};

  _AudioSegmentStream.prototype.init.call(this);

  // Buffer an ADTS frame, folding its DTS into the track's running
  // min/max and mirroring its codec properties onto the track.
  this.push = function (data) {
    trackDecodeInfo.collectDtsInfo(track, data);

    if (track) {
      AUDIO_PROPERTIES.forEach(function (prop) {
        track[prop] = data[prop];
      });
    } // buffer audio data until end() is called


    adtsFrames.push(data);
  };

  // Record the earliest DTS (rebased onto this track's timeline) that
  // emitted audio may carry; earlier frames are trimmed at flush time.
  this.setEarliestDts = function (earliestDts) {
    earliestAllowedDts = earliestDts - track.timelineStartInfo.baseMediaDecodeTime;
  };

  // Video's baseMediaDecodeTime, used when prefixing silence so audio
  // and video segments start together.
  this.setVideoBaseMediaDecodeTime = function (baseMediaDecodeTime) {
    videoBaseMediaDecodeTime = baseMediaDecodeTime;
  };

  // Timestamp at which appended audio should begin.
  this.setAudioAppendStart = function (timestamp) {
    audioAppendStartTs = timestamp;
  };

  // Assemble the buffered ADTS frames into a moof+mdat pair and emit
  // it, then signal 'done'. Order matters: trim, compute
  // baseMediaDecodeTime, prefix silence, then build the sample table.
  this.flush = function () {
    var frames, moof, mdat, boxes; // return early if no audio data has been observed

    if (adtsFrames.length === 0) {
      this.trigger('done', 'AudioSegmentStream');
      return;
    }

    frames = audioFrameUtils.trimAdtsFramesByEarliestDts(adtsFrames, track, earliestAllowedDts);
    track.baseMediaDecodeTime = trackDecodeInfo.calculateTrackBaseMediaDecodeTime(track, options.keepOriginalTimestamps);
    audioFrameUtils.prefixWithSilence(track, frames, audioAppendStartTs, videoBaseMediaDecodeTime); // we have to build the index from byte locations to
    // samples (that is, adts frames) in the audio data

    track.samples = audioFrameUtils.generateSampleTable(frames); // concatenate the audio data to construct the mdat

    mdat = mp4Generator.mdat(audioFrameUtils.concatenateFrameData(frames));
    adtsFrames = [];
    moof = mp4Generator.moof(sequenceNumber, [track]);
    boxes = new Uint8Array(moof.byteLength + mdat.byteLength); // bump the sequence number for next time

    sequenceNumber++;
    boxes.set(moof);
    boxes.set(mdat, moof.byteLength);
    trackDecodeInfo.clearDtsInfo(track);
    this.trigger('data', {
      track: track,
      boxes: boxes
    });
    this.trigger('done', 'AudioSegmentStream');
  };
};

_AudioSegmentStream.prototype = new stream();
|
/** |
|
* Constructs a single-track, ISO BMFF media segment from H264 data |
|
* events. The output of this stream can be fed to a SourceBuffer |
|
* configured with a suitable initialization segment. |
|
* @param track {object} track metadata configuration |
|
* @param options {object} transmuxer options object |
|
* @param options.alignGopsAtEnd {boolean} If true, start from the end of the |
|
* gopsToAlignWith list when attempting to align gop pts |
|
* @param options.keepOriginalTimestamps {boolean} If true, keep the timestamps |
|
* in the source; false to adjust the first segment to start at 0. |
|
*/ |
|
|
|
_VideoSegmentStream = function VideoSegmentStream(track, options) {
  var sequenceNumber = 0,
      nalUnits = [],
      gopsToAlignWith = [],
      config,
      pps;
  options = options || {};

  _VideoSegmentStream.prototype.init.call(this);

  delete track.minPTS;
  // Cache of recent GOPs (with the SPS/PPS active when they were
  // produced) used for gop-fusion in flush().
  this.gopCache_ = [];
  /**
   * Constructs a ISO BMFF segment given H264 nalUnits
   * @param {Object} nalUnit A data event representing a nalUnit
   * @param {String} nalUnit.nalUnitType
   * @param {Object} nalUnit.config Properties for a mp4 track
   * @param {Uint8Array} nalUnit.data The nalUnit bytes
   * @see lib/codecs/h264.js
   **/

  this.push = function (nalUnit) {
    trackDecodeInfo.collectDtsInfo(track, nalUnit); // record the track config

    // Only the first SPS/PPS seen since the last reset is recorded.
    if (nalUnit.nalUnitType === 'seq_parameter_set_rbsp' && !config) {
      config = nalUnit.config;
      track.sps = [nalUnit.data];
      VIDEO_PROPERTIES.forEach(function (prop) {
        track[prop] = config[prop];
      }, this);
    }

    if (nalUnit.nalUnitType === 'pic_parameter_set_rbsp' && !pps) {
      pps = nalUnit.data;
      track.pps = [nalUnit.data];
    } // buffer video until flush() is called


    nalUnits.push(nalUnit);
  };
  /**
   * Pass constructed ISO BMFF track and boxes on to the
   * next stream in the pipeline
   **/

  this.flush = function () {
    var frames,
        gopForFusion,
        gops,
        moof,
        mdat,
        boxes,
        prependedContentDuration = 0,
        firstGop,
        lastGop; // Throw away nalUnits at the start of the byte stream until
    // we find the first AUD

    while (nalUnits.length) {
      if (nalUnits[0].nalUnitType === 'access_unit_delimiter_rbsp') {
        break;
      }

      nalUnits.shift();
    } // Return early if no video data has been observed


    if (nalUnits.length === 0) {
      this.resetStream_();
      this.trigger('done', 'VideoSegmentStream');
      return;
    } // Organize the raw nal-units into arrays that represent
    // higher-level constructs such as frames and gops
    // (group-of-pictures)


    frames = frameUtils.groupNalsIntoFrames(nalUnits);
    gops = frameUtils.groupFramesIntoGops(frames); // If the first frame of this fragment is not a keyframe we have
    // a problem since MSE (on Chrome) requires a leading keyframe.
    //
    // We have two approaches to repairing this situation:
    // 1) GOP-FUSION:
    //    This is where we keep track of the GOPS (group-of-pictures)
    //    from previous fragments and attempt to find one that we can
    //    prepend to the current fragment in order to create a valid
    //    fragment.
    // 2) KEYFRAME-PULLING:
    //    Here we search for the first keyframe in the fragment and
    //    throw away all the frames between the start of the fragment
    //    and that keyframe. We then extend the duration and pull the
    //    PTS of the keyframe forward so that it covers the time range
    //    of the frames that were disposed of.
    //
    // #1 is far preferable over #2 which can cause "stuttering" but
    // requires more things to be just right.

    if (!gops[0][0].keyFrame) {
      // Search for a gop for fusion from our gopCache
      gopForFusion = this.getGopForFusion_(nalUnits[0], track);

      if (gopForFusion) {
        // in order to provide more accurate timing information about the segment, save
        // the number of seconds prepended to the original segment due to GOP fusion
        prependedContentDuration = gopForFusion.duration;
        gops.unshift(gopForFusion); // Adjust Gops' metadata to account for the inclusion of the
        // new gop at the beginning

        gops.byteLength += gopForFusion.byteLength;
        gops.nalCount += gopForFusion.nalCount;
        gops.pts = gopForFusion.pts;
        gops.dts = gopForFusion.dts;
        gops.duration += gopForFusion.duration;
      } else {
        // If we didn't find a candidate gop fall back to keyframe-pulling
        gops = frameUtils.extendFirstKeyFrame(gops);
      }
    } // Trim gops to align with gopsToAlignWith


    if (gopsToAlignWith.length) {
      var alignedGops;

      if (options.alignGopsAtEnd) {
        alignedGops = this.alignGopsAtEnd_(gops);
      } else {
        alignedGops = this.alignGopsAtStart_(gops);
      }

      if (!alignedGops) {
        // save all the nals in the last GOP into the gop cache
        this.gopCache_.unshift({
          gop: gops.pop(),
          pps: track.pps,
          sps: track.sps
        }); // Keep a maximum of 6 GOPs in the cache

        this.gopCache_.length = Math.min(6, this.gopCache_.length); // Clear nalUnits

        nalUnits = []; // return early no gops can be aligned with desired gopsToAlignWith

        this.resetStream_();
        this.trigger('done', 'VideoSegmentStream');
        return;
      } // Some gops were trimmed. clear dts info so minSegmentDts and pts are correct
      // when recalculated before sending off to CoalesceStream


      trackDecodeInfo.clearDtsInfo(track);
      gops = alignedGops;
    }

    trackDecodeInfo.collectDtsInfo(track, gops); // First, we have to build the index from byte locations to
    // samples (that is, frames) in the video data

    track.samples = frameUtils.generateSampleTable(gops); // Concatenate the video data and construct the mdat

    mdat = mp4Generator.mdat(frameUtils.concatenateNalData(gops));
    track.baseMediaDecodeTime = trackDecodeInfo.calculateTrackBaseMediaDecodeTime(track, options.keepOriginalTimestamps);
    this.trigger('processedGopsInfo', gops.map(function (gop) {
      return {
        pts: gop.pts,
        dts: gop.dts,
        byteLength: gop.byteLength
      };
    }));
    firstGop = gops[0];
    lastGop = gops[gops.length - 1];
    this.trigger('segmentTimingInfo', generateVideoSegmentTimingInfo(track.baseMediaDecodeTime, firstGop.dts, firstGop.pts, lastGop.dts + lastGop.duration, lastGop.pts + lastGop.duration, prependedContentDuration)); // save all the nals in the last GOP into the gop cache

    this.gopCache_.unshift({
      gop: gops.pop(),
      pps: track.pps,
      sps: track.sps
    }); // Keep a maximum of 6 GOPs in the cache

    this.gopCache_.length = Math.min(6, this.gopCache_.length); // Clear nalUnits

    nalUnits = [];
    this.trigger('baseMediaDecodeTime', track.baseMediaDecodeTime);
    this.trigger('timelineStartInfo', track.timelineStartInfo);
    moof = mp4Generator.moof(sequenceNumber, [track]); // it would be great to allocate this array up front instead of
    // throwing away hundreds of media segment fragments

    boxes = new Uint8Array(moof.byteLength + mdat.byteLength); // Bump the sequence number for next time

    sequenceNumber++;
    boxes.set(moof);
    boxes.set(mdat, moof.byteLength);
    this.trigger('data', {
      track: track,
      boxes: boxes
    });
    this.resetStream_(); // Continue with the flush process now

    this.trigger('done', 'VideoSegmentStream');
  };

  this.resetStream_ = function () {
    trackDecodeInfo.clearDtsInfo(track); // reset config and pps because they may differ across segments
    // for instance, when we are rendition switching

    config = undefined;
    pps = undefined;
  }; // Search for a candidate Gop for gop-fusion from the gop cache and
  // return it or return null if no good candidate was found


  this.getGopForFusion_ = function (nalUnit) {
    var halfSecond = 45000,
        // Half-a-second in a 90khz clock
        allowableOverlap = 10000,
        // About 3 frames @ 30fps
        nearestDistance = Infinity,
        dtsDistance,
        nearestGopObj,
        currentGop,
        currentGopObj,
        i; // Search for the GOP nearest to the beginning of this nal unit

    for (i = 0; i < this.gopCache_.length; i++) {
      currentGopObj = this.gopCache_[i];
      currentGop = currentGopObj.gop; // Reject Gops with different SPS or PPS

      if (!(track.pps && arrayEquals(track.pps[0], currentGopObj.pps[0])) || !(track.sps && arrayEquals(track.sps[0], currentGopObj.sps[0]))) {
        continue;
      } // Reject Gops that would require a negative baseMediaDecodeTime


      if (currentGop.dts < track.timelineStartInfo.dts) {
        continue;
      } // The distance between the end of the gop and the start of the nalUnit


      dtsDistance = nalUnit.dts - currentGop.dts - currentGop.duration; // Only consider GOPS that start before the nal unit and end within
      // a half-second of the nal unit

      if (dtsDistance >= -allowableOverlap && dtsDistance <= halfSecond) {
        // Always use the closest GOP we found if there is more than
        // one candidate
        if (!nearestGopObj || nearestDistance > dtsDistance) {
          nearestGopObj = currentGopObj;
          nearestDistance = dtsDistance;
        }
      }
    }

    if (nearestGopObj) {
      return nearestGopObj.gop;
    }

    return null;
  }; // trim gop list to the first gop found that has a matching pts with a gop in the list
  // of gopsToAlignWith starting from the START of the list


  this.alignGopsAtStart_ = function (gops) {
    var alignIndex, gopIndex, align, gop, byteLength, nalCount, duration, alignedGops;
    byteLength = gops.byteLength;
    nalCount = gops.nalCount;
    duration = gops.duration;
    alignIndex = gopIndex = 0;

    while (alignIndex < gopsToAlignWith.length && gopIndex < gops.length) {
      align = gopsToAlignWith[alignIndex];
      gop = gops[gopIndex];

      if (align.pts === gop.pts) {
        break;
      }

      if (gop.pts > align.pts) {
        // this current gop starts after the current gop we want to align on, so increment
        // align index
        alignIndex++;
        continue;
      } // current gop starts before the current gop we want to align on. so increment gop
      // index


      gopIndex++;
      byteLength -= gop.byteLength;
      nalCount -= gop.nalCount;
      duration -= gop.duration;
    }

    if (gopIndex === 0) {
      // no gops to trim
      return gops;
    }

    if (gopIndex === gops.length) {
      // all gops trimmed, skip appending all gops
      return null;
    }

    // Rebuild the aggregate metadata on the trimmed list; `gops` carries
    // byteLength/nalCount/duration/pts/dts as expando properties.
    alignedGops = gops.slice(gopIndex);
    alignedGops.byteLength = byteLength;
    alignedGops.duration = duration;
    alignedGops.nalCount = nalCount;
    alignedGops.pts = alignedGops[0].pts;
    alignedGops.dts = alignedGops[0].dts;
    return alignedGops;
  }; // trim gop list to the first gop found that has a matching pts with a gop in the list
  // of gopsToAlignWith starting from the END of the list


  this.alignGopsAtEnd_ = function (gops) {
    var alignIndex, gopIndex, align, gop, alignEndIndex, matchFound;
    alignIndex = gopsToAlignWith.length - 1;
    gopIndex = gops.length - 1;
    alignEndIndex = null;
    matchFound = false;

    while (alignIndex >= 0 && gopIndex >= 0) {
      align = gopsToAlignWith[alignIndex];
      gop = gops[gopIndex];

      if (align.pts === gop.pts) {
        matchFound = true;
        break;
      }

      if (align.pts > gop.pts) {
        alignIndex--;
        continue;
      }

      if (alignIndex === gopsToAlignWith.length - 1) {
        // gop.pts is greater than the last alignment candidate. If no match is found
        // by the end of this loop, we still want to append gops that come after this
        // point
        alignEndIndex = gopIndex;
      }

      gopIndex--;
    }

    if (!matchFound && alignEndIndex === null) {
      return null;
    }

    var trimIndex;

    if (matchFound) {
      trimIndex = gopIndex;
    } else {
      trimIndex = alignEndIndex;
    }

    if (trimIndex === 0) {
      return gops;
    }

    // Recompute aggregate metadata for the kept tail of the gop list.
    var alignedGops = gops.slice(trimIndex);
    var metadata = alignedGops.reduce(function (total, gop) {
      total.byteLength += gop.byteLength;
      total.duration += gop.duration;
      total.nalCount += gop.nalCount;
      return total;
    }, {
      byteLength: 0,
      duration: 0,
      nalCount: 0
    });
    alignedGops.byteLength = metadata.byteLength;
    alignedGops.duration = metadata.duration;
    alignedGops.nalCount = metadata.nalCount;
    alignedGops.pts = alignedGops[0].pts;
    alignedGops.dts = alignedGops[0].dts;
    return alignedGops;
  };

  // Replace the list of GOPs that subsequent flushes align against.
  this.alignGopsWith = function (newGopsToAlignWith) {
    gopsToAlignWith = newGopsToAlignWith;
  };
};

_VideoSegmentStream.prototype = new stream();
|
/** |
|
* A Stream that can combine multiple streams (ie. audio & video) |
|
* into a single output segment for MSE. Also supports audio-only |
|
* and video-only streams. |
|
* @param options {object} transmuxer options object |
|
* @param options.keepOriginalTimestamps {boolean} If true, keep the timestamps |
|
* in the source; false to adjust the first segment to start at media timeline start. |
|
*/ |
|
|
|
_CoalesceStream = function CoalesceStream(options, metadataStream) {
  // Number of Tracks per output segment
  // If greater than 1, we combine multiple
  // tracks into a single segment
  this.numberOfTracks = 0;
  this.metadataStream = metadataStream;
  options = options || {};

  // remux defaults to true when unspecified.
  if (typeof options.remux !== 'undefined') {
    this.remuxTracks = !!options.remux;
  } else {
    this.remuxTracks = true;
  }

  if (typeof options.keepOriginalTimestamps === 'boolean') {
    this.keepOriginalTimestamps = options.keepOriginalTimestamps;
  }

  this.pendingTracks = [];
  this.videoTrack = null;
  this.pendingBoxes = [];
  this.pendingCaptions = [];
  this.pendingMetadata = [];
  this.pendingBytes = 0;
  this.emittedTracks = 0;
  // NOTE(review): this.audioTrack is assigned in push() but never
  // initialized here nor reset in flush() (videoTrack is) — confirm
  // whether a stale audio track can persist across segments.

  _CoalesceStream.prototype.init.call(this); // Take output from multiple

  // Buffer one piece of upstream output until flush(): captions
  // (output.text), ID3 tags (output.frames), or a finished track +
  // boxes pair from a segment stream.
  this.push = function (output) {
    // buffer incoming captions until the associated video segment
    // finishes
    if (output.text) {
      return this.pendingCaptions.push(output);
    } // buffer incoming id3 tags until the final flush


    if (output.frames) {
      return this.pendingMetadata.push(output);
    } // Add this track to the list of pending tracks and store
    // important information required for the construction of
    // the final segment


    this.pendingTracks.push(output.track);
    this.pendingBoxes.push(output.boxes);
    this.pendingBytes += output.boxes.byteLength;

    if (output.track.type === 'video') {
      this.videoTrack = output.track;
    }

    if (output.track.type === 'audio') {
      this.audioTrack = output.track;
    }
  };
};

_CoalesceStream.prototype = new stream();
|
|
|
// Combine all pending track boxes, captions, and ID3 metadata into a
// single 'data' event (init segment + concatenated moof/mdat pairs)
// and emit 'done' once every expected track has been flushed.
// @param {String} flushSource name of the upstream stream that
//        triggered this flush (e.g. 'VideoSegmentStream').
_CoalesceStream.prototype.flush = function (flushSource) {
  var offset = 0,
      event = {
        captions: [],
        captionStreams: {},
        metadata: [],
        info: {}
      },
      caption,
      id3,
      initSegment,
      timelineStartPts = 0,
      i;

  if (this.pendingTracks.length < this.numberOfTracks) {
    if (flushSource !== 'VideoSegmentStream' && flushSource !== 'AudioSegmentStream') {
      // Return because we haven't received a flush from a data-generating
      // portion of the segment (meaning that we have only received meta-data
      // or captions.)
      return;
    } else if (this.remuxTracks) {
      // Return until we have enough tracks from the pipeline to remux (if we
      // are remuxing audio and video into a single MP4)
      return;
    } else if (this.pendingTracks.length === 0) {
      // In the case where we receive a flush without any data having been
      // received we consider it an emitted track for the purposes of coalescing
      // `done` events.
      // We do this for the case where there is an audio and video track in the
      // segment but no audio data. (seen in several playlists with alternate
      // audio tracks and no audio present in the main TS segments.)
      this.emittedTracks++;

      if (this.emittedTracks >= this.numberOfTracks) {
        this.trigger('done');
        this.emittedTracks = 0;
      }

      return;
    }
  }

  // Video timing takes precedence over audio when both are present.
  if (this.videoTrack) {
    timelineStartPts = this.videoTrack.timelineStartInfo.pts;
    VIDEO_PROPERTIES.forEach(function (prop) {
      event.info[prop] = this.videoTrack[prop];
    }, this);
  } else if (this.audioTrack) {
    timelineStartPts = this.audioTrack.timelineStartInfo.pts;
    AUDIO_PROPERTIES.forEach(function (prop) {
      event.info[prop] = this.audioTrack[prop];
    }, this);
  }

  if (this.pendingTracks.length === 1) {
    event.type = this.pendingTracks[0].type;
  } else {
    event.type = 'combined';
  }

  this.emittedTracks += this.pendingTracks.length;
  initSegment = mp4Generator.initSegment(this.pendingTracks); // Create a new typed array to hold the init segment

  event.initSegment = new Uint8Array(initSegment.byteLength); // Create an init segment containing a moov
  // and track definitions

  event.initSegment.set(initSegment); // Create a new typed array to hold the moof+mdats

  event.data = new Uint8Array(this.pendingBytes); // Append each moof+mdat (one per track) together

  for (i = 0; i < this.pendingBoxes.length; i++) {
    event.data.set(this.pendingBoxes[i], offset);
    offset += this.pendingBoxes[i].byteLength;
  } // Translate caption PTS times into second offsets to match the
  // video timeline for the segment, and add track info


  for (i = 0; i < this.pendingCaptions.length; i++) {
    caption = this.pendingCaptions[i];
    caption.startTime = caption.startPts;

    if (!this.keepOriginalTimestamps) {
      caption.startTime -= timelineStartPts;
    }

    // 90e3 converts a 90kHz clock value to seconds.
    caption.startTime /= 90e3;
    caption.endTime = caption.endPts;

    if (!this.keepOriginalTimestamps) {
      caption.endTime -= timelineStartPts;
    }

    caption.endTime /= 90e3;
    event.captionStreams[caption.stream] = true;
    event.captions.push(caption);
  } // Translate ID3 frame PTS times into second offsets to match the
  // video timeline for the segment


  for (i = 0; i < this.pendingMetadata.length; i++) {
    id3 = this.pendingMetadata[i];
    id3.cueTime = id3.pts;

    if (!this.keepOriginalTimestamps) {
      id3.cueTime -= timelineStartPts;
    }

    id3.cueTime /= 90e3;
    event.metadata.push(id3);
  } // We add this to every single emitted segment even though we only need
  // it for the first


  event.metadata.dispatchType = this.metadataStream.dispatchType; // Reset stream state

  this.pendingTracks.length = 0;
  this.videoTrack = null;
  this.pendingBoxes.length = 0;
  this.pendingCaptions.length = 0;
  this.pendingBytes = 0;
  this.pendingMetadata.length = 0; // Emit the built segment

  this.trigger('data', event); // Only emit `done` if all tracks have been flushed and emitted

  if (this.emittedTracks >= this.numberOfTracks) {
    this.trigger('done');
    this.emittedTracks = 0;
  }
};
|
/** |
|
* A Stream that expects MP2T binary data as input and produces |
|
* corresponding media segments, suitable for use with Media Source |
|
* Extension (MSE) implementations that support the ISO BMFF byte |
|
* stream format, like Chrome. |
|
*/ |
|
|
|
|
|
_Transmuxer = function Transmuxer(options) { |
|
var self = this, |
|
hasFlushed = true, |
|
videoTrack, |
|
audioTrack; |
|
|
|
_Transmuxer.prototype.init.call(this); |
|
|
|
options = options || {}; |
|
this.baseMediaDecodeTime = options.baseMediaDecodeTime || 0; |
|
this.transmuxPipeline_ = {}; |
|
|
|
this.setupAacPipeline = function () { |
|
var pipeline = {}; |
|
this.transmuxPipeline_ = pipeline; |
|
pipeline.type = 'aac'; |
|
pipeline.metadataStream = new m2ts_1.MetadataStream(); // set up the parsing pipeline |
|
|
|
pipeline.aacStream = new aac(); |
|
pipeline.audioTimestampRolloverStream = new m2ts_1.TimestampRolloverStream('audio'); |
|
pipeline.timedMetadataTimestampRolloverStream = new m2ts_1.TimestampRolloverStream('timed-metadata'); |
|
pipeline.adtsStream = new adts(); |
|
pipeline.coalesceStream = new _CoalesceStream(options, pipeline.metadataStream); |
|
pipeline.headOfPipeline = pipeline.aacStream; |
|
pipeline.aacStream.pipe(pipeline.audioTimestampRolloverStream).pipe(pipeline.adtsStream); |
|
pipeline.aacStream.pipe(pipeline.timedMetadataTimestampRolloverStream).pipe(pipeline.metadataStream).pipe(pipeline.coalesceStream); |
|
pipeline.metadataStream.on('timestamp', function (frame) { |
|
pipeline.aacStream.setTimestamp(frame.timeStamp); |
|
}); |
|
pipeline.aacStream.on('data', function (data) { |
|
if (data.type === 'timed-metadata' && !pipeline.audioSegmentStream) { |
|
audioTrack = audioTrack || { |
|
timelineStartInfo: { |
|
baseMediaDecodeTime: self.baseMediaDecodeTime |
|
}, |
|
codec: 'adts', |
|
type: 'audio' |
|
}; // hook up the audio segment stream to the first track with aac data |
|
|
|
pipeline.coalesceStream.numberOfTracks++; |
|
pipeline.audioSegmentStream = new _AudioSegmentStream(audioTrack, options); // Set up the final part of the audio pipeline |
|
|
|
pipeline.adtsStream.pipe(pipeline.audioSegmentStream).pipe(pipeline.coalesceStream); |
|
} |
|
}); // Re-emit any data coming from the coalesce stream to the outside world |
|
|
|
pipeline.coalesceStream.on('data', this.trigger.bind(this, 'data')); // Let the consumer know we have finished flushing the entire pipeline |
|
|
|
pipeline.coalesceStream.on('done', this.trigger.bind(this, 'done')); |
|
}; |
|
|
|
// Builds the MPEG2-TS transmuxing pipeline and stores it on
// this.transmuxPipeline_. Flow: TS packets -> elementary streams -> demuxed
// video (H264) / audio (ADTS) / timed-metadata branches -> fMP4 segment
// streams -> coalesce stream. The per-track segment streams are attached
// lazily in the 'data' handler once PMT track metadata is observed.
this.setupTsPipeline = function () {
  var pipeline = {};
  this.transmuxPipeline_ = pipeline;
  pipeline.type = 'ts';
  pipeline.metadataStream = new m2ts_1.MetadataStream(); // set up the parsing pipeline

  pipeline.packetStream = new m2ts_1.TransportPacketStream();
  pipeline.parseStream = new m2ts_1.TransportParseStream();
  pipeline.elementaryStream = new m2ts_1.ElementaryStream();
  // separate rollover trackers per stream type: 33-bit PTS/DTS rollover must
  // be handled independently for the video, audio and metadata PIDs
  pipeline.videoTimestampRolloverStream = new m2ts_1.TimestampRolloverStream('video');
  pipeline.audioTimestampRolloverStream = new m2ts_1.TimestampRolloverStream('audio');
  pipeline.timedMetadataTimestampRolloverStream = new m2ts_1.TimestampRolloverStream('timed-metadata');
  pipeline.adtsStream = new adts();
  pipeline.h264Stream = new H264Stream();
  pipeline.captionStream = new m2ts_1.CaptionStream();
  pipeline.coalesceStream = new _CoalesceStream(options, pipeline.metadataStream);
  pipeline.headOfPipeline = pipeline.packetStream; // disassemble MPEG2-TS packets into elementary streams

  pipeline.packetStream.pipe(pipeline.parseStream).pipe(pipeline.elementaryStream); // !!THIS ORDER IS IMPORTANT!!
  // demux the streams

  pipeline.elementaryStream.pipe(pipeline.videoTimestampRolloverStream).pipe(pipeline.h264Stream);
  pipeline.elementaryStream.pipe(pipeline.audioTimestampRolloverStream).pipe(pipeline.adtsStream);
  pipeline.elementaryStream.pipe(pipeline.timedMetadataTimestampRolloverStream).pipe(pipeline.metadataStream).pipe(pipeline.coalesceStream); // Hook up CEA-608/708 caption stream

  pipeline.h264Stream.pipe(pipeline.captionStream).pipe(pipeline.coalesceStream);
  // Once PMT metadata arrives, pick the first video and first audio track and
  // attach their segment streams (each attached at most once per pipeline).
  pipeline.elementaryStream.on('data', function (data) {
    var i;

    if (data.type === 'metadata') {
      i = data.tracks.length; // scan the tracks listed in the metadata

      while (i--) {
        if (!videoTrack && data.tracks[i].type === 'video') {
          videoTrack = data.tracks[i];
          videoTrack.timelineStartInfo.baseMediaDecodeTime = self.baseMediaDecodeTime;
        } else if (!audioTrack && data.tracks[i].type === 'audio') {
          audioTrack = data.tracks[i];
          audioTrack.timelineStartInfo.baseMediaDecodeTime = self.baseMediaDecodeTime;
        }
      } // hook up the video segment stream to the first track with h264 data

      if (videoTrack && !pipeline.videoSegmentStream) {
        pipeline.coalesceStream.numberOfTracks++;
        pipeline.videoSegmentStream = new _VideoSegmentStream(videoTrack, options);
        pipeline.videoSegmentStream.on('timelineStartInfo', function (timelineStartInfo) {
          // When video emits timelineStartInfo data after a flush, we forward that
          // info to the AudioSegmentStream, if it exists, because video timeline
          // data takes precedence.
          if (audioTrack) {
            audioTrack.timelineStartInfo = timelineStartInfo; // On the first segment we trim AAC frames that exist before the
            // very earliest DTS we have seen in video because Chrome will
            // interpret any video track with a baseMediaDecodeTime that is
            // non-zero as a gap.

            pipeline.audioSegmentStream.setEarliestDts(timelineStartInfo.dts);
          }
        });
        pipeline.videoSegmentStream.on('processedGopsInfo', self.trigger.bind(self, 'gopInfo'));
        pipeline.videoSegmentStream.on('segmentTimingInfo', self.trigger.bind(self, 'videoSegmentTimingInfo'));
        pipeline.videoSegmentStream.on('baseMediaDecodeTime', function (baseMediaDecodeTime) {
          if (audioTrack) {
            pipeline.audioSegmentStream.setVideoBaseMediaDecodeTime(baseMediaDecodeTime);
          }
        }); // Set up the final part of the video pipeline

        pipeline.h264Stream.pipe(pipeline.videoSegmentStream).pipe(pipeline.coalesceStream);
      }

      if (audioTrack && !pipeline.audioSegmentStream) {
        // hook up the audio segment stream to the first track with aac data
        pipeline.coalesceStream.numberOfTracks++;
        pipeline.audioSegmentStream = new _AudioSegmentStream(audioTrack, options); // Set up the final part of the audio pipeline

        pipeline.adtsStream.pipe(pipeline.audioSegmentStream).pipe(pipeline.coalesceStream);
      }
    }
  }); // Re-emit any data coming from the coalesce stream to the outside world

  pipeline.coalesceStream.on('data', this.trigger.bind(this, 'data')); // Let the consumer know we have finished flushing the entire pipeline

  pipeline.coalesceStream.on('done', this.trigger.bind(this, 'done'));
}; // hook up the segment streams once track metadata is delivered
|
|
|
|
|
/**
 * Re-baselines the transmuxer for a new timeline position. Clears cached
 * DTS/PTS bookkeeping on both tracks, drops buffered GOPs and caption state,
 * and signals a discontinuity to every timestamp-rollover stream so the next
 * segment starts from the supplied base media decode time.
 *
 * @param {number} baseMediaDecodeTime new base time; not stored on the
 *        transmuxer or the tracks when options.keepOriginalTimestamps is set
 */
this.setBaseMediaDecodeTime = function (baseMediaDecodeTime) {
  var activePipeline = this.transmuxPipeline_;

  if (!options.keepOriginalTimestamps) {
    this.baseMediaDecodeTime = baseMediaDecodeTime;
  }

  if (audioTrack) {
    audioTrack.timelineStartInfo.dts = undefined;
    audioTrack.timelineStartInfo.pts = undefined;
    trackDecodeInfo.clearDtsInfo(audioTrack);

    if (!options.keepOriginalTimestamps) {
      audioTrack.timelineStartInfo.baseMediaDecodeTime = baseMediaDecodeTime;
    }

    if (activePipeline.audioTimestampRolloverStream) {
      activePipeline.audioTimestampRolloverStream.discontinuity();
    }
  }

  if (videoTrack) {
    if (activePipeline.videoSegmentStream) {
      // drop any buffered GOPs and restart rollover tracking
      activePipeline.videoSegmentStream.gopCache_ = [];
      activePipeline.videoTimestampRolloverStream.discontinuity();
    }

    videoTrack.timelineStartInfo.dts = undefined;
    videoTrack.timelineStartInfo.pts = undefined;
    trackDecodeInfo.clearDtsInfo(videoTrack);
    activePipeline.captionStream.reset();

    if (!options.keepOriginalTimestamps) {
      videoTrack.timelineStartInfo.baseMediaDecodeTime = baseMediaDecodeTime;
    }
  }

  if (activePipeline.timedMetadataTimestampRolloverStream) {
    activePipeline.timedMetadataTimestampRolloverStream.discontinuity();
  }
};
|
|
|
// Forward the audio append-start timestamp to the audio segment stream,
// but only once an audio track has actually been discovered.
this.setAudioAppendStart = function (timestamp) {
  if (!audioTrack) {
    return;
  }

  this.transmuxPipeline_.audioSegmentStream.setAudioAppendStart(timestamp);
};
|
|
|
// Hand the list of GOPs to align against to the video segment stream,
// provided a video track and its segment stream already exist.
this.alignGopsWith = function (gopsToAlignWith) {
  var videoStream = videoTrack && this.transmuxPipeline_.videoSegmentStream;

  if (videoStream) {
    videoStream.alignGopsWith(gopsToAlignWith);
  }
}; // feed incoming data to the front of the parsing pipeline
|
|
|
|
|
// Feed a chunk of bytes into the head of the active pipeline. On the first
// push after a flush, sniff the container format and rebuild the matching
// pipeline (AAC vs. MPEG2-TS) if it changed.
this.push = function (data) {
  if (hasFlushed) {
    var looksLikeAac = isLikelyAacData$1(data);

    if (looksLikeAac && this.transmuxPipeline_.type !== 'aac') {
      this.setupAacPipeline();
    } else if (!looksLikeAac && this.transmuxPipeline_.type !== 'ts') {
      this.setupTsPipeline();
    }

    hasFlushed = false;
  }

  this.transmuxPipeline_.headOfPipeline.push(data);
}; // flush any buffered data
|
|
|
|
|
// Flush all pending work through the pipeline, and note that the next push
// is allowed to re-select the pipeline type (see this.push above).
this.flush = function () {
  hasFlushed = true;
  this.transmuxPipeline_.headOfPipeline.flush();
}; // Caption data has to be reset when seeking outside buffered range
|
|
|
|
|
// Reset caption parsing state; only the TS pipeline has a caption stream.
this.resetCaptions = function () {
  var captions = this.transmuxPipeline_.captionStream;

  if (captions) {
    captions.reset();
  }
};
|
}; |
|
|
|
// _Transmuxer is an event emitter: inherit on/off/trigger/pipe from the
// base stream implementation.
_Transmuxer.prototype = new stream();
// Public transmuxer module surface.
var transmuxer = {
  Transmuxer: _Transmuxer,
  VideoSegmentStream: _VideoSegmentStream,
  AudioSegmentStream: _AudioSegmentStream,
  AUDIO_PROPERTIES: AUDIO_PROPERTIES,
  VIDEO_PROPERTIES: VIDEO_PROPERTIES,
  // exported for testing
  generateVideoSegmentTimingInfo: generateVideoSegmentTimingInfo
};
|
|
|
// MP4 inspector helpers. inspectMp4/_textifyMp are forward-declared because
// the box handlers below recurse into inspectMp4.
var inspectMp4,
    _textifyMp,
    parseType$2 = probe.parseType,
    // MP4 dates count seconds since midnight Jan 1 1904 UTC; 2082844800000ms
    // is the offset between that epoch and the Unix epoch.
    parseMp4Date = function parseMp4Date(seconds) {
  return new Date(seconds * 1000 - 2082844800000);
},
    // Unpack the 4-byte sample-flags field used by trun entries
    // (ISO/IEC 14496-12, section 8.8.3.1).
    parseSampleFlags = function parseSampleFlags(flags) {
  return {
    isLeading: (flags[0] & 0x0c) >>> 2,
    dependsOn: flags[0] & 0x03,
    isDependedOn: (flags[1] & 0xc0) >>> 6,
    hasRedundancy: (flags[1] & 0x30) >>> 4,
    paddingValue: (flags[1] & 0x0e) >>> 1,
    isNonSyncSample: flags[1] & 0x01,
    degradationPriority: flags[2] << 8 | flags[3]
  };
},
|
nalParse = function nalParse(avcStream) { |
|
var avcView = new DataView(avcStream.buffer, avcStream.byteOffset, avcStream.byteLength), |
|
result = [], |
|
i, |
|
length; |
|
|
|
for (i = 0; i + 4 < avcStream.length; i += length) { |
|
length = avcView.getUint32(i); |
|
i += 4; // bail if this doesn't appear to be an H264 stream |
|
|
|
if (length <= 0) { |
|
result.push('<span style=\'color:red;\'>MALFORMED DATA</span>'); |
|
continue; |
|
} |
|
|
|
switch (avcStream[i] & 0x1F) { |
|
case 0x01: |
|
result.push('slice_layer_without_partitioning_rbsp'); |
|
break; |
|
|
|
case 0x05: |
|
result.push('slice_layer_without_partitioning_rbsp_idr'); |
|
break; |
|
|
|
case 0x06: |
|
result.push('sei_rbsp'); |
|
break; |
|
|
|
case 0x07: |
|
result.push('seq_parameter_set_rbsp'); |
|
break; |
|
|
|
case 0x08: |
|
result.push('pic_parameter_set_rbsp'); |
|
break; |
|
|
|
case 0x09: |
|
result.push('access_unit_delimiter_rbsp'); |
|
break; |
|
|
|
default: |
|
result.push('UNKNOWN NAL - ' + avcStream[i] & 0x1F); |
|
break; |
|
} |
|
} |
|
|
|
return result; |
|
}, |
|
// registry of handlers for individual mp4 box types
parse$1 = {
  // codingname, not a first-class box type. stsd entries share the
  // same format as real boxes so the parsing infrastructure can be
  // shared
  // VisualSampleEntry (ISO/IEC 14496-12, section 12.1.3); trailing bytes
  // hold nested configuration boxes (avcC, btrt, ...).
  avc1: function avc1(data) {
    var view = new DataView(data.buffer, data.byteOffset, data.byteLength);
    return {
      dataReferenceIndex: view.getUint16(6),
      width: view.getUint16(24),
      height: view.getUint16(26),
      // NOTE(review): horiz/vertresolution are 16.16 fixed point, so the
      // fractional divisor should likely be 65536 rather than 16 (compare
      // mp4a.samplerate below) — confirm before changing.
      horizresolution: view.getUint16(28) + view.getUint16(30) / 16,
      vertresolution: view.getUint16(32) + view.getUint16(34) / 16,
      frameCount: view.getUint16(40),
      depth: view.getUint16(74),
      config: inspectMp4(data.subarray(78, data.byteLength))
    };
  },
|
  // AVCDecoderConfigurationRecord (ISO/IEC 14496-15): profile/level info
  // plus the raw SPS and PPS NAL units needed to initialize a decoder.
  avcC: function avcC(data) {
    var view = new DataView(data.buffer, data.byteOffset, data.byteLength),
        result = {
      configurationVersion: data[0],
      avcProfileIndication: data[1],
      profileCompatibility: data[2],
      avcLevelIndication: data[3],
      lengthSizeMinusOne: data[4] & 0x03,
      sps: [],
      pps: []
    },
        numOfSequenceParameterSets = data[5] & 0x1f,
        numOfPictureParameterSets,
        nalSize,
        offset,
        i; // iterate past any SPSs

    offset = 6;

    for (i = 0; i < numOfSequenceParameterSets; i++) {
      nalSize = view.getUint16(offset);
      offset += 2;
      result.sps.push(new Uint8Array(data.subarray(offset, offset + nalSize)));
      offset += nalSize;
    } // iterate past any PPSs

    numOfPictureParameterSets = data[offset];
    offset++;

    for (i = 0; i < numOfPictureParameterSets; i++) {
      nalSize = view.getUint16(offset);
      offset += 2;
      result.pps.push(new Uint8Array(data.subarray(offset, offset + nalSize)));
      offset += nalSize;
    }

    return result;
  },
  // BitRateBox: decoder buffer size plus max/average bitrate hints.
  btrt: function btrt(data) {
    var view = new DataView(data.buffer, data.byteOffset, data.byteLength);
    return {
      bufferSizeDB: view.getUint32(0),
      maxBitrate: view.getUint32(4),
      avgBitrate: view.getUint32(8)
    };
  },
|
  // ESDBox (ISO/IEC 14496-14): elementary stream descriptor including the
  // AAC decoder config (object type, sampling-frequency index, channels).
  // Offsets assume the common fixed descriptor layout produced by most muxers.
  esds: function esds(data) {
    return {
      version: data[0],
      flags: new Uint8Array(data.subarray(1, 4)),
      esId: data[6] << 8 | data[7],
      streamPriority: data[8] & 0x1f,
      decoderConfig: {
        objectProfileIndication: data[11],
        streamType: data[12] >>> 2 & 0x3f,
        bufferSize: data[13] << 16 | data[14] << 8 | data[15],
        // NOTE(review): `data[16] << 24` is a signed shift, so bitrates at
        // or above 2^31 would read negative — confirm whether `>>> 0` is
        // wanted here and on avgBitrate.
        maxBitrate: data[16] << 24 | data[17] << 16 | data[18] << 8 | data[19],
        avgBitrate: data[20] << 24 | data[21] << 16 | data[22] << 8 | data[23],
        decoderConfigDescriptor: {
          tag: data[24],
          length: data[25],
          audioObjectType: data[26] >>> 3 & 0x1f,
          samplingFrequencyIndex: (data[26] & 0x07) << 1 | data[27] >>> 7 & 0x01,
          channelConfiguration: data[27] >>> 3 & 0x0f
        }
      }
    };
  },
|
  // FileTypeBox: major/minor brand plus the list of compatible brands.
  ftyp: function ftyp(data) {
    var view = new DataView(data.buffer, data.byteOffset, data.byteLength),
        result = {
      majorBrand: parseType$2(data.subarray(0, 4)),
      minorVersion: view.getUint32(4),
      compatibleBrands: []
    },
        i = 8;

    while (i < data.byteLength) {
      result.compatibleBrands.push(parseType$2(data.subarray(i, i + 4)));
      i += 4;
    }

    return result;
  },
  // DataInformationBox: pure container — recurse into child boxes.
  dinf: function dinf(data) {
    return {
      boxes: inspectMp4(data)
    };
  },
  // DataReferenceBox: version/flags followed by nested data-reference entries.
  dref: function dref(data) {
    return {
      version: data[0],
      flags: new Uint8Array(data.subarray(1, 4)),
      dataReferences: inspectMp4(data.subarray(8))
    };
  },
  // HandlerBox: declares the media type ('vide', 'soun', ...) and carries a
  // null-terminated UTF-8 handler name.
  hdlr: function hdlr(data) {
    var view = new DataView(data.buffer, data.byteOffset, data.byteLength),
        result = {
      version: view.getUint8(0),
      flags: new Uint8Array(data.subarray(1, 4)),
      handlerType: parseType$2(data.subarray(8, 12)),
      name: ''
    },
        i = 8; // parse out the name field

    for (i = 24; i < data.byteLength; i++) {
      if (data[i] === 0x00) {
        // the name field is null-terminated
        i++;
        break;
      }

      result.name += String.fromCharCode(data[i]);
    } // decode UTF-8 to javascript's internal representation
    // see http://ecmanaut.blogspot.com/2006/07/encoding-decoding-utf8-in-javascript.html

    result.name = decodeURIComponent(escape(result.name));
    return result;
  },
  // MediaDataBox: report its size and the NAL units found inside
  // (assumes AVC content — see nalParse).
  mdat: function mdat(data) {
    return {
      byteLength: data.byteLength,
      nals: nalParse(data)
    };
  },
|
  // MediaHeaderBox: per-track timescale/duration and ISO-639-2/T language.
  // Version 1 fields are 64-bit; only the low 32 bits are read here.
  mdhd: function mdhd(data) {
    var view = new DataView(data.buffer, data.byteOffset, data.byteLength),
        i = 4,
        language,
        result = {
      version: view.getUint8(0),
      flags: new Uint8Array(data.subarray(1, 4)),
      language: ''
    };

    if (result.version === 1) {
      i += 4;
      result.creationTime = parseMp4Date(view.getUint32(i)); // truncating top 4 bytes

      i += 8;
      result.modificationTime = parseMp4Date(view.getUint32(i)); // truncating top 4 bytes

      i += 4;
      result.timescale = view.getUint32(i);
      i += 8;
      result.duration = view.getUint32(i); // truncating top 4 bytes
    } else {
      result.creationTime = parseMp4Date(view.getUint32(i));
      i += 4;
      result.modificationTime = parseMp4Date(view.getUint32(i));
      i += 4;
      result.timescale = view.getUint32(i);
      i += 4;
      result.duration = view.getUint32(i);
    }

    i += 4; // language is stored as an ISO-639-2/T code in an array of three 5-bit fields
    // each field is the packed difference between its ASCII value and 0x60

    language = view.getUint16(i);
    result.language += String.fromCharCode((language >> 10) + 0x60);
    result.language += String.fromCharCode(((language & 0x03e0) >> 5) + 0x60);
    result.language += String.fromCharCode((language & 0x1f) + 0x60);
    return result;
  },
  // MediaBox: container — recurse.
  mdia: function mdia(data) {
    return {
      boxes: inspectMp4(data)
    };
  },
  // MovieFragmentHeaderBox: ordinal of this fragment within the stream.
  mfhd: function mfhd(data) {
    return {
      version: data[0],
      flags: new Uint8Array(data.subarray(1, 4)),
      sequenceNumber: data[4] << 24 | data[5] << 16 | data[6] << 8 | data[7]
    };
  },
  // MediaInformationBox: container — recurse.
  minf: function minf(data) {
    return {
      boxes: inspectMp4(data)
    };
  },
  // codingname, not a first-class box type. stsd entries share the
  // same format as real boxes so the parsing infrastructure can be
  // shared
  // AudioSampleEntry: channel count, sample size and 16.16 sample rate.
  mp4a: function mp4a(data) {
    var view = new DataView(data.buffer, data.byteOffset, data.byteLength),
        result = {
      // 6 bytes reserved
      dataReferenceIndex: view.getUint16(6),
      // 4 + 4 bytes reserved
      channelcount: view.getUint16(16),
      samplesize: view.getUint16(18),
      // 2 bytes pre_defined
      // 2 bytes reserved
      samplerate: view.getUint16(24) + view.getUint16(26) / 65536
    }; // if there are more bytes to process, assume this is an ISO/IEC
    // 14496-14 MP4AudioSampleEntry and parse the ESDBox

    if (data.byteLength > 28) {
      result.streamDescriptor = inspectMp4(data.subarray(28))[0];
    }

    return result;
  },
  // MovieFragmentBox: container — recurse.
  moof: function moof(data) {
    return {
      boxes: inspectMp4(data)
    };
  },
  // MovieBox: container — recurse.
  moov: function moov(data) {
    return {
      boxes: inspectMp4(data)
    };
  },
  // MovieExtendsBox: container — recurse.
  mvex: function mvex(data) {
    return {
      boxes: inspectMp4(data)
    };
  },
|
mvhd: function mvhd(data) { |
|
var view = new DataView(data.buffer, data.byteOffset, data.byteLength), |
|
i = 4, |
|
result = { |
|
version: view.getUint8(0), |
|
flags: new Uint8Array(data.subarray(1, 4)) |
|
}; |
|
|
|
if (result.version === 1) { |
|
i += 4; |
|
result.creationTime = parseMp4Date(view.getUint32(i)); // truncating top 4 bytes |
|
|
|
i += 8; |
|
result.modificationTime = parseMp4Date(view.getUint32(i)); // truncating top 4 bytes |
|
|
|
i += 4; |
|
result.timescale = view.getUint32(i); |
|
i += 8; |
|
result.duration = view.getUint32(i); // truncating top 4 bytes |
|
} else { |
|
result.creationTime = parseMp4Date(view.getUint32(i)); |
|
i += 4; |
|
result.modificationTime = parseMp4Date(view.getUint32(i)); |
|
i += 4; |
|
result.timescale = view.getUint32(i); |
|
i += 4; |
|
result.duration = view.getUint32(i); |
|
} |
|
|
|
i += 4; // convert fixed-point, base 16 back to a number |
|
|
|
result.rate = view.getUint16(i) + view.getUint16(i + 2) / 16; |
|
i += 4; |
|
result.volume = view.getUint8(i) + view.getUint8(i + 1) / 8; |
|
i += 2; |
|
i += 2; |
|
i += 2 * 4; |
|
result.matrix = new Uint32Array(data.subarray(i, i + 9 * 4)); |
|
i += 9 * 4; |
|
i += 6 * 4; |
|
result.nextTrackId = view.getUint32(i); |
|
return result; |
|
}, |
|
  // ProgressiveDownloadInfoBox: download rate / initial-delay pairs
  // (only the first pair is read).
  pdin: function pdin(data) {
    var view = new DataView(data.buffer, data.byteOffset, data.byteLength);
    return {
      version: view.getUint8(0),
      flags: new Uint8Array(data.subarray(1, 4)),
      rate: view.getUint32(4),
      initialDelay: view.getUint32(8)
    };
  },
  // SampleDependencyTypeBox: one packed dependency byte per sample.
  sdtp: function sdtp(data) {
    var result = {
      version: data[0],
      flags: new Uint8Array(data.subarray(1, 4)),
      samples: []
    },
        i;

    for (i = 4; i < data.byteLength; i++) {
      result.samples.push({
        dependsOn: (data[i] & 0x30) >> 4,
        isDependedOn: (data[i] & 0x0c) >> 2,
        hasRedundancy: data[i] & 0x03
      });
    }

    return result;
  },
  // SegmentIndexBox (version 0 layout): earliest presentation time plus a
  // list of subsegment references.
  sidx: function sidx(data) {
    var view = new DataView(data.buffer, data.byteOffset, data.byteLength),
        result = {
      version: data[0],
      flags: new Uint8Array(data.subarray(1, 4)),
      references: [],
      referenceId: view.getUint32(4),
      timescale: view.getUint32(8),
      earliestPresentationTime: view.getUint32(12),
      firstOffset: view.getUint32(16)
    },
        referenceCount = view.getUint16(22),
        i;

    for (i = 24; referenceCount; i += 12, referenceCount--) {
      result.references.push({
        referenceType: (data[i] & 0x80) >>> 7,
        referencedSize: view.getUint32(i) & 0x7FFFFFFF,
        subsegmentDuration: view.getUint32(i + 4),
        startsWithSap: !!(data[i + 8] & 0x80),
        sapType: (data[i + 8] & 0x70) >>> 4,
        sapDeltaTime: view.getUint32(i + 8) & 0x0FFFFFFF
      });
    }

    return result;
  },
  // SoundMediaHeaderBox: stereo balance as an 8.8 fixed-point number.
  smhd: function smhd(data) {
    return {
      version: data[0],
      flags: new Uint8Array(data.subarray(1, 4)),
      balance: data[4] + data[5] / 256
    };
  },
|
  // SampleTableBox: container — recurse.
  stbl: function stbl(data) {
    return {
      boxes: inspectMp4(data)
    };
  },
  // ChunkOffsetBox: 32-bit file offsets of each chunk.
  stco: function stco(data) {
    var view = new DataView(data.buffer, data.byteOffset, data.byteLength),
        result = {
      version: data[0],
      flags: new Uint8Array(data.subarray(1, 4)),
      chunkOffsets: []
    },
        entryCount = view.getUint32(4),
        i;

    for (i = 8; entryCount; i += 4, entryCount--) {
      result.chunkOffsets.push(view.getUint32(i));
    }

    return result;
  },
  // SampleToChunkBox: runs of chunks sharing a samples-per-chunk count.
  stsc: function stsc(data) {
    var view = new DataView(data.buffer, data.byteOffset, data.byteLength),
        entryCount = view.getUint32(4),
        result = {
      version: data[0],
      flags: new Uint8Array(data.subarray(1, 4)),
      sampleToChunks: []
    },
        i;

    for (i = 8; entryCount; i += 12, entryCount--) {
      result.sampleToChunks.push({
        firstChunk: view.getUint32(i),
        samplesPerChunk: view.getUint32(i + 4),
        sampleDescriptionIndex: view.getUint32(i + 8)
      });
    }

    return result;
  },
  // SampleDescriptionBox: codec-specific sample entries (avc1, mp4a, ...).
  stsd: function stsd(data) {
    return {
      version: data[0],
      flags: new Uint8Array(data.subarray(1, 4)),
      sampleDescriptions: inspectMp4(data.subarray(8))
    };
  },
  // SampleSizeBox: default sample size plus optional per-sample sizes.
  stsz: function stsz(data) {
    var view = new DataView(data.buffer, data.byteOffset, data.byteLength),
        result = {
      version: data[0],
      flags: new Uint8Array(data.subarray(1, 4)),
      sampleSize: view.getUint32(4),
      entries: []
    },
        i;

    for (i = 12; i < data.byteLength; i += 4) {
      result.entries.push(view.getUint32(i));
    }

    return result;
  },
  // TimeToSampleBox: (sampleCount, sampleDelta) runs for decode timing.
  stts: function stts(data) {
    var view = new DataView(data.buffer, data.byteOffset, data.byteLength),
        result = {
      version: data[0],
      flags: new Uint8Array(data.subarray(1, 4)),
      timeToSamples: []
    },
        entryCount = view.getUint32(4),
        i;

    for (i = 8; entryCount; i += 8, entryCount--) {
      result.timeToSamples.push({
        sampleCount: view.getUint32(i),
        sampleDelta: view.getUint32(i + 4)
      });
    }

    return result;
  },
  // SegmentTypeBox: identical layout to ftyp.
  styp: function styp(data) {
    return parse$1.ftyp(data);
  },
|
tfdt: function tfdt(data) { |
|
var result = { |
|
version: data[0], |
|
flags: new Uint8Array(data.subarray(1, 4)), |
|
baseMediaDecodeTime: data[4] << 24 | data[5] << 16 | data[6] << 8 | data[7] |
|
}; |
|
|
|
if (result.version === 1) { |
|
result.baseMediaDecodeTime *= Math.pow(2, 32); |
|
result.baseMediaDecodeTime += data[8] << 24 | data[9] << 16 | data[10] << 8 | data[11]; |
|
} |
|
|
|
return result; |
|
}, |
|
tfhd: function tfhd(data) { |
|
var view = new DataView(data.buffer, data.byteOffset, data.byteLength), |
|
result = { |
|
version: data[0], |
|
flags: new Uint8Array(data.subarray(1, 4)), |
|
trackId: view.getUint32(4) |
|
}, |
|
baseDataOffsetPresent = result.flags[2] & 0x01, |
|
sampleDescriptionIndexPresent = result.flags[2] & 0x02, |
|
defaultSampleDurationPresent = result.flags[2] & 0x08, |
|
defaultSampleSizePresent = result.flags[2] & 0x10, |
|
defaultSampleFlagsPresent = result.flags[2] & 0x20, |
|
durationIsEmpty = result.flags[0] & 0x010000, |
|
defaultBaseIsMoof = result.flags[0] & 0x020000, |
|
i; |
|
i = 8; |
|
|
|
if (baseDataOffsetPresent) { |
|
i += 4; // truncate top 4 bytes |
|
// FIXME: should we read the full 64 bits? |
|
|
|
result.baseDataOffset = view.getUint32(12); |
|
i += 4; |
|
} |
|
|
|
if (sampleDescriptionIndexPresent) { |
|
result.sampleDescriptionIndex = view.getUint32(i); |
|
i += 4; |
|
} |
|
|
|
if (defaultSampleDurationPresent) { |
|
result.defaultSampleDuration = view.getUint32(i); |
|
i += 4; |
|
} |
|
|
|
if (defaultSampleSizePresent) { |
|
result.defaultSampleSize = view.getUint32(i); |
|
i += 4; |
|
} |
|
|
|
if (defaultSampleFlagsPresent) { |
|
result.defaultSampleFlags = view.getUint32(i); |
|
} |
|
|
|
if (durationIsEmpty) { |
|
result.durationIsEmpty = true; |
|
} |
|
|
|
if (!baseDataOffsetPresent && defaultBaseIsMoof) { |
|
result.baseDataOffsetIsMoof = true; |
|
} |
|
|
|
return result; |
|
}, |
|
tkhd: function tkhd(data) { |
|
var view = new DataView(data.buffer, data.byteOffset, data.byteLength), |
|
i = 4, |
|
result = { |
|
version: view.getUint8(0), |
|
flags: new Uint8Array(data.subarray(1, 4)) |
|
}; |
|
|
|
if (result.version === 1) { |
|
i += 4; |
|
result.creationTime = parseMp4Date(view.getUint32(i)); // truncating top 4 bytes |
|
|
|
i += 8; |
|
result.modificationTime = parseMp4Date(view.getUint32(i)); // truncating top 4 bytes |
|
|
|
i += 4; |
|
result.trackId = view.getUint32(i); |
|
i += 4; |
|
i += 8; |
|
result.duration = view.getUint32(i); // truncating top 4 bytes |
|
} else { |
|
result.creationTime = parseMp4Date(view.getUint32(i)); |
|
i += 4; |
|
result.modificationTime = parseMp4Date(view.getUint32(i)); |
|
i += 4; |
|
result.trackId = view.getUint32(i); |
|
i += 4; |
|
i += 4; |
|
result.duration = view.getUint32(i); |
|
} |
|
|
|
i += 4; |
|
i += 2 * 4; |
|
result.layer = view.getUint16(i); |
|
i += 2; |
|
result.alternateGroup = view.getUint16(i); |
|
i += 2; // convert fixed-point, base 16 back to a number |
|
|
|
result.volume = view.getUint8(i) + view.getUint8(i + 1) / 8; |
|
i += 2; |
|
i += 2; |
|
result.matrix = new Uint32Array(data.subarray(i, i + 9 * 4)); |
|
i += 9 * 4; |
|
result.width = view.getUint16(i) + view.getUint16(i + 2) / 16; |
|
i += 4; |
|
result.height = view.getUint16(i) + view.getUint16(i + 2) / 16; |
|
return result; |
|
}, |
|
  // TrackFragmentBox: container — recurse.
  traf: function traf(data) {
    return {
      boxes: inspectMp4(data)
    };
  },
  // TrackBox: container — recurse.
  trak: function trak(data) {
    return {
      boxes: inspectMp4(data)
    };
  },
  // TrackExtendsBox: movie-wide defaults for samples in fragments.
  trex: function trex(data) {
    var view = new DataView(data.buffer, data.byteOffset, data.byteLength);
    return {
      version: data[0],
      flags: new Uint8Array(data.subarray(1, 4)),
      trackId: view.getUint32(4),
      defaultSampleDescriptionIndex: view.getUint32(8),
      defaultSampleDuration: view.getUint32(12),
      defaultSampleSize: view.getUint32(16),
      sampleDependsOn: data[20] & 0x03,
      sampleIsDependedOn: (data[21] & 0xc0) >> 6,
      sampleHasRedundancy: (data[21] & 0x30) >> 4,
      samplePaddingValue: (data[21] & 0x0e) >> 1,
      sampleIsDifferenceSample: !!(data[21] & 0x01),
      sampleDegradationPriority: view.getUint16(22)
    };
  },
|
trun: function trun(data) { |
|
var result = { |
|
version: data[0], |
|
flags: new Uint8Array(data.subarray(1, 4)), |
|
samples: [] |
|
}, |
|
view = new DataView(data.buffer, data.byteOffset, data.byteLength), |
|
// Flag interpretation |
|
dataOffsetPresent = result.flags[2] & 0x01, |
|
// compare with 2nd byte of 0x1 |
|
firstSampleFlagsPresent = result.flags[2] & 0x04, |
|
// compare with 2nd byte of 0x4 |
|
sampleDurationPresent = result.flags[1] & 0x01, |
|
// compare with 2nd byte of 0x100 |
|
sampleSizePresent = result.flags[1] & 0x02, |
|
// compare with 2nd byte of 0x200 |
|
sampleFlagsPresent = result.flags[1] & 0x04, |
|
// compare with 2nd byte of 0x400 |
|
sampleCompositionTimeOffsetPresent = result.flags[1] & 0x08, |
|
// compare with 2nd byte of 0x800 |
|
sampleCount = view.getUint32(4), |
|
offset = 8, |
|
sample; |
|
|
|
if (dataOffsetPresent) { |
|
// 32 bit signed integer |
|
result.dataOffset = view.getInt32(offset); |
|
offset += 4; |
|
} // Overrides the flags for the first sample only. The order of |
|
// optional values will be: duration, size, compositionTimeOffset |
|
|
|
|
|
if (firstSampleFlagsPresent && sampleCount) { |
|
sample = { |
|
flags: parseSampleFlags(data.subarray(offset, offset + 4)) |
|
}; |
|
offset += 4; |
|
|
|
if (sampleDurationPresent) { |
|
sample.duration = view.getUint32(offset); |
|
offset += 4; |
|
} |
|
|
|
if (sampleSizePresent) { |
|
sample.size = view.getUint32(offset); |
|
offset += 4; |
|
} |
|
|
|
if (sampleCompositionTimeOffsetPresent) { |
|
// Note: this should be a signed int if version is 1 |
|
sample.compositionTimeOffset = view.getUint32(offset); |
|
offset += 4; |
|
} |
|
|
|
result.samples.push(sample); |
|
sampleCount--; |
|
} |
|
|
|
while (sampleCount--) { |
|
sample = {}; |
|
|
|
if (sampleDurationPresent) { |
|
sample.duration = view.getUint32(offset); |
|
offset += 4; |
|
} |
|
|
|
if (sampleSizePresent) { |
|
sample.size = view.getUint32(offset); |
|
offset += 4; |
|
} |
|
|
|
if (sampleFlagsPresent) { |
|
sample.flags = parseSampleFlags(data.subarray(offset, offset + 4)); |
|
offset += 4; |
|
} |
|
|
|
if (sampleCompositionTimeOffsetPresent) { |
|
// Note: this should be a signed int if version is 1 |
|
sample.compositionTimeOffset = view.getUint32(offset); |
|
offset += 4; |
|
} |
|
|
|
result.samples.push(sample); |
|
} |
|
|
|
return result; |
|
}, |
|
  // DataEntryUrlBox: only version/flags are surfaced here.
  'url ': function url(data) {
    return {
      version: data[0],
      flags: new Uint8Array(data.subarray(1, 4))
    };
  },
  // VideoMediaHeaderBox: composition graphics mode and RGB opcolor.
  vmhd: function vmhd(data) {
    var view = new DataView(data.buffer, data.byteOffset, data.byteLength);
    return {
      version: data[0],
      flags: new Uint8Array(data.subarray(1, 4)),
      graphicsmode: view.getUint16(4),
      opcolor: new Uint16Array([view.getUint16(6), view.getUint16(8), view.getUint16(10)])
    };
  }
};
|
/** |
|
* Return a javascript array of box objects parsed from an ISO base |
|
* media file. |
|
* @param data {Uint8Array} the binary data of the media to be inspected |
|
* @return {array} a javascript array of potentially nested box objects |
|
*/ |
|
|
|
|
|
// Return a javascript array of box objects parsed from an ISO base media
// file. Walks the top-level size/type box headers and dispatches each box
// payload to its handler in parse$1 (which may recurse back here).
// @param data {Uint8Array} the binary data of the media to be inspected
// @return {array} a javascript array of potentially nested box objects
inspectMp4 = function inspectMp4(data) {
  var i = 0,
      result = [],
      view,
      size,
      type,
      end,
      box;

  // View the existing bytes directly rather than copying them into a fresh
  // ArrayBuffer: DataView's byteOffset/byteLength arguments keep offsets
  // correct even when `data` is a subarray, and the old byte-by-byte copy
  // made inspection quadratic for nested container boxes.
  view = new DataView(data.buffer, data.byteOffset, data.byteLength);

  while (i < data.byteLength) {
    // parse box data
    size = view.getUint32(i);
    type = parseType$2(data.subarray(i + 4, i + 8));
    end = size > 1 ? i + size : data.byteLength; // parse type-specific data

    box = (parse$1[type] || function (data) {
      return {
        data: data
      };
    })(data.subarray(i + 8, end));

    box.size = size;
    box.type = type; // store this box and move to the next

    result.push(box);
    i = end;
  }

  return result;
};
|
/** |
|
 * Returns a textual representation of the javascript representation
|
* of an MP4 file. You can use it as an alternative to |
|
* JSON.stringify() to compare inspected MP4s. |
|
* @param inspectedMp4 {array} the parsed array of boxes in an MP4 |
|
* file |
|
* @param depth {number} (optional) the number of ancestor boxes of |
|
* the elements of inspectedMp4. Assumed to be zero if unspecified. |
|
* @return {string} a text representation of the parsed MP4 |
|
*/ |
|
|
|
|
|
// Renders an inspected box tree (from inspectMp4) as indented text: one line
// per box type, its scalar properties beneath it, byte arrays shown as hex
// dumps, and child boxes recursively indented two spaces deeper.
_textifyMp = function textifyMp4(inspectedMp4, depth) {
  var indent;
  depth = depth || 0;
  indent = new Array(depth * 2 + 1).join(' '); // iterate over all the boxes

  return inspectedMp4.map(function (box, index) {
    // list the box type first at the current indentation level
    return indent + box.type + '\n' + // the type is already included and handle child boxes separately
    Object.keys(box).filter(function (key) {
      return key !== 'type' && key !== 'boxes'; // output all the box properties
    }).map(function (key) {
      var prefix = indent + ' ' + key + ': ',
          value = box[key]; // print out raw bytes as hexademical

      if (value instanceof Uint8Array || value instanceof Uint32Array) {
        // hex-dump the underlying bytes, wrapped to 24 hex chars per line
        var bytes = Array.prototype.slice.call(new Uint8Array(value.buffer, value.byteOffset, value.byteLength)).map(function (byte) {
          return ' ' + ('00' + byte.toString(16)).slice(-2);
        }).join('').match(/.{1,24}/g);

        if (!bytes) {
          return prefix + '<>';
        }

        if (bytes.length === 1) {
          return prefix + '<' + bytes.join('').slice(1) + '>';
        }

        return prefix + '<\n' + bytes.map(function (line) {
          return indent + '  ' + line;
        }).join('\n') + '\n' + indent + '  >';
      } // stringify generic objects

      return prefix + JSON.stringify(value, null, 2).split('\n').map(function (line, index) {
        if (index === 0) {
          return line;
        }

        return indent + '  ' + line;
      }).join('\n');
    }).join('\n') + ( // recursively textify the child boxes
    box.boxes ? '\n' + _textifyMp(box.boxes, depth + 1) : '');
  }).join('\n');
};
|
|
|
// Public MP4 inspector API: whole-file inspection/textification plus the
// individual box parsers needed elsewhere (e.g. by the caption parser).
var mp4Inspector = {
  inspect: inspectMp4,
  textify: _textifyMp,
  parseTfdt: parse$1.tfdt,
  parseHdlr: parse$1.hdlr,
  parseTfhd: parse$1.tfhd,
  parseTrun: parse$1.trun,
  parseSidx: parse$1.sidx
};

// Local aliases used by the fmp4 caption-parsing helpers below.
var discardEmulationPreventionBytes$1 = captionPacketParser.discardEmulationPreventionBytes;
var CaptionStream$1 = captionStream.CaptionStream;
|
/** |
|
 * Maps an offset in the mdat to a sample based on the size of the samples.
|
* Assumes that `parseSamples` has been called first. |
|
* |
|
* @param {Number} offset - The offset into the mdat |
|
* @param {Object[]} samples - An array of samples, parsed using `parseSamples` |
|
* @return {?Object} The matching sample, or null if no match was found. |
|
* |
|
* @see ISO-BMFF-12/2015, Section 8.8.8 |
|
**/ |
|
|
|
// Walks the sample list, subtracting each sample's size from the offset
// until the offset falls inside a sample; that sample is the match.
var mapToSample = function mapToSample(offset, samples) {
  var remaining = offset;

  for (var idx = 0; idx < samples.length; idx++) {
    var candidate = samples[idx];

    if (remaining < candidate.size) {
      return candidate;
    }

    remaining -= candidate.size;
  }

  return null;
};
|
/**
 * Finds SEI nal units contained in a Media Data Box.
 * Assumes that `parseSamples` has been called first.
 *
 * @param {Uint8Array} avcStream - The bytes of the mdat
 * @param {Object[]} samples - The samples parsed out by `parseSamples`
 * @param {Number} trackId - The trackId of this video track
 * @return {Object[]} seiNals - the parsed SEI NALUs found.
 *   The contents of the seiNal should match what is expected by
 *   CaptionStream.push (nalUnitType, size, data, escapedRBSP, pts, dts)
 *
 * @see ISO-BMFF-12/2015, Section 8.1.1
 * @see Rec. ITU-T H.264, 7.3.2.3.1
 **/
var findSeiNals = function findSeiNals(avcStream, samples, trackId) {
  var avcView = new DataView(avcStream.buffer, avcStream.byteOffset, avcStream.byteLength),
      result = [],
      seiNal,
      i,
      length,
      lastMatchedSample;

  for (i = 0; i + 4 < avcStream.length; i += length) {
    length = avcView.getUint32(i);
    i += 4; // Bail if this doesn't appear to be an H264 stream

    if (length <= 0) {
      continue;
    }

    switch (avcStream[i] & 0x1F) {
      case 0x06:
        var data = avcStream.subarray(i + 1, i + 1 + length);
        var matchingSample = mapToSample(i, samples);
        seiNal = {
          nalUnitType: 'sei_rbsp',
          size: length,
          data: data,
          escapedRBSP: discardEmulationPreventionBytes$1(data),
          trackId: trackId
        };

        if (matchingSample) {
          seiNal.pts = matchingSample.pts;
          seiNal.dts = matchingSample.dts;
          lastMatchedSample = matchingSample;
        } else if (lastMatchedSample) {
          // If a matching sample cannot be found, use the last
          // sample's values as they should be as close as possible
          seiNal.pts = lastMatchedSample.pts;
          seiNal.dts = lastMatchedSample.dts;
        } else {
          // Fix: previously this branch dereferenced an undefined
          // lastMatchedSample and threw a TypeError. With no timing
          // information available the SEI NAL cannot be scheduled, so
          // drop it instead of crashing (see mux.js issue #233).
          break;
        }

        result.push(seiNal);
        break;

      default:
        break;
    }
  }

  return result;
};
|
/**
 * Parses sample information out of Track Run Boxes and calculates
 * the absolute presentation and decode timestamps of each sample.
 *
 * @param {Array<Uint8Array>} truns - The Track Run boxes to be parsed
 * @param {Number} baseMediaDecodeTime - base media decode time from tfdt
 *   @see ISO-BMFF-12/2015, Section 8.8.12
 * @param {Object} tfhd - The parsed Track Fragment Header
 *   @see inspect.parseTfhd
 * @return {Object[]} the parsed samples
 *
 * @see ISO-BMFF-12/2015, Section 8.8.8
 **/
var parseSamples = function parseSamples(truns, baseMediaDecodeTime, tfhd) {
  var dts = baseMediaDecodeTime;
  var fallbackDuration = tfhd.defaultSampleDuration || 0;
  var fallbackSize = tfhd.defaultSampleSize || 0;
  var trackId = tfhd.trackId;
  var result = [];
  var trunIndex;
  var sampleIndex;
  var trunSamples;
  var sample;

  // Note: We currently do not parse the sample table as well
  // as the trun. It's possible some sources will require this.
  // moov > trak > mdia > minf > stbl
  for (trunIndex = 0; trunIndex < truns.length; trunIndex++) {
    trunSamples = mp4Inspector.parseTrun(truns[trunIndex]).samples;

    for (sampleIndex = 0; sampleIndex < trunSamples.length; sampleIndex++) {
      sample = trunSamples[sampleIndex];

      // fill in any fields the trun omitted from the tfhd defaults
      if (sample.duration === undefined) {
        sample.duration = fallbackDuration;
      }

      if (sample.size === undefined) {
        sample.size = fallbackSize;
      }

      if (sample.compositionTimeOffset === undefined) {
        sample.compositionTimeOffset = 0;
      }

      sample.trackId = trackId;
      sample.dts = dts;
      sample.pts = dts + sample.compositionTimeOffset;
      dts += sample.duration;

      result.push(sample);
    }
  }

  return result;
};
|
/**
 * Parses out caption nals from an FMP4 segment's video tracks.
 *
 * @param {Uint8Array} segment - The bytes of a single segment
 * @param {Number} videoTrackId - The trackId of a video track in the segment
 * @return {Object.<Number, Object[]>} A mapping of video trackId to
 *   a list of seiNals found in that track
 **/
var parseCaptionNals = function parseCaptionNals(segment, videoTrackId) {
  // To get the samples
  var trafs = probe.findBox(segment, ['moof', 'traf']);
  // To get SEI NAL units
  var mdats = probe.findBox(segment, ['mdat']);
  var captionNals = {};

  // moofs and mdats come in pairs, so the i-th mdat belongs to the i-th traf
  mdats.forEach(function (mdat, index) {
    var traf = trafs[index];
    var tfhd = probe.findBox(traf, ['tfhd']); // Exactly 1 tfhd per traf
    var headerInfo = mp4Inspector.parseTfhd(tfhd[0]);
    var trackId = headerInfo.trackId;
    var tfdt = probe.findBox(traf, ['tfdt']); // Either 0 or 1 tfdt per traf
    var baseMediaDecodeTime = tfdt.length > 0 ? mp4Inspector.parseTfdt(tfdt[0]).baseMediaDecodeTime : 0;
    var truns = probe.findBox(traf, ['trun']);

    // Only parse video data for the chosen video track
    if (videoTrackId !== trackId || truns.length === 0) {
      return;
    }

    var samples = parseSamples(truns, baseMediaDecodeTime, headerInfo);
    var seiNals = findSeiNals(mdat, samples, trackId);

    if (!captionNals[trackId]) {
      captionNals[trackId] = [];
    }

    captionNals[trackId] = captionNals[trackId].concat(seiNals);
  });

  return captionNals;
};
|
/**
 * Parses out inband captions from an MP4 container and returns
 * caption objects that can be used by WebVTT and the TextTrack API.
 * @see https://developer.mozilla.org/en-US/docs/Web/API/VTTCue
 * @see https://developer.mozilla.org/en-US/docs/Web/API/TextTrack
 * Assumes that `probe.getVideoTrackIds` and `probe.timescale` have been called first
 *
 * @param {Uint8Array} segment - The fmp4 segment containing embedded captions
 * @param {Number} trackId - The id of the video track to parse
 * @param {Number} timescale - The timescale for the video track from the init segment
 *
 * @return {?Object} parsedCaptions - The SEI NALs found for the track plus the
 *   timescale needed to convert them to seconds, or null when no track was given
 **/
var parseEmbeddedCaptions = function parseEmbeddedCaptions(segment, trackId, timescale) {
  if (!trackId) {
    return null;
  }

  var nalsByTrack = parseCaptionNals(segment, trackId);

  return {
    seiNals: nalsByTrack[trackId],
    timescale: timescale
  };
};
|
/**
 * Converts SEI NALUs into captions that can be used by video.js.
 * Constructor; all state lives in this closure. Call `init()` before
 * `parse()` — segments parsed before a trackId/timescale are known are
 * cached and replayed once an init segment has been seen.
 **/
var CaptionParser = function CaptionParser() {
  // whether init() has been called and a CaptionStream exists
  var isInitialized = false;
  // the wrapped CaptionStream instance that turns pushed SEI NALUs into cues
  var captionStream$$1;
  // Stores segments seen before trackId and timescale are set
  var segmentCache;
  // Stores video track ID of the track being parsed
  var trackId;
  // Stores the timescale of the track being parsed
  var timescale;
  // Stores captions parsed so far
  var parsedCaptions;

  /**
   * A method to indicate whether a CaptionParser has been initalized
   * @returns {Boolean}
   **/
  this.isInitialized = function () {
    return isInitialized;
  };

  /**
   * Initializes the underlying CaptionStream, SEI NAL parsing
   * and management, and caption collection
   **/
  this.init = function () {
    captionStream$$1 = new CaptionStream$1();
    isInitialized = true;

    // Collect dispatched captions
    captionStream$$1.on('data', function (event) {
      // Convert to seconds in the source's timescale
      event.startTime = event.startPts / timescale;
      event.endTime = event.endPts / timescale;
      parsedCaptions.captions.push(event);
      // record which caption channel (e.g. CC1..CC4) produced the cue
      parsedCaptions.captionStreams[event.stream] = true;
    });
  };

  /**
   * Determines if a new video track will be selected
   * or if the timescale changed
   * @return {Boolean}
   **/
  this.isNewInit = function (videoTrackIds, timescales) {
    // an empty track list or empty timescale map carries no init info
    if (videoTrackIds && videoTrackIds.length === 0 || timescales && typeof timescales === 'object' && Object.keys(timescales).length === 0) {
      return false;
    }

    return trackId !== videoTrackIds[0] || timescale !== timescales[trackId];
  };

  /**
   * Parses out SEI captions and interacts with underlying
   * CaptionStream to return dispatched captions
   *
   * @param {Uint8Array} segment - The fmp4 segment containing embedded captions
   * @param {Number[]} videoTrackIds - A list of video tracks found in the init segment
   * @param {Object.<Number, Number>} timescales - The timescales found in the init segment
   * @return {?Object} the accumulated parsedCaptions, or null when nothing
   *   could be parsed (uninitialized, missing init info, or no SEI NALs)
   * @see parseEmbeddedCaptions
   * @see m2ts/caption-stream.js
   **/
  this.parse = function (segment, videoTrackIds, timescales) {
    var parsedData;

    if (!this.isInitialized()) {
      return null; // This is not likely to be a video segment
    } else if (!videoTrackIds || !timescales) {
      return null;
    } else if (this.isNewInit(videoTrackIds, timescales)) {
      // Use the first video track only as there is no
      // mechanism to switch to other video tracks
      trackId = videoTrackIds[0];
      timescale = timescales[trackId];
      // If an init segment has not been seen yet, hold onto segment
      // data until we have one
    } else if (!trackId || !timescale) {
      segmentCache.push(segment);
      return null;
    }

    // Now that a timescale and trackId is set, parse cached segments
    // (recursive calls; the cache is drained before this segment is parsed)
    while (segmentCache.length > 0) {
      var cachedSegment = segmentCache.shift();
      this.parse(cachedSegment, videoTrackIds, timescales);
    }

    parsedData = parseEmbeddedCaptions(segment, trackId, timescale);

    if (parsedData === null || !parsedData.seiNals) {
      return null;
    }

    this.pushNals(parsedData.seiNals);
    // Force the parsed captions to be dispatched
    this.flushStream();
    return parsedCaptions;
  };

  /**
   * Pushes SEI NALUs onto CaptionStream
   * @param {Object[]} nals - A list of SEI nals parsed using `parseCaptionNals`
   * Assumes that `parseCaptionNals` has been called first
   * @see m2ts/caption-stream.js
   **/
  this.pushNals = function (nals) {
    if (!this.isInitialized() || !nals || nals.length === 0) {
      return null;
    }

    nals.forEach(function (nal) {
      captionStream$$1.push(nal);
    });
  };

  /**
   * Flushes underlying CaptionStream to dispatch processed, displayable captions
   * @see m2ts/caption-stream.js
   **/
  this.flushStream = function () {
    if (!this.isInitialized()) {
      return null;
    }

    captionStream$$1.flush();
  };

  /**
   * Reset caption buckets for new data
   **/
  this.clearParsedCaptions = function () {
    parsedCaptions.captions = [];
    parsedCaptions.captionStreams = {};
  };

  /**
   * Resets underlying CaptionStream
   * @see m2ts/caption-stream.js
   **/
  this.resetCaptionStream = function () {
    if (!this.isInitialized()) {
      return null;
    }

    captionStream$$1.reset();
  };

  /**
   * Convenience method to clear all captions flushed from the
   * CaptionStream and still being parsed
   * @see m2ts/caption-stream.js
   **/
  this.clearAllCaptions = function () {
    this.clearParsedCaptions();
    this.resetCaptionStream();
  };

  /**
   * Reset caption parser: drops cached segments, forgets the selected
   * track/timescale and empties the caption buckets.
   **/
  this.reset = function () {
    segmentCache = [];
    trackId = null;
    timescale = null;

    if (!parsedCaptions) {
      parsedCaptions = {
        captions: [],
        // CC1, CC2, CC3, CC4
        captionStreams: {}
      };
    } else {
      this.clearParsedCaptions();
    }

    this.resetCaptionStream();
  };

  // establish the initial (empty) state on construction
  this.reset();
};
|
|
|
// Alias so the module export below reads like the other mux.js modules.
var captionParser = CaptionParser;

// Public mp4 module surface: generator, probing helpers, transmuxer
// streams and the caption parser defined above.
var mp4 = {
  generator: mp4Generator,
  probe: probe,
  Transmuxer: transmuxer.Transmuxer,
  AudioSegmentStream: transmuxer.AudioSegmentStream,
  VideoSegmentStream: transmuxer.VideoSegmentStream,
  CaptionParser: captionParser
};
// Flattened binding for mp4.CaptionParser (bundler-generated numbered alias).
var mp4_6 = mp4.CaptionParser;
|
|
|
// Extracts the 13-bit packet identifier (PID) from the header of a single
// MPEG-2 transport stream packet.
var parsePid = function parsePid(packet) {
  // PID = low 5 bits of byte 1 followed by all 8 bits of byte 2
  return ((packet[1] & 0x1f) << 8) | packet[2];
};
|
|
|
// Reads the payload_unit_start_indicator flag (bit 0x40 of header byte 1)
// of a transport stream packet, as a boolean.
var parsePayloadUnitStartIndicator = function parsePayloadUnitStartIndicator(packet) {
  return (packet[1] & 0x40) !== 0;
};
|
|
|
// Returns the number of payload bytes consumed by the adaptation field of a
// transport stream packet (0 when no adaptation field is present).
var parseAdaptionField = function parseAdaptionField(packet) {
  // The adaptation_field_control bits are the high nibble of byte 3; values
  // 0b10 and 0b11 indicate an adaptation field is present. Its length is
  // given by byte 4, plus one byte for the length field itself. The
  // adaptation field is used to add stuffing to PES packets that don't fill
  // a complete TS packet, and to specify some forms of timing and control
  // data that we do not currently use.
  if (((packet[3] & 0x30) >>> 4) > 0x01) {
    return packet[4] + 1;
  }

  return 0;
};
|
|
|
// Classifies a transport stream packet as 'pat', 'pmt' or 'pes' based on its
// PID. Returns null when the PMT pid is not yet known and the packet is not
// the PAT.
var parseType$3 = function parseType(packet, pmtPid) {
  var pid = parsePid(packet);

  if (pid === 0) {
    return 'pat';
  }

  if (pid === pmtPid) {
    return 'pmt';
  }

  return pmtPid ? 'pes' : null;
};
|
|
|
// Extracts the PMT pid from a Program Association Table packet.
var parsePat = function parsePat(packet) {
  var offset = 4 + parseAdaptionField(packet);

  // when the payload_unit_start_indicator is set, the payload begins with a
  // pointer_field byte that must also be skipped
  if (parsePayloadUnitStartIndicator(packet)) {
    offset += packet[offset] + 1;
  }

  // program_map_PID: 13 bits spread over two bytes of the first program entry
  return ((packet[offset + 10] & 0x1f) << 8) | packet[offset + 11];
};
|
|
|
// Parses a Program Map Table packet into a mapping of elementary PID ->
// stream_type. Returns undefined for "future" PMTs that are not yet in
// effect.
var parsePmt = function parsePmt(packet) {
  var programMapTable = {};
  var pusi = parsePayloadUnitStartIndicator(packet);
  var payloadOffset = 4 + parseAdaptionField(packet);

  // a payload-unit start means a pointer_field byte precedes the section
  if (pusi) {
    payloadOffset += packet[payloadOffset] + 1;
  }
  // PMTs can be sent ahead of the time when they should actually
  // take effect. We don't believe this should ever be the case
  // for HLS but we'll ignore "forward" PMT declarations if we see
  // them. Future PMT declarations have the current_next_indicator
  // set to zero.

  if (!(packet[payloadOffset + 5] & 0x01)) {
    return;
  }

  var sectionLength, tableEnd, programInfoLength;
  // the mapping table ends at the end of the current section
  sectionLength = (packet[payloadOffset + 1] & 0x0f) << 8 | packet[payloadOffset + 2];
  // section_length counts from right after its own field; subtract the
  // 4-byte CRC at the end of the section
  tableEnd = 3 + sectionLength - 4;
  // to determine where the table is, we have to figure out how
  // long the program info descriptors are
  programInfoLength = (packet[payloadOffset + 10] & 0x0f) << 8 | packet[payloadOffset + 11];
  // advance the offset to the first entry in the mapping table
  var offset = 12 + programInfoLength;

  while (offset < tableEnd) {
    var i = payloadOffset + offset;
    // add an entry that maps the elementary_pid to the stream_type
    programMapTable[(packet[i + 1] & 0x1F) << 8 | packet[i + 2]] = packet[i];
    // move to the next table entry
    // skip past the elementary stream descriptors, if present
    offset += ((packet[i + 3] & 0x0F) << 8 | packet[i + 4]) + 5;
  }

  return programMapTable;
};
|
|
|
// Maps a PES packet to the kind of elementary stream it carries, using the
// pid -> stream_type mapping parsed from the PMT. Unknown stream types map
// to null.
var parsePesType = function parsePesType(packet, programMapTable) {
  var streamType = programMapTable[parsePid(packet)];

  if (streamType === streamTypes.H264_STREAM_TYPE) {
    return 'video';
  }

  if (streamType === streamTypes.ADTS_STREAM_TYPE) {
    return 'audio';
  }

  if (streamType === streamTypes.METADATA_STREAM_TYPE) {
    return 'timed-metadata';
  }

  return null;
};
|
|
|
// Extracts the PTS (and DTS, when present) from a TS packet carrying the
// start of a PES packet. Returns null when the packet carries no PES header
// or no timestamps.
var parsePesTime = function parsePesTime(packet) {
  var pusi = parsePayloadUnitStartIndicator(packet);

  // the PES header only appears at the start of a payload unit
  if (!pusi) {
    return null;
  }

  var offset = 4 + parseAdaptionField(packet);

  if (offset >= packet.byteLength) {
    // From the H 222.0 MPEG-TS spec
    // "For transport stream packets carrying PES packets, stuffing is needed when there
    //  is insufficient PES packet data to completely fill the transport stream packet
    //  payload bytes. Stuffing is accomplished by defining an adaptation field longer than
    //  the sum of the lengths of the data elements in it, so that the payload bytes
    //  remaining after the adaptation field exactly accommodates the available PES packet
    //  data."
    //
    // If the offset is >= the length of the packet, then the packet contains no data
    // and instead is just adaption field stuffing bytes
    return null;
  }

  var pes = null;
  var ptsDtsFlags;

  // PES packets may be annotated with a PTS value, or a PTS value
  // and a DTS value. Determine what combination of values is
  // available to work with.
  ptsDtsFlags = packet[offset + 7];

  // PTS and DTS are normally stored as a 33-bit number. Javascript
  // performs all bitwise operations on 32-bit integers but javascript
  // supports a much greater range (52-bits) of integer using standard
  // mathematical operations.
  // We construct a 31-bit value using bitwise operators over the 31
  // most significant bits and then multiply by 4 (equal to a left-shift
  // of 2) before we add the final 2 least significant bits of the
  // timestamp (equal to an OR.)
  if (ptsDtsFlags & 0xC0) {
    pes = {};
    // the PTS and DTS are not written out directly. For information
    // on how they are encoded, see
    // http://dvd.sourceforge.net/dvdinfo/pes-hdr.html
    pes.pts = (packet[offset + 9] & 0x0E) << 27 | (packet[offset + 10] & 0xFF) << 20 | (packet[offset + 11] & 0xFE) << 12 | (packet[offset + 12] & 0xFF) << 5 | (packet[offset + 13] & 0xFE) >>> 3;
    pes.pts *= 4; // Left shift by 2
    pes.pts += (packet[offset + 13] & 0x06) >>> 1; // OR by the two LSBs

    // when no separate DTS is present, DTS equals PTS
    pes.dts = pes.pts;

    if (ptsDtsFlags & 0x40) {
      pes.dts = (packet[offset + 14] & 0x0E) << 27 | (packet[offset + 15] & 0xFF) << 20 | (packet[offset + 16] & 0xFE) << 12 | (packet[offset + 17] & 0xFF) << 5 | (packet[offset + 18] & 0xFE) >>> 3;
      pes.dts *= 4; // Left shift by 2
      pes.dts += (packet[offset + 18] & 0x06) >>> 1; // OR by the two LSBs
    }
  }

  return pes;
};
|
|
|
// Translates an H.264 nal_unit_type value into the spec name for the NAL
// unit types this inspector cares about; every other type maps to null.
var parseNalUnitType = function parseNalUnitType(type) {
  var names = {
    0x05: 'slice_layer_without_partitioning_rbsp_idr',
    0x06: 'sei_rbsp',
    0x07: 'seq_parameter_set_rbsp',
    0x08: 'pic_parameter_set_rbsp',
    0x09: 'access_unit_delimiter_rbsp'
  };

  return names.hasOwnProperty(type) ? names[type] : null;
};
|
|
|
// Scans a reassembled video frame (TS payloads concatenated) for an IDR
// slice NAL, i.e. a keyframe. Walks Annex-B style start codes.
var videoPacketContainsKeyFrame = function videoPacketContainsKeyFrame(packet) {
  var offset = 4 + parseAdaptionField(packet);
  var frameBuffer = packet.subarray(offset);
  var frameI = 0;
  var frameSyncPoint = 0;
  var foundKeyFrame = false;
  var nalType;

  // advance the sync point to a NAL start, if necessary
  // NOTE(review): only the third byte is checked against 1 here; the two
  // preceding bytes are not verified to be 0 — confirm intended.
  for (; frameSyncPoint < frameBuffer.byteLength - 3; frameSyncPoint++) {
    if (frameBuffer[frameSyncPoint + 2] === 1) {
      // the sync point is properly aligned
      frameI = frameSyncPoint + 5;
      break;
    }
  }

  while (frameI < frameBuffer.byteLength) {
    // look at the current byte to determine if we've hit the end of
    // a NAL unit boundary
    switch (frameBuffer[frameI]) {
      case 0:
        // skip past non-sync sequences
        if (frameBuffer[frameI - 1] !== 0) {
          frameI += 2;
          break;
        } else if (frameBuffer[frameI - 2] !== 0) {
          frameI++;
          break;
        }

        // a start code (00 00 01) ends the previous NAL; classify it unless
        // it is zero-length
        if (frameSyncPoint + 3 !== frameI - 2) {
          nalType = parseNalUnitType(frameBuffer[frameSyncPoint + 3] & 0x1f);

          if (nalType === 'slice_layer_without_partitioning_rbsp_idr') {
            foundKeyFrame = true;
          }
        }

        // drop trailing zeroes
        do {
          frameI++;
        } while (frameBuffer[frameI] !== 1 && frameI < frameBuffer.length);

        frameSyncPoint = frameI - 2;
        frameI += 3;
        break;

      case 1:
        // skip past non-sync sequences
        if (frameBuffer[frameI - 1] !== 0 || frameBuffer[frameI - 2] !== 0) {
          frameI += 3;
          break;
        }

        // found 00 00 01: classify the NAL that just ended
        nalType = parseNalUnitType(frameBuffer[frameSyncPoint + 3] & 0x1f);

        if (nalType === 'slice_layer_without_partitioning_rbsp_idr') {
          foundKeyFrame = true;
        }

        frameSyncPoint = frameI - 2;
        frameI += 3;
        break;

      default:
        // the current byte isn't a one or zero, so it cannot be part
        // of a sync sequence
        frameI += 3;
        break;
    }
  }

  frameBuffer = frameBuffer.subarray(frameSyncPoint);
  frameI -= frameSyncPoint;
  frameSyncPoint = 0;

  // parse the final nal
  if (frameBuffer && frameBuffer.byteLength > 3) {
    nalType = parseNalUnitType(frameBuffer[frameSyncPoint + 3] & 0x1f);

    if (nalType === 'slice_layer_without_partitioning_rbsp_idr') {
      foundKeyFrame = true;
    }
  }

  return foundKeyFrame;
};
|
|
|
// Public surface of the MPEG-2 TS probing helpers defined above.
var probe$1 = {
  parseType: parseType$3,
  parsePat: parsePat,
  parsePmt: parsePmt,
  parsePayloadUnitStartIndicator: parsePayloadUnitStartIndicator,
  parsePesType: parsePesType,
  parsePesTime: parsePesTime,
  videoPacketContainsKeyFrame: videoPacketContainsKeyFrame
};
|
|
|
// Compensates 33-bit PTS/DTS rollover relative to a reference timestamp;
// defined by the timestamp-rollover stream module earlier in this bundle.
var handleRollover$1 = timestampRolloverStream.handleRollover;
// Container probing helpers used by the inspector, keyed by container kind.
var probe$2 = {};
probe$2.ts = probe$1;
probe$2.aac = utils;
// 90kHz clock used by PES timestamps; MPEG-2 TS packet size and sync byte.
var PES_TIMESCALE = 90000,
    MP2T_PACKET_LENGTH$1 = 188,
    // bytes
    SYNC_BYTE$1 = 0x47;
|
/**
 * walks through segment data looking for pat and pmt packets to parse out
 * program map table information
 */
var parsePsi_ = function parsePsi_(bytes, pmt) {
  var startIndex = 0;
  var endIndex = MP2T_PACKET_LENGTH$1;
  var packet;
  var type;

  while (endIndex < bytes.byteLength) {
    // A packet is delimited by a pair of sync bytes. If this window isn't
    // one, we have become de-synchronized: slide forward one byte at a
    // time until a pair of sync bytes denotes a packet again.
    if (bytes[startIndex] !== SYNC_BYTE$1 || bytes[endIndex] !== SYNC_BYTE$1) {
      startIndex++;
      endIndex++;
      continue;
    }

    packet = bytes.subarray(startIndex, endIndex);
    type = probe$2.ts.parseType(packet, pmt.pid);

    if (type === 'pat' && !pmt.pid) {
      pmt.pid = probe$2.ts.parsePat(packet);
    } else if (type === 'pmt' && !pmt.table) {
      pmt.table = probe$2.ts.parsePmt(packet);
    }

    // Found the pat and pmt, we can stop walking the segment
    if (pmt.pid && pmt.table) {
      return;
    }

    startIndex += MP2T_PACKET_LENGTH$1;
    endIndex += MP2T_PACKET_LENGTH$1;
  }
};
|
/**
 * walks through the segment data from the start and end to get timing information
 * for the first and last audio pes packets
 */
var parseAudioPes_ = function parseAudioPes_(bytes, pmt, result) {
  var startIndex = 0,
      endIndex = MP2T_PACKET_LENGTH$1,
      packet,
      type,
      pesType,
      pusi,
      parsed;
  var endLoop = false;

  // Start walking from start of segment to get first audio packet
  while (endIndex <= bytes.byteLength) {
    // Look for a pair of start and end sync bytes in the data..
    // (the buffer end also counts as a packet boundary)
    if (bytes[startIndex] === SYNC_BYTE$1 && (bytes[endIndex] === SYNC_BYTE$1 || endIndex === bytes.byteLength)) {
      // We found a packet
      packet = bytes.subarray(startIndex, endIndex);
      type = probe$2.ts.parseType(packet, pmt.pid);

      switch (type) {
        case 'pes':
          pesType = probe$2.ts.parsePesType(packet, pmt.table);
          pusi = probe$2.ts.parsePayloadUnitStartIndicator(packet);

          // only payload-unit starts carry a PES header with timestamps
          if (pesType === 'audio' && pusi) {
            parsed = probe$2.ts.parsePesTime(packet);

            if (parsed) {
              parsed.type = 'audio';
              result.audio.push(parsed);
              endLoop = true;
            }
          }

          break;

        default:
          break;
      }

      if (endLoop) {
        break;
      }

      startIndex += MP2T_PACKET_LENGTH$1;
      endIndex += MP2T_PACKET_LENGTH$1;
      continue;
    }

    // If we get here, we have somehow become de-synchronized and we need to step
    // forward one byte at a time until we find a pair of sync bytes that denote
    // a packet
    startIndex++;
    endIndex++;
  }

  // Start walking from end of segment to get last audio packet
  endIndex = bytes.byteLength;
  startIndex = endIndex - MP2T_PACKET_LENGTH$1;
  endLoop = false;

  while (startIndex >= 0) {
    // Look for a pair of start and end sync bytes in the data..
    if (bytes[startIndex] === SYNC_BYTE$1 && (bytes[endIndex] === SYNC_BYTE$1 || endIndex === bytes.byteLength)) {
      // We found a packet
      packet = bytes.subarray(startIndex, endIndex);
      type = probe$2.ts.parseType(packet, pmt.pid);

      switch (type) {
        case 'pes':
          pesType = probe$2.ts.parsePesType(packet, pmt.table);
          pusi = probe$2.ts.parsePayloadUnitStartIndicator(packet);

          if (pesType === 'audio' && pusi) {
            parsed = probe$2.ts.parsePesTime(packet);

            if (parsed) {
              parsed.type = 'audio';
              result.audio.push(parsed);
              endLoop = true;
            }
          }

          break;

        default:
          break;
      }

      if (endLoop) {
        break;
      }

      startIndex -= MP2T_PACKET_LENGTH$1;
      endIndex -= MP2T_PACKET_LENGTH$1;
      continue;
    }

    // If we get here, we have somehow become de-synchronized and we need to
    // step backward one byte at a time until we find a pair of sync bytes
    // that denote a packet
    startIndex--;
    endIndex--;
  }
};
|
/**
 * walks through the segment data from the start and end to get timing information
 * for the first and last video pes packets as well as timing information for the first
 * key frame.
 */
var parseVideoPes_ = function parseVideoPes_(bytes, pmt, result) {
  var startIndex = 0,
      endIndex = MP2T_PACKET_LENGTH$1,
      packet,
      type,
      pesType,
      pusi,
      parsed,
      frame,
      i,
      pes;
  var endLoop = false;
  // accumulates TS packet payloads until the next payload-unit start so a
  // whole frame can be reassembled and scanned for a keyframe NAL
  var currentFrame = {
    data: [],
    size: 0
  };

  // Start walking from start of segment to get first video packet
  while (endIndex < bytes.byteLength) {
    // Look for a pair of start and end sync bytes in the data..
    if (bytes[startIndex] === SYNC_BYTE$1 && bytes[endIndex] === SYNC_BYTE$1) {
      // We found a packet
      packet = bytes.subarray(startIndex, endIndex);
      type = probe$2.ts.parseType(packet, pmt.pid);

      switch (type) {
        case 'pes':
          pesType = probe$2.ts.parsePesType(packet, pmt.table);
          pusi = probe$2.ts.parsePayloadUnitStartIndicator(packet);

          if (pesType === 'video') {
            // record the first video PES timestamp only once
            if (pusi && !endLoop) {
              parsed = probe$2.ts.parsePesTime(packet);

              if (parsed) {
                parsed.type = 'video';
                result.video.push(parsed);
                endLoop = true;
              }
            }

            if (!result.firstKeyFrame) {
              // a new payload unit terminates the frame being accumulated;
              // reassemble it and scan it before starting the next frame
              if (pusi) {
                if (currentFrame.size !== 0) {
                  frame = new Uint8Array(currentFrame.size);
                  i = 0;

                  while (currentFrame.data.length) {
                    pes = currentFrame.data.shift();
                    frame.set(pes, i);
                    i += pes.byteLength;
                  }

                  if (probe$2.ts.videoPacketContainsKeyFrame(frame)) {
                    result.firstKeyFrame = probe$2.ts.parsePesTime(frame);
                    result.firstKeyFrame.type = 'video';
                  }

                  currentFrame.size = 0;
                }
              }

              currentFrame.data.push(packet);
              currentFrame.size += packet.byteLength;
            }
          }

          break;

        default:
          break;
      }

      // stop once both the first timestamp and the first keyframe are known
      if (endLoop && result.firstKeyFrame) {
        break;
      }

      startIndex += MP2T_PACKET_LENGTH$1;
      endIndex += MP2T_PACKET_LENGTH$1;
      continue;
    }

    // If we get here, we have somehow become de-synchronized and we need to step
    // forward one byte at a time until we find a pair of sync bytes that denote
    // a packet
    startIndex++;
    endIndex++;
  }

  // Start walking from end of segment to get last video packet
  // NOTE(review): unlike parseAudioPes_, this backward walk does not accept
  // endIndex === bytes.byteLength as a packet boundary — confirm intended.
  endIndex = bytes.byteLength;
  startIndex = endIndex - MP2T_PACKET_LENGTH$1;
  endLoop = false;

  while (startIndex >= 0) {
    // Look for a pair of start and end sync bytes in the data..
    if (bytes[startIndex] === SYNC_BYTE$1 && bytes[endIndex] === SYNC_BYTE$1) {
      // We found a packet
      packet = bytes.subarray(startIndex, endIndex);
      type = probe$2.ts.parseType(packet, pmt.pid);

      switch (type) {
        case 'pes':
          pesType = probe$2.ts.parsePesType(packet, pmt.table);
          pusi = probe$2.ts.parsePayloadUnitStartIndicator(packet);

          if (pesType === 'video' && pusi) {
            parsed = probe$2.ts.parsePesTime(packet);

            if (parsed) {
              parsed.type = 'video';
              result.video.push(parsed);
              endLoop = true;
            }
          }

          break;

        default:
          break;
      }

      if (endLoop) {
        break;
      }

      startIndex -= MP2T_PACKET_LENGTH$1;
      endIndex -= MP2T_PACKET_LENGTH$1;
      continue;
    }

    // If we get here, we have somehow become de-synchronized and we need to
    // step backward one byte at a time until we find a pair of sync bytes
    // that denote a packet
    startIndex--;
    endIndex--;
  }
};
|
/**
 * Adjusts the timestamp information for the segment to account for
 * rollover and convert to seconds based on pes packet timescale (90khz clock)
 */
var adjustTimestamp_ = function adjustTimestamp_(segmentInfo, baseTimestamp) {
  if (segmentInfo.audio && segmentInfo.audio.length) {
    var audioBaseTimestamp = baseTimestamp;

    // without a reference timestamp, anchor rollover to the first packet
    if (typeof audioBaseTimestamp === 'undefined') {
      audioBaseTimestamp = segmentInfo.audio[0].dts;
    }

    segmentInfo.audio.forEach(function (info) {
      info.dts = handleRollover$1(info.dts, audioBaseTimestamp);
      info.pts = handleRollover$1(info.pts, audioBaseTimestamp);
      // time in seconds
      info.dtsTime = info.dts / PES_TIMESCALE;
      info.ptsTime = info.pts / PES_TIMESCALE;
    });
  }

  if (segmentInfo.video && segmentInfo.video.length) {
    var videoBaseTimestamp = baseTimestamp;

    if (typeof videoBaseTimestamp === 'undefined') {
      videoBaseTimestamp = segmentInfo.video[0].dts;
    }

    segmentInfo.video.forEach(function (info) {
      info.dts = handleRollover$1(info.dts, videoBaseTimestamp);
      info.pts = handleRollover$1(info.pts, videoBaseTimestamp);
      // time in seconds
      info.dtsTime = info.dts / PES_TIMESCALE;
      info.ptsTime = info.pts / PES_TIMESCALE;
    });

    if (segmentInfo.firstKeyFrame) {
      var frame = segmentInfo.firstKeyFrame;
      frame.dts = handleRollover$1(frame.dts, videoBaseTimestamp);
      frame.pts = handleRollover$1(frame.pts, videoBaseTimestamp);
      // time in seconds
      frame.dtsTime = frame.dts / PES_TIMESCALE;
      // Fix: ptsTime was previously computed from frame.dts, which reported
      // a wrong presentation time for any keyframe whose pts differs from
      // its dts (nonzero composition offset).
      frame.ptsTime = frame.pts / PES_TIMESCALE;
    }
  }
};
|
/**
 * inspects the aac data stream for start and end time information
 */
var inspectAac_ = function inspectAac_(bytes) {
  var endLoop = false,
      audioCount = 0,
      sampleRate = null,
      timestamp = null,
      frameSize = 0,
      byteIndex = 0,
      packet;

  while (bytes.length - byteIndex >= 3) {
    var type = probe$2.aac.parseType(bytes, byteIndex);

    switch (type) {
      case 'timed-metadata':
        // Exit early because we don't have enough to parse
        // the ID3 tag header
        if (bytes.length - byteIndex < 10) {
          endLoop = true;
          break;
        }

        frameSize = probe$2.aac.parseId3TagSize(bytes, byteIndex);

        // Exit early if we don't have enough in the buffer
        // to emit a full packet
        // NOTE(review): this compares against the total buffer length rather
        // than the bytes remaining after byteIndex — confirm intended.
        if (frameSize > bytes.length) {
          endLoop = true;
          break;
        }

        // the first ID3 tag seen provides the stream timestamp
        if (timestamp === null) {
          packet = bytes.subarray(byteIndex, byteIndex + frameSize);
          timestamp = probe$2.aac.parseAacTimestamp(packet);
        }

        byteIndex += frameSize;
        break;

      case 'audio':
        // Exit early because we don't have enough to parse
        // the ADTS frame header
        if (bytes.length - byteIndex < 7) {
          endLoop = true;
          break;
        }

        frameSize = probe$2.aac.parseAdtsSize(bytes, byteIndex);

        // Exit early if we don't have enough in the buffer
        // to emit a full packet
        if (frameSize > bytes.length) {
          endLoop = true;
          break;
        }

        // the first ADTS frame seen provides the sample rate
        if (sampleRate === null) {
          packet = bytes.subarray(byteIndex, byteIndex + frameSize);
          sampleRate = probe$2.aac.parseSampleRate(packet);
        }

        audioCount++;
        byteIndex += frameSize;
        break;

      default:
        // not at a recognizable frame boundary; resynchronize byte by byte
        byteIndex++;
        break;
    }

    if (endLoop) {
      return null;
    }
  }

  // both a timestamp and a sample rate are required to compute timing
  if (sampleRate === null || timestamp === null) {
    return null;
  }

  // 90kHz ticks per audio sample; each AAC frame carries 1024 samples, so
  // the end timestamp is start + frames * 1024 * ticks-per-sample
  var audioTimescale = PES_TIMESCALE / sampleRate;
  var result = {
    audio: [{
      type: 'audio',
      dts: timestamp,
      pts: timestamp
    }, {
      type: 'audio',
      dts: timestamp + audioCount * 1024 * audioTimescale,
      pts: timestamp + audioCount * 1024 * audioTimescale
    }]
  };
  return result;
};
|
/**
 * Inspects transport stream segment data for start and end time information
 * of the audio and video tracks (when present) as well as the first key
 * frame's start time.
 *
 * @param {Uint8Array} bytes the MPEG-TS byte stream
 * @return {Object} timing info keyed by `audio` and/or `video`
 */
var inspectTs_ = function inspectTs_(bytes) {
  var pmt = {
    pid: null,
    table: null
  };
  var result = {};

  parsePsi_(bytes, pmt);

  for (var pid in pmt.table) {
    if (!pmt.table.hasOwnProperty(pid)) {
      continue;
    }

    var streamType = pmt.table[pid];

    if (streamType === streamTypes.H264_STREAM_TYPE) {
      result.video = [];
      parseVideoPes_(bytes, pmt, result);

      // drop the track entirely when no video frames were found
      if (result.video.length === 0) {
        delete result.video;
      }
    } else if (streamType === streamTypes.ADTS_STREAM_TYPE) {
      result.audio = [];
      parseAudioPes_(bytes, pmt, result);

      // drop the track entirely when no audio frames were found
      if (result.audio.length === 0) {
        delete result.audio;
      }
    }
  }

  return result;
};
|
/**
 * Inspects segment byte data and returns an object with start and end timing
 * information.
 *
 * @param {Uint8Array} bytes The segment byte data
 * @param {Number} baseTimestamp Relative reference timestamp used when
 * adjusting frame timestamps for rollover. This value must be in 90khz clock.
 * @return {Object} Object containing start and end frame timing info of the
 * segment, or null when none could be extracted.
 */
var inspect = function inspect(bytes, baseTimestamp) {
  // choose the parser based on whether the payload looks like raw AAC or TS
  var result = probe$2.aac.isLikelyAacData(bytes) ? inspectAac_(bytes) : inspectTs_(bytes);

  if (!result) {
    return null;
  }

  if (!result.audio && !result.video) {
    return null;
  }

  adjustTimestamp_(result, baseTimestamp);
  return result;
};
|
|
|
// Public interface of the transport-stream inspector. `parseAudioPes_` is
// exposed (trailing-underscore convention) for internal/testing use only.
var tsInspector = {
  inspect: inspect,
  parseAudioPes_: parseAudioPes_
};
|
|
|
/* |
|
* pkcs7.pad |
|
* https://github.com/brightcove/pkcs7 |
|
* |
|
* Copyright (c) 2014 Brightcove |
|
* Licensed under the apache2 license. |
|
*/ |
|
/**
 * Strips PKCS#7 padding from a buffer of decrypted bytes.
 *
 * The final byte of a PKCS#7-padded buffer records how many padding bytes
 * were appended, so the unpadded data is everything before those bytes.
 *
 * @param padded {Uint8Array} unencrypted bytes that have been padded
 * @return {Uint8Array} the unpadded bytes
 * @see http://tools.ietf.org/html/rfc5652
 */
function unpad(padded) {
  var padLength = padded[padded.byteLength - 1];
  return padded.subarray(0, padded.byteLength - padLength);
}
|
|
|
// Babel helper: guards an ES5 "class" constructor against being invoked
// without `new`.
var classCallCheck = function classCallCheck(instance, Constructor) {
  var calledWithNew = instance instanceof Constructor;

  if (!calledWithNew) {
    throw new TypeError("Cannot call a class as a function");
  }
};
|
|
|
// Babel helper: defines prototype and static members with non-enumerable,
// configurable descriptors, mirroring native ES2015 class semantics.
var createClass = function () {
  function defineProperties(target, props) {
    props.forEach(function (descriptor) {
      descriptor.enumerable = descriptor.enumerable || false;
      descriptor.configurable = true;

      // data descriptors become writable; accessor descriptors are left alone
      if ("value" in descriptor) {
        descriptor.writable = true;
      }

      Object.defineProperty(target, descriptor.key, descriptor);
    });
  }

  return function (Constructor, protoProps, staticProps) {
    if (protoProps) {
      defineProperties(Constructor.prototype, protoProps);
    }

    if (staticProps) {
      defineProperties(Constructor, staticProps);
    }

    return Constructor;
  };
}();
|
|
|
// Babel helper: wires up prototype-chain inheritance between two ES5
// constructor functions, including static-side inheritance.
var inherits = function inherits(subClass, superClass) {
  var validSuper = typeof superClass === "function" || superClass === null;

  if (!validSuper) {
    throw new TypeError("Super expression must either be null or a function, not " + typeof superClass);
  }

  subClass.prototype = Object.create(superClass && superClass.prototype, {
    constructor: {
      value: subClass,
      enumerable: false,
      writable: true,
      configurable: true
    }
  });

  if (superClass) {
    if (Object.setPrototypeOf) {
      Object.setPrototypeOf(subClass, superClass);
    } else {
      subClass.__proto__ = superClass;
    }
  }
};
|
|
|
// Babel helper: implements the ES2015 rule that a constructor may return an
// object or function to override `this`; otherwise `this` itself is kept.
var possibleConstructorReturn = function possibleConstructorReturn(self, call) {
  if (!self) {
    throw new ReferenceError("this hasn't been initialised - super() hasn't been called");
  }

  var overridesThis = call && (typeof call === "object" || typeof call === "function");
  return overridesThis ? call : self;
};
|
/** |
|
* @file aes.js |
|
* |
|
* This file contains an adaptation of the AES decryption algorithm |
|
* from the Standford Javascript Cryptography Library. That work is |
|
* covered by the following copyright and permissions notice: |
|
* |
|
* Copyright 2009-2010 Emily Stark, Mike Hamburg, Dan Boneh. |
|
* All rights reserved. |
|
* |
|
* Redistribution and use in source and binary forms, with or without |
|
* modification, are permitted provided that the following conditions are |
|
* met: |
|
* |
|
* 1. Redistributions of source code must retain the above copyright |
|
* notice, this list of conditions and the following disclaimer. |
|
* |
|
* 2. Redistributions in binary form must reproduce the above |
|
* copyright notice, this list of conditions and the following |
|
* disclaimer in the documentation and/or other materials provided |
|
* with the distribution. |
|
* |
|
* THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR |
|
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED |
|
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE |
|
* DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> OR CONTRIBUTORS BE |
|
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
|
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
|
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR |
|
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, |
|
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE |
|
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN |
|
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
|
* |
|
* The views and conclusions contained in the software and documentation |
|
* are those of the authors and should not be interpreted as representing |
|
* official policies, either expressed or implied, of the authors. |
|
*/ |
|
|
|
/**
 * Expand the AES S-box and inverse S-box tables, plus the four round tables
 * used for the combined SubBytes/MixColumns step (SJCL-style).
 *
 * @return {Array} [encTables, decTables]; index 0-3 of each are round tables,
 * index 4 is the (inverse) S-box itself.
 * @private
 */
var precompute = function precompute() {
  var tables = [[[], [], [], [], []], [[], [], [], [], []]];
  var encTable = tables[0];
  var decTable = tables[1];
  var sbox = encTable[4];
  var sboxInv = decTable[4];
  var i = void 0;
  var x = void 0;
  var xInv = void 0;
  var d = []; // doubling table over GF(2^8): d[i] = i * 2 (with reduction by 283)
  var th = []; // inverse lookup for the update below: th[d[i] ^ i] = i
  var x2 = void 0;
  var x4 = void 0;
  var x8 = void 0;
  var s = void 0;
  var tEnc = void 0;
  var tDec = void 0; // Compute double and third tables

  for (i = 0; i < 256; i++) {
    th[(d[i] = i << 1 ^ (i >> 7) * 283) ^ i] = i;
  }

  // The update expressions advance x (via the doubling table) and its
  // counterpart xInv each pass; the loop stops when it cycles back to an x
  // whose sbox entry is already filled, by which point every entry is set.
  for (x = xInv = 0; !sbox[x]; x ^= x2 || 1, xInv = th[xInv] || 1) {
    // Compute sbox
    s = xInv ^ xInv << 1 ^ xInv << 2 ^ xInv << 3 ^ xInv << 4;
    s = s >> 8 ^ s & 255 ^ 99;
    sbox[x] = s;
    sboxInv[s] = x; // Compute MixColumns

    x8 = d[x4 = d[x2 = d[x]]];
    tDec = x8 * 0x1010101 ^ x4 * 0x10001 ^ x2 * 0x101 ^ x * 0x1010100;
    tEnc = d[s] * 0x101 ^ s * 0x1010100;

    // each of the four round tables is a byte-rotation of the previous one
    for (i = 0; i < 4; i++) {
      encTable[i][x] = tEnc = tEnc << 24 ^ tEnc >>> 8;
      decTable[i][s] = tDec = tDec << 24 ^ tDec >>> 8;
    }
  } // Compactify. Considerable speedup on Firefox.

  for (i = 0; i < 5; i++) {
    encTable[i] = encTable[i].slice(0);
    decTable[i] = decTable[i].slice(0);
  }

  return tables;
};
|
|
|
// Cached S-box/round tables shared by every AES instance; computed lazily on
// first construction (see precompute()) and copied per-instance.
var aesTables = null;
|
/**
 * Schedule out an AES key for both encryption and decryption. This
 * is a low-level class. Use a cipher mode to do bulk encryption.
 *
 * @class AES
 * @param key {Array} The key as an array of 4, 6 or 8 words.
 */
var AES = function () {
  function AES(key) {
    classCallCheck(this, AES);
    /**
     * The expanded S-box and inverse S-box tables. These will be computed
     * on the client so that we don't have to send them down the wire.
     *
     * There are two tables, _tables[0] is for encryption and
     * _tables[1] is for decryption.
     *
     * The first 4 sub-tables are the expanded S-box with MixColumns. The
     * last (_tables[01][4]) is the S-box itself.
     *
     * @private
     */
    // if we have yet to precompute the S-box tables
    // do so now

    if (!aesTables) {
      aesTables = precompute();
    } // then make a copy of that object for use, so instances can't corrupt
    // the shared tables


    this._tables = [[aesTables[0][0].slice(), aesTables[0][1].slice(), aesTables[0][2].slice(), aesTables[0][3].slice(), aesTables[0][4].slice()], [aesTables[1][0].slice(), aesTables[1][1].slice(), aesTables[1][2].slice(), aesTables[1][3].slice(), aesTables[1][4].slice()]];
    var i = void 0;
    var j = void 0;
    var tmp = void 0;
    var encKey = void 0;
    var decKey = void 0;
    var sbox = this._tables[0][4];
    var decTable = this._tables[1];
    var keyLen = key.length;
    var rcon = 1; // AES defines only 128/192/256-bit keys (4/6/8 32-bit words)

    if (keyLen !== 4 && keyLen !== 6 && keyLen !== 8) {
      throw new Error('Invalid aes key size');
    }

    encKey = key.slice(0);
    decKey = [];
    this._key = [encKey, decKey]; // schedule encryption keys
    // (4 * keyLen + 28 words covers all round keys for 10/12/14 rounds)

    for (i = keyLen; i < 4 * keyLen + 28; i++) {
      tmp = encKey[i - 1]; // apply sbox

      if (i % keyLen === 0 || keyLen === 8 && i % keyLen === 4) {
        tmp = sbox[tmp >>> 24] << 24 ^ sbox[tmp >> 16 & 255] << 16 ^ sbox[tmp >> 8 & 255] << 8 ^ sbox[tmp & 255]; // shift rows and add rcon

        if (i % keyLen === 0) {
          tmp = tmp << 8 ^ tmp >>> 24 ^ rcon << 24;
          rcon = rcon << 1 ^ (rcon >> 7) * 283;
        }
      }

      encKey[i] = encKey[i - keyLen] ^ tmp;
    } // schedule decryption keys by walking the encryption schedule backwards


    for (j = 0; i; j++, i--) {
      tmp = encKey[j & 3 ? i : i - 4];

      if (i <= 4 || j < 4) {
        decKey[j] = tmp;
      } else {
        decKey[j] = decTable[0][sbox[tmp >>> 24]] ^ decTable[1][sbox[tmp >> 16 & 255]] ^ decTable[2][sbox[tmp >> 8 & 255]] ^ decTable[3][sbox[tmp & 255]];
      }
    }
  }
  /**
   * Decrypt 16 bytes, specified as four 32-bit words.
   *
   * @param {Number} encrypted0 the first word to decrypt
   * @param {Number} encrypted1 the second word to decrypt
   * @param {Number} encrypted2 the third word to decrypt
   * @param {Number} encrypted3 the fourth word to decrypt
   * @param {Int32Array} out the array to write the decrypted words
   * into
   * @param {Number} offset the offset into the output array to start
   * writing results
   * @return {Array} The plaintext.
   */


  AES.prototype.decrypt = function decrypt(encrypted0, encrypted1, encrypted2, encrypted3, out, offset) {
    var key = this._key[1]; // state variables a,b,c,d are loaded with pre-whitened data
    // (note encrypted3 feeds b and encrypted1 feeds d — the inverse rounds
    // consume the state words in reverse order)

    var a = encrypted0 ^ key[0];
    var b = encrypted3 ^ key[1];
    var c = encrypted2 ^ key[2];
    var d = encrypted1 ^ key[3];
    var a2 = void 0;
    var b2 = void 0;
    var c2 = void 0; // key.length === 2 ?

    var nInnerRounds = key.length / 4 - 2;
    var i = void 0;
    var kIndex = 4;
    var table = this._tables[1]; // load up the tables

    var table0 = table[0];
    var table1 = table[1];
    var table2 = table[2];
    var table3 = table[3];
    var sbox = table[4]; // Inner rounds. Cribbed from OpenSSL.

    for (i = 0; i < nInnerRounds; i++) {
      a2 = table0[a >>> 24] ^ table1[b >> 16 & 255] ^ table2[c >> 8 & 255] ^ table3[d & 255] ^ key[kIndex];
      b2 = table0[b >>> 24] ^ table1[c >> 16 & 255] ^ table2[d >> 8 & 255] ^ table3[a & 255] ^ key[kIndex + 1];
      c2 = table0[c >>> 24] ^ table1[d >> 16 & 255] ^ table2[a >> 8 & 255] ^ table3[b & 255] ^ key[kIndex + 2];
      d = table0[d >>> 24] ^ table1[a >> 16 & 255] ^ table2[b >> 8 & 255] ^ table3[c & 255] ^ key[kIndex + 3];
      kIndex += 4;
      a = a2;
      b = b2;
      c = c2;
    } // Last round.


    for (i = 0; i < 4; i++) {
      // (3 & -i) maps i = 0,1,2,3 to output words 0,3,2,1
      out[(3 & -i) + offset] = sbox[a >>> 24] << 24 ^ sbox[b >> 16 & 255] << 16 ^ sbox[c >> 8 & 255] << 8 ^ sbox[d & 255] ^ key[kIndex++];
      a2 = a;
      a = b;
      b = c;
      c = d;
      d = a2;
    }
  };

  return AES;
}();
|
/** |
|
* @file stream.js |
|
*/ |
|
|
|
/**
 * A lightweight readable stream implementation that handles event dispatching.
 *
 * @class Stream
 */
var Stream$2 = function () {
  function Stream() {
    classCallCheck(this, Stream);
    this.listeners = {};
  }
  /**
   * Add a listener for a specified event type.
   *
   * @param {String} type the event name
   * @param {Function} listener the callback to be invoked when an event of
   * the specified type occurs
   */


  Stream.prototype.on = function on(type, listener) {
    if (!this.listeners[type]) {
      this.listeners[type] = [];
    }

    this.listeners[type].push(listener);
  };
  /**
   * Remove a listener for a specified event type.
   *
   * @param {String} type the event name
   * @param {Function} listener a function previously registered for this
   * type of event through `on`
   * @return {Boolean} if we could turn it off or not
   */


  Stream.prototype.off = function off(type, listener) {
    if (!this.listeners[type]) {
      return false;
    }

    var index = this.listeners[type].indexOf(listener); // Only remove when the listener was actually registered. Splicing
    // unconditionally with index -1 would delete the *last* listener
    // whenever an unknown listener is passed in.

    if (index > -1) {
      this.listeners[type].splice(index, 1);
    }

    return index > -1;
  };
  /**
   * Trigger an event of the specified type on this stream. Any additional
   * arguments to this function are passed as parameters to event listeners.
   *
   * @param {String} type the event name
   */


  Stream.prototype.trigger = function trigger(type) {
    var callbacks = this.listeners[type];

    if (!callbacks) {
      return;
    } // Slicing the arguments on every invocation of this method
    // can add a significant amount of overhead. Avoid the
    // intermediate object creation for the common case of a
    // single callback argument


    if (arguments.length === 2) {
      var length = callbacks.length;

      for (var i = 0; i < length; ++i) {
        callbacks[i].call(this, arguments[1]);
      }
    } else {
      var args = Array.prototype.slice.call(arguments, 1);
      var _length = callbacks.length;

      for (var _i = 0; _i < _length; ++_i) {
        callbacks[_i].apply(this, args);
      }
    }
  };
  /**
   * Destroys the stream and cleans up.
   */


  Stream.prototype.dispose = function dispose() {
    this.listeners = {};
  };
  /**
   * Forwards all `data` events on this stream to the destination stream. The
   * destination stream should provide a method `push` to receive the data
   * events as they arrive.
   *
   * @param {Stream} destination the stream that will receive all `data` events
   * @see http://nodejs.org/api/stream.html#stream_readable_pipe_destination_options
   */


  Stream.prototype.pipe = function pipe(destination) {
    this.on('data', function (data) {
      destination.push(data);
    });
  };

  return Stream;
}();
|
/** |
|
* @file async-stream.js |
|
*/ |
|
|
|
/**
 * A wrapper around the Stream class that uses setTimeout to run queued
 * stream "jobs" asynchronously.
 *
 * @class AsyncStream
 * @extends Stream
 */
var AsyncStream = function (_Stream) {
  inherits(AsyncStream, _Stream);

  function AsyncStream() {
    classCallCheck(this, AsyncStream);

    var _this = possibleConstructorReturn(this, _Stream.call(this, Stream$2));

    _this.jobs = [];
    _this.delay = 1;
    _this.timeout_ = null;
    return _this;
  }
  /**
   * Run the next queued job and, when more remain, schedule another tick.
   *
   * @private
   */


  AsyncStream.prototype.processJob_ = function processJob_() {
    var job = this.jobs.shift();
    job();

    this.timeout_ = this.jobs.length ? setTimeout(this.processJob_.bind(this), this.delay) : null;
  };
  /**
   * Queue a job, kicking off processing when the queue was idle.
   *
   * @param {Function} job the job to push into the stream
   */


  AsyncStream.prototype.push = function push(job) {
    this.jobs.push(job);

    if (this.timeout_ === null) {
      this.timeout_ = setTimeout(this.processJob_.bind(this), this.delay);
    }
  };

  return AsyncStream;
}(Stream$2);
|
/** |
|
* @file decrypter.js |
|
* |
|
* An asynchronous implementation of AES-128 CBC decryption with |
|
* PKCS#7 padding. |
|
*/ |
|
|
|
/**
 * Convert a 32-bit word between network (big-endian) and little-endian byte
 * order by reversing its four bytes.
 *
 * @param {Number} word the 32-bit value to byte-swap
 * @return {Number} the byte-swapped value (as a signed 32-bit integer)
 */
var ntoh = function ntoh(word) {
  var lowToHigh = word << 24;
  var midLow = (word & 0xff00) << 8;
  var midHigh = (word & 0xff0000) >> 8;
  var highToLow = word >>> 24;
  return lowToHigh | midLow | midHigh | highToLow;
};
|
/**
 * Decrypt bytes using AES-128 with CBC and PKCS#7 padding.
 *
 * @param {Uint8Array} encrypted the encrypted bytes
 * @param {Uint32Array} key the bytes of the decryption key
 * @param {Uint32Array} initVector the initialization vector (IV) to
 * use for the first round of CBC.
 * @return {Uint8Array} the decrypted bytes
 *
 * @see http://en.wikipedia.org/wiki/Advanced_Encryption_Standard
 * @see http://en.wikipedia.org/wiki/Block_cipher_mode_of_operation#Cipher_Block_Chaining_.28CBC.29
 * @see https://tools.ietf.org/html/rfc2315
 */
var decrypt = function decrypt(encrypted, key, initVector) {
  // word-level access to the encrypted bytes
  // (assumes encrypted.byteLength is a multiple of 16 — TODO confirm callers
  // always pass whole AES blocks)
  var encrypted32 = new Int32Array(encrypted.buffer, encrypted.byteOffset, encrypted.byteLength >> 2);
  var decipher = new AES(Array.prototype.slice.call(key)); // byte and word-level access for the decrypted output

  var decrypted = new Uint8Array(encrypted.byteLength);
  var decrypted32 = new Int32Array(decrypted.buffer); // temporary variables for working with the IV, encrypted, and
  // decrypted data

  var init0 = void 0;
  var init1 = void 0;
  var init2 = void 0;
  var init3 = void 0;
  var encrypted0 = void 0;
  var encrypted1 = void 0;
  var encrypted2 = void 0;
  var encrypted3 = void 0; // iteration variable

  var wordIx = void 0; // pull out the words of the IV to ensure we don't modify the
  // passed-in reference and easier access

  init0 = initVector[0];
  init1 = initVector[1];
  init2 = initVector[2];
  init3 = initVector[3]; // decrypt four word sequences, applying cipher-block chaining (CBC)
  // to each decrypted block

  for (wordIx = 0; wordIx < encrypted32.length; wordIx += 4) {
    // convert big-endian (network order) words into little-endian
    // (javascript order)
    encrypted0 = ntoh(encrypted32[wordIx]);
    encrypted1 = ntoh(encrypted32[wordIx + 1]);
    encrypted2 = ntoh(encrypted32[wordIx + 2]);
    encrypted3 = ntoh(encrypted32[wordIx + 3]); // decrypt the block

    decipher.decrypt(encrypted0, encrypted1, encrypted2, encrypted3, decrypted32, wordIx); // XOR with the IV, and restore network byte-order to obtain the
    // plaintext

    decrypted32[wordIx] = ntoh(decrypted32[wordIx] ^ init0);
    decrypted32[wordIx + 1] = ntoh(decrypted32[wordIx + 1] ^ init1);
    decrypted32[wordIx + 2] = ntoh(decrypted32[wordIx + 2] ^ init2);
    decrypted32[wordIx + 3] = ntoh(decrypted32[wordIx + 3] ^ init3); // setup the IV for the next round: CBC chains each block off the
    // previous ciphertext block

    init0 = encrypted0;
    init1 = encrypted1;
    init2 = encrypted2;
    init3 = encrypted3;
  }

  return decrypted;
};
|
/**
 * The `Decrypter` class that manages decryption of AES
 * data through `AsyncStream` objects and the `decrypt`
 * function
 *
 * @param {Uint8Array} encrypted the encrypted bytes
 * @param {Uint32Array} key the bytes of the decryption key
 * @param {Uint32Array} initVector the initialization vector (IV) to
 * @param {Function} done the function to run when done
 * @class Decrypter
 */
var Decrypter = function () {
  function Decrypter(encrypted, key, initVector, done) {
    classCallCheck(this, Decrypter);
    var step = Decrypter.STEP; // NOTE(review): STEP is applied to encrypted32, so it is used as a
    // 32-bit *word* count per chunk despite the STEP jsdoc saying bytes —
    // confirm intended units.
    // NOTE(review): views encrypted.buffer from offset 0 — presumably
    // callers always pass a non-offset Uint8Array; verify.

    var encrypted32 = new Int32Array(encrypted.buffer);
    var decrypted = new Uint8Array(encrypted.byteLength);
    var i = 0;
    this.asyncStream_ = new AsyncStream(); // split up the encryption job and do the individual chunks asynchronously

    this.asyncStream_.push(this.decryptChunk_(encrypted32.subarray(i, i + step), key, initVector, decrypted));

    // each later chunk's IV is the final ciphertext block (4 words) of the
    // previous chunk, per CBC chaining
    for (i = step; i < encrypted32.length; i += step) {
      initVector = new Uint32Array([ntoh(encrypted32[i - 4]), ntoh(encrypted32[i - 3]), ntoh(encrypted32[i - 2]), ntoh(encrypted32[i - 1])]);
      this.asyncStream_.push(this.decryptChunk_(encrypted32.subarray(i, i + step), key, initVector, decrypted));
    } // invoke the done() callback when everything is finished


    this.asyncStream_.push(function () {
      // remove pkcs#7 padding from the decrypted bytes
      done(null, unpad(decrypted));
    });
  }
  /**
   * a getter for step the maximum number of bytes to process at one time
   *
   * @return {Number} the value of step 32000
   */

  /**
   * Builds a job that decrypts one chunk and writes the plaintext into
   * `decrypted` at the chunk's byte offset.
   *
   * @private
   */


  Decrypter.prototype.decryptChunk_ = function decryptChunk_(encrypted, key, initVector, decrypted) {
    return function () {
      var bytes = decrypt(encrypted, key, initVector);
      decrypted.set(bytes, encrypted.byteOffset);
    };
  };

  createClass(Decrypter, null, [{
    key: 'STEP',
    get: function get$$1() {
      // 4 * 8000;
      return 32000;
    }
  }]);
  return Decrypter;
}();
|
|
|
/** |
|
* @videojs/http-streaming |
|
* @version 1.9.3 |
|
* @copyright 2019 Brightcove, Inc |
|
* @license Apache-2.0 |
|
*/ |
|
/** |
|
* @file resolve-url.js - Handling how URLs are resolved and manipulated |
|
*/ |
|
|
|
/**
 * Resolves a relative URL against a base URL, first making the base
 * absolute against the current page location when necessary.
 *
 * @param {String} baseURL the URL to resolve against
 * @param {String} relativeURL the URL to resolve
 * @return {String} the resolved, absolute URL
 */
var resolveUrl$1 = function resolveUrl(baseURL, relativeURL) {
  // return early if we don't need to resolve (already has a scheme)
  if (/^[a-z]+:/i.test(relativeURL)) {
    return relativeURL;
  }

  // if the base URL is relative then combine with the current location
  var absoluteBase = /\/\//i.test(baseURL) ? baseURL : urlToolkit.buildAbsoluteURL(window$1.location.href, baseURL);

  return urlToolkit.buildAbsoluteURL(absoluteBase, relativeURL);
};
|
/**
 * Checks whether xhr request was redirected and returns correct url depending
 * on `handleManifestRedirects` option
 *
 * @api private
 *
 * @param {Boolean} handleManifestRedirect - whether to follow redirected urls
 * @param {String} url - an url being requested
 * @param {XMLHttpRequest} req - xhr request result
 *
 * @return {String}
 */
var resolveManifestRedirect = function resolveManifestRedirect(handleManifestRedirect, url, req) {
  // To understand how the responseURL below is set and generated:
  // - https://fetch.spec.whatwg.org/#concept-response-url
  // - https://fetch.spec.whatwg.org/#atomic-http-redirect-handling
  var wasRedirected = Boolean(req.responseURL) && url !== req.responseURL;

  if (handleManifestRedirect && wasRedirected) {
    return req.responseURL;
  }

  return url;
};
|
|
|
// Babel helper (http-streaming copy): throws when an ES5 "class" constructor
// is invoked without `new`.
var classCallCheck$1 = function classCallCheck(instance, Constructor) {
  if (instance instanceof Constructor) {
    return;
  }

  throw new TypeError("Cannot call a class as a function");
};
|
|
|
// Babel helper (http-streaming copy): installs prototype and static members
// using property descriptors that match native class-member semantics.
var createClass$1 = function () {
  // Normalizes a descriptor in place: non-enumerable by default, always
  // configurable, and writable whenever it is a data descriptor.
  function prepareDescriptor(descriptor) {
    descriptor.enumerable = descriptor.enumerable || false;
    descriptor.configurable = true;

    if ("value" in descriptor) {
      descriptor.writable = true;
    }

    return descriptor;
  }

  function defineProperties(target, props) {
    for (var i = 0; i < props.length; i++) {
      Object.defineProperty(target, props[i].key, prepareDescriptor(props[i]));
    }
  }

  return function (Constructor, protoProps, staticProps) {
    if (protoProps) {
      defineProperties(Constructor.prototype, protoProps);
    }

    if (staticProps) {
      defineProperties(Constructor, staticProps);
    }

    return Constructor;
  };
}();
|
|
|
// Babel helper: implements `super.property` access — walks the prototype
// chain from `object` looking for `property`, invoking getters with
// `receiver` as `this`.
var get$1 = function get(object, property, receiver) {
  if (object === null) {
    object = Function.prototype;
  }

  var desc = Object.getOwnPropertyDescriptor(object, property);

  if (desc === undefined) {
    // not an own property; recurse up the prototype chain
    var parent = Object.getPrototypeOf(object);
    return parent === null ? undefined : get(parent, property, receiver);
  }

  if ("value" in desc) {
    return desc.value;
  }

  var getter = desc.get;
  return getter === undefined ? undefined : getter.call(receiver);
};
|
|
|
// Babel helper (http-streaming copy): establishes both instance-side and
// static-side inheritance between ES5 constructor functions.
var inherits$1 = function inherits(subClass, superClass) {
  if (typeof superClass !== "function" && superClass !== null) {
    throw new TypeError("Super expression must either be null or a function, not " + typeof superClass);
  }

  var superProto = superClass && superClass.prototype;

  subClass.prototype = Object.create(superProto, {
    constructor: {
      value: subClass,
      enumerable: false,
      writable: true,
      configurable: true
    }
  });

  if (!superClass) {
    return;
  }

  if (Object.setPrototypeOf) {
    Object.setPrototypeOf(subClass, superClass);
  } else {
    subClass.__proto__ = superClass;
  }
};
|
|
|
// Babel helper (http-streaming copy): resolves the value of `this` after a
// super() call, allowing a constructor to return a replacement object.
var possibleConstructorReturn$1 = function possibleConstructorReturn(self, call) {
  if (!self) {
    throw new ReferenceError("this hasn't been initialised - super() hasn't been called");
  }

  if (call && (typeof call === "object" || typeof call === "function")) {
    return call;
  }

  return self;
};
|
|
|
// Babel helper: implements array destructuring of iterables
// (`var [a, b] = expr`), collecting at most `i` values; arrays are returned
// as-is, and non-iterables throw.
var slicedToArray = function () {
  function takeFromIterator(iterable, limit) {
    var taken = [];
    var finishedNormally = true;
    var sawError = false;
    var thrownError = undefined;

    try {
      var iterator = iterable[Symbol.iterator]();

      for (;;) {
        var step = iterator.next();
        finishedNormally = step.done;

        if (step.done) {
          break;
        }

        taken.push(step.value);

        if (limit && taken.length === limit) {
          break;
        }
      }
    } catch (err) {
      sawError = true;
      thrownError = err;
    } finally {
      try {
        // close the iterator when we bailed out before exhaustion
        if (!finishedNormally && iterator["return"]) {
          iterator["return"]();
        }
      } finally {
        if (sawError) {
          throw thrownError;
        }
      }
    }

    return taken;
  }

  return function (arr, i) {
    if (Array.isArray(arr)) {
      return arr;
    }

    if (Symbol.iterator in Object(arr)) {
      return takeFromIterator(arr, i);
    }

    throw new TypeError("Invalid attempt to destructure non-iterable instance");
  };
}();
|
/** |
|
* @file playlist-loader.js |
|
* |
|
* A state machine that manages the loading, caching, and updating of |
|
* M3U8 playlists. |
|
* |
|
*/ |
|
|
|
|
|
// Local aliases for the video.js utilities used by the playlist-loader code
// below.
var mergeOptions$1 = videojs$1.mergeOptions,
    EventTarget$1 = videojs$1.EventTarget,
    log$1 = videojs$1.log;
|
/**
 * Loops through all supported media groups in master and calls the provided
 * callback for each group
 *
 * @param {Object} master
 *        The parsed master manifest object
 * @param {Function} callback
 *        Callback to call for each media group; invoked with
 *        (mediaProperties, mediaType, groupKey, labelKey)
 */
var forEachMediaGroup = function forEachMediaGroup(master, callback) {
  var supportedTypes = ['AUDIO', 'SUBTITLES'];

  supportedTypes.forEach(function (mediaType) {
    var groups = master.mediaGroups[mediaType];

    for (var groupKey in groups) {
      var group = groups[groupKey];

      for (var labelKey in group) {
        callback(group[labelKey], mediaType, groupKey, labelKey);
      }
    }
  });
};
|
/**
 * Returns a new array of segments that is the result of merging
 * properties from an older list of segments onto an updated
 * list. No properties on the updated playlist will be overridden.
 *
 * @param {Array} original the outdated list of segments
 * @param {Array} update the updated list of segments
 * @param {Number=} offset the index of the first update
 *        segment in the original segment list. Zero for non-live playlists;
 *        for live playlists, the difference between the media sequence
 *        numbers of the original and updated playlists.
 * @return a list of merged segment objects
 */
var updateSegments = function updateSegments(original, update, offset) {
  offset = offset || 0;

  var merged = update.slice();
  var end = Math.min(original.length, update.length + offset);

  for (var i = offset; i < end; i++) {
    var targetIndex = i - offset;
    merged[targetIndex] = mergeOptions$1(original[i], merged[targetIndex]);
  }

  return merged;
};
|
|
|
/**
 * Resolves the `resolvedUri` of a segment (and of its key and init-map, when
 * present) against a base URI, skipping entries already resolved.
 *
 * @param {Object} segment the segment to annotate (mutated in place)
 * @param {String} baseUri the URI to resolve relative segment URIs against
 */
var resolveSegmentUris = function resolveSegmentUris(segment, baseUri) {
  if (!segment.resolvedUri) {
    segment.resolvedUri = resolveUrl$1(baseUri, segment.uri);
  }

  var key = segment.key;

  if (key && !key.resolvedUri) {
    key.resolvedUri = resolveUrl$1(baseUri, key.uri);
  }

  var map = segment.map;

  if (map && !map.resolvedUri) {
    map.resolvedUri = resolveUrl$1(baseUri, map.uri);
  }
};
|
/**
 * Returns a new master playlist that is the result of merging an
 * updated media playlist into the original version. If the
 * updated media playlist does not match any of the playlist
 * entries in the original master playlist, null is returned.
 *
 * @param {Object} master a parsed master M3U8 object
 * @param {Object} media a parsed media M3U8 object
 * @return {Object} a new object that represents the original
 *         master playlist with the updated media playlist merged in, or
 *         null if the merge produced no change.
 */
var updateMaster = function updateMaster(master, media) {
  var result = mergeOptions$1(master, {});
  var playlist = result.playlists[media.uri];

  if (!playlist) {
    return null;
  }

  // consider the playlist unchanged if the number of segments is equal, the
  // media sequence number is unchanged, and this playlist hasn't become the
  // end of the playlist
  var unchanged = playlist.segments &&
      media.segments &&
      playlist.segments.length === media.segments.length &&
      playlist.endList === media.endList &&
      playlist.mediaSequence === media.mediaSequence;

  if (unchanged) {
    return null;
  }

  var mergedPlaylist = mergeOptions$1(playlist, media);

  // if the update could overlap existing segment information, merge the two
  // segment lists
  if (playlist.segments) {
    mergedPlaylist.segments = updateSegments(playlist.segments, media.segments, media.mediaSequence - playlist.mediaSequence);
  }

  // resolve any segment URIs to prevent us from having to do it later
  mergedPlaylist.segments.forEach(function (segment) {
    resolveSegmentUris(segment, mergedPlaylist.resolvedUri);
  });

  // TODO: the playlists array holds two references to each playlist — one by
  // index and one by URI; the index reference may no longer be necessary.
  for (var i = 0; i < result.playlists.length; i++) {
    if (result.playlists[i].uri === media.uri) {
      result.playlists[i] = mergedPlaylist;
    }
  }

  result.playlists[media.uri] = mergedPlaylist;
  return result;
};
|
|
|
/**
 * Annotates each playlist in a master manifest with a by-URI lookup entry, a
 * resolved URI, a numeric id, and a (possibly empty) attributes object.
 *
 * @param {Object} master the parsed master playlist (mutated in place)
 */
var setupMediaPlaylists = function setupMediaPlaylists(master) {
  // setup by-URI lookups and resolve media playlist URIs
  var i = master.playlists.length;

  while (i--) {
    var playlist = master.playlists[i];

    master.playlists[playlist.uri] = playlist;
    playlist.resolvedUri = resolveUrl$1(master.uri, playlist.uri);
    playlist.id = i;

    if (playlist.attributes) {
      continue;
    }

    // Although the spec states an #EXT-X-STREAM-INF tag MUST have a
    // BANDWIDTH attribute, we can play the stream without it. This means a
    // poorly formatted master playlist may not have an attribute list. An
    // attributes property is added here to prevent undefined references when
    // we encounter this scenario.
    playlist.attributes = {};
    log$1.warn('Invalid playlist STREAM-INF detected. Missing BANDWIDTH attribute.');
  }
};
|
|
|
/**
 * Resolve the URI of every media-group entry (AUDIO/VIDEO/etc.) on a
 * master playlist, storing the result as `resolvedUri` on each entry
 * that declares a `uri`.
 *
 * @param {Object} master the parsed master playlist object (mutated in place)
 */
var resolveMediaGroupUris = function resolveMediaGroupUris(master) {
  forEachMediaGroup(master, function (properties) {
    if (!properties.uri) {
      return;
    }
    properties.resolvedUri = resolveUrl$1(master.uri, properties.uri);
  });
};
|
/** |
|
* Calculates the time to wait before refreshing a live playlist |
|
* |
|
* @param {Object} media |
|
* The current media |
|
* @param {Boolean} update |
|
* True if there were any updates from the last refresh, false otherwise |
|
* @return {Number} |
|
* The time in ms to wait before refreshing the live playlist |
|
*/ |
|
|
|
|
|
/**
 * Calculates the time to wait before refreshing a live playlist.
 *
 * @param {Object} media
 *        The current media playlist
 * @param {Boolean} update
 *        True if there were any updates from the last refresh, false otherwise
 * @return {Number}
 *         The time in ms to wait before refreshing the live playlist
 */
var refreshDelay = function refreshDelay(media, update) {
  var finalSegment = media.segments[media.segments.length - 1];

  // when the playlist changed, wait one last-segment duration
  if (update && finalSegment && finalSegment.duration) {
    return finalSegment.duration * 1000;
  }

  // if the playlist is unchanged since the last reload or last segment duration
  // cannot be determined, try again after half the target duration
  return (media.targetDuration || 10) * 500;
};
|
/** |
|
* Load a playlist from a remote location |
|
* |
|
* @class PlaylistLoader |
|
* @extends Stream |
|
* @param {String} srcUrl the url to start with |
|
* @param {Boolean} withCredentials the withCredentials xhr option |
|
* @constructor |
|
*/ |
|
|
|
|
|
var PlaylistLoader = function (_EventTarget) {
  inherits$1(PlaylistLoader, _EventTarget);

  /**
   * @param {String} srcUrl the url of the master or media playlist to start from
   * @param {Object} hls the source handler; supplies `xhr` and `options_`
   * @param {Object} [options] loader options
   * @param {Boolean} [options.withCredentials=false] the withCredentials xhr option
   * @param {Boolean} [options.handleManifestRedirects=false] whether to follow
   *        redirected manifest URLs when resolving segment URIs
   * @throws {Error} when `srcUrl` is empty
   */
  function PlaylistLoader(srcUrl, hls) {
    var options = arguments.length > 2 && arguments[2] !== undefined ? arguments[2] : {};
    classCallCheck$1(this, PlaylistLoader);

    var _this = possibleConstructorReturn$1(this, (PlaylistLoader.__proto__ || Object.getPrototypeOf(PlaylistLoader)).call(this));

    // transpiled destructuring of `options` with defaults
    var _options$withCredenti = options.withCredentials,
        withCredentials = _options$withCredenti === undefined ? false : _options$withCredenti,
        _options$handleManife = options.handleManifestRedirects,
        handleManifestRedirects = _options$handleManife === undefined ? false : _options$handleManife;

    _this.srcUrl = srcUrl;
    _this.hls_ = hls;
    _this.withCredentials = withCredentials;
    _this.handleManifestRedirects = handleManifestRedirects;

    var hlsOptions = hls.options_;

    // optional user-supplied m3u8-parser extensions
    _this.customTagParsers = hlsOptions && hlsOptions.customTagParsers || [];
    _this.customTagMappers = hlsOptions && hlsOptions.customTagMappers || [];

    if (!_this.srcUrl) {
      throw new Error('A non-empty playlist URL is required');
    }

    // initialize the loader state
    _this.state = 'HAVE_NOTHING';

    // live playlist staleness timeout: re-request the active media
    // playlist when the refresh timer fires
    _this.on('mediaupdatetimeout', function () {
      if (_this.state !== 'HAVE_METADATA') {
        // only refresh the media playlist if no other activity is going on
        return;
      }

      _this.state = 'HAVE_CURRENT_METADATA';
      _this.request = _this.hls_.xhr({
        uri: resolveUrl$1(_this.master.uri, _this.media().uri),
        withCredentials: _this.withCredentials
      }, function (error, req) {
        // disposed while the request was in flight
        if (!_this.request) {
          return;
        }

        if (error) {
          return _this.playlistRequestError(_this.request, _this.media().uri, 'HAVE_METADATA');
        }

        _this.haveMetadata(_this.request, _this.media().uri);
      });
    });

    return _this;
  }

  createClass$1(PlaylistLoader, [{
    key: 'playlistRequestError',

    /**
     * Record a playlist request failure on `this.error` and fire `error`.
     *
     * @param {Object} xhr the failed request (provides status/responseText)
     * @param {String} url the URL of the playlist that failed
     * @param {String} [startingState] state to restore before erroring
     */
    value: function playlistRequestError(xhr, url, startingState) {
      // any in-flight request is now finished
      this.request = null;

      if (startingState) {
        this.state = startingState;
      }

      this.error = {
        playlist: this.master.playlists[url],
        status: xhr.status,
        message: 'HLS playlist request error at URL: ' + url,
        responseText: xhr.responseText,
        // 4 = MEDIA_ERR_ABORTED-style fatal for 5xx, 2 = MEDIA_ERR_NETWORK otherwise
        code: xhr.status >= 500 ? 4 : 2
      };
      this.trigger('error');
    }

    // update the playlist loader's state in response to a new or
    // updated playlist.

  }, {
    key: 'haveMetadata',

    /**
     * Parse a media playlist response, merge it into the master, and
     * schedule the next live refresh if the playlist is not ended.
     *
     * @param {Object} xhr the completed request (provides responseText)
     * @param {String} url the URL the playlist was requested from
     */
    value: function haveMetadata(xhr, url) {
      var _this2 = this;

      // any in-flight request is now finished
      this.request = null;
      this.state = 'HAVE_METADATA';

      var parser = new Parser();

      // adding custom tag parsers
      this.customTagParsers.forEach(function (customParser) {
        return parser.addParser(customParser);
      });

      // adding custom tag mappers
      this.customTagMappers.forEach(function (mapper) {
        return parser.addTagMapper(mapper);
      });

      parser.push(xhr.responseText);
      parser.end();
      parser.manifest.uri = url;

      // m3u8-parser does not attach an attributes property to media playlists so make
      // sure that the property is attached to avoid undefined reference errors
      parser.manifest.attributes = parser.manifest.attributes || {};

      // merge this playlist into the master; updateMaster returns null
      // when nothing changed
      var update = updateMaster(this.master, parser.manifest);

      this.targetDuration = parser.manifest.targetDuration;

      if (update) {
        this.master = update;
        this.media_ = this.master.playlists[parser.manifest.uri];
      } else {
        this.trigger('playlistunchanged');
      }

      // refresh live playlists after a target duration passes
      if (!this.media().endList) {
        window$1.clearTimeout(this.mediaUpdateTimeout);
        this.mediaUpdateTimeout = window$1.setTimeout(function () {
          _this2.trigger('mediaupdatetimeout');
        }, refreshDelay(this.media(), !!update));
      }

      this.trigger('loadedplaylist');
    }

    /**
     * Abort any outstanding work and clean up.
     */

  }, {
    key: 'dispose',
    value: function dispose() {
      this.stopRequest();
      window$1.clearTimeout(this.mediaUpdateTimeout);
    }
  }, {
    key: 'stopRequest',

    /**
     * Abort the in-flight request, if any, detaching its callback first
     * so the abort does not trigger error handling.
     */
    value: function stopRequest() {
      if (this.request) {
        var oldRequest = this.request;

        // null the reference before aborting so callbacks that check
        // `this.request` treat the loader as disposed
        this.request = null;
        oldRequest.onreadystatechange = null;
        oldRequest.abort();
      }
    }

    /**
     * When called without any arguments, returns the currently
     * active media playlist. When called with a single argument,
     * triggers the playlist loader to asynchronously switch to the
     * specified media playlist. Calling this method while the
     * loader is in the HAVE_NOTHING causes an error to be emitted
     * but otherwise has no effect.
     *
     * @param {Object=} playlist the parsed media playlist
     * object to switch to
     * @return {Playlist} the current loaded media
     */

  }, {
    key: 'media',
    value: function media(playlist) {
      var _this3 = this;

      // getter
      if (!playlist) {
        return this.media_;
      }

      // setter
      if (this.state === 'HAVE_NOTHING') {
        throw new Error('Cannot switch media playlist from ' + this.state);
      }

      // remember the state so it can be restored if the request fails
      var startingState = this.state;

      // find the playlist object if the target playlist has been
      // specified by URI
      if (typeof playlist === 'string') {
        if (!this.master.playlists[playlist]) {
          throw new Error('Unknown playlist URI: ' + playlist);
        }

        playlist = this.master.playlists[playlist];
      }

      var mediaChange = !this.media_ || playlist.uri !== this.media_.uri;

      // switch to fully loaded playlists immediately
      if (this.master.playlists[playlist.uri].endList) {
        // abort outstanding playlist requests
        if (this.request) {
          this.request.onreadystatechange = null;
          this.request.abort();
          this.request = null;
        }

        this.state = 'HAVE_METADATA';
        this.media_ = playlist;

        // trigger media change if the active media has been updated
        if (mediaChange) {
          this.trigger('mediachanging');
          this.trigger('mediachange');
        }

        return;
      }

      // switching to the active playlist is a no-op
      if (!mediaChange) {
        return;
      }

      this.state = 'SWITCHING_MEDIA';

      // there is already an outstanding playlist request
      if (this.request) {
        if (playlist.resolvedUri === this.request.url) {
          // requesting to switch to the same playlist multiple times
          // has no effect after the first
          return;
        }

        this.request.onreadystatechange = null;
        this.request.abort();
        this.request = null;
      }

      // request the new playlist
      if (this.media_) {
        this.trigger('mediachanging');
      }

      this.request = this.hls_.xhr({
        uri: playlist.resolvedUri,
        withCredentials: this.withCredentials
      }, function (error, req) {
        // disposed while the request was in flight
        if (!_this3.request) {
          return;
        }

        playlist.resolvedUri = resolveManifestRedirect(_this3.handleManifestRedirects, playlist.resolvedUri, req);

        if (error) {
          return _this3.playlistRequestError(_this3.request, playlist.uri, startingState);
        }

        _this3.haveMetadata(req, playlist.uri);

        // fire loadedmetadata the first time a media playlist is loaded
        if (startingState === 'HAVE_MASTER') {
          _this3.trigger('loadedmetadata');
        } else {
          _this3.trigger('mediachange');
        }
      });
    }

    /**
     * pause loading of the playlist
     */

  }, {
    key: 'pause',
    value: function pause() {
      this.stopRequest();
      window$1.clearTimeout(this.mediaUpdateTimeout);

      if (this.state === 'HAVE_NOTHING') {
        // If we pause the loader before any data has been retrieved, its as if we never
        // started, so reset to an unstarted state.
        this.started = false;
      }

      // Need to restore state now that no activity is happening
      if (this.state === 'SWITCHING_MEDIA') {
        // if the loader was in the process of switching media, it should either return to
        // HAVE_MASTER or HAVE_METADATA depending on if the loader has loaded a media
        // playlist yet. This is determined by the existence of loader.media_
        if (this.media_) {
          this.state = 'HAVE_METADATA';
        } else {
          this.state = 'HAVE_MASTER';
        }
      } else if (this.state === 'HAVE_CURRENT_METADATA') {
        this.state = 'HAVE_METADATA';
      }
    }

    /**
     * start (or resume) loading of the playlist
     *
     * @param {Boolean} [isFinalRendition] when true, delay the reload by
     *        half a target duration instead of loading immediately
     */

  }, {
    key: 'load',
    value: function load(isFinalRendition) {
      var _this4 = this;

      window$1.clearTimeout(this.mediaUpdateTimeout);
      var media = this.media();

      if (isFinalRendition) {
        // back off before retrying the only remaining rendition
        var delay = media ? media.targetDuration / 2 * 1000 : 5 * 1000;
        this.mediaUpdateTimeout = window$1.setTimeout(function () {
          return _this4.load();
        }, delay);
        return;
      }

      if (!this.started) {
        this.start();
        return;
      }

      if (media && !media.endList) {
        // live playlist: refresh via the staleness-timeout handler
        this.trigger('mediaupdatetimeout');
      } else {
        this.trigger('loadedplaylist');
      }
    }

    /**
     * start loading of the playlist: request `srcUrl` and handle either a
     * master playlist response or a bare media playlist (for which a
     * single-entry master is inferred)
     */

  }, {
    key: 'start',
    value: function start() {
      var _this5 = this;

      this.started = true;

      // request the specified URL
      this.request = this.hls_.xhr({
        uri: this.srcUrl,
        withCredentials: this.withCredentials
      }, function (error, req) {
        // disposed while the request was in flight
        if (!_this5.request) {
          return;
        }

        // clear the loader's request reference
        _this5.request = null;

        if (error) {
          _this5.error = {
            status: req.status,
            message: 'HLS playlist request error at URL: ' + _this5.srcUrl,
            responseText: req.responseText,
            // MEDIA_ERR_NETWORK
            code: 2
          };

          if (_this5.state === 'HAVE_NOTHING') {
            _this5.started = false;
          }

          return _this5.trigger('error');
        }

        var parser = new Parser();

        // adding custom tag parsers
        _this5.customTagParsers.forEach(function (customParser) {
          return parser.addParser(customParser);
        });

        // adding custom tag mappers
        _this5.customTagMappers.forEach(function (mapper) {
          return parser.addTagMapper(mapper);
        });

        parser.push(req.responseText);
        parser.end();
        _this5.state = 'HAVE_MASTER';
        _this5.srcUrl = resolveManifestRedirect(_this5.handleManifestRedirects, _this5.srcUrl, req);
        parser.manifest.uri = _this5.srcUrl;

        // loaded a master playlist
        if (parser.manifest.playlists) {
          _this5.master = parser.manifest;
          setupMediaPlaylists(_this5.master);
          resolveMediaGroupUris(_this5.master);

          _this5.trigger('loadedplaylist');

          if (!_this5.request) {
            // no media playlist was specifically selected so start
            // from the first listed one
            _this5.media(parser.manifest.playlists[0]);
          }

          return;
        }

        // loaded a media playlist
        // infer a master playlist if none was previously requested
        _this5.master = {
          mediaGroups: {
            'AUDIO': {},
            'VIDEO': {},
            'CLOSED-CAPTIONS': {},
            'SUBTITLES': {}
          },
          uri: window$1.location.href,
          playlists: [{
            uri: _this5.srcUrl,
            id: 0,
            resolvedUri: _this5.srcUrl,
            // m3u8-parser does not attach an attributes property to media playlists so make
            // sure that the property is attached to avoid undefined reference errors
            attributes: {}
          }]
        };
        _this5.master.playlists[_this5.srcUrl] = _this5.master.playlists[0];

        _this5.haveMetadata(req, _this5.srcUrl);

        return _this5.trigger('loadedmetadata');
      });
    }
  }]);
  return PlaylistLoader;
}(EventTarget$1);
|
/** |
|
* @file playlist.js |
|
* |
|
* Playlist related utilities. |
|
*/ |
|
|
|
|
|
var createTimeRange = videojs$1.createTimeRange; |
|
/** |
|
* walk backward until we find a duration we can use |
|
* or return a failure |
|
* |
|
* @param {Playlist} playlist the playlist to walk through |
|
* @param {Number} endSequence the mediaSequence to stop walking on |
|
*/ |
|
|
|
var backwardDuration = function backwardDuration(playlist, endSequence) { |
|
var result = 0; |
|
var i = endSequence - playlist.mediaSequence; // if a start time is available for segment immediately following |
|
// the interval, use it |
|
|
|
var segment = playlist.segments[i]; // Walk backward until we find the latest segment with timeline |
|
// information that is earlier than endSequence |
|
|
|
if (segment) { |
|
if (typeof segment.start !== 'undefined') { |
|
return { |
|
result: segment.start, |
|
precise: true |
|
}; |
|
} |
|
|
|
if (typeof segment.end !== 'undefined') { |
|
return { |
|
result: segment.end - segment.duration, |
|
precise: true |
|
}; |
|
} |
|
} |
|
|
|
while (i--) { |
|
segment = playlist.segments[i]; |
|
|
|
if (typeof segment.end !== 'undefined') { |
|
return { |
|
result: result + segment.end, |
|
precise: true |
|
}; |
|
} |
|
|
|
result += segment.duration; |
|
|
|
if (typeof segment.start !== 'undefined') { |
|
return { |
|
result: result + segment.start, |
|
precise: true |
|
}; |
|
} |
|
} |
|
|
|
return { |
|
result: result, |
|
precise: false |
|
}; |
|
}; |
|
/** |
|
* walk forward until we find a duration we can use |
|
* or return a failure |
|
* |
|
* @param {Playlist} playlist the playlist to walk through |
|
* @param {Number} endSequence the mediaSequence to stop walking on |
|
*/ |
|
|
|
|
|
/**
 * Walk forward from `endSequence` until a segment with precise timing
 * information (a known `start` or `end`) is found, or report failure.
 *
 * @param {Playlist} playlist the playlist to walk through
 * @param {Number} endSequence the mediaSequence to stop walking on
 * @return {{result: Number, precise: Boolean}} the estimated duration
 *         (-1 when no precise estimate could be made)
 */
var forwardDuration = function forwardDuration(playlist, endSequence) {
  var accumulated = 0;
  var index = endSequence - playlist.mediaSequence;
  var segment;

  // walk forward until we find the earliest segment with timeline information
  for (; index < playlist.segments.length; index++) {
    segment = playlist.segments[index];

    if (segment.start !== undefined) {
      return { result: segment.start - accumulated, precise: true };
    }

    accumulated += segment.duration;

    if (segment.end !== undefined) {
      return { result: segment.end - accumulated, precise: true };
    }
  }

  // indicate we didn't find a useful duration estimate
  return { result: -1, precise: false };
};
|
/** |
|
* Calculate the media duration from the segments associated with a |
|
* playlist. The duration of a subinterval of the available segments |
|
* may be calculated by specifying an end index. |
|
* |
|
* @param {Object} playlist a media playlist object |
|
* @param {Number=} endSequence an exclusive upper boundary |
|
* for the playlist. Defaults to playlist length. |
|
* @param {Number} expired the amount of time that has dropped |
|
* off the front of the playlist in a live scenario |
|
* @return {Number} the duration between the first available segment |
|
* and end index. |
|
*/ |
|
|
|
|
|
/**
 * Calculate the media duration from the segments associated with a
 * playlist. The duration of a subinterval of the available segments
 * may be calculated by specifying an end sequence.
 *
 * @param {Object} playlist a media playlist object
 * @param {Number=} endSequence an exclusive upper boundary
 * for the playlist. Defaults to playlist mediaSequence plus length.
 * @param {Number} expired the amount of time that has dropped
 * off the front of the playlist in a live scenario
 * @return {Number} the duration between the first available segment
 * and the end sequence.
 */
var intervalDuration = function intervalDuration(playlist, endSequence, expired) {
  if (endSequence === undefined) {
    endSequence = playlist.mediaSequence + playlist.segments.length;
  }

  if (endSequence < playlist.mediaSequence) {
    return 0;
  }

  // prefer a backward walk: precise timing from the Media Source wins
  var backward = backwardDuration(playlist, endSequence);

  if (backward.precise) {
    return backward.result;
  }

  // otherwise see if a forward walk yields a precise estimate
  var forward = forwardDuration(playlist, endSequence);

  if (forward.precise) {
    return forward.result;
  }

  // fall back to the less-precise, playlist-based duration estimate
  return backward.result + expired;
};
|
/** |
|
* Calculates the duration of a playlist. If a start and end index |
|
* are specified, the duration will be for the subset of the media |
|
* timeline between those two indices. The total duration for live |
|
* playlists is always Infinity. |
|
* |
|
* @param {Object} playlist a media playlist object |
|
* @param {Number=} endSequence an exclusive upper |
|
* boundary for the playlist. Defaults to the playlist media |
|
* sequence number plus its length. |
|
* @param {Number=} expired the amount of time that has |
|
* dropped off the front of the playlist in a live scenario |
|
* @return {Number} the duration between the start index and end |
|
* index. |
|
*/ |
|
|
|
|
|
/**
 * Calculates the duration of a playlist. If an end sequence is
 * specified, the duration is that of the subset of the media timeline
 * up to that sequence. The total duration for live playlists is
 * always Infinity.
 *
 * @param {Object} playlist a media playlist object
 * @param {Number=} endSequence an exclusive upper boundary for the
 * playlist. Defaults to the playlist media sequence number plus its length.
 * @param {Number=} expired the amount of time that has dropped off the
 * front of the playlist in a live scenario
 * @return {Number} the duration between the start and the end sequence
 */
var duration = function duration(playlist, endSequence, expired) {
  if (!playlist) {
    return 0;
  }

  if (typeof expired !== 'number') {
    expired = 0;
  }

  // if a slice of the total duration is not requested, use
  // playlist-level duration indicators when they're present
  if (typeof endSequence === 'undefined') {
    // if present, use the duration specified in the playlist
    if (playlist.totalDuration) {
      return playlist.totalDuration;
    }

    // duration should be Infinity for live playlists
    if (!playlist.endList) {
      return window$1.Infinity;
    }
  }

  // otherwise, total up the segment durations
  return intervalDuration(playlist, endSequence, expired);
};
|
/** |
|
* Calculate the time between two indexes in the current playlist |
|
 * neither the start- nor the end-index need to be within the current
|
* playlist in which case, the targetDuration of the playlist is used |
|
* to approximate the durations of the segments |
|
* |
|
* @param {Object} playlist a media playlist object |
|
* @param {Number} startIndex |
|
* @param {Number} endIndex |
|
* @return {Number} the number of seconds between startIndex and endIndex |
|
*/ |
|
|
|
|
|
/**
 * Sum the durations of the segments between two indices of a playlist.
 * Indices may be supplied in either order; negative indices are counted
 * as one target duration each.
 *
 * @param {Object} playlist a media playlist object
 * @param {Number} startIndex
 * @param {Number} endIndex
 * @return {Number} the number of seconds between startIndex and endIndex
 */
var sumDurations = function sumDurations(playlist, startIndex, endIndex) {
  var total = 0;

  // normalize so startIndex <= endIndex
  if (startIndex > endIndex) {
    var swap = startIndex;
    startIndex = endIndex;
    endIndex = swap;
  }

  // indices before the start of the playlist contribute a target
  // duration each
  if (startIndex < 0) {
    for (var virtualIndex = startIndex; virtualIndex < Math.min(0, endIndex); virtualIndex++) {
      total += playlist.targetDuration;
    }

    startIndex = 0;
  }

  for (var segmentIndex = startIndex; segmentIndex < endIndex; segmentIndex++) {
    total += playlist.segments[segmentIndex].duration;
  }

  return total;
};
|
/** |
|
* Determines the media index of the segment corresponding to the safe edge of the live |
|
* window which is the duration of the last segment plus 2 target durations from the end |
|
* of the playlist. |
|
* |
|
* @param {Object} playlist |
|
* a media playlist object |
|
* @return {Number} |
|
* The media index of the segment at the safe live point. 0 if there is no "safe" |
|
* point. |
|
* @function safeLiveIndex |
|
*/ |
|
|
|
|
|
/**
 * Determines the media index of the segment corresponding to the safe edge of the live
 * window, which is the duration of the last segment plus 2 target durations from the end
 * of the playlist.
 *
 * @param {Object} playlist
 *        a media playlist object
 * @return {Number}
 *         The media index of the segment at the safe live point. 0 if there is no "safe"
 *         point.
 * @function safeLiveIndex
 */
var safeLiveIndex = function safeLiveIndex(playlist) {
  if (!playlist.segments.length) {
    return 0;
  }

  var index = playlist.segments.length - 1;
  // last segment's duration, or the target duration when unknown
  var distanceFromEnd = playlist.segments[index].duration || playlist.targetDuration;
  var safeDistance = distanceFromEnd + playlist.targetDuration * 2;

  // walk back until enough cumulative duration separates us from the live edge
  while (index--) {
    distanceFromEnd += playlist.segments[index].duration;

    if (distanceFromEnd >= safeDistance) {
      break;
    }
  }

  return Math.max(0, index);
};
|
/** |
|
* Calculates the playlist end time |
|
* |
|
* @param {Object} playlist a media playlist object |
|
* @param {Number=} expired the amount of time that has |
|
* dropped off the front of the playlist in a live scenario |
|
* @param {Boolean|false} useSafeLiveEnd a boolean value indicating whether or not the |
|
* playlist end calculation should consider the safe live end |
|
* (truncate the playlist end by three segments). This is normally |
|
* used for calculating the end of the playlist's seekable range. |
|
* @returns {Number} the end time of playlist |
|
* @function playlistEnd |
|
*/ |
|
|
|
|
|
/**
 * Calculates the playlist end time.
 *
 * @param {Object} playlist a media playlist object
 * @param {Number=} expired the amount of time that has
 *        dropped off the front of the playlist in a live scenario
 * @param {Boolean|false} useSafeLiveEnd whether the end calculation should
 *        stop at the safe live point rather than the last segment
 * @returns {Number} the end time of playlist, or null when it cannot be
 *          determined
 * @function playlistEnd
 */
var playlistEnd = function playlistEnd(playlist, expired, useSafeLiveEnd) {
  if (!playlist || !playlist.segments) {
    return null;
  }

  // VOD playlists end at their total duration
  if (playlist.endList) {
    return duration(playlist);
  }

  if (expired === null) {
    return null;
  }

  expired = expired || 0;

  var endSequence = useSafeLiveEnd ? safeLiveIndex(playlist) : playlist.segments.length;

  return intervalDuration(playlist, playlist.mediaSequence + endSequence, expired);
};
|
/** |
|
* Calculates the interval of time that is currently seekable in a |
|
* playlist. The returned time ranges are relative to the earliest |
|
* moment in the specified playlist that is still available. A full |
|
* seekable implementation for live streams would need to offset |
|
* these values by the duration of content that has expired from the |
|
* stream. |
|
* |
|
 * @param {Object} playlist a media playlist object

 * @param {Number=} expired the amount of time that has

 * dropped off the front of the playlist in a live scenario
|
* @return {TimeRanges} the periods of time that are valid targets |
|
* for seeking |
|
*/ |
|
|
|
|
|
/**
 * Calculates the interval of time that is currently seekable in a
 * playlist. The returned time ranges are relative to the earliest
 * moment in the specified playlist that is still available.
 *
 * @param {Object} playlist a media playlist object
 * @param {Number=} expired the amount of time that has
 *        dropped off the front of the playlist in a live scenario
 * @return {TimeRanges} the periods of time that are valid targets
 *         for seeking
 */
var seekable = function seekable(playlist, expired) {
  var seekableStart = expired || 0;
  // always clamp the live end to the safe live point
  var seekableEnd = playlistEnd(playlist, expired, true);

  return seekableEnd === null ? createTimeRange() : createTimeRange(seekableStart, seekableEnd);
};
|
|
|
// true when `num` has no fractional part (false for NaN/Infinity)
var isWholeNumber = function isWholeNumber(num) {
  return num % 1 === 0;
};
|
|
|
/**
 * Nudge `num` up or down by one unit of its least significant decimal
 * digit (or by a tenth when `num` is whole).
 *
 * @param {Number} increment +1 to nudge up, -1 to nudge down
 * @param {Number} num the value to adjust
 * @return {Number} the adjusted value
 */
var roundSignificantDigit = function roundSignificantDigit(increment, num) {
  // If we have a whole number, just add 1 to it
  if (isWholeNumber(num)) {
    return num + increment * 0.1;
  }

  var decimalDigits = num.toString().split('.')[1].length;
  var place = 0;

  while (++place <= decimalDigits) {
    var scale = Math.pow(10, place);
    var scaled = num * scale;

    // stop at the first scale that yields a whole number, or at the
    // full decimal precision
    if (isWholeNumber(scaled) || place === decimalDigits) {
      return (scaled + increment) / scale;
    }
  }
};
|
|
|
// partial applications: nudge a number up/down by its least significant digit
var ceilLeastSignificantDigit = roundSignificantDigit.bind(null, 1);
var floorLeastSignificantDigit = roundSignificantDigit.bind(null, -1);
|
/** |
|
* Determine the index and estimated starting time of the segment that |
|
* contains a specified playback position in a media playlist. |
|
* |
|
* @param {Object} playlist the media playlist to query |
|
* @param {Number} currentTime The number of seconds since the earliest |
|
* possible position to determine the containing segment for |
|
* @param {Number} startIndex |
|
* @param {Number} startTime |
|
* @return {Object} |
|
*/ |
|
|
|
/**
 * Determine the index and estimated starting time of the segment that
 * contains a specified playback position in a media playlist.
 *
 * @param {Object} playlist the media playlist to query
 * @param {Number} currentTime The number of seconds since the earliest
 * possible position to determine the containing segment for
 * @param {Number} startIndex index to start the walk from (may be negative)
 * @param {Number} startTime playback time associated with startIndex
 * @return {Object} { mediaIndex, startTime } for the containing segment
 */
var getMediaInfoForTime = function getMediaInfoForTime(playlist, currentTime, startIndex, startTime) {
  var i = void 0;
  var segment = void 0;
  var numSegments = playlist.segments.length;

  // offset of currentTime relative to the known anchor (startIndex/startTime)
  var time = currentTime - startTime;

  if (time < 0) {
    // Walk backward from startIndex in the playlist, adding durations
    // until we find a segment that contains `time` and return it
    if (startIndex > 0) {
      for (i = startIndex - 1; i >= 0; i--) {
        segment = playlist.segments[i];
        // floor the duration's least significant digit so boundary times
        // resolve to the earlier segment
        time += floorLeastSignificantDigit(segment.duration);

        if (time > 0) {
          return {
            mediaIndex: i,
            startTime: startTime - sumDurations(playlist, startIndex, i)
          };
        }
      }
    }

    // We were unable to find a good segment within the playlist
    // so select the first segment
    return {
      mediaIndex: 0,
      startTime: currentTime
    };
  }

  // When startIndex is negative, we first walk forward to first segment
  // adding target durations. If we "run out of time" before getting to
  // the first segment, return the first segment
  if (startIndex < 0) {
    for (i = startIndex; i < 0; i++) {
      time -= playlist.targetDuration;

      if (time < 0) {
        return {
          mediaIndex: 0,
          startTime: currentTime
        };
      }
    }

    startIndex = 0;
  }

  // Walk forward from startIndex in the playlist, subtracting durations
  // until we find a segment that contains `time` and return it
  for (i = startIndex; i < numSegments; i++) {
    segment = playlist.segments[i];
    // ceil the duration's least significant digit so boundary times
    // resolve to the current segment
    time -= ceilLeastSignificantDigit(segment.duration);

    if (time < 0) {
      return {
        mediaIndex: i,
        startTime: startTime + sumDurations(playlist, startIndex, i)
      };
    }
  }

  // We are out of possible candidates so load the last one...
  return {
    mediaIndex: numSegments - 1,
    startTime: currentTime
  };
};
|
/** |
|
* Check whether the playlist is blacklisted or not. |
|
* |
|
* @param {Object} playlist the media playlist object |
|
* @return {boolean} whether the playlist is blacklisted or not |
|
* @function isBlacklisted |
|
*/ |
|
|
|
|
|
/**
 * Check whether the playlist is currently blacklisted.
 *
 * @param {Object} playlist the media playlist object
 * @return {boolean} whether the blacklist window is still in effect
 * @function isBlacklisted
 */
var isBlacklisted = function isBlacklisted(playlist) {
  var excludeUntil = playlist.excludeUntil;

  return excludeUntil && excludeUntil > Date.now();
};
|
/** |
|
* Check whether the playlist is compatible with current playback configuration or has |
|
* been blacklisted permanently for being incompatible. |
|
* |
|
* @param {Object} playlist the media playlist object |
|
* @return {boolean} whether the playlist is incompatible or not |
|
* @function isIncompatible |
|
*/ |
|
|
|
|
|
/**
 * Check whether the playlist has been blacklisted permanently (an
 * infinite excludeUntil) for being incompatible with the current
 * playback configuration.
 *
 * @param {Object} playlist the media playlist object
 * @return {boolean} whether the playlist is incompatible or not
 * @function isIncompatible
 */
var isIncompatible = function isIncompatible(playlist) {
  var excludeUntil = playlist.excludeUntil;

  return excludeUntil && excludeUntil === Infinity;
};
|
/** |
|
* Check whether the playlist is enabled or not. |
|
* |
|
* @param {Object} playlist the media playlist object |
|
* @return {boolean} whether the playlist is enabled or not |
|
* @function isEnabled |
|
*/ |
|
|
|
|
|
/**
 * Check whether the playlist is enabled: not manually disabled and not
 * currently blacklisted.
 *
 * @param {Object} playlist the media playlist object
 * @return {boolean} whether the playlist is enabled or not
 * @function isEnabled
 */
var isEnabled = function isEnabled(playlist) {
  return !playlist.disabled && !isBlacklisted(playlist);
};
|
/** |
|
* Check whether the playlist has been manually disabled through the representations api. |
|
* |
|
* @param {Object} playlist the media playlist object |
|
* @return {boolean} whether the playlist is disabled manually or not |
|
* @function isDisabled |
|
*/ |
|
|
|
|
|
/**
 * Check whether the playlist has been manually disabled through the
 * representations api.
 *
 * @param {Object} playlist the media playlist object
 * @return {boolean} the playlist's `disabled` flag
 * @function isDisabled
 */
var isDisabled = function isDisabled(playlist) {
  var manuallyDisabled = playlist.disabled;

  return manuallyDisabled;
};
|
/** |
|
* Returns whether the current playlist is an AES encrypted HLS stream |
|
* |
|
* @return {Boolean} true if it's an AES encrypted HLS stream |
|
*/ |
|
|
|
|
|
/**
 * Returns whether the current playlist is an AES encrypted HLS stream
 * (any segment carries key information).
 *
 * @param {Object} media the media playlist object
 * @return {Boolean} true if it's an AES encrypted HLS stream
 */
var isAes = function isAes(media) {
  return media.segments.some(function (segment) {
    return !!segment.key;
  });
};
|
/** |
|
* Returns whether the current playlist contains fMP4 |
|
* |
|
* @return {Boolean} true if the playlist contains fMP4 |
|
*/ |
|
|
|
|
|
/**
 * Returns whether the current playlist contains fMP4 (any segment has an
 * init-segment `map`).
 *
 * @param {Object} media the media playlist object
 * @return {Boolean} true if the playlist contains fMP4
 */
var isFmp4 = function isFmp4(media) {
  return media.segments.some(function (segment) {
    return !!segment.map;
  });
};
|
/** |
|
* Checks if the playlist has a value for the specified attribute |
|
* |
|
* @param {String} attr |
|
* Attribute to check for |
|
* @param {Object} playlist |
|
* The media playlist object |
|
* @return {Boolean} |
|
* Whether the playlist contains a value for the attribute or not |
|
* @function hasAttribute |
|
*/ |
|
|
|
|
|
/**
 * Checks if the playlist has a value for the specified attribute.
 * Note: returns the attribute's value itself (truthy/falsy), not a
 * strict boolean.
 *
 * @param {String} attr
 *        Attribute to check for
 * @param {Object} playlist
 *        The media playlist object
 * @return {Boolean}
 *         Whether the playlist contains a value for the attribute or not
 * @function hasAttribute
 */
var hasAttribute = function hasAttribute(attr, playlist) {
  return playlist.attributes ? playlist.attributes[attr] : playlist.attributes;
};
|
/** |
|
* Estimates the time required to complete a segment download from the specified playlist |
|
* |
|
* @param {Number} segmentDuration |
|
* Duration of requested segment |
|
* @param {Number} bandwidth |
|
* Current measured bandwidth of the player |
|
* @param {Object} playlist |
|
* The media playlist object |
|
* @param {Number=} bytesReceived |
|
* Number of bytes already received for the request. Defaults to 0 |
|
* @return {Number|NaN} |
|
* The estimated time to request the segment. NaN if bandwidth information for |
|
* the given playlist is unavailable |
|
* @function estimateSegmentRequestTime |
|
*/ |
|
|
|
|
|
/**
 * Estimates the time required to complete a segment download from the
 * specified playlist.
 *
 * @param {Number} segmentDuration
 *        Duration of requested segment
 * @param {Number} bandwidth
 *        Current measured bandwidth of the player
 * @param {Object} playlist
 *        The media playlist object
 * @param {Number=} bytesReceived
 *        Number of bytes already received for the request. Defaults to 0
 * @return {Number|NaN}
 *         The estimated time to request the segment. NaN if bandwidth
 *         information for the given playlist is unavailable
 * @function estimateSegmentRequestTime
 */
var estimateSegmentRequestTime = function estimateSegmentRequestTime(segmentDuration, bandwidth, playlist) {
  var bytesReceived = arguments.length > 3 && arguments[3] !== undefined ? arguments[3] : 0;

  if (!hasAttribute('BANDWIDTH', playlist)) {
    return NaN;
  }

  // total bits in the segment, assuming the declared BANDWIDTH
  var totalBits = segmentDuration * playlist.attributes.BANDWIDTH;

  return (totalBits - bytesReceived * 8) / bandwidth;
};
|
/* |
|
* Returns whether the current playlist is the lowest rendition |
|
* |
|
* @return {Boolean} true if on lowest rendition |
|
*/ |
|
|
|
|
|
/*
 * Returns whether the current playlist is the lowest enabled rendition:
 * no other enabled playlist has a strictly lower BANDWIDTH.
 *
 * @param {Object} master the master playlist object
 * @param {Object} media the current media playlist object
 * @return {Boolean} true if on lowest rendition
 */
var isLowestEnabledRendition = function isLowestEnabledRendition(master, media) {
  // a single-rendition master is trivially the lowest
  if (master.playlists.length === 1) {
    return true;
  }

  var currentBandwidth = media.attributes.BANDWIDTH || Number.MAX_VALUE;

  var lowerEnabled = master.playlists.filter(function (playlist) {
    return isEnabled(playlist) && (playlist.attributes.BANDWIDTH || 0) < currentBandwidth;
  });

  return lowerEnabled.length === 0;
};
|
|
|
|
|
// facade exporting the playlist utilities defined above
var Playlist = {
  duration: duration,
  seekable: seekable,
  safeLiveIndex: safeLiveIndex,
  getMediaInfoForTime: getMediaInfoForTime,
  isEnabled: isEnabled,
  isDisabled: isDisabled,
  isBlacklisted: isBlacklisted,
  isIncompatible: isIncompatible,
  playlistEnd: playlistEnd,
  isAes: isAes,
  isFmp4: isFmp4,
  hasAttribute: hasAttribute,
  estimateSegmentRequestTime: estimateSegmentRequestTime,
  isLowestEnabledRendition: isLowestEnabledRendition
};
|
/** |
|
* @file xhr.js |
|
*/ |
|
|
|
// local aliases for the video.js xhr helper and options merger
var videojsXHR = videojs$1.xhr,
    mergeOptions$1$1 = videojs$1.mergeOptions;
|
|
|
/**
 * Build the xhr wrapper used for all HLS requests. The returned function
 * decorates video.js's xhr with: a default timeout, an optional
 * `beforeRequest` hook, per-request bandwidth/timing bookkeeping, timeout
 * and non-2xx error emulation, and an `aborted` flag on abort.
 *
 * @return {Function} an xhr-like function `(options, callback) -> request`
 */
var xhrFactory = function xhrFactory() {
  var xhr = function XhrFunction(options, callback) {
    // Add a default timeout for all hls requests
    options = mergeOptions$1$1({
      timeout: 45e3
    }, options);

    // Allow an optional user-specified function to modify the option
    // object before we construct the xhr request
    var beforeRequest = XhrFunction.beforeRequest || videojs$1.Hls.xhr.beforeRequest;

    if (beforeRequest && typeof beforeRequest === 'function') {
      var newOptions = beforeRequest(options);

      // only replace options when the hook returned something
      if (newOptions) {
        options = newOptions;
      }
    }

    var request = videojsXHR(options, function (error, response) {
      var reqResponse = request.response;

      // on success, record timing and derive a bandwidth estimate
      // (bits per second) unless one was already set
      if (!error && reqResponse) {
        request.responseTime = Date.now();
        request.roundTripTime = request.responseTime - request.requestTime;
        request.bytesReceived = reqResponse.byteLength || reqResponse.length;

        if (!request.bandwidth) {
          request.bandwidth = Math.floor(request.bytesReceived / request.roundTripTime * 8 * 1000);
        }
      }

      if (response.headers) {
        request.responseHeaders = response.headers;
      }

      // videojs.xhr now uses a specific code on the error
      // object to signal that a request has timed out instead
      // of setting a boolean on the request object
      if (error && error.code === 'ETIMEDOUT') {
        request.timedout = true;
      }

      // videojs.xhr no longer considers status codes outside of 200 and 0
      // (for file uris) to be errors, but the old XHR did, so emulate that
      // behavior. Status 206 may be used in response to byterange requests.
      if (!error && !request.aborted && response.statusCode !== 200 && response.statusCode !== 206 && response.statusCode !== 0) {
        error = new Error('XHR Failed with a response of: ' + (request && (reqResponse || request.responseText)));
      }

      callback(error, request);
    });
    var originalAbort = request.abort;

    // wrap abort so aborted requests can be distinguished from failures
    request.abort = function () {
      request.aborted = true;
      return originalAbort.apply(request, arguments);
    };

    request.uri = options.uri;
    // requestTime anchors the round-trip-time measurement above
    request.requestTime = Date.now();
    return request;
  };

  return xhr;
};
|
/** |
|
* @file bin-utils.js |
|
*/ |
|
|
|
/** |
|
* convert a TimeRange to text |
|
* |
|
* @param {TimeRange} range the timerange to use for conversion |
|
* @param {Number} i the iterator on the range to convert |
|
*/ |
|
|
|
|
|
/**
 * Render the i-th interval of a TimeRanges object as "start-end".
 *
 * @param {TimeRange} range the timerange to use for conversion
 * @param {Number} i the index of the interval to convert
 */
var textRange = function textRange(range, i) {
  var start = range.start(i);
  var end = range.end(i);

  return start + '-' + end;
};
|
/** |
|
* format a number as hex string |
|
* |
|
* @param {Number} e The number |
|
* @param {Number} i the iterator |
|
*/ |
|
|
|
|
|
/**
 * Format a byte value as a two-digit, zero-padded hex string.
 *
 * @param {Number} e The byte value (0-255)
 * @param {Number} i the iterator; a space is appended after every odd index
 *                 so bytes pair up visually in the hex dump
 */
var formatHexString = function formatHexString(e, i) {
  var hex = e.toString(16);
  var padded = hex.length < 2 ? '0' + hex : hex;

  return i % 2 ? padded + ' ' : padded;
};
|
|
|
/**
 * Render a byte as its printable ASCII character, or '.' for anything
 * outside the 0x20-0x7d printable range.
 *
 * @param {Number} e The byte value
 * @return {String} a single-character string
 */
var formatAsciiString = function formatAsciiString(e) {
  var printable = e >= 0x20 && e < 0x7e;

  return printable ? String.fromCharCode(e) : '.';
};
|
/** |
|
* Creates an object for sending to a web worker modifying properties that are TypedArrays |
|
* into a new object with seperated properties for the buffer, byteOffset, and byteLength. |
|
* |
|
* @param {Object} message |
|
* Object of properties and values to send to the web worker |
|
* @return {Object} |
|
* Modified message with TypedArray values expanded |
|
* @function createTransferableMessage |
|
*/ |
|
|
|
|
|
/**
 * Creates an object for sending to a web worker, replacing every TypedArray
 * property with a plain descriptor exposing its underlying buffer,
 * byteOffset and byteLength (so the buffer can be listed as a transferable).
 *
 * @param {Object} message
 *        Object of properties and values to send to the web worker
 * @return {Object}
 *         Modified message with TypedArray values expanded
 * @function createTransferableMessage
 */
var createTransferableMessage = function createTransferableMessage(message) {
  return Object.keys(message).reduce(function (transferable, key) {
    var value = message[key];

    transferable[key] = ArrayBuffer.isView(value) ? {
      bytes: value.buffer,
      byteOffset: value.byteOffset,
      byteLength: value.byteLength
    } : value;

    return transferable;
  }, {});
};
|
/** |
|
* Returns a unique string identifier for a media initialization |
|
* segment. |
|
*/ |
|
|
|
|
|
/**
 * Returns a unique string identifier for a media initialization segment,
 * built from its byterange (defaulting to the whole resource) and its
 * resolved URI.
 */
var initSegmentId = function initSegmentId(initSegment) {
  var range = initSegment.byterange;

  if (!range) {
    // no explicit byterange: the init segment spans the whole resource
    range = {
      length: Infinity,
      offset: 0
    };
  }

  return [range.length, range.offset, initSegment.resolvedUri].join(',');
};
|
/** |
|
* utils to help dump binary data to the console |
|
*/ |
|
|
|
|
|
/**
 * Dump binary data as rows of 16 bytes, each row showing the hex bytes
 * followed by their printable-ASCII rendering.
 *
 * @param {TypedArray|Array} data bytes to dump
 * @return {String} multi-line dump, one '\n'-terminated line per 16 bytes
 */
var hexDump = function hexDump(data) {
  var bytes = Array.prototype.slice.call(data);
  var step = 16;
  var result = '';

  for (var offset = 0; offset < bytes.length; offset += step) {
    var row = bytes.slice(offset, offset + step);

    result += row.map(formatHexString).join('') + ' ' + row.map(formatAsciiString).join('') + '\n';
  }

  return result;
};
|
|
|
// Convenience wrapper: hex-dump the `bytes` property of a tag object.
var tagDump = function tagDump(_ref) {
  return hexDump(_ref.bytes);
};
|
|
|
/**
 * Render every interval of a TimeRanges object as "start-end " (each entry
 * trailed by a space), concatenated into one string.
 *
 * @param {TimeRanges} ranges the ranges to render
 * @return {String} the concatenated representation
 */
var textRanges = function textRanges(ranges) {
  var parts = [];

  for (var i = 0; i < ranges.length; i++) {
    parts.push(textRange(ranges, i) + ' ');
  }

  return parts.join('');
};
|
|
|
// Frozen namespace bundling the binary/segment helpers above for use by the
// worker messaging layer.
var utils$1 =
/*#__PURE__*/
Object.freeze({
  createTransferableMessage: createTransferableMessage,
  initSegmentId: initSegmentId,
  hexDump: hexDump,
  tagDump: tagDump,
  textRanges: textRanges
}); // TODO handle fmp4 case where the timing info is accurate and doesn't involve transmux

// Add 25% to the segment duration to account for small discrepencies in segment timing.
// 25% was arbitrarily chosen, and may need to be refined over time.

var SEGMENT_END_FUDGE_PERCENT = 0.25;
|
/** |
|
* Converts a player time (any time that can be gotten/set from player.currentTime(), |
|
* e.g., any time within player.seekable().start(0) to player.seekable().end(0)) to a |
|
* program time (any time referencing the real world (e.g., EXT-X-PROGRAM-DATE-TIME)). |
|
* |
|
* The containing segment is required as the EXT-X-PROGRAM-DATE-TIME serves as an "anchor |
|
* point" (a point where we have a mapping from program time to player time, with player |
|
* time being the post transmux start of the segment). |
|
* |
|
* For more details, see [this doc](../../docs/program-time-from-player-time.md). |
|
* |
|
* @param {Number} playerTime the player time |
|
* @param {Object} segment the segment which contains the player time |
|
* @return {Date} program time |
|
*/ |
|
|
|
/**
 * Converts a player time (any time that can be gotten/set from
 * player.currentTime()) to a program time (a real-world Date, anchored by
 * the segment's EXT-X-PROGRAM-DATE-TIME).
 *
 * The containing segment is required because its dateTimeObject is the
 * "anchor point" mapping program time to the segment's post-transmux start.
 *
 * @param {Number} playerTime the player time
 * @param {Object} segment the segment which contains the player time
 * @return {Date|null} program time, or null when the segment has no
 *         EXT-X-PROGRAM-DATE-TIME anchor
 */
var playerTimeToProgramTime = function playerTimeToProgramTime(playerTime, segment) {
  if (!segment.dateTimeObject) {
    // without an anchor there is no mapping from player time to wall-clock
    return null;
  }

  var timingInfo = segment.videoTimingInfo;
  // the start of the segment's own content, excluding anything the
  // transmuxer prepended from earlier content
  var startOfSegment = timingInfo.transmuxedPresentationStart + timingInfo.transmuxerPrependedSeconds;
  var secondsIntoSegment = playerTime - startOfSegment;

  return new Date(segment.dateTimeObject.getTime() + secondsIntoSegment * 1000);
};
|
|
|
/**
 * Duration of a transmuxed segment's own video content: the full transmuxed
 * presentation span minus whatever the transmuxer prepended.
 */
var originalSegmentVideoDuration = function originalSegmentVideoDuration(videoTimingInfo) {
  var totalSpan = videoTimingInfo.transmuxedPresentationEnd - videoTimingInfo.transmuxedPresentationStart;

  return totalSpan - videoTimingInfo.transmuxerPrependedSeconds;
};
|
/** |
|
* Finds a segment that contains the time requested given as an ISO-8601 string. The |
|
* returned segment might be an estimate or an accurate match. |
|
* |
|
* @param {String} programTime The ISO-8601 programTime to find a match for |
|
* @param {Object} playlist A playlist object to search within |
|
*/ |
|
|
|
|
|
/**
 * Finds a segment that contains the time requested given as an ISO-8601 string. The
 * returned segment might be an estimate or an accurate match.
 *
 * @param {String} programTime The ISO-8601 programTime to find a match for
 * @param {Object} playlist A playlist object to search within
 * @return {Object|null} {segment, estimatedStart, type}, or null when the
 *         programTime is malformed or falls outside the stream
 */
var findSegmentForProgramTime = function findSegmentForProgramTime(programTime, playlist) {
  // Assumptions:
  // - verifyProgramDateTimeTags has already been run
  // - live streams have been started
  var dateTimeObject = new Date(programTime);

  // `new Date()` never throws on malformed input (so the previous try/catch
  // could not fire); it yields an Invalid Date whose epoch value is NaN.
  // Every comparison against NaN is false, so without this explicit guard a
  // garbage programTime could fall through and incorrectly match a segment.
  if (isNaN(dateTimeObject.getTime())) {
    return null;
  }

  if (!playlist || !playlist.segments || playlist.segments.length === 0) {
    return null;
  }

  var segment = playlist.segments[0];

  if (dateTimeObject < segment.dateTimeObject) {
    // Requested time is before stream start.
    return null;
  }

  // walk forward to the last segment that starts at or before the requested time
  for (var i = 0; i < playlist.segments.length - 1; i++) {
    segment = playlist.segments[i];
    var nextSegmentStart = playlist.segments[i + 1].dateTimeObject;

    if (dateTimeObject < nextSegmentStart) {
      break;
    }
  }

  var lastSegment = playlist.segments[playlist.segments.length - 1];
  var lastSegmentStart = lastSegment.dateTimeObject;
  // without accurate (post-transmux) timing info, pad the manifest duration
  // by the fudge percent to allow for small timing discrepancies
  var lastSegmentDuration = lastSegment.videoTimingInfo ? originalSegmentVideoDuration(lastSegment.videoTimingInfo) : lastSegment.duration + lastSegment.duration * SEGMENT_END_FUDGE_PERCENT;
  var lastSegmentEnd = new Date(lastSegmentStart.getTime() + lastSegmentDuration * 1000);

  if (dateTimeObject > lastSegmentEnd) {
    // Beyond the end of the stream, or our best guess of the end of the stream.
    return null;
  }

  if (dateTimeObject > lastSegmentStart) {
    segment = lastSegment;
  }

  return {
    segment: segment,
    estimatedStart: segment.videoTimingInfo ? segment.videoTimingInfo.transmuxedPresentationStart : Playlist.duration(playlist, playlist.mediaSequence + playlist.segments.indexOf(segment)),
    // Although, given that all segments have accurate date time objects, the segment
    // selected should be accurate, unless the video has been transmuxed at some point
    // (determined by the presence of the videoTimingInfo object), the segment's "player
    // time" (the start time in the player) can't be considered accurate.
    type: segment.videoTimingInfo ? 'accurate' : 'estimate'
  };
};
|
/** |
|
* Finds a segment that contains the given player time(in seconds). |
|
* |
|
* @param {Number} time The player time to find a match for |
|
* @param {Object} playlist A playlist object to search within |
|
*/ |
|
|
|
|
|
/**
 * Finds a segment that contains the given player time (in seconds).
 *
 * @param {Number} time The player time to find a match for
 * @param {Object} playlist A playlist object to search within
 * @return {Object|null} {segment, estimatedStart, type}, or null when no
 *         segment can reasonably contain the time
 */
var findSegmentForPlayerTime = function findSegmentForPlayerTime(time, playlist) {
  // Assumptions:
  // - there will always be a segment.duration
  // - we can start from zero
  // - segments are in time order
  if (!playlist || !playlist.segments || playlist.segments.length === 0) {
    return null;
  }

  var segmentEnd = 0;
  var segment = void 0;

  for (var i = 0; i < playlist.segments.length; i++) {
    segment = playlist.segments[i]; // videoTimingInfo is set after the segment is downloaded and transmuxed, and
    // should contain the most accurate values we have for the segment's player times.
    //
    // Use the accurate transmuxedPresentationEnd value if it is available, otherwise fall
    // back to an estimate based on the manifest derived (inaccurate) segment.duration, to
    // calculate an end value.

    segmentEnd = segment.videoTimingInfo ? segment.videoTimingInfo.transmuxedPresentationEnd : segmentEnd + segment.duration;

    // stop at the first segment whose (possibly estimated) end covers `time`
    if (time <= segmentEnd) {
      break;
    }
  }

  var lastSegment = playlist.segments[playlist.segments.length - 1];

  if (lastSegment.videoTimingInfo && lastSegment.videoTimingInfo.transmuxedPresentationEnd < time) {
    // The time requested is beyond the stream end.
    return null;
  }

  if (time > segmentEnd) {
    // The time is within or beyond the last segment.
    //
    // Check to see if the time is beyond a reasonable guess of the end of the stream.
    if (time > segmentEnd + lastSegment.duration * SEGMENT_END_FUDGE_PERCENT) {
      // Technically, because the duration value is only an estimate, the time may still
      // exist in the last segment, however, there isn't enough information to make even
      // a reasonable estimate.
      return null;
    }

    segment = lastSegment;
  }

  return {
    segment: segment,
    estimatedStart: segment.videoTimingInfo ? segment.videoTimingInfo.transmuxedPresentationStart : segmentEnd - segment.duration,
    // Because videoTimingInfo is only set after transmux, it is the only way to get
    // accurate timing values.
    type: segment.videoTimingInfo ? 'accurate' : 'estimate'
  };
};
|
/** |
|
* Gives the offset of the comparisonTimestamp from the programTime timestamp in seconds. |
|
* If the offset returned is positive, the programTime occurs after the |
|
* comparisonTimestamp. |
|
* If the offset is negative, the programTime occurs before the comparisonTimestamp. |
|
* |
|
* @param {String} comparisonTimeStamp An ISO-8601 timestamp to compare against |
|
* @param {String} programTime The programTime as an ISO-8601 string |
|
* @return {Number} offset |
|
*/ |
|
|
|
|
|
/**
 * Gives the offset of the comparisonTimestamp from the programTime timestamp in seconds.
 * If the offset returned is positive, the programTime occurs after the
 * comparisonTimestamp.
 * If the offset is negative, the programTime occurs before the comparisonTimestamp.
 *
 * Note: `new Date(string)` does not throw on malformed input; it yields an
 * Invalid Date whose epoch value is NaN, so malformed input produces NaN
 * here (identical to the previous behavior, whose try/catch could never
 * fire — and would have crashed on the later .getTime() calls if it had).
 *
 * @param {String} comparisonTimeStamp An ISO-8601 timestamp to compare against
 * @param {String} programTime The programTime as an ISO-8601 string
 * @return {Number} offset in seconds (NaN when either timestamp is invalid)
 */
var getOffsetFromTimestamp = function getOffsetFromTimestamp(comparisonTimeStamp, programTime) {
  var segmentTimeEpoch = new Date(comparisonTimeStamp).getTime();
  var programTimeEpoch = new Date(programTime).getTime();

  return (programTimeEpoch - segmentTimeEpoch) / 1000;
};
|
/** |
|
* Checks that all segments in this playlist have programDateTime tags. |
|
* |
|
* @param {Object} playlist A playlist object |
|
*/ |
|
|
|
|
|
/**
 * Checks that all segments in this playlist have programDateTime tags.
 *
 * @param {Object} playlist A playlist object
 * @return {Boolean} true only when every segment carries a dateTimeObject
 */
var verifyProgramDateTimeTags = function verifyProgramDateTimeTags(playlist) {
  var segments = playlist.segments;

  if (!segments || segments.length === 0) {
    return false;
  }

  return segments.every(function (segment) {
    return Boolean(segment.dateTimeObject);
  });
};
|
/** |
|
* Returns the programTime of the media given a playlist and a playerTime. |
|
* The playlist must have programDateTime tags for a programDateTime tag to be returned. |
|
* If the segments containing the time requested have not been buffered yet, an estimate |
|
* may be returned to the callback. |
|
* |
|
* @param {Object} args |
|
* @param {Object} args.playlist A playlist object to search within |
|
* @param {Number} time A playerTime in seconds |
|
* @param {Function} callback(err, programTime) |
|
* @returns {String} err.message A detailed error message |
|
* @returns {Object} programTime |
|
* @returns {Number} programTime.mediaSeconds The streamTime in seconds |
|
* @returns {String} programTime.programDateTime The programTime as an ISO-8601 String |
|
*/ |
|
|
|
|
|
/**
 * Returns the programTime of the media given a playlist and a playerTime.
 * The playlist must have programDateTime tags for a programDateTime tag to be returned.
 * If the segments containing the time requested have not been buffered yet, an estimate
 * may be returned to the callback.
 *
 * @param {Object} args
 * @param {Object} args.playlist A playlist object to search within
 * @param {Number} args.time A playerTime in seconds
 * @param {Function} args.callback(err, programTime)
 * @returns {String} err.message A detailed error message
 * @returns {Object} programTime
 * @returns {Number} programTime.mediaSeconds The streamTime in seconds
 * @returns {String} programTime.programDateTime The programTime as an ISO-8601 String
 */
var getProgramTime = function getProgramTime(_ref) {
  var playlist = _ref.playlist;
  var time = _ref.time;
  var callback = _ref.callback;

  if (!callback) {
    throw new Error('getProgramTime: callback must be provided');
  }

  if (!playlist || time === undefined) {
    return callback({
      message: 'getProgramTime: playlist and time must be provided'
    });
  }

  var match = findSegmentForPlayerTime(time, playlist);

  if (!match) {
    return callback({
      message: 'valid programTime was not found'
    });
  }

  if (match.type === 'estimate') {
    // the target segment hasn't been transmuxed yet, so only an estimated
    // seek position can be offered
    return callback({
      message: 'Accurate programTime could not be determined.' + ' Please seek to e.seekTime and try again',
      seekTime: match.estimatedStart
    });
  }

  var result = {
    mediaSeconds: time
  };
  var programTime = playerTimeToProgramTime(time, match.segment);

  if (programTime) {
    result.programDateTime = programTime.toISOString();
  }

  return callback(null, result);
};
|
/** |
|
* Seeks in the player to a time that matches the given programTime ISO-8601 string. |
|
* |
|
* @param {Object} args |
|
* @param {String} args.programTime A programTime to seek to as an ISO-8601 String |
|
* @param {Object} args.playlist A playlist to look within |
|
* @param {Number} args.retryCount The number of times to try for an accurate seek. Default is 2. |
|
* @param {Function} args.seekTo A method to perform a seek |
|
* @param {Boolean} args.pauseAfterSeek Whether to end in a paused state after seeking. Default is true. |
|
* @param {Object} args.tech The tech to seek on |
|
* @param {Function} args.callback(err, newTime) A callback to return the new time to |
|
* @returns {String} err.message A detailed error message |
|
* @returns {Number} newTime The exact time that was seeked to in seconds |
|
*/ |
|
|
|
|
|
/**
 * Seeks in the player to a time that matches the given programTime ISO-8601 string.
 *
 * If the matched segment's timing is only an estimate, this seeks to the
 * estimate first (which triggers buffering/transmuxing of that region) and
 * retries after the 'seeked' event, up to retryCount times.
 *
 * @param {Object} args
 * @param {String} args.programTime A programTime to seek to as an ISO-8601 String
 * @param {Object} args.playlist A playlist to look within
 * @param {Number} args.retryCount The number of times to try for an accurate seek. Default is 2.
 * @param {Function} args.seekTo A method to perform a seek
 * @param {Boolean} args.pauseAfterSeek Whether to end in a paused state after seeking. Default is true.
 * @param {Object} args.tech The tech to seek on
 * @param {Function} args.callback(err, newTime) A callback to return the new time to
 * @returns {String} err.message A detailed error message
 * @returns {Number} newTime The exact time that was seeked to in seconds
 */
var seekToProgramTime = function seekToProgramTime(_ref2) {
  var programTime = _ref2.programTime,
      playlist = _ref2.playlist,
      _ref2$retryCount = _ref2.retryCount,
      retryCount = _ref2$retryCount === undefined ? 2 : _ref2$retryCount,
      seekTo = _ref2.seekTo,
      _ref2$pauseAfterSeek = _ref2.pauseAfterSeek,
      pauseAfterSeek = _ref2$pauseAfterSeek === undefined ? true : _ref2$pauseAfterSeek,
      tech = _ref2.tech,
      callback = _ref2.callback;

  if (!callback) {
    throw new Error('seekToProgramTime: callback must be provided');
  }

  if (typeof programTime === 'undefined' || !playlist || !seekTo) {
    return callback({
      message: 'seekToProgramTime: programTime, seekTo and playlist must be provided'
    });
  }

  // a live stream must have started playback before its player timeline can
  // be mapped to program times
  if (!playlist.endList && !tech.hasStarted_) {
    return callback({
      message: 'player must be playing a live stream to start buffering'
    });
  }

  if (!verifyProgramDateTimeTags(playlist)) {
    return callback({
      message: 'programDateTime tags must be provided in the manifest ' + playlist.resolvedUri
    });
  }

  var matchedSegment = findSegmentForProgramTime(programTime, playlist); // no match

  if (!matchedSegment) {
    return callback({
      message: programTime + ' was not found in the stream'
    });
  }

  var segment = matchedSegment.segment;
  // seconds between the segment's EXT-X-PROGRAM-DATE-TIME and the target time
  var mediaOffset = getOffsetFromTimestamp(segment.dateTimeObject, programTime);

  if (matchedSegment.type === 'estimate') {
    // we've run out of retries
    if (retryCount === 0) {
      return callback({
        message: programTime + ' is not buffered yet. Try again'
      });
    }

    // seek to the estimate; once the seek completes the region should be
    // buffered with accurate timing info, so recurse with one fewer retry
    seekTo(matchedSegment.estimatedStart + mediaOffset);
    tech.one('seeked', function () {
      seekToProgramTime({
        programTime: programTime,
        playlist: playlist,
        retryCount: retryCount - 1,
        seekTo: seekTo,
        pauseAfterSeek: pauseAfterSeek,
        tech: tech,
        callback: callback
      });
    });
    return;
  } // Since the segment.start value is determined from the buffered end or ending time
  // of the prior segment, the seekToTime doesn't need to account for any transmuxer
  // modifications.

  var seekToTime = segment.start + mediaOffset;

  var seekedCallback = function seekedCallback() {
    return callback(null, tech.currentTime());
  }; // listen for seeked event

  tech.one('seeked', seekedCallback); // pause before seeking as video.js will restore this state

  if (pauseAfterSeek) {
    tech.pause();
  }

  seekTo(seekToTime);
};
|
/**
 * ranges
 *
 * Utilities for working with TimeRanges.
 *
 */
// Fudge factor to account for TimeRanges rounding (roughly one frame at
// 30fps, ~33ms)

var TIME_FUDGE_FACTOR = 1 / 30; // Comparisons between time values such as current time and the end of the buffered range
// can be misleading because of precision differences or when the current media has poorly
// aligned audio and video, which can cause values to be slightly off from what you would
// expect. This value is what we consider to be safe to use in such comparisons to account
// for these scenarios.

var SAFE_TIME_DELTA = TIME_FUDGE_FACTOR * 3;
|
|
|
/**
 * Collect the intervals of `timeRanges` for which `predicate(start, end)`
 * holds and return them as a new TimeRanges object.
 *
 * @param {TimeRanges} timeRanges the ranges to filter
 * @param {Function} predicate called with (start, end) for each interval
 * @return {TimeRanges} a new TimeRanges of the matching intervals
 */
var filterRanges = function filterRanges(timeRanges, predicate) {
  var results = [];

  if (timeRanges && timeRanges.length) {
    for (var i = 0; i < timeRanges.length; i++) {
      var start = timeRanges.start(i);
      var end = timeRanges.end(i);

      if (predicate(start, end)) {
        results.push([start, end]);
      }
    }
  }

  return videojs$1.createTimeRanges(results);
};
|
/** |
|
* Attempts to find the buffered TimeRange that contains the specified |
|
* time. |
|
* @param {TimeRanges} buffered - the TimeRanges object to query |
|
* @param {number} time - the time to filter on. |
|
* @returns {TimeRanges} a new TimeRanges object |
|
*/ |
|
|
|
|
|
/**
 * Attempts to find the buffered TimeRange that contains the specified
 * time (allowing TIME_FUDGE_FACTOR of slop on either edge).
 *
 * @param {TimeRanges} buffered - the TimeRanges object to query
 * @param {number} time - the time to filter on.
 * @returns {TimeRanges} a new TimeRanges object
 */
var findRange = function findRange(buffered, time) {
  return filterRanges(buffered, function (start, end) {
    var startsEarlyEnough = start - TIME_FUDGE_FACTOR <= time;
    var endsLateEnough = end + TIME_FUDGE_FACTOR >= time;

    return startsEarlyEnough && endsLateEnough;
  });
};
|
/** |
|
* Returns the TimeRanges that begin later than the specified time. |
|
* @param {TimeRanges} timeRanges - the TimeRanges object to query |
|
* @param {number} time - the time to filter on. |
|
* @returns {TimeRanges} a new TimeRanges object. |
|
*/ |
|
|
|
|
|
/**
 * Returns the TimeRanges that begin later than the specified time
 * (with TIME_FUDGE_FACTOR of slop).
 *
 * @param {TimeRanges} timeRanges - the TimeRanges object to query
 * @param {number} time - the time to filter on.
 * @returns {TimeRanges} a new TimeRanges object.
 */
var findNextRange = function findNextRange(timeRanges, time) {
  return filterRanges(timeRanges, function (start) {
    var startsAfter = start - TIME_FUDGE_FACTOR >= time;

    return startsAfter;
  });
};
|
/** |
|
* Returns gaps within a list of TimeRanges |
|
* @param {TimeRanges} buffered - the TimeRanges object |
|
* @return {TimeRanges} a TimeRanges object of gaps |
|
*/ |
|
|
|
|
|
/**
 * Returns gaps within a list of TimeRanges: each gap spans from the end of
 * one buffered interval to the start of the next.
 *
 * @param {TimeRanges} buffered - the TimeRanges object
 * @return {TimeRanges} a TimeRanges object of gaps
 */
var findGaps = function findGaps(buffered) {
  if (buffered.length < 2) {
    // fewer than two intervals means there is nothing to have a gap between
    return videojs$1.createTimeRanges();
  }

  var gaps = [];

  for (var i = 1; i < buffered.length; i++) {
    gaps.push([buffered.end(i - 1), buffered.start(i)]);
  }

  return videojs$1.createTimeRanges(gaps);
};
|
/** |
|
* Gets a human readable string for a TimeRange |
|
* |
|
* @param {TimeRange} range |
|
* @returns {String} a human readable string |
|
*/ |
|
|
|
|
|
/**
 * Gets a human readable string for a TimeRange, e.g. "0 => 2, 5 => 8".
 *
 * @param {TimeRange} range
 * @returns {String} a human readable string ('' for an empty/missing range)
 */
var printableRange = function printableRange(range) {
  if (!range || !range.length) {
    return '';
  }

  var parts = [];

  for (var i = 0; i < range.length; i++) {
    parts.push(range.start(i) + ' => ' + range.end(i));
  }

  return parts.join(', ');
};
|
/** |
|
* Calculates the amount of time left in seconds until the player hits the end of the |
|
* buffer and causes a rebuffer |
|
* |
|
* @param {TimeRange} buffered |
|
* The state of the buffer |
|
* @param {Numnber} currentTime |
|
* The current time of the player |
|
* @param {Number} playbackRate |
|
* The current playback rate of the player. Defaults to 1. |
|
* @return {Number} |
|
* Time until the player has to start rebuffering in seconds. |
|
* @function timeUntilRebuffer |
|
*/ |
|
|
|
|
|
/**
 * Calculates the amount of time left in seconds until the player hits the end of the
 * buffer and causes a rebuffer
 *
 * @param {TimeRange} buffered
 *        The state of the buffer
 * @param {Number} currentTime
 *        The current time of the player
 * @param {Number} [playbackRate=1]
 *        The current playback rate of the player.
 * @return {Number}
 *         Time until the player has to start rebuffering in seconds.
 * @function timeUntilRebuffer
 */
var timeUntilRebuffer = function timeUntilRebuffer(buffered, currentTime, playbackRate) {
  var rate = playbackRate === undefined ? 1 : playbackRate;
  // with nothing buffered, treat the buffer end as time zero
  var bufferedEnd = buffered.length ? buffered.end(buffered.length - 1) : 0;

  return (bufferedEnd - currentTime) / rate;
};
|
/** |
|
* Converts a TimeRanges object into an array representation |
|
* @param {TimeRanges} timeRanges |
|
* @returns {Array} |
|
*/ |
|
|
|
|
|
/**
 * Converts a TimeRanges object into an array of {start, end} objects.
 *
 * @param {TimeRanges} timeRanges
 * @returns {Array}
 */
var timeRangesToArray = function timeRangesToArray(timeRanges) {
  var list = [];

  for (var i = 0; i < timeRanges.length; i++) {
    list.push({
      start: timeRanges.start(i),
      end: timeRanges.end(i)
    });
  }

  return list;
};
|
/** |
|
* @file create-text-tracks-if-necessary.js |
|
*/ |
|
|
|
/** |
|
* Create text tracks on video.js if they exist on a segment. |
|
* |
|
* @param {Object} sourceBuffer the VSB or FSB |
|
* @param {Object} mediaSource the HTML media source |
|
* @param {Object} segment the segment that may contain the text track |
|
* @private |
|
*/ |
|
|
|
|
|
/**
 * Create text tracks on video.js if they exist on a segment.
 *
 * Lazily creates (or reuses) an in-band caption track per caption stream
 * found on the segment, and a single "Timed Metadata" track when the
 * segment carries ID3 metadata; created tracks are cached on the source
 * buffer so they are only built once.
 *
 * @param {Object} sourceBuffer the VSB or FSB
 * @param {Object} mediaSource the HTML media source
 * @param {Object} segment the segment that may contain the text track
 * @private
 */
var createTextTracksIfNecessary = function createTextTracksIfNecessary(sourceBuffer, mediaSource, segment) {
  var player = mediaSource.player_; // create an in-band caption track if one is present in the segment

  if (segment.captions && segment.captions.length) {
    if (!sourceBuffer.inbandTextTracks_) {
      sourceBuffer.inbandTextTracks_ = {};
    }

    for (var trackId in segment.captionStreams) {
      if (!sourceBuffer.inbandTextTracks_[trackId]) {
        // report 608-caption usage on the tech
        player.tech_.trigger({
          type: 'usage',
          name: 'hls-608'
        });
        var track = player.textTracks().getTrackById(trackId);

        if (track) {
          // Reuse an existing track with a CC# id because this was
          // very likely created by videojs-contrib-hls from information
          // in the m3u8 for us to use
          sourceBuffer.inbandTextTracks_[trackId] = track;
        } else {
          // Otherwise, create a track with the default `CC#` label and
          // without a language
          sourceBuffer.inbandTextTracks_[trackId] = player.addRemoteTextTrack({
            kind: 'captions',
            id: trackId,
            label: trackId
          }, false).track;
        }
      }
    }
  }

  // create the single metadata track for timed ID3 cues on first use
  if (segment.metadata && segment.metadata.length && !sourceBuffer.metadataTrack_) {
    sourceBuffer.metadataTrack_ = player.addRemoteTextTrack({
      kind: 'metadata',
      label: 'Timed Metadata'
    }, false).track;
    sourceBuffer.metadataTrack_.inBandMetadataTrackDispatchType = segment.metadata.dispatchType;
  }
};
|
/** |
|
* @file remove-cues-from-track.js |
|
*/ |
|
|
|
/** |
|
* Remove cues from a track on video.js. |
|
* |
|
* @param {Double} start start of where we should remove the cue |
|
* @param {Double} end end of where the we should remove the cue |
|
* @param {Object} track the text track to remove the cues from |
|
* @private |
|
*/ |
|
|
|
|
|
/**
 * Remove all cues from `track` that overlap the interval [start, end].
 *
 * @param {Double} start start of where we should remove the cue
 * @param {Double} end end of where the we should remove the cue
 * @param {Object} track the text track to remove the cues from
 * @private
 */
var removeCuesFromTrack = function removeCuesFromTrack(start, end, track) {
  if (!track || !track.cues) {
    return;
  }

  // iterate backwards: TextTrack#removeCue mutates the live cue list, so a
  // forward walk would skip entries after each removal
  for (var i = track.cues.length - 1; i >= 0; i--) {
    var cue = track.cues[i]; // Remove any overlapping cue

    if (cue.startTime <= end && cue.endTime >= start) {
      track.removeCue(cue);
    }
  }
};
|
/** |
|
* @file add-text-track-data.js |
|
*/ |
|
|
|
/** |
|
* Define properties on a cue for backwards compatability, |
|
* but warn the user that the way that they are using it |
|
* is depricated and will be removed at a later date. |
|
* |
|
* @param {Cue} cue the cue to add the properties on |
|
* @private |
|
*/ |
|
|
|
|
|
/**
 * Define properties on a cue for backwards compatability,
 * but warn the user that the way that they are using it
 * is depricated and will be removed at a later date.
 *
 * @param {Cue} cue the cue to add the properties on
 * @private
 */
var deprecateOldCue = function deprecateOldCue(cue) {
  // build a warning getter descriptor that forwards to the new location
  var forward = function forward(oldName, newName, read) {
    return {
      get: function get() {
        videojs$1.log.warn('cue.frame.' + oldName + ' is deprecated. Use cue.value.' + newName + ' instead.');
        return read();
      }
    };
  };

  Object.defineProperties(cue.frame, {
    id: forward('id', 'key', function () {
      return cue.value.key;
    }),
    value: forward('value', 'data', function () {
      return cue.value.data;
    }),
    privateData: forward('privateData', 'data', function () {
      return cue.value.data;
    })
  });
};
|
|
|
/**
 * Clamp a media duration to a usable finite number: NaN and +/-Infinity are
 * mapped to Number.MAX_VALUE, everything else passes through unchanged.
 */
var durationOfVideo = function durationOfVideo(duration) {
  var unusable = isNaN(duration) || Math.abs(duration) === Infinity;

  return unusable ? Number.MAX_VALUE : duration;
};
|
/** |
|
* Add text track data to a source handler given the captions and |
|
* metadata from the buffer. |
|
* |
|
* @param {Object} sourceHandler the virtual source buffer |
|
* @param {Array} captionArray an array of caption data |
|
* @param {Array} metadataArray an array of meta data |
|
* @private |
|
*/ |
|
|
|
|
|
/**
 * Add text track data to a source handler given the captions and
 * metadata from the buffer.
 *
 * Caption cues are shifted by the source handler's timestampOffset; metadata
 * cues additionally get their endTimes stretched so each cue lasts until the
 * next metadata cue (or the end of the video).
 *
 * @param {Object} sourceHandler the virtual source buffer
 * @param {Array} captionArray an array of caption data
 * @param {Array} metadataArray an array of meta data
 * @private
 */
var addTextTrackData = function addTextTrackData(sourceHandler, captionArray, metadataArray) {
  // WebKitDataCue is preferred where present; fall back to VTTCue
  var Cue = window$1.WebKitDataCue || window$1.VTTCue;

  if (captionArray) {
    captionArray.forEach(function (caption) {
      var track = caption.stream;
      // shift caption times by the buffer's timestamp offset so they line
      // up with the player timeline (`this` is bound to sourceHandler)
      this.inbandTextTracks_[track].addCue(new Cue(caption.startTime + this.timestampOffset, caption.endTime + this.timestampOffset, caption.text));
    }, sourceHandler);
  }

  if (metadataArray) {
    var videoDuration = durationOfVideo(sourceHandler.mediaSource_.duration);
    metadataArray.forEach(function (metadata) {
      var time = metadata.cueTime + this.timestampOffset; // if time isn't a finite number between 0 and Infinity, like NaN,
      // ignore this bit of metadata.
      // This likely occurs when you have an non-timed ID3 tag like TIT2,
      // which is the "Title/Songname/Content description" frame

      if (typeof time !== 'number' || window$1.isNaN(time) || time < 0 || !(time < Infinity)) {
        return;
      }

      metadata.frames.forEach(function (frame) {
        var cue = new Cue(time, time, frame.value || frame.url || frame.data || '');
        cue.frame = frame;
        cue.value = frame;
        // expose legacy cue.frame.* getters with deprecation warnings
        deprecateOldCue(cue);
        this.metadataTrack_.addCue(cue);
      }, this);
    }, sourceHandler); // Updating the metadeta cues so that
    // the endTime of each cue is the startTime of the next cue
    // the endTime of last cue is the duration of the video

    if (sourceHandler.metadataTrack_ && sourceHandler.metadataTrack_.cues && sourceHandler.metadataTrack_.cues.length) {
      var cues = sourceHandler.metadataTrack_.cues;
      var cuesArray = []; // Create a copy of the TextTrackCueList...
      // ...disregarding cues with a falsey value

      for (var i = 0; i < cues.length; i++) {
        if (cues[i]) {
          cuesArray.push(cues[i]);
        }
      } // Group cues by their startTime value

      var cuesGroupedByStartTime = cuesArray.reduce(function (obj, cue) {
        var timeSlot = obj[cue.startTime] || [];
        timeSlot.push(cue);
        obj[cue.startTime] = timeSlot;
        return obj;
      }, {}); // Sort startTimes by ascending order

      var sortedStartTimes = Object.keys(cuesGroupedByStartTime).sort(function (a, b) {
        return Number(a) - Number(b);
      }); // Map each cue group's endTime to the next group's startTime

      sortedStartTimes.forEach(function (startTime, idx) {
        var cueGroup = cuesGroupedByStartTime[startTime];
        var nextTime = Number(sortedStartTimes[idx + 1]) || videoDuration; // Map each cue's endTime the next group's startTime

        cueGroup.forEach(function (cue) {
          cue.endTime = nextTime;
        });
      });
    }
  }
};
|
|
|
// Environment probes for the inline web worker shim below (webworkify-style).
var win = typeof window !== 'undefined' ? window : {},
    // key used to stash the real Worker instance on the shim wrapper
    TARGET = typeof Symbol === 'undefined' ? '__target' : Symbol(),
    SCRIPT_TYPE = 'application/javascript',
    // legacy BlobBuilder fallbacks for old WebKit/Gecko/IE
    BlobBuilder = win.BlobBuilder || win.WebKitBlobBuilder || win.MozBlobBuilder || win.MSBlobBuilder,
    // NOTE(review): within this same `var` statement the local `URL` binding
    // is still undefined when evaluated (hoisting), so the trailing
    // `URL && URL.msURL` fallback always yields undefined — confirm upstream
    URL = win.URL || win.webkitURL || URL && URL.msURL,
    Worker = win.Worker;
|
/** |
|
* Returns a wrapper around Web Worker code that is constructible. |
|
* |
|
* @function shimWorker |
|
* |
|
* @param { String } filename The name of the file |
|
* @param { Function } fn Function wrapping the code of the worker |
|
*/ |
|
|
|
/**
 * Returns a wrapper around Web Worker code that is constructible.
 *
 * @function shimWorker
 *
 * @param { String } filename The name of the file
 * @param { Function } fn Function wrapping the code of the worker
 */
function shimWorker(filename, fn) {
  return function ShimWorker(forceFallback) {
    var o = this;

    if (!fn) {
      // no inline source provided; load the worker from its file
      return new Worker(filename);
    } else if (Worker && !forceFallback) {
      // Convert the function's inner code to a string to construct the worker
      var source = fn.toString().replace(/^function.+?{/, '').slice(0, -1),
          objURL = createSourceObject(source);
      this[TARGET] = new Worker(objURL);
      // ensure the object URL is revoked when the worker is terminated
      wrapTerminate(this[TARGET], objURL);
      return this[TARGET];
    } else {
      // no Worker support (or fallback forced): run the worker body on this
      // thread, bridging postMessage/onmessage in both directions with
      // setTimeout so message delivery stays asynchronous
      var selfShim = {
        postMessage: function postMessage(m) {
          if (o.onmessage) {
            setTimeout(function () {
              o.onmessage({
                data: m,
                target: selfShim
              });
            });
          }
        }
      };
      fn.call(selfShim);

      this.postMessage = function (m) {
        setTimeout(function () {
          selfShim.onmessage({
            data: m,
            target: o
          });
        });
      };

      this.isThisThread = true;
    }
  };
} // Test Worker capabilities
|
|
|
|
|
// Probe real Worker support: construct a worker from an object URL and post
// a transferable to it. If construction or the transferable post throws,
// null out Worker so shimWorker falls back to the same-thread shim.
if (Worker) {
  var testWorker,
      objURL = createSourceObject('self.onmessage = function () {}'),
      testArray = new Uint8Array(1);

  try {
    testWorker = new Worker(objURL); // Native browser on some Samsung devices throws for transferables, let's detect it

    testWorker.postMessage(testArray, [testArray.buffer]);
  } catch (e) {
    Worker = null;
  } finally {
    // always release the probe's object URL and worker
    URL.revokeObjectURL(objURL);

    if (testWorker) {
      testWorker.terminate();
    }
  }
}
|
|
|
/**
 * Create an object URL for a script blob containing `str` (used as a worker
 * source). Falls back to the legacy BlobBuilder API on browsers where the
 * Blob constructor is unavailable or throws.
 *
 * @param {String} str worker source code
 * @return {String} an object URL referencing the script blob
 */
function createSourceObject(str) {
  try {
    return URL.createObjectURL(new Blob([str], {
      type: SCRIPT_TYPE
    }));
  } catch (e) {
    var blob = new BlobBuilder();
    blob.append(str);
    // was `blob.getBlob(type)`: `type` is not defined in this scope, so the
    // legacy fallback threw a ReferenceError instead of building the blob;
    // the intended MIME type is SCRIPT_TYPE
    return URL.createObjectURL(blob.getBlob(SCRIPT_TYPE));
  }
}
|
|
|
/**
 * Patches a Worker's terminate() so the blob URL it was constructed from
 * is revoked when the worker shuts down.
 *
 * @param { Worker } worker the worker to patch
 * @param { String } objURL the object URL backing the worker's source
 */
function wrapTerminate(worker, objURL) {
  if (!worker || !objURL) {
    return;
  }

  var originalTerminate = worker.terminate;
  worker.objURL = objURL;

  worker.terminate = function () {
    if (worker.objURL) {
      URL.revokeObjectURL(worker.objURL);
    }

    originalTerminate.call(worker);
  };
}
|
|
|
var TransmuxWorker = new shimWorker("./transmuxer-worker.worker.js", function (window, document$$1) { |
|
var self = this; |
|
|
|
var transmuxerWorker = function () { |
|
/** |
|
* mux.js |
|
* |
|
* Copyright (c) 2015 Brightcove |
|
* All rights reserved. |
|
* |
|
* Functions that generate fragmented MP4s suitable for use with Media |
|
* Source Extensions. |
|
*/ |
|
// Largest value representable in an unsigned 32-bit integer; used to split
// 64-bit decode times into upper/lower words.
var UINT32_MAX = Math.pow(2, 32) - 1;
// Box-generator functions plus the constant byte tables they share.
var box, dinf, esds, ftyp, mdat, mfhd, minf, moof, moov, mvex, mvhd, trak, tkhd, mdia, mdhd, hdlr, sdtp, stbl, stsd, traf, trex, trun, types, MAJOR_BRAND, MINOR_VERSION, AVC1_BRAND, VIDEO_HDLR, AUDIO_HDLR, HDLR_TYPES, VMHD, SMHD, DREF, STCO, STSC, STSZ, STTS; // pre-calculate constants

// Populate `types` with the four-character-code byte sequences of every
// box we can generate, and pre-build the constant box payloads.
(function () {
  var i;
  types = {
    avc1: [],
    // codingname
    avcC: [],
    btrt: [],
    dinf: [],
    dref: [],
    esds: [],
    ftyp: [],
    hdlr: [],
    mdat: [],
    mdhd: [],
    mdia: [],
    mfhd: [],
    minf: [],
    moof: [],
    moov: [],
    mp4a: [],
    // codingname
    mvex: [],
    mvhd: [],
    sdtp: [],
    smhd: [],
    stbl: [],
    stco: [],
    stsc: [],
    stsd: [],
    stsz: [],
    stts: [],
    styp: [],
    tfdt: [],
    tfhd: [],
    traf: [],
    trak: [],
    trun: [],
    trex: [],
    tkhd: [],
    vmhd: []
  }; // In environments where Uint8Array is undefined (e.g., IE8), skip set up so that we
  // don't throw an error

  if (typeof Uint8Array === 'undefined') {
    return;
  }

  // Replace each placeholder with the ASCII codes of its own key name,
  // e.g. types.moov becomes [0x6d, 0x6f, 0x6f, 0x76].
  for (i in types) {
    if (types.hasOwnProperty(i)) {
      types[i] = [i.charCodeAt(0), i.charCodeAt(1), i.charCodeAt(2), i.charCodeAt(3)];
    }
  }

  // 'isom' major brand and 'avc1' compatible brand for the ftyp box
  MAJOR_BRAND = new Uint8Array(['i'.charCodeAt(0), 's'.charCodeAt(0), 'o'.charCodeAt(0), 'm'.charCodeAt(0)]);
  AVC1_BRAND = new Uint8Array(['a'.charCodeAt(0), 'v'.charCodeAt(0), 'c'.charCodeAt(0), '1'.charCodeAt(0)]);
  MINOR_VERSION = new Uint8Array([0, 0, 0, 1]);
  VIDEO_HDLR = new Uint8Array([0x00, // version 0
  0x00, 0x00, 0x00, // flags
  0x00, 0x00, 0x00, 0x00, // pre_defined
  0x76, 0x69, 0x64, 0x65, // handler_type: 'vide'
  0x00, 0x00, 0x00, 0x00, // reserved
  0x00, 0x00, 0x00, 0x00, // reserved
  0x00, 0x00, 0x00, 0x00, // reserved
  0x56, 0x69, 0x64, 0x65, 0x6f, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x00 // name: 'VideoHandler'
  ]);
  AUDIO_HDLR = new Uint8Array([0x00, // version 0
  0x00, 0x00, 0x00, // flags
  0x00, 0x00, 0x00, 0x00, // pre_defined
  0x73, 0x6f, 0x75, 0x6e, // handler_type: 'soun'
  0x00, 0x00, 0x00, 0x00, // reserved
  0x00, 0x00, 0x00, 0x00, // reserved
  0x00, 0x00, 0x00, 0x00, // reserved
  0x53, 0x6f, 0x75, 0x6e, 0x64, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x00 // name: 'SoundHandler'
  ]);
  HDLR_TYPES = {
    video: VIDEO_HDLR,
    audio: AUDIO_HDLR
  };
  DREF = new Uint8Array([0x00, // version 0
  0x00, 0x00, 0x00, // flags
  0x00, 0x00, 0x00, 0x01, // entry_count
  0x00, 0x00, 0x00, 0x0c, // entry_size
  0x75, 0x72, 0x6c, 0x20, // 'url' type
  0x00, // version 0
  0x00, 0x00, 0x01 // entry_flags
  ]);
  SMHD = new Uint8Array([0x00, // version
  0x00, 0x00, 0x00, // flags
  0x00, 0x00, // balance, 0 means centered
  0x00, 0x00 // reserved
  ]);
  STCO = new Uint8Array([0x00, // version
  0x00, 0x00, 0x00, // flags
  0x00, 0x00, 0x00, 0x00 // entry_count
  ]);
  // stsc and stts share stco's empty-table layout (fMP4 keeps sample
  // metadata in movie fragments, so these tables stay empty)
  STSC = STCO;
  STSZ = new Uint8Array([0x00, // version
  0x00, 0x00, 0x00, // flags
  0x00, 0x00, 0x00, 0x00, // sample_size
  0x00, 0x00, 0x00, 0x00 // sample_count
  ]);
  STTS = STCO;
  VMHD = new Uint8Array([0x00, // version
  0x00, 0x00, 0x01, // flags
  0x00, 0x00, // graphicsmode
  0x00, 0x00, 0x00, 0x00, 0x00, 0x00 // opcolor
  ]);
})();
|
|
|
// Wraps one or more payload Uint8Arrays in an ISO BMFF box of the given
// four-character type; the result carries the standard 8-byte
// size + boxtype header required by the spec.
box = function box(type) {
  var payload = Array.prototype.slice.call(arguments, 1);
  var totalSize = 0;
  var i;
  var result;
  var view;
  var offset;

  // calculate the total size we need to allocate
  for (i = 0; i < payload.length; i++) {
    totalSize += payload[i].byteLength;
  }

  // 8 header bytes: a 32-bit big-endian size followed by the boxtype
  result = new Uint8Array(totalSize + 8);
  view = new DataView(result.buffer, result.byteOffset, result.byteLength);
  view.setUint32(0, result.byteLength);
  result.set(type, 4);

  // copy each payload into the result after the header
  for (i = 0, offset = 8; i < payload.length; i++) {
    result.set(payload[i], offset);
    offset += payload[i].byteLength;
  }

  return result;
};
|
|
|
// dinf: data information box; a single 'url ' data reference indicating
// the media data lives in the same file.
dinf = function dinf() {
  return box(types.dinf, box(types.dref, DREF));
};

// esds: elementary stream descriptor for an AAC track; the last bytes
// encode the AudioSpecificConfig from the track metadata.
esds = function esds(track) {
  return box(types.esds, new Uint8Array([0x00, // version
  0x00, 0x00, 0x00, // flags
  // ES_Descriptor
  0x03, // tag, ES_DescrTag
  0x19, // length
  0x00, 0x00, // ES_ID
  0x00, // streamDependenceFlag, URL_flag, reserved, streamPriority
  // DecoderConfigDescriptor
  0x04, // tag, DecoderConfigDescrTag
  0x11, // length
  0x40, // object type
  0x15, // streamType
  0x00, 0x06, 0x00, // bufferSizeDB
  0x00, 0x00, 0xda, 0xc0, // maxBitrate
  0x00, 0x00, 0xda, 0xc0, // avgBitrate
  // DecoderSpecificInfo
  0x05, // tag, DecoderSpecificInfoTag
  0x02, // length
  // ISO/IEC 14496-3, AudioSpecificConfig
  // for samplingFrequencyIndex see ISO/IEC 13818-7:2006, 8.1.3.2.2, Table 35
  track.audioobjecttype << 3 | track.samplingfrequencyindex >>> 1, track.samplingfrequencyindex << 7 | track.channelcount << 3, 0x06, 0x01, 0x02 // GASpecificConfig
  ]));
};

// ftyp: file type box declaring the isom major brand with avc1 compatibility.
ftyp = function ftyp() {
  return box(types.ftyp, MAJOR_BRAND, MINOR_VERSION, MAJOR_BRAND, AVC1_BRAND);
};

// hdlr: handler reference box; `type` is 'video' or 'audio'.
hdlr = function hdlr(type) {
  return box(types.hdlr, HDLR_TYPES[type]);
};

// mdat: media data box wrapping the raw sample bytes.
mdat = function mdat(data) {
  return box(types.mdat, data);
};
|
|
|
// mdhd: media header box. Declares the per-track timescale (90kHz by
// default, overridden by track.samplerate when present) and the duration.
mdhd = function mdhd(track) {
  var result = new Uint8Array([0x00, // version 0
  0x00, 0x00, 0x00, // flags
  0x00, 0x00, 0x00, 0x02, // creation_time
  0x00, 0x00, 0x00, 0x03, // modification_time
  0x00, 0x01, 0x5f, 0x90, // timescale, 90,000 "ticks" per second
  track.duration >>> 24 & 0xFF, track.duration >>> 16 & 0xFF, track.duration >>> 8 & 0xFF, track.duration & 0xFF, // duration
  0x55, 0xc4, // 'und' language (undetermined)
  0x00, 0x00]); // Use the sample rate from the track metadata, when it is
  // defined. The sample rate can be parsed out of an ADTS header, for
  // instance.

  if (track.samplerate) {
    // overwrite the default 90kHz timescale (bytes 12-15) in place
    result[12] = track.samplerate >>> 24 & 0xFF;
    result[13] = track.samplerate >>> 16 & 0xFF;
    result[14] = track.samplerate >>> 8 & 0xFF;
    result[15] = track.samplerate & 0xFF;
  }

  return box(types.mdhd, result);
};
|
|
|
// mdia: media box combining the media header, handler, and media info.
mdia = function mdia(track) {
  return box(types.mdia, mdhd(track), hdlr(track.type), minf(track));
};

// mfhd: movie fragment header carrying the fragment sequence number.
mfhd = function mfhd(sequenceNumber) {
  return box(types.mfhd, new Uint8Array([0x00, 0x00, 0x00, 0x00, // flags
  (sequenceNumber & 0xFF000000) >> 24, (sequenceNumber & 0xFF0000) >> 16, (sequenceNumber & 0xFF00) >> 8, sequenceNumber & 0xFF // sequence_number
  ]));
};

// minf: media information box; vmhd for video tracks, smhd for audio.
minf = function minf(track) {
  return box(types.minf, track.type === 'video' ? box(types.vmhd, VMHD) : box(types.smhd, SMHD), dinf(), stbl(track));
};
|
|
|
// moof: movie fragment box — a fragment header plus one track fragment
// (traf) box per track.
moof = function moof(sequenceNumber, tracks) {
  var trackFragments = [];
  var i;

  // build traf boxes for each track fragment
  for (i = 0; i < tracks.length; i++) {
    trackFragments.push(traf(tracks[i]));
  }

  return box.apply(null, [types.moof, mfhd(sequenceNumber)].concat(trackFragments));
};
|
/** |
|
* Returns a movie box. |
|
* @param tracks {array} the tracks associated with this movie |
|
* @see ISO/IEC 14496-12:2012(E), section 8.2.1 |
|
*/ |
|
|
|
|
|
moov = function moov(tracks) {
  var boxes = [];
  var i;

  // one trak box per track, in the order the tracks were given
  for (i = 0; i < tracks.length; i++) {
    boxes.push(trak(tracks[i]));
  }

  return box.apply(null, [types.moov, mvhd(0xffffffff)].concat(boxes).concat(mvex(tracks)));
};

// mvex: movie extends box, one trex (track extends) entry per track.
mvex = function mvex(tracks) {
  var boxes = [];
  var i;

  for (i = 0; i < tracks.length; i++) {
    boxes.push(trex(tracks[i]));
  }

  return box.apply(null, [types.mvex].concat(boxes));
};
|
|
|
// mvhd: movie header box. 90kHz timescale with default rate, volume, and
// unity transformation matrix.
mvhd = function mvhd(duration) {
  var bytes = new Uint8Array([0x00, // version 0
  0x00, 0x00, 0x00, // flags
  0x00, 0x00, 0x00, 0x01, // creation_time
  0x00, 0x00, 0x00, 0x02, // modification_time
  0x00, 0x01, 0x5f, 0x90, // timescale, 90,000 "ticks" per second
  (duration & 0xFF000000) >> 24, (duration & 0xFF0000) >> 16, (duration & 0xFF00) >> 8, duration & 0xFF, // duration
  0x00, 0x01, 0x00, 0x00, // 1.0 rate
  0x01, 0x00, // 1.0 volume
  0x00, 0x00, // reserved
  0x00, 0x00, 0x00, 0x00, // reserved
  0x00, 0x00, 0x00, 0x00, // reserved
  0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, // transformation: unity matrix
  0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // pre_defined
  0xff, 0xff, 0xff, 0xff // next_track_ID
  ]);
  return box(types.mvhd, bytes);
};
|
|
|
// sdtp: independent and disposable samples box. One byte per sample
// packing the dependsOn / isDependedOn / hasRedundancy flags.
sdtp = function sdtp(track) {
  var samples = track.samples || [],
      bytes = new Uint8Array(4 + samples.length),
      flags,
      i; // leave the full box header (4 bytes) all zero
  // write the sample table

  for (i = 0; i < samples.length; i++) {
    flags = samples[i].flags;
    bytes[i + 4] = flags.dependsOn << 4 | flags.isDependedOn << 2 | flags.hasRedundancy;
  }

  return box(types.sdtp, bytes);
};

// stbl: sample table box. The per-sample tables are empty constants since
// fragmented MP4 keeps sample metadata in the movie fragments instead.
stbl = function stbl(track) {
  return box(types.stbl, stsd(track), box(types.stts, STTS), box(types.stsc, STSC), box(types.stsz, STSZ), box(types.stco, STCO));
};
|
|
|
// Build the sample description (stsd) box and the codec-specific sample
// entries it contains: avc1 for video tracks, mp4a for audio tracks.
(function () {
  var videoSample, audioSample;

  stsd = function stsd(track) {
    return box(types.stsd, new Uint8Array([0x00, // version 0
    0x00, 0x00, 0x00, // flags
    0x00, 0x00, 0x00, 0x01]), track.type === 'video' ? videoSample(track) : audioSample(track));
  };

  videoSample = function videoSample(track) {
    var sps = track.sps || [],
        pps = track.pps || [],
        sequenceParameterSets = [],
        pictureParameterSets = [],
        i; // assemble the SPSs

    // each entry is a 16-bit length followed by the raw parameter set bytes
    for (i = 0; i < sps.length; i++) {
      sequenceParameterSets.push((sps[i].byteLength & 0xFF00) >>> 8);
      sequenceParameterSets.push(sps[i].byteLength & 0xFF); // sequenceParameterSetLength

      sequenceParameterSets = sequenceParameterSets.concat(Array.prototype.slice.call(sps[i])); // SPS
    } // assemble the PPSs

    for (i = 0; i < pps.length; i++) {
      pictureParameterSets.push((pps[i].byteLength & 0xFF00) >>> 8);
      pictureParameterSets.push(pps[i].byteLength & 0xFF);
      pictureParameterSets = pictureParameterSets.concat(Array.prototype.slice.call(pps[i]));
    }

    return box(types.avc1, new Uint8Array([0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // reserved
    0x00, 0x01, // data_reference_index
    0x00, 0x00, // pre_defined
    0x00, 0x00, // reserved
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // pre_defined
    (track.width & 0xff00) >> 8, track.width & 0xff, // width
    (track.height & 0xff00) >> 8, track.height & 0xff, // height
    0x00, 0x48, 0x00, 0x00, // horizresolution
    0x00, 0x48, 0x00, 0x00, // vertresolution
    0x00, 0x00, 0x00, 0x00, // reserved
    0x00, 0x01, // frame_count
    0x13, 0x76, 0x69, 0x64, 0x65, 0x6f, 0x6a, 0x73, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x69, 0x62, 0x2d, 0x68, 0x6c, 0x73, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // compressorname
    0x00, 0x18, // depth = 24
    // NOTE(review): ISO/IEC 14496-12 specifies pre_defined = -1 (0xff,
    // 0xff); 0x11, 0x11 looks suspect — confirm against upstream mux.js.
    0x11, 0x11 // pre_defined = -1
    ]), box(types.avcC, new Uint8Array([0x01, // configurationVersion
    track.profileIdc, // AVCProfileIndication
    track.profileCompatibility, // profile_compatibility
    track.levelIdc, // AVCLevelIndication
    0xff // lengthSizeMinusOne, hard-coded to 4 bytes
    ].concat([sps.length // numOfSequenceParameterSets
    ]).concat(sequenceParameterSets).concat([pps.length // numOfPictureParameterSets
    ]).concat(pictureParameterSets))), // "PPS"
    box(types.btrt, new Uint8Array([0x00, 0x1c, 0x9c, 0x80, // bufferSizeDB
    0x00, 0x2d, 0xc6, 0xc0, // maxBitrate
    0x00, 0x2d, 0xc6, 0xc0])) // avgBitrate
    );
  };

  audioSample = function audioSample(track) {
    return box(types.mp4a, new Uint8Array([// SampleEntry, ISO/IEC 14496-12
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // reserved
    0x00, 0x01, // data_reference_index
    // AudioSampleEntry, ISO/IEC 14496-12
    0x00, 0x00, 0x00, 0x00, // reserved
    0x00, 0x00, 0x00, 0x00, // reserved
    (track.channelcount & 0xff00) >> 8, track.channelcount & 0xff, // channelcount
    (track.samplesize & 0xff00) >> 8, track.samplesize & 0xff, // samplesize
    0x00, 0x00, // pre_defined
    0x00, 0x00, // reserved
    (track.samplerate & 0xff00) >> 8, track.samplerate & 0xff, 0x00, 0x00 // samplerate, 16.16
    // MP4AudioSampleEntry, ISO/IEC 14496-14
    ]), esds(track));
  };
})();
|
|
|
// tkhd: track header box. Flags 0x7 mark the track enabled, in-movie,
// and in-preview.
tkhd = function tkhd(track) {
  var result = new Uint8Array([0x00, // version 0
  0x00, 0x00, 0x07, // flags
  0x00, 0x00, 0x00, 0x00, // creation_time
  0x00, 0x00, 0x00, 0x00, // modification_time
  (track.id & 0xFF000000) >> 24, (track.id & 0xFF0000) >> 16, (track.id & 0xFF00) >> 8, track.id & 0xFF, // track_ID
  0x00, 0x00, 0x00, 0x00, // reserved
  (track.duration & 0xFF000000) >> 24, (track.duration & 0xFF0000) >> 16, (track.duration & 0xFF00) >> 8, track.duration & 0xFF, // duration
  0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // reserved
  0x00, 0x00, // layer
  0x00, 0x00, // alternate_group
  0x01, 0x00, // non-audio track volume
  0x00, 0x00, // reserved
  0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, // transformation: unity matrix
  (track.width & 0xFF00) >> 8, track.width & 0xFF, 0x00, 0x00, // width
  (track.height & 0xFF00) >> 8, track.height & 0xFF, 0x00, 0x00 // height
  ]);
  return box(types.tkhd, result);
};
|
/** |
|
* Generate a track fragment (traf) box. A traf box collects metadata |
|
* about tracks in a movie fragment (moof) box. |
|
*/ |
|
|
|
|
|
// Assembles tfhd + tfdt + trun (+ sdtp for video) into a traf box.
traf = function traf(track) {
  var trackFragmentHeader, trackFragmentDecodeTime, trackFragmentRun, sampleDependencyTable, dataOffset, upperWordBaseMediaDecodeTime, lowerWordBaseMediaDecodeTime;
  trackFragmentHeader = box(types.tfhd, new Uint8Array([0x00, // version 0
  0x00, 0x00, 0x3a, // flags
  (track.id & 0xFF000000) >> 24, (track.id & 0xFF0000) >> 16, (track.id & 0xFF00) >> 8, track.id & 0xFF, // track_ID
  0x00, 0x00, 0x00, 0x01, // sample_description_index
  0x00, 0x00, 0x00, 0x00, // default_sample_duration
  0x00, 0x00, 0x00, 0x00, // default_sample_size
  0x00, 0x00, 0x00, 0x00 // default_sample_flags
  ]));
  // split the base media decode time into two 32-bit words for the
  // 64-bit version-1 tfdt box
  upperWordBaseMediaDecodeTime = Math.floor(track.baseMediaDecodeTime / (UINT32_MAX + 1));
  lowerWordBaseMediaDecodeTime = Math.floor(track.baseMediaDecodeTime % (UINT32_MAX + 1));
  trackFragmentDecodeTime = box(types.tfdt, new Uint8Array([0x01, // version 1
  0x00, 0x00, 0x00, // flags
  // baseMediaDecodeTime
  upperWordBaseMediaDecodeTime >>> 24 & 0xFF, upperWordBaseMediaDecodeTime >>> 16 & 0xFF, upperWordBaseMediaDecodeTime >>> 8 & 0xFF, upperWordBaseMediaDecodeTime & 0xFF, lowerWordBaseMediaDecodeTime >>> 24 & 0xFF, lowerWordBaseMediaDecodeTime >>> 16 & 0xFF, lowerWordBaseMediaDecodeTime >>> 8 & 0xFF, lowerWordBaseMediaDecodeTime & 0xFF])); // the data offset specifies the number of bytes from the start of
  // the containing moof to the first payload byte of the associated
  // mdat

  dataOffset = 32 + // tfhd
  20 + // tfdt
  8 + // traf header
  16 + // mfhd
  8 + // moof header
  8; // mdat header
  // audio tracks require less metadata

  if (track.type === 'audio') {
    trackFragmentRun = trun(track, dataOffset);
    return box(types.traf, trackFragmentHeader, trackFragmentDecodeTime, trackFragmentRun);
  } // video tracks should contain an independent and disposable samples
  // box (sdtp)
  // generate one and adjust offsets to match

  sampleDependencyTable = sdtp(track);
  trackFragmentRun = trun(track, sampleDependencyTable.length + dataOffset);
  return box(types.traf, trackFragmentHeader, trackFragmentDecodeTime, trackFragmentRun, sampleDependencyTable);
};
|
/** |
|
* Generate a track box. |
|
* @param track {object} a track definition |
|
* @return {Uint8Array} the track box |
|
*/ |
|
|
|
|
|
trak = function trak(track) {
  // a falsy duration is replaced with the "unknown duration" sentinel
  track.duration = track.duration || 0xffffffff;
  return box(types.trak, tkhd(track), mdia(track));
};

// trex: track extends box declaring defaults for this track's fragments.
trex = function trex(track) {
  var result = new Uint8Array([0x00, // version 0
  0x00, 0x00, 0x00, // flags
  (track.id & 0xFF000000) >> 24, (track.id & 0xFF0000) >> 16, (track.id & 0xFF00) >> 8, track.id & 0xFF, // track_ID
  0x00, 0x00, 0x00, 0x01, // default_sample_description_index
  0x00, 0x00, 0x00, 0x00, // default_sample_duration
  0x00, 0x00, 0x00, 0x00, // default_sample_size
  0x00, 0x01, 0x00, 0x01 // default_sample_flags
  ]); // the last two bytes of default_sample_flags is the sample
  // degradation priority, a hint about the importance of this sample
  // relative to others. Lower the degradation priority for all sample
  // types other than video.

  if (track.type !== 'video') {
    result[result.length - 1] = 0x00;
  }

  return box(types.trex, result);
};
|
|
|
// Build track fragment run (trun) boxes. This method assumes all samples
// are uniform. That is, if a duration is present for the first sample, it
// will be present for all subsequent samples.
// see ISO/IEC 14496-12:2012, Section 8.8.8.1
(function () {
  var audioTrun, videoTrun, trunHeader;

  // Common trun header: version/flags, sample_count, and data_offset.
  // The flag bits advertise which per-sample fields follow.
  trunHeader = function trunHeader(samples, offset) {
    var durationPresent = 0,
        sizePresent = 0,
        flagsPresent = 0,
        compositionTimeOffset = 0; // trun flag constants

    if (samples.length) {
      if (samples[0].duration !== undefined) {
        durationPresent = 0x1;
      }

      if (samples[0].size !== undefined) {
        sizePresent = 0x2;
      }

      if (samples[0].flags !== undefined) {
        flagsPresent = 0x4;
      }

      if (samples[0].compositionTimeOffset !== undefined) {
        compositionTimeOffset = 0x8;
      }
    }

    return [0x00, // version 0
    0x00, durationPresent | sizePresent | flagsPresent | compositionTimeOffset, 0x01, // flags
    (samples.length & 0xFF000000) >>> 24, (samples.length & 0xFF0000) >>> 16, (samples.length & 0xFF00) >>> 8, samples.length & 0xFF, // sample_count
    (offset & 0xFF000000) >>> 24, (offset & 0xFF0000) >>> 16, (offset & 0xFF00) >>> 8, offset & 0xFF // data_offset
    ];
  };

  videoTrun = function videoTrun(track, offset) {
    var bytes, samples, sample, i;
    samples = track.samples || [];
    // account for the trun box header (8) + trun fields (12) and 16 bytes
    // per video sample when computing the data offset
    offset += 8 + 12 + 16 * samples.length;
    bytes = trunHeader(samples, offset);

    for (i = 0; i < samples.length; i++) {
      sample = samples[i];
      // NOTE(review): '<<' binds tighter than '&', so the third flags byte
      // below evaluates as degradationPriority & 0xF000, not
      // (degradationPriority & 0xF0) << 8 — confirm against upstream mux.js.
      bytes = bytes.concat([(sample.duration & 0xFF000000) >>> 24, (sample.duration & 0xFF0000) >>> 16, (sample.duration & 0xFF00) >>> 8, sample.duration & 0xFF, // sample_duration
      (sample.size & 0xFF000000) >>> 24, (sample.size & 0xFF0000) >>> 16, (sample.size & 0xFF00) >>> 8, sample.size & 0xFF, // sample_size
      sample.flags.isLeading << 2 | sample.flags.dependsOn, sample.flags.isDependedOn << 6 | sample.flags.hasRedundancy << 4 | sample.flags.paddingValue << 1 | sample.flags.isNonSyncSample, sample.flags.degradationPriority & 0xF0 << 8, sample.flags.degradationPriority & 0x0F, // sample_flags
      (sample.compositionTimeOffset & 0xFF000000) >>> 24, (sample.compositionTimeOffset & 0xFF0000) >>> 16, (sample.compositionTimeOffset & 0xFF00) >>> 8, sample.compositionTimeOffset & 0xFF // sample_composition_time_offset
      ]);
    }

    return box(types.trun, new Uint8Array(bytes));
  };

  audioTrun = function audioTrun(track, offset) {
    var bytes, samples, sample, i;
    samples = track.samples || [];
    // audio samples carry only duration and size: 8 bytes per sample
    offset += 8 + 12 + 8 * samples.length;
    bytes = trunHeader(samples, offset);

    for (i = 0; i < samples.length; i++) {
      sample = samples[i];
      bytes = bytes.concat([(sample.duration & 0xFF000000) >>> 24, (sample.duration & 0xFF0000) >>> 16, (sample.duration & 0xFF00) >>> 8, sample.duration & 0xFF, // sample_duration
      (sample.size & 0xFF000000) >>> 24, (sample.size & 0xFF0000) >>> 16, (sample.size & 0xFF00) >>> 8, sample.size & 0xFF]); // sample_size
    }

    return box(types.trun, new Uint8Array(bytes));
  };

  // dispatch on track type
  trun = function trun(track, offset) {
    if (track.type === 'audio') {
      return audioTrun(track, offset);
    }

    return videoTrun(track, offset);
  };
})();
|
|
|
// Public interface of the fragmented-MP4 generator.
var mp4Generator = {
  ftyp: ftyp,
  mdat: mdat,
  moof: moof,
  moov: moov,
  // Concatenates the ftyp and moov boxes into a complete init segment.
  initSegment: function initSegment(tracks) {
    var fileType = ftyp(),
        movie = moov(tracks),
        result;
    result = new Uint8Array(fileType.byteLength + movie.byteLength);
    result.set(fileType);
    result.set(movie, fileType.byteLength);
    return result;
  }
};
|
|
|
// Coerce a (possibly negative) 32-bit value to its unsigned interpretation.
var toUnsigned = function toUnsigned(value) {
  return value >>> 0;
};

var bin = {
  toUnsigned: toUnsigned
};
var toUnsigned$1 = bin.toUnsigned;
|
|
|
var _findBox, parseType, timescale, startTime, getVideoTrackIds; // Find the data for a box specified by its path

// Walks the MP4 box tree in `data`, returning the payloads (header
// stripped) of every box matching the given path of box types.
_findBox = function findBox(data, path) {
  var results = [],
      i,
      size,
      type,
      end,
      subresults;

  if (!path.length) {
    // short-circuit the search for empty paths
    return null;
  }

  for (i = 0; i < data.byteLength;) {
    // each box begins with a 32-bit big-endian size and a 4-character type
    size = toUnsigned$1(data[i] << 24 | data[i + 1] << 16 | data[i + 2] << 8 | data[i + 3]);
    type = parseType(data.subarray(i + 4, i + 8));
    // sizes of 0 and 1 are treated as "extends to end of data"
    // (64-bit largesize boxes are not handled here)
    end = size > 1 ? i + size : data.byteLength;

    if (type === path[0]) {
      if (path.length === 1) {
        // this is the end of the path and we've found the box we were
        // looking for
        results.push(data.subarray(i + 8, end));
      } else {
        // recursively search for the next box along the path
        subresults = _findBox(data.subarray(i + 8, end), path.slice(1));

        if (subresults.length) {
          results = results.concat(subresults);
        }
      }
    }

    i = end;
  } // we've finished searching all of data

  return results;
};
|
/** |
|
* Returns the string representation of an ASCII encoded four byte buffer. |
|
* @param buffer {Uint8Array} a four-byte buffer to translate |
|
* @return {string} the corresponding string |
|
*/ |
|
|
|
|
|
/**
 * Returns the string representation of an ASCII encoded four byte buffer.
 * @param buffer {Uint8Array} a four-byte buffer to translate
 * @return {string} the corresponding string
 */
parseType = function parseType(buffer) {
  // fromCharCode accepts multiple code units at once
  return String.fromCharCode(buffer[0], buffer[1], buffer[2], buffer[3]);
};
|
/** |
|
* Parses an MP4 initialization segment and extracts the timescale |
|
* values for any declared tracks. Timescale values indicate the |
|
* number of clock ticks per second to assume for time-based values |
|
* elsewhere in the MP4. |
|
* |
|
* To determine the start time of an MP4, you need two pieces of |
|
* information: the timescale unit and the earliest base media decode |
|
* time. Multiple timescales can be specified within an MP4 but the |
|
* base media decode time is always expressed in the timescale from |
|
* the media header box for the track: |
|
* ``` |
|
* moov > trak > mdia > mdhd.timescale |
|
* ``` |
|
* @param init {Uint8Array} the bytes of the init segment |
|
* @return {object} a hash of track ids to timescale values or null if |
|
* the init segment is malformed. |
|
*/ |
|
|
|
|
|
/**
 * Parses an MP4 initialization segment and extracts the mdhd timescale
 * for each declared track.
 *
 * @param init {Uint8Array} the bytes of the init segment
 * @return {object} a hash of track ids to timescale values, or null if
 * the init segment is malformed (a trak is missing its tkhd or mdhd).
 */
timescale = function timescale(init) {
  var result = {},
      traks = _findBox(init, ['moov', 'trak']); // mdhd timescale

  return traks.reduce(function (result, trak) {
    var tkhd, version, index, id, mdhd;

    // once a malformed trak has produced null, keep propagating it:
    // indexing into a null accumulator would otherwise throw a TypeError
    if (result === null) {
      return null;
    }

    tkhd = _findBox(trak, ['tkhd'])[0];

    if (!tkhd) {
      return null;
    }

    // the track_ID offset within tkhd depends on the box version
    version = tkhd[0];
    index = version === 0 ? 12 : 20;
    id = toUnsigned$1(tkhd[index] << 24 | tkhd[index + 1] << 16 | tkhd[index + 2] << 8 | tkhd[index + 3]);
    mdhd = _findBox(trak, ['mdia', 'mdhd'])[0];

    if (!mdhd) {
      return null;
    }

    // likewise for the timescale offset within mdhd
    version = mdhd[0];
    index = version === 0 ? 12 : 20;
    result[id] = toUnsigned$1(mdhd[index] << 24 | mdhd[index + 1] << 16 | mdhd[index + 2] << 8 | mdhd[index + 3]);
    return result;
  }, result);
};
|
/** |
|
* Determine the base media decode start time, in seconds, for an MP4 |
|
* fragment. If multiple fragments are specified, the earliest time is |
|
* returned. |
|
* |
|
* The base media decode time can be parsed from track fragment |
|
* metadata: |
|
* ``` |
|
* moof > traf > tfdt.baseMediaDecodeTime |
|
* ``` |
|
* It requires the timescale value from the mdhd to interpret. |
|
* |
|
* @param timescale {object} a hash of track ids to timescale values. |
|
* @return {number} the earliest base media decode start time for the |
|
* fragment, in seconds |
|
*/ |
|
|
|
|
|
// Determine the earliest base media decode start time, in seconds, across
// every track fragment in the given moof/mdat fragment.
startTime = function startTime(timescale, fragment) {
  var trafs, baseTimes, result; // we need info from two children of each track fragment box

  trafs = _findBox(fragment, ['moof', 'traf']); // determine the start times for each track

  baseTimes = [].concat.apply([], trafs.map(function (traf) {
    return _findBox(traf, ['tfhd']).map(function (tfhd) {
      var id, scale, baseTime; // get the track id from the tfhd

      id = toUnsigned$1(tfhd[4] << 24 | tfhd[5] << 16 | tfhd[6] << 8 | tfhd[7]); // assume a 90kHz clock if no timescale was specified

      scale = timescale[id] || 90e3; // get the base media decode time from the tfdt

      baseTime = _findBox(traf, ['tfdt']).map(function (tfdt) {
        var version, result;
        version = tfdt[0];
        result = toUnsigned$1(tfdt[4] << 24 | tfdt[5] << 16 | tfdt[6] << 8 | tfdt[7]);

        // version 1 tfdt boxes carry a 64-bit time: fold in the low word
        if (version === 1) {
          result *= Math.pow(2, 32);
          result += toUnsigned$1(tfdt[8] << 24 | tfdt[9] << 16 | tfdt[10] << 8 | tfdt[11]);
        }

        return result;
      })[0];
      // a missing tfdt contributes Infinity so it never wins the minimum
      baseTime = baseTime || Infinity; // convert base time to seconds

      return baseTime / scale;
    });
  })); // return the minimum

  result = Math.min.apply(null, baseTimes);
  return isFinite(result) ? result : 0;
};
|
/** |
|
* Find the trackIds of the video tracks in this source. |
|
* Found by parsing the Handler Reference and Track Header Boxes: |
|
* moov > trak > mdia > hdlr |
|
* moov > trak > tkhd |
|
* |
|
* @param {Uint8Array} init - The bytes of the init segment for this source |
|
* @return {Number[]} A list of trackIds |
|
* |
|
* @see ISO-BMFF-12/2015, Section 8.4.3 |
|
**/ |
|
|
|
|
|
getVideoTrackIds = function getVideoTrackIds(init) {
  var traks = _findBox(init, ['moov', 'trak']);

  var videoTrackIds = [];
  traks.forEach(function (trak) {
    var hdlrs = _findBox(trak, ['mdia', 'hdlr']);

    var tkhds = _findBox(trak, ['tkhd']);

    // pair each handler box with the tkhd at the same index in the trak
    hdlrs.forEach(function (hdlr, index) {
      var handlerType = parseType(hdlr.subarray(8, 12));
      var tkhd = tkhds[index];
      var view;
      var version;
      var trackId;

      if (handlerType === 'vide') {
        // the track_ID offset within tkhd depends on the box version
        view = new DataView(tkhd.buffer, tkhd.byteOffset, tkhd.byteLength);
        version = view.getUint8(0);
        trackId = version === 0 ? view.getUint32(12) : view.getUint32(20);
        videoTrackIds.push(trackId);
      }
    });
  });
  return videoTrackIds;
};
|
|
|
// Public probe interface for inspecting MP4 metadata.
var probe$$1 = {
  findBox: _findBox,
  parseType: parseType,
  timescale: timescale,
  startTime: startTime,
  videoTrackIds: getVideoTrackIds
};
|
/** |
|
* mux.js |
|
* |
|
* Copyright (c) 2014 Brightcove |
|
* All rights reserved. |
|
* |
|
* A lightweight readable stream implemention that handles event dispatching. |
|
* Objects that inherit from streams should call init in their constructors. |
|
*/ |
|
|
|
/**
 * A lightweight readable stream implementation that handles event
 * dispatching. Objects that inherit from streams should call init in
 * their constructors.
 */
var Stream = function Stream() {
  this.init = function () {
    var listeners = {};
    /**
     * Add a listener for a specified event type.
     * @param type {string} the event name
     * @param listener {function} the callback to be invoked when an event of
     * the specified type occurs
     */

    this.on = function (type, listener) {
      if (!listeners[type]) {
        listeners[type] = [];
      }

      // concat returns a new array so an in-flight trigger() keeps
      // iterating its own snapshot
      listeners[type] = listeners[type].concat(listener);
    };
    /**
     * Remove a listener for a specified event type.
     * @param type {string} the event name
     * @param listener {function} a function previously registered for this
     * type of event through `on`
     * @return {boolean} true if the listener was registered and removed
     */

    this.off = function (type, listener) {
      var index;

      if (!listeners[type]) {
        return false;
      }

      index = listeners[type].indexOf(listener);
      // copy-on-write so removal does not disturb an in-flight trigger()
      listeners[type] = listeners[type].slice();

      // only splice when the listener was actually found; splice(-1, 1)
      // would otherwise remove the *last* registered listener by mistake
      if (index > -1) {
        listeners[type].splice(index, 1);
      }

      return index > -1;
    };
    /**
     * Trigger an event of the specified type on this stream. Any additional
     * arguments to this function are passed as parameters to event listeners.
     * @param type {string} the event name
     */

    this.trigger = function (type) {
      var callbacks, i, length, args;
      callbacks = listeners[type];

      if (!callbacks) {
        return;
      } // Slicing the arguments on every invocation of this method
      // can add a significant amount of overhead. Avoid the
      // intermediate object creation for the common case of a
      // single callback argument

      if (arguments.length === 2) {
        length = callbacks.length;

        for (i = 0; i < length; ++i) {
          callbacks[i].call(this, arguments[1]);
        }
      } else {
        args = [];

        for (i = 1; i < arguments.length; ++i) {
          args.push(arguments[i]);
        }

        length = callbacks.length;

        for (i = 0; i < length; ++i) {
          callbacks[i].apply(this, args);
        }
      }
    };
    /**
     * Destroys the stream and cleans up.
     */

    this.dispose = function () {
      listeners = {};
    };
  };
};
|
/** |
|
* Forwards all `data` events on this stream to the destination stream. The |
|
* destination stream should provide a method `push` to receive the data |
|
* events as they arrive. |
|
* @param destination {stream} the stream that will receive all `data` events |
|
* @param autoFlush {boolean} if false, we will not call `flush` on the destination |
|
* when the current stream emits a 'done' event |
|
* @see http://nodejs.org/api/stream.html#stream_readable_pipe_destination_options |
|
*/ |
|
|
|
|
|
Stream.prototype.pipe = function (destination) {
  // forward every data event, and flush the destination on done
  var forwardData = function (data) {
    destination.push(data);
  };
  var forwardDone = function (flushSource) {
    destination.flush(flushSource);
  };

  this.on('data', forwardData);
  this.on('done', forwardDone);
  return destination;
}; // Default stream functions that are expected to be overridden to perform
// actual work. These are provided by the prototype as a sort of no-op
// implementation so that we don't have to check for their existence in the
// `pipe` function above.
|
|
|
|
|
// Re-emit incoming data so downstream consumers receive it unchanged.
Stream.prototype.push = function (data) {
  this.trigger('data', data);
};

// Signal that no more data is coming; `flushSource` identifies the stream
// that initiated the flush.
Stream.prototype.flush = function (flushSource) {
  this.trigger('done', flushSource);
};
|
|
|
  var stream = Stream; // Convert an array of nal units into an array of frames with each frame being
  // composed of the nal units that make up that frame
  // Also keep track of cumulative data about the frame from the nal units such
  // as the frame duration, starting pts, etc.
|
|
|
var groupNalsIntoFrames = function groupNalsIntoFrames(nalUnits) { |
|
var i, |
|
currentNal, |
|
currentFrame = [], |
|
frames = []; |
|
currentFrame.byteLength = 0; |
|
|
|
for (i = 0; i < nalUnits.length; i++) { |
|
currentNal = nalUnits[i]; // Split on 'aud'-type nal units |
|
|
|
if (currentNal.nalUnitType === 'access_unit_delimiter_rbsp') { |
|
// Since the very first nal unit is expected to be an AUD |
|
// only push to the frames array when currentFrame is not empty |
|
if (currentFrame.length) { |
|
currentFrame.duration = currentNal.dts - currentFrame.dts; |
|
frames.push(currentFrame); |
|
} |
|
|
|
currentFrame = [currentNal]; |
|
currentFrame.byteLength = currentNal.data.byteLength; |
|
currentFrame.pts = currentNal.pts; |
|
currentFrame.dts = currentNal.dts; |
|
} else { |
|
// Specifically flag key frames for ease of use later |
|
if (currentNal.nalUnitType === 'slice_layer_without_partitioning_rbsp_idr') { |
|
currentFrame.keyFrame = true; |
|
} |
|
|
|
currentFrame.duration = currentNal.dts - currentFrame.dts; |
|
currentFrame.byteLength += currentNal.data.byteLength; |
|
currentFrame.push(currentNal); |
|
} |
|
} // For the last frame, use the duration of the previous frame if we |
|
// have nothing better to go on |
|
|
|
|
|
if (frames.length && (!currentFrame.duration || currentFrame.duration <= 0)) { |
|
currentFrame.duration = frames[frames.length - 1].duration; |
|
} // Push the final frame |
|
|
|
|
|
frames.push(currentFrame); |
|
return frames; |
|
}; // Convert an array of frames into an array of Gop with each Gop being composed |
|
// of the frames that make up that Gop |
|
// Also keep track of cummulative data about the Gop from the frames such as the |
|
// Gop duration, starting pts, etc. |
|
|
|
|
|
  // Group frames into GOPs, splitting on key frames. Each GOP is an array of
  // frames carrying running totals (byteLength, nalCount, duration) plus the
  // starting pts/dts as array properties; the outer `gops` array carries the
  // same totals aggregated across all GOPs. The order of the total updates is
  // significant — do not reorder.
  var groupFramesIntoGops = function groupFramesIntoGops(frames) {
    var i,
        currentFrame,
        currentGop = [],
        gops = []; // We must pre-set some of the values on the Gop since we
    // keep running totals of these values

    currentGop.byteLength = 0;
    currentGop.nalCount = 0;
    currentGop.duration = 0;
    currentGop.pts = frames[0].pts;
    currentGop.dts = frames[0].dts; // store some metadata about all the Gops

    gops.byteLength = 0;
    gops.nalCount = 0;
    gops.duration = 0;
    gops.pts = frames[0].pts;
    gops.dts = frames[0].dts;

    for (i = 0; i < frames.length; i++) {
      currentFrame = frames[i];

      if (currentFrame.keyFrame) {
        // Since the very first frame is expected to be a keyframe, only push
        // to the gops array when currentGop is not empty
        if (currentGop.length) {
          gops.push(currentGop);
          gops.byteLength += currentGop.byteLength;
          gops.nalCount += currentGop.nalCount;
          gops.duration += currentGop.duration;
        }

        // start a fresh GOP seeded with this keyframe's metadata
        currentGop = [currentFrame];
        currentGop.nalCount = currentFrame.length;
        currentGop.byteLength = currentFrame.byteLength;
        currentGop.pts = currentFrame.pts;
        currentGop.dts = currentFrame.dts;
        currentGop.duration = currentFrame.duration;
      } else {
        // non-key frame: accumulate into the in-progress GOP
        currentGop.duration += currentFrame.duration;
        currentGop.nalCount += currentFrame.length;
        currentGop.byteLength += currentFrame.byteLength;
        currentGop.push(currentFrame);
      }
    }

    // borrow the previous GOP's duration when the last one has none
    if (gops.length && currentGop.duration <= 0) {
      currentGop.duration = gops[gops.length - 1].duration;
    }

    // fold the final GOP into the aggregate totals and push it
    gops.byteLength += currentGop.byteLength;
    gops.nalCount += currentGop.nalCount;
    gops.duration += currentGop.duration; // push the final Gop

    gops.push(currentGop);
    return gops;
  };
|
/* |
|
* Search for the first keyframe in the GOPs and throw away all frames |
|
* until that keyframe. Then extend the duration of the pulled keyframe |
|
* and pull the PTS and DTS of the keyframe so that it covers the time |
|
* range of the frames that were disposed. |
|
* |
|
* @param {Array} gops video GOPs |
|
* @returns {Array} modified video GOPs |
|
*/ |
|
|
|
|
|
var extendFirstKeyFrame = function extendFirstKeyFrame(gops) { |
|
var currentGop; |
|
|
|
if (!gops[0][0].keyFrame && gops.length > 1) { |
|
// Remove the first GOP |
|
currentGop = gops.shift(); |
|
gops.byteLength -= currentGop.byteLength; |
|
gops.nalCount -= currentGop.nalCount; // Extend the first frame of what is now the |
|
// first gop to cover the time period of the |
|
// frames we just removed |
|
|
|
gops[0][0].dts = currentGop.dts; |
|
gops[0][0].pts = currentGop.pts; |
|
gops[0][0].duration += currentGop.duration; |
|
} |
|
|
|
return gops; |
|
}; |
|
/** |
|
* Default sample object |
|
* see ISO/IEC 14496-12:2012, section 8.6.4.3 |
|
*/ |
|
|
|
|
|
var createDefaultSample = function createDefaultSample() { |
|
return { |
|
size: 0, |
|
flags: { |
|
isLeading: 0, |
|
dependsOn: 1, |
|
isDependedOn: 0, |
|
hasRedundancy: 0, |
|
degradationPriority: 0, |
|
isNonSyncSample: 1 |
|
} |
|
}; |
|
}; |
|
/* |
|
* Collates information from a video frame into an object for eventual |
|
* entry into an MP4 sample table. |
|
* |
|
* @param {Object} frame the video frame |
|
* @param {Number} dataOffset the byte offset to position the sample |
|
* @return {Object} object containing sample table info for a frame |
|
*/ |
|
|
|
|
|
var sampleForFrame = function sampleForFrame(frame, dataOffset) { |
|
var sample = createDefaultSample(); |
|
sample.dataOffset = dataOffset; |
|
sample.compositionTimeOffset = frame.pts - frame.dts; |
|
sample.duration = frame.duration; |
|
sample.size = 4 * frame.length; // Space for nal unit size |
|
|
|
sample.size += frame.byteLength; |
|
|
|
if (frame.keyFrame) { |
|
sample.flags.dependsOn = 2; |
|
sample.flags.isNonSyncSample = 0; |
|
} |
|
|
|
return sample; |
|
}; // generate the track's sample table from an array of gops |
|
|
|
|
|
var generateSampleTable = function generateSampleTable(gops, baseDataOffset) { |
|
var h, |
|
i, |
|
sample, |
|
currentGop, |
|
currentFrame, |
|
dataOffset = baseDataOffset || 0, |
|
samples = []; |
|
|
|
for (h = 0; h < gops.length; h++) { |
|
currentGop = gops[h]; |
|
|
|
for (i = 0; i < currentGop.length; i++) { |
|
currentFrame = currentGop[i]; |
|
sample = sampleForFrame(currentFrame, dataOffset); |
|
dataOffset += sample.size; |
|
samples.push(sample); |
|
} |
|
} |
|
|
|
return samples; |
|
}; // generate the track's raw mdat data from an array of gops |
|
|
|
|
|
var concatenateNalData = function concatenateNalData(gops) { |
|
var h, |
|
i, |
|
j, |
|
currentGop, |
|
currentFrame, |
|
currentNal, |
|
dataOffset = 0, |
|
nalsByteLength = gops.byteLength, |
|
numberOfNals = gops.nalCount, |
|
totalByteLength = nalsByteLength + 4 * numberOfNals, |
|
data = new Uint8Array(totalByteLength), |
|
view = new DataView(data.buffer); // For each Gop.. |
|
|
|
for (h = 0; h < gops.length; h++) { |
|
currentGop = gops[h]; // For each Frame.. |
|
|
|
for (i = 0; i < currentGop.length; i++) { |
|
currentFrame = currentGop[i]; // For each NAL.. |
|
|
|
for (j = 0; j < currentFrame.length; j++) { |
|
currentNal = currentFrame[j]; |
|
view.setUint32(dataOffset, currentNal.data.byteLength); |
|
dataOffset += 4; |
|
data.set(currentNal.data, dataOffset); |
|
dataOffset += currentNal.data.byteLength; |
|
} |
|
} |
|
} |
|
|
|
return data; |
|
}; |
|
|
|
  // Public surface of the frame utilities: helpers for turning parsed h264
  // NAL units into frames, GOPs, mp4 sample tables and raw mdat payloads.
  var frameUtils = {
    groupNalsIntoFrames: groupNalsIntoFrames,
    groupFramesIntoGops: groupFramesIntoGops,
    extendFirstKeyFrame: extendFirstKeyFrame,
    generateSampleTable: generateSampleTable,
    concatenateNalData: concatenateNalData
  };
|
  // Leading bytes shared by the pregenerated silent AAC frames in
  // coneOfSilence below; which prefix applies depends on the sample rate.
  // NOTE(review): values presumably encode the AAC frame header for a silent
  // frame — confirm against the mux.js source these tables were generated from.
  var highPrefix = [33, 16, 5, 32, 164, 27];
  var lowPrefix = [33, 65, 108, 84, 1, 2, 4, 8, 168, 2, 4, 8, 17, 191, 252];
|
|
|
var zeroFill = function zeroFill(count) { |
|
var a = []; |
|
|
|
while (count--) { |
|
a.push(0); |
|
} |
|
|
|
return a; |
|
}; |
|
|
|
var makeTable = function makeTable(metaTable) { |
|
return Object.keys(metaTable).reduce(function (obj, key) { |
|
obj[key] = new Uint8Array(metaTable[key].reduce(function (arr, part) { |
|
return arr.concat(part); |
|
}, [])); |
|
return obj; |
|
}, {}); |
|
}; // Frames-of-silence to use for filling in missing AAC frames |
|
|
|
|
|
  // Frames-of-silence used to fill in missing AAC frames, keyed by sample
  // rate. Each entry is a list of byte runs (prefix, literal bytes, zero
  // padding) that makeTable flattens into one Uint8Array per sample rate.
  var coneOfSilence = {
    96000: [highPrefix, [227, 64], zeroFill(154), [56]],
    88200: [highPrefix, [231], zeroFill(170), [56]],
    64000: [highPrefix, [248, 192], zeroFill(240), [56]],
    48000: [highPrefix, [255, 192], zeroFill(268), [55, 148, 128], zeroFill(54), [112]],
    44100: [highPrefix, [255, 192], zeroFill(268), [55, 163, 128], zeroFill(84), [112]],
    32000: [highPrefix, [255, 192], zeroFill(268), [55, 234], zeroFill(226), [112]],
    24000: [highPrefix, [255, 192], zeroFill(268), [55, 255, 128], zeroFill(268), [111, 112], zeroFill(126), [224]],
    16000: [highPrefix, [255, 192], zeroFill(268), [55, 255, 128], zeroFill(268), [111, 255], zeroFill(269), [223, 108], zeroFill(195), [1, 192]],
    12000: [lowPrefix, zeroFill(268), [3, 127, 248], zeroFill(268), [6, 255, 240], zeroFill(268), [13, 255, 224], zeroFill(268), [27, 253, 128], zeroFill(259), [56]],
    11025: [lowPrefix, zeroFill(268), [3, 127, 248], zeroFill(268), [6, 255, 240], zeroFill(268), [13, 255, 224], zeroFill(268), [27, 255, 192], zeroFill(268), [55, 175, 128], zeroFill(108), [112]],
    8000: [lowPrefix, zeroFill(268), [3, 121, 16], zeroFill(47), [7]]
  };
  // Pregenerated silent AAC frame (Uint8Array) per supported sample rate
  var silence = makeTable(coneOfSilence);
|
var ONE_SECOND_IN_TS = 90000, |
|
// 90kHz clock |
|
secondsToVideoTs, |
|
secondsToAudioTs, |
|
videoTsToSeconds, |
|
audioTsToSeconds, |
|
audioTsToVideoTs, |
|
videoTsToAudioTs; |
|
|
|
secondsToVideoTs = function secondsToVideoTs(seconds) { |
|
return seconds * ONE_SECOND_IN_TS; |
|
}; |
|
|
|
secondsToAudioTs = function secondsToAudioTs(seconds, sampleRate) { |
|
return seconds * sampleRate; |
|
}; |
|
|
|
videoTsToSeconds = function videoTsToSeconds(timestamp) { |
|
return timestamp / ONE_SECOND_IN_TS; |
|
}; |
|
|
|
audioTsToSeconds = function audioTsToSeconds(timestamp, sampleRate) { |
|
return timestamp / sampleRate; |
|
}; |
|
|
|
audioTsToVideoTs = function audioTsToVideoTs(timestamp, sampleRate) { |
|
return secondsToVideoTs(audioTsToSeconds(timestamp, sampleRate)); |
|
}; |
|
|
|
videoTsToAudioTs = function videoTsToAudioTs(timestamp, sampleRate) { |
|
return secondsToAudioTs(videoTsToSeconds(timestamp), sampleRate); |
|
}; |
|
|
|
var clock = { |
|
secondsToVideoTs: secondsToVideoTs, |
|
secondsToAudioTs: secondsToAudioTs, |
|
videoTsToSeconds: videoTsToSeconds, |
|
audioTsToSeconds: audioTsToSeconds, |
|
audioTsToVideoTs: audioTsToVideoTs, |
|
videoTsToAudioTs: videoTsToAudioTs |
|
}; |
|
var ONE_SECOND_IN_TS$1 = 90000; // 90kHz clock |
|
|
|
/** |
|
* Sum the `byteLength` properties of the data in each AAC frame |
|
*/ |
|
|
|
var sumFrameByteLengths = function sumFrameByteLengths(array) { |
|
var i, |
|
currentObj, |
|
sum = 0; // sum the byteLength's all each nal unit in the frame |
|
|
|
for (i = 0; i < array.length; i++) { |
|
currentObj = array[i]; |
|
sum += currentObj.data.byteLength; |
|
} |
|
|
|
return sum; |
|
}; // Possibly pad (prefix) the audio track with silence if appending this track |
|
// would lead to the introduction of a gap in the audio buffer |
|
|
|
|
|
  // Possibly pad (prefix) the audio track with silent AAC frames if appending
  // this track would otherwise introduce a gap in the audio buffer. Mutates
  // both `frames` (silent frames spliced in at the front) and
  // `track.baseMediaDecodeTime` (moved earlier by the inserted duration).
  var prefixWithSilence = function prefixWithSilence(track, frames, audioAppendStartTs, videoBaseMediaDecodeTime) {
    var baseMediaDecodeTimeTs,
        frameDuration = 0,
        audioGapDuration = 0,
        audioFillFrameCount = 0,
        audioFillDuration = 0,
        silentFrame,
        i;

    // nothing to pad
    if (!frames.length) {
      return;
    }

    baseMediaDecodeTimeTs = clock.audioTsToVideoTs(track.baseMediaDecodeTime, track.samplerate); // determine frame clock duration based on sample rate, round up to avoid overfills

    // one AAC frame decodes to 1024 samples; express that in 90kHz ticks
    frameDuration = Math.ceil(ONE_SECOND_IN_TS$1 / (track.samplerate / 1024));

    if (audioAppendStartTs && videoBaseMediaDecodeTime) {
      // insert the shortest possible amount (audio gap or audio to video gap)
      audioGapDuration = baseMediaDecodeTimeTs - Math.max(audioAppendStartTs, videoBaseMediaDecodeTime); // number of full frames in the audio gap

      audioFillFrameCount = Math.floor(audioGapDuration / frameDuration);
      audioFillDuration = audioFillFrameCount * frameDuration;
    } // don't attempt to fill gaps smaller than a single frame or larger
    // than a half second

    if (audioFillFrameCount < 1 || audioFillDuration > ONE_SECOND_IN_TS$1 / 2) {
      return;
    }

    silentFrame = silence[track.samplerate];

    if (!silentFrame) {
      // we don't have a silent frame pregenerated for the sample rate, so use a frame
      // from the content instead
      silentFrame = frames[0].data;
    }

    // splice the fill frames in at the front of the segment
    for (i = 0; i < audioFillFrameCount; i++) {
      frames.splice(i, 0, {
        data: silentFrame
      });
    }

    // move the segment start earlier to account for the inserted silence
    track.baseMediaDecodeTime -= Math.floor(clock.videoTsToAudioTs(audioFillDuration, track.samplerate));
  }; // If the audio segment extends before the earliest allowed dts
|
// value, remove AAC frames until starts at or after the earliest |
|
// allowed DTS so that we don't end up with a negative baseMedia- |
|
// DecodeTime for the audio track |
|
|
|
|
|
var trimAdtsFramesByEarliestDts = function trimAdtsFramesByEarliestDts(adtsFrames, track, earliestAllowedDts) { |
|
if (track.minSegmentDts >= earliestAllowedDts) { |
|
return adtsFrames; |
|
} // We will need to recalculate the earliest segment Dts |
|
|
|
|
|
track.minSegmentDts = Infinity; |
|
return adtsFrames.filter(function (currentFrame) { |
|
// If this is an allowed frame, keep it and record it's Dts |
|
if (currentFrame.dts >= earliestAllowedDts) { |
|
track.minSegmentDts = Math.min(track.minSegmentDts, currentFrame.dts); |
|
track.minSegmentPts = track.minSegmentDts; |
|
return true; |
|
} // Otherwise, discard it |
|
|
|
|
|
return false; |
|
}); |
|
}; // generate the track's raw mdat data from an array of frames |
|
|
|
|
|
var generateSampleTable$1 = function generateSampleTable(frames) { |
|
var i, |
|
currentFrame, |
|
samples = []; |
|
|
|
for (i = 0; i < frames.length; i++) { |
|
currentFrame = frames[i]; |
|
samples.push({ |
|
size: currentFrame.data.byteLength, |
|
duration: 1024 // For AAC audio, all samples contain 1024 samples |
|
|
|
}); |
|
} |
|
|
|
return samples; |
|
}; // generate the track's sample table from an array of frames |
|
|
|
|
|
var concatenateFrameData = function concatenateFrameData(frames) { |
|
var i, |
|
currentFrame, |
|
dataOffset = 0, |
|
data = new Uint8Array(sumFrameByteLengths(frames)); |
|
|
|
for (i = 0; i < frames.length; i++) { |
|
currentFrame = frames[i]; |
|
data.set(currentFrame.data, dataOffset); |
|
dataOffset += currentFrame.data.byteLength; |
|
} |
|
|
|
return data; |
|
}; |
|
|
|
  // Public surface of the audio frame utilities: gap padding, dts trimming,
  // sample-table generation and mdat concatenation for AAC tracks.
  var audioFrameUtils = {
    prefixWithSilence: prefixWithSilence,
    trimAdtsFramesByEarliestDts: trimAdtsFramesByEarliestDts,
    generateSampleTable: generateSampleTable$1,
    concatenateFrameData: concatenateFrameData
  };
|
var ONE_SECOND_IN_TS$2 = 90000; // 90kHz clock |
|
|
|
/** |
|
* Store information about the start and end of the track and the |
|
* duration for each frame/sample we process in order to calculate |
|
* the baseMediaDecodeTime |
|
*/ |
|
|
|
var collectDtsInfo = function collectDtsInfo(track, data) { |
|
if (typeof data.pts === 'number') { |
|
if (track.timelineStartInfo.pts === undefined) { |
|
track.timelineStartInfo.pts = data.pts; |
|
} |
|
|
|
if (track.minSegmentPts === undefined) { |
|
track.minSegmentPts = data.pts; |
|
} else { |
|
track.minSegmentPts = Math.min(track.minSegmentPts, data.pts); |
|
} |
|
|
|
if (track.maxSegmentPts === undefined) { |
|
track.maxSegmentPts = data.pts; |
|
} else { |
|
track.maxSegmentPts = Math.max(track.maxSegmentPts, data.pts); |
|
} |
|
} |
|
|
|
if (typeof data.dts === 'number') { |
|
if (track.timelineStartInfo.dts === undefined) { |
|
track.timelineStartInfo.dts = data.dts; |
|
} |
|
|
|
if (track.minSegmentDts === undefined) { |
|
track.minSegmentDts = data.dts; |
|
} else { |
|
track.minSegmentDts = Math.min(track.minSegmentDts, data.dts); |
|
} |
|
|
|
if (track.maxSegmentDts === undefined) { |
|
track.maxSegmentDts = data.dts; |
|
} else { |
|
track.maxSegmentDts = Math.max(track.maxSegmentDts, data.dts); |
|
} |
|
} |
|
}; |
|
/** |
|
* Clear values used to calculate the baseMediaDecodeTime between |
|
* tracks |
|
*/ |
|
|
|
|
|
var clearDtsInfo = function clearDtsInfo(track) { |
|
delete track.minSegmentDts; |
|
delete track.maxSegmentDts; |
|
delete track.minSegmentPts; |
|
delete track.maxSegmentPts; |
|
}; |
|
/** |
|
* Calculate the track's baseMediaDecodeTime based on the earliest |
|
* DTS the transmuxer has ever seen and the minimum DTS for the |
|
* current track |
|
* @param track {object} track metadata configuration |
|
* @param keepOriginalTimestamps {boolean} If true, keep the timestamps |
|
* in the source; false to adjust the first segment to start at 0. |
|
*/ |
|
|
|
|
|
var calculateTrackBaseMediaDecodeTime = function calculateTrackBaseMediaDecodeTime(track, keepOriginalTimestamps) { |
|
var baseMediaDecodeTime, |
|
scale, |
|
minSegmentDts = track.minSegmentDts; // Optionally adjust the time so the first segment starts at zero. |
|
|
|
if (!keepOriginalTimestamps) { |
|
minSegmentDts -= track.timelineStartInfo.dts; |
|
} // track.timelineStartInfo.baseMediaDecodeTime is the location, in time, where |
|
// we want the start of the first segment to be placed |
|
|
|
|
|
baseMediaDecodeTime = track.timelineStartInfo.baseMediaDecodeTime; // Add to that the distance this segment is from the very first |
|
|
|
baseMediaDecodeTime += minSegmentDts; // baseMediaDecodeTime must not become negative |
|
|
|
baseMediaDecodeTime = Math.max(0, baseMediaDecodeTime); |
|
|
|
if (track.type === 'audio') { |
|
// Audio has a different clock equal to the sampling_rate so we need to |
|
// scale the PTS values into the clock rate of the track |
|
scale = track.samplerate / ONE_SECOND_IN_TS$2; |
|
baseMediaDecodeTime *= scale; |
|
baseMediaDecodeTime = Math.floor(baseMediaDecodeTime); |
|
} |
|
|
|
return baseMediaDecodeTime; |
|
}; |
|
|
|
  // Public surface of the track decode-time helpers: collect per-sample
  // pts/dts extrema, clear them between segments, and derive the mp4
  // baseMediaDecodeTime.
  var trackDecodeInfo = {
    clearDtsInfo: clearDtsInfo,
    calculateTrackBaseMediaDecodeTime: calculateTrackBaseMediaDecodeTime,
    collectDtsInfo: collectDtsInfo
  };
|
/** |
|
* mux.js |
|
* |
|
* Copyright (c) 2015 Brightcove |
|
* All rights reserved. |
|
* |
|
* Reads in-band caption information from a video elementary |
|
* stream. Captions must follow the CEA-708 standard for injection |
|
* into an MPEG-2 transport streams. |
|
* @see https://en.wikipedia.org/wiki/CEA-708 |
|
* @see https://www.gpo.gov/fdsys/pkg/CFR-2007-title47-vol1/pdf/CFR-2007-title47-vol1-sec15-119.pdf |
|
*/ |
|
// Supplemental enhancement information (SEI) NAL units have a |
|
// payload type field to indicate how they are to be |
|
// interpreted. CEAS-708 caption content is always transmitted with |
|
// payload type 0x04. |
|
|
|
  // SEI NAL units carry a payload type field: CEA-708 caption content is
  // always payload type 0x04 (user_data_registered_itu_t_t35), and 0x80
  // marks the rbsp_trailing_bits that terminate the SEI payload.
  var USER_DATA_REGISTERED_ITU_T_T35 = 4,
      RBSP_TRAILING_BITS = 128;
|
/** |
|
* Parse a supplemental enhancement information (SEI) NAL unit. |
|
* Stops parsing once a message of type ITU T T35 has been found. |
|
* |
|
* @param bytes {Uint8Array} the bytes of a SEI NAL unit |
|
* @return {object} the parsed SEI payload |
|
* @see Rec. ITU-T H.264, 7.3.2.3.1 |
|
*/ |
|
|
|
var parseSei = function parseSei(bytes) { |
|
var i = 0, |
|
result = { |
|
payloadType: -1, |
|
payloadSize: 0 |
|
}, |
|
payloadType = 0, |
|
payloadSize = 0; // go through the sei_rbsp parsing each each individual sei_message |
|
|
|
while (i < bytes.byteLength) { |
|
// stop once we have hit the end of the sei_rbsp |
|
if (bytes[i] === RBSP_TRAILING_BITS) { |
|
break; |
|
} // Parse payload type |
|
|
|
|
|
while (bytes[i] === 0xFF) { |
|
payloadType += 255; |
|
i++; |
|
} |
|
|
|
payloadType += bytes[i++]; // Parse payload size |
|
|
|
while (bytes[i] === 0xFF) { |
|
payloadSize += 255; |
|
i++; |
|
} |
|
|
|
payloadSize += bytes[i++]; // this sei_message is a 608/708 caption so save it and break |
|
// there can only ever be one caption message in a frame's sei |
|
|
|
if (!result.payload && payloadType === USER_DATA_REGISTERED_ITU_T_T35) { |
|
result.payloadType = payloadType; |
|
result.payloadSize = payloadSize; |
|
result.payload = bytes.subarray(i, i + payloadSize); |
|
break; |
|
} // skip the payload and parse the next message |
|
|
|
|
|
i += payloadSize; |
|
payloadType = 0; |
|
payloadSize = 0; |
|
} |
|
|
|
return result; |
|
}; // see ANSI/SCTE 128-1 (2013), section 8.1 |
|
|
|
|
|
var parseUserData = function parseUserData(sei) { |
|
// itu_t_t35_contry_code must be 181 (United States) for |
|
// captions |
|
if (sei.payload[0] !== 181) { |
|
return null; |
|
} // itu_t_t35_provider_code should be 49 (ATSC) for captions |
|
|
|
|
|
if ((sei.payload[1] << 8 | sei.payload[2]) !== 49) { |
|
return null; |
|
} // the user_identifier should be "GA94" to indicate ATSC1 data |
|
|
|
|
|
if (String.fromCharCode(sei.payload[3], sei.payload[4], sei.payload[5], sei.payload[6]) !== 'GA94') { |
|
return null; |
|
} // finally, user_data_type_code should be 0x03 for caption data |
|
|
|
|
|
if (sei.payload[7] !== 0x03) { |
|
return null; |
|
} // return the user_data_type_structure and strip the trailing |
|
// marker bits |
|
|
|
|
|
return sei.payload.subarray(8, sei.payload.length - 1); |
|
}; // see CEA-708-D, section 4.4 |
|
|
|
|
|
var parseCaptionPackets = function parseCaptionPackets(pts, userData) { |
|
var results = [], |
|
i, |
|
count, |
|
offset, |
|
data; // if this is just filler, return immediately |
|
|
|
if (!(userData[0] & 0x40)) { |
|
return results; |
|
} // parse out the cc_data_1 and cc_data_2 fields |
|
|
|
|
|
count = userData[0] & 0x1f; |
|
|
|
for (i = 0; i < count; i++) { |
|
offset = i * 3; |
|
data = { |
|
type: userData[offset + 2] & 0x03, |
|
pts: pts |
|
}; // capture cc data when cc_valid is 1 |
|
|
|
if (userData[offset + 2] & 0x04) { |
|
data.ccData = userData[offset + 3] << 8 | userData[offset + 4]; |
|
results.push(data); |
|
} |
|
} |
|
|
|
return results; |
|
}; |
|
|
|
var discardEmulationPreventionBytes = function discardEmulationPreventionBytes(data) { |
|
var length = data.byteLength, |
|
emulationPreventionBytesPositions = [], |
|
i = 1, |
|
newLength, |
|
newData; // Find all `Emulation Prevention Bytes` |
|
|
|
while (i < length - 2) { |
|
if (data[i] === 0 && data[i + 1] === 0 && data[i + 2] === 0x03) { |
|
emulationPreventionBytesPositions.push(i + 2); |
|
i += 2; |
|
} else { |
|
i++; |
|
} |
|
} // If no Emulation Prevention Bytes were found just return the original |
|
// array |
|
|
|
|
|
if (emulationPreventionBytesPositions.length === 0) { |
|
return data; |
|
} // Create a new array to hold the NAL unit data |
|
|
|
|
|
newLength = length - emulationPreventionBytesPositions.length; |
|
newData = new Uint8Array(newLength); |
|
var sourceIndex = 0; |
|
|
|
for (i = 0; i < newLength; sourceIndex++, i++) { |
|
if (sourceIndex === emulationPreventionBytesPositions[0]) { |
|
// Skip this byte |
|
sourceIndex++; // Remove this position index |
|
|
|
emulationPreventionBytesPositions.shift(); |
|
} |
|
|
|
newData[i] = data[sourceIndex]; |
|
} |
|
|
|
return newData; |
|
}; // exports |
|
|
|
|
|
  // Public surface of the caption packet parser: SEI parsing, GA94 user-data
  // extraction, cc_data pair parsing, and emulation-prevention-byte removal.
  var captionPacketParser = {
    parseSei: parseSei,
    parseUserData: parseUserData,
    parseCaptionPackets: parseCaptionPackets,
    discardEmulationPreventionBytes: discardEmulationPreventionBytes,
    USER_DATA_REGISTERED_ITU_T_T35: USER_DATA_REGISTERED_ITU_T_T35
  }; // -----------------
|
// Link To Transport |
|
// ----------------- |
|
|
|
  // Stream that extracts CEA-608 caption data from SEI NAL units pushed into
  // it and fans the byte-pairs out to four Cea608Stream instances.
  var CaptionStream = function CaptionStream() {
    CaptionStream.prototype.init.call(this);
    // raw caption byte-pairs buffered until flush() sorts and dispatches them
    this.captionPackets_ = [];
    // one Cea608Stream per (field, data channel) combination, i.e. CC1-CC4
    this.ccStreams_ = [new Cea608Stream(0, 0), // eslint-disable-line no-use-before-define
    new Cea608Stream(0, 1), // eslint-disable-line no-use-before-define
    new Cea608Stream(1, 0), // eslint-disable-line no-use-before-define
    new Cea608Stream(1, 1) // eslint-disable-line no-use-before-define
    ];
    this.reset(); // forward data and done events from CCs to this CaptionStream

    this.ccStreams_.forEach(function (cc) {
      cc.on('data', this.trigger.bind(this, 'data'));
      cc.on('done', this.trigger.bind(this, 'done'));
    }, this);
  };

  CaptionStream.prototype = new stream();
|
|
|
  // Accept one NAL unit event. Only SEI NALs carrying GA94 caption user data
  // are processed; their cc_data pairs are buffered on captionPackets_ until
  // flush(). Duplicate data from re-downloaded segments is dropped via dts
  // tracking (see below).
  CaptionStream.prototype.push = function (event) {
    var sei, userData, newCaptionPackets; // only examine SEI NALs

    if (event.nalUnitType !== 'sei_rbsp') {
      return;
    } // parse the sei

    sei = captionPacketParser.parseSei(event.escapedRBSP); // ignore everything but user_data_registered_itu_t_t35

    if (sei.payloadType !== captionPacketParser.USER_DATA_REGISTERED_ITU_T_T35) {
      return;
    } // parse out the user data payload

    userData = captionPacketParser.parseUserData(sei); // ignore unrecognized userData

    if (!userData) {
      return;
    } // Sometimes, the same segment # will be downloaded twice. To stop the
    // caption data from being processed twice, we track the latest dts we've
    // received and ignore everything with a dts before that. However, since
    // data for a specific dts can be split across packets on either side of
    // a segment boundary, we need to make sure we *don't* ignore the packets
    // from the *next* segment that have dts === this.latestDts_. By constantly
    // tracking the number of packets received with dts === this.latestDts_, we
    // know how many should be ignored once we start receiving duplicates.

    if (event.dts < this.latestDts_) {
      // We've started getting older data, so set the flag.
      this.ignoreNextEqualDts_ = true;
      return;
    } else if (event.dts === this.latestDts_ && this.ignoreNextEqualDts_) {
      this.numSameDts_--;

      if (!this.numSameDts_) {
        // We've received the last duplicate packet, time to start processing again
        this.ignoreNextEqualDts_ = false;
      }

      return;
    } // parse out CC data packets and save them for later

    newCaptionPackets = captionPacketParser.parseCaptionPackets(event.pts, userData);
    this.captionPackets_ = this.captionPackets_.concat(newCaptionPackets);

    // restart the same-dts counter whenever the dts advances
    if (this.latestDts_ !== event.dts) {
      this.numSameDts_ = 0;
    }

    this.numSameDts_++;
    this.latestDts_ = event.dts;
  };
|
|
|
  // Sort all buffered caption byte-pairs by pts (stably) and dispatch them to
  // the appropriate Cea608Stream, then flush every CC stream downstream.
  CaptionStream.prototype.flush = function () {
    // make sure we actually parsed captions before proceeding
    if (!this.captionPackets_.length) {
      this.ccStreams_.forEach(function (cc) {
        cc.flush();
      }, this);
      return;
    } // In Chrome, the Array#sort function is not stable so add a
    // presortIndex that we can use to ensure we get a stable-sort

    this.captionPackets_.forEach(function (elem, idx) {
      elem.presortIndex = idx;
    }); // sort caption byte-pairs based on their PTS values

    this.captionPackets_.sort(function (a, b) {
      if (a.pts === b.pts) {
        return a.presortIndex - b.presortIndex;
      }

      return a.pts - b.pts;
    });
    this.captionPackets_.forEach(function (packet) {
      if (packet.type < 2) {
        // Dispatch packet to the right Cea608Stream
        this.dispatchCea608Packet(packet);
      } // this is where an 'else' would go for a dispatching packets
      // to a theoretical Cea708Stream that handles SERVICEn data

    }, this);
    // everything dispatched; clear the buffer and flush the CC streams
    this.captionPackets_.length = 0;
    this.ccStreams_.forEach(function (cc) {
      cc.flush();
    }, this);
    return;
  };
|
|
|
  // Reset all duplicate-segment tracking state, forget which data channel is
  // active on each field, and reset every underlying Cea608Stream.
  CaptionStream.prototype.reset = function () {
    this.latestDts_ = null;
    this.ignoreNextEqualDts_ = false;
    this.numSameDts_ = 0;
    // per-field active data channel (index by CEA-608 field number); null
    // means no channel has been selected yet
    this.activeCea608Channel_ = [null, null];
    this.ccStreams_.forEach(function (ccStream) {
      ccStream.reset();
    });
  };
|
|
|
  // Route a CEA-608 byte-pair to the Cea608Stream for the currently-active
  // data channel of its field, updating the active channel first if this
  // packet is a channel-selecting control code.
  CaptionStream.prototype.dispatchCea608Packet = function (packet) {
    // NOTE: packet.type is the CEA608 field
    if (this.setsChannel1Active(packet)) {
      this.activeCea608Channel_[packet.type] = 0;
    } else if (this.setsChannel2Active(packet)) {
      this.activeCea608Channel_[packet.type] = 1;
    }

    if (this.activeCea608Channel_[packet.type] === null) {
      // If we haven't received anything to set the active channel, discard the
      // data; we don't want jumbled captions
      return;
    }

    // ccStreams_ is ordered [field0/ch0, field0/ch1, field1/ch0, field1/ch1]
    this.ccStreams_[(packet.type << 1) + this.activeCea608Channel_[packet.type]].push(packet);
  };
|
|
|
  // True when the masked control-code bits of this byte-pair select data
  // channel 1 on its field.
  CaptionStream.prototype.setsChannel1Active = function (packet) {
    return (packet.ccData & 0x7800) === 0x1000;
  };
|
|
|
  // True when the masked control-code bits of this byte-pair select data
  // channel 2 on its field.
  CaptionStream.prototype.setsChannel2Active = function (packet) {
    return (packet.ccData & 0x7800) === 0x1800;
  }; // ----------------------
|
// Session to Application |
|
// ---------------------- |
|
// This hash maps non-ASCII, special, and extended character codes to their |
|
// proper Unicode equivalent. The first keys that are only a single byte |
|
// are the non-standard ASCII characters, which simply map the CEA608 byte |
|
// to the standard ASCII/Unicode. The two-byte keys that follow are the CEA608 |
|
// character codes, but have their MSB bitmasked with 0x03 so that a lookup |
|
// can be performed regardless of the field and data channel on which the |
|
// character code was received. |
|
|
|
|
|
  // Maps non-ASCII, special, and extended CEA-608 character codes to their
  // Unicode equivalents. Single-byte keys are the non-standard ASCII slots;
  // two-byte keys are the CEA-608 extended character codes with the MSB
  // bitmasked with 0x03 so a lookup works regardless of the field and data
  // channel the code was received on.
  var CHARACTER_TRANSLATION = {
    0x2a: 0xe1, // á
    0x5c: 0xe9, // é
    0x5e: 0xed, // í
    0x5f: 0xf3, // ó
    0x60: 0xfa, // ú
    0x7b: 0xe7, // ç
    0x7c: 0xf7, // ÷
    0x7d: 0xd1, // Ñ
    0x7e: 0xf1, // ñ
    0x7f: 0x2588, // █ (full block)
    0x0130: 0xae, // ®
    0x0131: 0xb0, // °
    0x0132: 0xbd, // ½
    0x0133: 0xbf, // ¿
    0x0134: 0x2122, // ™
    0x0135: 0xa2, // ¢
    0x0136: 0xa3, // £
    0x0137: 0x266a, // ♪
    0x0138: 0xe0, // à
    0x0139: 0xa0, // non-breaking space
    0x013a: 0xe8, // è
    0x013b: 0xe2, // â
    0x013c: 0xea, // ê
    0x013d: 0xee, // î
    0x013e: 0xf4, // ô
    0x013f: 0xfb, // û
    0x0220: 0xc1, // Á
    0x0221: 0xc9, // É
    0x0222: 0xd3, // Ó
    0x0223: 0xda, // Ú
    0x0224: 0xdc, // Ü
    0x0225: 0xfc, // ü
    0x0226: 0x2018, // ‘
    0x0227: 0xa1, // ¡
    0x0228: 0x2a, // *
    0x0229: 0x27, // '
    0x022a: 0x2014, // —
    0x022b: 0xa9, // ©
    0x022c: 0x2120, // ℠
    0x022d: 0x2022, // •
    0x022e: 0x201c, // “
    0x022f: 0x201d, // ”
    0x0230: 0xc0, // À
    0x0231: 0xc2, // Â
    0x0232: 0xc7, // Ç
    0x0233: 0xc8, // È
    0x0234: 0xca, // Ê
    0x0235: 0xcb, // Ë
    0x0236: 0xeb, // ë
    0x0237: 0xce, // Î
    0x0238: 0xcf, // Ï
    0x0239: 0xef, // ï
    0x023a: 0xd4, // Ô
    0x023b: 0xd9, // Ù
    0x023c: 0xf9, // ù
    0x023d: 0xdb, // Û
    0x023e: 0xab, // «
    0x023f: 0xbb, // »
    0x0320: 0xc3, // Ã
    0x0321: 0xe3, // ã
    0x0322: 0xcd, // Í
    0x0323: 0xcc, // Ì
    0x0324: 0xec, // ì
    0x0325: 0xd2, // Ò
    0x0326: 0xf2, // ò
    0x0327: 0xd5, // Õ
    0x0328: 0xf5, // õ
    0x0329: 0x7b, // {
    0x032a: 0x7d, // }
    0x032b: 0x5c, // \
    0x032c: 0x5e, // ^
    0x032d: 0x5f, // _
    0x032e: 0x7c, // |
    0x032f: 0x7e, // ~
    0x0330: 0xc4, // Ä
    0x0331: 0xe4, // ä
    0x0332: 0xd6, // Ö
    0x0333: 0xf6, // ö
    0x0334: 0xdf, // ß
    0x0335: 0xa5, // ¥
    0x0336: 0xa4, // ¤
    0x0337: 0x2502, // │
    0x0338: 0xc5, // Å
    0x0339: 0xe5, // å
    0x033a: 0xd8, // Ø
    0x033b: 0xf8, // ø
    0x033c: 0x250c, // ┌
    0x033d: 0x2510, // ┐
    0x033e: 0x2514, // └
    0x033f: 0x2518 // ┘
  };
|
|
|
/**
 * Convert a CEA-608 character code to the string it represents,
 * routing through CHARACTER_TRANSLATION for codes that do not map
 * directly to their Unicode code point.
 *
 * @param {?number} code character code, or null for "no character"
 * @return {string} a one-character string, or '' when code is null
 */
var getCharFromCode = function getCharFromCode(code) {
  if (code === null) {
    return '';
  }

  var translated = CHARACTER_TRANSLATION[code];
  return String.fromCharCode(translated || code);
};

// the index of the last row in a CEA-608 display buffer
|
|
|
|
|
// The index of the last row in a CEA-608 display buffer (rows 0-14).
var BOTTOM_ROW = 14;

// This array is used for mapping PACs -> row #, since there's no way of
// getting it through bit logic; the row is recovered via ROWS.indexOf().
var ROWS = [0x1100, 0x1120, 0x1200, 0x1220, 0x1500, 0x1520, 0x1600, 0x1620, 0x1700, 0x1720, 0x1000, 0x1300, 0x1320, 0x1400, 0x1420];

// CEA-608 captions are rendered onto a 34x15 matrix of character
// cells. The "bottom" row is the last element in the outer array.
|
|
|
/**
 * Build an empty CEA-608 display buffer: one empty string per row,
 * BOTTOM_ROW + 1 rows in total.
 *
 * @return {string[]} array of empty row strings
 */
var createDisplayBuffer = function createDisplayBuffer() {
  var rows = [];

  for (var rowIndex = 0; rowIndex <= BOTTOM_ROW; rowIndex++) {
    rows.push('');
  }

  return rows;
};
|
|
|
/**
 * A decoder for one CEA-608 caption channel. `field` and `dataChannel`
 * are each 0 or 1; together they select the conventional channel name
 * CC1-CC4 (stored in name_). Emits 'data' events via flushDisplayed.
 */
var Cea608Stream = function Cea608Stream(field, dataChannel) {
  Cea608Stream.prototype.init.call(this);
  this.field_ = field || 0;
  this.dataChannel_ = dataChannel || 0;
  // 'CC1'..'CC4', derived from the (field, dataChannel) pair
  this.name_ = 'CC' + ((this.field_ << 1 | this.dataChannel_) + 1);
  this.setConstants();
  this.reset();

  /**
   * Decode one two-byte caption packet. `packet` carries at least
   * {pts, ccData}; ccData holds the two caption bytes (with parity).
   */
  this.push = function (packet) {
    var data, swap, char0, char1, text; // remove the parity bits

    data = packet.ccData & 0x7f7f; // ignore duplicate control codes; the spec demands they're sent twice

    if (data === this.lastControlCode_) {
      this.lastControlCode_ = null;
      return;
    } // Store control codes so the duplicate transmission can be detected


    if ((data & 0xf000) === 0x1000) {
      this.lastControlCode_ = data;
    } else if (data !== this.PADDING_) {
      this.lastControlCode_ = null;
    }

    char0 = data >>> 8;
    char1 = data & 0xff;

    if (data === this.PADDING_) {
      return;
    } else if (data === this.RESUME_CAPTION_LOADING_) {
      // RCL: subsequent text accumulates off-screen (pop-on mode)
      this.mode_ = 'popOn';
    } else if (data === this.END_OF_CAPTION_) {
      // If an EOC is received while in paint-on mode, the displayed caption
      // text should be swapped to non-displayed memory as if it was a pop-on
      // caption. Because of that, we should explicitly switch back to pop-on
      // mode
      this.mode_ = 'popOn';
      this.clearFormatting(packet.pts); // if a caption was being displayed, it's gone now

      this.flushDisplayed(packet.pts); // flip memory

      swap = this.displayed_;
      this.displayed_ = this.nonDisplayed_;
      this.nonDisplayed_ = swap; // start measuring the time to display the caption

      this.startPts_ = packet.pts;
    } else if (data === this.ROLL_UP_2_ROWS_) {
      this.rollUpRows_ = 2;
      this.setRollUp(packet.pts);
    } else if (data === this.ROLL_UP_3_ROWS_) {
      this.rollUpRows_ = 3;
      this.setRollUp(packet.pts);
    } else if (data === this.ROLL_UP_4_ROWS_) {
      this.rollUpRows_ = 4;
      this.setRollUp(packet.pts);
    } else if (data === this.CARRIAGE_RETURN_) {
      // CR in roll-up: emit what's visible, then scroll the window up
      this.clearFormatting(packet.pts);
      this.flushDisplayed(packet.pts);
      this.shiftRowsUp_();
      this.startPts_ = packet.pts;
    } else if (data === this.BACKSPACE_) {
      // delete the last character on the active row of the active buffer
      if (this.mode_ === 'popOn') {
        this.nonDisplayed_[this.row_] = this.nonDisplayed_[this.row_].slice(0, -1);
      } else {
        this.displayed_[this.row_] = this.displayed_[this.row_].slice(0, -1);
      }
    } else if (data === this.ERASE_DISPLAYED_MEMORY_) {
      // emit anything on screen before wiping it
      this.flushDisplayed(packet.pts);
      this.displayed_ = createDisplayBuffer();
    } else if (data === this.ERASE_NON_DISPLAYED_MEMORY_) {
      this.nonDisplayed_ = createDisplayBuffer();
    } else if (data === this.RESUME_DIRECT_CAPTIONING_) {
      if (this.mode_ !== 'paintOn') {
        // NOTE: This should be removed when proper caption positioning is
        // implemented
        this.flushDisplayed(packet.pts);
        this.displayed_ = createDisplayBuffer();
      }

      this.mode_ = 'paintOn';
      this.startPts_ = packet.pts; // Append special characters to caption text
    } else if (this.isSpecialCharacter(char0, char1)) {
      // Bitmask char0 so that we can apply character transformations
      // regardless of field and data channel.
      // Then byte-shift to the left and OR with char1 so we can pass the
      // entire character code to `getCharFromCode`.
      char0 = (char0 & 0x03) << 8;
      text = getCharFromCode(char0 | char1);
      this[this.mode_](packet.pts, text);
      this.column_++; // Append extended characters to caption text
    } else if (this.isExtCharacter(char0, char1)) {
      // Extended characters always follow their "non-extended" equivalents.
      // IE if a "è" is desired, you'll always receive "eè"; non-compliant
      // decoders are supposed to drop the "è", while compliant decoders
      // backspace the "e" and insert "è".
      // Delete the previous character
      if (this.mode_ === 'popOn') {
        this.nonDisplayed_[this.row_] = this.nonDisplayed_[this.row_].slice(0, -1);
      } else {
        this.displayed_[this.row_] = this.displayed_[this.row_].slice(0, -1);
      } // Bitmask char0 so that we can apply character transformations
      // regardless of field and data channel.
      // Then byte-shift to the left and OR with char1 so we can pass the
      // entire character code to `getCharFromCode`.


      char0 = (char0 & 0x03) << 8;
      text = getCharFromCode(char0 | char1);
      this[this.mode_](packet.pts, text);
      this.column_++; // Process mid-row codes
    } else if (this.isMidRowCode(char0, char1)) {
      // Attributes are not additive, so clear all formatting
      this.clearFormatting(packet.pts); // According to the standard, mid-row codes
      // should be replaced with spaces, so add one now

      this[this.mode_](packet.pts, ' ');
      this.column_++;

      // low bits of char1 select italics (0xe) and/or underline (0x1)
      if ((char1 & 0xe) === 0xe) {
        this.addFormatting(packet.pts, ['i']);
      }

      if ((char1 & 0x1) === 0x1) {
        this.addFormatting(packet.pts, ['u']);
      } // Detect offset control codes and adjust cursor

    } else if (this.isOffsetControlCode(char0, char1)) {
      // Cursor position is set by indent PAC (see below) in 4-column
      // increments, with an additional offset code of 1-3 to reach any
      // of the 32 columns specified by CEA-608. So all we need to do
      // here is increment the column cursor by the given offset.
      this.column_ += char1 & 0x03; // Detect PACs (Preamble Address Codes)
    } else if (this.isPAC(char0, char1)) {
      // There's no logic for PAC -> row mapping, so we have to just
      // find the row code in an array and use its index :(
      var row = ROWS.indexOf(data & 0x1f20); // Configure the caption window if we're in roll-up mode

      if (this.mode_ === 'rollUp') {
        // This implies that the base row is incorrectly set.
        // As per the recommendation in CEA-608(Base Row Implementation), defer to the number
        // of roll-up rows set.
        if (row - this.rollUpRows_ + 1 < 0) {
          row = this.rollUpRows_ - 1;
        }

        this.setRollUp(packet.pts, row);
      }

      if (row !== this.row_) {
        // formatting is only persistent for current row
        this.clearFormatting(packet.pts);
        this.row_ = row;
      } // All PACs can apply underline, so detect and apply
      // (All odd-numbered second bytes set underline)


      if (char1 & 0x1 && this.formatting_.indexOf('u') === -1) {
        this.addFormatting(packet.pts, ['u']);
      }

      if ((data & 0x10) === 0x10) {
        // We've got an indent level code. Each successive even number
        // increments the column cursor by 4, so we can get the desired
        // column position by bit-shifting to the right (to get n/2)
        // and multiplying by 4.
        this.column_ = ((data & 0xe) >> 1) * 4;
      }

      if (this.isColorPAC(char1)) {
        // it's a color code, though we only support white, which
        // can be either normal or italicized. white italics can be
        // either 0x4e or 0x6e depending on the row, so we just
        // bitwise-and with 0xe to see if italics should be turned on
        if ((char1 & 0xe) === 0xe) {
          this.addFormatting(packet.pts, ['i']);
        }
      } // We have a normal character in char0, and possibly one in char1

    } else if (this.isNormalChar(char0)) {
      if (char1 === 0x00) {
        char1 = null;
      }

      text = getCharFromCode(char0);
      text += getCharFromCode(char1);
      this[this.mode_](packet.pts, text);
      this.column_ += text.length;
    } // finish data processing

  };
};
|
|
|
// Inherit init()/trigger() event plumbing from the base stream type.
Cea608Stream.prototype = new stream();

// Trigger a cue point that captures the current state of the
// display buffer
|
|
|
/**
 * Emit a 'data' event describing everything currently on screen as a
 * single cue spanning [startPts_, pts]. Nothing is emitted when the
 * display buffer is blank.
 *
 * @param {number} pts end timestamp for the cue
 */
Cea608Stream.prototype.flushDisplayed = function (pts) {
  var trimmedRows = [];
  var i;

  // remove spaces from the start and end of each row
  for (i = 0; i < this.displayed_.length; i++) {
    var trimmed;

    try {
      trimmed = this.displayed_[i].trim();
    } catch (e) {
      // Ordinarily, this shouldn't happen. However, caption
      // parsing errors should not throw exceptions and
      // break playback.
      // eslint-disable-next-line no-console
      console.error('Skipping malformed caption.');
      trimmed = '';
    }

    trimmedRows.push(trimmed);
  }

  // combine all text rows to display in one cue, then remove blank
  // rows from the start and end, but not the middle
  var content = trimmedRows.join('\n').replace(/^\n+|\n+$/g, '');

  if (content.length) {
    this.trigger('data', {
      startPts: this.startPts_,
      endPts: pts,
      text: content,
      stream: this.name_
    });
  }
};
|
/**
 * Zero out the decoder state; used for startup and on seek.
 */
Cea608Stream.prototype.reset = function () {
  this.mode_ = 'popOn';
  // When in roll-up mode, the index of the last row that will
  // actually display captions. If a caption is shifted to a row
  // with a lower index than this, it is cleared from the display
  // buffer
  this.topRow_ = 0;
  this.startPts_ = 0;
  this.displayed_ = createDisplayBuffer();
  this.nonDisplayed_ = createDisplayBuffer();
  // last control code seen, used to drop the spec-mandated duplicate
  this.lastControlCode_ = null;
  // Track row and column for proper line-breaking and spacing
  this.column_ = 0;
  this.row_ = BOTTOM_ROW;
  this.rollUpRows_ = 2;
  // This variable holds currently-applied formatting tags (e.g. 'i', 'u')
  this.formatting_ = [];
};
|
/**
 * Sets up control code and related constants for this instance
 */
Cea608Stream.prototype.setConstants = function () {
  // The following attributes have these uses:
  //  ext_ :    char0 for mid-row codes, and the base for extended
  //            chars (ext_+0, ext_+1, and ext_+2 are char0s for
  //            extended codes)
  //  control_: char0 for control codes, except byte-shifted to the
  //            left so that we can do this.control_ | CONTROL_CODE
  //  offset_:  char0 for tab offset codes
  //
  // It's also worth noting that control codes, and _only_ control codes,
  // differ between field 1 and field2. Field 2 control codes are always
  // their field 1 value plus 1. That's why there's the "| field" on the
  // control value.
  if (this.dataChannel_ === 0) {
    this.BASE_ = 0x10;
    this.EXT_ = 0x11;
    this.CONTROL_ = (0x14 | this.field_) << 8;
    this.OFFSET_ = 0x17;
  } else if (this.dataChannel_ === 1) {
    this.BASE_ = 0x18;
    this.EXT_ = 0x19;
    this.CONTROL_ = (0x1c | this.field_) << 8;
    this.OFFSET_ = 0x1f;
  } // Constants for the LSByte command codes recognized by Cea608Stream. This
  // list is not exhaustive. For a more comprehensive listing and semantics see
  // http://www.gpo.gov/fdsys/pkg/CFR-2010-title47-vol1/pdf/CFR-2010-title47-vol1-sec15-119.pdf

  // Padding
  this.PADDING_ = 0x0000; // Pop-on Mode

  this.RESUME_CAPTION_LOADING_ = this.CONTROL_ | 0x20;
  this.END_OF_CAPTION_ = this.CONTROL_ | 0x2f; // Roll-up Mode

  this.ROLL_UP_2_ROWS_ = this.CONTROL_ | 0x25;
  this.ROLL_UP_3_ROWS_ = this.CONTROL_ | 0x26;
  this.ROLL_UP_4_ROWS_ = this.CONTROL_ | 0x27;
  this.CARRIAGE_RETURN_ = this.CONTROL_ | 0x2d; // paint-on mode

  this.RESUME_DIRECT_CAPTIONING_ = this.CONTROL_ | 0x29; // Erasure

  this.BACKSPACE_ = this.CONTROL_ | 0x21;
  this.ERASE_DISPLAYED_MEMORY_ = this.CONTROL_ | 0x2c;
  this.ERASE_NON_DISPLAYED_MEMORY_ = this.CONTROL_ | 0x2e;
};
|
/**
 * Determine whether a decoded byte pair encodes a CEA-608 "special
 * character".
 *
 * A special character pairs a first byte equal to this channel's
 * extended-charset base (0x11 on data channel 1, 0x19 on data
 * channel 2) with a second byte between 0x30 and 0x3f inclusive.
 *
 * @param {Integer} char0 first byte of the pair
 * @param {Integer} char1 second byte of the pair
 * @return {Boolean} true when the pair is a special character
 */
Cea608Stream.prototype.isSpecialCharacter = function (char0, char1) {
  if (char0 !== this.EXT_) {
    return false;
  }

  return char1 >= 0x30 && char1 <= 0x3f;
};
|
/**
 * Determine whether a decoded byte pair encodes an extended character.
 *
 * Extended characters use a first byte of EXT_+1 or EXT_+2 (0x12/0x13
 * on data channel 1, 0x1a/0x1b on data channel 2) and a second byte
 * between 0x20 and 0x3f inclusive.
 *
 * @param {Integer} char0 first byte of the pair
 * @param {Integer} char1 second byte of the pair
 * @return {Boolean} true when the pair is an extended character
 */
Cea608Stream.prototype.isExtCharacter = function (char0, char1) {
  if (char1 < 0x20 || char1 > 0x3f) {
    return false;
  }

  return char0 === this.EXT_ + 1 || char0 === this.EXT_ + 2;
};
|
/**
 * Determine whether a decoded byte pair is a mid-row code.
 *
 * Mid-row codes use the extended-charset base as their first byte
 * (0x11 on data channel 1, 0x19 on data channel 2) and a second byte
 * between 0x20 and 0x2f inclusive.
 *
 * @param {Integer} char0 first byte of the pair
 * @param {Integer} char1 second byte of the pair
 * @return {Boolean} true when the pair is a mid-row code
 */
Cea608Stream.prototype.isMidRowCode = function (char0, char1) {
  if (char0 !== this.EXT_) {
    return false;
  }

  return char1 >= 0x20 && char1 <= 0x2f;
};
|
/**
 * Determine whether a decoded byte pair is a tab offset control code.
 *
 * Offset control codes use the channel's offset base as their first
 * byte (0x17 on data channel 1, 0x1f on data channel 2) and a second
 * byte between 0x21 and 0x23 inclusive (offset of 1-3 columns).
 *
 * @param {Integer} char0 first byte of the pair
 * @param {Integer} char1 second byte of the pair
 * @return {Boolean} true when the pair is an offset control code
 */
Cea608Stream.prototype.isOffsetControlCode = function (char0, char1) {
  if (char0 !== this.OFFSET_) {
    return false;
  }

  return char1 >= 0x21 && char1 <= 0x23;
};
|
/**
 * Determine whether a decoded byte pair is a Preamble Address Code.
 *
 * PACs use a first byte in [BASE_, BASE_+8) — 0x10-0x17 on data
 * channel 1, 0x18-0x1f on data channel 2 — and a second byte between
 * 0x40 and 0x7f inclusive.
 *
 * @param {Integer} char0 first byte of the pair
 * @param {Integer} char1 second byte of the pair
 * @return {Boolean} true when the pair is a PAC
 */
Cea608Stream.prototype.isPAC = function (char0, char1) {
  var firstByteInRange = char0 >= this.BASE_ && char0 < this.BASE_ + 8;
  var secondByteInRange = char1 >= 0x40 && char1 <= 0x7f;
  return firstByteInRange && secondByteInRange;
};
|
/**
 * Determine whether a PAC's second byte carries a color code.
 *
 * Color PACs have a second byte in the range 0x40 to 0x4f, or in the
 * range 0x60 to 0x7f (the code checks through 0x7f; an earlier comment
 * saying 0x6f understated the upper bound).
 *
 * @param {Integer} char1 second byte of the pair
 * @return {Boolean} true when the byte is a color PAC
 */
Cea608Stream.prototype.isColorPAC = function (char1) {
  var lowRange = char1 >= 0x40 && char1 <= 0x4f;
  var highRange = char1 >= 0x60 && char1 <= 0x7f;
  return lowRange || highRange;
};
|
/**
 * Determine whether a single byte is a normal (printable) character.
 *
 * Normal text bytes fall in the range 0x20 to 0x7f inclusive.
 *
 * @param {Integer} char the byte to test
 * @return {Boolean} true when the byte is a normal character
 */
Cea608Stream.prototype.isNormalChar = function (char) {
  var isPrintable = 0x20 <= char && char <= 0x7f;
  return isPrintable;
};
|
/**
 * Configures roll-up mode: switches the decoder into roll-up if
 * necessary and (re)positions the roll-up window.
 *
 * @param {Integer} pts Current PTS
 * @param {Integer} newBaseRow Used by PACs to slide the current window to
 *                             a new position; omitted for plain RU2/RU3/RU4
 */
Cea608Stream.prototype.setRollUp = function (pts, newBaseRow) {
  // Reset the base row to the bottom row when switching modes
  if (this.mode_ !== 'rollUp') {
    this.row_ = BOTTOM_ROW;
    this.mode_ = 'rollUp'; // Spec says to wipe memories when switching to roll-up

    this.flushDisplayed(pts);
    this.nonDisplayed_ = createDisplayBuffer();
    this.displayed_ = createDisplayBuffer();
  }

  if (newBaseRow !== undefined && newBaseRow !== this.row_) {
    // move currently displayed captions (up or down) to the new base row
    // NOTE(review): when the old and new windows overlap
    // (|newBaseRow - row_| < rollUpRows_), earlier iterations overwrite
    // rows that later iterations read — confirm this matches intent.
    for (var i = 0; i < this.rollUpRows_; i++) {
      this.displayed_[newBaseRow - i] = this.displayed_[this.row_ - i];
      this.displayed_[this.row_ - i] = '';
    }
  }

  if (newBaseRow === undefined) {
    newBaseRow = this.row_;
  }

  this.topRow_ = newBaseRow - this.rollUpRows_ + 1;
};

// Adds the opening HTML tag for the passed character to the caption text,
// and keeps track of it for later closing
|
|
|
|
|
/**
 * Append opening HTML tags for the given formatting identifiers (e.g.
 * ['i'], ['u']) to the caption text in the current mode, remembering
 * them so clearFormatting can close them later.
 *
 * @param {number} pts current PTS
 * @param {string[]} format tag names to open
 */
Cea608Stream.prototype.addFormatting = function (pts, format) {
  this.formatting_ = this.formatting_.concat(format);

  var openingTags = '';

  for (var i = 0; i < format.length; i++) {
    openingTags += '<' + format[i] + '>';
  }

  this[this.mode_](pts, openingTags);
};

// Adds HTML closing tags for current formatting to caption text and
// clears remembered formatting
|
|
|
|
|
/**
 * Append closing HTML tags (in reverse order of opening) for all
 * remembered formatting, then forget it. No-op when no formatting is
 * active.
 *
 * @param {number} pts current PTS
 */
Cea608Stream.prototype.clearFormatting = function (pts) {
  if (!this.formatting_.length) {
    return;
  }

  var closingTags = '';

  for (var i = this.formatting_.length - 1; i >= 0; i--) {
    closingTags += '</' + this.formatting_[i] + '>';
  }

  this.formatting_ = [];
  this[this.mode_](pts, closingTags);
};

// Mode Implementations
|
|
|
|
|
/**
 * Pop-on mode: buffer characters off-screen on the current row of
 * non-displayed memory (an EOC later flips the buffers).
 */
Cea608Stream.prototype.popOn = function (pts, text) {
  this.nonDisplayed_[this.row_] += text;
};
|
|
|
/**
 * Roll-up mode: append characters directly to the visible base row.
 */
Cea608Stream.prototype.rollUp = function (pts, text) {
  this.displayed_[this.row_] += text;
};
|
|
|
/**
 * Scroll the roll-up window: rows outside [topRow_, row_] are blanked,
 * rows inside the window each move up one position, and the base row
 * is left empty for new text.
 */
Cea608Stream.prototype.shiftRowsUp_ = function () {
  var rowIndex;

  // blank out the inactive rows above the window
  for (rowIndex = 0; rowIndex < this.topRow_; rowIndex++) {
    this.displayed_[rowIndex] = '';
  }

  // blank out the inactive rows below the base row
  for (rowIndex = this.row_ + 1; rowIndex <= BOTTOM_ROW; rowIndex++) {
    this.displayed_[rowIndex] = '';
  }

  // slide each row of the window up by one
  for (rowIndex = this.topRow_; rowIndex < this.row_; rowIndex++) {
    this.displayed_[rowIndex] = this.displayed_[rowIndex + 1];
  }

  // the base row is now free for incoming text
  this.displayed_[this.row_] = '';
};
|
|
|
/**
 * Paint-on mode: append characters straight onto the visible base row.
 */
Cea608Stream.prototype.paintOn = function (pts, text) {
  this.displayed_[this.row_] += text;
};

// exports
|
|
|
|
|
// module exports for the caption decoding pipeline
var captionStream = {
  CaptionStream: CaptionStream,
  Cea608Stream: Cea608Stream
};

// MP2T stream_type values this transmuxer recognizes
// (per the MPEG-2 Systems stream_type table)
var streamTypes = {
  H264_STREAM_TYPE: 0x1B,
  ADTS_STREAM_TYPE: 0x0F,
  METADATA_STREAM_TYPE: 0x15
};
|
// PTS/DTS values are 33-bit unsigned integers, so they wrap back to zero
// at 2^33 (MAX_TS). A difference larger than 2^32 (RO_THRESH) between a
// timestamp and its reference is treated as a rollover rather than a
// genuine jump.
var MAX_TS = 8589934592;
var RO_THRESH = 4294967296;

/**
 * Shift `value` by whole rollover periods (2^33) until it lies within
 * 2^32 of `reference`, undoing 33-bit timestamp wrap-around.
 *
 * If the current timestamp value is greater than the reference and a
 * rollover is detected, the rollover happened in the opposite direction
 * (e.g. seeking backwards over the wrap point), so the value is stepped
 * down instead of up.
 *
 * Note: a seek forwards or back that is greater than RO_THRESH
 * (2^32, ~13 hours) will cause an incorrect adjustment.
 *
 * @param {number} value timestamp to adjust
 * @param {number} reference timestamp the result should be near
 * @return {number} the rollover-corrected timestamp
 */
var handleRollover = function handleRollover(value, reference) {
  var step = value > reference ? -MAX_TS : MAX_TS;

  while (Math.abs(reference - value) > RO_THRESH) {
    value += step;
  }

  return value;
};
|
|
|
/**
 * A stream that corrects 33-bit PTS/DTS rollover on packets of a single
 * type, using the first dts seen (since the last flush/discontinuity)
 * as the reference point.
 *
 * @param {string} type packet type this instance handles
 */
var TimestampRolloverStream = function TimestampRolloverStream(type) {
  var mostRecentDts;
  var baselineDts;

  TimestampRolloverStream.prototype.init.call(this);
  this.type_ = type;

  this.push = function (data) {
    // only operate on packets of the configured type
    if (data.type !== this.type_) {
      return;
    }

    // the first dts observed becomes the rollover reference
    if (baselineDts === undefined) {
      baselineDts = data.dts;
    }

    data.dts = handleRollover(data.dts, baselineDts);
    data.pts = handleRollover(data.pts, baselineDts);
    mostRecentDts = data.dts;
    this.trigger('data', data);
  };

  this.flush = function () {
    // carry the last corrected dts forward as the next reference
    baselineDts = mostRecentDts;
    this.trigger('done');
  };

  this.discontinuity = function () {
    // forget both timestamps so the next packet re-establishes the reference
    baselineDts = undefined;
    mostRecentDts = undefined;
  };
};
|
|
|
// Inherit init()/trigger() event plumbing from the base stream type.
TimestampRolloverStream.prototype = new stream();

// module exports
var timestampRolloverStream = {
  TimestampRolloverStream: TimestampRolloverStream,
  handleRollover: handleRollover
};
|
|
|
// Encode the byte range [start, end) of `bytes` as a percent-escaped
// string, two hex digits per byte (e.g. [0x48] -> '%48').
var percentEncode = function percentEncode(bytes, start, end) {
  var encoded = '';

  for (var i = start; i < end; i++) {
    encoded += '%' + ('00' + bytes[i].toString(16)).slice(-2);
  }

  return encoded;
};

// return the string representation of the specified byte range,
// interpreted as UTF-8.
var parseUtf8 = function parseUtf8(bytes, start, end) {
  return decodeURIComponent(percentEncode(bytes, start, end));
};

// return the string representation of the specified byte range,
// interpreted as ISO-8859-1. `unescape` is legacy but decodes %XX
// escapes as Latin-1, which is exactly what is needed here.
var parseIso88591 = function parseIso88591(bytes, start, end) {
  return unescape(percentEncode(bytes, start, end)); // jshint ignore:line
};

// Decode an ID3 "syncsafe" integer: four bytes with 7 significant bits each.
var parseSyncSafeInteger = function parseSyncSafeInteger(data) {
  return data[0] << 21 | data[1] << 14 | data[2] << 7 | data[3];
};

// Frame-body parsers keyed by ID3 frame id. Each mutates the frame
// object in place.
var tagParsers = {
  // User-defined text frame: <encoding byte><description>\0<value>
  TXXX: function TXXX(tag) {
    // only UTF-8 encoded frames (encoding byte 3) are recognized
    if (tag.data[0] !== 3) {
      return;
    }

    var separator = Array.prototype.indexOf.call(tag.data, 0, 1);

    if (separator !== -1) {
      tag.description = parseUtf8(tag.data, 1, separator);
      // do not include the null terminator or trailing padding in the value
      tag.value = parseUtf8(tag.data, separator + 1, tag.data.length).replace(/\0*$/, '');
    }

    tag.data = tag.value;
  },
  // User-defined URL frame: <encoding byte><description>\0<url>
  WXXX: function WXXX(tag) {
    // only UTF-8 encoded frames (encoding byte 3) are recognized
    if (tag.data[0] !== 3) {
      return;
    }

    var separator = Array.prototype.indexOf.call(tag.data, 0, 1);

    if (separator !== -1) {
      tag.description = parseUtf8(tag.data, 1, separator);
      tag.url = parseUtf8(tag.data, separator + 1, tag.data.length);
    }
  },
  // Private frame: <owner identifier>\0<binary payload>
  PRIV: function PRIV(tag) {
    var separator = Array.prototype.indexOf.call(tag.data, 0);

    if (separator !== -1) {
      tag.owner = parseIso88591(tag.data, 0, separator);
    } else {
      // no terminator: mirror the original scan, which leaves owner
      // unset and takes an empty payload
      separator = tag.data.length;
    }

    tag.privateData = tag.data.subarray(separator + 1);
    tag.data = tag.privateData;
  }
};

// forward declaration; assigned below
var _MetadataStream;
|
|
|
/**
 * Accumulates 'timed-metadata' PES chunks into complete ID3 tags,
 * parses their frames (via tagParsers), and emits each tag as a 'data'
 * event. Also emits 'timestamp' events for Apple's transport-stream
 * timestamp PRIV frames.
 */
_MetadataStream = function MetadataStream(options) {
  var settings = {
    debug: !!(options && options.debug),
    // the bytes of the program-level descriptor field in MP2T
    // see ISO/IEC 13818-1:2013 (E), section 2.6 "Program and
    // program element descriptors"
    descriptor: options && options.descriptor
  },
      // the total size in bytes of the ID3 tag being parsed
      tagSize = 0,
      // tag data that is not complete enough to be parsed
      buffer = [],
      // the total number of bytes currently in the buffer
      bufferSize = 0,
      i;

  _MetadataStream.prototype.init.call(this); // calculate the text track in-band metadata track dispatch type
  // https://html.spec.whatwg.org/multipage/embedded-content.html#steps-to-expose-a-media-resource-specific-text-track

  this.dispatchType = streamTypes.METADATA_STREAM_TYPE.toString(16);

  if (settings.descriptor) {
    // append the descriptor bytes, two lowercase hex digits each
    for (i = 0; i < settings.descriptor.length; i++) {
      this.dispatchType += ('00' + settings.descriptor[i].toString(16)).slice(-2);
    }
  }

  /**
   * Buffer one PES chunk; when a complete ID3 tag has accumulated,
   * parse and emit it.
   */
  this.push = function (chunk) {
    var tag, frameStart, frameSize, frame, i, frameHeader;

    if (chunk.type !== 'timed-metadata') {
      return;
    } // if data_alignment_indicator is set in the PES header,
    // we must have the start of a new ID3 tag. Assume anything
    // remaining in the buffer was malformed and throw it out


    if (chunk.dataAlignmentIndicator) {
      bufferSize = 0;
      buffer.length = 0;
    } // ignore events that don't look like ID3 data (no "ID3" magic)


    if (buffer.length === 0 && (chunk.data.length < 10 || chunk.data[0] !== 'I'.charCodeAt(0) || chunk.data[1] !== 'D'.charCodeAt(0) || chunk.data[2] !== '3'.charCodeAt(0))) {
      if (settings.debug) {
        // eslint-disable-next-line no-console
        console.log('Skipping unrecognized metadata packet');
      }

      return;
    } // add this chunk to the data we've collected so far


    buffer.push(chunk);
    bufferSize += chunk.data.byteLength; // grab the size of the entire frame from the ID3 header

    if (buffer.length === 1) {
      // the frame size is transmitted as a 28-bit integer in the
      // last four bytes of the ID3 header.
      // The most significant bit of each byte is dropped and the
      // results concatenated to recover the actual value.
      tagSize = parseSyncSafeInteger(chunk.data.subarray(6, 10)); // ID3 reports the tag size excluding the header but it's more
      // convenient for our comparisons to include it

      tagSize += 10;
    } // if the entire frame has not arrived, wait for more data


    if (bufferSize < tagSize) {
      return;
    } // collect the entire frame so it can be parsed


    tag = {
      data: new Uint8Array(tagSize),
      frames: [],
      pts: buffer[0].pts,
      dts: buffer[0].dts
    };

    // concatenate buffered chunks into tag.data, consuming the buffer
    for (i = 0; i < tagSize;) {
      tag.data.set(buffer[0].data.subarray(0, tagSize - i), i);
      i += buffer[0].data.byteLength;
      bufferSize -= buffer[0].data.byteLength;
      buffer.shift();
    } // find the start of the first frame and the end of the tag


    frameStart = 10;

    // bit 0x40 of flags byte 5 marks an extended header
    if (tag.data[5] & 0x40) {
      // advance the frame start past the extended header
      frameStart += 4; // header size field

      frameStart += parseSyncSafeInteger(tag.data.subarray(10, 14)); // clip any padding off the end

      tagSize -= parseSyncSafeInteger(tag.data.subarray(16, 20));
    } // parse one or more ID3 frames
    // http://id3.org/id3v2.3.0#ID3v2_frame_overview


    do {
      // determine the number of bytes in this frame
      frameSize = parseSyncSafeInteger(tag.data.subarray(frameStart + 4, frameStart + 8));

      if (frameSize < 1) {
        // eslint-disable-next-line no-console
        return console.log('Malformed ID3 frame encountered. Skipping metadata parsing.');
      }

      // the 4-character frame id, e.g. 'TXXX' or 'PRIV'
      frameHeader = String.fromCharCode(tag.data[frameStart], tag.data[frameStart + 1], tag.data[frameStart + 2], tag.data[frameStart + 3]);
      frame = {
        id: frameHeader,
        data: tag.data.subarray(frameStart + 10, frameStart + frameSize + 10)
      };
      frame.key = frame.id;

      if (tagParsers[frame.id]) {
        tagParsers[frame.id](frame); // handle the special PRIV frame used to indicate the start
        // time for raw AAC data

        if (frame.owner === 'com.apple.streaming.transportStreamTimestamp') {
          // reassemble the 33-bit PES timestamp from the payload bytes
          var d = frame.data,
              size = (d[3] & 0x01) << 30 | d[4] << 22 | d[5] << 14 | d[6] << 6 | d[7] >>> 2;
          size *= 4;
          size += d[7] & 0x03;
          frame.timeStamp = size; // in raw AAC, all subsequent data will be timestamped based
          // on the value of this frame
          // we couldn't have known the appropriate pts and dts before
          // parsing this ID3 tag so set those values now

          if (tag.pts === undefined && tag.dts === undefined) {
            tag.pts = frame.timeStamp;
            tag.dts = frame.timeStamp;
          }

          this.trigger('timestamp', frame);
        }
      }

      tag.frames.push(frame);
      frameStart += 10; // advance past the frame header

      frameStart += frameSize; // advance past the frame body
    } while (frameStart < tagSize);

    this.trigger('data', tag);
  };
};
|
|
|
// Inherit init()/trigger() event plumbing from the base stream type.
_MetadataStream.prototype = new stream();
var metadataStream = _MetadataStream;
var TimestampRolloverStream$1 = timestampRolloverStream.TimestampRolloverStream;

// object types, assigned below
var _TransportPacketStream, _TransportParseStream, _ElementaryStream;

// constants: every MP2T packet is exactly MP2T_PACKET_LENGTH bytes and
// starts with SYNC_BYTE (0x47)
var MP2T_PACKET_LENGTH = 188,
    // bytes
    SYNC_BYTE = 0x47;
|
/**
 * Splits an incoming stream of binary data into 188-byte MPEG-2
 * Transport Stream packets, buffering partial packets across pushes.
 */
_TransportPacketStream = function TransportPacketStream() {
  // holds any trailing partial packet between push() calls
  var buffer = new Uint8Array(MP2T_PACKET_LENGTH),
      bytesInBuffer = 0;

  _TransportPacketStream.prototype.init.call(this); // Deliver new bytes to the stream.

  /**
   * Split a stream of data into M2TS packets
   *
   * @param bytes {Uint8Array} raw segment bytes; may start or end
   * mid-packet
   **/
  this.push = function (bytes) {
    var startIndex = 0,
        endIndex = MP2T_PACKET_LENGTH,
        everything; // If there are bytes remaining from the last segment, prepend them to the
    // bytes that were pushed in

    if (bytesInBuffer) {
      everything = new Uint8Array(bytes.byteLength + bytesInBuffer);
      everything.set(buffer.subarray(0, bytesInBuffer));
      everything.set(bytes, bytesInBuffer);
      bytesInBuffer = 0;
    } else {
      everything = bytes;
    } // While we have enough data for a packet


    while (endIndex < everything.byteLength) {
      // Look for a pair of start and end sync bytes in the data..
      if (everything[startIndex] === SYNC_BYTE && everything[endIndex] === SYNC_BYTE) {
        // We found a packet so emit it and jump one whole packet forward in
        // the stream
        this.trigger('data', everything.subarray(startIndex, endIndex));
        startIndex += MP2T_PACKET_LENGTH;
        endIndex += MP2T_PACKET_LENGTH;
        continue;
      } // If we get here, we have somehow become de-synchronized and we need to step
      // forward one byte at a time until we find a pair of sync bytes that denote
      // a packet


      startIndex++;
      endIndex++;
    } // If there was some data left over at the end of the segment that couldn't
    // possibly be a whole packet, keep it because it might be the start of a packet
    // that continues in the next segment


    if (startIndex < everything.byteLength) {
      buffer.set(everything.subarray(startIndex), 0);
      bytesInBuffer = everything.byteLength - startIndex;
    }
  };

  /**
   * Passes identified M2TS packets to the TransportParseStream to be parsed
   **/
  this.flush = function () {
    // If the buffer contains a whole packet when we are being flushed, emit it
    // and empty the buffer. Otherwise hold onto the data because it may be
    // important for decoding the next segment
    if (bytesInBuffer === MP2T_PACKET_LENGTH && buffer[0] === SYNC_BYTE) {
      // NOTE(review): the shared `buffer` array is emitted directly here,
      // so listeners should not retain it across events — confirm
      // downstream consumers copy the data they need.
      this.trigger('data', buffer);
      bytesInBuffer = 0;
    }

    this.trigger('done');
  };
};
|
|
|
// Inherit init()/trigger() event plumbing from the base stream type.
_TransportPacketStream.prototype = new stream();
|
/** |
|
* Accepts an MP2T TransportPacketStream and emits data events with parsed |
|
* forms of the individual transport stream packets. |
|
*/ |
|
|
|
  _TransportParseStream = function TransportParseStream() {
    var parsePsi, parsePat, parsePmt, self;

    _TransportParseStream.prototype.init.call(this);

    self = this;
    // PES packets received before the first PMT are queued here and replayed
    // once the PMT tells us which PID carries which stream type
    this.packetsWaitingForPmt = [];
    this.programMapTable = undefined;

    // Dispatch a Program-Specific Information payload to the PAT or PMT parser.
    parsePsi = function parsePsi(payload, psi) {
      var offset = 0;

      // PSI packets may be split into multiple sections and those
      // sections may be split into multiple packets. If a PSI
      // section starts in this packet, the payload_unit_start_indicator
      // will be true and the first byte of the payload will indicate
      // the offset from the current position to the start of the
      // section.
      if (psi.payloadUnitStartIndicator) {
        offset += payload[offset] + 1;
      }

      if (psi.type === 'pat') {
        parsePat(payload.subarray(offset), psi);
      } else {
        parsePmt(payload.subarray(offset), psi);
      }
    };

    // Parse a Program Association Table and record the PID of the (first) PMT.
    parsePat = function parsePat(payload, pat) {
      pat.section_number = payload[7]; // eslint-disable-line camelcase

      pat.last_section_number = payload[8]; // eslint-disable-line camelcase

      // skip the PSI header and parse the first PMT entry
      // (the PMT PID is a 13-bit field)
      self.pmtPid = (payload[10] & 0x1F) << 8 | payload[11];
      pat.pmtPid = self.pmtPid;
    };

    /**
     * Parse out the relevant fields of a Program Map Table (PMT).
     * @param payload {Uint8Array} the PMT-specific portion of an MP2T
     * packet. The first byte in this array should be the table_id
     * field.
     * @param pmt {object} the object that should be decorated with
     * fields parsed from the PMT.
     */
    parsePmt = function parsePmt(payload, pmt) {
      var sectionLength, tableEnd, programInfoLength, offset;

      // PMTs can be sent ahead of the time when they should actually
      // take effect. We don't believe this should ever be the case
      // for HLS but we'll ignore "forward" PMT declarations if we see
      // them. Future PMT declarations have the current_next_indicator
      // set to zero.
      if (!(payload[5] & 0x01)) {
        return;
      }

      // overwrite any existing program map table
      self.programMapTable = {
        video: null,
        audio: null,
        'timed-metadata': {}
      };

      // the mapping table ends at the end of the current section
      sectionLength = (payload[1] & 0x0f) << 8 | payload[2];
      tableEnd = 3 + sectionLength - 4;

      // to determine where the table is, we have to figure out how
      // long the program info descriptors are
      programInfoLength = (payload[10] & 0x0f) << 8 | payload[11];

      // advance the offset to the first entry in the mapping table
      offset = 12 + programInfoLength;

      while (offset < tableEnd) {
        var streamType = payload[offset];
        var pid = (payload[offset + 1] & 0x1F) << 8 | payload[offset + 2];

        // only map a single elementary_pid for audio and video stream types
        // TODO: should this be done for metadata too? for now maintain behavior of
        // multiple metadata streams
        if (streamType === streamTypes.H264_STREAM_TYPE && self.programMapTable.video === null) {
          self.programMapTable.video = pid;
        } else if (streamType === streamTypes.ADTS_STREAM_TYPE && self.programMapTable.audio === null) {
          self.programMapTable.audio = pid;
        } else if (streamType === streamTypes.METADATA_STREAM_TYPE) {
          // map pid to stream type for metadata streams
          self.programMapTable['timed-metadata'][pid] = streamType;
        }

        // move to the next table entry
        // skip past the elementary stream descriptors, if present
        offset += ((payload[offset + 3] & 0x0F) << 8 | payload[offset + 4]) + 5;
      }

      // record the map on the packet as well
      pmt.programMapTable = self.programMapTable;
    };

    /**
     * Deliver a new MP2T packet to the next stream in the pipeline.
     */
    this.push = function (packet) {
      var result = {},
          offset = 4;

      result.payloadUnitStartIndicator = !!(packet[1] & 0x40);

      // pid is a 13-bit field starting at the last bit of packet[1]
      result.pid = packet[1] & 0x1f;
      result.pid <<= 8;
      result.pid |= packet[2];

      // if an adaption field is present, its length is specified by the
      // fifth byte of the TS packet header. The adaptation field is
      // used to add stuffing to PES packets that don't fill a complete
      // TS packet, and to specify some forms of timing and control data
      // that we do not currently use.
      if ((packet[3] & 0x30) >>> 4 > 0x01) {
        offset += packet[offset] + 1;
      }

      // parse the rest of the packet based on the type
      if (result.pid === 0) {
        result.type = 'pat';
        parsePsi(packet.subarray(offset), result);
        this.trigger('data', result);
      } else if (result.pid === this.pmtPid) {
        result.type = 'pmt';
        parsePsi(packet.subarray(offset), result);
        this.trigger('data', result);

        // if there are any packets waiting for a PMT to be found, process them now
        while (this.packetsWaitingForPmt.length) {
          this.processPes_.apply(this, this.packetsWaitingForPmt.shift());
        }
      } else if (this.programMapTable === undefined) {
        // When we have not seen a PMT yet, defer further processing of
        // PES packets until one has been parsed
        this.packetsWaitingForPmt.push([packet, offset, result]);
      } else {
        this.processPes_(packet, offset, result);
      }
    };

    // Annotate a PES packet with its stream type (looked up by PID in the
    // program map table) and emit it downstream.
    this.processPes_ = function (packet, offset, result) {
      // set the appropriate stream type
      if (result.pid === this.programMapTable.video) {
        result.streamType = streamTypes.H264_STREAM_TYPE;
      } else if (result.pid === this.programMapTable.audio) {
        result.streamType = streamTypes.ADTS_STREAM_TYPE;
      } else {
        // if not video or audio, it is timed-metadata or unknown
        // if unknown, streamType will be undefined
        result.streamType = this.programMapTable['timed-metadata'][result.pid];
      }

      result.type = 'pes';
      result.data = packet.subarray(offset);
      this.trigger('data', result);
    };
  };
|
|
|
  // inherit the base Stream API (on/off/trigger/pipe)
  _TransportParseStream.prototype = new stream();

  // MP2T stream_type values recognized by this parser
  _TransportParseStream.STREAM_TYPES = {
    h264: 0x1b,
    adts: 0x0f
  };
|
/** |
|
* Reconsistutes program elementary stream (PES) packets from parsed |
|
* transport stream packets. That is, if you pipe an |
|
* mp2t.TransportParseStream into a mp2t.ElementaryStream, the output |
|
* events will be events which capture the bytes for individual PES |
|
* packets plus relevant metadata that has been extracted from the |
|
* container. |
|
*/ |
|
|
|
_ElementaryStream = function ElementaryStream() { |
|
var self = this, |
|
// PES packet fragments |
|
video = { |
|
data: [], |
|
size: 0 |
|
}, |
|
audio = { |
|
data: [], |
|
size: 0 |
|
}, |
|
timedMetadata = { |
|
data: [], |
|
size: 0 |
|
}, |
|
parsePes = function parsePes(payload, pes) { |
|
var ptsDtsFlags; // get the packet length, this will be 0 for video |
|
|
|
pes.packetLength = 6 + (payload[4] << 8 | payload[5]); // find out if this packets starts a new keyframe |
|
|
|
pes.dataAlignmentIndicator = (payload[6] & 0x04) !== 0; // PES packets may be annotated with a PTS value, or a PTS value |
|
// and a DTS value. Determine what combination of values is |
|
// available to work with. |
|
|
|
ptsDtsFlags = payload[7]; // PTS and DTS are normally stored as a 33-bit number. Javascript |
|
// performs all bitwise operations on 32-bit integers but javascript |
|
// supports a much greater range (52-bits) of integer using standard |
|
// mathematical operations. |
|
// We construct a 31-bit value using bitwise operators over the 31 |
|
// most significant bits and then multiply by 4 (equal to a left-shift |
|
// of 2) before we add the final 2 least significant bits of the |
|
// timestamp (equal to an OR.) |
|
|
|
if (ptsDtsFlags & 0xC0) { |
|
// the PTS and DTS are not written out directly. For information |
|
// on how they are encoded, see |
|
// http://dvd.sourceforge.net/dvdinfo/pes-hdr.html |
|
pes.pts = (payload[9] & 0x0E) << 27 | (payload[10] & 0xFF) << 20 | (payload[11] & 0xFE) << 12 | (payload[12] & 0xFF) << 5 | (payload[13] & 0xFE) >>> 3; |
|
pes.pts *= 4; // Left shift by 2 |
|
|
|
pes.pts += (payload[13] & 0x06) >>> 1; // OR by the two LSBs |
|
|
|
pes.dts = pes.pts; |
|
|
|
if (ptsDtsFlags & 0x40) { |
|
pes.dts = (payload[14] & 0x0E) << 27 | (payload[15] & 0xFF) << 20 | (payload[16] & 0xFE) << 12 | (payload[17] & 0xFF) << 5 | (payload[18] & 0xFE) >>> 3; |
|
pes.dts *= 4; // Left shift by 2 |
|
|
|
pes.dts += (payload[18] & 0x06) >>> 1; // OR by the two LSBs |
|
} |
|
} // the data section starts immediately after the PES header. |
|
// pes_header_data_length specifies the number of header bytes |
|
// that follow the last byte of the field. |
|
|
|
|
|
pes.data = payload.subarray(9 + payload[8]); |
|
}, |
|
|
|
/** |
|
* Pass completely parsed PES packets to the next stream in the pipeline |
|
**/ |
|
flushStream = function flushStream(stream$$1, type, forceFlush) { |
|
var packetData = new Uint8Array(stream$$1.size), |
|
event = { |
|
type: type |
|
}, |
|
i = 0, |
|
offset = 0, |
|
packetFlushable = false, |
|
fragment; // do nothing if there is not enough buffered data for a complete |
|
// PES header |
|
|
|
if (!stream$$1.data.length || stream$$1.size < 9) { |
|
return; |
|
} |
|
|
|
event.trackId = stream$$1.data[0].pid; // reassemble the packet |
|
|
|
for (i = 0; i < stream$$1.data.length; i++) { |
|
fragment = stream$$1.data[i]; |
|
packetData.set(fragment.data, offset); |
|
offset += fragment.data.byteLength; |
|
} // parse assembled packet's PES header |
|
|
|
|
|
parsePes(packetData, event); // non-video PES packets MUST have a non-zero PES_packet_length |
|
// check that there is enough stream data to fill the packet |
|
|
|
packetFlushable = type === 'video' || event.packetLength <= stream$$1.size; // flush pending packets if the conditions are right |
|
|
|
if (forceFlush || packetFlushable) { |
|
stream$$1.size = 0; |
|
stream$$1.data.length = 0; |
|
} // only emit packets that are complete. this is to avoid assembling |
|
// incomplete PES packets due to poor segmentation |
|
|
|
|
|
if (packetFlushable) { |
|
self.trigger('data', event); |
|
} |
|
}; |
|
|
|
_ElementaryStream.prototype.init.call(this); |
|
/** |
|
* Identifies M2TS packet types and parses PES packets using metadata |
|
* parsed from the PMT |
|
**/ |
|
|
|
|
|
this.push = function (data) { |
|
({ |
|
pat: function pat() {// we have to wait for the PMT to arrive as well before we |
|
// have any meaningful metadata |
|
}, |
|
pes: function pes() { |
|
var stream$$1, streamType; |
|
|
|
switch (data.streamType) { |
|
case streamTypes.H264_STREAM_TYPE: |
|
case streamTypes.H264_STREAM_TYPE: |
|
stream$$1 = video; |
|
streamType = 'video'; |
|
break; |
|
|
|
case streamTypes.ADTS_STREAM_TYPE: |
|
stream$$1 = audio; |
|
streamType = 'audio'; |
|
break; |
|
|
|
case streamTypes.METADATA_STREAM_TYPE: |
|
stream$$1 = timedMetadata; |
|
streamType = 'timed-metadata'; |
|
break; |
|
|
|
default: |
|
// ignore unknown stream types |
|
return; |
|
} // if a new packet is starting, we can flush the completed |
|
// packet |
|
|
|
|
|
if (data.payloadUnitStartIndicator) { |
|
flushStream(stream$$1, streamType, true); |
|
} // buffer this fragment until we are sure we've received the |
|
// complete payload |
|
|
|
|
|
stream$$1.data.push(data); |
|
stream$$1.size += data.data.byteLength; |
|
}, |
|
pmt: function pmt() { |
|
var event = { |
|
type: 'metadata', |
|
tracks: [] |
|
}, |
|
programMapTable = data.programMapTable; // translate audio and video streams to tracks |
|
|
|
if (programMapTable.video !== null) { |
|
event.tracks.push({ |
|
timelineStartInfo: { |
|
baseMediaDecodeTime: 0 |
|
}, |
|
id: +programMapTable.video, |
|
codec: 'avc', |
|
type: 'video' |
|
}); |
|
} |
|
|
|
if (programMapTable.audio !== null) { |
|
event.tracks.push({ |
|
timelineStartInfo: { |
|
baseMediaDecodeTime: 0 |
|
}, |
|
id: +programMapTable.audio, |
|
codec: 'adts', |
|
type: 'audio' |
|
}); |
|
} |
|
|
|
self.trigger('data', event); |
|
} |
|
})[data.type](); |
|
}; |
|
/** |
|
* Flush any remaining input. Video PES packets may be of variable |
|
* length. Normally, the start of a new video packet can trigger the |
|
* finalization of the previous packet. That is not possible if no |
|
* more video is forthcoming, however. In that case, some other |
|
* mechanism (like the end of the file) has to be employed. When it is |
|
* clear that no additional data is forthcoming, calling this method |
|
* will flush the buffered packets. |
|
*/ |
|
|
|
|
|
this.flush = function () { |
|
// !!THIS ORDER IS IMPORTANT!! |
|
// video first then audio |
|
flushStream(video, 'video'); |
|
flushStream(audio, 'audio'); |
|
flushStream(timedMetadata, 'timed-metadata'); |
|
this.trigger('done'); |
|
}; |
|
}; |
|
|
|
  // inherit the base Stream API (on/off/trigger/pipe)
  _ElementaryStream.prototype = new stream();

  // public surface of the m2ts module: the pipeline stream constructors plus
  // a few constants
  var m2ts = {
    PAT_PID: 0x0000,
    MP2T_PACKET_LENGTH: MP2T_PACKET_LENGTH,
    TransportPacketStream: _TransportPacketStream,
    TransportParseStream: _TransportParseStream,
    ElementaryStream: _ElementaryStream,
    TimestampRolloverStream: TimestampRolloverStream$1,
    CaptionStream: captionStream.CaptionStream,
    Cea608Stream: captionStream.Cea608Stream,
    MetadataStream: metadataStream
  };

  // re-export every stream-type constant (e.g. H264_STREAM_TYPE) on m2ts
  for (var type in streamTypes) {
    if (streamTypes.hasOwnProperty(type)) {
      m2ts[type] = streamTypes[type];
    }
  }

  var m2ts_1 = m2ts;
|
|
|
  var _AdtsStream;

  // sampling frequencies in Hz, indexed by the 4-bit
  // sampling_frequency_index field of an ADTS header
  var ADTS_SAMPLING_FREQUENCIES = [96000, 88200, 64000, 48000, 44100, 32000, 24000, 22050, 16000, 12000, 11025, 8000, 7350];
|
/* |
|
* Accepts a ElementaryStream and emits data events with parsed |
|
* AAC Audio Frames of the individual packets. Input audio in ADTS |
|
* format is unpacked and re-emitted as AAC frames. |
|
* |
|
* @see http://wiki.multimedia.cx/index.php?title=ADTS |
|
* @see http://wiki.multimedia.cx/?title=Understanding_AAC |
|
*/ |
|
|
|
_AdtsStream = function AdtsStream() { |
|
var buffer; |
|
|
|
_AdtsStream.prototype.init.call(this); |
|
|
|
this.push = function (packet) { |
|
var i = 0, |
|
frameNum = 0, |
|
frameLength, |
|
protectionSkipBytes, |
|
frameEnd, |
|
oldBuffer, |
|
sampleCount, |
|
adtsFrameDuration; |
|
|
|
if (packet.type !== 'audio') { |
|
// ignore non-audio data |
|
return; |
|
} // Prepend any data in the buffer to the input data so that we can parse |
|
// aac frames the cross a PES packet boundary |
|
|
|
|
|
if (buffer) { |
|
oldBuffer = buffer; |
|
buffer = new Uint8Array(oldBuffer.byteLength + packet.data.byteLength); |
|
buffer.set(oldBuffer); |
|
buffer.set(packet.data, oldBuffer.byteLength); |
|
} else { |
|
buffer = packet.data; |
|
} // unpack any ADTS frames which have been fully received |
|
// for details on the ADTS header, see http://wiki.multimedia.cx/index.php?title=ADTS |
|
|
|
|
|
while (i + 5 < buffer.length) { |
|
// Loook for the start of an ADTS header.. |
|
if (buffer[i] !== 0xFF || (buffer[i + 1] & 0xF6) !== 0xF0) { |
|
// If a valid header was not found, jump one forward and attempt to |
|
// find a valid ADTS header starting at the next byte |
|
i++; |
|
continue; |
|
} // The protection skip bit tells us if we have 2 bytes of CRC data at the |
|
// end of the ADTS header |
|
|
|
|
|
protectionSkipBytes = (~buffer[i + 1] & 0x01) * 2; // Frame length is a 13 bit integer starting 16 bits from the |
|
// end of the sync sequence |
|
|
|
frameLength = (buffer[i + 3] & 0x03) << 11 | buffer[i + 4] << 3 | (buffer[i + 5] & 0xe0) >> 5; |
|
sampleCount = ((buffer[i + 6] & 0x03) + 1) * 1024; |
|
adtsFrameDuration = sampleCount * 90000 / ADTS_SAMPLING_FREQUENCIES[(buffer[i + 2] & 0x3c) >>> 2]; |
|
frameEnd = i + frameLength; // If we don't have enough data to actually finish this ADTS frame, return |
|
// and wait for more data |
|
|
|
if (buffer.byteLength < frameEnd) { |
|
return; |
|
} // Otherwise, deliver the complete AAC frame |
|
|
|
|
|
this.trigger('data', { |
|
pts: packet.pts + frameNum * adtsFrameDuration, |
|
dts: packet.dts + frameNum * adtsFrameDuration, |
|
sampleCount: sampleCount, |
|
audioobjecttype: (buffer[i + 2] >>> 6 & 0x03) + 1, |
|
channelcount: (buffer[i + 2] & 1) << 2 | (buffer[i + 3] & 0xc0) >>> 6, |
|
samplerate: ADTS_SAMPLING_FREQUENCIES[(buffer[i + 2] & 0x3c) >>> 2], |
|
samplingfrequencyindex: (buffer[i + 2] & 0x3c) >>> 2, |
|
// assume ISO/IEC 14496-12 AudioSampleEntry default of 16 |
|
samplesize: 16, |
|
data: buffer.subarray(i + 7 + protectionSkipBytes, frameEnd) |
|
}); // If the buffer is empty, clear it and return |
|
|
|
if (buffer.byteLength === frameEnd) { |
|
buffer = undefined; |
|
return; |
|
} |
|
|
|
frameNum++; // Remove the finished frame from the buffer and start the process again |
|
|
|
buffer = buffer.subarray(frameEnd); |
|
} |
|
}; |
|
|
|
this.flush = function () { |
|
this.trigger('done'); |
|
}; |
|
}; |
|
|
|
  // inherit the base Stream API (on/off/trigger/pipe)
  _AdtsStream.prototype = new stream();

  var adts = _AdtsStream;
|
var ExpGolomb; |
|
/** |
|
* Parser for exponential Golomb codes, a variable-bitwidth number encoding |
|
* scheme used by h264. |
|
*/ |
|
|
|
ExpGolomb = function ExpGolomb(workingData) { |
|
var // the number of bytes left to examine in workingData |
|
workingBytesAvailable = workingData.byteLength, |
|
// the current word being examined |
|
workingWord = 0, |
|
// :uint |
|
// the number of bits left to examine in the current word |
|
workingBitsAvailable = 0; // :uint; |
|
// ():uint |
|
|
|
this.length = function () { |
|
return 8 * workingBytesAvailable; |
|
}; // ():uint |
|
|
|
|
|
this.bitsAvailable = function () { |
|
return 8 * workingBytesAvailable + workingBitsAvailable; |
|
}; // ():void |
|
|
|
|
|
this.loadWord = function () { |
|
var position = workingData.byteLength - workingBytesAvailable, |
|
workingBytes = new Uint8Array(4), |
|
availableBytes = Math.min(4, workingBytesAvailable); |
|
|
|
if (availableBytes === 0) { |
|
throw new Error('no bytes available'); |
|
} |
|
|
|
workingBytes.set(workingData.subarray(position, position + availableBytes)); |
|
workingWord = new DataView(workingBytes.buffer).getUint32(0); // track the amount of workingData that has been processed |
|
|
|
workingBitsAvailable = availableBytes * 8; |
|
workingBytesAvailable -= availableBytes; |
|
}; // (count:int):void |
|
|
|
|
|
this.skipBits = function (count) { |
|
var skipBytes; // :int |
|
|
|
if (workingBitsAvailable > count) { |
|
workingWord <<= count; |
|
workingBitsAvailable -= count; |
|
} else { |
|
count -= workingBitsAvailable; |
|
skipBytes = Math.floor(count / 8); |
|
count -= skipBytes * 8; |
|
workingBytesAvailable -= skipBytes; |
|
this.loadWord(); |
|
workingWord <<= count; |
|
workingBitsAvailable -= count; |
|
} |
|
}; // (size:int):uint |
|
|
|
|
|
this.readBits = function (size) { |
|
var bits = Math.min(workingBitsAvailable, size), |
|
// :uint |
|
valu = workingWord >>> 32 - bits; // :uint |
|
// if size > 31, handle error |
|
|
|
workingBitsAvailable -= bits; |
|
|
|
if (workingBitsAvailable > 0) { |
|
workingWord <<= bits; |
|
} else if (workingBytesAvailable > 0) { |
|
this.loadWord(); |
|
} |
|
|
|
bits = size - bits; |
|
|
|
if (bits > 0) { |
|
return valu << bits | this.readBits(bits); |
|
} |
|
|
|
return valu; |
|
}; // ():uint |
|
|
|
|
|
this.skipLeadingZeros = function () { |
|
var leadingZeroCount; // :uint |
|
|
|
for (leadingZeroCount = 0; leadingZeroCount < workingBitsAvailable; ++leadingZeroCount) { |
|
if ((workingWord & 0x80000000 >>> leadingZeroCount) !== 0) { |
|
// the first bit of working word is 1 |
|
workingWord <<= leadingZeroCount; |
|
workingBitsAvailable -= leadingZeroCount; |
|
return leadingZeroCount; |
|
} |
|
} // we exhausted workingWord and still have not found a 1 |
|
|
|
|
|
this.loadWord(); |
|
return leadingZeroCount + this.skipLeadingZeros(); |
|
}; // ():void |
|
|
|
|
|
this.skipUnsignedExpGolomb = function () { |
|
this.skipBits(1 + this.skipLeadingZeros()); |
|
}; // ():void |
|
|
|
|
|
this.skipExpGolomb = function () { |
|
this.skipBits(1 + this.skipLeadingZeros()); |
|
}; // ():uint |
|
|
|
|
|
this.readUnsignedExpGolomb = function () { |
|
var clz = this.skipLeadingZeros(); // :uint |
|
|
|
return this.readBits(clz + 1) - 1; |
|
}; // ():int |
|
|
|
|
|
this.readExpGolomb = function () { |
|
var valu = this.readUnsignedExpGolomb(); // :int |
|
|
|
if (0x01 & valu) { |
|
// the number is odd if the low order bit is set |
|
return 1 + valu >>> 1; // add 1 to make it even, and divide by 2 |
|
} |
|
|
|
return -1 * (valu >>> 1); // divide by two then make it negative |
|
}; // Some convenience functions |
|
// :Boolean |
|
|
|
|
|
this.readBoolean = function () { |
|
return this.readBits(1) === 1; |
|
}; // ():int |
|
|
|
|
|
this.readUnsignedByte = function () { |
|
return this.readBits(8); |
|
}; |
|
|
|
this.loadWord(); |
|
}; |
|
|
|
  var expGolomb = ExpGolomb;

  var _H264Stream, _NalByteStream;

  // set of profile_idc values whose SPS carries extra optional fields;
  // populated below
  var PROFILES_WITH_OPTIONAL_SPS_DATA;
|
/** |
|
* Accepts a NAL unit byte stream and unpacks the embedded NAL units. |
|
*/ |
|
|
|
  _NalByteStream = function NalByteStream() {
    var syncPoint = 0,
        i,
        buffer;

    _NalByteStream.prototype.init.call(this);

    /*
     * Scans a byte stream and triggers a data event with the NAL units found.
     * @param {Object} data Event received from H264Stream
     * @param {Uint8Array} data.data The h264 byte stream to be scanned
     *
     * @see H264Stream.push
     */
    this.push = function (data) {
      var swapBuffer;

      // append the incoming bytes to any left over from a previous push
      if (!buffer) {
        buffer = data.data;
      } else {
        swapBuffer = new Uint8Array(buffer.byteLength + data.data.byteLength);
        swapBuffer.set(buffer);
        swapBuffer.set(data.data, buffer.byteLength);
        buffer = swapBuffer;
      }

      // Rec. ITU-T H.264, Annex B
      // scan for NAL unit boundaries
      // a match looks like this:
      // 0 0 1 .. NAL .. 0 0 1
      // ^ sync point ^ i
      // or this:
      // 0 0 1 .. NAL .. 0 0 0
      // ^ sync point ^ i
      // advance the sync point to a NAL start, if necessary
      for (; syncPoint < buffer.byteLength - 3; syncPoint++) {
        if (buffer[syncPoint + 2] === 1) {
          // the sync point is properly aligned
          i = syncPoint + 5;
          break;
        }
      }

      // step through the buffer three bytes at a time, classifying the
      // byte at `i` to detect the next start-code prefix
      while (i < buffer.byteLength) {
        // look at the current byte to determine if we've hit the end of
        // a NAL unit boundary
        switch (buffer[i]) {
          case 0:
            // skip past non-sync sequences
            if (buffer[i - 1] !== 0) {
              i += 2;
              break;
            } else if (buffer[i - 2] !== 0) {
              i++;
              break;
            }

            // deliver the NAL unit if it isn't empty
            if (syncPoint + 3 !== i - 2) {
              this.trigger('data', buffer.subarray(syncPoint + 3, i - 2));
            }

            // drop trailing zeroes
            do {
              i++;
            } while (buffer[i] !== 1 && i < buffer.length);

            syncPoint = i - 2;
            i += 3;
            break;

          case 1:
            // skip past non-sync sequences
            if (buffer[i - 1] !== 0 || buffer[i - 2] !== 0) {
              i += 3;
              break;
            }

            // deliver the NAL unit
            this.trigger('data', buffer.subarray(syncPoint + 3, i - 2));
            syncPoint = i - 2;
            i += 3;
            break;

          default:
            // the current byte isn't a one or zero, so it cannot be part
            // of a sync sequence
            i += 3;
            break;
        }
      }

      // filter out the NAL units that were delivered, keeping any
      // partial unit for the next push
      buffer = buffer.subarray(syncPoint);
      i -= syncPoint;
      syncPoint = 0;
    };

    this.flush = function () {
      // deliver the last buffered NAL unit
      if (buffer && buffer.byteLength > 3) {
        this.trigger('data', buffer.subarray(syncPoint + 3));
      }

      // reset the stream state
      buffer = null;
      syncPoint = 0;
      this.trigger('done');
    };
  };
|
|
|
  // inherit the base Stream API (on/off/trigger/pipe)
  _NalByteStream.prototype = new stream();

  // values of profile_idc that indicate additional fields are included in the SPS
  // see Recommendation ITU-T H.264 (4/2013),
  // 7.3.2.1.1 Sequence parameter set data syntax
  PROFILES_WITH_OPTIONAL_SPS_DATA = {
    100: true,
    110: true,
    122: true,
    244: true,
    44: true,
    83: true,
    86: true,
    118: true,
    128: true,
    138: true,
    139: true,
    134: true
  };
|
/** |
|
* Accepts input from a ElementaryStream and produces H.264 NAL unit data |
|
* events. |
|
*/ |
|
|
|
_H264Stream = function H264Stream() { |
|
var nalByteStream = new _NalByteStream(), |
|
self, |
|
trackId, |
|
currentPts, |
|
currentDts, |
|
discardEmulationPreventionBytes, |
|
readSequenceParameterSet, |
|
skipScalingList; |
|
|
|
_H264Stream.prototype.init.call(this); |
|
|
|
self = this; |
|
/* |
|
* Pushes a packet from a stream onto the NalByteStream |
|
* |
|
* @param {Object} packet - A packet received from a stream |
|
* @param {Uint8Array} packet.data - The raw bytes of the packet |
|
* @param {Number} packet.dts - Decode timestamp of the packet |
|
* @param {Number} packet.pts - Presentation timestamp of the packet |
|
* @param {Number} packet.trackId - The id of the h264 track this packet came from |
|
* @param {('video'|'audio')} packet.type - The type of packet |
|
* |
|
*/ |
|
|
|
    this.push = function (packet) {
      if (packet.type !== 'video') {
        // only h264 elementary stream packets are relevant here
        return;
      }

      // remember the most recent timing info so NAL unit events produced by
      // nalByteStream can be stamped with it
      trackId = packet.trackId;
      currentPts = packet.pts;
      currentDts = packet.dts;
      nalByteStream.push(packet);
    };
|
/* |
|
* Identify NAL unit types and pass on the NALU, trackId, presentation and decode timestamps |
|
* for the NALUs to the next stream component. |
|
* Also, preprocess caption and sequence parameter NALUs. |
|
* |
|
* @param {Uint8Array} data - A NAL unit identified by `NalByteStream.push` |
|
* @see NalByteStream.push |
|
*/ |
|
|
|
|
|
    nalByteStream.on('data', function (data) {
      var event = {
        trackId: trackId,
        pts: currentPts,
        dts: currentDts,
        data: data
      };

      // the low five bits of the first NAL unit byte are its nal_unit_type
      switch (data[0] & 0x1f) {
        case 0x05:
          event.nalUnitType = 'slice_layer_without_partitioning_rbsp_idr';
          break;

        case 0x06:
          event.nalUnitType = 'sei_rbsp';
          event.escapedRBSP = discardEmulationPreventionBytes(data.subarray(1));
          break;

        case 0x07:
          event.nalUnitType = 'seq_parameter_set_rbsp';
          event.escapedRBSP = discardEmulationPreventionBytes(data.subarray(1));
          // extract video dimensions and profile/level info from the SPS
          event.config = readSequenceParameterSet(event.escapedRBSP);
          break;

        case 0x08:
          event.nalUnitType = 'pic_parameter_set_rbsp';
          break;

        case 0x09:
          event.nalUnitType = 'access_unit_delimiter_rbsp';
          break;

        default:
          // other NAL unit types are forwarded without annotation
          break;
      }

      // This triggers data on the H264Stream
      self.trigger('data', event);
    });
|
    // propagate end-of-input downstream
    nalByteStream.on('done', function () {
      self.trigger('done');
    });

    // flushing the byte stream emits any buffered NAL unit, which in turn
    // fires the 'done' handler above
    this.flush = function () {
      nalByteStream.flush();
    };
|
/** |
|
* Advance the ExpGolomb decoder past a scaling list. The scaling |
|
* list is optionally transmitted as part of a sequence parameter |
|
* set and is not relevant to transmuxing. |
|
* @param count {number} the number of entries in this scaling list |
|
* @param expGolombDecoder {object} an ExpGolomb pointed to the |
|
* start of a scaling list |
|
* @see Recommendation ITU-T H.264, Section 7.3.2.1.1.1 |
|
*/ |
|
|
|
|
|
skipScalingList = function skipScalingList(count, expGolombDecoder) { |
|
var lastScale = 8, |
|
nextScale = 8, |
|
j, |
|
deltaScale; |
|
|
|
for (j = 0; j < count; j++) { |
|
if (nextScale !== 0) { |
|
deltaScale = expGolombDecoder.readExpGolomb(); |
|
nextScale = (lastScale + deltaScale + 256) % 256; |
|
} |
|
|
|
lastScale = nextScale === 0 ? lastScale : nextScale; |
|
} |
|
}; |
|
/** |
|
* Expunge any "Emulation Prevention" bytes from a "Raw Byte |
|
* Sequence Payload" |
|
* @param data {Uint8Array} the bytes of a RBSP from a NAL |
|
* unit |
|
* @return {Uint8Array} the RBSP without any Emulation |
|
* Prevention Bytes |
|
*/ |
|
|
|
|
|
discardEmulationPreventionBytes = function discardEmulationPreventionBytes(data) { |
|
var length = data.byteLength, |
|
emulationPreventionBytesPositions = [], |
|
i = 1, |
|
newLength, |
|
newData; // Find all `Emulation Prevention Bytes` |
|
|
|
while (i < length - 2) { |
|
if (data[i] === 0 && data[i + 1] === 0 && data[i + 2] === 0x03) { |
|
emulationPreventionBytesPositions.push(i + 2); |
|
i += 2; |
|
} else { |
|
i++; |
|
} |
|
} // If no Emulation Prevention Bytes were found just return the original |
|
// array |
|
|
|
|
|
if (emulationPreventionBytesPositions.length === 0) { |
|
return data; |
|
} // Create a new array to hold the NAL unit data |
|
|
|
|
|
newLength = length - emulationPreventionBytesPositions.length; |
|
newData = new Uint8Array(newLength); |
|
var sourceIndex = 0; |
|
|
|
for (i = 0; i < newLength; sourceIndex++, i++) { |
|
if (sourceIndex === emulationPreventionBytesPositions[0]) { |
|
// Skip this byte |
|
sourceIndex++; // Remove this position index |
|
|
|
emulationPreventionBytesPositions.shift(); |
|
} |
|
|
|
newData[i] = data[sourceIndex]; |
|
} |
|
|
|
return newData; |
|
}; |
|
/** |
|
* Read a sequence parameter set and return some interesting video |
|
* properties. A sequence parameter set is the H264 metadata that |
|
* describes the properties of upcoming video frames. |
|
* @param data {Uint8Array} the bytes of a sequence parameter set |
|
* @return {object} an object with configuration parsed from the |
|
* sequence parameter set, including the dimensions of the |
|
* associated video frames. |
|
*/ |
|
|
|
|
|
readSequenceParameterSet = function readSequenceParameterSet(data) { |
|
var frameCropLeftOffset = 0, |
|
frameCropRightOffset = 0, |
|
frameCropTopOffset = 0, |
|
frameCropBottomOffset = 0, |
|
sarScale = 1, |
|
expGolombDecoder, |
|
profileIdc, |
|
levelIdc, |
|
profileCompatibility, |
|
chromaFormatIdc, |
|
picOrderCntType, |
|
numRefFramesInPicOrderCntCycle, |
|
picWidthInMbsMinus1, |
|
picHeightInMapUnitsMinus1, |
|
frameMbsOnlyFlag, |
|
scalingListCount, |
|
sarRatio, |
|
aspectRatioIdc, |
|
i; |
|
expGolombDecoder = new expGolomb(data); |
|
profileIdc = expGolombDecoder.readUnsignedByte(); // profile_idc |
|
|
|
profileCompatibility = expGolombDecoder.readUnsignedByte(); // constraint_set[0-5]_flag |
|
|
|
levelIdc = expGolombDecoder.readUnsignedByte(); // level_idc u(8) |
|
|
|
expGolombDecoder.skipUnsignedExpGolomb(); // seq_parameter_set_id |
|
// some profiles have more optional data we don't need |
|
|
|
if (PROFILES_WITH_OPTIONAL_SPS_DATA[profileIdc]) { |
|
chromaFormatIdc = expGolombDecoder.readUnsignedExpGolomb(); |
|
|
|
if (chromaFormatIdc === 3) { |
|
expGolombDecoder.skipBits(1); // separate_colour_plane_flag |
|
} |
|
|
|
expGolombDecoder.skipUnsignedExpGolomb(); // bit_depth_luma_minus8 |
|
|
|
expGolombDecoder.skipUnsignedExpGolomb(); // bit_depth_chroma_minus8 |
|
|
|
expGolombDecoder.skipBits(1); // qpprime_y_zero_transform_bypass_flag |
|
|
|
if (expGolombDecoder.readBoolean()) { |
|
// seq_scaling_matrix_present_flag |
|
scalingListCount = chromaFormatIdc !== 3 ? 8 : 12; |
|
|
|
for (i = 0; i < scalingListCount; i++) { |
|
if (expGolombDecoder.readBoolean()) { |
|
// seq_scaling_list_present_flag[ i ] |
|
if (i < 6) { |
|
skipScalingList(16, expGolombDecoder); |
|
} else { |
|
skipScalingList(64, expGolombDecoder); |
|
} |
|
} |
|
} |
|
} |
|
} |
|
|
|
expGolombDecoder.skipUnsignedExpGolomb(); // log2_max_frame_num_minus4 |
|
|
|
picOrderCntType = expGolombDecoder.readUnsignedExpGolomb(); |
|
|
|
if (picOrderCntType === 0) { |
|
expGolombDecoder.readUnsignedExpGolomb(); // log2_max_pic_order_cnt_lsb_minus4 |
|
} else if (picOrderCntType === 1) { |
|
expGolombDecoder.skipBits(1); // delta_pic_order_always_zero_flag |
|
|
|
expGolombDecoder.skipExpGolomb(); // offset_for_non_ref_pic |
|
|
|
expGolombDecoder.skipExpGolomb(); // offset_for_top_to_bottom_field |
|
|
|
numRefFramesInPicOrderCntCycle = expGolombDecoder.readUnsignedExpGolomb(); |
|
|
|
for (i = 0; i < numRefFramesInPicOrderCntCycle; i++) { |
|
expGolombDecoder.skipExpGolomb(); // offset_for_ref_frame[ i ] |
|
} |
|
} |
|
|
|
expGolombDecoder.skipUnsignedExpGolomb(); // max_num_ref_frames |
|
|
|
expGolombDecoder.skipBits(1); // gaps_in_frame_num_value_allowed_flag |
|
|
|
picWidthInMbsMinus1 = expGolombDecoder.readUnsignedExpGolomb(); |
|
picHeightInMapUnitsMinus1 = expGolombDecoder.readUnsignedExpGolomb(); |
|
frameMbsOnlyFlag = expGolombDecoder.readBits(1); |
|
|
|
if (frameMbsOnlyFlag === 0) { |
|
expGolombDecoder.skipBits(1); // mb_adaptive_frame_field_flag |
|
} |
|
|
|
expGolombDecoder.skipBits(1); // direct_8x8_inference_flag |
|
|
|
if (expGolombDecoder.readBoolean()) { |
|
// frame_cropping_flag |
|
frameCropLeftOffset = expGolombDecoder.readUnsignedExpGolomb(); |
|
frameCropRightOffset = expGolombDecoder.readUnsignedExpGolomb(); |
|
frameCropTopOffset = expGolombDecoder.readUnsignedExpGolomb(); |
|
frameCropBottomOffset = expGolombDecoder.readUnsignedExpGolomb(); |
|
} |
|
|
|
if (expGolombDecoder.readBoolean()) { |
|
// vui_parameters_present_flag |
|
if (expGolombDecoder.readBoolean()) { |
|
// aspect_ratio_info_present_flag |
|
aspectRatioIdc = expGolombDecoder.readUnsignedByte(); |
|
|
|
switch (aspectRatioIdc) { |
|
case 1: |
|
sarRatio = [1, 1]; |
|
break; |
|
|
|
case 2: |
|
sarRatio = [12, 11]; |
|
break; |
|
|
|
case 3: |
|
sarRatio = [10, 11]; |
|
break; |
|
|
|
case 4: |
|
sarRatio = [16, 11]; |
|
break; |
|
|
|
case 5: |
|
sarRatio = [40, 33]; |
|
break; |
|
|
|
case 6: |
|
sarRatio = [24, 11]; |
|
break; |
|
|
|
case 7: |
|
sarRatio = [20, 11]; |
|
break; |
|
|
|
case 8: |
|
sarRatio = [32, 11]; |
|
break; |
|
|
|
case 9: |
|
sarRatio = [80, 33]; |
|
break; |
|
|
|
case 10: |
|
sarRatio = [18, 11]; |
|
break; |
|
|
|
case 11: |
|
sarRatio = [15, 11]; |
|
break; |
|
|
|
case 12: |
|
sarRatio = [64, 33]; |
|
break; |
|
|
|
case 13: |
|
sarRatio = [160, 99]; |
|
break; |
|
|
|
case 14: |
|
sarRatio = [4, 3]; |
|
break; |
|
|
|
case 15: |
|
sarRatio = [3, 2]; |
|
break; |
|
|
|
case 16: |
|
sarRatio = [2, 1]; |
|
break; |
|
|
|
case 255: |
|
{ |
|
sarRatio = [expGolombDecoder.readUnsignedByte() << 8 | expGolombDecoder.readUnsignedByte(), expGolombDecoder.readUnsignedByte() << 8 | expGolombDecoder.readUnsignedByte()]; |
|
break; |
|
} |
|
} |
|
|
|
if (sarRatio) { |
|
sarScale = sarRatio[0] / sarRatio[1]; |
|
} |
|
} |
|
} |
|
|
|
return { |
|
profileIdc: profileIdc, |
|
levelIdc: levelIdc, |
|
profileCompatibility: profileCompatibility, |
|
width: Math.ceil(((picWidthInMbsMinus1 + 1) * 16 - frameCropLeftOffset * 2 - frameCropRightOffset * 2) * sarScale), |
|
height: (2 - frameMbsOnlyFlag) * (picHeightInMapUnitsMinus1 + 1) * 16 - frameCropTopOffset * 2 - frameCropBottomOffset * 2 |
|
}; |
|
}; |
|
}; |
|
|
|
// H264Stream inherits the base Stream interface (push/flush/trigger/on)
_H264Stream.prototype = new stream();
// Public surface of the H.264 parsing streams
var h264 = {
  H264Stream: _H264Stream,
  NalByteStream: _NalByteStream
};
|
/** |
|
* mux.js |
|
* |
|
* Copyright (c) 2016 Brightcove |
|
* All rights reserved. |
|
* |
|
* Utilities to detect basic properties and metadata about Aac data. |
|
*/ |
|
|
|
// Sampling frequencies in Hz, indexed by the 4-bit ADTS sampling_frequency_index field
var ADTS_SAMPLING_FREQUENCIES$1 = [96000, 88200, 64000, 48000, 44100, 32000, 24000, 22050, 16000, 12000, 11025, 8000, 7350];
|
|
|
/**
 * Determine whether a byte buffer looks like raw AAC audio data by
 * checking for a leading ID3v2 tag ("ID3" in ASCII at offset 0).
 * @param {Uint8Array} data bytes to sniff
 * @return {boolean} true when the buffer starts with an ID3 tag
 */
var isLikelyAacData = function isLikelyAacData(data) {
  var marker = 'ID3';

  for (var idx = 0; idx < marker.length; idx++) {
    if (data[idx] !== marker.charCodeAt(idx)) {
      return false;
    }
  }

  return true;
};
|
|
|
/**
 * Decode an ID3v2 "syncsafe" integer: four bytes, each contributing
 * seven payload bits, most-significant byte first.
 * @param {Uint8Array} data four bytes to decode
 * @return {number} the decoded 28-bit value
 */
var parseSyncSafeInteger$1 = function parseSyncSafeInteger(data) {
  var value = 0;

  for (var i = 0; i < 4; i++) {
    value = value << 7 | data[i];
  }

  return value;
}; // return a percent-encoded representation of the specified byte range
|
// @see http://en.wikipedia.org/wiki/Percent-encoding |
|
|
|
|
|
/**
 * Produce a percent-encoded string ("%xx" per byte, lowercase hex,
 * zero-padded to two digits) for the bytes in [start, end).
 * @param {Uint8Array} bytes source buffer
 * @param {number} start inclusive start index
 * @param {number} end exclusive end index
 * @return {string} percent-encoded representation
 */
var percentEncode$1 = function percentEncode(bytes, start, end) {
  var encoded = [];

  for (var idx = start; idx < end; idx++) {
    encoded.push('%' + ('00' + bytes[idx].toString(16)).slice(-2));
  }

  return encoded.join('');
}; // return the string representation of the specified byte range,
|
// interpreted as ISO-8859-1. |
|
|
|
|
|
/**
 * Decode a byte range as an ISO-8859-1 (Latin-1) string. Each byte is
 * percent-encoded and run through unescape(), which maps every %xx
 * sequence directly to the code point xx.
 * @param {Uint8Array} bytes source buffer
 * @param {number} start inclusive start index
 * @param {number} end exclusive end index
 * @return {string} the decoded string
 */
var parseIso88591$1 = function parseIso88591(bytes, start, end) {
  var encoded = percentEncode$1(bytes, start, end);
  return unescape(encoded); // jshint ignore:line
};
|
|
|
/**
 * Compute the total byte size of an ID3v2 tag that begins at byteIndex.
 * @param {Uint8Array} header buffer containing the tag header
 * @param {number} byteIndex offset of the "ID3" marker
 * @return {number} declared body size plus the 10-byte header and,
 *   when the footer flag (bit 4 of the flags byte) is set, a 10-byte footer
 */
var parseId3TagSize = function parseId3TagSize(header, byteIndex) {
  // bytes 6-9: syncsafe body size (excludes header and optional footer)
  var tagSize = header[byteIndex + 6] << 21 |
      header[byteIndex + 7] << 14 |
      header[byteIndex + 8] << 7 |
      header[byteIndex + 9];
  // byte 5: flags; bit 4 indicates a trailing 10-byte footer
  var footerPresent = (header[byteIndex + 5] & 16) >> 4;

  return footerPresent ? tagSize + 20 : tagSize + 10;
};
|
|
|
/**
 * Read the 13-bit aac_frame_length field of an ADTS header starting at
 * byteIndex. The field is split across header bytes 3-5:
 * bits 1-0 of byte 3 (high two), all of byte 4 (middle eight), and
 * bits 7-5 of byte 5 (low three).
 * @param {Uint8Array} header buffer containing the ADTS header
 * @param {number} byteIndex offset of the ADTS syncword
 * @return {number} frame length in bytes, including the header itself
 */
var parseAdtsSize = function parseAdtsSize(header, byteIndex) {
  var lowThree = (header[byteIndex + 5] & 0xE0) >> 5,
      middle = header[byteIndex + 4] << 3,
      // Mask BEFORE shifting: `<<` binds tighter than `&`, so the previous
      // `header[byteIndex + 3] & 0x3 << 11` masked the byte with 0x1800 and
      // always produced 0, silently dropping the top two bits of the size.
      highTwo = (header[byteIndex + 3] & 0x3) << 11;

  return highTwo | middle | lowThree;
};
|
|
|
/**
 * Classify the packet starting at byteIndex as ID3 timed metadata, ADTS
 * audio, or neither.
 * @param {Uint8Array} header buffer to inspect
 * @param {number} byteIndex offset to test
 * @return {?string} 'timed-metadata', 'audio', or null
 */
var parseType$1 = function parseType(header, byteIndex) {
  if (header[byteIndex] === 'I'.charCodeAt(0) && header[byteIndex + 1] === 'D'.charCodeAt(0) && header[byteIndex + 2] === '3'.charCodeAt(0)) {
    return 'timed-metadata';
  } else if ((header[byteIndex] & 0xff) === 0xff && (header[byteIndex + 1] & 0xf0) === 0xf0) {
    // 12-bit ADTS syncword 0xFFF. The first comparison must be
    // parenthesized: `===` binds tighter than `&`, so the previous
    // `header[byteIndex] & 0xff === 0xff` evaluated
    // `header[byteIndex] & true` — it tested only bit 0 and misclassified
    // any odd byte followed by 0xFx as audio.
    return 'audio';
  }

  return null;
};
|
|
|
/**
 * Find the first ADTS frame header in a packet and return the sample rate
 * it declares, looked up via its 4-bit sampling_frequency_index.
 * @param {Uint8Array} packet bytes to scan
 * @return {?number} sample rate in Hz, or null when no header is found
 */
var parseSampleRate = function parseSampleRate(packet) {
  // Advance one byte at a time until a 12-bit ADTS syncword is found
  // (0xFF followed by a byte whose masked form is 0xF0).
  for (var offset = 0; offset + 5 < packet.length; offset++) {
    var atSyncword = packet[offset] === 0xFF && (packet[offset + 1] & 0xF6) === 0xF0;

    if (!atSyncword) {
      // not a valid header here; resume the search at the next byte
      continue;
    }

    // sampling_frequency_index lives in bits 5-2 of the third header byte
    return ADTS_SAMPLING_FREQUENCIES$1[(packet[offset + 2] & 0x3c) >>> 2];
  }

  return null;
};
|
|
|
/**
 * Extract the com.apple.streaming.transportStreamTimestamp value (a 33-bit
 * 90kHz timestamp) from the PRIV frame of an ID3v2 tag.
 * @param {Uint8Array} packet bytes beginning with an ID3 tag
 * @return {?number} the timestamp, or null when no matching frame exists
 */
var parseAacTimestamp = function parseAacTimestamp(packet) {
  // frames begin after the 10-byte ID3v2 tag header
  var frameStart = 10;

  if (packet[5] & 0x40) {
    // advance past the extended header: its 4-byte size field plus the
    // length that field declares
    frameStart += 4;
    frameStart += parseSyncSafeInteger$1(packet.subarray(10, 14));
  } // parse one or more ID3 frames
  // http://id3.org/id3v2.3.0#ID3v2_frame_overview

  while (frameStart < packet.byteLength) {
    // bytes 4-7 of the frame header hold the syncsafe body size
    var frameSize = parseSyncSafeInteger$1(packet.subarray(frameStart + 4, frameStart + 8));

    if (frameSize < 1) {
      return null;
    }

    var frameHeader = String.fromCharCode(packet[frameStart], packet[frameStart + 1], packet[frameStart + 2], packet[frameStart + 3]);

    if (frameHeader === 'PRIV') {
      var frame = packet.subarray(frameStart + 10, frameStart + frameSize + 10);

      // the PRIV body is a null-terminated owner string followed by data
      for (var i = 0; i < frame.byteLength; i++) {
        if (frame[i] !== 0) {
          continue;
        }

        var owner = parseIso88591$1(frame, 0, i);

        if (owner === 'com.apple.streaming.transportStreamTimestamp') {
          // 33-bit PES-style timestamp packed into the 8 bytes after
          // the terminator; assembled in two steps to avoid 32-bit
          // overflow in the bitwise operators
          var d = frame.subarray(i + 1);
          var timestamp = (d[3] & 0x01) << 30 | d[4] << 22 | d[5] << 14 | d[6] << 6 | d[7] >>> 2;
          timestamp *= 4;
          timestamp += d[7] & 0x03;
          return timestamp;
        }

        break;
      }
    }

    // advance past this frame's 10-byte header and its body
    frameStart += 10 + frameSize;
  }

  return null;
};
|
|
|
// Public surface of the AAC probing helpers defined above
var utils = {
  isLikelyAacData: isLikelyAacData,     // quick "starts with ID3" sniff
  parseId3TagSize: parseId3TagSize,     // total byte size of an ID3v2 tag
  parseAdtsSize: parseAdtsSize,         // byte length of an ADTS frame
  parseType: parseType$1,               // 'timed-metadata' | 'audio' | null
  parseSampleRate: parseSampleRate,     // Hz from the first ADTS header
  parseAacTimestamp: parseAacTimestamp  // PTS from an ID3 PRIV frame
}; // Constants
|
|
|
var _AacStream;
/**
 * Splits an incoming stream of binary data into ADTS and ID3 Frames.
 */

_AacStream = function AacStream() {
  var everything = new Uint8Array(), // unconsumed bytes carried between pushes
      timeStamp = 0;                 // PTS/DTS stamped onto emitted audio frames

  _AacStream.prototype.init.call(this);

  /**
   * Set the timestamp applied to every subsequently emitted audio packet.
   * @param {number} timestamp 90kHz clock value
   */
  this.setTimestamp = function (timestamp) {
    timeStamp = timestamp;
  };

  /**
   * Accept a chunk of bytes, carve complete ID3 tags and ADTS frames out
   * of it, and emit each as a 'data' event. An incomplete trailing tag or
   * frame is retained and prepended to the next push.
   * @param {Uint8Array} bytes raw AAC/ID3 bytes
   */
  this.push = function (bytes) {
    var frameSize = 0,
        byteIndex = 0,
        bytesLeft,
        chunk,
        packet,
        leftover; // If there are bytes remaining from the last segment, prepend them to the
    // bytes that were pushed in

    if (everything.length) {
      // BUGFIX: keep a reference to the old buffer BEFORE reallocating.
      // The previous code allocated the new `everything` first and then
      // copied `everything.subarray(0, tempLength)` — a slice of the new,
      // zero-filled array — so the carried-over bytes were silently
      // replaced with zeros.
      leftover = everything;
      everything = new Uint8Array(bytes.byteLength + leftover.length);
      everything.set(leftover);
      everything.set(bytes, leftover.length);
    } else {
      everything = bytes;
    }

    // need at least 3 bytes to recognize either an "ID3" marker or a syncword
    while (everything.length - byteIndex >= 3) {
      if (everything[byteIndex] === 'I'.charCodeAt(0) && everything[byteIndex + 1] === 'D'.charCodeAt(0) && everything[byteIndex + 2] === '3'.charCodeAt(0)) {
        // Exit early because we don't have enough to parse
        // the ID3 tag header
        if (everything.length - byteIndex < 10) {
          break;
        } // check framesize

        frameSize = utils.parseId3TagSize(everything, byteIndex); // Exit early if we don't have enough in the buffer
        // to emit a full packet
        // Add to byteIndex to support multiple ID3 tags in sequence

        if (byteIndex + frameSize > everything.length) {
          break;
        }

        chunk = {
          type: 'timed-metadata',
          data: everything.subarray(byteIndex, byteIndex + frameSize)
        };
        this.trigger('data', chunk);
        byteIndex += frameSize;
        continue;
      } else if ((everything[byteIndex] & 0xff) === 0xff && (everything[byteIndex + 1] & 0xf0) === 0xf0) {
        // Exit early because we don't have enough to parse
        // the ADTS frame header
        if (everything.length - byteIndex < 7) {
          break;
        }

        frameSize = utils.parseAdtsSize(everything, byteIndex); // Exit early if we don't have enough in the buffer
        // to emit a full packet

        if (byteIndex + frameSize > everything.length) {
          break;
        }

        packet = {
          type: 'audio',
          data: everything.subarray(byteIndex, byteIndex + frameSize),
          pts: timeStamp,
          dts: timeStamp
        };
        this.trigger('data', packet);
        byteIndex += frameSize;
        continue;
      }

      // neither marker matched; resynchronize at the next byte
      byteIndex++;
    }

    bytesLeft = everything.length - byteIndex;

    // retain any unconsumed tail for the next push, otherwise reset
    if (bytesLeft > 0) {
      everything = everything.subarray(byteIndex);
    } else {
      everything = new Uint8Array();
    }
  };
};
|
|
|
// AacStream inherits the base Stream interface
_AacStream.prototype = new stream();
var aac = _AacStream;
var H264Stream = h264.H264Stream;
var isLikelyAacData$1 = utils.isLikelyAacData; // constants

// track properties copied verbatim from parsed frames onto track objects
var AUDIO_PROPERTIES = ['audioobjecttype', 'channelcount', 'samplerate', 'samplingfrequencyindex', 'samplesize'];
var VIDEO_PROPERTIES = ['width', 'height', 'profileIdc', 'levelIdc', 'profileCompatibility']; // object types

// forward declarations for the segment/transmux stream constructors below
var _VideoSegmentStream, _AudioSegmentStream, _Transmuxer, _CoalesceStream;
|
/** |
|
* Compare two arrays (even typed) for same-ness |
|
*/ |
|
|
|
|
|
/**
 * Compare two array-likes (plain or typed) for equal length and
 * element-wise strict equality.
 * @param {Array|TypedArray} a first array
 * @param {Array|TypedArray} b second array
 * @return {boolean} true when both arrays hold the same values
 */
var arrayEquals = function arrayEquals(a, b) {
  if (a.length !== b.length) {
    return false;
  } // compare the value of each element in the array

  for (var idx = a.length - 1; idx >= 0; idx--) {
    if (a[idx] !== b[idx]) {
      return false;
    }
  }

  return true;
};
|
|
|
var generateVideoSegmentTimingInfo = function generateVideoSegmentTimingInfo(baseMediaDecodeTime, startDts, startPts, endDts, endPts, prependedContentDuration) { |
|
var ptsOffsetFromDts = startPts - startDts, |
|
decodeDuration = endDts - startDts, |
|
presentationDuration = endPts - startPts; // The PTS and DTS values are based on the actual stream times from the segment, |
|
// however, the player time values will reflect a start from the baseMediaDecodeTime. |
|
// In order to provide relevant values for the player times, base timing info on the |
|
// baseMediaDecodeTime and the DTS and PTS durations of the segment. |
|
|
|
return { |
|
start: { |
|
dts: baseMediaDecodeTime, |
|
pts: baseMediaDecodeTime + ptsOffsetFromDts |
|
}, |
|
end: { |
|
dts: baseMediaDecodeTime + decodeDuration, |
|
pts: baseMediaDecodeTime + presentationDuration |
|
}, |
|
prependedContentDuration: prependedContentDuration, |
|
baseMediaDecodeTime: baseMediaDecodeTime |
|
}; |
|
}; |
|
/** |
|
* Constructs a single-track, ISO BMFF media segment from AAC data |
|
* events. The output of this stream can be fed to a SourceBuffer |
|
* configured with a suitable initialization segment. |
|
* @param track {object} track metadata configuration |
|
* @param options {object} transmuxer options object |
|
* @param options.keepOriginalTimestamps {boolean} If true, keep the timestamps |
|
* in the source; false to adjust the first segment to start at 0. |
|
*/ |
|
|
|
|
|
_AudioSegmentStream = function AudioSegmentStream(track, options) {
  var adtsFrames = [],            // ADTS frames buffered until flush()
      sequenceNumber = 0,         // moof sequence number, bumped per emitted segment
      earliestAllowedDts = 0,     // frames ending before this DTS are trimmed
      audioAppendStartTs = 0,     // append position used when prefixing silence
      videoBaseMediaDecodeTime = Infinity; // video base time to align audio against
  options = options || {};

  _AudioSegmentStream.prototype.init.call(this);

  /**
   * Buffer an ADTS frame and copy its audio properties
   * (AUDIO_PROPERTIES) onto the track object.
   * @param {Object} data parsed ADTS frame event
   */
  this.push = function (data) {
    trackDecodeInfo.collectDtsInfo(track, data);

    if (track) {
      AUDIO_PROPERTIES.forEach(function (prop) {
        track[prop] = data[prop];
      });
    } // buffer audio data until end() is called

    adtsFrames.push(data);
  };

  // Frames earlier than this DTS (made relative to the timeline start)
  // will be trimmed during flush.
  this.setEarliestDts = function (earliestDts) {
    earliestAllowedDts = earliestDts - track.timelineStartInfo.baseMediaDecodeTime;
  };

  // Record the video track's base decode time so silence can be
  // prefixed to keep audio and video aligned.
  this.setVideoBaseMediaDecodeTime = function (baseMediaDecodeTime) {
    videoBaseMediaDecodeTime = baseMediaDecodeTime;
  };

  // Record where appended audio begins (used by prefixWithSilence).
  this.setAudioAppendStart = function (timestamp) {
    audioAppendStartTs = timestamp;
  };

  /**
   * Assemble the buffered frames into a single moof+mdat pair, emit it
   * as a 'data' event, then signal 'done'.
   */
  this.flush = function () {
    var frames, moof, mdat, boxes; // return early if no audio data has been observed

    if (adtsFrames.length === 0) {
      this.trigger('done', 'AudioSegmentStream');
      return;
    }

    frames = audioFrameUtils.trimAdtsFramesByEarliestDts(adtsFrames, track, earliestAllowedDts);
    track.baseMediaDecodeTime = trackDecodeInfo.calculateTrackBaseMediaDecodeTime(track, options.keepOriginalTimestamps);
    audioFrameUtils.prefixWithSilence(track, frames, audioAppendStartTs, videoBaseMediaDecodeTime); // we have to build the index from byte locations to
    // samples (that is, adts frames) in the audio data

    track.samples = audioFrameUtils.generateSampleTable(frames); // concatenate the audio data to construct the mdat

    mdat = mp4Generator.mdat(audioFrameUtils.concatenateFrameData(frames));
    adtsFrames = [];
    moof = mp4Generator.moof(sequenceNumber, [track]);
    boxes = new Uint8Array(moof.byteLength + mdat.byteLength); // bump the sequence number for next time

    sequenceNumber++;
    boxes.set(moof);
    boxes.set(mdat, moof.byteLength);
    trackDecodeInfo.clearDtsInfo(track);
    this.trigger('data', {
      track: track,
      boxes: boxes
    });
    this.trigger('done', 'AudioSegmentStream');
  };
};

// AudioSegmentStream inherits the base Stream interface
_AudioSegmentStream.prototype = new stream();
|
/** |
|
* Constructs a single-track, ISO BMFF media segment from H264 data |
|
* events. The output of this stream can be fed to a SourceBuffer |
|
* configured with a suitable initialization segment. |
|
* @param track {object} track metadata configuration |
|
* @param options {object} transmuxer options object |
|
* @param options.alignGopsAtEnd {boolean} If true, start from the end of the |
|
* gopsToAlignWith list when attempting to align gop pts |
|
* @param options.keepOriginalTimestamps {boolean} If true, keep the timestamps |
|
* in the source; false to adjust the first segment to start at 0. |
|
*/ |
|
|
|
_VideoSegmentStream = function VideoSegmentStream(track, options) {
  var sequenceNumber = 0,   // moof sequence number, bumped per emitted segment
      nalUnits = [],        // nal units buffered between flushes
      gopsToAlignWith = [], // gop timing info (pts/dts) to align new gops against
      config,               // active SPS-derived mp4 track configuration
      pps;                  // active picture parameter set bytes
  options = options || {};

  _VideoSegmentStream.prototype.init.call(this);

  delete track.minPTS;
  this.gopCache_ = []; // recently emitted GOPs, kept for gop-fusion
  /**
   * Constructs a ISO BMFF segment given H264 nalUnits
   * @param {Object} nalUnit A data event representing a nalUnit
   * @param {String} nalUnit.nalUnitType
   * @param {Object} nalUnit.config Properties for a mp4 track
   * @param {Uint8Array} nalUnit.data The nalUnit bytes
   * @see lib/codecs/h264.js
   **/

  this.push = function (nalUnit) {
    trackDecodeInfo.collectDtsInfo(track, nalUnit); // record the track config

    if (nalUnit.nalUnitType === 'seq_parameter_set_rbsp' && !config) {
      config = nalUnit.config;
      track.sps = [nalUnit.data];
      VIDEO_PROPERTIES.forEach(function (prop) {
        track[prop] = config[prop];
      }, this);
    }

    if (nalUnit.nalUnitType === 'pic_parameter_set_rbsp' && !pps) {
      pps = nalUnit.data;
      track.pps = [nalUnit.data];
    } // buffer video until flush() is called

    nalUnits.push(nalUnit);
  };
  /**
   * Pass constructed ISO BMFF track and boxes on to the
   * next stream in the pipeline
   **/

  this.flush = function () {
    var frames,
        gopForFusion,
        gops,
        moof,
        mdat,
        boxes,
        prependedContentDuration = 0,
        firstGop,
        lastGop; // Throw away nalUnits at the start of the byte stream until
    // we find the first AUD

    while (nalUnits.length) {
      if (nalUnits[0].nalUnitType === 'access_unit_delimiter_rbsp') {
        break;
      }

      nalUnits.shift();
    } // Return early if no video data has been observed

    if (nalUnits.length === 0) {
      this.resetStream_();
      this.trigger('done', 'VideoSegmentStream');
      return;
    } // Organize the raw nal-units into arrays that represent
    // higher-level constructs such as frames and gops
    // (group-of-pictures)

    frames = frameUtils.groupNalsIntoFrames(nalUnits);
    gops = frameUtils.groupFramesIntoGops(frames); // If the first frame of this fragment is not a keyframe we have
    // a problem since MSE (on Chrome) requires a leading keyframe.
    //
    // We have two approaches to repairing this situation:
    // 1) GOP-FUSION:
    //    This is where we keep track of the GOPS (group-of-pictures)
    //    from previous fragments and attempt to find one that we can
    //    prepend to the current fragment in order to create a valid
    //    fragment.
    // 2) KEYFRAME-PULLING:
    //    Here we search for the first keyframe in the fragment and
    //    throw away all the frames between the start of the fragment
    //    and that keyframe. We then extend the duration and pull the
    //    PTS of the keyframe forward so that it covers the time range
    //    of the frames that were disposed of.
    //
    // #1 is far preferable over #2 which can cause "stuttering" but
    // requires more things to be just right.

    if (!gops[0][0].keyFrame) {
      // Search for a gop for fusion from our gopCache
      gopForFusion = this.getGopForFusion_(nalUnits[0], track);

      if (gopForFusion) {
        // in order to provide more accurate timing information about the segment, save
        // the number of seconds prepended to the original segment due to GOP fusion
        prependedContentDuration = gopForFusion.duration;
        gops.unshift(gopForFusion); // Adjust Gops' metadata to account for the inclusion of the
        // new gop at the beginning

        gops.byteLength += gopForFusion.byteLength;
        gops.nalCount += gopForFusion.nalCount;
        gops.pts = gopForFusion.pts;
        gops.dts = gopForFusion.dts;
        gops.duration += gopForFusion.duration;
      } else {
        // If we didn't find a candidate gop fall back to keyframe-pulling
        gops = frameUtils.extendFirstKeyFrame(gops);
      }
    } // Trim gops to align with gopsToAlignWith

    if (gopsToAlignWith.length) {
      var alignedGops;

      if (options.alignGopsAtEnd) {
        alignedGops = this.alignGopsAtEnd_(gops);
      } else {
        alignedGops = this.alignGopsAtStart_(gops);
      }

      if (!alignedGops) {
        // save all the nals in the last GOP into the gop cache
        this.gopCache_.unshift({
          gop: gops.pop(),
          pps: track.pps,
          sps: track.sps
        }); // Keep a maximum of 6 GOPs in the cache

        this.gopCache_.length = Math.min(6, this.gopCache_.length); // Clear nalUnits

        nalUnits = []; // return early no gops can be aligned with desired gopsToAlignWith

        this.resetStream_();
        this.trigger('done', 'VideoSegmentStream');
        return;
      } // Some gops were trimmed. clear dts info so minSegmentDts and pts are correct
      // when recalculated before sending off to CoalesceStream

      trackDecodeInfo.clearDtsInfo(track);
      gops = alignedGops;
    }

    trackDecodeInfo.collectDtsInfo(track, gops); // First, we have to build the index from byte locations to
    // samples (that is, frames) in the video data

    track.samples = frameUtils.generateSampleTable(gops); // Concatenate the video data and construct the mdat

    mdat = mp4Generator.mdat(frameUtils.concatenateNalData(gops));
    track.baseMediaDecodeTime = trackDecodeInfo.calculateTrackBaseMediaDecodeTime(track, options.keepOriginalTimestamps);
    this.trigger('processedGopsInfo', gops.map(function (gop) {
      return {
        pts: gop.pts,
        dts: gop.dts,
        byteLength: gop.byteLength
      };
    }));
    firstGop = gops[0];
    lastGop = gops[gops.length - 1];
    this.trigger('segmentTimingInfo', generateVideoSegmentTimingInfo(track.baseMediaDecodeTime, firstGop.dts, firstGop.pts, lastGop.dts + lastGop.duration, lastGop.pts + lastGop.duration, prependedContentDuration)); // save all the nals in the last GOP into the gop cache

    this.gopCache_.unshift({
      gop: gops.pop(),
      pps: track.pps,
      sps: track.sps
    }); // Keep a maximum of 6 GOPs in the cache

    this.gopCache_.length = Math.min(6, this.gopCache_.length); // Clear nalUnits

    nalUnits = [];
    this.trigger('baseMediaDecodeTime', track.baseMediaDecodeTime);
    this.trigger('timelineStartInfo', track.timelineStartInfo);
    moof = mp4Generator.moof(sequenceNumber, [track]); // it would be great to allocate this array up front instead of
    // throwing away hundreds of media segment fragments

    boxes = new Uint8Array(moof.byteLength + mdat.byteLength); // Bump the sequence number for next time

    sequenceNumber++;
    boxes.set(moof);
    boxes.set(mdat, moof.byteLength);
    this.trigger('data', {
      track: track,
      boxes: boxes
    });
    this.resetStream_(); // Continue with the flush process now

    this.trigger('done', 'VideoSegmentStream');
  };

  this.resetStream_ = function () {
    trackDecodeInfo.clearDtsInfo(track); // reset config and pps because they may differ across segments
    // for instance, when we are rendition switching

    config = undefined;
    pps = undefined;
  }; // Search for a candidate Gop for gop-fusion from the gop cache and
  // return it or return null if no good candidate was found

  this.getGopForFusion_ = function (nalUnit) {
    var halfSecond = 45000,
        // Half-a-second in a 90khz clock
        allowableOverlap = 10000,
        // About 3 frames @ 30fps
        nearestDistance = Infinity,
        dtsDistance,
        nearestGopObj,
        currentGop,
        currentGopObj,
        i; // Search for the GOP nearest to the beginning of this nal unit

    for (i = 0; i < this.gopCache_.length; i++) {
      currentGopObj = this.gopCache_[i];
      currentGop = currentGopObj.gop; // Reject Gops with different SPS or PPS

      if (!(track.pps && arrayEquals(track.pps[0], currentGopObj.pps[0])) || !(track.sps && arrayEquals(track.sps[0], currentGopObj.sps[0]))) {
        continue;
      } // Reject Gops that would require a negative baseMediaDecodeTime

      if (currentGop.dts < track.timelineStartInfo.dts) {
        continue;
      } // The distance between the end of the gop and the start of the nalUnit

      dtsDistance = nalUnit.dts - currentGop.dts - currentGop.duration; // Only consider GOPS that start before the nal unit and end within
      // a half-second of the nal unit

      if (dtsDistance >= -allowableOverlap && dtsDistance <= halfSecond) {
        // Always use the closest GOP we found if there is more than
        // one candidate
        if (!nearestGopObj || nearestDistance > dtsDistance) {
          nearestGopObj = currentGopObj;
          nearestDistance = dtsDistance;
        }
      }
    }

    if (nearestGopObj) {
      return nearestGopObj.gop;
    }

    return null;
  }; // trim gop list to the first gop found that has a matching pts with a gop in the list
  // of gopsToAlignWith starting from the START of the list

  this.alignGopsAtStart_ = function (gops) {
    var alignIndex, gopIndex, align, gop, byteLength, nalCount, duration, alignedGops;
    byteLength = gops.byteLength;
    nalCount = gops.nalCount;
    duration = gops.duration;
    alignIndex = gopIndex = 0;

    while (alignIndex < gopsToAlignWith.length && gopIndex < gops.length) {
      align = gopsToAlignWith[alignIndex];
      gop = gops[gopIndex];

      if (align.pts === gop.pts) {
        break;
      }

      if (gop.pts > align.pts) {
        // this current gop starts after the current gop we want to align on, so increment
        // align index
        alignIndex++;
        continue;
      } // current gop starts before the current gop we want to align on. so increment gop
      // index

      gopIndex++;
      byteLength -= gop.byteLength;
      nalCount -= gop.nalCount;
      duration -= gop.duration;
    }

    if (gopIndex === 0) {
      // no gops to trim
      return gops;
    }

    if (gopIndex === gops.length) {
      // all gops trimmed, skip appending all gops
      return null;
    }

    // re-derive the aggregate metadata for the trimmed gop list
    alignedGops = gops.slice(gopIndex);
    alignedGops.byteLength = byteLength;
    alignedGops.duration = duration;
    alignedGops.nalCount = nalCount;
    alignedGops.pts = alignedGops[0].pts;
    alignedGops.dts = alignedGops[0].dts;
    return alignedGops;
  }; // trim gop list to the first gop found that has a matching pts with a gop in the list
  // of gopsToAlignWith starting from the END of the list

  this.alignGopsAtEnd_ = function (gops) {
    var alignIndex, gopIndex, align, gop, alignEndIndex, matchFound;
    alignIndex = gopsToAlignWith.length - 1;
    gopIndex = gops.length - 1;
    alignEndIndex = null;
    matchFound = false;

    while (alignIndex >= 0 && gopIndex >= 0) {
      align = gopsToAlignWith[alignIndex];
      gop = gops[gopIndex];

      if (align.pts === gop.pts) {
        matchFound = true;
        break;
      }

      if (align.pts > gop.pts) {
        alignIndex--;
        continue;
      }

      if (alignIndex === gopsToAlignWith.length - 1) {
        // gop.pts is greater than the last alignment candidate. If no match is found
        // by the end of this loop, we still want to append gops that come after this
        // point
        alignEndIndex = gopIndex;
      }

      gopIndex--;
    }

    if (!matchFound && alignEndIndex === null) {
      return null;
    }

    var trimIndex;

    if (matchFound) {
      trimIndex = gopIndex;
    } else {
      trimIndex = alignEndIndex;
    }

    if (trimIndex === 0) {
      return gops;
    }

    // recompute aggregate metadata over the retained gops
    var alignedGops = gops.slice(trimIndex);
    var metadata = alignedGops.reduce(function (total, gop) {
      total.byteLength += gop.byteLength;
      total.duration += gop.duration;
      total.nalCount += gop.nalCount;
      return total;
    }, {
      byteLength: 0,
      duration: 0,
      nalCount: 0
    });
    alignedGops.byteLength = metadata.byteLength;
    alignedGops.duration = metadata.duration;
    alignedGops.nalCount = metadata.nalCount;
    alignedGops.pts = alignedGops[0].pts;
    alignedGops.dts = alignedGops[0].dts;
    return alignedGops;
  };

  // Replace the set of gop timing entries used for alignment.
  this.alignGopsWith = function (newGopsToAlignWith) {
    gopsToAlignWith = newGopsToAlignWith;
  };
};

// VideoSegmentStream inherits the base Stream interface
_VideoSegmentStream.prototype = new stream();
|
/** |
|
* A Stream that can combine multiple streams (ie. audio & video) |
|
* into a single output segment for MSE. Also supports audio-only |
|
* and video-only streams. |
|
* @param options {object} transmuxer options object |
|
* @param options.keepOriginalTimestamps {boolean} If true, keep the timestamps |
|
* in the source; false to adjust the first segment to start at media timeline start. |
|
*/ |
|
|
|
_CoalesceStream = function CoalesceStream(options, metadataStream) {
  // Number of Tracks per output segment
  // If greater than 1, we combine multiple
  // tracks into a single segment
  this.numberOfTracks = 0;
  this.metadataStream = metadataStream;
  options = options || {};

  if (typeof options.remux !== 'undefined') {
    this.remuxTracks = !!options.remux;
  } else {
    // default: remux audio and video into one combined segment
    this.remuxTracks = true;
  }

  if (typeof options.keepOriginalTimestamps === 'boolean') {
    this.keepOriginalTimestamps = options.keepOriginalTimestamps;
  }

  // per-flush accumulation state, cleared after each emitted segment
  this.pendingTracks = [];
  this.videoTrack = null;
  this.pendingBoxes = [];
  this.pendingCaptions = [];
  this.pendingMetadata = [];
  this.pendingBytes = 0;
  this.emittedTracks = 0;

  _CoalesceStream.prototype.init.call(this); // Take output from multiple

  /**
   * Buffer output from the upstream segment/caption/metadata streams
   * until flush() assembles the combined segment.
   * @param {Object} output a caption (has .text), an id3 tag (has
   *   .frames), or a track payload (has .track and .boxes)
   */
  this.push = function (output) {
    // buffer incoming captions until the associated video segment
    // finishes
    if (output.text) {
      return this.pendingCaptions.push(output);
    } // buffer incoming id3 tags until the final flush

    if (output.frames) {
      return this.pendingMetadata.push(output);
    } // Add this track to the list of pending tracks and store
    // important information required for the construction of
    // the final segment

    this.pendingTracks.push(output.track);
    this.pendingBoxes.push(output.boxes);
    this.pendingBytes += output.boxes.byteLength;

    if (output.track.type === 'video') {
      this.videoTrack = output.track;
    }

    if (output.track.type === 'audio') {
      this.audioTrack = output.track;
    }
  };
};

// CoalesceStream inherits the base Stream interface
_CoalesceStream.prototype = new stream();
|
|
|
/**
 * Combine every buffered moof+mdat pair (plus captions and ID3 metadata)
 * into a single 'data' event, then emit 'done' once all expected tracks
 * have flushed.
 * @param {String} flushSource name of the upstream stream that flushed
 */
_CoalesceStream.prototype.flush = function (flushSource) {
  var offset = 0,
      event = {
        captions: [],
        captionStreams: {},
        metadata: [],
        info: {}
      },
      caption,
      id3,
      initSegment,
      timelineStartPts = 0,
      i;

  if (this.pendingTracks.length < this.numberOfTracks) {
    if (flushSource !== 'VideoSegmentStream' && flushSource !== 'AudioSegmentStream') {
      // Return because we haven't received a flush from a data-generating
      // portion of the segment (meaning that we have only received meta-data
      // or captions.)
      return;
    } else if (this.remuxTracks) {
      // Return until we have enough tracks from the pipeline to remux (if we
      // are remuxing audio and video into a single MP4)
      return;
    } else if (this.pendingTracks.length === 0) {
      // In the case where we receive a flush without any data having been
      // received we consider it an emitted track for the purposes of coalescing
      // `done` events.
      // We do this for the case where there is an audio and video track in the
      // segment but no audio data. (seen in several playlists with alternate
      // audio tracks and no audio present in the main TS segments.)
      this.emittedTracks++;

      if (this.emittedTracks >= this.numberOfTracks) {
        this.trigger('done');
        this.emittedTracks = 0;
      }

      return;
    }
  }

  // Copy the per-track codec/dimension info onto the event, preferring
  // the video track when both are present.
  if (this.videoTrack) {
    timelineStartPts = this.videoTrack.timelineStartInfo.pts;
    VIDEO_PROPERTIES.forEach(function (prop) {
      event.info[prop] = this.videoTrack[prop];
    }, this);
  } else if (this.audioTrack) {
    timelineStartPts = this.audioTrack.timelineStartInfo.pts;
    AUDIO_PROPERTIES.forEach(function (prop) {
      event.info[prop] = this.audioTrack[prop];
    }, this);
  }

  if (this.pendingTracks.length === 1) {
    event.type = this.pendingTracks[0].type;
  } else {
    event.type = 'combined';
  }

  this.emittedTracks += this.pendingTracks.length;
  initSegment = mp4Generator.initSegment(this.pendingTracks); // Create a new typed array to hold the init segment

  event.initSegment = new Uint8Array(initSegment.byteLength); // Create an init segment containing a moov
  // and track definitions

  event.initSegment.set(initSegment); // Create a new typed array to hold the moof+mdats

  event.data = new Uint8Array(this.pendingBytes); // Append each moof+mdat (one per track) together

  for (i = 0; i < this.pendingBoxes.length; i++) {
    event.data.set(this.pendingBoxes[i], offset);
    offset += this.pendingBoxes[i].byteLength;
  } // Translate caption PTS times into second offsets to match the
  // video timeline for the segment, and add track info

  for (i = 0; i < this.pendingCaptions.length; i++) {
    caption = this.pendingCaptions[i];
    caption.startTime = caption.startPts;

    if (!this.keepOriginalTimestamps) {
      caption.startTime -= timelineStartPts;
    }

    // convert from the 90kHz clock to seconds
    caption.startTime /= 90e3;
    caption.endTime = caption.endPts;

    if (!this.keepOriginalTimestamps) {
      caption.endTime -= timelineStartPts;
    }

    caption.endTime /= 90e3;
    event.captionStreams[caption.stream] = true;
    event.captions.push(caption);
  } // Translate ID3 frame PTS times into second offsets to match the
  // video timeline for the segment

  for (i = 0; i < this.pendingMetadata.length; i++) {
    id3 = this.pendingMetadata[i];
    id3.cueTime = id3.pts;

    if (!this.keepOriginalTimestamps) {
      id3.cueTime -= timelineStartPts;
    }

    id3.cueTime /= 90e3;
    event.metadata.push(id3);
  } // We add this to every single emitted segment even though we only need
  // it for the first

  event.metadata.dispatchType = this.metadataStream.dispatchType; // Reset stream state

  this.pendingTracks.length = 0;
  this.videoTrack = null;
  this.pendingBoxes.length = 0;
  this.pendingCaptions.length = 0;
  this.pendingBytes = 0;
  this.pendingMetadata.length = 0; // Emit the built segment

  this.trigger('data', event); // Only emit `done` if all tracks have been flushed and emitted

  if (this.emittedTracks >= this.numberOfTracks) {
    this.trigger('done');
    this.emittedTracks = 0;
  }
};
|
/** |
|
* A Stream that expects MP2T binary data as input and produces |
|
* corresponding media segments, suitable for use with Media Source |
|
* Extension (MSE) implementations that support the ISO BMFF byte |
|
* stream format, like Chrome. |
|
*/ |
|
|
|
|
|
_Transmuxer = function Transmuxer(options) {
  // `videoTrack`/`audioTrack` are closed over by every method below so the
  // pipeline-setup callbacks and the public setters share track state.
  var self = this,
      hasFlushed = true,
      videoTrack,
      audioTrack;

  _Transmuxer.prototype.init.call(this);

  options = options || {};
  this.baseMediaDecodeTime = options.baseMediaDecodeTime || 0;
  // Populated lazily by setupAacPipeline()/setupTsPipeline() on first push().
  this.transmuxPipeline_ = {};

  // Build the pipeline for raw AAC (ADTS) input: AAC demux -> timestamp
  // rollover handling -> ADTS framing, with timed-metadata (ID3) split off
  // into the metadata stream. The audio segment stream is attached lazily,
  // once the first timed-metadata event proves there is an audio track.
  this.setupAacPipeline = function () {
    var pipeline = {};
    this.transmuxPipeline_ = pipeline;
    pipeline.type = 'aac';
    pipeline.metadataStream = new m2ts_1.MetadataStream(); // set up the parsing pipeline

    pipeline.aacStream = new aac();
    pipeline.audioTimestampRolloverStream = new m2ts_1.TimestampRolloverStream('audio');
    pipeline.timedMetadataTimestampRolloverStream = new m2ts_1.TimestampRolloverStream('timed-metadata');
    pipeline.adtsStream = new adts();
    pipeline.coalesceStream = new _CoalesceStream(options, pipeline.metadataStream);
    pipeline.headOfPipeline = pipeline.aacStream;
    pipeline.aacStream.pipe(pipeline.audioTimestampRolloverStream).pipe(pipeline.adtsStream);
    pipeline.aacStream.pipe(pipeline.timedMetadataTimestampRolloverStream).pipe(pipeline.metadataStream).pipe(pipeline.coalesceStream);
    // ID3 timestamps drive the AAC stream's notion of time.
    pipeline.metadataStream.on('timestamp', function (frame) {
      pipeline.aacStream.setTimestamp(frame.timeStamp);
    });
    pipeline.aacStream.on('data', function (data) {
      if (data.type === 'timed-metadata' && !pipeline.audioSegmentStream) {
        // Synthesize a minimal audio track description if the demuxer did not
        // provide one.
        audioTrack = audioTrack || {
          timelineStartInfo: {
            baseMediaDecodeTime: self.baseMediaDecodeTime
          },
          codec: 'adts',
          type: 'audio'
        }; // hook up the audio segment stream to the first track with aac data

        pipeline.coalesceStream.numberOfTracks++;
        pipeline.audioSegmentStream = new _AudioSegmentStream(audioTrack, options); // Set up the final part of the audio pipeline

        pipeline.adtsStream.pipe(pipeline.audioSegmentStream).pipe(pipeline.coalesceStream);
      }
    }); // Re-emit any data coming from the coalesce stream to the outside world

    pipeline.coalesceStream.on('data', this.trigger.bind(this, 'data')); // Let the consumer know we have finished flushing the entire pipeline

    pipeline.coalesceStream.on('done', this.trigger.bind(this, 'done'));
  };

  // Build the pipeline for MPEG2-TS input: packet parsing -> elementary
  // stream demux -> per-type rollover handling -> H264/ADTS parsing, plus
  // CEA-608/708 caption extraction from the H264 stream. Segment streams are
  // attached lazily when track metadata arrives.
  this.setupTsPipeline = function () {
    var pipeline = {};
    this.transmuxPipeline_ = pipeline;
    pipeline.type = 'ts';
    pipeline.metadataStream = new m2ts_1.MetadataStream(); // set up the parsing pipeline

    pipeline.packetStream = new m2ts_1.TransportPacketStream();
    pipeline.parseStream = new m2ts_1.TransportParseStream();
    pipeline.elementaryStream = new m2ts_1.ElementaryStream();
    pipeline.videoTimestampRolloverStream = new m2ts_1.TimestampRolloverStream('video');
    pipeline.audioTimestampRolloverStream = new m2ts_1.TimestampRolloverStream('audio');
    pipeline.timedMetadataTimestampRolloverStream = new m2ts_1.TimestampRolloverStream('timed-metadata');
    pipeline.adtsStream = new adts();
    pipeline.h264Stream = new H264Stream();
    pipeline.captionStream = new m2ts_1.CaptionStream();
    pipeline.coalesceStream = new _CoalesceStream(options, pipeline.metadataStream);
    pipeline.headOfPipeline = pipeline.packetStream; // disassemble MPEG2-TS packets into elementary streams

    pipeline.packetStream.pipe(pipeline.parseStream).pipe(pipeline.elementaryStream); // !!THIS ORDER IS IMPORTANT!!
    // demux the streams

    pipeline.elementaryStream.pipe(pipeline.videoTimestampRolloverStream).pipe(pipeline.h264Stream);
    pipeline.elementaryStream.pipe(pipeline.audioTimestampRolloverStream).pipe(pipeline.adtsStream);
    pipeline.elementaryStream.pipe(pipeline.timedMetadataTimestampRolloverStream).pipe(pipeline.metadataStream).pipe(pipeline.coalesceStream); // Hook up CEA-608/708 caption stream

    pipeline.h264Stream.pipe(pipeline.captionStream).pipe(pipeline.coalesceStream);
    pipeline.elementaryStream.on('data', function (data) {
      var i;

      if (data.type === 'metadata') {
        i = data.tracks.length; // scan the tracks listed in the metadata

        // Remember only the FIRST video and first audio track seen.
        while (i--) {
          if (!videoTrack && data.tracks[i].type === 'video') {
            videoTrack = data.tracks[i];
            videoTrack.timelineStartInfo.baseMediaDecodeTime = self.baseMediaDecodeTime;
          } else if (!audioTrack && data.tracks[i].type === 'audio') {
            audioTrack = data.tracks[i];
            audioTrack.timelineStartInfo.baseMediaDecodeTime = self.baseMediaDecodeTime;
          }
        } // hook up the video segment stream to the first track with h264 data

        if (videoTrack && !pipeline.videoSegmentStream) {
          pipeline.coalesceStream.numberOfTracks++;
          pipeline.videoSegmentStream = new _VideoSegmentStream(videoTrack, options);
          pipeline.videoSegmentStream.on('timelineStartInfo', function (timelineStartInfo) {
            // When video emits timelineStartInfo data after a flush, we forward that
            // info to the AudioSegmentStream, if it exists, because video timeline
            // data takes precedence.
            if (audioTrack) {
              audioTrack.timelineStartInfo = timelineStartInfo; // On the first segment we trim AAC frames that exist before the
              // very earliest DTS we have seen in video because Chrome will
              // interpret any video track with a baseMediaDecodeTime that is
              // non-zero as a gap.

              pipeline.audioSegmentStream.setEarliestDts(timelineStartInfo.dts);
            }
          });
          pipeline.videoSegmentStream.on('processedGopsInfo', self.trigger.bind(self, 'gopInfo'));
          pipeline.videoSegmentStream.on('segmentTimingInfo', self.trigger.bind(self, 'videoSegmentTimingInfo'));
          pipeline.videoSegmentStream.on('baseMediaDecodeTime', function (baseMediaDecodeTime) {
            if (audioTrack) {
              pipeline.audioSegmentStream.setVideoBaseMediaDecodeTime(baseMediaDecodeTime);
            }
          }); // Set up the final part of the video pipeline

          pipeline.h264Stream.pipe(pipeline.videoSegmentStream).pipe(pipeline.coalesceStream);
        }

        if (audioTrack && !pipeline.audioSegmentStream) {
          // hook up the audio segment stream to the first track with aac data
          pipeline.coalesceStream.numberOfTracks++;
          pipeline.audioSegmentStream = new _AudioSegmentStream(audioTrack, options); // Set up the final part of the audio pipeline

          pipeline.adtsStream.pipe(pipeline.audioSegmentStream).pipe(pipeline.coalesceStream);
        }
      }
    }); // Re-emit any data coming from the coalesce stream to the outside world

    pipeline.coalesceStream.on('data', this.trigger.bind(this, 'data')); // Let the consumer know we have finished flushing the entire pipeline

    pipeline.coalesceStream.on('done', this.trigger.bind(this, 'done'));
  }; // hook up the segment streams once track metadata is delivered

  // Reset timing state for a new timeline (e.g. after a discontinuity).
  // When options.keepOriginalTimestamps is set, the incoming offset is NOT
  // applied to the tracks; only the rollover/DTS bookkeeping is cleared.
  this.setBaseMediaDecodeTime = function (baseMediaDecodeTime) {
    var pipeline = this.transmuxPipeline_;

    if (!options.keepOriginalTimestamps) {
      this.baseMediaDecodeTime = baseMediaDecodeTime;
    }

    if (audioTrack) {
      audioTrack.timelineStartInfo.dts = undefined;
      audioTrack.timelineStartInfo.pts = undefined;
      trackDecodeInfo.clearDtsInfo(audioTrack);

      if (!options.keepOriginalTimestamps) {
        audioTrack.timelineStartInfo.baseMediaDecodeTime = baseMediaDecodeTime;
      }

      if (pipeline.audioTimestampRolloverStream) {
        pipeline.audioTimestampRolloverStream.discontinuity();
      }
    }

    if (videoTrack) {
      if (pipeline.videoSegmentStream) {
        // Drop any cached GOPs; they belong to the previous timeline.
        pipeline.videoSegmentStream.gopCache_ = [];
        pipeline.videoTimestampRolloverStream.discontinuity();
      }

      videoTrack.timelineStartInfo.dts = undefined;
      videoTrack.timelineStartInfo.pts = undefined;
      trackDecodeInfo.clearDtsInfo(videoTrack);
      pipeline.captionStream.reset();

      if (!options.keepOriginalTimestamps) {
        videoTrack.timelineStartInfo.baseMediaDecodeTime = baseMediaDecodeTime;
      }
    }

    if (pipeline.timedMetadataTimestampRolloverStream) {
      pipeline.timedMetadataTimestampRolloverStream.discontinuity();
    }
  };

  // Forward the audio-append start time to the audio segment stream.
  // NOTE(review): assumes audioSegmentStream exists whenever audioTrack is
  // set — true once the pipeline has seen audio data; verify for early calls.
  this.setAudioAppendStart = function (timestamp) {
    if (audioTrack) {
      this.transmuxPipeline_.audioSegmentStream.setAudioAppendStart(timestamp);
    }
  };

  // Pass GOP-alignment targets through to the video segment stream, if any.
  this.alignGopsWith = function (gopsToAlignWith) {
    if (videoTrack && this.transmuxPipeline_.videoSegmentStream) {
      this.transmuxPipeline_.videoSegmentStream.alignGopsWith(gopsToAlignWith);
    }
  }; // feed incoming data to the front of the parsing pipeline

  // Push raw bytes into the pipeline. The container type (AAC vs TS) is
  // sniffed on the first push after a flush, and the matching pipeline is
  // (re)built only when the detected type differs from the current one.
  this.push = function (data) {
    if (hasFlushed) {
      var isAac = isLikelyAacData$1(data);

      if (isAac && this.transmuxPipeline_.type !== 'aac') {
        this.setupAacPipeline();
      } else if (!isAac && this.transmuxPipeline_.type !== 'ts') {
        this.setupTsPipeline();
      }

      hasFlushed = false;
    }

    this.transmuxPipeline_.headOfPipeline.push(data);
  }; // flush any buffered data

  this.flush = function () {
    hasFlushed = true; // Start at the top of the pipeline and flush all pending work

    this.transmuxPipeline_.headOfPipeline.flush();
  }; // Caption data has to be reset when seeking outside buffered range

  this.resetCaptions = function () {
    if (this.transmuxPipeline_.captionStream) {
      this.transmuxPipeline_.captionStream.reset();
    }
  };
};

_Transmuxer.prototype = new stream();
|
// Public surface of the transmuxing module: the Transmuxer facade, the two
// per-media segment streams, and the track-property lists.
var transmuxer = {
  Transmuxer: _Transmuxer,
  AudioSegmentStream: _AudioSegmentStream,
  VideoSegmentStream: _VideoSegmentStream,
  VIDEO_PROPERTIES: VIDEO_PROPERTIES,
  AUDIO_PROPERTIES: AUDIO_PROPERTIES,
  // exported for testing
  generateVideoSegmentTimingInfo: generateVideoSegmentTimingInfo
};
|
|
|
var inspectMp4,
    _textifyMp,
    parseType$2 = probe$$1.parseType,
    // MP4 dates are seconds since midnight, Jan 1 1904 UTC; subtracting
    // 2082844800000 ms shifts that origin to the Unix epoch.
    parseMp4Date = function parseMp4Date(seconds) {
  return new Date(seconds * 1000 - 2082844800000);
},
    // Unpack the 4-byte sample-flags field (as used by trun/tfhd boxes,
    // ISO/IEC 14496-12 §8.8.3.1) into named bit-fields.
    parseSampleFlags = function parseSampleFlags(flags) {
  return {
    isLeading: (flags[0] & 0x0c) >>> 2,
    dependsOn: flags[0] & 0x03,
    isDependedOn: (flags[1] & 0xc0) >>> 6,
    hasRedundancy: (flags[1] & 0x30) >>> 4,
    paddingValue: (flags[1] & 0x0e) >>> 1,
    isNonSyncSample: flags[1] & 0x01,
    degradationPriority: flags[2] << 8 | flags[3]
  };
},
|
nalParse = function nalParse(avcStream) { |
|
var avcView = new DataView(avcStream.buffer, avcStream.byteOffset, avcStream.byteLength), |
|
result = [], |
|
i, |
|
length; |
|
|
|
for (i = 0; i + 4 < avcStream.length; i += length) { |
|
length = avcView.getUint32(i); |
|
i += 4; // bail if this doesn't appear to be an H264 stream |
|
|
|
if (length <= 0) { |
|
result.push('<span style=\'color:red;\'>MALFORMED DATA</span>'); |
|
continue; |
|
} |
|
|
|
switch (avcStream[i] & 0x1F) { |
|
case 0x01: |
|
result.push('slice_layer_without_partitioning_rbsp'); |
|
break; |
|
|
|
case 0x05: |
|
result.push('slice_layer_without_partitioning_rbsp_idr'); |
|
break; |
|
|
|
case 0x06: |
|
result.push('sei_rbsp'); |
|
break; |
|
|
|
case 0x07: |
|
result.push('seq_parameter_set_rbsp'); |
|
break; |
|
|
|
case 0x08: |
|
result.push('pic_parameter_set_rbsp'); |
|
break; |
|
|
|
case 0x09: |
|
result.push('access_unit_delimiter_rbsp'); |
|
break; |
|
|
|
default: |
|
result.push('UNKNOWN NAL - ' + avcStream[i] & 0x1F); |
|
break; |
|
} |
|
} |
|
|
|
return result; |
|
}, |
|
// registry of handlers for individual mp4 box types
// Each handler receives the box PAYLOAD (bytes after the 8-byte size/type
// header) as a Uint8Array and returns a plain object describing its fields;
// container boxes recurse via inspectMp4.
parse$$1 = {
  // codingname, not a first-class box type. stsd entries share the
  // same format as real boxes so the parsing infrastructure can be
  // shared
  avc1: function avc1(data) {
    var view = new DataView(data.buffer, data.byteOffset, data.byteLength);
    return {
      dataReferenceIndex: view.getUint16(6),
      width: view.getUint16(24),
      height: view.getUint16(26),
      // NOTE(review): resolutions are 16.16 fixed point; dividing the
      // fractional word by 16 rather than 65536 looks suspect — confirm.
      horizresolution: view.getUint16(28) + view.getUint16(30) / 16,
      vertresolution: view.getUint16(32) + view.getUint16(34) / 16,
      frameCount: view.getUint16(40),
      depth: view.getUint16(74),
      // trailing bytes hold nested config boxes (avcC, btrt, ...)
      config: inspectMp4(data.subarray(78, data.byteLength))
    };
  },
  // AVCDecoderConfigurationRecord: profile/level info plus the raw SPS and
  // PPS NAL units, each prefixed by a 16-bit length.
  avcC: function avcC(data) {
    var view = new DataView(data.buffer, data.byteOffset, data.byteLength),
        result = {
      configurationVersion: data[0],
      avcProfileIndication: data[1],
      profileCompatibility: data[2],
      avcLevelIndication: data[3],
      lengthSizeMinusOne: data[4] & 0x03,
      sps: [],
      pps: []
    },
        numOfSequenceParameterSets = data[5] & 0x1f,
        numOfPictureParameterSets,
        nalSize,
        offset,
        i; // iterate past any SPSs

    offset = 6;

    for (i = 0; i < numOfSequenceParameterSets; i++) {
      nalSize = view.getUint16(offset);
      offset += 2;
      result.sps.push(new Uint8Array(data.subarray(offset, offset + nalSize)));
      offset += nalSize;
    } // iterate past any PPSs

    numOfPictureParameterSets = data[offset];
    offset++;

    for (i = 0; i < numOfPictureParameterSets; i++) {
      nalSize = view.getUint16(offset);
      offset += 2;
      result.pps.push(new Uint8Array(data.subarray(offset, offset + nalSize)));
      offset += nalSize;
    }

    return result;
  },
  // bitrate box
  btrt: function btrt(data) {
    var view = new DataView(data.buffer, data.byteOffset, data.byteLength);
    return {
      bufferSizeDB: view.getUint32(0),
      maxBitrate: view.getUint32(4),
      avgBitrate: view.getUint32(8)
    };
  },
  // elementary stream descriptor; byte offsets assume the fixed descriptor
  // layout produced by common muxers rather than walking descriptor tags.
  esds: function esds(data) {
    return {
      version: data[0],
      flags: new Uint8Array(data.subarray(1, 4)),
      esId: data[6] << 8 | data[7],
      streamPriority: data[8] & 0x1f,
      decoderConfig: {
        objectProfileIndication: data[11],
        streamType: data[12] >>> 2 & 0x3f,
        bufferSize: data[13] << 16 | data[14] << 8 | data[15],
        maxBitrate: data[16] << 24 | data[17] << 16 | data[18] << 8 | data[19],
        avgBitrate: data[20] << 24 | data[21] << 16 | data[22] << 8 | data[23],
        decoderConfigDescriptor: {
          tag: data[24],
          length: data[25],
          audioObjectType: data[26] >>> 3 & 0x1f,
          samplingFrequencyIndex: (data[26] & 0x07) << 1 | data[27] >>> 7 & 0x01,
          channelConfiguration: data[27] >>> 3 & 0x0f
        }
      }
    };
  },
  // file type box: major brand plus the list of compatible brands
  ftyp: function ftyp(data) {
    var view = new DataView(data.buffer, data.byteOffset, data.byteLength),
        result = {
      majorBrand: parseType$2(data.subarray(0, 4)),
      minorVersion: view.getUint32(4),
      compatibleBrands: []
    },
        i = 8;

    while (i < data.byteLength) {
      result.compatibleBrands.push(parseType$2(data.subarray(i, i + 4)));
      i += 4;
    }

    return result;
  },
  // data information box: pure container
  dinf: function dinf(data) {
    return {
      boxes: inspectMp4(data)
    };
  },
  // data reference box: full box header followed by nested reference boxes
  dref: function dref(data) {
    return {
      version: data[0],
      flags: new Uint8Array(data.subarray(1, 4)),
      dataReferences: inspectMp4(data.subarray(8))
    };
  },
  // handler box: media handler type plus a null-terminated UTF-8 name
  hdlr: function hdlr(data) {
    var view = new DataView(data.buffer, data.byteOffset, data.byteLength),
        result = {
      version: view.getUint8(0),
      flags: new Uint8Array(data.subarray(1, 4)),
      handlerType: parseType$2(data.subarray(8, 12)),
      name: ''
    },
        i = 8; // parse out the name field

    // (the i = 8 initializer above is immediately overwritten here; the name
    // starts after 12 reserved bytes at offset 24)
    for (i = 24; i < data.byteLength; i++) {
      if (data[i] === 0x00) {
        // the name field is null-terminated
        i++;
        break;
      }

      result.name += String.fromCharCode(data[i]);
    } // decode UTF-8 to javascript's internal representation
    // see http://ecmanaut.blogspot.com/2006/07/encoding-decoding-utf8-in-javascript.html

    result.name = decodeURIComponent(escape(result.name));
    return result;
  },
  // media data box: summarize the contained NAL units instead of dumping bytes
  mdat: function mdat(data) {
    return {
      byteLength: data.byteLength,
      nals: nalParse(data)
    };
  },
  // media header box: timescale/duration plus packed ISO-639-2/T language
  mdhd: function mdhd(data) {
    var view = new DataView(data.buffer, data.byteOffset, data.byteLength),
        i = 4,
        language,
        result = {
      version: view.getUint8(0),
      flags: new Uint8Array(data.subarray(1, 4)),
      language: ''
    };

    if (result.version === 1) {
      // version 1 stores 64-bit times; only the low 32 bits are read
      i += 4;
      result.creationTime = parseMp4Date(view.getUint32(i)); // truncating top 4 bytes

      i += 8;
      result.modificationTime = parseMp4Date(view.getUint32(i)); // truncating top 4 bytes

      i += 4;
      result.timescale = view.getUint32(i);
      i += 8;
      result.duration = view.getUint32(i); // truncating top 4 bytes
    } else {
      result.creationTime = parseMp4Date(view.getUint32(i));
      i += 4;
      result.modificationTime = parseMp4Date(view.getUint32(i));
      i += 4;
      result.timescale = view.getUint32(i);
      i += 4;
      result.duration = view.getUint32(i);
    }

    i += 4; // language is stored as an ISO-639-2/T code in an array of three 5-bit fields
    // each field is the packed difference between its ASCII value and 0x60

    language = view.getUint16(i);
    result.language += String.fromCharCode((language >> 10) + 0x60);
    result.language += String.fromCharCode(((language & 0x03e0) >> 5) + 0x60);
    result.language += String.fromCharCode((language & 0x1f) + 0x60);
    return result;
  },
  // media box: pure container
  mdia: function mdia(data) {
    return {
      boxes: inspectMp4(data)
    };
  },
  // movie fragment header box
  mfhd: function mfhd(data) {
    return {
      version: data[0],
      flags: new Uint8Array(data.subarray(1, 4)),
      sequenceNumber: data[4] << 24 | data[5] << 16 | data[6] << 8 | data[7]
    };
  },
  // media information box: pure container
  minf: function minf(data) {
    return {
      boxes: inspectMp4(data)
    };
  },
  // codingname, not a first-class box type. stsd entries share the
  // same format as real boxes so the parsing infrastructure can be
  // shared
  mp4a: function mp4a(data) {
    var view = new DataView(data.buffer, data.byteOffset, data.byteLength),
        result = {
      // 6 bytes reserved
      dataReferenceIndex: view.getUint16(6),
      // 4 + 4 bytes reserved
      channelcount: view.getUint16(16),
      samplesize: view.getUint16(18),
      // 2 bytes pre_defined
      // 2 bytes reserved
      samplerate: view.getUint16(24) + view.getUint16(26) / 65536
    }; // if there are more bytes to process, assume this is an ISO/IEC
    // 14496-14 MP4AudioSampleEntry and parse the ESDBox

    if (data.byteLength > 28) {
      result.streamDescriptor = inspectMp4(data.subarray(28))[0];
    }

    return result;
  },
  // movie fragment box: pure container
  moof: function moof(data) {
    return {
      boxes: inspectMp4(data)
    };
  },
  // movie box: pure container
  moov: function moov(data) {
    return {
      boxes: inspectMp4(data)
    };
  },
  // movie extends box: pure container
  mvex: function mvex(data) {
    return {
      boxes: inspectMp4(data)
    };
  },
  // movie header box
  mvhd: function mvhd(data) {
    var view = new DataView(data.buffer, data.byteOffset, data.byteLength),
        i = 4,
        result = {
      version: view.getUint8(0),
      flags: new Uint8Array(data.subarray(1, 4))
    };

    if (result.version === 1) {
      // version 1 stores 64-bit times; only the low 32 bits are read
      i += 4;
      result.creationTime = parseMp4Date(view.getUint32(i)); // truncating top 4 bytes

      i += 8;
      result.modificationTime = parseMp4Date(view.getUint32(i)); // truncating top 4 bytes

      i += 4;
      result.timescale = view.getUint32(i);
      i += 8;
      result.duration = view.getUint32(i); // truncating top 4 bytes
    } else {
      result.creationTime = parseMp4Date(view.getUint32(i));
      i += 4;
      result.modificationTime = parseMp4Date(view.getUint32(i));
      i += 4;
      result.timescale = view.getUint32(i);
      i += 4;
      result.duration = view.getUint32(i);
    }

    i += 4; // convert fixed-point, base 16 back to a number

    // NOTE(review): rate is 16.16 and volume is 8.8 fixed point; the /16 and
    // /8 divisors (rather than /65536 and /256) look suspect — confirm.
    result.rate = view.getUint16(i) + view.getUintint16 === undefined ? 0 : 0;
    result.rate = view.getUint16(i) + view.getUint16(i + 2) / 16;
    i += 4;
    result.volume = view.getUint8(i) + view.getUint8(i + 1) / 8;
    i += 2;
    i += 2;
    i += 2 * 4;
    result.matrix = new Uint32Array(data.subarray(i, i + 9 * 4));
    i += 9 * 4;
    i += 6 * 4;
    result.nextTrackId = view.getUint32(i);
    return result;
  },
  // progressive download information box
  pdin: function pdin(data) {
    var view = new DataView(data.buffer, data.byteOffset, data.byteLength);
    return {
      version: view.getUint8(0),
      flags: new Uint8Array(data.subarray(1, 4)),
      rate: view.getUint32(4),
      initialDelay: view.getUint32(8)
    };
  },
  // independent and disposable samples box: one packed byte per sample
  sdtp: function sdtp(data) {
    var result = {
      version: data[0],
      flags: new Uint8Array(data.subarray(1, 4)),
      samples: []
    },
        i;

    for (i = 4; i < data.byteLength; i++) {
      result.samples.push({
        dependsOn: (data[i] & 0x30) >> 4,
        isDependedOn: (data[i] & 0x0c) >> 2,
        hasRedundancy: data[i] & 0x03
      });
    }

    return result;
  },
  // segment index box
  // NOTE(review): version 1 sidx boxes store 64-bit earliestPresentationTime
  // and firstOffset; this parser always reads 32 bits — confirm inputs.
  sidx: function sidx(data) {
    var view = new DataView(data.buffer, data.byteOffset, data.byteLength),
        result = {
      version: data[0],
      flags: new Uint8Array(data.subarray(1, 4)),
      references: [],
      referenceId: view.getUint32(4),
      timescale: view.getUint32(8),
      earliestPresentationTime: view.getUint32(12),
      firstOffset: view.getUint32(16)
    },
        referenceCount = view.getUint16(22),
        i;

    for (i = 24; referenceCount; i += 12, referenceCount--) {
      result.references.push({
        referenceType: (data[i] & 0x80) >>> 7,
        referencedSize: view.getUint32(i) & 0x7FFFFFFF,
        subsegmentDuration: view.getUint32(i + 4),
        startsWithSap: !!(data[i + 8] & 0x80),
        sapType: (data[i + 8] & 0x70) >>> 4,
        sapDeltaTime: view.getUint32(i + 8) & 0x0FFFFFFF
      });
    }

    return result;
  },
  // sound media header box: balance is 8.8 fixed point
  smhd: function smhd(data) {
    return {
      version: data[0],
      flags: new Uint8Array(data.subarray(1, 4)),
      balance: data[4] + data[5] / 256
    };
  },
  // sample table box: pure container
  stbl: function stbl(data) {
    return {
      boxes: inspectMp4(data)
    };
  },
  // chunk offset box
  stco: function stco(data) {
    var view = new DataView(data.buffer, data.byteOffset, data.byteLength),
        result = {
      version: data[0],
      flags: new Uint8Array(data.subarray(1, 4)),
      chunkOffsets: []
    },
        entryCount = view.getUint32(4),
        i;

    for (i = 8; entryCount; i += 4, entryCount--) {
      result.chunkOffsets.push(view.getUint32(i));
    }

    return result;
  },
  // sample-to-chunk box
  stsc: function stsc(data) {
    var view = new DataView(data.buffer, data.byteOffset, data.byteLength),
        entryCount = view.getUint32(4),
        result = {
      version: data[0],
      flags: new Uint8Array(data.subarray(1, 4)),
      sampleToChunks: []
    },
        i;

    for (i = 8; entryCount; i += 12, entryCount--) {
      result.sampleToChunks.push({
        firstChunk: view.getUint32(i),
        samplesPerChunk: view.getUint32(i + 4),
        sampleDescriptionIndex: view.getUint32(i + 8)
      });
    }

    return result;
  },
  // sample description box: entries are parsed as nested boxes (avc1, mp4a…)
  stsd: function stsd(data) {
    return {
      version: data[0],
      flags: new Uint8Array(data.subarray(1, 4)),
      sampleDescriptions: inspectMp4(data.subarray(8))
    };
  },
  // sample size box; per-sample entries start after the 4-byte sample count
  stsz: function stsz(data) {
    var view = new DataView(data.buffer, data.byteOffset, data.byteLength),
        result = {
      version: data[0],
      flags: new Uint8Array(data.subarray(1, 4)),
      sampleSize: view.getUint32(4),
      entries: []
    },
        i;

    for (i = 12; i < data.byteLength; i += 4) {
      result.entries.push(view.getUint32(i));
    }

    return result;
  },
  // decoding time-to-sample box
  stts: function stts(data) {
    var view = new DataView(data.buffer, data.byteOffset, data.byteLength),
        result = {
      version: data[0],
      flags: new Uint8Array(data.subarray(1, 4)),
      timeToSamples: []
    },
        entryCount = view.getUint32(4),
        i;

    for (i = 8; entryCount; i += 8, entryCount--) {
      result.timeToSamples.push({
        sampleCount: view.getUint32(i),
        sampleDelta: view.getUint32(i + 4)
      });
    }

    return result;
  },
  // segment type box: identical layout to ftyp
  styp: function styp(data) {
    return parse$$1.ftyp(data);
  },
  // track fragment decode time box
  tfdt: function tfdt(data) {
    var result = {
      version: data[0],
      // NOTE(review): data[4] << 24 yields a negative number when the top
      // bit is set, corrupting large decode times — confirm acceptable here.
      flags: new Uint8Array(data.subarray(1, 4)),
      baseMediaDecodeTime: data[4] << 24 | data[5] << 16 | data[6] << 8 | data[7]
    };

    if (result.version === 1) {
      // version 1 is 64-bit: treat the first word as the high half
      result.baseMediaDecodeTime *= Math.pow(2, 32);
      result.baseMediaDecodeTime += data[8] << 24 | data[9] << 16 | data[10] << 8 | data[11];
    }

    return result;
  },
  // track fragment header box: optional fields gated by flag bits
  tfhd: function tfhd(data) {
    var view = new DataView(data.buffer, data.byteOffset, data.byteLength),
        result = {
      version: data[0],
      flags: new Uint8Array(data.subarray(1, 4)),
      trackId: view.getUint32(4)
    },
        baseDataOffsetPresent = result.flags[2] & 0x01,
        sampleDescriptionIndexPresent = result.flags[2] & 0x02,
        defaultSampleDurationPresent = result.flags[2] & 0x08,
        defaultSampleSizePresent = result.flags[2] & 0x10,
        defaultSampleFlagsPresent = result.flags[2] & 0x20,
        durationIsEmpty = result.flags[0] & 0x010000,
        defaultBaseIsMoof = result.flags[0] & 0x020000,
        i;
    i = 8;

    if (baseDataOffsetPresent) {
      i += 4; // truncate top 4 bytes
      // FIXME: should we read the full 64 bits?

      result.baseDataOffset = view.getUint32(12);
      i += 4;
    }

    if (sampleDescriptionIndexPresent) {
      result.sampleDescriptionIndex = view.getUint32(i);
      i += 4;
    }

    if (defaultSampleDurationPresent) {
      result.defaultSampleDuration = view.getUint32(i);
      i += 4;
    }

    if (defaultSampleSizePresent) {
      result.defaultSampleSize = view.getUint32(i);
      i += 4;
    }

    if (defaultSampleFlagsPresent) {
      result.defaultSampleFlags = view.getUint32(i);
    }

    if (durationIsEmpty) {
      result.durationIsEmpty = true;
    }

    if (!baseDataOffsetPresent && defaultBaseIsMoof) {
      result.baseDataOffsetIsMoof = true;
    }

    return result;
  },
  // track header box
  tkhd: function tkhd(data) {
    var view = new DataView(data.buffer, data.byteOffset, data.byteLength),
        i = 4,
        result = {
      version: view.getUint8(0),
      flags: new Uint8Array(data.subarray(1, 4))
    };

    if (result.version === 1) {
      // version 1 stores 64-bit times; only the low 32 bits are read
      i += 4;
      result.creationTime = parseMp4Date(view.getUint32(i)); // truncating top 4 bytes

      i += 8;
      result.modificationTime = parseMp4Date(view.getUint32(i)); // truncating top 4 bytes

      i += 4;
      result.trackId = view.getUint32(i);
      i += 4;
      i += 8;
      result.duration = view.getUint32(i); // truncating top 4 bytes
    } else {
      result.creationTime = parseMp4Date(view.getUint32(i));
      i += 4;
      result.modificationTime = parseMp4Date(view.getUint32(i));
      i += 4;
      result.trackId = view.getUint32(i);
      i += 4;
      i += 4;
      result.duration = view.getUint32(i);
    }

    i += 4;
    i += 2 * 4;
    result.layer = view.getUint16(i);
    i += 2;
    result.alternateGroup = view.getUint16(i);
    i += 2; // convert fixed-point, base 16 back to a number

    // NOTE(review): volume is 8.8 and width/height are 16.16 fixed point;
    // the /8 and /16 divisors (vs /256 and /65536) look suspect — confirm.
    result.volume = view.getUint8(i) + view.getUint8(i + 1) / 8;
    i += 2;
    i += 2;
    result.matrix = new Uint32Array(data.subarray(i, i + 9 * 4));
    i += 9 * 4;
    result.width = view.getUint16(i) + view.getUint16(i + 2) / 16;
    i += 4;
    result.height = view.getUint16(i) + view.getUint16(i + 2) / 16;
    return result;
  },
  // track fragment box: pure container
  traf: function traf(data) {
    return {
      boxes: inspectMp4(data)
    };
  },
  // track box: pure container
  trak: function trak(data) {
    return {
      boxes: inspectMp4(data)
    };
  },
  // track extends box: per-track defaults for fragments
  trex: function trex(data) {
    var view = new DataView(data.buffer, data.byteOffset, data.byteLength);
    return {
      version: data[0],
      flags: new Uint8Array(data.subarray(1, 4)),
      trackId: view.getUint32(4),
      defaultSampleDescriptionIndex: view.getUint32(8),
      defaultSampleDuration: view.getUint32(12),
      defaultSampleSize: view.getUint32(16),
      sampleDependsOn: data[20] & 0x03,
      sampleIsDependedOn: (data[21] & 0xc0) >> 6,
      sampleHasRedundancy: (data[21] & 0x30) >> 4,
      samplePaddingValue: (data[21] & 0x0e) >> 1,
      sampleIsDifferenceSample: !!(data[21] & 0x01),
      sampleDegradationPriority: view.getUint16(22)
    };
  },
  // track run box: per-sample fields gated by flag bits
  trun: function trun(data) {
    var result = {
      version: data[0],
      flags: new Uint8Array(data.subarray(1, 4)),
      samples: []
    },
        view = new DataView(data.buffer, data.byteOffset, data.byteLength),
        // Flag interpretation
        dataOffsetPresent = result.flags[2] & 0x01,
        // compare with 2nd byte of 0x1
        firstSampleFlagsPresent = result.flags[2] & 0x04,
        // compare with 2nd byte of 0x4
        sampleDurationPresent = result.flags[1] & 0x01,
        // compare with 2nd byte of 0x100
        sampleSizePresent = result.flags[1] & 0x02,
        // compare with 2nd byte of 0x200
        sampleFlagsPresent = result.flags[1] & 0x04,
        // compare with 2nd byte of 0x400
        sampleCompositionTimeOffsetPresent = result.flags[1] & 0x08,
        // compare with 2nd byte of 0x800
        sampleCount = view.getUint32(4),
        offset = 8,
        sample;

    if (dataOffsetPresent) {
      // 32 bit signed integer
      result.dataOffset = view.getInt32(offset);
      offset += 4;
    } // Overrides the flags for the first sample only. The order of
    // optional values will be: duration, size, compositionTimeOffset

    if (firstSampleFlagsPresent && sampleCount) {
      sample = {
        flags: parseSampleFlags(data.subarray(offset, offset + 4))
      };
      offset += 4;

      if (sampleDurationPresent) {
        sample.duration = view.getUint32(offset);
        offset += 4;
      }

      if (sampleSizePresent) {
        sample.size = view.getUint32(offset);
        offset += 4;
      }

      if (sampleCompositionTimeOffsetPresent) {
        // Note: this should be a signed int if version is 1
        sample.compositionTimeOffset = view.getUint32(offset);
        offset += 4;
      }

      result.samples.push(sample);
      sampleCount--;
    }

    while (sampleCount--) {
      sample = {};

      if (sampleDurationPresent) {
        sample.duration = view.getUint32(offset);
        offset += 4;
      }

      if (sampleSizePresent) {
        sample.size = view.getUint32(offset);
        offset += 4;
      }

      if (sampleFlagsPresent) {
        sample.flags = parseSampleFlags(data.subarray(offset, offset + 4));
        offset += 4;
      }

      if (sampleCompositionTimeOffsetPresent) {
        // Note: this should be a signed int if version is 1
        sample.compositionTimeOffset = view.getUint32(offset);
        offset += 4;
      }

      result.samples.push(sample);
    }

    return result;
  },
  // data entry url box (note the trailing space in the fourcc)
  'url ': function url(data) {
    return {
      version: data[0],
      flags: new Uint8Array(data.subarray(1, 4))
    };
  },
  // video media header box
  vmhd: function vmhd(data) {
    var view = new DataView(data.buffer, data.byteOffset, data.byteLength);
    return {
      version: data[0],
      flags: new Uint8Array(data.subarray(1, 4)),
      graphicsmode: view.getUint16(4),
      opcolor: new Uint16Array([view.getUint16(6), view.getUint16(8), view.getUint16(10)])
    };
  }
};
|
/** |
|
* Return a javascript array of box objects parsed from an ISO base |
|
* media file. |
|
* @param data {Uint8Array} the binary data of the media to be inspected |
|
* @return {array} a javascript array of potentially nested box objects |
|
*/ |
|
|
|
|
|
inspectMp4 = function inspectMp4(data) {
  // Walk the top-level ISO BMFF boxes in `data`, handing each box body to the
  // matching parser in `parse$$1`; unknown box types keep their raw bytes.
  var result = [];

  // Copy the bytes into a fresh ArrayBuffer so a DataView can be created
  // without worrying about the Uint8Array's offset into its backing buffer.
  var buffer = new ArrayBuffer(data.length);
  var copy = new Uint8Array(buffer);

  for (var idx = 0; idx < data.length; ++idx) {
    copy[idx] = data[idx];
  }

  var view = new DataView(buffer);
  var position = 0;

  while (position < data.byteLength) {
    // Each box begins with a 32-bit size and a four-character type code.
    var size = view.getUint32(position);
    var type = parseType$2(data.subarray(position + 4, position + 8));
    var end = size > 1 ? position + size : data.byteLength;

    // Parse the type-specific payload; fall back to wrapping the raw bytes.
    var parser = parse$$1[type] || function (bytes) {
      return {
        data: bytes
      };
    };

    var box = parser(data.subarray(position + 8, end));
    box.size = size;
    box.type = type;

    // store this box and move to the next
    result.push(box);
    position = end;
  }

  return result;
};
|
/** |
|
* Returns a textual representation of the javascript represtentation |
|
* of an MP4 file. You can use it as an alternative to |
|
* JSON.stringify() to compare inspected MP4s. |
|
* @param inspectedMp4 {array} the parsed array of boxes in an MP4 |
|
* file |
|
* @param depth {number} (optional) the number of ancestor boxes of |
|
* the elements of inspectedMp4. Assumed to be zero if unspecified. |
|
* @return {string} a text representation of the parsed MP4 |
|
*/ |
|
|
|
|
|
_textifyMp = function textifyMp4(inspectedMp4, depth) {
  depth = depth || 0;

  // Two spaces of indentation per ancestor box.
  var indent = new Array(depth * 2 + 1).join(' ');

  // iterate over all the boxes
  return inspectedMp4.map(function (box, index) {
    var header = indent + box.type + '\n';

    // Render every property except the type (already in the header) and the
    // child boxes (handled recursively below).
    var props = Object.keys(box).filter(function (key) {
      return key !== 'type' && key !== 'boxes';
    }).map(function (key) {
      var prefix = indent + ' ' + key + ': ';
      var value = box[key];

      // Raw byte buffers are rendered as a hex dump, wrapped every 24 chars.
      if (value instanceof Uint8Array || value instanceof Uint32Array) {
        var hex = Array.prototype.slice.call(new Uint8Array(value.buffer, value.byteOffset, value.byteLength)).map(function (byte) {
          return ' ' + ('00' + byte.toString(16)).slice(-2);
        }).join('');
        var rows = hex.match(/.{1,24}/g);

        if (!rows) {
          return prefix + '<>';
        }

        if (rows.length === 1) {
          return prefix + '<' + rows.join('').slice(1) + '>';
        }

        return prefix + '<\n' + rows.map(function (line) {
          return indent + ' ' + line;
        }).join('\n') + '\n' + indent + ' >';
      }

      // Everything else goes through JSON, re-indented to this depth.
      return prefix + JSON.stringify(value, null, 2).split('\n').map(function (line, lineIndex) {
        if (lineIndex === 0) {
          return line;
        }

        return indent + ' ' + line;
      }).join('\n');
    }).join('\n');

    // recursively textify the child boxes
    var children = box.boxes ? '\n' + _textifyMp(box.boxes, depth + 1) : '';
    return header + props + children;
  }).join('\n');
};
|
|
|
// Public inspector API: the full-file inspect/textify helpers plus direct
// access to the individual box parsers that the caption parser needs.
var mp4Inspector = {
  inspect: inspectMp4,
  textify: _textifyMp,
  parseTfdt: parse$$1.tfdt,
  parseHdlr: parse$$1.hdlr,
  parseTfhd: parse$$1.tfhd,
  parseTrun: parse$$1.trun,
  parseSidx: parse$$1.sidx
};
|
// Helpers pulled from the caption packet parser and caption stream modules.
var discardEmulationPreventionBytes$1 = captionPacketParser.discardEmulationPreventionBytes;
var CaptionStream$1 = captionStream.CaptionStream;
|
/** |
|
* Maps an offset in the mdat to a sample based on the the size of the samples. |
|
* Assumes that `parseSamples` has been called first. |
|
* |
|
* @param {Number} offset - The offset into the mdat |
|
* @param {Object[]} samples - An array of samples, parsed using `parseSamples` |
|
* @return {?Object} The matching sample, or null if no match was found. |
|
* |
|
* @see ISO-BMFF-12/2015, Section 8.8.8 |
|
**/ |
|
|
|
var mapToSample = function mapToSample(offset, samples) {
  // Walk the samples in order, subtracting each sample's size from the
  // offset until the remaining offset falls inside a sample.
  var remaining = offset;

  for (var i = 0; i < samples.length; i++) {
    var current = samples[i];

    if (remaining < current.size) {
      return current;
    }

    remaining -= current.size;
  }

  // The offset lies past the end of the described samples.
  return null;
};
|
/** |
|
* Finds SEI nal units contained in a Media Data Box. |
|
* Assumes that `parseSamples` has been called first. |
|
* |
|
* @param {Uint8Array} avcStream - The bytes of the mdat |
|
* @param {Object[]} samples - The samples parsed out by `parseSamples` |
|
* @param {Number} trackId - The trackId of this video track |
|
* @return {Object[]} seiNals - the parsed SEI NALUs found. |
|
* The contents of the seiNal should match what is expected by |
|
* CaptionStream.push (nalUnitType, size, data, escapedRBSP, pts, dts) |
|
* |
|
* @see ISO-BMFF-12/2015, Section 8.1.1 |
|
* @see Rec. ITU-T H.264, 7.3.2.3.1 |
|
**/ |
|
|
|
|
|
var findSeiNals = function findSeiNals(avcStream, samples, trackId) {
  // Scan length-prefixed NAL units in the mdat bytes and collect the SEI
  // (type 0x06) units, stamping each with the pts/dts of the sample it
  // falls inside.
  var avcView = new DataView(avcStream.buffer, avcStream.byteOffset, avcStream.byteLength),
      result = [],
      seiNal,
      i,
      length,
      lastMatchedSample;

  for (i = 0; i + 4 < avcStream.length; i += length) {
    length = avcView.getUint32(i);
    i += 4; // Bail if this doesn't appear to be an H264 stream

    if (length <= 0) {
      continue;
    }

    switch (avcStream[i] & 0x1F) {
      case 0x06:
        var data = avcStream.subarray(i + 1, i + 1 + length);
        var matchingSample = mapToSample(i, samples);
        seiNal = {
          nalUnitType: 'sei_rbsp',
          size: length,
          data: data,
          escapedRBSP: discardEmulationPreventionBytes$1(data),
          trackId: trackId
        };

        if (matchingSample) {
          seiNal.pts = matchingSample.pts;
          seiNal.dts = matchingSample.dts;
          lastMatchedSample = matchingSample;
        } else if (lastMatchedSample) {
          // If a matching sample cannot be found, use the last
          // sample's values as they should be as close as possible
          seiNal.pts = lastMatchedSample.pts;
          seiNal.dts = lastMatchedSample.dts;
        } else {
          // Fix: previously this branch dereferenced `lastMatchedSample.pts`
          // even when no sample had matched yet, throwing a TypeError on
          // malformed streams. Skip SEI NALs that cannot be timestamped.
          break;
        }

        result.push(seiNal);
        break;

      default:
        break;
    }
  }

  return result;
};
|
/** |
|
* Parses sample information out of Track Run Boxes and calculates |
|
* the absolute presentation and decode timestamps of each sample. |
|
* |
|
* @param {Array<Uint8Array>} truns - The Trun Run boxes to be parsed |
|
* @param {Number} baseMediaDecodeTime - base media decode time from tfdt |
|
@see ISO-BMFF-12/2015, Section 8.8.12 |
|
* @param {Object} tfhd - The parsed Track Fragment Header |
|
* @see inspect.parseTfhd |
|
* @return {Object[]} the parsed samples |
|
* |
|
* @see ISO-BMFF-12/2015, Section 8.8.8 |
|
**/ |
|
|
|
|
|
var parseSamples = function parseSamples(truns, baseMediaDecodeTime, tfhd) {
  // Accumulate absolute dts/pts values across all trun boxes, starting from
  // the fragment's base media decode time.
  var currentDts = baseMediaDecodeTime;
  var defaultSampleDuration = tfhd.defaultSampleDuration || 0;
  var defaultSampleSize = tfhd.defaultSampleSize || 0;
  var trackId = tfhd.trackId;
  var allSamples = [];

  truns.forEach(function (trun) {
    // Note: We currently do not parse the sample table as well
    // as the trun. It's possible some sources will require this.
    // moov > trak > mdia > minf > stbl
    var parsedSamples = mp4Inspector.parseTrun(trun).samples;

    parsedSamples.forEach(function (sample) {
      // Fill in track fragment header defaults for any fields the trun
      // omitted for this sample.
      if (sample.duration === undefined) {
        sample.duration = defaultSampleDuration;
      }

      if (sample.size === undefined) {
        sample.size = defaultSampleSize;
      }

      if (sample.compositionTimeOffset === undefined) {
        sample.compositionTimeOffset = 0;
      }

      sample.trackId = trackId;
      sample.dts = currentDts;
      sample.pts = currentDts + sample.compositionTimeOffset;
      currentDts += sample.duration;
    });

    allSamples = allSamples.concat(parsedSamples);
  });

  return allSamples;
};
|
/** |
|
* Parses out caption nals from an FMP4 segment's video tracks. |
|
* |
|
* @param {Uint8Array} segment - The bytes of a single segment |
|
* @param {Number} videoTrackId - The trackId of a video track in the segment |
|
* @return {Object.<Number, Object[]>} A mapping of video trackId to |
|
* a list of seiNals found in that track |
|
**/ |
|
|
|
|
|
var parseCaptionNals = function parseCaptionNals(segment, videoTrackId) {
  // Track fragments describe the samples...
  var trafs = probe$$1.findBox(segment, ['moof', 'traf']);
  // ...while the mdats carry the actual NAL unit bytes.
  var mdats = probe$$1.findBox(segment, ['mdat']);
  var captionNals = {};

  // Pair up each traf with a mdat as moofs and mdats are in pairs
  var mdatTrafPairs = mdats.map(function (mdat, index) {
    return {
      mdat: mdat,
      traf: trafs[index]
    };
  });

  mdatTrafPairs.forEach(function (pair) {
    var mdat = pair.mdat;
    var traf = pair.traf;

    // Exactly 1 tfhd per traf
    var tfhd = probe$$1.findBox(traf, ['tfhd']);
    var headerInfo = mp4Inspector.parseTfhd(tfhd[0]);
    var trackId = headerInfo.trackId;

    // Either 0 or 1 tfdt per traf
    var tfdt = probe$$1.findBox(traf, ['tfdt']);
    var baseMediaDecodeTime = tfdt.length > 0 ? mp4Inspector.parseTfdt(tfdt[0]).baseMediaDecodeTime : 0;
    var truns = probe$$1.findBox(traf, ['trun']);

    // Only parse video data for the chosen video track
    if (videoTrackId === trackId && truns.length > 0) {
      var samples = parseSamples(truns, baseMediaDecodeTime, headerInfo);
      var seiNals = findSeiNals(mdat, samples, trackId);

      if (!captionNals[trackId]) {
        captionNals[trackId] = [];
      }

      captionNals[trackId] = captionNals[trackId].concat(seiNals);
    }
  });

  return captionNals;
};
|
/** |
|
* Parses out inband captions from an MP4 container and returns |
|
* caption objects that can be used by WebVTT and the TextTrack API. |
|
* @see https://developer.mozilla.org/en-US/docs/Web/API/VTTCue |
|
* @see https://developer.mozilla.org/en-US/docs/Web/API/TextTrack |
|
* Assumes that `probe.getVideoTrackIds` and `probe.timescale` have been called first |
|
* |
|
* @param {Uint8Array} segment - The fmp4 segment containing embedded captions |
|
* @param {Number} trackId - The id of the video track to parse |
|
* @param {Number} timescale - The timescale for the video track from the init segment |
|
* |
|
* @return {?Object[]} parsedCaptions - A list of captions or null if no video tracks |
|
* @return {Number} parsedCaptions[].startTime - The time to show the caption in seconds |
|
* @return {Number} parsedCaptions[].endTime - The time to stop showing the caption in seconds |
|
* @return {String} parsedCaptions[].text - The visible content of the caption |
|
**/ |
|
|
|
|
|
var parseEmbeddedCaptions = function parseEmbeddedCaptions(segment, trackId, timescale) {
  // Without a video track id there is nothing to parse.
  if (!trackId) {
    return null;
  }

  var seiNals = parseCaptionNals(segment, trackId);

  return {
    seiNals: seiNals[trackId],
    timescale: timescale
  };
};
|
/** |
|
* Converts SEI NALUs into captions that can be used by video.js |
|
**/ |
|
|
|
|
|
var CaptionParser$$1 = function CaptionParser$$1() {
  // Whether init() has been called yet.
  var isInitialized = false;
  // The underlying CaptionStream instance that turns SEI NALs into captions.
  var captionStream$$1;
  // Stores segments seen before trackId and timescale are set.
  var segmentCache;
  // Stores video track ID of the track being parsed.
  var trackId;
  // Stores the timescale of the track being parsed.
  var timescale;
  // Stores captions parsed so far ({ captions: [], captionStreams: {} }).
  var parsedCaptions;

  /**
   * A method to indicate whether a CaptionParser has been initalized
   * @returns {Boolean}
   **/
  this.isInitialized = function () {
    return isInitialized;
  };

  /**
   * Initializes the underlying CaptionStream, SEI NAL parsing
   * and management, and caption collection
   **/
  this.init = function () {
    captionStream$$1 = new CaptionStream$1();
    isInitialized = true;

    // Collect dispatched captions
    captionStream$$1.on('data', function (event) {
      // Convert to seconds in the source's timescale
      event.startTime = event.startPts / timescale;
      event.endTime = event.endPts / timescale;
      parsedCaptions.captions.push(event);
      parsedCaptions.captionStreams[event.stream] = true;
    });
  };

  /**
   * Determines if a new video track will be selected
   * or if the timescale changed
   * @return {Boolean}
   **/
  this.isNewInit = function (videoTrackIds, timescales) {
    // Empty track lists / timescale maps carry no init information.
    if (videoTrackIds && videoTrackIds.length === 0 || timescales && typeof timescales === 'object' && Object.keys(timescales).length === 0) {
      return false;
    }

    return trackId !== videoTrackIds[0] || timescale !== timescales[trackId];
  };

  /**
   * Parses out SEI captions and interacts with underlying
   * CaptionStream to return dispatched captions
   *
   * @param {Uint8Array} segment - The fmp4 segment containing embedded captions
   * @param {Number[]} videoTrackIds - A list of video tracks found in the init segment
   * @param {Object.<Number, Number>} timescales - The timescales found in the init segment
   * @see parseEmbeddedCaptions
   * @see m2ts/caption-stream.js
   **/
  this.parse = function (segment, videoTrackIds, timescales) {
    var parsedData;

    if (!this.isInitialized()) {
      return null; // This is not likely to be a video segment
    } else if (!videoTrackIds || !timescales) {
      return null;
    } else if (this.isNewInit(videoTrackIds, timescales)) {
      // Use the first video track only as there is no
      // mechanism to switch to other video tracks
      trackId = videoTrackIds[0];
      timescale = timescales[trackId];
      // If an init segment has not been seen yet, hold onto segment
      // data until we have one
    } else if (!trackId || !timescale) {
      segmentCache.push(segment);
      return null;
    }

    // Now that a timescale and trackId is set, parse cached segments
    while (segmentCache.length > 0) {
      var cachedSegment = segmentCache.shift();
      this.parse(cachedSegment, videoTrackIds, timescales);
    }

    parsedData = parseEmbeddedCaptions(segment, trackId, timescale);

    if (parsedData === null || !parsedData.seiNals) {
      return null;
    }

    this.pushNals(parsedData.seiNals);
    // Force the parsed captions to be dispatched
    this.flushStream();
    return parsedCaptions;
  };

  /**
   * Pushes SEI NALUs onto CaptionStream
   * @param {Object[]} nals - A list of SEI nals parsed using `parseCaptionNals`
   * Assumes that `parseCaptionNals` has been called first
   * @see m2ts/caption-stream.js
   **/
  this.pushNals = function (nals) {
    if (!this.isInitialized() || !nals || nals.length === 0) {
      return null;
    }

    nals.forEach(function (nal) {
      captionStream$$1.push(nal);
    });
  };

  /**
   * Flushes underlying CaptionStream to dispatch processed, displayable captions
   * @see m2ts/caption-stream.js
   **/
  this.flushStream = function () {
    if (!this.isInitialized()) {
      return null;
    }

    captionStream$$1.flush();
  };

  /**
   * Reset caption buckets for new data
   **/
  this.clearParsedCaptions = function () {
    parsedCaptions.captions = [];
    parsedCaptions.captionStreams = {};
  };

  /**
   * Resets underlying CaptionStream
   * @see m2ts/caption-stream.js
   **/
  this.resetCaptionStream = function () {
    if (!this.isInitialized()) {
      return null;
    }

    captionStream$$1.reset();
  };

  /**
   * Convenience method to clear all captions flushed from the
   * CaptionStream and still being parsed
   * @see m2ts/caption-stream.js
   **/
  this.clearAllCaptions = function () {
    this.clearParsedCaptions();
    this.resetCaptionStream();
  };

  /**
   * Reset caption parser
   **/
  this.reset = function () {
    segmentCache = [];
    trackId = null;
    timescale = null;

    if (!parsedCaptions) {
      parsedCaptions = {
        captions: [],
        // CC1, CC2, CC3, CC4
        captionStreams: {}
      };
    } else {
      this.clearParsedCaptions();
    }

    this.resetCaptionStream();
  };

  // Establish the initial (empty) parser state.
  this.reset();
};
|
|
|
var captionParser = CaptionParser$$1;

// Aggregated mp4 API surface used by the transmuxer worker below.
var mp4$$1 = {
  generator: mp4Generator,
  probe: probe$$1,
  Transmuxer: transmuxer.Transmuxer,
  AudioSegmentStream: transmuxer.AudioSegmentStream,
  VideoSegmentStream: transmuxer.VideoSegmentStream,
  CaptionParser: captionParser
};
|
|
|
var classCallCheck = function classCallCheck(instance, Constructor) {
  // Babel helper: ensure a transpiled class constructor was invoked with
  // `new` rather than called as a plain function.
  if (instance instanceof Constructor) {
    return;
  }

  throw new TypeError("Cannot call a class as a function");
};
|
|
|
var createClass = function () {
  // Babel helper: install descriptors on a constructor (statics) and/or its
  // prototype (instance members) as non-enumerable, configurable properties.
  function applyDescriptors(target, descriptors) {
    descriptors.forEach(function (descriptor) {
      descriptor.enumerable = descriptor.enumerable || false;
      descriptor.configurable = true;

      // Data properties (with a `value`) are made writable as well.
      if ("value" in descriptor) {
        descriptor.writable = true;
      }

      Object.defineProperty(target, descriptor.key, descriptor);
    });
  }

  return function (Constructor, protoProps, staticProps) {
    if (protoProps) {
      applyDescriptors(Constructor.prototype, protoProps);
    }

    if (staticProps) {
      applyDescriptors(Constructor, staticProps);
    }

    return Constructor;
  };
}();
|
/** |
|
* @file transmuxer-worker.js |
|
*/ |
|
|
|
/** |
|
* Re-emits transmuxer events by converting them into messages to the |
|
* world outside the worker. |
|
* |
|
* @param {Object} transmuxer the transmuxer to wire events on |
|
* @private |
|
*/ |
|
|
|
|
|
var wireTransmuxerEvents = function wireTransmuxerEvents(self, transmuxer) {
  // Forward transmuxed segments out of the worker. Ownership of the
  // underlying ArrayBuffers is transferred instead of copied to save memory;
  // ArrayBuffers are transferable but generic TypedArrays are not.
  // @link https://developer.mozilla.org/en-US/docs/Web/API/Web_Workers_API/Using_web_workers#Passing_data_by_transferring_ownership_(transferable_objects)
  transmuxer.on('data', function (segment) {
    var initArray = segment.initSegment;
    segment.initSegment = {
      data: initArray.buffer,
      byteOffset: initArray.byteOffset,
      byteLength: initArray.byteLength
    };

    var typedArray = segment.data;
    segment.data = typedArray.buffer;

    self.postMessage({
      action: 'data',
      segment: segment,
      byteOffset: typedArray.byteOffset,
      byteLength: typedArray.byteLength
    }, [segment.data]);
  });

  // Relay captions when this transmuxer exposes a caption stream.
  if (transmuxer.captionStream) {
    transmuxer.captionStream.on('data', function (caption) {
      self.postMessage({
        action: 'caption',
        data: caption
      });
    });
  }

  transmuxer.on('done', function (data) {
    self.postMessage({
      action: 'done'
    });
  });

  transmuxer.on('gopInfo', function (gopInfo) {
    self.postMessage({
      action: 'gopInfo',
      gopInfo: gopInfo
    });
  });

  transmuxer.on('videoSegmentTimingInfo', function (videoSegmentTimingInfo) {
    self.postMessage({
      action: 'videoSegmentTimingInfo',
      videoSegmentTimingInfo: videoSegmentTimingInfo
    });
  });
};
|
/** |
|
* All incoming messages route through this hash. If no function exists |
|
* to handle an incoming message, then we ignore the message. |
|
* |
|
* @class MessageHandlers |
|
* @param {Object} options the options to initialize with |
|
*/ |
|
|
|
|
|
var MessageHandlers = function () {
  // Construct the handler hash and immediately build the first transmuxer.
  function MessageHandlers(self, options) {
    classCallCheck(this, MessageHandlers);
    this.options = options || {};
    this.self = self;
    this.init();
  }

  /**
   * initialize our web worker and wire all the events.
   */
  createClass(MessageHandlers, [{
    key: 'init',
    value: function init() {
      // Dispose any previous transmuxer before replacing it.
      if (this.transmuxer) {
        this.transmuxer.dispose();
      }

      this.transmuxer = new mp4$$1.Transmuxer(this.options);
      wireTransmuxerEvents(this.self, this.transmuxer);
    }

    /**
     * Adds data (a ts segment) to the start of the transmuxer pipeline for
     * processing.
     *
     * @param {ArrayBuffer} data data to push into the muxer
     */

  }, {
    key: 'push',
    value: function push(data) {
      // Cast array buffer to correct type for transmuxer
      var segment = new Uint8Array(data.data, data.byteOffset, data.byteLength);
      this.transmuxer.push(segment);
    }

    /**
     * Recreate the transmuxer so that the next segment added via `push`
     * start with a fresh transmuxer.
     */

  }, {
    key: 'reset',
    value: function reset() {
      this.init();
    }

    /**
     * Set the value that will be used as the `baseMediaDecodeTime` time for the
     * next segment pushed in. Subsequent segments will have their `baseMediaDecodeTime`
     * set relative to the first based on the PTS values.
     *
     * @param {Object} data used to set the timestamp offset in the muxer
     */

  }, {
    key: 'setTimestampOffset',
    value: function setTimestampOffset(data) {
      var timestampOffset = data.timestampOffset || 0;
      // Convert seconds to 90kHz clock ticks for the transmuxer.
      this.transmuxer.setBaseMediaDecodeTime(Math.round(timestampOffset * 90000));
    }

    /**
     * Sets the audio append start point, converting seconds to 90kHz ticks.
     *
     * @param {Object} data event data; `appendStart` is in seconds
     */

  }, {
    key: 'setAudioAppendStart',
    value: function setAudioAppendStart(data) {
      this.transmuxer.setAudioAppendStart(Math.ceil(data.appendStart * 90000));
    }

    /**
     * Forces the pipeline to finish processing the last segment and emit it's
     * results.
     *
     * @param {Object} data event data, not really used
     */

  }, {
    key: 'flush',
    value: function flush(data) {
      this.transmuxer.flush();
    }

    /**
     * Delegates to the transmuxer's resetCaptions().
     */

  }, {
    key: 'resetCaptions',
    value: function resetCaptions() {
      this.transmuxer.resetCaptions();
    }

    /**
     * Passes a defensive copy of the provided GOP list to the transmuxer.
     *
     * @param {Object} data event data; `gopsToAlignWith` is the GOP list
     */

  }, {
    key: 'alignGopsWith',
    value: function alignGopsWith(data) {
      this.transmuxer.alignGopsWith(data.gopsToAlignWith.slice());
    }
  }]);
  return MessageHandlers;
}();
|
/** |
|
* Our web wroker interface so that things can talk to mux.js |
|
* that will be running in a web worker. the scope is passed to this by |
|
* webworkify. |
|
* |
|
* @param {Object} self the scope for the web worker |
|
*/ |
|
|
|
|
|
var TransmuxerWorker = function TransmuxerWorker(self) {
  // Route messages from the host page to the matching MessageHandlers
  // method; the worker scope (`self`) is supplied by webworkify.
  self.onmessage = function (event) {
    var data = event.data;

    // An explicit init message (re)creates the handlers with options.
    if (data.action === 'init' && data.options) {
      this.messageHandlers = new MessageHandlers(self, data.options);
      return;
    }

    // Lazily create handlers with default options for any other message.
    if (!this.messageHandlers) {
      this.messageHandlers = new MessageHandlers(self);
    }

    // Dispatch to the named handler when one exists; unknown actions are
    // silently ignored.
    if (data && data.action && data.action !== 'init') {
      if (this.messageHandlers[data.action]) {
        this.messageHandlers[data.action](data);
      }
    }
  };
};
|
|
|
var transmuxerWorker = new TransmuxerWorker(self); |
|
return transmuxerWorker; |
|
}(); |
|
}); |
|
/** |
|
* @file - codecs.js - Handles tasks regarding codec strings such as translating them to |
|
* codec strings, or translating codec strings into objects that can be examined. |
|
*/ |
|
// Default codec parameters if none were provided for video and/or audio |
|
|
|
var defaultCodecs = {
  videoCodec: 'avc1',
  // H.264 object type indicator appended to `videoCodec` (profile/level bytes)
  videoObjectTypeIndicator: '.4d400d',
  // AAC-LC
  audioProfile: '2'
};
|
/** |
|
* Replace the old apple-style `avc1.<dd>.<dd>` codec string with the standard |
|
* `avc1.<hhhhhh>` |
|
* |
|
* @param {Array} codecs an array of codec strings to fix |
|
* @return {Array} the translated codec array |
|
* @private |
|
*/ |
|
|
|
var translateLegacyCodecs = function translateLegacyCodecs(codecs) {
  // Rewrite apple-style `avc1.<profile>.<level>` (decimal) codec strings as
  // the standard `avc1.<hex profile>00<hex level>` form; other strings pass
  // through unchanged.
  return codecs.map(function (codec) {
    return codec.replace(/avc1\.(\d+)\.(\d+)/i, function (orig, profile, avcLevel) {
      var toHexByte = function (value) {
        return ('00' + Number(value).toString(16)).slice(-2);
      };

      return 'avc1.' + toHexByte(profile) + '00' + toHexByte(avcLevel);
    });
  });
};
|
/** |
|
* Parses a codec string to retrieve the number of codecs specified, |
|
* the video codec and object type indicator, and the audio profile. |
|
*/ |
|
|
|
|
|
var parseCodecs = function parseCodecs() {
  var codecs = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : '';
  var result = {
    codecCount: 0
  };

  // Count the comma-separated codec entries; assume two (audio + video)
  // if the count somehow comes out falsy.
  result.codecCount = codecs.split(',').length;
  result.codecCount = result.codecCount || 2;

  // Extract the video codec (avc1/avc3) and its object type indicator.
  var videoMatch = /(^|\s|,)+(avc[13])([^ ,]*)/i.exec(codecs);

  if (videoMatch) {
    result.videoCodec = videoMatch[2];
    result.videoObjectTypeIndicator = videoMatch[3];
  }

  // The audio profile is the final field of the mp4a codec string.
  result.audioProfile = /(^|\s|,)+mp4a.[0-9A-Fa-f]+\.([0-9A-Fa-f]+)/i.exec(codecs);
  result.audioProfile = result.audioProfile && result.audioProfile[2];
  return result;
};
|
/** |
|
* Replace codecs in the codec string with the old apple-style `avc1.<dd>.<dd>` to the |
|
* standard `avc1.<hhhhhh>`. |
|
* |
|
* @param codecString {String} the codec string |
|
* @return {String} the codec string with old apple-style codecs replaced |
|
* |
|
* @private |
|
*/ |
|
|
|
|
|
var mapLegacyAvcCodecs = function mapLegacyAvcCodecs(codecString) {
  // Delegate the actual translation of any legacy `avc1.<dd>.<dd>` match
  // to translateLegacyCodecs.
  var rewrite = function (match) {
    return translateLegacyCodecs([match])[0];
  };

  return codecString.replace(/avc1\.(\d+)\.(\d+)/i, rewrite);
};
|
/** |
|
* Build a media mime-type string from a set of parameters |
|
* @param {String} type either 'audio' or 'video' |
|
* @param {String} container either 'mp2t' or 'mp4' |
|
* @param {Array} codecs an array of codec strings to add |
|
* @return {String} a valid media mime-type |
|
*/ |
|
|
|
|
|
var makeMimeTypeString = function makeMimeTypeString(type, container, codecs) {
  // Drop falsey codec entries so Array#join doesn't produce spurious commas.
  var presentCodecs = codecs.filter(function (codec) {
    return Boolean(codec);
  });

  return type + '/' + container + '; codecs="' + presentCodecs.join(', ') + '"';
};
|
/** |
|
* Returns the type container based on information in the playlist |
|
* @param {Playlist} media the current media playlist |
|
* @return {String} a valid media container type |
|
*/ |
|
|
|
|
|
var getContainerType = function getContainerType(media) {
  // An initialization segment (`map`) means the media playlist is an iframe
  // playlist or is using the mp4 container. Iframe playlists aren't
  // supported, so assume this is signalling mp4 fragments.
  var segments = media.segments;

  if (segments && segments.length && segments[0].map) {
    return 'mp4';
  }

  return 'mp2t';
};
|
/** |
|
* Returns a set of codec strings parsed from the playlist or the default |
|
* codec strings if no codecs were specified in the playlist |
|
* @param {Playlist} media the current media playlist |
|
* @return {Object} an object with the video and audio codecs |
|
*/ |
|
|
|
|
|
var getCodecs = function getCodecs(media) {
  var mediaAttributes = media.attributes || {};

  // Honor explicitly specified codecs; otherwise fall back to the defaults.
  return mediaAttributes.CODECS ? parseCodecs(mediaAttributes.CODECS) : defaultCodecs;
};
|
|
|
var audioProfileFromDefault = function audioProfileFromDefault(master, audioGroupId) {
  // Nothing to look up without an AUDIO media group and a group id.
  if (!master.mediaGroups.AUDIO || !audioGroupId) {
    return null;
  }

  var audioGroup = master.mediaGroups.AUDIO[audioGroupId];

  if (!audioGroup) {
    return null;
  }

  // The codec should be the same for all playlists within an audio type, so
  // the first playlist of the first default type is representative.
  var names = Object.keys(audioGroup);

  for (var i = 0; i < names.length; i++) {
    var audioType = audioGroup[names[i]];

    if (audioType.default && audioType.playlists) {
      return parseCodecs(audioType.playlists[0].attributes.CODECS).audioProfile;
    }
  }

  return null;
};
|
/** |
|
* Calculates the MIME type strings for a working configuration of |
|
* SourceBuffers to play variant streams in a master playlist. If |
|
* there is no possible working configuration, an empty array will be |
|
* returned. |
|
* |
|
* @param master {Object} the m3u8 object for the master playlist |
|
* @param media {Object} the m3u8 object for the variant playlist |
|
* @return {Array} the MIME type strings. If the array has more than |
|
* one entry, the first element should be applied to the video |
|
* SourceBuffer and the second to the audio SourceBuffer. |
|
* |
|
* @private |
|
*/ |
|
|
|
|
|
var mimeTypesForPlaylist = function mimeTypesForPlaylist(master, media) {
  // Fix: bail out before touching `media`. Previously the `!media` guard ran
  // after getContainerType(media) and media.attributes had already been
  // evaluated, so a missing media threw a TypeError and the guard was dead.
  if (!media) {
    // Not enough information
    return [];
  }

  var containerType = getContainerType(media);
  var codecInfo = getCodecs(media);
  var mediaAttributes = media.attributes || {};

  // Default condition for a traditional HLS (no demuxed audio/video)
  var isMuxed = true;
  var isMaat = false;

  if (master.mediaGroups.AUDIO && mediaAttributes.AUDIO) {
    var audioGroup = master.mediaGroups.AUDIO[mediaAttributes.AUDIO];

    // Handle the case where we are in a multiple-audio track scenario
    if (audioGroup) {
      isMaat = true;
      // Start with the everything demuxed then...
      isMuxed = false;

      // ...check to see if any audio group tracks are muxed (ie. lacking a uri)
      for (var groupId in audioGroup) {
        // either a uri is present (if the case of HLS and an external playlist), or
        // playlists is present (in the case of DASH where we don't have external audio
        // playlists)
        if (!audioGroup[groupId].uri && !audioGroup[groupId].playlists) {
          isMuxed = true;
          break;
        }
      }
    }
  }

  // HLS with multiple-audio tracks must always get an audio codec.
  // Put another way, there is no way to have a video-only multiple-audio HLS!
  if (isMaat && !codecInfo.audioProfile) {
    if (!isMuxed) {
      // It is possible for codecs to be specified on the audio media group playlist but
      // not on the rendition playlist. This is mostly the case for DASH, where audio and
      // video are always separate (and separately specified).
      codecInfo.audioProfile = audioProfileFromDefault(master, mediaAttributes.AUDIO);
    }

    if (!codecInfo.audioProfile) {
      videojs$1.log.warn('Multiple audio tracks present but no audio codec string is specified. ' + 'Attempting to use the default audio codec (mp4a.40.2)');
      // NOTE(review): when getCodecs falls back to the shared defaultCodecs
      // object, this assignment mutates it for all callers — consider having
      // getCodecs return a copy; verify against getCodecs.
      codecInfo.audioProfile = defaultCodecs.audioProfile;
    }
  }

  // Generate the final codec strings from the codec object generated above
  var codecStrings = {};

  if (codecInfo.videoCodec) {
    codecStrings.video = '' + codecInfo.videoCodec + codecInfo.videoObjectTypeIndicator;
  }

  if (codecInfo.audioProfile) {
    codecStrings.audio = 'mp4a.40.' + codecInfo.audioProfile;
  }

  // Finally, make and return an array with proper mime-types depending on
  // the configuration
  var justAudio = makeMimeTypeString('audio', containerType, [codecStrings.audio]);
  var justVideo = makeMimeTypeString('video', containerType, [codecStrings.video]);
  var bothVideoAudio = makeMimeTypeString('video', containerType, [codecStrings.video, codecStrings.audio]);

  if (isMaat) {
    if (!isMuxed && codecStrings.video) {
      return [justVideo, justAudio];
    }

    if (!isMuxed && !codecStrings.video) {
      // There is no muxed content and no video codec string, so this is an audio only
      // stream with alternate audio.
      return [justAudio, justAudio];
    }

    // There exists the possiblity that this will return a `video/container`
    // mime-type for the first entry in the array even when there is only audio.
    // This doesn't appear to be a problem and simplifies the code.
    return [bothVideoAudio, justAudio];
  }

  // If there is no video codec at all, always just return a single
  // audio/<container> mime-type
  if (!codecStrings.video) {
    return [justAudio];
  }

  // When not using separate audio media groups, audio and video is
  // *always* muxed
  return [bothVideoAudio];
};
|
/**
 * Parse a content type header into a type and parameters
 * object
 *
 * @param {String} type the content type header
 * @return {Object} the parsed content-type, with the mime type in `type` and
 *                  a map of parameter name -> value (quotes stripped) in
 *                  `parameters`
 * @private
 */


var parseContentType = function parseContentType(type) {
  var object = {
    type: '',
    parameters: {}
  };
  var parameters = type.trim().split(';'); // first parameter should always be content-type

  object.type = parameters.shift().trim();
  parameters.forEach(function (parameter) {
    // Split on the first '=' only so that values which themselves contain
    // '=' (e.g. base64 padding) are not truncated. The previous
    // implementation used split('='), which kept only the first piece of
    // such values.
    var separatorIndex = parameter.indexOf('=');

    if (separatorIndex > -1) {
      var name = parameter.slice(0, separatorIndex).replace(/"/g, '').trim();
      var value = parameter.slice(separatorIndex + 1).replace(/"/g, '').trim();
      object.parameters[name] = value;
    }
  });
  return object;
};
|
/**
 * Check if a codec string refers to an audio codec.
 *
 * @param {String} codec codec string to check
 * @return {Boolean} if this is an audio codec
 * @private
 */


var isAudioCodec = function isAudioCodec(codec) {
  // The separator between the object type and the audio profile must be an
  // escaped literal dot; the previous pattern's bare `.` matched any
  // character (e.g. 'mp4a.40x2' incorrectly passed).
  return /mp4a\.\d+\.\d+/i.test(codec);
};
|
/**
 * Check if a codec string refers to a video codec.
 *
 * @param {String} codec codec string to check
 * @return {Boolean} if this is a video codec
 * @private
 */


var isVideoCodec = function isVideoCodec(codec) {
  // An AVC codec string looks like 'avc1.' followed by hex digits
  // (profile and level), matched case-insensitively anywhere in the string.
  return (/avc1\.[\da-f]+/i).exec(codec) !== null;
};
|
/**
 * Returns a list of gops in the buffer that have a pts value of 3 seconds or more in
 * front of current time.
 *
 * @param {Array} buffer
 *        The current buffer of gop information
 * @param {Number} currentTime
 *        The current time
 * @param {Double} mapping
 *        Offset to map display time to stream presentation time
 * @return {Array}
 *         List of gops considered safe to append over
 */


var gopsSafeToAlignWith = function gopsSafeToAlignWith(buffer, currentTime, mapping) {
  if (typeof currentTime === 'undefined' || currentTime === null || !buffer.length) {
    return [];
  }

  // pts for currentTime plus a 3 second pad of wiggle room (90kHz clock)
  var thresholdPts = Math.ceil((currentTime - mapping + 3) * 90000);
  var firstSafeIndex = 0;

  // advance past every gop at or before the threshold
  while (firstSafeIndex < buffer.length && buffer[firstSafeIndex].pts <= thresholdPts) {
    firstSafeIndex++;
  }

  return buffer.slice(firstSafeIndex);
};
|
/**
 * Appends gop information (timing and byteLength) received by the transmuxer for the
 * gops appended in the last call to appendBuffer
 *
 * @param {Array} buffer
 *        The current buffer of gop information
 * @param {Array} gops
 *        List of new gop information
 * @param {boolean} replace
 *        If true, replace the buffer with the new gop information. If false, append the
 *        new gop information to the buffer in the right location of time.
 * @return {Array}
 *         Updated list of gop information
 */


var updateGopBuffer = function updateGopBuffer(buffer, gops, replace) {
  if (!gops.length) {
    return buffer;
  }

  if (replace) {
    // In safe append mode the gop buffer is completely replaced with the
    // most recently appended data so that future appends only try to align
    // with gops that are both ahead of current time and within the last
    // segment appended.
    return gops.slice();
  }

  var newStartPts = gops[0].pts;
  var insertIndex = 0;

  // find where the new gops begin in pts order
  while (insertIndex < buffer.length && buffer[insertIndex].pts < newStartPts) {
    insertIndex++;
  }

  // keep everything strictly before the new gops, then append them
  return buffer.slice(0, insertIndex).concat(gops);
};
|
/**
 * Removes gop information in buffer that overlaps with provided start and end
 *
 * @param {Array} buffer
 *        The current buffer of gop information
 * @param {Double} start
 *        position to start the remove at
 * @param {Double} end
 *        position to end the remove at
 * @param {Double} mapping
 *        Offset to map display time to stream presentation time
 */


var removeGopBuffer = function removeGopBuffer(buffer, start, end, mapping) {
  var startPts = Math.ceil((start - mapping) * 90000);
  var endPts = Math.ceil((end - mapping) * 90000);
  var result = buffer.slice();

  // Walk backwards to the last gop that starts at or before the end of the
  // removal range.
  var lastIndex = buffer.length - 1;

  while (lastIndex >= 0 && buffer[lastIndex].pts > endPts) {
    lastIndex--;
  }

  if (lastIndex < 0) {
    // no removal because end of remove range is before start of buffer
    return result;
  }

  // Walk backwards again to the gop containing the start of the removal
  // range (first index from the top with pts at or before startPts) ...
  var firstIndex = lastIndex;

  while (firstIndex >= 0 && buffer[firstIndex].pts > startPts) {
    firstIndex--;
  }

  // ... clamping the remove range start to index 0
  firstIndex = Math.max(firstIndex, 0);
  result.splice(firstIndex, lastIndex - firstIndex + 1);
  return result;
};
|
|
|
var buffered = function buffered(videoBuffer, audioBuffer, audioDisabled) { |
|
var start = null; |
|
var end = null; |
|
var arity = 0; |
|
var extents = []; |
|
var ranges = []; // neither buffer has been created yet |
|
|
|
if (!videoBuffer && !audioBuffer) { |
|
return videojs$1.createTimeRange(); |
|
} // only one buffer is configured |
|
|
|
|
|
if (!videoBuffer) { |
|
return audioBuffer.buffered; |
|
} |
|
|
|
if (!audioBuffer) { |
|
return videoBuffer.buffered; |
|
} // both buffers are configured |
|
|
|
|
|
if (audioDisabled) { |
|
return videoBuffer.buffered; |
|
} // both buffers are empty |
|
|
|
|
|
if (videoBuffer.buffered.length === 0 && audioBuffer.buffered.length === 0) { |
|
return videojs$1.createTimeRange(); |
|
} // Handle the case where we have both buffers and create an |
|
// intersection of the two |
|
|
|
|
|
var videoBuffered = videoBuffer.buffered; |
|
var audioBuffered = audioBuffer.buffered; |
|
var count = videoBuffered.length; // A) Gather up all start and end times |
|
|
|
while (count--) { |
|
extents.push({ |
|
time: videoBuffered.start(count), |
|
type: 'start' |
|
}); |
|
extents.push({ |
|
time: videoBuffered.end(count), |
|
type: 'end' |
|
}); |
|
} |
|
|
|
count = audioBuffered.length; |
|
|
|
while (count--) { |
|
extents.push({ |
|
time: audioBuffered.start(count), |
|
type: 'start' |
|
}); |
|
extents.push({ |
|
time: audioBuffered.end(count), |
|
type: 'end' |
|
}); |
|
} // B) Sort them by time |
|
|
|
|
|
extents.sort(function (a, b) { |
|
return a.time - b.time; |
|
}); // C) Go along one by one incrementing arity for start and decrementing |
|
// arity for ends |
|
|
|
for (count = 0; count < extents.length; count++) { |
|
if (extents[count].type === 'start') { |
|
arity++; // D) If arity is ever incremented to 2 we are entering an |
|
// overlapping range |
|
|
|
if (arity === 2) { |
|
start = extents[count].time; |
|
} |
|
} else if (extents[count].type === 'end') { |
|
arity--; // E) If arity is ever decremented to 1 we leaving an |
|
// overlapping range |
|
|
|
if (arity === 1) { |
|
end = extents[count].time; |
|
} |
|
} // F) Record overlapping ranges |
|
|
|
|
|
if (start !== null && end !== null) { |
|
ranges.push([start, end]); |
|
start = null; |
|
end = null; |
|
} |
|
} |
|
|
|
return videojs$1.createTimeRanges(ranges); |
|
}; |
|
/**
 * @file virtual-source-buffer.js
 */


var ONE_SECOND_IN_TS$3 = 90000; // We create a wrapper around the SourceBuffer so that we can manage the
// state of the `updating` property manually. We have to do this because
// Firefox changes `updating` to false long before triggering `updateend`
// events and that was causing strange problems in videojs-contrib-hls

var makeWrappedSourceBuffer = function makeWrappedSourceBuffer(mediaSource, mimeType) {
  var nativeBuffer = mediaSource.addSourceBuffer(mimeType);
  var wrapper = Object.create(null);
  wrapper.updating = false;
  wrapper.realBuffer_ = nativeBuffer;

  // Proxy a single key of the native buffer onto the wrapper: methods are
  // forwarded with the native buffer as receiver; everything else (that the
  // wrapper does not already define, e.g. `updating`) becomes a live
  // get/set pass-through property.
  var proxyKey = function proxyKey(key) {
    if (typeof nativeBuffer[key] === 'function') {
      wrapper[key] = function () {
        return nativeBuffer[key].apply(nativeBuffer, arguments);
      };
    } else if (typeof wrapper[key] === 'undefined') {
      Object.defineProperty(wrapper, key, {
        get: function get$$1() {
          return nativeBuffer[key];
        },
        set: function set$$1(v) {
          return nativeBuffer[key] = v;
        }
      });
    }
  };

  for (var key in nativeBuffer) {
    proxyKey(key);
  }

  return wrapper;
};
|
/**
 * VirtualSourceBuffers exist so that we can transmux non native formats
 * into a native format, but keep the same api as a native source buffer.
 * It creates a transmuxer, that works in its own thread (a web worker) and
 * that transmuxer muxes the data into a native format. VirtualSourceBuffer will
 * then send all of that data to the native sourcebuffer so that it is
 * indistinguishable from a natively supported format.
 *
 * @param {HtmlMediaSource} mediaSource the parent mediaSource
 * @param {Array} codecs array of codecs that we will be dealing with
 * @class VirtualSourceBuffer
 * @extends video.js.EventTarget
 */


var VirtualSourceBuffer = function (_videojs$EventTarget) {
  inherits$1(VirtualSourceBuffer, _videojs$EventTarget);

  function VirtualSourceBuffer(mediaSource, codecs) {
    classCallCheck$1(this, VirtualSourceBuffer);

    // NOTE(review): the extra videojs$1.EventTarget argument passed to the
    // super call looks unintentional — confirm it is ignored upstream
    var _this = possibleConstructorReturn$1(this, (VirtualSourceBuffer.__proto__ || Object.getPrototypeOf(VirtualSourceBuffer)).call(this, videojs$1.EventTarget));

    // seconds offset applied to incoming segments; see the
    // `timestampOffset` property defined below
    _this.timestampOffset_ = 0;
    // transmuxed segments accumulated by data_() until done_() fires
    _this.pendingBuffers_ = [];
    // internal "updating" flag covering the transmux + append cycle
    _this.bufferUpdating_ = false;
    _this.mediaSource_ = mediaSource;
    _this.codecs_ = codecs;
    _this.audioCodec_ = null;
    _this.videoCodec_ = null;
    _this.audioDisabled_ = false;
    // the audio init segment is (re)appended on startup and after
    // timeline or rendition changes
    _this.appendAudioInitSegment_ = true;
    // gop (group-of-pictures) timing info used to align appends
    _this.gopBuffer_ = [];
    _this.timeMapping_ = 0;
    // IE 11 requires gop-aligned ("safe") appends
    _this.safeAppend_ = videojs$1.browser.IE_VERSION >= 11;
    var options = {
      remux: false,
      alignGopsAtEnd: _this.safeAppend_
    };

    // sort the configured codecs into the audio/video slots
    _this.codecs_.forEach(function (codec) {
      if (isAudioCodec(codec)) {
        _this.audioCodec_ = codec;
      } else if (isVideoCodec(codec)) {
        _this.videoCodec_ = codec;
      }
    }); // append muxed segments to their respective native buffers as
    // soon as they are available

    _this.transmuxer_ = new TransmuxWorker();

    _this.transmuxer_.postMessage({
      action: 'init',
      options: options
    });

    // route messages coming back from the transmuxer worker to their
    // handler methods
    _this.transmuxer_.onmessage = function (event) {
      if (event.data.action === 'data') {
        return _this.data_(event);
      }

      if (event.data.action === 'done') {
        return _this.done_(event);
      }

      if (event.data.action === 'gopInfo') {
        return _this.appendGopInfo_(event);
      }

      if (event.data.action === 'videoSegmentTimingInfo') {
        return _this.videoSegmentTimingInfo_(event.data.videoSegmentTimingInfo);
      }
    }; // this timestampOffset is a property with the side-effect of resetting
    // baseMediaDecodeTime in the transmuxer on the setter

    Object.defineProperty(_this, 'timestampOffset', {
      get: function get$$1() {
        return this.timestampOffset_;
      },
      set: function set$$1(val) {
        // only accept non-negative numeric offsets; anything else is ignored
        if (typeof val === 'number' && val >= 0) {
          this.timestampOffset_ = val;
          this.appendAudioInitSegment_ = true; // reset gop buffer on timestampoffset as this signals a change in timeline

          this.gopBuffer_.length = 0;
          this.timeMapping_ = 0; // We have to tell the transmuxer to set the baseMediaDecodeTime to
          // the desired timestampOffset for the next segment

          this.transmuxer_.postMessage({
            action: 'setTimestampOffset',
            timestampOffset: val
          });
        }
      }
    }); // setting the append window affects both source buffers

    Object.defineProperty(_this, 'appendWindowStart', {
      get: function get$$1() {
        // read from whichever native buffer exists (video preferred)
        return (this.videoBuffer_ || this.audioBuffer_).appendWindowStart;
      },
      set: function set$$1(start) {
        if (this.videoBuffer_) {
          this.videoBuffer_.appendWindowStart = start;
        }

        if (this.audioBuffer_) {
          this.audioBuffer_.appendWindowStart = start;
        }
      }
    }); // this buffer is "updating" if either of its native buffers are

    Object.defineProperty(_this, 'updating', {
      get: function get$$1() {
        // also true during our own transmux/append cycle (bufferUpdating_);
        // a disabled audio buffer's updating state is ignored
        return !!(this.bufferUpdating_ || !this.audioDisabled_ && this.audioBuffer_ && this.audioBuffer_.updating || this.videoBuffer_ && this.videoBuffer_.updating);
      }
    }); // the buffered property is the intersection of the buffered
    // ranges of the native source buffers

    Object.defineProperty(_this, 'buffered', {
      get: function get$$1() {
        return buffered(this.videoBuffer_, this.audioBuffer_, this.audioDisabled_);
      }
    });
    return _this;
  }
  /**
   * When we get a data event from the transmuxer
   * we call this function and handle the data that
   * was sent to us
   *
   * @private
   * @param {Event} event the data event from the transmuxer
   */


  createClass$1(VirtualSourceBuffer, [{
    key: 'data_',
    value: function data_(event) {
      var segment = event.data.segment; // Cast ArrayBuffer to TypedArray

      segment.data = new Uint8Array(segment.data, event.data.byteOffset, event.data.byteLength);
      segment.initSegment = new Uint8Array(segment.initSegment.data, segment.initSegment.byteOffset, segment.initSegment.byteLength);
      createTextTracksIfNecessary(this, this.mediaSource_, segment); // Add the segments to the pendingBuffers array

      this.pendingBuffers_.push(segment);
      return;
    }
    /**
     * When we get a done event from the transmuxer
     * we call this function and we process all
     * of the pending data that we have been saving in the
     * data_ function
     *
     * @private
     * @param {Event} event the done event from the transmuxer
     */

  }, {
    key: 'done_',
    value: function done_(event) {
      // Don't process and append data if the mediaSource is closed
      if (this.mediaSource_.readyState === 'closed') {
        this.pendingBuffers_.length = 0;
        return;
      } // All buffers should have been flushed from the muxer
      // start processing anything we have received

      this.processPendingSegments_();
      return;
    }
    /**
     * Convert the transmuxer's 90kHz-clock video timing info to seconds and
     * re-emit it as a 'videoSegmentTimingInfo' event on this source buffer.
     *
     * @private
     * @param {Object} timingInfo start/end dts+pts and baseMediaDecodeTime
     *                 in transport-stream clock units
     */

  }, {
    key: 'videoSegmentTimingInfo_',
    value: function videoSegmentTimingInfo_(timingInfo) {
      var timingInfoInSeconds = {
        start: {
          decode: timingInfo.start.dts / ONE_SECOND_IN_TS$3,
          presentation: timingInfo.start.pts / ONE_SECOND_IN_TS$3
        },
        end: {
          decode: timingInfo.end.dts / ONE_SECOND_IN_TS$3,
          presentation: timingInfo.end.pts / ONE_SECOND_IN_TS$3
        },
        baseMediaDecodeTime: timingInfo.baseMediaDecodeTime / ONE_SECOND_IN_TS$3
      };

      if (timingInfo.prependedContentDuration) {
        timingInfoInSeconds.prependedContentDuration = timingInfo.prependedContentDuration / ONE_SECOND_IN_TS$3;
      }

      this.trigger({
        type: 'videoSegmentTimingInfo',
        videoSegmentTimingInfo: timingInfoInSeconds
      });
    }
    /**
     * Create our internal native audio/video source buffers and add
     * event handlers to them with the following conditions:
     * 1. they do not already exist on the mediaSource
     * 2. this VSB has a codec for them
     *
     * @private
     */

  }, {
    key: 'createRealSourceBuffers_',
    value: function createRealSourceBuffers_() {
      var _this2 = this;

      var types = ['audio', 'video'];
      types.forEach(function (type) {
        // Don't create a SourceBuffer of this type if we don't have a
        // codec for it
        if (!_this2[type + 'Codec_']) {
          return;
        } // Do nothing if a SourceBuffer of this type already exists

        if (_this2[type + 'Buffer_']) {
          return;
        }

        var buffer = null; // If the mediasource already has a SourceBuffer for the codec
        // use that

        if (_this2.mediaSource_[type + 'Buffer_']) {
          buffer = _this2.mediaSource_[type + 'Buffer_']; // In multiple audio track cases, the audio source buffer is disabled
          // on the main VirtualSourceBuffer by the HTMLMediaSource much earlier
          // than createRealSourceBuffers_ is called to create the second
          // VirtualSourceBuffer because that happens as a side-effect of
          // videojs-contrib-hls starting the audioSegmentLoader. As a result,
          // the audioBuffer is essentially "ownerless" and no one will toggle
          // the `updating` state back to false once the `updateend` event is received
          //
          // Setting `updating` to false manually will work around this
          // situation and allow work to continue

          buffer.updating = false;
        } else {
          var codecProperty = type + 'Codec_';
          var mimeType = type + '/mp4;codecs="' + _this2[codecProperty] + '"';
          buffer = makeWrappedSourceBuffer(_this2.mediaSource_.nativeMediaSource_, mimeType);
          _this2.mediaSource_[type + 'Buffer_'] = buffer;
        }

        _this2[type + 'Buffer_'] = buffer; // Wire up the events to the SourceBuffer

        ['update', 'updatestart', 'updateend'].forEach(function (event) {
          buffer.addEventListener(event, function () {
            // if audio is disabled
            if (type === 'audio' && _this2.audioDisabled_) {
              return;
            }

            if (event === 'updateend') {
              _this2[type + 'Buffer_'].updating = false;
            }

            // only re-emit the event when no other (enabled) native buffer
            // is still updating
            var shouldTrigger = types.every(function (t) {
              // skip checking audio's updating status if audio
              // is not enabled
              if (t === 'audio' && _this2.audioDisabled_) {
                return true;
              } // if the other type if updating we don't trigger

              if (type !== t && _this2[t + 'Buffer_'] && _this2[t + 'Buffer_'].updating) {
                return false;
              }

              return true;
            });

            if (shouldTrigger) {
              return _this2.trigger(event);
            }
          });
        });
      });
    }
    /**
     * Emulate the native mediasource function, but our function will
     * send all of the proposed segments to the transmuxer so that we
     * can transmux them before we append them to our internal
     * native source buffers in the correct format.
     *
     * @link https://developer.mozilla.org/en-US/docs/Web/API/SourceBuffer/appendBuffer
     * @param {Uint8Array} segment the segment to append to the buffer
     */

  }, {
    key: 'appendBuffer',
    value: function appendBuffer(segment) {
      // Start the internal "updating" state
      this.bufferUpdating_ = true;

      // tell the transmuxer where the previous audio append ended so it can
      // trim/pad audio frames accordingly
      if (this.audioBuffer_ && this.audioBuffer_.buffered.length) {
        var audioBuffered = this.audioBuffer_.buffered;
        this.transmuxer_.postMessage({
          action: 'setAudioAppendStart',
          appendStart: audioBuffered.end(audioBuffered.length - 1)
        });
      }

      // give the transmuxer the gops ahead of playback it may align with
      if (this.videoBuffer_) {
        this.transmuxer_.postMessage({
          action: 'alignGopsWith',
          gopsToAlignWith: gopsSafeToAlignWith(this.gopBuffer_, this.mediaSource_.player_ ? this.mediaSource_.player_.currentTime() : null, this.timeMapping_)
        });
      }

      this.transmuxer_.postMessage({
        action: 'push',
        // Send the typed-array of data as an ArrayBuffer so that
        // it can be sent as a "Transferable" and avoid the costly
        // memory copy
        data: segment.buffer,
        // To recreate the original typed-array, we need information
        // about what portion of the ArrayBuffer it was a view into
        byteOffset: segment.byteOffset,
        byteLength: segment.byteLength
      }, [segment.buffer]);
      this.transmuxer_.postMessage({
        action: 'flush'
      });
    }
    /**
     * Appends gop information (timing and byteLength) received by the transmuxer for the
     * gops appended in the last call to appendBuffer
     *
     * @param {Event} event
     *        The gopInfo event from the transmuxer
     * @param {Array} event.data.gopInfo
     *        List of gop info to append
     */

  }, {
    key: 'appendGopInfo_',
    value: function appendGopInfo_(event) {
      this.gopBuffer_ = updateGopBuffer(this.gopBuffer_, event.data.gopInfo, this.safeAppend_);
    }
    /**
     * Emulate the native mediasource function and remove parts
     * of the buffer from any of our internal buffers that exist
     *
     * @link https://developer.mozilla.org/en-US/docs/Web/API/SourceBuffer/remove
     * @param {Double} start position to start the remove at
     * @param {Double} end position to end the remove at
     */

  }, {
    key: 'remove',
    value: function remove(start, end) {
      if (this.videoBuffer_) {
        this.videoBuffer_.updating = true;
        this.videoBuffer_.remove(start, end);
        this.gopBuffer_ = removeGopBuffer(this.gopBuffer_, start, end, this.timeMapping_);
      }

      if (!this.audioDisabled_ && this.audioBuffer_) {
        this.audioBuffer_.updating = true;
        this.audioBuffer_.remove(start, end);
      } // Remove Metadata Cues (id3)

      removeCuesFromTrack(start, end, this.metadataTrack_); // Remove Any Captions

      if (this.inbandTextTracks_) {
        for (var track in this.inbandTextTracks_) {
          removeCuesFromTrack(start, end, this.inbandTextTracks_[track]);
        }
      }
    }
    /**
     * Process any segments that the muxer has output
     * Concatenate segments together based on type and append them into
     * their respective sourceBuffers
     *
     * @private
     */

  }, {
    key: 'processPendingSegments_',
    value: function processPendingSegments_() {
      var sortedSegments = {
        video: {
          segments: [],
          bytes: 0
        },
        audio: {
          segments: [],
          bytes: 0
        },
        captions: [],
        metadata: []
      }; // Sort segments into separate video/audio arrays and
      // keep track of their total byte lengths

      sortedSegments = this.pendingBuffers_.reduce(function (segmentObj, segment) {
        var type = segment.type;
        var data = segment.data;
        var initSegment = segment.initSegment;
        segmentObj[type].segments.push(data);
        segmentObj[type].bytes += data.byteLength;
        segmentObj[type].initSegment = initSegment; // Gather any captions into a single array

        if (segment.captions) {
          segmentObj.captions = segmentObj.captions.concat(segment.captions);
        }

        if (segment.info) {
          segmentObj[type].info = segment.info;
        } // Gather any metadata into a single array

        if (segment.metadata) {
          segmentObj.metadata = segmentObj.metadata.concat(segment.metadata);
        }

        return segmentObj;
      }, sortedSegments); // Create the real source buffers if they don't exist by now since we
      // finally are sure what tracks are contained in the source

      if (!this.videoBuffer_ && !this.audioBuffer_) {
        // Remove any codecs that may have been specified by default but
        // are no longer applicable now
        if (sortedSegments.video.bytes === 0) {
          this.videoCodec_ = null;
        }

        if (sortedSegments.audio.bytes === 0) {
          this.audioCodec_ = null;
        }

        this.createRealSourceBuffers_();
      }

      // surface stream info from the transmuxer on the media source
      if (sortedSegments.audio.info) {
        this.mediaSource_.trigger({
          type: 'audioinfo',
          info: sortedSegments.audio.info
        });
      }

      if (sortedSegments.video.info) {
        this.mediaSource_.trigger({
          type: 'videoinfo',
          info: sortedSegments.video.info
        });
      }

      // prepend the audio init segment the first time (and after resets)
      if (this.appendAudioInitSegment_) {
        if (!this.audioDisabled_ && this.audioBuffer_) {
          sortedSegments.audio.segments.unshift(sortedSegments.audio.initSegment);
          sortedSegments.audio.bytes += sortedSegments.audio.initSegment.byteLength;
        }

        this.appendAudioInitSegment_ = false;
      }

      var triggerUpdateend = false; // Merge multiple video and audio segments into one and append

      if (this.videoBuffer_ && sortedSegments.video.bytes) {
        sortedSegments.video.segments.unshift(sortedSegments.video.initSegment);
        sortedSegments.video.bytes += sortedSegments.video.initSegment.byteLength;
        this.concatAndAppendSegments_(sortedSegments.video, this.videoBuffer_);
      } else if (this.videoBuffer_ && (this.audioDisabled_ || !this.audioBuffer_)) {
        // The transmuxer did not return any bytes of video, meaning it was all trimmed
        // for gop alignment. Since we have a video buffer and audio is disabled, updateend
        // will never be triggered by this source buffer, which will cause contrib-hls
        // to be stuck forever waiting for updateend. If audio is not disabled, updateend
        // will be triggered by the audio buffer, which will be sent upwards since the video
        // buffer will not be in an updating state.
        triggerUpdateend = true;
      } // Add text-track data for all

      addTextTrackData(this, sortedSegments.captions, sortedSegments.metadata);

      if (!this.audioDisabled_ && this.audioBuffer_) {
        this.concatAndAppendSegments_(sortedSegments.audio, this.audioBuffer_);
      }

      this.pendingBuffers_.length = 0;

      if (triggerUpdateend) {
        this.trigger('updateend');
      } // We are no longer in the internal "updating" state

      this.bufferUpdating_ = false;
    }
    /**
     * Combine all segments into a single Uint8Array and then append them
     * to the destination buffer
     *
     * @param {Object} segmentObj segments plus their total byte count
     * @param {SourceBuffer} destinationBuffer native source buffer to append data to
     * @private
     */

  }, {
    key: 'concatAndAppendSegments_',
    value: function concatAndAppendSegments_(segmentObj, destinationBuffer) {
      var offset = 0;
      var tempBuffer = void 0;

      if (segmentObj.bytes) {
        tempBuffer = new Uint8Array(segmentObj.bytes); // Combine the individual segments into one large typed-array

        segmentObj.segments.forEach(function (segment) {
          tempBuffer.set(segment, offset);
          offset += segment.byteLength;
        });

        // appendBuffer can throw (e.g. QuotaExceededError); surface it as a
        // player error rather than letting it propagate
        try {
          destinationBuffer.updating = true;
          destinationBuffer.appendBuffer(tempBuffer);
        } catch (error) {
          if (this.mediaSource_.player_) {
            this.mediaSource_.player_.error({
              code: -3,
              type: 'APPEND_BUFFER_ERR',
              message: error.message,
              originalError: error
            });
          }
        }
      }
    }
    /**
     * Emulate the native mediasource function. abort any sourceBuffer
     * actions and throw out any un-appended data.
     *
     * @link https://developer.mozilla.org/en-US/docs/Web/API/SourceBuffer/abort
     */

  }, {
    key: 'abort',
    value: function abort() {
      if (this.videoBuffer_) {
        this.videoBuffer_.abort();
      }

      if (!this.audioDisabled_ && this.audioBuffer_) {
        this.audioBuffer_.abort();
      }

      // reset the transmuxer so any in-flight segments are discarded
      if (this.transmuxer_) {
        this.transmuxer_.postMessage({
          action: 'reset'
        });
      }

      this.pendingBuffers_.length = 0;
      this.bufferUpdating_ = false;
    }
  }]);
  return VirtualSourceBuffer;
}(videojs$1.EventTarget);
|
/** |
|
* @file html-media-source.js |
|
*/ |
|
|
|
/** |
|
* Our MediaSource implementation in HTML, mimics native |
|
* MediaSource where/if possible. |
|
* |
|
* @link https://developer.mozilla.org/en-US/docs/Web/API/MediaSource |
|
* @class HtmlMediaSource |
|
* @extends videojs.EventTarget |
|
*/ |
|
|
|
|
|
var HtmlMediaSource = function (_videojs$EventTarget) { |
|
inherits$1(HtmlMediaSource, _videojs$EventTarget); |
|
|
|
function HtmlMediaSource() { |
|
classCallCheck$1(this, HtmlMediaSource); |
|
|
|
var _this = possibleConstructorReturn$1(this, (HtmlMediaSource.__proto__ || Object.getPrototypeOf(HtmlMediaSource)).call(this)); |
|
|
|
var property = void 0; |
|
_this.nativeMediaSource_ = new window$1.MediaSource(); // delegate to the native MediaSource's methods by default |
|
|
|
for (property in _this.nativeMediaSource_) { |
|
if (!(property in HtmlMediaSource.prototype) && typeof _this.nativeMediaSource_[property] === 'function') { |
|
_this[property] = _this.nativeMediaSource_[property].bind(_this.nativeMediaSource_); |
|
} |
|
} // emulate `duration` and `seekable` until seeking can be |
|
// handled uniformly for live streams |
|
// see https://github.com/w3c/media-source/issues/5 |
|
|
|
|
|
_this.duration_ = NaN; |
|
Object.defineProperty(_this, 'duration', { |
|
get: function get$$1() { |
|
if (this.duration_ === Infinity) { |
|
return this.duration_; |
|
} |
|
|
|
return this.nativeMediaSource_.duration; |
|
}, |
|
set: function set$$1(duration) { |
|
this.duration_ = duration; |
|
|
|
if (duration !== Infinity) { |
|
this.nativeMediaSource_.duration = duration; |
|
return; |
|
} |
|
} |
|
}); |
|
Object.defineProperty(_this, 'seekable', { |
|
get: function get$$1() { |
|
if (this.duration_ === Infinity) { |
|
return videojs$1.createTimeRanges([[0, this.nativeMediaSource_.duration]]); |
|
} |
|
|
|
return this.nativeMediaSource_.seekable; |
|
} |
|
}); |
|
Object.defineProperty(_this, 'readyState', { |
|
get: function get$$1() { |
|
return this.nativeMediaSource_.readyState; |
|
} |
|
}); |
|
Object.defineProperty(_this, 'activeSourceBuffers', { |
|
get: function get$$1() { |
|
return this.activeSourceBuffers_; |
|
} |
|
}); // the list of virtual and native SourceBuffers created by this |
|
// MediaSource |
|
|
|
_this.sourceBuffers = []; |
|
_this.activeSourceBuffers_ = []; |
|
/** |
|
* update the list of active source buffers based upon various |
|
* information from HLS and video.js
|
* |
|
* @private |
|
*/ |
|
|
|
_this.updateActiveSourceBuffers_ = function () { |
|
// Retain the reference but empty the array |
|
_this.activeSourceBuffers_.length = 0; // If there is only one source buffer, then it will always be active and audio will |
|
// be disabled based on the codec of the source buffer |
|
|
|
if (_this.sourceBuffers.length === 1) { |
|
var sourceBuffer = _this.sourceBuffers[0]; |
|
sourceBuffer.appendAudioInitSegment_ = true; |
|
sourceBuffer.audioDisabled_ = !sourceBuffer.audioCodec_; |
|
|
|
_this.activeSourceBuffers_.push(sourceBuffer); |
|
|
|
return; |
|
} // There are 2 source buffers, a combined (possibly video only) source buffer and |
|
// and an audio only source buffer. |
|
// By default, the audio in the combined virtual source buffer is enabled |
|
// and the audio-only source buffer (if it exists) is disabled. |
|
|
|
|
|
var disableCombined = false; |
|
var disableAudioOnly = true; // TODO: maybe we can store the sourcebuffers on the track objects? |
|
// safari may do something like this |
|
|
|
for (var i = 0; i < _this.player_.audioTracks().length; i++) { |
|
var track = _this.player_.audioTracks()[i]; |
|
|
|
if (track.enabled && track.kind !== 'main') { |
|
// The enabled track is an alternate audio track so disable the audio in |
|
// the combined source buffer and enable the audio-only source buffer. |
|
disableCombined = true; |
|
disableAudioOnly = false; |
|
break; |
|
} |
|
} |
|
|
|
_this.sourceBuffers.forEach(function (sourceBuffer, index) { |
|
/* eslinst-disable */ |
|
// TODO once codecs are required, we can switch to using the codecs to determine |
|
// what stream is the video stream, rather than relying on videoTracks |
|
|
|
/* eslinst-enable */ |
|
sourceBuffer.appendAudioInitSegment_ = true; |
|
|
|
if (sourceBuffer.videoCodec_ && sourceBuffer.audioCodec_) { |
|
// combined |
|
sourceBuffer.audioDisabled_ = disableCombined; |
|
} else if (sourceBuffer.videoCodec_ && !sourceBuffer.audioCodec_) { |
|
// If the "combined" source buffer is video only, then we do not want |
|
// disable the audio-only source buffer (this is mostly for demuxed |
|
// audio and video hls) |
|
sourceBuffer.audioDisabled_ = true; |
|
disableAudioOnly = false; |
|
} else if (!sourceBuffer.videoCodec_ && sourceBuffer.audioCodec_) { |
|
// audio only |
|
// In the case of audio only with alternate audio and disableAudioOnly is true |
|
// this means we want to disable the audio on the alternate audio sourcebuffer |
|
// but not the main "combined" source buffer. The "combined" source buffer is |
|
// always at index 0, so this ensures audio won't be disabled in both source |
|
// buffers. |
|
sourceBuffer.audioDisabled_ = index ? disableAudioOnly : !disableAudioOnly; |
|
|
|
if (sourceBuffer.audioDisabled_) { |
|
return; |
|
} |
|
} |
|
|
|
_this.activeSourceBuffers_.push(sourceBuffer); |
|
}); |
|
}; |
|
|
|
_this.onPlayerMediachange_ = function () { |
|
_this.sourceBuffers.forEach(function (sourceBuffer) { |
|
sourceBuffer.appendAudioInitSegment_ = true; |
|
}); |
|
}; |
|
|
|
_this.onHlsReset_ = function () { |
|
_this.sourceBuffers.forEach(function (sourceBuffer) { |
|
if (sourceBuffer.transmuxer_) { |
|
sourceBuffer.transmuxer_.postMessage({ |
|
action: 'resetCaptions' |
|
}); |
|
} |
|
}); |
|
}; |
|
|
|
_this.onHlsSegmentTimeMapping_ = function (event) { |
|
_this.sourceBuffers.forEach(function (buffer) { |
|
return buffer.timeMapping_ = event.mapping; |
|
}); |
|
}; // Re-emit MediaSource events on the polyfill |
|
|
|
|
|
['sourceopen', 'sourceclose', 'sourceended'].forEach(function (eventName) { |
|
this.nativeMediaSource_.addEventListener(eventName, this.trigger.bind(this)); |
|
}, _this); // capture the associated player when the MediaSource is |
|
// successfully attached |
|
|
|
_this.on('sourceopen', function (event) { |
|
// Get the player this MediaSource is attached to |
|
var video = document.querySelector('[src="' + _this.url_ + '"]'); |
|
|
|
if (!video) { |
|
return; |
|
} |
|
|
|
_this.player_ = videojs$1(video.parentNode); |
|
|
|
if (!_this.player_) { |
|
return; |
|
} // hls-reset is fired by videojs.Hls on to the tech after the main SegmentLoader |
|
// resets its state and flushes the buffer |
|
|
|
|
|
_this.player_.tech_.on('hls-reset', _this.onHlsReset_); // hls-segment-time-mapping is fired by videojs.Hls on to the tech after the main |
|
// SegmentLoader inspects an MTS segment and has an accurate stream to display |
|
// time mapping |
|
|
|
|
|
_this.player_.tech_.on('hls-segment-time-mapping', _this.onHlsSegmentTimeMapping_); |
|
|
|
if (_this.player_.audioTracks && _this.player_.audioTracks()) { |
|
_this.player_.audioTracks().on('change', _this.updateActiveSourceBuffers_); |
|
|
|
_this.player_.audioTracks().on('addtrack', _this.updateActiveSourceBuffers_); |
|
|
|
_this.player_.audioTracks().on('removetrack', _this.updateActiveSourceBuffers_); |
|
} |
|
|
|
_this.player_.on('mediachange', _this.onPlayerMediachange_); |
|
}); |
|
|
|
_this.on('sourceended', function (event) { |
|
var duration = durationOfVideo(_this.duration); |
|
|
|
for (var i = 0; i < _this.sourceBuffers.length; i++) { |
|
var sourcebuffer = _this.sourceBuffers[i]; |
|
var cues = sourcebuffer.metadataTrack_ && sourcebuffer.metadataTrack_.cues; |
|
|
|
if (cues && cues.length) { |
|
cues[cues.length - 1].endTime = duration; |
|
} |
|
} |
|
}); // explicitly terminate any WebWorkers that were created |
|
// by SourceHandlers |
|
|
|
|
|
_this.on('sourceclose', function (event) { |
|
this.sourceBuffers.forEach(function (sourceBuffer) { |
|
if (sourceBuffer.transmuxer_) { |
|
sourceBuffer.transmuxer_.terminate(); |
|
} |
|
}); |
|
this.sourceBuffers.length = 0; |
|
|
|
if (!this.player_) { |
|
return; |
|
} |
|
|
|
if (this.player_.audioTracks && this.player_.audioTracks()) { |
|
this.player_.audioTracks().off('change', this.updateActiveSourceBuffers_); |
|
this.player_.audioTracks().off('addtrack', this.updateActiveSourceBuffers_); |
|
this.player_.audioTracks().off('removetrack', this.updateActiveSourceBuffers_); |
|
} // We can only change this if the player hasn't been disposed of yet |
|
// because `off` eventually tries to use the el_ property. If it has |
|
// been disposed of, then don't worry about it because there are no |
|
// event handlers left to unbind anyway |
|
|
|
|
|
if (this.player_.el_) { |
|
this.player_.off('mediachange', this.onPlayerMediachange_); |
|
} |
|
|
|
if (this.player_.tech_ && this.player_.tech_.el_) { |
|
this.player_.tech_.off('hls-reset', this.onHlsReset_); |
|
this.player_.tech_.off('hls-segment-time-mapping', this.onHlsSegmentTimeMapping_); |
|
} |
|
}); |
|
|
|
return _this; |
|
} |
|
/** |
|
   * Add a range that can now be seeked to.
|
* |
|
* @param {Double} start where to start the addition |
|
* @param {Double} end where to end the addition |
|
* @private |
|
*/ |
|
|
|
|
|
createClass$1(HtmlMediaSource, [{ |
|
key: 'addSeekableRange_', |
|
value: function addSeekableRange_(start, end) { |
|
var error = void 0; |
|
|
|
if (this.duration !== Infinity) { |
|
error = new Error('MediaSource.addSeekableRange() can only be invoked ' + 'when the duration is Infinity'); |
|
error.name = 'InvalidStateError'; |
|
error.code = 11; |
|
throw error; |
|
} |
|
|
|
if (end > this.nativeMediaSource_.duration || isNaN(this.nativeMediaSource_.duration)) { |
|
this.nativeMediaSource_.duration = end; |
|
} |
|
} |
|
/** |
|
* Add a source buffer to the media source. |
|
* |
|
* @link https://developer.mozilla.org/en-US/docs/Web/API/MediaSource/addSourceBuffer |
|
* @param {String} type the content-type of the content |
|
* @return {Object} the created source buffer |
|
*/ |
|
|
|
}, { |
|
key: 'addSourceBuffer', |
|
value: function addSourceBuffer(type) { |
|
var buffer = void 0; |
|
var parsedType = parseContentType(type); // Create a VirtualSourceBuffer to transmux MPEG-2 transport |
|
// stream segments into fragmented MP4s |
|
|
|
if (/^(video|audio)\/mp2t$/i.test(parsedType.type)) { |
|
var codecs = []; |
|
|
|
if (parsedType.parameters && parsedType.parameters.codecs) { |
|
codecs = parsedType.parameters.codecs.split(','); |
|
codecs = translateLegacyCodecs(codecs); |
|
codecs = codecs.filter(function (codec) { |
|
return isAudioCodec(codec) || isVideoCodec(codec); |
|
}); |
|
} |
|
|
|
if (codecs.length === 0) { |
|
codecs = ['avc1.4d400d', 'mp4a.40.2']; |
|
} |
|
|
|
buffer = new VirtualSourceBuffer(this, codecs); |
|
|
|
if (this.sourceBuffers.length !== 0) { |
|
// If another VirtualSourceBuffer already exists, then we are creating a |
|
// SourceBuffer for an alternate audio track and therefore we know that |
|
// the source has both an audio and video track. |
|
// That means we should trigger the manual creation of the real |
|
// SourceBuffers instead of waiting for the transmuxer to return data |
|
this.sourceBuffers[0].createRealSourceBuffers_(); |
|
buffer.createRealSourceBuffers_(); // Automatically disable the audio on the first source buffer if |
|
// a second source buffer is ever created |
|
|
|
this.sourceBuffers[0].audioDisabled_ = true; |
|
} |
|
} else { |
|
// delegate to the native implementation |
|
buffer = this.nativeMediaSource_.addSourceBuffer(type); |
|
} |
|
|
|
this.sourceBuffers.push(buffer); |
|
return buffer; |
|
} |
|
}]); |
|
return HtmlMediaSource; |
|
}(videojs$1.EventTarget); |
|
/** |
|
* @file videojs-contrib-media-sources.js |
|
*/ |
|
|
|
|
|
  // Monotonically increasing counter historically used to mint unique object
  // URLs for emulated (non-native) media sources.
  var urlCount = 0; // ------------
  // Media Source
  // ------------
  // store references to the media sources so they can be connected
  // to a video element (a swf object)
  // TODO: can we store this somewhere local to this module?

  // Registry mapping object URLs back to their MediaSource instances, exposed
  // on the videojs namespace so `open` (below) can look a source up by URL.
  videojs$1.mediaSources = {};
|
/** |
|
* Provide a method for a swf object to notify JS that a |
|
* media source is now open. |
|
* |
|
* @param {String} msObjectURL string referencing the MSE Object URL |
|
* @param {String} swfId the swf id |
|
*/ |
|
|
|
var open = function open(msObjectURL, swfId) { |
|
var mediaSource = videojs$1.mediaSources[msObjectURL]; |
|
|
|
if (mediaSource) { |
|
mediaSource.trigger({ |
|
type: 'sourceopen', |
|
swfId: swfId |
|
}); |
|
} else { |
|
throw new Error('Media Source not found (Video.js)'); |
|
} |
|
}; |
|
/** |
|
* Check to see if the native MediaSource object exists and supports |
|
* an MP4 container with both H.264 video and AAC-LC audio. |
|
* |
|
* @return {Boolean} if native media sources are supported |
|
*/ |
|
|
|
|
|
var supportsNativeMediaSources = function supportsNativeMediaSources() { |
|
return !!window$1.MediaSource && !!window$1.MediaSource.isTypeSupported && window$1.MediaSource.isTypeSupported('video/mp4;codecs="avc1.4d400d,mp4a.40.2"'); |
|
}; |
|
/** |
|
* An emulation of the MediaSource API so that we can support |
|
* native and non-native functionality. returns an instance of |
|
* HtmlMediaSource. |
|
* |
|
* @link https://developer.mozilla.org/en-US/docs/Web/API/MediaSource/MediaSource |
|
*/ |
|
|
|
|
|
var MediaSource = function MediaSource() { |
|
this.MediaSource = { |
|
open: open, |
|
supportsNativeMediaSources: supportsNativeMediaSources |
|
}; |
|
|
|
if (supportsNativeMediaSources()) { |
|
return new HtmlMediaSource(); |
|
} |
|
|
|
throw new Error('Cannot use create a virtual MediaSource for this video'); |
|
}; |
|
|
|
MediaSource.open = open; |
|
MediaSource.supportsNativeMediaSources = supportsNativeMediaSources; |
|
/** |
|
* A wrapper around the native URL for our MSE object |
|
* implementation, this object is exposed under videojs.URL |
|
* |
|
* @link https://developer.mozilla.org/en-US/docs/Web/API/URL/URL |
|
*/ |
|
|
|
var URL$1 = { |
|
/** |
|
* A wrapper around the native createObjectURL for our objects. |
|
* This function maps a native or emulated mediaSource to a blob |
|
* url so that it can be loaded into video.js |
|
* |
|
* @link https://developer.mozilla.org/en-US/docs/Web/API/URL/createObjectURL |
|
* @param {MediaSource} object the object to create a blob url to |
|
*/ |
|
createObjectURL: function createObjectURL(object) { |
|
var objectUrlPrefix = 'blob:vjs-media-source/'; |
|
var url = void 0; // use the native MediaSource to generate an object URL |
|
|
|
if (object instanceof HtmlMediaSource) { |
|
url = window$1.URL.createObjectURL(object.nativeMediaSource_); |
|
object.url_ = url; |
|
return url; |
|
} // if the object isn't an emulated MediaSource, delegate to the |
|
// native implementation |
|
|
|
|
|
if (!(object instanceof HtmlMediaSource)) { |
|
url = window$1.URL.createObjectURL(object); |
|
object.url_ = url; |
|
return url; |
|
} // build a URL that can be used to map back to the emulated |
|
// MediaSource |
|
|
|
|
|
url = objectUrlPrefix + urlCount; |
|
urlCount++; // setup the mapping back to object |
|
|
|
videojs$1.mediaSources[url] = object; |
|
return url; |
|
} |
|
}; |
|
  // Expose the MediaSource emulation and the URL wrapper on the videojs
  // namespace so other parts of VHS (and external plugins) can reach them.
  videojs$1.MediaSource = MediaSource;
  videojs$1.URL = URL$1;

  // Local aliases for videojs utilities used by the DASH playlist loader below.
  var EventTarget$1$1 = videojs$1.EventTarget,
      mergeOptions$2 = videojs$1.mergeOptions;
|
/** |
|
* Returns a new master manifest that is the result of merging an updated master manifest |
|
* into the original version. |
|
* |
|
* @param {Object} oldMaster |
|
* The old parsed mpd object |
|
* @param {Object} newMaster |
|
* The updated parsed mpd object |
|
* @return {Object} |
|
* A new object representing the original master manifest with the updated media |
|
* playlists merged in |
|
*/ |
|
|
|
  var updateMaster$1 = function updateMaster$$1(oldMaster, newMaster) {
    // When truthy at the end, the merge is reported as a no-op (null return).
    // NOTE(review): the flag is set true whenever ANY playlist in the first
    // loop is unchanged, and only reset by a media-group update -- so a master
    // where some playlists changed and some didn't can still return null.
    // Preserved as-is; confirm against the upstream dash-playlist-loader
    // before changing.
    var noChanges = void 0;
    // Start from the old master, overlaying the top-level properties that are
    // allowed to change between refreshes.
    var update = mergeOptions$2(oldMaster, {
      // These are top level properties that can be updated
      duration: newMaster.duration,
      minimumUpdatePeriod: newMaster.minimumUpdatePeriod
    }); // First update the playlists in playlist list

    for (var i = 0; i < newMaster.playlists.length; i++) {
      // updateMaster (the per-playlist helper) returns a new master when the
      // playlist differed, or a falsy value when nothing changed.
      var playlistUpdate = updateMaster(update, newMaster.playlists[i]);

      if (playlistUpdate) {
        update = playlistUpdate;
      } else {
        noChanges = true;
      }
    } // Then update media group playlists

    forEachMediaGroup(newMaster, function (properties, type, group, label) {
      if (properties.playlists && properties.playlists.length) {
        var uri = properties.playlists[0].uri;

        var _playlistUpdate = updateMaster(update, properties.playlists[0]);

        if (_playlistUpdate) {
          update = _playlistUpdate; // update the playlist reference within media groups

          update.mediaGroups[type][group][label].playlists[0] = update.playlists[uri];
          noChanges = false;
        }
      }
    });

    // A null return tells callers (refreshXml_/refreshMedia_) to keep the
    // existing master untouched.
    if (noChanges) {
      return null;
    }

    return update;
  };
|
|
|
  /**
   * Playlist loader for DASH sources. Unlike the HLS loader, all "playlists"
   * are internal to the single MPD manifest, so media switches are resolved
   * locally (via phony placeholder URIs) rather than by fetching new URLs.
   * Progresses through the states HAVE_NOTHING -> HAVE_MASTER -> HAVE_METADATA.
   */
  var DashPlaylistLoader = function (_EventTarget) {
    inherits$1(DashPlaylistLoader, _EventTarget); // DashPlaylistLoader must accept either a src url or a playlist because subsequent
    // playlist loader setups from media groups will expect to be able to pass a playlist
    // (since there aren't external URLs to media playlists with DASH)

    /**
     * @param {String|Object} srcUrlOrPlaylist manifest URL (master loader) or a
     *        parsed playlist object (child loader for a media group)
     * @param {Object} hls the hls source handler (provides `xhr`)
     * @param {Object} [options] withCredentials / handleManifestRedirects flags
     * @param {DashPlaylistLoader} [masterPlaylistLoader] parent loader, when
     *        this instance is a child loader
     * @throws {Error} when srcUrlOrPlaylist is falsy
     */
    function DashPlaylistLoader(srcUrlOrPlaylist, hls) {
      var options = arguments.length > 2 && arguments[2] !== undefined ? arguments[2] : {};
      var masterPlaylistLoader = arguments[3];
      classCallCheck$1(this, DashPlaylistLoader);

      var _this = possibleConstructorReturn$1(this, (DashPlaylistLoader.__proto__ || Object.getPrototypeOf(DashPlaylistLoader)).call(this));

      // Destructure options with defaults (transpiled form).
      var _options$withCredenti = options.withCredentials,
          withCredentials = _options$withCredenti === undefined ? false : _options$withCredenti,
          _options$handleManife = options.handleManifestRedirects,
          handleManifestRedirects = _options$handleManife === undefined ? false : _options$handleManife;
      _this.hls_ = hls;
      _this.withCredentials = withCredentials;
      _this.handleManifestRedirects = handleManifestRedirects;

      if (!srcUrlOrPlaylist) {
        throw new Error('A non-empty playlist URL or playlist is required');
      } // event naming?

      // Refresh the manifest XML when the MPD's minimumUpdatePeriod elapses.
      _this.on('minimumUpdatePeriod', function () {
        _this.refreshXml_();
      }); // live playlist staleness timeout

      _this.on('mediaupdatetimeout', function () {
        _this.refreshMedia_();
      });

      _this.state = 'HAVE_NOTHING';
      _this.loadedPlaylists_ = {}; // initialize the loader state
      // The masterPlaylistLoader will be created with a string

      if (typeof srcUrlOrPlaylist === 'string') {
        _this.srcUrl = srcUrlOrPlaylist;
        return possibleConstructorReturn$1(_this);
      }

      // Otherwise this is a child loader created with a playlist object.
      _this.setupChildLoader(masterPlaylistLoader, srcUrlOrPlaylist);

      return _this;
    }

    createClass$1(DashPlaylistLoader, [{
      key: 'setupChildLoader',
      // Record the parent loader and the playlist this child was created for.
      value: function setupChildLoader(masterPlaylistLoader, playlist) {
        this.masterPlaylistLoader_ = masterPlaylistLoader;
        this.childPlaylist_ = playlist;
      }
    }, {
      key: 'dispose',
      // Abort any in-flight request and cancel the pending refresh timer.
      value: function dispose() {
        this.stopRequest();
        this.loadedPlaylists_ = {};
        window$1.clearTimeout(this.mediaUpdateTimeout);
      }
    }, {
      key: 'hasPendingRequest',
      // True when either an xhr or a deferred (setTimeout-based) media
      // request is outstanding.
      value: function hasPendingRequest() {
        return this.request || this.mediaRequest_;
      }
    }, {
      key: 'stopRequest',
      // Detach handlers before aborting so the abort does not fire callbacks.
      value: function stopRequest() {
        if (this.request) {
          var oldRequest = this.request;
          this.request = null;
          oldRequest.onreadystatechange = null;
          oldRequest.abort();
        }
      }
    }, {
      key: 'media',
      /**
       * Get (no argument) or set (playlist object or URI string) the active
       * media playlist. Setting triggers mediachanging/mediachange events and
       * eventually haveMetadata on the next tick.
       */
      value: function media(playlist) {
        // getter
        if (!playlist) {
          return this.media_;
        } // setter

        if (this.state === 'HAVE_NOTHING') {
          throw new Error('Cannot switch media playlist from ' + this.state);
        }

        var startingState = this.state; // find the playlist object if the target playlist has been specified by URI

        if (typeof playlist === 'string') {
          if (!this.master.playlists[playlist]) {
            throw new Error('Unknown playlist URI: ' + playlist);
          }

          playlist = this.master.playlists[playlist];
        }

        var mediaChange = !this.media_ || playlist.uri !== this.media_.uri; // switch to previously loaded playlists immediately

        if (mediaChange && this.loadedPlaylists_[playlist.uri] && this.loadedPlaylists_[playlist.uri].endList) {
          this.state = 'HAVE_METADATA';
          this.media_ = playlist; // trigger media change if the active media has been updated

          if (mediaChange) {
            this.trigger('mediachanging');
            this.trigger('mediachange');
          }

          return;
        } // switching to the active playlist is a no-op

        if (!mediaChange) {
          return;
        } // switching from an already loaded playlist

        if (this.media_) {
          this.trigger('mediachanging');
        } // TODO: check for sidx here
        // Continue asynchronously if there is no sidx
        // wait one tick to allow haveMaster to run first on a child loader

        this.mediaRequest_ = window$1.setTimeout(this.haveMetadata.bind(this, {
          startingState: startingState,
          playlist: playlist
        }), 0);
      }
    }, {
      key: 'haveMetadata',
      // Completes a media switch: records the playlist, refreshes it, and
      // fires loadedmetadata (first time) or mediachange (subsequent times).
      value: function haveMetadata(_ref) {
        var startingState = _ref.startingState,
            playlist = _ref.playlist;
        this.state = 'HAVE_METADATA';
        this.media_ = playlist;
        this.loadedPlaylists_[playlist.uri] = playlist;
        this.mediaRequest_ = null; // This will trigger loadedplaylist

        this.refreshMedia_(); // fire loadedmetadata the first time a media playlist is loaded
        // to resolve setup of media groups

        if (startingState === 'HAVE_MASTER') {
          this.trigger('loadedmetadata');
        } else {
          // trigger media change if the active media has been updated
          this.trigger('mediachange');
        }
      }
    }, {
      key: 'pause',
      // Stop requests and refresh timers; resumable via load()/start().
      value: function pause() {
        this.stopRequest();
        window$1.clearTimeout(this.mediaUpdateTimeout);

        if (this.state === 'HAVE_NOTHING') {
          // If we pause the loader before any data has been retrieved, its as if we never
          // started, so reset to an unstarted state.
          this.started = false;
        }
      }
    }, {
      key: 'load',
      /**
       * (Re)start loading. With isFinalRendition, just schedule a retry after
       * half a target duration (or 5s) instead of loading immediately.
       */
      value: function load(isFinalRendition) {
        var _this2 = this;

        window$1.clearTimeout(this.mediaUpdateTimeout);
        var media = this.media();

        if (isFinalRendition) {
          var delay = media ? media.targetDuration / 2 * 1000 : 5 * 1000;
          this.mediaUpdateTimeout = window$1.setTimeout(function () {
            return _this2.load();
          }, delay);
          return;
        } // because the playlists are internal to the manifest, load should either load the
        // main manifest, or do nothing but trigger an event

        if (!this.started) {
          this.start();
          return;
        }

        this.trigger('loadedplaylist');
      }
      /**
       * Parses the master xml string and updates playlist uri references
       *
       * @return {Object}
       *         The parsed mpd manifest object
       */

    }, {
      key: 'parseMasterXml',
      value: function parseMasterXml() {
        var master = parse(this.masterXml_, {
          manifestUri: this.srcUrl,
          clientOffset: this.clientOffset_
        });
        master.uri = this.srcUrl; // Set up phony URIs for the playlists since we won't have external URIs for DASH
        // but reference playlists by their URI throughout the project
        // TODO: Should we create the dummy uris in mpd-parser as well (leaning towards yes).

        for (var i = 0; i < master.playlists.length; i++) {
          var phonyUri = 'placeholder-uri-' + i;
          master.playlists[i].uri = phonyUri; // set up by URI references

          master.playlists[phonyUri] = master.playlists[i];
        } // set up phony URIs for the media group playlists since we won't have external
        // URIs for DASH but reference playlists by their URI throughout the project

        forEachMediaGroup(master, function (properties, mediaType, groupKey, labelKey) {
          if (properties.playlists && properties.playlists.length) {
            var _phonyUri = 'placeholder-uri-' + mediaType + '-' + groupKey + '-' + labelKey;

            properties.playlists[0].uri = _phonyUri; // setup URI references

            master.playlists[_phonyUri] = properties.playlists[0];
          }
        });
        setupMediaPlaylists(master);
        resolveMediaGroupUris(master);
        return master;
      }
    }, {
      key: 'start',
      // Begin loading: child loaders defer to haveMaster_ asynchronously;
      // the master loader fetches the MPD over xhr.
      value: function start() {
        var _this3 = this;

        this.started = true; // We don't need to request the master manifest again
        // Call this asynchronously to match the xhr request behavior below

        if (this.masterPlaylistLoader_) {
          this.mediaRequest_ = window$1.setTimeout(this.haveMaster_.bind(this), 0);
          return;
        } // request the specified URL

        this.request = this.hls_.xhr({
          uri: this.srcUrl,
          withCredentials: this.withCredentials
        }, function (error, req) {
          // disposed
          if (!_this3.request) {
            return;
          } // clear the loader's request reference

          _this3.request = null;

          if (error) {
            _this3.error = {
              status: req.status,
              message: 'DASH playlist request error at URL: ' + _this3.srcUrl,
              responseText: req.responseText,
              // MEDIA_ERR_NETWORK
              code: 2
            };

            // A failure before any data means we never really started.
            if (_this3.state === 'HAVE_NOTHING') {
              _this3.started = false;
            }

            return _this3.trigger('error');
          }

          _this3.masterXml_ = req.responseText;

          // Prefer the server's Date header for clock sync; fall back to
          // the local clock.
          if (req.responseHeaders && req.responseHeaders.date) {
            _this3.masterLoaded_ = Date.parse(req.responseHeaders.date);
          } else {
            _this3.masterLoaded_ = Date.now();
          }

          _this3.srcUrl = resolveManifestRedirect(_this3.handleManifestRedirects, _this3.srcUrl, req);

          _this3.syncClientServerClock_(_this3.onClientServerClockSync_.bind(_this3));
        });
      }
      /**
       * Parses the master xml for UTCTiming node to sync the client clock to the server
       * clock. If the UTCTiming node requires a HEAD or GET request, that request is made.
       *
       * @param {Function} done
       *        Function to call when clock sync has completed
       */

    }, {
      key: 'syncClientServerClock_',
      value: function syncClientServerClock_(done) {
        var _this4 = this;

        var utcTiming = parseUTCTiming(this.masterXml_); // No UTCTiming element found in the mpd. Use Date header from mpd request as the
        // server clock

        if (utcTiming === null) {
          this.clientOffset_ = this.masterLoaded_ - Date.now();
          return done();
        }

        if (utcTiming.method === 'DIRECT') {
          this.clientOffset_ = utcTiming.value - Date.now();
          return done();
        }

        // HEAD or GET scheme: fetch the timing URL and derive the server time
        // from the response.
        this.request = this.hls_.xhr({
          uri: resolveUrl$1(this.srcUrl, utcTiming.value),
          method: utcTiming.method,
          withCredentials: this.withCredentials
        }, function (error, req) {
          // disposed
          if (!_this4.request) {
            return;
          }

          if (error) {
            // sync request failed, fall back to using date header from mpd
            // TODO: log warning
            _this4.clientOffset_ = _this4.masterLoaded_ - Date.now();
            return done();
          }

          var serverTime = void 0;

          if (utcTiming.method === 'HEAD') {
            if (!req.responseHeaders || !req.responseHeaders.date) {
              // expected date header not present, fall back to using date header from mpd
              // TODO: log warning
              serverTime = _this4.masterLoaded_;
            } else {
              serverTime = Date.parse(req.responseHeaders.date);
            }
          } else {
            serverTime = Date.parse(req.responseText);
          }

          _this4.clientOffset_ = serverTime - Date.now();
          done();
        });
      }
    }, {
      key: 'haveMaster_',
      // Transition into HAVE_MASTER. The master loader parses the MPD and
      // announces it; a child loader selects its own playlist instead.
      value: function haveMaster_() {
        this.state = 'HAVE_MASTER'; // clear media request

        this.mediaRequest_ = null;

        if (!this.masterPlaylistLoader_) {
          this.master = this.parseMasterXml(); // We have the master playlist at this point, so
          // trigger this to allow MasterPlaylistController
          // to make an initial playlist selection

          this.trigger('loadedplaylist');
        } else if (!this.media_) {
          // no media playlist was specifically selected so select
          // the one the child playlist loader was created with
          this.media(this.childPlaylist_);
        }
      }
      /**
       * Handler for after client/server clock synchronization has happened. Sets up
       * xml refresh timer if specified by the manifest.
       */

    }, {
      key: 'onClientServerClockSync_',
      value: function onClientServerClockSync_() {
        var _this5 = this;

        this.haveMaster_();

        // Auto-select the first playlist when nothing else chose one.
        if (!this.hasPendingRequest() && !this.media_) {
          this.media(this.master.playlists[0]);
        } // TODO: minimumUpdatePeriod can have a value of 0. Currently the manifest will not
        // be refreshed when this is the case. The inter-op guide says that when the
        // minimumUpdatePeriod is 0, the manifest should outline all currently available
        // segments, but future segments may require an update. I think a good solution
        // would be to update the manifest at the same rate that the media playlists
        // are "refreshed", i.e. every targetDuration.

        if (this.master && this.master.minimumUpdatePeriod) {
          window$1.setTimeout(function () {
            _this5.trigger('minimumUpdatePeriod');
          }, this.master.minimumUpdatePeriod);
        }
      }
      /**
       * Sends request to refresh the master xml and updates the parsed master manifest
       * TODO: Does the client offset need to be recalculated when the xml is refreshed?
       */

    }, {
      key: 'refreshXml_',
      value: function refreshXml_() {
        var _this6 = this; // The srcUrl here *may* need to pass through handleManifestRedirects when
        // sidx is implemented

        this.request = this.hls_.xhr({
          uri: this.srcUrl,
          withCredentials: this.withCredentials
        }, function (error, req) {
          // disposed
          if (!_this6.request) {
            return;
          } // clear the loader's request reference

          _this6.request = null;

          if (error) {
            _this6.error = {
              status: req.status,
              message: 'DASH playlist request error at URL: ' + _this6.srcUrl,
              responseText: req.responseText,
              // MEDIA_ERR_NETWORK
              code: 2
            };

            if (_this6.state === 'HAVE_NOTHING') {
              _this6.started = false;
            }

            return _this6.trigger('error');
          }

          _this6.masterXml_ = req.responseText;

          var newMaster = _this6.parseMasterXml();

          // Merge; a null result means nothing changed and the old master stays.
          var updatedMaster = updateMaster$1(_this6.master, newMaster);

          if (updatedMaster) {
            _this6.master = updatedMaster;
          }

          // Schedule the next refresh cycle.
          window$1.setTimeout(function () {
            _this6.trigger('minimumUpdatePeriod');
          }, _this6.master.minimumUpdatePeriod);
        });
      }
      /**
       * Refreshes the media playlist by re-parsing the master xml and updating playlist
       * references. If this is an alternate loader, the updated parsed manifest is retrieved
       * from the master loader.
       */

    }, {
      key: 'refreshMedia_',
      value: function refreshMedia_() {
        var _this7 = this;

        var oldMaster = void 0;
        var newMaster = void 0;

        // A child loader re-parses through its master loader so both share
        // one authoritative master object.
        if (this.masterPlaylistLoader_) {
          oldMaster = this.masterPlaylistLoader_.master;
          newMaster = this.masterPlaylistLoader_.parseMasterXml();
        } else {
          oldMaster = this.master;
          newMaster = this.parseMasterXml();
        }

        var updatedMaster = updateMaster$1(oldMaster, newMaster);

        if (updatedMaster) {
          if (this.masterPlaylistLoader_) {
            this.masterPlaylistLoader_.master = updatedMaster;
          } else {
            this.master = updatedMaster;
          }

          // Re-point media_ at the refreshed playlist object (same phony URI).
          this.media_ = updatedMaster.playlists[this.media_.uri];
        } else {
          this.trigger('playlistunchanged');
        }

        // Live playlists (no endList) get re-refreshed on a delay.
        if (!this.media().endList) {
          this.mediaUpdateTimeout = window$1.setTimeout(function () {
            _this7.trigger('mediaupdatetimeout');
          }, refreshDelay(this.media(), !!updatedMaster));
        }

        this.trigger('loadedplaylist');
      }
    }]);
    return DashPlaylistLoader;
  }(EventTarget$1$1);
|
|
|
var logger = function logger(source) { |
|
if (videojs$1.log.debug) { |
|
return videojs$1.log.debug.bind(videojs$1, 'VHS:', source + ' >'); |
|
} |
|
|
|
return function () {}; |
|
}; |
|
|
|
function noop$1() {} |
|
/** |
|
* @file source-updater.js |
|
*/ |
|
|
|
/** |
|
* A queue of callbacks to be serialized and applied when a |
|
* MediaSource and its associated SourceBuffers are not in the |
|
* updating state. It is used by the segment loader to update the |
|
* underlying SourceBuffers when new data is loaded, for instance. |
|
* |
|
* @class SourceUpdater |
|
* @param {MediaSource} mediaSource the MediaSource to create the |
|
* SourceBuffer from |
|
* @param {String} mimeType the desired MIME type of the underlying |
|
* SourceBuffer |
|
* @param {Object} sourceBufferEmitter an event emitter that fires when a source buffer is |
|
* added to the media source |
|
*/ |
|
|
|
|
|
var SourceUpdater = function () { |
|
function SourceUpdater(mediaSource, mimeType, type, sourceBufferEmitter) { |
|
classCallCheck$1(this, SourceUpdater); |
|
this.callbacks_ = []; |
|
this.pendingCallback_ = null; |
|
this.timestampOffset_ = 0; |
|
this.mediaSource = mediaSource; |
|
this.processedAppend_ = false; |
|
this.type_ = type; |
|
this.mimeType_ = mimeType; |
|
this.logger_ = logger('SourceUpdater[' + type + '][' + mimeType + ']'); |
|
|
|
if (mediaSource.readyState === 'closed') { |
|
mediaSource.addEventListener('sourceopen', this.createSourceBuffer_.bind(this, mimeType, sourceBufferEmitter)); |
|
} else { |
|
this.createSourceBuffer_(mimeType, sourceBufferEmitter); |
|
} |
|
} |
|
|
|
createClass$1(SourceUpdater, [{ |
|
key: 'createSourceBuffer_', |
|
value: function createSourceBuffer_(mimeType, sourceBufferEmitter) { |
|
var _this = this; |
|
|
|
this.sourceBuffer_ = this.mediaSource.addSourceBuffer(mimeType); |
|
this.logger_('created SourceBuffer'); |
|
|
|
if (sourceBufferEmitter) { |
|
sourceBufferEmitter.trigger('sourcebufferadded'); |
|
|
|
if (this.mediaSource.sourceBuffers.length < 2) { |
|
// There's another source buffer we must wait for before we can start updating |
|
// our own (or else we can get into a bad state, i.e., appending video/audio data |
|
// before the other video/audio source buffer is available and leading to a video |
|
// or audio only buffer). |
|
sourceBufferEmitter.on('sourcebufferadded', function () { |
|
_this.start_(); |
|
}); |
|
return; |
|
} |
|
} |
|
|
|
this.start_(); |
|
} |
|
}, { |
|
key: 'start_', |
|
value: function start_() { |
|
var _this2 = this; |
|
|
|
this.started_ = true; // run completion handlers and process callbacks as updateend |
|
// events fire |
|
|
|
this.onUpdateendCallback_ = function () { |
|
var pendingCallback = _this2.pendingCallback_; |
|
_this2.pendingCallback_ = null; |
|
|
|
_this2.logger_('buffered [' + printableRange(_this2.buffered()) + ']'); |
|
|
|
if (pendingCallback) { |
|
pendingCallback(); |
|
} |
|
|
|
_this2.runCallback_(); |
|
}; |
|
|
|
this.sourceBuffer_.addEventListener('updateend', this.onUpdateendCallback_); |
|
this.runCallback_(); |
|
} |
|
/** |
|
* Aborts the current segment and resets the segment parser. |
|
* |
|
* @param {Function} done function to call when done |
|
* @see http://w3c.github.io/media-source/#widl-SourceBuffer-abort-void |
|
*/ |
|
|
|
}, { |
|
key: 'abort', |
|
value: function abort(done) { |
|
var _this3 = this; |
|
|
|
if (this.processedAppend_) { |
|
this.queueCallback_(function () { |
|
_this3.sourceBuffer_.abort(); |
|
}, done); |
|
} |
|
} |
|
/** |
|
* Queue an update to append an ArrayBuffer. |
|
* |
|
* @param {ArrayBuffer} bytes |
|
* @param {Function} done the function to call when done |
|
* @see http://www.w3.org/TR/media-source/#widl-SourceBuffer-appendBuffer-void-ArrayBuffer-data |
|
*/ |
|
|
|
}, { |
|
key: 'appendBuffer', |
|
value: function appendBuffer(config, done) { |
|
var _this4 = this; |
|
|
|
this.processedAppend_ = true; |
|
this.queueCallback_(function () { |
|
if (config.videoSegmentTimingInfoCallback) { |
|
_this4.sourceBuffer_.addEventListener('videoSegmentTimingInfo', config.videoSegmentTimingInfoCallback); |
|
} |
|
|
|
_this4.sourceBuffer_.appendBuffer(config.bytes); |
|
}, function () { |
|
if (config.videoSegmentTimingInfoCallback) { |
|
_this4.sourceBuffer_.removeEventListener('videoSegmentTimingInfo', config.videoSegmentTimingInfoCallback); |
|
} |
|
|
|
done(); |
|
}); |
|
} |
|
/** |
|
* Indicates what TimeRanges are buffered in the managed SourceBuffer. |
|
* |
|
* @see http://www.w3.org/TR/media-source/#widl-SourceBuffer-buffered |
|
*/ |
|
|
|
}, { |
|
key: 'buffered', |
|
value: function buffered() { |
|
if (!this.sourceBuffer_) { |
|
return videojs$1.createTimeRanges(); |
|
} |
|
|
|
return this.sourceBuffer_.buffered; |
|
} |
|
/** |
|
* Queue an update to remove a time range from the buffer. |
|
* |
|
* @param {Number} start where to start the removal |
|
* @param {Number} end where to end the removal |
|
* @param {Function} [done=noop] optional callback to be executed when the remove |
|
* operation is complete |
|
* @see http://www.w3.org/TR/media-source/#widl-SourceBuffer-remove-void-double-start-unrestricted-double-end |
|
*/ |
|
|
|
}, { |
|
key: 'remove', |
|
value: function remove(start, end) { |
|
var _this5 = this; |
|
|
|
var done = arguments.length > 2 && arguments[2] !== undefined ? arguments[2] : noop$1; |
|
|
|
if (this.processedAppend_) { |
|
this.queueCallback_(function () { |
|
_this5.logger_('remove [' + start + ' => ' + end + ']'); |
|
|
|
_this5.sourceBuffer_.remove(start, end); |
|
}, done); |
|
} |
|
} |
|
/** |
|
* Whether the underlying sourceBuffer is updating or not |
|
* |
|
* @return {Boolean} the updating status of the SourceBuffer |
|
*/ |
|
|
|
}, { |
|
key: 'updating', |
|
value: function updating() { |
|
// we are updating if the sourcebuffer is updating or |
|
return !this.sourceBuffer_ || this.sourceBuffer_.updating || // if we have a pending callback that is not our internal noop |
|
!!this.pendingCallback_ && this.pendingCallback_ !== noop$1; |
|
} |
|
/** |
|
* Set/get the timestampoffset on the SourceBuffer |
|
* |
|
* @return {Number} the timestamp offset |
|
*/ |
|
|
|
}, { |
|
key: 'timestampOffset', |
|
value: function timestampOffset(offset) { |
|
var _this6 = this; |
|
|
|
if (typeof offset !== 'undefined') { |
|
this.queueCallback_(function () { |
|
_this6.sourceBuffer_.timestampOffset = offset; |
|
}); |
|
this.timestampOffset_ = offset; |
|
} |
|
|
|
return this.timestampOffset_; |
|
} |
|
/** |
|
* Queue a callback to run |
|
*/ |
|
|
|
}, { |
|
key: 'queueCallback_', |
|
value: function queueCallback_(callback, done) { |
|
this.callbacks_.push([callback.bind(this), done]); |
|
this.runCallback_(); |
|
} |
|
/**
     * Run the next queued callback, if the updater is idle and ready.
     */

  }, {
    key: 'runCallback_',
    value: function runCallback_() {
      var callbacks = void 0;

      // only dequeue when no update is in flight, there is queued work, and
      // started_ has been set (presumably once the SourceBuffer exists —
      // managed elsewhere in this updater)
      if (!this.updating() && this.callbacks_.length && this.started_) {
        callbacks = this.callbacks_.shift();
        // record the completion callback *before* invoking the work callback:
        // updating() consults pendingCallback_ to decide whether work is in flight
        this.pendingCallback_ = callbacks[1];
        callbacks[0]();
      }
    }
|
/** |
|
* dispose of the source updater and the underlying sourceBuffer |
|
*/ |
|
|
|
}, { |
|
key: 'dispose', |
|
value: function dispose() { |
|
this.sourceBuffer_.removeEventListener('updateend', this.onUpdateendCallback_); |
|
|
|
if (this.sourceBuffer_ && this.mediaSource.readyState === 'open') { |
|
this.sourceBuffer_.abort(); |
|
} |
|
} |
|
}]); |
|
return SourceUpdater; |
|
}(); |
|
|
|
var Config = {
  // forward-buffer goal — presumably seconds; consumed by buffering logic
  // elsewhere in this bundle
  GOAL_BUFFER_LENGTH: 30,
  MAX_GOAL_BUFFER_LENGTH: 60,
  GOAL_BUFFER_LENGTH_RATE: 1,
  // initial bandwidth estimate in bits/sec (4194304 b/s = 0.5 MB/s)
  INITIAL_BANDWIDTH: 4194304,
  // A fudge factor to apply to advertised playlist bitrates to account for
  // temporary fluctuations in client bandwidth
  BANDWIDTH_VARIANCE: 1.2,
  // How much of the buffer must be filled before we consider upswitching
  BUFFER_LOW_WATER_LINE: 0,
  MAX_BUFFER_LOW_WATER_LINE: 30,
  BUFFER_LOW_WATER_LINE_RATE: 1
};
|
// error codes attached to failed segment/key requests by handleErrors below
var REQUEST_ERRORS = {
  FAILURE: 2,
  TIMEOUT: -101,
  ABORTED: -102
};
|
/**
 * Turns segment byterange into a string suitable for use in
 * HTTP Range requests
 *
 * @param {Object} byterange - an object with `offset` and `length` values
 *        defining the start and extent of a byte-range
 * @return {String} an HTTP Range header value, e.g. "bytes=0-1023"
 */


var byterangeStr = function byterangeStr(byterange) {
  // HTTP Range headers use inclusive ranges, so the last byte index is one
  // less than `offset + length`
  var lastByte = byterange.offset + byterange.length - 1;

  return 'bytes=' + byterange.offset + '-' + lastByte;
};
|
/**
 * Defines headers for use in the xhr request for a particular segment.
 *
 * @param {Object} segment - a simplified copy of the segmentInfo object
 *                           from SegmentLoader
 * @return {Object} headers to attach to the request; only ranged segments
 *                  produce a Range header
 */


var segmentXhrHeaders = function segmentXhrHeaders(segment) {
  if (!segment.byterange) {
    return {};
  }

  return {
    Range: byterangeStr(segment.byterange)
  };
};
|
/**
 * Abort all requests
 *
 * @param {Object} activeXhrs - an object that tracks all XHR requests
 */


var abortAll = function abortAll(activeXhrs) {
  for (var i = 0; i < activeXhrs.length; i++) {
    activeXhrs[i].abort();
  }
};
|
/**
 * Gather important bandwidth stats once a request has completed
 *
 * @param {Object} request - the XHR request from which to gather stats
 * @return {Object} bandwidth, bytesReceived and roundTripTime for the request
 */


var getRequestStats = function getRequestStats(request) {
  var stats = { bandwidth: request.bandwidth };

  // fall back to zero when the xhr wrapper did not record a value
  stats.bytesReceived = request.bytesReceived || 0;
  stats.roundTripTime = request.roundTripTime || 0;
  return stats;
};
|
/**
 * If possible gather bandwidth stats as a request is in
 * progress
 *
 * @param {Event} progressEvent - an event object from an XHR's progress event
 * @return {Object} in-flight bandwidth, bytesReceived and roundTripTime
 */


var getProgressStats = function getProgressStats(progressEvent) {
  var request = progressEvent.target;
  var roundTripTime = Date.now() - request.requestTime || 0;
  var bytesReceived = progressEvent.loaded;

  // This can produce Infinity when roundTripTime is 0, which is acceptable:
  // in-progress bandwidth is only used to decide whether to abort a request
  // early due to insufficient bandwidth
  var bandwidth = Math.floor(bytesReceived / roundTripTime * 8 * 1000);

  return {
    bandwidth: bandwidth,
    bytesReceived: bytesReceived,
    roundTripTime: roundTripTime
  };
};
|
/**
 * Handle all error conditions in one place and return an object
 * with all the information
 *
 * @param {Error|null} error - if non-null signals an error occured with the XHR
 * @param {Object} request - the XHR request that possibly generated the error
 * @return {Object|null} a descriptive error object, or null when the request
 *                       succeeded
 */


var handleErrors = function handleErrors(error, request) {
  // all three failure shapes share everything but the message and code
  var failure = function (message, code) {
    return {
      status: request.status,
      message: message + request.uri,
      code: code,
      xhr: request
    };
  };

  if (request.timedout) {
    return failure('HLS request timed-out at URL: ', REQUEST_ERRORS.TIMEOUT);
  }

  if (request.aborted) {
    return failure('HLS request aborted at URL: ', REQUEST_ERRORS.ABORTED);
  }

  if (error) {
    return failure('HLS request errored at URL: ', REQUEST_ERRORS.FAILURE);
  }

  return null;
};
|
/**
 * Handle responses for key data and convert the key data to the correct format
 * for the decryption step later
 *
 * @param {Object} segment - a simplified copy of the segmentInfo object
 * from SegmentLoader
 * @param {Function} finishProcessingFn - a callback to execute to continue processing
 * this request
 * @return {Function} an xhr response callback
 */


var handleKeyResponse = function handleKeyResponse(segment, finishProcessingFn) {
  return function (error, request) {
    var response = request.response;
    var errorObj = handleErrors(error, request);

    if (errorObj) {
      return finishProcessingFn(errorObj, segment);
    }

    // an AES-128 key is exactly 16 bytes
    if (response.byteLength !== 16) {
      return finishProcessingFn({
        status: request.status,
        message: 'Invalid HLS key at URL: ' + request.uri,
        code: REQUEST_ERRORS.FAILURE,
        xhr: request
      }, segment);
    }

    // repack the raw bytes as four big-endian 32-bit words for the decrypter
    var view = new DataView(response);

    segment.key.bytes = new Uint32Array([
      view.getUint32(0),
      view.getUint32(4),
      view.getUint32(8),
      view.getUint32(12)
    ]);
    return finishProcessingFn(null, segment);
  };
};
|
/**
 * Handle init-segment responses
 *
 * @param {Object} segment - a simplified copy of the segmentInfo object
 * from SegmentLoader
 * @param {Object} captionParser - caption parser exposing isInitialized()/init()
 *        (presumably mux.js's CaptionParser — confirm at the call site)
 * @param {Function} finishProcessingFn - a callback to execute to continue processing
 * this request
 */


var handleInitSegmentResponse = function handleInitSegmentResponse(segment, captionParser, finishProcessingFn) {
  return function (error, request) {
    var response = request.response;
    var errorObj = handleErrors(error, request);

    if (errorObj) {
      return finishProcessingFn(errorObj, segment);
    } // stop processing if received empty content


    if (response.byteLength === 0) {
      return finishProcessingFn({
        status: request.status,
        message: 'Empty HLS segment content at URL: ' + request.uri,
        code: REQUEST_ERRORS.FAILURE,
        xhr: request
      }, segment);
    }

    segment.map.bytes = new Uint8Array(request.response); // Initialize CaptionParser if it hasn't been yet

    if (!captionParser.isInitialized()) {
      captionParser.init();
    }

    // cache the timescales and video track ids probed from the init segment
    // so later media segments can be scanned for captions
    segment.map.timescales = probe.timescale(segment.map.bytes);
    segment.map.videoTrackIds = probe.videoTrackIds(segment.map.bytes);
    return finishProcessingFn(null, segment);
  };
};
|
/**
 * Response handler for segment-requests being sure to set the correct
 * property depending on whether the segment is encryped or not
 * Also records and keeps track of stats that are used for ABR purposes
 *
 * @param {Object} segment - a simplified copy of the segmentInfo object
 * from SegmentLoader
 * @param {Object} captionParser - caption parser exposing isInitialized()/init()/parse()
 * @param {Function} finishProcessingFn - a callback to execute to continue processing
 * this request
 */


var handleSegmentResponse = function handleSegmentResponse(segment, captionParser, finishProcessingFn) {
  return function (error, request) {
    var response = request.response;
    var errorObj = handleErrors(error, request);
    var parsed = void 0;

    if (errorObj) {
      return finishProcessingFn(errorObj, segment);
    } // stop processing if received empty content


    if (response.byteLength === 0) {
      return finishProcessingFn({
        status: request.status,
        message: 'Empty HLS segment content at URL: ' + request.uri,
        code: REQUEST_ERRORS.FAILURE,
        xhr: request
      }, segment);
    }

    // capture byte/RTT/bandwidth stats for adaptive bitrate decisions
    segment.stats = getRequestStats(request);

    // encrypted payloads are stored separately so the decrypter can consume them
    if (segment.key) {
      segment.encryptedBytes = new Uint8Array(request.response);
    } else {
      segment.bytes = new Uint8Array(request.response);
    } // This is likely an FMP4 and has the init segment.
    // Run through the CaptionParser in case there are captions.


    if (segment.map && segment.map.bytes) {
      // Initialize CaptionParser if it hasn't been yet
      if (!captionParser.isInitialized()) {
        captionParser.init();
      }

      // NOTE(review): when `segment.key` is set, only `encryptedBytes` was
      // populated above, so `segment.bytes` is undefined here — presumably
      // encrypted segments never reach this branch with `map.bytes`; verify
      parsed = captionParser.parse(segment.bytes, segment.map.videoTrackIds, segment.map.timescales);

      if (parsed && parsed.captions) {
        segment.captionStreams = parsed.captionStreams;
        segment.fmp4Captions = parsed.captions;
      }
    }

    return finishProcessingFn(null, segment);
  };
};
|
/**
 * Decrypt the segment via the decryption web worker
 *
 * @param {WebWorker} decrypter - a WebWorker interface to AES-128 decryption routines
 * @param {Object} segment - a simplified copy of the segmentInfo object
 * from SegmentLoader
 * @param {Function} doneFn - a callback that is executed after decryption has completed
 */


var decryptSegment = function decryptSegment(decrypter, segment, doneFn) {
  var decryptionHandler = function decryptionHandler(event) {
    // requestId scopes worker replies to this particular segment request,
    // since multiple decryptions may share one worker
    if (event.data.source === segment.requestId) {
      decrypter.removeEventListener('message', decryptionHandler);
      var decrypted = event.data.decrypted;
      segment.bytes = new Uint8Array(decrypted.bytes, decrypted.byteOffset, decrypted.byteLength);
      return doneFn(null, segment);
    }
  };

  decrypter.addEventListener('message', decryptionHandler); // this is an encrypted segment
  // incrementally decrypt the segment


  // the buffers are listed as transferables, so `segment.encryptedBytes` and
  // `segment.key.bytes` are detached (no longer usable) after this call
  decrypter.postMessage(createTransferableMessage({
    source: segment.requestId,
    encrypted: segment.encryptedBytes,
    key: segment.key.bytes,
    iv: segment.key.iv
  }), [segment.encryptedBytes.buffer, segment.key.bytes.buffer]);
};
|
/**
 * This function waits for all XHRs to finish (with either success or failure)
 * before continuing processing via its callback. The function gathers errors
 * from each request into a single errors array so that the error status for
 * each request can be examined later.
 *
 * @param {Object} activeXhrs - an object that tracks all XHR requests
 * @param {WebWorker} decrypter - a WebWorker interface to AES-128 decryption routines
 * @param {Function} doneFn - a callback that is executed after all resources have been
 * downloaded and any decryption completed
 * @return {Function} a per-request completion callback shared by all requests
 * in the group; it fires doneFn exactly once per group
 */


var waitForCompletion = function waitForCompletion(activeXhrs, decrypter, doneFn) {
  // closure state shared by every request in the group
  var count = 0;
  var didError = false;
  return function (error, segment) {
    // once one request has failed the whole group is settled; ignore the rest
    if (didError) {
      return;
    }

    if (error) {
      didError = true; // If there are errors, we have to abort any outstanding requests

      abortAll(activeXhrs); // Even though the requests above are aborted, and in theory we could wait until we
      // handle the aborted events from those requests, there are some cases where we may
      // never get an aborted event. For instance, if the network connection is lost and
      // there were two requests, the first may have triggered an error immediately, while
      // the second request remains unsent. In that case, the aborted algorithm will not
      // trigger an abort: see https://xhr.spec.whatwg.org/#the-abort()-method
      //
      // We also can't rely on the ready state of the XHR, since the request that
      // triggered the connection error may also show as a ready state of 0 (unsent).
      // Therefore, we have to finish this group of requests immediately after the first
      // seen error.

      return doneFn(error, segment);
    }

    count += 1;

    if (count === activeXhrs.length) {
      // Keep track of when *all* of the requests have completed
      segment.endOfAllRequests = Date.now();

      if (segment.encryptedBytes) {
        return decryptSegment(decrypter, segment, doneFn);
      } // Otherwise, everything is ready just continue


      return doneFn(null, segment);
    }
  };
};
|
/**
 * Simple progress event callback handler that gathers some stats before
 * executing a provided callback with the `segment` object
 *
 * @param {Object} segment - a simplified copy of the segmentInfo object
 * from SegmentLoader
 * @param {Function} progressFn - a callback that is executed each time a progress event
 * is received
 * @return {Function} a progress event handler for XMLHttpRequest
 */


var handleProgress = function handleProgress(segment, progressFn) {
  return function (event) {
    var updatedStats = videojs$1.mergeOptions(segment.stats, getProgressStats(event));

    segment.stats = updatedStats;

    // record the time that we receive the first byte of data
    if (updatedStats.bytesReceived && !updatedStats.firstBytesReceivedAt) {
      updatedStats.firstBytesReceivedAt = Date.now();
    }

    return progressFn(event, segment);
  };
};
|
/**
 * Load all resources and does any processing necessary for a media-segment
 *
 * Features:
 *   decrypts the media-segment if it has a key uri and an iv
 *   aborts *all* requests if *any* one request fails
 *
 * The segment object, at minimum, has the following format:
 * {
 *   resolvedUri: String,
 *   [byterange]: { offset: Number, length: Number },
 *   [key]: {
 *     resolvedUri: String,
 *     [byterange]: { offset: Number, length: Number },
 *     iv: { bytes: Uint32Array }
 *   },
 *   [map]: {
 *     resolvedUri: String,
 *     [byterange]: { offset: Number, length: Number },
 *     [bytes]: Uint8Array
 *   }
 * }
 * ...where [name] denotes optional properties
 *
 * @param {Function} xhr - an instance of the xhr wrapper in xhr.js
 * @param {Object} xhrOptions - the base options to provide to all xhr requests
 * @param {WebWorker} decryptionWorker - a WebWorker interface to AES-128
 *                                       decryption routines
 * @param {Object} captionParser - caption parser forwarded to the init/media
 *                                 segment response handlers
 * @param {Object} segment - a simplified copy of the segmentInfo object
 *                           from SegmentLoader
 * @param {Function} progressFn - a callback that receives progress events from the main
 *                                segment's xhr request
 * @param {Function} doneFn - a callback that is executed only once all requests have
 *                            succeeded or failed
 * @returns {Function} a function that, when invoked, immediately aborts all
 *                     outstanding requests
 */


var mediaSegmentRequest = function mediaSegmentRequest(xhr, xhrOptions, decryptionWorker, captionParser, segment, progressFn, doneFn) {
  var activeXhrs = [];
  // a single shared callback settles the whole request group
  var finishProcessingFn = waitForCompletion(activeXhrs, decryptionWorker, doneFn); // optionally, request the decryption key

  if (segment.key) {
    var keyRequestOptions = videojs$1.mergeOptions(xhrOptions, {
      uri: segment.key.resolvedUri,
      responseType: 'arraybuffer'
    });
    var keyRequestCallback = handleKeyResponse(segment, finishProcessingFn);
    var keyXhr = xhr(keyRequestOptions, keyRequestCallback);
    activeXhrs.push(keyXhr);
  } // optionally, request the associated media init segment


  // only fetch the init segment when it has not already been downloaded
  if (segment.map && !segment.map.bytes) {
    var initSegmentOptions = videojs$1.mergeOptions(xhrOptions, {
      uri: segment.map.resolvedUri,
      responseType: 'arraybuffer',
      headers: segmentXhrHeaders(segment.map)
    });
    var initSegmentRequestCallback = handleInitSegmentResponse(segment, captionParser, finishProcessingFn);
    var initSegmentXhr = xhr(initSegmentOptions, initSegmentRequestCallback);
    activeXhrs.push(initSegmentXhr);
  }

  // the media segment itself is always requested
  var segmentRequestOptions = videojs$1.mergeOptions(xhrOptions, {
    uri: segment.resolvedUri,
    responseType: 'arraybuffer',
    headers: segmentXhrHeaders(segment)
  });
  var segmentRequestCallback = handleSegmentResponse(segment, captionParser, finishProcessingFn);
  var segmentXhr = xhr(segmentRequestOptions, segmentRequestCallback);
  segmentXhr.addEventListener('progress', handleProgress(segment, progressFn));
  activeXhrs.push(segmentXhr);
  return function () {
    return abortAll(activeXhrs);
  };
}; // Utilities
|
|
|
/**
 * Returns the CSS value for the specified property on an element
 * using `getComputedStyle`. Firefox has a long-standing issue where
 * getComputedStyle() may return null when running in an iframe with
 * `display: none`.
 *
 * @see https://bugzilla.mozilla.org/show_bug.cgi?id=548397
 * @param {HTMLElement} el the htmlelement to work on
 * @param {string} property the property to get the style for
 * @return {string} the computed value, or '' when it cannot be determined
 */


var safeGetComputedStyle = function safeGetComputedStyle(el, property) {
  if (!el) {
    return '';
  }

  var style = window$1.getComputedStyle(el);

  return style ? style[property] : '';
};
|
/**
 * Reusable stable sort function
 *
 * Sorts `array` in place; elements that compare equal keep their relative
 * input order (Array.prototype.sort alone does not guarantee stability in
 * all engines).
 *
 * @param {Playlists} array
 * @param {Function} sortFn Different comparators
 * @function stableSort
 */


var stableSort = function stableSort(array, sortFn) {
  // capture original positions so ties can fall back to input order
  var original = array.slice();

  array.sort(function (a, b) {
    var result = sortFn(a, b);

    return result === 0 ? original.indexOf(a) - original.indexOf(b) : result;
  });
};
|
/**
 * A comparator function to sort two playlist object by bandwidth.
 *
 * @param {Object} left a media playlist object
 * @param {Object} right a media playlist object
 * @return {Number} Greater than zero if the bandwidth attribute of
 * left is greater than the corresponding attribute of right. Less
 * than zero if the bandwidth of right is greater than left and
 * exactly zero if the two are equal.
 */


var comparePlaylistBandwidth = function comparePlaylistBandwidth(left, right) {
  var bandwidthOf = function (playlist) {
    if (playlist.attributes.BANDWIDTH) {
      return playlist.attributes.BANDWIDTH;
    }

    // playlists without a declared bandwidth sort as the most expensive
    return window$1.Number.MAX_VALUE;
  };

  return bandwidthOf(left) - bandwidthOf(right);
};
|
/**
 * A comparator function to sort two playlist object by resolution (width).
 * @param {Object} left a media playlist object
 * @param {Object} right a media playlist object
 * @return {Number} Greater than zero if the resolution.width attribute of
 * left is greater than the corresponding attribute of right. Less
 * than zero if the resolution.width of right is greater than left and
 * exactly zero if the two are equal.
 */


var comparePlaylistResolution = function comparePlaylistResolution(left, right) {
  var widthOf = function (playlist) {
    var resolution = playlist.attributes.RESOLUTION;

    if (resolution && resolution.width) {
      return resolution.width;
    }

    // playlists without resolution data sort as the widest possible
    return window$1.Number.MAX_VALUE;
  };

  var leftWidth = widthOf(left);
  var rightWidth = widthOf(right);

  // NOTE - Fallback to bandwidth sort as appropriate in cases where multiple
  // renditions have the same media dimensions/resolution
  if (leftWidth === rightWidth && left.attributes.BANDWIDTH && right.attributes.BANDWIDTH) {
    return left.attributes.BANDWIDTH - right.attributes.BANDWIDTH;
  }

  return leftWidth - rightWidth;
};
|
/**
 * Chooses the appropriate media playlist based on bandwidth and player size
 *
 * @param {Object} master
 *        Object representation of the master manifest
 * @param {Number} playerBandwidth
 *        Current calculated bandwidth of the player
 * @param {Number} playerWidth
 *        Current width of the player element
 * @param {Number} playerHeight
 *        Current height of the player element
 * @param {Boolean} limitRenditionByPlayerDimensions
 *        True if the player width and height should be used during the selection, false otherwise
 * @return {Playlist} the highest bitrate playlist less than the
 * currently detected bandwidth, accounting for some amount of
 * bandwidth variance
 */


var simpleSelector = function simpleSelector(master, playerBandwidth, playerWidth, playerHeight, limitRenditionByPlayerDimensions) {
  // convert the playlists to an intermediary representation to make comparisons easier
  var sortedPlaylistReps = master.playlists.map(function (playlist) {
    var width = void 0;
    var height = void 0;
    var bandwidth = void 0;
    width = playlist.attributes.RESOLUTION && playlist.attributes.RESOLUTION.width;
    height = playlist.attributes.RESOLUTION && playlist.attributes.RESOLUTION.height;
    bandwidth = playlist.attributes.BANDWIDTH;
    bandwidth = bandwidth || window$1.Number.MAX_VALUE;
    return {
      bandwidth: bandwidth,
      width: width,
      height: height,
      playlist: playlist
    };
  });
  // sort ascending by bandwidth; stableSort keeps manifest order for ties
  stableSort(sortedPlaylistReps, function (left, right) {
    return left.bandwidth - right.bandwidth;
  }); // filter out any playlists that have been excluded due to
  // incompatible configurations


  sortedPlaylistReps = sortedPlaylistReps.filter(function (rep) {
    return !Playlist.isIncompatible(rep.playlist);
  }); // filter out any playlists that have been disabled manually through the representations
  // api or blacklisted temporarily due to playback errors.


  var enabledPlaylistReps = sortedPlaylistReps.filter(function (rep) {
    return Playlist.isEnabled(rep.playlist);
  });

  if (!enabledPlaylistReps.length) {
    // if there are no enabled playlists, then they have all been blacklisted or disabled
    // by the user through the representations api. In this case, ignore blacklisting and
    // fallback to what the user wants by using playlists the user has not disabled.
    enabledPlaylistReps = sortedPlaylistReps.filter(function (rep) {
      return !Playlist.isDisabled(rep.playlist);
    });
  } // filter out any variant that has greater effective bitrate
  // than the current estimated bandwidth


  var bandwidthPlaylistReps = enabledPlaylistReps.filter(function (rep) {
    return rep.bandwidth * Config.BANDWIDTH_VARIANCE < playerBandwidth;
  });
  // the list is sorted ascending, so the last element has the highest bandwidth
  var highestRemainingBandwidthRep = bandwidthPlaylistReps[bandwidthPlaylistReps.length - 1]; // get all of the renditions with the same (highest) bandwidth
  // and then taking the very first element


  var bandwidthBestRep = bandwidthPlaylistReps.filter(function (rep) {
    return rep.bandwidth === highestRemainingBandwidthRep.bandwidth;
  })[0]; // if we're not going to limit renditions by player size, make an early decision.

  if (limitRenditionByPlayerDimensions === false) {
    var _chosenRep = bandwidthBestRep || enabledPlaylistReps[0] || sortedPlaylistReps[0];

    return _chosenRep ? _chosenRep.playlist : null;
  } // filter out playlists without resolution information


  var haveResolution = bandwidthPlaylistReps.filter(function (rep) {
    return rep.width && rep.height;
  }); // sort variants by resolution

  stableSort(haveResolution, function (left, right) {
    return left.width - right.width;
  }); // if we have the exact resolution as the player use it

  var resolutionBestRepList = haveResolution.filter(function (rep) {
    return rep.width === playerWidth && rep.height === playerHeight;
  });
  highestRemainingBandwidthRep = resolutionBestRepList[resolutionBestRepList.length - 1]; // ensure that we pick the highest bandwidth variant that have exact resolution

  var resolutionBestRep = resolutionBestRepList.filter(function (rep) {
    return rep.bandwidth === highestRemainingBandwidthRep.bandwidth;
  })[0];
  var resolutionPlusOneList = void 0;
  var resolutionPlusOneSmallest = void 0;
  var resolutionPlusOneRep = void 0; // find the smallest variant that is larger than the player
  // if there is no match of exact resolution


  if (!resolutionBestRep) {
    resolutionPlusOneList = haveResolution.filter(function (rep) {
      return rep.width > playerWidth || rep.height > playerHeight;
    }); // find all the variants have the same smallest resolution


    resolutionPlusOneSmallest = resolutionPlusOneList.filter(function (rep) {
      return rep.width === resolutionPlusOneList[0].width && rep.height === resolutionPlusOneList[0].height;
    }); // ensure that we also pick the highest bandwidth variant that
    // is just-larger-than the video player


    highestRemainingBandwidthRep = resolutionPlusOneSmallest[resolutionPlusOneSmallest.length - 1];
    resolutionPlusOneRep = resolutionPlusOneSmallest.filter(function (rep) {
      return rep.bandwidth === highestRemainingBandwidthRep.bandwidth;
    })[0];
  } // fallback chain of variants


  var chosenRep = resolutionPlusOneRep || resolutionBestRep || bandwidthBestRep || enabledPlaylistReps[0] || sortedPlaylistReps[0];
  return chosenRep ? chosenRep.playlist : null;
}; // Playlist Selectors
|
|
|
/**
 * Chooses the appropriate media playlist based on the most recent
 * bandwidth estimate and the player size.
 *
 * Expects to be called within the context of an instance of HlsHandler
 *
 * @return {Playlist} the highest bitrate playlist less than the
 * currently detected bandwidth, accounting for some amount of
 * bandwidth variance
 */


var lastBandwidthSelector = function lastBandwidthSelector() {
  var tech = this.tech_;
  var width = parseInt(safeGetComputedStyle(tech.el(), 'width'), 10);
  var height = parseInt(safeGetComputedStyle(tech.el(), 'height'), 10);

  return simpleSelector(this.playlists.master, this.systemBandwidth, width, height, this.limitRenditionByPlayerDimensions);
};
|
/**
 * Chooses the appropriate media playlist based on the potential to rebuffer
 *
 * @param {Object} settings
 *        Object of information required to use this selector
 * @param {Object} settings.master
 *        Object representation of the master manifest
 * @param {Number} settings.currentTime
 *        The current time of the player
 * @param {Number} settings.bandwidth
 *        Current measured bandwidth
 * @param {Number} settings.duration
 *        Duration of the media
 * @param {Number} settings.segmentDuration
 *        Segment duration to be used in round trip time calculations
 * @param {Number} settings.timeUntilRebuffer
 *        Time left in seconds until the player has to rebuffer
 * @param {Number} settings.currentTimeline
 *        The current timeline segments are being loaded from
 * @param {SyncController} settings.syncController
 *        SyncController for determining if we have a sync point for a given playlist
 * @return {Object|null}
 *         {Object} return.playlist
 *         The highest bandwidth playlist with the least amount of rebuffering
 *         {Number} return.rebufferingImpact
 *         The amount of time in seconds switching to this playlist will rebuffer. A
 *         negative value means that switching will cause zero rebuffering.
 */


var minRebufferMaxBandwidthSelector = function minRebufferMaxBandwidthSelector(settings) {
  var master = settings.master,
      currentTime = settings.currentTime,
      bandwidth = settings.bandwidth,
      duration$$1 = settings.duration,
      segmentDuration = settings.segmentDuration,
      timeUntilRebuffer = settings.timeUntilRebuffer,
      currentTimeline = settings.currentTimeline,
      syncController = settings.syncController; // filter out any playlists that have been excluded due to
  // incompatible configurations


  var compatiblePlaylists = master.playlists.filter(function (playlist) {
    return !Playlist.isIncompatible(playlist);
  }); // filter out any playlists that have been disabled manually through the representations
  // api or blacklisted temporarily due to playback errors.


  var enabledPlaylists = compatiblePlaylists.filter(Playlist.isEnabled);

  if (!enabledPlaylists.length) {
    // if there are no enabled playlists, then they have all been blacklisted or disabled
    // by the user through the representations api. In this case, ignore blacklisting and
    // fallback to what the user wants by using playlists the user has not disabled.
    enabledPlaylists = compatiblePlaylists.filter(function (playlist) {
      return !Playlist.isDisabled(playlist);
    });
  }

  // only playlists with a declared BANDWIDTH can be cost-estimated
  var bandwidthPlaylists = enabledPlaylists.filter(Playlist.hasAttribute.bind(null, 'BANDWIDTH'));
  var rebufferingEstimates = bandwidthPlaylists.map(function (playlist) {
    var syncPoint = syncController.getSyncPoint(playlist, duration$$1, currentTimeline, currentTime); // If there is no sync point for this playlist, switching to it will require a
    // sync request first. This will double the request time


    var numRequests = syncPoint ? 1 : 2;
    var requestTimeEstimate = Playlist.estimateSegmentRequestTime(segmentDuration, bandwidth, playlist);
    // positive impact means the switch would stall playback for that long
    var rebufferingImpact = requestTimeEstimate * numRequests - timeUntilRebuffer;
    return {
      playlist: playlist,
      rebufferingImpact: rebufferingImpact
    };
  });
  var noRebufferingPlaylists = rebufferingEstimates.filter(function (estimate) {
    return estimate.rebufferingImpact <= 0;
  }); // Sort by bandwidth DESC


  stableSort(noRebufferingPlaylists, function (a, b) {
    return comparePlaylistBandwidth(b.playlist, a.playlist);
  });

  if (noRebufferingPlaylists.length) {
    return noRebufferingPlaylists[0];
  }

  // every candidate rebuffers, so pick the one that rebuffers least
  stableSort(rebufferingEstimates, function (a, b) {
    return a.rebufferingImpact - b.rebufferingImpact;
  });
  return rebufferingEstimates[0] || null;
};
|
/** |
|
* Chooses the appropriate media playlist, which in this case is the lowest bitrate |
|
* one with video. If no renditions with video exist, return the lowest audio rendition. |
|
* |
|
* Expects to be called within the context of an instance of HlsHandler |
|
* |
|
* @return {Object|null} |
|
* {Object} return.playlist |
|
* The lowest bitrate playlist that contains a video codec. If no such rendition |
|
* exists pick the lowest audio rendition. |
|
*/ |
|
|
|
|
|
var lowestBitrateCompatibleVariantSelector = function lowestBitrateCompatibleVariantSelector() {
  // Only consider renditions that have not been excluded due to incompatible
  // configurations or playback errors.
  var candidates = this.playlists.master.playlists.filter(Playlist.isEnabled);

  // Order the candidates from lowest to highest bandwidth.
  stableSort(candidates, function (left, right) {
    return comparePlaylistBandwidth(left, right);
  });

  // Treat a playlist as "having video" when its CODECS attribute parses to a
  // video codec. Playlists with no video codec are assumed audio-only (this
  // is not necessarily true, although it is generally true). If no playlist
  // in the manifest advertises a video codec, everything is filtered out
  // here and we fall through to null.
  var withVideo = candidates.filter(function (candidate) {
    return parseCodecs(candidate.attributes.CODECS).videoCodec;
  });

  return withVideo[0] || null;
};
|
/** |
|
* Create captions text tracks on video.js if they do not exist |
|
* |
|
* @param {Object} inbandTextTracks a reference to current inbandTextTracks |
|
* @param {Object} tech the video.js tech |
|
* @param {Object} captionStreams the caption streams to create |
|
* @private |
|
*/ |
|
|
|
|
|
var createCaptionsTrackIfNotExists = function createCaptionsTrackIfNotExists(inbandTextTracks, tech, captionStreams) {
  // Ensure one caption TextTrack exists per caption stream (e.g. CC1..CC4).
  Object.keys(captionStreams).forEach(function (trackId) {
    // Already tracked; nothing to do.
    if (inbandTextTracks[trackId]) {
      return;
    }

    tech.trigger({
      type: 'usage',
      name: 'hls-608'
    });

    var existingTrack = tech.textTracks().getTrackById(trackId);

    if (existingTrack) {
      // Reuse an existing track with a CC# id because this was
      // very likely created by videojs-contrib-hls from information
      // in the m3u8 for us to use
      inbandTextTracks[trackId] = existingTrack;
    } else {
      // Otherwise, create a track with the default `CC#` label and
      // without a language
      inbandTextTracks[trackId] = tech.addRemoteTextTrack({
        kind: 'captions',
        id: trackId,
        label: trackId
      }, false).track;
    }
  });
};
|
|
|
/**
 * Add caption cues to their corresponding text tracks.
 *
 * Consistency fix: use the shimmed `window$1` (the `global/window` module used
 * everywhere else in this file, e.g. by the segment loader's timers) instead of
 * the bare global `window`, so the correct window object is used in shimmed /
 * non-browser environments.
 *
 * @param {Object} _ref
 * @param {Object} _ref.inbandTextTracks - map of caption stream name -> TextTrack
 * @param {Array}  _ref.captionArray - parsed captions ({stream, startTime, endTime, text})
 * @param {Number} _ref.timestampOffset - offset applied to cue start/end times
 */
var addCaptionData = function addCaptionData(_ref) {
  var inbandTextTracks = _ref.inbandTextTracks,
      captionArray = _ref.captionArray,
      timestampOffset = _ref.timestampOffset;

  // nothing parsed; nothing to add
  if (!captionArray) {
    return;
  }

  // Prefer WebKitDataCue where available (Safari), fall back to VTTCue.
  var Cue = window$1.WebKitDataCue || window$1.VTTCue;

  captionArray.forEach(function (caption) {
    var track = caption.stream;
    var startTime = caption.startTime;
    var endTime = caption.endTime;

    // skip captions for streams we are not tracking
    if (!inbandTextTracks[track]) {
      return;
    }

    // shift cue times into the display timeline
    startTime += timestampOffset;
    endTime += timestampOffset;
    inbandTextTracks[track].addCue(new Cue(startTime, endTime, caption.text));
  });
};
|
/** |
|
* @file segment-loader.js |
|
*/ |
|
// in ms |
|
|
|
|
|
var CHECK_BUFFER_DELAY = 500; |
|
/** |
|
* Determines if we should call endOfStream on the media source based |
|
 * on the state of the buffer or if the appended segment was the final
|
* segment in the playlist. |
|
* |
|
* @param {Object} playlist a media playlist object |
|
* @param {Object} mediaSource the MediaSource object |
|
* @param {Number} segmentIndex the index of segment we last appended |
|
* @returns {Boolean} do we need to call endOfStream on the MediaSource |
|
*/ |
|
|
|
var detectEndOfStream = function detectEndOfStream(playlist, mediaSource, segmentIndex) {
  // Without both a playlist and a media source there is nothing to end.
  if (!playlist || !mediaSource) {
    return false;
  }

  // The final segment has been appended once the index of the last appended
  // segment walks off the end of the segment list.
  var appendedLastSegment = segmentIndex === playlist.segments.length;
  var sourceOpen = mediaSource.readyState === 'open';

  // Only a VOD-style playlist (endList) with a still-open MediaSource should
  // trigger endOfStream; the MediaSource then fires `ended` when it runs out
  // of buffered data instead of waiting on us.
  return playlist.endList && sourceOpen && appendedLastSegment;
};
|
|
|
var finite = function finite(num) {
  // True only for actual numbers that are neither NaN nor +/-Infinity.
  if (typeof num !== 'number') {
    return false;
  }

  return isFinite(num);
};
|
|
|
var illegalMediaSwitch = function illegalMediaSwitch(loaderType, startingMedia, newSegmentMedia) {
  // Restrict the checks to the 'main' loader for now; although they would
  // most likely also apply to other loader types, this narrows the scope.
  // With no media info on either side there is nothing to compare.
  if (loaderType !== 'main' || !startingMedia || !newSegmentMedia) {
    return null;
  }

  var hadVideo = startingMedia.containsVideo;
  var hasVideo = newSegmentMedia.containsVideo;
  var hasAudio = newSegmentMedia.containsAudio;

  if (!hasAudio && !hasVideo) {
    return 'Neither audio nor video found in segment.';
  }

  // A muxed (video) stream cannot switch to audio-only mid-stream.
  if (hadVideo && !hasVideo) {
    return 'Only audio found in segment when we expected video.' + ' We can\'t switch to audio only from a stream that had video.' + ' To get rid of this message, please add codec information to the manifest.';
  }

  // Nor can an audio-only stream suddenly gain video.
  if (!hadVideo && hasVideo) {
    return 'Video found in segment when we expected only audio.' + ' We can\'t switch to a stream with video from an audio only stream.' + ' To get rid of this message, please add codec information to the manifest.';
  }

  return null;
};
|
/** |
|
 * Calculates a time value that is safe to remove from the back buffer without interrupting
|
* playback. |
|
* |
|
* @param {TimeRange} seekable |
|
* The current seekable range |
|
* @param {Number} currentTime |
|
* The current time of the player |
|
* @param {Number} targetDuration |
|
* The target duration of the current playlist |
|
* @return {Number} |
|
 *         Time that is safe to remove from the back buffer without interrupting playback
|
*/ |
|
|
|
|
|
var safeBackBufferTrimTime = function safeBackBufferTrimTime(seekable$$1, currentTime, targetDuration) {
  // Default: trim anything older than 30 seconds behind the playhead.
  var removeToTime = currentTime - 30;

  // When a usable seekable window exists (starts after zero and before the
  // playhead), its start is the safe trim limit instead.
  if (seekable$$1.length && seekable$$1.start(0) > 0 && seekable$$1.start(0) < currentTime) {
    removeToTime = seekable$$1.start(0);
  }

  // Never trim within a target duration of the playhead, to avoid removing
  // the GOP currently being played, which could cause playback stalls.
  return Math.min(removeToTime, currentTime - targetDuration);
};
|
|
|
var segmentInfoString = function segmentInfoString(segmentInfo) {
  // Human-readable summary of a segment append, used for debug logging.
  var segment = segmentInfo.segment;
  var playlist = segmentInfo.playlist;
  var segments = playlist.segments === undefined ? [] : playlist.segments;
  var firstSeq = playlist.mediaSequence;
  var lastSeq = firstSeq + segments.length;

  var positionPart = 'appending [' + segmentInfo.mediaIndex + '] of [' + firstSeq + ', ' + lastSeq + '] from playlist [' + playlist.id + ']';
  var timingPart = '[' + segment.start + ' => ' + segment.end + '] in timeline [' + segmentInfo.timeline + ']';

  return positionPart + ' ' + timingPart;
};
|
/** |
|
* An object that manages segment loading and appending. |
|
* |
|
* @class SegmentLoader |
|
* @param {Object} options required and optional options |
|
* @extends videojs.EventTarget |
|
*/ |
|
|
|
|
|
var SegmentLoader = function (_videojs$EventTarget) { |
|
inherits$1(SegmentLoader, _videojs$EventTarget); |
|
|
|
  function SegmentLoader(settings) {
    classCallCheck$1(this, SegmentLoader); // check pre-conditions

    var _this = possibleConstructorReturn$1(this, (SegmentLoader.__proto__ || Object.getPrototypeOf(SegmentLoader)).call(this));

    // validate the required settings before touching any state
    if (!settings) {
      throw new TypeError('Initialization settings are required');
    }

    if (typeof settings.currentTime !== 'function') {
      throw new TypeError('No currentTime getter specified');
    }

    if (!settings.mediaSource) {
      throw new TypeError('No MediaSource specified');
    } // public properties


    _this.bandwidth = settings.bandwidth;
    // running throughput estimate: cumulative average `rate` over `count` samples
    _this.throughput = {
      rate: 0,
      count: 0
    };
    _this.roundTrip = NaN;

    _this.resetStats_();

    // index of the last appended segment in the current playlist, or null
    // when the loader is not yet synced to a rendition
    _this.mediaIndex = null; // private settings

    _this.hasPlayed_ = settings.hasPlayed;
    _this.currentTime_ = settings.currentTime;
    _this.seekable_ = settings.seekable;
    _this.seeking_ = settings.seeking;
    _this.duration_ = settings.duration;
    _this.mediaSource_ = settings.mediaSource;
    _this.hls_ = settings.hls;
    _this.loaderType_ = settings.loaderType;
    _this.startingMedia_ = void 0;
    _this.segmentMetadataTrack_ = settings.segmentMetadataTrack;
    _this.goalBufferLength_ = settings.goalBufferLength;
    _this.sourceType_ = settings.sourceType;
    _this.inbandTextTracks_ = settings.inbandTextTracks;
    _this.state_ = 'INIT'; // private instance variables

    _this.checkBufferTimeout_ = null;
    _this.error_ = void 0;
    _this.currentTimeline_ = -1;
    _this.pendingSegment_ = null;
    _this.mimeType_ = null;
    _this.sourceUpdater_ = null;
    _this.xhrOptions_ = null; // Fragmented mp4 playback

    _this.activeInitSegmentId_ = null;
    _this.initSegments_ = {}; // Fmp4 CaptionParser

    _this.captionParser_ = new mp4_6();
    _this.decrypter_ = settings.decrypter; // Manages the tracking and generation of sync-points, mappings
    // between a time in the display time and a segment index within
    // a playlist

    _this.syncController_ = settings.syncController;
    _this.syncPoint_ = {
      segmentIndex: 0,
      time: 0
    };

    // re-emit sync updates so owners of this loader can react to them
    _this.syncController_.on('syncinfoupdate', function () {
      return _this.trigger('syncinfoupdate');
    });

    // a (re)opened media source means the stream is no longer ended
    _this.mediaSource_.addEventListener('sourceopen', function () {
      return _this.ended_ = false;
    }); // ...for determining the fetch location


    _this.fetchAtBuffer_ = false;
    _this.logger_ = logger('SegmentLoader[' + _this.loaderType_ + ']');
    // route all state changes through an accessor so every transition is logged
    Object.defineProperty(_this, 'state', {
      get: function get$$1() {
        return this.state_;
      },
      set: function set$$1(newState) {
        if (newState !== this.state_) {
          this.logger_(this.state_ + ' -> ' + newState);
          this.state_ = newState;
        }
      }
    });
    return _this;
  }
|
/** |
|
* reset all of our media stats |
|
* |
|
* @private |
|
*/ |
|
|
|
|
|
  createClass$1(SegmentLoader, [{
    key: 'resetStats_',
    value: function resetStats_() {
      // Zero all byte/request/time accounting gathered while downloading media.
      this.mediaBytesTransferred = 0;
      this.mediaRequests = 0;
      this.mediaRequestsAborted = 0;
      this.mediaRequestsTimedout = 0;
      this.mediaRequestsErrored = 0;
      this.mediaTransferDuration = 0;
      this.mediaSecondsLoaded = 0;
    }
    /**
     * dispose of the SegmentLoader and reset to the default state
     */

  }, {
    key: 'dispose',
    value: function dispose() {
      this.state = 'DISPOSED';
      // stop the buffer-check loop and cancel any in-flight segment request
      this.pause();
      this.abort_();

      if (this.sourceUpdater_) {
        this.sourceUpdater_.dispose();
      }

      this.resetStats_();
      // release state held by the fmp4 caption parser
      this.captionParser_.reset();
    }
|
/** |
|
 * abort anything that is currently going on with the SegmentLoader
|
* and reset to a default state |
|
*/ |
|
|
|
}, { |
|
key: 'abort', |
|
value: function abort() { |
|
if (this.state !== 'WAITING') { |
|
if (this.pendingSegment_) { |
|
this.pendingSegment_ = null; |
|
} |
|
|
|
return; |
|
} |
|
|
|
this.abort_(); // We aborted the requests we were waiting on, so reset the loader's state to READY |
|
// since we are no longer "waiting" on any requests. XHR callback is not always run |
|
// when the request is aborted. This will prevent the loader from being stuck in the |
|
// WAITING state indefinitely. |
|
|
|
this.state = 'READY'; // don't wait for buffer check timeouts to begin fetching the |
|
// next segment |
|
|
|
if (!this.paused()) { |
|
this.monitorBuffer_(); |
|
} |
|
} |
|
/** |
|
* abort all pending xhr requests and null any pending segements |
|
* |
|
* @private |
|
*/ |
|
|
|
}, { |
|
key: 'abort_', |
|
value: function abort_() { |
|
if (this.pendingSegment_) { |
|
this.pendingSegment_.abortRequests(); |
|
} // clear out the segment being processed |
|
|
|
|
|
this.pendingSegment_ = null; |
|
} |
|
/** |
|
 * set an error on the segment loader and null out any pending segments
|
* |
|
* @param {Error} error the error to set on the SegmentLoader |
|
* @return {Error} the error that was set or that is currently set |
|
*/ |
|
|
|
  }, {
    key: 'error',
    value: function error(_error) {
      // Combined getter/setter: with an argument, record it as the loader's
      // error; in either case drop any pending segment and return the
      // currently set error (possibly undefined).
      if (typeof _error !== 'undefined') {
        this.error_ = _error;
      }

      this.pendingSegment_ = null;
      return this.error_;
    }
  }, {
    key: 'endOfStream',
    value: function endOfStream() {
      // Mark the stream as finished, stop fetching, and notify listeners.
      this.ended_ = true;
      this.pause();
      this.trigger('ended');
    }
|
/** |
|
* Indicates which time ranges are buffered |
|
* |
|
* @return {TimeRange} |
|
* TimeRange object representing the current buffered ranges |
|
*/ |
|
|
|
}, { |
|
key: 'buffered_', |
|
value: function buffered_() { |
|
if (!this.sourceUpdater_) { |
|
return videojs$1.createTimeRanges(); |
|
} |
|
|
|
return this.sourceUpdater_.buffered(); |
|
} |
|
/** |
|
* Gets and sets init segment for the provided map |
|
* |
|
* @param {Object} map |
|
* The map object representing the init segment to get or set |
|
* @param {Boolean=} set |
|
* If true, the init segment for the provided map should be saved |
|
* @return {Object} |
|
* map object for desired init segment |
|
*/ |
|
|
|
}, { |
|
key: 'initSegment', |
|
value: function initSegment(map) { |
|
var set$$1 = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : false; |
|
|
|
if (!map) { |
|
return null; |
|
} |
|
|
|
var id = initSegmentId(map); |
|
var storedMap = this.initSegments_[id]; |
|
|
|
if (set$$1 && !storedMap && map.bytes) { |
|
this.initSegments_[id] = storedMap = { |
|
resolvedUri: map.resolvedUri, |
|
byterange: map.byterange, |
|
bytes: map.bytes, |
|
timescales: map.timescales, |
|
videoTrackIds: map.videoTrackIds |
|
}; |
|
} |
|
|
|
return storedMap || map; |
|
} |
|
/** |
|
* Returns true if all configuration required for loading is present, otherwise false. |
|
* |
|
* @return {Boolean} True if the all configuration is ready for loading |
|
* @private |
|
*/ |
|
|
|
}, { |
|
key: 'couldBeginLoading_', |
|
value: function couldBeginLoading_() { |
|
return this.playlist_ && ( // the source updater is created when init_ is called, so either having a |
|
// source updater or being in the INIT state with a mimeType is enough |
|
// to say we have all the needed configuration to start loading. |
|
this.sourceUpdater_ || this.mimeType_ && this.state === 'INIT') && !this.paused(); |
|
} |
|
/** |
|
* load a playlist and start to fill the buffer |
|
*/ |
|
|
|
}, { |
|
key: 'load', |
|
value: function load() { |
|
// un-pause |
|
this.monitorBuffer_(); // if we don't have a playlist yet, keep waiting for one to be |
|
// specified |
|
|
|
if (!this.playlist_) { |
|
return; |
|
} // not sure if this is the best place for this |
|
|
|
|
|
this.syncController_.setDateTimeMapping(this.playlist_); // if all the configuration is ready, initialize and begin loading |
|
|
|
if (this.state === 'INIT' && this.couldBeginLoading_()) { |
|
return this.init_(); |
|
} // if we're in the middle of processing a segment already, don't |
|
// kick off an additional segment request |
|
|
|
|
|
if (!this.couldBeginLoading_() || this.state !== 'READY' && this.state !== 'INIT') { |
|
return; |
|
} |
|
|
|
this.state = 'READY'; |
|
} |
|
/** |
|
* Once all the starting parameters have been specified, begin |
|
* operation. This method should only be invoked from the INIT |
|
* state. |
|
* |
|
* @private |
|
*/ |
|
|
|
  }, {
    key: 'init_',
    value: function init_() {
      this.state = 'READY';
      // Creating the SourceUpdater attaches (or queues attaching) a source
      // buffer of this loader's mimeType to the media source.
      this.sourceUpdater_ = new SourceUpdater(this.mediaSource_, this.mimeType_, this.loaderType_, this.sourceBufferEmitter_);
      // start from a clean buffer/sync state before the first request
      this.resetEverything();
      return this.monitorBuffer_();
    }
|
/** |
|
* set a playlist on the segment loader |
|
* |
|
* @param {PlaylistLoader} media the playlist to set on the segment loader |
|
*/ |
|
|
|
  }, {
    key: 'playlist',
    value: function playlist(newPlaylist) {
      var options = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {};

      if (!newPlaylist) {
        return;
      }

      var oldPlaylist = this.playlist_;
      var segmentInfo = this.pendingSegment_;
      this.playlist_ = newPlaylist;
      this.xhrOptions_ = options; // when we haven't started playing yet, the start of a live playlist
      // is always our zero-time so force a sync update each time the playlist
      // is refreshed from the server

      if (!this.hasPlayed_()) {
        newPlaylist.syncInfo = {
          mediaSequence: newPlaylist.mediaSequence,
          time: 0
        };
      }

      // identify the previous playlist by id when available, else by uri
      var oldId = null;

      if (oldPlaylist) {
        if (oldPlaylist.id) {
          oldId = oldPlaylist.id;
        } else if (oldPlaylist.uri) {
          oldId = oldPlaylist.uri;
        }
      }

      this.logger_('playlist update [' + oldId + ' => ' + (newPlaylist.id || newPlaylist.uri) + ']'); // in VOD, this is always a rendition switch (or we updated our syncInfo above)
      // in LIVE, we always want to update with new playlists (including refreshes)

      this.trigger('syncinfoupdate'); // if we were unpaused but waiting for a playlist, start
      // buffering now

      if (this.state === 'INIT' && this.couldBeginLoading_()) {
        return this.init_();
      }

      if (!oldPlaylist || oldPlaylist.uri !== newPlaylist.uri) {
        if (this.mediaIndex !== null) {
          // we must "resync" the segment loader when we switch renditions and
          // the segment loader is already synced to the previous rendition
          this.resyncLoader();
        } // the rest of this function depends on `oldPlaylist` being defined


        return;
      } // we reloaded the same playlist so we are in a live scenario
      // and we will likely need to adjust the mediaIndex


      var mediaSequenceDiff = newPlaylist.mediaSequence - oldPlaylist.mediaSequence;
      this.logger_('live window shift [' + mediaSequenceDiff + ']'); // update the mediaIndex on the SegmentLoader
      // this is important because we can abort a request and this value must be
      // equal to the last appended mediaIndex

      if (this.mediaIndex !== null) {
        this.mediaIndex -= mediaSequenceDiff;
      } // update the mediaIndex on the SegmentInfo object
      // this is important because we will update this.mediaIndex with this value
      // in `handleUpdateEnd_` after the segment has been successfully appended


      if (segmentInfo) {
        segmentInfo.mediaIndex -= mediaSequenceDiff; // we need to update the referenced segment so that timing information is
        // saved for the new playlist's segment, however, if the segment fell off the
        // playlist, we can leave the old reference and just lose the timing info

        if (segmentInfo.mediaIndex >= 0) {
          segmentInfo.segment = newPlaylist.segments[segmentInfo.mediaIndex];
        }
      }

      // carry timing info for segments that slid out of the live window
      this.syncController_.saveExpiredSegmentInfo(oldPlaylist, newPlaylist);
    }
|
/** |
|
* Prevent the loader from fetching additional segments. If there |
|
* is a segment request outstanding, it will finish processing |
|
* before the loader halts. A segment loader can be unpaused by |
|
* calling load(). |
|
*/ |
|
|
|
}, { |
|
key: 'pause', |
|
value: function pause() { |
|
if (this.checkBufferTimeout_) { |
|
window$1.clearTimeout(this.checkBufferTimeout_); |
|
this.checkBufferTimeout_ = null; |
|
} |
|
} |
|
/** |
|
* Returns whether the segment loader is fetching additional |
|
* segments when given the opportunity. This property can be |
|
* modified through calls to pause() and load(). |
|
*/ |
|
|
|
}, { |
|
key: 'paused', |
|
value: function paused() { |
|
return this.checkBufferTimeout_ === null; |
|
} |
|
/** |
|
* create/set the following mimetype on the SourceBuffer through a |
|
* SourceUpdater |
|
* |
|
* @param {String} mimeType the mime type string to use |
|
* @param {Object} sourceBufferEmitter an event emitter that fires when a source buffer |
|
* is added to the media source |
|
*/ |
|
|
|
  }, {
    key: 'mimeType',
    value: function mimeType(_mimeType, sourceBufferEmitter) {
      // the mime type may only be configured once per loader
      if (this.mimeType_) {
        return;
      }

      this.mimeType_ = _mimeType;
      this.sourceBufferEmitter_ = sourceBufferEmitter; // if we were unpaused but waiting for a sourceUpdater, start
      // buffering now


      if (this.state === 'INIT' && this.couldBeginLoading_()) {
        this.init_();
      }
    }
|
/** |
|
* Delete all the buffered data and reset the SegmentLoader |
|
* @param {Function} [done] an optional callback to be executed when the remove |
|
* operation is complete |
|
*/ |
|
|
|
  }, {
    key: 'resetEverything',
    value: function resetEverything(done) {
      this.ended_ = false;
      this.resetLoader();
      // remove the entire buffered range; `done` runs once the removal
      // operation completes
      this.remove(0, this.duration_(), done); // clears fmp4 captions

      this.captionParser_.clearAllCaptions();
      this.trigger('reseteverything');
    }
    /**
     * Force the SegmentLoader to resync and start loading around the currentTime instead
     * of starting at the end of the buffer
     *
     * Useful for fast quality changes
     */

  }, {
    key: 'resetLoader',
    value: function resetLoader() {
      this.fetchAtBuffer_ = false;
      this.resyncLoader();
    }
    /**
     * Force the SegmentLoader to restart synchronization and make a conservative guess
     * before returning to the simple walk-forward method
     */

  }, {
    key: 'resyncLoader',
    value: function resyncLoader() {
      // forget position bookkeeping and abort any request that was based on
      // the old sync state
      this.mediaIndex = null;
      this.syncPoint_ = null;
      this.abort();
    }
|
/** |
|
* Remove any data in the source buffer between start and end times |
|
* @param {Number} start - the start time of the region to remove from the buffer |
|
* @param {Number} end - the end time of the region to remove from the buffer |
|
* @param {Function} [done] - an optional callback to be executed when the remove |
|
* operation is complete |
|
*/ |
|
|
|
}, { |
|
key: 'remove', |
|
value: function remove(start, end, done) { |
|
if (this.sourceUpdater_) { |
|
this.sourceUpdater_.remove(start, end, done); |
|
} |
|
|
|
removeCuesFromTrack(start, end, this.segmentMetadataTrack_); |
|
|
|
if (this.inbandTextTracks_) { |
|
for (var id in this.inbandTextTracks_) { |
|
removeCuesFromTrack(start, end, this.inbandTextTracks_[id]); |
|
} |
|
} |
|
} |
|
/** |
|
* (re-)schedule monitorBufferTick_ to run as soon as possible |
|
* |
|
* @private |
|
*/ |
|
|
|
}, { |
|
key: 'monitorBuffer_', |
|
value: function monitorBuffer_() { |
|
if (this.checkBufferTimeout_) { |
|
window$1.clearTimeout(this.checkBufferTimeout_); |
|
} |
|
|
|
this.checkBufferTimeout_ = window$1.setTimeout(this.monitorBufferTick_.bind(this), 1); |
|
} |
|
/** |
|
* As long as the SegmentLoader is in the READY state, periodically |
|
* invoke fillBuffer_(). |
|
* |
|
* @private |
|
*/ |
|
|
|
  }, {
    key: 'monitorBufferTick_',
    value: function monitorBufferTick_() {
      // only attempt to fill while READY; any other state means a request is
      // in flight or the loader is not fully configured yet
      if (this.state === 'READY') {
        this.fillBuffer_();
      }

      // reschedule ourselves, clearing any competing timer first
      if (this.checkBufferTimeout_) {
        window$1.clearTimeout(this.checkBufferTimeout_);
      }

      this.checkBufferTimeout_ = window$1.setTimeout(this.monitorBufferTick_.bind(this), CHECK_BUFFER_DELAY);
    }
|
/** |
|
   * fill the buffer with segments unless the sourceBuffers are
|
* currently updating |
|
* |
|
* Note: this function should only ever be called by monitorBuffer_ |
|
* and never directly |
|
* |
|
* @private |
|
*/ |
|
|
|
  }, {
    key: 'fillBuffer_',
    value: function fillBuffer_() {
      // an append is in progress; try again on the next tick rather than
      // queueing work behind the source buffer
      if (this.sourceUpdater_.updating()) {
        return;
      }

      if (!this.syncPoint_) {
        this.syncPoint_ = this.syncController_.getSyncPoint(this.playlist_, this.duration_(), this.currentTimeline_, this.currentTime_());
      } // see if we need to begin loading immediately


      var segmentInfo = this.checkBuffer_(this.buffered_(), this.playlist_, this.mediaIndex, this.hasPlayed_(), this.currentTime_(), this.syncPoint_);

      if (!segmentInfo) {
        return;
      }

      if (this.isEndOfStream_(segmentInfo.mediaIndex)) {
        this.endOfStream();
        return;
      }

      // skip re-requesting the final segment once the media source has
      // already ended, unless the user is seeking
      if (segmentInfo.mediaIndex === this.playlist_.segments.length - 1 && this.mediaSource_.readyState === 'ended' && !this.seeking_()) {
        return;
      } // We will need to change timestampOffset of the sourceBuffer if either of
      // the following conditions are true:
      // - The segment.timeline !== this.currentTimeline
      //   (we are crossing a discontinuity somehow)
      // - The "timestampOffset" for the start of this segment is less than
      //   the currently set timestampOffset
      // Also, clear captions if we are crossing a discontinuity boundary


      if (segmentInfo.timeline !== this.currentTimeline_ || segmentInfo.startOfSegment !== null && segmentInfo.startOfSegment < this.sourceUpdater_.timestampOffset()) {
        this.syncController_.reset();
        segmentInfo.timestampOffset = segmentInfo.startOfSegment;
        this.captionParser_.clearAllCaptions();
      }

      this.loadSegment_(segmentInfo);
    }
|
/** |
|
* Determines if this segment loader is at the end of it's stream. |
|
* |
|
* @param {Number} mediaIndex the index of segment we last appended |
|
* @param {Object} [playlist=this.playlist_] a media playlist object |
|
* @returns {Boolean} true if at end of stream, false otherwise. |
|
*/ |
|
|
|
  }, {
    key: 'isEndOfStream_',
    value: function isEndOfStream_(mediaIndex) {
      var playlist = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : this.playlist_;
      // end of stream requires both the playlist/buffer state to say so AND
      // no append currently in progress on the source buffers
      return detectEndOfStream(playlist, this.mediaSource_, mediaIndex) && !this.sourceUpdater_.updating();
    }
|
/** |
|
* Determines what segment request should be made, given current playback |
|
* state. |
|
* |
|
* @param {TimeRanges} buffered - the state of the buffer |
|
* @param {Object} playlist - the playlist object to fetch segments from |
|
* @param {Number} mediaIndex - the previous mediaIndex fetched or null |
|
* @param {Boolean} hasPlayed - a flag indicating whether we have played or not |
|
* @param {Number} currentTime - the playback position in seconds |
|
* @param {Object} syncPoint - a segment info object that describes the |
|
* @returns {Object} a segment request object that describes the segment to load |
|
*/ |
|
|
|
  }, {
    key: 'checkBuffer_',
    value: function checkBuffer_(buffered, playlist, mediaIndex, hasPlayed, currentTime, syncPoint) {
      var lastBufferedEnd = 0;
      var startOfSegment = void 0;

      if (buffered.length) {
        lastBufferedEnd = buffered.end(buffered.length - 1);
      }

      // seconds of forward buffer relative to the playhead
      var bufferedTime = Math.max(0, lastBufferedEnd - currentTime);

      if (!playlist.segments.length) {
        return null;
      } // if there is plenty of content buffered, and the video has
      // been played before relax for awhile


      if (bufferedTime >= this.goalBufferLength_()) {
        return null;
      } // if the video has not yet played once, and we already have
      // one segment downloaded do nothing


      if (!hasPlayed && bufferedTime >= 1) {
        return null;
      } // When the syncPoint is null, there is no way of determining a good
      // conservative segment index to fetch from
      // The best thing to do here is to get the kind of sync-point data by
      // making a request


      if (syncPoint === null) {
        mediaIndex = this.getSyncSegmentCandidate_(playlist);
        return this.generateSegmentInfo_(playlist, mediaIndex, null, true);
      } // Under normal playback conditions fetching is a simple walk forward


      if (mediaIndex !== null) {
        var segment = playlist.segments[mediaIndex];

        // start the next segment where the previous one is known to end,
        // falling back to the end of the buffer when timing info is missing
        if (segment && segment.end) {
          startOfSegment = segment.end;
        } else {
          startOfSegment = lastBufferedEnd;
        }

        return this.generateSegmentInfo_(playlist, mediaIndex + 1, startOfSegment, false);
      } // There is a sync-point but the lack of a mediaIndex indicates that
      // we need to make a good conservative guess about which segment to
      // fetch


      if (this.fetchAtBuffer_) {
        // Find the segment containing the end of the buffer
        var mediaSourceInfo = Playlist.getMediaInfoForTime(playlist, lastBufferedEnd, syncPoint.segmentIndex, syncPoint.time);
        mediaIndex = mediaSourceInfo.mediaIndex;
        startOfSegment = mediaSourceInfo.startTime;
      } else {
        // Find the segment containing currentTime
        var _mediaSourceInfo = Playlist.getMediaInfoForTime(playlist, currentTime, syncPoint.segmentIndex, syncPoint.time);

        mediaIndex = _mediaSourceInfo.mediaIndex;
        startOfSegment = _mediaSourceInfo.startTime;
      }

      return this.generateSegmentInfo_(playlist, mediaIndex, startOfSegment, false);
    }
|
/** |
|
* The segment loader has no recourse except to fetch a segment in the |
|
* current playlist and use the internal timestamps in that segment to |
|
* generate a syncPoint. This function returns a good candidate index |
|
* for that process. |
|
* |
|
* @param {Object} playlist - the playlist object to look for a |
|
* @returns {Number} An index of a segment from the playlist to load |
|
*/ |
|
|
|
}, { |
|
key: 'getSyncSegmentCandidate_', |
|
value: function getSyncSegmentCandidate_(playlist) { |
|
var _this2 = this; |
|
|
|
if (this.currentTimeline_ === -1) { |
|
return 0; |
|
} |
|
|
|
var segmentIndexArray = playlist.segments.map(function (s, i) { |
|
return { |
|
timeline: s.timeline, |
|
segmentIndex: i |
|
}; |
|
}).filter(function (s) { |
|
return s.timeline === _this2.currentTimeline_; |
|
}); |
|
|
|
if (segmentIndexArray.length) { |
|
return segmentIndexArray[Math.min(segmentIndexArray.length - 1, 1)].segmentIndex; |
|
} |
|
|
|
return Math.max(playlist.segments.length - 1, 0); |
|
} |
|
}, { |
|
key: 'generateSegmentInfo_', |
|
value: function generateSegmentInfo_(playlist, mediaIndex, startOfSegment, isSyncRequest) { |
|
if (mediaIndex < 0 || mediaIndex >= playlist.segments.length) { |
|
return null; |
|
} |
|
|
|
var segment = playlist.segments[mediaIndex]; |
|
return { |
|
requestId: 'segment-loader-' + Math.random(), |
|
// resolve the segment URL relative to the playlist |
|
uri: segment.resolvedUri, |
|
// the segment's mediaIndex at the time it was requested |
|
mediaIndex: mediaIndex, |
|
// whether or not to update the SegmentLoader's state with this |
|
// segment's mediaIndex |
|
isSyncRequest: isSyncRequest, |
|
startOfSegment: startOfSegment, |
|
// the segment's playlist |
|
playlist: playlist, |
|
// unencrypted bytes of the segment |
|
bytes: null, |
|
// when a key is defined for this segment, the encrypted bytes |
|
encryptedBytes: null, |
|
// The target timestampOffset for this segment when we append it |
|
// to the source buffer |
|
timestampOffset: null, |
|
// The timeline that the segment is in |
|
timeline: segment.timeline, |
|
// The expected duration of the segment in seconds |
|
duration: segment.duration, |
|
// retain the segment in case the playlist updates while doing an async process |
|
segment: segment |
|
}; |
|
} |
|
/** |
|
* Determines if the network has enough bandwidth to complete the current segment |
|
* request in a timely manner. If not, the request will be aborted early and bandwidth |
|
* updated to trigger a playlist switch. |
|
* |
|
* @param {Object} stats |
|
* Object containing stats about the request timing and size |
|
* @return {Boolean} True if the request was aborted, false otherwise |
|
* @private |
|
*/ |
|
|
|
}, { |
|
key: 'abortRequestEarly_', |
|
value: function abortRequestEarly_(stats) { |
|
if (this.hls_.tech_.paused() || // Don't abort if the current playlist is on the lowestEnabledRendition |
|
// TODO: Replace using timeout with a boolean indicating whether this playlist is |
|
// the lowestEnabledRendition. |
|
!this.xhrOptions_.timeout || // Don't abort if we have no bandwidth information to estimate segment sizes |
|
!this.playlist_.attributes.BANDWIDTH) { |
|
return false; |
|
} // Wait at least 1 second since the first byte of data has been received before |
|
// using the calculated bandwidth from the progress event to allow the bitrate |
|
// to stabilize |
|
|
|
|
|
if (Date.now() - (stats.firstBytesReceivedAt || Date.now()) < 1000) { |
|
return false; |
|
} |
|
|
|
var currentTime = this.currentTime_(); |
|
var measuredBandwidth = stats.bandwidth; |
|
var segmentDuration = this.pendingSegment_.duration; |
|
var requestTimeRemaining = Playlist.estimateSegmentRequestTime(segmentDuration, measuredBandwidth, this.playlist_, stats.bytesReceived); // Subtract 1 from the timeUntilRebuffer so we still consider an early abort |
|
// if we are only left with less than 1 second when the request completes. |
|
// A negative timeUntilRebuffering indicates we are already rebuffering |
|
|
|
var timeUntilRebuffer$$1 = timeUntilRebuffer(this.buffered_(), currentTime, this.hls_.tech_.playbackRate()) - 1; // Only consider aborting early if the estimated time to finish the download |
|
// is larger than the estimated time until the player runs out of forward buffer |
|
|
|
if (requestTimeRemaining <= timeUntilRebuffer$$1) { |
|
return false; |
|
} |
|
|
|
var switchCandidate = minRebufferMaxBandwidthSelector({ |
|
master: this.hls_.playlists.master, |
|
currentTime: currentTime, |
|
bandwidth: measuredBandwidth, |
|
duration: this.duration_(), |
|
segmentDuration: segmentDuration, |
|
timeUntilRebuffer: timeUntilRebuffer$$1, |
|
currentTimeline: this.currentTimeline_, |
|
syncController: this.syncController_ |
|
}); |
|
|
|
if (!switchCandidate) { |
|
return; |
|
} |
|
|
|
var rebufferingImpact = requestTimeRemaining - timeUntilRebuffer$$1; |
|
var timeSavedBySwitching = rebufferingImpact - switchCandidate.rebufferingImpact; |
|
var minimumTimeSaving = 0.5; // If we are already rebuffering, increase the amount of variance we add to the |
|
// potential round trip time of the new request so that we are not too aggressive |
|
// with switching to a playlist that might save us a fraction of a second. |
|
|
|
if (timeUntilRebuffer$$1 <= TIME_FUDGE_FACTOR) { |
|
minimumTimeSaving = 1; |
|
} |
|
|
|
if (!switchCandidate.playlist || switchCandidate.playlist.uri === this.playlist_.uri || timeSavedBySwitching < minimumTimeSaving) { |
|
return false; |
|
} // set the bandwidth to that of the desired playlist being sure to scale by |
|
// BANDWIDTH_VARIANCE and add one so the playlist selector does not exclude it |
|
// don't trigger a bandwidthupdate as the bandwidth is artifial |
|
|
|
|
|
this.bandwidth = switchCandidate.playlist.attributes.BANDWIDTH * Config.BANDWIDTH_VARIANCE + 1; |
|
this.abort(); |
|
this.trigger('earlyabort'); |
|
return true; |
|
} |
|
/** |
|
* XHR `progress` event handler |
|
* |
|
* @param {Event} |
|
* The XHR `progress` event |
|
* @param {Object} simpleSegment |
|
* A simplified segment object copy |
|
* @private |
|
*/ |
|
|
|
}, { |
|
key: 'handleProgress_', |
|
value: function handleProgress_(event, simpleSegment) { |
|
if (!this.pendingSegment_ || simpleSegment.requestId !== this.pendingSegment_.requestId || this.abortRequestEarly_(simpleSegment.stats)) { |
|
return; |
|
} |
|
|
|
this.trigger('progress'); |
|
} |
|
/** |
|
* load a specific segment from a request into the buffer |
|
* |
|
* @private |
|
*/ |
|
|
|
}, { |
|
key: 'loadSegment_', |
|
value: function loadSegment_(segmentInfo) { |
|
this.state = 'WAITING'; |
|
this.pendingSegment_ = segmentInfo; |
|
this.trimBackBuffer_(segmentInfo); |
|
segmentInfo.abortRequests = mediaSegmentRequest(this.hls_.xhr, this.xhrOptions_, this.decrypter_, this.captionParser_, this.createSimplifiedSegmentObj_(segmentInfo), // progress callback |
|
this.handleProgress_.bind(this), this.segmentRequestFinished_.bind(this)); |
|
} |
|
/** |
|
* trim the back buffer so that we don't have too much data |
|
* in the source buffer |
|
* |
|
* @private |
|
* |
|
* @param {Object} segmentInfo - the current segment |
|
*/ |
|
|
|
}, { |
|
key: 'trimBackBuffer_', |
|
value: function trimBackBuffer_(segmentInfo) { |
|
var removeToTime = safeBackBufferTrimTime(this.seekable_(), this.currentTime_(), this.playlist_.targetDuration || 10); // Chrome has a hard limit of 150MB of |
|
// buffer and a very conservative "garbage collector" |
|
// We manually clear out the old buffer to ensure |
|
// we don't trigger the QuotaExceeded error |
|
// on the source buffer during subsequent appends |
|
|
|
if (removeToTime > 0) { |
|
this.remove(0, removeToTime); |
|
} |
|
} |
|
/** |
|
* created a simplified copy of the segment object with just the |
|
* information necessary to perform the XHR and decryption |
|
* |
|
* @private |
|
* |
|
* @param {Object} segmentInfo - the current segment |
|
* @returns {Object} a simplified segment object copy |
|
*/ |
|
|
|
}, { |
|
key: 'createSimplifiedSegmentObj_', |
|
value: function createSimplifiedSegmentObj_(segmentInfo) { |
|
var segment = segmentInfo.segment; |
|
var simpleSegment = { |
|
resolvedUri: segment.resolvedUri, |
|
byterange: segment.byterange, |
|
requestId: segmentInfo.requestId |
|
}; |
|
|
|
if (segment.key) { |
|
// if the media sequence is greater than 2^32, the IV will be incorrect |
|
// assuming 10s segments, that would be about 1300 years |
|
var iv = segment.key.iv || new Uint32Array([0, 0, 0, segmentInfo.mediaIndex + segmentInfo.playlist.mediaSequence]); |
|
simpleSegment.key = { |
|
resolvedUri: segment.key.resolvedUri, |
|
iv: iv |
|
}; |
|
} |
|
|
|
if (segment.map) { |
|
simpleSegment.map = this.initSegment(segment.map); |
|
} |
|
|
|
return simpleSegment; |
|
} |
|
  /**
   * Handle the callback from the segmentRequest function and set the
   * associated SegmentLoader state and errors if necessary
   *
   * @param {Object} error
   *        Error object from mediaSegmentRequest, or null on success
   * @param {Object} simpleSegment
   *        The simplified segment object that was requested
   * @private
   */


  }, {
    key: 'segmentRequestFinished_',
    value: function segmentRequestFinished_(error, simpleSegment) {
      // every request counts as a media request even if it has been aborted
      // or canceled due to a timeout
      this.mediaRequests += 1;

      // stats exist whenever any bytes were transferred, even for requests
      // that later errored or were aborted
      if (simpleSegment.stats) {
        this.mediaBytesTransferred += simpleSegment.stats.bytesReceived;
        this.mediaTransferDuration += simpleSegment.stats.roundTripTime;
      } // The request was aborted and the SegmentLoader has already been reset


      if (!this.pendingSegment_) {
        this.mediaRequestsAborted += 1;
        return;
      } // the request was aborted and the SegmentLoader has already started
      // another request. this can happen when the timeout for an aborted
      // request triggers due to a limitation in the XHR library
      // do not count this as any sort of request or we risk double-counting


      if (simpleSegment.requestId !== this.pendingSegment_.requestId) {
        return;
      } // an error occurred from the active pendingSegment_ so reset everything


      if (error) {
        this.pendingSegment_ = null;
        this.state = 'READY'; // the requests were aborted just record the aborted stat and exit
        // this is not a true error condition and nothing corrective needs
        // to be done


        if (error.code === REQUEST_ERRORS.ABORTED) {
          this.mediaRequestsAborted += 1;
          return;
        }

        this.pause(); // the error is really just that at least one of the requests timed-out
        // set the bandwidth to a very low value and trigger an ABR switch to
        // take emergency action


        if (error.code === REQUEST_ERRORS.TIMEOUT) {
          this.mediaRequestsTimedout += 1;
          this.bandwidth = 1;
          this.roundTrip = NaN;
          this.trigger('bandwidthupdate');
          return;
        } // if control-flow has arrived here, then the error is real
        // emit an error event to blacklist the current playlist


        this.mediaRequestsErrored += 1;
        this.error(error);
        this.trigger('error');
        return;
      } // the response was a success so set any bandwidth stats the request
      // generated for ABR purposes


      this.bandwidth = simpleSegment.stats.bandwidth;
      this.roundTrip = simpleSegment.stats.roundTripTime; // if this request included an initialization segment, save that data
      // to the initSegment cache


      if (simpleSegment.map) {
        simpleSegment.map = this.initSegment(simpleSegment.map, true);
      }

      this.processSegmentResponse_(simpleSegment);
    }
|
  /**
   * Move any important data from the simplified segment object
   * back to the real segment object for future phases
   *
   * @param {Object} simpleSegment
   *        The simplified segment object returned from the request
   * @private
   */


  }, {
    key: 'processSegmentResponse_',
    value: function processSegmentResponse_(simpleSegment) {
      var segmentInfo = this.pendingSegment_;
      segmentInfo.bytes = simpleSegment.bytes;

      // carry the (possibly cached) init segment bytes back onto the real
      // segment object so later phases can append them
      if (simpleSegment.map) {
        segmentInfo.segment.map.bytes = simpleSegment.map.bytes;
      }

      segmentInfo.endOfAllRequests = simpleSegment.endOfAllRequests; // This has fmp4 captions, add them to text tracks


      if (simpleSegment.fmp4Captions) {
        createCaptionsTrackIfNotExists(this.inbandTextTracks_, this.hls_.tech_, simpleSegment.captionStreams);
        addCaptionData({
          inbandTextTracks: this.inbandTextTracks_,
          captionArray: simpleSegment.fmp4Captions,
          // fmp4s will not have a timestamp offset
          timestampOffset: 0
        }); // Reset stored captions since we added parsed
        // captions to a text track at this point


        this.captionParser_.clearParsedCaptions();
      }

      this.handleSegment_();
    }
|
  /**
   * append a decrypted segment to the SourceBuffer through a SourceUpdater
   *
   * @private
   */


  }, {
    key: 'handleSegment_',
    value: function handleSegment_() {
      var _this3 = this;

      // the loader may have been reset (abort/rendition change) while the
      // request was in flight
      if (!this.pendingSegment_) {
        this.state = 'READY';
        return;
      }

      var segmentInfo = this.pendingSegment_;
      var segment = segmentInfo.segment;
      var timingInfo = this.syncController_.probeSegmentInfo(segmentInfo); // When we have our first timing info, determine what media types this loader is
      // dealing with. Although we're maintaining extra state, it helps to preserve the
      // separation of segment loader from the actual source buffers.


      if (typeof this.startingMedia_ === 'undefined' && timingInfo && ( // Guard against cases where we're not getting timing info at all until we are
      // certain that all streams will provide it.
      timingInfo.containsAudio || timingInfo.containsVideo)) {
        this.startingMedia_ = {
          containsAudio: timingInfo.containsAudio,
          containsVideo: timingInfo.containsVideo
        };
      }

      var illegalMediaSwitchError = illegalMediaSwitch(this.loaderType_, this.startingMedia_, timingInfo);

      // switching between e.g. audio-only and muxed content mid-stream cannot
      // be appended; blacklist the playlist permanently
      if (illegalMediaSwitchError) {
        this.error({
          message: illegalMediaSwitchError,
          blacklistDuration: Infinity
        });
        this.trigger('error');
        return;
      }

      // sync requests only exist to update timing info; they are never appended
      if (segmentInfo.isSyncRequest) {
        this.trigger('syncinfoupdate');
        this.pendingSegment_ = null;
        this.state = 'READY';
        return;
      }

      if (segmentInfo.timestampOffset !== null && segmentInfo.timestampOffset !== this.sourceUpdater_.timestampOffset()) {
        this.sourceUpdater_.timestampOffset(segmentInfo.timestampOffset); // fired when a timestamp offset is set in HLS (can also identify discontinuities)


        this.trigger('timestampoffset');
      }

      var timelineMapping = this.syncController_.mappingForTimeline(segmentInfo.timeline);

      if (timelineMapping !== null) {
        this.trigger({
          type: 'segmenttimemapping',
          mapping: timelineMapping
        });
      }

      this.state = 'APPENDING'; // if the media initialization segment is changing, append it
      // before the content segment


      if (segment.map) {
        var initId = initSegmentId(segment.map);

        if (!this.activeInitSegmentId_ || this.activeInitSegmentId_ !== initId) {
          var initSegment = this.initSegment(segment.map);
          this.sourceUpdater_.appendBuffer({
            bytes: initSegment.bytes
          }, function () {
            _this3.activeInitSegmentId_ = initId;
          });
        }
      }

      segmentInfo.byteLength = segmentInfo.bytes.byteLength;

      // prefer probed start/end times over the playlist's declared duration
      // when both are available
      if (typeof segment.start === 'number' && typeof segment.end === 'number') {
        this.mediaSecondsLoaded += segment.end - segment.start;
      } else {
        this.mediaSecondsLoaded += segment.duration;
      }

      this.logger_(segmentInfoString(segmentInfo));
      this.sourceUpdater_.appendBuffer({
        bytes: segmentInfo.bytes,
        videoSegmentTimingInfoCallback: this.handleVideoSegmentTimingInfo_.bind(this, segmentInfo.requestId)
      }, this.handleUpdateEnd_.bind(this));
    }
|
}, { |
|
key: 'handleVideoSegmentTimingInfo_', |
|
value: function handleVideoSegmentTimingInfo_(requestId, event) { |
|
if (!this.pendingSegment_ || requestId !== this.pendingSegment_.requestId) { |
|
return; |
|
} |
|
|
|
var segment = this.pendingSegment_.segment; |
|
|
|
if (!segment.videoTimingInfo) { |
|
segment.videoTimingInfo = {}; |
|
} |
|
|
|
segment.videoTimingInfo.transmuxerPrependedSeconds = event.videoSegmentTimingInfo.prependedContentDuration || 0; |
|
segment.videoTimingInfo.transmuxedPresentationStart = event.videoSegmentTimingInfo.start.presentation; |
|
segment.videoTimingInfo.transmuxedPresentationEnd = event.videoSegmentTimingInfo.end.presentation; // mainly used as a reference for debugging |
|
|
|
segment.videoTimingInfo.baseMediaDecodeTime = event.videoSegmentTimingInfo.baseMediaDecodeTime; |
|
} |
|
  /**
   * callback to run when appendBuffer is finished. detects if we are
   * in a good state to do things with the data we got, or if we need
   * to wait for more
   *
   * @private
   */


  }, {
    key: 'handleUpdateEnd_',
    value: function handleUpdateEnd_() {
      // the append completed after the loader was reset; just resume the
      // buffer monitor if we are not paused
      if (!this.pendingSegment_) {
        this.state = 'READY';

        if (!this.paused()) {
          this.monitorBuffer_();
        }

        return;
      }

      var segmentInfo = this.pendingSegment_;
      var segment = segmentInfo.segment;
      // a null mediaIndex means this was a seek/sync fetch rather than a
      // sequential walk of the playlist
      var isWalkingForward = this.mediaIndex !== null;
      this.pendingSegment_ = null;
      this.recordThroughput_(segmentInfo);
      this.addSegmentMetadataCue_(segmentInfo);
      this.state = 'READY';
      this.mediaIndex = segmentInfo.mediaIndex;
      this.fetchAtBuffer_ = true;
      this.currentTimeline_ = segmentInfo.timeline; // We must update the syncinfo to recalculate the seekable range before
      // the following conditional otherwise it may consider this a bad "guess"
      // and attempt to resync when the post-update seekable window and live
      // point would mean that this was the perfect segment to fetch


      this.trigger('syncinfoupdate'); // If we previously appended a segment that ends more than 3 targetDurations before
      // the currentTime_ that means that our conservative guess was too conservative.
      // In that case, reset the loader state so that we try to use any information gained
      // from the previous request to create a new, more accurate, sync-point.


      if (segment.end && this.currentTime_() - segment.end > segmentInfo.playlist.targetDuration * 3) {
        this.resetEverything();
        return;
      } // Don't do a rendition switch unless we have enough time to get a sync segment
      // and conservatively guess


      if (isWalkingForward) {
        this.trigger('bandwidthupdate');
      }

      this.trigger('progress'); // any time an update finishes and the last segment is in the
      // buffer, end the stream. this ensures the "ended" event will
      // fire if playback reaches that point.


      if (this.isEndOfStream_(segmentInfo.mediaIndex + 1, segmentInfo.playlist)) {
        this.endOfStream();
      }

      if (!this.paused()) {
        this.monitorBuffer_();
      }
    }
|
/** |
|
* Records the current throughput of the decrypt, transmux, and append |
|
* portion of the semgment pipeline. `throughput.rate` is a the cumulative |
|
* moving average of the throughput. `throughput.count` is the number of |
|
* data points in the average. |
|
* |
|
* @private |
|
* @param {Object} segmentInfo the object returned by loadSegment |
|
*/ |
|
|
|
}, { |
|
key: 'recordThroughput_', |
|
value: function recordThroughput_(segmentInfo) { |
|
var rate = this.throughput.rate; // Add one to the time to ensure that we don't accidentally attempt to divide |
|
// by zero in the case where the throughput is ridiculously high |
|
|
|
var segmentProcessingTime = Date.now() - segmentInfo.endOfAllRequests + 1; // Multiply by 8000 to convert from bytes/millisecond to bits/second |
|
|
|
var segmentProcessingThroughput = Math.floor(segmentInfo.byteLength / segmentProcessingTime * 8 * 1000); // This is just a cumulative moving average calculation: |
|
// newAvg = oldAvg + (sample - oldAvg) / (sampleCount + 1) |
|
|
|
this.throughput.rate += (segmentProcessingThroughput - rate) / ++this.throughput.count; |
|
} |
|
  /**
   * Adds a cue to the segment-metadata track with some metadata information about the
   * segment
   *
   * @private
   * @param {Object} segmentInfo
   *        the object returned by loadSegment
   * @method addSegmentMetadataCue_
   */


  }, {
    key: 'addSegmentMetadataCue_',
    value: function addSegmentMetadataCue_(segmentInfo) {
      // the metadata track is optional; nothing to do without one
      if (!this.segmentMetadataTrack_) {
        return;
      }

      var segment = segmentInfo.segment;
      var start = segment.start;
      var end = segment.end; // Do not try adding the cue if the start and end times are invalid.


      if (!finite(start) || !finite(end)) {
        return;
      }

      // replace any cue previously created for this window so cues are not
      // duplicated when a segment is re-appended
      removeCuesFromTrack(start, end, this.segmentMetadataTrack_);
      var Cue = window$1.WebKitDataCue || window$1.VTTCue;
      var value = {
        custom: segment.custom,
        dateTimeObject: segment.dateTimeObject,
        dateTimeString: segment.dateTimeString,
        bandwidth: segmentInfo.playlist.attributes.BANDWIDTH,
        resolution: segmentInfo.playlist.attributes.RESOLUTION,
        codecs: segmentInfo.playlist.attributes.CODECS,
        byteLength: segmentInfo.byteLength,
        uri: segmentInfo.uri,
        timeline: segmentInfo.timeline,
        playlist: segmentInfo.playlist.uri,
        start: start,
        end: end
      };
      var data = JSON.stringify(value);
      var cue = new Cue(start, end, data); // Attach the metadata to the value property of the cue to keep consistency between
      // the differences of WebKitDataCue in safari and VTTCue in other browsers


      cue.value = value;
      this.segmentMetadataTrack_.addCue(cue);
    }
|
}]); |
|
return SegmentLoader; |
|
}(videojs$1.EventTarget); |
|
|
|
var uint8ToUtf8 = function uint8ToUtf8(uintArray) { |
|
return decodeURIComponent(escape(String.fromCharCode.apply(null, uintArray))); |
|
}; |
|
/** |
|
* @file vtt-segment-loader.js |
|
*/ |
|
|
|
|
|
var VTT_LINE_TERMINATORS = new Uint8Array('\n\n'.split('').map(function (char) { |
|
return char.charCodeAt(0); |
|
})); |
|
  /**
   * An object that manages segment loading and appending.
   *
   * @class VTTSegmentLoader
   * @param {Object} settings required and optional options
   * @extends videojs.EventTarget
   */


  var VTTSegmentLoader = function (_SegmentLoader) {
    inherits$1(VTTSegmentLoader, _SegmentLoader);

    function VTTSegmentLoader(settings) {
      var options = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {};
      classCallCheck$1(this, VTTSegmentLoader); // SegmentLoader requires a MediaSource be specified or it will throw an error;
      // however, VTTSegmentLoader has no need of a media source, so delete the reference


      var _this = possibleConstructorReturn$1(this, (VTTSegmentLoader.__proto__ || Object.getPrototypeOf(VTTSegmentLoader)).call(this, settings, options));

      _this.mediaSource_ = null;
      // cues are written into this TextTrack instead of a SourceBuffer
      _this.subtitlesTrack_ = null;
      return _this;
    }
|
  /**
   * Indicates which time ranges are buffered
   *
   * @return {TimeRange}
   *         TimeRange object representing the current buffered ranges
   */


  createClass$1(VTTSegmentLoader, [{
    key: 'buffered_',
    value: function buffered_() {
      // no track or no cues means nothing is buffered
      if (!this.subtitlesTrack_ || !this.subtitlesTrack_.cues.length) {
        return videojs$1.createTimeRanges();
      }

      var cues = this.subtitlesTrack_.cues;
      var start = cues[0].startTime;
      // NOTE(review): the range's end uses the last cue's startTime rather
      // than its endTime — presumably intentional so the final segment stays
      // eligible for re-request; confirm before changing.
      var end = cues[cues.length - 1].startTime;
      return videojs$1.createTimeRanges([[start, end]]);
    }
|
/** |
|
* Gets and sets init segment for the provided map |
|
* |
|
* @param {Object} map |
|
* The map object representing the init segment to get or set |
|
* @param {Boolean=} set |
|
* If true, the init segment for the provided map should be saved |
|
* @return {Object} |
|
* map object for desired init segment |
|
*/ |
|
|
|
}, { |
|
key: 'initSegment', |
|
value: function initSegment(map) { |
|
var set$$1 = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : false; |
|
|
|
if (!map) { |
|
return null; |
|
} |
|
|
|
var id = initSegmentId(map); |
|
var storedMap = this.initSegments_[id]; |
|
|
|
if (set$$1 && !storedMap && map.bytes) { |
|
// append WebVTT line terminators to the media initialization segment if it exists |
|
// to follow the WebVTT spec (https://w3c.github.io/webvtt/#file-structure) that |
|
// requires two or more WebVTT line terminators between the WebVTT header and the |
|
// rest of the file |
|
var combinedByteLength = VTT_LINE_TERMINATORS.byteLength + map.bytes.byteLength; |
|
var combinedSegment = new Uint8Array(combinedByteLength); |
|
combinedSegment.set(map.bytes); |
|
combinedSegment.set(VTT_LINE_TERMINATORS, map.bytes.byteLength); |
|
this.initSegments_[id] = storedMap = { |
|
resolvedUri: map.resolvedUri, |
|
byterange: map.byterange, |
|
bytes: combinedSegment |
|
}; |
|
} |
|
|
|
return storedMap || map; |
|
} |
|
  /**
   * Returns true if all configuration required for loading is present, otherwise false.
   *
   * @return {Boolean} True if the all configuration is ready for loading
   * @private
   */


  }, {
    key: 'couldBeginLoading_',
    value: function couldBeginLoading_() {
      // unlike the base SegmentLoader, a subtitles track (rather than a media
      // source) is the prerequisite for loading here
      return this.playlist_ && this.subtitlesTrack_ && !this.paused();
    }
|
  /**
   * Once all the starting parameters have been specified, begin
   * operation. This method should only be invoked from the INIT
   * state.
   *
   * @private
   */


  }, {
    key: 'init_',
    value: function init_() {
      this.state = 'READY';
      // discard any state left over from a previous track/playlist before
      // starting the buffer monitor
      this.resetEverything();
      return this.monitorBuffer_();
    }
|
/** |
|
* Set a subtitle track on the segment loader to add subtitles to |
|
* |
|
* @param {TextTrack=} track |
|
* The text track to add loaded subtitles to |
|
* @return {TextTrack} |
|
* Returns the subtitles track |
|
*/ |
|
|
|
}, { |
|
key: 'track', |
|
value: function track(_track) { |
|
if (typeof _track === 'undefined') { |
|
return this.subtitlesTrack_; |
|
} |
|
|
|
this.subtitlesTrack_ = _track; // if we were unpaused but waiting for a sourceUpdater, start |
|
// buffering now |
|
|
|
if (this.state === 'INIT' && this.couldBeginLoading_()) { |
|
this.init_(); |
|
} |
|
|
|
return this.subtitlesTrack_; |
|
} |
|
  /**
   * Remove any data in the source buffer between start and end times
   *
   * @param {Number} start - the start time of the region to remove from the buffer
   * @param {Number} end - the end time of the region to remove from the buffer
   */


  }, {
    key: 'remove',
    value: function remove(start, end) {
      // subtitles live in a TextTrack rather than a SourceBuffer, so
      // "removing from the buffer" means removing the cues in that window
      removeCuesFromTrack(start, end, this.subtitlesTrack_);
    }
|
  /**
   * fill the buffer with segments unless the sourceBuffers are
   * currently updating
   *
   * Note: this function should only ever be called by monitorBuffer_
   * and never directly
   *
   * @private
   */


  }, {
    key: 'fillBuffer_',
    value: function fillBuffer_() {
      var _this2 = this;

      // establish a sync point lazily; it anchors playlist indices to
      // display time
      if (!this.syncPoint_) {
        this.syncPoint_ = this.syncController_.getSyncPoint(this.playlist_, this.duration_(), this.currentTimeline_, this.currentTime_());
      } // see if we need to begin loading immediately


      var segmentInfo = this.checkBuffer_(this.buffered_(), this.playlist_, this.mediaIndex, this.hasPlayed_(), this.currentTime_(), this.syncPoint_);
      segmentInfo = this.skipEmptySegments_(segmentInfo);

      if (!segmentInfo) {
        return;
      }

      if (this.syncController_.timestampOffsetForTimeline(segmentInfo.timeline) === null) {
        // We don't have the timestamp offset that we need to sync subtitles.
        // Rerun on a timestamp offset or user interaction.
        var checkTimestampOffset = function checkTimestampOffset() {
          _this2.state = 'READY';

          if (!_this2.paused()) {
            // if not paused, queue a buffer check as soon as possible
            _this2.monitorBuffer_();
          }
        };

        this.syncController_.one('timestampoffset', checkTimestampOffset);
        this.state = 'WAITING_ON_TIMELINE';
        return;
      }

      this.loadSegment_(segmentInfo);
    }
|
/** |
|
* Prevents the segment loader from requesting segments we know contain no subtitles |
|
* by walking forward until we find the next segment that we don't know whether it is |
|
* empty or not. |
|
* |
|
* @param {Object} segmentInfo |
|
* a segment info object that describes the current segment |
|
* @return {Object} |
|
* a segment info object that describes the current segment |
|
*/ |
|
|
|
}, { |
|
key: 'skipEmptySegments_', |
|
value: function skipEmptySegments_(segmentInfo) { |
|
while (segmentInfo && segmentInfo.segment.empty) { |
|
segmentInfo = this.generateSegmentInfo_(segmentInfo.playlist, segmentInfo.mediaIndex + 1, segmentInfo.startOfSegment + segmentInfo.duration, segmentInfo.isSyncRequest); |
|
} |
|
|
|
return segmentInfo; |
|
} |
|
  /**
   * append a decrypted segment to the SourceBuffer through a SourceUpdater
   *
   * @private
   */


  }, {
    key: 'handleSegment_',
    value: function handleSegment_() {
      var _this3 = this;

      // bail if the loader was reset or the track was removed mid-request
      if (!this.pendingSegment_ || !this.subtitlesTrack_) {
        this.state = 'READY';
        return;
      }

      this.state = 'APPENDING';
      var segmentInfo = this.pendingSegment_;
      var segment = segmentInfo.segment; // Make sure that vttjs has loaded, otherwise, wait till it finished loading


      if (typeof window$1.WebVTT !== 'function' && this.subtitlesTrack_ && this.subtitlesTrack_.tech_) {
        // retry this whole method once vtt.js becomes available
        var loadHandler = function loadHandler() {
          _this3.handleSegment_();
        };

        this.state = 'WAITING_ON_VTTJS';
        this.subtitlesTrack_.tech_.one('vttjsloaded', loadHandler);
        this.subtitlesTrack_.tech_.one('vttjserror', function () {
          _this3.subtitlesTrack_.tech_.off('vttjsloaded', loadHandler);

          _this3.error({
            message: 'Error loading vtt.js'
          });

          _this3.state = 'READY';

          _this3.pause();

          _this3.trigger('error');
        });
        return;
      }

      segment.requested = true;

      try {
        this.parseVTTCues_(segmentInfo);
      } catch (e) {
        // a parse failure surfaces through the error event rather than
        // throwing up the call stack
        this.error({
          message: e.message
        });
        this.state = 'READY';
        this.pause();
        return this.trigger('error');
      }

      this.updateTimeMapping_(segmentInfo, this.syncController_.timelines[segmentInfo.timeline], this.playlist_);

      // sync requests only update timing information; no cues are appended
      if (segmentInfo.isSyncRequest) {
        this.trigger('syncinfoupdate');
        this.pendingSegment_ = null;
        this.state = 'READY';
        return;
      }

      segmentInfo.byteLength = segmentInfo.bytes.byteLength;
      this.mediaSecondsLoaded += segment.duration;

      if (segmentInfo.cues.length) {
        // remove any overlapping cues to prevent doubling
        this.remove(segmentInfo.cues[0].endTime, segmentInfo.cues[segmentInfo.cues.length - 1].endTime);
      }

      segmentInfo.cues.forEach(function (cue) {
        _this3.subtitlesTrack_.addCue(cue);
      });
      this.handleUpdateEnd_();
    }
|
  /**
   * Uses the WebVTT parser to parse the segment response
   *
   * @param {Object} segmentInfo
   *        a segment info object that describes the current segment
   * @private
   */


  }, {
    key: 'parseVTTCues_',
    value: function parseVTTCues_(segmentInfo) {
      var decoder = void 0;
      var decodeBytesToString = false;

      // prefer the native TextDecoder; fall back to vtt.js's StringDecoder,
      // which requires strings rather than byte arrays
      if (typeof window$1.TextDecoder === 'function') {
        decoder = new window$1.TextDecoder('utf8');
      } else {
        decoder = window$1.WebVTT.StringDecoder();
        decodeBytesToString = true;
      }

      var parser = new window$1.WebVTT.Parser(window$1, window$1.vttjs, decoder);
      segmentInfo.cues = [];
      segmentInfo.timestampmap = {
        MPEGTS: 0,
        LOCAL: 0
      };
      parser.oncue = segmentInfo.cues.push.bind(segmentInfo.cues);

      // capture the X-TIMESTAMP-MAP header; used later to align cue times
      // with media time
      parser.ontimestampmap = function (map) {
        return segmentInfo.timestampmap = map;
      };

      parser.onparsingerror = function (error) {
        videojs$1.log.warn('Error encountered when parsing cues: ' + error.message);
      };

      // parse the init segment (shared WebVTT header) first, if present
      if (segmentInfo.segment.map) {
        var mapData = segmentInfo.segment.map.bytes;

        if (decodeBytesToString) {
          mapData = uint8ToUtf8(mapData);
        }

        parser.parse(mapData);
      }

      var segmentData = segmentInfo.bytes;

      if (decodeBytesToString) {
        segmentData = uint8ToUtf8(segmentData);
      }

      parser.parse(segmentData);
      parser.flush();
    }
|
  /**
   * Updates the start and end times of any cues parsed by the WebVTT parser using
   * the information parsed from the X-TIMESTAMP-MAP header and a TS to media time mapping
   * from the SyncController
   *
   * @param {Object} segmentInfo
   *        a segment info object that describes the current segment
   * @param {Object} mappingObj
   *        object containing a mapping from TS to media time
   * @param {Object} playlist
   *        the playlist object containing the segment
   * @private
   */


  }, {
    key: 'updateTimeMapping_',
    value: function updateTimeMapping_(segmentInfo, mappingObj, playlist) {
      var segment = segmentInfo.segment;

      if (!mappingObj) {
        // If the sync controller does not have a mapping of TS to Media Time for the
        // timeline, then we don't have enough information to update the cue
        // start/end times
        return;
      }

      if (!segmentInfo.cues.length) {
        // If there are no cues, we also do not have enough information to figure out
        // segment timing. Mark that the segment contains no cues so we don't re-request
        // an empty segment.
        segment.empty = true;
        return;
      }

      var timestampmap = segmentInfo.timestampmap;
      // MPEGTS is a 90kHz clock value; convert to seconds, then shift the
      // cue's local time into player media time via the timeline mapping
      var diff = timestampmap.MPEGTS / 90000 - timestampmap.LOCAL + mappingObj.mapping;
      segmentInfo.cues.forEach(function (cue) {
        // First convert cue time to TS time using the timestamp-map provided within the vtt
        cue.startTime += diff;
        cue.endTime += diff;
      });

      // seed syncInfo for this playlist the first time we learn real timing
      if (!playlist.syncInfo) {
        var firstStart = segmentInfo.cues[0].startTime;
        var lastStart = segmentInfo.cues[segmentInfo.cues.length - 1].startTime;
        playlist.syncInfo = {
          mediaSequence: playlist.mediaSequence + segmentInfo.mediaIndex,
          time: Math.min(firstStart, lastStart - segment.duration)
        };
      }
    }
|
}]); |
|
return VTTSegmentLoader; |
|
}(SegmentLoader); |
|
/** |
|
* @file ad-cue-tags.js |
|
*/ |
|
|
|
/** |
|
* Searches for an ad cue that overlaps with the given mediaTime |
|
*/ |
|
|
|
|
|
var findAdCue = function findAdCue(track, mediaTime) { |
|
var cues = track.cues; |
|
|
|
for (var i = 0; i < cues.length; i++) { |
|
var cue = cues[i]; |
|
|
|
if (mediaTime >= cue.adStartTime && mediaTime <= cue.adEndTime) { |
|
return cue; |
|
} |
|
} |
|
|
|
return null; |
|
}; |
|
|
|
var updateAdCues = function updateAdCues(media, track) { |
|
var offset = arguments.length > 2 && arguments[2] !== undefined ? arguments[2] : 0; |
|
|
|
if (!media.segments) { |
|
return; |
|
} |
|
|
|
var mediaTime = offset; |
|
var cue = void 0; |
|
|
|
for (var i = 0; i < media.segments.length; i++) { |
|
var segment = media.segments[i]; |
|
|
|
if (!cue) { |
|
// Since the cues will span for at least the segment duration, adding a fudge |
|
// factor of half segment duration will prevent duplicate cues from being |
|
// created when timing info is not exact (e.g. cue start time initialized |
|
// at 10.006677, but next call mediaTime is 10.003332 ) |
|
cue = findAdCue(track, mediaTime + segment.duration / 2); |
|
} |
|
|
|
if (cue) { |
|
if ('cueIn' in segment) { |
|
// Found a CUE-IN so end the cue |
|
cue.endTime = mediaTime; |
|
cue.adEndTime = mediaTime; |
|
mediaTime += segment.duration; |
|
cue = null; |
|
continue; |
|
} |
|
|
|
if (mediaTime < cue.endTime) { |
|
// Already processed this mediaTime for this cue |
|
mediaTime += segment.duration; |
|
continue; |
|
} // otherwise extend cue until a CUE-IN is found |
|
|
|
|
|
cue.endTime += segment.duration; |
|
} else { |
|
if ('cueOut' in segment) { |
|
cue = new window$1.VTTCue(mediaTime, mediaTime + segment.duration, segment.cueOut); |
|
cue.adStartTime = mediaTime; // Assumes tag format to be |
|
// #EXT-X-CUE-OUT:30 |
|
|
|
cue.adEndTime = mediaTime + parseFloat(segment.cueOut); |
|
track.addCue(cue); |
|
} |
|
|
|
if ('cueOutCont' in segment) { |
|
// Entered into the middle of an ad cue |
|
var adOffset = void 0; |
|
var adTotal = void 0; // Assumes tag formate to be |
|
// #EXT-X-CUE-OUT-CONT:10/30 |
|
|
|
var _segment$cueOutCont$s = segment.cueOutCont.split('/').map(parseFloat); |
|
|
|
var _segment$cueOutCont$s2 = slicedToArray(_segment$cueOutCont$s, 2); |
|
|
|
adOffset = _segment$cueOutCont$s2[0]; |
|
adTotal = _segment$cueOutCont$s2[1]; |
|
cue = new window$1.VTTCue(mediaTime, mediaTime + segment.duration, ''); |
|
cue.adStartTime = mediaTime - adOffset; |
|
cue.adEndTime = cue.adStartTime + adTotal; |
|
track.addCue(cue); |
|
} |
|
} |
|
|
|
mediaTime += segment.duration; |
|
} |
|
}; |
|
/** |
|
* @file sync-controller.js |
|
*/ |
|
|
|
|
|
// Alias for the mpeg2-ts inspector entry point; used by
// SyncController#probeTsSegment_ below.
var tsprobe = tsInspector.inspect;
|
/**
 * Ordered list of strategies for deriving a sync-point — a known mapping
 * from display-time to a segment index in the current playlist. Each
 * strategy's `run` returns either `{time, segmentIndex}` or null.
 */
var syncPointStrategies = [
// Strategy "VOD": Handle the VOD-case where the sync-point is *always*
// the equivalence display-time 0 === segment-index 0
{
  name: 'VOD',
  run: function run(syncController, playlist, duration$$1, currentTimeline, currentTime) {
    // A live source has Infinity duration; everything else is VOD.
    if (duration$$1 === Infinity) {
      return null;
    }

    return {
      time: 0,
      segmentIndex: 0
    };
  }
},
// Strategy "ProgramDateTime": We have a program-date-time tag in this playlist
{
  name: 'ProgramDateTime',
  run: function run(syncController, playlist, duration$$1, currentTimeline, currentTime) {
    if (!syncController.datetimeToDisplayTime) {
      return null;
    }

    var segments = playlist.segments || [];
    var target = currentTime || 0;
    var bestPoint = null;
    var previousDistance = null;

    for (var index = 0; index < segments.length; index++) {
      var candidate = segments[index];

      if (!candidate.dateTimeObject) {
        continue;
      }

      var candidateSeconds = candidate.dateTimeObject.getTime() / 1000;
      var candidateStart = candidateSeconds + syncController.datetimeToDisplayTime;
      var candidateDistance = Math.abs(target - candidateStart);

      // Distances shrink until we pass currentTime; once they start growing
      // again no later segment can be a better candidate, so stop.
      if (previousDistance !== null && previousDistance < candidateDistance) {
        break;
      }

      previousDistance = candidateDistance;
      bestPoint = {
        time: candidateStart,
        segmentIndex: index
      };
    }

    return bestPoint;
  }
},
// Strategy "Segment": We have a known time mapping for a timeline and a
// segment in the current timeline with timing data
{
  name: 'Segment',
  run: function run(syncController, playlist, duration$$1, currentTimeline, currentTime) {
    var segments = playlist.segments || [];
    var target = currentTime || 0;
    var bestPoint = null;
    var previousDistance = null;

    for (var index = 0; index < segments.length; index++) {
      var candidate = segments[index];

      if (candidate.timeline !== currentTimeline || typeof candidate.start === 'undefined') {
        continue;
      }

      var candidateDistance = Math.abs(target - candidate.start);

      // Once the distance begins to increase we have passed currentTime and
      // can stop looking for better candidates.
      if (previousDistance !== null && previousDistance < candidateDistance) {
        break;
      }

      if (!bestPoint || previousDistance === null || previousDistance >= candidateDistance) {
        previousDistance = candidateDistance;
        bestPoint = {
          time: candidate.start,
          segmentIndex: index
        };
      }
    }

    return bestPoint;
  }
},
// Strategy "Discontinuity": We have a discontinuity with a known display-time
{
  name: 'Discontinuity',
  run: function run(syncController, playlist, duration$$1, currentTimeline, currentTime) {
    var target = currentTime || 0;
    var bestPoint = null;

    if (!playlist.discontinuityStarts || !playlist.discontinuityStarts.length) {
      return bestPoint;
    }

    var previousDistance = null;

    for (var index = 0; index < playlist.discontinuityStarts.length; index++) {
      var segmentIndex = playlist.discontinuityStarts[index];
      var discontinuity = playlist.discontinuitySequence + index + 1;
      var discontinuitySync = syncController.discontinuities[discontinuity];

      if (!discontinuitySync) {
        continue;
      }

      var candidateDistance = Math.abs(target - discontinuitySync.time);

      // Once the distance begins to increase we have passed currentTime and
      // can stop looking for better candidates.
      if (previousDistance !== null && previousDistance < candidateDistance) {
        break;
      }

      if (!bestPoint || previousDistance === null || previousDistance >= candidateDistance) {
        previousDistance = candidateDistance;
        bestPoint = {
          time: discontinuitySync.time,
          segmentIndex: segmentIndex
        };
      }
    }

    return bestPoint;
  }
},
// Strategy "Playlist": We have a playlist with a known mapping of segment
// index to display time
{
  name: 'Playlist',
  run: function run(syncController, playlist, duration$$1, currentTimeline, currentTime) {
    if (!playlist.syncInfo) {
      return null;
    }

    return {
      time: playlist.syncInfo.time,
      segmentIndex: playlist.syncInfo.mediaSequence - playlist.mediaSequence
    };
  }
}];
|
|
|
/**
 * Tracks mappings between a segment's internal "media time" and the display
 * time shown on the player, so segment loaders can stay synchronized across
 * renditions, timelines, and live playlist refreshes.
 */
var SyncController = function (_videojs$EventTarget) {
  inherits$1(SyncController, _videojs$EventTarget);

  function SyncController() {
    classCallCheck$1(this, SyncController); // Segment Loader state variables...
    // ...for synching across variants

    var _this = possibleConstructorReturn$1(this, (SyncController.__proto__ || Object.getPrototypeOf(SyncController)).call(this));

    // Cached DTS from the last inspected ts segment; speeds up probing of
    // the next segment and is cleared by reset() on rendition switches.
    _this.inspectCache_ = undefined; // ...for synching across variants

    // Per-timeline {time, mapping} entries, indexed by timeline number.
    _this.timelines = [];
    // Known {time, accuracy} for each discontinuity sequence number.
    _this.discontinuities = [];
    // Offset from program-date-time (seconds since epoch) to display time;
    // set once at playback start by setDateTimeMapping().
    _this.datetimeToDisplayTime = null;
    _this.logger_ = logger('SyncController');
    return _this;
  }
  /**
   * Find a sync-point for the playlist specified
   *
   * A sync-point is defined as a known mapping from display-time to
   * a segment-index in the current playlist.
   *
   * @param {Playlist} playlist
   *        The playlist that needs a sync-point
   * @param {Number} duration$$1
   *        Duration of the MediaSource (Infinity if playing a live source)
   * @param {Number} currentTimeline
   *        The last timeline from which a segment was loaded
   * @param {Number} currentTime
   *        The display time to find the nearest sync-point to
   * @returns {Object}
   *          A sync-point object, or null when no strategy produced one
   */


  createClass$1(SyncController, [{
    key: 'getSyncPoint',
    value: function getSyncPoint(playlist, duration$$1, currentTimeline, currentTime) {
      var syncPoints = this.runStrategies_(playlist, duration$$1, currentTimeline, currentTime);

      if (!syncPoints.length) {
        // Signal that we need to attempt to get a sync-point manually
        // by fetching a segment in the playlist and constructing
        // a sync-point from that information
        return null;
      } // Now find the sync-point that is closest to the currentTime because
      // that should result in the most accurate guess about which segment
      // to fetch


      return this.selectSyncPoint_(syncPoints, {
        key: 'time',
        value: currentTime
      });
    }
    /**
     * Calculate the amount of time that has expired off the playlist during playback
     *
     * @param {Playlist} playlist
     *        Playlist object to calculate expired from
     * @param {Number} duration$$1
     *        Duration of the MediaSource (Infinity if playing a live source)
     * @returns {Number|null}
     *          The amount of time that has expired off the playlist during playback. Null
     *          if no sync-points for the playlist can be found.
     */

  }, {
    key: 'getExpiredTime',
    value: function getExpiredTime(playlist, duration$$1) {
      if (!playlist || !playlist.segments) {
        return null;
      }

      var syncPoints = this.runStrategies_(playlist, duration$$1, playlist.discontinuitySequence, 0); // Without sync-points, there is not enough information to determine the expired time

      if (!syncPoints.length) {
        return null;
      }

      var syncPoint = this.selectSyncPoint_(syncPoints, {
        key: 'segmentIndex',
        value: 0
      }); // If the sync-point is beyond the start of the playlist, we want to subtract the
      // duration from index 0 to syncPoint.segmentIndex instead of adding.

      if (syncPoint.segmentIndex > 0) {
        syncPoint.time *= -1;
      }

      return Math.abs(syncPoint.time + sumDurations(playlist, syncPoint.segmentIndex, 0));
    }
    /**
     * Runs each sync-point strategy and returns a list of sync-points returned by the
     * strategies
     *
     * @private
     * @param {Playlist} playlist
     *        The playlist that needs a sync-point
     * @param {Number} duration$$1
     *        Duration of the MediaSource (Infinity if playing a live source)
     * @param {Number} currentTimeline
     *        The last timeline from which a segment was loaded
     * @param {Number} currentTime
     *        The display time passed to each strategy
     * @returns {Array}
     *          A list of {strategy, syncPoint} objects
     */

  }, {
    key: 'runStrategies_',
    value: function runStrategies_(playlist, duration$$1, currentTimeline, currentTime) {
      var syncPoints = []; // Try to find a sync-point in by utilizing various strategies...

      for (var i = 0; i < syncPointStrategies.length; i++) {
        var strategy = syncPointStrategies[i];
        var syncPoint = strategy.run(this, playlist, duration$$1, currentTimeline, currentTime);

        if (syncPoint) {
          syncPoint.strategy = strategy.name;
          syncPoints.push({
            strategy: strategy.name,
            syncPoint: syncPoint
          });
        }
      }

      return syncPoints;
    }
    /**
     * Selects the sync-point nearest the specified target
     *
     * @private
     * @param {Array} syncPoints
     *        List of sync-points to select from
     * @param {Object} target
     *        Object specifying the property and value we are targeting
     * @param {String} target.key
     *        Specifies the property to target. Must be either 'time' or 'segmentIndex'
     * @param {Number} target.value
     *        The value to target for the specified key.
     * @returns {Object}
     *          The sync-point nearest the target
     */

  }, {
    key: 'selectSyncPoint_',
    value: function selectSyncPoint_(syncPoints, target) {
      var bestSyncPoint = syncPoints[0].syncPoint;
      var bestDistance = Math.abs(syncPoints[0].syncPoint[target.key] - target.value);
      var bestStrategy = syncPoints[0].strategy;

      // Linear scan: strictly smaller distance wins, so ties keep the
      // earliest (highest-priority) strategy's sync-point.
      for (var i = 1; i < syncPoints.length; i++) {
        var newDistance = Math.abs(syncPoints[i].syncPoint[target.key] - target.value);

        if (newDistance < bestDistance) {
          bestDistance = newDistance;
          bestSyncPoint = syncPoints[i].syncPoint;
          bestStrategy = syncPoints[i].strategy;
        }
      }

      this.logger_('syncPoint for [' + target.key + ': ' + target.value + '] chosen with strategy' + (' [' + bestStrategy + ']: [time:' + bestSyncPoint.time + ',') + (' segmentIndex:' + bestSyncPoint.segmentIndex + ']'));
      return bestSyncPoint;
    }
    /**
     * Save any meta-data present on the segments when segments leave
     * the live window to the playlist to allow for synchronization at the
     * playlist level later.
     *
     * @param {Playlist} oldPlaylist - The previous active playlist
     * @param {Playlist} newPlaylist - The updated and most current playlist
     */

  }, {
    key: 'saveExpiredSegmentInfo',
    value: function saveExpiredSegmentInfo(oldPlaylist, newPlaylist) {
      var mediaSequenceDiff = newPlaylist.mediaSequence - oldPlaylist.mediaSequence; // When a segment expires from the playlist and it has a start time
      // save that information as a possible sync-point reference in future

      // Walk the expired segments newest-first and keep the most recent one
      // that carries timing information.
      for (var i = mediaSequenceDiff - 1; i >= 0; i--) {
        var lastRemovedSegment = oldPlaylist.segments[i];

        if (lastRemovedSegment && typeof lastRemovedSegment.start !== 'undefined') {
          newPlaylist.syncInfo = {
            mediaSequence: oldPlaylist.mediaSequence + i,
            time: lastRemovedSegment.start
          };
          this.logger_('playlist refresh sync: [time:' + newPlaylist.syncInfo.time + ',' + (' mediaSequence: ' + newPlaylist.syncInfo.mediaSequence + ']'));
          this.trigger('syncinfoupdate');
          break;
        }
      }
    }
    /**
     * Save the mapping from playlist's ProgramDateTime to display. This should
     * only ever happen once at the start of playback.
     *
     * @param {Playlist} playlist - The currently active playlist
     */

  }, {
    key: 'setDateTimeMapping',
    value: function setDateTimeMapping(playlist) {
      if (!this.datetimeToDisplayTime && playlist.segments && playlist.segments.length && playlist.segments[0].dateTimeObject) {
        var playlistTimestamp = playlist.segments[0].dateTimeObject.getTime() / 1000;
        this.datetimeToDisplayTime = -playlistTimestamp;
      }
    }
    /**
     * Reset the state of the inspection cache when we do a rendition
     * switch
     */

  }, {
    key: 'reset',
    value: function reset() {
      this.inspectCache_ = undefined;
    }
    /**
     * Probe or inspect a fmp4 or an mpeg2-ts segment to determine the start
     * and end of the segment in it's internal "media time". Used to generate
     * mappings from that internal "media time" to the display time that is
     * shown on the player.
     *
     * @param {SegmentInfo} segmentInfo - The current active request information
     * @returns {Object|null} the probed timing info, or null/undefined when
     *          the segment could not be probed
     */

  }, {
    key: 'probeSegmentInfo',
    value: function probeSegmentInfo(segmentInfo) {
      var segment = segmentInfo.segment;
      var playlist = segmentInfo.playlist;
      var timingInfo = void 0;

      // A `map` (init segment) indicates fmp4; otherwise assume mpeg2-ts.
      if (segment.map) {
        timingInfo = this.probeMp4Segment_(segmentInfo);
      } else {
        timingInfo = this.probeTsSegment_(segmentInfo);
      }

      if (timingInfo) {
        if (this.calculateSegmentTimeMapping_(segmentInfo, timingInfo)) {
          this.saveDiscontinuitySyncInfo_(segmentInfo); // If the playlist does not have sync information yet, record that information
          // now with segment timing information

          if (!playlist.syncInfo) {
            playlist.syncInfo = {
              mediaSequence: playlist.mediaSequence + segmentInfo.mediaIndex,
              time: segment.start
            };
          }
        }
      }

      return timingInfo;
    }
    /**
     * Probe an fmp4 segment to determine the start of the segment
     * in it's internal "media time".
     *
     * @private
     * @param {SegmentInfo} segmentInfo - The current active request information
     * @return {object} The start and end time of the current segment in "media time"
     */

  }, {
    key: 'probeMp4Segment_',
    value: function probeMp4Segment_(segmentInfo) {
      var segment = segmentInfo.segment;
      var timescales = probe.timescale(segment.map.bytes);
      var startTime = probe.startTime(timescales, segmentInfo.bytes);

      if (segmentInfo.timestampOffset !== null) {
        segmentInfo.timestampOffset -= startTime;
      }

      // End time is approximated from the playlist-declared duration since
      // fmp4 probing here only yields the start time.
      return {
        start: startTime,
        end: startTime + segment.duration
      };
    }
    /**
     * Probe an mpeg2-ts segment to determine the start and end of the segment
     * in it's internal "media time".
     *
     * @private
     * @param {SegmentInfo} segmentInfo - The current active request information
     * @return {object|null} The start and end time of the current segment in
     *         "media time", or null when inspection failed
     */

  }, {
    key: 'probeTsSegment_',
    value: function probeTsSegment_(segmentInfo) {
      var timeInfo = tsprobe(segmentInfo.bytes, this.inspectCache_);
      var segmentStartTime = void 0;
      var segmentEndTime = void 0;

      if (!timeInfo) {
        return null;
      }

      // Prefer video timing when both first/last video timestamps are
      // present; otherwise fall back to audio timing.
      if (timeInfo.video && timeInfo.video.length === 2) {
        this.inspectCache_ = timeInfo.video[1].dts;
        segmentStartTime = timeInfo.video[0].dtsTime;
        segmentEndTime = timeInfo.video[1].dtsTime;
      } else if (timeInfo.audio && timeInfo.audio.length === 2) {
        this.inspectCache_ = timeInfo.audio[1].dts;
        segmentStartTime = timeInfo.audio[0].dtsTime;
        segmentEndTime = timeInfo.audio[1].dtsTime;
      }

      var probedInfo = {
        start: segmentStartTime,
        end: segmentEndTime,
        containsVideo: timeInfo.video && timeInfo.video.length === 2,
        containsAudio: timeInfo.audio && timeInfo.audio.length === 2
      };
      return probedInfo;
    }
    /**
     * Look up the timestamp offset recorded for a timeline.
     *
     * @param {Number} timeline - the timeline number
     * @returns {Number|null} the recorded offset, or null if unknown
     */

  }, {
    key: 'timestampOffsetForTimeline',
    value: function timestampOffsetForTimeline(timeline) {
      if (typeof this.timelines[timeline] === 'undefined') {
        return null;
      }

      return this.timelines[timeline].time;
    }
    /**
     * Look up the media-time-to-display-time mapping for a timeline.
     *
     * @param {Number} timeline - the timeline number
     * @returns {Number|null} the recorded mapping, or null if unknown
     */

  }, {
    key: 'mappingForTimeline',
    value: function mappingForTimeline(timeline) {
      if (typeof this.timelines[timeline] === 'undefined') {
        return null;
      }

      return this.timelines[timeline].mapping;
    }
    /**
     * Use the "media time" for a segment to generate a mapping to "display time" and
     * save that display time to the segment.
     *
     * @private
     * @param {SegmentInfo} segmentInfo
     *        The current active request information
     * @param {object} timingInfo
     *        The start and end time of the current segment in "media time"
     * @returns {Boolean}
     *          Returns false if segment time mapping could not be calculated
     */

  }, {
    key: 'calculateSegmentTimeMapping_',
    value: function calculateSegmentTimeMapping_(segmentInfo, timingInfo) {
      var segment = segmentInfo.segment;
      var mappingObj = this.timelines[segmentInfo.timeline];

      if (segmentInfo.timestampOffset !== null) {
        // A non-null timestampOffset marks the start of a new timeline;
        // establish a fresh mapping anchored at this segment.
        mappingObj = {
          time: segmentInfo.startOfSegment,
          mapping: segmentInfo.startOfSegment - timingInfo.start
        };
        this.timelines[segmentInfo.timeline] = mappingObj;
        this.trigger('timestampoffset');
        this.logger_('time mapping for timeline ' + segmentInfo.timeline + ': ' + ('[time: ' + mappingObj.time + '] [mapping: ' + mappingObj.mapping + ']'));
        segment.start = segmentInfo.startOfSegment;
        segment.end = timingInfo.end + mappingObj.mapping;
      } else if (mappingObj) {
        // Reuse the existing mapping for this timeline.
        segment.start = timingInfo.start + mappingObj.mapping;
        segment.end = timingInfo.end + mappingObj.mapping;
      } else {
        return false;
      }

      return true;
    }
    /**
     * Each time we have discontinuity in the playlist, attempt to calculate the location
     * in display of the start of the discontinuity and save that. We also save an accuracy
     * value so that we save values with the most accuracy (closest to 0.)
     *
     * @private
     * @param {SegmentInfo} segmentInfo - The current active request information
     */

  }, {
    key: 'saveDiscontinuitySyncInfo_',
    value: function saveDiscontinuitySyncInfo_(segmentInfo) {
      var playlist = segmentInfo.playlist;
      var segment = segmentInfo.segment; // If the current segment is a discontinuity then we know exactly where
      // the start of the range and it's accuracy is 0 (greater accuracy values
      // mean more approximation)

      if (segment.discontinuity) {
        this.discontinuities[segment.timeline] = {
          time: segment.start,
          accuracy: 0
        };
      } else if (playlist.discontinuityStarts && playlist.discontinuityStarts.length) {
        // Search for future discontinuities that we can provide better timing
        // information for and save that information for sync purposes
        for (var i = 0; i < playlist.discontinuityStarts.length; i++) {
          var segmentIndex = playlist.discontinuityStarts[i];
          var discontinuity = playlist.discontinuitySequence + i + 1;
          var mediaIndexDiff = segmentIndex - segmentInfo.mediaIndex;
          var accuracy = Math.abs(mediaIndexDiff);

          // Only overwrite an existing estimate with a more accurate one
          // (accuracy is the segment distance between the probe and the
          // discontinuity; smaller is better).
          if (!this.discontinuities[discontinuity] || this.discontinuities[discontinuity].accuracy > accuracy) {
            var time = void 0;

            if (mediaIndexDiff < 0) {
              time = segment.start - sumDurations(playlist, segmentInfo.mediaIndex, segmentIndex);
            } else {
              time = segment.end + sumDurations(playlist, segmentInfo.mediaIndex + 1, segmentIndex);
            }

            this.discontinuities[discontinuity] = {
              time: time,
              accuracy: accuracy
            };
          }
        }
      }
    }
  }]);
  return SyncController;
}(videojs$1.EventTarget);
|
|
|
var Decrypter$1 = new shimWorker("./decrypter-worker.worker.js", function (window, document$$1) { |
|
var self = this; |
|
|
|
var decrypterWorker = function () { |
|
/* |
|
* pkcs7.pad |
|
* https://github.com/brightcove/pkcs7 |
|
* |
|
* Copyright (c) 2014 Brightcove |
|
* Licensed under the apache2 license. |
|
*/ |
|
|
|
/** |
|
* Returns the subarray of a Uint8Array without PKCS#7 padding. |
|
* @param padded {Uint8Array} unencrypted bytes that have been padded |
|
* @return {Uint8Array} the unpadded bytes |
|
* @see http://tools.ietf.org/html/rfc5652 |
|
*/ |
|
/**
 * Returns the subarray of a Uint8Array without PKCS#7 padding.
 * The final byte of a PKCS#7-padded buffer encodes the pad length.
 *
 * @param padded {Uint8Array} unencrypted bytes that have been padded
 * @return {Uint8Array} the unpadded bytes
 * @see http://tools.ietf.org/html/rfc5652
 */
function unpad(padded) {
  var totalLength = padded.byteLength;
  var padLength = padded[totalLength - 1];

  return padded.subarray(0, totalLength - padLength);
}
|
|
|
// Babel-style runtime guard: throw when a class constructor is invoked
// without `new` (i.e. `instance` is not an instance of `Constructor`).
var classCallCheck = function classCallCheck(instance, Constructor) {
  var calledWithNew = instance instanceof Constructor;

  if (calledWithNew) {
    return;
  }

  throw new TypeError("Cannot call a class as a function");
};
|
|
|
// Babel-style runtime helper: installs prototype and static members on a
// constructor from arrays of property descriptors. Descriptors default to
// non-enumerable/configurable, and value-descriptors become writable.
var createClass = function () {
  var defineAll = function (target, props) {
    props.forEach(function (descriptor) {
      descriptor.enumerable = descriptor.enumerable || false;
      descriptor.configurable = true;

      if ("value" in descriptor) {
        descriptor.writable = true;
      }

      Object.defineProperty(target, descriptor.key, descriptor);
    });
  };

  return function (Constructor, protoProps, staticProps) {
    if (protoProps) {
      defineAll(Constructor.prototype, protoProps);
    }

    if (staticProps) {
      defineAll(Constructor, staticProps);
    }

    return Constructor;
  };
}();
|
|
|
// Babel-style runtime helper: wires up prototype inheritance between two
// constructors and links the constructors themselves so static members are
// inherited as well.
var inherits = function inherits(subClass, superClass) {
  var superIsValid = typeof superClass === "function" || superClass === null;

  if (!superIsValid) {
    throw new TypeError("Super expression must either be null or a function, not " + typeof superClass);
  }

  subClass.prototype = Object.create(superClass && superClass.prototype, {
    constructor: {
      value: subClass,
      enumerable: false,
      writable: true,
      configurable: true
    }
  });

  if (superClass) {
    if (Object.setPrototypeOf) {
      Object.setPrototypeOf(subClass, superClass);
    } else {
      subClass.__proto__ = superClass;
    }
  }
};
|
|
|
// Babel-style runtime helper: implements the ES spec rule that a derived
// constructor may return an object/function override; otherwise `this` is
// used. Throws if `this` was never initialised (super() not called).
var possibleConstructorReturn = function possibleConstructorReturn(self, call) {
  if (!self) {
    throw new ReferenceError("this hasn't been initialised - super() hasn't been called");
  }

  var callType = typeof call;
  var isOverride = call && (callType === "object" || callType === "function");

  return isOverride ? call : self;
};
|
/** |
|
* @file aes.js |
|
* |
|
* This file contains an adaptation of the AES decryption algorithm |
|
* from the Standford Javascript Cryptography Library. That work is |
|
* covered by the following copyright and permissions notice: |
|
* |
|
* Copyright 2009-2010 Emily Stark, Mike Hamburg, Dan Boneh. |
|
* All rights reserved. |
|
* |
|
* Redistribution and use in source and binary forms, with or without |
|
* modification, are permitted provided that the following conditions are |
|
* met: |
|
* |
|
* 1. Redistributions of source code must retain the above copyright |
|
* notice, this list of conditions and the following disclaimer. |
|
* |
|
* 2. Redistributions in binary form must reproduce the above |
|
* copyright notice, this list of conditions and the following |
|
* disclaimer in the documentation and/or other materials provided |
|
* with the distribution. |
|
* |
|
* THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR |
|
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED |
|
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE |
|
* DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> OR CONTRIBUTORS BE |
|
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
|
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
|
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR |
|
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, |
|
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE |
|
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN |
|
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
|
* |
|
* The views and conclusions contained in the software and documentation |
|
* are those of the authors and should not be interpreted as representing |
|
* official policies, either expressed or implied, of the authors. |
|
*/ |
|
|
|
/** |
|
* Expand the S-box tables. |
|
* |
|
* @private |
|
*/ |
|
|
|
|
|
var precompute = function precompute() {
  // tables[0] = encryption tables, tables[1] = decryption tables; each holds
  // four MixColumns-expanded sub-tables plus the (inverse) S-box at index 4.
  var tables = [[[], [], [], [], []], [[], [], [], [], []]];
  var encTable = tables[0];
  var decTable = tables[1];
  var sbox = encTable[4];
  var sboxInv = decTable[4];
  var i = void 0;
  var x = void 0;
  var xInv = void 0;
  var d = []; // doubling table over GF(2^8)
  var th = []; // "third" (inverse-of-triple) lookup table
  var x2 = void 0;
  var x4 = void 0;
  var x8 = void 0;
  var s = void 0;
  var tEnc = void 0;
  var tDec = void 0; // Compute double and third tables

  for (i = 0; i < 256; i++) {
    th[(d[i] = i << 1 ^ (i >> 7) * 283) ^ i] = i;
  }

  // Walk the field elements (x advances by xtime, xInv by its inverse) until
  // every S-box entry has been filled.
  for (x = xInv = 0; !sbox[x]; x ^= x2 || 1, xInv = th[xInv] || 1) {
    // Compute sbox
    s = xInv ^ xInv << 1 ^ xInv << 2 ^ xInv << 3 ^ xInv << 4;
    s = s >> 8 ^ s & 255 ^ 99;
    sbox[x] = s;
    sboxInv[s] = x; // Compute MixColumns

    x8 = d[x4 = d[x2 = d[x]]];
    tDec = x8 * 0x1010101 ^ x4 * 0x10001 ^ x2 * 0x101 ^ x * 0x1010100;
    tEnc = d[s] * 0x101 ^ s * 0x1010100;

    // Each of the four sub-tables is a byte-rotation of the previous one.
    for (i = 0; i < 4; i++) {
      encTable[i][x] = tEnc = tEnc << 24 ^ tEnc >>> 8;
      decTable[i][s] = tDec = tDec << 24 ^ tDec >>> 8;
    }
  } // Compactify. Considerable speedup on Firefox.

  for (i = 0; i < 5; i++) {
    encTable[i] = encTable[i].slice(0);
    decTable[i] = decTable[i].slice(0);
  }

  return tables;
};

// Lazily-initialized singleton of the precomputed tables; populated by the
// AES constructor on first use.
var aesTables = null;
|
/** |
|
* Schedule out an AES key for both encryption and decryption. This |
|
* is a low-level class. Use a cipher mode to do bulk encryption. |
|
* |
|
* @class AES |
|
* @param key {Array} The key as an array of 4, 6 or 8 words. |
|
*/ |
|
|
|
var AES = function () {
  function AES(key) {
    classCallCheck(this, AES);
    /**
     * The expanded S-box and inverse S-box tables. These will be computed
     * on the client so that we don't have to send them down the wire.
     *
     * There are two tables, _tables[0] is for encryption and
     * _tables[1] is for decryption.
     *
     * The first 4 sub-tables are the expanded S-box with MixColumns. The
     * last (_tables[01][4]) is the S-box itself.
     *
     * @private
     */
    // if we have yet to precompute the S-box tables
    // do so now

    if (!aesTables) {
      aesTables = precompute();
    } // then make a copy of that object for use

    this._tables = [[aesTables[0][0].slice(), aesTables[0][1].slice(), aesTables[0][2].slice(), aesTables[0][3].slice(), aesTables[0][4].slice()], [aesTables[1][0].slice(), aesTables[1][1].slice(), aesTables[1][2].slice(), aesTables[1][3].slice(), aesTables[1][4].slice()]];
    var i = void 0;
    var j = void 0;
    var tmp = void 0;
    var encKey = void 0;
    var decKey = void 0;
    var sbox = this._tables[0][4];
    var decTable = this._tables[1];
    var keyLen = key.length;
    var rcon = 1; // AES round-constant, advanced by xtime each key-schedule round

    // 4/6/8 32-bit words correspond to AES-128/-192/-256.
    if (keyLen !== 4 && keyLen !== 6 && keyLen !== 8) {
      throw new Error('Invalid aes key size');
    }

    encKey = key.slice(0);
    decKey = [];
    this._key = [encKey, decKey]; // schedule encryption keys

    for (i = keyLen; i < 4 * keyLen + 28; i++) {
      tmp = encKey[i - 1]; // apply sbox

      if (i % keyLen === 0 || keyLen === 8 && i % keyLen === 4) {
        tmp = sbox[tmp >>> 24] << 24 ^ sbox[tmp >> 16 & 255] << 16 ^ sbox[tmp >> 8 & 255] << 8 ^ sbox[tmp & 255]; // shift rows and add rcon

        if (i % keyLen === 0) {
          tmp = tmp << 8 ^ tmp >>> 24 ^ rcon << 24;
          rcon = rcon << 1 ^ (rcon >> 7) * 283;
        }
      }

      encKey[i] = encKey[i - keyLen] ^ tmp;
    } // schedule decryption keys

    // The decryption schedule is the encryption schedule reversed, with
    // InvMixColumns applied to the inner-round words.
    for (j = 0; i; j++, i--) {
      tmp = encKey[j & 3 ? i : i - 4];

      if (i <= 4 || j < 4) {
        decKey[j] = tmp;
      } else {
        decKey[j] = decTable[0][sbox[tmp >>> 24]] ^ decTable[1][sbox[tmp >> 16 & 255]] ^ decTable[2][sbox[tmp >> 8 & 255]] ^ decTable[3][sbox[tmp & 255]];
      }
    }
  }
  /**
   * Decrypt 16 bytes, specified as four 32-bit words.
   *
   * @param {Number} encrypted0 the first word to decrypt
   * @param {Number} encrypted1 the second word to decrypt
   * @param {Number} encrypted2 the third word to decrypt
   * @param {Number} encrypted3 the fourth word to decrypt
   * @param {Int32Array} out the array to write the decrypted words
   * into
   * @param {Number} offset the offset into the output array to start
   * writing results
   * @return {Array} The plaintext.
   */

  AES.prototype.decrypt = function decrypt$$1(encrypted0, encrypted1, encrypted2, encrypted3, out, offset) {
    var key = this._key[1]; // state variables a,b,c,d are loaded with pre-whitened data

    var a = encrypted0 ^ key[0];
    var b = encrypted3 ^ key[1];
    var c = encrypted2 ^ key[2];
    var d = encrypted1 ^ key[3];
    var a2 = void 0;
    var b2 = void 0;
    var c2 = void 0; // key.length === 2 ?

    var nInnerRounds = key.length / 4 - 2;
    var i = void 0;
    var kIndex = 4;
    var table = this._tables[1]; // load up the tables

    var table0 = table[0];
    var table1 = table[1];
    var table2 = table[2];
    var table3 = table[3];
    var sbox = table[4]; // Inner rounds. Cribbed from OpenSSL.

    for (i = 0; i < nInnerRounds; i++) {
      a2 = table0[a >>> 24] ^ table1[b >> 16 & 255] ^ table2[c >> 8 & 255] ^ table3[d & 255] ^ key[kIndex];
      b2 = table0[b >>> 24] ^ table1[c >> 16 & 255] ^ table2[d >> 8 & 255] ^ table3[a & 255] ^ key[kIndex + 1];
      c2 = table0[c >>> 24] ^ table1[d >> 16 & 255] ^ table2[a >> 8 & 255] ^ table3[b & 255] ^ key[kIndex + 2];
      d = table0[d >>> 24] ^ table1[a >> 16 & 255] ^ table2[b >> 8 & 255] ^ table3[c & 255] ^ key[kIndex + 3];
      kIndex += 4;
      a = a2;
      b = b2;
      c = c2;
    } // Last round.

    // The final round uses the raw inverse S-box (no MixColumns) and rotates
    // the state words into the output.
    for (i = 0; i < 4; i++) {
      out[(3 & -i) + offset] = sbox[a >>> 24] << 24 ^ sbox[b >> 16 & 255] << 16 ^ sbox[c >> 8 & 255] << 8 ^ sbox[d & 255] ^ key[kIndex++];
      a2 = a;
      a = b;
      b = c;
      c = d;
      d = a2;
    }
  };

  return AES;
}();
|
/** |
|
* @file stream.js |
|
*/ |
|
|
|
/** |
|
* A lightweight readable stream implemention that handles event dispatching. |
|
* |
|
* @class Stream |
|
*/ |
|
|
|
|
|
var Stream = function () {
  function Stream() {
    // Inline class-call guard (identical behavior to the shared
    // `classCallCheck` helper): fail loudly when called without `new`.
    if (!(this instanceof Stream)) {
      throw new TypeError("Cannot call a class as a function");
    }

    // Map of event type -> array of listener callbacks.
    this.listeners = {};
  }
  /**
   * Add a listener for a specified event type.
   *
   * @param {String} type the event name
   * @param {Function} listener the callback to be invoked when an event of
   * the specified type occurs
   */


  Stream.prototype.on = function on(type, listener) {
    if (!this.listeners[type]) {
      this.listeners[type] = [];
    }

    this.listeners[type].push(listener);
  };
  /**
   * Remove a listener for a specified event type.
   *
   * @param {String} type the event name
   * @param {Function} listener a function previously registered for this
   * type of event through `on`
   * @return {Boolean} if we could turn it off or not
   */


  Stream.prototype.off = function off(type, listener) {
    if (!this.listeners[type]) {
      return false;
    }

    var index = this.listeners[type].indexOf(listener);

    // BUGFIX: only splice when the listener was actually found. Previously an
    // unconditional splice(index, 1) with index === -1 would remove the LAST
    // registered listener whenever an unregistered listener was passed.
    if (index > -1) {
      this.listeners[type].splice(index, 1);
    }

    return index > -1;
  };
  /**
   * Trigger an event of the specified type on this stream. Any additional
   * arguments to this function are passed as parameters to event listeners.
   *
   * @param {String} type the event name
   */


  Stream.prototype.trigger = function trigger(type) {
    var callbacks = this.listeners[type];

    if (!callbacks) {
      return;
    } // Slicing the arguments on every invocation of this method
    // can add a significant amount of overhead. Avoid the
    // intermediate object creation for the common case of a
    // single callback argument


    if (arguments.length === 2) {
      var length = callbacks.length;

      for (var i = 0; i < length; ++i) {
        callbacks[i].call(this, arguments[1]);
      }
    } else {
      var args = Array.prototype.slice.call(arguments, 1);
      var _length = callbacks.length;

      for (var _i = 0; _i < _length; ++_i) {
        callbacks[_i].apply(this, args);
      }
    }
  };
  /**
   * Destroys the stream and cleans up.
   */


  Stream.prototype.dispose = function dispose() {
    this.listeners = {};
  };
  /**
   * Forwards all `data` events on this stream to the destination stream. The
   * destination stream should provide a method `push` to receive the data
   * events as they arrive.
   *
   * @param {Stream} destination the stream that will receive all `data` events
   * @see http://nodejs.org/api/stream.html#stream_readable_pipe_destination_options
   */


  Stream.prototype.pipe = function pipe(destination) {
    this.on('data', function (data) {
      destination.push(data);
    });
  };

  return Stream;
}();
|
/** |
|
* @file async-stream.js |
|
*/ |
|
|
|
/** |
|
 * A wrapper around the Stream class to use setTimeout
|
 * and run stream "jobs" asynchronously
|
* |
|
* @class AsyncStream |
|
* @extends Stream |
|
*/ |
|
|
|
|
|
var AsyncStream$$1 = function (_Stream) {
  inherits(AsyncStream$$1, _Stream);

  function AsyncStream$$1() {
    classCallCheck(this, AsyncStream$$1);

    var self = possibleConstructorReturn(this, _Stream.call(this, Stream));

    // FIFO queue of pending jobs, the delay (ms) between job runs, and the
    // handle of the currently scheduled timer (null while idle).
    self.jobs = [];
    self.delay = 1;
    self.timeout_ = null;
    return self;
  }
  /**
   * Run the next queued job and, when more remain, schedule the following
   * one; otherwise mark the stream idle.
   *
   * @private
   */


  AsyncStream$$1.prototype.processJob_ = function processJob_() {
    var job = this.jobs.shift();

    job();

    this.timeout_ = this.jobs.length ? setTimeout(this.processJob_.bind(this), this.delay) : null;
  };
  /**
   * Queue a job, kicking off asynchronous processing if the stream is
   * currently idle.
   *
   * @param {Function} job the job to push into the stream
   */


  AsyncStream$$1.prototype.push = function push(job) {
    this.jobs.push(job);

    if (this.timeout_ === null) {
      this.timeout_ = setTimeout(this.processJob_.bind(this), this.delay);
    }
  };

  return AsyncStream$$1;
}(Stream);
|
/** |
|
* @file decrypter.js |
|
* |
|
* An asynchronous implementation of AES-128 CBC decryption with |
|
* PKCS#7 padding. |
|
*/ |
|
|
|
/** |
|
* Convert network-order (big-endian) bytes into their little-endian |
|
* representation. |
|
*/ |
|
|
|
|
|
/**
 * Reverse the four bytes of a 32-bit word, converting between network
 * (big-endian) and little-endian representations.
 *
 * @param {Number} word the 32-bit value to byte-swap
 * @return {Number} the byte-swapped value, as a signed 32-bit integer
 */
var ntoh = function ntoh(word) {
  var byte0 = word << 24;
  var byte1 = (word & 0xff00) << 8;
  var byte2 = (word & 0xff0000) >> 8;
  var byte3 = word >>> 24;

  return byte0 | byte1 | byte2 | byte3;
};
|
/** |
|
* Decrypt bytes using AES-128 with CBC and PKCS#7 padding. |
|
* |
|
* @param {Uint8Array} encrypted the encrypted bytes |
|
* @param {Uint32Array} key the bytes of the decryption key |
|
* @param {Uint32Array} initVector the initialization vector (IV) to |
|
* use for the first round of CBC. |
|
* @return {Uint8Array} the decrypted bytes |
|
* |
|
* @see http://en.wikipedia.org/wiki/Advanced_Encryption_Standard |
|
* @see http://en.wikipedia.org/wiki/Block_cipher_mode_of_operation#Cipher_Block_Chaining_.28CBC.29 |
|
* @see https://tools.ietf.org/html/rfc2315 |
|
*/ |
|
|
|
|
|
var decrypt$$1 = function decrypt$$1(encrypted, key, initVector) {
  // word-level access to the encrypted bytes. NOTE(review): `byteLength >> 2`
  // truncates to whole 32-bit words and the loop below steps 4 words at a
  // time, so the input is assumed to be a multiple of 16 bytes (the AES block
  // size) — confirm against callers.
  var encrypted32 = new Int32Array(encrypted.buffer, encrypted.byteOffset, encrypted.byteLength >> 2);
  var decipher = new AES(Array.prototype.slice.call(key)); // byte and word-level access for the decrypted output

  var decrypted = new Uint8Array(encrypted.byteLength);
  var decrypted32 = new Int32Array(decrypted.buffer); // temporary variables for working with the IV, encrypted, and
  // decrypted data

  var init0 = void 0;
  var init1 = void 0;
  var init2 = void 0;
  var init3 = void 0;
  var encrypted0 = void 0;
  var encrypted1 = void 0;
  var encrypted2 = void 0;
  var encrypted3 = void 0; // iteration variable

  var wordIx = void 0; // pull out the words of the IV to ensure we don't modify the
  // passed-in reference and easier access

  init0 = initVector[0];
  init1 = initVector[1];
  init2 = initVector[2];
  init3 = initVector[3]; // decrypt four word sequences, applying cipher-block chaining (CBC)
  // to each decrypted block

  for (wordIx = 0; wordIx < encrypted32.length; wordIx += 4) {
    // convert big-endian (network order) words into little-endian
    // (javascript order)
    encrypted0 = ntoh(encrypted32[wordIx]);
    encrypted1 = ntoh(encrypted32[wordIx + 1]);
    encrypted2 = ntoh(encrypted32[wordIx + 2]);
    encrypted3 = ntoh(encrypted32[wordIx + 3]); // decrypt the block

    decipher.decrypt(encrypted0, encrypted1, encrypted2, encrypted3, decrypted32, wordIx); // XOR with the IV, and restore network byte-order to obtain the
    // plaintext

    decrypted32[wordIx] = ntoh(decrypted32[wordIx] ^ init0);
    decrypted32[wordIx + 1] = ntoh(decrypted32[wordIx + 1] ^ init1);
    decrypted32[wordIx + 2] = ntoh(decrypted32[wordIx + 2] ^ init2);
    decrypted32[wordIx + 3] = ntoh(decrypted32[wordIx + 3] ^ init3); // setup the IV for the next round: in CBC the next block's IV is the
    // current block's *ciphertext* words

    init0 = encrypted0;
    init1 = encrypted1;
    init2 = encrypted2;
    init3 = encrypted3;
  }

  return decrypted;
};
|
/** |
|
* The `Decrypter` class that manages decryption of AES |
|
* data through `AsyncStream` objects and the `decrypt` |
|
* function |
|
* |
|
* @param {Uint8Array} encrypted the encrypted bytes |
|
* @param {Uint32Array} key the bytes of the decryption key |
|
* @param {Uint32Array} initVector the initialization vector (IV) to |
|
* @param {Function} done the function to run when done |
|
* @class Decrypter |
|
*/ |
|
|
|
|
|
var Decrypter$$1 = function () {
  function Decrypter$$1(encrypted, key, initVector, done) {
    classCallCheck(this, Decrypter$$1);
    var step = Decrypter$$1.STEP; // NOTE(review): views the whole backing buffer from offset 0, so `encrypted`
    // is assumed not to be a view into a larger buffer — confirm against callers.

    var encrypted32 = new Int32Array(encrypted.buffer);
    var decrypted = new Uint8Array(encrypted.byteLength);
    var i = 0;
    this.asyncStream_ = new AsyncStream$$1(); // split up the encryption job and do the individual chunks asynchronously

    this.asyncStream_.push(this.decryptChunk_(encrypted32.subarray(i, i + step), key, initVector, decrypted));

    for (i = step; i < encrypted32.length; i += step) {
      // each chunk after the first uses the last four ciphertext words (16
      // bytes) of the previous chunk as its IV, continuing the CBC chain
      initVector = new Uint32Array([ntoh(encrypted32[i - 4]), ntoh(encrypted32[i - 3]), ntoh(encrypted32[i - 2]), ntoh(encrypted32[i - 1])]);
      this.asyncStream_.push(this.decryptChunk_(encrypted32.subarray(i, i + step), key, initVector, decrypted));
    } // invoke the done() callback when everything is finished


    this.asyncStream_.push(function () {
      // remove pkcs#7 padding from the decrypted bytes
      done(null, unpad(decrypted));
    });
  }
  /**
   * a getter for step, the maximum amount of data to process at one time;
   * measured in 32-bit words (32000 words = 128000 bytes), since it indexes
   * the Int32Array view above
   *
   * @return {Number} the value of step, 32000
   */

  /**
   * Returns a job (closure) that decrypts one chunk into the shared output
   * buffer at the chunk's own byte offset.
   *
   * @private
   */


  Decrypter$$1.prototype.decryptChunk_ = function decryptChunk_(encrypted, key, initVector, decrypted) {
    return function () {
      var bytes = decrypt$$1(encrypted, key, initVector);
      decrypted.set(bytes, encrypted.byteOffset);
    };
  };

  createClass(Decrypter$$1, null, [{
    key: 'STEP',
    get: function get$$1() {
      // 4 * 8000;
      return 32000;
    }
  }]);
  return Decrypter$$1;
}();
|
/** |
|
* @file bin-utils.js |
|
*/ |
|
|
|
/** |
|
* Creates an object for sending to a web worker modifying properties that are TypedArrays |
|
 * into a new object with separated properties for the buffer, byteOffset, and byteLength.
|
* |
|
* @param {Object} message |
|
* Object of properties and values to send to the web worker |
|
* @return {Object} |
|
* Modified message with TypedArray values expanded |
|
* @function createTransferableMessage |
|
*/ |
|
|
|
|
|
/**
 * Creates an object for sending to a web worker, expanding any properties
 * that are TypedArrays into plain objects carrying the underlying buffer,
 * byteOffset, and byteLength.
 *
 * @param {Object} message
 *        Object of properties and values to send to the web worker
 * @return {Object}
 *         Modified message with TypedArray values expanded
 * @function createTransferableMessage
 */
var createTransferableMessage = function createTransferableMessage(message) {
  return Object.keys(message).reduce(function (transferable, key) {
    var value = message[key];

    transferable[key] = ArrayBuffer.isView(value) ? {
      bytes: value.buffer,
      byteOffset: value.byteOffset,
      byteLength: value.byteLength
    } : value;

    return transferable;
  }, {});
};
|
/** |
|
* Our web worker interface so that things can talk to aes-decrypter |
|
* that will be running in a web worker. the scope is passed to this by |
|
* webworkify. |
|
* |
|
* @param {Object} self |
|
* the scope for the web worker |
|
*/ |
|
|
|
|
|
/**
 * Our web worker interface so that things can talk to aes-decrypter
 * that will be running in a web worker. The scope is passed to this by
 * webworkify.
 *
 * @param {Object} self
 *        the scope for the web worker
 */
var DecrypterWorker = function DecrypterWorker(self) {
  self.onmessage = function (event) {
    var payload = event.data; // rebuild typed-array views over the transferred buffers

    var view8 = function view8(part) {
      return new Uint8Array(part.bytes, part.byteOffset, part.byteLength);
    };

    var view32 = function view32(part) {
      return new Uint32Array(part.bytes, part.byteOffset, part.byteLength / 4);
    };
    /* eslint-disable no-new, handle-callback-err */


    new Decrypter$$1(view8(payload.encrypted), view32(payload.key), view32(payload.iv), function (err, bytes) {
      self.postMessage(createTransferableMessage({
        source: payload.source,
        decrypted: bytes
      }), [bytes.buffer]);
    });
    /* eslint-enable */
  };
};
|
|
|
var decrypterWorker = new DecrypterWorker(self); |
|
return decrypterWorker; |
|
}(); |
|
}); |
|
/** |
|
* Convert the properties of an HLS track into an audioTrackKind. |
|
* |
|
* @private |
|
*/ |
|
|
|
/**
 * Convert the properties of an HLS track into an audioTrackKind.
 *
 * @private
 */
var audioTrackKind_ = function audioTrackKind_(properties) {
  var describesVideo = properties.characteristics && properties.characteristics.indexOf('public.accessibility.describes-video') >= 0;

  if (describesVideo) {
    // accessibility description tracks always report as 'main-desc'
    return 'main-desc';
  }

  return properties.default ? 'main' : 'alternative';
};
|
/** |
|
* Pause provided segment loader and playlist loader if active |
|
* |
|
* @param {SegmentLoader} segmentLoader |
|
* SegmentLoader to pause |
|
* @param {Object} mediaType |
|
* Active media type |
|
* @function stopLoaders |
|
*/ |
|
|
|
|
|
/**
 * Pause provided segment loader and playlist loader if active
 *
 * @param {SegmentLoader} segmentLoader
 *        SegmentLoader to pause
 * @param {Object} mediaType
 *        Active media type
 * @function stopLoaders
 */
var stopLoaders = function stopLoaders(segmentLoader, mediaType) {
  segmentLoader.abort();
  segmentLoader.pause();

  var playlistLoader = mediaType && mediaType.activePlaylistLoader;

  if (playlistLoader) {
    playlistLoader.pause();
    mediaType.activePlaylistLoader = null;
  }
};
|
/** |
|
* Start loading provided segment loader and playlist loader |
|
* |
|
* @param {PlaylistLoader} playlistLoader |
|
* PlaylistLoader to start loading |
|
* @param {Object} mediaType |
|
* Active media type |
|
* @function startLoaders |
|
*/ |
|
|
|
|
|
var startLoaders = function startLoaders(playlistLoader, mediaType) {
  // Segment loader will be started after `loadedmetadata` or `loadedplaylist` from the
  // playlist loader, so it is enough to record the active loader and kick it
  // off here. Record before calling load() so any synchronously-fired events
  // see the correct active loader.
  mediaType.activePlaylistLoader = playlistLoader;
  playlistLoader.load();
};
|
/** |
|
* Returns a function to be called when the media group changes. It performs a |
|
* non-destructive (preserve the buffer) resync of the SegmentLoader. This is because a |
|
* change of group is merely a rendition switch of the same content at another encoding, |
|
* rather than a change of content, such as switching audio from English to Spanish. |
|
* |
|
* @param {String} type |
|
* MediaGroup type |
|
* @param {Object} settings |
|
* Object containing required information for media groups |
|
* @return {Function} |
|
* Handler for a non-destructive resync of SegmentLoader when the active media |
|
* group changes. |
|
* @function onGroupChanged |
|
*/ |
|
|
|
|
|
/**
 * Returns a function to be called when the media group changes. It performs a
 * non-destructive (preserve the buffer) resync of the SegmentLoader, because a
 * change of group is merely a rendition switch of the same content at another
 * encoding, rather than a change of content, such as switching audio from
 * English to Spanish.
 *
 * @param {String} type
 *        MediaGroup type
 * @param {Object} settings
 *        Object containing required information for media groups
 * @return {Function}
 *         Handler for a non-destructive resync of SegmentLoader when the
 *         active media group changes.
 * @function onGroupChanged
 */
var onGroupChanged = function onGroupChanged(type, settings) {
  return function () {
    var segmentLoader = settings.segmentLoaders[type];
    var mainSegmentLoader = settings.segmentLoaders.main;
    var mediaType = settings.mediaTypes[type];
    var activeTrack = mediaType.activeTrack();
    var activeGroup = mediaType.activeGroup(activeTrack);
    var previousActiveLoader = mediaType.activePlaylistLoader;

    stopLoaders(segmentLoader, mediaType);

    if (!activeGroup) {
      // there is no group active
      return;
    }

    if (!activeGroup.playlistLoader) {
      if (previousActiveLoader) {
        // The previous group had a playlist loader but the new active group does
        // not: we are switching from demuxed to muxed audio. Do a destructive
        // reset of the main segment loader and do not restart the audio loaders.
        mainSegmentLoader.resetEverything();
      }

      return;
    } // Non-destructive resync


    segmentLoader.resyncLoader();
    startLoaders(activeGroup.playlistLoader, mediaType);
  };
};
|
/** |
|
* Returns a function to be called when the media track changes. It performs a |
|
* destructive reset of the SegmentLoader to ensure we start loading as close to |
|
* currentTime as possible. |
|
* |
|
* @param {String} type |
|
* MediaGroup type |
|
* @param {Object} settings |
|
* Object containing required information for media groups |
|
* @return {Function} |
|
* Handler for a destructive reset of SegmentLoader when the active media |
|
* track changes. |
|
* @function onTrackChanged |
|
*/ |
|
|
|
|
|
/**
 * Returns a function to be called when the media track changes. It performs a
 * destructive reset of the SegmentLoader to ensure we start loading as close
 * to currentTime as possible.
 *
 * @param {String} type
 *        MediaGroup type
 * @param {Object} settings
 *        Object containing required information for media groups
 * @return {Function}
 *         Handler for a destructive reset of SegmentLoader when the active
 *         media track changes.
 * @function onTrackChanged
 */
var onTrackChanged = function onTrackChanged(type, settings) {
  return function () {
    var segmentLoader = settings.segmentLoaders[type];
    var mainSegmentLoader = settings.segmentLoaders.main;
    var mediaType = settings.mediaTypes[type];
    var activeTrack = mediaType.activeTrack();
    var activeGroup = mediaType.activeGroup(activeTrack);
    var previousActiveLoader = mediaType.activePlaylistLoader;

    stopLoaders(segmentLoader, mediaType);

    if (!activeGroup) {
      // there is no group active so we do not want to restart loaders
      return;
    }

    if (!activeGroup.playlistLoader) {
      // when switching from demuxed audio/video to muxed audio/video (noted by no
      // playlist loader for the audio group), destructively reset the main segment
      // loader and do not restart the audio loaders
      mainSegmentLoader.resetEverything();
      return;
    }

    if (previousActiveLoader === activeGroup.playlistLoader) {
      // Nothing has actually changed. This can happen because track change events
      // can fire multiple times for a "single" change: once for enabling the new
      // active track and once for disabling the track that was active
      startLoaders(activeGroup.playlistLoader, mediaType);
      return;
    }

    if (segmentLoader.track) {
      // For WebVTT, set the new text track in the segmentloader
      segmentLoader.track(activeTrack);
    } // destructive reset


    segmentLoader.resetEverything();
    startLoaders(activeGroup.playlistLoader, mediaType);
  };
};
|
|
|
var onError = {
  /**
   * Returns a function to be called when a SegmentLoader or PlaylistLoader encounters
   * an error.
   *
   * @param {String} type
   *        MediaGroup type
   * @param {Object} settings
   *        Object containing required information for media groups
   * @return {Function}
   *         Error handler. Logs warning (or error if the playlist is blacklisted) to
   *         console and switches back to default audio track.
   * @function onError.AUDIO
   */
  AUDIO: function AUDIO(type, settings) {
    return function () {
      var segmentLoader = settings.segmentLoaders[type],
          mediaType = settings.mediaTypes[type],
          blacklistCurrentPlaylist = settings.blacklistCurrentPlaylist;
      stopLoaders(segmentLoader, mediaType); // switch back to default audio track

      var activeTrack = mediaType.activeTrack();
      var activeGroup = mediaType.activeGroup(); // prefer the variant flagged as default; fall back to the first variant

      var id = (activeGroup.filter(function (group) {
        return group.default;
      })[0] || activeGroup[0]).id;
      var defaultTrack = mediaType.tracks[id];

      if (activeTrack === defaultTrack) {
        // Default track encountered an error. All we can do now is blacklist the current
        // rendition and hope another will switch audio groups
        blacklistCurrentPlaylist({
          message: 'Problem encountered loading the default audio track.'
        });
        return;
      } // Fixed: the two fragments were previously joined without a separating
      // space, producing "track.Switching" in the log output.


      videojs$1.log.warn('Problem encountered loading the alternate audio track. ' + 'Switching back to default.');

      for (var trackId in mediaType.tracks) {
        // enable only the default track; disable every other one
        mediaType.tracks[trackId].enabled = mediaType.tracks[trackId] === defaultTrack;
      }

      mediaType.onTrackChanged();
    };
  },

  /**
   * Returns a function to be called when a SegmentLoader or PlaylistLoader encounters
   * an error.
   *
   * @param {String} type
   *        MediaGroup type
   * @param {Object} settings
   *        Object containing required information for media groups
   * @return {Function}
   *         Error handler. Logs warning to console and disables the active subtitle track
   * @function onError.SUBTITLES
   */
  SUBTITLES: function SUBTITLES(type, settings) {
    return function () {
      var segmentLoader = settings.segmentLoaders[type],
          mediaType = settings.mediaTypes[type]; // Fixed: added the missing space between the two message fragments.

      videojs$1.log.warn('Problem encountered loading the subtitle track. ' + 'Disabling subtitle track.');
      stopLoaders(segmentLoader, mediaType);
      var track = mediaType.activeTrack();

      if (track) {
        track.mode = 'disabled';
      }

      mediaType.onTrackChanged();
    };
  }
};
|
var setupListeners = {
  /**
   * Setup event listeners for audio playlist loader
   *
   * @param {String} type
   *        MediaGroup type
   * @param {PlaylistLoader|null} playlistLoader
   *        PlaylistLoader to register listeners on
   * @param {Object} settings
   *        Object containing required information for media groups
   * @function setupListeners.AUDIO
   */
  AUDIO: function AUDIO(type, playlistLoader, settings) {
    if (!playlistLoader) {
      // no playlist loader means audio will be muxed with the video
      return;
    }

    var tech = settings.tech,
        requestOptions = settings.requestOptions,
        segmentLoader = settings.segmentLoaders[type];
    playlistLoader.on('loadedmetadata', function () {
      var media = playlistLoader.media();
      segmentLoader.playlist(media, requestOptions); // if the video is already playing, or if this isn't a live video and preload
      // permits, start downloading segments

      if (!tech.paused() || media.endList && tech.preload() !== 'none') {
        segmentLoader.load();
      }
    });
    playlistLoader.on('loadedplaylist', function () {
      segmentLoader.playlist(playlistLoader.media(), requestOptions); // If the player isn't paused, ensure that the segment loader is running

      if (!tech.paused()) {
        segmentLoader.load();
      }
    });
    playlistLoader.on('error', onError[type](type, settings));
  },

  /**
   * Setup event listeners for subtitle playlist loader
   *
   * @param {String} type
   *        MediaGroup type
   * @param {PlaylistLoader|null} playlistLoader
   *        PlaylistLoader to register listeners on
   * @param {Object} settings
   *        Object containing required information for media groups
   * @function setupListeners.SUBTITLES
   */
  SUBTITLES: function SUBTITLES(type, playlistLoader, settings) {
    var tech = settings.tech,
        requestOptions = settings.requestOptions,
        segmentLoader = settings.segmentLoaders[type],
        mediaType = settings.mediaTypes[type];
    playlistLoader.on('loadedmetadata', function () {
      var media = playlistLoader.media();
      segmentLoader.playlist(media, requestOptions); // unlike AUDIO, the subtitle segment loader also needs to know which
      // text track is currently active

      segmentLoader.track(mediaType.activeTrack()); // if the video is already playing, or if this isn't a live video and preload
      // permits, start downloading segments

      if (!tech.paused() || media.endList && tech.preload() !== 'none') {
        segmentLoader.load();
      }
    });
    playlistLoader.on('loadedplaylist', function () {
      segmentLoader.playlist(playlistLoader.media(), requestOptions); // If the player isn't paused, ensure that the segment loader is running

      if (!tech.paused()) {
        segmentLoader.load();
      }
    });
    playlistLoader.on('error', onError[type](type, settings));
  }
};
|
|
|
// Builds a predicate that matches playlists whose media-group attribute of
// the given type (e.g. 'AUDIO') equals the provided group ID.
var byGroupId = function byGroupId(type, groupId) {
  return function (playlist) {
    var attributes = playlist.attributes;

    return attributes[type] === groupId;
  };
};
|
|
|
// Builds a predicate that matches playlists with the given resolved URI.
var byResolvedUri = function byResolvedUri(resolvedUri) {
  return function (playlist) {
    return resolvedUri === playlist.resolvedUri;
  };
};
|
|
|
var initialize = {
  /**
   * Setup PlaylistLoaders and AudioTracks for the audio groups
   *
   * @param {String} type
   *        MediaGroup type
   * @param {Object} settings
   *        Object containing required information for media groups
   * @function initialize.AUDIO
   */
  'AUDIO': function AUDIO(type, settings) {
    var hls = settings.hls,
        sourceType = settings.sourceType,
        segmentLoader = settings.segmentLoaders[type],
        requestOptions = settings.requestOptions,
        _settings$master = settings.master,
        mediaGroups = _settings$master.mediaGroups,
        playlists = _settings$master.playlists,
        _settings$mediaTypes$ = settings.mediaTypes[type],
        groups = _settings$mediaTypes$.groups,
        tracks = _settings$mediaTypes$.tracks,
        masterPlaylistLoader = settings.masterPlaylistLoader; // force a default if we have none
    // NOTE: this mutates the parsed master manifest in place

    if (!mediaGroups[type] || Object.keys(mediaGroups[type]).length === 0) {
      mediaGroups[type] = {
        main: {
          default: {
            default: true
          }
        }
      };
    }

    for (var groupId in mediaGroups[type]) {
      if (!groups[groupId]) {
        groups[groupId] = [];
      } // List of playlists that have an AUDIO attribute value matching the current
      // group ID


      var groupPlaylists = playlists.filter(byGroupId(type, groupId));

      for (var variantLabel in mediaGroups[type][groupId]) {
        var properties = mediaGroups[type][groupId][variantLabel]; // List of playlists for the current group ID that have a matching uri with
        // this alternate audio variant

        var matchingPlaylists = groupPlaylists.filter(byResolvedUri(properties.resolvedUri));

        if (matchingPlaylists.length) {
          // If there is a playlist that has the same uri as this audio variant, assume
          // that the playlist is audio only. We delete the resolvedUri property here
          // to prevent a playlist loader from being created so that we don't have
          // both the main and audio segment loaders loading the same audio segments
          // from the same playlist. (This also mutates the master manifest object.)
          delete properties.resolvedUri;
        }

        var playlistLoader = void 0;

        if (properties.resolvedUri) {
          playlistLoader = new PlaylistLoader(properties.resolvedUri, hls, requestOptions);
        } else if (properties.playlists && sourceType === 'dash') {
          playlistLoader = new DashPlaylistLoader(properties.playlists[0], hls, requestOptions, masterPlaylistLoader);
        } else {
          // no resolvedUri means the audio is muxed with the video when using this
          // audio track
          playlistLoader = null;
        }

        properties = videojs$1.mergeOptions({
          id: variantLabel,
          playlistLoader: playlistLoader
        }, properties);
        setupListeners[type](type, properties.playlistLoader, settings);
        groups[groupId].push(properties);

        if (typeof tracks[variantLabel] === 'undefined') {
          // one AudioTrack per variant label, shared across groups
          var track = new videojs$1.AudioTrack({
            id: variantLabel,
            kind: audioTrackKind_(properties),
            enabled: false,
            language: properties.language,
            default: properties.default,
            label: variantLabel
          });
          tracks[variantLabel] = track;
        }
      }
    } // setup single error event handler for the segment loader


    segmentLoader.on('error', onError[type](type, settings));
  },

  /**
   * Setup PlaylistLoaders and TextTracks for the subtitle groups
   *
   * @param {String} type
   *        MediaGroup type
   * @param {Object} settings
   *        Object containing required information for media groups
   * @function initialize.SUBTITLES
   */
  'SUBTITLES': function SUBTITLES(type, settings) {
    var tech = settings.tech,
        hls = settings.hls,
        sourceType = settings.sourceType,
        segmentLoader = settings.segmentLoaders[type],
        requestOptions = settings.requestOptions,
        mediaGroups = settings.master.mediaGroups,
        _settings$mediaTypes$2 = settings.mediaTypes[type],
        groups = _settings$mediaTypes$2.groups,
        tracks = _settings$mediaTypes$2.tracks,
        masterPlaylistLoader = settings.masterPlaylistLoader;

    for (var groupId in mediaGroups[type]) {
      if (!groups[groupId]) {
        groups[groupId] = [];
      }

      for (var variantLabel in mediaGroups[type][groupId]) {
        if (mediaGroups[type][groupId][variantLabel].forced) {
          // Subtitle playlists with the forced attribute are not selectable in Safari.
          // According to Apple's HLS Authoring Specification:
          //   If content has forced subtitles and regular subtitles in a given language,
          //   the regular subtitles track in that language MUST contain both the forced
          //   subtitles and the regular subtitles for that language.
          // Because of this requirement and that Safari does not add forced subtitles,
          // forced subtitles are skipped here to maintain consistent experience across
          // all platforms
          continue;
        }

        var properties = mediaGroups[type][groupId][variantLabel];
        var playlistLoader = void 0;

        if (sourceType === 'hls') {
          playlistLoader = new PlaylistLoader(properties.resolvedUri, hls, requestOptions);
        } else if (sourceType === 'dash') {
          playlistLoader = new DashPlaylistLoader(properties.playlists[0], hls, requestOptions, masterPlaylistLoader);
        }
        // NOTE(review): for any other sourceType, playlistLoader stays
        // undefined and is passed through as-is — confirm intended

        properties = videojs$1.mergeOptions({
          id: variantLabel,
          playlistLoader: playlistLoader
        }, properties);
        setupListeners[type](type, properties.playlistLoader, settings);
        groups[groupId].push(properties);

        if (typeof tracks[variantLabel] === 'undefined') {
          var track = tech.addRemoteTextTrack({
            id: variantLabel,
            kind: 'subtitles',
            default: properties.default && properties.autoselect,
            language: properties.language,
            label: variantLabel
          }, false).track;
          tracks[variantLabel] = track;
        }
      }
    } // setup single error event handler for the segment loader


    segmentLoader.on('error', onError[type](type, settings));
  },

  /**
   * Setup TextTracks for the closed-caption groups
   *
   * @param {String} type
   *        MediaGroup type
   * @param {Object} settings
   *        Object containing required information for media groups
   * @function initialize['CLOSED-CAPTIONS']
   */
  'CLOSED-CAPTIONS': function CLOSEDCAPTIONS(type, settings) {
    var tech = settings.tech,
        mediaGroups = settings.master.mediaGroups,
        _settings$mediaTypes$3 = settings.mediaTypes[type],
        groups = _settings$mediaTypes$3.groups,
        tracks = _settings$mediaTypes$3.tracks;

    for (var groupId in mediaGroups[type]) {
      if (!groups[groupId]) {
        groups[groupId] = [];
      }

      for (var variantLabel in mediaGroups[type][groupId]) {
        var properties = mediaGroups[type][groupId][variantLabel]; // We only support CEA608 captions for now, so ignore anything that
        // doesn't use a CCx INSTREAM-ID

        if (!properties.instreamId.match(/CC\d/)) {
          continue;
        } // No PlaylistLoader is required for Closed-Captions because the captions are
        // embedded within the video stream


        groups[groupId].push(videojs$1.mergeOptions({
          id: variantLabel
        }, properties));

        if (typeof tracks[variantLabel] === 'undefined') {
          var track = tech.addRemoteTextTrack({
            id: properties.instreamId,
            kind: 'captions',
            default: properties.default && properties.autoselect,
            language: properties.language,
            label: variantLabel
          }, false).track;
          tracks[variantLabel] = track;
        }
      }
    }
  }
};
|
/** |
|
* Returns a function used to get the active group of the provided type |
|
* |
|
* @param {String} type |
|
* MediaGroup type |
|
* @param {Object} settings |
|
* Object containing required information for media groups |
|
* @return {Function} |
|
* Function that returns the active media group for the provided type. Takes an |
|
* optional parameter {TextTrack} track. If no track is provided, a list of all |
|
* variants in the group, otherwise the variant corresponding to the provided |
|
* track is returned. |
|
* @function activeGroup |
|
*/ |
|
|
|
/**
 * Returns a function used to get the active group of the provided type
 *
 * @param {String} type
 *        MediaGroup type
 * @param {Object} settings
 *        Object containing required information for media groups
 * @return {Function}
 *         Function that returns the active media group for the provided type.
 *         Takes an optional parameter {TextTrack} track. If no track is
 *         provided, a list of all variants in the group; otherwise the variant
 *         corresponding to the provided track is returned.
 * @function activeGroup
 */
var activeGroup = function activeGroup(type, settings) {
  return function (track) {
    var masterPlaylistLoader = settings.masterPlaylistLoader;
    var groups = settings.mediaTypes[type].groups;
    var media = masterPlaylistLoader.media();

    if (!media) {
      return null;
    } // pick the group named by the media's attribute, falling back to 'main'


    var variants = groups.main;

    if (media.attributes[type] && groups[media.attributes[type]]) {
      variants = groups[media.attributes[type]];
    }

    if (typeof track === 'undefined') {
      return variants;
    }

    if (track === null) {
      // An active track was specified so a corresponding group is expected.
      // track === null means no track is currently active, so there is no
      // corresponding group
      return null;
    }

    var matches = variants.filter(function (props) {
      return props.id === track.id;
    });

    return matches[0] || null;
  };
};
|
|
|
var activeTrack = {
  /**
   * Returns a function used to get the active track of type provided
   *
   * @param {String} type
   *        MediaGroup type
   * @param {Object} settings
   *        Object containing required information for media groups
   * @return {Function}
   *         Function that returns the active media track for the provided
   *         type. Returns null if no track is active
   * @function activeTrack.AUDIO
   */
  AUDIO: function AUDIO(type, settings) {
    return function () {
      var tracks = settings.mediaTypes[type].tracks;
      var ids = Object.keys(tracks);

      // an audio track is active when its `enabled` flag is set
      for (var i = 0; i < ids.length; i++) {
        if (tracks[ids[i]].enabled) {
          return tracks[ids[i]];
        }
      }

      return null;
    };
  },

  /**
   * Returns a function used to get the active track of type provided
   *
   * @param {String} type
   *        MediaGroup type
   * @param {Object} settings
   *        Object containing required information for media groups
   * @return {Function}
   *         Function that returns the active media track for the provided
   *         type. Returns null if no track is active
   * @function activeTrack.SUBTITLES
   */
  SUBTITLES: function SUBTITLES(type, settings) {
    return function () {
      var tracks = settings.mediaTypes[type].tracks;
      var ids = Object.keys(tracks);

      // a subtitle track is active when its mode is 'showing'
      for (var i = 0; i < ids.length; i++) {
        if (tracks[ids[i]].mode === 'showing') {
          return tracks[ids[i]];
        }
      }

      return null;
    };
  }
};
|
/** |
|
* Setup PlaylistLoaders and Tracks for media groups (Audio, Subtitles, |
|
* Closed-Captions) specified in the master manifest. |
|
* |
|
* @param {Object} settings |
|
* Object containing required information for setting up the media groups |
|
* @param {SegmentLoader} settings.segmentLoaders.AUDIO |
|
* Audio segment loader |
|
* @param {SegmentLoader} settings.segmentLoaders.SUBTITLES |
|
* Subtitle segment loader |
|
* @param {SegmentLoader} settings.segmentLoaders.main |
|
* Main segment loader |
|
* @param {Tech} settings.tech |
|
* The tech of the player |
|
* @param {Object} settings.requestOptions |
|
* XHR request options used by the segment loaders |
|
* @param {PlaylistLoader} settings.masterPlaylistLoader |
|
* PlaylistLoader for the master source |
|
* @param {HlsHandler} settings.hls |
|
* HLS SourceHandler |
|
* @param {Object} settings.master |
|
* The parsed master manifest |
|
* @param {Object} settings.mediaTypes |
|
* Object to store the loaders, tracks, and utility methods for each media type |
|
* @param {Function} settings.blacklistCurrentPlaylist |
|
* Blacklists the current rendition and forces a rendition switch. |
|
* @function setupMediaGroups |
|
*/ |
|
|
|
var setupMediaGroups = function setupMediaGroups(settings) {
  // run the per-type initializer for every media group type
  ['AUDIO', 'SUBTITLES', 'CLOSED-CAPTIONS'].forEach(function (type) {
    initialize[type](type, settings);
  });
  var mediaTypes = settings.mediaTypes,
      masterPlaylistLoader = settings.masterPlaylistLoader,
      tech = settings.tech,
      hls = settings.hls;

  // setup active group and track getters and change event handlers
  ['AUDIO', 'SUBTITLES'].forEach(function (type) {
    mediaTypes[type].activeGroup = activeGroup(type, settings);
    mediaTypes[type].activeTrack = activeTrack[type](type, settings);
    mediaTypes[type].onGroupChanged = onGroupChanged(type, settings);
    mediaTypes[type].onTrackChanged = onTrackChanged(type, settings);
  });

  // DO NOT enable the default subtitle or caption track.
  // DO enable the default audio track
  var audioGroup = mediaTypes.AUDIO.activeGroup();
  // prefer the group flagged `default`, otherwise fall back to the first one
  var groupId = (audioGroup.filter(function (group) {
    return group.default;
  })[0] || audioGroup[0]).id;
  mediaTypes.AUDIO.tracks[groupId].enabled = true;
  mediaTypes.AUDIO.onTrackChanged();

  // a master rendition change can change which audio/subtitle group applies,
  // so re-evaluate the active group for both types
  masterPlaylistLoader.on('mediachange', function () {
    ['AUDIO', 'SUBTITLES'].forEach(function (type) {
      return mediaTypes[type].onGroupChanged();
    });
  });

  // custom audio track change event handler for usage event
  var onAudioTrackChanged = function onAudioTrackChanged() {
    mediaTypes.AUDIO.onTrackChanged();
    tech.trigger({
      type: 'usage',
      name: 'hls-audio-change'
    });
  };

  tech.audioTracks().addEventListener('change', onAudioTrackChanged);
  tech.remoteTextTracks().addEventListener('change', mediaTypes.SUBTITLES.onTrackChanged);
  // remove the listeners on dispose so they cannot fire after teardown
  hls.on('dispose', function () {
    tech.audioTracks().removeEventListener('change', onAudioTrackChanged);
    tech.remoteTextTracks().removeEventListener('change', mediaTypes.SUBTITLES.onTrackChanged);
  });

  // clear existing audio tracks and add the ones we just created
  tech.clearTracks('audio');

  for (var id in mediaTypes.AUDIO.tracks) {
    tech.audioTracks().addTrack(mediaTypes.AUDIO.tracks[id]);
  }
};
|
/** |
|
* Creates skeleton object used to store the loaders, tracks, and utility methods for each |
|
* media type |
|
* |
|
* @return {Object} |
|
* Object to store the loaders, tracks, and utility methods for each media type |
|
* @function createMediaTypes |
|
*/ |
|
|
|
|
|
var createMediaTypes = function createMediaTypes() {
  // one skeleton entry per media group type; all callbacks start as no-ops
  // until setupMediaGroups replaces them with real implementations
  var mediaTypes = {};
  var groupTypes = ['AUDIO', 'SUBTITLES', 'CLOSED-CAPTIONS'];

  for (var i = 0; i < groupTypes.length; i++) {
    mediaTypes[groupTypes[i]] = {
      groups: {},
      tracks: {},
      activePlaylistLoader: null,
      activeGroup: noop$1,
      activeTrack: noop$1,
      onGroupChanged: noop$1,
      onTrackChanged: noop$1
    };
  }

  return mediaTypes;
};
|
/** |
|
* @file master-playlist-controller.js |
|
*/ |
|
|
|
|
|
// How long (in seconds) a playlist stays blacklisted after a segment request
// is aborted early for bandwidth reasons (see the 'earlyabort' handler).
var ABORT_EARLY_BLACKLIST_SECONDS = 60 * 2;

// Populated with the extern Hls source handler when a
// MasterPlaylistController is constructed.
var Hls = void 0;

// SegmentLoader stats that need to have each loader's
// values summed to calculate the final value
var loaderStats = ['mediaRequests', 'mediaRequestsAborted', 'mediaRequestsTimedout', 'mediaRequestsErrored', 'mediaTransferDuration', 'mediaBytesTransferred'];
|
|
|
/**
 * Sums a SegmentLoader stat across the bound controller's audio and main
 * segment loaders.
 *
 * @param {string} stat - name of the stat property to read from each loader
 * @return {number} combined value from both segment loaders
 */
var sumLoaderStat = function sumLoaderStat(stat) {
  var audioValue = this.audioSegmentLoader_[stat];
  var mainValue = this.mainSegmentLoader_[stat];

  return audioValue + mainValue;
};
|
/** |
|
* the master playlist controller controller all interactons |
|
* between playlists and segmentloaders. At this time this mainly |
|
* involves a master playlist and a series of audio playlists |
|
* if they are available |
|
* |
|
* @class MasterPlaylistController |
|
* @extends videojs.EventTarget |
|
*/ |
|
|
|
|
|
var MasterPlaylistController = function (_videojs$EventTarget) { |
|
inherits$1(MasterPlaylistController, _videojs$EventTarget); |
|
|
|
function MasterPlaylistController(options) {
  classCallCheck$1(this, MasterPlaylistController);

  // transpiled super(): invoke the EventTarget constructor
  var _this = possibleConstructorReturn$1(this, (MasterPlaylistController.__proto__ || Object.getPrototypeOf(MasterPlaylistController)).call(this));

  var url = options.url,
      handleManifestRedirects = options.handleManifestRedirects,
      withCredentials = options.withCredentials,
      tech = options.tech,
      bandwidth = options.bandwidth,
      externHls = options.externHls,
      useCueTags = options.useCueTags,
      blacklistDuration = options.blacklistDuration,
      enableLowInitialPlaylist = options.enableLowInitialPlaylist,
      sourceType = options.sourceType,
      seekTo = options.seekTo;

  if (!url) {
    throw new Error('A non-empty playlist URL is required');
  }

  // stash the extern Hls source handler at module scope so sibling methods
  // (duration, stuckAtPlaylistEnd_, ...) can reach it
  Hls = externHls;
  _this.withCredentials = withCredentials;
  _this.tech_ = tech;
  _this.hls_ = tech.hls;
  _this.seekTo_ = seekTo;
  _this.sourceType_ = sourceType;
  _this.useCueTags_ = useCueTags;
  _this.blacklistDuration = blacklistDuration;
  _this.enableLowInitialPlaylist = enableLowInitialPlaylist;

  if (_this.useCueTags_) {
    // metadata text track used to surface ad cues parsed from the playlist
    _this.cueTagsTrack_ = _this.tech_.addTextTrack('metadata', 'ad-cues');
    _this.cueTagsTrack_.inBandMetadataTrackDispatchType = '';
  }

  // XHR options shared by the playlist and segment loaders; timeout is
  // filled in once a media playlist's target duration is known
  _this.requestOptions_ = {
    withCredentials: withCredentials,
    handleManifestRedirects: handleManifestRedirects,
    timeout: null
  };
  _this.mediaTypes_ = createMediaTypes();
  _this.mediaSource = new videojs$1.MediaSource();

  // load the media source into the player
  _this.mediaSource.addEventListener('sourceopen', _this.handleSourceOpen_.bind(_this));

  _this.seekable_ = videojs$1.createTimeRanges();

  // replaced by a function returning true once the first play is set up
  _this.hasPlayed_ = function () {
    return false;
  };

  _this.syncController_ = new SyncController(options);
  _this.segmentMetadataTrack_ = tech.addRemoteTextTrack({
    kind: 'metadata',
    label: 'segment-metadata'
  }, false).track;
  _this.decrypter_ = new Decrypter$1();
  _this.inbandTextTracks_ = {};

  // settings shared by all three segment loaders
  var segmentLoaderSettings = {
    hls: _this.hls_,
    mediaSource: _this.mediaSource,
    currentTime: _this.tech_.currentTime.bind(_this.tech_),
    seekable: function seekable$$1() {
      return _this.seekable();
    },
    seeking: function seeking() {
      return _this.tech_.seeking();
    },
    duration: function duration$$1() {
      return _this.mediaSource.duration;
    },
    hasPlayed: function hasPlayed() {
      return _this.hasPlayed_();
    },
    goalBufferLength: function goalBufferLength() {
      return _this.goalBufferLength();
    },
    bandwidth: bandwidth,
    syncController: _this.syncController_,
    decrypter: _this.decrypter_,
    sourceType: _this.sourceType_,
    inbandTextTracks: _this.inbandTextTracks_
  };

  // DASH sources get a DashPlaylistLoader; everything else gets the
  // standard (HLS) PlaylistLoader
  _this.masterPlaylistLoader_ = _this.sourceType_ === 'dash' ? new DashPlaylistLoader(url, _this.hls_, _this.requestOptions_) : new PlaylistLoader(url, _this.hls_, _this.requestOptions_);

  _this.setupMasterPlaylistLoaderListeners_();

  // setup segment loaders
  // combined audio/video or just video when alternate audio track is selected
  _this.mainSegmentLoader_ = new SegmentLoader(videojs$1.mergeOptions(segmentLoaderSettings, {
    segmentMetadataTrack: _this.segmentMetadataTrack_,
    loaderType: 'main'
  }), options);

  // alternate audio track
  _this.audioSegmentLoader_ = new SegmentLoader(videojs$1.mergeOptions(segmentLoaderSettings, {
    loaderType: 'audio'
  }), options);
  _this.subtitleSegmentLoader_ = new VTTSegmentLoader(videojs$1.mergeOptions(segmentLoaderSettings, {
    loaderType: 'vtt'
  }), options);

  _this.setupSegmentLoaderListeners_();

  // Create SegmentLoader stat-getters (e.g. this.mediaRequests_()) that sum
  // the audio and main loaders' values
  loaderStats.forEach(function (stat) {
    _this[stat + '_'] = sumLoaderStat.bind(_this, stat);
  });
  _this.logger_ = logger('MPC');

  // kick off the initial master playlist request
  _this.masterPlaylistLoader_.load();

  return _this;
}
|
/** |
|
* Register event handlers on the master playlist loader. A helper |
|
* function for construction time. |
|
* |
|
* @private |
|
*/ |
|
|
|
|
|
createClass$1(MasterPlaylistController, [{ |
|
key: 'setupMasterPlaylistLoaderListeners_', |
|
value: function setupMasterPlaylistLoaderListeners_() {
  var _this2 = this;

  // Fired once the initially selected media playlist has loaded: configure
  // request timeouts and media groups, create source buffers, and announce
  // the initial media selection.
  this.masterPlaylistLoader_.on('loadedmetadata', function () {
    var media = _this2.masterPlaylistLoader_.media();

    // request timeout is 1.5x the segment target duration
    var requestTimeout = media.targetDuration * 1.5 * 1000;

    // If we don't have any more available playlists, we don't want to
    // timeout the request.
    if (isLowestEnabledRendition(_this2.masterPlaylistLoader_.master, _this2.masterPlaylistLoader_.media())) {
      _this2.requestOptions_.timeout = 0;
    } else {
      _this2.requestOptions_.timeout = requestTimeout;
    }

    // if this isn't a live video and preload permits, start
    // downloading segments
    if (media.endList && _this2.tech_.preload() !== 'none') {
      _this2.mainSegmentLoader_.playlist(media, _this2.requestOptions_);

      _this2.mainSegmentLoader_.load();
    }

    setupMediaGroups({
      sourceType: _this2.sourceType_,
      segmentLoaders: {
        AUDIO: _this2.audioSegmentLoader_,
        SUBTITLES: _this2.subtitleSegmentLoader_,
        main: _this2.mainSegmentLoader_
      },
      tech: _this2.tech_,
      requestOptions: _this2.requestOptions_,
      masterPlaylistLoader: _this2.masterPlaylistLoader_,
      hls: _this2.hls_,
      master: _this2.master(),
      mediaTypes: _this2.mediaTypes_,
      blacklistCurrentPlaylist: _this2.blacklistCurrentPlaylist.bind(_this2)
    });

    _this2.triggerPresenceUsage_(_this2.master(), media);

    try {
      _this2.setupSourceBuffers_();
    } catch (e) {
      videojs$1.log.warn('Failed to create SourceBuffers', e);
      return _this2.mediaSource.endOfStream('decode');
    }

    _this2.setupFirstPlay();

    if (!_this2.mediaTypes_.AUDIO.activePlaylistLoader || _this2.mediaTypes_.AUDIO.activePlaylistLoader.media()) {
      _this2.trigger('selectedinitialmedia');
    } else {
      // We must wait for the active audio playlist loader to
      // finish setting up before triggering this event so the
      // representations API and EME setup is correct
      _this2.mediaTypes_.AUDIO.activePlaylistLoader.one('loadedmetadata', function () {
        _this2.trigger('selectedinitialmedia');
      });
    }
  });

  this.masterPlaylistLoader_.on('loadedplaylist', function () {
    var updatedPlaylist = _this2.masterPlaylistLoader_.media();

    if (!updatedPlaylist) {
      // no media playlist selected yet: this was the master manifest loading.
      // blacklist any variants that are not supported by the browser before selecting
      // an initial media as the playlist selectors do not consider browser support
      _this2.excludeUnsupportedVariants_();

      var selectedMedia = void 0;

      if (_this2.enableLowInitialPlaylist) {
        selectedMedia = _this2.selectInitialPlaylist();
      }

      if (!selectedMedia) {
        selectedMedia = _this2.selectPlaylist();
      }

      _this2.initialMedia_ = selectedMedia;

      _this2.masterPlaylistLoader_.media(_this2.initialMedia_);

      return;
    }

    if (_this2.useCueTags_) {
      _this2.updateAdCues_(updatedPlaylist);
    }

    // TODO: Create a new event on the PlaylistLoader that signals
    // that the segments have changed in some way and use that to
    // update the SegmentLoader instead of doing it twice here and
    // on `mediachange`
    _this2.mainSegmentLoader_.playlist(updatedPlaylist, _this2.requestOptions_);

    _this2.updateDuration();

    // If the player isn't paused, ensure that the segment loader is running,
    // as it is possible that it was temporarily stopped while waiting for
    // a playlist (e.g., in case the playlist errored and we re-requested it).
    if (!_this2.tech_.paused()) {
      _this2.mainSegmentLoader_.load();

      if (_this2.audioSegmentLoader_) {
        _this2.audioSegmentLoader_.load();
      }
    }

    if (!updatedPlaylist.endList) {
      // live playlist: publish the seekable range to the media source once
      // one is available
      var addSeekableRange = function addSeekableRange() {
        var seekable$$1 = _this2.seekable();

        if (seekable$$1.length !== 0) {
          _this2.mediaSource.addSeekableRange_(seekable$$1.start(0), seekable$$1.end(0));
        }
      };

      if (_this2.duration() !== Infinity) {
        // wait for durationchange events until the duration reports Infinity
        // (i.e. the media source is treated as live) before adding the range
        var onDurationchange = function onDurationchange() {
          if (_this2.duration() === Infinity) {
            addSeekableRange();
          } else {
            _this2.tech_.one('durationchange', onDurationchange);
          }
        };

        _this2.tech_.one('durationchange', onDurationchange);
      } else {
        addSeekableRange();
      }
    }
  });

  // a playlist load failure blacklists the playlist that failed
  this.masterPlaylistLoader_.on('error', function () {
    _this2.blacklistCurrentPlaylist(_this2.masterPlaylistLoader_.error);
  });

  // stop the main loader while a rendition switch is in flight
  this.masterPlaylistLoader_.on('mediachanging', function () {
    _this2.mainSegmentLoader_.abort();

    _this2.mainSegmentLoader_.pause();
  });

  this.masterPlaylistLoader_.on('mediachange', function () {
    var media = _this2.masterPlaylistLoader_.media();

    var requestTimeout = media.targetDuration * 1.5 * 1000;

    // If we don't have any more available playlists, we don't want to
    // timeout the request.
    if (isLowestEnabledRendition(_this2.masterPlaylistLoader_.master, _this2.masterPlaylistLoader_.media())) {
      _this2.requestOptions_.timeout = 0;
    } else {
      _this2.requestOptions_.timeout = requestTimeout;
    }

    // TODO: Create a new event on the PlaylistLoader that signals
    // that the segments have changed in some way and use that to
    // update the SegmentLoader instead of doing it twice here and
    // on `loadedplaylist`
    _this2.mainSegmentLoader_.playlist(media, _this2.requestOptions_);

    _this2.mainSegmentLoader_.load();

    _this2.tech_.trigger({
      type: 'mediachange',
      bubbles: true
    });
  });

  this.masterPlaylistLoader_.on('playlistunchanged', function () {
    var updatedPlaylist = _this2.masterPlaylistLoader_.media();

    var playlistOutdated = _this2.stuckAtPlaylistEnd_(updatedPlaylist);

    if (playlistOutdated) {
      // Playlist has stopped updating and we're stuck at its end. Try to
      // blacklist it and switch to another playlist in the hope that that
      // one is updating (and give the player a chance to re-adjust to the
      // safe live point).
      _this2.blacklistCurrentPlaylist({
        message: 'Playlist no longer updating.'
      });

      // useful for monitoring QoS
      _this2.tech_.trigger('playliststuck');
    }
  });

  // surface rendition enable/disable toggles as usage events for QoS
  this.masterPlaylistLoader_.on('renditiondisabled', function () {
    _this2.tech_.trigger({
      type: 'usage',
      name: 'hls-rendition-disabled'
    });
  });
  this.masterPlaylistLoader_.on('renditionenabled', function () {
    _this2.tech_.trigger({
      type: 'usage',
      name: 'hls-rendition-enabled'
    });
  });
}
|
/** |
|
* A helper function for triggerring presence usage events once per source |
|
* |
|
* @private |
|
*/ |
|
|
|
}, { |
|
key: 'triggerPresenceUsage_', |
|
value: function triggerPresenceUsage_(master, media) { |
|
var mediaGroups = master.mediaGroups || {}; |
|
var defaultDemuxed = true; |
|
var audioGroupKeys = Object.keys(mediaGroups.AUDIO); |
|
|
|
for (var mediaGroup in mediaGroups.AUDIO) { |
|
for (var label in mediaGroups.AUDIO[mediaGroup]) { |
|
var properties = mediaGroups.AUDIO[mediaGroup][label]; |
|
|
|
if (!properties.uri) { |
|
defaultDemuxed = false; |
|
} |
|
} |
|
} |
|
|
|
if (defaultDemuxed) { |
|
this.tech_.trigger({ |
|
type: 'usage', |
|
name: 'hls-demuxed' |
|
}); |
|
} |
|
|
|
if (Object.keys(mediaGroups.SUBTITLES).length) { |
|
this.tech_.trigger({ |
|
type: 'usage', |
|
name: 'hls-webvtt' |
|
}); |
|
} |
|
|
|
if (Hls.Playlist.isAes(media)) { |
|
this.tech_.trigger({ |
|
type: 'usage', |
|
name: 'hls-aes' |
|
}); |
|
} |
|
|
|
if (Hls.Playlist.isFmp4(media)) { |
|
this.tech_.trigger({ |
|
type: 'usage', |
|
name: 'hls-fmp4' |
|
}); |
|
} |
|
|
|
if (audioGroupKeys.length && Object.keys(mediaGroups.AUDIO[audioGroupKeys[0]]).length > 1) { |
|
this.tech_.trigger({ |
|
type: 'usage', |
|
name: 'hls-alternate-audio' |
|
}); |
|
} |
|
|
|
if (this.useCueTags_) { |
|
this.tech_.trigger({ |
|
type: 'usage', |
|
name: 'hls-playlist-cue-tags' |
|
}); |
|
} |
|
} |
|
/** |
|
* Register event handlers on the segment loaders. A helper function |
|
* for construction time. |
|
* |
|
* @private |
|
*/ |
|
|
|
}, { |
|
key: 'setupSegmentLoaderListeners_', |
|
value: function setupSegmentLoaderListeners_() {
  var _this3 = this;

  // Re-run rendition selection whenever the main loader reports fresh
  // bandwidth information.
  this.mainSegmentLoader_.on('bandwidthupdate', function () {
    var nextPlaylist = _this3.selectPlaylist();

    var currentPlaylist = _this3.masterPlaylistLoader_.media();

    var buffered = _this3.tech_.buffered();

    // seconds of content buffered ahead of the playhead
    var forwardBuffer = buffered.length ? buffered.end(buffered.length - 1) - _this3.tech_.currentTime() : 0;

    var bufferLowWaterLine = _this3.bufferLowWaterLine();

    // If the playlist is live, then we want to not take low water line into account.
    // This is because in LIVE, the player plays 3 segments from the end of the
    // playlist, and if `BUFFER_LOW_WATER_LINE` is greater than the duration available
    // in those segments, a viewer will never experience a rendition upswitch.
    if (!currentPlaylist.endList || // For the same reason as LIVE, we ignore the low water line when the VOD
    // duration is below the max potential low water line
    _this3.duration() < Config.MAX_BUFFER_LOW_WATER_LINE || // we want to switch down to lower resolutions quickly to continue playback, but
    nextPlaylist.attributes.BANDWIDTH < currentPlaylist.attributes.BANDWIDTH || // ensure we have some buffer before we switch up to prevent us running out of
    // buffer while loading a higher rendition.
    forwardBuffer >= bufferLowWaterLine) {
      _this3.masterPlaylistLoader_.media(nextPlaylist);
    }

    _this3.tech_.trigger('bandwidthupdate');
  });
  this.mainSegmentLoader_.on('progress', function () {
    _this3.trigger('progress');
  });
  // a segment load error blacklists the current playlist
  this.mainSegmentLoader_.on('error', function () {
    _this3.blacklistCurrentPlaylist(_this3.mainSegmentLoader_.error());
  });
  this.mainSegmentLoader_.on('syncinfoupdate', function () {
    _this3.onSyncInfoUpdate_();
  });
  this.mainSegmentLoader_.on('timestampoffset', function () {
    _this3.tech_.trigger({
      type: 'usage',
      name: 'hls-timestamp-offset'
    });
  });
  this.audioSegmentLoader_.on('syncinfoupdate', function () {
    _this3.onSyncInfoUpdate_();
  });
  this.mainSegmentLoader_.on('ended', function () {
    _this3.onEndOfStream();
  });
  // a segment request was aborted because it was taking too long; blacklist
  // the playlist for an extended period (ABORT_EARLY_BLACKLIST_SECONDS)
  this.mainSegmentLoader_.on('earlyabort', function () {
    _this3.blacklistCurrentPlaylist({
      message: 'Aborted early because there isn\'t enough bandwidth to complete the ' + 'request without rebuffering.'
    }, ABORT_EARLY_BLACKLIST_SECONDS);
  });
  this.mainSegmentLoader_.on('reseteverything', function () {
    // If playing an MTS stream, a videojs.MediaSource is listening for
    // hls-reset to reset caption parsing state in the transmuxer
    _this3.tech_.trigger('hls-reset');
  });
  this.mainSegmentLoader_.on('segmenttimemapping', function (event) {
    // If playing an MTS stream in html, a videojs.MediaSource is listening for
    // hls-segment-time-mapping update its internal mapping of stream to display time
    _this3.tech_.trigger({
      type: 'hls-segment-time-mapping',
      mapping: event.mapping
    });
  });
  this.audioSegmentLoader_.on('ended', function () {
    _this3.onEndOfStream();
  });
}
|
}, { |
|
key: 'mediaSecondsLoaded_', |
|
value: function mediaSecondsLoaded_() { |
|
return Math.max(this.audioSegmentLoader_.mediaSecondsLoaded + this.mainSegmentLoader_.mediaSecondsLoaded); |
|
} |
|
/** |
|
* Call load on our SegmentLoaders |
|
*/ |
|
|
|
}, { |
|
key: 'load', |
|
value: function load() { |
|
this.mainSegmentLoader_.load(); |
|
|
|
if (this.mediaTypes_.AUDIO.activePlaylistLoader) { |
|
this.audioSegmentLoader_.load(); |
|
} |
|
|
|
if (this.mediaTypes_.SUBTITLES.activePlaylistLoader) { |
|
this.subtitleSegmentLoader_.load(); |
|
} |
|
} |
|
/** |
|
* Re-tune playback quality level for the current player |
|
* conditions without performing destructive actions, like |
|
* removing already buffered content |
|
* |
|
* @private |
|
*/ |
|
|
|
}, { |
|
key: 'smoothQualityChange_', |
|
value: function smoothQualityChange_() { |
|
var media = this.selectPlaylist(); |
|
|
|
if (media !== this.masterPlaylistLoader_.media()) { |
|
this.masterPlaylistLoader_.media(media); |
|
this.mainSegmentLoader_.resetLoader(); // don't need to reset audio as it is reset when media changes |
|
} |
|
} |
|
/** |
|
* Re-tune playback quality level for the current player |
|
* conditions. This method will perform destructive actions like removing |
|
* already buffered content in order to readjust the currently active |
|
* playlist quickly. This is good for manual quality changes |
|
* |
|
* @private |
|
*/ |
|
|
|
}, { |
|
key: 'fastQualityChange_', |
|
value: function fastQualityChange_() {
  var _this4 = this;

  var media = this.selectPlaylist();

  if (media === this.masterPlaylistLoader_.media()) {
    // already playing the selected rendition; nothing to do
    return;
  }

  this.masterPlaylistLoader_.media(media);

  // Delete all buffered data to allow an immediate quality switch, then seek to give
  // the browser a kick to remove any cached frames from the previous rendition (.04 seconds
  // ahead is roughly the minimum that will accomplish this across a variety of content
  // in IE and Edge, but seeking in place is sufficient on all other browsers)
  // Edge/IE bug: https://developer.microsoft.com/en-us/microsoft-edge/platform/issues/14600375/
  // Chrome bug: https://bugs.chromium.org/p/chromium/issues/detail?id=651904
  this.mainSegmentLoader_.resetEverything(function () {
    // Since this is not a typical seek, we avoid the seekTo method which can cause segments
    // from the previously enabled rendition to load before the new playlist has finished loading
    if (videojs$1.browser.IE_VERSION || videojs$1.browser.IS_EDGE) {
      _this4.tech_.setCurrentTime(_this4.tech_.currentTime() + 0.04);
    } else {
      _this4.tech_.setCurrentTime(_this4.tech_.currentTime());
    }
  });

  // don't need to reset audio as it is reset when media changes
}
|
/** |
|
* Begin playback. |
|
*/ |
|
|
|
}, { |
|
key: 'play', |
|
value: function play() { |
|
if (this.setupFirstPlay()) { |
|
return; |
|
} |
|
|
|
if (this.tech_.ended()) { |
|
this.seekTo_(0); |
|
} |
|
|
|
if (this.hasPlayed_()) { |
|
this.load(); |
|
} |
|
|
|
var seekable$$1 = this.tech_.seekable(); // if the viewer has paused and we fell out of the live window, |
|
// seek forward to the live point |
|
|
|
if (this.tech_.duration() === Infinity) { |
|
if (this.tech_.currentTime() < seekable$$1.start(0)) { |
|
return this.seekTo_(seekable$$1.end(seekable$$1.length - 1)); |
|
} |
|
} |
|
} |
|
/** |
|
* Seek to the latest media position if this is a live video and the |
|
* player and video are loaded and initialized. |
|
*/ |
|
|
|
}, { |
|
key: 'setupFirstPlay', |
|
value: function setupFirstPlay() {
  var _this5 = this;

  var media = this.masterPlaylistLoader_.media();

  // Check that everything is ready to begin buffering for the first call to play
  // If 1) there is no active media
  // 2) the player is paused
  // 3) the first play has already been setup
  // then exit early
  if (!media || this.tech_.paused() || this.hasPlayed_()) {
    return false;
  }

  // when the video is a live stream
  if (!media.endList) {
    var seekable$$1 = this.seekable();

    if (!seekable$$1.length) {
      // without a seekable range, the player cannot seek to begin buffering at the live
      // point
      return false;
    }

    if (videojs$1.browser.IE_VERSION && this.tech_.readyState() === 0) {
      // IE11 throws an InvalidStateError if you try to set currentTime while the
      // readyState is 0, so it must be delayed until the tech fires loadedmetadata.
      this.tech_.one('loadedmetadata', function () {
        _this5.trigger('firstplay');

        _this5.seekTo_(seekable$$1.end(0));

        _this5.hasPlayed_ = function () {
          return true;
        };
      });
      return false;
    }

    // trigger firstplay to inform the source handler to ignore the next seek event
    this.trigger('firstplay');

    // seek to the live point
    this.seekTo_(seekable$$1.end(0));
  }

  this.hasPlayed_ = function () {
    return true;
  };

  // we can begin loading now that everything is ready
  this.load();
  return true;
}
|
/** |
|
* handle the sourceopen event on the MediaSource |
|
* |
|
* @private |
|
*/ |
|
|
|
}, { |
|
key: 'handleSourceOpen_', |
|
value: function handleSourceOpen_() { |
|
// Only attempt to create the source buffer if none already exist. |
|
// handleSourceOpen is also called when we are "re-opening" a source buffer |
|
// after `endOfStream` has been called (in response to a seek for instance) |
|
try { |
|
this.setupSourceBuffers_(); |
|
} catch (e) { |
|
videojs$1.log.warn('Failed to create Source Buffers', e); |
|
return this.mediaSource.endOfStream('decode'); |
|
} // if autoplay is enabled, begin playback. This is duplicative of |
|
// code in video.js but is required because play() must be invoked |
|
// *after* the media source has opened. |
|
|
|
|
|
if (this.tech_.autoplay()) { |
|
var playPromise = this.tech_.play(); // Catch/silence error when a pause interrupts a play request |
|
// on browsers which return a promise |
|
|
|
if (typeof playPromise !== 'undefined' && typeof playPromise.then === 'function') { |
|
playPromise.then(null, function (e) {}); |
|
} |
|
} |
|
|
|
this.trigger('sourceopen'); |
|
} |
|
/** |
|
* Calls endOfStream on the media source when all active stream types have called |
|
* endOfStream |
|
* |
|
* @param {string} streamType |
|
* Stream type of the segment loader that called endOfStream |
|
* @private |
|
*/ |
|
|
|
}, { |
|
key: 'onEndOfStream', |
|
value: function onEndOfStream() { |
|
var isEndOfStream = this.mainSegmentLoader_.ended_; |
|
|
|
if (this.mediaTypes_.AUDIO.activePlaylistLoader) { |
|
// if the audio playlist loader exists, then alternate audio is active |
|
if (!this.mainSegmentLoader_.startingMedia_ || this.mainSegmentLoader_.startingMedia_.containsVideo) { |
|
// if we do not know if the main segment loader contains video yet or if we |
|
// definitively know the main segment loader contains video, then we need to wait |
|
// for both main and audio segment loaders to call endOfStream |
|
isEndOfStream = isEndOfStream && this.audioSegmentLoader_.ended_; |
|
} else { |
|
// otherwise just rely on the audio loader |
|
isEndOfStream = this.audioSegmentLoader_.ended_; |
|
} |
|
} |
|
|
|
if (!isEndOfStream) { |
|
return; |
|
} |
|
|
|
this.logger_('calling mediaSource.endOfStream()'); // on chrome calling endOfStream can sometimes cause an exception, |
|
// even when the media source is in a valid state. |
|
|
|
try { |
|
this.mediaSource.endOfStream(); |
|
} catch (e) { |
|
videojs$1.log.warn('Failed to call media source endOfStream', e); |
|
} |
|
} |
|
/** |
|
* Check if a playlist has stopped being updated |
|
* @param {Object} playlist the media playlist object |
|
* @return {boolean} whether the playlist has stopped being updated or not |
|
*/ |
|
|
|
}, { |
|
key: 'stuckAtPlaylistEnd_', |
|
value: function stuckAtPlaylistEnd_(playlist) { |
|
var seekable$$1 = this.seekable(); |
|
|
|
if (!seekable$$1.length) { |
|
// playlist doesn't have enough information to determine whether we are stuck |
|
return false; |
|
} |
|
|
|
var expired = this.syncController_.getExpiredTime(playlist, this.mediaSource.duration); |
|
|
|
if (expired === null) { |
|
return false; |
|
} // does not use the safe live end to calculate playlist end, since we |
|
// don't want to say we are stuck while there is still content |
|
|
|
|
|
var absolutePlaylistEnd = Hls.Playlist.playlistEnd(playlist, expired); |
|
var currentTime = this.tech_.currentTime(); |
|
var buffered = this.tech_.buffered(); |
|
|
|
if (!buffered.length) { |
|
// return true if the playhead reached the absolute end of the playlist |
|
return absolutePlaylistEnd - currentTime <= SAFE_TIME_DELTA; |
|
} |
|
|
|
var bufferedEnd = buffered.end(buffered.length - 1); // return true if there is too little buffer left and buffer has reached absolute |
|
// end of playlist |
|
|
|
return bufferedEnd - currentTime <= SAFE_TIME_DELTA && absolutePlaylistEnd - bufferedEnd <= SAFE_TIME_DELTA; |
|
} |
|
/** |
|
* Blacklists a playlist when an error occurs for a set amount of time |
|
* making it unavailable for selection by the rendition selection algorithm |
|
* and then forces a new playlist (rendition) selection. |
|
* |
|
* @param {Object=} error an optional error that may include the playlist |
|
* to blacklist |
|
* @param {Number=} blacklistDuration an optional number of seconds to blacklist the |
|
* playlist |
|
*/ |
|
|
|
}, { |
|
key: 'blacklistCurrentPlaylist', |
|
value: function blacklistCurrentPlaylist() {
  // optional error object, which may carry the playlist to blacklist
  var error = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : {};
  // optional number of seconds to blacklist the playlist for
  var blacklistDuration = arguments[1];
  var currentPlaylist = void 0;
  var nextPlaylist = void 0;

  // If the `error` was generated by the playlist loader, it will contain
  // the playlist we were trying to load (but failed) and that should be
  // blacklisted instead of the currently selected playlist which is likely
  // out-of-date in this scenario
  currentPlaylist = error.playlist || this.masterPlaylistLoader_.media();
  // precedence: explicit argument, then error-specified duration, then the
  // controller-wide default
  blacklistDuration = blacklistDuration || error.blacklistDuration || this.blacklistDuration;

  // If there is no current playlist, then an error occurred while we were
  // trying to load the master OR while we were disposing of the tech
  if (!currentPlaylist) {
    this.error = error;

    try {
      return this.mediaSource.endOfStream('network');
    } catch (e) {
      // endOfStream can throw depending on media source state; fall back to
      // surfacing the error event
      return this.trigger('error');
    }
  }

  var isFinalRendition = this.masterPlaylistLoader_.master.playlists.filter(isEnabled).length === 1;

  if (isFinalRendition) {
    // Never blacklisting this playlist because it's final rendition
    videojs$1.log.warn('Problem encountered with the current ' + 'HLS playlist. Trying again since it is the final playlist.');
    this.tech_.trigger('retryplaylist');
    return this.masterPlaylistLoader_.load(isFinalRendition);
  }

  // Blacklist this playlist
  currentPlaylist.excludeUntil = Date.now() + blacklistDuration * 1000;
  this.tech_.trigger('blacklistplaylist');
  this.tech_.trigger({
    type: 'usage',
    name: 'hls-rendition-blacklisted'
  });

  // Select a new playlist
  nextPlaylist = this.selectPlaylist();
  videojs$1.log.warn('Problem encountered with the current HLS playlist.' + (error.message ? ' ' + error.message : '') + ' Switching to another playlist.');
  return this.masterPlaylistLoader_.media(nextPlaylist);
}
|
/** |
|
* Pause all segment loaders |
|
*/ |
|
|
|
}, { |
|
key: 'pauseLoading', |
|
value: function pauseLoading() { |
|
this.mainSegmentLoader_.pause(); |
|
|
|
if (this.mediaTypes_.AUDIO.activePlaylistLoader) { |
|
this.audioSegmentLoader_.pause(); |
|
} |
|
|
|
if (this.mediaTypes_.SUBTITLES.activePlaylistLoader) { |
|
this.subtitleSegmentLoader_.pause(); |
|
} |
|
} |
|
/** |
|
* set the current time on all segment loaders |
|
* |
|
* @param {TimeRange} currentTime the current time to set |
|
* @return {TimeRange} the current time |
|
*/ |
|
|
|
}, { |
|
key: 'setCurrentTime', |
|
value: function setCurrentTime(currentTime) {
  // the buffered range (if any) that contains the requested time
  var buffered = findRange(this.tech_.buffered(), currentTime);

  if (!(this.masterPlaylistLoader_ && this.masterPlaylistLoader_.media())) {
    // return immediately if the metadata is not ready yet
    return 0;
  }

  // it's clearly an edge-case but don't thrown an error if asked to
  // seek within an empty playlist
  if (!this.masterPlaylistLoader_.media().segments) {
    return 0;
  }

  // In flash playback, the segment loaders should be reset on every seek, even
  // in buffer seeks. If the seek location is already buffered, continue buffering as
  // usual
  // TODO: redo this comment
  if (buffered && buffered.length) {
    return currentTime;
  }

  // cancel outstanding requests so we begin buffering at the new
  // location
  this.mainSegmentLoader_.resetEverything();
  this.mainSegmentLoader_.abort();

  if (this.mediaTypes_.AUDIO.activePlaylistLoader) {
    this.audioSegmentLoader_.resetEverything();
    this.audioSegmentLoader_.abort();
  }

  if (this.mediaTypes_.SUBTITLES.activePlaylistLoader) {
    this.subtitleSegmentLoader_.resetEverything();
    this.subtitleSegmentLoader_.abort();
  }

  // start segment loader loading in case they are paused
  // NOTE(review): unlike the early-return paths above, this path returns
  // undefined rather than currentTime — confirm callers ignore the return
  // value before changing it.
  this.load();
}
|
/** |
|
* get the current duration |
|
* |
|
* @return {TimeRange} the duration |
|
*/ |
|
|
|
}, { |
|
key: 'duration', |
|
value: function duration$$1() { |
|
if (!this.masterPlaylistLoader_) { |
|
return 0; |
|
} |
|
|
|
if (this.mediaSource) { |
|
return this.mediaSource.duration; |
|
} |
|
|
|
return Hls.Playlist.duration(this.masterPlaylistLoader_.media()); |
|
} |
|
/** |
|
* check the seekable range |
|
* |
|
* @return {TimeRange} the seekable range |
|
*/ |
|
|
|
}, {
  key: 'seekable',

  /**
   * Check the seekable range.
   *
   * @return {TimeRange} the cached seekable range (recomputed by
   * onSyncInfoUpdate_ as sync info arrives)
   */
  value: function seekable$$1() {
    return this.seekable_;
  }
|
}, {
  key: 'onSyncInfoUpdate_',

  /**
   * Recompute the cached seekable range from the main (and, when demuxed
   * audio is active, the audio) playlist. Bails out silently whenever any
   * required piece of timing information is missing; fires
   * 'seekablechanged' only when the range actually moved.
   */
  value: function onSyncInfoUpdate_() {
    var mainSeekable = void 0;
    var audioSeekable = void 0;

    if (!this.masterPlaylistLoader_) {
      return;
    }

    var media = this.masterPlaylistLoader_.media();

    if (!media) {
      return;
    }

    var expired = this.syncController_.getExpiredTime(media, this.mediaSource.duration);

    if (expired === null) {
      // not enough information to update seekable
      return;
    }

    mainSeekable = Hls.Playlist.seekable(media, expired);

    if (mainSeekable.length === 0) {
      return;
    }

    if (this.mediaTypes_.AUDIO.activePlaylistLoader) {
      // when demuxed audio is active, the audio playlist's expired time must
      // be taken into account as well
      media = this.mediaTypes_.AUDIO.activePlaylistLoader.media();
      expired = this.syncController_.getExpiredTime(media, this.mediaSource.duration);

      if (expired === null) {
        return;
      }

      audioSeekable = Hls.Playlist.seekable(media, expired);

      if (audioSeekable.length === 0) {
        return;
      }
    }

    // remember the previous range so we can detect a no-op update below
    var oldEnd = void 0;
    var oldStart = void 0;

    if (this.seekable_ && this.seekable_.length) {
      oldEnd = this.seekable_.end(0);
      oldStart = this.seekable_.start(0);
    }

    if (!audioSeekable) {
      // seekable has been calculated based on buffering video data so it
      // can be returned directly
      this.seekable_ = mainSeekable;
    } else if (audioSeekable.start(0) > mainSeekable.end(0) || mainSeekable.start(0) > audioSeekable.end(0)) {
      // seekables are pretty far off, rely on main
      this.seekable_ = mainSeekable;
    } else {
      // otherwise use the intersection of the main and audio seekable ranges
      this.seekable_ = videojs$1.createTimeRanges([[audioSeekable.start(0) > mainSeekable.start(0) ? audioSeekable.start(0) : mainSeekable.start(0), audioSeekable.end(0) < mainSeekable.end(0) ? audioSeekable.end(0) : mainSeekable.end(0)]]);
    } // seekable is the same as last time

    if (this.seekable_ && this.seekable_.length) {
      if (this.seekable_.end(0) === oldEnd && this.seekable_.start(0) === oldStart) {
        return;
      }
    }

    this.logger_('seekable updated [' + printableRange(this.seekable_) + ']');
    this.tech_.trigger('seekablechanged');
  }
|
/** |
|
* Update the player duration |
|
*/ |
|
|
|
}, {
  key: 'updateDuration',

  /**
   * Update the media source's duration to match the duration of the current
   * media playlist, extended to cover anything already buffered. When the
   * media source is not yet open, the write is deferred until 'sourceopen'.
   */
  value: function updateDuration() {
    var _this6 = this;

    var oldDuration = this.mediaSource.duration;
    var newDuration = Hls.Playlist.duration(this.masterPlaylistLoader_.media());
    var buffered = this.tech_.buffered();

    var setDuration = function setDuration() {
      // on firefox setting the duration may sometimes cause an exception
      // even if the media source is open and source buffers are not
      // updating, something about the media source being in an invalid state.
      _this6.logger_('Setting duration from ' + _this6.mediaSource.duration + ' => ' + newDuration);

      try {
        _this6.mediaSource.duration = newDuration;
      } catch (e) {
        videojs$1.log.warn('Failed to set media source duration', e);
      }

      _this6.tech_.trigger('durationchange');

      // unbind in case this ran as a deferred 'sourceopen' handler
      _this6.mediaSource.removeEventListener('sourceopen', setDuration);
    };

    if (buffered.length > 0) {
      // never report a duration shorter than what has already been buffered
      newDuration = Math.max(newDuration, buffered.end(buffered.length - 1));
    } // if the duration has changed, invalidate the cached value

    if (oldDuration !== newDuration) {
      // update the duration
      if (this.mediaSource.readyState !== 'open') {
        this.mediaSource.addEventListener('sourceopen', setDuration);
      } else {
        setDuration();
      }
    }
  }
|
/** |
|
* dispose of the MasterPlaylistController and everything |
|
* that it controls |
|
*/ |
|
|
|
}, { |
|
key: 'dispose', |
|
value: function dispose() { |
|
var _this7 = this; |
|
|
|
this.decrypter_.terminate(); |
|
this.masterPlaylistLoader_.dispose(); |
|
this.mainSegmentLoader_.dispose(); |
|
['AUDIO', 'SUBTITLES'].forEach(function (type) { |
|
var groups = _this7.mediaTypes_[type].groups; |
|
|
|
for (var id in groups) { |
|
groups[id].forEach(function (group) { |
|
if (group.playlistLoader) { |
|
group.playlistLoader.dispose(); |
|
} |
|
}); |
|
} |
|
}); |
|
this.audioSegmentLoader_.dispose(); |
|
this.subtitleSegmentLoader_.dispose(); |
|
} |
|
/** |
|
* return the master playlist object if we have one |
|
* |
|
* @return {Object} the master playlist object that we parsed |
|
*/ |
|
|
|
}, {
  key: 'master',

  /**
   * Return the master playlist object if we have one.
   *
   * @return {Object} the master playlist object that we parsed
   */
  value: function master() {
    return this.masterPlaylistLoader_.master;
  }
|
/** |
|
* return the currently selected playlist |
|
* |
|
* @return {Object} the currently selected playlist object that we parsed |
|
*/ |
|
|
|
}, {
  key: 'media',

  /**
   * Return the currently selected playlist.
   *
   * @return {Object} the currently selected playlist object that we parsed,
   * falling back to the initial media when the loader is not fully loaded
   */
  value: function media() {
    // playlist loader will not return media if it has not been fully loaded
    return this.masterPlaylistLoader_.media() || this.initialMedia_;
  }
|
/** |
|
* setup our internal source buffers on our segment Loaders |
|
* |
|
* @private |
|
*/ |
|
|
|
}, { |
|
key: 'setupSourceBuffers_', |
|
value: function setupSourceBuffers_() { |
|
var media = this.masterPlaylistLoader_.media(); |
|
var mimeTypes = void 0; // wait until a media playlist is available and the Media Source is |
|
// attached |
|
|
|
if (!media || this.mediaSource.readyState !== 'open') { |
|
return; |
|
} |
|
|
|
mimeTypes = mimeTypesForPlaylist(this.masterPlaylistLoader_.master, media); |
|
|
|
if (mimeTypes.length < 1) { |
|
this.error = 'No compatible SourceBuffer configuration for the variant stream:' + media.resolvedUri; |
|
return this.mediaSource.endOfStream('decode'); |
|
} |
|
|
|
this.configureLoaderMimeTypes_(mimeTypes); // exclude any incompatible variant streams from future playlist |
|
// selection |
|
|
|
this.excludeIncompatibleVariants_(media); |
|
} |
|
}, { |
|
key: 'configureLoaderMimeTypes_', |
|
value: function configureLoaderMimeTypes_(mimeTypes) { |
|
// If the content is demuxed, we can't start appending segments to a source buffer |
|
// until both source buffers are set up, or else the browser may not let us add the |
|
// second source buffer (it will assume we are playing either audio only or video |
|
// only). |
|
var sourceBufferEmitter = // If there is more than one mime type |
|
mimeTypes.length > 1 && // and the first mime type does not have muxed video and audio |
|
mimeTypes[0].indexOf(',') === -1 && // and the two mime types are different (they can be the same in the case of audio |
|
// only with alternate audio) |
|
mimeTypes[0] !== mimeTypes[1] ? // then we want to wait on the second source buffer |
|
new videojs$1.EventTarget() : // otherwise there is no need to wait as the content is either audio only, |
|
// video only, or muxed content. |
|
null; |
|
this.mainSegmentLoader_.mimeType(mimeTypes[0], sourceBufferEmitter); |
|
|
|
if (mimeTypes[1]) { |
|
this.audioSegmentLoader_.mimeType(mimeTypes[1], sourceBufferEmitter); |
|
} |
|
} |
|
/** |
|
* Blacklists playlists with codecs that are unsupported by the browser. |
|
*/ |
|
|
|
}, { |
|
key: 'excludeUnsupportedVariants_', |
|
value: function excludeUnsupportedVariants_() { |
|
this.master().playlists.forEach(function (variant) { |
|
if (variant.attributes.CODECS && window$1.MediaSource && window$1.MediaSource.isTypeSupported && !window$1.MediaSource.isTypeSupported('video/mp4; codecs="' + mapLegacyAvcCodecs(variant.attributes.CODECS) + '"')) { |
|
variant.excludeUntil = Infinity; |
|
} |
|
}); |
|
} |
|
/** |
|
* Blacklist playlists that are known to be codec or |
|
* stream-incompatible with the SourceBuffer configuration. For |
|
* instance, Media Source Extensions would cause the video element to |
|
* stall waiting for video data if you switched from a variant with |
|
* video and audio to an audio-only one. |
|
* |
|
* @param {Object} media a media playlist compatible with the current |
|
* set of SourceBuffers. Variants in the current master playlist that |
|
* do not appear to have compatible codec or stream configurations |
|
* will be excluded from the default playlist selection algorithm |
|
* indefinitely. |
|
* @private |
|
*/ |
|
|
|
}, { |
|
key: 'excludeIncompatibleVariants_', |
|
value: function excludeIncompatibleVariants_(media) { |
|
var codecCount = 2; |
|
var videoCodec = null; |
|
var codecs = void 0; |
|
|
|
if (media.attributes.CODECS) { |
|
codecs = parseCodecs(media.attributes.CODECS); |
|
videoCodec = codecs.videoCodec; |
|
codecCount = codecs.codecCount; |
|
} |
|
|
|
this.master().playlists.forEach(function (variant) { |
|
var variantCodecs = { |
|
codecCount: 2, |
|
videoCodec: null |
|
}; |
|
|
|
if (variant.attributes.CODECS) { |
|
variantCodecs = parseCodecs(variant.attributes.CODECS); |
|
} // if the streams differ in the presence or absence of audio or |
|
// video, they are incompatible |
|
|
|
|
|
if (variantCodecs.codecCount !== codecCount) { |
|
variant.excludeUntil = Infinity; |
|
} // if h.264 is specified on the current playlist, some flavor of |
|
// it must be specified on all compatible variants |
|
|
|
|
|
if (variantCodecs.videoCodec !== videoCodec) { |
|
variant.excludeUntil = Infinity; |
|
} |
|
}); |
|
} |
|
}, { |
|
key: 'updateAdCues_', |
|
value: function updateAdCues_(media) { |
|
var offset = 0; |
|
var seekable$$1 = this.seekable(); |
|
|
|
if (seekable$$1.length) { |
|
offset = seekable$$1.start(0); |
|
} |
|
|
|
updateAdCues(media, this.cueTagsTrack_, offset); |
|
} |
|
/** |
|
* Calculates the desired forward buffer length based on current time |
|
* |
|
* @return {Number} Desired forward buffer length in seconds |
|
*/ |
|
|
|
}, { |
|
key: 'goalBufferLength', |
|
value: function goalBufferLength() { |
|
var currentTime = this.tech_.currentTime(); |
|
var initial = Config.GOAL_BUFFER_LENGTH; |
|
var rate = Config.GOAL_BUFFER_LENGTH_RATE; |
|
var max = Math.max(initial, Config.MAX_GOAL_BUFFER_LENGTH); |
|
return Math.min(initial + currentTime * rate, max); |
|
} |
|
/** |
|
* Calculates the desired buffer low water line based on current time |
|
* |
|
* @return {Number} Desired buffer low water line in seconds |
|
*/ |
|
|
|
}, { |
|
key: 'bufferLowWaterLine', |
|
value: function bufferLowWaterLine() { |
|
var currentTime = this.tech_.currentTime(); |
|
var initial = Config.BUFFER_LOW_WATER_LINE; |
|
var rate = Config.BUFFER_LOW_WATER_LINE_RATE; |
|
var max = Math.max(initial, Config.MAX_BUFFER_LOW_WATER_LINE); |
|
return Math.min(initial + currentTime * rate, max); |
|
} |
|
}]); |
|
return MasterPlaylistController; |
|
}(videojs$1.EventTarget); |
|
/** |
|
* Returns a function that acts as the Enable/disable playlist function. |
|
* |
|
* @param {PlaylistLoader} loader - The master playlist loader |
|
* @param {String} playlistUri - uri of the playlist |
|
* @param {Function} changePlaylistFn - A function to be called after a |
|
* playlist's enabled-state has been changed. Will NOT be called if a |
|
* playlist's enabled-state is unchanged |
|
* @param {Boolean=} enable - Value to set the playlist enabled-state to |
|
* or if undefined returns the current enabled-state for the playlist |
|
* @return {Function} Function for setting/getting enabled |
|
*/ |
|
|
|
|
|
var enableFunction = function enableFunction(loader, playlistUri, changePlaylistFn) {
  return function (enable) {
    var playlist = loader.master.playlists[playlistUri];
    var incompatible = isIncompatible(playlist);
    var currentlyEnabled = isEnabled(playlist);

    // with no argument, act as a getter for the current enabled-state
    if (typeof enable === 'undefined') {
      return currentlyEnabled;
    }

    // act as a setter: a playlist is enabled when its disabled flag is absent
    if (!enable) {
      playlist.disabled = true;
    } else {
      delete playlist.disabled;
    }

    if (enable !== currentlyEnabled && !incompatible) {
      // Ensure the outside world knows about our changes
      changePlaylistFn();
      loader.trigger(enable ? 'renditionenabled' : 'renditiondisabled');
    }

    return enable;
  };
};
|
/** |
|
* The representation object encapsulates the publicly visible information |
|
* in a media playlist along with a setter/getter-type function (enabled) |
|
* for changing the enabled-state of a particular playlist entry |
|
* |
|
* @class Representation |
|
*/ |
|
|
|
|
|
var Representation = function Representation(hlsHandler, playlist, id) {
  classCallCheck$1(this, Representation);

  var mpc = hlsHandler.masterPlaylistController_;
  var smoothQualityChange = hlsHandler.options_.smoothQualityChange;

  // Get a reference to a bound version of the quality change function
  var changeType = smoothQualityChange ? 'smooth' : 'fast';
  var qualityChangeFunction = mpc[changeType + 'QualityChange_'].bind(mpc);

  // some playlist attributes are optional
  var resolution = playlist.attributes.RESOLUTION;

  if (resolution) {
    this.width = resolution.width;
    this.height = resolution.height;
  }

  this.bandwidth = playlist.attributes.BANDWIDTH;

  // The id is simply the ordinality of the media playlist
  // within the master playlist
  this.id = id;

  // Partially-apply the enableFunction to create a playlist-specific variant
  this.enabled = enableFunction(hlsHandler.playlists, playlist.uri, qualityChangeFunction);
};
|
/** |
|
* A mixin function that adds the `representations` api to an instance |
|
* of the HlsHandler class |
|
* @param {HlsHandler} hlsHandler - An instance of HlsHandler to add the |
|
* representation API into |
|
*/ |
|
|
|
|
|
var renditionSelectionMixin = function renditionSelectionMixin(hlsHandler) {
  var playlists = hlsHandler.playlists;

  // Add a single API-specific function to the HlsHandler instance
  hlsHandler.representations = function () {
    // only expose playlists that are compatible with the current configuration
    var compatible = playlists.master.playlists.filter(function (media) {
      return !isIncompatible(media);
    });

    return compatible.map(function (media) {
      return new Representation(hlsHandler, media, media.uri);
    });
  };
};
|
/** |
|
* @file playback-watcher.js |
|
* |
|
* Playback starts, and now my watch begins. It shall not end until my death. I shall |
|
* take no wait, hold no uncleared timeouts, father no bad seeks. I shall wear no crowns |
|
* and win no glory. I shall live and die at my post. I am the corrector of the underflow. |
|
* I am the watcher of gaps. I am the shield that guards the realms of seekable. I pledge |
|
* my life and honor to the Playback Watch, for this Player and all the Players to come. |
|
*/ |
|
// Set of events that reset the playback-watcher time check logic and clear the timeout |
|
|
|
|
|
var timerCancelEvents = ['seeking', 'seeked', 'pause', 'playing', 'error']; |
|
/** |
|
* @class PlaybackWatcher |
|
*/ |
|
|
|
var PlaybackWatcher = function () { |
|
/**
 * Represents a PlaybackWatcher object.
 *
 * @constructor
 * @param {object} options an object that includes the tech and settings
 */
function PlaybackWatcher(options) {
  var _this = this;

  classCallCheck$1(this, PlaybackWatcher);
  this.tech_ = options.tech;
  this.seekable = options.seekable;
  this.seekTo = options.seekTo;
  this.allowSeeksWithinUnsafeLiveWindow = options.allowSeeksWithinUnsafeLiveWindow;
  this.media = options.media;
  // count of consecutive checks that observed the same currentTime
  this.consecutiveUpdates = 0;
  this.lastRecordedTime = null;
  this.timer_ = null;
  this.checkCurrentTimeTimeout_ = null;
  this.logger_ = logger('PlaybackWatcher');
  this.logger_('initialize');

  // handlers are kept in named locals so dispose() below can unbind the
  // exact same function references
  var canPlayHandler = function canPlayHandler() {
    return _this.monitorCurrentTime_();
  };

  var waitingHandler = function waitingHandler() {
    return _this.techWaiting_();
  };

  var cancelTimerHandler = function cancelTimerHandler() {
    return _this.cancelTimer_();
  };

  var fixesBadSeeksHandler = function fixesBadSeeksHandler() {
    return _this.fixesBadSeeks_();
  };

  this.tech_.on('seekablechanged', fixesBadSeeksHandler);
  this.tech_.on('waiting', waitingHandler);
  this.tech_.on(timerCancelEvents, cancelTimerHandler);
  this.tech_.on('canplay', canPlayHandler); // Define the dispose function to clean up our events

  this.dispose = function () {
    _this.logger_('dispose');

    _this.tech_.off('seekablechanged', fixesBadSeeksHandler);

    _this.tech_.off('waiting', waitingHandler);

    _this.tech_.off(timerCancelEvents, cancelTimerHandler);

    _this.tech_.off('canplay', canPlayHandler);

    // stop the periodic currentTime monitor started by canPlayHandler
    if (_this.checkCurrentTimeTimeout_) {
      window$1.clearTimeout(_this.checkCurrentTimeTimeout_);
    }

    _this.cancelTimer_();
  };
}
|
/** |
|
* Periodically check current time to see if playback stopped |
|
* |
|
* @private |
|
*/ |
|
|
|
|
|
createClass$1(PlaybackWatcher, [{ |
|
key: 'monitorCurrentTime_', |
|
value: function monitorCurrentTime_() { |
|
this.checkCurrentTime_(); |
|
|
|
if (this.checkCurrentTimeTimeout_) { |
|
window$1.clearTimeout(this.checkCurrentTimeTimeout_); |
|
} // 42 = 24 fps // 250 is what Webkit uses // FF uses 15 |
|
|
|
|
|
this.checkCurrentTimeTimeout_ = window$1.setTimeout(this.monitorCurrentTime_.bind(this), 250); |
|
} |
|
/** |
|
* The purpose of this function is to emulate the "waiting" event on |
|
* browsers that do not emit it when they are waiting for more |
|
* data to continue playback |
|
* |
|
* @private |
|
*/ |
|
|
|
}, { |
|
key: 'checkCurrentTime_', |
|
value: function checkCurrentTime_() { |
|
if (this.tech_.seeking() && this.fixesBadSeeks_()) { |
|
this.consecutiveUpdates = 0; |
|
this.lastRecordedTime = this.tech_.currentTime(); |
|
return; |
|
} |
|
|
|
if (this.tech_.paused() || this.tech_.seeking()) { |
|
return; |
|
} |
|
|
|
var currentTime = this.tech_.currentTime(); |
|
var buffered = this.tech_.buffered(); |
|
|
|
if (this.lastRecordedTime === currentTime && (!buffered.length || currentTime + SAFE_TIME_DELTA >= buffered.end(buffered.length - 1))) { |
|
// If current time is at the end of the final buffered region, then any playback |
|
// stall is most likely caused by buffering in a low bandwidth environment. The tech |
|
// should fire a `waiting` event in this scenario, but due to browser and tech |
|
// inconsistencies. Calling `techWaiting_` here allows us to simulate |
|
// responding to a native `waiting` event when the tech fails to emit one. |
|
return this.techWaiting_(); |
|
} |
|
|
|
if (this.consecutiveUpdates >= 5 && currentTime === this.lastRecordedTime) { |
|
this.consecutiveUpdates++; |
|
this.waiting_(); |
|
} else if (currentTime === this.lastRecordedTime) { |
|
this.consecutiveUpdates++; |
|
} else { |
|
this.consecutiveUpdates = 0; |
|
this.lastRecordedTime = currentTime; |
|
} |
|
} |
|
/** |
|
* Cancels any pending timers and resets the 'timeupdate' mechanism |
|
* designed to detect that we are stalled |
|
* |
|
* @private |
|
*/ |
|
|
|
}, { |
|
key: 'cancelTimer_', |
|
value: function cancelTimer_() { |
|
this.consecutiveUpdates = 0; |
|
|
|
if (this.timer_) { |
|
this.logger_('cancelTimer_'); |
|
clearTimeout(this.timer_); |
|
} |
|
|
|
this.timer_ = null; |
|
} |
|
/** |
|
* Fixes situations where there's a bad seek |
|
* |
|
* @return {Boolean} whether an action was taken to fix the seek |
|
* @private |
|
*/ |
|
|
|
}, { |
|
key: 'fixesBadSeeks_', |
|
value: function fixesBadSeeks_() { |
|
var seeking = this.tech_.seeking(); |
|
|
|
if (!seeking) { |
|
return false; |
|
} |
|
|
|
var seekable = this.seekable(); |
|
var currentTime = this.tech_.currentTime(); |
|
var isAfterSeekableRange = this.afterSeekableWindow_(seekable, currentTime, this.media(), this.allowSeeksWithinUnsafeLiveWindow); |
|
var seekTo = void 0; |
|
|
|
if (isAfterSeekableRange) { |
|
var seekableEnd = seekable.end(seekable.length - 1); // sync to live point (if VOD, our seekable was updated and we're simply adjusting) |
|
|
|
seekTo = seekableEnd; |
|
} |
|
|
|
if (this.beforeSeekableWindow_(seekable, currentTime)) { |
|
var seekableStart = seekable.start(0); // sync to the beginning of the live window |
|
// provide a buffer of .1 seconds to handle rounding/imprecise numbers |
|
|
|
seekTo = seekableStart + SAFE_TIME_DELTA; |
|
} |
|
|
|
if (typeof seekTo !== 'undefined') { |
|
this.logger_('Trying to seek outside of seekable at time ' + currentTime + ' with ' + ('seekable range ' + printableRange(seekable) + '. Seeking to ') + (seekTo + '.')); |
|
this.seekTo(seekTo); |
|
return true; |
|
} |
|
|
|
return false; |
|
} |
|
/** |
|
* Handler for situations when we determine the player is waiting. |
|
* |
|
* @private |
|
*/ |
|
|
|
}, { |
|
key: 'waiting_', |
|
value: function waiting_() { |
|
if (this.techWaiting_()) { |
|
return; |
|
} // All tech waiting checks failed. Use last resort correction |
|
|
|
|
|
var currentTime = this.tech_.currentTime(); |
|
var buffered = this.tech_.buffered(); |
|
var currentRange = findRange(buffered, currentTime); // Sometimes the player can stall for unknown reasons within a contiguous buffered |
|
// region with no indication that anything is amiss (seen in Firefox). Seeking to |
|
// currentTime is usually enough to kickstart the player. This checks that the player |
|
// is currently within a buffered region before attempting a corrective seek. |
|
// Chrome does not appear to continue `timeupdate` events after a `waiting` event |
|
// until there is ~ 3 seconds of forward buffer available. PlaybackWatcher should also |
|
// make sure there is ~3 seconds of forward buffer before taking any corrective action |
|
// to avoid triggering an `unknownwaiting` event when the network is slow. |
|
|
|
if (currentRange.length && currentTime + 3 <= currentRange.end(0)) { |
|
this.cancelTimer_(); |
|
this.seekTo(currentTime); |
|
this.logger_('Stopped at ' + currentTime + ' while inside a buffered region ' + ('[' + currentRange.start(0) + ' -> ' + currentRange.end(0) + ']. Attempting to resume ') + 'playback by seeking to the current time.'); // unknown waiting corrections may be useful for monitoring QoS |
|
|
|
this.tech_.trigger({ |
|
type: 'usage', |
|
name: 'hls-unknown-waiting' |
|
}); |
|
return; |
|
} |
|
} |
|
/** |
|
* Handler for situations when the tech fires a `waiting` event |
|
* |
|
* @return {Boolean} |
|
* True if an action (or none) was needed to correct the waiting. False if no |
|
* checks passed |
|
* @private |
|
*/ |
|
|
|
}, {
  key: 'techWaiting_',

  /**
   * Handler for situations when the tech fires a `waiting` event.
   *
   * @return {Boolean}
   *         True if an action (or none) was needed to correct the waiting. False if no
   *         checks passed
   * @private
   */
  value: function techWaiting_() {
    var seekable = this.seekable();
    var currentTime = this.tech_.currentTime();

    if (this.tech_.seeking() && this.fixesBadSeeks_()) {
      // Tech is seeking or bad seek fixed, no action needed
      return true;
    }

    if (this.tech_.seeking() || this.timer_ !== null) {
      // Tech is seeking or already waiting on another action, no action needed
      return true;
    }

    if (this.beforeSeekableWindow_(seekable, currentTime)) {
      // fell behind the start of a live window: jump back to the live point
      var livePoint = seekable.end(seekable.length - 1);
      this.logger_('Fell out of live window at time ' + currentTime + '. Seeking to ' + ('live point (seekable end) ' + livePoint));
      this.cancelTimer_();
      this.seekTo(livePoint); // live window resyncs may be useful for monitoring QoS

      this.tech_.trigger({
        type: 'usage',
        name: 'hls-live-resync'
      });
      return true;
    }

    var buffered = this.tech_.buffered();
    var nextRange = findNextRange(buffered, currentTime);

    if (this.videoUnderflow_(nextRange, buffered, currentTime)) {
      // Even though the video underflowed and was stuck in a gap, the audio overplayed
      // the gap, leading currentTime into a buffered range. Seeking to currentTime
      // allows the video to catch up to the audio position without losing any audio
      // (only suffering ~3 seconds of frozen video and a pause in audio playback).
      this.cancelTimer_();
      this.seekTo(currentTime); // video underflow may be useful for monitoring QoS

      this.tech_.trigger({
        type: 'usage',
        name: 'hls-video-underflow'
      });
      return true;
    } // check for gap

    if (nextRange.length > 0) {
      // stopped just before a gap: arm a timer to skip it once the gap's
      // start should have been reached (skipTheGap_ re-verifies first)
      var difference = nextRange.start(0) - currentTime;
      this.logger_('Stopped at ' + currentTime + ', setting timer for ' + difference + ', seeking ' + ('to ' + nextRange.start(0)));
      this.timer_ = setTimeout(this.skipTheGap_.bind(this), difference * 1000, currentTime);
      return true;
    } // All checks failed. Returning false to indicate failure to correct waiting

    return false;
  }
|
}, { |
|
key: 'afterSeekableWindow_', |
|
value: function afterSeekableWindow_(seekable, currentTime, playlist) { |
|
var allowSeeksWithinUnsafeLiveWindow = arguments.length > 3 && arguments[3] !== undefined ? arguments[3] : false; |
|
|
|
if (!seekable.length) { |
|
// we can't make a solid case if there's no seekable, default to false |
|
return false; |
|
} |
|
|
|
var allowedEnd = seekable.end(seekable.length - 1) + SAFE_TIME_DELTA; |
|
var isLive = !playlist.endList; |
|
|
|
if (isLive && allowSeeksWithinUnsafeLiveWindow) { |
|
allowedEnd = seekable.end(seekable.length - 1) + playlist.targetDuration * 3; |
|
} |
|
|
|
if (currentTime > allowedEnd) { |
|
return true; |
|
} |
|
|
|
return false; |
|
} |
|
}, { |
|
key: 'beforeSeekableWindow_', |
|
value: function beforeSeekableWindow_(seekable, currentTime) { |
|
if (seekable.length && // can't fall before 0 and 0 seekable start identifies VOD stream |
|
seekable.start(0) > 0 && currentTime < seekable.start(0) - SAFE_TIME_DELTA) { |
|
return true; |
|
} |
|
|
|
return false; |
|
} |
|
}, { |
|
key: 'videoUnderflow_', |
|
value: function videoUnderflow_(nextRange, buffered, currentTime) { |
|
if (nextRange.length === 0) { |
|
// Even if there is no available next range, there is still a possibility we are |
|
// stuck in a gap due to video underflow. |
|
var gap = this.gapFromVideoUnderflow_(buffered, currentTime); |
|
|
|
if (gap) { |
|
this.logger_('Encountered a gap in video from ' + gap.start + ' to ' + gap.end + '. ' + ('Seeking to current time ' + currentTime)); |
|
return true; |
|
} |
|
} |
|
|
|
return false; |
|
} |
|
/** |
|
* Timer callback. If playback still has not proceeded, then we seek |
|
* to the start of the next buffered region. |
|
* |
|
* @private |
|
*/ |
|
|
|
}, { |
|
key: 'skipTheGap_', |
|
value: function skipTheGap_(scheduledCurrentTime) { |
|
var buffered = this.tech_.buffered(); |
|
var currentTime = this.tech_.currentTime(); |
|
var nextRange = findNextRange(buffered, currentTime); |
|
this.cancelTimer_(); |
|
|
|
if (nextRange.length === 0 || currentTime !== scheduledCurrentTime) { |
|
return; |
|
} |
|
|
|
this.logger_('skipTheGap_:', 'currentTime:', currentTime, 'scheduled currentTime:', scheduledCurrentTime, 'nextRange start:', nextRange.start(0)); // only seek if we still have not played |
|
|
|
this.seekTo(nextRange.start(0) + TIME_FUDGE_FACTOR); |
|
this.tech_.trigger({ |
|
type: 'usage', |
|
name: 'hls-gap-skip' |
|
}); |
|
} |
|
}, { |
|
key: 'gapFromVideoUnderflow_', |
|
value: function gapFromVideoUnderflow_(buffered, currentTime) { |
|
// At least in Chrome, if there is a gap in the video buffer, the audio will continue |
|
// playing for ~3 seconds after the video gap starts. This is done to account for |
|
// video buffer underflow/underrun (note that this is not done when there is audio |
|
// buffer underflow/underrun -- in that case the video will stop as soon as it |
|
// encounters the gap, as audio stalls are more noticeable/jarring to a user than |
|
// video stalls). The player's time will reflect the playthrough of audio, so the |
|
// time will appear as if we are in a buffered region, even if we are stuck in a |
|
// "gap." |
|
// |
|
// Example: |
|
// video buffer: 0 => 10.1, 10.2 => 20 |
|
// audio buffer: 0 => 20 |
|
// overall buffer: 0 => 10.1, 10.2 => 20 |
|
// current time: 13 |
|
// |
|
// Chrome's video froze at 10 seconds, where the video buffer encountered the gap, |
|
// however, the audio continued playing until it reached ~3 seconds past the gap |
|
// (13 seconds), at which point it stops as well. Since current time is past the |
|
// gap, findNextRange will return no ranges. |
|
// |
|
// To check for this issue, we see if there is a gap that starts somewhere within |
|
// a 3 second range (3 seconds +/- 1 second) back from our current time. |
|
var gaps = findGaps(buffered); |
|
|
|
for (var i = 0; i < gaps.length; i++) { |
|
var start = gaps.start(i); |
|
var end = gaps.end(i); // gap is starts no more than 4 seconds back |
|
|
|
if (currentTime - start < 4 && currentTime - start > 2) { |
|
return { |
|
start: start, |
|
end: end |
|
}; |
|
} |
|
} |
|
|
|
return null; |
|
} |
|
}]); |
|
return PlaybackWatcher; |
|
}(); |
|
|
|
// Defaults for the reloadSourceOnError plugin.
var defaultOptions = {
  // minimum number of seconds that must elapse between source reloads
  errorInterval: 30,
  // default strategy: hand the tech's current source object to `next`
  getSource: function getSource(next) {
    var tech = this.tech({
      IWillNotUseThisInPlugins: true
    });
    var sourceObj = tech.currentSource_;
    return next(sourceObj);
  }
};
|
/** |
|
* Main entry point for the plugin |
|
* |
|
* @param {Player} player a reference to a videojs Player instance |
|
* @param {Object} [options] an object with plugin options |
|
* @private |
|
*/ |
|
|
|
/**
 * Main entry point for the reloadSourceOnError plugin: wires up the error
 * handler that reloads the current source (rate-limited by errorInterval)
 * and restores the playhead once metadata is available again.
 *
 * @param {Player} player a reference to a videojs Player instance
 * @param {Object} [options] an object with plugin options
 * @private
 */
var initPlugin = function initPlugin(player, options) {
  var lastCalled = 0;
  var seekTo = 0;
  var localOptions = videojs$1.mergeOptions(defaultOptions, options);
  player.ready(function () {
    player.trigger({
      type: 'usage',
      name: 'hls-error-reload-initialized'
    });
  });
  /**
   * Player modifications to perform that must wait until `loadedmetadata`
   * has been triggered
   *
   * @private
   */

  var loadedMetadataHandler = function loadedMetadataHandler() {
    if (seekTo) {
      player.currentTime(seekTo);
    }
  };
  /**
   * Set the source on the player element, play, and seek if necessary
   *
   * @param {Object} sourceObj An object specifying the source url and mime-type to play
   * @private
   */

  var setSource = function setSource(sourceObj) {
    if (sourceObj === null || sourceObj === undefined) {
      return;
    }

    // remember the playhead (VOD only) so loadedMetadataHandler can restore it
    seekTo = player.duration() !== Infinity && player.currentTime() || 0;
    player.one('loadedmetadata', loadedMetadataHandler);
    player.src(sourceObj);
    player.trigger({
      type: 'usage',
      name: 'hls-error-reload'
    });
    player.play();
  };
  /**
   * Attempt to get a source from either the built-in getSource function
   * or a custom function provided via the options
   *
   * @private
   */

  var errorHandler = function errorHandler() {
    // Do not attempt to reload the source if a source-reload occurred before
    // 'errorInterval' time has elapsed since the last source-reload
    if (Date.now() - lastCalled < localOptions.errorInterval * 1000) {
      player.trigger({
        type: 'usage',
        name: 'hls-error-reload-canceled'
      });
      return;
    }

    if (!localOptions.getSource || typeof localOptions.getSource !== 'function') {
      videojs$1.log.error('ERROR: reloadSourceOnError - The option getSource must be a function!');
      return;
    }

    lastCalled = Date.now();
    return localOptions.getSource.call(player, setSource);
  };
  /**
   * Unbind any event handlers that were bound by the plugin
   *
   * @private
   */

  var cleanupEvents = function cleanupEvents() {
    player.off('loadedmetadata', loadedMetadataHandler);
    player.off('error', errorHandler);
    player.off('dispose', cleanupEvents);
  };
  /**
   * Cleanup before re-initializing the plugin
   *
   * @param {Object} [newOptions] an object with plugin options
   * @private
   */

  var reinitPlugin = function reinitPlugin(newOptions) {
    cleanupEvents();
    initPlugin(player, newOptions);
  };

  player.on('error', errorHandler);
  player.on('dispose', cleanupEvents); // Overwrite the plugin function so that we can correctly cleanup before
  // initializing the plugin

  player.reloadSourceOnError = reinitPlugin;
};
|
/**
 * Reload the source when an error is detected as long as there
 * wasn't an error previously within the last 30 seconds
 * (configurable via options.errorInterval).
 *
 * Registered below as the `reloadSourceOnError` video.js plugin, so `this`
 * is the player instance at call time.
 *
 * @param {Object} [options] an object with plugin options
 */
var reloadSourceOnError = function reloadSourceOnError(options) {
  initPlugin(this, options);
};
|
|
|
// videojs-http-streaming version string (injected at build time).
var version$3 = "1.9.3"; // since VHS handles HLS and DASH (and in the future, more types), use * to capture all

// Register a catch-all middleware so VHS can observe player-level actions
// for every source type.
videojs$1.use('*', function (player) {
  return {
    setSource: function setSource(srcObj, next) {
      // pass null as the first argument to indicate that the source is not rejected
      next(null, srcObj);
    },
    // VHS needs to know when seeks happen. For external seeks (generated at the player
    // level), this middleware will capture the action. For internal seeks (generated at
    // the tech level), we use a wrapped function so that we can handle it on our own
    // (specified elsewhere).
    setCurrentTime: function setCurrentTime(time) {
      var vhs = player.vhs;

      if (vhs && player.currentSource().src === vhs.source_.src) {
        vhs.setCurrentTime(time);
      }

      return time;
    },
    // Sync VHS after play requests. This specifically handles replay, where the
    // order of actions is: play, the video element seeks to 0 (skipping the
    // setCurrentTime middleware), then a play event fires.
    play: function play() {
      var vhs = player.vhs;

      if (vhs && player.currentSource().src === vhs.source_.src) {
        vhs.setCurrentTime(player.tech_.currentTime());
      }
    }
  };
});
|
/**
 * @file videojs-http-streaming.js
 *
 * The main file for the HLS project.
 * License: https://github.com/videojs/videojs-http-streaming/blob/master/LICENSE
 */

// Public surface of VHS, later exposed as videojs.Hls.
var Hls$1 = {
  // playlist loading/parsing helpers
  PlaylistLoader: PlaylistLoader,
  Playlist: Playlist,
  // segment decryption helpers
  Decrypter: Decrypter,
  AsyncStream: AsyncStream,
  decrypt: decrypt,
  utils: utils$1,
  // rendition selection strategies
  STANDARD_PLAYLIST_SELECTOR: lastBandwidthSelector,
  INITIAL_PLAYLIST_SELECTOR: lowestBitrateCompatibleVariantSelector,
  comparePlaylistBandwidth: comparePlaylistBandwidth,
  comparePlaylistResolution: comparePlaylistResolution,
  // shared XHR factory instance
  xhr: xhrFactory()
}; // Define getter/setters for config properties
|
|
|
// Proxy each tuning knob on Hls$1 through to Config, warning on every access
// because mutating these at runtime is unsupported.
['GOAL_BUFFER_LENGTH', 'MAX_GOAL_BUFFER_LENGTH', 'GOAL_BUFFER_LENGTH_RATE', 'BUFFER_LOW_WATER_LINE', 'MAX_BUFFER_LOW_WATER_LINE', 'BUFFER_LOW_WATER_LINE_RATE', 'BANDWIDTH_VARIANCE'].forEach(function (configKey) {
  Object.defineProperty(Hls$1, configKey, {
    get: function getConfigValue() {
      videojs$1.log.warn('using Hls.' + configKey + ' is UNSAFE be sure you know what you are doing');
      return Config[configKey];
    },
    set: function setConfigValue(newValue) {
      videojs$1.log.warn('using Hls.' + configKey + ' is UNSAFE be sure you know what you are doing');

      // only non-negative numbers are accepted; anything else is ignored
      if (typeof newValue !== 'number' || newValue < 0) {
        videojs$1.log.warn('value of Hls.' + configKey + ' must be greater than or equal to 0');
        return;
      }

      Config[configKey] = newValue;
    }
  });
});
|
// localStorage key under which VHS persists bandwidth/throughput estimates.
var LOCAL_STORAGE_KEY$1 = 'videojs-vhs';
|
|
|
/**
 * Maps a source MIME type to the simple stream type it represents.
 *
 * @param {string} type the MIME type of the source
 * @return {string|null} 'hls' for an mpegurl type, 'dash' for a
 *         dash+xml type, or null when the type is not recognized
 */
var simpleTypeFromSourceType = function simpleTypeFromSourceType(type) {
  if (/^(audio|video|application)\/(x-|vnd\.apple\.)?mpegurl/i.test(type)) {
    return 'hls';
  }

  if (/^application\/dash\+xml/i.test(type)) {
    return 'dash';
  }

  return null;
};
|
/**
 * Updates the selectedIndex of the QualityLevelList when a mediachange happens in hls.
 *
 * @param {QualityLevelList} qualityLevels The QualityLevelList to update.
 * @param {PlaylistLoader} playlistLoader PlaylistLoader containing the new media info.
 * @function handleHlsMediaChange
 */
var handleHlsMediaChange = function handleHlsMediaChange(qualityLevels, playlistLoader) {
  var activePlaylist = playlistLoader.media();
  var matchedIndex = -1;

  // find the quality level whose id matches the newly selected playlist uri
  for (var idx = 0; idx < qualityLevels.length; idx++) {
    if (qualityLevels[idx].id === activePlaylist.uri) {
      matchedIndex = idx;
      break;
    }
  }

  qualityLevels.selectedIndex_ = matchedIndex;
  qualityLevels.trigger({
    selectedIndex: matchedIndex,
    type: 'change'
  });
};
|
/**
 * Adds quality levels to list once playlist metadata is available
 *
 * @param {QualityLevelList} qualityLevels The QualityLevelList to attach events to.
 * @param {Object} hls Hls object to listen to for media events.
 * @function handleHlsLoadedMetadata
 */
var handleHlsLoadedMetadata = function handleHlsLoadedMetadata(qualityLevels, hls) {
  var representations = hls.representations();

  // register every available rendition as a quality level
  for (var i = 0; i < representations.length; i++) {
    qualityLevels.addQualityLevel(representations[i]);
  }

  // sync the selected index with the currently active playlist
  handleHlsMediaChange(qualityLevels, hls.playlists);
}; // HLS is a source handler, not a tech. Make sure attempts to use it
// as one do not cause exceptions.
|
|
|
|
|
// Stub: HLS used to be registered as a tech. Warn (instead of throwing) when
// legacy code still calls canPlaySource on it.
Hls$1.canPlaySource = function () {
  var warning = 'HLS is no longer a tech. Please remove it from ' + 'your player\'s techOrder.';

  return videojs$1.log.warn(warning);
};
|
|
|
/**
 * Builds videojs-contrib-eme key system options by augmenting the configured
 * key systems with content types (and pssh/url data) derived from the
 * selected video and audio playlists.
 *
 * @param {Object} keySystemOptions user-provided keySystems configuration
 * @param {Object} videoPlaylist the currently selected video playlist
 * @param {Object} audioPlaylist the currently selected audio playlist
 * @return {Object} merged key system options, or the input when falsey
 */
var emeKeySystems = function emeKeySystems(keySystemOptions, videoPlaylist, audioPlaylist) {
  if (!keySystemOptions) {
    return keySystemOptions;
  } // upsert the content types based on the selected playlist

  var keySystemContentTypes = {};

  for (var keySystem in keySystemOptions) {
    var contentTypes = {
      audioContentType: 'audio/mp4; codecs="' + audioPlaylist.attributes.CODECS + '"',
      videoContentType: 'video/mp4; codecs="' + videoPlaylist.attributes.CODECS + '"'
    };
    var protection = videoPlaylist.contentProtection;

    if (protection && protection[keySystem] && protection[keySystem].pssh) {
      contentTypes.pssh = protection[keySystem].pssh;
    } // videojs-contrib-eme accepts the option of specifying: 'com.some.cdm': 'url'
    // so we need to prevent overwriting the URL entirely

    if (typeof keySystemOptions[keySystem] === 'string') {
      contentTypes.url = keySystemOptions[keySystem];
    }

    keySystemContentTypes[keySystem] = contentTypes;
  }

  return videojs$1.mergeOptions(keySystemOptions, keySystemContentTypes);
};
|
|
|
// Wires key system info into videojs-contrib-eme for the current source.
// No-op unless the source is DASH and the eme plugin is present.
var setupEmeOptions = function setupEmeOptions(hlsHandler) {
  if (hlsHandler.options_.sourceType !== 'dash') {
    return;
  }

  var player = videojs$1.players[hlsHandler.tech_.options_.playerId];

  if (!player.eme) {
    return;
  }

  var sourceOptions = emeKeySystems(hlsHandler.source_.keySystems, hlsHandler.playlists.media(), hlsHandler.masterPlaylistController_.mediaTypes_.AUDIO.activePlaylistLoader.media());

  if (sourceOptions) {
    player.currentSource().keySystems = sourceOptions; // works around https://bugs.chromium.org/p/chromium/issues/detail?id=895449

    if (player.eme.initializeMediaKeys) {
      player.eme.initializeMediaKeys();
    }
  }
};
|
|
|
// Reads and parses the persisted VHS settings object from localStorage.
// Returns null when storage is unavailable, empty, or holds invalid JSON.
var getVhsLocalStorage = function getVhsLocalStorage() {
  if (!window.localStorage) {
    return null;
  }

  var raw = window.localStorage.getItem(LOCAL_STORAGE_KEY$1);

  if (!raw) {
    return null;
  }

  try {
    return JSON.parse(raw);
  } catch (e) {
    // someone may have tampered with the value
    return null;
  }
};
|
|
|
// Merges `options` into the persisted VHS settings and writes them back.
// Returns the stored object, or false when storage is unavailable or full.
var updateVhsLocalStorage = function updateVhsLocalStorage(options) {
  if (!window.localStorage) {
    return false;
  }

  var existing = getVhsLocalStorage();
  var objectToStore = existing ? videojs$1.mergeOptions(existing, options) : options;

  try {
    window.localStorage.setItem(LOCAL_STORAGE_KEY$1, JSON.stringify(objectToStore));
  } catch (e) {
    // Throws if storage is full (e.g., always on iOS 5+ Safari private mode,
    // where storage is set to 0).
    // https://developer.mozilla.org/en-US/docs/Web/API/Storage/setItem#Exceptions
    // No need to perform any operation.
    return false;
  }

  return objectToStore;
};
|
/**
 * Whether the browser has built-in HLS support.
 */
Hls$1.supportsNativeHls = function () {
  var video = document.createElement('video'); // native HLS is definitely not supported if HTML5 video isn't

  if (!videojs$1.getTech('Html5').isSupported()) {
    return false;
  } // HLS manifests can go by many mime-types

  // Apple sanctioned, Apple sanctioned for backwards compatibility, the very
  // common variants, and the rest for completeness.
  var hlsMimeTypes = ['application/vnd.apple.mpegurl', 'audio/mpegurl', 'audio/x-mpegurl', 'application/x-mpegurl', 'video/x-mpegurl', 'video/mpegurl', 'application/mpegurl'];

  return hlsMimeTypes.some(function (mimeType) {
    return /maybe|probably/i.test(video.canPlayType(mimeType));
  });
}();
|
|
|
// Whether the browser reports native playback support for DASH sources.
Hls$1.supportsNativeDash = function () {
  if (!videojs$1.getTech('Html5').isSupported()) {
    return false;
  }

  var answer = document.createElement('video').canPlayType('application/dash+xml');

  return /maybe|probably/i.test(answer);
}();
|
|
|
// Answers whether the given simple type ('hls' or 'dash') can be played
// natively by this browser; any other type yields false.
Hls$1.supportsTypeNatively = function (type) {
  switch (type) {
    case 'hls':
      return Hls$1.supportsNativeHls;
    case 'dash':
      return Hls$1.supportsNativeDash;
    default:
      return false;
  }
};
|
/**
 * HLS is a source handler, not a tech. Make sure attempts to use it
 * as one do not cause exceptions.
 */
Hls$1.isSupported = function () {
  return videojs$1.log.warn('HLS is no longer a tech. Please remove it from ' + 'your player\'s techOrder.');
};

// Base video.js Component class that HlsHandler extends below.
var Component$1 = videojs$1.getComponent('Component');
|
/**
 * The Hls Handler object, where we orchestrate all of the parts
 * of HLS to interact with video.js
 *
 * @class HlsHandler
 * @extends videojs.Component
 * @param {Object} source the source object
 * @param {Tech} tech the parent tech object
 * @param {Object} options optional and required options
 */
var HlsHandler = function (_Component) {
  inherits$1(HlsHandler, _Component);

  function HlsHandler(source, tech, options) {
    classCallCheck$1(this, HlsHandler); // tech.player() is deprecated but setup a reference to HLS for
    // backwards-compatibility

    var _this = possibleConstructorReturn$1(this, (HlsHandler.__proto__ || Object.getPrototypeOf(HlsHandler)).call(this, tech, options.hls));

    if (tech.options_ && tech.options_.playerId) {
      var _player = videojs$1(tech.options_.playerId);

      if (!_player.hasOwnProperty('hls')) {
        // deprecated accessor: warn and report usage whenever player.hls is read
        Object.defineProperty(_player, 'hls', {
          get: function get$$1() {
            videojs$1.log.warn('player.hls is deprecated. Use player.tech().hls instead.');
            tech.trigger({
              type: 'usage',
              name: 'hls-player-access'
            });
            return _this;
          },
          configurable: true
        });
      } // Set up a reference to the HlsHandler from player.vhs. This allows users to start
      // migrating from player.tech_.hls... to player.vhs... for API access. Although this
      // isn't the most appropriate form of reference for video.js (since all APIs should
      // be provided through core video.js), it is a common pattern for plugins, and vhs
      // will act accordingly.

      _player.vhs = _this; // deprecated, for backwards compatibility

      _player.dash = _this;
      _this.player_ = _player;
    }

    _this.tech_ = tech;
    _this.source_ = source;
    _this.stats = {}; // normalize/merge plugin options before they are consumed below

    _this.setOptions_();

    if (_this.options_.overrideNative && tech.overrideNativeAudioTracks && tech.overrideNativeVideoTracks) {
      tech.overrideNativeAudioTracks(true);
      tech.overrideNativeVideoTracks(true);
    } else if (_this.options_.overrideNative && (tech.featuresNativeVideoTracks || tech.featuresNativeAudioTracks)) {
      // overriding native HLS only works if audio tracks have been emulated
      // error early if we're misconfigured
      throw new Error('Overriding native HLS requires emulated tracks. ' + 'See https://git.io/vMpjB');
    } // listen for fullscreenchange events for this player so that we
    // can adjust our quality selection quickly

    _this.on(document, ['fullscreenchange', 'webkitfullscreenchange', 'mozfullscreenchange', 'MSFullscreenChange'], function (event) {
      var fullscreenElement = document.fullscreenElement || document.webkitFullscreenElement || document.mozFullScreenElement || document.msFullscreenElement;

      if (fullscreenElement && fullscreenElement.contains(_this.tech_.el())) {
        _this.masterPlaylistController_.smoothQualityChange_();
      }
    }); // Handle seeking when looping - middleware doesn't handle this seek event from the tech

    _this.on(_this.tech_, 'seeking', function () {
      if (this.tech_.currentTime() === 0 && this.tech_.player_.loop()) {
        this.setCurrentTime(0);
      }
    });

    _this.on(_this.tech_, 'error', function () {
      // stop all loading activity as soon as the tech reports an error
      if (this.masterPlaylistController_) {
        this.masterPlaylistController_.pauseLoading();
      }
    });

    _this.on(_this.tech_, 'play', _this.play);

    return _this;
  }

  createClass$1(HlsHandler, [{
    key: 'setOptions_',
    value: function setOptions_() {
      var _this2 = this; // defaults

      this.options_.withCredentials = this.options_.withCredentials || false;
      this.options_.handleManifestRedirects = this.options_.handleManifestRedirects || false;
      this.options_.limitRenditionByPlayerDimensions = this.options_.limitRenditionByPlayerDimensions === false ? false : true;
      this.options_.smoothQualityChange = this.options_.smoothQualityChange || false;
      // source-level useBandwidthFromLocalStorage wins over the option-level one
      this.options_.useBandwidthFromLocalStorage = typeof this.source_.useBandwidthFromLocalStorage !== 'undefined' ? this.source_.useBandwidthFromLocalStorage : this.options_.useBandwidthFromLocalStorage || false;
      this.options_.customTagParsers = this.options_.customTagParsers || [];
      this.options_.customTagMappers = this.options_.customTagMappers || [];

      if (typeof this.options_.blacklistDuration !== 'number') {
        // default blacklist duration of 5 * 60 -- presumably seconds; confirm
        // against MasterPlaylistController usage
        this.options_.blacklistDuration = 5 * 60;
      }

      if (typeof this.options_.bandwidth !== 'number') {
        if (this.options_.useBandwidthFromLocalStorage) {
          // seed bandwidth/throughput estimates from a previous session
          var storedObject = getVhsLocalStorage();

          if (storedObject && storedObject.bandwidth) {
            this.options_.bandwidth = storedObject.bandwidth;
            this.tech_.trigger({
              type: 'usage',
              name: 'hls-bandwidth-from-local-storage'
            });
          }

          if (storedObject && storedObject.throughput) {
            this.options_.throughput = storedObject.throughput;
            this.tech_.trigger({
              type: 'usage',
              name: 'hls-throughput-from-local-storage'
            });
          }
        }
      } // if bandwidth was not set by options or pulled from local storage, start playlist
      // selection at a reasonable bandwidth

      if (typeof this.options_.bandwidth !== 'number') {
        this.options_.bandwidth = Config.INITIAL_BANDWIDTH;
      } // If the bandwidth number is unchanged from the initial setting
      // then this takes precedence over the enableLowInitialPlaylist option

      this.options_.enableLowInitialPlaylist = this.options_.enableLowInitialPlaylist && this.options_.bandwidth === Config.INITIAL_BANDWIDTH; // grab options passed to player.src

      ['withCredentials', 'limitRenditionByPlayerDimensions', 'bandwidth', 'smoothQualityChange', 'customTagParsers', 'customTagMappers', 'handleManifestRedirects'].forEach(function (option) {
        if (typeof _this2.source_[option] !== 'undefined') {
          _this2.options_[option] = _this2.source_[option];
        }
      });
      this.limitRenditionByPlayerDimensions = this.options_.limitRenditionByPlayerDimensions;
    }
    /**
     * called when player.src gets called, handle a new source
     *
     * @param {Object} src the source object to handle
     */

  }, {
    key: 'src',
    value: function src(_src, type) {
      var _this3 = this; // do nothing if the src is falsey

      if (!_src) {
        return;
      }

      this.setOptions_(); // add master playlist controller options

      this.options_.url = this.source_.src;
      this.options_.tech = this.tech_;
      this.options_.externHls = Hls$1;
      this.options_.sourceType = simpleTypeFromSourceType(type); // Whenever we seek internally, we should update both the tech and call our own
      // setCurrentTime function. This is needed because "seeking" events aren't always
      // reliable. External seeks (via the player object) are handled via middleware.

      this.options_.seekTo = function (time) {
        _this3.tech_.setCurrentTime(time);

        _this3.setCurrentTime(time);
      };

      this.masterPlaylistController_ = new MasterPlaylistController(this.options_);
      this.playbackWatcher_ = new PlaybackWatcher(videojs$1.mergeOptions(this.options_, {
        seekable: function seekable$$1() {
          return _this3.seekable();
        },
        media: function media() {
          return _this3.masterPlaylistController_.media();
        }
      }));
      this.masterPlaylistController_.on('error', function () {
        // surface controller errors through the player's error API
        var player = videojs$1.players[_this3.tech_.options_.playerId];
        player.error(_this3.masterPlaylistController_.error);
      }); // `this` in selectPlaylist should be the HlsHandler for backwards
      // compatibility with < v2

      this.masterPlaylistController_.selectPlaylist = this.selectPlaylist ? this.selectPlaylist.bind(this) : Hls$1.STANDARD_PLAYLIST_SELECTOR.bind(this);
      this.masterPlaylistController_.selectInitialPlaylist = Hls$1.INITIAL_PLAYLIST_SELECTOR.bind(this); // re-expose some internal objects for backwards compatibility with < v2

      this.playlists = this.masterPlaylistController_.masterPlaylistLoader_;
      this.mediaSource = this.masterPlaylistController_.mediaSource; // Proxy assignment of some properties to the master playlist
      // controller. Using a custom property for backwards compatibility
      // with < v2

      Object.defineProperties(this, {
        selectPlaylist: {
          get: function get$$1() {
            return this.masterPlaylistController_.selectPlaylist;
          },
          set: function set$$1(selectPlaylist) {
            this.masterPlaylistController_.selectPlaylist = selectPlaylist.bind(this);
          }
        },
        throughput: {
          get: function get$$1() {
            return this.masterPlaylistController_.mainSegmentLoader_.throughput.rate;
          },
          set: function set$$1(throughput) {
            this.masterPlaylistController_.mainSegmentLoader_.throughput.rate = throughput; // By setting `count` to 1 the throughput value becomes the starting value
            // for the cumulative average

            this.masterPlaylistController_.mainSegmentLoader_.throughput.count = 1;
          }
        },
        bandwidth: {
          get: function get$$1() {
            return this.masterPlaylistController_.mainSegmentLoader_.bandwidth;
          },
          set: function set$$1(bandwidth) {
            this.masterPlaylistController_.mainSegmentLoader_.bandwidth = bandwidth; // setting the bandwidth manually resets the throughput counter
            // `count` is set to zero that current value of `rate` isn't included
            // in the cumulative average

            this.masterPlaylistController_.mainSegmentLoader_.throughput = {
              rate: 0,
              count: 0
            };
          }
        },

        /**
         * `systemBandwidth` is a combination of two serial processes bit-rates. The first
         * is the network bitrate provided by `bandwidth` and the second is the bitrate of
         * the entire process after that - decryption, transmuxing, and appending - provided
         * by `throughput`.
         *
         * Since the two process are serial, the overall system bandwidth is given by:
         * sysBandwidth = 1 / (1 / bandwidth + 1 / throughput)
         */
        systemBandwidth: {
          get: function get$$1() {
            var invBandwidth = 1 / (this.bandwidth || 1);
            var invThroughput = void 0;

            if (this.throughput > 0) {
              invThroughput = 1 / this.throughput;
            } else {
              invThroughput = 0;
            }

            var systemBitrate = Math.floor(1 / (invBandwidth + invThroughput));
            return systemBitrate;
          },
          set: function set$$1() {
            videojs$1.log.error('The "systemBandwidth" property is read-only');
          }
        }
      });

      // apply any caller-supplied starting estimates through the proxies above
      if (this.options_.bandwidth) {
        this.bandwidth = this.options_.bandwidth;
      }

      if (this.options_.throughput) {
        this.throughput = this.options_.throughput;
      }

      // read-only, enumerable stats snapshot properties
      Object.defineProperties(this.stats, {
        bandwidth: {
          get: function get$$1() {
            return _this3.bandwidth || 0;
          },
          enumerable: true
        },
        mediaRequests: {
          get: function get$$1() {
            return _this3.masterPlaylistController_.mediaRequests_() || 0;
          },
          enumerable: true
        },
        mediaRequestsAborted: {
          get: function get$$1() {
            return _this3.masterPlaylistController_.mediaRequestsAborted_() || 0;
          },
          enumerable: true
        },
        mediaRequestsTimedout: {
          get: function get$$1() {
            return _this3.masterPlaylistController_.mediaRequestsTimedout_() || 0;
          },
          enumerable: true
        },
        mediaRequestsErrored: {
          get: function get$$1() {
            return _this3.masterPlaylistController_.mediaRequestsErrored_() || 0;
          },
          enumerable: true
        },
        mediaTransferDuration: {
          get: function get$$1() {
            return _this3.masterPlaylistController_.mediaTransferDuration_() || 0;
          },
          enumerable: true
        },
        mediaBytesTransferred: {
          get: function get$$1() {
            return _this3.masterPlaylistController_.mediaBytesTransferred_() || 0;
          },
          enumerable: true
        },
        mediaSecondsLoaded: {
          get: function get$$1() {
            return _this3.masterPlaylistController_.mediaSecondsLoaded_() || 0;
          },
          enumerable: true
        },
        buffered: {
          get: function get$$1() {
            return timeRangesToArray(_this3.tech_.buffered());
          },
          enumerable: true
        },
        currentTime: {
          get: function get$$1() {
            return _this3.tech_.currentTime();
          },
          enumerable: true
        },
        currentSource: {
          get: function get$$1() {
            return _this3.tech_.currentSource_;
          },
          enumerable: true
        },
        currentTech: {
          get: function get$$1() {
            return _this3.tech_.name_;
          },
          enumerable: true
        },
        duration: {
          get: function get$$1() {
            return _this3.tech_.duration();
          },
          enumerable: true
        },
        master: {
          get: function get$$1() {
            return _this3.playlists.master;
          },
          enumerable: true
        },
        playerDimensions: {
          get: function get$$1() {
            return _this3.tech_.currentDimensions();
          },
          enumerable: true
        },
        seekable: {
          get: function get$$1() {
            return timeRangesToArray(_this3.tech_.seekable());
          },
          enumerable: true
        },
        timestamp: {
          get: function get$$1() {
            return Date.now();
          },
          enumerable: true
        },
        videoPlaybackQuality: {
          get: function get$$1() {
            return _this3.tech_.getVideoPlaybackQuality();
          },
          enumerable: true
        }
      });
      this.tech_.one('canplay', this.masterPlaylistController_.setupFirstPlay.bind(this.masterPlaylistController_));
      this.tech_.on('bandwidthupdate', function () {
        // persist updated estimates so future sessions can start from them
        if (_this3.options_.useBandwidthFromLocalStorage) {
          updateVhsLocalStorage({
            bandwidth: _this3.bandwidth,
            throughput: Math.round(_this3.throughput)
          });
        }
      });
      this.masterPlaylistController_.on('selectedinitialmedia', function () {
        // Add the manual rendition mix-in to HlsHandler
        renditionSelectionMixin(_this3);
        setupEmeOptions(_this3);
      }); // the bandwidth of the primary segment loader is our best
      // estimate of overall bandwidth

      this.on(this.masterPlaylistController_, 'progress', function () {
        this.tech_.trigger('progress');
      });
      this.tech_.ready(function () {
        return _this3.setupQualityLevels_();
      }); // do nothing if the tech has been disposed already
      // this can occur if someone sets the src in player.ready(), for instance

      if (!this.tech_.el()) {
        return;
      }

      this.tech_.src(videojs$1.URL.createObjectURL(this.masterPlaylistController_.mediaSource));
    }
    /**
     * Initializes the quality levels and sets listeners to update them.
     *
     * @method setupQualityLevels_
     * @private
     */

  }, {
    key: 'setupQualityLevels_',
    value: function setupQualityLevels_() {
      var _this4 = this;

      var player = videojs$1.players[this.tech_.options_.playerId];

      // only wire up quality levels when the player exposes a qualityLevels()
      // API (i.e. the videojs-contrib-quality-levels plugin is installed)
      if (player && player.qualityLevels) {
        this.qualityLevels_ = player.qualityLevels();
        this.masterPlaylistController_.on('selectedinitialmedia', function () {
          handleHlsLoadedMetadata(_this4.qualityLevels_, _this4);
        });
        this.playlists.on('mediachange', function () {
          handleHlsMediaChange(_this4.qualityLevels_, _this4.playlists);
        });
      }
    }
    /**
     * Begin playing the video.
     */

  }, {
    key: 'play',
    value: function play() {
      this.masterPlaylistController_.play();
    }
    /**
     * a wrapper around the function in MasterPlaylistController
     */

  }, {
    key: 'setCurrentTime',
    value: function setCurrentTime(currentTime) {
      this.masterPlaylistController_.setCurrentTime(currentTime);
    }
    /**
     * a wrapper around the function in MasterPlaylistController
     */

  }, {
    key: 'duration',
    value: function duration$$1() {
      return this.masterPlaylistController_.duration();
    }
    /**
     * a wrapper around the function in MasterPlaylistController
     */

  }, {
    key: 'seekable',
    value: function seekable$$1() {
      return this.masterPlaylistController_.seekable();
    }
    /**
     * Abort all outstanding work and cleanup.
     */

  }, {
    key: 'dispose',
    value: function dispose() {
      if (this.playbackWatcher_) {
        this.playbackWatcher_.dispose();
      }

      if (this.masterPlaylistController_) {
        this.masterPlaylistController_.dispose();
      }

      if (this.qualityLevels_) {
        this.qualityLevels_.dispose();
      }

      // remove the back-references installed on the player in the constructor
      if (this.player_) {
        delete this.player_.vhs;
        delete this.player_.dash;
        delete this.player_.hls;
      }

      if (this.tech_ && this.tech_.hls) {
        delete this.tech_.hls;
      }

      // transpiled equivalent of super.dispose()
      get$1(HlsHandler.prototype.__proto__ || Object.getPrototypeOf(HlsHandler.prototype), 'dispose', this).call(this);
    }
    /**
     * Converts a player time to a program time using the current media
     * playlist; delegates to getProgramTime.
     */

  }, {
    key: 'convertToProgramTime',
    value: function convertToProgramTime(time, callback) {
      return getProgramTime({
        playlist: this.masterPlaylistController_.media(),
        time: time,
        callback: callback
      });
    } // the player must be playing before calling this

  }, {
    key: 'seekToProgramTime',
    value: function seekToProgramTime$$1(programTime, callback) {
      // transpiled default parameters: pauseAfterSeek defaults to true,
      // retryCount defaults to 2
      var pauseAfterSeek = arguments.length > 2 && arguments[2] !== undefined ? arguments[2] : true;
      var retryCount = arguments.length > 3 && arguments[3] !== undefined ? arguments[3] : 2;
      return seekToProgramTime({
        programTime: programTime,
        playlist: this.masterPlaylistController_.media(),
        retryCount: retryCount,
        pauseAfterSeek: pauseAfterSeek,
        seekTo: this.options_.seekTo,
        tech: this.options_.tech,
        callback: callback
      });
    }
  }]);
  return HlsHandler;
}(Component$1);
|
/**
 * The Source Handler object, which informs video.js what additional
 * MIME types are supported and sets up playback. It is registered
 * automatically to the appropriate tech based on the capabilities of
 * the browser it is running in. It is not necessary to use or modify
 * this object in normal usage.
 */
var HlsSourceHandler = {
  name: 'videojs-http-streaming',
  VERSION: version$3,
  // Can this handler deal with the given source object?
  canHandleSource: function canHandleSource(srcObj) {
    var options = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {};
    var localOptions = videojs$1.mergeOptions(videojs$1.options, options);

    return HlsSourceHandler.canPlayType(srcObj.type, localOptions);
  },
  // Create an HlsHandler for the tech and hand it the new source.
  handleSource: function handleSource(source, tech) {
    var options = arguments.length > 2 && arguments[2] !== undefined ? arguments[2] : {};
    var localOptions = videojs$1.mergeOptions(videojs$1.options, options);

    tech.hls = new HlsHandler(source, tech, localOptions);
    tech.hls.xhr = xhrFactory();
    tech.hls.src(source.src, source.type);
    return tech.hls;
  },
  // 'maybe' when the MIME type is an HLS/DASH type that should go through
  // MSE playback (i.e. not supported natively, or native is overridden).
  canPlayType: function canPlayType(type) {
    var options = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {};
    var merged = videojs$1.mergeOptions(videojs$1.options, options);
    var overrideNative = merged.hls.overrideNative;
    var supportedType = simpleTypeFromSourceType(type);

    if (!supportedType) {
      return '';
    }

    return !Hls$1.supportsTypeNatively(supportedType) || overrideNative ? 'maybe' : '';
  }
};
|
|
|
// Supply MediaSource/URL shims when video.js itself doesn't provide them.
if (typeof videojs$1.MediaSource === 'undefined' || typeof videojs$1.URL === 'undefined') {
  videojs$1.MediaSource = MediaSource;
  videojs$1.URL = URL$1;
} // register source handlers with the appropriate techs

// MSE-based playback is only possible where native media sources work;
// priority 0 places this handler ahead of the tech's default handlers.
if (MediaSource.supportsNativeMediaSources()) {
  videojs$1.getTech('Html5').registerSourceHandler(HlsSourceHandler, 0);
}

// expose the VHS public API on the videojs namespace
videojs$1.HlsHandler = HlsHandler;
videojs$1.HlsSourceHandler = HlsSourceHandler;
videojs$1.Hls = Hls$1;

// fallback for video.js builds without middleware support
if (!videojs$1.use) {
  videojs$1.registerComponent('Hls', Hls$1);
}

videojs$1.options.hls = videojs$1.options.hls || {};

// registerPlugin is the modern API; plugin() is the legacy fallback
if (videojs$1.registerPlugin) {
  videojs$1.registerPlugin('reloadSourceOnError', reloadSourceOnError);
} else {
  videojs$1.plugin('reloadSourceOnError', reloadSourceOnError);
}

return videojs$1;
|
|
|
}));
|