{ "version": 3, "sources": ["angular:script/global:scripts.js"], "sourcesContent": ["/*! jQuery v3.7.1 | (c) OpenJS Foundation and other contributors | jquery.org/license */\n!function(e,t){\"use strict\";\"object\"==typeof module&&\"object\"==typeof module.exports?module.exports=e.document?t(e,!0):function(e){if(!e.document)throw new Error(\"jQuery requires a window with a document\");return t(e)}:t(e)}(\"undefined\"!=typeof window?window:this,function(ie,e){\"use strict\";var oe=[],r=Object.getPrototypeOf,ae=oe.slice,g=oe.flat?function(e){return oe.flat.call(e)}:function(e){return oe.concat.apply([],e)},s=oe.push,se=oe.indexOf,n={},i=n.toString,ue=n.hasOwnProperty,o=ue.toString,a=o.call(Object),le={},v=function(e){return\"function\"==typeof e&&\"number\"!=typeof e.nodeType&&\"function\"!=typeof e.item},y=function(e){return null!=e&&e===e.window},C=ie.document,u={type:!0,src:!0,nonce:!0,noModule:!0};function m(e,t,n){var r,i,o=(n=n||C).createElement(\"script\");if(o.text=e,t)for(r in u)(i=t[r]||t.getAttribute&&t.getAttribute(r))&&o.setAttribute(r,i);n.head.appendChild(o).parentNode.removeChild(o)}function x(e){return null==e?e+\"\":\"object\"==typeof e||\"function\"==typeof e?n[i.call(e)]||\"object\":typeof e}var t=\"3.7.1\",l=/HTML$/i,ce=function(e,t){return new ce.fn.init(e,t)};function c(e){var t=!!e&&\"length\"in e&&e.length,n=x(e);return!v(e)&&!y(e)&&(\"array\"===n||0===t||\"number\"==typeof t&&0+~]|\"+ge+\")\"+ge+\"*\"),x=new RegExp(ge+\"|>\"),j=new RegExp(g),A=new RegExp(\"^\"+t+\"$\"),D={ID:new RegExp(\"^#(\"+t+\")\"),CLASS:new RegExp(\"^\\\\.(\"+t+\")\"),TAG:new RegExp(\"^(\"+t+\"|[*])\"),ATTR:new RegExp(\"^\"+p),PSEUDO:new RegExp(\"^\"+g),CHILD:new RegExp(\"^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\\\(\"+ge+\"*(even|odd|(([+-]|)(\\\\d*)n|)\"+ge+\"*(?:([+-]|)\"+ge+\"*(\\\\d+)|))\"+ge+\"*\\\\)|)\",\"i\"),bool:new RegExp(\"^(?:\"+f+\")$\",\"i\"),needsContext:new RegExp(\"^\"+ge+\"*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\\\(\"+ge+\"*((?:-\\\\d)?\\\\d*)\"+ge+\"*\\\\)|)(?=[^-]|$)\",\"i\")},N=/^(?:input|select|textarea|button)$/i,q=/^h\\d$/i,L=/^(?:#([\\w-]+)|(\\w+)|\\.([\\w-]+))$/,H=/[+~]/,O=new RegExp(\"\\\\\\\\[\\\\da-fA-F]{1,6}\"+ge+\"?|\\\\\\\\([^\\\\r\\\\n\\\\f])\",\"g\"),P=function(e,t){var n=\"0x\"+e.slice(1)-65536;return t||(n<0?String.fromCharCode(n+65536):String.fromCharCode(n>>10|55296,1023&n|56320))},M=function(){V()},R=J(function(e){return!0===e.disabled&&fe(e,\"fieldset\")},{dir:\"parentNode\",next:\"legend\"});try{k.apply(oe=ae.call(ye.childNodes),ye.childNodes),oe[ye.childNodes.length].nodeType}catch(e){k={apply:function(e,t){me.apply(e,ae.call(t))},call:function(e){me.apply(e,ae.call(arguments,1))}}}function I(t,e,n,r){var i,o,a,s,u,l,c,f=e&&e.ownerDocument,p=e?e.nodeType:9;if(n=n||[],\"string\"!=typeof t||!t||1!==p&&9!==p&&11!==p)return n;if(!r&&(V(e),e=e||T,C)){if(11!==p&&(u=L.exec(t)))if(i=u[1]){if(9===p){if(!(a=e.getElementById(i)))return n;if(a.id===i)return k.call(n,a),n}else if(f&&(a=f.getElementById(i))&&I.contains(e,a)&&a.id===i)return k.call(n,a),n}else{if(u[2])return k.apply(n,e.getElementsByTagName(t)),n;if((i=u[3])&&e.getElementsByClassName)return k.apply(n,e.getElementsByClassName(i)),n}if(!(h[t+\" \"]||d&&d.test(t))){if(c=t,f=e,1===p&&(x.test(t)||m.test(t))){(f=H.test(t)&&U(e.parentNode)||e)==e&&le.scope||((s=e.getAttribute(\"id\"))?s=ce.escapeSelector(s):e.setAttribute(\"id\",s=S)),o=(l=Y(t)).length;while(o--)l[o]=(s?\"#\"+s:\":scope\")+\" \"+Q(l[o]);c=l.join(\",\")}try{return 
k.apply(n,f.querySelectorAll(c)),n}catch(e){h(t,!0)}finally{s===S&&e.removeAttribute(\"id\")}}}return re(t.replace(ve,\"$1\"),e,n,r)}function W(){var r=[];return function e(t,n){return r.push(t+\" \")>b.cacheLength&&delete e[r.shift()],e[t+\" \"]=n}}function F(e){return e[S]=!0,e}function $(e){var t=T.createElement(\"fieldset\");try{return!!e(t)}catch(e){return!1}finally{t.parentNode&&t.parentNode.removeChild(t),t=null}}function B(t){return function(e){return fe(e,\"input\")&&e.type===t}}function _(t){return function(e){return(fe(e,\"input\")||fe(e,\"button\"))&&e.type===t}}function z(t){return function(e){return\"form\"in e?e.parentNode&&!1===e.disabled?\"label\"in e?\"label\"in e.parentNode?e.parentNode.disabled===t:e.disabled===t:e.isDisabled===t||e.isDisabled!==!t&&R(e)===t:e.disabled===t:\"label\"in e&&e.disabled===t}}function X(a){return F(function(o){return o=+o,F(function(e,t){var n,r=a([],e.length,o),i=r.length;while(i--)e[n=r[i]]&&(e[n]=!(t[n]=e[n]))})})}function U(e){return e&&\"undefined\"!=typeof e.getElementsByTagName&&e}function V(e){var t,n=e?e.ownerDocument||e:ye;return n!=T&&9===n.nodeType&&n.documentElement&&(r=(T=n).documentElement,C=!ce.isXMLDoc(T),i=r.matches||r.webkitMatchesSelector||r.msMatchesSelector,r.msMatchesSelector&&ye!=T&&(t=T.defaultView)&&t.top!==t&&t.addEventListener(\"unload\",M),le.getById=$(function(e){return r.appendChild(e).id=ce.expando,!T.getElementsByName||!T.getElementsByName(ce.expando).length}),le.disconnectedMatch=$(function(e){return i.call(e,\"*\")}),le.scope=$(function(){return T.querySelectorAll(\":scope\")}),le.cssHas=$(function(){try{return T.querySelector(\":has(*,:jqfake)\"),!1}catch(e){return!0}}),le.getById?(b.filter.ID=function(e){var t=e.replace(O,P);return function(e){return e.getAttribute(\"id\")===t}},b.find.ID=function(e,t){if(\"undefined\"!=typeof t.getElementById&&C){var n=t.getElementById(e);return n?[n]:[]}}):(b.filter.ID=function(e){var n=e.replace(O,P);return function(e){var t=\"undefined\"!=typeof e.getAttributeNode&&e.getAttributeNode(\"id\");return t&&t.value===n}},b.find.ID=function(e,t){if(\"undefined\"!=typeof t.getElementById&&C){var n,r,i,o=t.getElementById(e);if(o){if((n=o.getAttributeNode(\"id\"))&&n.value===e)return[o];i=t.getElementsByName(e),r=0;while(o=i[r++])if((n=o.getAttributeNode(\"id\"))&&n.value===e)return[o]}return[]}}),b.find.TAG=function(e,t){return\"undefined\"!=typeof t.getElementsByTagName?t.getElementsByTagName(e):t.querySelectorAll(e)},b.find.CLASS=function(e,t){if(\"undefined\"!=typeof t.getElementsByClassName&&C)return t.getElementsByClassName(e)},d=[],$(function(e){var t;r.appendChild(e).innerHTML=\"\",e.querySelectorAll(\"[selected]\").length||d.push(\"\\\\[\"+ge+\"*(?:value|\"+f+\")\"),e.querySelectorAll(\"[id~=\"+S+\"-]\").length||d.push(\"~=\"),e.querySelectorAll(\"a#\"+S+\"+*\").length||d.push(\".#.+[+~]\"),e.querySelectorAll(\":checked\").length||d.push(\":checked\"),(t=T.createElement(\"input\")).setAttribute(\"type\",\"hidden\"),e.appendChild(t).setAttribute(\"name\",\"D\"),r.appendChild(e).disabled=!0,2!==e.querySelectorAll(\":disabled\").length&&d.push(\":enabled\",\":disabled\"),(t=T.createElement(\"input\")).setAttribute(\"name\",\"\"),e.appendChild(t),e.querySelectorAll(\"[name='']\").length||d.push(\"\\\\[\"+ge+\"*name\"+ge+\"*=\"+ge+\"*(?:''|\\\"\\\")\")}),le.cssHas||d.push(\":has\"),d=d.length&&new RegExp(d.join(\"|\")),l=function(e,t){if(e===t)return a=!0,0;var n=!e.compareDocumentPosition-!t.compareDocumentPosition;return 
n||(1&(n=(e.ownerDocument||e)==(t.ownerDocument||t)?e.compareDocumentPosition(t):1)||!le.sortDetached&&t.compareDocumentPosition(e)===n?e===T||e.ownerDocument==ye&&I.contains(ye,e)?-1:t===T||t.ownerDocument==ye&&I.contains(ye,t)?1:o?se.call(o,e)-se.call(o,t):0:4&n?-1:1)}),T}for(e in I.matches=function(e,t){return I(e,null,null,t)},I.matchesSelector=function(e,t){if(V(e),C&&!h[t+\" \"]&&(!d||!d.test(t)))try{var n=i.call(e,t);if(n||le.disconnectedMatch||e.document&&11!==e.document.nodeType)return n}catch(e){h(t,!0)}return 0\":{dir:\"parentNode\",first:!0},\" \":{dir:\"parentNode\"},\"+\":{dir:\"previousSibling\",first:!0},\"~\":{dir:\"previousSibling\"}},preFilter:{ATTR:function(e){return e[1]=e[1].replace(O,P),e[3]=(e[3]||e[4]||e[5]||\"\").replace(O,P),\"~=\"===e[2]&&(e[3]=\" \"+e[3]+\" \"),e.slice(0,4)},CHILD:function(e){return e[1]=e[1].toLowerCase(),\"nth\"===e[1].slice(0,3)?(e[3]||I.error(e[0]),e[4]=+(e[4]?e[5]+(e[6]||1):2*(\"even\"===e[3]||\"odd\"===e[3])),e[5]=+(e[7]+e[8]||\"odd\"===e[3])):e[3]&&I.error(e[0]),e},PSEUDO:function(e){var t,n=!e[6]&&e[2];return D.CHILD.test(e[0])?null:(e[3]?e[2]=e[4]||e[5]||\"\":n&&j.test(n)&&(t=Y(n,!0))&&(t=n.indexOf(\")\",n.length-t)-n.length)&&(e[0]=e[0].slice(0,t),e[2]=n.slice(0,t)),e.slice(0,3))}},filter:{TAG:function(e){var t=e.replace(O,P).toLowerCase();return\"*\"===e?function(){return!0}:function(e){return fe(e,t)}},CLASS:function(e){var t=s[e+\" \"];return t||(t=new RegExp(\"(^|\"+ge+\")\"+e+\"(\"+ge+\"|$)\"))&&s(e,function(e){return t.test(\"string\"==typeof e.className&&e.className||\"undefined\"!=typeof e.getAttribute&&e.getAttribute(\"class\")||\"\")})},ATTR:function(n,r,i){return function(e){var t=I.attr(e,n);return null==t?\"!=\"===r:!r||(t+=\"\",\"=\"===r?t===i:\"!=\"===r?t!==i:\"^=\"===r?i&&0===t.indexOf(i):\"*=\"===r?i&&-1:\\x20\\t\\r\\n\\f]*)[\\x20\\t\\r\\n\\f]*\\/?>(?:<\\/\\1>|)$/i;function T(e,n,r){return v(n)?ce.grep(e,function(e,t){return!!n.call(e,t,e)!==r}):n.nodeType?ce.grep(e,function(e){return e===n!==r}):\"string\"!=typeof n?ce.grep(e,function(e){return-1)[^>]*|#([\\w-]+))$/;(ce.fn.init=function(e,t,n){var r,i;if(!e)return this;if(n=n||k,\"string\"==typeof e){if(!(r=\"<\"===e[0]&&\">\"===e[e.length-1]&&3<=e.length?[null,e,null]:S.exec(e))||!r[1]&&t)return!t||t.jquery?(t||n).find(e):this.constructor(t).find(e);if(r[1]){if(t=t instanceof ce?t[0]:t,ce.merge(this,ce.parseHTML(r[1],t&&t.nodeType?t.ownerDocument||t:C,!0)),w.test(r[1])&&ce.isPlainObject(t))for(r in t)v(this[r])?this[r](t[r]):this.attr(r,t[r]);return this}return(i=C.getElementById(r[2]))&&(this[0]=i,this.length=1),this}return e.nodeType?(this[0]=e,this.length=1,this):v(e)?void 0!==n.ready?n.ready(e):e(ce):ce.makeArray(e,this)}).prototype=ce.fn,k=ce(C);var E=/^(?:parents|prev(?:Until|All))/,j={children:!0,contents:!0,next:!0,prev:!0};function A(e,t){while((e=e[t])&&1!==e.nodeType);return e}ce.fn.extend({has:function(e){var t=ce(e,this),n=t.length;return this.filter(function(){for(var e=0;e\\x20\\t\\r\\n\\f]*)/i,Ce=/^$|^module$|\\/(?:java|ecma)script/i;xe=C.createDocumentFragment().appendChild(C.createElement(\"div\")),(be=C.createElement(\"input\")).setAttribute(\"type\",\"radio\"),be.setAttribute(\"checked\",\"checked\"),be.setAttribute(\"name\",\"t\"),xe.appendChild(be),le.checkClone=xe.cloneNode(!0).cloneNode(!0).lastChild.checked,xe.innerHTML=\"\",le.noCloneChecked=!!xe.cloneNode(!0).lastChild.defaultValue,xe.innerHTML=\"\",le.option=!!xe.lastChild;var ke={thead:[1,\"\",\"
</table>\"],col:[2,\"<table><colgroup>\",\"</colgroup></table>\"],tr:[2,\"<table><tbody>\",\"</tbody></table>\"],td:[3,\"<table><tbody><tr>\",\"</tr></tbody></table>
\"],_default:[0,\"\",\"\"]};function Se(e,t){var n;return n=\"undefined\"!=typeof e.getElementsByTagName?e.getElementsByTagName(t||\"*\"):\"undefined\"!=typeof e.querySelectorAll?e.querySelectorAll(t||\"*\"):[],void 0===t||t&&fe(e,t)?ce.merge([e],n):n}function Ee(e,t){for(var n=0,r=e.length;n\",\"\"]);var je=/<|&#?\\w+;/;function Ae(e,t,n,r,i){for(var o,a,s,u,l,c,f=t.createDocumentFragment(),p=[],d=0,h=e.length;d\\s*$/g;function Re(e,t){return fe(e,\"table\")&&fe(11!==t.nodeType?t:t.firstChild,\"tr\")&&ce(e).children(\"tbody\")[0]||e}function Ie(e){return e.type=(null!==e.getAttribute(\"type\"))+\"/\"+e.type,e}function We(e){return\"true/\"===(e.type||\"\").slice(0,5)?e.type=e.type.slice(5):e.removeAttribute(\"type\"),e}function Fe(e,t){var n,r,i,o,a,s;if(1===t.nodeType){if(_.hasData(e)&&(s=_.get(e).events))for(i in _.remove(t,\"handle events\"),s)for(n=0,r=s[i].length;n\").attr(n.scriptAttrs||{}).prop({charset:n.scriptCharset,src:n.url}).on(\"load error\",i=function(e){r.remove(),i=null,e&&t(\"error\"===e.type?404:200,e.type)}),C.head.appendChild(r[0])},abort:function(){i&&i()}}});var Jt,Kt=[],Zt=/(=)\\?(?=&|$)|\\?\\?/;ce.ajaxSetup({jsonp:\"callback\",jsonpCallback:function(){var e=Kt.pop()||ce.expando+\"_\"+jt.guid++;return this[e]=!0,e}}),ce.ajaxPrefilter(\"json jsonp\",function(e,t,n){var r,i,o,a=!1!==e.jsonp&&(Zt.test(e.url)?\"url\":\"string\"==typeof e.data&&0===(e.contentType||\"\").indexOf(\"application/x-www-form-urlencoded\")&&Zt.test(e.data)&&\"data\");if(a||\"jsonp\"===e.dataTypes[0])return r=e.jsonpCallback=v(e.jsonpCallback)?e.jsonpCallback():e.jsonpCallback,a?e[a]=e[a].replace(Zt,\"$1\"+r):!1!==e.jsonp&&(e.url+=(At.test(e.url)?\"&\":\"?\")+e.jsonp+\"=\"+r),e.converters[\"script json\"]=function(){return o||ce.error(r+\" was not called\"),o[0]},e.dataTypes[0]=\"json\",i=ie[r],ie[r]=function(){o=arguments},n.always(function(){void 0===i?ce(ie).removeProp(r):ie[r]=i,e[r]&&(e.jsonpCallback=t.jsonpCallback,Kt.push(r)),o&&v(i)&&i(o[0]),o=i=void 0}),\"script\"}),le.createHTMLDocument=((Jt=C.implementation.createHTMLDocument(\"\").body).innerHTML=\"
\",2===Jt.childNodes.length),ce.parseHTML=function(e,t,n){return\"string\"!=typeof e?[]:(\"boolean\"==typeof t&&(n=t,t=!1),t||(le.createHTMLDocument?((r=(t=C.implementation.createHTMLDocument(\"\")).createElement(\"base\")).href=C.location.href,t.head.appendChild(r)):t=C),o=!n&&[],(i=w.exec(e))?[t.createElement(i[1])]:(i=Ae([e],t,o),o&&o.length&&ce(o).remove(),ce.merge([],i.childNodes)));var r,i,o},ce.fn.load=function(e,t,n){var r,i,o,a=this,s=e.indexOf(\" \");return-1\").append(ce.parseHTML(e)).find(r):e)}).always(n&&function(e,t){a.each(function(){n.apply(this,o||[e.responseText,t,e])})}),this},ce.expr.pseudos.animated=function(t){return ce.grep(ce.timers,function(e){return t===e.elem}).length},ce.offset={setOffset:function(e,t,n){var r,i,o,a,s,u,l=ce.css(e,\"position\"),c=ce(e),f={};\"static\"===l&&(e.style.position=\"relative\"),s=c.offset(),o=ce.css(e,\"top\"),u=ce.css(e,\"left\"),(\"absolute\"===l||\"fixed\"===l)&&-1<(o+u).indexOf(\"auto\")?(a=(r=c.position()).top,i=r.left):(a=parseFloat(o)||0,i=parseFloat(u)||0),v(t)&&(t=t.call(e,n,ce.extend({},s))),null!=t.top&&(f.top=t.top-s.top+a),null!=t.left&&(f.left=t.left-s.left+i),\"using\"in t?t.using.call(e,f):c.css(f)}},ce.fn.extend({offset:function(t){if(arguments.length)return void 0===t?this:this.each(function(e){ce.offset.setOffset(this,t,e)});var e,n,r=this[0];return r?r.getClientRects().length?(e=r.getBoundingClientRect(),n=r.ownerDocument.defaultView,{top:e.top+n.pageYOffset,left:e.left+n.pageXOffset}):{top:0,left:0}:void 0},position:function(){if(this[0]){var e,t,n,r=this[0],i={top:0,left:0};if(\"fixed\"===ce.css(r,\"position\"))t=r.getBoundingClientRect();else{t=this.offset(),n=r.ownerDocument,e=r.offsetParent||n.documentElement;while(e&&(e===n.body||e===n.documentElement)&&\"static\"===ce.css(e,\"position\"))e=e.parentNode;e&&e!==r&&1===e.nodeType&&((i=ce(e).offset()).top+=ce.css(e,\"borderTopWidth\",!0),i.left+=ce.css(e,\"borderLeftWidth\",!0))}return{top:t.top-i.top-ce.css(r,\"marginTop\",!0),left:t.left-i.left-ce.css(r,\"marginLeft\",!0)}}},offsetParent:function(){return this.map(function(){var e=this.offsetParent;while(e&&\"static\"===ce.css(e,\"position\"))e=e.offsetParent;return e||J})}}),ce.each({scrollLeft:\"pageXOffset\",scrollTop:\"pageYOffset\"},function(t,i){var o=\"pageYOffset\"===i;ce.fn[t]=function(e){return M(this,function(e,t,n){var r;if(y(e)?r=e:9===e.nodeType&&(r=e.defaultView),void 0===n)return r?r[i]:e[t];r?r.scrollTo(o?r.pageXOffset:n,o?n:r.pageYOffset):e[t]=n},t,e,arguments.length)}}),ce.each([\"top\",\"left\"],function(e,n){ce.cssHooks[n]=Ye(le.pixelPosition,function(e,t){if(t)return t=Ge(e,n),_e.test(t)?ce(e).position()[n]+\"px\":t})}),ce.each({Height:\"height\",Width:\"width\"},function(a,s){ce.each({padding:\"inner\"+a,content:s,\"\":\"outer\"+a},function(r,o){ce.fn[o]=function(e,t){var n=arguments.length&&(r||\"boolean\"!=typeof e),i=r||(!0===e||!0===t?\"margin\":\"border\");return M(this,function(e,t,n){var r;return y(e)?0===o.indexOf(\"outer\")?e[\"inner\"+a]:e.document.documentElement[\"client\"+a]:9===e.nodeType?(r=e.documentElement,Math.max(e.body[\"scroll\"+a],r[\"scroll\"+a],e.body[\"offset\"+a],r[\"offset\"+a],r[\"client\"+a])):void 0===n?ce.css(e,t,i):ce.style(e,t,n,i)},s,n?e:void 0,n)}})}),ce.each([\"ajaxStart\",\"ajaxStop\",\"ajaxComplete\",\"ajaxError\",\"ajaxSuccess\",\"ajaxSend\"],function(e,t){ce.fn[t]=function(e){return this.on(t,e)}}),ce.fn.extend({bind:function(e,t,n){return this.on(e,null,t,n)},unbind:function(e,t){return this.off(e,null,t)},delegate:function(e,t,n,r){return 
this.on(t,e,n,r)},undelegate:function(e,t,n){return 1===arguments.length?this.off(e,\"**\"):this.off(t,e||\"**\",n)},hover:function(e,t){return this.on(\"mouseenter\",e).on(\"mouseleave\",t||e)}}),ce.each(\"blur focus focusin focusout resize scroll click dblclick mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave change select submit keydown keypress keyup contextmenu\".split(\" \"),function(e,n){ce.fn[n]=function(e,t){return 0t[i]})}return e.default=t,Object.freeze(e)}const i=e(t),s=new Map,n={set(t,e,i){s.has(t)||s.set(t,new Map);const n=s.get(t);n.has(e)||0===n.size?n.set(e,i):console.error(`Bootstrap doesn't allow more than one instance per element. Bound instance: ${Array.from(n.keys())[0]}.`)},get:(t,e)=>s.has(t)&&s.get(t).get(e)||null,remove(t,e){if(!s.has(t))return;const i=s.get(t);i.delete(e),0===i.size&&s.delete(t)}},o=\"transitionend\",r=t=>(t&&window.CSS&&window.CSS.escape&&(t=t.replace(/#([^\\s\"#']+)/g,((t,e)=>`#${CSS.escape(e)}`))),t),a=t=>{t.dispatchEvent(new Event(o))},l=t=>!(!t||\"object\"!=typeof t)&&(void 0!==t.jquery&&(t=t[0]),void 0!==t.nodeType),c=t=>l(t)?t.jquery?t[0]:t:\"string\"==typeof t&&t.length>0?document.querySelector(r(t)):null,h=t=>{if(!l(t)||0===t.getClientRects().length)return!1;const e=\"visible\"===getComputedStyle(t).getPropertyValue(\"visibility\"),i=t.closest(\"details:not([open])\");if(!i)return e;if(i!==t){const e=t.closest(\"summary\");if(e&&e.parentNode!==i)return!1;if(null===e)return!1}return e},d=t=>!t||t.nodeType!==Node.ELEMENT_NODE||!!t.classList.contains(\"disabled\")||(void 0!==t.disabled?t.disabled:t.hasAttribute(\"disabled\")&&\"false\"!==t.getAttribute(\"disabled\")),u=t=>{if(!document.documentElement.attachShadow)return null;if(\"function\"==typeof t.getRootNode){const e=t.getRootNode();return e instanceof ShadowRoot?e:null}return t instanceof ShadowRoot?t:t.parentNode?u(t.parentNode):null},_=()=>{},g=t=>{t.offsetHeight},f=()=>window.jQuery&&!document.body.hasAttribute(\"data-bs-no-jquery\")?window.jQuery:null,m=[],p=()=>\"rtl\"===document.documentElement.dir,b=t=>{var e;e=()=>{const e=f();if(e){const i=t.NAME,s=e.fn[i];e.fn[i]=t.jQueryInterface,e.fn[i].Constructor=t,e.fn[i].noConflict=()=>(e.fn[i]=s,t.jQueryInterface)}},\"loading\"===document.readyState?(m.length||document.addEventListener(\"DOMContentLoaded\",(()=>{for(const t of m)t()})),m.push(e)):e()},v=(t,e=[],i=t)=>\"function\"==typeof t?t(...e):i,y=(t,e,i=!0)=>{if(!i)return void v(t);const s=(t=>{if(!t)return 0;let{transitionDuration:e,transitionDelay:i}=window.getComputedStyle(t);const s=Number.parseFloat(e),n=Number.parseFloat(i);return s||n?(e=e.split(\",\")[0],i=i.split(\",\")[0],1e3*(Number.parseFloat(e)+Number.parseFloat(i))):0})(e)+5;let n=!1;const r=({target:i})=>{i===e&&(n=!0,e.removeEventListener(o,r),v(t))};e.addEventListener(o,r),setTimeout((()=>{n||a(e)}),s)},w=(t,e,i,s)=>{const n=t.length;let o=t.indexOf(e);return-1===o?!i&&s?t[n-1]:t[0]:(o+=i?1:-1,s&&(o=(o+n)%n),t[Math.max(0,Math.min(o,n-1))])},A=/[^.]*(?=\\..*)\\.|.*/,E=/\\..*/,C=/::\\d+$/,T={};let k=1;const $={mouseenter:\"mouseover\",mouseleave:\"mouseout\"},S=new 
Set([\"click\",\"dblclick\",\"mouseup\",\"mousedown\",\"contextmenu\",\"mousewheel\",\"DOMMouseScroll\",\"mouseover\",\"mouseout\",\"mousemove\",\"selectstart\",\"selectend\",\"keydown\",\"keypress\",\"keyup\",\"orientationchange\",\"touchstart\",\"touchmove\",\"touchend\",\"touchcancel\",\"pointerdown\",\"pointermove\",\"pointerup\",\"pointerleave\",\"pointercancel\",\"gesturestart\",\"gesturechange\",\"gestureend\",\"focus\",\"blur\",\"change\",\"reset\",\"select\",\"submit\",\"focusin\",\"focusout\",\"load\",\"unload\",\"beforeunload\",\"resize\",\"move\",\"DOMContentLoaded\",\"readystatechange\",\"error\",\"abort\",\"scroll\"]);function L(t,e){return e&&`${e}::${k++}`||t.uidEvent||k++}function O(t){const e=L(t);return t.uidEvent=e,T[e]=T[e]||{},T[e]}function I(t,e,i=null){return Object.values(t).find((t=>t.callable===e&&t.delegationSelector===i))}function D(t,e,i){const s=\"string\"==typeof e,n=s?i:e||i;let o=M(t);return S.has(o)||(o=t),[s,n,o]}function N(t,e,i,s,n){if(\"string\"!=typeof e||!t)return;let[o,r,a]=D(e,i,s);if(e in $){const t=t=>function(e){if(!e.relatedTarget||e.relatedTarget!==e.delegateTarget&&!e.delegateTarget.contains(e.relatedTarget))return t.call(this,e)};r=t(r)}const l=O(t),c=l[a]||(l[a]={}),h=I(c,r,o?i:null);if(h)return void(h.oneOff=h.oneOff&&n);const d=L(r,e.replace(A,\"\")),u=o?function(t,e,i){return function s(n){const o=t.querySelectorAll(e);for(let{target:r}=n;r&&r!==this;r=r.parentNode)for(const a of o)if(a===r)return F(n,{delegateTarget:r}),s.oneOff&&j.off(t,n.type,e,i),i.apply(r,[n])}}(t,i,r):function(t,e){return function i(s){return F(s,{delegateTarget:t}),i.oneOff&&j.off(t,s.type,e),e.apply(t,[s])}}(t,r);u.delegationSelector=o?i:null,u.callable=r,u.oneOff=n,u.uidEvent=d,c[d]=u,t.addEventListener(a,u,o)}function P(t,e,i,s,n){const o=I(e[i],s,n);o&&(t.removeEventListener(i,o,Boolean(n)),delete e[i][o.uidEvent])}function x(t,e,i,s){const n=e[i]||{};for(const[o,r]of Object.entries(n))o.includes(s)&&P(t,e,i,r.callable,r.delegationSelector)}function M(t){return t=t.replace(E,\"\"),$[t]||t}const j={on(t,e,i,s){N(t,e,i,s,!1)},one(t,e,i,s){N(t,e,i,s,!0)},off(t,e,i,s){if(\"string\"!=typeof e||!t)return;const[n,o,r]=D(e,i,s),a=r!==e,l=O(t),c=l[r]||{},h=e.startsWith(\".\");if(void 0===o){if(h)for(const i of Object.keys(l))x(t,l,i,e.slice(1));for(const[i,s]of Object.entries(c)){const n=i.replace(C,\"\");a&&!e.includes(n)||P(t,l,r,s.callable,s.delegationSelector)}}else{if(!Object.keys(c).length)return;P(t,l,r,o,n?i:null)}},trigger(t,e,i){if(\"string\"!=typeof e||!t)return null;const s=f();let n=null,o=!0,r=!0,a=!1;e!==M(e)&&s&&(n=s.Event(e,i),s(t).trigger(n),o=!n.isPropagationStopped(),r=!n.isImmediatePropagationStopped(),a=n.isDefaultPrevented());const l=F(new Event(e,{bubbles:o,cancelable:!0}),i);return a&&l.preventDefault(),r&&t.dispatchEvent(l),l.defaultPrevented&&n&&n.preventDefault(),l}};function F(t,e={}){for(const[i,s]of Object.entries(e))try{t[i]=s}catch(e){Object.defineProperty(t,i,{configurable:!0,get:()=>s})}return t}function z(t){if(\"true\"===t)return!0;if(\"false\"===t)return!1;if(t===Number(t).toString())return Number(t);if(\"\"===t||\"null\"===t)return null;if(\"string\"!=typeof t)return t;try{return JSON.parse(decodeURIComponent(t))}catch(e){return t}}function H(t){return t.replace(/[A-Z]/g,(t=>`-${t.toLowerCase()}`))}const B={setDataAttribute(t,e,i){t.setAttribute(`data-bs-${H(e)}`,i)},removeDataAttribute(t,e){t.removeAttribute(`data-bs-${H(e)}`)},getDataAttributes(t){if(!t)return{};const 
e={},i=Object.keys(t.dataset).filter((t=>t.startsWith(\"bs\")&&!t.startsWith(\"bsConfig\")));for(const s of i){let i=s.replace(/^bs/,\"\");i=i.charAt(0).toLowerCase()+i.slice(1,i.length),e[i]=z(t.dataset[s])}return e},getDataAttribute:(t,e)=>z(t.getAttribute(`data-bs-${H(e)}`))};class q{static get Default(){return{}}static get DefaultType(){return{}}static get NAME(){throw new Error('You have to implement the static method \"NAME\", for each component!')}_getConfig(t){return t=this._mergeConfigObj(t),t=this._configAfterMerge(t),this._typeCheckConfig(t),t}_configAfterMerge(t){return t}_mergeConfigObj(t,e){const i=l(e)?B.getDataAttribute(e,\"config\"):{};return{...this.constructor.Default,...\"object\"==typeof i?i:{},...l(e)?B.getDataAttributes(e):{},...\"object\"==typeof t?t:{}}}_typeCheckConfig(t,e=this.constructor.DefaultType){for(const[s,n]of Object.entries(e)){const e=t[s],o=l(e)?\"element\":null==(i=e)?`${i}`:Object.prototype.toString.call(i).match(/\\s([a-z]+)/i)[1].toLowerCase();if(!new RegExp(n).test(o))throw new TypeError(`${this.constructor.NAME.toUpperCase()}: Option \"${s}\" provided type \"${o}\" but expected type \"${n}\".`)}var i}}class W extends q{constructor(t,e){super(),(t=c(t))&&(this._element=t,this._config=this._getConfig(e),n.set(this._element,this.constructor.DATA_KEY,this))}dispose(){n.remove(this._element,this.constructor.DATA_KEY),j.off(this._element,this.constructor.EVENT_KEY);for(const t of Object.getOwnPropertyNames(this))this[t]=null}_queueCallback(t,e,i=!0){y(t,e,i)}_getConfig(t){return t=this._mergeConfigObj(t,this._element),t=this._configAfterMerge(t),this._typeCheckConfig(t),t}static getInstance(t){return n.get(c(t),this.DATA_KEY)}static getOrCreateInstance(t,e={}){return this.getInstance(t)||new this(t,\"object\"==typeof e?e:null)}static get VERSION(){return\"5.3.3\"}static get DATA_KEY(){return`bs.${this.NAME}`}static get EVENT_KEY(){return`.${this.DATA_KEY}`}static eventName(t){return`${t}${this.EVENT_KEY}`}}const R=t=>{let e=t.getAttribute(\"data-bs-target\");if(!e||\"#\"===e){let i=t.getAttribute(\"href\");if(!i||!i.includes(\"#\")&&!i.startsWith(\".\"))return null;i.includes(\"#\")&&!i.startsWith(\"#\")&&(i=`#${i.split(\"#\")[1]}`),e=i&&\"#\"!==i?i.trim():null}return e?e.split(\",\").map((t=>r(t))).join(\",\"):null},K={find:(t,e=document.documentElement)=>[].concat(...Element.prototype.querySelectorAll.call(e,t)),findOne:(t,e=document.documentElement)=>Element.prototype.querySelector.call(e,t),children:(t,e)=>[].concat(...t.children).filter((t=>t.matches(e))),parents(t,e){const i=[];let s=t.parentNode.closest(e);for(;s;)i.push(s),s=s.parentNode.closest(e);return i},prev(t,e){let i=t.previousElementSibling;for(;i;){if(i.matches(e))return[i];i=i.previousElementSibling}return[]},next(t,e){let i=t.nextElementSibling;for(;i;){if(i.matches(e))return[i];i=i.nextElementSibling}return[]},focusableChildren(t){const e=[\"a\",\"button\",\"input\",\"textarea\",\"select\",\"details\",\"[tabindex]\",'[contenteditable=\"true\"]'].map((t=>`${t}:not([tabindex^=\"-\"])`)).join(\",\");return this.find(e,t).filter((t=>!d(t)&&h(t)))},getSelectorFromElement(t){const e=R(t);return e&&K.findOne(e)?e:null},getElementFromSelector(t){const e=R(t);return e?K.findOne(e):null},getMultipleElementsFromSelector(t){const e=R(t);return e?K.find(e):[]}},V=(t,e=\"hide\")=>{const i=`click.dismiss${t.EVENT_KEY}`,s=t.NAME;j.on(document,i,`[data-bs-dismiss=\"${s}\"]`,(function(i){if([\"A\",\"AREA\"].includes(this.tagName)&&i.preventDefault(),d(this))return;const 
n=K.getElementFromSelector(this)||this.closest(`.${s}`);t.getOrCreateInstance(n)[e]()}))},Q=\".bs.alert\",X=`close${Q}`,Y=`closed${Q}`;class U extends W{static get NAME(){return\"alert\"}close(){if(j.trigger(this._element,X).defaultPrevented)return;this._element.classList.remove(\"show\");const t=this._element.classList.contains(\"fade\");this._queueCallback((()=>this._destroyElement()),this._element,t)}_destroyElement(){this._element.remove(),j.trigger(this._element,Y),this.dispose()}static jQueryInterface(t){return this.each((function(){const e=U.getOrCreateInstance(this);if(\"string\"==typeof t){if(void 0===e[t]||t.startsWith(\"_\")||\"constructor\"===t)throw new TypeError(`No method named \"${t}\"`);e[t](this)}}))}}V(U,\"close\"),b(U);const G='[data-bs-toggle=\"button\"]';class J extends W{static get NAME(){return\"button\"}toggle(){this._element.setAttribute(\"aria-pressed\",this._element.classList.toggle(\"active\"))}static jQueryInterface(t){return this.each((function(){const e=J.getOrCreateInstance(this);\"toggle\"===t&&e[t]()}))}}j.on(document,\"click.bs.button.data-api\",G,(t=>{t.preventDefault();const e=t.target.closest(G);J.getOrCreateInstance(e).toggle()})),b(J);const Z=\".bs.swipe\",tt=`touchstart${Z}`,et=`touchmove${Z}`,it=`touchend${Z}`,st=`pointerdown${Z}`,nt=`pointerup${Z}`,ot={endCallback:null,leftCallback:null,rightCallback:null},rt={endCallback:\"(function|null)\",leftCallback:\"(function|null)\",rightCallback:\"(function|null)\"};class at extends q{constructor(t,e){super(),this._element=t,t&&at.isSupported()&&(this._config=this._getConfig(e),this._deltaX=0,this._supportPointerEvents=Boolean(window.PointerEvent),this._initEvents())}static get Default(){return ot}static get DefaultType(){return rt}static get NAME(){return\"swipe\"}dispose(){j.off(this._element,Z)}_start(t){this._supportPointerEvents?this._eventIsPointerPenTouch(t)&&(this._deltaX=t.clientX):this._deltaX=t.touches[0].clientX}_end(t){this._eventIsPointerPenTouch(t)&&(this._deltaX=t.clientX-this._deltaX),this._handleSwipe(),v(this._config.endCallback)}_move(t){this._deltaX=t.touches&&t.touches.length>1?0:t.touches[0].clientX-this._deltaX}_handleSwipe(){const t=Math.abs(this._deltaX);if(t<=40)return;const e=t/this._deltaX;this._deltaX=0,e&&v(e>0?this._config.rightCallback:this._config.leftCallback)}_initEvents(){this._supportPointerEvents?(j.on(this._element,st,(t=>this._start(t))),j.on(this._element,nt,(t=>this._end(t))),this._element.classList.add(\"pointer-event\")):(j.on(this._element,tt,(t=>this._start(t))),j.on(this._element,et,(t=>this._move(t))),j.on(this._element,it,(t=>this._end(t))))}_eventIsPointerPenTouch(t){return this._supportPointerEvents&&(\"pen\"===t.pointerType||\"touch\"===t.pointerType)}static isSupported(){return\"ontouchstart\"in document.documentElement||navigator.maxTouchPoints>0}}const lt=\".bs.carousel\",ct=\".data-api\",ht=\"next\",dt=\"prev\",ut=\"left\",_t=\"right\",gt=`slide${lt}`,ft=`slid${lt}`,mt=`keydown${lt}`,pt=`mouseenter${lt}`,bt=`mouseleave${lt}`,vt=`dragstart${lt}`,yt=`load${lt}${ct}`,wt=`click${lt}${ct}`,At=\"carousel\",Et=\"active\",Ct=\".active\",Tt=\".carousel-item\",kt=Ct+Tt,$t={ArrowLeft:_t,ArrowRight:ut},St={interval:5e3,keyboard:!0,pause:\"hover\",ride:!1,touch:!0,wrap:!0},Lt={interval:\"(number|boolean)\",keyboard:\"boolean\",pause:\"(string|boolean)\",ride:\"(boolean|string)\",touch:\"boolean\",wrap:\"boolean\"};class Ot extends 
W{constructor(t,e){super(t,e),this._interval=null,this._activeElement=null,this._isSliding=!1,this.touchTimeout=null,this._swipeHelper=null,this._indicatorsElement=K.findOne(\".carousel-indicators\",this._element),this._addEventListeners(),this._config.ride===At&&this.cycle()}static get Default(){return St}static get DefaultType(){return Lt}static get NAME(){return\"carousel\"}next(){this._slide(ht)}nextWhenVisible(){!document.hidden&&h(this._element)&&this.next()}prev(){this._slide(dt)}pause(){this._isSliding&&a(this._element),this._clearInterval()}cycle(){this._clearInterval(),this._updateInterval(),this._interval=setInterval((()=>this.nextWhenVisible()),this._config.interval)}_maybeEnableCycle(){this._config.ride&&(this._isSliding?j.one(this._element,ft,(()=>this.cycle())):this.cycle())}to(t){const e=this._getItems();if(t>e.length-1||t<0)return;if(this._isSliding)return void j.one(this._element,ft,(()=>this.to(t)));const i=this._getItemIndex(this._getActive());if(i===t)return;const s=t>i?ht:dt;this._slide(s,e[t])}dispose(){this._swipeHelper&&this._swipeHelper.dispose(),super.dispose()}_configAfterMerge(t){return t.defaultInterval=t.interval,t}_addEventListeners(){this._config.keyboard&&j.on(this._element,mt,(t=>this._keydown(t))),\"hover\"===this._config.pause&&(j.on(this._element,pt,(()=>this.pause())),j.on(this._element,bt,(()=>this._maybeEnableCycle()))),this._config.touch&&at.isSupported()&&this._addTouchEventListeners()}_addTouchEventListeners(){for(const t of K.find(\".carousel-item img\",this._element))j.on(t,vt,(t=>t.preventDefault()));const t={leftCallback:()=>this._slide(this._directionToOrder(ut)),rightCallback:()=>this._slide(this._directionToOrder(_t)),endCallback:()=>{\"hover\"===this._config.pause&&(this.pause(),this.touchTimeout&&clearTimeout(this.touchTimeout),this.touchTimeout=setTimeout((()=>this._maybeEnableCycle()),500+this._config.interval))}};this._swipeHelper=new at(this._element,t)}_keydown(t){if(/input|textarea/i.test(t.target.tagName))return;const e=$t[t.key];e&&(t.preventDefault(),this._slide(this._directionToOrder(e)))}_getItemIndex(t){return this._getItems().indexOf(t)}_setActiveIndicatorElement(t){if(!this._indicatorsElement)return;const e=K.findOne(Ct,this._indicatorsElement);e.classList.remove(Et),e.removeAttribute(\"aria-current\");const i=K.findOne(`[data-bs-slide-to=\"${t}\"]`,this._indicatorsElement);i&&(i.classList.add(Et),i.setAttribute(\"aria-current\",\"true\"))}_updateInterval(){const t=this._activeElement||this._getActive();if(!t)return;const e=Number.parseInt(t.getAttribute(\"data-bs-interval\"),10);this._config.interval=e||this._config.defaultInterval}_slide(t,e=null){if(this._isSliding)return;const i=this._getActive(),s=t===ht,n=e||w(this._getItems(),i,s,this._config.wrap);if(n===i)return;const o=this._getItemIndex(n),r=e=>j.trigger(this._element,e,{relatedTarget:n,direction:this._orderToDirection(t),from:this._getItemIndex(i),to:o});if(r(gt).defaultPrevented)return;if(!i||!n)return;const a=Boolean(this._interval);this.pause(),this._isSliding=!0,this._setActiveIndicatorElement(o),this._activeElement=n;const l=s?\"carousel-item-start\":\"carousel-item-end\",c=s?\"carousel-item-next\":\"carousel-item-prev\";n.classList.add(c),g(n),i.classList.add(l),n.classList.add(l),this._queueCallback((()=>{n.classList.remove(l,c),n.classList.add(Et),i.classList.remove(Et,c,l),this._isSliding=!1,r(ft)}),i,this._isAnimated()),a&&this.cycle()}_isAnimated(){return this._element.classList.contains(\"slide\")}_getActive(){return 
K.findOne(kt,this._element)}_getItems(){return K.find(Tt,this._element)}_clearInterval(){this._interval&&(clearInterval(this._interval),this._interval=null)}_directionToOrder(t){return p()?t===ut?dt:ht:t===ut?ht:dt}_orderToDirection(t){return p()?t===dt?ut:_t:t===dt?_t:ut}static jQueryInterface(t){return this.each((function(){const e=Ot.getOrCreateInstance(this,t);if(\"number\"!=typeof t){if(\"string\"==typeof t){if(void 0===e[t]||t.startsWith(\"_\")||\"constructor\"===t)throw new TypeError(`No method named \"${t}\"`);e[t]()}}else e.to(t)}))}}j.on(document,wt,\"[data-bs-slide], [data-bs-slide-to]\",(function(t){const e=K.getElementFromSelector(this);if(!e||!e.classList.contains(At))return;t.preventDefault();const i=Ot.getOrCreateInstance(e),s=this.getAttribute(\"data-bs-slide-to\");return s?(i.to(s),void i._maybeEnableCycle()):\"next\"===B.getDataAttribute(this,\"slide\")?(i.next(),void i._maybeEnableCycle()):(i.prev(),void i._maybeEnableCycle())})),j.on(window,yt,(()=>{const t=K.find('[data-bs-ride=\"carousel\"]');for(const e of t)Ot.getOrCreateInstance(e)})),b(Ot);const It=\".bs.collapse\",Dt=`show${It}`,Nt=`shown${It}`,Pt=`hide${It}`,xt=`hidden${It}`,Mt=`click${It}.data-api`,jt=\"show\",Ft=\"collapse\",zt=\"collapsing\",Ht=`:scope .${Ft} .${Ft}`,Bt='[data-bs-toggle=\"collapse\"]',qt={parent:null,toggle:!0},Wt={parent:\"(null|element)\",toggle:\"boolean\"};class Rt extends W{constructor(t,e){super(t,e),this._isTransitioning=!1,this._triggerArray=[];const i=K.find(Bt);for(const t of i){const e=K.getSelectorFromElement(t),i=K.find(e).filter((t=>t===this._element));null!==e&&i.length&&this._triggerArray.push(t)}this._initializeChildren(),this._config.parent||this._addAriaAndCollapsedClass(this._triggerArray,this._isShown()),this._config.toggle&&this.toggle()}static get Default(){return qt}static get DefaultType(){return Wt}static get NAME(){return\"collapse\"}toggle(){this._isShown()?this.hide():this.show()}show(){if(this._isTransitioning||this._isShown())return;let t=[];if(this._config.parent&&(t=this._getFirstLevelChildren(\".collapse.show, .collapse.collapsing\").filter((t=>t!==this._element)).map((t=>Rt.getOrCreateInstance(t,{toggle:!1})))),t.length&&t[0]._isTransitioning)return;if(j.trigger(this._element,Dt).defaultPrevented)return;for(const e of t)e.hide();const e=this._getDimension();this._element.classList.remove(Ft),this._element.classList.add(zt),this._element.style[e]=0,this._addAriaAndCollapsedClass(this._triggerArray,!0),this._isTransitioning=!0;const i=`scroll${e[0].toUpperCase()+e.slice(1)}`;this._queueCallback((()=>{this._isTransitioning=!1,this._element.classList.remove(zt),this._element.classList.add(Ft,jt),this._element.style[e]=\"\",j.trigger(this._element,Nt)}),this._element,!0),this._element.style[e]=`${this._element[i]}px`}hide(){if(this._isTransitioning||!this._isShown())return;if(j.trigger(this._element,Pt).defaultPrevented)return;const t=this._getDimension();this._element.style[t]=`${this._element.getBoundingClientRect()[t]}px`,g(this._element),this._element.classList.add(zt),this._element.classList.remove(Ft,jt);for(const t of this._triggerArray){const e=K.getElementFromSelector(t);e&&!this._isShown(e)&&this._addAriaAndCollapsedClass([t],!1)}this._isTransitioning=!0,this._element.style[t]=\"\",this._queueCallback((()=>{this._isTransitioning=!1,this._element.classList.remove(zt),this._element.classList.add(Ft),j.trigger(this._element,xt)}),this._element,!0)}_isShown(t=this._element){return t.classList.contains(jt)}_configAfterMerge(t){return 
t.toggle=Boolean(t.toggle),t.parent=c(t.parent),t}_getDimension(){return this._element.classList.contains(\"collapse-horizontal\")?\"width\":\"height\"}_initializeChildren(){if(!this._config.parent)return;const t=this._getFirstLevelChildren(Bt);for(const e of t){const t=K.getElementFromSelector(e);t&&this._addAriaAndCollapsedClass([e],this._isShown(t))}}_getFirstLevelChildren(t){const e=K.find(Ht,this._config.parent);return K.find(t,this._config.parent).filter((t=>!e.includes(t)))}_addAriaAndCollapsedClass(t,e){if(t.length)for(const i of t)i.classList.toggle(\"collapsed\",!e),i.setAttribute(\"aria-expanded\",e)}static jQueryInterface(t){const e={};return\"string\"==typeof t&&/show|hide/.test(t)&&(e.toggle=!1),this.each((function(){const i=Rt.getOrCreateInstance(this,e);if(\"string\"==typeof t){if(void 0===i[t])throw new TypeError(`No method named \"${t}\"`);i[t]()}}))}}j.on(document,Mt,Bt,(function(t){(\"A\"===t.target.tagName||t.delegateTarget&&\"A\"===t.delegateTarget.tagName)&&t.preventDefault();for(const t of K.getMultipleElementsFromSelector(this))Rt.getOrCreateInstance(t,{toggle:!1}).toggle()})),b(Rt);const Kt=\"dropdown\",Vt=\".bs.dropdown\",Qt=\".data-api\",Xt=\"ArrowUp\",Yt=\"ArrowDown\",Ut=`hide${Vt}`,Gt=`hidden${Vt}`,Jt=`show${Vt}`,Zt=`shown${Vt}`,te=`click${Vt}${Qt}`,ee=`keydown${Vt}${Qt}`,ie=`keyup${Vt}${Qt}`,se=\"show\",ne='[data-bs-toggle=\"dropdown\"]:not(.disabled):not(:disabled)',oe=`${ne}.${se}`,re=\".dropdown-menu\",ae=p()?\"top-end\":\"top-start\",le=p()?\"top-start\":\"top-end\",ce=p()?\"bottom-end\":\"bottom-start\",he=p()?\"bottom-start\":\"bottom-end\",de=p()?\"left-start\":\"right-start\",ue=p()?\"right-start\":\"left-start\",_e={autoClose:!0,boundary:\"clippingParents\",display:\"dynamic\",offset:[0,2],popperConfig:null,reference:\"toggle\"},ge={autoClose:\"(boolean|string)\",boundary:\"(string|element)\",display:\"string\",offset:\"(array|string|function)\",popperConfig:\"(null|object|function)\",reference:\"(string|element|object)\"};class fe extends W{constructor(t,e){super(t,e),this._popper=null,this._parent=this._element.parentNode,this._menu=K.next(this._element,re)[0]||K.prev(this._element,re)[0]||K.findOne(re,this._parent),this._inNavbar=this._detectNavbar()}static get Default(){return _e}static get DefaultType(){return ge}static get NAME(){return Kt}toggle(){return this._isShown()?this.hide():this.show()}show(){if(d(this._element)||this._isShown())return;const t={relatedTarget:this._element};if(!j.trigger(this._element,Jt,t).defaultPrevented){if(this._createPopper(),\"ontouchstart\"in document.documentElement&&!this._parent.closest(\".navbar-nav\"))for(const t of[].concat(...document.body.children))j.on(t,\"mouseover\",_);this._element.focus(),this._element.setAttribute(\"aria-expanded\",!0),this._menu.classList.add(se),this._element.classList.add(se),j.trigger(this._element,Zt,t)}}hide(){if(d(this._element)||!this._isShown())return;const t={relatedTarget:this._element};this._completeHide(t)}dispose(){this._popper&&this._popper.destroy(),super.dispose()}update(){this._inNavbar=this._detectNavbar(),this._popper&&this._popper.update()}_completeHide(t){if(!j.trigger(this._element,Ut,t).defaultPrevented){if(\"ontouchstart\"in document.documentElement)for(const t 
of[].concat(...document.body.children))j.off(t,\"mouseover\",_);this._popper&&this._popper.destroy(),this._menu.classList.remove(se),this._element.classList.remove(se),this._element.setAttribute(\"aria-expanded\",\"false\"),B.removeDataAttribute(this._menu,\"popper\"),j.trigger(this._element,Gt,t)}}_getConfig(t){if(\"object\"==typeof(t=super._getConfig(t)).reference&&!l(t.reference)&&\"function\"!=typeof t.reference.getBoundingClientRect)throw new TypeError(`${Kt.toUpperCase()}: Option \"reference\" provided type \"object\" without a required \"getBoundingClientRect\" method.`);return t}_createPopper(){if(void 0===i)throw new TypeError(\"Bootstrap's dropdowns require Popper (https://popper.js.org)\");let t=this._element;\"parent\"===this._config.reference?t=this._parent:l(this._config.reference)?t=c(this._config.reference):\"object\"==typeof this._config.reference&&(t=this._config.reference);const e=this._getPopperConfig();this._popper=i.createPopper(t,this._menu,e)}_isShown(){return this._menu.classList.contains(se)}_getPlacement(){const t=this._parent;if(t.classList.contains(\"dropend\"))return de;if(t.classList.contains(\"dropstart\"))return ue;if(t.classList.contains(\"dropup-center\"))return\"top\";if(t.classList.contains(\"dropdown-center\"))return\"bottom\";const e=\"end\"===getComputedStyle(this._menu).getPropertyValue(\"--bs-position\").trim();return t.classList.contains(\"dropup\")?e?le:ae:e?he:ce}_detectNavbar(){return null!==this._element.closest(\".navbar\")}_getOffset(){const{offset:t}=this._config;return\"string\"==typeof t?t.split(\",\").map((t=>Number.parseInt(t,10))):\"function\"==typeof t?e=>t(e,this._element):t}_getPopperConfig(){const t={placement:this._getPlacement(),modifiers:[{name:\"preventOverflow\",options:{boundary:this._config.boundary}},{name:\"offset\",options:{offset:this._getOffset()}}]};return(this._inNavbar||\"static\"===this._config.display)&&(B.setDataAttribute(this._menu,\"popper\",\"static\"),t.modifiers=[{name:\"applyStyles\",enabled:!1}]),{...t,...v(this._config.popperConfig,[t])}}_selectMenuItem({key:t,target:e}){const i=K.find(\".dropdown-menu .dropdown-item:not(.disabled):not(:disabled)\",this._menu).filter((t=>h(t)));i.length&&w(i,e,t===Yt,!i.includes(e)).focus()}static jQueryInterface(t){return this.each((function(){const e=fe.getOrCreateInstance(this,t);if(\"string\"==typeof t){if(void 0===e[t])throw new TypeError(`No method named \"${t}\"`);e[t]()}}))}static clearMenus(t){if(2===t.button||\"keyup\"===t.type&&\"Tab\"!==t.key)return;const e=K.find(oe);for(const i of e){const e=fe.getInstance(i);if(!e||!1===e._config.autoClose)continue;const s=t.composedPath(),n=s.includes(e._menu);if(s.includes(e._element)||\"inside\"===e._config.autoClose&&!n||\"outside\"===e._config.autoClose&&n)continue;if(e._menu.contains(t.target)&&(\"keyup\"===t.type&&\"Tab\"===t.key||/input|select|option|textarea|form/i.test(t.target.tagName)))continue;const o={relatedTarget:e._element};\"click\"===t.type&&(o.clickEvent=t),e._completeHide(o)}}static dataApiKeydownHandler(t){const e=/input|textarea/i.test(t.target.tagName),i=\"Escape\"===t.key,s=[Xt,Yt].includes(t.key);if(!s&&!i)return;if(e&&!i)return;t.preventDefault();const n=this.matches(ne)?this:K.prev(this,ne)[0]||K.next(this,ne)[0]||K.findOne(ne,t.delegateTarget.parentNode),o=fe.getOrCreateInstance(n);if(s)return t.stopPropagation(),o.show(),void 
o._selectMenuItem(t);o._isShown()&&(t.stopPropagation(),o.hide(),n.focus())}}j.on(document,ee,ne,fe.dataApiKeydownHandler),j.on(document,ee,re,fe.dataApiKeydownHandler),j.on(document,te,fe.clearMenus),j.on(document,ie,fe.clearMenus),j.on(document,te,ne,(function(t){t.preventDefault(),fe.getOrCreateInstance(this).toggle()})),b(fe);const me=\"backdrop\",pe=\"show\",be=`mousedown.bs.${me}`,ve={className:\"modal-backdrop\",clickCallback:null,isAnimated:!1,isVisible:!0,rootElement:\"body\"},ye={className:\"string\",clickCallback:\"(function|null)\",isAnimated:\"boolean\",isVisible:\"boolean\",rootElement:\"(element|string)\"};class we extends q{constructor(t){super(),this._config=this._getConfig(t),this._isAppended=!1,this._element=null}static get Default(){return ve}static get DefaultType(){return ye}static get NAME(){return me}show(t){if(!this._config.isVisible)return void v(t);this._append();const e=this._getElement();this._config.isAnimated&&g(e),e.classList.add(pe),this._emulateAnimation((()=>{v(t)}))}hide(t){this._config.isVisible?(this._getElement().classList.remove(pe),this._emulateAnimation((()=>{this.dispose(),v(t)}))):v(t)}dispose(){this._isAppended&&(j.off(this._element,be),this._element.remove(),this._isAppended=!1)}_getElement(){if(!this._element){const t=document.createElement(\"div\");t.className=this._config.className,this._config.isAnimated&&t.classList.add(\"fade\"),this._element=t}return this._element}_configAfterMerge(t){return t.rootElement=c(t.rootElement),t}_append(){if(this._isAppended)return;const t=this._getElement();this._config.rootElement.append(t),j.on(t,be,(()=>{v(this._config.clickCallback)})),this._isAppended=!0}_emulateAnimation(t){y(t,this._getElement(),this._config.isAnimated)}}const Ae=\".bs.focustrap\",Ee=`focusin${Ae}`,Ce=`keydown.tab${Ae}`,Te=\"backward\",ke={autofocus:!0,trapElement:null},$e={autofocus:\"boolean\",trapElement:\"element\"};class Se extends q{constructor(t){super(),this._config=this._getConfig(t),this._isActive=!1,this._lastTabNavDirection=null}static get Default(){return ke}static get DefaultType(){return $e}static get NAME(){return\"focustrap\"}activate(){this._isActive||(this._config.autofocus&&this._config.trapElement.focus(),j.off(document,Ae),j.on(document,Ee,(t=>this._handleFocusin(t))),j.on(document,Ce,(t=>this._handleKeydown(t))),this._isActive=!0)}deactivate(){this._isActive&&(this._isActive=!1,j.off(document,Ae))}_handleFocusin(t){const{trapElement:e}=this._config;if(t.target===document||t.target===e||e.contains(t.target))return;const i=K.focusableChildren(e);0===i.length?e.focus():this._lastTabNavDirection===Te?i[i.length-1].focus():i[0].focus()}_handleKeydown(t){\"Tab\"===t.key&&(this._lastTabNavDirection=t.shiftKey?Te:\"forward\")}}const Le=\".fixed-top, .fixed-bottom, .is-fixed, .sticky-top\",Oe=\".sticky-top\",Ie=\"padding-right\",De=\"margin-right\";class Ne{constructor(){this._element=document.body}getWidth(){const t=document.documentElement.clientWidth;return Math.abs(window.innerWidth-t)}hide(){const t=this.getWidth();this._disableOverFlow(),this._setElementAttributes(this._element,Ie,(e=>e+t)),this._setElementAttributes(Le,Ie,(e=>e+t)),this._setElementAttributes(Oe,De,(e=>e-t))}reset(){this._resetElementAttributes(this._element,\"overflow\"),this._resetElementAttributes(this._element,Ie),this._resetElementAttributes(Le,Ie),this._resetElementAttributes(Oe,De)}isOverflowing(){return 
this.getWidth()>0}_disableOverFlow(){this._saveInitialAttribute(this._element,\"overflow\"),this._element.style.overflow=\"hidden\"}_setElementAttributes(t,e,i){const s=this.getWidth();this._applyManipulationCallback(t,(t=>{if(t!==this._element&&window.innerWidth>t.clientWidth+s)return;this._saveInitialAttribute(t,e);const n=window.getComputedStyle(t).getPropertyValue(e);t.style.setProperty(e,`${i(Number.parseFloat(n))}px`)}))}_saveInitialAttribute(t,e){const i=t.style.getPropertyValue(e);i&&B.setDataAttribute(t,e,i)}_resetElementAttributes(t,e){this._applyManipulationCallback(t,(t=>{const i=B.getDataAttribute(t,e);null!==i?(B.removeDataAttribute(t,e),t.style.setProperty(e,i)):t.style.removeProperty(e)}))}_applyManipulationCallback(t,e){if(l(t))e(t);else for(const i of K.find(t,this._element))e(i)}}const Pe=\".bs.modal\",xe=`hide${Pe}`,Me=`hidePrevented${Pe}`,je=`hidden${Pe}`,Fe=`show${Pe}`,ze=`shown${Pe}`,He=`resize${Pe}`,Be=`click.dismiss${Pe}`,qe=`mousedown.dismiss${Pe}`,We=`keydown.dismiss${Pe}`,Re=`click${Pe}.data-api`,Ke=\"modal-open\",Ve=\"show\",Qe=\"modal-static\",Xe={backdrop:!0,focus:!0,keyboard:!0},Ye={backdrop:\"(boolean|string)\",focus:\"boolean\",keyboard:\"boolean\"};class Ue extends W{constructor(t,e){super(t,e),this._dialog=K.findOne(\".modal-dialog\",this._element),this._backdrop=this._initializeBackDrop(),this._focustrap=this._initializeFocusTrap(),this._isShown=!1,this._isTransitioning=!1,this._scrollBar=new Ne,this._addEventListeners()}static get Default(){return Xe}static get DefaultType(){return Ye}static get NAME(){return\"modal\"}toggle(t){return this._isShown?this.hide():this.show(t)}show(t){this._isShown||this._isTransitioning||j.trigger(this._element,Fe,{relatedTarget:t}).defaultPrevented||(this._isShown=!0,this._isTransitioning=!0,this._scrollBar.hide(),document.body.classList.add(Ke),this._adjustDialog(),this._backdrop.show((()=>this._showElement(t))))}hide(){this._isShown&&!this._isTransitioning&&(j.trigger(this._element,xe).defaultPrevented||(this._isShown=!1,this._isTransitioning=!0,this._focustrap.deactivate(),this._element.classList.remove(Ve),this._queueCallback((()=>this._hideModal()),this._element,this._isAnimated())))}dispose(){j.off(window,Pe),j.off(this._dialog,Pe),this._backdrop.dispose(),this._focustrap.deactivate(),super.dispose()}handleUpdate(){this._adjustDialog()}_initializeBackDrop(){return new we({isVisible:Boolean(this._config.backdrop),isAnimated:this._isAnimated()})}_initializeFocusTrap(){return new Se({trapElement:this._element})}_showElement(t){document.body.contains(this._element)||document.body.append(this._element),this._element.style.display=\"block\",this._element.removeAttribute(\"aria-hidden\"),this._element.setAttribute(\"aria-modal\",!0),this._element.setAttribute(\"role\",\"dialog\"),this._element.scrollTop=0;const 
e=K.findOne(\".modal-body\",this._dialog);e&&(e.scrollTop=0),g(this._element),this._element.classList.add(Ve),this._queueCallback((()=>{this._config.focus&&this._focustrap.activate(),this._isTransitioning=!1,j.trigger(this._element,ze,{relatedTarget:t})}),this._dialog,this._isAnimated())}_addEventListeners(){j.on(this._element,We,(t=>{\"Escape\"===t.key&&(this._config.keyboard?this.hide():this._triggerBackdropTransition())})),j.on(window,He,(()=>{this._isShown&&!this._isTransitioning&&this._adjustDialog()})),j.on(this._element,qe,(t=>{j.one(this._element,Be,(e=>{this._element===t.target&&this._element===e.target&&(\"static\"!==this._config.backdrop?this._config.backdrop&&this.hide():this._triggerBackdropTransition())}))}))}_hideModal(){this._element.style.display=\"none\",this._element.setAttribute(\"aria-hidden\",!0),this._element.removeAttribute(\"aria-modal\"),this._element.removeAttribute(\"role\"),this._isTransitioning=!1,this._backdrop.hide((()=>{document.body.classList.remove(Ke),this._resetAdjustments(),this._scrollBar.reset(),j.trigger(this._element,je)}))}_isAnimated(){return this._element.classList.contains(\"fade\")}_triggerBackdropTransition(){if(j.trigger(this._element,Me).defaultPrevented)return;const t=this._element.scrollHeight>document.documentElement.clientHeight,e=this._element.style.overflowY;\"hidden\"===e||this._element.classList.contains(Qe)||(t||(this._element.style.overflowY=\"hidden\"),this._element.classList.add(Qe),this._queueCallback((()=>{this._element.classList.remove(Qe),this._queueCallback((()=>{this._element.style.overflowY=e}),this._dialog)}),this._dialog),this._element.focus())}_adjustDialog(){const t=this._element.scrollHeight>document.documentElement.clientHeight,e=this._scrollBar.getWidth(),i=e>0;if(i&&!t){const t=p()?\"paddingLeft\":\"paddingRight\";this._element.style[t]=`${e}px`}if(!i&&t){const t=p()?\"paddingRight\":\"paddingLeft\";this._element.style[t]=`${e}px`}}_resetAdjustments(){this._element.style.paddingLeft=\"\",this._element.style.paddingRight=\"\"}static jQueryInterface(t,e){return this.each((function(){const i=Ue.getOrCreateInstance(this,t);if(\"string\"==typeof t){if(void 0===i[t])throw new TypeError(`No method named \"${t}\"`);i[t](e)}}))}}j.on(document,Re,'[data-bs-toggle=\"modal\"]',(function(t){const e=K.getElementFromSelector(this);[\"A\",\"AREA\"].includes(this.tagName)&&t.preventDefault(),j.one(e,Fe,(t=>{t.defaultPrevented||j.one(e,je,(()=>{h(this)&&this.focus()}))}));const i=K.findOne(\".modal.show\");i&&Ue.getInstance(i).hide(),Ue.getOrCreateInstance(e).toggle(this)})),V(Ue),b(Ue);const Ge=\".bs.offcanvas\",Je=\".data-api\",Ze=`load${Ge}${Je}`,ti=\"show\",ei=\"showing\",ii=\"hiding\",si=\".offcanvas.show\",ni=`show${Ge}`,oi=`shown${Ge}`,ri=`hide${Ge}`,ai=`hidePrevented${Ge}`,li=`hidden${Ge}`,ci=`resize${Ge}`,hi=`click${Ge}${Je}`,di=`keydown.dismiss${Ge}`,ui={backdrop:!0,keyboard:!0,scroll:!1},_i={backdrop:\"(boolean|string)\",keyboard:\"boolean\",scroll:\"boolean\"};class gi extends W{constructor(t,e){super(t,e),this._isShown=!1,this._backdrop=this._initializeBackDrop(),this._focustrap=this._initializeFocusTrap(),this._addEventListeners()}static get Default(){return ui}static get DefaultType(){return _i}static get NAME(){return\"offcanvas\"}toggle(t){return this._isShown?this.hide():this.show(t)}show(t){this._isShown||j.trigger(this._element,ni,{relatedTarget:t}).defaultPrevented||(this._isShown=!0,this._backdrop.show(),this._config.scroll||(new 
Ne).hide(),this._element.setAttribute(\"aria-modal\",!0),this._element.setAttribute(\"role\",\"dialog\"),this._element.classList.add(ei),this._queueCallback((()=>{this._config.scroll&&!this._config.backdrop||this._focustrap.activate(),this._element.classList.add(ti),this._element.classList.remove(ei),j.trigger(this._element,oi,{relatedTarget:t})}),this._element,!0))}hide(){this._isShown&&(j.trigger(this._element,ri).defaultPrevented||(this._focustrap.deactivate(),this._element.blur(),this._isShown=!1,this._element.classList.add(ii),this._backdrop.hide(),this._queueCallback((()=>{this._element.classList.remove(ti,ii),this._element.removeAttribute(\"aria-modal\"),this._element.removeAttribute(\"role\"),this._config.scroll||(new Ne).reset(),j.trigger(this._element,li)}),this._element,!0)))}dispose(){this._backdrop.dispose(),this._focustrap.deactivate(),super.dispose()}_initializeBackDrop(){const t=Boolean(this._config.backdrop);return new we({className:\"offcanvas-backdrop\",isVisible:t,isAnimated:!0,rootElement:this._element.parentNode,clickCallback:t?()=>{\"static\"!==this._config.backdrop?this.hide():j.trigger(this._element,ai)}:null})}_initializeFocusTrap(){return new Se({trapElement:this._element})}_addEventListeners(){j.on(this._element,di,(t=>{\"Escape\"===t.key&&(this._config.keyboard?this.hide():j.trigger(this._element,ai))}))}static jQueryInterface(t){return this.each((function(){const e=gi.getOrCreateInstance(this,t);if(\"string\"==typeof t){if(void 0===e[t]||t.startsWith(\"_\")||\"constructor\"===t)throw new TypeError(`No method named \"${t}\"`);e[t](this)}}))}}j.on(document,hi,'[data-bs-toggle=\"offcanvas\"]',(function(t){const e=K.getElementFromSelector(this);if([\"A\",\"AREA\"].includes(this.tagName)&&t.preventDefault(),d(this))return;j.one(e,li,(()=>{h(this)&&this.focus()}));const i=K.findOne(si);i&&i!==e&&gi.getInstance(i).hide(),gi.getOrCreateInstance(e).toggle(this)})),j.on(window,Ze,(()=>{for(const t of K.find(si))gi.getOrCreateInstance(t).show()})),j.on(window,ci,(()=>{for(const t of K.find(\"[aria-modal][class*=show][class*=offcanvas-]\"))\"fixed\"!==getComputedStyle(t).position&&gi.getOrCreateInstance(t).hide()})),V(gi),b(gi);const fi={\"*\":[\"class\",\"dir\",\"id\",\"lang\",\"role\",/^aria-[\\w-]*$/i],a:[\"target\",\"href\",\"title\",\"rel\"],area:[],b:[],br:[],col:[],code:[],dd:[],div:[],dl:[],dt:[],em:[],hr:[],h1:[],h2:[],h3:[],h4:[],h5:[],h6:[],i:[],img:[\"src\",\"srcset\",\"alt\",\"title\",\"width\",\"height\"],li:[],ol:[],p:[],pre:[],s:[],small:[],span:[],sub:[],sup:[],strong:[],u:[],ul:[]},mi=new Set([\"background\",\"cite\",\"href\",\"itemtype\",\"longdesc\",\"poster\",\"src\",\"xlink:href\"]),pi=/^(?!javascript:)(?:[a-z0-9+.-]+:|[^&:/?#]*(?:[/?#]|$))/i,bi=(t,e)=>{const i=t.nodeName.toLowerCase();return e.includes(i)?!mi.has(i)||Boolean(pi.test(t.nodeValue)):e.filter((t=>t instanceof RegExp)).some((t=>t.test(i)))},vi={allowList:fi,content:{},extraClass:\"\",html:!1,sanitize:!0,sanitizeFn:null,template:\"
\"},yi={allowList:\"object\",content:\"object\",extraClass:\"(string|function)\",html:\"boolean\",sanitize:\"boolean\",sanitizeFn:\"(null|function)\",template:\"string\"},wi={entry:\"(string|element|function|null)\",selector:\"(string|element)\"};class Ai extends q{constructor(t){super(),this._config=this._getConfig(t)}static get Default(){return vi}static get DefaultType(){return yi}static get NAME(){return\"TemplateFactory\"}getContent(){return Object.values(this._config.content).map((t=>this._resolvePossibleFunction(t))).filter(Boolean)}hasContent(){return this.getContent().length>0}changeContent(t){return this._checkContent(t),this._config.content={...this._config.content,...t},this}toHtml(){const t=document.createElement(\"div\");t.innerHTML=this._maybeSanitize(this._config.template);for(const[e,i]of Object.entries(this._config.content))this._setContent(t,i,e);const e=t.children[0],i=this._resolvePossibleFunction(this._config.extraClass);return i&&e.classList.add(...i.split(\" \")),e}_typeCheckConfig(t){super._typeCheckConfig(t),this._checkContent(t.content)}_checkContent(t){for(const[e,i]of Object.entries(t))super._typeCheckConfig({selector:e,entry:i},wi)}_setContent(t,e,i){const s=K.findOne(i,t);s&&((e=this._resolvePossibleFunction(e))?l(e)?this._putElementInTemplate(c(e),s):this._config.html?s.innerHTML=this._maybeSanitize(e):s.textContent=e:s.remove())}_maybeSanitize(t){return this._config.sanitize?function(t,e,i){if(!t.length)return t;if(i&&\"function\"==typeof i)return i(t);const s=(new window.DOMParser).parseFromString(t,\"text/html\"),n=[].concat(...s.body.querySelectorAll(\"*\"));for(const t of n){const i=t.nodeName.toLowerCase();if(!Object.keys(e).includes(i)){t.remove();continue}const s=[].concat(...t.attributes),n=[].concat(e[\"*\"]||[],e[i]||[]);for(const e of s)bi(e,n)||t.removeAttribute(e.nodeName)}return s.body.innerHTML}(t,this._config.allowList,this._config.sanitizeFn):t}_resolvePossibleFunction(t){return v(t,[this])}_putElementInTemplate(t,e){if(this._config.html)return e.innerHTML=\"\",void e.append(t);e.textContent=t.textContent}}const Ei=new Set([\"sanitize\",\"allowList\",\"sanitizeFn\"]),Ci=\"fade\",Ti=\"show\",ki=\".modal\",$i=\"hide.bs.modal\",Si=\"hover\",Li=\"focus\",Oi={AUTO:\"auto\",TOP:\"top\",RIGHT:p()?\"left\":\"right\",BOTTOM:\"bottom\",LEFT:p()?\"right\":\"left\"},Ii={allowList:fi,animation:!0,boundary:\"clippingParents\",container:!1,customClass:\"\",delay:0,fallbackPlacements:[\"top\",\"right\",\"bottom\",\"left\"],html:!1,offset:[0,6],placement:\"top\",popperConfig:null,sanitize:!0,sanitizeFn:null,selector:!1,template:'
',title:\"\",trigger:\"hover focus\"},Di={allowList:\"object\",animation:\"boolean\",boundary:\"(string|element)\",container:\"(string|element|boolean)\",customClass:\"(string|function)\",delay:\"(number|object)\",fallbackPlacements:\"array\",html:\"boolean\",offset:\"(array|string|function)\",placement:\"(string|function)\",popperConfig:\"(null|object|function)\",sanitize:\"boolean\",sanitizeFn:\"(null|function)\",selector:\"(string|boolean)\",template:\"string\",title:\"(string|element|function)\",trigger:\"string\"};class Ni extends W{constructor(t,e){if(void 0===i)throw new TypeError(\"Bootstrap's tooltips require Popper (https://popper.js.org)\");super(t,e),this._isEnabled=!0,this._timeout=0,this._isHovered=null,this._activeTrigger={},this._popper=null,this._templateFactory=null,this._newContent=null,this.tip=null,this._setListeners(),this._config.selector||this._fixTitle()}static get Default(){return Ii}static get DefaultType(){return Di}static get NAME(){return\"tooltip\"}enable(){this._isEnabled=!0}disable(){this._isEnabled=!1}toggleEnabled(){this._isEnabled=!this._isEnabled}toggle(){this._isEnabled&&(this._activeTrigger.click=!this._activeTrigger.click,this._isShown()?this._leave():this._enter())}dispose(){clearTimeout(this._timeout),j.off(this._element.closest(ki),$i,this._hideModalHandler),this._element.getAttribute(\"data-bs-original-title\")&&this._element.setAttribute(\"title\",this._element.getAttribute(\"data-bs-original-title\")),this._disposePopper(),super.dispose()}show(){if(\"none\"===this._element.style.display)throw new Error(\"Please use show on visible elements\");if(!this._isWithContent()||!this._isEnabled)return;const t=j.trigger(this._element,this.constructor.eventName(\"show\")),e=(u(this._element)||this._element.ownerDocument.documentElement).contains(this._element);if(t.defaultPrevented||!e)return;this._disposePopper();const i=this._getTipElement();this._element.setAttribute(\"aria-describedby\",i.getAttribute(\"id\"));const{container:s}=this._config;if(this._element.ownerDocument.documentElement.contains(this.tip)||(s.append(i),j.trigger(this._element,this.constructor.eventName(\"inserted\"))),this._popper=this._createPopper(i),i.classList.add(Ti),\"ontouchstart\"in document.documentElement)for(const t of[].concat(...document.body.children))j.on(t,\"mouseover\",_);this._queueCallback((()=>{j.trigger(this._element,this.constructor.eventName(\"shown\")),!1===this._isHovered&&this._leave(),this._isHovered=!1}),this.tip,this._isAnimated())}hide(){if(this._isShown()&&!j.trigger(this._element,this.constructor.eventName(\"hide\")).defaultPrevented){if(this._getTipElement().classList.remove(Ti),\"ontouchstart\"in document.documentElement)for(const t of[].concat(...document.body.children))j.off(t,\"mouseover\",_);this._activeTrigger.click=!1,this._activeTrigger[Li]=!1,this._activeTrigger[Si]=!1,this._isHovered=null,this._queueCallback((()=>{this._isWithActiveTrigger()||(this._isHovered||this._disposePopper(),this._element.removeAttribute(\"aria-describedby\"),j.trigger(this._element,this.constructor.eventName(\"hidden\")))}),this.tip,this._isAnimated())}}update(){this._popper&&this._popper.update()}_isWithContent(){return Boolean(this._getTitle())}_getTipElement(){return this.tip||(this.tip=this._createTipElement(this._newContent||this._getContentForTemplate())),this.tip}_createTipElement(t){const e=this._getTemplateFactory(t).toHtml();if(!e)return null;e.classList.remove(Ci,Ti),e.classList.add(`bs-${this.constructor.NAME}-auto`);const 
i=(t=>{do{t+=Math.floor(1e6*Math.random())}while(document.getElementById(t));return t})(this.constructor.NAME).toString();return e.setAttribute(\"id\",i),this._isAnimated()&&e.classList.add(Ci),e}setContent(t){this._newContent=t,this._isShown()&&(this._disposePopper(),this.show())}_getTemplateFactory(t){return this._templateFactory?this._templateFactory.changeContent(t):this._templateFactory=new Ai({...this._config,content:t,extraClass:this._resolvePossibleFunction(this._config.customClass)}),this._templateFactory}_getContentForTemplate(){return{\".tooltip-inner\":this._getTitle()}}_getTitle(){return this._resolvePossibleFunction(this._config.title)||this._element.getAttribute(\"data-bs-original-title\")}_initializeOnDelegatedTarget(t){return this.constructor.getOrCreateInstance(t.delegateTarget,this._getDelegateConfig())}_isAnimated(){return this._config.animation||this.tip&&this.tip.classList.contains(Ci)}_isShown(){return this.tip&&this.tip.classList.contains(Ti)}_createPopper(t){const e=v(this._config.placement,[this,t,this._element]),s=Oi[e.toUpperCase()];return i.createPopper(this._element,t,this._getPopperConfig(s))}_getOffset(){const{offset:t}=this._config;return\"string\"==typeof t?t.split(\",\").map((t=>Number.parseInt(t,10))):\"function\"==typeof t?e=>t(e,this._element):t}_resolvePossibleFunction(t){return v(t,[this._element])}_getPopperConfig(t){const e={placement:t,modifiers:[{name:\"flip\",options:{fallbackPlacements:this._config.fallbackPlacements}},{name:\"offset\",options:{offset:this._getOffset()}},{name:\"preventOverflow\",options:{boundary:this._config.boundary}},{name:\"arrow\",options:{element:`.${this.constructor.NAME}-arrow`}},{name:\"preSetPlacement\",enabled:!0,phase:\"beforeMain\",fn:t=>{this._getTipElement().setAttribute(\"data-popper-placement\",t.state.placement)}}]};return{...e,...v(this._config.popperConfig,[e])}}_setListeners(){const t=this._config.trigger.split(\" \");for(const e of t)if(\"click\"===e)j.on(this._element,this.constructor.eventName(\"click\"),this._config.selector,(t=>{this._initializeOnDelegatedTarget(t).toggle()}));else if(\"manual\"!==e){const t=e===Si?this.constructor.eventName(\"mouseenter\"):this.constructor.eventName(\"focusin\"),i=e===Si?this.constructor.eventName(\"mouseleave\"):this.constructor.eventName(\"focusout\");j.on(this._element,t,this._config.selector,(t=>{const e=this._initializeOnDelegatedTarget(t);e._activeTrigger[\"focusin\"===t.type?Li:Si]=!0,e._enter()})),j.on(this._element,i,this._config.selector,(t=>{const e=this._initializeOnDelegatedTarget(t);e._activeTrigger[\"focusout\"===t.type?Li:Si]=e._element.contains(t.relatedTarget),e._leave()}))}this._hideModalHandler=()=>{this._element&&this.hide()},j.on(this._element.closest(ki),$i,this._hideModalHandler)}_fixTitle(){const t=this._element.getAttribute(\"title\");t&&(this._element.getAttribute(\"aria-label\")||this._element.textContent.trim()||this._element.setAttribute(\"aria-label\",t),this._element.setAttribute(\"data-bs-original-title\",t),this._element.removeAttribute(\"title\"))}_enter(){this._isShown()||this._isHovered?this._isHovered=!0:(this._isHovered=!0,this._setTimeout((()=>{this._isHovered&&this.show()}),this._config.delay.show))}_leave(){this._isWithActiveTrigger()||(this._isHovered=!1,this._setTimeout((()=>{this._isHovered||this.hide()}),this._config.delay.hide))}_setTimeout(t,e){clearTimeout(this._timeout),this._timeout=setTimeout(t,e)}_isWithActiveTrigger(){return Object.values(this._activeTrigger).includes(!0)}_getConfig(t){const 
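/* merge config sources: element data attributes first, then the passed-in object; sanitize, sanitizeFn and allowList are stripped from the data attributes so markup cannot relax sanitization */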
e=B.getDataAttributes(this._element);for(const t of Object.keys(e))Ei.has(t)&&delete e[t];return t={...e,...\"object\"==typeof t&&t?t:{}},t=this._mergeConfigObj(t),t=this._configAfterMerge(t),this._typeCheckConfig(t),t}_configAfterMerge(t){return t.container=!1===t.container?document.body:c(t.container),\"number\"==typeof t.delay&&(t.delay={show:t.delay,hide:t.delay}),\"number\"==typeof t.title&&(t.title=t.title.toString()),\"number\"==typeof t.content&&(t.content=t.content.toString()),t}_getDelegateConfig(){const t={};for(const[e,i]of Object.entries(this._config))this.constructor.Default[e]!==i&&(t[e]=i);return t.selector=!1,t.trigger=\"manual\",t}_disposePopper(){this._popper&&(this._popper.destroy(),this._popper=null),this.tip&&(this.tip.remove(),this.tip=null)}static jQueryInterface(t){return this.each((function(){const e=Ni.getOrCreateInstance(this,t);if(\"string\"==typeof t){if(void 0===e[t])throw new TypeError(`No method named \"${t}\"`);e[t]()}}))}}b(Ni);const Pi={...Ni.Default,content:\"\",offset:[0,8],placement:\"right\",template:'
<div class=\"popover\" role=\"tooltip\"><div class=\"popover-arrow\"></div><h3 class=\"popover-header\"></h3><div class=\"popover-body\"></div></div>
',trigger:\"click\"},xi={...Ni.DefaultType,content:\"(null|string|element|function)\"};class Mi extends Ni{static get Default(){return Pi}static get DefaultType(){return xi}static get NAME(){return\"popover\"}_isWithContent(){return this._getTitle()||this._getContent()}_getContentForTemplate(){return{\".popover-header\":this._getTitle(),\".popover-body\":this._getContent()}}_getContent(){return this._resolvePossibleFunction(this._config.content)}static jQueryInterface(t){return this.each((function(){const e=Mi.getOrCreateInstance(this,t);if(\"string\"==typeof t){if(void 0===e[t])throw new TypeError(`No method named \"${t}\"`);e[t]()}}))}}b(Mi);const ji=\".bs.scrollspy\",Fi=`activate${ji}`,zi=`click${ji}`,Hi=`load${ji}.data-api`,Bi=\"active\",qi=\"[href]\",Wi=\".nav-link\",Ri=`${Wi}, .nav-item > ${Wi}, .list-group-item`,Ki={offset:null,rootMargin:\"0px 0px -25%\",smoothScroll:!1,target:null,threshold:[.1,.5,1]},Vi={offset:\"(number|null)\",rootMargin:\"string\",smoothScroll:\"boolean\",target:\"element\",threshold:\"array\"};class Qi extends W{constructor(t,e){super(t,e),this._targetLinks=new Map,this._observableSections=new Map,this._rootElement=\"visible\"===getComputedStyle(this._element).overflowY?null:this._element,this._activeTarget=null,this._observer=null,this._previousScrollData={visibleEntryTop:0,parentScrollTop:0},this.refresh()}static get Default(){return Ki}static get DefaultType(){return Vi}static get NAME(){return\"scrollspy\"}refresh(){this._initializeTargetsAndObservables(),this._maybeEnableSmoothScroll(),this._observer?this._observer.disconnect():this._observer=this._getNewObserver();for(const t of this._observableSections.values())this._observer.observe(t)}dispose(){this._observer.disconnect(),super.dispose()}_configAfterMerge(t){return t.target=c(t.target)||document.body,t.rootMargin=t.offset?`${t.offset}px 0px -30%`:t.rootMargin,\"string\"==typeof t.threshold&&(t.threshold=t.threshold.split(\",\").map((t=>Number.parseFloat(t)))),t}_maybeEnableSmoothScroll(){this._config.smoothScroll&&(j.off(this._config.target,zi),j.on(this._config.target,zi,qi,(t=>{const e=this._observableSections.get(t.target.hash);if(e){t.preventDefault();const i=this._rootElement||window,s=e.offsetTop-this._element.offsetTop;if(i.scrollTo)return void i.scrollTo({top:s,behavior:\"smooth\"});i.scrollTop=s}})))}_getNewObserver(){const t={root:this._rootElement,threshold:this._config.threshold,rootMargin:this._config.rootMargin};return new IntersectionObserver((t=>this._observerCallback(t)),t)}_observerCallback(t){const e=t=>this._targetLinks.get(`#${t.target.id}`),i=t=>{this._previousScrollData.visibleEntryTop=t.target.offsetTop,this._process(e(t))},s=(this._rootElement||document.documentElement).scrollTop,n=s>=this._previousScrollData.parentScrollTop;this._previousScrollData.parentScrollTop=s;for(const o of t){if(!o.isIntersecting){this._activeTarget=null,this._clearActiveClass(e(o));continue}const t=o.target.offsetTop>=this._previousScrollData.visibleEntryTop;if(n&&t){if(i(o),!s)return}else n||t||i(o)}}_initializeTargetsAndObservables(){this._targetLinks=new Map,this._observableSections=new Map;const t=K.find(qi,this._config.target);for(const e of t){if(!e.hash||d(e))continue;const 
t=K.findOne(decodeURI(e.hash),this._element);h(t)&&(this._targetLinks.set(decodeURI(e.hash),e),this._observableSections.set(e.hash,t))}}_process(t){this._activeTarget!==t&&(this._clearActiveClass(this._config.target),this._activeTarget=t,t.classList.add(Bi),this._activateParents(t),j.trigger(this._element,Fi,{relatedTarget:t}))}_activateParents(t){if(t.classList.contains(\"dropdown-item\"))K.findOne(\".dropdown-toggle\",t.closest(\".dropdown\")).classList.add(Bi);else for(const e of K.parents(t,\".nav, .list-group\"))for(const t of K.prev(e,Ri))t.classList.add(Bi)}_clearActiveClass(t){t.classList.remove(Bi);const e=K.find(`${qi}.${Bi}`,t);for(const t of e)t.classList.remove(Bi)}static jQueryInterface(t){return this.each((function(){const e=Qi.getOrCreateInstance(this,t);if(\"string\"==typeof t){if(void 0===e[t]||t.startsWith(\"_\")||\"constructor\"===t)throw new TypeError(`No method named \"${t}\"`);e[t]()}}))}}j.on(window,Hi,(()=>{for(const t of K.find('[data-bs-spy=\"scroll\"]'))Qi.getOrCreateInstance(t)})),b(Qi);const Xi=\".bs.tab\",Yi=`hide${Xi}`,Ui=`hidden${Xi}`,Gi=`show${Xi}`,Ji=`shown${Xi}`,Zi=`click${Xi}`,ts=`keydown${Xi}`,es=`load${Xi}`,is=\"ArrowLeft\",ss=\"ArrowRight\",ns=\"ArrowUp\",os=\"ArrowDown\",rs=\"Home\",as=\"End\",ls=\"active\",cs=\"fade\",hs=\"show\",ds=\".dropdown-toggle\",us=`:not(${ds})`,_s='[data-bs-toggle=\"tab\"], [data-bs-toggle=\"pill\"], [data-bs-toggle=\"list\"]',gs=`.nav-link${us}, .list-group-item${us}, [role=\"tab\"]${us}, ${_s}`,fs=`.${ls}[data-bs-toggle=\"tab\"], .${ls}[data-bs-toggle=\"pill\"], .${ls}[data-bs-toggle=\"list\"]`;class ms extends W{constructor(t){super(t),this._parent=this._element.closest('.list-group, .nav, [role=\"tablist\"]'),this._parent&&(this._setInitialAttributes(this._parent,this._getChildren()),j.on(this._element,ts,(t=>this._keydown(t))))}static get NAME(){return\"tab\"}show(){const t=this._element;if(this._elemIsActive(t))return;const e=this._getActiveElem(),i=e?j.trigger(e,Yi,{relatedTarget:t}):null;j.trigger(t,Gi,{relatedTarget:e}).defaultPrevented||i&&i.defaultPrevented||(this._deactivate(e,t),this._activate(t,e))}_activate(t,e){t&&(t.classList.add(ls),this._activate(K.getElementFromSelector(t)),this._queueCallback((()=>{\"tab\"===t.getAttribute(\"role\")?(t.removeAttribute(\"tabindex\"),t.setAttribute(\"aria-selected\",!0),this._toggleDropDown(t,!0),j.trigger(t,Ji,{relatedTarget:e})):t.classList.add(hs)}),t,t.classList.contains(cs)))}_deactivate(t,e){t&&(t.classList.remove(ls),t.blur(),this._deactivate(K.getElementFromSelector(t)),this._queueCallback((()=>{\"tab\"===t.getAttribute(\"role\")?(t.setAttribute(\"aria-selected\",!1),t.setAttribute(\"tabindex\",\"-1\"),this._toggleDropDown(t,!1),j.trigger(t,Ui,{relatedTarget:e})):t.classList.remove(hs)}),t,t.classList.contains(cs)))}_keydown(t){if(![is,ss,ns,os,rs,as].includes(t.key))return;t.stopPropagation(),t.preventDefault();const e=this._getChildren().filter((t=>!d(t)));let i;if([rs,as].includes(t.key))i=e[t.key===rs?0:e.length-1];else{const s=[ss,os].includes(t.key);i=w(e,t.target,s,!0)}i&&(i.focus({preventScroll:!0}),ms.getOrCreateInstance(i).show())}_getChildren(){return K.find(gs,this._parent)}_getActiveElem(){return this._getChildren().find((t=>this._elemIsActive(t)))||null}_setInitialAttributes(t,e){this._setAttributeIfNotExists(t,\"role\",\"tablist\");for(const t of e)this._setInitialAttributesOnChild(t)}_setInitialAttributesOnChild(t){t=this._getInnerElement(t);const 
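/* backfill ARIA for each trigger: aria-selected mirrors the active state, inactive triggers get tabindex -1, the trigger itself gets role tab and its wrapper role presentation */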
e=this._elemIsActive(t),i=this._getOuterElement(t);t.setAttribute(\"aria-selected\",e),i!==t&&this._setAttributeIfNotExists(i,\"role\",\"presentation\"),e||t.setAttribute(\"tabindex\",\"-1\"),this._setAttributeIfNotExists(t,\"role\",\"tab\"),this._setInitialAttributesOnTargetPanel(t)}_setInitialAttributesOnTargetPanel(t){const e=K.getElementFromSelector(t);e&&(this._setAttributeIfNotExists(e,\"role\",\"tabpanel\"),t.id&&this._setAttributeIfNotExists(e,\"aria-labelledby\",`${t.id}`))}_toggleDropDown(t,e){const i=this._getOuterElement(t);if(!i.classList.contains(\"dropdown\"))return;const s=(t,s)=>{const n=K.findOne(t,i);n&&n.classList.toggle(s,e)};s(ds,ls),s(\".dropdown-menu\",hs),i.setAttribute(\"aria-expanded\",e)}_setAttributeIfNotExists(t,e,i){t.hasAttribute(e)||t.setAttribute(e,i)}_elemIsActive(t){return t.classList.contains(ls)}_getInnerElement(t){return t.matches(gs)?t:K.findOne(gs,t)}_getOuterElement(t){return t.closest(\".nav-item, .list-group-item\")||t}static jQueryInterface(t){return this.each((function(){const e=ms.getOrCreateInstance(this);if(\"string\"==typeof t){if(void 0===e[t]||t.startsWith(\"_\")||\"constructor\"===t)throw new TypeError(`No method named \"${t}\"`);e[t]()}}))}}j.on(document,Zi,_s,(function(t){[\"A\",\"AREA\"].includes(this.tagName)&&t.preventDefault(),d(this)||ms.getOrCreateInstance(this).show()})),j.on(window,es,(()=>{for(const t of K.find(fs))ms.getOrCreateInstance(t)})),b(ms);const ps=\".bs.toast\",bs=`mouseover${ps}`,vs=`mouseout${ps}`,ys=`focusin${ps}`,ws=`focusout${ps}`,As=`hide${ps}`,Es=`hidden${ps}`,Cs=`show${ps}`,Ts=`shown${ps}`,ks=\"hide\",$s=\"show\",Ss=\"showing\",Ls={animation:\"boolean\",autohide:\"boolean\",delay:\"number\"},Os={animation:!0,autohide:!0,delay:5e3};class Is extends W{constructor(t,e){super(t,e),this._timeout=null,this._hasMouseInteraction=!1,this._hasKeyboardInteraction=!1,this._setListeners()}static get Default(){return Os}static get DefaultType(){return Ls}static get NAME(){return\"toast\"}show(){j.trigger(this._element,Cs).defaultPrevented||(this._clearTimeout(),this._config.animation&&this._element.classList.add(\"fade\"),this._element.classList.remove(ks),g(this._element),this._element.classList.add($s,Ss),this._queueCallback((()=>{this._element.classList.remove(Ss),j.trigger(this._element,Ts),this._maybeScheduleHide()}),this._element,this._config.animation))}hide(){this.isShown()&&(j.trigger(this._element,As).defaultPrevented||(this._element.classList.add(Ss),this._queueCallback((()=>{this._element.classList.add(ks),this._element.classList.remove(Ss,$s),j.trigger(this._element,Es)}),this._element,this._config.animation)))}dispose(){this._clearTimeout(),this.isShown()&&this._element.classList.remove($s),super.dispose()}isShown(){return this._element.classList.contains($s)}_maybeScheduleHide(){this._config.autohide&&(this._hasMouseInteraction||this._hasKeyboardInteraction||(this._timeout=setTimeout((()=>{this.hide()}),this._config.delay)))}_onInteraction(t,e){switch(t.type){case\"mouseover\":case\"mouseout\":this._hasMouseInteraction=e;break;case\"focusin\":case\"focusout\":this._hasKeyboardInteraction=e}if(e)return void this._clearTimeout();const 
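/* interaction ended; restart the autohide timer only if focus or hover moved outside the toast entirely */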
i=t.relatedTarget;this._element===i||this._element.contains(i)||this._maybeScheduleHide()}_setListeners(){j.on(this._element,bs,(t=>this._onInteraction(t,!0))),j.on(this._element,vs,(t=>this._onInteraction(t,!1))),j.on(this._element,ys,(t=>this._onInteraction(t,!0))),j.on(this._element,ws,(t=>this._onInteraction(t,!1)))}_clearTimeout(){clearTimeout(this._timeout),this._timeout=null}static jQueryInterface(t){return this.each((function(){const e=Is.getOrCreateInstance(this,t);if(\"string\"==typeof t){if(void 0===e[t])throw new TypeError(`No method named \"${t}\"`);e[t](this)}}))}}return V(Is),b(Is),{Alert:U,Button:J,Carousel:Ot,Collapse:Rt,Dropdown:fe,Modal:Ue,Offcanvas:gi,Popover:Mi,ScrollSpy:Qi,Tab:ms,Toast:Is,Tooltip:Ni}}));\n//# sourceMappingURL=bootstrap.min.js.map\n/*! @name mux.js @version 5.11.0 @license Apache-2.0 */\n(function (global, factory) {\n typeof exports === 'object' && typeof module !== 'undefined' ? module.exports = factory() :\n typeof define === 'function' && define.amd ? define(factory) :\n (global = typeof globalThis !== 'undefined' ? globalThis : global || self, global.muxjs = factory());\n}(this, (function () { 'use strict';\n\n /**\n * mux.js\n *\n * Copyright (c) Brightcove\n * Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE\n *\n * A lightweight readable stream implemention that handles event dispatching.\n * Objects that inherit from streams should call init in their constructors.\n */\n\n var Stream = function Stream() {\n this.init = function () {\n var listeners = {};\n /**\n * Add a listener for a specified event type.\n * @param type {string} the event name\n * @param listener {function} the callback to be invoked when an event of\n * the specified type occurs\n */\n\n this.on = function (type, listener) {\n if (!listeners[type]) {\n listeners[type] = [];\n }\n\n listeners[type] = listeners[type].concat(listener);\n };\n /**\n * Remove a listener for a specified event type.\n * @param type {string} the event name\n * @param listener {function} a function previously registered for this\n * type of event through `on`\n */\n\n\n this.off = function (type, listener) {\n var index;\n\n if (!listeners[type]) {\n return false;\n }\n\n index = listeners[type].indexOf(listener);\n listeners[type] = listeners[type].slice();\n listeners[type].splice(index, 1);\n return index > -1;\n };\n /**\n * Trigger an event of the specified type on this stream. Any additional\n * arguments to this function are passed as parameters to event listeners.\n * @param type {string} the event name\n */\n\n\n this.trigger = function (type) {\n var callbacks, i, length, args;\n callbacks = listeners[type];\n\n if (!callbacks) {\n return;\n } // Slicing the arguments on every invocation of this method\n // can add a significant amount of overhead. Avoid the\n // intermediate object creation for the common case of a\n // single callback argument\n\n\n if (arguments.length === 2) {\n length = callbacks.length;\n\n for (i = 0; i < length; ++i) {\n callbacks[i].call(this, arguments[1]);\n }\n } else {\n args = [];\n i = arguments.length;\n\n for (i = 1; i < arguments.length; ++i) {\n args.push(arguments[i]);\n }\n\n length = callbacks.length;\n\n for (i = 0; i < length; ++i) {\n callbacks[i].apply(this, args);\n }\n }\n };\n /**\n * Destroys the stream and cleans up.\n */\n\n\n this.dispose = function () {\n listeners = {};\n };\n };\n };\n /**\n * Forwards all `data` events on this stream to the destination stream. 
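Streams are chained this way to build the transmuxing pipeline (for example, an elementary stream can pipe into an AdtsStream).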
The\n * destination stream should provide a method `push` to receive the data\n * events as they arrive.\n * @param destination {stream} the stream that will receive all `data` events\n * @param autoFlush {boolean} if false, we will not call `flush` on the destination\n * when the current stream emits a 'done' event\n * @see http://nodejs.org/api/stream.html#stream_readable_pipe_destination_options\n */\n\n\n Stream.prototype.pipe = function (destination) {\n this.on('data', function (data) {\n destination.push(data);\n });\n this.on('done', function (flushSource) {\n destination.flush(flushSource);\n });\n this.on('partialdone', function (flushSource) {\n destination.partialFlush(flushSource);\n });\n this.on('endedtimeline', function (flushSource) {\n destination.endTimeline(flushSource);\n });\n this.on('reset', function (flushSource) {\n destination.reset(flushSource);\n });\n return destination;\n }; // Default stream functions that are expected to be overridden to perform\n // actual work. These are provided by the prototype as a sort of no-op\n // implementation so that we don't have to check for their existence in the\n // `pipe` function above.\n\n\n Stream.prototype.push = function (data) {\n this.trigger('data', data);\n };\n\n Stream.prototype.flush = function (flushSource) {\n this.trigger('done', flushSource);\n };\n\n Stream.prototype.partialFlush = function (flushSource) {\n this.trigger('partialdone', flushSource);\n };\n\n Stream.prototype.endTimeline = function (flushSource) {\n this.trigger('endedtimeline', flushSource);\n };\n\n Stream.prototype.reset = function (flushSource) {\n this.trigger('reset', flushSource);\n };\n\n var stream = Stream;\n\n /**\n * mux.js\n *\n * Copyright (c) Brightcove\n * Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE\n */\n var ONE_SECOND_IN_TS = 90000,\n // 90kHz clock\n secondsToVideoTs,\n secondsToAudioTs,\n videoTsToSeconds,\n audioTsToSeconds,\n audioTsToVideoTs,\n videoTsToAudioTs,\n metadataTsToSeconds;\n\n secondsToVideoTs = function secondsToVideoTs(seconds) {\n return seconds * ONE_SECOND_IN_TS;\n };\n\n secondsToAudioTs = function secondsToAudioTs(seconds, sampleRate) {\n return seconds * sampleRate;\n };\n\n videoTsToSeconds = function videoTsToSeconds(timestamp) {\n return timestamp / ONE_SECOND_IN_TS;\n };\n\n audioTsToSeconds = function audioTsToSeconds(timestamp, sampleRate) {\n return timestamp / sampleRate;\n };\n\n audioTsToVideoTs = function audioTsToVideoTs(timestamp, sampleRate) {\n return secondsToVideoTs(audioTsToSeconds(timestamp, sampleRate));\n };\n\n videoTsToAudioTs = function videoTsToAudioTs(timestamp, sampleRate) {\n return secondsToAudioTs(videoTsToSeconds(timestamp), sampleRate);\n };\n /**\n * Adjust ID3 tag or caption timing information by the timeline pts values\n * (if keepOriginalTimestamps is false) and convert to seconds\n */\n\n\n metadataTsToSeconds = function metadataTsToSeconds(timestamp, timelineStartPts, keepOriginalTimestamps) {\n return videoTsToSeconds(keepOriginalTimestamps ? 
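/* true: leave the pts as-is; false: rebase against timelineStartPts before converting */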
timestamp : timestamp - timelineStartPts);\n };\n\n var clock = {\n ONE_SECOND_IN_TS: ONE_SECOND_IN_TS,\n secondsToVideoTs: secondsToVideoTs,\n secondsToAudioTs: secondsToAudioTs,\n videoTsToSeconds: videoTsToSeconds,\n audioTsToSeconds: audioTsToSeconds,\n audioTsToVideoTs: audioTsToVideoTs,\n videoTsToAudioTs: videoTsToAudioTs,\n metadataTsToSeconds: metadataTsToSeconds\n };\n\n var ONE_SECOND_IN_TS$1 = clock.ONE_SECOND_IN_TS;\n\n var _AdtsStream;\n\n var ADTS_SAMPLING_FREQUENCIES = [96000, 88200, 64000, 48000, 44100, 32000, 24000, 22050, 16000, 12000, 11025, 8000, 7350];\n /*\n * Accepts a ElementaryStream and emits data events with parsed\n * AAC Audio Frames of the individual packets. Input audio in ADTS\n * format is unpacked and re-emitted as AAC frames.\n *\n * @see http://wiki.multimedia.cx/index.php?title=ADTS\n * @see http://wiki.multimedia.cx/?title=Understanding_AAC\n */\n\n _AdtsStream = function AdtsStream(handlePartialSegments) {\n var buffer,\n frameNum = 0;\n\n _AdtsStream.prototype.init.call(this);\n\n this.push = function (packet) {\n var i = 0,\n frameLength,\n protectionSkipBytes,\n frameEnd,\n oldBuffer,\n sampleCount,\n adtsFrameDuration;\n\n if (!handlePartialSegments) {\n frameNum = 0;\n }\n\n if (packet.type !== 'audio') {\n // ignore non-audio data\n return;\n } // Prepend any data in the buffer to the input data so that we can parse\n // aac frames the cross a PES packet boundary\n\n\n if (buffer) {\n oldBuffer = buffer;\n buffer = new Uint8Array(oldBuffer.byteLength + packet.data.byteLength);\n buffer.set(oldBuffer);\n buffer.set(packet.data, oldBuffer.byteLength);\n } else {\n buffer = packet.data;\n } // unpack any ADTS frames which have been fully received\n // for details on the ADTS header, see http://wiki.multimedia.cx/index.php?title=ADTS\n\n\n while (i + 5 < buffer.length) {\n // Look for the start of an ADTS header..\n if (buffer[i] !== 0xFF || (buffer[i + 1] & 0xF6) !== 0xF0) {\n // If a valid header was not found, jump one forward and attempt to\n // find a valid ADTS header starting at the next byte\n i++;\n continue;\n } // The protection skip bit tells us if we have 2 bytes of CRC data at the\n // end of the ADTS header\n\n\n protectionSkipBytes = (~buffer[i + 1] & 0x01) * 2; // Frame length is a 13 bit integer starting 16 bits from the\n // end of the sync sequence\n\n frameLength = (buffer[i + 3] & 0x03) << 11 | buffer[i + 4] << 3 | (buffer[i + 5] & 0xe0) >> 5;\n sampleCount = ((buffer[i + 6] & 0x03) + 1) * 1024;\n adtsFrameDuration = sampleCount * ONE_SECOND_IN_TS$1 / ADTS_SAMPLING_FREQUENCIES[(buffer[i + 2] & 0x3c) >>> 2];\n frameEnd = i + frameLength; // If we don't have enough data to actually finish this ADTS frame, return\n // and wait for more data\n\n if (buffer.byteLength < frameEnd) {\n return;\n } // Otherwise, deliver the complete AAC frame\n\n\n this.trigger('data', {\n pts: packet.pts + frameNum * adtsFrameDuration,\n dts: packet.dts + frameNum * adtsFrameDuration,\n sampleCount: sampleCount,\n audioobjecttype: (buffer[i + 2] >>> 6 & 0x03) + 1,\n channelcount: (buffer[i + 2] & 1) << 2 | (buffer[i + 3] & 0xc0) >>> 6,\n samplerate: ADTS_SAMPLING_FREQUENCIES[(buffer[i + 2] & 0x3c) >>> 2],\n samplingfrequencyindex: (buffer[i + 2] & 0x3c) >>> 2,\n // assume ISO/IEC 14496-12 AudioSampleEntry default of 16\n samplesize: 16,\n data: buffer.subarray(i + 7 + protectionSkipBytes, frameEnd)\n });\n frameNum++; // If the buffer is empty, clear it and return\n\n if (buffer.byteLength === frameEnd) {\n buffer = undefined;\n return;\n } // Remove 
the finished frame from the buffer and start the process again\n\n\n buffer = buffer.subarray(frameEnd);\n }\n };\n\n this.flush = function () {\n frameNum = 0;\n this.trigger('done');\n };\n\n this.reset = function () {\n buffer = void 0;\n this.trigger('reset');\n };\n\n this.endTimeline = function () {\n buffer = void 0;\n this.trigger('endedtimeline');\n };\n };\n\n _AdtsStream.prototype = new stream();\n var adts = _AdtsStream;\n\n /**\n * mux.js\n *\n * Copyright (c) Brightcove\n * Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE\n */\n\n var ExpGolomb;\n /**\n * Parser for exponential Golomb codes, a variable-bitwidth number encoding\n * scheme used by h264.\n */\n\n ExpGolomb = function ExpGolomb(workingData) {\n var // the number of bytes left to examine in workingData\n workingBytesAvailable = workingData.byteLength,\n // the current word being examined\n workingWord = 0,\n // :uint\n // the number of bits left to examine in the current word\n workingBitsAvailable = 0; // :uint;\n // ():uint\n\n this.length = function () {\n return 8 * workingBytesAvailable;\n }; // ():uint\n\n\n this.bitsAvailable = function () {\n return 8 * workingBytesAvailable + workingBitsAvailable;\n }; // ():void\n\n\n this.loadWord = function () {\n var position = workingData.byteLength - workingBytesAvailable,\n workingBytes = new Uint8Array(4),\n availableBytes = Math.min(4, workingBytesAvailable);\n\n if (availableBytes === 0) {\n throw new Error('no bytes available');\n }\n\n workingBytes.set(workingData.subarray(position, position + availableBytes));\n workingWord = new DataView(workingBytes.buffer).getUint32(0); // track the amount of workingData that has been processed\n\n workingBitsAvailable = availableBytes * 8;\n workingBytesAvailable -= availableBytes;\n }; // (count:int):void\n\n\n this.skipBits = function (count) {\n var skipBytes; // :int\n\n if (workingBitsAvailable > count) {\n workingWord <<= count;\n workingBitsAvailable -= count;\n } else {\n count -= workingBitsAvailable;\n skipBytes = Math.floor(count / 8);\n count -= skipBytes * 8;\n workingBytesAvailable -= skipBytes;\n this.loadWord();\n workingWord <<= count;\n workingBitsAvailable -= count;\n }\n }; // (size:int):uint\n\n\n this.readBits = function (size) {\n var bits = Math.min(workingBitsAvailable, size),\n // :uint\n valu = workingWord >>> 32 - bits; // :uint\n // if size > 31, handle error\n\n workingBitsAvailable -= bits;\n\n if (workingBitsAvailable > 0) {\n workingWord <<= bits;\n } else if (workingBytesAvailable > 0) {\n this.loadWord();\n }\n\n bits = size - bits;\n\n if (bits > 0) {\n return valu << bits | this.readBits(bits);\n }\n\n return valu;\n }; // ():uint\n\n\n this.skipLeadingZeros = function () {\n var leadingZeroCount; // :uint\n\n for (leadingZeroCount = 0; leadingZeroCount < workingBitsAvailable; ++leadingZeroCount) {\n if ((workingWord & 0x80000000 >>> leadingZeroCount) !== 0) {\n // the first bit of working word is 1\n workingWord <<= leadingZeroCount;\n workingBitsAvailable -= leadingZeroCount;\n return leadingZeroCount;\n }\n } // we exhausted workingWord and still have not found a 1\n\n\n this.loadWord();\n return leadingZeroCount + this.skipLeadingZeros();\n }; // ():void\n\n\n this.skipUnsignedExpGolomb = function () {\n this.skipBits(1 + this.skipLeadingZeros());\n }; // ():void\n\n\n this.skipExpGolomb = function () {\n this.skipBits(1 + this.skipLeadingZeros());\n }; // ():uint\n\n\n this.readUnsignedExpGolomb = function () {\n var clz = this.skipLeadingZeros(); // 
:uint\n\n return this.readBits(clz + 1) - 1;\n }; // ():int\n\n\n this.readExpGolomb = function () {\n var valu = this.readUnsignedExpGolomb(); // :int\n\n if (0x01 & valu) {\n // the number is odd if the low order bit is set\n return 1 + valu >>> 1; // add 1 to make it even, and divide by 2\n }\n\n return -1 * (valu >>> 1); // divide by two then make it negative\n }; // Some convenience functions\n // :Boolean\n\n\n this.readBoolean = function () {\n return this.readBits(1) === 1;\n }; // ():int\n\n\n this.readUnsignedByte = function () {\n return this.readBits(8);\n };\n\n this.loadWord();\n };\n\n var expGolomb = ExpGolomb;\n\n var _H264Stream, _NalByteStream;\n\n var PROFILES_WITH_OPTIONAL_SPS_DATA;\n /**\n * Accepts a NAL unit byte stream and unpacks the embedded NAL units.\n */\n\n _NalByteStream = function NalByteStream() {\n var syncPoint = 0,\n i,\n buffer;\n\n _NalByteStream.prototype.init.call(this);\n /*\n * Scans a byte stream and triggers a data event with the NAL units found.\n * @param {Object} data Event received from H264Stream\n * @param {Uint8Array} data.data The h264 byte stream to be scanned\n *\n * @see H264Stream.push\n */\n\n\n this.push = function (data) {\n var swapBuffer;\n\n if (!buffer) {\n buffer = data.data;\n } else {\n swapBuffer = new Uint8Array(buffer.byteLength + data.data.byteLength);\n swapBuffer.set(buffer);\n swapBuffer.set(data.data, buffer.byteLength);\n buffer = swapBuffer;\n }\n\n var len = buffer.byteLength; // Rec. ITU-T H.264, Annex B\n // scan for NAL unit boundaries\n // a match looks like this:\n // 0 0 1 .. NAL .. 0 0 1\n // ^ sync point ^ i\n // or this:\n // 0 0 1 .. NAL .. 0 0 0\n // ^ sync point ^ i\n // advance the sync point to a NAL start, if necessary\n\n for (; syncPoint < len - 3; syncPoint++) {\n if (buffer[syncPoint + 2] === 1) {\n // the sync point is properly aligned\n i = syncPoint + 5;\n break;\n }\n }\n\n while (i < len) {\n // look at the current byte to determine if we've hit the end of\n // a NAL unit boundary\n switch (buffer[i]) {\n case 0:\n // skip past non-sync sequences\n if (buffer[i - 1] !== 0) {\n i += 2;\n break;\n } else if (buffer[i - 2] !== 0) {\n i++;\n break;\n } // deliver the NAL unit if it isn't empty\n\n\n if (syncPoint + 3 !== i - 2) {\n this.trigger('data', buffer.subarray(syncPoint + 3, i - 2));\n } // drop trailing zeroes\n\n\n do {\n i++;\n } while (buffer[i] !== 1 && i < len);\n\n syncPoint = i - 2;\n i += 3;\n break;\n\n case 1:\n // skip past non-sync sequences\n if (buffer[i - 1] !== 0 || buffer[i - 2] !== 0) {\n i += 3;\n break;\n } // deliver the NAL unit\n\n\n this.trigger('data', buffer.subarray(syncPoint + 3, i - 2));\n syncPoint = i - 2;\n i += 3;\n break;\n\n default:\n // the current byte isn't a one or zero, so it cannot be part\n // of a sync sequence\n i += 3;\n break;\n }\n } // filter out the NAL units that were delivered\n\n\n buffer = buffer.subarray(syncPoint);\n i -= syncPoint;\n syncPoint = 0;\n };\n\n this.reset = function () {\n buffer = null;\n syncPoint = 0;\n this.trigger('reset');\n };\n\n this.flush = function () {\n // deliver the last buffered NAL unit\n if (buffer && buffer.byteLength > 3) {\n this.trigger('data', buffer.subarray(syncPoint + 3));\n } // reset the stream state\n\n\n buffer = null;\n syncPoint = 0;\n this.trigger('done');\n };\n\n this.endTimeline = function () {\n this.flush();\n this.trigger('endedtimeline');\n };\n };\n\n _NalByteStream.prototype = new stream(); // values of profile_idc that indicate additional fields are included in the SPS\n // 
see Recommendation ITU-T H.264 (4/2013),\n // 7.3.2.1.1 Sequence parameter set data syntax\n\n PROFILES_WITH_OPTIONAL_SPS_DATA = {\n 100: true,\n 110: true,\n 122: true,\n 244: true,\n 44: true,\n 83: true,\n 86: true,\n 118: true,\n 128: true,\n 138: true,\n 139: true,\n 134: true\n };\n /**\n * Accepts input from a ElementaryStream and produces H.264 NAL unit data\n * events.\n */\n\n _H264Stream = function H264Stream() {\n var nalByteStream = new _NalByteStream(),\n self,\n trackId,\n currentPts,\n currentDts,\n discardEmulationPreventionBytes,\n readSequenceParameterSet,\n skipScalingList;\n\n _H264Stream.prototype.init.call(this);\n\n self = this;\n /*\n * Pushes a packet from a stream onto the NalByteStream\n *\n * @param {Object} packet - A packet received from a stream\n * @param {Uint8Array} packet.data - The raw bytes of the packet\n * @param {Number} packet.dts - Decode timestamp of the packet\n * @param {Number} packet.pts - Presentation timestamp of the packet\n * @param {Number} packet.trackId - The id of the h264 track this packet came from\n * @param {('video'|'audio')} packet.type - The type of packet\n *\n */\n\n this.push = function (packet) {\n if (packet.type !== 'video') {\n return;\n }\n\n trackId = packet.trackId;\n currentPts = packet.pts;\n currentDts = packet.dts;\n nalByteStream.push(packet);\n };\n /*\n * Identify NAL unit types and pass on the NALU, trackId, presentation and decode timestamps\n * for the NALUs to the next stream component.\n * Also, preprocess caption and sequence parameter NALUs.\n *\n * @param {Uint8Array} data - A NAL unit identified by `NalByteStream.push`\n * @see NalByteStream.push\n */\n\n\n nalByteStream.on('data', function (data) {\n var event = {\n trackId: trackId,\n pts: currentPts,\n dts: currentDts,\n data: data\n };\n\n switch (data[0] & 0x1f) {\n case 0x05:\n event.nalUnitType = 'slice_layer_without_partitioning_rbsp_idr';\n break;\n\n case 0x06:\n event.nalUnitType = 'sei_rbsp';\n event.escapedRBSP = discardEmulationPreventionBytes(data.subarray(1));\n break;\n\n case 0x07:\n event.nalUnitType = 'seq_parameter_set_rbsp';\n event.escapedRBSP = discardEmulationPreventionBytes(data.subarray(1));\n event.config = readSequenceParameterSet(event.escapedRBSP);\n break;\n\n case 0x08:\n event.nalUnitType = 'pic_parameter_set_rbsp';\n break;\n\n case 0x09:\n event.nalUnitType = 'access_unit_delimiter_rbsp';\n break;\n } // This triggers data on the H264Stream\n\n\n self.trigger('data', event);\n });\n nalByteStream.on('done', function () {\n self.trigger('done');\n });\n nalByteStream.on('partialdone', function () {\n self.trigger('partialdone');\n });\n nalByteStream.on('reset', function () {\n self.trigger('reset');\n });\n nalByteStream.on('endedtimeline', function () {\n self.trigger('endedtimeline');\n });\n\n this.flush = function () {\n nalByteStream.flush();\n };\n\n this.partialFlush = function () {\n nalByteStream.partialFlush();\n };\n\n this.reset = function () {\n nalByteStream.reset();\n };\n\n this.endTimeline = function () {\n nalByteStream.endTimeline();\n };\n /**\n * Advance the ExpGolomb decoder past a scaling list. 
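Only the decoder position matters to the transmuxer; the scale values are computed and discarded.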
The scaling\n * list is optionally transmitted as part of a sequence parameter\n * set and is not relevant to transmuxing.\n * @param count {number} the number of entries in this scaling list\n * @param expGolombDecoder {object} an ExpGolomb pointed to the\n * start of a scaling list\n * @see Recommendation ITU-T H.264, Section 7.3.2.1.1.1\n */\n\n\n skipScalingList = function skipScalingList(count, expGolombDecoder) {\n var lastScale = 8,\n nextScale = 8,\n j,\n deltaScale;\n\n for (j = 0; j < count; j++) {\n if (nextScale !== 0) {\n deltaScale = expGolombDecoder.readExpGolomb();\n nextScale = (lastScale + deltaScale + 256) % 256;\n }\n\n lastScale = nextScale === 0 ? lastScale : nextScale;\n }\n };\n /**\n * Expunge any \"Emulation Prevention\" bytes from a \"Raw Byte\n * Sequence Payload\"\n * @param data {Uint8Array} the bytes of a RBSP from a NAL\n * unit\n * @return {Uint8Array} the RBSP without any Emulation\n * Prevention Bytes\n */\n\n\n discardEmulationPreventionBytes = function discardEmulationPreventionBytes(data) {\n var length = data.byteLength,\n emulationPreventionBytesPositions = [],\n i = 1,\n newLength,\n newData; // Find all `Emulation Prevention Bytes`\n\n while (i < length - 2) {\n if (data[i] === 0 && data[i + 1] === 0 && data[i + 2] === 0x03) {\n emulationPreventionBytesPositions.push(i + 2);\n i += 2;\n } else {\n i++;\n }\n } // If no Emulation Prevention Bytes were found just return the original\n // array\n\n\n if (emulationPreventionBytesPositions.length === 0) {\n return data;\n } // Create a new array to hold the NAL unit data\n\n\n newLength = length - emulationPreventionBytesPositions.length;\n newData = new Uint8Array(newLength);\n var sourceIndex = 0;\n\n for (i = 0; i < newLength; sourceIndex++, i++) {\n if (sourceIndex === emulationPreventionBytesPositions[0]) {\n // Skip this byte\n sourceIndex++; // Remove this position index\n\n emulationPreventionBytesPositions.shift();\n }\n\n newData[i] = data[sourceIndex];\n }\n\n return newData;\n };\n /**\n * Read a sequence parameter set and return some interesting video\n * properties. 
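The parsed result carries the profile, level, cropped display dimensions and sample aspect ratio.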
A sequence parameter set is the H264 metadata that\n * describes the properties of upcoming video frames.\n * @param data {Uint8Array} the bytes of a sequence parameter set\n * @return {object} an object with configuration parsed from the\n * sequence parameter set, including the dimensions of the\n * associated video frames.\n */\n\n\n readSequenceParameterSet = function readSequenceParameterSet(data) {\n var frameCropLeftOffset = 0,\n frameCropRightOffset = 0,\n frameCropTopOffset = 0,\n frameCropBottomOffset = 0,\n sarScale = 1,\n expGolombDecoder,\n profileIdc,\n levelIdc,\n profileCompatibility,\n chromaFormatIdc,\n picOrderCntType,\n numRefFramesInPicOrderCntCycle,\n picWidthInMbsMinus1,\n picHeightInMapUnitsMinus1,\n frameMbsOnlyFlag,\n scalingListCount,\n sarRatio,\n aspectRatioIdc,\n i;\n expGolombDecoder = new expGolomb(data);\n profileIdc = expGolombDecoder.readUnsignedByte(); // profile_idc\n\n profileCompatibility = expGolombDecoder.readUnsignedByte(); // constraint_set[0-5]_flag\n\n levelIdc = expGolombDecoder.readUnsignedByte(); // level_idc u(8)\n\n expGolombDecoder.skipUnsignedExpGolomb(); // seq_parameter_set_id\n // some profiles have more optional data we don't need\n\n if (PROFILES_WITH_OPTIONAL_SPS_DATA[profileIdc]) {\n chromaFormatIdc = expGolombDecoder.readUnsignedExpGolomb();\n\n if (chromaFormatIdc === 3) {\n expGolombDecoder.skipBits(1); // separate_colour_plane_flag\n }\n\n expGolombDecoder.skipUnsignedExpGolomb(); // bit_depth_luma_minus8\n\n expGolombDecoder.skipUnsignedExpGolomb(); // bit_depth_chroma_minus8\n\n expGolombDecoder.skipBits(1); // qpprime_y_zero_transform_bypass_flag\n\n if (expGolombDecoder.readBoolean()) {\n // seq_scaling_matrix_present_flag\n scalingListCount = chromaFormatIdc !== 3 ? 8 : 12;\n\n for (i = 0; i < scalingListCount; i++) {\n if (expGolombDecoder.readBoolean()) {\n // seq_scaling_list_present_flag[ i ]\n if (i < 6) {\n skipScalingList(16, expGolombDecoder);\n } else {\n skipScalingList(64, expGolombDecoder);\n }\n }\n }\n }\n }\n\n expGolombDecoder.skipUnsignedExpGolomb(); // log2_max_frame_num_minus4\n\n picOrderCntType = expGolombDecoder.readUnsignedExpGolomb();\n\n if (picOrderCntType === 0) {\n expGolombDecoder.readUnsignedExpGolomb(); // log2_max_pic_order_cnt_lsb_minus4\n } else if (picOrderCntType === 1) {\n expGolombDecoder.skipBits(1); // delta_pic_order_always_zero_flag\n\n expGolombDecoder.skipExpGolomb(); // offset_for_non_ref_pic\n\n expGolombDecoder.skipExpGolomb(); // offset_for_top_to_bottom_field\n\n numRefFramesInPicOrderCntCycle = expGolombDecoder.readUnsignedExpGolomb();\n\n for (i = 0; i < numRefFramesInPicOrderCntCycle; i++) {\n expGolombDecoder.skipExpGolomb(); // offset_for_ref_frame[ i ]\n }\n }\n\n expGolombDecoder.skipUnsignedExpGolomb(); // max_num_ref_frames\n\n expGolombDecoder.skipBits(1); // gaps_in_frame_num_value_allowed_flag\n\n picWidthInMbsMinus1 = expGolombDecoder.readUnsignedExpGolomb();\n picHeightInMapUnitsMinus1 = expGolombDecoder.readUnsignedExpGolomb();\n frameMbsOnlyFlag = expGolombDecoder.readBits(1);\n\n if (frameMbsOnlyFlag === 0) {\n expGolombDecoder.skipBits(1); // mb_adaptive_frame_field_flag\n }\n\n expGolombDecoder.skipBits(1); // direct_8x8_inference_flag\n\n if (expGolombDecoder.readBoolean()) {\n // frame_cropping_flag\n frameCropLeftOffset = expGolombDecoder.readUnsignedExpGolomb();\n frameCropRightOffset = expGolombDecoder.readUnsignedExpGolomb();\n frameCropTopOffset = expGolombDecoder.readUnsignedExpGolomb();\n frameCropBottomOffset = 
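/* frame_crop_bottom_offset */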
expGolombDecoder.readUnsignedExpGolomb();\n }\n\n if (expGolombDecoder.readBoolean()) {\n // vui_parameters_present_flag\n if (expGolombDecoder.readBoolean()) {\n // aspect_ratio_info_present_flag\n aspectRatioIdc = expGolombDecoder.readUnsignedByte();\n\n switch (aspectRatioIdc) {\n case 1:\n sarRatio = [1, 1];\n break;\n\n case 2:\n sarRatio = [12, 11];\n break;\n\n case 3:\n sarRatio = [10, 11];\n break;\n\n case 4:\n sarRatio = [16, 11];\n break;\n\n case 5:\n sarRatio = [40, 33];\n break;\n\n case 6:\n sarRatio = [24, 11];\n break;\n\n case 7:\n sarRatio = [20, 11];\n break;\n\n case 8:\n sarRatio = [32, 11];\n break;\n\n case 9:\n sarRatio = [80, 33];\n break;\n\n case 10:\n sarRatio = [18, 11];\n break;\n\n case 11:\n sarRatio = [15, 11];\n break;\n\n case 12:\n sarRatio = [64, 33];\n break;\n\n case 13:\n sarRatio = [160, 99];\n break;\n\n case 14:\n sarRatio = [4, 3];\n break;\n\n case 15:\n sarRatio = [3, 2];\n break;\n\n case 16:\n sarRatio = [2, 1];\n break;\n\n case 255:\n {\n sarRatio = [expGolombDecoder.readUnsignedByte() << 8 | expGolombDecoder.readUnsignedByte(), expGolombDecoder.readUnsignedByte() << 8 | expGolombDecoder.readUnsignedByte()];\n break;\n }\n }\n\n if (sarRatio) {\n sarScale = sarRatio[0] / sarRatio[1];\n }\n }\n }\n\n return {\n profileIdc: profileIdc,\n levelIdc: levelIdc,\n profileCompatibility: profileCompatibility,\n width: Math.ceil(((picWidthInMbsMinus1 + 1) * 16 - frameCropLeftOffset * 2 - frameCropRightOffset * 2) * sarScale),\n height: (2 - frameMbsOnlyFlag) * (picHeightInMapUnitsMinus1 + 1) * 16 - frameCropTopOffset * 2 - frameCropBottomOffset * 2,\n sarRatio: sarRatio\n };\n };\n };\n\n _H264Stream.prototype = new stream();\n var h264 = {\n H264Stream: _H264Stream,\n NalByteStream: _NalByteStream\n };\n\n /**\n * mux.js\n *\n * Copyright (c) Brightcove\n * Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE\n */\n\n var codecs = {\n Adts: adts,\n h264: h264\n };\n\n /**\n * mux.js\n *\n * Copyright (c) Brightcove\n * Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE\n *\n * Functions that generate fragmented MP4s suitable for use with Media\n * Source Extensions.\n */\n\n var UINT32_MAX = Math.pow(2, 32) - 1;\n var box, dinf, esds, ftyp, mdat, mfhd, minf, moof, moov, mvex, mvhd, trak, tkhd, mdia, mdhd, hdlr, sdtp, stbl, stsd, traf, trex, trun, types, MAJOR_BRAND, MINOR_VERSION, AVC1_BRAND, VIDEO_HDLR, AUDIO_HDLR, HDLR_TYPES, VMHD, SMHD, DREF, STCO, STSC, STSZ, STTS; // pre-calculate constants\n\n (function () {\n var i;\n types = {\n avc1: [],\n // codingname\n avcC: [],\n btrt: [],\n dinf: [],\n dref: [],\n esds: [],\n ftyp: [],\n hdlr: [],\n mdat: [],\n mdhd: [],\n mdia: [],\n mfhd: [],\n minf: [],\n moof: [],\n moov: [],\n mp4a: [],\n // codingname\n mvex: [],\n mvhd: [],\n pasp: [],\n sdtp: [],\n smhd: [],\n stbl: [],\n stco: [],\n stsc: [],\n stsd: [],\n stsz: [],\n stts: [],\n styp: [],\n tfdt: [],\n tfhd: [],\n traf: [],\n trak: [],\n trun: [],\n trex: [],\n tkhd: [],\n vmhd: []\n }; // In environments where Uint8Array is undefined (e.g., IE8), skip set up so that we\n // don't throw an error\n\n if (typeof Uint8Array === 'undefined') {\n return;\n }\n\n for (i in types) {\n if (types.hasOwnProperty(i)) {\n types[i] = [i.charCodeAt(0), i.charCodeAt(1), i.charCodeAt(2), i.charCodeAt(3)];\n }\n }\n\n MAJOR_BRAND = new Uint8Array(['i'.charCodeAt(0), 's'.charCodeAt(0), 'o'.charCodeAt(0), 'm'.charCodeAt(0)]);\n AVC1_BRAND = new Uint8Array(['a'.charCodeAt(0), 'v'.charCodeAt(0), 'c'.charCodeAt(0), 
'1'.charCodeAt(0)]);\n MINOR_VERSION = new Uint8Array([0, 0, 0, 1]);\n VIDEO_HDLR = new Uint8Array([0x00, // version 0\n 0x00, 0x00, 0x00, // flags\n 0x00, 0x00, 0x00, 0x00, // pre_defined\n 0x76, 0x69, 0x64, 0x65, // handler_type: 'vide'\n 0x00, 0x00, 0x00, 0x00, // reserved\n 0x00, 0x00, 0x00, 0x00, // reserved\n 0x00, 0x00, 0x00, 0x00, // reserved\n 0x56, 0x69, 0x64, 0x65, 0x6f, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x00 // name: 'VideoHandler'\n ]);\n AUDIO_HDLR = new Uint8Array([0x00, // version 0\n 0x00, 0x00, 0x00, // flags\n 0x00, 0x00, 0x00, 0x00, // pre_defined\n 0x73, 0x6f, 0x75, 0x6e, // handler_type: 'soun'\n 0x00, 0x00, 0x00, 0x00, // reserved\n 0x00, 0x00, 0x00, 0x00, // reserved\n 0x00, 0x00, 0x00, 0x00, // reserved\n 0x53, 0x6f, 0x75, 0x6e, 0x64, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x00 // name: 'SoundHandler'\n ]);\n HDLR_TYPES = {\n video: VIDEO_HDLR,\n audio: AUDIO_HDLR\n };\n DREF = new Uint8Array([0x00, // version 0\n 0x00, 0x00, 0x00, // flags\n 0x00, 0x00, 0x00, 0x01, // entry_count\n 0x00, 0x00, 0x00, 0x0c, // entry_size\n 0x75, 0x72, 0x6c, 0x20, // 'url' type\n 0x00, // version 0\n 0x00, 0x00, 0x01 // entry_flags\n ]);\n SMHD = new Uint8Array([0x00, // version\n 0x00, 0x00, 0x00, // flags\n 0x00, 0x00, // balance, 0 means centered\n 0x00, 0x00 // reserved\n ]);\n STCO = new Uint8Array([0x00, // version\n 0x00, 0x00, 0x00, // flags\n 0x00, 0x00, 0x00, 0x00 // entry_count\n ]);\n STSC = STCO;\n STSZ = new Uint8Array([0x00, // version\n 0x00, 0x00, 0x00, // flags\n 0x00, 0x00, 0x00, 0x00, // sample_size\n 0x00, 0x00, 0x00, 0x00 // sample_count\n ]);\n STTS = STCO;\n VMHD = new Uint8Array([0x00, // version\n 0x00, 0x00, 0x01, // flags\n 0x00, 0x00, // graphicsmode\n 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 // opcolor\n ]);\n })();\n\n box = function box(type) {\n var payload = [],\n size = 0,\n i,\n result,\n view;\n\n for (i = 1; i < arguments.length; i++) {\n payload.push(arguments[i]);\n }\n\n i = payload.length; // calculate the total size we need to allocate\n\n while (i--) {\n size += payload[i].byteLength;\n }\n\n result = new Uint8Array(size + 8);\n view = new DataView(result.buffer, result.byteOffset, result.byteLength);\n view.setUint32(0, result.byteLength);\n result.set(type, 4); // copy the payload into the result\n\n for (i = 0, size = 8; i < payload.length; i++) {\n result.set(payload[i], size);\n size += payload[i].byteLength;\n }\n\n return result;\n };\n\n dinf = function dinf() {\n return box(types.dinf, box(types.dref, DREF));\n };\n\n esds = function esds(track) {\n return box(types.esds, new Uint8Array([0x00, // version\n 0x00, 0x00, 0x00, // flags\n // ES_Descriptor\n 0x03, // tag, ES_DescrTag\n 0x19, // length\n 0x00, 0x00, // ES_ID\n 0x00, // streamDependenceFlag, URL_flag, reserved, streamPriority\n // DecoderConfigDescriptor\n 0x04, // tag, DecoderConfigDescrTag\n 0x11, // length\n 0x40, // object type\n 0x15, // streamType\n 0x00, 0x06, 0x00, // bufferSizeDB\n 0x00, 0x00, 0xda, 0xc0, // maxBitrate\n 0x00, 0x00, 0xda, 0xc0, // avgBitrate\n // DecoderSpecificInfo\n 0x05, // tag, DecoderSpecificInfoTag\n 0x02, // length\n // ISO/IEC 14496-3, AudioSpecificConfig\n // for samplingFrequencyIndex see ISO/IEC 13818-7:2006, 8.1.3.2.2, Table 35\n track.audioobjecttype << 3 | track.samplingfrequencyindex >>> 1, track.samplingfrequencyindex << 7 | track.channelcount << 3, 0x06, 0x01, 0x02 // GASpecificConfig\n ]));\n };\n\n ftyp = function ftyp() {\n return box(types.ftyp, MAJOR_BRAND, MINOR_VERSION, MAJOR_BRAND, AVC1_BRAND);\n };\n\n hdlr = 
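/* hdlr: handler reference box; selects the precomputed video (vide) or audio (soun) handler payload */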
function hdlr(type) {\n return box(types.hdlr, HDLR_TYPES[type]);\n };\n\n mdat = function mdat(data) {\n return box(types.mdat, data);\n };\n\n mdhd = function mdhd(track) {\n var result = new Uint8Array([0x00, // version 0\n 0x00, 0x00, 0x00, // flags\n 0x00, 0x00, 0x00, 0x02, // creation_time\n 0x00, 0x00, 0x00, 0x03, // modification_time\n 0x00, 0x01, 0x5f, 0x90, // timescale, 90,000 \"ticks\" per second\n track.duration >>> 24 & 0xFF, track.duration >>> 16 & 0xFF, track.duration >>> 8 & 0xFF, track.duration & 0xFF, // duration\n 0x55, 0xc4, // 'und' language (undetermined)\n 0x00, 0x00]); // Use the sample rate from the track metadata, when it is\n // defined. The sample rate can be parsed out of an ADTS header, for\n // instance.\n\n if (track.samplerate) {\n result[12] = track.samplerate >>> 24 & 0xFF;\n result[13] = track.samplerate >>> 16 & 0xFF;\n result[14] = track.samplerate >>> 8 & 0xFF;\n result[15] = track.samplerate & 0xFF;\n }\n\n return box(types.mdhd, result);\n };\n\n mdia = function mdia(track) {\n return box(types.mdia, mdhd(track), hdlr(track.type), minf(track));\n };\n\n mfhd = function mfhd(sequenceNumber) {\n return box(types.mfhd, new Uint8Array([0x00, 0x00, 0x00, 0x00, // flags\n (sequenceNumber & 0xFF000000) >> 24, (sequenceNumber & 0xFF0000) >> 16, (sequenceNumber & 0xFF00) >> 8, sequenceNumber & 0xFF // sequence_number\n ]));\n };\n\n minf = function minf(track) {\n return box(types.minf, track.type === 'video' ? box(types.vmhd, VMHD) : box(types.smhd, SMHD), dinf(), stbl(track));\n };\n\n moof = function moof(sequenceNumber, tracks) {\n var trackFragments = [],\n i = tracks.length; // build traf boxes for each track fragment\n\n while (i--) {\n trackFragments[i] = traf(tracks[i]);\n }\n\n return box.apply(null, [types.moof, mfhd(sequenceNumber)].concat(trackFragments));\n };\n /**\n * Returns a movie box.\n * @param tracks {array} the tracks associated with this movie\n * @see ISO/IEC 14496-12:2012(E), section 8.2.1\n */\n\n\n moov = function moov(tracks) {\n var i = tracks.length,\n boxes = [];\n\n while (i--) {\n boxes[i] = trak(tracks[i]);\n }\n\n return box.apply(null, [types.moov, mvhd(0xffffffff)].concat(boxes).concat(mvex(tracks)));\n };\n\n mvex = function mvex(tracks) {\n var i = tracks.length,\n boxes = [];\n\n while (i--) {\n boxes[i] = trex(tracks[i]);\n }\n\n return box.apply(null, [types.mvex].concat(boxes));\n };\n\n mvhd = function mvhd(duration) {\n var bytes = new Uint8Array([0x00, // version 0\n 0x00, 0x00, 0x00, // flags\n 0x00, 0x00, 0x00, 0x01, // creation_time\n 0x00, 0x00, 0x00, 0x02, // modification_time\n 0x00, 0x01, 0x5f, 0x90, // timescale, 90,000 \"ticks\" per second\n (duration & 0xFF000000) >> 24, (duration & 0xFF0000) >> 16, (duration & 0xFF00) >> 8, duration & 0xFF, // duration\n 0x00, 0x01, 0x00, 0x00, // 1.0 rate\n 0x01, 0x00, // 1.0 volume\n 0x00, 0x00, // reserved\n 0x00, 0x00, 0x00, 0x00, // reserved\n 0x00, 0x00, 0x00, 0x00, // reserved\n 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, // transformation: unity matrix\n 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // pre_defined\n 0xff, 0xff, 0xff, 0xff // next_track_ID\n ]);\n return box(types.mvhd, bytes);\n };\n\n sdtp = function sdtp(track) {\n var samples = track.samples || [],\n bytes = new Uint8Array(4 
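/* 4 bytes of version and flags, left zero */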
+ samples.length),\n flags,\n i; // leave the full box header (4 bytes) all zero\n // write the sample table\n\n for (i = 0; i < samples.length; i++) {\n flags = samples[i].flags;\n bytes[i + 4] = flags.dependsOn << 4 | flags.isDependedOn << 2 | flags.hasRedundancy;\n }\n\n return box(types.sdtp, bytes);\n };\n\n stbl = function stbl(track) {\n return box(types.stbl, stsd(track), box(types.stts, STTS), box(types.stsc, STSC), box(types.stsz, STSZ), box(types.stco, STCO));\n };\n\n (function () {\n var videoSample, audioSample;\n\n stsd = function stsd(track) {\n return box(types.stsd, new Uint8Array([0x00, // version 0\n 0x00, 0x00, 0x00, // flags\n 0x00, 0x00, 0x00, 0x01]), track.type === 'video' ? videoSample(track) : audioSample(track));\n };\n\n videoSample = function videoSample(track) {\n var sps = track.sps || [],\n pps = track.pps || [],\n sequenceParameterSets = [],\n pictureParameterSets = [],\n i,\n avc1Box; // assemble the SPSs\n\n for (i = 0; i < sps.length; i++) {\n sequenceParameterSets.push((sps[i].byteLength & 0xFF00) >>> 8);\n sequenceParameterSets.push(sps[i].byteLength & 0xFF); // sequenceParameterSetLength\n\n sequenceParameterSets = sequenceParameterSets.concat(Array.prototype.slice.call(sps[i])); // SPS\n } // assemble the PPSs\n\n\n for (i = 0; i < pps.length; i++) {\n pictureParameterSets.push((pps[i].byteLength & 0xFF00) >>> 8);\n pictureParameterSets.push(pps[i].byteLength & 0xFF);\n pictureParameterSets = pictureParameterSets.concat(Array.prototype.slice.call(pps[i]));\n }\n\n avc1Box = [types.avc1, new Uint8Array([0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // reserved\n 0x00, 0x01, // data_reference_index\n 0x00, 0x00, // pre_defined\n 0x00, 0x00, // reserved\n 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // pre_defined\n (track.width & 0xff00) >> 8, track.width & 0xff, // width\n (track.height & 0xff00) >> 8, track.height & 0xff, // height\n 0x00, 0x48, 0x00, 0x00, // horizresolution\n 0x00, 0x48, 0x00, 0x00, // vertresolution\n 0x00, 0x00, 0x00, 0x00, // reserved\n 0x00, 0x01, // frame_count\n 0x13, 0x76, 0x69, 0x64, 0x65, 0x6f, 0x6a, 0x73, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x69, 0x62, 0x2d, 0x68, 0x6c, 0x73, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // compressorname\n 0x00, 0x18, // depth = 24\n 0x11, 0x11 // pre_defined = -1\n ]), box(types.avcC, new Uint8Array([0x01, // configurationVersion\n track.profileIdc, // AVCProfileIndication\n track.profileCompatibility, // profile_compatibility\n track.levelIdc, // AVCLevelIndication\n 0xff // lengthSizeMinusOne, hard-coded to 4 bytes\n ].concat([sps.length], // numOfSequenceParameterSets\n sequenceParameterSets, // \"SPS\"\n [pps.length], // numOfPictureParameterSets\n pictureParameterSets // \"PPS\"\n ))), box(types.btrt, new Uint8Array([0x00, 0x1c, 0x9c, 0x80, // bufferSizeDB\n 0x00, 0x2d, 0xc6, 0xc0, // maxBitrate\n 0x00, 0x2d, 0xc6, 0xc0 // avgBitrate\n ]))];\n\n if (track.sarRatio) {\n var hSpacing = track.sarRatio[0],\n vSpacing = track.sarRatio[1];\n avc1Box.push(box(types.pasp, new Uint8Array([(hSpacing & 0xFF000000) >> 24, (hSpacing & 0xFF0000) >> 16, (hSpacing & 0xFF00) >> 8, hSpacing & 0xFF, (vSpacing & 0xFF000000) >> 24, (vSpacing & 0xFF0000) >> 16, (vSpacing & 0xFF00) >> 8, vSpacing & 0xFF])));\n }\n\n return box.apply(null, avc1Box);\n };\n\n audioSample = function audioSample(track) {\n return box(types.mp4a, new Uint8Array([// SampleEntry, ISO/IEC 14496-12\n 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // reserved\n 0x00, 0x01, // data_reference_index\n 
// AudioSampleEntry, ISO/IEC 14496-12\n 0x00, 0x00, 0x00, 0x00, // reserved\n 0x00, 0x00, 0x00, 0x00, // reserved\n (track.channelcount & 0xff00) >> 8, track.channelcount & 0xff, // channelcount\n (track.samplesize & 0xff00) >> 8, track.samplesize & 0xff, // samplesize\n 0x00, 0x00, // pre_defined\n 0x00, 0x00, // reserved\n (track.samplerate & 0xff00) >> 8, track.samplerate & 0xff, 0x00, 0x00 // samplerate, 16.16\n // MP4AudioSampleEntry, ISO/IEC 14496-14\n ]), esds(track));\n };\n })();\n\n tkhd = function tkhd(track) {\n var result = new Uint8Array([0x00, // version 0\n 0x00, 0x00, 0x07, // flags\n 0x00, 0x00, 0x00, 0x00, // creation_time\n 0x00, 0x00, 0x00, 0x00, // modification_time\n (track.id & 0xFF000000) >> 24, (track.id & 0xFF0000) >> 16, (track.id & 0xFF00) >> 8, track.id & 0xFF, // track_ID\n 0x00, 0x00, 0x00, 0x00, // reserved\n (track.duration & 0xFF000000) >> 24, (track.duration & 0xFF0000) >> 16, (track.duration & 0xFF00) >> 8, track.duration & 0xFF, // duration\n 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // reserved\n 0x00, 0x00, // layer\n 0x00, 0x00, // alternate_group\n 0x01, 0x00, // non-audio track volume\n 0x00, 0x00, // reserved\n 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, // transformation: unity matrix\n (track.width & 0xFF00) >> 8, track.width & 0xFF, 0x00, 0x00, // width\n (track.height & 0xFF00) >> 8, track.height & 0xFF, 0x00, 0x00 // height\n ]);\n return box(types.tkhd, result);\n };\n /**\n * Generate a track fragment (traf) box. A traf box collects metadata\n * about tracks in a movie fragment (moof) box.\n */\n\n\n traf = function traf(track) {\n var trackFragmentHeader, trackFragmentDecodeTime, trackFragmentRun, sampleDependencyTable, dataOffset, upperWordBaseMediaDecodeTime, lowerWordBaseMediaDecodeTime;\n trackFragmentHeader = box(types.tfhd, new Uint8Array([0x00, // version 0\n 0x00, 0x00, 0x3a, // flags\n (track.id & 0xFF000000) >> 24, (track.id & 0xFF0000) >> 16, (track.id & 0xFF00) >> 8, track.id & 0xFF, // track_ID\n 0x00, 0x00, 0x00, 0x01, // sample_description_index\n 0x00, 0x00, 0x00, 0x00, // default_sample_duration\n 0x00, 0x00, 0x00, 0x00, // default_sample_size\n 0x00, 0x00, 0x00, 0x00 // default_sample_flags\n ]));\n upperWordBaseMediaDecodeTime = Math.floor(track.baseMediaDecodeTime / (UINT32_MAX + 1));\n lowerWordBaseMediaDecodeTime = Math.floor(track.baseMediaDecodeTime % (UINT32_MAX + 1));\n trackFragmentDecodeTime = box(types.tfdt, new Uint8Array([0x01, // version 1\n 0x00, 0x00, 0x00, // flags\n // baseMediaDecodeTime\n upperWordBaseMediaDecodeTime >>> 24 & 0xFF, upperWordBaseMediaDecodeTime >>> 16 & 0xFF, upperWordBaseMediaDecodeTime >>> 8 & 0xFF, upperWordBaseMediaDecodeTime & 0xFF, lowerWordBaseMediaDecodeTime >>> 24 & 0xFF, lowerWordBaseMediaDecodeTime >>> 16 & 0xFF, lowerWordBaseMediaDecodeTime >>> 8 & 0xFF, lowerWordBaseMediaDecodeTime & 0xFF])); // the data offset specifies the number of bytes from the start of\n // the containing moof to the first payload byte of the associated\n // mdat\n\n dataOffset = 32 + // tfhd\n 20 + // tfdt\n 8 + // traf header\n 16 + // mfhd\n 8 + // moof header\n 8; // mdat header\n // audio tracks require less metadata\n\n if (track.type === 'audio') {\n trackFragmentRun = trun(track, dataOffset);\n return box(types.traf, trackFragmentHeader, trackFragmentDecodeTime, trackFragmentRun);\n } // video tracks should contain an 
independent and disposable samples\n // box (sdtp)\n // generate one and adjust offsets to match\n\n\n sampleDependencyTable = sdtp(track);\n trackFragmentRun = trun(track, sampleDependencyTable.length + dataOffset);\n return box(types.traf, trackFragmentHeader, trackFragmentDecodeTime, trackFragmentRun, sampleDependencyTable);\n };\n /**\n * Generate a track box.\n * @param track {object} a track definition\n * @return {Uint8Array} the track box\n */\n\n\n trak = function trak(track) {\n track.duration = track.duration || 0xffffffff;\n return box(types.trak, tkhd(track), mdia(track));\n };\n\n trex = function trex(track) {\n var result = new Uint8Array([0x00, // version 0\n 0x00, 0x00, 0x00, // flags\n (track.id & 0xFF000000) >> 24, (track.id & 0xFF0000) >> 16, (track.id & 0xFF00) >> 8, track.id & 0xFF, // track_ID\n 0x00, 0x00, 0x00, 0x01, // default_sample_description_index\n 0x00, 0x00, 0x00, 0x00, // default_sample_duration\n 0x00, 0x00, 0x00, 0x00, // default_sample_size\n 0x00, 0x01, 0x00, 0x01 // default_sample_flags\n ]); // the last two bytes of default_sample_flags is the sample\n // degradation priority, a hint about the importance of this sample\n // relative to others. Lower the degradation priority for all sample\n // types other than video.\n\n if (track.type !== 'video') {\n result[result.length - 1] = 0x00;\n }\n\n return box(types.trex, result);\n };\n\n (function () {\n var audioTrun, videoTrun, trunHeader; // This method assumes all samples are uniform. That is, if a\n // duration is present for the first sample, it will be present for\n // all subsequent samples.\n // see ISO/IEC 14496-12:2012, Section 8.8.8.1\n\n trunHeader = function trunHeader(samples, offset) {\n var durationPresent = 0,\n sizePresent = 0,\n flagsPresent = 0,\n compositionTimeOffset = 0; // trun flag constants\n\n if (samples.length) {\n if (samples[0].duration !== undefined) {\n durationPresent = 0x1;\n }\n\n if (samples[0].size !== undefined) {\n sizePresent = 0x2;\n }\n\n if (samples[0].flags !== undefined) {\n flagsPresent = 0x4;\n }\n\n if (samples[0].compositionTimeOffset !== undefined) {\n compositionTimeOffset = 0x8;\n }\n }\n\n return [0x00, // version 0\n 0x00, durationPresent | sizePresent | flagsPresent | compositionTimeOffset, 0x01, // flags\n (samples.length & 0xFF000000) >>> 24, (samples.length & 0xFF0000) >>> 16, (samples.length & 0xFF00) >>> 8, samples.length & 0xFF, // sample_count\n (offset & 0xFF000000) >>> 24, (offset & 0xFF0000) >>> 16, (offset & 0xFF00) >>> 8, offset & 0xFF // data_offset\n ];\n };\n\n videoTrun = function videoTrun(track, offset) {\n var bytesOffest, bytes, header, samples, sample, i;\n samples = track.samples || [];\n offset += 8 + 12 + 16 * samples.length;\n header = trunHeader(samples, offset);\n bytes = new Uint8Array(header.length + samples.length * 16);\n bytes.set(header);\n bytesOffest = header.length;\n\n for (i = 0; i < samples.length; i++) {\n sample = samples[i];\n bytes[bytesOffest++] = (sample.duration & 0xFF000000) >>> 24;\n bytes[bytesOffest++] = (sample.duration & 0xFF0000) >>> 16;\n bytes[bytesOffest++] = (sample.duration & 0xFF00) >>> 8;\n bytes[bytesOffest++] = sample.duration & 0xFF; // sample_duration\n\n bytes[bytesOffest++] = (sample.size & 0xFF000000) >>> 24;\n bytes[bytesOffest++] = (sample.size & 0xFF0000) >>> 16;\n bytes[bytesOffest++] = (sample.size & 0xFF00) >>> 8;\n bytes[bytesOffest++] = sample.size & 0xFF; // sample_size\n\n bytes[bytesOffest++] = sample.flags.isLeading << 2 | sample.flags.dependsOn;\n bytes[bytesOffest++] = 
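// second byte of sample_flags: is_depended_on (2 bits), has_redundancy (2\n // bits), sample_padding_value (3 bits) and sample_is_non_sync_sample (1 bit)\n 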
sample.flags.isDependedOn << 6 | sample.flags.hasRedundancy << 4 | sample.flags.paddingValue << 1 | sample.flags.isNonSyncSample;\n bytes[bytesOffest++] = (sample.flags.degradationPriority & 0xFF00) >>> 8;\n bytes[bytesOffest++] = sample.flags.degradationPriority & 0xFF; // sample_flags, including the 16-bit degradation_priority\n\n bytes[bytesOffest++] = (sample.compositionTimeOffset & 0xFF000000) >>> 24;\n bytes[bytesOffest++] = (sample.compositionTimeOffset & 0xFF0000) >>> 16;\n bytes[bytesOffest++] = (sample.compositionTimeOffset & 0xFF00) >>> 8;\n bytes[bytesOffest++] = sample.compositionTimeOffset & 0xFF; // sample_composition_time_offset\n }\n\n return box(types.trun, bytes);\n };\n\n audioTrun = function audioTrun(track, offset) {\n var bytes, bytesOffest, header, samples, sample, i;\n samples = track.samples || [];\n offset += 8 + 12 + 8 * samples.length;\n header = trunHeader(samples, offset);\n bytes = new Uint8Array(header.length + samples.length * 8);\n bytes.set(header);\n bytesOffest = header.length;\n\n for (i = 0; i < samples.length; i++) {\n sample = samples[i];\n bytes[bytesOffest++] = (sample.duration & 0xFF000000) >>> 24;\n bytes[bytesOffest++] = (sample.duration & 0xFF0000) >>> 16;\n bytes[bytesOffest++] = (sample.duration & 0xFF00) >>> 8;\n bytes[bytesOffest++] = sample.duration & 0xFF; // sample_duration\n\n bytes[bytesOffest++] = (sample.size & 0xFF000000) >>> 24;\n bytes[bytesOffest++] = (sample.size & 0xFF0000) >>> 16;\n bytes[bytesOffest++] = (sample.size & 0xFF00) >>> 8;\n bytes[bytesOffest++] = sample.size & 0xFF; // sample_size\n }\n\n return box(types.trun, bytes);\n };\n\n trun = function trun(track, offset) {\n if (track.type === 'audio') {\n return audioTrun(track, offset);\n }\n\n return videoTrun(track, offset);\n };\n })();\n\n var mp4Generator = {\n ftyp: ftyp,\n mdat: mdat,\n moof: moof,\n moov: moov,\n initSegment: function initSegment(tracks) {\n var fileType = ftyp(),\n movie = moov(tracks),\n result;\n result = new Uint8Array(fileType.byteLength + movie.byteLength);\n result.set(fileType);\n result.set(movie, fileType.byteLength);\n return result;\n }\n };\n\n /**\n * mux.js\n *\n * Copyright (c) Brightcove\n * Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE\n */\n var toUnsigned = function toUnsigned(value) {\n return value >>> 0;\n };\n\n var toHexString = function toHexString(value) {\n return ('00' + value.toString(16)).slice(-2);\n };\n\n var bin = {\n toUnsigned: toUnsigned,\n toHexString: toHexString\n };\n\n var parseType = function parseType(buffer) {\n var result = '';\n result += String.fromCharCode(buffer[0]);\n result += String.fromCharCode(buffer[1]);\n result += String.fromCharCode(buffer[2]);\n result += String.fromCharCode(buffer[3]);\n return result;\n };\n\n var parseType_1 = parseType;\n\n var toUnsigned$1 = bin.toUnsigned;\n\n var findBox = function findBox(data, path) {\n var results = [],\n i,\n size,\n type,\n end,\n subresults;\n\n if (!path.length) {\n // short-circuit the search for empty paths\n return null;\n }\n\n for (i = 0; i < data.byteLength;) {\n size = toUnsigned$1(data[i] << 24 | data[i + 1] << 16 | data[i + 2] << 8 | data[i + 3]);\n type = parseType_1(data.subarray(i + 4, i + 8));\n end = size > 1 ? 
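// size === 0 means the box extends to the end of the data and size === 1\n // means a 64-bit largesize follows (not parsed here), so both fall back to\n // data.byteLength\n 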
i + size : data.byteLength;\n\n if (type === path[0]) {\n if (path.length === 1) {\n // this is the end of the path and we've found the box we were\n // looking for\n results.push(data.subarray(i + 8, end));\n } else {\n // recursively search for the next box along the path\n subresults = findBox(data.subarray(i + 8, end), path.slice(1));\n\n if (subresults.length) {\n results = results.concat(subresults);\n }\n }\n }\n\n i = end;\n } // we've finished searching all of data\n\n\n return results;\n };\n\n var findBox_1 = findBox;\n\n var tfhd = function tfhd(data) {\n var view = new DataView(data.buffer, data.byteOffset, data.byteLength),\n result = {\n version: data[0],\n flags: new Uint8Array(data.subarray(1, 4)),\n trackId: view.getUint32(4)\n },\n baseDataOffsetPresent = result.flags[2] & 0x01,\n sampleDescriptionIndexPresent = result.flags[2] & 0x02,\n defaultSampleDurationPresent = result.flags[2] & 0x08,\n defaultSampleSizePresent = result.flags[2] & 0x10,\n defaultSampleFlagsPresent = result.flags[2] & 0x20,\n durationIsEmpty = result.flags[0] & 0x010000,\n defaultBaseIsMoof = result.flags[0] & 0x020000,\n i;\n i = 8;\n\n if (baseDataOffsetPresent) {\n i += 4; // truncate top 4 bytes\n // FIXME: should we read the full 64 bits?\n\n result.baseDataOffset = view.getUint32(12);\n i += 4;\n }\n\n if (sampleDescriptionIndexPresent) {\n result.sampleDescriptionIndex = view.getUint32(i);\n i += 4;\n }\n\n if (defaultSampleDurationPresent) {\n result.defaultSampleDuration = view.getUint32(i);\n i += 4;\n }\n\n if (defaultSampleSizePresent) {\n result.defaultSampleSize = view.getUint32(i);\n i += 4;\n }\n\n if (defaultSampleFlagsPresent) {\n result.defaultSampleFlags = view.getUint32(i);\n }\n\n if (durationIsEmpty) {\n result.durationIsEmpty = true;\n }\n\n if (!baseDataOffsetPresent && defaultBaseIsMoof) {\n result.baseDataOffsetIsMoof = true;\n }\n\n return result;\n };\n\n var parseTfhd = tfhd;\n\n var parseSampleFlags = function parseSampleFlags(flags) {\n return {\n isLeading: (flags[0] & 0x0c) >>> 2,\n dependsOn: flags[0] & 0x03,\n isDependedOn: (flags[1] & 0xc0) >>> 6,\n hasRedundancy: (flags[1] & 0x30) >>> 4,\n paddingValue: (flags[1] & 0x0e) >>> 1,\n isNonSyncSample: flags[1] & 0x01,\n degradationPriority: flags[2] << 8 | flags[3]\n };\n };\n\n var parseSampleFlags_1 = parseSampleFlags;\n\n var trun$1 = function trun(data) {\n var result = {\n version: data[0],\n flags: new Uint8Array(data.subarray(1, 4)),\n samples: []\n },\n view = new DataView(data.buffer, data.byteOffset, data.byteLength),\n // Flag interpretation\n dataOffsetPresent = result.flags[2] & 0x01,\n // compare with 2nd byte of 0x1\n firstSampleFlagsPresent = result.flags[2] & 0x04,\n // compare with 2nd byte of 0x4\n sampleDurationPresent = result.flags[1] & 0x01,\n // compare with 2nd byte of 0x100\n sampleSizePresent = result.flags[1] & 0x02,\n // compare with 2nd byte of 0x200\n sampleFlagsPresent = result.flags[1] & 0x04,\n // compare with 2nd byte of 0x400\n sampleCompositionTimeOffsetPresent = result.flags[1] & 0x08,\n // compare with 2nd byte of 0x800\n sampleCount = view.getUint32(4),\n offset = 8,\n sample;\n\n if (dataOffsetPresent) {\n // 32 bit signed integer\n result.dataOffset = view.getInt32(offset);\n offset += 4;\n } // Overrides the flags for the first sample only. 
The order of\n // optional values will be: duration, size, compositionTimeOffset\n\n\n if (firstSampleFlagsPresent && sampleCount) {\n sample = {\n flags: parseSampleFlags_1(data.subarray(offset, offset + 4))\n };\n offset += 4;\n\n if (sampleDurationPresent) {\n sample.duration = view.getUint32(offset);\n offset += 4;\n }\n\n if (sampleSizePresent) {\n sample.size = view.getUint32(offset);\n offset += 4;\n }\n\n if (sampleCompositionTimeOffsetPresent) {\n if (result.version === 1) {\n sample.compositionTimeOffset = view.getInt32(offset);\n } else {\n sample.compositionTimeOffset = view.getUint32(offset);\n }\n\n offset += 4;\n }\n\n result.samples.push(sample);\n sampleCount--;\n }\n\n while (sampleCount--) {\n sample = {};\n\n if (sampleDurationPresent) {\n sample.duration = view.getUint32(offset);\n offset += 4;\n }\n\n if (sampleSizePresent) {\n sample.size = view.getUint32(offset);\n offset += 4;\n }\n\n if (sampleFlagsPresent) {\n sample.flags = parseSampleFlags_1(data.subarray(offset, offset + 4));\n offset += 4;\n }\n\n if (sampleCompositionTimeOffsetPresent) {\n if (result.version === 1) {\n sample.compositionTimeOffset = view.getInt32(offset);\n } else {\n sample.compositionTimeOffset = view.getUint32(offset);\n }\n\n offset += 4;\n }\n\n result.samples.push(sample);\n }\n\n return result;\n };\n\n var parseTrun = trun$1;\n\n var toUnsigned$2 = bin.toUnsigned;\n\n var tfdt = function tfdt(data) {\n var result = {\n version: data[0],\n flags: new Uint8Array(data.subarray(1, 4)),\n baseMediaDecodeTime: toUnsigned$2(data[4] << 24 | data[5] << 16 | data[6] << 8 | data[7])\n };\n\n if (result.version === 1) {\n result.baseMediaDecodeTime *= Math.pow(2, 32);\n result.baseMediaDecodeTime += toUnsigned$2(data[8] << 24 | data[9] << 16 | data[10] << 8 | data[11]);\n }\n\n return result;\n };\n\n var parseTfdt = tfdt;\n\n var toUnsigned$3 = bin.toUnsigned;\n var toHexString$1 = bin.toHexString;\n var timescale, startTime, compositionStartTime, getVideoTrackIds, getTracks, getTimescaleFromMediaHeader;\n /**\n * Parses an MP4 initialization segment and extracts the timescale\n * values for any declared tracks. Timescale values indicate the\n * number of clock ticks per second to assume for time-based values\n * elsewhere in the MP4.\n *\n * To determine the start time of an MP4, you need two pieces of\n * information: the timescale unit and the earliest base media decode\n * time. Multiple timescales can be specified within an MP4 but the\n * base media decode time is always expressed in the timescale from\n * the media header box for the track:\n * ```\n * moov > trak > mdia > mdhd.timescale\n * ```\n * @param init {Uint8Array} the bytes of the init segment\n * @return {object} a hash of track ids to timescale values or null if\n * the init segment is malformed.\n */\n\n timescale = function timescale(init) {\n var result = {},\n traks = findBox_1(init, ['moov', 'trak']); // mdhd timescale\n\n return traks.reduce(function (result, trak) {\n var tkhd, version, index, id, mdhd;\n tkhd = findBox_1(trak, ['tkhd'])[0];\n\n if (!tkhd) {\n return null;\n }\n\n version = tkhd[0];\n index = version === 0 ? 12 : 20;\n id = toUnsigned$3(tkhd[index] << 24 | tkhd[index + 1] << 16 | tkhd[index + 2] << 8 | tkhd[index + 3]);\n mdhd = findBox_1(trak, ['mdia', 'mdhd'])[0];\n\n if (!mdhd) {\n return null;\n }\n\n version = mdhd[0];\n index = version === 0 ? 
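// version 0 mdhd boxes store 32-bit creation and modification times, so the\n // timescale lands at byte 12 (after the 4-byte version/flags header); the\n // 64-bit times of version 1 push it to byte 20\n 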
12 : 20;\n result[id] = toUnsigned$3(mdhd[index] << 24 | mdhd[index + 1] << 16 | mdhd[index + 2] << 8 | mdhd[index + 3]);\n return result;\n }, result);\n };\n /**\n * Determine the base media decode start time, in seconds, for an MP4\n * fragment. If multiple fragments are specified, the earliest time is\n * returned.\n *\n * The base media decode time can be parsed from track fragment\n * metadata:\n * ```\n * moof > traf > tfdt.baseMediaDecodeTime\n * ```\n * It requires the timescale value from the mdhd to interpret.\n *\n * @param timescale {object} a hash of track ids to timescale values.\n * @param fragment {Uint8Array} the bytes of a media fragment.\n * @return {number} the earliest base media decode start time for the\n * fragment, in seconds\n */\n\n\n startTime = function startTime(timescale, fragment) {\n var trafs, baseTimes, result; // we need info from two children of each track fragment box\n\n trafs = findBox_1(fragment, ['moof', 'traf']); // determine the start times for each track\n\n baseTimes = [].concat.apply([], trafs.map(function (traf) {\n return findBox_1(traf, ['tfhd']).map(function (tfhd) {\n var id, scale, baseTime; // get the track id from the tfhd\n\n id = toUnsigned$3(tfhd[4] << 24 | tfhd[5] << 16 | tfhd[6] << 8 | tfhd[7]); // assume a 90kHz clock if no timescale was specified\n\n scale = timescale[id] || 90e3; // get the base media decode time from the tfdt\n\n baseTime = findBox_1(traf, ['tfdt']).map(function (tfdt) {\n var version, result;\n version = tfdt[0];\n result = toUnsigned$3(tfdt[4] << 24 | tfdt[5] << 16 | tfdt[6] << 8 | tfdt[7]);\n\n if (version === 1) {\n result *= Math.pow(2, 32);\n result += toUnsigned$3(tfdt[8] << 24 | tfdt[9] << 16 | tfdt[10] << 8 | tfdt[11]);\n }\n\n return result;\n })[0];\n baseTime = typeof baseTime === 'number' && !isNaN(baseTime) ? baseTime : Infinity; // convert base time to seconds\n\n return baseTime / scale;\n });\n })); // return the minimum\n\n result = Math.min.apply(null, baseTimes);\n return isFinite(result) ? 
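// a missing tfdt maps to Infinity above, and Math.min over an empty list is\n // also Infinity, so fall back to a start time of 0\n 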
result : 0;\n };\n /**\n * Determine the composition start, in seconds, for an MP4\n * fragment.\n *\n * The composition start time of a fragment can be calculated using the base\n * media decode time, composition time offset, and timescale, as follows:\n *\n * compositionStartTime = (baseMediaDecodeTime + compositionTimeOffset) / timescale\n *\n * All of the aforementioned information is contained within a media fragment's\n * `traf` box, except for timescale info, which comes from the initialization\n * segment, so a track id (also contained within a `traf`) is also necessary to\n * associate it with a timescale\n *\n *\n * @param timescales {object} - a hash of track ids to timescale values.\n * @param fragment {Unit8Array} - the bytes of a media segment\n * @return {number} the composition start time for the fragment, in seconds\n **/\n\n\n compositionStartTime = function compositionStartTime(timescales, fragment) {\n var trafBoxes = findBox_1(fragment, ['moof', 'traf']);\n var baseMediaDecodeTime = 0;\n var compositionTimeOffset = 0;\n var trackId;\n\n if (trafBoxes && trafBoxes.length) {\n // The spec states that track run samples contained within a `traf` box are contiguous, but\n // it does not explicitly state whether the `traf` boxes themselves are contiguous.\n // We will assume that they are, so we only need the first to calculate start time.\n var tfhd = findBox_1(trafBoxes[0], ['tfhd'])[0];\n var trun = findBox_1(trafBoxes[0], ['trun'])[0];\n var tfdt = findBox_1(trafBoxes[0], ['tfdt'])[0];\n\n if (tfhd) {\n var parsedTfhd = parseTfhd(tfhd);\n trackId = parsedTfhd.trackId;\n }\n\n if (tfdt) {\n var parsedTfdt = parseTfdt(tfdt);\n baseMediaDecodeTime = parsedTfdt.baseMediaDecodeTime;\n }\n\n if (trun) {\n var parsedTrun = parseTrun(trun);\n\n if (parsedTrun.samples && parsedTrun.samples.length) {\n compositionTimeOffset = parsedTrun.samples[0].compositionTimeOffset || 0;\n }\n }\n } // Get timescale for this specific track. Assume a 90kHz clock if no timescale was\n // specified.\n\n\n var timescale = timescales[trackId] || 90e3; // return the composition start time, in seconds\n\n return (baseMediaDecodeTime + compositionTimeOffset) / timescale;\n };\n /**\n * Find the trackIds of the video tracks in this source.\n * Found by parsing the Handler Reference and Track Header Boxes:\n * moov > trak > mdia > hdlr\n * moov > trak > tkhd\n *\n * @param {Uint8Array} init - The bytes of the init segment for this source\n * @return {Number[]} A list of trackIds\n *\n * @see ISO-BMFF-12/2015, Section 8.4.3\n **/\n\n\n getVideoTrackIds = function getVideoTrackIds(init) {\n var traks = findBox_1(init, ['moov', 'trak']);\n var videoTrackIds = [];\n traks.forEach(function (trak) {\n var hdlrs = findBox_1(trak, ['mdia', 'hdlr']);\n var tkhds = findBox_1(trak, ['tkhd']);\n hdlrs.forEach(function (hdlr, index) {\n var handlerType = parseType_1(hdlr.subarray(8, 12));\n var tkhd = tkhds[index];\n var view;\n var version;\n var trackId;\n\n if (handlerType === 'vide') {\n view = new DataView(tkhd.buffer, tkhd.byteOffset, tkhd.byteLength);\n version = view.getUint8(0);\n trackId = version === 0 ? view.getUint32(12) : view.getUint32(20);\n videoTrackIds.push(trackId);\n }\n });\n });\n return videoTrackIds;\n };\n\n getTimescaleFromMediaHeader = function getTimescaleFromMediaHeader(mdhd) {\n // mdhd is a FullBox, meaning it will have its own version as the first byte\n var version = mdhd[0];\n var index = version === 0 ? 
12 : 20;\n return toUnsigned$3(mdhd[index] << 24 | mdhd[index + 1] << 16 | mdhd[index + 2] << 8 | mdhd[index + 3]);\n };\n /**\n * Get all the video, audio, and hint tracks from a non fragmented\n * mp4 segment\n */\n\n\n getTracks = function getTracks(init) {\n var traks = findBox_1(init, ['moov', 'trak']);\n var tracks = [];\n traks.forEach(function (trak) {\n var track = {};\n var tkhd = findBox_1(trak, ['tkhd'])[0];\n var view, tkhdVersion; // id\n\n if (tkhd) {\n view = new DataView(tkhd.buffer, tkhd.byteOffset, tkhd.byteLength);\n tkhdVersion = view.getUint8(0);\n track.id = tkhdVersion === 0 ? view.getUint32(12) : view.getUint32(20);\n }\n\n var hdlr = findBox_1(trak, ['mdia', 'hdlr'])[0]; // type\n\n if (hdlr) {\n var type = parseType_1(hdlr.subarray(8, 12));\n\n if (type === 'vide') {\n track.type = 'video';\n } else if (type === 'soun') {\n track.type = 'audio';\n } else {\n track.type = type;\n }\n } // codec\n\n\n var stsd = findBox_1(trak, ['mdia', 'minf', 'stbl', 'stsd'])[0];\n\n if (stsd) {\n var sampleDescriptions = stsd.subarray(8); // gives the codec type string\n\n track.codec = parseType_1(sampleDescriptions.subarray(4, 8));\n var codecBox = findBox_1(sampleDescriptions, [track.codec])[0];\n var codecConfig, codecConfigType;\n\n if (codecBox) {\n // https://tools.ietf.org/html/rfc6381#section-3.3\n if (/^[a-z]vc[1-9]$/i.test(track.codec)) {\n // we don't need anything but the \"config\" parameter of the\n // avc1 codecBox\n codecConfig = codecBox.subarray(78);\n codecConfigType = parseType_1(codecConfig.subarray(4, 8));\n\n if (codecConfigType === 'avcC' && codecConfig.length > 11) {\n track.codec += '.'; // left padded with zeroes for single digit hex\n // profile idc\n\n track.codec += toHexString$1(codecConfig[9]); // the byte containing the constraint_set flags\n\n track.codec += toHexString$1(codecConfig[10]); // level idc\n\n track.codec += toHexString$1(codecConfig[11]);\n } else {\n // TODO: show a warning that we couldn't parse the codec\n // and are using the default\n track.codec = 'avc1.4d400d';\n }\n } else if (/^mp4[a,v]$/i.test(track.codec)) {\n // we do not need anything but the streamDescriptor of the mp4a codecBox\n codecConfig = codecBox.subarray(28);\n codecConfigType = parseType_1(codecConfig.subarray(4, 8));\n\n if (codecConfigType === 'esds' && codecConfig.length > 20 && codecConfig[19] !== 0) {\n track.codec += '.' + toHexString$1(codecConfig[19]); // this value is only a single digit\n\n track.codec += '.' 
+ toHexString$1(codecConfig[20] >>> 2 & 0x3f).replace(/^0/, '');\n } else {\n // TODO: show a warning that we couldn't parse the codec\n // and are using the default\n track.codec = 'mp4a.40.2';\n }\n } else {\n // flac, opus, etc\n track.codec = track.codec.toLowerCase();\n }\n }\n }\n\n var mdhd = findBox_1(trak, ['mdia', 'mdhd'])[0];\n\n if (mdhd) {\n track.timescale = getTimescaleFromMediaHeader(mdhd);\n }\n\n tracks.push(track);\n });\n return tracks;\n };\n\n var probe = {\n // export mp4 inspector's findBox and parseType for backwards compatibility\n findBox: findBox_1,\n parseType: parseType_1,\n timescale: timescale,\n startTime: startTime,\n compositionStartTime: compositionStartTime,\n videoTrackIds: getVideoTrackIds,\n tracks: getTracks,\n getTimescaleFromMediaHeader: getTimescaleFromMediaHeader\n };\n\n /**\n * mux.js\n *\n * Copyright (c) Brightcove\n * Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE\n */\n // Convert an array of nal units into an array of frames with each frame being\n // composed of the nal units that make up that frame\n // Also keep track of cummulative data about the frame from the nal units such\n // as the frame duration, starting pts, etc.\n var groupNalsIntoFrames = function groupNalsIntoFrames(nalUnits) {\n var i,\n currentNal,\n currentFrame = [],\n frames = []; // TODO added for LHLS, make sure this is OK\n\n frames.byteLength = 0;\n frames.nalCount = 0;\n frames.duration = 0;\n currentFrame.byteLength = 0;\n\n for (i = 0; i < nalUnits.length; i++) {\n currentNal = nalUnits[i]; // Split on 'aud'-type nal units\n\n if (currentNal.nalUnitType === 'access_unit_delimiter_rbsp') {\n // Since the very first nal unit is expected to be an AUD\n // only push to the frames array when currentFrame is not empty\n if (currentFrame.length) {\n currentFrame.duration = currentNal.dts - currentFrame.dts; // TODO added for LHLS, make sure this is OK\n\n frames.byteLength += currentFrame.byteLength;\n frames.nalCount += currentFrame.length;\n frames.duration += currentFrame.duration;\n frames.push(currentFrame);\n }\n\n currentFrame = [currentNal];\n currentFrame.byteLength = currentNal.data.byteLength;\n currentFrame.pts = currentNal.pts;\n currentFrame.dts = currentNal.dts;\n } else {\n // Specifically flag key frames for ease of use later\n if (currentNal.nalUnitType === 'slice_layer_without_partitioning_rbsp_idr') {\n currentFrame.keyFrame = true;\n }\n\n currentFrame.duration = currentNal.dts - currentFrame.dts;\n currentFrame.byteLength += currentNal.data.byteLength;\n currentFrame.push(currentNal);\n }\n } // For the last frame, use the duration of the previous frame if we\n // have nothing better to go on\n\n\n if (frames.length && (!currentFrame.duration || currentFrame.duration <= 0)) {\n currentFrame.duration = frames[frames.length - 1].duration;\n } // Push the final frame\n // TODO added for LHLS, make sure this is OK\n\n\n frames.byteLength += currentFrame.byteLength;\n frames.nalCount += currentFrame.length;\n frames.duration += currentFrame.duration;\n frames.push(currentFrame);\n return frames;\n }; // Convert an array of frames into an array of Gop with each Gop being composed\n // of the frames that make up that Gop\n // Also keep track of cummulative data about the Gop from the frames such as the\n // Gop duration, starting pts, etc.\n\n\n var groupFramesIntoGops = function groupFramesIntoGops(frames) {\n var i,\n currentFrame,\n currentGop = [],\n gops = []; // We must pre-set some of the values on the Gop since we\n // 
keep running totals of these values\n\n currentGop.byteLength = 0;\n currentGop.nalCount = 0;\n currentGop.duration = 0;\n currentGop.pts = frames[0].pts;\n currentGop.dts = frames[0].dts; // store some metadata about all the Gops\n\n gops.byteLength = 0;\n gops.nalCount = 0;\n gops.duration = 0;\n gops.pts = frames[0].pts;\n gops.dts = frames[0].dts;\n\n for (i = 0; i < frames.length; i++) {\n currentFrame = frames[i];\n\n if (currentFrame.keyFrame) {\n // Since the very first frame is expected to be an keyframe\n // only push to the gops array when currentGop is not empty\n if (currentGop.length) {\n gops.push(currentGop);\n gops.byteLength += currentGop.byteLength;\n gops.nalCount += currentGop.nalCount;\n gops.duration += currentGop.duration;\n }\n\n currentGop = [currentFrame];\n currentGop.nalCount = currentFrame.length;\n currentGop.byteLength = currentFrame.byteLength;\n currentGop.pts = currentFrame.pts;\n currentGop.dts = currentFrame.dts;\n currentGop.duration = currentFrame.duration;\n } else {\n currentGop.duration += currentFrame.duration;\n currentGop.nalCount += currentFrame.length;\n currentGop.byteLength += currentFrame.byteLength;\n currentGop.push(currentFrame);\n }\n }\n\n if (gops.length && currentGop.duration <= 0) {\n currentGop.duration = gops[gops.length - 1].duration;\n }\n\n gops.byteLength += currentGop.byteLength;\n gops.nalCount += currentGop.nalCount;\n gops.duration += currentGop.duration; // push the final Gop\n\n gops.push(currentGop);\n return gops;\n };\n /*\n * Search for the first keyframe in the GOPs and throw away all frames\n * until that keyframe. Then extend the duration of the pulled keyframe\n * and pull the PTS and DTS of the keyframe so that it covers the time\n * range of the frames that were disposed.\n *\n * @param {Array} gops video GOPs\n * @returns {Array} modified video GOPs\n */\n\n\n var extendFirstKeyFrame = function extendFirstKeyFrame(gops) {\n var currentGop;\n\n if (!gops[0][0].keyFrame && gops.length > 1) {\n // Remove the first GOP\n currentGop = gops.shift();\n gops.byteLength -= currentGop.byteLength;\n gops.nalCount -= currentGop.nalCount; // Extend the first frame of what is now the\n // first gop to cover the time period of the\n // frames we just removed\n\n gops[0][0].dts = currentGop.dts;\n gops[0][0].pts = currentGop.pts;\n gops[0][0].duration += currentGop.duration;\n }\n\n return gops;\n };\n /**\n * Default sample object\n * see ISO/IEC 14496-12:2012, section 8.6.4.3\n */\n\n\n var createDefaultSample = function createDefaultSample() {\n return {\n size: 0,\n flags: {\n isLeading: 0,\n dependsOn: 1,\n isDependedOn: 0,\n hasRedundancy: 0,\n degradationPriority: 0,\n isNonSyncSample: 1\n }\n };\n };\n /*\n * Collates information from a video frame into an object for eventual\n * entry into an MP4 sample table.\n *\n * @param {Object} frame the video frame\n * @param {Number} dataOffset the byte offset to position the sample\n * @return {Object} object containing sample table info for a frame\n */\n\n\n var sampleForFrame = function sampleForFrame(frame, dataOffset) {\n var sample = createDefaultSample();\n sample.dataOffset = dataOffset;\n sample.compositionTimeOffset = frame.pts - frame.dts;\n sample.duration = frame.duration;\n sample.size = 4 * frame.length; // Space for nal unit size\n\n sample.size += frame.byteLength;\n\n if (frame.keyFrame) {\n sample.flags.dependsOn = 2;\n sample.flags.isNonSyncSample = 0;\n }\n\n return sample;\n }; // generate the track's sample table from an array of gops\n\n\n var 
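// note: each sample's size already includes four bytes per nal unit on top\n // of frame.byteLength, matching the length prefixes concatenateNalData writes\n 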
generateSampleTable = function generateSampleTable(gops, baseDataOffset) {\n var h,\n i,\n sample,\n currentGop,\n currentFrame,\n dataOffset = baseDataOffset || 0,\n samples = [];\n\n for (h = 0; h < gops.length; h++) {\n currentGop = gops[h];\n\n for (i = 0; i < currentGop.length; i++) {\n currentFrame = currentGop[i];\n sample = sampleForFrame(currentFrame, dataOffset);\n dataOffset += sample.size;\n samples.push(sample);\n }\n }\n\n return samples;\n }; // generate the track's raw mdat data from an array of gops\n\n\n var concatenateNalData = function concatenateNalData(gops) {\n var h,\n i,\n j,\n currentGop,\n currentFrame,\n currentNal,\n dataOffset = 0,\n nalsByteLength = gops.byteLength,\n numberOfNals = gops.nalCount,\n totalByteLength = nalsByteLength + 4 * numberOfNals,\n data = new Uint8Array(totalByteLength),\n view = new DataView(data.buffer); // For each Gop..\n\n for (h = 0; h < gops.length; h++) {\n currentGop = gops[h]; // For each Frame..\n\n for (i = 0; i < currentGop.length; i++) {\n currentFrame = currentGop[i]; // For each NAL..\n\n for (j = 0; j < currentFrame.length; j++) {\n currentNal = currentFrame[j];\n view.setUint32(dataOffset, currentNal.data.byteLength);\n dataOffset += 4;\n data.set(currentNal.data, dataOffset);\n dataOffset += currentNal.data.byteLength;\n }\n }\n }\n\n return data;\n }; // generate the track's sample table from a frame\n\n\n var generateSampleTableForFrame = function generateSampleTableForFrame(frame, baseDataOffset) {\n var sample,\n dataOffset = baseDataOffset || 0,\n samples = [];\n sample = sampleForFrame(frame, dataOffset);\n samples.push(sample);\n return samples;\n }; // generate the track's raw mdat data from a frame\n\n\n var concatenateNalDataForFrame = function concatenateNalDataForFrame(frame) {\n var i,\n currentNal,\n dataOffset = 0,\n nalsByteLength = frame.byteLength,\n numberOfNals = frame.length,\n totalByteLength = nalsByteLength + 4 * numberOfNals,\n data = new Uint8Array(totalByteLength),\n view = new DataView(data.buffer); // For each NAL..\n\n for (i = 0; i < frame.length; i++) {\n currentNal = frame[i];\n view.setUint32(dataOffset, currentNal.data.byteLength);\n dataOffset += 4;\n data.set(currentNal.data, dataOffset);\n dataOffset += currentNal.data.byteLength;\n }\n\n return data;\n };\n\n var frameUtils = {\n groupNalsIntoFrames: groupNalsIntoFrames,\n groupFramesIntoGops: groupFramesIntoGops,\n extendFirstKeyFrame: extendFirstKeyFrame,\n generateSampleTable: generateSampleTable,\n concatenateNalData: concatenateNalData,\n generateSampleTableForFrame: generateSampleTableForFrame,\n concatenateNalDataForFrame: concatenateNalDataForFrame\n };\n\n /**\n * mux.js\n *\n * Copyright (c) Brightcove\n * Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE\n */\n var highPrefix = [33, 16, 5, 32, 164, 27];\n var lowPrefix = [33, 65, 108, 84, 1, 2, 4, 8, 168, 2, 4, 8, 17, 191, 252];\n\n var zeroFill = function zeroFill(count) {\n var a = [];\n\n while (count--) {\n a.push(0);\n }\n\n return a;\n };\n\n var makeTable = function makeTable(metaTable) {\n return Object.keys(metaTable).reduce(function (obj, key) {\n obj[key] = new Uint8Array(metaTable[key].reduce(function (arr, part) {\n return arr.concat(part);\n }, []));\n return obj;\n }, {});\n };\n\n var silence;\n\n var silence_1 = function silence_1() {\n if (!silence) {\n // Frames-of-silence to use for filling in missing AAC frames\n var coneOfSilence = {\n 96000: [highPrefix, [227, 64], zeroFill(154), [56]],\n 88200: [highPrefix, [231], 
zeroFill(170), [56]],\n 64000: [highPrefix, [248, 192], zeroFill(240), [56]],\n 48000: [highPrefix, [255, 192], zeroFill(268), [55, 148, 128], zeroFill(54), [112]],\n 44100: [highPrefix, [255, 192], zeroFill(268), [55, 163, 128], zeroFill(84), [112]],\n 32000: [highPrefix, [255, 192], zeroFill(268), [55, 234], zeroFill(226), [112]],\n 24000: [highPrefix, [255, 192], zeroFill(268), [55, 255, 128], zeroFill(268), [111, 112], zeroFill(126), [224]],\n 16000: [highPrefix, [255, 192], zeroFill(268), [55, 255, 128], zeroFill(268), [111, 255], zeroFill(269), [223, 108], zeroFill(195), [1, 192]],\n 12000: [lowPrefix, zeroFill(268), [3, 127, 248], zeroFill(268), [6, 255, 240], zeroFill(268), [13, 255, 224], zeroFill(268), [27, 253, 128], zeroFill(259), [56]],\n 11025: [lowPrefix, zeroFill(268), [3, 127, 248], zeroFill(268), [6, 255, 240], zeroFill(268), [13, 255, 224], zeroFill(268), [27, 255, 192], zeroFill(268), [55, 175, 128], zeroFill(108), [112]],\n 8000: [lowPrefix, zeroFill(268), [3, 121, 16], zeroFill(47), [7]]\n };\n silence = makeTable(coneOfSilence);\n }\n\n return silence;\n };\n\n /**\n * mux.js\n *\n * Copyright (c) Brightcove\n * Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE\n */\n\n /**\n * Sum the `byteLength` properties of the data in each AAC frame\n */\n\n var sumFrameByteLengths = function sumFrameByteLengths(array) {\n var i,\n currentObj,\n sum = 0; // sum the byteLength's all each nal unit in the frame\n\n for (i = 0; i < array.length; i++) {\n currentObj = array[i];\n sum += currentObj.data.byteLength;\n }\n\n return sum;\n }; // Possibly pad (prefix) the audio track with silence if appending this track\n // would lead to the introduction of a gap in the audio buffer\n\n\n var prefixWithSilence = function prefixWithSilence(track, frames, audioAppendStartTs, videoBaseMediaDecodeTime) {\n var baseMediaDecodeTimeTs,\n frameDuration = 0,\n audioGapDuration = 0,\n audioFillFrameCount = 0,\n audioFillDuration = 0,\n silentFrame,\n i,\n firstFrame;\n\n if (!frames.length) {\n return;\n }\n\n baseMediaDecodeTimeTs = clock.audioTsToVideoTs(track.baseMediaDecodeTime, track.samplerate); // determine frame clock duration based on sample rate, round up to avoid overfills\n\n frameDuration = Math.ceil(clock.ONE_SECOND_IN_TS / (track.samplerate / 1024));\n\n if (audioAppendStartTs && videoBaseMediaDecodeTime) {\n // insert the shortest possible amount (audio gap or audio to video gap)\n audioGapDuration = baseMediaDecodeTimeTs - Math.max(audioAppendStartTs, videoBaseMediaDecodeTime); // number of full frames in the audio gap\n\n audioFillFrameCount = Math.floor(audioGapDuration / frameDuration);\n audioFillDuration = audioFillFrameCount * frameDuration;\n } // don't attempt to fill gaps smaller than a single frame or larger\n // than a half second\n\n\n if (audioFillFrameCount < 1 || audioFillDuration > clock.ONE_SECOND_IN_TS / 2) {\n return;\n }\n\n silentFrame = silence_1()[track.samplerate];\n\n if (!silentFrame) {\n // we don't have a silent frame pregenerated for the sample rate, so use a frame\n // from the content instead\n silentFrame = frames[0].data;\n }\n\n for (i = 0; i < audioFillFrameCount; i++) {\n firstFrame = frames[0];\n frames.splice(0, 0, {\n data: silentFrame,\n dts: firstFrame.dts - frameDuration,\n pts: firstFrame.pts - frameDuration\n });\n }\n\n track.baseMediaDecodeTime -= Math.floor(clock.videoTsToAudioTs(audioFillDuration, track.samplerate));\n return audioFillDuration;\n }; // If the audio segment extends before the earliest allowed 
dts\n // value, remove AAC frames until it starts at or after the earliest\n // allowed DTS so that we don't end up with a negative baseMedia-\n // DecodeTime for the audio track\n\n\n var trimAdtsFramesByEarliestDts = function trimAdtsFramesByEarliestDts(adtsFrames, track, earliestAllowedDts) {\n if (track.minSegmentDts >= earliestAllowedDts) {\n return adtsFrames;\n } // We will need to recalculate the earliest segment DTS\n\n\n track.minSegmentDts = Infinity;\n return adtsFrames.filter(function (currentFrame) {\n // If this is an allowed frame, keep it and record its DTS\n if (currentFrame.dts >= earliestAllowedDts) {\n track.minSegmentDts = Math.min(track.minSegmentDts, currentFrame.dts);\n track.minSegmentPts = track.minSegmentDts;\n return true;\n } // Otherwise, discard it\n\n\n return false;\n });\n }; // generate the track's sample table from an array of frames\n\n\n var generateSampleTable$1 = function generateSampleTable(frames) {\n var i,\n currentFrame,\n samples = [];\n\n for (i = 0; i < frames.length; i++) {\n currentFrame = frames[i];\n samples.push({\n size: currentFrame.data.byteLength,\n duration: 1024 // For AAC audio, all samples contain 1024 samples\n\n });\n }\n\n return samples;\n }; // generate the track's raw mdat data from an array of frames\n\n\n var concatenateFrameData = function concatenateFrameData(frames) {\n var i,\n currentFrame,\n dataOffset = 0,\n data = new Uint8Array(sumFrameByteLengths(frames));\n\n for (i = 0; i < frames.length; i++) {\n currentFrame = frames[i];\n data.set(currentFrame.data, dataOffset);\n dataOffset += currentFrame.data.byteLength;\n }\n\n return data;\n };\n\n var audioFrameUtils = {\n prefixWithSilence: prefixWithSilence,\n trimAdtsFramesByEarliestDts: trimAdtsFramesByEarliestDts,\n generateSampleTable: generateSampleTable$1,\n concatenateFrameData: concatenateFrameData\n };\n\n /**\n * mux.js\n *\n * Copyright (c) Brightcove\n * Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE\n */\n\n var ONE_SECOND_IN_TS$2 = clock.ONE_SECOND_IN_TS;\n /**\n * Store information about the start and end of the track and the\n * duration for each frame/sample we process in order to calculate\n * the baseMediaDecodeTime\n */\n\n var collectDtsInfo = function collectDtsInfo(track, data) {\n if (typeof data.pts === 'number') {\n if (track.timelineStartInfo.pts === undefined) {\n track.timelineStartInfo.pts = data.pts;\n }\n\n if (track.minSegmentPts === undefined) {\n track.minSegmentPts = data.pts;\n } else {\n track.minSegmentPts = Math.min(track.minSegmentPts, data.pts);\n }\n\n if (track.maxSegmentPts === undefined) {\n track.maxSegmentPts = data.pts;\n } else {\n track.maxSegmentPts = Math.max(track.maxSegmentPts, data.pts);\n }\n }\n\n if (typeof data.dts === 'number') {\n if (track.timelineStartInfo.dts === undefined) {\n track.timelineStartInfo.dts = data.dts;\n }\n\n if (track.minSegmentDts === undefined) {\n track.minSegmentDts = data.dts;\n } else {\n track.minSegmentDts = Math.min(track.minSegmentDts, data.dts);\n }\n\n if (track.maxSegmentDts === undefined) {\n track.maxSegmentDts = data.dts;\n } else {\n track.maxSegmentDts = Math.max(track.maxSegmentDts, data.dts);\n }\n }\n };\n /**\n * Clear values used to calculate the baseMediaDecodeTime between\n * tracks\n */\n\n\n var clearDtsInfo = function clearDtsInfo(track) {\n delete track.minSegmentDts;\n delete track.maxSegmentDts;\n delete track.minSegmentPts;\n delete track.maxSegmentPts;\n };\n /**\n * Calculate the track's baseMediaDecodeTime based on the 
earliest\n * DTS the transmuxer has ever seen and the minimum DTS for the\n * current track\n * @param track {object} track metadata configuration\n * @param keepOriginalTimestamps {boolean} If true, keep the timestamps\n * in the source; false to adjust the first segment to start at 0.\n */\n\n\n var calculateTrackBaseMediaDecodeTime = function calculateTrackBaseMediaDecodeTime(track, keepOriginalTimestamps) {\n var baseMediaDecodeTime,\n scale,\n minSegmentDts = track.minSegmentDts; // Optionally adjust the time so the first segment starts at zero.\n\n if (!keepOriginalTimestamps) {\n minSegmentDts -= track.timelineStartInfo.dts;\n } // track.timelineStartInfo.baseMediaDecodeTime is the location, in time, where\n // we want the start of the first segment to be placed\n\n\n baseMediaDecodeTime = track.timelineStartInfo.baseMediaDecodeTime; // Add to that the distance this segment is from the very first\n\n baseMediaDecodeTime += minSegmentDts; // baseMediaDecodeTime must not become negative\n\n baseMediaDecodeTime = Math.max(0, baseMediaDecodeTime);\n\n if (track.type === 'audio') {\n // Audio has a different clock equal to the sampling_rate so we need to\n // scale the PTS values into the clock rate of the track\n scale = track.samplerate / ONE_SECOND_IN_TS$2;\n baseMediaDecodeTime *= scale;\n baseMediaDecodeTime = Math.floor(baseMediaDecodeTime);\n }\n\n return baseMediaDecodeTime;\n };\n\n var trackDecodeInfo = {\n clearDtsInfo: clearDtsInfo,\n calculateTrackBaseMediaDecodeTime: calculateTrackBaseMediaDecodeTime,\n collectDtsInfo: collectDtsInfo\n };\n\n /**\n * mux.js\n *\n * Copyright (c) Brightcove\n * Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE\n *\n * Reads in-band caption information from a video elementary\n * stream. Captions must follow the CEA-708 standard for injection\n * into an MPEG-2 transport stream.\n * @see https://en.wikipedia.org/wiki/CEA-708\n * @see https://www.gpo.gov/fdsys/pkg/CFR-2007-title47-vol1/pdf/CFR-2007-title47-vol1-sec15-119.pdf\n */\n // Supplemental enhancement information (SEI) NAL units have a\n // payload type field to indicate how they are to be\n // interpreted. CEA-708 caption content is always transmitted with\n // payload type 0x04.\n\n var USER_DATA_REGISTERED_ITU_T_T35 = 4,\n RBSP_TRAILING_BITS = 128;\n /**\n * Parse a supplemental enhancement information (SEI) NAL unit.\n * Stops parsing once a message of type ITU T T35 has been found.\n *\n * @param bytes {Uint8Array} the bytes of a SEI NAL unit\n * @return {object} the parsed SEI payload\n * @see Rec. 
ITU-T H.264, 7.3.2.3.1\n */\n\n var parseSei = function parseSei(bytes) {\n var i = 0,\n result = {\n payloadType: -1,\n payloadSize: 0\n },\n payloadType = 0,\n payloadSize = 0; // go through the sei_rbsp parsing each individual sei_message\n\n while (i < bytes.byteLength) {\n // stop once we have hit the end of the sei_rbsp\n if (bytes[i] === RBSP_TRAILING_BITS) {\n break;\n } // Parse payload type\n\n\n while (bytes[i] === 0xFF) {\n payloadType += 255;\n i++;\n }\n\n payloadType += bytes[i++]; // Parse payload size\n\n while (bytes[i] === 0xFF) {\n payloadSize += 255;\n i++;\n }\n\n payloadSize += bytes[i++]; // this sei_message is a 608/708 caption so save it and break\n // there can only ever be one caption message in a frame's sei\n\n if (!result.payload && payloadType === USER_DATA_REGISTERED_ITU_T_T35) {\n var userIdentifier = String.fromCharCode(bytes[i + 3], bytes[i + 4], bytes[i + 5], bytes[i + 6]);\n\n if (userIdentifier === 'GA94') {\n result.payloadType = payloadType;\n result.payloadSize = payloadSize;\n result.payload = bytes.subarray(i, i + payloadSize);\n break;\n } else {\n result.payload = void 0;\n }\n } // skip the payload and parse the next message\n\n\n i += payloadSize;\n payloadType = 0;\n payloadSize = 0;\n }\n\n return result;\n }; // see ANSI/SCTE 128-1 (2013), section 8.1\n\n\n var parseUserData = function parseUserData(sei) {\n // itu_t_t35_country_code must be 181 (United States) for\n // captions\n if (sei.payload[0] !== 181) {\n return null;\n } // itu_t_t35_provider_code should be 49 (ATSC) for captions\n\n\n if ((sei.payload[1] << 8 | sei.payload[2]) !== 49) {\n return null;\n } // the user_identifier should be \"GA94\" to indicate ATSC1 data\n\n\n if (String.fromCharCode(sei.payload[3], sei.payload[4], sei.payload[5], sei.payload[6]) !== 'GA94') {\n return null;\n } // finally, user_data_type_code should be 0x03 for caption data\n\n\n if (sei.payload[7] !== 0x03) {\n return null;\n } // return the user_data_type_structure and strip the trailing\n // marker bits\n\n\n return sei.payload.subarray(8, sei.payload.length - 1);\n }; // see CEA-708-D, section 4.4\n\n\n var parseCaptionPackets = function parseCaptionPackets(pts, userData) {\n var results = [],\n i,\n count,\n offset,\n data; // if this is just filler, return immediately\n\n if (!(userData[0] & 0x40)) {\n return results;\n } // parse out the cc_data_1 and cc_data_2 fields\n\n\n count = userData[0] & 0x1f;\n\n for (i = 0; i < count; i++) {\n offset = i * 3;\n data = {\n type: userData[offset + 2] & 0x03,\n pts: pts\n }; // capture cc data when cc_valid is 1\n\n if (userData[offset + 2] & 0x04) {\n data.ccData = userData[offset + 3] << 8 | userData[offset + 4];\n results.push(data);\n }\n }\n\n return results;\n };\n\n var discardEmulationPreventionBytes = function discardEmulationPreventionBytes(data) {\n var length = data.byteLength,\n emulationPreventionBytesPositions = [],\n i = 1,\n newLength,\n newData; // Find all `Emulation Prevention Bytes`\n\n while (i < length - 2) {\n if (data[i] === 0 && data[i + 1] === 0 && data[i + 2] === 0x03) {\n emulationPreventionBytesPositions.push(i + 2);\n i += 2;\n } else {\n i++;\n }\n } // If no Emulation Prevention Bytes were found just return the original\n // array\n\n\n if (emulationPreventionBytesPositions.length === 0) {\n return data;\n } // Create a new array to hold the NAL unit data\n\n\n newLength = length - emulationPreventionBytesPositions.length;\n newData = new Uint8Array(newLength);\n var sourceIndex = 0;\n\n for (i = 0; i < newLength; 
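// copy byte-by-byte, stepping sourceIndex past each emulation prevention\n // byte position collected above\n 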
sourceIndex++, i++) {\n if (sourceIndex === emulationPreventionBytesPositions[0]) {\n // Skip this byte\n sourceIndex++; // Remove this position index\n\n emulationPreventionBytesPositions.shift();\n }\n\n newData[i] = data[sourceIndex];\n }\n\n return newData;\n }; // exports\n\n\n var captionPacketParser = {\n parseSei: parseSei,\n parseUserData: parseUserData,\n parseCaptionPackets: parseCaptionPackets,\n discardEmulationPreventionBytes: discardEmulationPreventionBytes,\n USER_DATA_REGISTERED_ITU_T_T35: USER_DATA_REGISTERED_ITU_T_T35\n };\n\n // Link To Transport\n // -----------------\n\n\n var CaptionStream = function CaptionStream(options) {\n options = options || {};\n CaptionStream.prototype.init.call(this); // parse708captions flag, default to true\n\n this.parse708captions_ = typeof options.parse708captions === 'boolean' ? options.parse708captions : true;\n this.captionPackets_ = [];\n this.ccStreams_ = [new Cea608Stream(0, 0), // eslint-disable-line no-use-before-define\n new Cea608Stream(0, 1), // eslint-disable-line no-use-before-define\n new Cea608Stream(1, 0), // eslint-disable-line no-use-before-define\n new Cea608Stream(1, 1) // eslint-disable-line no-use-before-define\n ];\n\n if (this.parse708captions_) {\n this.cc708Stream_ = new Cea708Stream(); // eslint-disable-line no-use-before-define\n }\n\n this.reset(); // forward data and done events from CCs to this CaptionStream\n\n this.ccStreams_.forEach(function (cc) {\n cc.on('data', this.trigger.bind(this, 'data'));\n cc.on('partialdone', this.trigger.bind(this, 'partialdone'));\n cc.on('done', this.trigger.bind(this, 'done'));\n }, this);\n\n if (this.parse708captions_) {\n this.cc708Stream_.on('data', this.trigger.bind(this, 'data'));\n this.cc708Stream_.on('partialdone', this.trigger.bind(this, 'partialdone'));\n this.cc708Stream_.on('done', this.trigger.bind(this, 'done'));\n }\n };\n\n CaptionStream.prototype = new stream();\n\n CaptionStream.prototype.push = function (event) {\n var sei, userData, newCaptionPackets; // only examine SEI NALs\n\n if (event.nalUnitType !== 'sei_rbsp') {\n return;\n } // parse the sei\n\n\n sei = captionPacketParser.parseSei(event.escapedRBSP); // no payload data, skip\n\n if (!sei.payload) {\n return;\n } // ignore everything but user_data_registered_itu_t_t35\n\n\n if (sei.payloadType !== captionPacketParser.USER_DATA_REGISTERED_ITU_T_T35) {\n return;\n } // parse out the user data payload\n\n\n userData = captionPacketParser.parseUserData(sei); // ignore unrecognized userData\n\n if (!userData) {\n return;\n } // Sometimes, the same segment # will be downloaded twice. To stop the\n // caption data from being processed twice, we track the latest dts we've\n // received and ignore everything with a dts before that. However, since\n // data for a specific dts can be split across packets on either side of\n // a segment boundary, we need to make sure we *don't* ignore the packets\n // from the *next* segment that have dts === this.latestDts_. 
By constantly\n // tracking the number of packets received with dts === this.latestDts_, we\n // know how many should be ignored once we start receiving duplicates.\n\n\n if (event.dts < this.latestDts_) {\n // We've started getting older data, so set the flag.\n this.ignoreNextEqualDts_ = true;\n return;\n } else if (event.dts === this.latestDts_ && this.ignoreNextEqualDts_) {\n this.numSameDts_--;\n\n if (!this.numSameDts_) {\n // We've received the last duplicate packet, time to start processing again\n this.ignoreNextEqualDts_ = false;\n }\n\n return;\n } // parse out CC data packets and save them for later\n\n\n newCaptionPackets = captionPacketParser.parseCaptionPackets(event.pts, userData);\n this.captionPackets_ = this.captionPackets_.concat(newCaptionPackets);\n\n if (this.latestDts_ !== event.dts) {\n this.numSameDts_ = 0;\n }\n\n this.numSameDts_++;\n this.latestDts_ = event.dts;\n };\n\n CaptionStream.prototype.flushCCStreams = function (flushType) {\n this.ccStreams_.forEach(function (cc) {\n return flushType === 'flush' ? cc.flush() : cc.partialFlush();\n }, this);\n };\n\n CaptionStream.prototype.flushStream = function (flushType) {\n // make sure we actually parsed captions before proceeding\n if (!this.captionPackets_.length) {\n this.flushCCStreams(flushType);\n return;\n } // In Chrome, the Array#sort function is not stable so add a\n // presortIndex that we can use to ensure we get a stable-sort\n\n\n this.captionPackets_.forEach(function (elem, idx) {\n elem.presortIndex = idx;\n }); // sort caption byte-pairs based on their PTS values\n\n this.captionPackets_.sort(function (a, b) {\n if (a.pts === b.pts) {\n return a.presortIndex - b.presortIndex;\n }\n\n return a.pts - b.pts;\n });\n this.captionPackets_.forEach(function (packet) {\n if (packet.type < 2) {\n // Dispatch packet to the right Cea608Stream\n this.dispatchCea608Packet(packet);\n } else {\n // Dispatch packet to the Cea708Stream\n this.dispatchCea708Packet(packet);\n }\n }, this);\n this.captionPackets_.length = 0;\n this.flushCCStreams(flushType);\n };\n\n CaptionStream.prototype.flush = function () {\n return this.flushStream('flush');\n }; // Only called if handling partial data\n\n\n CaptionStream.prototype.partialFlush = function () {\n return this.flushStream('partialFlush');\n };\n\n CaptionStream.prototype.reset = function () {\n this.latestDts_ = null;\n this.ignoreNextEqualDts_ = false;\n this.numSameDts_ = 0;\n this.activeCea608Channel_ = [null, null];\n this.ccStreams_.forEach(function (ccStream) {\n ccStream.reset();\n });\n }; // From the CEA-608 spec:\n\n /*\n * When XDS sub-packets are interleaved with other services, the end of each sub-packet shall be followed\n * by a control pair to change to a different service. When any of the control codes from 0x10 to 0x1F is\n * used to begin a control code pair, it indicates the return to captioning or Text data. The control code pair\n * and subsequent data should then be processed according to the FCC rules. It may be necessary for the\n * line 21 data encoder to automatically insert a control code pair (i.e. 
RCL, RU2, RU3, RU4, RDC, or RTD)\n * to switch to captioning or Text.\n */\n // With that in mind, we ignore any data between an XDS control code and a\n // subsequent closed-captioning control code.\n\n\n CaptionStream.prototype.dispatchCea608Packet = function (packet) {\n // NOTE: packet.type is the CEA608 field\n if (this.setsTextOrXDSActive(packet)) {\n this.activeCea608Channel_[packet.type] = null;\n } else if (this.setsChannel1Active(packet)) {\n this.activeCea608Channel_[packet.type] = 0;\n } else if (this.setsChannel2Active(packet)) {\n this.activeCea608Channel_[packet.type] = 1;\n }\n\n if (this.activeCea608Channel_[packet.type] === null) {\n // If we haven't received anything to set the active channel, or the\n // packets are Text/XDS data, discard the data; we don't want jumbled\n // captions\n return;\n }\n\n this.ccStreams_[(packet.type << 1) + this.activeCea608Channel_[packet.type]].push(packet);\n };\n\n CaptionStream.prototype.setsChannel1Active = function (packet) {\n return (packet.ccData & 0x7800) === 0x1000;\n };\n\n CaptionStream.prototype.setsChannel2Active = function (packet) {\n return (packet.ccData & 0x7800) === 0x1800;\n };\n\n CaptionStream.prototype.setsTextOrXDSActive = function (packet) {\n return (packet.ccData & 0x7100) === 0x0100 || (packet.ccData & 0x78fe) === 0x102a || (packet.ccData & 0x78fe) === 0x182a;\n };\n\n CaptionStream.prototype.dispatchCea708Packet = function (packet) {\n if (this.parse708captions_) {\n this.cc708Stream_.push(packet);\n }\n }; // ----------------------\n // Session to Application\n // ----------------------\n // This hash maps special and extended character codes to their\n // proper Unicode equivalent. The first one-byte key is just a\n // non-standard character code. The two-byte keys that follow are\n // the extended CEA708 character codes, along with the preceding\n // 0x10 extended character byte to distinguish these codes from\n // non-extended character codes. 
Every CEA708 character code that\n // is not in this object maps directly to a standard unicode\n // character code.\n // The transparent space and non-breaking transparent space are\n // technically not fully supported since there is no code to\n // make them transparent, so they have normal non-transparent\n // stand-ins.\n // The special closed caption (CC) character isn't a standard\n // unicode character, so a fairly similar unicode character was\n // chosen in it's place.\n\n\n var CHARACTER_TRANSLATION_708 = {\n 0x7f: 0x266a,\n // \u266A\n 0x1020: 0x20,\n // Transparent Space\n 0x1021: 0xa0,\n // Nob-breaking Transparent Space\n 0x1025: 0x2026,\n // \u2026\n 0x102a: 0x0160,\n // \u0160\n 0x102c: 0x0152,\n // \u0152\n 0x1030: 0x2588,\n // \u2588\n 0x1031: 0x2018,\n // \u2018\n 0x1032: 0x2019,\n // \u2019\n 0x1033: 0x201c,\n // \u201C\n 0x1034: 0x201d,\n // \u201D\n 0x1035: 0x2022,\n // \u2022\n 0x1039: 0x2122,\n // \u2122\n 0x103a: 0x0161,\n // \u0161\n 0x103c: 0x0153,\n // \u0153\n 0x103d: 0x2120,\n // \u2120\n 0x103f: 0x0178,\n // \u0178\n 0x1076: 0x215b,\n // \u215B\n 0x1077: 0x215c,\n // \u215C\n 0x1078: 0x215d,\n // \u215D\n 0x1079: 0x215e,\n // \u215E\n 0x107a: 0x23d0,\n // \u23D0\n 0x107b: 0x23a4,\n // \u23A4\n 0x107c: 0x23a3,\n // \u23A3\n 0x107d: 0x23af,\n // \u23AF\n 0x107e: 0x23a6,\n // \u23A6\n 0x107f: 0x23a1,\n // \u23A1\n 0x10a0: 0x3138 // \u3138 (CC char)\n\n };\n\n var get708CharFromCode = function get708CharFromCode(code) {\n var newCode = CHARACTER_TRANSLATION_708[code] || code;\n\n if (code & 0x1000 && code === newCode) {\n // Invalid extended code\n return '';\n }\n\n return String.fromCharCode(newCode);\n };\n\n var within708TextBlock = function within708TextBlock(b) {\n return 0x20 <= b && b <= 0x7f || 0xa0 <= b && b <= 0xff;\n };\n\n var Cea708Window = function Cea708Window(windowNum) {\n this.windowNum = windowNum;\n this.reset();\n };\n\n Cea708Window.prototype.reset = function () {\n this.clearText();\n this.pendingNewLine = false;\n this.winAttr = {};\n this.penAttr = {};\n this.penLoc = {};\n this.penColor = {}; // These default values are arbitrary,\n // defineWindow will usually override them\n\n this.visible = 0;\n this.rowLock = 0;\n this.columnLock = 0;\n this.priority = 0;\n this.relativePositioning = 0;\n this.anchorVertical = 0;\n this.anchorHorizontal = 0;\n this.anchorPoint = 0;\n this.rowCount = 1;\n this.virtualRowCount = this.rowCount + 1;\n this.columnCount = 41;\n this.windowStyle = 0;\n this.penStyle = 0;\n };\n\n Cea708Window.prototype.getText = function () {\n return this.rows.join('\\n');\n };\n\n Cea708Window.prototype.clearText = function () {\n this.rows = [''];\n this.rowIdx = 0;\n };\n\n Cea708Window.prototype.newLine = function (pts) {\n if (this.rows.length >= this.virtualRowCount && typeof this.beforeRowOverflow === 'function') {\n this.beforeRowOverflow(pts);\n }\n\n if (this.rows.length > 0) {\n this.rows.push('');\n this.rowIdx++;\n } // Show all virtual rows since there's no visible scrolling\n\n\n while (this.rows.length > this.virtualRowCount) {\n this.rows.shift();\n this.rowIdx--;\n }\n };\n\n Cea708Window.prototype.isEmpty = function () {\n if (this.rows.length === 0) {\n return true;\n } else if (this.rows.length === 1) {\n return this.rows[0] === '';\n }\n\n return false;\n };\n\n Cea708Window.prototype.addText = function (text) {\n this.rows[this.rowIdx] += text;\n };\n\n Cea708Window.prototype.backspace = function () {\n if (!this.isEmpty()) {\n var row = this.rows[this.rowIdx];\n this.rows[this.rowIdx] = 
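// drop the last character from the current row\n 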
row.substr(0, row.length - 1);\n }\n };\n\n var Cea708Service = function Cea708Service(serviceNum) {\n this.serviceNum = serviceNum;\n this.text = '';\n this.currentWindow = new Cea708Window(-1);\n this.windows = [];\n };\n /**\n * Initialize service windows\n * Must be run before service use\n *\n * @param {Integer} pts PTS value\n * @param {Function} beforeRowOverflow Function to execute before row overflow of a window\n */\n\n\n Cea708Service.prototype.init = function (pts, beforeRowOverflow) {\n this.startPts = pts;\n\n for (var win = 0; win < 8; win++) {\n this.windows[win] = new Cea708Window(win);\n\n if (typeof beforeRowOverflow === 'function') {\n this.windows[win].beforeRowOverflow = beforeRowOverflow;\n }\n }\n };\n /**\n * Set current window of service to be affected by commands\n *\n * @param {Integer} windowNum Window number\n */\n\n\n Cea708Service.prototype.setCurrentWindow = function (windowNum) {\n this.currentWindow = this.windows[windowNum];\n };\n\n var Cea708Stream = function Cea708Stream() {\n Cea708Stream.prototype.init.call(this);\n var self = this;\n this.current708Packet = null;\n this.services = {};\n\n this.push = function (packet) {\n if (packet.type === 3) {\n // 708 packet start\n self.new708Packet();\n self.add708Bytes(packet);\n } else {\n if (self.current708Packet === null) {\n // This should only happen at the start of a file if there's no packet start.\n self.new708Packet();\n }\n\n self.add708Bytes(packet);\n }\n };\n };\n\n Cea708Stream.prototype = new stream();\n /**\n * Push current 708 packet, create new 708 packet.\n */\n\n Cea708Stream.prototype.new708Packet = function () {\n if (this.current708Packet !== null) {\n this.push708Packet();\n }\n\n this.current708Packet = {\n data: [],\n ptsVals: []\n };\n };\n /**\n * Add pts and both bytes from packet into current 708 packet.\n */\n\n\n Cea708Stream.prototype.add708Bytes = function (packet) {\n var data = packet.ccData;\n var byte0 = data >>> 8;\n var byte1 = data & 0xff; // I would just keep a list of packets instead of bytes, but it isn't clear in the spec\n // that service blocks will always line up with byte pairs.\n\n this.current708Packet.ptsVals.push(packet.pts);\n this.current708Packet.data.push(byte0);\n this.current708Packet.data.push(byte1);\n };\n /**\n * Parse completed 708 packet into service blocks and push each service block.\n */\n\n\n Cea708Stream.prototype.push708Packet = function () {\n var packet708 = this.current708Packet;\n var packetData = packet708.data;\n var serviceNum = null;\n var blockSize = null;\n var i = 0;\n var b = packetData[i++];\n packet708.seq = b >> 6;\n packet708.sizeCode = b & 0x3f; // 0b00111111;\n\n for (; i < packetData.length; i++) {\n b = packetData[i++];\n serviceNum = b >> 5;\n blockSize = b & 0x1f; // 0b00011111\n\n if (serviceNum === 7 && blockSize > 0) {\n // Extended service num\n b = packetData[i++];\n serviceNum = b;\n }\n\n this.pushServiceBlock(serviceNum, i, blockSize);\n\n if (blockSize > 0) {\n i += blockSize - 1;\n }\n }\n };\n /**\n * Parse service block, execute commands, read text.\n *\n * Note: While many of these commands serve important purposes,\n * many others just parse out the parameters or attributes, but\n * nothing is done with them because this is not a full and complete\n * implementation of the entire 708 spec.\n *\n * @param {Integer} serviceNum Service number\n * @param {Integer} start Start index of the 708 packet data\n * @param {Integer} size Block size\n */\n\n\n Cea708Stream.prototype.pushServiceBlock = function 
(serviceNum, start, size) {\n var b;\n var i = start;\n var packetData = this.current708Packet.data;\n var service = this.services[serviceNum];\n\n if (!service) {\n service = this.initService(serviceNum, i);\n }\n\n for (; i < start + size && i < packetData.length; i++) {\n b = packetData[i];\n\n if (within708TextBlock(b)) {\n i = this.handleText(i, service);\n } else if (b === 0x10) {\n i = this.extendedCommands(i, service);\n } else if (0x80 <= b && b <= 0x87) {\n i = this.setCurrentWindow(i, service);\n } else if (0x98 <= b && b <= 0x9f) {\n i = this.defineWindow(i, service);\n } else if (b === 0x88) {\n i = this.clearWindows(i, service);\n } else if (b === 0x8c) {\n i = this.deleteWindows(i, service);\n } else if (b === 0x89) {\n i = this.displayWindows(i, service);\n } else if (b === 0x8a) {\n i = this.hideWindows(i, service);\n } else if (b === 0x8b) {\n i = this.toggleWindows(i, service);\n } else if (b === 0x97) {\n i = this.setWindowAttributes(i, service);\n } else if (b === 0x90) {\n i = this.setPenAttributes(i, service);\n } else if (b === 0x91) {\n i = this.setPenColor(i, service);\n } else if (b === 0x92) {\n i = this.setPenLocation(i, service);\n } else if (b === 0x8f) {\n service = this.reset(i, service);\n } else if (b === 0x08) {\n // BS: Backspace\n service.currentWindow.backspace();\n } else if (b === 0x0c) {\n // FF: Form feed\n service.currentWindow.clearText();\n } else if (b === 0x0d) {\n // CR: Carriage return\n service.currentWindow.pendingNewLine = true;\n } else if (b === 0x0e) {\n // HCR: Horizontal carriage return\n service.currentWindow.clearText();\n } else if (b === 0x8d) {\n // DLY: Delay, nothing to do\n i++;\n } else ;\n }\n };\n /**\n * Execute an extended command\n *\n * @param {Integer} i Current index in the 708 packet\n * @param {Service} service The service object to be affected\n * @return {Integer} New index after parsing\n */\n\n\n Cea708Stream.prototype.extendedCommands = function (i, service) {\n var packetData = this.current708Packet.data;\n var b = packetData[++i];\n\n if (within708TextBlock(b)) {\n i = this.handleText(i, service, true);\n }\n\n return i;\n };\n /**\n * Get PTS value of a given byte index\n *\n * @param {Integer} byteIndex Index of the byte\n * @return {Integer} PTS\n */\n\n\n Cea708Stream.prototype.getPts = function (byteIndex) {\n // There's 1 pts value per 2 bytes\n return this.current708Packet.ptsVals[Math.floor(byteIndex / 2)];\n };\n /**\n * Initializes a service\n *\n * @param {Integer} serviceNum Service number\n * @return {Service} Initialized service object\n */\n\n\n Cea708Stream.prototype.initService = function (serviceNum, i) {\n var self = this;\n this.services[serviceNum] = new Cea708Service(serviceNum);\n this.services[serviceNum].init(this.getPts(i), function (pts) {\n self.flushDisplayed(pts, self.services[serviceNum]);\n });\n return this.services[serviceNum];\n };\n /**\n * Execute text writing to current window\n *\n * @param {Integer} i Current index in the 708 packet\n * @param {Service} service The service object to be affected\n * @return {Integer} New index after parsing\n */\n\n\n Cea708Stream.prototype.handleText = function (i, service, isExtended) {\n var packetData = this.current708Packet.data;\n var b = packetData[i];\n var extended = isExtended ? 
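// extended codes are offset by 0x1000 so they hit the two-byte keys in\n // CHARACTER_TRANSLATION_708 above\n 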
0x1000 : 0x0000;\n var char = get708CharFromCode(extended | b);\n var win = service.currentWindow;\n\n if (win.pendingNewLine && !win.isEmpty()) {\n win.newLine(this.getPts(i));\n }\n\n win.pendingNewLine = false;\n win.addText(char);\n return i;\n };\n /**\n * Parse and execute the CW# command.\n *\n * Set the current window.\n *\n * @param {Integer} i Current index in the 708 packet\n * @param {Service} service The service object to be affected\n * @return {Integer} New index after parsing\n */\n\n\n Cea708Stream.prototype.setCurrentWindow = function (i, service) {\n var packetData = this.current708Packet.data;\n var b = packetData[i];\n var windowNum = b & 0x07;\n service.setCurrentWindow(windowNum);\n return i;\n };\n /**\n * Parse and execute the DF# command.\n *\n * Define a window and set it as the current window.\n *\n * @param {Integer} i Current index in the 708 packet\n * @param {Service} service The service object to be affected\n * @return {Integer} New index after parsing\n */\n\n\n Cea708Stream.prototype.defineWindow = function (i, service) {\n var packetData = this.current708Packet.data;\n var b = packetData[i];\n var windowNum = b & 0x07;\n service.setCurrentWindow(windowNum);\n var win = service.currentWindow;\n b = packetData[++i];\n win.visible = (b & 0x20) >> 5; // v\n\n win.rowLock = (b & 0x10) >> 4; // rl\n\n win.columnLock = (b & 0x08) >> 3; // cl\n\n win.priority = b & 0x07; // p\n\n b = packetData[++i];\n win.relativePositioning = (b & 0x80) >> 7; // rp\n\n win.anchorVertical = b & 0x7f; // av\n\n b = packetData[++i];\n win.anchorHorizontal = b; // ah\n\n b = packetData[++i];\n win.anchorPoint = (b & 0xf0) >> 4; // ap\n\n win.rowCount = b & 0x0f; // rc\n\n b = packetData[++i];\n win.columnCount = b & 0x3f; // cc\n\n b = packetData[++i];\n win.windowStyle = (b & 0x38) >> 3; // ws\n\n win.penStyle = b & 0x07; // ps\n // The spec says there are (rowCount+1) \"virtual rows\"\n\n win.virtualRowCount = win.rowCount + 1;\n return i;\n };\n /**\n * Parse and execute the SWA command.\n *\n * Set attributes of the current window.\n *\n * @param {Integer} i Current index in the 708 packet\n * @param {Service} service The service object to be affected\n * @return {Integer} New index after parsing\n */\n\n\n Cea708Stream.prototype.setWindowAttributes = function (i, service) {\n var packetData = this.current708Packet.data;\n var b = packetData[i];\n var winAttr = service.currentWindow.winAttr;\n b = packetData[++i];\n winAttr.fillOpacity = (b & 0xc0) >> 6; // fo\n\n winAttr.fillRed = (b & 0x30) >> 4; // fr\n\n winAttr.fillGreen = (b & 0x0c) >> 2; // fg\n\n winAttr.fillBlue = b & 0x03; // fb\n\n b = packetData[++i];\n winAttr.borderType = (b & 0xc0) >> 6; // bt\n\n winAttr.borderRed = (b & 0x30) >> 4; // br\n\n winAttr.borderGreen = (b & 0x0c) >> 2; // bg\n\n winAttr.borderBlue = b & 0x03; // bb\n\n b = packetData[++i];\n winAttr.borderType += (b & 0x80) >> 5; // bt\n\n winAttr.wordWrap = (b & 0x40) >> 6; // ww\n\n winAttr.printDirection = (b & 0x30) >> 4; // pd\n\n winAttr.scrollDirection = (b & 0x0c) >> 2; // sd\n\n winAttr.justify = b & 0x03; // j\n\n b = packetData[++i];\n winAttr.effectSpeed = (b & 0xf0) >> 4; // es\n\n winAttr.effectDirection = (b & 0x0c) >> 2; // ed\n\n winAttr.displayEffect = b & 0x03; // de\n\n return i;\n };\n /**\n * Gather text from all displayed windows and push a caption to output.\n *\n * @param {Integer} pts PTS value at the time of the flush\n * @param {Service} service The service object to be affected\n */\n\n\n 
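// A consumption sketch, not part of mux.js itself: assuming the on/trigger event\n // API from the shared stream() base, captions pushed by pushCaption below can be\n // observed like this:\n //\n //   var cea708 = new Cea708Stream();\n //   cea708.on('data', function (caption) {\n //     // caption: { startPts: ..., endPts: ..., text: ..., stream: 'cc708_1' }\n //     console.log(caption.stream + ': ' + caption.text);\n //   });\n\n 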
Cea708Stream.prototype.flushDisplayed = function (pts, service) {\n var displayedText = []; // TODO: Positioning not supported, displaying multiple windows will not necessarily\n // display text in the correct order, but sample files so far have not shown any issue.\n\n for (var winId = 0; winId < 8; winId++) {\n if (service.windows[winId].visible && !service.windows[winId].isEmpty()) {\n displayedText.push(service.windows[winId].getText());\n }\n }\n\n service.endPts = pts;\n service.text = displayedText.join('\\n\\n');\n this.pushCaption(service);\n service.startPts = pts;\n };\n /**\n * Push a caption to output if the caption contains text.\n *\n * @param {Service} service The service object to be affected\n */\n\n\n Cea708Stream.prototype.pushCaption = function (service) {\n if (service.text !== '') {\n this.trigger('data', {\n startPts: service.startPts,\n endPts: service.endPts,\n text: service.text,\n stream: 'cc708_' + service.serviceNum\n });\n service.text = '';\n service.startPts = service.endPts;\n }\n };\n /**\n * Parse and execute the DSW command.\n *\n * Set visible property of windows based on the parsed bitmask.\n *\n * @param {Integer} i Current index in the 708 packet\n * @param {Service} service The service object to be affected\n * @return {Integer} New index after parsing\n */\n\n\n Cea708Stream.prototype.displayWindows = function (i, service) {\n var packetData = this.current708Packet.data;\n var b = packetData[++i];\n var pts = this.getPts(i);\n this.flushDisplayed(pts, service);\n\n for (var winId = 0; winId < 8; winId++) {\n if (b & 0x01 << winId) {\n service.windows[winId].visible = 1;\n }\n }\n\n return i;\n };\n /**\n * Parse and execute the HDW command.\n *\n * Set visible property of windows based on the parsed bitmask.\n *\n * @param {Integer} i Current index in the 708 packet\n * @param {Service} service The service object to be affected\n * @return {Integer} New index after parsing\n */\n\n\n Cea708Stream.prototype.hideWindows = function (i, service) {\n var packetData = this.current708Packet.data;\n var b = packetData[++i];\n var pts = this.getPts(i);\n this.flushDisplayed(pts, service);\n\n for (var winId = 0; winId < 8; winId++) {\n if (b & 0x01 << winId) {\n service.windows[winId].visible = 0;\n }\n }\n\n return i;\n };\n /**\n * Parse and execute the TGW command.\n *\n * Set visible property of windows based on the parsed bitmask.\n *\n * @param {Integer} i Current index in the 708 packet\n * @param {Service} service The service object to be affected\n * @return {Integer} New index after parsing\n */\n\n\n Cea708Stream.prototype.toggleWindows = function (i, service) {\n var packetData = this.current708Packet.data;\n var b = packetData[++i];\n var pts = this.getPts(i);\n this.flushDisplayed(pts, service);\n\n for (var winId = 0; winId < 8; winId++) {\n if (b & 0x01 << winId) {\n service.windows[winId].visible ^= 1;\n }\n }\n\n return i;\n };\n /**\n * Parse and execute the CLW command.\n *\n * Clear text of windows based on the parsed bitmask.\n *\n * @param {Integer} i Current index in the 708 packet\n * @param {Service} service The service object to be affected\n * @return {Integer} New index after parsing\n */\n\n\n Cea708Stream.prototype.clearWindows = function (i, service) {\n var packetData = this.current708Packet.data;\n var b = packetData[++i];\n var pts = this.getPts(i);\n this.flushDisplayed(pts, service);\n\n for (var winId = 0; winId < 8; winId++) {\n if (b & 0x01 << winId) {\n service.windows[winId].clearText();\n }\n }\n\n return i;\n 
};\n /**\n * Parse and execute the DLW command.\n *\n * Re-initialize windows based on the parsed bitmask.\n *\n * @param {Integer} i Current index in the 708 packet\n * @param {Service} service The service object to be affected\n * @return {Integer} New index after parsing\n */\n\n\n Cea708Stream.prototype.deleteWindows = function (i, service) {\n var packetData = this.current708Packet.data;\n var b = packetData[++i];\n var pts = this.getPts(i);\n this.flushDisplayed(pts, service);\n\n for (var winId = 0; winId < 8; winId++) {\n if (b & 0x01 << winId) {\n service.windows[winId].reset();\n }\n }\n\n return i;\n };\n /**\n * Parse and execute the SPA command.\n *\n * Set pen attributes of the current window.\n *\n * @param {Integer} i Current index in the 708 packet\n * @param {Service} service The service object to be affected\n * @return {Integer} New index after parsing\n */\n\n\n Cea708Stream.prototype.setPenAttributes = function (i, service) {\n var packetData = this.current708Packet.data;\n var b = packetData[i];\n var penAttr = service.currentWindow.penAttr;\n b = packetData[++i];\n penAttr.textTag = (b & 0xf0) >> 4; // tt\n\n penAttr.offset = (b & 0x0c) >> 2; // o\n\n penAttr.penSize = b & 0x03; // s\n\n b = packetData[++i];\n penAttr.italics = (b & 0x80) >> 7; // i\n\n penAttr.underline = (b & 0x40) >> 6; // u\n\n penAttr.edgeType = (b & 0x38) >> 3; // et\n\n penAttr.fontStyle = b & 0x07; // fs\n\n return i;\n };\n /**\n * Parse and execute the SPC command.\n *\n * Set pen color of the current window.\n *\n * @param {Integer} i Current index in the 708 packet\n * @param {Service} service The service object to be affected\n * @return {Integer} New index after parsing\n */\n\n\n Cea708Stream.prototype.setPenColor = function (i, service) {\n var packetData = this.current708Packet.data;\n var b = packetData[i];\n var penColor = service.currentWindow.penColor;\n b = packetData[++i];\n penColor.fgOpacity = (b & 0xc0) >> 6; // fo\n\n penColor.fgRed = (b & 0x30) >> 4; // fr\n\n penColor.fgGreen = (b & 0x0c) >> 2; // fg\n\n penColor.fgBlue = b & 0x03; // fb\n\n b = packetData[++i];\n penColor.bgOpacity = (b & 0xc0) >> 6; // bo\n\n penColor.bgRed = (b & 0x30) >> 4; // br\n\n penColor.bgGreen = (b & 0x0c) >> 2; // bg\n\n penColor.bgBlue = b & 0x03; // bb\n\n b = packetData[++i];\n penColor.edgeRed = (b & 0x30) >> 4; // er\n\n penColor.edgeGreen = (b & 0x0c) >> 2; // eg\n\n penColor.edgeBlue = b & 0x03; // eb\n\n return i;\n };\n /**\n * Parse and execute the SPL command.\n *\n * Set pen location of the current window.\n *\n * @param {Integer} i Current index in the 708 packet\n * @param {Service} service The service object to be affected\n * @return {Integer} New index after parsing\n */\n\n\n Cea708Stream.prototype.setPenLocation = function (i, service) {\n var packetData = this.current708Packet.data;\n var b = packetData[i];\n var penLoc = service.currentWindow.penLoc; // Positioning isn't really supported at the moment, so this essentially just inserts a linebreak\n\n service.currentWindow.pendingNewLine = true;\n b = packetData[++i];\n penLoc.row = b & 0x0f; // r\n\n b = packetData[++i];\n penLoc.column = b & 0x3f; // c\n\n return i;\n };\n /**\n * Execute the RST command.\n *\n * Reset service to a clean slate. 
Re-initialize.\n *\n * @param {Integer} i Current index in the 708 packet\n * @param {Service} service The service object to be affected\n * @return {Service} Re-initialized service\n */\n\n\n Cea708Stream.prototype.reset = function (i, service) {\n var pts = this.getPts(i);\n this.flushDisplayed(pts, service);\n return this.initService(service.serviceNum, i);\n }; // This hash maps non-ASCII, special, and extended character codes to their\n // proper Unicode equivalent. The first keys that are only a single byte\n // are the non-standard ASCII characters, which simply map the CEA608 byte\n // to the standard ASCII/Unicode. The two-byte keys that follow are the CEA608\n // character codes, but have their MSB bitmasked with 0x03 so that a lookup\n // can be performed regardless of the field and data channel on which the\n // character code was received.\n\n\n var CHARACTER_TRANSLATION = {\n 0x2a: 0xe1,\n // \u00E1\n 0x5c: 0xe9,\n // \u00E9\n 0x5e: 0xed,\n // \u00ED\n 0x5f: 0xf3,\n // \u00F3\n 0x60: 0xfa,\n // \u00FA\n 0x7b: 0xe7,\n // \u00E7\n 0x7c: 0xf7,\n // \u00F7\n 0x7d: 0xd1,\n // \u00D1\n 0x7e: 0xf1,\n // \u00F1\n 0x7f: 0x2588,\n // \u2588\n 0x0130: 0xae,\n // \u00AE\n 0x0131: 0xb0,\n // \u00B0\n 0x0132: 0xbd,\n // \u00BD\n 0x0133: 0xbf,\n // \u00BF\n 0x0134: 0x2122,\n // \u2122\n 0x0135: 0xa2,\n // \u00A2\n 0x0136: 0xa3,\n // \u00A3\n 0x0137: 0x266a,\n // \u266A\n 0x0138: 0xe0,\n // \u00E0\n 0x0139: 0xa0,\n //\n 0x013a: 0xe8,\n // \u00E8\n 0x013b: 0xe2,\n // \u00E2\n 0x013c: 0xea,\n // \u00EA\n 0x013d: 0xee,\n // \u00EE\n 0x013e: 0xf4,\n // \u00F4\n 0x013f: 0xfb,\n // \u00FB\n 0x0220: 0xc1,\n // \u00C1\n 0x0221: 0xc9,\n // \u00C9\n 0x0222: 0xd3,\n // \u00D3\n 0x0223: 0xda,\n // \u00DA\n 0x0224: 0xdc,\n // \u00DC\n 0x0225: 0xfc,\n // \u00FC\n 0x0226: 0x2018,\n // \u2018\n 0x0227: 0xa1,\n // \u00A1\n 0x0228: 0x2a,\n // *\n 0x0229: 0x27,\n // '\n 0x022a: 0x2014,\n // \u2014\n 0x022b: 0xa9,\n // \u00A9\n 0x022c: 0x2120,\n // \u2120\n 0x022d: 0x2022,\n // \u2022\n 0x022e: 0x201c,\n // \u201C\n 0x022f: 0x201d,\n // \u201D\n 0x0230: 0xc0,\n // \u00C0\n 0x0231: 0xc2,\n // \u00C2\n 0x0232: 0xc7,\n // \u00C7\n 0x0233: 0xc8,\n // \u00C8\n 0x0234: 0xca,\n // \u00CA\n 0x0235: 0xcb,\n // \u00CB\n 0x0236: 0xeb,\n // \u00EB\n 0x0237: 0xce,\n // \u00CE\n 0x0238: 0xcf,\n // \u00CF\n 0x0239: 0xef,\n // \u00EF\n 0x023a: 0xd4,\n // \u00D4\n 0x023b: 0xd9,\n // \u00D9\n 0x023c: 0xf9,\n // \u00F9\n 0x023d: 0xdb,\n // \u00DB\n 0x023e: 0xab,\n // \u00AB\n 0x023f: 0xbb,\n // \u00BB\n 0x0320: 0xc3,\n // \u00C3\n 0x0321: 0xe3,\n // \u00E3\n 0x0322: 0xcd,\n // \u00CD\n 0x0323: 0xcc,\n // \u00CC\n 0x0324: 0xec,\n // \u00EC\n 0x0325: 0xd2,\n // \u00D2\n 0x0326: 0xf2,\n // \u00F2\n 0x0327: 0xd5,\n // \u00D5\n 0x0328: 0xf5,\n // \u00F5\n 0x0329: 0x7b,\n // {\n 0x032a: 0x7d,\n // }\n 0x032b: 0x5c,\n // \\\n 0x032c: 0x5e,\n // ^\n 0x032d: 0x5f,\n // _\n 0x032e: 0x7c,\n // |\n 0x032f: 0x7e,\n // ~\n 0x0330: 0xc4,\n // \u00C4\n 0x0331: 0xe4,\n // \u00E4\n 0x0332: 0xd6,\n // \u00D6\n 0x0333: 0xf6,\n // \u00F6\n 0x0334: 0xdf,\n // \u00DF\n 0x0335: 0xa5,\n // \u00A5\n 0x0336: 0xa4,\n // \u00A4\n 0x0337: 0x2502,\n // \u2502\n 0x0338: 0xc5,\n // \u00C5\n 0x0339: 0xe5,\n // \u00E5\n 0x033a: 0xd8,\n // \u00D8\n 0x033b: 0xf8,\n // \u00F8\n 0x033c: 0x250c,\n // \u250C\n 0x033d: 0x2510,\n // \u2510\n 0x033e: 0x2514,\n // \u2514\n 0x033f: 0x2518 // \u2518\n\n };\n\n var getCharFromCode = function getCharFromCode(code) {\n if (code === null) {\n return '';\n }\n\n code = CHARACTER_TRANSLATION[code] || code;\n return 
String.fromCharCode(code);\n }; // the index of the last row in a CEA-608 display buffer\n\n\n var BOTTOM_ROW = 14; // This array is used for mapping PACs -> row #, since there's no way of\n // getting it through bit logic.\n\n var ROWS = [0x1100, 0x1120, 0x1200, 0x1220, 0x1500, 0x1520, 0x1600, 0x1620, 0x1700, 0x1720, 0x1000, 0x1300, 0x1320, 0x1400, 0x1420]; // CEA-608 captions are rendered onto a 34x15 matrix of character\n // cells. The \"bottom\" row is the last element in the outer array.\n\n var createDisplayBuffer = function createDisplayBuffer() {\n var result = [],\n i = BOTTOM_ROW + 1;\n\n while (i--) {\n result.push('');\n }\n\n return result;\n };\n\n var Cea608Stream = function Cea608Stream(field, dataChannel) {\n Cea608Stream.prototype.init.call(this);\n this.field_ = field || 0;\n this.dataChannel_ = dataChannel || 0;\n this.name_ = 'CC' + ((this.field_ << 1 | this.dataChannel_) + 1);\n this.setConstants();\n this.reset();\n\n this.push = function (packet) {\n var data, swap, char0, char1, text; // remove the parity bits\n\n data = packet.ccData & 0x7f7f; // ignore duplicate control codes; the spec demands they're sent twice\n\n if (data === this.lastControlCode_) {\n this.lastControlCode_ = null;\n return;\n } // Store control codes\n\n\n if ((data & 0xf000) === 0x1000) {\n this.lastControlCode_ = data;\n } else if (data !== this.PADDING_) {\n this.lastControlCode_ = null;\n }\n\n char0 = data >>> 8;\n char1 = data & 0xff;\n\n if (data === this.PADDING_) {\n return;\n } else if (data === this.RESUME_CAPTION_LOADING_) {\n this.mode_ = 'popOn';\n } else if (data === this.END_OF_CAPTION_) {\n // If an EOC is received while in paint-on mode, the displayed caption\n // text should be swapped to non-displayed memory as if it was a pop-on\n // caption. 
Because of that, we should explicitly switch back to pop-on\n // mode\n this.mode_ = 'popOn';\n this.clearFormatting(packet.pts); // if a caption was being displayed, it's gone now\n\n this.flushDisplayed(packet.pts); // flip memory\n\n swap = this.displayed_;\n this.displayed_ = this.nonDisplayed_;\n this.nonDisplayed_ = swap; // start measuring the time to display the caption\n\n this.startPts_ = packet.pts;\n } else if (data === this.ROLL_UP_2_ROWS_) {\n this.rollUpRows_ = 2;\n this.setRollUp(packet.pts);\n } else if (data === this.ROLL_UP_3_ROWS_) {\n this.rollUpRows_ = 3;\n this.setRollUp(packet.pts);\n } else if (data === this.ROLL_UP_4_ROWS_) {\n this.rollUpRows_ = 4;\n this.setRollUp(packet.pts);\n } else if (data === this.CARRIAGE_RETURN_) {\n this.clearFormatting(packet.pts);\n this.flushDisplayed(packet.pts);\n this.shiftRowsUp_();\n this.startPts_ = packet.pts;\n } else if (data === this.BACKSPACE_) {\n if (this.mode_ === 'popOn') {\n this.nonDisplayed_[this.row_] = this.nonDisplayed_[this.row_].slice(0, -1);\n } else {\n this.displayed_[this.row_] = this.displayed_[this.row_].slice(0, -1);\n }\n } else if (data === this.ERASE_DISPLAYED_MEMORY_) {\n this.flushDisplayed(packet.pts);\n this.displayed_ = createDisplayBuffer();\n } else if (data === this.ERASE_NON_DISPLAYED_MEMORY_) {\n this.nonDisplayed_ = createDisplayBuffer();\n } else if (data === this.RESUME_DIRECT_CAPTIONING_) {\n if (this.mode_ !== 'paintOn') {\n // NOTE: This should be removed when proper caption positioning is\n // implemented\n this.flushDisplayed(packet.pts);\n this.displayed_ = createDisplayBuffer();\n }\n\n this.mode_ = 'paintOn';\n this.startPts_ = packet.pts; // Append special characters to caption text\n } else if (this.isSpecialCharacter(char0, char1)) {\n // Bitmask char0 so that we can apply character transformations\n // regardless of field and data channel.\n // Then byte-shift to the left and OR with char1 so we can pass the\n // entire character code to `getCharFromCode`.\n char0 = (char0 & 0x03) << 8;\n text = getCharFromCode(char0 | char1);\n this[this.mode_](packet.pts, text);\n this.column_++; // Append extended characters to caption text\n } else if (this.isExtCharacter(char0, char1)) {\n // Extended characters always follow their \"non-extended\" equivalents.\n // IE if a \"\u00E8\" is desired, you'll always receive \"e\u00E8\"; non-compliant\n // decoders are supposed to drop the \"\u00E8\", while compliant decoders\n // backspace the \"e\" and insert \"\u00E8\".\n // Delete the previous character\n if (this.mode_ === 'popOn') {\n this.nonDisplayed_[this.row_] = this.nonDisplayed_[this.row_].slice(0, -1);\n } else {\n this.displayed_[this.row_] = this.displayed_[this.row_].slice(0, -1);\n } // Bitmask char0 so that we can apply character transformations\n // regardless of field and data channel.\n // Then byte-shift to the left and OR with char1 so we can pass the\n // entire character code to `getCharFromCode`.\n\n\n char0 = (char0 & 0x03) << 8;\n text = getCharFromCode(char0 | char1);\n this[this.mode_](packet.pts, text);\n this.column_++; // Process mid-row codes\n } else if (this.isMidRowCode(char0, char1)) {\n // Attributes are not additive, so clear all formatting\n this.clearFormatting(packet.pts); // According to the standard, mid-row codes\n // should be replaced with spaces, so add one now\n\n this[this.mode_](packet.pts, ' ');\n this.column_++;\n\n if ((char1 & 0xe) === 0xe) {\n this.addFormatting(packet.pts, ['i']);\n }\n\n if ((char1 & 0x1) === 0x1) {\n 
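// the low bit of a mid-row code's second byte turns underline on\n 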
this.addFormatting(packet.pts, ['u']);\n } // Detect offset control codes and adjust cursor\n\n } else if (this.isOffsetControlCode(char0, char1)) {\n // Cursor position is set by indent PAC (see below) in 4-column\n // increments, with an additional offset code of 1-3 to reach any\n // of the 32 columns specified by CEA-608. So all we need to do\n // here is increment the column cursor by the given offset.\n this.column_ += char1 & 0x03; // Detect PACs (Preamble Address Codes)\n } else if (this.isPAC(char0, char1)) {\n // There's no logic for PAC -> row mapping, so we have to just\n // find the row code in an array and use its index :(\n var row = ROWS.indexOf(data & 0x1f20); // Configure the caption window if we're in roll-up mode\n\n if (this.mode_ === 'rollUp') {\n // This implies that the base row is incorrectly set.\n // As per the recommendation in CEA-608(Base Row Implementation), defer to the number\n // of roll-up rows set.\n if (row - this.rollUpRows_ + 1 < 0) {\n row = this.rollUpRows_ - 1;\n }\n\n this.setRollUp(packet.pts, row);\n }\n\n if (row !== this.row_) {\n // formatting is only persistent for current row\n this.clearFormatting(packet.pts);\n this.row_ = row;\n } // All PACs can apply underline, so detect and apply\n // (All odd-numbered second bytes set underline)\n\n\n if (char1 & 0x1 && this.formatting_.indexOf('u') === -1) {\n this.addFormatting(packet.pts, ['u']);\n }\n\n if ((data & 0x10) === 0x10) {\n // We've got an indent level code. Each successive even number\n // increments the column cursor by 4, so we can get the desired\n // column position by bit-shifting to the right (to get n/2)\n // and multiplying by 4.\n this.column_ = ((data & 0xe) >> 1) * 4;\n }\n\n if (this.isColorPAC(char1)) {\n // it's a color code, though we only support white, which\n // can be either normal or italicized. white italics can be\n // either 0x4e or 0x6e depending on the row, so we just\n // bitwise-and with 0xe to see if italics should be turned on\n if ((char1 & 0xe) === 0xe) {\n this.addFormatting(packet.pts, ['i']);\n }\n } // We have a normal character in char0, and possibly one in char1\n\n } else if (this.isNormalChar(char0)) {\n if (char1 === 0x00) {\n char1 = null;\n }\n\n text = getCharFromCode(char0);\n text += getCharFromCode(char1);\n this[this.mode_](packet.pts, text);\n this.column_ += text.length;\n } // finish data processing\n\n };\n };\n\n Cea608Stream.prototype = new stream(); // Trigger a cue point that captures the current state of the\n // display buffer\n\n Cea608Stream.prototype.flushDisplayed = function (pts) {\n var content = this.displayed_ // remove spaces from the start and end of the string\n .map(function (row) {\n try {\n return row.trim();\n } catch (e) {\n // Ordinarily, this shouldn't happen. However, caption\n // parsing errors should not throw exceptions and\n // break playback.\n // eslint-disable-next-line no-console\n console.error('Skipping malformed caption.');\n return '';\n }\n }) // combine all text rows to display in one cue\n .join('\\n') // and remove blank rows from the start and end, but not the middle\n .replace(/^\\n+|\\n+$/g, '');\n\n if (content.length) {\n this.trigger('data', {\n startPts: this.startPts_,\n endPts: pts,\n text: content,\n stream: this.name_\n });\n }\n };\n /**\n * Zero out the data, used for startup and on seek\n */\n\n\n Cea608Stream.prototype.reset = function () {\n this.mode_ = 'popOn'; // When in roll-up mode, the index of the last row that will\n // actually display captions. 
If a caption is shifted to a row\n // with a lower index than this, it is cleared from the display\n // buffer\n\n this.topRow_ = 0;\n this.startPts_ = 0;\n this.displayed_ = createDisplayBuffer();\n this.nonDisplayed_ = createDisplayBuffer();\n this.lastControlCode_ = null; // Track row and column for proper line-breaking and spacing\n\n this.column_ = 0;\n this.row_ = BOTTOM_ROW;\n this.rollUpRows_ = 2; // This variable holds currently-applied formatting\n\n this.formatting_ = [];\n };\n /**\n * Sets up control code and related constants for this instance\n */\n\n\n Cea608Stream.prototype.setConstants = function () {\n // The following attributes have these uses:\n // ext_ : char0 for mid-row codes, and the base for extended\n // chars (ext_+0, ext_+1, and ext_+2 are char0s for\n // extended codes)\n // control_: char0 for control codes, except byte-shifted to the\n // left so that we can do this.control_ | CONTROL_CODE\n // offset_: char0 for tab offset codes\n //\n // It's also worth noting that control codes, and _only_ control codes,\n // differ between field 1 and field2. Field 2 control codes are always\n // their field 1 value plus 1. That's why there's the \"| field\" on the\n // control value.\n if (this.dataChannel_ === 0) {\n this.BASE_ = 0x10;\n this.EXT_ = 0x11;\n this.CONTROL_ = (0x14 | this.field_) << 8;\n this.OFFSET_ = 0x17;\n } else if (this.dataChannel_ === 1) {\n this.BASE_ = 0x18;\n this.EXT_ = 0x19;\n this.CONTROL_ = (0x1c | this.field_) << 8;\n this.OFFSET_ = 0x1f;\n } // Constants for the LSByte command codes recognized by Cea608Stream. This\n // list is not exhaustive. For a more comprehensive listing and semantics see\n // http://www.gpo.gov/fdsys/pkg/CFR-2010-title47-vol1/pdf/CFR-2010-title47-vol1-sec15-119.pdf\n // Padding\n\n\n this.PADDING_ = 0x0000; // Pop-on Mode\n\n this.RESUME_CAPTION_LOADING_ = this.CONTROL_ | 0x20;\n this.END_OF_CAPTION_ = this.CONTROL_ | 0x2f; // Roll-up Mode\n\n this.ROLL_UP_2_ROWS_ = this.CONTROL_ | 0x25;\n this.ROLL_UP_3_ROWS_ = this.CONTROL_ | 0x26;\n this.ROLL_UP_4_ROWS_ = this.CONTROL_ | 0x27;\n this.CARRIAGE_RETURN_ = this.CONTROL_ | 0x2d; // paint-on mode\n\n this.RESUME_DIRECT_CAPTIONING_ = this.CONTROL_ | 0x29; // Erasure\n\n this.BACKSPACE_ = this.CONTROL_ | 0x21;\n this.ERASE_DISPLAYED_MEMORY_ = this.CONTROL_ | 0x2c;\n this.ERASE_NON_DISPLAYED_MEMORY_ = this.CONTROL_ | 0x2e;\n };\n /**\n * Detects if the 2-byte packet data is a special character\n *\n * Special characters have a second byte in the range 0x30 to 0x3f,\n * with the first byte being 0x11 (for data channel 1) or 0x19 (for\n * data channel 2).\n *\n * @param {Integer} char0 The first byte\n * @param {Integer} char1 The second byte\n * @return {Boolean} Whether the 2 bytes are an special character\n */\n\n\n Cea608Stream.prototype.isSpecialCharacter = function (char0, char1) {\n return char0 === this.EXT_ && char1 >= 0x30 && char1 <= 0x3f;\n };\n /**\n * Detects if the 2-byte packet data is an extended character\n *\n * Extended characters have a second byte in the range 0x20 to 0x3f,\n * with the first byte being 0x12 or 0x13 (for data channel 1) or\n * 0x1a or 0x1b (for data channel 2).\n *\n * @param {Integer} char0 The first byte\n * @param {Integer} char1 The second byte\n * @return {Boolean} Whether the 2 bytes are an extended character\n */\n\n\n Cea608Stream.prototype.isExtCharacter = function (char0, char1) {\n return (char0 === this.EXT_ + 1 || char0 === this.EXT_ + 2) && char1 >= 0x20 && char1 <= 0x3f;\n };\n /**\n * Detects if the 2-byte packet is a 
mid-row code\n *\n * Mid-row codes have a second byte in the range 0x20 to 0x2f, with\n * the first byte being 0x11 (for data channel 1) or 0x19 (for data\n * channel 2).\n *\n * @param {Integer} char0 The first byte\n * @param {Integer} char1 The second byte\n * @return {Boolean} Whether the 2 bytes are a mid-row code\n */\n\n\n Cea608Stream.prototype.isMidRowCode = function (char0, char1) {\n return char0 === this.EXT_ && char1 >= 0x20 && char1 <= 0x2f;\n };\n /**\n * Detects if the 2-byte packet is an offset control code\n *\n * Offset control codes have a second byte in the range 0x21 to 0x23,\n * with the first byte being 0x17 (for data channel 1) or 0x1f (for\n * data channel 2).\n *\n * @param {Integer} char0 The first byte\n * @param {Integer} char1 The second byte\n * @return {Boolean} Whether the 2 bytes are an offset control code\n */\n\n\n Cea608Stream.prototype.isOffsetControlCode = function (char0, char1) {\n return char0 === this.OFFSET_ && char1 >= 0x21 && char1 <= 0x23;\n };\n /**\n * Detects if the 2-byte packet is a Preamble Address Code\n *\n * PACs have a first byte in the range 0x10 to 0x17 (for data channel 1)\n * or 0x18 to 0x1f (for data channel 2), with the second byte in the\n * range 0x40 to 0x7f.\n *\n * @param {Integer} char0 The first byte\n * @param {Integer} char1 The second byte\n * @return {Boolean} Whether the 2 bytes are a PAC\n */\n\n\n Cea608Stream.prototype.isPAC = function (char0, char1) {\n return char0 >= this.BASE_ && char0 < this.BASE_ + 8 && char1 >= 0x40 && char1 <= 0x7f;\n };\n /**\n * Detects if a packet's second byte is in the range of a PAC color code\n *\n * PAC color codes have the second byte be in the range 0x40 to 0x4f, or\n * 0x60 to 0x6f.\n *\n * @param {Integer} char1 The second byte\n * @return {Boolean} Whether the byte is a color PAC\n */\n\n\n Cea608Stream.prototype.isColorPAC = function (char1) {\n return char1 >= 0x40 && char1 <= 0x4f || char1 >= 0x60 && char1 <= 0x7f;\n };\n /**\n * Detects if a single byte is in the range of a normal character\n *\n * Normal text bytes are in the range 0x20 to 0x7f.\n *\n * @param {Integer} char The byte\n * @return {Boolean} Whether the byte is a normal character\n */\n\n\n Cea608Stream.prototype.isNormalChar = function (char) {\n return char >= 0x20 && char <= 0x7f;\n };\n /**\n * Configures roll-up\n *\n * @param {Integer} pts Current PTS\n * @param {Integer} newBaseRow Used by PACs to slide the current window to\n * a new position\n */\n\n\n Cea608Stream.prototype.setRollUp = function (pts, newBaseRow) {\n // Reset the base row to the bottom row when switching modes\n if (this.mode_ !== 'rollUp') {\n this.row_ = BOTTOM_ROW;\n this.mode_ = 'rollUp'; // Spec says to wipe memories when switching to roll-up\n\n this.flushDisplayed(pts);\n this.nonDisplayed_ = createDisplayBuffer();\n this.displayed_ = createDisplayBuffer();\n }\n\n if (newBaseRow !== undefined && newBaseRow !== this.row_) {\n // move currently displayed captions (up or down) to the new base row\n for (var i = 0; i < this.rollUpRows_; i++) {\n this.displayed_[newBaseRow - i] = this.displayed_[this.row_ - i];\n this.displayed_[this.row_ - i] = '';\n }\n }\n\n if (newBaseRow === undefined) {\n newBaseRow = this.row_;\n }\n\n this.topRow_ = newBaseRow - this.rollUpRows_ + 1;\n }; // Adds the opening HTML tag for the passed character to the caption text,\n // and keeps track of it for later closing\n\n\n Cea608Stream.prototype.addFormatting = function (pts, format) {\n this.formatting_ = this.formatting_.concat(format);\n var 
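/* e.g. ['i', 'u'] becomes '<i><u>' */ 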
text = format.reduce(function (text, format) {\n return text + '<' + format + '>';\n }, '');\n this[this.mode_](pts, text);\n }; // Adds HTML closing tags for current formatting to caption text and\n // clears remembered formatting\n\n\n Cea608Stream.prototype.clearFormatting = function (pts) {\n if (!this.formatting_.length) {\n return;\n }\n\n var text = this.formatting_.reverse().reduce(function (text, format) {\n return text + '</' + format + '>';\n }, '');\n this.formatting_ = [];\n this[this.mode_](pts, text);\n }; // Mode Implementations\n\n\n Cea608Stream.prototype.popOn = function (pts, text) {\n var baseRow = this.nonDisplayed_[this.row_]; // buffer characters\n\n baseRow += text;\n this.nonDisplayed_[this.row_] = baseRow;\n };\n\n Cea608Stream.prototype.rollUp = function (pts, text) {\n var baseRow = this.displayed_[this.row_];\n baseRow += text;\n this.displayed_[this.row_] = baseRow;\n };\n\n Cea608Stream.prototype.shiftRowsUp_ = function () {\n var i; // clear out inactive rows\n\n for (i = 0; i < this.topRow_; i++) {\n this.displayed_[i] = '';\n }\n\n for (i = this.row_ + 1; i < BOTTOM_ROW + 1; i++) {\n this.displayed_[i] = '';\n } // shift displayed rows up\n\n\n for (i = this.topRow_; i < this.row_; i++) {\n this.displayed_[i] = this.displayed_[i + 1];\n } // clear out the bottom row\n\n\n this.displayed_[this.row_] = '';\n };\n\n Cea608Stream.prototype.paintOn = function (pts, text) {\n var baseRow = this.displayed_[this.row_];\n baseRow += text;\n this.displayed_[this.row_] = baseRow;\n }; // exports\n\n\n var captionStream = {\n CaptionStream: CaptionStream,\n Cea608Stream: Cea608Stream,\n Cea708Stream: Cea708Stream\n };\n\n /**\n * mux.js\n *\n * Copyright (c) Brightcove\n * Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE\n */\n\n var streamTypes = {\n H264_STREAM_TYPE: 0x1B,\n ADTS_STREAM_TYPE: 0x0F,\n METADATA_STREAM_TYPE: 0x15\n };\n\n var MAX_TS = 8589934592;\n var RO_THRESH = 4294967296;\n var TYPE_SHARED = 'shared';\n\n var handleRollover = function handleRollover(value, reference) {\n var direction = 1;\n\n if (value > reference) {\n // If the current timestamp value is greater than our reference timestamp and we detect a\n // timestamp rollover, this means the roll over is happening in the opposite direction.\n // Example scenario: Enter a long stream/video just after a rollover occurred. The reference\n // point will be set to a small number, e.g. 1. The user then seeks backwards over the\n // rollover point. In loading this segment, the timestamp values will be very large,\n // e.g. 2^33 - 1. Since this comes before the data we loaded previously, we want to adjust\n // the time stamp to be `value - 2^33`.\n direction = -1;\n } // Note: A seek forwards or back that is greater than the RO_THRESH (2^32, ~13 hours) will\n // cause an incorrect adjustment.\n\n\n while (Math.abs(reference - value) > RO_THRESH) {\n value += direction * MAX_TS;\n }\n\n return value;\n };\n\n var TimestampRolloverStream = function TimestampRolloverStream(type) {\n var lastDTS, referenceDTS;\n TimestampRolloverStream.prototype.init.call(this); // The \"shared\" type is used in cases where a stream will contain muxed\n // video and audio. We could use `undefined` here, but having a string\n // makes debugging a little clearer.\n\n this.type_ = type || TYPE_SHARED;\n\n this.push = function (data) {\n // Any \"shared\" rollover streams will accept _all_ data. 
Otherwise,\n // streams will only accept data that matches their type.\n if (this.type_ !== TYPE_SHARED && data.type !== this.type_) {\n return;\n }\n\n if (referenceDTS === undefined) {\n referenceDTS = data.dts;\n }\n\n data.dts = handleRollover(data.dts, referenceDTS);\n data.pts = handleRollover(data.pts, referenceDTS);\n lastDTS = data.dts;\n this.trigger('data', data);\n };\n\n this.flush = function () {\n referenceDTS = lastDTS;\n this.trigger('done');\n };\n\n this.endTimeline = function () {\n this.flush();\n this.trigger('endedtimeline');\n };\n\n this.discontinuity = function () {\n referenceDTS = void 0;\n lastDTS = void 0;\n };\n\n this.reset = function () {\n this.discontinuity();\n this.trigger('reset');\n };\n };\n\n TimestampRolloverStream.prototype = new stream();\n var timestampRolloverStream = {\n TimestampRolloverStream: TimestampRolloverStream,\n handleRollover: handleRollover\n };\n\n var percentEncode = function percentEncode(bytes, start, end) {\n var i,\n result = '';\n\n for (i = start; i < end; i++) {\n result += '%' + ('00' + bytes[i].toString(16)).slice(-2);\n }\n\n return result;\n },\n // return the string representation of the specified byte range,\n // interpreted as UTF-8.\n parseUtf8 = function parseUtf8(bytes, start, end) {\n return decodeURIComponent(percentEncode(bytes, start, end));\n },\n // return the string representation of the specified byte range,\n // interpreted as ISO-8859-1.\n parseIso88591 = function parseIso88591(bytes, start, end) {\n return unescape(percentEncode(bytes, start, end)); // jshint ignore:line\n },\n parseSyncSafeInteger = function parseSyncSafeInteger(data) {\n return data[0] << 21 | data[1] << 14 | data[2] << 7 | data[3];\n },\n tagParsers = {\n TXXX: function TXXX(tag) {\n var i;\n\n if (tag.data[0] !== 3) {\n // ignore frames with unrecognized character encodings\n return;\n }\n\n for (i = 1; i < tag.data.length; i++) {\n if (tag.data[i] === 0) {\n // parse the text fields\n tag.description = parseUtf8(tag.data, 1, i); // do not include the null terminator in the tag value\n\n tag.value = parseUtf8(tag.data, i + 1, tag.data.length).replace(/\\0*$/, '');\n break;\n }\n }\n\n tag.data = tag.value;\n },\n WXXX: function WXXX(tag) {\n var i;\n\n if (tag.data[0] !== 3) {\n // ignore frames with unrecognized character encodings\n return;\n }\n\n for (i = 1; i < tag.data.length; i++) {\n if (tag.data[i] === 0) {\n // parse the description and URL fields\n tag.description = parseUtf8(tag.data, 1, i);\n tag.url = parseUtf8(tag.data, i + 1, tag.data.length);\n break;\n }\n }\n },\n PRIV: function PRIV(tag) {\n var i;\n\n for (i = 0; i < tag.data.length; i++) {\n if (tag.data[i] === 0) {\n // parse the owner field\n tag.owner = parseIso88591(tag.data, 0, i);\n break;\n }\n }\n\n tag.privateData = tag.data.subarray(i + 1);\n tag.data = tag.privateData;\n }\n },\n _MetadataStream;\n\n _MetadataStream = function MetadataStream(options) {\n var settings = {\n debug: !!(options && options.debug),\n // the bytes of the program-level descriptor field in MP2T\n // see ISO/IEC 13818-1:2013 (E), section 2.6 \"Program and\n // program element descriptors\"\n descriptor: options && options.descriptor\n },\n // the total size in bytes of the ID3 tag being parsed\n tagSize = 0,\n // tag data that is not complete enough to be parsed\n buffer = [],\n // the total number of bytes currently in the buffer\n bufferSize = 0,\n i;\n\n _MetadataStream.prototype.init.call(this); // calculate the text track in-band metadata track dispatch 
type\n // https://html.spec.whatwg.org/multipage/embedded-content.html#steps-to-expose-a-media-resource-specific-text-track\n\n\n this.dispatchType = streamTypes.METADATA_STREAM_TYPE.toString(16);\n\n if (settings.descriptor) {\n for (i = 0; i < settings.descriptor.length; i++) {\n this.dispatchType += ('00' + settings.descriptor[i].toString(16)).slice(-2);\n }\n }\n\n this.push = function (chunk) {\n var tag, frameStart, frameSize, frame, i, frameHeader;\n\n if (chunk.type !== 'timed-metadata') {\n return;\n } // if data_alignment_indicator is set in the PES header,\n // we must have the start of a new ID3 tag. Assume anything\n // remaining in the buffer was malformed and throw it out\n\n\n if (chunk.dataAlignmentIndicator) {\n bufferSize = 0;\n buffer.length = 0;\n } // ignore events that don't look like ID3 data\n\n\n if (buffer.length === 0 && (chunk.data.length < 10 || chunk.data[0] !== 'I'.charCodeAt(0) || chunk.data[1] !== 'D'.charCodeAt(0) || chunk.data[2] !== '3'.charCodeAt(0))) {\n if (settings.debug) {\n // eslint-disable-next-line no-console\n console.log('Skipping unrecognized metadata packet');\n }\n\n return;\n } // add this chunk to the data we've collected so far\n\n\n buffer.push(chunk);\n bufferSize += chunk.data.byteLength; // grab the size of the entire frame from the ID3 header\n\n if (buffer.length === 1) {\n // the frame size is transmitted as a 28-bit integer in the\n // last four bytes of the ID3 header.\n // The most significant bit of each byte is dropped and the\n // results concatenated to recover the actual value.\n tagSize = parseSyncSafeInteger(chunk.data.subarray(6, 10)); // ID3 reports the tag size excluding the header but it's more\n // convenient for our comparisons to include it\n\n tagSize += 10;\n } // if the entire frame has not arrived, wait for more data\n\n\n if (bufferSize < tagSize) {\n return;\n } // collect the entire frame so it can be parsed\n\n\n tag = {\n data: new Uint8Array(tagSize),\n frames: [],\n pts: buffer[0].pts,\n dts: buffer[0].dts\n };\n\n for (i = 0; i < tagSize;) {\n tag.data.set(buffer[0].data.subarray(0, tagSize - i), i);\n i += buffer[0].data.byteLength;\n bufferSize -= buffer[0].data.byteLength;\n buffer.shift();\n } // find the start of the first frame and the end of the tag\n\n\n frameStart = 10;\n\n if (tag.data[5] & 0x40) {\n // advance the frame start past the extended header\n frameStart += 4; // header size field\n\n frameStart += parseSyncSafeInteger(tag.data.subarray(10, 14)); // clip any padding off the end\n\n tagSize -= parseSyncSafeInteger(tag.data.subarray(16, 20));\n } // parse one or more ID3 frames\n // http://id3.org/id3v2.3.0#ID3v2_frame_overview\n\n\n do {\n // determine the number of bytes in this frame\n frameSize = parseSyncSafeInteger(tag.data.subarray(frameStart + 4, frameStart + 8));\n\n if (frameSize < 1) {\n // eslint-disable-next-line no-console\n return console.log('Malformed ID3 frame encountered. 
Skipping metadata parsing.');\n }\n\n frameHeader = String.fromCharCode(tag.data[frameStart], tag.data[frameStart + 1], tag.data[frameStart + 2], tag.data[frameStart + 3]);\n frame = {\n id: frameHeader,\n data: tag.data.subarray(frameStart + 10, frameStart + frameSize + 10)\n };\n frame.key = frame.id;\n\n if (tagParsers[frame.id]) {\n tagParsers[frame.id](frame); // handle the special PRIV frame used to indicate the start\n // time for raw AAC data\n\n if (frame.owner === 'com.apple.streaming.transportStreamTimestamp') {\n var d = frame.data,\n size = (d[3] & 0x01) << 30 | d[4] << 22 | d[5] << 14 | d[6] << 6 | d[7] >>> 2;\n size *= 4;\n size += d[7] & 0x03;\n frame.timeStamp = size; // in raw AAC, all subsequent data will be timestamped based\n // on the value of this frame\n // we couldn't have known the appropriate pts and dts before\n // parsing this ID3 tag so set those values now\n\n if (tag.pts === undefined && tag.dts === undefined) {\n tag.pts = frame.timeStamp;\n tag.dts = frame.timeStamp;\n }\n\n this.trigger('timestamp', frame);\n }\n }\n\n tag.frames.push(frame);\n frameStart += 10; // advance past the frame header\n\n frameStart += frameSize; // advance past the frame body\n } while (frameStart < tagSize);\n\n this.trigger('data', tag);\n };\n };\n\n _MetadataStream.prototype = new stream();\n var metadataStream = _MetadataStream;\n\n var TimestampRolloverStream$1 = timestampRolloverStream.TimestampRolloverStream; // object types\n\n var _TransportPacketStream, _TransportParseStream, _ElementaryStream; // constants\n\n\n var MP2T_PACKET_LENGTH = 188,\n // bytes\n SYNC_BYTE = 0x47;\n /**\n * Splits an incoming stream of binary data into MPEG-2 Transport\n * Stream packets.\n */\n\n _TransportPacketStream = function TransportPacketStream() {\n var buffer = new Uint8Array(MP2T_PACKET_LENGTH),\n bytesInBuffer = 0;\n\n _TransportPacketStream.prototype.init.call(this); // Deliver new bytes to the stream.\n\n /**\n * Split a stream of data into M2TS packets\n **/\n\n\n this.push = function (bytes) {\n var startIndex = 0,\n endIndex = MP2T_PACKET_LENGTH,\n everything; // If there are bytes remaining from the last segment, prepend them to the\n // bytes that were pushed in\n\n if (bytesInBuffer) {\n everything = new Uint8Array(bytes.byteLength + bytesInBuffer);\n everything.set(buffer.subarray(0, bytesInBuffer));\n everything.set(bytes, bytesInBuffer);\n bytesInBuffer = 0;\n } else {\n everything = bytes;\n } // While we have enough data for a packet\n\n\n while (endIndex < everything.byteLength) {\n // Look for a pair of start and end sync bytes in the data..\n if (everything[startIndex] === SYNC_BYTE && everything[endIndex] === SYNC_BYTE) {\n // We found a packet so emit it and jump one whole packet forward in\n // the stream\n this.trigger('data', everything.subarray(startIndex, endIndex));\n startIndex += MP2T_PACKET_LENGTH;\n endIndex += MP2T_PACKET_LENGTH;\n continue;\n } // If we get here, we have somehow become de-synchronized and we need to step\n // forward one byte at a time until we find a pair of sync bytes that denote\n // a packet\n\n\n startIndex++;\n endIndex++;\n } // If there was some data left over at the end of the segment that couldn't\n // possibly be a whole packet, keep it because it might be the start of a packet\n // that continues in the next segment\n\n\n if (startIndex < everything.byteLength) {\n buffer.set(everything.subarray(startIndex), 0);\n bytesInBuffer = everything.byteLength - startIndex;\n }\n };\n /**\n * Passes identified M2TS packets to the 
TransportParseStream to be parsed\n **/\n\n\n this.flush = function () {\n // If the buffer contains a whole packet when we are being flushed, emit it\n // and empty the buffer. Otherwise hold onto the data because it may be\n // important for decoding the next segment\n if (bytesInBuffer === MP2T_PACKET_LENGTH && buffer[0] === SYNC_BYTE) {\n this.trigger('data', buffer);\n bytesInBuffer = 0;\n }\n\n this.trigger('done');\n };\n\n this.endTimeline = function () {\n this.flush();\n this.trigger('endedtimeline');\n };\n\n this.reset = function () {\n bytesInBuffer = 0;\n this.trigger('reset');\n };\n };\n\n _TransportPacketStream.prototype = new stream();\n /**\n * Accepts an MP2T TransportPacketStream and emits data events with parsed\n * forms of the individual transport stream packets.\n */\n\n _TransportParseStream = function TransportParseStream() {\n var parsePsi, parsePat, parsePmt, self;\n\n _TransportParseStream.prototype.init.call(this);\n\n self = this;\n this.packetsWaitingForPmt = [];\n this.programMapTable = undefined;\n\n parsePsi = function parsePsi(payload, psi) {\n var offset = 0; // PSI packets may be split into multiple sections and those\n // sections may be split into multiple packets. If a PSI\n // section starts in this packet, the payload_unit_start_indicator\n // will be true and the first byte of the payload will indicate\n // the offset from the current position to the start of the\n // section.\n\n if (psi.payloadUnitStartIndicator) {\n offset += payload[offset] + 1;\n }\n\n if (psi.type === 'pat') {\n parsePat(payload.subarray(offset), psi);\n } else {\n parsePmt(payload.subarray(offset), psi);\n }\n };\n\n parsePat = function parsePat(payload, pat) {\n pat.section_number = payload[7]; // eslint-disable-line camelcase\n\n pat.last_section_number = payload[8]; // eslint-disable-line camelcase\n // skip the PSI header and parse the first PMT entry\n\n self.pmtPid = (payload[10] & 0x1F) << 8 | payload[11];\n pat.pmtPid = self.pmtPid;\n };\n /**\n * Parse out the relevant fields of a Program Map Table (PMT).\n * @param payload {Uint8Array} the PMT-specific portion of an MP2T\n * packet. The first byte in this array should be the table_id\n * field.\n * @param pmt {object} the object that should be decorated with\n * fields parsed from the PMT.\n */\n\n\n parsePmt = function parsePmt(payload, pmt) {\n var sectionLength, tableEnd, programInfoLength, offset; // PMTs can be sent ahead of the time when they should actually\n // take effect. We don't believe this should ever be the case\n // for HLS but we'll ignore \"forward\" PMT declarations if we see\n // them. 
Future PMT declarations have the current_next_indicator\n // set to zero.\n\n if (!(payload[5] & 0x01)) {\n return;\n } // overwrite any existing program map table\n\n\n self.programMapTable = {\n video: null,\n audio: null,\n 'timed-metadata': {}\n }; // the mapping table ends at the end of the current section\n\n sectionLength = (payload[1] & 0x0f) << 8 | payload[2];\n tableEnd = 3 + sectionLength - 4; // to determine where the table is, we have to figure out how\n // long the program info descriptors are\n\n programInfoLength = (payload[10] & 0x0f) << 8 | payload[11]; // advance the offset to the first entry in the mapping table\n\n offset = 12 + programInfoLength;\n\n while (offset < tableEnd) {\n var streamType = payload[offset];\n var pid = (payload[offset + 1] & 0x1F) << 8 | payload[offset + 2]; // only map a single elementary_pid for audio and video stream types\n // TODO: should this be done for metadata too? for now maintain behavior of\n // multiple metadata streams\n\n if (streamType === streamTypes.H264_STREAM_TYPE && self.programMapTable.video === null) {\n self.programMapTable.video = pid;\n } else if (streamType === streamTypes.ADTS_STREAM_TYPE && self.programMapTable.audio === null) {\n self.programMapTable.audio = pid;\n } else if (streamType === streamTypes.METADATA_STREAM_TYPE) {\n // map pid to stream type for metadata streams\n self.programMapTable['timed-metadata'][pid] = streamType;\n } // move to the next table entry\n // skip past the elementary stream descriptors, if present\n\n\n offset += ((payload[offset + 3] & 0x0F) << 8 | payload[offset + 4]) + 5;\n } // record the map on the packet as well\n\n\n pmt.programMapTable = self.programMapTable;\n };\n /**\n * Deliver a new MP2T packet to the next stream in the pipeline.\n */\n\n\n this.push = function (packet) {\n var result = {},\n offset = 4;\n result.payloadUnitStartIndicator = !!(packet[1] & 0x40); // pid is a 13-bit field starting at the last bit of packet[1]\n\n result.pid = packet[1] & 0x1f;\n result.pid <<= 8;\n result.pid |= packet[2]; // if an adaption field is present, its length is specified by the\n // fifth byte of the TS packet header. 
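(In the adaptation_field_control\n // bits checked below, values 0b10 and 0b11 mean an adaptation field is present.)\n // 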
The adaptation field is\n // used to add stuffing to PES packets that don't fill a complete\n // TS packet, and to specify some forms of timing and control data\n // that we do not currently use.\n\n if ((packet[3] & 0x30) >>> 4 > 0x01) {\n offset += packet[offset] + 1;\n } // parse the rest of the packet based on the type\n\n\n if (result.pid === 0) {\n result.type = 'pat';\n parsePsi(packet.subarray(offset), result);\n this.trigger('data', result);\n } else if (result.pid === this.pmtPid) {\n result.type = 'pmt';\n parsePsi(packet.subarray(offset), result);\n this.trigger('data', result); // if there are any packets waiting for a PMT to be found, process them now\n\n while (this.packetsWaitingForPmt.length) {\n this.processPes_.apply(this, this.packetsWaitingForPmt.shift());\n }\n } else if (this.programMapTable === undefined) {\n // When we have not seen a PMT yet, defer further processing of\n // PES packets until one has been parsed\n this.packetsWaitingForPmt.push([packet, offset, result]);\n } else {\n this.processPes_(packet, offset, result);\n }\n };\n\n this.processPes_ = function (packet, offset, result) {\n // set the appropriate stream type\n if (result.pid === this.programMapTable.video) {\n result.streamType = streamTypes.H264_STREAM_TYPE;\n } else if (result.pid === this.programMapTable.audio) {\n result.streamType = streamTypes.ADTS_STREAM_TYPE;\n } else {\n // if not video or audio, it is timed-metadata or unknown\n // if unknown, streamType will be undefined\n result.streamType = this.programMapTable['timed-metadata'][result.pid];\n }\n\n result.type = 'pes';\n result.data = packet.subarray(offset);\n this.trigger('data', result);\n };\n };\n\n _TransportParseStream.prototype = new stream();\n _TransportParseStream.STREAM_TYPES = {\n h264: 0x1b,\n adts: 0x0f\n };\n /**\n * Reconstitutes program elementary stream (PES) packets from parsed\n * transport stream packets. That is, if you pipe an\n * mp2t.TransportParseStream into a mp2t.ElementaryStream, the output\n * will be events that capture the bytes for individual PES\n * packets plus relevant metadata that has been extracted from the\n * container.\n */\n\n _ElementaryStream = function ElementaryStream() {\n var self = this,\n // PES packet fragments\n video = {\n data: [],\n size: 0\n },\n audio = {\n data: [],\n size: 0\n },\n timedMetadata = {\n data: [],\n size: 0\n },\n programMapTable,\n parsePes = function parsePes(payload, pes) {\n var ptsDtsFlags;\n var startPrefix = payload[0] << 16 | payload[1] << 8 | payload[2]; // default to an empty array\n\n pes.data = new Uint8Array(); // In certain live streams, the start of a TS fragment has TS packets\n // whose frame data continues from the previous fragment. This\n // check ensures the pes data is the start of a new pes payload\n\n if (startPrefix !== 1) {\n return;\n } // get the packet length, this will be 0 for video\n\n\n pes.packetLength = 6 + (payload[4] << 8 | payload[5]); // find out if this packet starts a new keyframe\n\n pes.dataAlignmentIndicator = (payload[6] & 0x04) !== 0; // PES packets may be annotated with a PTS value, or a PTS value\n // and a DTS value. Determine what combination of values is\n // available to work with.\n\n ptsDtsFlags = payload[7]; // PTS and DTS are normally stored as a 33-bit number. 
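(At 90 kHz, a 33-bit\n // counter wraps roughly every 26.5 hours, hence the rollover handling above.)\n // 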
Javascript\n // performs all bitwise operations on 32-bit integers but javascript\n // supports a much greater range (52-bits) of integer using standard\n // mathematical operations.\n // We construct a 31-bit value using bitwise operators over the 31\n // most significant bits and then multiply by 4 (equal to a left-shift\n // of 2) before we add the final 2 least significant bits of the\n // timestamp (equal to an OR.)\n\n if (ptsDtsFlags & 0xC0) {\n // the PTS and DTS are not written out directly. For information\n // on how they are encoded, see\n // http://dvd.sourceforge.net/dvdinfo/pes-hdr.html\n pes.pts = (payload[9] & 0x0E) << 27 | (payload[10] & 0xFF) << 20 | (payload[11] & 0xFE) << 12 | (payload[12] & 0xFF) << 5 | (payload[13] & 0xFE) >>> 3;\n pes.pts *= 4; // Left shift by 2\n\n pes.pts += (payload[13] & 0x06) >>> 1; // OR by the two LSBs\n\n pes.dts = pes.pts;\n\n if (ptsDtsFlags & 0x40) {\n pes.dts = (payload[14] & 0x0E) << 27 | (payload[15] & 0xFF) << 20 | (payload[16] & 0xFE) << 12 | (payload[17] & 0xFF) << 5 | (payload[18] & 0xFE) >>> 3;\n pes.dts *= 4; // Left shift by 2\n\n pes.dts += (payload[18] & 0x06) >>> 1; // OR by the two LSBs\n }\n } // the data section starts immediately after the PES header.\n // pes_header_data_length specifies the number of header bytes\n // that follow the last byte of the field.\n\n\n pes.data = payload.subarray(9 + payload[8]);\n },\n\n /**\n * Pass completely parsed PES packets to the next stream in the pipeline\n **/\n flushStream = function flushStream(stream, type, forceFlush) {\n var packetData = new Uint8Array(stream.size),\n event = {\n type: type\n },\n i = 0,\n offset = 0,\n packetFlushable = false,\n fragment; // do nothing if there is not enough buffered data for a complete\n // PES header\n\n if (!stream.data.length || stream.size < 9) {\n return;\n }\n\n event.trackId = stream.data[0].pid; // reassemble the packet\n\n for (i = 0; i < stream.data.length; i++) {\n fragment = stream.data[i];\n packetData.set(fragment.data, offset);\n offset += fragment.data.byteLength;\n } // parse assembled packet's PES header\n\n\n parsePes(packetData, event); // non-video PES packets MUST have a non-zero PES_packet_length\n // check that there is enough stream data to fill the packet\n\n packetFlushable = type === 'video' || event.packetLength <= stream.size; // flush pending packets if the conditions are right\n\n if (forceFlush || packetFlushable) {\n stream.size = 0;\n stream.data.length = 0;\n } // only emit packets that are complete. 
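(For instance, if payload[4]\n // and payload[5] encode 100, event.packetLength is 106 counting the 6 byte\n // header, so an audio packet is held until at least 106 bytes are buffered;\n // video sets PES_packet_length to 0 and is treated as always flushable.) 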
this is to avoid assembling\n // incomplete PES packets due to poor segmentation\n\n\n if (packetFlushable) {\n self.trigger('data', event);\n }\n };\n\n _ElementaryStream.prototype.init.call(this);\n /**\n * Identifies M2TS packet types and parses PES packets using metadata\n * parsed from the PMT\n **/\n\n\n this.push = function (data) {\n ({\n pat: function pat() {// we have to wait for the PMT to arrive as well before we\n // have any meaningful metadata\n },\n pes: function pes() {\n var stream, streamType;\n\n switch (data.streamType) {\n case streamTypes.H264_STREAM_TYPE:\n stream = video;\n streamType = 'video';\n break;\n\n case streamTypes.ADTS_STREAM_TYPE:\n stream = audio;\n streamType = 'audio';\n break;\n\n case streamTypes.METADATA_STREAM_TYPE:\n stream = timedMetadata;\n streamType = 'timed-metadata';\n break;\n\n default:\n // ignore unknown stream types\n return;\n } // if a new packet is starting, we can flush the completed\n // packet\n\n\n if (data.payloadUnitStartIndicator) {\n flushStream(stream, streamType, true);\n } // buffer this fragment until we are sure we've received the\n // complete payload\n\n\n stream.data.push(data);\n stream.size += data.data.byteLength;\n },\n pmt: function pmt() {\n var event = {\n type: 'metadata',\n tracks: []\n };\n programMapTable = data.programMapTable; // translate audio and video streams to tracks\n\n if (programMapTable.video !== null) {\n event.tracks.push({\n timelineStartInfo: {\n baseMediaDecodeTime: 0\n },\n id: +programMapTable.video,\n codec: 'avc',\n type: 'video'\n });\n }\n\n if (programMapTable.audio !== null) {\n event.tracks.push({\n timelineStartInfo: {\n baseMediaDecodeTime: 0\n },\n id: +programMapTable.audio,\n codec: 'adts',\n type: 'audio'\n });\n }\n\n self.trigger('data', event);\n }\n })[data.type]();\n };\n\n this.reset = function () {\n video.size = 0;\n video.data.length = 0;\n audio.size = 0;\n audio.data.length = 0;\n this.trigger('reset');\n };\n /**\n * Flush any remaining input. Video PES packets may be of variable\n * length. Normally, the start of a new video packet can trigger the\n * finalization of the previous packet. That is not possible if no\n * more video is forthcoming, however. In that case, some other\n * mechanism (like the end of the file) has to be employed. 
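(A typical\n * arrangement, sketched from the pipeline wiring used later in this file:\n * packetStream.pipe(parseStream).pipe(elementaryStream), with flush()\n * called once the last TS bytes of a segment have been pushed.) 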
When it is\n * clear that no additional data is forthcoming, calling this method\n * will flush the buffered packets.\n */\n\n\n this.flushStreams_ = function () {\n // !!THIS ORDER IS IMPORTANT!!\n // video first then audio\n flushStream(video, 'video');\n flushStream(audio, 'audio');\n flushStream(timedMetadata, 'timed-metadata');\n };\n\n this.flush = function () {\n this.flushStreams_();\n this.trigger('done');\n };\n };\n\n _ElementaryStream.prototype = new stream();\n var m2ts = {\n PAT_PID: 0x0000,\n MP2T_PACKET_LENGTH: MP2T_PACKET_LENGTH,\n TransportPacketStream: _TransportPacketStream,\n TransportParseStream: _TransportParseStream,\n ElementaryStream: _ElementaryStream,\n TimestampRolloverStream: TimestampRolloverStream$1,\n CaptionStream: captionStream.CaptionStream,\n Cea608Stream: captionStream.Cea608Stream,\n Cea708Stream: captionStream.Cea708Stream,\n MetadataStream: metadataStream\n };\n\n for (var type in streamTypes) {\n if (streamTypes.hasOwnProperty(type)) {\n m2ts[type] = streamTypes[type];\n }\n }\n\n var m2ts_1 = m2ts;\n\n /**\n * mux.js\n *\n * Copyright (c) Brightcove\n * Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE\n *\n * Utilities to detect basic properties and metadata about Aac data.\n */\n\n var ADTS_SAMPLING_FREQUENCIES$1 = [96000, 88200, 64000, 48000, 44100, 32000, 24000, 22050, 16000, 12000, 11025, 8000, 7350];\n\n var parseId3TagSize = function parseId3TagSize(header, byteIndex) {\n var returnSize = header[byteIndex + 6] << 21 | header[byteIndex + 7] << 14 | header[byteIndex + 8] << 7 | header[byteIndex + 9],\n flags = header[byteIndex + 5],\n footerPresent = (flags & 16) >> 4; // if we get a negative returnSize clamp it to 0\n\n returnSize = returnSize >= 0 ? returnSize : 0;\n\n if (footerPresent) {\n return returnSize + 20;\n }\n\n return returnSize + 10;\n };\n\n var getId3Offset = function getId3Offset(data, offset) {\n if (data.length - offset < 10 || data[offset] !== 'I'.charCodeAt(0) || data[offset + 1] !== 'D'.charCodeAt(0) || data[offset + 2] !== '3'.charCodeAt(0)) {\n return offset;\n }\n\n offset += parseId3TagSize(data, offset);\n return getId3Offset(data, offset);\n }; // TODO: use vhs-utils\n\n\n var isLikelyAacData = function isLikelyAacData(data) {\n var offset = getId3Offset(data, 0);\n return data.length >= offset + 2 && (data[offset] & 0xFF) === 0xFF && (data[offset + 1] & 0xF0) === 0xF0 && // verify that the 2 layer bits are 0, aka this\n // is not mp3 data but aac data.\n (data[offset + 1] & 0x16) === 0x10;\n };\n\n var parseSyncSafeInteger$1 = function parseSyncSafeInteger(data) {\n return data[0] << 21 | data[1] << 14 | data[2] << 7 | data[3];\n }; // return a percent-encoded representation of the specified byte range\n // @see http://en.wikipedia.org/wiki/Percent-encoding\n\n\n var percentEncode$1 = function percentEncode(bytes, start, end) {\n var i,\n result = '';\n\n for (i = start; i < end; i++) {\n result += '%' + ('00' + bytes[i].toString(16)).slice(-2);\n }\n\n return result;\n }; // return the string representation of the specified byte range,\n // interpreted as ISO-8859-1.\n\n\n var parseIso88591$1 = function parseIso88591(bytes, start, end) {\n return unescape(percentEncode$1(bytes, start, end)); // jshint ignore:line\n };\n\n var parseAdtsSize = function parseAdtsSize(header, byteIndex) {\n var lowThree = (header[byteIndex + 5] & 0xE0) >> 5,\n middle = header[byteIndex + 4] << 3,\n highTwo = header[byteIndex + 3] & 0x3 << 11;\n return highTwo | middle | lowThree;\n };\n\n var parseType$1 = 
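// NOTE: two expressions in this area appear to depend on operator\n // precedence in a way their comments do not suggest. In parseAdtsSize\n // above, << binds tighter than &, so header[byteIndex + 3] & 0x3 << 11\n // is header[byteIndex + 3] & 0x1800, which is always 0 for a byte; the\n // presumably intended form is (header[byteIndex + 3] & 0x3) << 11. In\n // parseType below, === binds tighter than &, so\n // header[byteIndex] & 0xff === 0xff is header[byteIndex] & 1; the\n // presumably intended form is (header[byteIndex] & 0xff) === 0xff.\n // Similarly, in AacStream.push further below,\n // everything.set(everything.subarray(0, tempLength)) reads from the\n // buffer that was just reallocated, so bytes carried over from the\n // previous push appear to be zeroed rather than preserved; keeping a\n // reference to the old buffer before reallocating would preserve them.\n 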
function parseType(header, byteIndex) {\n if (header[byteIndex] === 'I'.charCodeAt(0) && header[byteIndex + 1] === 'D'.charCodeAt(0) && header[byteIndex + 2] === '3'.charCodeAt(0)) {\n return 'timed-metadata';\n } else if (header[byteIndex] & 0xff === 0xff && (header[byteIndex + 1] & 0xf0) === 0xf0) {\n return 'audio';\n }\n\n return null;\n };\n\n var parseSampleRate = function parseSampleRate(packet) {\n var i = 0;\n\n while (i + 5 < packet.length) {\n if (packet[i] !== 0xFF || (packet[i + 1] & 0xF6) !== 0xF0) {\n // If a valid header was not found, jump one forward and attempt to\n // find a valid ADTS header starting at the next byte\n i++;\n continue;\n }\n\n return ADTS_SAMPLING_FREQUENCIES$1[(packet[i + 2] & 0x3c) >>> 2];\n }\n\n return null;\n };\n\n var parseAacTimestamp = function parseAacTimestamp(packet) {\n var frameStart, frameSize, frame, frameHeader; // find the start of the first frame and the end of the tag\n\n frameStart = 10;\n\n if (packet[5] & 0x40) {\n // advance the frame start past the extended header\n frameStart += 4; // header size field\n\n frameStart += parseSyncSafeInteger$1(packet.subarray(10, 14));\n } // parse one or more ID3 frames\n // http://id3.org/id3v2.3.0#ID3v2_frame_overview\n\n\n do {\n // determine the number of bytes in this frame\n frameSize = parseSyncSafeInteger$1(packet.subarray(frameStart + 4, frameStart + 8));\n\n if (frameSize < 1) {\n return null;\n }\n\n frameHeader = String.fromCharCode(packet[frameStart], packet[frameStart + 1], packet[frameStart + 2], packet[frameStart + 3]);\n\n if (frameHeader === 'PRIV') {\n frame = packet.subarray(frameStart + 10, frameStart + frameSize + 10);\n\n for (var i = 0; i < frame.byteLength; i++) {\n if (frame[i] === 0) {\n var owner = parseIso88591$1(frame, 0, i);\n\n if (owner === 'com.apple.streaming.transportStreamTimestamp') {\n var d = frame.subarray(i + 1);\n var size = (d[3] & 0x01) << 30 | d[4] << 22 | d[5] << 14 | d[6] << 6 | d[7] >>> 2;\n size *= 4;\n size += d[7] & 0x03;\n return size;\n }\n\n break;\n }\n }\n }\n\n frameStart += 10; // advance past the frame header\n\n frameStart += frameSize; // advance past the frame body\n } while (frameStart < packet.byteLength);\n\n return null;\n };\n\n var utils = {\n isLikelyAacData: isLikelyAacData,\n parseId3TagSize: parseId3TagSize,\n parseAdtsSize: parseAdtsSize,\n parseType: parseType$1,\n parseSampleRate: parseSampleRate,\n parseAacTimestamp: parseAacTimestamp\n };\n\n var _AacStream;\n /**\n * Splits an incoming stream of binary data into ADTS and ID3 Frames.\n */\n\n\n _AacStream = function AacStream() {\n var everything = new Uint8Array(),\n timeStamp = 0;\n\n _AacStream.prototype.init.call(this);\n\n this.setTimestamp = function (timestamp) {\n timeStamp = timestamp;\n };\n\n this.push = function (bytes) {\n var frameSize = 0,\n byteIndex = 0,\n bytesLeft,\n chunk,\n packet,\n tempLength; // If there are bytes remaining from the last segment, prepend them to the\n // bytes that were pushed in\n\n if (everything.length) {\n tempLength = everything.length;\n everything = new Uint8Array(bytes.byteLength + tempLength);\n everything.set(everything.subarray(0, tempLength));\n everything.set(bytes, tempLength);\n } else {\n everything = bytes;\n }\n\n while (everything.length - byteIndex >= 3) {\n if (everything[byteIndex] === 'I'.charCodeAt(0) && everything[byteIndex + 1] === 'D'.charCodeAt(0) && everything[byteIndex + 2] === '3'.charCodeAt(0)) {\n // Exit early because we don't have enough to parse\n // the ID3 tag header\n if 
(everything.length - byteIndex < 10) {\n break;\n } // check framesize\n\n\n frameSize = utils.parseId3TagSize(everything, byteIndex); // Exit early if we don't have enough in the buffer\n // to emit a full packet\n // Add to byteIndex to support multiple ID3 tags in sequence\n\n if (byteIndex + frameSize > everything.length) {\n break;\n }\n\n chunk = {\n type: 'timed-metadata',\n data: everything.subarray(byteIndex, byteIndex + frameSize)\n };\n this.trigger('data', chunk);\n byteIndex += frameSize;\n continue;\n } else if ((everything[byteIndex] & 0xff) === 0xff && (everything[byteIndex + 1] & 0xf0) === 0xf0) {\n // Exit early because we don't have enough to parse\n // the ADTS frame header\n if (everything.length - byteIndex < 7) {\n break;\n }\n\n frameSize = utils.parseAdtsSize(everything, byteIndex); // Exit early if we don't have enough in the buffer\n // to emit a full packet\n\n if (byteIndex + frameSize > everything.length) {\n break;\n }\n\n packet = {\n type: 'audio',\n data: everything.subarray(byteIndex, byteIndex + frameSize),\n pts: timeStamp,\n dts: timeStamp\n };\n this.trigger('data', packet);\n byteIndex += frameSize;\n continue;\n }\n\n byteIndex++;\n }\n\n bytesLeft = everything.length - byteIndex;\n\n if (bytesLeft > 0) {\n everything = everything.subarray(byteIndex);\n } else {\n everything = new Uint8Array();\n }\n };\n\n this.reset = function () {\n everything = new Uint8Array();\n this.trigger('reset');\n };\n\n this.endTimeline = function () {\n everything = new Uint8Array();\n this.trigger('endedtimeline');\n };\n };\n\n _AacStream.prototype = new stream();\n var aac = _AacStream;\n\n // constants\n var AUDIO_PROPERTIES = ['audioobjecttype', 'channelcount', 'samplerate', 'samplingfrequencyindex', 'samplesize'];\n var audioProperties = AUDIO_PROPERTIES;\n\n var VIDEO_PROPERTIES = ['width', 'height', 'profileIdc', 'levelIdc', 'profileCompatibility', 'sarRatio'];\n var videoProperties = VIDEO_PROPERTIES;\n\n var H264Stream = h264.H264Stream;\n var isLikelyAacData$1 = utils.isLikelyAacData;\n var ONE_SECOND_IN_TS$3 = clock.ONE_SECOND_IN_TS; // object types\n\n var _VideoSegmentStream, _AudioSegmentStream, _Transmuxer, _CoalesceStream;\n /**\n * Compare two arrays (even typed) for same-ness\n */\n\n\n var arrayEquals = function arrayEquals(a, b) {\n var i;\n\n if (a.length !== b.length) {\n return false;\n } // compare the value of each element in the array\n\n\n for (i = 0; i < a.length; i++) {\n if (a[i] !== b[i]) {\n return false;\n }\n }\n\n return true;\n };\n\n var generateSegmentTimingInfo = function generateSegmentTimingInfo(baseMediaDecodeTime, startDts, startPts, endDts, endPts, prependedContentDuration) {\n var ptsOffsetFromDts = startPts - startDts,\n decodeDuration = endDts - startDts,\n presentationDuration = endPts - startPts; // The PTS and DTS values are based on the actual stream times from the segment,\n // however, the player time values will reflect a start from the baseMediaDecodeTime.\n // In order to provide relevant values for the player times, base timing info on the\n // baseMediaDecodeTime and the DTS and PTS durations of the segment.\n\n return {\n start: {\n dts: baseMediaDecodeTime,\n pts: baseMediaDecodeTime + ptsOffsetFromDts\n },\n end: {\n dts: baseMediaDecodeTime + decodeDuration,\n pts: baseMediaDecodeTime + presentationDuration\n },\n prependedContentDuration: prependedContentDuration,\n baseMediaDecodeTime: baseMediaDecodeTime\n };\n };\n /**\n * Constructs a single-track, ISO BMFF media segment from AAC data\n * events. 
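(Sketch of the intended consumption, assuming an MSE\n * SourceBuffer created for audio/mp4 with the mp4a.40.2 codec: append the\n * init segment produced downstream by the coalesce stream once, then append\n * each emitted boxes buffer.) 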
The output of this stream can be fed to a SourceBuffer\n * configured with a suitable initialization segment.\n * @param track {object} track metadata configuration\n * @param options {object} transmuxer options object\n * @param options.keepOriginalTimestamps {boolean} If true, keep the timestamps\n * in the source; false to adjust the first segment to start at 0.\n */\n\n\n _AudioSegmentStream = function AudioSegmentStream(track, options) {\n var adtsFrames = [],\n sequenceNumber = 0,\n earliestAllowedDts = 0,\n audioAppendStartTs = 0,\n videoBaseMediaDecodeTime = Infinity;\n options = options || {};\n\n _AudioSegmentStream.prototype.init.call(this);\n\n this.push = function (data) {\n trackDecodeInfo.collectDtsInfo(track, data);\n\n if (track) {\n audioProperties.forEach(function (prop) {\n track[prop] = data[prop];\n });\n } // buffer audio data until end() is called\n\n\n adtsFrames.push(data);\n };\n\n this.setEarliestDts = function (earliestDts) {\n earliestAllowedDts = earliestDts;\n };\n\n this.setVideoBaseMediaDecodeTime = function (baseMediaDecodeTime) {\n videoBaseMediaDecodeTime = baseMediaDecodeTime;\n };\n\n this.setAudioAppendStart = function (timestamp) {\n audioAppendStartTs = timestamp;\n };\n\n this.flush = function () {\n var frames, moof, mdat, boxes, frameDuration, segmentDuration, videoClockCyclesOfSilencePrefixed; // return early if no audio data has been observed\n\n if (adtsFrames.length === 0) {\n this.trigger('done', 'AudioSegmentStream');\n return;\n }\n\n frames = audioFrameUtils.trimAdtsFramesByEarliestDts(adtsFrames, track, earliestAllowedDts);\n track.baseMediaDecodeTime = trackDecodeInfo.calculateTrackBaseMediaDecodeTime(track, options.keepOriginalTimestamps); // amount of audio filled but the value is in video clock rather than audio clock\n\n videoClockCyclesOfSilencePrefixed = audioFrameUtils.prefixWithSilence(track, frames, audioAppendStartTs, videoBaseMediaDecodeTime); // we have to build the index from byte locations to\n // samples (that is, adts frames) in the audio data\n\n track.samples = audioFrameUtils.generateSampleTable(frames); // concatenate the audio data to constuct the mdat\n\n mdat = mp4Generator.mdat(audioFrameUtils.concatenateFrameData(frames));\n adtsFrames = [];\n moof = mp4Generator.moof(sequenceNumber, [track]);\n boxes = new Uint8Array(moof.byteLength + mdat.byteLength); // bump the sequence number for next time\n\n sequenceNumber++;\n boxes.set(moof);\n boxes.set(mdat, moof.byteLength);\n trackDecodeInfo.clearDtsInfo(track);\n frameDuration = Math.ceil(ONE_SECOND_IN_TS$3 * 1024 / track.samplerate); // TODO this check was added to maintain backwards compatibility (particularly with\n // tests) on adding the timingInfo event. However, it seems unlikely that there's a\n // valid use-case where an init segment/data should be triggered without associated\n // frames. Leaving for now, but should be looked into.\n\n if (frames.length) {\n segmentDuration = frames.length * frameDuration;\n this.trigger('segmentTimingInfo', generateSegmentTimingInfo( // The audio track's baseMediaDecodeTime is in audio clock cycles, but the\n // frame info is in video clock cycles. 
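(As a worked\n // case: at a 44100 Hz samplerate each 1024 sample AAC frame spans\n // Math.ceil(90000 * 1024 / 44100) === 2090 ticks of the 90kHz clock, the\n // frameDuration computed above.) 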
Convert to match expectation of\n // listeners (that all timestamps will be based on video clock cycles).\n clock.audioTsToVideoTs(track.baseMediaDecodeTime, track.samplerate), // frame times are already in video clock, as is segment duration\n frames[0].dts, frames[0].pts, frames[0].dts + segmentDuration, frames[0].pts + segmentDuration, videoClockCyclesOfSilencePrefixed || 0));\n this.trigger('timingInfo', {\n start: frames[0].pts,\n end: frames[0].pts + segmentDuration\n });\n }\n\n this.trigger('data', {\n track: track,\n boxes: boxes\n });\n this.trigger('done', 'AudioSegmentStream');\n };\n\n this.reset = function () {\n trackDecodeInfo.clearDtsInfo(track);\n adtsFrames = [];\n this.trigger('reset');\n };\n };\n\n _AudioSegmentStream.prototype = new stream();\n /**\n * Constructs a single-track, ISO BMFF media segment from H264 data\n * events. The output of this stream can be fed to a SourceBuffer\n * configured with a suitable initialization segment.\n * @param track {object} track metadata configuration\n * @param options {object} transmuxer options object\n * @param options.alignGopsAtEnd {boolean} If true, start from the end of the\n * gopsToAlignWith list when attempting to align gop pts\n * @param options.keepOriginalTimestamps {boolean} If true, keep the timestamps\n * in the source; false to adjust the first segment to start at 0.\n */\n\n _VideoSegmentStream = function VideoSegmentStream(track, options) {\n var sequenceNumber = 0,\n nalUnits = [],\n gopsToAlignWith = [],\n config,\n pps;\n options = options || {};\n\n _VideoSegmentStream.prototype.init.call(this);\n\n delete track.minPTS;\n this.gopCache_ = [];\n /**\n * Constructs a ISO BMFF segment given H264 nalUnits\n * @param {Object} nalUnit A data event representing a nalUnit\n * @param {String} nalUnit.nalUnitType\n * @param {Object} nalUnit.config Properties for a mp4 track\n * @param {Uint8Array} nalUnit.data The nalUnit bytes\n * @see lib/codecs/h264.js\n **/\n\n this.push = function (nalUnit) {\n trackDecodeInfo.collectDtsInfo(track, nalUnit); // record the track config\n\n if (nalUnit.nalUnitType === 'seq_parameter_set_rbsp' && !config) {\n config = nalUnit.config;\n track.sps = [nalUnit.data];\n videoProperties.forEach(function (prop) {\n track[prop] = config[prop];\n }, this);\n }\n\n if (nalUnit.nalUnitType === 'pic_parameter_set_rbsp' && !pps) {\n pps = nalUnit.data;\n track.pps = [nalUnit.data];\n } // buffer video until flush() is called\n\n\n nalUnits.push(nalUnit);\n };\n /**\n * Pass constructed ISO BMFF track and boxes on to the\n * next stream in the pipeline\n **/\n\n\n this.flush = function () {\n var frames,\n gopForFusion,\n gops,\n moof,\n mdat,\n boxes,\n prependedContentDuration = 0,\n firstGop,\n lastGop; // Throw away nalUnits at the start of the byte stream until\n // we find the first AUD\n\n while (nalUnits.length) {\n if (nalUnits[0].nalUnitType === 'access_unit_delimiter_rbsp') {\n break;\n }\n\n nalUnits.shift();\n } // Return early if no video data has been observed\n\n\n if (nalUnits.length === 0) {\n this.resetStream_();\n this.trigger('done', 'VideoSegmentStream');\n return;\n } // Organize the raw nal-units into arrays that represent\n // higher-level constructs such as frames and gops\n // (group-of-pictures)\n\n\n frames = frameUtils.groupNalsIntoFrames(nalUnits);\n gops = frameUtils.groupFramesIntoGops(frames); // If the first frame of this fragment is not a keyframe we have\n // a problem since MSE (on Chrome) requires a leading keyframe.\n //\n // We have two approaches to 
repairing this situation:\n // 1) GOP-FUSION:\n // This is where we keep track of the GOPS (group-of-pictures)\n // from previous fragments and attempt to find one that we can\n // prepend to the current fragment in order to create a valid\n // fragment.\n // 2) KEYFRAME-PULLING:\n // Here we search for the first keyframe in the fragment and\n // throw away all the frames between the start of the fragment\n // and that keyframe. We then extend the duration and pull the\n // PTS of the keyframe forward so that it covers the time range\n // of the frames that were disposed of.\n //\n // #1 is far prefereable over #2 which can cause \"stuttering\" but\n // requires more things to be just right.\n\n if (!gops[0][0].keyFrame) {\n // Search for a gop for fusion from our gopCache\n gopForFusion = this.getGopForFusion_(nalUnits[0], track);\n\n if (gopForFusion) {\n // in order to provide more accurate timing information about the segment, save\n // the number of seconds prepended to the original segment due to GOP fusion\n prependedContentDuration = gopForFusion.duration;\n gops.unshift(gopForFusion); // Adjust Gops' metadata to account for the inclusion of the\n // new gop at the beginning\n\n gops.byteLength += gopForFusion.byteLength;\n gops.nalCount += gopForFusion.nalCount;\n gops.pts = gopForFusion.pts;\n gops.dts = gopForFusion.dts;\n gops.duration += gopForFusion.duration;\n } else {\n // If we didn't find a candidate gop fall back to keyframe-pulling\n gops = frameUtils.extendFirstKeyFrame(gops);\n }\n } // Trim gops to align with gopsToAlignWith\n\n\n if (gopsToAlignWith.length) {\n var alignedGops;\n\n if (options.alignGopsAtEnd) {\n alignedGops = this.alignGopsAtEnd_(gops);\n } else {\n alignedGops = this.alignGopsAtStart_(gops);\n }\n\n if (!alignedGops) {\n // save all the nals in the last GOP into the gop cache\n this.gopCache_.unshift({\n gop: gops.pop(),\n pps: track.pps,\n sps: track.sps\n }); // Keep a maximum of 6 GOPs in the cache\n\n this.gopCache_.length = Math.min(6, this.gopCache_.length); // Clear nalUnits\n\n nalUnits = []; // return early no gops can be aligned with desired gopsToAlignWith\n\n this.resetStream_();\n this.trigger('done', 'VideoSegmentStream');\n return;\n } // Some gops were trimmed. 
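(With made-up numbers: if\n // gopsToAlignWith starts at pts 9000 and the segment's gops have pts 3000\n // and 9000, alignGopsAtStart_ drops the 3000 gop and subtracts its\n // byteLength, nalCount and duration from the group totals.) 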
clear dts info so minSegmentDts and pts are correct\n // when recalculated before sending off to CoalesceStream\n\n\n trackDecodeInfo.clearDtsInfo(track);\n gops = alignedGops;\n }\n\n trackDecodeInfo.collectDtsInfo(track, gops); // First, we have to build the index from byte locations to\n // samples (that is, frames) in the video data\n\n track.samples = frameUtils.generateSampleTable(gops); // Concatenate the video data and construct the mdat\n\n mdat = mp4Generator.mdat(frameUtils.concatenateNalData(gops));\n track.baseMediaDecodeTime = trackDecodeInfo.calculateTrackBaseMediaDecodeTime(track, options.keepOriginalTimestamps);\n this.trigger('processedGopsInfo', gops.map(function (gop) {\n return {\n pts: gop.pts,\n dts: gop.dts,\n byteLength: gop.byteLength\n };\n }));\n firstGop = gops[0];\n lastGop = gops[gops.length - 1];\n this.trigger('segmentTimingInfo', generateSegmentTimingInfo(track.baseMediaDecodeTime, firstGop.dts, firstGop.pts, lastGop.dts + lastGop.duration, lastGop.pts + lastGop.duration, prependedContentDuration));\n this.trigger('timingInfo', {\n start: gops[0].pts,\n end: gops[gops.length - 1].pts + gops[gops.length - 1].duration\n }); // save all the nals in the last GOP into the gop cache\n\n this.gopCache_.unshift({\n gop: gops.pop(),\n pps: track.pps,\n sps: track.sps\n }); // Keep a maximum of 6 GOPs in the cache\n\n this.gopCache_.length = Math.min(6, this.gopCache_.length); // Clear nalUnits\n\n nalUnits = [];\n this.trigger('baseMediaDecodeTime', track.baseMediaDecodeTime);\n this.trigger('timelineStartInfo', track.timelineStartInfo);\n moof = mp4Generator.moof(sequenceNumber, [track]); // it would be great to allocate this array up front instead of\n // throwing away hundreds of media segment fragments\n\n boxes = new Uint8Array(moof.byteLength + mdat.byteLength); // Bump the sequence number for next time\n\n sequenceNumber++;\n boxes.set(moof);\n boxes.set(mdat, moof.byteLength);\n this.trigger('data', {\n track: track,\n boxes: boxes\n });\n this.resetStream_(); // Continue with the flush process now\n\n this.trigger('done', 'VideoSegmentStream');\n };\n\n this.reset = function () {\n this.resetStream_();\n nalUnits = [];\n this.gopCache_.length = 0;\n gopsToAlignWith.length = 0;\n this.trigger('reset');\n };\n\n this.resetStream_ = function () {\n trackDecodeInfo.clearDtsInfo(track); // reset config and pps because they may differ across segments\n // for instance, when we are rendition switching\n\n config = undefined;\n pps = undefined;\n }; // Search for a candidate Gop for gop-fusion from the gop cache and\n // return it or return null if no good candidate was found\n\n\n this.getGopForFusion_ = function (nalUnit) {\n var halfSecond = 45000,\n // Half-a-second in a 90khz clock\n allowableOverlap = 10000,\n // About 3 frames @ 30fps\n nearestDistance = Infinity,\n dtsDistance,\n nearestGopObj,\n currentGop,\n currentGopObj,\n i; // Search for the GOP nearest to the beginning of this nal unit\n\n for (i = 0; i < this.gopCache_.length; i++) {\n currentGopObj = this.gopCache_[i];\n currentGop = currentGopObj.gop; // Reject Gops with different SPS or PPS\n\n if (!(track.pps && arrayEquals(track.pps[0], currentGopObj.pps[0])) || !(track.sps && arrayEquals(track.sps[0], currentGopObj.sps[0]))) {\n continue;\n } // Reject Gops that would require a negative baseMediaDecodeTime\n\n\n if (currentGop.dts < track.timelineStartInfo.dts) {\n continue;\n } // The distance between the end of the gop and the start of the nalUnit\n\n\n dtsDistance = nalUnit.dts - 
currentGop.dts - currentGop.duration; // Only consider GOPS that start before the nal unit and end within\n // a half-second of the nal unit\n\n if (dtsDistance >= -allowableOverlap && dtsDistance <= halfSecond) {\n // Always use the closest GOP we found if there is more than\n // one candidate\n if (!nearestGopObj || nearestDistance > dtsDistance) {\n nearestGopObj = currentGopObj;\n nearestDistance = dtsDistance;\n }\n }\n }\n\n if (nearestGopObj) {\n return nearestGopObj.gop;\n }\n\n return null;\n }; // trim gop list to the first gop found that has a matching pts with a gop in the list\n // of gopsToAlignWith starting from the START of the list\n\n\n this.alignGopsAtStart_ = function (gops) {\n var alignIndex, gopIndex, align, gop, byteLength, nalCount, duration, alignedGops;\n byteLength = gops.byteLength;\n nalCount = gops.nalCount;\n duration = gops.duration;\n alignIndex = gopIndex = 0;\n\n while (alignIndex < gopsToAlignWith.length && gopIndex < gops.length) {\n align = gopsToAlignWith[alignIndex];\n gop = gops[gopIndex];\n\n if (align.pts === gop.pts) {\n break;\n }\n\n if (gop.pts > align.pts) {\n // this current gop starts after the current gop we want to align on, so increment\n // align index\n alignIndex++;\n continue;\n } // current gop starts before the current gop we want to align on. so increment gop\n // index\n\n\n gopIndex++;\n byteLength -= gop.byteLength;\n nalCount -= gop.nalCount;\n duration -= gop.duration;\n }\n\n if (gopIndex === 0) {\n // no gops to trim\n return gops;\n }\n\n if (gopIndex === gops.length) {\n // all gops trimmed, skip appending all gops\n return null;\n }\n\n alignedGops = gops.slice(gopIndex);\n alignedGops.byteLength = byteLength;\n alignedGops.duration = duration;\n alignedGops.nalCount = nalCount;\n alignedGops.pts = alignedGops[0].pts;\n alignedGops.dts = alignedGops[0].dts;\n return alignedGops;\n }; // trim gop list to the first gop found that has a matching pts with a gop in the list\n // of gopsToAlignWith starting from the END of the list\n\n\n this.alignGopsAtEnd_ = function (gops) {\n var alignIndex, gopIndex, align, gop, alignEndIndex, matchFound;\n alignIndex = gopsToAlignWith.length - 1;\n gopIndex = gops.length - 1;\n alignEndIndex = null;\n matchFound = false;\n\n while (alignIndex >= 0 && gopIndex >= 0) {\n align = gopsToAlignWith[alignIndex];\n gop = gops[gopIndex];\n\n if (align.pts === gop.pts) {\n matchFound = true;\n break;\n }\n\n if (align.pts > gop.pts) {\n alignIndex--;\n continue;\n }\n\n if (alignIndex === gopsToAlignWith.length - 1) {\n // gop.pts is greater than the last alignment candidate. 
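(Concretely, with\n // made-up values: for gopsToAlignWith pts of [3000, 6000] and gop pts of\n // [1000, 6000, 9000], the 9000 gop records alignEndIndex = 2 here, the\n // 6000 gop then matches, and gops.slice(1) is returned.) 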
If no match is found\n // by the end of this loop, we still want to append gops that come after this\n // point\n alignEndIndex = gopIndex;\n }\n\n gopIndex--;\n }\n\n if (!matchFound && alignEndIndex === null) {\n return null;\n }\n\n var trimIndex;\n\n if (matchFound) {\n trimIndex = gopIndex;\n } else {\n trimIndex = alignEndIndex;\n }\n\n if (trimIndex === 0) {\n return gops;\n }\n\n var alignedGops = gops.slice(trimIndex);\n var metadata = alignedGops.reduce(function (total, gop) {\n total.byteLength += gop.byteLength;\n total.duration += gop.duration;\n total.nalCount += gop.nalCount;\n return total;\n }, {\n byteLength: 0,\n duration: 0,\n nalCount: 0\n });\n alignedGops.byteLength = metadata.byteLength;\n alignedGops.duration = metadata.duration;\n alignedGops.nalCount = metadata.nalCount;\n alignedGops.pts = alignedGops[0].pts;\n alignedGops.dts = alignedGops[0].dts;\n return alignedGops;\n };\n\n this.alignGopsWith = function (newGopsToAlignWith) {\n gopsToAlignWith = newGopsToAlignWith;\n };\n };\n\n _VideoSegmentStream.prototype = new stream();\n /**\n * A Stream that can combine multiple streams (ie. audio & video)\n * into a single output segment for MSE. Also supports audio-only\n * and video-only streams.\n * @param options {object} transmuxer options object\n * @param options.keepOriginalTimestamps {boolean} If true, keep the timestamps\n * in the source; false to adjust the first segment to start at media timeline start.\n */\n\n _CoalesceStream = function CoalesceStream(options, metadataStream) {\n // Number of Tracks per output segment\n // If greater than 1, we combine multiple\n // tracks into a single segment\n this.numberOfTracks = 0;\n this.metadataStream = metadataStream;\n options = options || {};\n\n if (typeof options.remux !== 'undefined') {\n this.remuxTracks = !!options.remux;\n } else {\n this.remuxTracks = true;\n }\n\n if (typeof options.keepOriginalTimestamps === 'boolean') {\n this.keepOriginalTimestamps = options.keepOriginalTimestamps;\n } else {\n this.keepOriginalTimestamps = false;\n }\n\n this.pendingTracks = [];\n this.videoTrack = null;\n this.pendingBoxes = [];\n this.pendingCaptions = [];\n this.pendingMetadata = [];\n this.pendingBytes = 0;\n this.emittedTracks = 0;\n\n _CoalesceStream.prototype.init.call(this); // Take output from multiple\n\n\n this.push = function (output) {\n // buffer incoming captions until the associated video segment\n // finishes\n if (output.text) {\n return this.pendingCaptions.push(output);\n } // buffer incoming id3 tags until the final flush\n\n\n if (output.frames) {\n return this.pendingMetadata.push(output);\n } // Add this track to the list of pending tracks and store\n // important information required for the construction of\n // the final segment\n\n\n this.pendingTracks.push(output.track);\n this.pendingBytes += output.boxes.byteLength; // TODO: is there an issue for this against chrome?\n // We unshift audio and push video because\n // as of Chrome 75 when switching from\n // one init segment to another if the video\n // mdat does not appear after the audio mdat\n // only audio will play for the duration of our transmux.\n\n if (output.track.type === 'video') {\n this.videoTrack = output.track;\n this.pendingBoxes.push(output.boxes);\n }\n\n if (output.track.type === 'audio') {\n this.audioTrack = output.track;\n this.pendingBoxes.unshift(output.boxes);\n }\n };\n };\n\n _CoalesceStream.prototype = new stream();\n\n _CoalesceStream.prototype.flush = function (flushSource) {\n var offset = 0,\n event = 
{\n captions: [],\n captionStreams: {},\n metadata: [],\n info: {}\n },\n caption,\n id3,\n initSegment,\n timelineStartPts = 0,\n i;\n\n if (this.pendingTracks.length < this.numberOfTracks) {\n if (flushSource !== 'VideoSegmentStream' && flushSource !== 'AudioSegmentStream') {\n // Return because we haven't received a flush from a data-generating\n // portion of the segment (meaning that we have only recieved meta-data\n // or captions.)\n return;\n } else if (this.remuxTracks) {\n // Return until we have enough tracks from the pipeline to remux (if we\n // are remuxing audio and video into a single MP4)\n return;\n } else if (this.pendingTracks.length === 0) {\n // In the case where we receive a flush without any data having been\n // received we consider it an emitted track for the purposes of coalescing\n // `done` events.\n // We do this for the case where there is an audio and video track in the\n // segment but no audio data. (seen in several playlists with alternate\n // audio tracks and no audio present in the main TS segments.)\n this.emittedTracks++;\n\n if (this.emittedTracks >= this.numberOfTracks) {\n this.trigger('done');\n this.emittedTracks = 0;\n }\n\n return;\n }\n }\n\n if (this.videoTrack) {\n timelineStartPts = this.videoTrack.timelineStartInfo.pts;\n videoProperties.forEach(function (prop) {\n event.info[prop] = this.videoTrack[prop];\n }, this);\n } else if (this.audioTrack) {\n timelineStartPts = this.audioTrack.timelineStartInfo.pts;\n audioProperties.forEach(function (prop) {\n event.info[prop] = this.audioTrack[prop];\n }, this);\n }\n\n if (this.videoTrack || this.audioTrack) {\n if (this.pendingTracks.length === 1) {\n event.type = this.pendingTracks[0].type;\n } else {\n event.type = 'combined';\n }\n\n this.emittedTracks += this.pendingTracks.length;\n initSegment = mp4Generator.initSegment(this.pendingTracks); // Create a new typed array to hold the init segment\n\n event.initSegment = new Uint8Array(initSegment.byteLength); // Create an init segment containing a moov\n // and track definitions\n\n event.initSegment.set(initSegment); // Create a new typed array to hold the moof+mdats\n\n event.data = new Uint8Array(this.pendingBytes); // Append each moof+mdat (one per track) together\n\n for (i = 0; i < this.pendingBoxes.length; i++) {\n event.data.set(this.pendingBoxes[i], offset);\n offset += this.pendingBoxes[i].byteLength;\n } // Translate caption PTS times into second offsets to match the\n // video timeline for the segment, and add track info\n\n\n for (i = 0; i < this.pendingCaptions.length; i++) {\n caption = this.pendingCaptions[i];\n caption.startTime = clock.metadataTsToSeconds(caption.startPts, timelineStartPts, this.keepOriginalTimestamps);\n caption.endTime = clock.metadataTsToSeconds(caption.endPts, timelineStartPts, this.keepOriginalTimestamps);\n event.captionStreams[caption.stream] = true;\n event.captions.push(caption);\n } // Translate ID3 frame PTS times into second offsets to match the\n // video timeline for the segment\n\n\n for (i = 0; i < this.pendingMetadata.length; i++) {\n id3 = this.pendingMetadata[i];\n id3.cueTime = clock.metadataTsToSeconds(id3.pts, timelineStartPts, this.keepOriginalTimestamps);\n event.metadata.push(id3);\n } // We add this to every single emitted segment even though we only need\n // it for the first\n\n\n event.metadata.dispatchType = this.metadataStream.dispatchType; // Reset stream state\n\n this.pendingTracks.length = 0;\n this.videoTrack = null;\n this.pendingBoxes.length = 0;\n 
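// (the caption and id3 cue times above were derived, in effect, as\n // (pts - timelineStartPts) / 90000 seconds when keepOriginalTimestamps\n // is false; e.g. startPts 1080000 against timelineStartPts 900000 puts\n // the cue at 2 seconds. The 900000 figure is made up for illustration.)\n 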
this.pendingCaptions.length = 0;\n this.pendingBytes = 0;\n this.pendingMetadata.length = 0; // Emit the built segment\n // We include captions and ID3 tags for backwards compatibility,\n // ideally we should send only video and audio in the data event\n\n this.trigger('data', event); // Emit each caption to the outside world\n // Ideally, this would happen immediately on parsing captions,\n // but we need to ensure that video data is sent back first\n // so that caption timing can be adjusted to match video timing\n\n for (i = 0; i < event.captions.length; i++) {\n caption = event.captions[i];\n this.trigger('caption', caption);\n } // Emit each id3 tag to the outside world\n // Ideally, this would happen immediately on parsing the tag,\n // but we need to ensure that video data is sent back first\n // so that ID3 frame timing can be adjusted to match video timing\n\n\n for (i = 0; i < event.metadata.length; i++) {\n id3 = event.metadata[i];\n this.trigger('id3Frame', id3);\n }\n } // Only emit `done` if all tracks have been flushed and emitted\n\n\n if (this.emittedTracks >= this.numberOfTracks) {\n this.trigger('done');\n this.emittedTracks = 0;\n }\n };\n\n _CoalesceStream.prototype.setRemux = function (val) {\n this.remuxTracks = val;\n };\n /**\n * A Stream that expects MP2T binary data as input and produces\n * corresponding media segments, suitable for use with Media Source\n * Extension (MSE) implementations that support the ISO BMFF byte\n * stream format, like Chrome.\n */\n\n\n _Transmuxer = function Transmuxer(options) {\n var self = this,\n hasFlushed = true,\n videoTrack,\n audioTrack;\n\n _Transmuxer.prototype.init.call(this);\n\n options = options || {};\n this.baseMediaDecodeTime = options.baseMediaDecodeTime || 0;\n this.transmuxPipeline_ = {};\n\n this.setupAacPipeline = function () {\n var pipeline = {};\n this.transmuxPipeline_ = pipeline;\n pipeline.type = 'aac';\n pipeline.metadataStream = new m2ts_1.MetadataStream(); // set up the parsing pipeline\n\n pipeline.aacStream = new aac();\n pipeline.audioTimestampRolloverStream = new m2ts_1.TimestampRolloverStream('audio');\n pipeline.timedMetadataTimestampRolloverStream = new m2ts_1.TimestampRolloverStream('timed-metadata');\n pipeline.adtsStream = new adts();\n pipeline.coalesceStream = new _CoalesceStream(options, pipeline.metadataStream);\n pipeline.headOfPipeline = pipeline.aacStream;\n pipeline.aacStream.pipe(pipeline.audioTimestampRolloverStream).pipe(pipeline.adtsStream);\n pipeline.aacStream.pipe(pipeline.timedMetadataTimestampRolloverStream).pipe(pipeline.metadataStream).pipe(pipeline.coalesceStream);\n pipeline.metadataStream.on('timestamp', function (frame) {\n pipeline.aacStream.setTimestamp(frame.timeStamp);\n });\n pipeline.aacStream.on('data', function (data) {\n if (data.type !== 'timed-metadata' && data.type !== 'audio' || pipeline.audioSegmentStream) {\n return;\n }\n\n audioTrack = audioTrack || {\n timelineStartInfo: {\n baseMediaDecodeTime: self.baseMediaDecodeTime\n },\n codec: 'adts',\n type: 'audio'\n }; // hook up the audio segment stream to the first track with aac data\n\n pipeline.coalesceStream.numberOfTracks++;\n pipeline.audioSegmentStream = new _AudioSegmentStream(audioTrack, options);\n pipeline.audioSegmentStream.on('timingInfo', self.trigger.bind(self, 'audioTimingInfo')); // Set up the final part of the audio pipeline\n\n pipeline.adtsStream.pipe(pipeline.audioSegmentStream).pipe(pipeline.coalesceStream); // emit pmt info\n\n self.trigger('trackinfo', {\n hasAudio: !!audioTrack,\n 
hasVideo: !!videoTrack\n });\n }); // Re-emit any data coming from the coalesce stream to the outside world\n\n pipeline.coalesceStream.on('data', this.trigger.bind(this, 'data')); // Let the consumer know we have finished flushing the entire pipeline\n\n pipeline.coalesceStream.on('done', this.trigger.bind(this, 'done'));\n };\n\n this.setupTsPipeline = function () {\n var pipeline = {};\n this.transmuxPipeline_ = pipeline;\n pipeline.type = 'ts';\n pipeline.metadataStream = new m2ts_1.MetadataStream(); // set up the parsing pipeline\n\n pipeline.packetStream = new m2ts_1.TransportPacketStream();\n pipeline.parseStream = new m2ts_1.TransportParseStream();\n pipeline.elementaryStream = new m2ts_1.ElementaryStream();\n pipeline.timestampRolloverStream = new m2ts_1.TimestampRolloverStream();\n pipeline.adtsStream = new adts();\n pipeline.h264Stream = new H264Stream();\n pipeline.captionStream = new m2ts_1.CaptionStream(options);\n pipeline.coalesceStream = new _CoalesceStream(options, pipeline.metadataStream);\n pipeline.headOfPipeline = pipeline.packetStream; // disassemble MPEG2-TS packets into elementary streams\n\n pipeline.packetStream.pipe(pipeline.parseStream).pipe(pipeline.elementaryStream).pipe(pipeline.timestampRolloverStream); // !!THIS ORDER IS IMPORTANT!!\n // demux the streams\n\n pipeline.timestampRolloverStream.pipe(pipeline.h264Stream);\n pipeline.timestampRolloverStream.pipe(pipeline.adtsStream);\n pipeline.timestampRolloverStream.pipe(pipeline.metadataStream).pipe(pipeline.coalesceStream); // Hook up CEA-608/708 caption stream\n\n pipeline.h264Stream.pipe(pipeline.captionStream).pipe(pipeline.coalesceStream);\n pipeline.elementaryStream.on('data', function (data) {\n var i;\n\n if (data.type === 'metadata') {\n i = data.tracks.length; // scan the tracks listed in the metadata\n\n while (i--) {\n if (!videoTrack && data.tracks[i].type === 'video') {\n videoTrack = data.tracks[i];\n videoTrack.timelineStartInfo.baseMediaDecodeTime = self.baseMediaDecodeTime;\n } else if (!audioTrack && data.tracks[i].type === 'audio') {\n audioTrack = data.tracks[i];\n audioTrack.timelineStartInfo.baseMediaDecodeTime = self.baseMediaDecodeTime;\n }\n } // hook up the video segment stream to the first track with h264 data\n\n\n if (videoTrack && !pipeline.videoSegmentStream) {\n pipeline.coalesceStream.numberOfTracks++;\n pipeline.videoSegmentStream = new _VideoSegmentStream(videoTrack, options);\n pipeline.videoSegmentStream.on('timelineStartInfo', function (timelineStartInfo) {\n // When video emits timelineStartInfo data after a flush, we forward that\n // info to the AudioSegmentStream, if it exists, because video timeline\n // data takes precedence. 
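(For example, with made-up numbers: if video's\n // earliest dts is 126000 and baseMediaDecodeTime is 0, setEarliestDts below\n // causes audio frames with a dts under 126000 to be trimmed during the\n // audio flush.) 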
Do not do this if keepOriginalTimestamps is set,\n // because this is a particularly subtle form of timestamp alteration.\n if (audioTrack && !options.keepOriginalTimestamps) {\n audioTrack.timelineStartInfo = timelineStartInfo; // On the first segment we trim AAC frames that exist before the\n // very earliest DTS we have seen in video because Chrome will\n // interpret any video track with a baseMediaDecodeTime that is\n // non-zero as a gap.\n\n pipeline.audioSegmentStream.setEarliestDts(timelineStartInfo.dts - self.baseMediaDecodeTime);\n }\n });\n pipeline.videoSegmentStream.on('processedGopsInfo', self.trigger.bind(self, 'gopInfo'));\n pipeline.videoSegmentStream.on('segmentTimingInfo', self.trigger.bind(self, 'videoSegmentTimingInfo'));\n pipeline.videoSegmentStream.on('baseMediaDecodeTime', function (baseMediaDecodeTime) {\n if (audioTrack) {\n pipeline.audioSegmentStream.setVideoBaseMediaDecodeTime(baseMediaDecodeTime);\n }\n });\n pipeline.videoSegmentStream.on('timingInfo', self.trigger.bind(self, 'videoTimingInfo')); // Set up the final part of the video pipeline\n\n pipeline.h264Stream.pipe(pipeline.videoSegmentStream).pipe(pipeline.coalesceStream);\n }\n\n if (audioTrack && !pipeline.audioSegmentStream) {\n // hook up the audio segment stream to the first track with aac data\n pipeline.coalesceStream.numberOfTracks++;\n pipeline.audioSegmentStream = new _AudioSegmentStream(audioTrack, options);\n pipeline.audioSegmentStream.on('timingInfo', self.trigger.bind(self, 'audioTimingInfo'));\n pipeline.audioSegmentStream.on('segmentTimingInfo', self.trigger.bind(self, 'audioSegmentTimingInfo')); // Set up the final part of the audio pipeline\n\n pipeline.adtsStream.pipe(pipeline.audioSegmentStream).pipe(pipeline.coalesceStream);\n } // emit pmt info\n\n\n self.trigger('trackinfo', {\n hasAudio: !!audioTrack,\n hasVideo: !!videoTrack\n });\n }\n }); // Re-emit any data coming from the coalesce stream to the outside world\n\n pipeline.coalesceStream.on('data', this.trigger.bind(this, 'data'));\n pipeline.coalesceStream.on('id3Frame', function (id3Frame) {\n id3Frame.dispatchType = pipeline.metadataStream.dispatchType;\n self.trigger('id3Frame', id3Frame);\n });\n pipeline.coalesceStream.on('caption', this.trigger.bind(this, 'caption')); // Let the consumer know we have finished flushing the entire pipeline\n\n pipeline.coalesceStream.on('done', this.trigger.bind(this, 'done'));\n }; // hook up the segment streams once track metadata is delivered\n\n\n this.setBaseMediaDecodeTime = function (baseMediaDecodeTime) {\n var pipeline = this.transmuxPipeline_;\n\n if (!options.keepOriginalTimestamps) {\n this.baseMediaDecodeTime = baseMediaDecodeTime;\n }\n\n if (audioTrack) {\n audioTrack.timelineStartInfo.dts = undefined;\n audioTrack.timelineStartInfo.pts = undefined;\n trackDecodeInfo.clearDtsInfo(audioTrack);\n\n if (pipeline.audioTimestampRolloverStream) {\n pipeline.audioTimestampRolloverStream.discontinuity();\n }\n }\n\n if (videoTrack) {\n if (pipeline.videoSegmentStream) {\n pipeline.videoSegmentStream.gopCache_ = [];\n }\n\n videoTrack.timelineStartInfo.dts = undefined;\n videoTrack.timelineStartInfo.pts = undefined;\n trackDecodeInfo.clearDtsInfo(videoTrack);\n pipeline.captionStream.reset();\n }\n\n if (pipeline.timestampRolloverStream) {\n pipeline.timestampRolloverStream.discontinuity();\n }\n };\n\n this.setAudioAppendStart = function (timestamp) {\n if (audioTrack) {\n this.transmuxPipeline_.audioSegmentStream.setAudioAppendStart(timestamp);\n }\n };\n\n this.setRemux = 
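// (true recombines demuxed audio and video into a single combined\n // segment in the coalesce stream; false emits one segment per track)\n 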
function (val) {\n var pipeline = this.transmuxPipeline_;\n options.remux = val;\n\n if (pipeline && pipeline.coalesceStream) {\n pipeline.coalesceStream.setRemux(val);\n }\n };\n\n this.alignGopsWith = function (gopsToAlignWith) {\n if (videoTrack && this.transmuxPipeline_.videoSegmentStream) {\n this.transmuxPipeline_.videoSegmentStream.alignGopsWith(gopsToAlignWith);\n }\n }; // feed incoming data to the front of the parsing pipeline\n\n\n this.push = function (data) {\n if (hasFlushed) {\n var isAac = isLikelyAacData$1(data);\n\n if (isAac && this.transmuxPipeline_.type !== 'aac') {\n this.setupAacPipeline();\n } else if (!isAac && this.transmuxPipeline_.type !== 'ts') {\n this.setupTsPipeline();\n }\n\n hasFlushed = false;\n }\n\n this.transmuxPipeline_.headOfPipeline.push(data);\n }; // flush any buffered data\n\n\n this.flush = function () {\n hasFlushed = true; // Start at the top of the pipeline and flush all pending work\n\n this.transmuxPipeline_.headOfPipeline.flush();\n };\n\n this.endTimeline = function () {\n this.transmuxPipeline_.headOfPipeline.endTimeline();\n };\n\n this.reset = function () {\n if (this.transmuxPipeline_.headOfPipeline) {\n this.transmuxPipeline_.headOfPipeline.reset();\n }\n }; // Caption data has to be reset when seeking outside buffered range\n\n\n this.resetCaptions = function () {\n if (this.transmuxPipeline_.captionStream) {\n this.transmuxPipeline_.captionStream.reset();\n }\n };\n };\n\n _Transmuxer.prototype = new stream();\n var transmuxer = {\n Transmuxer: _Transmuxer,\n VideoSegmentStream: _VideoSegmentStream,\n AudioSegmentStream: _AudioSegmentStream,\n AUDIO_PROPERTIES: audioProperties,\n VIDEO_PROPERTIES: videoProperties,\n // exported for testing\n generateSegmentTimingInfo: generateSegmentTimingInfo\n };\n\n var discardEmulationPreventionBytes$1 = captionPacketParser.discardEmulationPreventionBytes;\n var CaptionStream$1 = captionStream.CaptionStream;\n /**\n * Maps an offset in the mdat to a sample based on the the size of the samples.\n * Assumes that `parseSamples` has been called first.\n *\n * @param {Number} offset - The offset into the mdat\n * @param {Object[]} samples - An array of samples, parsed using `parseSamples`\n * @return {?Object} The matching sample, or null if no match was found.\n *\n * @see ISO-BMFF-12/2015, Section 8.8.8\n **/\n\n var mapToSample = function mapToSample(offset, samples) {\n var approximateOffset = offset;\n\n for (var i = 0; i < samples.length; i++) {\n var sample = samples[i];\n\n if (approximateOffset < sample.size) {\n return sample;\n }\n\n approximateOffset -= sample.size;\n }\n\n return null;\n };\n /**\n * Finds SEI nal units contained in a Media Data Box.\n * Assumes that `parseSamples` has been called first.\n *\n * @param {Uint8Array} avcStream - The bytes of the mdat\n * @param {Object[]} samples - The samples parsed out by `parseSamples`\n * @param {Number} trackId - The trackId of this video track\n * @return {Object[]} seiNals - the parsed SEI NALUs found.\n * The contents of the seiNal should match what is expected by\n * CaptionStream.push (nalUnitType, size, data, escapedRBSP, pts, dts)\n *\n * @see ISO-BMFF-12/2015, Section 8.1.1\n * @see Rec. 
ITU-T H.264, 7.3.2.3.1\n **/\n\n\n var findSeiNals = function findSeiNals(avcStream, samples, trackId) {\n var avcView = new DataView(avcStream.buffer, avcStream.byteOffset, avcStream.byteLength),\n result = [],\n seiNal,\n i,\n length,\n lastMatchedSample;\n\n for (i = 0; i + 4 < avcStream.length; i += length) {\n length = avcView.getUint32(i);\n i += 4; // Bail if this doesn't appear to be an H264 stream\n\n if (length <= 0) {\n continue;\n }\n\n switch (avcStream[i] & 0x1F) {\n case 0x06:\n var data = avcStream.subarray(i + 1, i + 1 + length);\n var matchingSample = mapToSample(i, samples);\n seiNal = {\n nalUnitType: 'sei_rbsp',\n size: length,\n data: data,\n escapedRBSP: discardEmulationPreventionBytes$1(data),\n trackId: trackId\n };\n\n if (matchingSample) {\n seiNal.pts = matchingSample.pts;\n seiNal.dts = matchingSample.dts;\n lastMatchedSample = matchingSample;\n } else if (lastMatchedSample) {\n // If a matching sample cannot be found, use the last\n // sample's values as they should be as close as possible\n seiNal.pts = lastMatchedSample.pts;\n seiNal.dts = lastMatchedSample.dts;\n } else {\n // eslint-disable-next-line no-console\n console.log(\"We've encountered a nal unit without data. See mux.js#233.\");\n break;\n }\n\n result.push(seiNal);\n break;\n }\n }\n\n return result;\n };\n /**\n * Parses sample information out of Track Run Boxes and calculates\n * the absolute presentation and decode timestamps of each sample.\n *\n * @param {Array} truns - The Trun Run boxes to be parsed\n * @param {Number} baseMediaDecodeTime - base media decode time from tfdt\n @see ISO-BMFF-12/2015, Section 8.8.12\n * @param {Object} tfhd - The parsed Track Fragment Header\n * @see inspect.parseTfhd\n * @return {Object[]} the parsed samples\n *\n * @see ISO-BMFF-12/2015, Section 8.8.8\n **/\n\n\n var parseSamples = function parseSamples(truns, baseMediaDecodeTime, tfhd) {\n var currentDts = baseMediaDecodeTime;\n var defaultSampleDuration = tfhd.defaultSampleDuration || 0;\n var defaultSampleSize = tfhd.defaultSampleSize || 0;\n var trackId = tfhd.trackId;\n var allSamples = [];\n truns.forEach(function (trun) {\n // Note: We currently do not parse the sample table as well\n // as the trun. 
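(As a worked case with default values: two trun samples\n // of duration 3000 starting from a baseMediaDecodeTime of 90000 get dts\n // 90000 and 93000, and pts equal to dts plus each sample's\n // compositionTimeOffset.) 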
It's possible some sources will require this.\n // moov > trak > mdia > minf > stbl\n var trackRun = parseTrun(trun);\n var samples = trackRun.samples;\n samples.forEach(function (sample) {\n if (sample.duration === undefined) {\n sample.duration = defaultSampleDuration;\n }\n\n if (sample.size === undefined) {\n sample.size = defaultSampleSize;\n }\n\n sample.trackId = trackId;\n sample.dts = currentDts;\n\n if (sample.compositionTimeOffset === undefined) {\n sample.compositionTimeOffset = 0;\n }\n\n sample.pts = currentDts + sample.compositionTimeOffset;\n currentDts += sample.duration;\n });\n allSamples = allSamples.concat(samples);\n });\n return allSamples;\n };\n /**\n * Parses out caption nals from an FMP4 segment's video tracks.\n *\n * @param {Uint8Array} segment - The bytes of a single segment\n * @param {Number} videoTrackId - The trackId of a video track in the segment\n * @return {Object.} A mapping of video trackId to\n * a list of seiNals found in that track\n **/\n\n\n var parseCaptionNals = function parseCaptionNals(segment, videoTrackId) {\n // To get the samples\n var trafs = findBox_1(segment, ['moof', 'traf']); // To get SEI NAL units\n\n var mdats = findBox_1(segment, ['mdat']);\n var captionNals = {};\n var mdatTrafPairs = []; // Pair up each traf with a mdat as moofs and mdats are in pairs\n\n mdats.forEach(function (mdat, index) {\n var matchingTraf = trafs[index];\n mdatTrafPairs.push({\n mdat: mdat,\n traf: matchingTraf\n });\n });\n mdatTrafPairs.forEach(function (pair) {\n var mdat = pair.mdat;\n var traf = pair.traf;\n var tfhd = findBox_1(traf, ['tfhd']); // Exactly 1 tfhd per traf\n\n var headerInfo = parseTfhd(tfhd[0]);\n var trackId = headerInfo.trackId;\n var tfdt = findBox_1(traf, ['tfdt']); // Either 0 or 1 tfdt per traf\n\n var baseMediaDecodeTime = tfdt.length > 0 ? 
parseTfdt(tfdt[0]).baseMediaDecodeTime : 0;\n var truns = findBox_1(traf, ['trun']);\n var samples;\n var seiNals; // Only parse video data for the chosen video track\n\n if (videoTrackId === trackId && truns.length > 0) {\n samples = parseSamples(truns, baseMediaDecodeTime, headerInfo);\n seiNals = findSeiNals(mdat, samples, trackId);\n\n if (!captionNals[trackId]) {\n captionNals[trackId] = [];\n }\n\n captionNals[trackId] = captionNals[trackId].concat(seiNals);\n }\n });\n return captionNals;\n };\n /**\n * Parses out inband captions from an MP4 container and returns\n * caption objects that can be used by WebVTT and the TextTrack API.\n * @see https://developer.mozilla.org/en-US/docs/Web/API/VTTCue\n * @see https://developer.mozilla.org/en-US/docs/Web/API/TextTrack\n * Assumes that `probe.getVideoTrackIds` and `probe.timescale` have been called first\n *\n * @param {Uint8Array} segment - The fmp4 segment containing embedded captions\n * @param {Number} trackId - The id of the video track to parse\n * @param {Number} timescale - The timescale for the video track from the init segment\n *\n * @return {?Object[]} parsedCaptions - A list of captions or null if no video tracks\n * @return {Number} parsedCaptions[].startTime - The time to show the caption in seconds\n * @return {Number} parsedCaptions[].endTime - The time to stop showing the caption in seconds\n * @return {String} parsedCaptions[].text - The visible content of the caption\n **/\n\n\n var parseEmbeddedCaptions = function parseEmbeddedCaptions(segment, trackId, timescale) {\n var seiNals; // the ISO-BMFF spec says that trackId can't be zero, but there's some broken content out there\n\n if (trackId === null) {\n return null;\n }\n\n seiNals = parseCaptionNals(segment, trackId);\n return {\n seiNals: seiNals[trackId],\n timescale: timescale\n };\n };\n /**\n * Converts SEI NALUs into captions that can be used by video.js\n **/\n\n\n var CaptionParser = function CaptionParser() {\n var isInitialized = false;\n var captionStream; // Stores segments seen before trackId and timescale are set\n\n var segmentCache; // Stores video track ID of the track being parsed\n\n var trackId; // Stores the timescale of the track being parsed\n\n var timescale; // Stores captions parsed so far\n\n var parsedCaptions; // Stores whether we are receiving partial data or not\n\n var parsingPartial;\n /**\n * A method to indicate whether a CaptionParser has been initalized\n * @returns {Boolean}\n **/\n\n this.isInitialized = function () {\n return isInitialized;\n };\n /**\n * Initializes the underlying CaptionStream, SEI NAL parsing\n * and management, and caption collection\n **/\n\n\n this.init = function (options) {\n captionStream = new CaptionStream$1();\n isInitialized = true;\n parsingPartial = options ? 
options.isPartial : false; // Collect dispatched captions\n\n captionStream.on('data', function (event) {\n // Convert to seconds in the source's timescale\n event.startTime = event.startPts / timescale;\n event.endTime = event.endPts / timescale;\n parsedCaptions.captions.push(event);\n parsedCaptions.captionStreams[event.stream] = true;\n });\n };\n /**\n * Determines if a new video track will be selected\n * or if the timescale changed\n * @return {Boolean}\n **/\n\n\n this.isNewInit = function (videoTrackIds, timescales) {\n if (videoTrackIds && videoTrackIds.length === 0 || timescales && typeof timescales === 'object' && Object.keys(timescales).length === 0) {\n return false;\n }\n\n return trackId !== videoTrackIds[0] || timescale !== timescales[trackId];\n };\n /**\n * Parses out SEI captions and interacts with underlying\n * CaptionStream to return dispatched captions\n *\n * @param {Uint8Array} segment - The fmp4 segment containing embedded captions\n * @param {Number[]} videoTrackIds - A list of video tracks found in the init segment\n * @param {Object.<Number, Number>} timescales - The timescales found in the init segment\n * @see parseEmbeddedCaptions\n * @see m2ts/caption-stream.js\n **/\n\n\n this.parse = function (segment, videoTrackIds, timescales) {\n var parsedData;\n\n if (!this.isInitialized()) {\n return null; // This is not likely to be a video segment\n } else if (!videoTrackIds || !timescales) {\n return null;\n } else if (this.isNewInit(videoTrackIds, timescales)) {\n // Use the first video track only as there is no\n // mechanism to switch to other video tracks\n trackId = videoTrackIds[0];\n timescale = timescales[trackId]; // If an init segment has not been seen yet, hold onto segment\n // data until we have one.\n // the ISO-BMFF spec says that trackId can't be zero, but there's some broken content out there\n } else if (trackId === null || !timescale) {\n segmentCache.push(segment);\n return null;\n } // Now that a timescale and trackId are set, parse cached segments\n\n\n while (segmentCache.length > 0) {\n var cachedSegment = segmentCache.shift();\n this.parse(cachedSegment, videoTrackIds, timescales);\n }\n\n parsedData = parseEmbeddedCaptions(segment, trackId, timescale);\n\n if (parsedData === null || !parsedData.seiNals) {\n return null;\n }\n\n this.pushNals(parsedData.seiNals); // Force the parsed captions to be dispatched\n\n this.flushStream();\n return parsedCaptions;\n };\n /**\n * Pushes SEI NALUs onto CaptionStream\n * @param {Object[]} nals - A list of SEI nals parsed using `parseCaptionNals`\n * Assumes that `parseCaptionNals` has been called first\n * @see m2ts/caption-stream.js\n **/\n\n\n this.pushNals = function (nals) {\n if (!this.isInitialized() || !nals || nals.length === 0) {\n return null;\n }\n\n nals.forEach(function (nal) {\n captionStream.push(nal);\n });\n };\n /**\n * Flushes underlying CaptionStream to dispatch processed, displayable captions\n * @see m2ts/caption-stream.js\n **/\n\n\n this.flushStream = function () {\n if (!this.isInitialized()) {\n return null;\n }\n\n if (!parsingPartial) {\n captionStream.flush();\n } else {\n captionStream.partialFlush();\n }\n };\n /**\n * Reset caption buckets for new data\n **/\n\n\n this.clearParsedCaptions = function () {\n parsedCaptions.captions = [];\n parsedCaptions.captionStreams = {};\n };\n /**\n * Resets underlying CaptionStream\n * @see m2ts/caption-stream.js\n **/\n\n\n this.resetCaptionStream = function () {\n if (!this.isInitialized()) {\n return null;\n }\n\n captionStream.reset();\n };\n 
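/* Editor's sketch (illustrative only, not part of the library): a typical\n * consumer drives this parser with track info probed from the init segment,\n * e.g. via the `probe.getVideoTrackIds` and `probe.timescale` helpers\n * mentioned in the JSDoc above. Variable names here are hypothetical:\n *\n * var parser = new CaptionParser();\n * parser.init();\n * var parsed = parser.parse(segmentBytes, videoTrackIds, timescales);\n * if (parsed) {\n * parsed.captions.forEach(function (cue) {\n * // cue.startTime / cue.endTime are in seconds; cue.text is the caption\n * });\n * }\n */\n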
/**\n * Convenience method to clear all captions flushed from the\n * CaptionStream and still being parsed\n * @see m2ts/caption-stream.js\n **/\n\n\n this.clearAllCaptions = function () {\n this.clearParsedCaptions();\n this.resetCaptionStream();\n };\n /**\n * Reset caption parser\n **/\n\n\n this.reset = function () {\n segmentCache = [];\n trackId = null;\n timescale = null;\n\n if (!parsedCaptions) {\n parsedCaptions = {\n captions: [],\n // CC1, CC2, CC3, CC4\n captionStreams: {}\n };\n } else {\n this.clearParsedCaptions();\n }\n\n this.resetCaptionStream();\n };\n\n this.reset();\n };\n\n var captionParser = CaptionParser;\n\n /**\n * mux.js\n *\n * Copyright (c) Brightcove\n * Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE\n */\n\n var mp4 = {\n generator: mp4Generator,\n probe: probe,\n Transmuxer: transmuxer.Transmuxer,\n AudioSegmentStream: transmuxer.AudioSegmentStream,\n VideoSegmentStream: transmuxer.VideoSegmentStream,\n CaptionParser: captionParser\n };\n\n /**\n * mux.js\n *\n * Copyright (c) Brightcove\n * Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE\n *\n * An object that stores the bytes of an FLV tag and methods for\n * querying and manipulating that data.\n * @see http://download.macromedia.com/f4v/video_file_format_spec_v10_1.pdf\n */\n\n var _FlvTag; // (type:uint, extraData:Boolean = false) extends ByteArray\n\n\n _FlvTag = function FlvTag(type, extraData) {\n var // Counter if this is a metadata tag, nal start marker if this is a video\n // tag. Unused if this is an audio tag\n adHoc = 0,\n // :uint\n // The default size is 16kb but this is not enough to hold iframe\n // data and the resizing algorithm costs a bit so we create a larger\n // starting buffer for video tags\n bufferStartSize = 16384,\n // checks whether the FLV tag has enough capacity to accept the proposed\n // write and re-allocates the internal buffers if necessary\n prepareWrite = function prepareWrite(flv, count) {\n var bytes,\n minLength = flv.position + count;\n\n if (minLength < flv.bytes.byteLength) {\n // there's enough capacity so do nothing\n return;\n } // allocate a new buffer and copy over the data that will not be modified\n\n\n bytes = new Uint8Array(minLength * 2);\n bytes.set(flv.bytes.subarray(0, flv.position), 0);\n flv.bytes = bytes;\n flv.view = new DataView(flv.bytes.buffer);\n },\n // commonly used metadata properties\n widthBytes = _FlvTag.widthBytes || new Uint8Array('width'.length),\n heightBytes = _FlvTag.heightBytes || new Uint8Array('height'.length),\n videocodecidBytes = _FlvTag.videocodecidBytes || new Uint8Array('videocodecid'.length),\n i;\n\n if (!_FlvTag.widthBytes) {\n // calculating the bytes of common metadata names ahead of time makes the\n // corresponding writes faster because we don't have to loop over the\n // characters\n // re-test with test/perf.html if you're planning on changing this\n for (i = 0; i < 'width'.length; i++) {\n widthBytes[i] = 'width'.charCodeAt(i);\n }\n\n for (i = 0; i < 'height'.length; i++) {\n heightBytes[i] = 'height'.charCodeAt(i);\n }\n\n for (i = 0; i < 'videocodecid'.length; i++) {\n videocodecidBytes[i] = 'videocodecid'.charCodeAt(i);\n }\n\n _FlvTag.widthBytes = widthBytes;\n _FlvTag.heightBytes = heightBytes;\n _FlvTag.videocodecidBytes = videocodecidBytes;\n }\n\n this.keyFrame = false; // :Boolean\n\n switch (type) {\n case _FlvTag.VIDEO_TAG:\n this.length = 16; // Start the buffer at 96k (16k * 6) for video tags\n\n bufferStartSize *= 6;\n break;\n\n case _FlvTag.AUDIO_TAG:\n this.length 
= 13;\n this.keyFrame = true;\n break;\n\n case _FlvTag.METADATA_TAG:\n this.length = 29;\n this.keyFrame = true;\n break;\n\n default:\n throw new Error('Unknown FLV tag type');\n }\n\n this.bytes = new Uint8Array(bufferStartSize);\n this.view = new DataView(this.bytes.buffer);\n this.bytes[0] = type;\n this.position = this.length;\n this.keyFrame = extraData; // Defaults to false\n // presentation timestamp\n\n this.pts = 0; // decoder timestamp\n\n this.dts = 0; // ByteArray#writeBytes(bytes:ByteArray, offset:uint = 0, length:uint = 0)\n\n this.writeBytes = function (bytes, offset, length) {\n var start = offset || 0,\n end;\n length = length || bytes.byteLength;\n end = start + length;\n prepareWrite(this, length);\n this.bytes.set(bytes.subarray(start, end), this.position);\n this.position += length;\n this.length = Math.max(this.length, this.position);\n }; // ByteArray#writeByte(value:int):void\n\n\n this.writeByte = function (byte) {\n prepareWrite(this, 1);\n this.bytes[this.position] = byte;\n this.position++;\n this.length = Math.max(this.length, this.position);\n }; // ByteArray#writeShort(value:int):void\n\n\n this.writeShort = function (short) {\n prepareWrite(this, 2);\n this.view.setUint16(this.position, short);\n this.position += 2;\n this.length = Math.max(this.length, this.position);\n }; // Negative index into array\n // (pos:uint):int\n\n\n this.negIndex = function (pos) {\n return this.bytes[this.length - pos];\n }; // The functions below ONLY work when this[0] == VIDEO_TAG.\n // We are not going to check for that because we don't want the overhead\n // (nal:ByteArray = null):int\n\n\n this.nalUnitSize = function () {\n if (adHoc === 0) {\n return 0;\n }\n\n return this.length - (adHoc + 4);\n };\n\n this.startNalUnit = function () {\n // remember position and add 4 bytes\n if (adHoc > 0) {\n throw new Error('Attempted to create new NAL without closing the old one');\n } // reserve 4 bytes for nal unit size\n\n\n adHoc = this.length;\n this.length += 4;\n this.position = this.length;\n }; // (nal:ByteArray = null):void\n\n\n this.endNalUnit = function (nalContainer) {\n var nalStart, // :uint\n nalLength; // :uint\n // Rewind to the marker and write the size\n\n if (this.length === adHoc + 4) {\n // we started a nal unit, but didn't write one, so roll back the 4 byte size value\n this.length -= 4;\n } else if (adHoc > 0) {\n nalStart = adHoc + 4;\n nalLength = this.length - nalStart;\n this.position = adHoc;\n this.view.setUint32(this.position, nalLength);\n this.position = this.length;\n\n if (nalContainer) {\n // Add the tag to the NAL unit\n nalContainer.push(this.bytes.subarray(nalStart, nalStart + nalLength));\n }\n }\n\n adHoc = 0;\n };\n /**\n * Write out a 64-bit floating point valued metadata property. 
This method is\n * called frequently during a typical parse and needs to be fast.\n */\n // (key:String, val:Number):void\n\n\n this.writeMetaDataDouble = function (key, val) {\n var i;\n prepareWrite(this, 2 + key.length + 9); // write size of property name\n\n this.view.setUint16(this.position, key.length);\n this.position += 2; // this next part looks terrible but it improves parser throughput by\n // 10kB/s in my testing\n // write property name\n\n if (key === 'width') {\n this.bytes.set(widthBytes, this.position);\n this.position += 5;\n } else if (key === 'height') {\n this.bytes.set(heightBytes, this.position);\n this.position += 6;\n } else if (key === 'videocodecid') {\n this.bytes.set(videocodecidBytes, this.position);\n this.position += 12;\n } else {\n for (i = 0; i < key.length; i++) {\n this.bytes[this.position] = key.charCodeAt(i);\n this.position++;\n }\n } // skip null byte\n\n\n this.position++; // write property value\n\n this.view.setFloat64(this.position, val);\n this.position += 8; // update flv tag length\n\n this.length = Math.max(this.length, this.position);\n ++adHoc;\n }; // (key:String, val:Boolean):void\n\n\n this.writeMetaDataBoolean = function (key, val) {\n var i;\n prepareWrite(this, 2);\n this.view.setUint16(this.position, key.length);\n this.position += 2;\n\n for (i = 0; i < key.length; i++) {\n // if key.charCodeAt(i) >= 255, handle error\n prepareWrite(this, 1);\n this.bytes[this.position] = key.charCodeAt(i);\n this.position++;\n }\n\n prepareWrite(this, 2);\n this.view.setUint8(this.position, 0x01);\n this.position++;\n this.view.setUint8(this.position, val ? 0x01 : 0x00);\n this.position++;\n this.length = Math.max(this.length, this.position);\n ++adHoc;\n }; // ():ByteArray\n\n\n this.finalize = function () {\n var dtsDelta, // :int\n len; // :int\n\n switch (this.bytes[0]) {\n // Video Data\n case _FlvTag.VIDEO_TAG:\n // We only support AVC, 1 = key frame (for AVC, a seekable\n // frame), 2 = inter frame (for AVC, a non-seekable frame)\n this.bytes[11] = (this.keyFrame || extraData ? 0x10 : 0x20) | 0x07;\n this.bytes[12] = extraData ? 0x00 : 0x01;\n dtsDelta = this.pts - this.dts;\n this.bytes[13] = (dtsDelta & 0x00FF0000) >>> 16;\n this.bytes[14] = (dtsDelta & 0x0000FF00) >>> 8;\n this.bytes[15] = (dtsDelta & 0x000000FF) >>> 0;\n break;\n\n case _FlvTag.AUDIO_TAG:\n this.bytes[11] = 0xAF; // 44 kHz, 16-bit stereo\n\n this.bytes[12] = extraData ? 
0x00 : 0x01;\n break;\n\n case _FlvTag.METADATA_TAG:\n this.position = 11;\n this.view.setUint8(this.position, 0x02); // String type\n\n this.position++;\n this.view.setUint16(this.position, 0x0A); // 10 Bytes\n\n this.position += 2; // set \"onMetaData\"\n\n this.bytes.set([0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x61, 0x44, 0x61, 0x74, 0x61], this.position);\n this.position += 10;\n this.bytes[this.position] = 0x08; // Array type\n\n this.position++;\n this.view.setUint32(this.position, adHoc);\n this.position = this.length;\n this.bytes.set([0, 0, 9], this.position);\n this.position += 3; // End Data Tag\n\n this.length = this.position;\n break;\n }\n\n len = this.length - 11; // write the DataSize field\n\n this.bytes[1] = (len & 0x00FF0000) >>> 16;\n this.bytes[2] = (len & 0x0000FF00) >>> 8;\n this.bytes[3] = (len & 0x000000FF) >>> 0; // write the Timestamp\n\n this.bytes[4] = (this.dts & 0x00FF0000) >>> 16;\n this.bytes[5] = (this.dts & 0x0000FF00) >>> 8;\n this.bytes[6] = (this.dts & 0x000000FF) >>> 0;\n this.bytes[7] = (this.dts & 0xFF000000) >>> 24; // write the StreamID\n\n this.bytes[8] = 0;\n this.bytes[9] = 0;\n this.bytes[10] = 0; // Sometimes we're at the end of the view and have one slot to write a\n // uint32, so prepareWrite with a count of 4, since the view is uint8\n\n prepareWrite(this, 4);\n this.view.setUint32(this.length, this.length);\n this.length += 4;\n this.position += 4; // trim down the byte buffer to what is actually being used\n\n this.bytes = this.bytes.subarray(0, this.length);\n this.frameTime = _FlvTag.frameTime(this.bytes); // if bytes.byteLength isn't equal to this.length, handle error\n\n return this;\n };\n };\n\n _FlvTag.AUDIO_TAG = 0x08; // == 8, :uint\n\n _FlvTag.VIDEO_TAG = 0x09; // == 9, :uint\n\n _FlvTag.METADATA_TAG = 0x12; // == 18, :uint\n // (tag:ByteArray):Boolean\n\n _FlvTag.isAudioFrame = function (tag) {\n return _FlvTag.AUDIO_TAG === tag[0];\n }; // (tag:ByteArray):Boolean\n\n\n _FlvTag.isVideoFrame = function (tag) {\n return _FlvTag.VIDEO_TAG === tag[0];\n }; // (tag:ByteArray):Boolean\n\n\n _FlvTag.isMetaData = function (tag) {\n return _FlvTag.METADATA_TAG === tag[0];\n }; // (tag:ByteArray):Boolean\n\n\n _FlvTag.isKeyFrame = function (tag) {\n if (_FlvTag.isVideoFrame(tag)) {\n return tag[11] === 0x17;\n }\n\n if (_FlvTag.isAudioFrame(tag)) {\n return true;\n }\n\n if (_FlvTag.isMetaData(tag)) {\n return true;\n }\n\n return false;\n }; // (tag:ByteArray):uint\n\n\n _FlvTag.frameTime = function (tag) {\n var pts = tag[4] << 16; // :uint\n\n pts |= tag[5] << 8;\n pts |= tag[6] << 0;\n pts |= tag[7] << 24;\n return pts;\n };\n\n var flvTag = _FlvTag;\n\n /**\n * The final stage of the transmuxer that emits the flv tags\n * for audio, video, and metadata. 
Also translates in time and\n * outputs caption data and id3 cues.\n */\n\n\n var CoalesceStream = function CoalesceStream(options) {\n // Number of Tracks per output segment\n // If greater than 1, we combine multiple\n // tracks into a single segment\n this.numberOfTracks = 0;\n this.metadataStream = options.metadataStream;\n this.videoTags = [];\n this.audioTags = [];\n this.videoTrack = null;\n this.audioTrack = null;\n this.pendingCaptions = [];\n this.pendingMetadata = [];\n this.pendingTracks = 0;\n this.processedTracks = 0;\n CoalesceStream.prototype.init.call(this); // Take output from multiple streams\n\n this.push = function (output) {\n // buffer incoming captions until the associated video segment\n // finishes\n if (output.text) {\n return this.pendingCaptions.push(output);\n } // buffer incoming id3 tags until the final flush\n\n\n if (output.frames) {\n return this.pendingMetadata.push(output);\n }\n\n if (output.track.type === 'video') {\n this.videoTrack = output.track;\n this.videoTags = output.tags;\n this.pendingTracks++;\n }\n\n if (output.track.type === 'audio') {\n this.audioTrack = output.track;\n this.audioTags = output.tags;\n this.pendingTracks++;\n }\n };\n };\n\n CoalesceStream.prototype = new stream();\n\n CoalesceStream.prototype.flush = function (flushSource) {\n var id3,\n caption,\n i,\n timelineStartPts,\n event = {\n tags: {},\n captions: [],\n captionStreams: {},\n metadata: []\n };\n\n if (this.pendingTracks < this.numberOfTracks) {\n if (flushSource !== 'VideoSegmentStream' && flushSource !== 'AudioSegmentStream') {\n // Return because we haven't received a flush from a data-generating\n // portion of the segment (meaning that we have only received meta-data\n // or captions.)\n return;\n } else if (this.pendingTracks === 0) {\n // In the case where we receive a flush without any data having been\n // received we consider it an emitted track for the purposes of coalescing\n // `done` events.\n // We do this for the case where there is an audio and video track in the\n // segment but no audio data. 
(seen in several playlists with alternate\n // audio tracks and no audio present in the main TS segments.)\n this.processedTracks++;\n\n if (this.processedTracks < this.numberOfTracks) {\n return;\n }\n }\n }\n\n this.processedTracks += this.pendingTracks;\n this.pendingTracks = 0;\n\n if (this.processedTracks < this.numberOfTracks) {\n return;\n }\n\n if (this.videoTrack) {\n timelineStartPts = this.videoTrack.timelineStartInfo.pts;\n } else if (this.audioTrack) {\n timelineStartPts = this.audioTrack.timelineStartInfo.pts;\n }\n\n event.tags.videoTags = this.videoTags;\n event.tags.audioTags = this.audioTags; // Translate caption PTS times into second offsets into the\n // video timeline for the segment, and add track info\n\n for (i = 0; i < this.pendingCaptions.length; i++) {\n caption = this.pendingCaptions[i];\n caption.startTime = caption.startPts - timelineStartPts;\n caption.startTime /= 90e3;\n caption.endTime = caption.endPts - timelineStartPts;\n caption.endTime /= 90e3;\n event.captionStreams[caption.stream] = true;\n event.captions.push(caption);\n } // Translate ID3 frame PTS times into second offsets into the\n // video timeline for the segment\n\n\n for (i = 0; i < this.pendingMetadata.length; i++) {\n id3 = this.pendingMetadata[i];\n id3.cueTime = id3.pts - timelineStartPts;\n id3.cueTime /= 90e3;\n event.metadata.push(id3);\n } // We add this to every single emitted segment even though we only need\n // it for the first\n\n\n event.metadata.dispatchType = this.metadataStream.dispatchType; // Reset stream state\n\n this.videoTrack = null;\n this.audioTrack = null;\n this.videoTags = [];\n this.audioTags = [];\n this.pendingCaptions.length = 0;\n this.pendingMetadata.length = 0;\n this.pendingTracks = 0;\n this.processedTracks = 0; // Emit the final segment\n\n this.trigger('data', event);\n this.trigger('done');\n };\n\n var coalesceStream = CoalesceStream;\n\n /**\n * mux.js\n *\n * Copyright (c) Brightcove\n * Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE\n */\n\n var TagList = function TagList() {\n var self = this;\n this.list = [];\n\n this.push = function (tag) {\n this.list.push({\n bytes: tag.bytes,\n dts: tag.dts,\n pts: tag.pts,\n keyFrame: tag.keyFrame,\n metaDataTag: tag.metaDataTag\n });\n };\n\n Object.defineProperty(this, 'length', {\n get: function get() {\n return self.list.length;\n }\n });\n };\n\n var tagList = TagList;\n\n var H264Stream$1 = h264.H264Stream;\n\n var _Transmuxer$1, _VideoSegmentStream$1, _AudioSegmentStream$1, collectTimelineInfo, metaDataTag, extraDataTag;\n /**\n * Store information about the start and end of the track and the\n * duration for each frame/sample we process in order to calculate\n * the baseMediaDecodeTime\n */\n\n\n collectTimelineInfo = function collectTimelineInfo(track, data) {\n if (typeof data.pts === 'number') {\n if (track.timelineStartInfo.pts === undefined) {\n track.timelineStartInfo.pts = data.pts;\n } else {\n track.timelineStartInfo.pts = Math.min(track.timelineStartInfo.pts, data.pts);\n }\n }\n\n if (typeof data.dts === 'number') {\n if (track.timelineStartInfo.dts === undefined) {\n track.timelineStartInfo.dts = data.dts;\n } else {\n track.timelineStartInfo.dts = Math.min(track.timelineStartInfo.dts, data.dts);\n }\n }\n };\n\n metaDataTag = function metaDataTag(track, pts) {\n var tag = new flvTag(flvTag.METADATA_TAG); // :FlvTag\n\n tag.dts = pts;\n tag.pts = pts;\n tag.writeMetaDataDouble('videocodecid', 7);\n tag.writeMetaDataDouble('width', track.width);\n 
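// Editor's note: each writeMetaDataDouble call here emits one AMF0\n // ECMA-array entry: a 16-bit name length, the UTF-8 name, a zero byte\n // (which doubles as the AMF0 "number" type marker, since the buffer is\n // zero-initialized), and a big-endian 64-bit double value.\n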
tag.writeMetaDataDouble('height', track.height);\n return tag;\n };\n\n extraDataTag = function extraDataTag(track, pts) {\n var i,\n tag = new flvTag(flvTag.VIDEO_TAG, true);\n tag.dts = pts;\n tag.pts = pts;\n tag.writeByte(0x01); // version\n\n tag.writeByte(track.profileIdc); // profile\n\n tag.writeByte(track.profileCompatibility); // compatibility\n\n tag.writeByte(track.levelIdc); // level\n\n tag.writeByte(0xFC | 0x03); // reserved (6 bits), NALU length size - 1 (2 bits)\n\n tag.writeByte(0xE0 | 0x01); // reserved (3 bits), num of SPS (5 bits)\n\n tag.writeShort(track.sps[0].length); // 2 bytes for length of SPS\n\n tag.writeBytes(track.sps[0]); // data of SPS\n\n tag.writeByte(track.pps.length); // num of PPS (will there ever be more than 1 PPS?)\n\n for (i = 0; i < track.pps.length; ++i) {\n tag.writeShort(track.pps[i].length); // 2 bytes for length of PPS\n\n tag.writeBytes(track.pps[i]); // data of PPS\n }\n\n return tag;\n };\n /**\n * Constructs a single-track, media segment from AAC data\n * events. The output of this stream can be fed to Flash.\n */\n\n\n _AudioSegmentStream$1 = function AudioSegmentStream(track) {\n var adtsFrames = [],\n videoKeyFrames = [],\n oldExtraData;\n\n _AudioSegmentStream$1.prototype.init.call(this);\n\n this.push = function (data) {\n collectTimelineInfo(track, data);\n\n if (track) {\n track.audioobjecttype = data.audioobjecttype;\n track.channelcount = data.channelcount;\n track.samplerate = data.samplerate;\n track.samplingfrequencyindex = data.samplingfrequencyindex;\n track.samplesize = data.samplesize;\n track.extraData = track.audioobjecttype << 11 | track.samplingfrequencyindex << 7 | track.channelcount << 3;\n }\n\n data.pts = Math.round(data.pts / 90);\n data.dts = Math.round(data.dts / 90); // buffer audio data until end() is called\n\n adtsFrames.push(data);\n };\n\n this.flush = function () {\n var currentFrame,\n adtsFrame,\n lastMetaPts,\n tags = new tagList(); // return early if no audio data has been observed\n\n if (adtsFrames.length === 0) {\n this.trigger('done', 'AudioSegmentStream');\n return;\n }\n\n lastMetaPts = -Infinity;\n\n while (adtsFrames.length) {\n currentFrame = adtsFrames.shift(); // write out a metadata frame at every video key frame\n\n if (videoKeyFrames.length && currentFrame.pts >= videoKeyFrames[0]) {\n lastMetaPts = videoKeyFrames.shift();\n this.writeMetaDataTags(tags, lastMetaPts);\n } // also write out metadata tags every 1 second so that the decoder\n // is re-initialized quickly after seeking into a different\n // audio configuration.\n\n\n if (track.extraData !== oldExtraData || currentFrame.pts - lastMetaPts >= 1000) {\n this.writeMetaDataTags(tags, currentFrame.pts);\n oldExtraData = track.extraData;\n lastMetaPts = currentFrame.pts;\n }\n\n adtsFrame = new flvTag(flvTag.AUDIO_TAG);\n adtsFrame.pts = currentFrame.pts;\n adtsFrame.dts = currentFrame.dts;\n adtsFrame.writeBytes(currentFrame.data);\n tags.push(adtsFrame.finalize());\n }\n\n videoKeyFrames.length = 0;\n oldExtraData = null;\n this.trigger('data', {\n track: track,\n tags: tags.list\n });\n this.trigger('done', 'AudioSegmentStream');\n };\n\n this.writeMetaDataTags = function (tags, pts) {\n var adtsFrame;\n adtsFrame = new flvTag(flvTag.METADATA_TAG); // For audio, DTS is always the same as PTS. 
We want to set the DTS\n // however so we can compare with video DTS to determine approximate\n // packet order\n\n adtsFrame.pts = pts;\n adtsFrame.dts = pts; // AAC is always 10\n\n adtsFrame.writeMetaDataDouble('audiocodecid', 10);\n adtsFrame.writeMetaDataBoolean('stereo', track.channelcount === 2);\n adtsFrame.writeMetaDataDouble('audiosamplerate', track.samplerate); // Is AAC always 16 bit?\n\n adtsFrame.writeMetaDataDouble('audiosamplesize', 16);\n tags.push(adtsFrame.finalize());\n adtsFrame = new flvTag(flvTag.AUDIO_TAG, true); // For audio, DTS is always the same as PTS. We want to set the DTS\n // however so we can compare with video DTS to determine approximate\n // packet order\n\n adtsFrame.pts = pts;\n adtsFrame.dts = pts;\n adtsFrame.view.setUint16(adtsFrame.position, track.extraData);\n adtsFrame.position += 2;\n adtsFrame.length = Math.max(adtsFrame.length, adtsFrame.position);\n tags.push(adtsFrame.finalize());\n };\n\n this.onVideoKeyFrame = function (pts) {\n videoKeyFrames.push(pts);\n };\n };\n\n _AudioSegmentStream$1.prototype = new stream();\n /**\n * Store FlvTags for the h264 stream\n * @param track {object} track metadata configuration\n */\n\n _VideoSegmentStream$1 = function VideoSegmentStream(track) {\n var nalUnits = [],\n config,\n h264Frame;\n\n _VideoSegmentStream$1.prototype.init.call(this);\n\n this.finishFrame = function (tags, frame) {\n if (!frame) {\n return;\n } // Check if keyframe and the length of tags.\n // This makes sure we write metadata on the first frame of a segment.\n\n\n if (config && track && track.newMetadata && (frame.keyFrame || tags.length === 0)) {\n // Push extra data on every IDR frame in case we did a stream change + seek\n var metaTag = metaDataTag(config, frame.dts).finalize();\n var extraTag = extraDataTag(track, frame.dts).finalize();\n metaTag.metaDataTag = extraTag.metaDataTag = true;\n tags.push(metaTag);\n tags.push(extraTag);\n track.newMetadata = false;\n this.trigger('keyframe', frame.dts);\n }\n\n frame.endNalUnit();\n tags.push(frame.finalize());\n h264Frame = null;\n };\n\n this.push = function (data) {\n collectTimelineInfo(track, data);\n data.pts = Math.round(data.pts / 90);\n data.dts = Math.round(data.dts / 90); // buffer video until flush() is called\n\n nalUnits.push(data);\n };\n\n this.flush = function () {\n var currentNal,\n tags = new tagList(); // Throw away nalUnits at the start of the byte stream until we find\n // the first AUD\n\n while (nalUnits.length) {\n if (nalUnits[0].nalUnitType === 'access_unit_delimiter_rbsp') {\n break;\n }\n\n nalUnits.shift();\n } // return early if no video data has been observed\n\n\n if (nalUnits.length === 0) {\n this.trigger('done', 'VideoSegmentStream');\n return;\n }\n\n while (nalUnits.length) {\n currentNal = nalUnits.shift(); // record the track config\n\n if (currentNal.nalUnitType === 'seq_parameter_set_rbsp') {\n track.newMetadata = true;\n config = currentNal.config;\n track.width = config.width;\n track.height = config.height;\n track.sps = [currentNal.data];\n track.profileIdc = config.profileIdc;\n track.levelIdc = config.levelIdc;\n track.profileCompatibility = config.profileCompatibility;\n h264Frame.endNalUnit();\n } else if (currentNal.nalUnitType === 'pic_parameter_set_rbsp') {\n track.newMetadata = true;\n track.pps = [currentNal.data];\n h264Frame.endNalUnit();\n } else if (currentNal.nalUnitType === 'access_unit_delimiter_rbsp') {\n if (h264Frame) {\n this.finishFrame(tags, h264Frame);\n }\n\n h264Frame = new flvTag(flvTag.VIDEO_TAG);\n 
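// Note: each access unit delimiter opens a fresh FLV video tag; the\n // startNalUnit/writeBytes calls below append every NAL of the access\n // unit to it as a length-prefixed unit until the next AUD arrives.\n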
h264Frame.pts = currentNal.pts;\n h264Frame.dts = currentNal.dts;\n } else {\n if (currentNal.nalUnitType === 'slice_layer_without_partitioning_rbsp_idr') {\n // the current sample is a key frame\n h264Frame.keyFrame = true;\n }\n\n h264Frame.endNalUnit();\n }\n\n h264Frame.startNalUnit();\n h264Frame.writeBytes(currentNal.data);\n }\n\n if (h264Frame) {\n this.finishFrame(tags, h264Frame);\n }\n\n this.trigger('data', {\n track: track,\n tags: tags.list\n }); // Continue with the flush process now\n\n this.trigger('done', 'VideoSegmentStream');\n };\n };\n\n _VideoSegmentStream$1.prototype = new stream();\n /**\n * An object that incrementally transmuxes MPEG2 Transport Stream\n * chunks into an FLV.\n */\n\n _Transmuxer$1 = function Transmuxer(options) {\n var self = this,\n packetStream,\n parseStream,\n elementaryStream,\n videoTimestampRolloverStream,\n audioTimestampRolloverStream,\n timedMetadataTimestampRolloverStream,\n adtsStream,\n h264Stream,\n videoSegmentStream,\n audioSegmentStream,\n captionStream,\n coalesceStream$1;\n\n _Transmuxer$1.prototype.init.call(this);\n\n options = options || {}; // expose the metadata stream\n\n this.metadataStream = new m2ts_1.MetadataStream();\n options.metadataStream = this.metadataStream; // set up the parsing pipeline\n\n packetStream = new m2ts_1.TransportPacketStream();\n parseStream = new m2ts_1.TransportParseStream();\n elementaryStream = new m2ts_1.ElementaryStream();\n videoTimestampRolloverStream = new m2ts_1.TimestampRolloverStream('video');\n audioTimestampRolloverStream = new m2ts_1.TimestampRolloverStream('audio');\n timedMetadataTimestampRolloverStream = new m2ts_1.TimestampRolloverStream('timed-metadata');\n adtsStream = new adts();\n h264Stream = new H264Stream$1();\n coalesceStream$1 = new coalesceStream(options); // disassemble MPEG2-TS packets into elementary streams\n\n packetStream.pipe(parseStream).pipe(elementaryStream); // !!THIS ORDER IS IMPORTANT!!\n // demux the streams\n\n elementaryStream.pipe(videoTimestampRolloverStream).pipe(h264Stream);\n elementaryStream.pipe(audioTimestampRolloverStream).pipe(adtsStream);\n elementaryStream.pipe(timedMetadataTimestampRolloverStream).pipe(this.metadataStream).pipe(coalesceStream$1); // if CEA-708 parsing is available, hook up a caption stream\n\n captionStream = new m2ts_1.CaptionStream(options);\n h264Stream.pipe(captionStream).pipe(coalesceStream$1); // hook up the segment streams once track metadata is delivered\n\n elementaryStream.on('data', function (data) {\n var i, videoTrack, audioTrack;\n\n if (data.type === 'metadata') {\n i = data.tracks.length; // scan the tracks listed in the metadata\n\n while (i--) {\n if (data.tracks[i].type === 'video') {\n videoTrack = data.tracks[i];\n } else if (data.tracks[i].type === 'audio') {\n audioTrack = data.tracks[i];\n }\n } // hook up the video segment stream to the first track with h264 data\n\n\n if (videoTrack && !videoSegmentStream) {\n coalesceStream$1.numberOfTracks++;\n videoSegmentStream = new _VideoSegmentStream$1(videoTrack); // Set up the final part of the video pipeline\n\n h264Stream.pipe(videoSegmentStream).pipe(coalesceStream$1);\n }\n\n if (audioTrack && !audioSegmentStream) {\n // hook up the audio segment stream to the first track with aac data\n coalesceStream$1.numberOfTracks++;\n audioSegmentStream = new _AudioSegmentStream$1(audioTrack); // Set up the final part of the audio pipeline\n\n adtsStream.pipe(audioSegmentStream).pipe(coalesceStream$1);\n\n if (videoSegmentStream) {\n 
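// Note: the audio stream listens for video keyframes so it can emit\n // fresh metadata tags at seekable points (see writeMetaDataTags above).\n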
videoSegmentStream.on('keyframe', audioSegmentStream.onVideoKeyFrame);\n }\n }\n }\n }); // feed incoming data to the front of the parsing pipeline\n\n this.push = function (data) {\n packetStream.push(data);\n }; // flush any buffered data\n\n\n this.flush = function () {\n // Start at the top of the pipeline and flush all pending work\n packetStream.flush();\n }; // Caption data has to be reset when seeking outside buffered range\n\n\n this.resetCaptions = function () {\n captionStream.reset();\n }; // Re-emit any data coming from the coalesce stream to the outside world\n\n\n coalesceStream$1.on('data', function (event) {\n self.trigger('data', event);\n }); // Let the consumer know we have finished flushing the entire pipeline\n\n coalesceStream$1.on('done', function () {\n self.trigger('done');\n });\n };\n\n _Transmuxer$1.prototype = new stream(); // forward compatibility\n\n var transmuxer$1 = _Transmuxer$1;\n\n // http://download.macromedia.com/f4v/video_file_format_spec_v10_1.pdf.\n // Technically, this function returns the header and a metadata FLV tag\n // if duration is greater than zero\n // duration in seconds\n // @return {object} the bytes of the FLV header as a Uint8Array\n\n\n var getFlvHeader = function getFlvHeader(duration, audio, video) {\n // :ByteArray {\n var headBytes = new Uint8Array(3 + 1 + 1 + 4),\n head = new DataView(headBytes.buffer),\n metadata,\n result,\n metadataLength; // default arguments\n\n duration = duration || 0;\n audio = audio === undefined ? true : audio;\n video = video === undefined ? true : video; // signature\n\n head.setUint8(0, 0x46); // 'F'\n\n head.setUint8(1, 0x4c); // 'L'\n\n head.setUint8(2, 0x56); // 'V'\n // version\n\n head.setUint8(3, 0x01); // flags\n\n head.setUint8(4, (audio ? 0x04 : 0x00) | (video ? 0x01 : 0x00)); // data offset, should be 9 for FLV v1\n\n head.setUint32(5, headBytes.byteLength); // init the first FLV tag\n\n if (duration <= 0) {\n // no duration available so just write the first field of the first\n // FLV tag\n result = new Uint8Array(headBytes.byteLength + 4);\n result.set(headBytes);\n result.set([0, 0, 0, 0], headBytes.byteLength);\n return result;\n } // write out the duration metadata tag\n\n\n metadata = new flvTag(flvTag.METADATA_TAG);\n metadata.pts = metadata.dts = 0;\n metadata.writeMetaDataDouble('duration', duration);\n metadataLength = metadata.finalize().length;\n result = new Uint8Array(headBytes.byteLength + metadataLength);\n result.set(headBytes);\n result.set(metadata.bytes, headBytes.byteLength); // append the finalized duration tag after the header\n\n return result;\n };\n\n var flvHeader = getFlvHeader;\n\n /**\n * mux.js\n *\n * Copyright (c) Brightcove\n * Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE\n */\n\n var flv = {\n tag: flvTag,\n Transmuxer: transmuxer$1,\n getFlvHeader: flvHeader\n };\n\n /**\n * mux.js\n *\n * Copyright (c) Brightcove\n * Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE\n */\n\n var m2ts$1 = m2ts_1;\n\n var ONE_SECOND_IN_TS$4 = clock.ONE_SECOND_IN_TS;\n /**\n * Constructs a single-track, ISO BMFF media segment from AAC data\n * events. 
The output of this stream can be fed to a SourceBuffer\n * configured with a suitable initialization segment.\n */\n\n var AudioSegmentStream = function AudioSegmentStream(track, options) {\n var adtsFrames = [],\n sequenceNumber = 0,\n earliestAllowedDts = 0,\n audioAppendStartTs = 0,\n videoBaseMediaDecodeTime = Infinity,\n segmentStartPts = null,\n segmentEndPts = null;\n options = options || {};\n AudioSegmentStream.prototype.init.call(this);\n\n this.push = function (data) {\n trackDecodeInfo.collectDtsInfo(track, data);\n\n if (track) {\n audioProperties.forEach(function (prop) {\n track[prop] = data[prop];\n });\n } // buffer audio data until end() is called\n\n\n adtsFrames.push(data);\n };\n\n this.setEarliestDts = function (earliestDts) {\n earliestAllowedDts = earliestDts;\n };\n\n this.setVideoBaseMediaDecodeTime = function (baseMediaDecodeTime) {\n videoBaseMediaDecodeTime = baseMediaDecodeTime;\n };\n\n this.setAudioAppendStart = function (timestamp) {\n audioAppendStartTs = timestamp;\n };\n\n this.processFrames_ = function () {\n var frames, moof, mdat, boxes, timingInfo; // return early if no audio data has been observed\n\n if (adtsFrames.length === 0) {\n return;\n }\n\n frames = audioFrameUtils.trimAdtsFramesByEarliestDts(adtsFrames, track, earliestAllowedDts);\n\n if (frames.length === 0) {\n // return early if the frames are all after the earliest allowed DTS\n // TODO should we clear the adtsFrames?\n return;\n }\n\n track.baseMediaDecodeTime = trackDecodeInfo.calculateTrackBaseMediaDecodeTime(track, options.keepOriginalTimestamps);\n audioFrameUtils.prefixWithSilence(track, frames, audioAppendStartTs, videoBaseMediaDecodeTime); // we have to build the index from byte locations to\n // samples (that is, adts frames) in the audio data\n\n track.samples = audioFrameUtils.generateSampleTable(frames); // concatenate the audio data to construct the mdat\n\n mdat = mp4Generator.mdat(audioFrameUtils.concatenateFrameData(frames));\n adtsFrames = [];\n moof = mp4Generator.moof(sequenceNumber, [track]); // bump the sequence number for next time\n\n sequenceNumber++;\n track.initSegment = mp4Generator.initSegment([track]); // it would be great to allocate this array up front instead of\n // throwing away hundreds of media segment fragments\n\n boxes = new Uint8Array(moof.byteLength + mdat.byteLength);\n boxes.set(moof);\n boxes.set(mdat, moof.byteLength);\n trackDecodeInfo.clearDtsInfo(track);\n\n if (segmentStartPts === null) {\n segmentEndPts = segmentStartPts = frames[0].pts;\n }\n\n segmentEndPts += frames.length * (ONE_SECOND_IN_TS$4 * 1024 / track.samplerate);\n timingInfo = {\n start: segmentStartPts\n };\n this.trigger('timingInfo', timingInfo);\n this.trigger('data', {\n track: track,\n boxes: boxes\n });\n };\n\n this.flush = function () {\n this.processFrames_(); // trigger final timing info\n\n this.trigger('timingInfo', {\n start: segmentStartPts,\n end: segmentEndPts\n });\n this.resetTiming_();\n this.trigger('done', 'AudioSegmentStream');\n };\n\n this.partialFlush = function () {\n this.processFrames_();\n this.trigger('partialdone', 'AudioSegmentStream');\n };\n\n this.endTimeline = function () {\n this.flush();\n this.trigger('endedtimeline', 'AudioSegmentStream');\n };\n\n this.resetTiming_ = function () {\n trackDecodeInfo.clearDtsInfo(track);\n segmentStartPts = null;\n segmentEndPts = null;\n };\n\n this.reset = function () {\n this.resetTiming_();\n adtsFrames = [];\n this.trigger('reset');\n };\n };\n\n AudioSegmentStream.prototype = new stream();\n var 
audioSegmentStream = AudioSegmentStream;\n\n var VideoSegmentStream = function VideoSegmentStream(track, options) {\n var sequenceNumber = 0,\n nalUnits = [],\n frameCache = [],\n // gopsToAlignWith = [],\n config,\n pps,\n segmentStartPts = null,\n segmentEndPts = null,\n gops,\n ensureNextFrameIsKeyFrame = true;\n options = options || {};\n VideoSegmentStream.prototype.init.call(this);\n\n this.push = function (nalUnit) {\n trackDecodeInfo.collectDtsInfo(track, nalUnit);\n\n if (typeof track.timelineStartInfo.dts === 'undefined') {\n track.timelineStartInfo.dts = nalUnit.dts;\n } // record the track config\n\n\n if (nalUnit.nalUnitType === 'seq_parameter_set_rbsp' && !config) {\n config = nalUnit.config;\n track.sps = [nalUnit.data];\n videoProperties.forEach(function (prop) {\n track[prop] = config[prop];\n }, this);\n }\n\n if (nalUnit.nalUnitType === 'pic_parameter_set_rbsp' && !pps) {\n pps = nalUnit.data;\n track.pps = [nalUnit.data];\n } // buffer video until flush() is called\n\n\n nalUnits.push(nalUnit);\n };\n\n this.processNals_ = function (cacheLastFrame) {\n var i;\n nalUnits = frameCache.concat(nalUnits); // Throw away nalUnits at the start of the byte stream until\n // we find the first AUD\n\n while (nalUnits.length) {\n if (nalUnits[0].nalUnitType === 'access_unit_delimiter_rbsp') {\n break;\n }\n\n nalUnits.shift();\n } // Return early if no video data has been observed\n\n\n if (nalUnits.length === 0) {\n return;\n }\n\n var frames = frameUtils.groupNalsIntoFrames(nalUnits);\n\n if (!frames.length) {\n return;\n } // note that the frame cache may also protect us from cases where we haven't\n // pushed data for the entire first or last frame yet\n\n\n frameCache = frames[frames.length - 1];\n\n if (cacheLastFrame) {\n frames.pop();\n frames.duration -= frameCache.duration;\n frames.nalCount -= frameCache.length;\n frames.byteLength -= frameCache.byteLength;\n }\n\n if (!frames.length) {\n nalUnits = [];\n return;\n }\n\n this.trigger('timelineStartInfo', track.timelineStartInfo);\n\n if (ensureNextFrameIsKeyFrame) {\n gops = frameUtils.groupFramesIntoGops(frames);\n\n if (!gops[0][0].keyFrame) {\n gops = frameUtils.extendFirstKeyFrame(gops);\n\n if (!gops[0][0].keyFrame) {\n // we haven't yet gotten a key frame, so reset nal units to wait for more nal\n // units\n nalUnits = [].concat.apply([], frames).concat(frameCache);\n frameCache = [];\n return;\n }\n\n frames = [].concat.apply([], gops);\n frames.duration = gops.duration;\n }\n\n ensureNextFrameIsKeyFrame = false;\n }\n\n if (segmentStartPts === null) {\n segmentStartPts = frames[0].pts;\n segmentEndPts = segmentStartPts;\n }\n\n segmentEndPts += frames.duration;\n this.trigger('timingInfo', {\n start: segmentStartPts,\n end: segmentEndPts\n });\n\n for (i = 0; i < frames.length; i++) {\n var frame = frames[i];\n track.samples = frameUtils.generateSampleTableForFrame(frame);\n var mdat = mp4Generator.mdat(frameUtils.concatenateNalDataForFrame(frame));\n trackDecodeInfo.clearDtsInfo(track);\n trackDecodeInfo.collectDtsInfo(track, frame);\n track.baseMediaDecodeTime = trackDecodeInfo.calculateTrackBaseMediaDecodeTime(track, options.keepOriginalTimestamps);\n var moof = mp4Generator.moof(sequenceNumber, [track]);\n sequenceNumber++;\n track.initSegment = mp4Generator.initSegment([track]);\n var boxes = new Uint8Array(moof.byteLength + mdat.byteLength);\n boxes.set(moof);\n boxes.set(mdat, moof.byteLength);\n this.trigger('data', {\n track: track,\n boxes: boxes,\n sequence: sequenceNumber,\n videoFrameDts: 
frame.dts,\n videoFramePts: frame.pts\n });\n }\n\n nalUnits = [];\n };\n\n this.resetTimingAndConfig_ = function () {\n config = undefined;\n pps = undefined;\n segmentStartPts = null;\n segmentEndPts = null;\n };\n\n this.partialFlush = function () {\n this.processNals_(true);\n this.trigger('partialdone', 'VideoSegmentStream');\n };\n\n this.flush = function () {\n this.processNals_(false); // reset config and pps because they may differ across segments\n // for instance, when we are rendition switching\n\n this.resetTimingAndConfig_();\n this.trigger('done', 'VideoSegmentStream');\n };\n\n this.endTimeline = function () {\n this.flush();\n this.trigger('endedtimeline', 'VideoSegmentStream');\n };\n\n this.reset = function () {\n this.resetTimingAndConfig_();\n frameCache = [];\n nalUnits = [];\n ensureNextFrameIsKeyFrame = true;\n this.trigger('reset');\n };\n };\n\n VideoSegmentStream.prototype = new stream();\n var videoSegmentStream = VideoSegmentStream;\n\n var isLikelyAacData$2 = utils.isLikelyAacData;\n\n var createPipeline = function createPipeline(object) {\n object.prototype = new stream();\n object.prototype.init.call(object);\n return object;\n };\n\n var tsPipeline = function tsPipeline(options) {\n var pipeline = {\n type: 'ts',\n tracks: {\n audio: null,\n video: null\n },\n packet: new m2ts_1.TransportPacketStream(),\n parse: new m2ts_1.TransportParseStream(),\n elementary: new m2ts_1.ElementaryStream(),\n timestampRollover: new m2ts_1.TimestampRolloverStream(),\n adts: new codecs.Adts(),\n h264: new codecs.h264.H264Stream(),\n captionStream: new m2ts_1.CaptionStream(options),\n metadataStream: new m2ts_1.MetadataStream()\n };\n pipeline.headOfPipeline = pipeline.packet; // Transport Stream\n\n pipeline.packet.pipe(pipeline.parse).pipe(pipeline.elementary).pipe(pipeline.timestampRollover); // H264\n\n pipeline.timestampRollover.pipe(pipeline.h264); // Hook up CEA-608/708 caption stream\n\n pipeline.h264.pipe(pipeline.captionStream);\n pipeline.timestampRollover.pipe(pipeline.metadataStream); // ADTS\n\n pipeline.timestampRollover.pipe(pipeline.adts);\n pipeline.elementary.on('data', function (data) {\n if (data.type !== 'metadata') {\n return;\n }\n\n for (var i = 0; i < data.tracks.length; i++) {\n if (!pipeline.tracks[data.tracks[i].type]) {\n pipeline.tracks[data.tracks[i].type] = data.tracks[i];\n pipeline.tracks[data.tracks[i].type].timelineStartInfo.baseMediaDecodeTime = options.baseMediaDecodeTime;\n }\n }\n\n if (pipeline.tracks.video && !pipeline.videoSegmentStream) {\n pipeline.videoSegmentStream = new videoSegmentStream(pipeline.tracks.video, options);\n pipeline.videoSegmentStream.on('timelineStartInfo', function (timelineStartInfo) {\n if (pipeline.tracks.audio && !options.keepOriginalTimestamps) {\n pipeline.audioSegmentStream.setEarliestDts(timelineStartInfo.dts - options.baseMediaDecodeTime);\n }\n });\n pipeline.videoSegmentStream.on('timingInfo', pipeline.trigger.bind(pipeline, 'videoTimingInfo'));\n pipeline.videoSegmentStream.on('data', function (data) {\n pipeline.trigger('data', {\n type: 'video',\n data: data\n });\n });\n pipeline.videoSegmentStream.on('done', pipeline.trigger.bind(pipeline, 'done'));\n pipeline.videoSegmentStream.on('partialdone', pipeline.trigger.bind(pipeline, 'partialdone'));\n pipeline.videoSegmentStream.on('endedtimeline', pipeline.trigger.bind(pipeline, 'endedtimeline'));\n pipeline.h264.pipe(pipeline.videoSegmentStream);\n }\n\n if (pipeline.tracks.audio && !pipeline.audioSegmentStream) {\n pipeline.audioSegmentStream = 
new audioSegmentStream(pipeline.tracks.audio, options);\n pipeline.audioSegmentStream.on('data', function (data) {\n pipeline.trigger('data', {\n type: 'audio',\n data: data\n });\n });\n pipeline.audioSegmentStream.on('done', pipeline.trigger.bind(pipeline, 'done'));\n pipeline.audioSegmentStream.on('partialdone', pipeline.trigger.bind(pipeline, 'partialdone'));\n pipeline.audioSegmentStream.on('endedtimeline', pipeline.trigger.bind(pipeline, 'endedtimeline'));\n pipeline.audioSegmentStream.on('timingInfo', pipeline.trigger.bind(pipeline, 'audioTimingInfo'));\n pipeline.adts.pipe(pipeline.audioSegmentStream);\n } // emit pmt info\n\n\n pipeline.trigger('trackinfo', {\n hasAudio: !!pipeline.tracks.audio,\n hasVideo: !!pipeline.tracks.video\n });\n });\n pipeline.captionStream.on('data', function (caption) {\n var timelineStartPts;\n\n if (pipeline.tracks.video) {\n timelineStartPts = pipeline.tracks.video.timelineStartInfo.pts || 0;\n } else {\n // This will only happen if we encounter caption packets before\n // video data in a segment. This is an unusual/unlikely scenario,\n // so we assume the timeline starts at zero for now.\n timelineStartPts = 0;\n } // Translate caption PTS times into second offsets into the\n // video timeline for the segment\n\n\n caption.startTime = clock.metadataTsToSeconds(caption.startPts, timelineStartPts, options.keepOriginalTimestamps);\n caption.endTime = clock.metadataTsToSeconds(caption.endPts, timelineStartPts, options.keepOriginalTimestamps);\n pipeline.trigger('caption', caption);\n });\n pipeline = createPipeline(pipeline);\n pipeline.metadataStream.on('data', pipeline.trigger.bind(pipeline, 'id3Frame'));\n return pipeline;\n };\n\n var aacPipeline = function aacPipeline(options) {\n var pipeline = {\n type: 'aac',\n tracks: {\n audio: null\n },\n metadataStream: new m2ts_1.MetadataStream(),\n aacStream: new aac(),\n audioRollover: new m2ts_1.TimestampRolloverStream('audio'),\n timedMetadataRollover: new m2ts_1.TimestampRolloverStream('timed-metadata'),\n adtsStream: new adts(true)\n }; // set up the parsing pipeline\n\n pipeline.headOfPipeline = pipeline.aacStream;\n pipeline.aacStream.pipe(pipeline.audioRollover).pipe(pipeline.adtsStream);\n pipeline.aacStream.pipe(pipeline.timedMetadataRollover).pipe(pipeline.metadataStream);\n pipeline.metadataStream.on('timestamp', function (frame) {\n pipeline.aacStream.setTimestamp(frame.timeStamp);\n });\n pipeline.aacStream.on('data', function (data) {\n if (data.type !== 'timed-metadata' && data.type !== 'audio' || pipeline.audioSegmentStream) {\n return;\n }\n\n pipeline.tracks.audio = pipeline.tracks.audio || {\n timelineStartInfo: {\n baseMediaDecodeTime: options.baseMediaDecodeTime\n },\n codec: 'adts',\n type: 'audio'\n }; // hook up the audio segment stream to the first track with aac data\n\n pipeline.audioSegmentStream = new audioSegmentStream(pipeline.tracks.audio, options);\n pipeline.audioSegmentStream.on('data', function (data) {\n pipeline.trigger('data', {\n type: 'audio',\n data: data\n });\n });\n pipeline.audioSegmentStream.on('partialdone', pipeline.trigger.bind(pipeline, 'partialdone'));\n pipeline.audioSegmentStream.on('done', pipeline.trigger.bind(pipeline, 'done'));\n pipeline.audioSegmentStream.on('endedtimeline', pipeline.trigger.bind(pipeline, 'endedtimeline'));\n pipeline.audioSegmentStream.on('timingInfo', pipeline.trigger.bind(pipeline, 'audioTimingInfo')); // Set up the final part of the audio pipeline\n\n pipeline.adtsStream.pipe(pipeline.audioSegmentStream);\n 
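// At this point the audio-only pipeline is fully wired:\n // aacStream -> audioRollover -> adtsStream -> audioSegmentStream,\n // with timed metadata split off through timedMetadataRollover.\n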
pipeline.trigger('trackinfo', {\n hasAudio: !!pipeline.tracks.audio,\n hasVideo: !!pipeline.tracks.video\n });\n }); // set the pipeline up as a stream before binding to get access to the trigger function\n\n pipeline = createPipeline(pipeline);\n pipeline.metadataStream.on('data', pipeline.trigger.bind(pipeline, 'id3Frame'));\n return pipeline;\n };\n\n var setupPipelineListeners = function setupPipelineListeners(pipeline, transmuxer) {\n pipeline.on('data', transmuxer.trigger.bind(transmuxer, 'data'));\n pipeline.on('done', transmuxer.trigger.bind(transmuxer, 'done'));\n pipeline.on('partialdone', transmuxer.trigger.bind(transmuxer, 'partialdone'));\n pipeline.on('endedtimeline', transmuxer.trigger.bind(transmuxer, 'endedtimeline'));\n pipeline.on('audioTimingInfo', transmuxer.trigger.bind(transmuxer, 'audioTimingInfo'));\n pipeline.on('videoTimingInfo', transmuxer.trigger.bind(transmuxer, 'videoTimingInfo'));\n pipeline.on('trackinfo', transmuxer.trigger.bind(transmuxer, 'trackinfo'));\n pipeline.on('id3Frame', function (event) {\n // add this to every single emitted segment even though it's only needed for the first\n event.dispatchType = pipeline.metadataStream.dispatchType; // keep original time, can be adjusted if needed at a higher level\n\n event.cueTime = clock.videoTsToSeconds(event.pts);\n transmuxer.trigger('id3Frame', event);\n });\n pipeline.on('caption', function (event) {\n transmuxer.trigger('caption', event);\n });\n };\n\n var Transmuxer = function Transmuxer(options) {\n var pipeline = null,\n hasFlushed = true;\n options = options || {};\n Transmuxer.prototype.init.call(this);\n options.baseMediaDecodeTime = options.baseMediaDecodeTime || 0;\n\n this.push = function (bytes) {\n if (hasFlushed) {\n var isAac = isLikelyAacData$2(bytes);\n\n if (isAac && (!pipeline || pipeline.type !== 'aac')) {\n pipeline = aacPipeline(options);\n setupPipelineListeners(pipeline, this);\n } else if (!isAac && (!pipeline || pipeline.type !== 'ts')) {\n pipeline = tsPipeline(options);\n setupPipelineListeners(pipeline, this);\n }\n\n hasFlushed = false;\n }\n\n pipeline.headOfPipeline.push(bytes);\n };\n\n this.flush = function () {\n if (!pipeline) {\n return;\n }\n\n hasFlushed = true;\n pipeline.headOfPipeline.flush();\n };\n\n this.partialFlush = function () {\n if (!pipeline) {\n return;\n }\n\n pipeline.headOfPipeline.partialFlush();\n };\n\n this.endTimeline = function () {\n if (!pipeline) {\n return;\n }\n\n pipeline.headOfPipeline.endTimeline();\n };\n\n this.reset = function () {\n if (!pipeline) {\n return;\n }\n\n pipeline.headOfPipeline.reset();\n };\n\n this.setBaseMediaDecodeTime = function (baseMediaDecodeTime) {\n if (!options.keepOriginalTimestamps) {\n options.baseMediaDecodeTime = baseMediaDecodeTime;\n }\n\n if (!pipeline) {\n return;\n }\n\n if (pipeline.tracks.audio) {\n pipeline.tracks.audio.timelineStartInfo.dts = undefined;\n pipeline.tracks.audio.timelineStartInfo.pts = undefined;\n trackDecodeInfo.clearDtsInfo(pipeline.tracks.audio);\n\n if (pipeline.audioRollover) {\n pipeline.audioRollover.discontinuity();\n }\n }\n\n if (pipeline.tracks.video) {\n if (pipeline.videoSegmentStream) {\n pipeline.videoSegmentStream.gopCache_ = [];\n }\n\n pipeline.tracks.video.timelineStartInfo.dts = undefined;\n pipeline.tracks.video.timelineStartInfo.pts = undefined;\n trackDecodeInfo.clearDtsInfo(pipeline.tracks.video); // pipeline.captionStream.reset();\n }\n\n if (pipeline.timestampRollover) {\n pipeline.timestampRollover.discontinuity();\n }\n };\n\n this.setRemux = 
function (val) {\n options.remux = val;\n\n if (pipeline && pipeline.coalesceStream) {\n pipeline.coalesceStream.setRemux(val);\n }\n };\n\n this.setAudioAppendStart = function (audioAppendStart) {\n if (!pipeline || !pipeline.tracks.audio || !pipeline.audioSegmentStream) {\n return;\n }\n\n pipeline.audioSegmentStream.setAudioAppendStart(audioAppendStart);\n }; // TODO GOP alignment support\n // Support may be a bit trickier than with full segment appends, as GOPs may be split\n // and processed in a more granular fashion\n\n\n this.alignGopsWith = function (gopsToAlignWith) {\n return;\n };\n };\n\n Transmuxer.prototype = new stream();\n var transmuxer$2 = Transmuxer;\n\n var partial = {\n Transmuxer: transmuxer$2\n };\n\n var MAX_UINT32 = Math.pow(2, 32);\n\n var parseSidx = function parseSidx(data) {\n var view = new DataView(data.buffer, data.byteOffset, data.byteLength),\n result = {\n version: data[0],\n flags: new Uint8Array(data.subarray(1, 4)),\n references: [],\n referenceId: view.getUint32(4),\n timescale: view.getUint32(8)\n },\n i = 12;\n\n if (result.version === 0) {\n result.earliestPresentationTime = view.getUint32(i);\n result.firstOffset = view.getUint32(i + 4);\n i += 8;\n } else {\n // read 64 bits\n result.earliestPresentationTime = view.getUint32(i) * MAX_UINT32 + view.getUint32(i + 4);\n result.firstOffset = view.getUint32(i + 8) * MAX_UINT32 + view.getUint32(i + 12);\n i += 16;\n }\n\n i += 2; // reserved\n\n var referenceCount = view.getUint16(i);\n i += 2; // start of references\n\n for (; referenceCount > 0; i += 12, referenceCount--) {\n result.references.push({\n referenceType: (data[i] & 0x80) >>> 7,\n referencedSize: view.getUint32(i) & 0x7FFFFFFF,\n subsegmentDuration: view.getUint32(i + 4),\n startsWithSap: !!(data[i + 8] & 0x80),\n sapType: (data[i + 8] & 0x70) >>> 4,\n sapDeltaTime: view.getUint32(i + 8) & 0x0FFFFFFF\n });\n }\n\n return result;\n };\n\n var parseSidx_1 = parseSidx;\n\n var MAX_UINT32$1 = Math.pow(2, 32);\n\n var inspectMp4,\n _textifyMp,\n parseMp4Date = function parseMp4Date(seconds) {\n return new Date(seconds * 1000 - 2082844800000);\n },\n nalParse = function nalParse(avcStream) {\n var avcView = new DataView(avcStream.buffer, avcStream.byteOffset, avcStream.byteLength),\n result = [],\n i,\n length;\n\n for (i = 0; i + 4 < avcStream.length; i += length) {\n length = avcView.getUint32(i);\n i += 4; // bail if this doesn't appear to be an H264 stream\n\n if (length <= 0) {\n result.push('MALFORMED DATA');\n continue;\n }\n\n switch (avcStream[i] & 0x1F) {\n case 0x01:\n result.push('slice_layer_without_partitioning_rbsp');\n break;\n\n case 0x05:\n result.push('slice_layer_without_partitioning_rbsp_idr');\n break;\n\n case 0x06:\n result.push('sei_rbsp');\n break;\n\n case 0x07:\n result.push('seq_parameter_set_rbsp');\n break;\n\n case 0x08:\n result.push('pic_parameter_set_rbsp');\n break;\n\n case 0x09:\n result.push('access_unit_delimiter_rbsp');\n break;\n\n default:\n result.push('UNKNOWN NAL - ' + (avcStream[i] & 0x1F));\n break;\n }\n }\n\n return result;\n },\n // registry of handlers for individual mp4 box types\n parse = {\n // codingname, not a first-class box type. 
stsd entries share the\n // same format as real boxes so the parsing infrastructure can be\n // shared\n avc1: function avc1(data) {\n var view = new DataView(data.buffer, data.byteOffset, data.byteLength);\n return {\n dataReferenceIndex: view.getUint16(6),\n width: view.getUint16(24),\n height: view.getUint16(26),\n horizresolution: view.getUint16(28) + view.getUint16(30) / 16,\n vertresolution: view.getUint16(32) + view.getUint16(34) / 16,\n frameCount: view.getUint16(40),\n depth: view.getUint16(74),\n config: inspectMp4(data.subarray(78, data.byteLength))\n };\n },\n avcC: function avcC(data) {\n var view = new DataView(data.buffer, data.byteOffset, data.byteLength),\n result = {\n configurationVersion: data[0],\n avcProfileIndication: data[1],\n profileCompatibility: data[2],\n avcLevelIndication: data[3],\n lengthSizeMinusOne: data[4] & 0x03,\n sps: [],\n pps: []\n },\n numOfSequenceParameterSets = data[5] & 0x1f,\n numOfPictureParameterSets,\n nalSize,\n offset,\n i; // iterate past any SPSs\n\n offset = 6;\n\n for (i = 0; i < numOfSequenceParameterSets; i++) {\n nalSize = view.getUint16(offset);\n offset += 2;\n result.sps.push(new Uint8Array(data.subarray(offset, offset + nalSize)));\n offset += nalSize;\n } // iterate past any PPSs\n\n\n numOfPictureParameterSets = data[offset];\n offset++;\n\n for (i = 0; i < numOfPictureParameterSets; i++) {\n nalSize = view.getUint16(offset);\n offset += 2;\n result.pps.push(new Uint8Array(data.subarray(offset, offset + nalSize)));\n offset += nalSize;\n }\n\n return result;\n },\n btrt: function btrt(data) {\n var view = new DataView(data.buffer, data.byteOffset, data.byteLength);\n return {\n bufferSizeDB: view.getUint32(0),\n maxBitrate: view.getUint32(4),\n avgBitrate: view.getUint32(8)\n };\n },\n edts: function edts(data) {\n return {\n boxes: inspectMp4(data)\n };\n },\n elst: function elst(data) {\n var view = new DataView(data.buffer, data.byteOffset, data.byteLength),\n result = {\n version: view.getUint8(0),\n flags: new Uint8Array(data.subarray(1, 4)),\n edits: []\n },\n entryCount = view.getUint32(4),\n i;\n\n for (i = 8; entryCount; entryCount--) {\n if (result.version === 0) {\n result.edits.push({\n segmentDuration: view.getUint32(i),\n mediaTime: view.getInt32(i + 4),\n mediaRate: view.getUint16(i + 8) + view.getUint16(i + 10) / (256 * 256)\n });\n i += 12;\n } else {\n result.edits.push({\n segmentDuration: view.getUint32(i) * MAX_UINT32$1 + view.getUint32(i + 4),\n mediaTime: view.getUint32(i + 8) * MAX_UINT32$1 + view.getUint32(i + 12),\n mediaRate: view.getUint16(i + 16) + view.getUint16(i + 18) / (256 * 256)\n });\n i += 20;\n }\n }\n\n return result;\n },\n esds: function esds(data) {\n return {\n version: data[0],\n flags: new Uint8Array(data.subarray(1, 4)),\n esId: data[6] << 8 | data[7],\n streamPriority: data[8] & 0x1f,\n decoderConfig: {\n objectProfileIndication: data[11],\n streamType: data[12] >>> 2 & 0x3f,\n bufferSize: data[13] << 16 | data[14] << 8 | data[15],\n maxBitrate: data[16] << 24 | data[17] << 16 | data[18] << 8 | data[19],\n avgBitrate: data[20] << 24 | data[21] << 16 | data[22] << 8 | data[23],\n decoderConfigDescriptor: {\n tag: data[24],\n length: data[25],\n audioObjectType: data[26] >>> 3 & 0x1f,\n samplingFrequencyIndex: (data[26] & 0x07) << 1 | data[27] >>> 7 & 0x01,\n channelConfiguration: data[27] >>> 3 & 0x0f\n }\n }\n };\n },\n ftyp: function ftyp(data) {\n var view = new DataView(data.buffer, data.byteOffset, data.byteLength),\n result = {\n majorBrand: parseType_1(data.subarray(0, 
4)),\n minorVersion: view.getUint32(4),\n compatibleBrands: []\n },\n i = 8;\n\n while (i < data.byteLength) {\n result.compatibleBrands.push(parseType_1(data.subarray(i, i + 4)));\n i += 4;\n }\n\n return result;\n },\n dinf: function dinf(data) {\n return {\n boxes: inspectMp4(data)\n };\n },\n dref: function dref(data) {\n return {\n version: data[0],\n flags: new Uint8Array(data.subarray(1, 4)),\n dataReferences: inspectMp4(data.subarray(8))\n };\n },\n hdlr: function hdlr(data) {\n var view = new DataView(data.buffer, data.byteOffset, data.byteLength),\n result = {\n version: view.getUint8(0),\n flags: new Uint8Array(data.subarray(1, 4)),\n handlerType: parseType_1(data.subarray(8, 12)),\n name: ''\n },\n i = 8; // parse out the name field\n\n for (i = 24; i < data.byteLength; i++) {\n if (data[i] === 0x00) {\n // the name field is null-terminated\n i++;\n break;\n }\n\n result.name += String.fromCharCode(data[i]);\n } // decode UTF-8 to javascript's internal representation\n // see http://ecmanaut.blogspot.com/2006/07/encoding-decoding-utf8-in-javascript.html\n\n\n result.name = decodeURIComponent(escape(result.name));\n return result;\n },\n mdat: function mdat(data) {\n return {\n byteLength: data.byteLength,\n nals: nalParse(data)\n };\n },\n mdhd: function mdhd(data) {\n var view = new DataView(data.buffer, data.byteOffset, data.byteLength),\n i = 4,\n language,\n result = {\n version: view.getUint8(0),\n flags: new Uint8Array(data.subarray(1, 4)),\n language: ''\n };\n\n if (result.version === 1) {\n i += 4;\n result.creationTime = parseMp4Date(view.getUint32(i)); // truncating top 4 bytes\n\n i += 8;\n result.modificationTime = parseMp4Date(view.getUint32(i)); // truncating top 4 bytes\n\n i += 4;\n result.timescale = view.getUint32(i);\n i += 8;\n result.duration = view.getUint32(i); // truncating top 4 bytes\n } else {\n result.creationTime = parseMp4Date(view.getUint32(i));\n i += 4;\n result.modificationTime = parseMp4Date(view.getUint32(i));\n i += 4;\n result.timescale = view.getUint32(i);\n i += 4;\n result.duration = view.getUint32(i);\n }\n\n i += 4; // language is stored as an ISO-639-2/T code in an array of three 5-bit fields\n // each field is the packed difference between its ASCII value and 0x60\n\n language = view.getUint16(i);\n result.language += String.fromCharCode((language >> 10) + 0x60);\n result.language += String.fromCharCode(((language & 0x03e0) >> 5) + 0x60);\n result.language += String.fromCharCode((language & 0x1f) + 0x60);\n return result;\n },\n mdia: function mdia(data) {\n return {\n boxes: inspectMp4(data)\n };\n },\n mfhd: function mfhd(data) {\n return {\n version: data[0],\n flags: new Uint8Array(data.subarray(1, 4)),\n sequenceNumber: data[4] << 24 | data[5] << 16 | data[6] << 8 | data[7]\n };\n },\n minf: function minf(data) {\n return {\n boxes: inspectMp4(data)\n };\n },\n // codingname, not a first-class box type. 
stsd entries share the\n // same format as real boxes so the parsing infrastructure can be\n // shared\n mp4a: function mp4a(data) {\n var view = new DataView(data.buffer, data.byteOffset, data.byteLength),\n result = {\n // 6 bytes reserved\n dataReferenceIndex: view.getUint16(6),\n // 4 + 4 bytes reserved\n channelcount: view.getUint16(16),\n samplesize: view.getUint16(18),\n // 2 bytes pre_defined\n // 2 bytes reserved\n samplerate: view.getUint16(24) + view.getUint16(26) / 65536\n }; // if there are more bytes to process, assume this is an ISO/IEC\n // 14496-14 MP4AudioSampleEntry and parse the ESDBox\n\n if (data.byteLength > 28) {\n result.streamDescriptor = inspectMp4(data.subarray(28))[0];\n }\n\n return result;\n },\n moof: function moof(data) {\n return {\n boxes: inspectMp4(data)\n };\n },\n moov: function moov(data) {\n return {\n boxes: inspectMp4(data)\n };\n },\n mvex: function mvex(data) {\n return {\n boxes: inspectMp4(data)\n };\n },\n mvhd: function mvhd(data) {\n var view = new DataView(data.buffer, data.byteOffset, data.byteLength),\n i = 4,\n result = {\n version: view.getUint8(0),\n flags: new Uint8Array(data.subarray(1, 4))\n };\n\n if (result.version === 1) {\n i += 4;\n result.creationTime = parseMp4Date(view.getUint32(i)); // truncating top 4 bytes\n\n i += 8;\n result.modificationTime = parseMp4Date(view.getUint32(i)); // truncating top 4 bytes\n\n i += 4;\n result.timescale = view.getUint32(i);\n i += 8;\n result.duration = view.getUint32(i); // truncating top 4 bytes\n } else {\n result.creationTime = parseMp4Date(view.getUint32(i));\n i += 4;\n result.modificationTime = parseMp4Date(view.getUint32(i));\n i += 4;\n result.timescale = view.getUint32(i);\n i += 4;\n result.duration = view.getUint32(i);\n }\n\n i += 4; // convert fixed-point, base 16 back to a number\n\n result.rate = view.getUint16(i) + view.getUint16(i + 2) / 16;\n i += 4;\n result.volume = view.getUint8(i) + view.getUint8(i + 1) / 8;\n i += 2;\n i += 2;\n i += 2 * 4;\n result.matrix = new Uint32Array(data.subarray(i, i + 9 * 4));\n i += 9 * 4;\n i += 6 * 4;\n result.nextTrackId = view.getUint32(i);\n return result;\n },\n pdin: function pdin(data) {\n var view = new DataView(data.buffer, data.byteOffset, data.byteLength);\n return {\n version: view.getUint8(0),\n flags: new Uint8Array(data.subarray(1, 4)),\n rate: view.getUint32(4),\n initialDelay: view.getUint32(8)\n };\n },\n sdtp: function sdtp(data) {\n var result = {\n version: data[0],\n flags: new Uint8Array(data.subarray(1, 4)),\n samples: []\n },\n i;\n\n for (i = 4; i < data.byteLength; i++) {\n result.samples.push({\n dependsOn: (data[i] & 0x30) >> 4,\n isDependedOn: (data[i] & 0x0c) >> 2,\n hasRedundancy: data[i] & 0x03\n });\n }\n\n return result;\n },\n sidx: parseSidx_1,\n smhd: function smhd(data) {\n return {\n version: data[0],\n flags: new Uint8Array(data.subarray(1, 4)),\n balance: data[4] + data[5] / 256\n };\n },\n stbl: function stbl(data) {\n return {\n boxes: inspectMp4(data)\n };\n },\n ctts: function ctts(data) {\n var view = new DataView(data.buffer, data.byteOffset, data.byteLength),\n result = {\n version: view.getUint8(0),\n flags: new Uint8Array(data.subarray(1, 4)),\n compositionOffsets: []\n },\n entryCount = view.getUint32(4),\n i;\n\n for (i = 8; entryCount; i += 8, entryCount--) {\n result.compositionOffsets.push({\n sampleCount: view.getUint32(i),\n sampleOffset: view[result.version === 0 ? 
'getUint32' : 'getInt32'](i + 4)\n });\n }\n\n return result;\n },\n stss: function stss(data) {\n var view = new DataView(data.buffer, data.byteOffset, data.byteLength),\n result = {\n version: view.getUint8(0),\n flags: new Uint8Array(data.subarray(1, 4)),\n syncSamples: []\n },\n entryCount = view.getUint32(4),\n i;\n\n for (i = 8; entryCount; i += 4, entryCount--) {\n result.syncSamples.push(view.getUint32(i));\n }\n\n return result;\n },\n stco: function stco(data) {\n var view = new DataView(data.buffer, data.byteOffset, data.byteLength),\n result = {\n version: data[0],\n flags: new Uint8Array(data.subarray(1, 4)),\n chunkOffsets: []\n },\n entryCount = view.getUint32(4),\n i;\n\n for (i = 8; entryCount; i += 4, entryCount--) {\n result.chunkOffsets.push(view.getUint32(i));\n }\n\n return result;\n },\n stsc: function stsc(data) {\n var view = new DataView(data.buffer, data.byteOffset, data.byteLength),\n entryCount = view.getUint32(4),\n result = {\n version: data[0],\n flags: new Uint8Array(data.subarray(1, 4)),\n sampleToChunks: []\n },\n i;\n\n for (i = 8; entryCount; i += 12, entryCount--) {\n result.sampleToChunks.push({\n firstChunk: view.getUint32(i),\n samplesPerChunk: view.getUint32(i + 4),\n sampleDescriptionIndex: view.getUint32(i + 8)\n });\n }\n\n return result;\n },\n stsd: function stsd(data) {\n return {\n version: data[0],\n flags: new Uint8Array(data.subarray(1, 4)),\n sampleDescriptions: inspectMp4(data.subarray(8))\n };\n },\n stsz: function stsz(data) {\n var view = new DataView(data.buffer, data.byteOffset, data.byteLength),\n result = {\n version: data[0],\n flags: new Uint8Array(data.subarray(1, 4)),\n sampleSize: view.getUint32(4),\n entries: []\n },\n i;\n\n for (i = 12; i < data.byteLength; i += 4) {\n result.entries.push(view.getUint32(i));\n }\n\n return result;\n },\n stts: function stts(data) {\n var view = new DataView(data.buffer, data.byteOffset, data.byteLength),\n result = {\n version: data[0],\n flags: new Uint8Array(data.subarray(1, 4)),\n timeToSamples: []\n },\n entryCount = view.getUint32(4),\n i;\n\n for (i = 8; entryCount; i += 8, entryCount--) {\n result.timeToSamples.push({\n sampleCount: view.getUint32(i),\n sampleDelta: view.getUint32(i + 4)\n });\n }\n\n return result;\n },\n styp: function styp(data) {\n return parse.ftyp(data);\n },\n tfdt: parseTfdt,\n tfhd: parseTfhd,\n tkhd: function tkhd(data) {\n var view = new DataView(data.buffer, data.byteOffset, data.byteLength),\n i = 4,\n result = {\n version: view.getUint8(0),\n flags: new Uint8Array(data.subarray(1, 4))\n };\n\n if (result.version === 1) {\n i += 4;\n result.creationTime = parseMp4Date(view.getUint32(i)); // truncating top 4 bytes\n\n i += 8;\n result.modificationTime = parseMp4Date(view.getUint32(i)); // truncating top 4 bytes\n\n i += 4;\n result.trackId = view.getUint32(i);\n i += 4;\n i += 8;\n result.duration = view.getUint32(i); // truncating top 4 bytes\n } else {\n result.creationTime = parseMp4Date(view.getUint32(i));\n i += 4;\n result.modificationTime = parseMp4Date(view.getUint32(i));\n i += 4;\n result.trackId = view.getUint32(i);\n i += 4;\n i += 4;\n result.duration = view.getUint32(i);\n }\n\n i += 4;\n i += 2 * 4;\n result.layer = view.getUint16(i);\n i += 2;\n result.alternateGroup = view.getUint16(i);\n i += 2; // convert fixed-point, base 16 back to a number\n\n result.volume = view.getUint8(i) + view.getUint8(i + 1) / 8;\n i += 2;\n i += 2;\n result.matrix = new Uint32Array(data.subarray(i, i + 9 * 4));\n i += 9 * 4;\n result.width = 
view.getUint16(i) + view.getUint16(i + 2) / 65536;\n i += 4;\n result.height = view.getUint16(i) + view.getUint16(i + 2) / 65536;\n return result;\n },\n traf: function traf(data) {\n return {\n boxes: inspectMp4(data)\n };\n },\n trak: function trak(data) {\n return {\n boxes: inspectMp4(data)\n };\n },\n trex: function trex(data) {\n var view = new DataView(data.buffer, data.byteOffset, data.byteLength);\n return {\n version: data[0],\n flags: new Uint8Array(data.subarray(1, 4)),\n trackId: view.getUint32(4),\n defaultSampleDescriptionIndex: view.getUint32(8),\n defaultSampleDuration: view.getUint32(12),\n defaultSampleSize: view.getUint32(16),\n sampleDependsOn: data[20] & 0x03,\n sampleIsDependedOn: (data[21] & 0xc0) >> 6,\n sampleHasRedundancy: (data[21] & 0x30) >> 4,\n samplePaddingValue: (data[21] & 0x0e) >> 1,\n sampleIsDifferenceSample: !!(data[21] & 0x01),\n sampleDegradationPriority: view.getUint16(22)\n };\n },\n trun: parseTrun,\n 'url ': function url(data) {\n return {\n version: data[0],\n flags: new Uint8Array(data.subarray(1, 4))\n };\n },\n vmhd: function vmhd(data) {\n var view = new DataView(data.buffer, data.byteOffset, data.byteLength);\n return {\n version: data[0],\n flags: new Uint8Array(data.subarray(1, 4)),\n graphicsmode: view.getUint16(4),\n opcolor: new Uint16Array([view.getUint16(6), view.getUint16(8), view.getUint16(10)])\n };\n }\n };\n /**\n * Return a javascript array of box objects parsed from an ISO base\n * media file.\n * @param data {Uint8Array} the binary data of the media to be inspected\n * @return {array} a javascript array of potentially nested box objects\n */\n\n\n inspectMp4 = function inspectMp4(data) {\n var i = 0,\n result = [],\n view,\n size,\n type,\n end,\n box; // Convert data from Uint8Array to ArrayBuffer, to follow DataView API\n\n var ab = new ArrayBuffer(data.length);\n var v = new Uint8Array(ab);\n\n for (var z = 0; z < data.length; ++z) {\n v[z] = data[z];\n }\n\n view = new DataView(ab);\n\n while (i < data.byteLength) {\n // parse box data\n size = view.getUint32(i);\n type = parseType_1(data.subarray(i + 4, i + 8));\n end = size > 1 ? i + size : data.byteLength; // parse type-specific data\n\n box = (parse[type] || function (data) {\n return {\n data: data\n };\n })(data.subarray(i + 8, end));\n\n box.size = size;\n box.type = type; // store this box and move to the next\n\n result.push(box);\n i = end;\n }\n\n return result;\n };\n /**\n * Returns a textual representation of the javascript representation\n * of an MP4 file. You can use it as an alternative to\n * JSON.stringify() to compare inspected MP4s.\n * @param inspectedMp4 {array} the parsed array of boxes in an MP4\n * file\n * @param depth {number} (optional) the number of ancestor boxes of\n * the elements of inspectedMp4. 
Assumed to be zero if unspecified.\n * @return {string} a text representation of the parsed MP4\n */\n\n\n _textifyMp = function textifyMp4(inspectedMp4, depth) {\n var indent;\n depth = depth || 0;\n indent = new Array(depth * 2 + 1).join(' '); // iterate over all the boxes\n\n return inspectedMp4.map(function (box, index) {\n // list the box type first at the current indentation level\n return indent + box.type + '\\n' + // the type is already included and handle child boxes separately\n Object.keys(box).filter(function (key) {\n return key !== 'type' && key !== 'boxes'; // output all the box properties\n }).map(function (key) {\n var prefix = indent + ' ' + key + ': ',\n value = box[key]; // print out raw bytes as hexadecimal\n\n if (value instanceof Uint8Array || value instanceof Uint32Array) {\n var bytes = Array.prototype.slice.call(new Uint8Array(value.buffer, value.byteOffset, value.byteLength)).map(function (byte) {\n return ' ' + ('00' + byte.toString(16)).slice(-2);\n }).join('').match(/.{1,24}/g);\n\n if (!bytes) {\n return prefix + '<>';\n }\n\n if (bytes.length === 1) {\n return prefix + '<' + bytes.join('').slice(1) + '>';\n }\n\n return prefix + '<\\n' + bytes.map(function (line) {\n return indent + ' ' + line;\n }).join('\\n') + '\\n' + indent + ' >';\n } // stringify generic objects\n\n\n return prefix + JSON.stringify(value, null, 2).split('\\n').map(function (line, index) {\n if (index === 0) {\n return line;\n }\n\n return indent + ' ' + line;\n }).join('\\n');\n }).join('\\n') + ( // recursively textify the child boxes\n box.boxes ? '\\n' + _textifyMp(box.boxes, depth + 1) : '');\n }).join('\\n');\n };\n\n var mp4Inspector = {\n inspect: inspectMp4,\n textify: _textifyMp,\n parseType: parseType_1,\n findBox: findBox_1,\n parseTraf: parse.traf,\n parseTfdt: parse.tfdt,\n parseHdlr: parse.hdlr,\n parseTfhd: parse.tfhd,\n parseTrun: parse.trun,\n parseSidx: parse.sidx\n };\n\n /**\n * mux.js\n *\n * Copyright (c) Brightcove\n * Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE\n */\n\n var tagTypes = {\n 0x08: 'audio',\n 0x09: 'video',\n 0x12: 'metadata'\n },\n hex = function hex(val) {\n return '0x' + ('00' + val.toString(16)).slice(-2).toUpperCase();\n },\n hexStringList = function hexStringList(data) {\n var arr = [],\n i;\n\n while (data.byteLength > 0) {\n i = 0;\n arr.push(hex(data[i++]));\n data = data.subarray(i);\n }\n\n return arr.join(' ');\n },\n parseAVCTag = function parseAVCTag(tag, obj) {\n var avcPacketTypes = ['AVC Sequence Header', 'AVC NALU', 'AVC End-of-Sequence'],\n compositionTime = (tag[1] & parseInt('01111111', 2)) << 16 | tag[2] << 8 | tag[3];\n obj = obj || {};\n obj.avcPacketType = avcPacketTypes[tag[0]];\n obj.CompositionTime = tag[1] & parseInt('10000000', 2) ? 
-compositionTime : compositionTime;\n\n if (tag[0] === 1) {\n obj.nalUnitTypeRaw = hexStringList(tag.subarray(4, 100));\n } else {\n obj.data = hexStringList(tag.subarray(4));\n }\n\n return obj;\n },\n parseVideoTag = function parseVideoTag(tag, obj) {\n var frameTypes = ['Unknown', 'Keyframe (for AVC, a seekable frame)', 'Inter frame (for AVC, a nonseekable frame)', 'Disposable inter frame (H.263 only)', 'Generated keyframe (reserved for server use only)', 'Video info/command frame'],\n codecID = tag[0] & parseInt('00001111', 2);\n obj = obj || {};\n obj.frameType = frameTypes[(tag[0] & parseInt('11110000', 2)) >>> 4];\n obj.codecID = codecID;\n\n if (codecID === 7) {\n return parseAVCTag(tag.subarray(1), obj);\n }\n\n return obj;\n },\n parseAACTag = function parseAACTag(tag, obj) {\n var packetTypes = ['AAC Sequence Header', 'AAC Raw'];\n obj = obj || {};\n obj.aacPacketType = packetTypes[tag[0]];\n obj.data = hexStringList(tag.subarray(1));\n return obj;\n },\n parseAudioTag = function parseAudioTag(tag, obj) {\n var formatTable = ['Linear PCM, platform endian', 'ADPCM', 'MP3', 'Linear PCM, little endian', 'Nellymoser 16-kHz mono', 'Nellymoser 8-kHz mono', 'Nellymoser', 'G.711 A-law logarithmic PCM', 'G.711 mu-law logarithmic PCM', 'reserved', 'AAC', 'Speex', 'MP3 8-Khz', 'Device-specific sound'],\n samplingRateTable = ['5.5-kHz', '11-kHz', '22-kHz', '44-kHz'],\n soundFormat = (tag[0] & parseInt('11110000', 2)) >>> 4;\n obj = obj || {};\n obj.soundFormat = formatTable[soundFormat];\n obj.soundRate = samplingRateTable[(tag[0] & parseInt('00001100', 2)) >>> 2];\n obj.soundSize = (tag[0] & parseInt('00000010', 2)) >>> 1 ? '16-bit' : '8-bit';\n obj.soundType = tag[0] & parseInt('00000001', 2) ? 'Stereo' : 'Mono';\n\n if (soundFormat === 10) {\n return parseAACTag(tag.subarray(1), obj);\n }\n\n return obj;\n },\n parseGenericTag = function parseGenericTag(tag) {\n return {\n tagType: tagTypes[tag[0]],\n dataSize: tag[1] << 16 | tag[2] << 8 | tag[3],\n timestamp: tag[7] << 24 | tag[4] << 16 | tag[5] << 8 | tag[6],\n streamID: tag[8] << 16 | tag[9] << 8 | tag[10]\n };\n },\n inspectFlvTag = function inspectFlvTag(tag) {\n var header = parseGenericTag(tag);\n\n switch (tag[0]) {\n case 0x08:\n parseAudioTag(tag.subarray(11), header);\n break;\n\n case 0x09:\n parseVideoTag(tag.subarray(11), header);\n break;\n }\n\n return header;\n },\n inspectFlv = function inspectFlv(bytes) {\n var i = 9,\n // header\n dataSize,\n parsedResults = [],\n tag; // traverse the tags\n\n i += 4; // skip previous tag size\n\n while (i < bytes.byteLength) {\n dataSize = bytes[i + 1] << 16;\n dataSize |= bytes[i + 2] << 8;\n dataSize |= bytes[i + 3];\n dataSize += 11;\n tag = bytes.subarray(i, i + dataSize);\n parsedResults.push(inspectFlvTag(tag));\n i += dataSize + 4;\n }\n\n return parsedResults;\n },\n textifyFlv = function textifyFlv(flvTagArray) {\n return JSON.stringify(flvTagArray, null, 2);\n };\n\n var flvInspector = {\n inspectTag: inspectFlvTag,\n inspect: inspectFlv,\n textify: textifyFlv\n };\n\n var parsePid = function parsePid(packet) {\n var pid = packet[1] & 0x1f;\n pid <<= 8;\n pid |= packet[2];\n return pid;\n };\n\n var parsePayloadUnitStartIndicator = function parsePayloadUnitStartIndicator(packet) {\n return !!(packet[1] & 0x40);\n };\n\n var parseAdaptionField = function parseAdaptionField(packet) {\n var offset = 0; // if an adaption field is present, its length is specified by the\n // fifth byte of the TS packet header. 
The adaptation field is\n // used to add stuffing to PES packets that don't fill a complete\n // TS packet, and to specify some forms of timing and control data\n // that we do not currently use.\n\n if ((packet[3] & 0x30) >>> 4 > 0x01) {\n offset += packet[4] + 1;\n }\n\n return offset;\n };\n\n var parseType$2 = function parseType(packet, pmtPid) {\n var pid = parsePid(packet);\n\n if (pid === 0) {\n return 'pat';\n } else if (pid === pmtPid) {\n return 'pmt';\n } else if (pmtPid) {\n return 'pes';\n }\n\n return null;\n };\n\n var parsePat = function parsePat(packet) {\n var pusi = parsePayloadUnitStartIndicator(packet);\n var offset = 4 + parseAdaptionField(packet);\n\n if (pusi) {\n offset += packet[offset] + 1;\n }\n\n return (packet[offset + 10] & 0x1f) << 8 | packet[offset + 11];\n };\n\n var parsePmt = function parsePmt(packet) {\n var programMapTable = {};\n var pusi = parsePayloadUnitStartIndicator(packet);\n var payloadOffset = 4 + parseAdaptionField(packet);\n\n if (pusi) {\n payloadOffset += packet[payloadOffset] + 1;\n } // PMTs can be sent ahead of the time when they should actually\n // take effect. We don't believe this should ever be the case\n // for HLS but we'll ignore \"forward\" PMT declarations if we see\n // them. Future PMT declarations have the current_next_indicator\n // set to zero.\n\n\n if (!(packet[payloadOffset + 5] & 0x01)) {\n return;\n }\n\n var sectionLength, tableEnd, programInfoLength; // the mapping table ends at the end of the current section\n\n sectionLength = (packet[payloadOffset + 1] & 0x0f) << 8 | packet[payloadOffset + 2];\n tableEnd = 3 + sectionLength - 4; // to determine where the table is, we have to figure out how\n // long the program info descriptors are\n\n programInfoLength = (packet[payloadOffset + 10] & 0x0f) << 8 | packet[payloadOffset + 11]; // advance the offset to the first entry in the mapping table\n\n var offset = 12 + programInfoLength;\n\n while (offset < tableEnd) {\n var i = payloadOffset + offset; // add an entry that maps the elementary_pid to the stream_type\n\n programMapTable[(packet[i + 1] & 0x1F) << 8 | packet[i + 2]] = packet[i]; // move to the next table entry\n // skip past the elementary stream descriptors, if present\n\n offset += ((packet[i + 3] & 0x0F) << 8 | packet[i + 4]) + 5;\n }\n\n return programMapTable;\n };\n\n var parsePesType = function parsePesType(packet, programMapTable) {\n var pid = parsePid(packet);\n var type = programMapTable[pid];\n\n switch (type) {\n case streamTypes.H264_STREAM_TYPE:\n return 'video';\n\n case streamTypes.ADTS_STREAM_TYPE:\n return 'audio';\n\n case streamTypes.METADATA_STREAM_TYPE:\n return 'timed-metadata';\n\n default:\n return null;\n }\n };\n\n var parsePesTime = function parsePesTime(packet) {\n var pusi = parsePayloadUnitStartIndicator(packet);\n\n if (!pusi) {\n return null;\n }\n\n var offset = 4 + parseAdaptionField(packet);\n\n if (offset >= packet.byteLength) {\n // From the H 222.0 MPEG-TS spec\n // \"For transport stream packets carrying PES packets, stuffing is needed when there\n // is insufficient PES packet data to completely fill the transport stream packet\n // payload bytes. 
Stuffing is accomplished by defining an adaptation field longer than\n // the sum of the lengths of the data elements in it, so that the payload bytes\n // remaining after the adaptation field exactly accommodates the available PES packet\n // data.\"\n //\n // If the offset is >= the length of the packet, then the packet contains no data\n // and instead is just adaption field stuffing bytes\n return null;\n }\n\n var pes = null;\n var ptsDtsFlags; // PES packets may be annotated with a PTS value, or a PTS value\n // and a DTS value. Determine what combination of values is\n // available to work with.\n\n ptsDtsFlags = packet[offset + 7]; // PTS and DTS are normally stored as a 33-bit number. Javascript\n // performs all bitwise operations on 32-bit integers but javascript\n // supports a much greater range (52-bits) of integer using standard\n // mathematical operations.\n // We construct a 31-bit value using bitwise operators over the 31\n // most significant bits and then multiply by 4 (equal to a left-shift\n // of 2) before we add the final 2 least significant bits of the\n // timestamp (equal to an OR.)\n\n if (ptsDtsFlags & 0xC0) {\n pes = {}; // the PTS and DTS are not written out directly. For information\n // on how they are encoded, see\n // http://dvd.sourceforge.net/dvdinfo/pes-hdr.html\n\n pes.pts = (packet[offset + 9] & 0x0E) << 27 | (packet[offset + 10] & 0xFF) << 20 | (packet[offset + 11] & 0xFE) << 12 | (packet[offset + 12] & 0xFF) << 5 | (packet[offset + 13] & 0xFE) >>> 3;\n pes.pts *= 4; // Left shift by 2\n\n pes.pts += (packet[offset + 13] & 0x06) >>> 1; // OR by the two LSBs\n\n pes.dts = pes.pts;\n\n if (ptsDtsFlags & 0x40) {\n pes.dts = (packet[offset + 14] & 0x0E) << 27 | (packet[offset + 15] & 0xFF) << 20 | (packet[offset + 16] & 0xFE) << 12 | (packet[offset + 17] & 0xFF) << 5 | (packet[offset + 18] & 0xFE) >>> 3;\n pes.dts *= 4; // Left shift by 2\n\n pes.dts += (packet[offset + 18] & 0x06) >>> 1; // OR by the two LSBs\n }\n }\n\n return pes;\n };\n\n var parseNalUnitType = function parseNalUnitType(type) {\n switch (type) {\n case 0x05:\n return 'slice_layer_without_partitioning_rbsp_idr';\n\n case 0x06:\n return 'sei_rbsp';\n\n case 0x07:\n return 'seq_parameter_set_rbsp';\n\n case 0x08:\n return 'pic_parameter_set_rbsp';\n\n case 0x09:\n return 'access_unit_delimiter_rbsp';\n\n default:\n return null;\n }\n };\n\n var videoPacketContainsKeyFrame = function videoPacketContainsKeyFrame(packet) {\n var offset = 4 + parseAdaptionField(packet);\n var frameBuffer = packet.subarray(offset);\n var frameI = 0;\n var frameSyncPoint = 0;\n var foundKeyFrame = false;\n var nalType; // advance the sync point to a NAL start, if necessary\n\n for (; frameSyncPoint < frameBuffer.byteLength - 3; frameSyncPoint++) {\n if (frameBuffer[frameSyncPoint + 2] === 1) {\n // the sync point is properly aligned\n frameI = frameSyncPoint + 5;\n break;\n }\n }\n\n while (frameI < frameBuffer.byteLength) {\n // look at the current byte to determine if we've hit the end of\n // a NAL unit boundary\n switch (frameBuffer[frameI]) {\n case 0:\n // skip past non-sync sequences\n if (frameBuffer[frameI - 1] !== 0) {\n frameI += 2;\n break;\n } else if (frameBuffer[frameI - 2] !== 0) {\n frameI++;\n break;\n }\n\n if (frameSyncPoint + 3 !== frameI - 2) {\n nalType = parseNalUnitType(frameBuffer[frameSyncPoint + 3] & 0x1f);\n\n if (nalType === 'slice_layer_without_partitioning_rbsp_idr') {\n foundKeyFrame = true;\n }\n } // drop trailing zeroes\n\n\n do {\n frameI++;\n } while 
(frameBuffer[frameI] !== 1 && frameI < frameBuffer.length);\n\n frameSyncPoint = frameI - 2;\n frameI += 3;\n break;\n\n case 1:\n // skip past non-sync sequences\n if (frameBuffer[frameI - 1] !== 0 || frameBuffer[frameI - 2] !== 0) {\n frameI += 3;\n break;\n }\n\n nalType = parseNalUnitType(frameBuffer[frameSyncPoint + 3] & 0x1f);\n\n if (nalType === 'slice_layer_without_partitioning_rbsp_idr') {\n foundKeyFrame = true;\n }\n\n frameSyncPoint = frameI - 2;\n frameI += 3;\n break;\n\n default:\n // the current byte isn't a one or zero, so it cannot be part\n // of a sync sequence\n frameI += 3;\n break;\n }\n }\n\n frameBuffer = frameBuffer.subarray(frameSyncPoint);\n frameI -= frameSyncPoint;\n frameSyncPoint = 0; // parse the final nal\n\n if (frameBuffer && frameBuffer.byteLength > 3) {\n nalType = parseNalUnitType(frameBuffer[frameSyncPoint + 3] & 0x1f);\n\n if (nalType === 'slice_layer_without_partitioning_rbsp_idr') {\n foundKeyFrame = true;\n }\n }\n\n return foundKeyFrame;\n };\n\n var probe$1 = {\n parseType: parseType$2,\n parsePat: parsePat,\n parsePmt: parsePmt,\n parsePayloadUnitStartIndicator: parsePayloadUnitStartIndicator,\n parsePesType: parsePesType,\n parsePesTime: parsePesTime,\n videoPacketContainsKeyFrame: videoPacketContainsKeyFrame\n };\n\n var handleRollover$1 = timestampRolloverStream.handleRollover;\n var probe$2 = {};\n probe$2.ts = probe$1;\n probe$2.aac = utils;\n var ONE_SECOND_IN_TS$5 = clock.ONE_SECOND_IN_TS;\n var MP2T_PACKET_LENGTH$1 = 188,\n // bytes\n SYNC_BYTE$1 = 0x47;\n /**\n * walks through segment data looking for pat and pmt packets to parse out\n * program map table information\n */\n\n var parsePsi_ = function parsePsi_(bytes, pmt) {\n var startIndex = 0,\n endIndex = MP2T_PACKET_LENGTH$1,\n packet,\n type;\n\n while (endIndex < bytes.byteLength) {\n // Look for a pair of start and end sync bytes in the data..\n if (bytes[startIndex] === SYNC_BYTE$1 && bytes[endIndex] === SYNC_BYTE$1) {\n // We found a packet\n packet = bytes.subarray(startIndex, endIndex);\n type = probe$2.ts.parseType(packet, pmt.pid);\n\n switch (type) {\n case 'pat':\n if (!pmt.pid) {\n pmt.pid = probe$2.ts.parsePat(packet);\n }\n\n break;\n\n case 'pmt':\n if (!pmt.table) {\n pmt.table = probe$2.ts.parsePmt(packet);\n }\n\n break;\n } // Found the pat and pmt, we can stop walking the segment\n\n\n if (pmt.pid && pmt.table) {\n return;\n }\n\n startIndex += MP2T_PACKET_LENGTH$1;\n endIndex += MP2T_PACKET_LENGTH$1;\n continue;\n } // If we get here, we have somehow become de-synchronized and we need to step\n // forward one byte at a time until we find a pair of sync bytes that denote\n // a packet\n\n\n startIndex++;\n endIndex++;\n }\n };\n /**\n * walks through the segment data from the start and end to get timing information\n * for the first and last audio pes packets\n */\n\n\n var parseAudioPes_ = function parseAudioPes_(bytes, pmt, result) {\n var startIndex = 0,\n endIndex = MP2T_PACKET_LENGTH$1,\n packet,\n type,\n pesType,\n pusi,\n parsed;\n var endLoop = false; // Start walking from start of segment to get first audio packet\n\n while (endIndex <= bytes.byteLength) {\n // Look for a pair of start and end sync bytes in the data..\n if (bytes[startIndex] === SYNC_BYTE$1 && (bytes[endIndex] === SYNC_BYTE$1 || endIndex === bytes.byteLength)) {\n // We found a packet\n packet = bytes.subarray(startIndex, endIndex);\n type = probe$2.ts.parseType(packet, pmt.pid);\n\n switch (type) {\n case 'pes':\n pesType = probe$2.ts.parsePesType(packet, pmt.table);\n pusi = 
probe$2.ts.parsePayloadUnitStartIndicator(packet);\n\n if (pesType === 'audio' && pusi) {\n parsed = probe$2.ts.parsePesTime(packet);\n\n if (parsed) {\n parsed.type = 'audio';\n result.audio.push(parsed);\n endLoop = true;\n }\n }\n\n break;\n }\n\n if (endLoop) {\n break;\n }\n\n startIndex += MP2T_PACKET_LENGTH$1;\n endIndex += MP2T_PACKET_LENGTH$1;\n continue;\n } // If we get here, we have somehow become de-synchronized and we need to step\n // forward one byte at a time until we find a pair of sync bytes that denote\n // a packet\n\n\n startIndex++;\n endIndex++;\n } // Start walking from end of segment to get last audio packet\n\n\n endIndex = bytes.byteLength;\n startIndex = endIndex - MP2T_PACKET_LENGTH$1;\n endLoop = false;\n\n while (startIndex >= 0) {\n // Look for a pair of start and end sync bytes in the data..\n if (bytes[startIndex] === SYNC_BYTE$1 && (bytes[endIndex] === SYNC_BYTE$1 || endIndex === bytes.byteLength)) {\n // We found a packet\n packet = bytes.subarray(startIndex, endIndex);\n type = probe$2.ts.parseType(packet, pmt.pid);\n\n switch (type) {\n case 'pes':\n pesType = probe$2.ts.parsePesType(packet, pmt.table);\n pusi = probe$2.ts.parsePayloadUnitStartIndicator(packet);\n\n if (pesType === 'audio' && pusi) {\n parsed = probe$2.ts.parsePesTime(packet);\n\n if (parsed) {\n parsed.type = 'audio';\n result.audio.push(parsed);\n endLoop = true;\n }\n }\n\n break;\n }\n\n if (endLoop) {\n break;\n }\n\n startIndex -= MP2T_PACKET_LENGTH$1;\n endIndex -= MP2T_PACKET_LENGTH$1;\n continue;\n } // If we get here, we have somehow become de-synchronized and we need to step\n // forward one byte at a time until we find a pair of sync bytes that denote\n // a packet\n\n\n startIndex--;\n endIndex--;\n }\n };\n /**\n * walks through the segment data from the start and end to get timing information\n * for the first and last video pes packets as well as timing information for the first\n * key frame.\n */\n\n\n var parseVideoPes_ = function parseVideoPes_(bytes, pmt, result) {\n var startIndex = 0,\n endIndex = MP2T_PACKET_LENGTH$1,\n packet,\n type,\n pesType,\n pusi,\n parsed,\n frame,\n i,\n pes;\n var endLoop = false;\n var currentFrame = {\n data: [],\n size: 0\n }; // Start walking from start of segment to get first video packet\n\n while (endIndex < bytes.byteLength) {\n // Look for a pair of start and end sync bytes in the data..\n if (bytes[startIndex] === SYNC_BYTE$1 && bytes[endIndex] === SYNC_BYTE$1) {\n // We found a packet\n packet = bytes.subarray(startIndex, endIndex);\n type = probe$2.ts.parseType(packet, pmt.pid);\n\n switch (type) {\n case 'pes':\n pesType = probe$2.ts.parsePesType(packet, pmt.table);\n pusi = probe$2.ts.parsePayloadUnitStartIndicator(packet);\n\n if (pesType === 'video') {\n if (pusi && !endLoop) {\n parsed = probe$2.ts.parsePesTime(packet);\n\n if (parsed) {\n parsed.type = 'video';\n result.video.push(parsed);\n endLoop = true;\n }\n }\n\n if (!result.firstKeyFrame) {\n if (pusi) {\n if (currentFrame.size !== 0) {\n frame = new Uint8Array(currentFrame.size);\n i = 0;\n\n while (currentFrame.data.length) {\n pes = currentFrame.data.shift();\n frame.set(pes, i);\n i += pes.byteLength;\n }\n\n if (probe$2.ts.videoPacketContainsKeyFrame(frame)) {\n var firstKeyFrame = probe$2.ts.parsePesTime(frame); // PTS/DTS may not be available. 
Simply *not* setting\n // the keyframe seems to work fine with HLS playback\n // and definitely preferable to a crash with TypeError...\n\n if (firstKeyFrame) {\n result.firstKeyFrame = firstKeyFrame;\n result.firstKeyFrame.type = 'video';\n } else {\n // eslint-disable-next-line\n console.warn('Failed to extract PTS/DTS from PES at first keyframe. ' + 'This could be an unusual TS segment, or else mux.js did not ' + 'parse your TS segment correctly. If you know your TS ' + 'segments do contain PTS/DTS on keyframes please file a bug ' + 'report! You can try ffprobe to double check for yourself.');\n }\n }\n\n currentFrame.size = 0;\n }\n }\n\n currentFrame.data.push(packet);\n currentFrame.size += packet.byteLength;\n }\n }\n\n break;\n }\n\n if (endLoop && result.firstKeyFrame) {\n break;\n }\n\n startIndex += MP2T_PACKET_LENGTH$1;\n endIndex += MP2T_PACKET_LENGTH$1;\n continue;\n } // If we get here, we have somehow become de-synchronized and we need to step\n // forward one byte at a time until we find a pair of sync bytes that denote\n // a packet\n\n\n startIndex++;\n endIndex++;\n } // Start walking from end of segment to get last video packet\n\n\n endIndex = bytes.byteLength;\n startIndex = endIndex - MP2T_PACKET_LENGTH$1;\n endLoop = false;\n\n while (startIndex >= 0) {\n // Look for a pair of start and end sync bytes in the data..\n if (bytes[startIndex] === SYNC_BYTE$1 && bytes[endIndex] === SYNC_BYTE$1) {\n // We found a packet\n packet = bytes.subarray(startIndex, endIndex);\n type = probe$2.ts.parseType(packet, pmt.pid);\n\n switch (type) {\n case 'pes':\n pesType = probe$2.ts.parsePesType(packet, pmt.table);\n pusi = probe$2.ts.parsePayloadUnitStartIndicator(packet);\n\n if (pesType === 'video' && pusi) {\n parsed = probe$2.ts.parsePesTime(packet);\n\n if (parsed) {\n parsed.type = 'video';\n result.video.push(parsed);\n endLoop = true;\n }\n }\n\n break;\n }\n\n if (endLoop) {\n break;\n }\n\n startIndex -= MP2T_PACKET_LENGTH$1;\n endIndex -= MP2T_PACKET_LENGTH$1;\n continue;\n } // If we get here, we have somehow become de-synchronized and we need to step\n // forward one byte at a time until we find a pair of sync bytes that denote\n // a packet\n\n\n startIndex--;\n endIndex--;\n }\n };\n /**\n * Adjusts the timestamp information for the segment to account for\n * rollover and convert to seconds based on pes packet timescale (90khz clock)\n */\n\n\n var adjustTimestamp_ = function adjustTimestamp_(segmentInfo, baseTimestamp) {\n if (segmentInfo.audio && segmentInfo.audio.length) {\n var audioBaseTimestamp = baseTimestamp;\n\n if (typeof audioBaseTimestamp === 'undefined' || isNaN(audioBaseTimestamp)) {\n audioBaseTimestamp = segmentInfo.audio[0].dts;\n }\n\n segmentInfo.audio.forEach(function (info) {\n info.dts = handleRollover$1(info.dts, audioBaseTimestamp);\n info.pts = handleRollover$1(info.pts, audioBaseTimestamp); // time in seconds\n\n info.dtsTime = info.dts / ONE_SECOND_IN_TS$5;\n info.ptsTime = info.pts / ONE_SECOND_IN_TS$5;\n });\n }\n\n if (segmentInfo.video && segmentInfo.video.length) {\n var videoBaseTimestamp = baseTimestamp;\n\n if (typeof videoBaseTimestamp === 'undefined' || isNaN(videoBaseTimestamp)) {\n videoBaseTimestamp = segmentInfo.video[0].dts;\n }\n\n segmentInfo.video.forEach(function (info) {\n info.dts = handleRollover$1(info.dts, videoBaseTimestamp);\n info.pts = handleRollover$1(info.pts, videoBaseTimestamp); // time in seconds\n\n info.dtsTime = info.dts / ONE_SECOND_IN_TS$5;\n info.ptsTime = info.pts / ONE_SECOND_IN_TS$5;\n });\n\n if 
(segmentInfo.firstKeyFrame) {\n var frame = segmentInfo.firstKeyFrame;\n frame.dts = handleRollover$1(frame.dts, videoBaseTimestamp);\n frame.pts = handleRollover$1(frame.pts, videoBaseTimestamp); // time in seconds\n\n frame.dtsTime = frame.dts / ONE_SECOND_IN_TS$5;\n frame.ptsTime = frame.pts / ONE_SECOND_IN_TS$5;\n }\n }\n };\n /**\n * inspects the aac data stream for start and end time information\n */\n\n\n var inspectAac_ = function inspectAac_(bytes) {\n var endLoop = false,\n audioCount = 0,\n sampleRate = null,\n timestamp = null,\n frameSize = 0,\n byteIndex = 0,\n packet;\n\n while (bytes.length - byteIndex >= 3) {\n var type = probe$2.aac.parseType(bytes, byteIndex);\n\n switch (type) {\n case 'timed-metadata':\n // Exit early because we don't have enough to parse\n // the ID3 tag header\n if (bytes.length - byteIndex < 10) {\n endLoop = true;\n break;\n }\n\n frameSize = probe$2.aac.parseId3TagSize(bytes, byteIndex); // Exit early if we don't have enough in the buffer\n // to emit a full packet\n\n if (frameSize > bytes.length) {\n endLoop = true;\n break;\n }\n\n if (timestamp === null) {\n packet = bytes.subarray(byteIndex, byteIndex + frameSize);\n timestamp = probe$2.aac.parseAacTimestamp(packet);\n }\n\n byteIndex += frameSize;\n break;\n\n case 'audio':\n // Exit early because we don't have enough to parse\n // the ADTS frame header\n if (bytes.length - byteIndex < 7) {\n endLoop = true;\n break;\n }\n\n frameSize = probe$2.aac.parseAdtsSize(bytes, byteIndex); // Exit early if we don't have enough in the buffer\n // to emit a full packet\n\n if (frameSize > bytes.length) {\n endLoop = true;\n break;\n }\n\n if (sampleRate === null) {\n packet = bytes.subarray(byteIndex, byteIndex + frameSize);\n sampleRate = probe$2.aac.parseSampleRate(packet);\n }\n\n audioCount++;\n byteIndex += frameSize;\n break;\n\n default:\n byteIndex++;\n break;\n }\n\n if (endLoop) {\n return null;\n }\n }\n\n if (sampleRate === null || timestamp === null) {\n return null;\n }\n\n var audioTimescale = ONE_SECOND_IN_TS$5 / sampleRate;\n var result = {\n audio: [{\n type: 'audio',\n dts: timestamp,\n pts: timestamp\n }, {\n type: 'audio',\n dts: timestamp + audioCount * 1024 * audioTimescale,\n pts: timestamp + audioCount * 1024 * audioTimescale\n }]\n };\n return result;\n };\n /**\n * inspects the transport stream segment data for start and end time information\n * of the audio and video tracks (when present) as well as the first key frame's\n * start time.\n */\n\n\n var inspectTs_ = function inspectTs_(bytes) {\n var pmt = {\n pid: null,\n table: null\n };\n var result = {};\n parsePsi_(bytes, pmt);\n\n for (var pid in pmt.table) {\n if (pmt.table.hasOwnProperty(pid)) {\n var type = pmt.table[pid];\n\n switch (type) {\n case streamTypes.H264_STREAM_TYPE:\n result.video = [];\n parseVideoPes_(bytes, pmt, result);\n\n if (result.video.length === 0) {\n delete result.video;\n }\n\n break;\n\n case streamTypes.ADTS_STREAM_TYPE:\n result.audio = [];\n parseAudioPes_(bytes, pmt, result);\n\n if (result.audio.length === 0) {\n delete result.audio;\n }\n\n break;\n }\n }\n }\n\n return result;\n };\n /**\n * Inspects segment byte data and returns an object with start and end timing information\n *\n * @param {Uint8Array} bytes The segment byte data\n * @param {Number} baseTimestamp Relative reference timestamp used when adjusting frame\n * timestamps for rollover. 
This value must be in 90khz clock.\n * @return {Object} Object containing start and end frame timing info of segment.\n */\n\n\n var inspect = function inspect(bytes, baseTimestamp) {\n var isAacData = probe$2.aac.isLikelyAacData(bytes);\n var result;\n\n if (isAacData) {\n result = inspectAac_(bytes);\n } else {\n result = inspectTs_(bytes);\n }\n\n if (!result || !result.audio && !result.video) {\n return null;\n }\n\n adjustTimestamp_(result, baseTimestamp);\n return result;\n };\n\n var tsInspector = {\n inspect: inspect,\n parseAudioPes_: parseAudioPes_\n };\n\n var muxjs = {\n codecs: codecs,\n mp4: mp4,\n flv: flv,\n mp2t: m2ts$1,\n partial: partial\n }; // include all the tools when the full library is required\n\n muxjs.mp4.tools = mp4Inspector;\n muxjs.flv.tools = flvInspector;\n muxjs.mp2t.tools = tsInspector;\n var lib = muxjs;\n\n return lib;\n\n})));\n\n/*\n @license\n Shaka Player\n Copyright 2016 Google LLC\n SPDX-License-Identifier: Apache-2.0\n*/\n(function(){var innerGlobal=typeof window!=\"undefined\"?window:global;var exportTo={};(function(window,global,module){/*\n\n Copyright The Closure Library Authors.\n SPDX-License-Identifier: Apache-2.0\n*/\nvar $jscomp=$jscomp||{};$jscomp.scope={};$jscomp.arrayIteratorImpl=function(a){var b=0;return function(){return b>>0,$jscomp.propertyToPolyfillSymbol[e]=$jscomp.IS_SYMBOL_NATIVE?\n$jscomp.global.Symbol(e):$jscomp.POLYFILL_PREFIX+c+\"$\"+e),$jscomp.defineProperty(d,$jscomp.propertyToPolyfillSymbol[e],{configurable:!0,writable:!0,value:b})))};$jscomp.initSymbol=function(){};\n$jscomp.polyfill(\"Symbol\",function(a){if(a)return a;var b=function(f,g){this.$jscomp$symbol$id_=f;$jscomp.defineProperty(this,\"description\",{configurable:!0,writable:!0,value:g})};b.prototype.toString=function(){return this.$jscomp$symbol$id_};var c=\"jscomp_symbol_\"+(1E9*Math.random()>>>0)+\"_\",d=0,e=function(f){if(this instanceof e)throw new TypeError(\"Symbol is not a constructor\");return new b(c+(f||\"\")+\"_\"+d++,f)};return e},\"es6\",\"es3\");\n$jscomp.polyfill(\"Symbol.iterator\",function(a){if(a)return a;a=Symbol(\"Symbol.iterator\");for(var b=\"Array Int8Array Uint8Array Uint8ClampedArray Int16Array Uint16Array Int32Array Uint32Array Float32Array Float64Array\".split(\" \"),c=0;cc&&(c=Math.max(c+e,0));ce||1114111=e?c+=String.fromCharCode(e):(e-=65536,c+=String.fromCharCode(e>>>10&1023|55296),c+=String.fromCharCode(e&1023|56320))}return c}},\"es6\",\"es3\");\n$jscomp.findInternal=function(a,b,c){a instanceof String&&(a=String(a));for(var d=a.length,e=0;e=f}},\"es6\",\"es3\");$jscomp.polyfill(\"Object.entries\",function(a){return a?a:function(b){var c=[],d;for(d in b)$jscomp.owns(b,d)&&c.push([d,b[d]]);return c}},\"es8\",\"es3\");\n$jscomp.assign=$jscomp.TRUST_ES6_POLYFILLS&&\"function\"==typeof Object.assign?Object.assign:function(a,b){for(var c=1;cb||1342177279>>=1)c+=c;return d}},\"es6\",\"es3\");\n$jscomp.polyfill(\"Number.isFinite\",function(a){return a?a:function(b){return\"number\"!==typeof b?!1:!isNaN(b)&&Infinity!==b&&-Infinity!==b}},\"es6\",\"es3\");$jscomp.polyfill(\"Object.values\",function(a){return a?a:function(b){var c=[],d;for(d in b)$jscomp.owns(b,d)&&c.push(b[d]);return c}},\"es8\",\"es3\");$jscomp.polyfill(\"Math.log2\",function(a){return a?a:function(b){return Math.log(b)/Math.LN2}},\"es6\",\"es3\");$jscomp.polyfill(\"Number.MAX_SAFE_INTEGER\",function(){return 9007199254740991},\"es6\",\"es3\");\n$jscomp.polyfill(\"String.prototype.endsWith\",function(a){return a?a:function(b,c){var 
d=$jscomp.checkStringArgs(this,b,\"endsWith\");b+=\"\";void 0===c&&(c=d.length);c=Math.max(0,Math.min(c|0,d.length));for(var e=b.length;0=e}},\"es6\",\"es3\");$jscomp.polyfill(\"Math.trunc\",function(a){return a?a:function(b){b=Number(b);if(isNaN(b)||Infinity===b||-Infinity===b||0===b)return b;var c=Math.floor(Math.abs(b));return 0>b?-c:c}},\"es6\",\"es3\");\n$jscomp.atMethod=function(a){a=Math.trunc(a)||0;0>a&&(a+=this.length);if(!(0>a||a>=this.length))return this[a]};$jscomp.polyfill(\"Array.prototype.at\",function(a){return a?a:$jscomp.atMethod},\"es_next\",\"es5\");$jscomp.typedArrayAt=function(a){return a?a:$jscomp.atMethod};$jscomp.polyfill(\"Int8Array.prototype.at\",$jscomp.typedArrayAt,\"es_next\",\"es5\");$jscomp.polyfill(\"Uint8Array.prototype.at\",$jscomp.typedArrayAt,\"es_next\",\"es5\");\n$jscomp.polyfill(\"Uint8ClampedArray.prototype.at\",$jscomp.typedArrayAt,\"es_next\",\"es5\");$jscomp.polyfill(\"Int16Array.prototype.at\",$jscomp.typedArrayAt,\"es_next\",\"es5\");$jscomp.polyfill(\"Uint16Array.prototype.at\",$jscomp.typedArrayAt,\"es_next\",\"es5\");$jscomp.polyfill(\"Int32Array.prototype.at\",$jscomp.typedArrayAt,\"es_next\",\"es5\");$jscomp.polyfill(\"Uint32Array.prototype.at\",$jscomp.typedArrayAt,\"es_next\",\"es5\");$jscomp.polyfill(\"Float32Array.prototype.at\",$jscomp.typedArrayAt,\"es_next\",\"es5\");\n$jscomp.polyfill(\"Float64Array.prototype.at\",$jscomp.typedArrayAt,\"es_next\",\"es5\");$jscomp.polyfill(\"String.prototype.at\",function(a){return a?a:$jscomp.atMethod},\"es_next\",\"es5\");var COMPILED=!0,goog=goog||{};goog.global=this||self;\ngoog.exportPath_=function(a,b,c,d){a=a.split(\".\");d=d||goog.global;a[0]in d||\"undefined\"==typeof d.execScript||d.execScript(\"var \"+a[0]);for(var e;a.length&&(e=a.shift());)if(a.length||void 0===b)d=d[e]&&d[e]!==Object.prototype[e]?d[e]:d[e]={};else if(!c&&goog.isObject(b)&&goog.isObject(d[e]))for(var f in b)b.hasOwnProperty(f)&&(d[e][f]=b[f]);else d[e]=b};\ngoog.define=function(a,b){if(!COMPILED){var c=goog.global.CLOSURE_UNCOMPILED_DEFINES,d=goog.global.CLOSURE_DEFINES;c&&void 0===c.nodeType&&Object.prototype.hasOwnProperty.call(c,a)?b=c[a]:d&&void 0===d.nodeType&&Object.prototype.hasOwnProperty.call(d,a)&&(b=d[a])}return b};goog.FEATURESET_YEAR=2012;goog.DEBUG=!0;goog.LOCALE=\"en\";goog.getLocale=function(){return goog.LOCALE};goog.TRUSTED_SITE=!0;goog.DISALLOW_TEST_ONLY_CODE=COMPILED&&!goog.DEBUG;goog.ENABLE_CHROME_APP_SAFE_SCRIPT_LOADING=!1;\ngoog.provide=function(a){if(goog.isInModuleLoader_())throw Error(\"goog.provide cannot be used within a module.\");if(!COMPILED&&goog.isProvided_(a))throw Error('Namespace \"'+a+'\" already declared.');goog.constructNamespace_(a)};goog.constructNamespace_=function(a,b,c){if(!COMPILED){delete goog.implicitNamespaces_[a];for(var d=a;(d=d.substring(0,d.lastIndexOf(\".\")))&&!goog.getObjectByName(d);)goog.implicitNamespaces_[d]=!0}goog.exportPath_(a,b,c)};goog.NONCE_PATTERN_=/^[\\w+/_-]+[=]{0,2}$/;\ngoog.getScriptNonce_=function(a){a=(a||goog.global).document;return(a=a.querySelector&&a.querySelector(\"script[nonce]\"))&&(a=a.nonce||a.getAttribute(\"nonce\"))&&goog.NONCE_PATTERN_.test(a)?a:\"\"};goog.VALID_MODULE_RE_=/^[a-zA-Z_$][a-zA-Z0-9._$]*$/;\ngoog.module=function(a){if(\"string\"!==typeof a||!a||-1==a.search(goog.VALID_MODULE_RE_))throw Error(\"Invalid module identifier\");if(!goog.isInGoogModuleLoader_())throw Error(\"Module \"+a+\" has been loaded incorrectly. Note, modules cannot be loaded as normal scripts. They require some kind of pre-processing step. 
You're likely trying to load a module via a script tag or as a part of a concatenated bundle without rewriting the module. For more info see: https://github.com/google/closure-library/wiki/goog.module:-an-ES6-module-like-alternative-to-goog.provide.\");if(goog.moduleLoaderState_.moduleName)throw Error(\"goog.module may only be called once per module.\");\ngoog.moduleLoaderState_.moduleName=a;if(!COMPILED){if(goog.isProvided_(a))throw Error('Namespace \"'+a+'\" already declared.');delete goog.implicitNamespaces_[a]}};goog.module.get=function(a){return goog.module.getInternal_(a)};goog.module.getInternal_=function(a){if(!COMPILED){if(a in goog.loadedModules_)return goog.loadedModules_[a].exports;if(!goog.implicitNamespaces_[a])return a=goog.getObjectByName(a),null!=a?a:null}return null};goog.ModuleType={ES6:\"es6\",GOOG:\"goog\"};goog.moduleLoaderState_=null;\ngoog.isInModuleLoader_=function(){return goog.isInGoogModuleLoader_()||goog.isInEs6ModuleLoader_()};goog.isInGoogModuleLoader_=function(){return!!goog.moduleLoaderState_&&goog.moduleLoaderState_.type==goog.ModuleType.GOOG};goog.isInEs6ModuleLoader_=function(){if(goog.moduleLoaderState_&&goog.moduleLoaderState_.type==goog.ModuleType.ES6)return!0;var a=goog.global.$jscomp;return a?\"function\"!=typeof a.getCurrentModulePath?!1:!!a.getCurrentModulePath():!1};\ngoog.module.declareLegacyNamespace=function(){if(!COMPILED&&!goog.isInGoogModuleLoader_())throw Error(\"goog.module.declareLegacyNamespace must be called from within a goog.module\");if(!COMPILED&&!goog.moduleLoaderState_.moduleName)throw Error(\"goog.module must be called prior to goog.module.declareLegacyNamespace.\");goog.moduleLoaderState_.declareLegacyNamespace=!0};\ngoog.declareModuleId=function(a){if(!COMPILED){if(!goog.isInEs6ModuleLoader_())throw Error(\"goog.declareModuleId may only be called from within an ES6 module\");if(goog.moduleLoaderState_&&goog.moduleLoaderState_.moduleName)throw Error(\"goog.declareModuleId may only be called once per module.\");if(a in goog.loadedModules_)throw Error('Module with namespace \"'+a+'\" already exists.');}if(goog.moduleLoaderState_)goog.moduleLoaderState_.moduleName=a;else{var b=goog.global.$jscomp;if(!b||\"function\"!=typeof b.getCurrentModulePath)throw Error('Module with namespace \"'+\na+'\" has been loaded incorrectly.');b=b.require(b.getCurrentModulePath());goog.loadedModules_[a]={exports:b,type:goog.ModuleType.ES6,moduleId:a}}};goog.setTestOnly=function(a){if(goog.DISALLOW_TEST_ONLY_CODE)throw a=a||\"\",Error(\"Importing test-only code into non-debug environment\"+(a?\": \"+a:\".\"));};goog.forwardDeclare=function(a){};COMPILED||(goog.isProvided_=function(a){return a in goog.loadedModules_||!goog.implicitNamespaces_[a]&&null!=goog.getObjectByName(a)},goog.implicitNamespaces_={\"goog.module\":!0});\ngoog.getObjectByName=function(a,b){a=a.split(\".\");b=b||goog.global;for(var c=0;c>>0);goog.uidCounter_=0;goog.cloneObject=function(a){var b=goog.typeOf(a);if(\"object\"==b||\"array\"==b){if(\"function\"===typeof a.clone)return a.clone();if(\"undefined\"!==typeof Map&&a instanceof Map)return new Map(a);if(\"undefined\"!==typeof Set&&a instanceof Set)return new Set(a);b=\"array\"==b?[]:{};for(var c in a)b[c]=goog.cloneObject(a[c]);return b}return a};goog.bindNative_=function(a,b,c){return a.call.apply(a.bind,arguments)};\ngoog.bindJs_=function(a,b,c){if(!a)throw Error();if(2\").replace(/'/g,\"'\").replace(/"/g,'\"').replace(/&/g,\"&\"));b&&(a=a.replace(/\\{\\$([^}]+)}/g,function(d,e){return null!=b&&e in b?b[e]:d}));return 
a};goog.getMsgWithFallback=function(a,b){return a};goog.exportSymbol=function(a,b,c){goog.exportPath_(a,b,!0,c)};goog.exportProperty=function(a,b,c){a[b]=c};\ngoog.inherits=function(a,b){function c(){}c.prototype=b.prototype;a.superClass_=b.prototype;a.prototype=new c;a.prototype.constructor=a;a.base=function(d,e,f){for(var g=Array(arguments.length-2),h=2;h\\x3c/script>';f+=\"\";f=goog.Dependency.defer_?f+(\"document.getElementById('script-\"+e+\"').onload = function() {\\n goog.Dependency.callback_('\"+e+\"', this);\\n};\\n\"):f+(\"goog.Dependency.callback_('\"+e+\"', document.getElementById('script-\"+e+\"'));\");f+=\"\\x3c/script>\";b.write(goog.TRUSTED_TYPES_POLICY_?goog.TRUSTED_TYPES_POLICY_.createHTML(f):f)}else{var g=b.createElement(\"script\");g.defer=goog.Dependency.defer_;g.async=!1;c&&(g.nonce=\nc);g.onload=function(){g.onload=null;a.loaded()};g.src=goog.TRUSTED_TYPES_POLICY_?goog.TRUSTED_TYPES_POLICY_.createScriptURL(this.path):this.path;b.head.appendChild(g)}}else goog.logToConsole_(\"Cannot use default debug loader outside of HTML documents.\"),\"deps.js\"==this.relativePath?(goog.logToConsole_(\"Consider setting CLOSURE_IMPORT_SCRIPT before loading base.js, or setting CLOSURE_NO_DEPS to true.\"),a.loaded()):a.pause()},goog.Es6ModuleDependency=function(a,b,c,d,e){goog.Dependency.call(this,a,\nb,c,d,e)},goog.inherits(goog.Es6ModuleDependency,goog.Dependency),goog.Es6ModuleDependency.prototype.load=function(a){function b(l,m){var n=\"\",p=goog.getScriptNonce_();p&&(n=' nonce=\"'+p+'\"');l=m?'